# 如何使用 Taichi 批量加速图像处理

Question 1、图片数据集太大了放不进 ti.field
Question 2、目前 Taichi 好像不支持数据切片，没办法直接将两个 field 相减，那我该怎么加速这个过程呢？
Question 3、Taichi 怎么使用两张显卡加速呢？

```python
import taichi.math as tm
import random
import taichi as ti
import scipy.io as scio
import time
from torch.utils.data import Dataset
from scipy.linalg import toeplitz
import numpy as np
from itertools import combinations, permutations
# Initialise Taichi on the GPU with 64-bit floats as the default float type
# (so `dtype=float` below resolves to f64).
ti.init(arch=ti.gpu, default_fp=ti.f64)
# NOTE(review): `can_data` and `cal_data` are never defined in this snippet —
# they must be loaded earlier (presumably via scio.loadmat, given the import).
# Assumes both have shape (num, 1, h, w); axis 1 is squeezed away below — TODO confirm.
num,channel,h,w=can_data.shape[0],can_data.shape[1],can_data.shape[2],can_data.shape[3]
can_data =np.squeeze(can_data,1)
cal_data =np.squeeze(cal_data,1)
'''

'''
# Device-side copies of the two (num, h, w) image stacks.
# NOTE(review): if the full dataset does not fit in one ti.field (question 1),
# allocate the field once for a fixed batch size and stream batches through it
# with repeated from_numpy calls — fields cannot be resized after creation.
can_datagpu = ti.field(dtype=float, shape=(num,h,w))
can_datagpu.from_numpy(can_data)
cal_datagpu = ti.field(dtype=float, shape=(num,h,w))
cal_datagpu.from_numpy(cal_data)
# Accumulator for the averaged entropy map, zero-initialised on the device.
# NOTE(review): shape uses `channel` (pre-squeeze axis 1, presumably 1) even
# though the data fields are (num, h, w) — verify this is intentional.
Hcomb=np.zeros((channel,h,w))
H_comb= ti.field(dtype=float, shape=(channel,h,w))
H_comb.from_numpy(Hcomb)
@ti.func
def ESI(can_img, cal_img, h=0.5):
    """Entropy of the Gaussian-kernel similarity between two pixel values.

    Answer to question 2: Taichi does not support slicing fields or
    subtracting two whole fields inside kernel scope. The idiomatic fix is
    to make the func operate on ONE scalar element at a time and let the
    calling kernel's parallel loop iterate over the field indices.

    Args:
        can_img: a single pixel value read from the candidate field.
        cal_img: a single pixel value read from the calibration field.
        h: Gaussian kernel bandwidth (NOTE: shadows the module-level image
           height `h`; consider renaming to `bandwidth` to avoid confusion).

    Returns:
        The scalar entropy contribution -P*log10(P) for this element pair.
    """
    temp = -tm.pow(can_img - cal_img, 2) / (2 * h ** 2)
    # Gaussian kernel density estimate for this element pair.
    P = tm.exp(temp) / (tm.sqrt(2 * np.pi) * h)
    # Shannon-style entropy term, converted to base 10 via log(P)/log(10).
    H = -P * tm.log(P) / tm.log(10)
    return H

@ti.kernel
def ESIkeneal():
    """Accumulate the averaged pairwise entropy map into H_comb.

    Fixes versus the original:
      * A field cannot be reassigned inside a kernel
        (``H_comb = H + H_comb`` and ``H_comb = H_comb / count`` are
        illegal Taichi IR) — instead we accumulate element-wise with an
        atomic ``+=`` and fold the 1/count normalisation (count is the
        constant num*num) into each addend.
      * ``ESI`` now receives scalar elements rather than whole fields,
        so the heavy loop runs fully parallel on the GPU.

    NOTE(review): the original called ESI with the same two fields for
    every (i, j); the pairing ``can[i] vs cal[j]`` below is the assumed
    intent of the double loop — confirm. H_comb's leading index is 0 on
    the assumption that channel == 1 — confirm.
    """
    # Single outermost loop over every (pair, pixel) combination so Taichi
    # parallelises the whole iteration space.
    for i, j, x, y in ti.ndrange(num, num, h, w):
        H_comb[0, x, y] += ESI(can_datagpu[i, x, y], cal_datagpu[j, x, y]) / (num * num)
# Time the kernel, then copy the result back to the host and save it.
start = time.perf_counter()  # perf_counter: monotonic, highest-resolution timer
ESIkeneal()
# Taichi GPU kernel launches are asynchronous: without a sync the timer
# stops after the *launch*, not after the computation finishes.
ti.sync()
end = time.perf_counter()
H_comb_data = H_comb.to_numpy()  # device -> host copy of the entropy map
np.save('./data/test.npy', H_comb_data)
print(end - start)
```

@Andrewchen 非常欢迎来到Taichi Lang中文论坛。