我想问大佬们一个问题,我现在在把两张图片相减计算信息熵,图片比较特殊只有一个维度,但是需要计算两个图片数据集之间两两图片之间的信息熵,每个数据大小为(8000,1,224,224)。
question 1、图片数据集太大了放不进ti.field,该怎么办?
question 2、目前taichi好像不支持数据切片,没办法直接将2个field相减,那我该怎么加速这个过程呢?
question 3、taichi怎么使用两张显卡加速呢?
import taichi.math as tm
import random
import taichi as ti
import scipy.io as scio
import time
from torch.utils.data import Dataset
from scipy.linalg import toeplitz
import numpy as np
from itertools import combinations, permutations
ti.init(arch=ti.gpu, default_fp=ti.f64)

# Dataset 1 and dataset 2; each is expected to have shape (8000, 1, 224, 224).
can_data = np.load('./data/0waveimg.npy')
cal_data = np.load('./data/1waveimg.npy')
# Unpack the 4-D shape directly instead of indexing each axis separately.
num, channel, h, w = can_data.shape
# Drop the singleton channel axis so the fields are plain (num, h, w).
can_data = np.squeeze(can_data, 1)
cal_data = np.squeeze(cal_data, 1)

# NOTE(review): question 1 — a dataset this large (8000*224*224 f64 ≈ 3.2 GB
# per field) may exceed GPU memory; if allocation fails, process the images
# in chunks (re-fill a smaller field per batch) rather than loading all at once.
can_datagpu = ti.field(dtype=float, shape=(num, h, w))
can_datagpu.from_numpy(can_data)
cal_datagpu = ti.field(dtype=float, shape=(num, h, w))
cal_datagpu.from_numpy(cal_data)

# Accumulator for the averaged entropy map. A freshly allocated ti.field is
# already zero-initialized, so the numpy zeros/from_numpy round-trip is unnecessary.
H_comb = ti.field(dtype=float, shape=(channel, h, w))
@ti.func
def ESI(can_img, cal_img, h=0.5):
    """Entropy contribution of the difference between two pixel values.

    Uses a Gaussian kernel density estimate of the pixel difference with
    bandwidth ``h`` and returns ``-P * log10(P)``.

    Args:
        can_img: pixel value from the first image (scalar element, not a field).
        cal_img: pixel value from the second image (scalar element, not a field).
        h: Gaussian kernel bandwidth (default 0.5).

    Returns:
        The per-pixel entropy term ``-P * log10(P)``; 0.0 when ``P``
        underflows to zero.
    """
    diff = can_img - cal_img
    temp = -diff * diff / (2 * h ** 2)
    P = tm.exp(temp) / (tm.sqrt(2 * np.pi) * h)
    # Guard against floating-point underflow: for large |diff|, exp(temp)
    # flushes to 0 and -P*log(P) would be NaN, while the mathematical limit
    # of -P*log(P) as P -> 0 is 0.
    H = 0.0
    if P > 0:
        H = -P * tm.log(P) / tm.log(10)
    return H
@ti.kernel
def ESIkeneal():
    """Fill H_comb with the mean pairwise entropy map over all image pairs.

    For every pixel position, averages ESI(can[i], cal[j]) over all
    num * num ordered image pairs and writes the result into H_comb.
    """
    # Parallelize over pixel positions (the outermost loop is what Taichi
    # parallelizes); the pair loops run serially per pixel, so there is no
    # race on an accumulator and no shared `count` is needed.
    for c, y, x in ti.ndrange(channel, h, w):
        acc = 0.0
        for i in range(num):
            for j in range(num):
                # Question 2: Taichi cannot subtract or slice whole fields
                # inside a kernel, so index individual elements and let the
                # scalar ESI() do the elementwise math.
                acc += ESI(can_datagpu[i, y, x], cal_datagpu[j, y, x])
        # Normalize by the number of pairs; assigning to the global field
        # name (as the original did) is illegal in a kernel — write through
        # an index instead.
        H_comb[c, y, x] = acc / (num * num)
# Time the kernel, then copy the result back to the host and save it.
start = time.time()
ESIkeneal()
# Taichi GPU kernels launch asynchronously; block until the kernel has
# actually finished so end - start measures the real computation time.
ti.sync()
end = time.time()
H_comb_data = H_comb.to_numpy()
np.save('./data/test.npy', H_comb_data)
print(end - start)