工作环境是ubuntu20.04,python3.9.12,taichi1.1.4,cuda11.3,llvm 10.0.0,commit 1262a70a
最近在尝试用各种不同的语言来进行布料的可微模拟,但都不成功。所以来求教一下。
物理模型参考了git上 lyd405121 的 OpenClothPy
以下是简化后的完整代码:
import taichi as ti

ti.init(arch=ti.gpu, device_memory_fraction=0.3)

# ---------------------------------------------------------------------------
# Simulation hyper-parameters.
# FIX: these names are used throughout the kernels below (`gravity`, `dt`,
# `step`, `learning_rate`) but were lost when the snippet was simplified,
# which makes the code raise NameError. The values here are reasonable
# placeholders — tune/confirm against the original OpenClothPy settings.
# ---------------------------------------------------------------------------
step = 400                              # time steps per rollout (also training epochs)
dt = 0.001                              # integration time step [s]
learning_rate = 1e-2                    # gradient-descent step size for Spring_K
gravity = ti.Vector([0.0, -9.8, 0.0])   # constant gravity acceleration direction

# Field factories: f32 scalars and 3-component f32 vectors.
scalar = lambda: ti.field(dtype=ti.f32)
vec = lambda: ti.Vector.field(3, dtype=ti.f32)

pos = vec()          # vertex positions, per time step
vel = vec()          # vertex velocities, per time step
F = vec()            # accumulated force per vertex, per time step
acc = vec()          # acceleration per vertex, per time step
Spring_K = scalar()  # spring coefficients (the optimized parameters)
loss_n = scalar()    # scalar training loss
mass = scalar()      # per-vertex mass (uniform)
airdamping = scalar()  # air-drag coefficient
z = scalar()         # FIX: scripted boundary coordinate used by simulation();
                     # it was referenced but never declared in the snippet

# Time axis (1024 steps) x 36x36 cloth grid.
ti.root.dense(ti.ijk, (1024, 36, 36)).place(pos, vel, F, acc)
# Spring K values come in three kinds: structural, shear, bend (rows);
# column 0 is the elastic constant, column 1 the damping constant.
ti.root.dense(ti.ij, (3, 2)).place(Spring_K)
ti.root.place(mass, airdamping, loss_n, z)
# Allocate gradient fields for everything placed above (for autodiff).
ti.root.lazy_grad()

# Per-vertex spring table: slot n holds [rest length, neighbor i, neighbor j];
# a rest length of 0 marks an unused slot (up to 12 springs per vertex).
Spring_Date = ti.Vector.field(3, dtype=ti.f32, shape=(36, 36, 12))
@ti.func
def Compute_Force(t: ti.i32, i: ti.i32, j: ti.i32):
    # Accumulate the total force on cloth vertex (i, j) at time step t:
    # gravity + air drag + up to 12 mass-spring connections read from
    # Spring_Date[i, j, :].
    p1 = pos[t - 1, i, j]
    v1 = vel[t - 1, i, j]
    # Base force: gravity scaled by mass, minus velocity-proportional drag.
    # NOTE(review): `gravity` and `dt` are globals defined elsewhere in the
    # file — confirm `gravity` is a 3-vector so this broadcasts correctly.
    F[t, i, j] = gravity * mass[None] - vel[t - 1, i, j] * airdamping[None]
    for n in range(0, 12):
        # Spring_Date[i, j, n] = [rest length, neighbor i, neighbor j];
        # a rest length of 0 marks an unused spring slot.
        Spring_Length = Spring_Date[i, j, n][0]
        if Spring_Length != 0:
            x = ti.cast(Spring_Date[i, j, n][1], ti.i32)
            y = ti.cast(Spring_Date[i, j, n][2], ti.i32)
            p2 = pos[t - 1, x, y]
            # Neighbor velocity via finite difference of its last two positions.
            v2 = (p2 - pos[t - 2, x, y]) / dt
            dv = v1 - v2
            dp = p1 - p2
            dist = dp.norm()
            # Hooke elastic term (AX) plus damping projected onto dp (BX).
            # n // 4 selects the spring type: 0 structural, 1 shear, 2 bend.
            # NOTE(review): divides by `dist` and calls normalized() — two
            # coincident vertices would yield NaN/inf; presumably the rest
            # lengths keep vertices separated, but this is also a known
            # autodiff trouble spot (d/dx of norm at 0) — confirm.
            AX = -Spring_K[n // 4, 0] * (dist - Spring_Length)
            BX = -Spring_K[n // 4, 1] * (dv.dot(dp) / dist)
            F[t, i, j] += dp.normalized() * (AX + BX)
@ti.kernel
def simulation(t: ti.i32):
    # Advance the 36x36 cloth one time step using position Verlet, storing
    # the result at time index t (reads indices t-1 and t-2).
    for i, j in ti.ndrange(36, 36):
        Compute_Force(t, i, j)
        if ((i == 0 and j == 0) or (i == 35 and j == 0)):
            # The two "held" corner vertices follow a scripted swing: z ramps
            # from its initial value up to 2.0 and y traces a parabola of z.
            # NOTE(review): z[None] is read-modified-written here by TWO
            # parallel threads (i==0 and i==35) within one kernel launch —
            # this is a data race and also advances z twice per step; moving
            # the z update to Python scope (or a serial kernel) would be
            # deterministic. Left as-is to preserve behavior.
            if z[None] <= 2.0:
                z[None] += (4.0 / 400)
            else:
                z[None] = 2.0
            y = -0.5 * z[None] ** 2 + 2
            x = 0.0
            if i == 0:
                x = -2.0
            else:
                x = 2.0
            pos[t, i, j] = ti.Vector([x, y, z[None]])
            acc[t, i, j] = F[t, i, j] / mass[None]
            # Velocity via finite difference over two steps of the history.
            vel[t, i, j] = (pos[t, i, j] - pos[t - 2, i, j]) / dt
        else:
            acc[t, i, j] = F[t, i, j] / mass[None]
            # Position Verlet: x_t = 2*x_{t-1} - x_{t-2} + a * dt^2.
            ax = pos[t - 1, i, j] * 2 - pos[t - 2, i, j] + acc[t, i, j] * dt * dt
            # Ground collision: clamp y at the floor plane.
            # NOTE(review): this hard clamp is non-differentiable at contact
            # and may produce zero/undefined gradients through the tape —
            # a likely culprit if training diverges. Confirm.
            if ax.y < 0:
                ax.y = 0
            pos[t, i, j] = ax
            vel[t, i, j] = (pos[t, i, j] - pos[t - 2, i, j]) / dt
@ti.kernel
def Compute_Loss(t: ti.i32):
    # Loss: Euclidean distance between two mid-edge vertices at step t
    # (row 18, opposite columns 0 and 35 of the 36x36 grid).
    diff = pos[t, 18, 0] - pos[t, 18, 35]
    loss_n[None] = diff.norm()
def main():
    """Optimize the spring stiffnesses by gradient descent on the final-step loss.

    Each epoch replays the full cloth rollout under ``ti.Tape`` so that
    d(loss)/d(Spring_K) is recorded, then takes one gradient step.
    """
    # NOTE(review): Reset_Cloth / Grad_Clear are defined elsewhere in the
    # original file (omitted from this simplified snippet).
    Reset_Cloth()
    Grad_Clear()
    for epoch in range(step):
        # Record the whole rollout on the tape; t starts at 2 because the
        # integrator reads the t-1 and t-2 history slots.
        with ti.Tape(loss=loss_n, clear_gradients=True):
            for t in range(2, step):
                simulation(t)
            Compute_Loss(step - 1)
        # FIX: the original bare `print` statement printed nothing —
        # report training progress instead.
        print(f"epoch {epoch}: loss = {loss_n[None]}")
        # FIX: Taichi fields are not iterable in Python scope
        # (`for i, j in Spring_K:` only works inside a kernel), so iterate
        # the known (3, 2) shape explicitly.
        # FIX: the parameter update must happen OUTSIDE the `ti.Tape`
        # context, after the backward pass has populated Spring_K.grad.
        for i in range(3):
            for j in range(2):
                Spring_K[i, j] -= learning_rate * Spring_K.grad[i, j]