Cannot use x.to_numpy() with ti.Tape()

When x.to_numpy() is called inside a ti.Tape() block, the kernel crashes, even when x is a field with needs_grad=False.
Here is the error:

    ---------------------------------------------------------------------------
    RuntimeError                              Traceback (most recent call last)
    <ipython-input-89-2fe73624e060> in <module>
         20     x.to_numpy()
         21     compute_y(x,y) # forward
    ---> 22     y.grad[None] = 3.0 # y.backward(3)s
         23 pre_x.backward(torch.from_numpy(x.grad.to_numpy()))
         24 print('dy/dx =', x.grad.to_numpy())

    c:\users\ericc\anaconda3\envs\pytorch_latest_p37\lib\site-packages\taichi\lang\tape.py in __exit__(self, type, value, tb)
         16         self.runtime.target_tape = None
         17         if self.eval_on_exit:
    ---> 18             self.grad()
         19
         20     def insert(self, func, args):

    c:\users\ericc\anaconda3\envs\pytorch_latest_p37\lib\site-packages\taichi\lang\tape.py in grad(self)
         25         assert self.gradient_evaluated == False, "Gradients of grad can be evaluated only once."
         26         for func, args in reversed(self.calls):
    ---> 27             func.grad(*args)
         28         self.gradient_evaluated = True

    c:\users\ericc\anaconda3\envs\pytorch_latest_p37\lib\site-packages\taichi\lang\shell.py in new_call(*args, **kwargs)
         38     def new_call(*args, **kwargs):
         39         _taichi_skip_traceback = 1
    ---> 40         ret = old_call(*args, **kwargs)
         41         # print's in kernel won't take effect until ti.sync(), discussion:
         42         # https://github.com/taichi-dev/taichi/pull/1303#discussion_r444897102

    c:\users\ericc\anaconda3\envs\pytorch_latest_p37\lib\site-packages\taichi\lang\kernel_impl.py in __call__(self, *args, **kwargs)
        561         assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'
        562         key = self.ensure_compiled(*args)
    --> 563         return self.compiled_functions[key](*args)
        564
        565

    c:\users\ericc\anaconda3\envs\pytorch_latest_p37\lib\site-packages\taichi\lang\kernel_impl.py in func__(*args)
        515                 self.runtime.target_tape.insert(self, args)
        516
    --> 517             t_kernel(launch_ctx)
        518
        519             ret = None

    RuntimeError: [taichi/ir/ir.h:taichi::lang::IRNode::as@225] Assertion failure: is<T>()

Here is the code:

    import taichi as ti
    import numpy as np
    import torch

    ti.init(arch=ti.cuda)

    a = torch.tensor([1.1, 1.1], requires_grad=True)
    pre_x = None
    x = ti.field(float, shape=2, needs_grad=True)
    y = ti.field(float, shape=(), needs_grad=True)
    tar = np.array([1, 1], dtype=float)

    @ti.kernel
    def compute_y(x: ti.template(), y: ti.template()):
        y[None] = (x[0] - tar[0]) ** 2 + (x[1] - tar[1]) ** 2

    with ti.Tape(y):
        pre_x = a ** 2                     # net(x)
        x[0] = pre_x[0]                    # transfer torch -> taichi
        x[1] = pre_x[1]
        x.to_numpy()                       # this call triggers the crash
        compute_y(x, y)                    # forward
        y.grad[None] = 3.0                 # y.backward(3)
    pre_x.backward(torch.from_numpy(x.grad.to_numpy()))
    print('dy/dx =', x.grad.to_numpy())
    print('pre_x =', a.grad)
    print('at x =', x[0], x[1])

Hi Ericcsr, welcome and thanks for reporting this!

I don’t think .to_numpy() can be used in an auto-diff process:

  1. NumPy itself doesn’t support auto-diff.
  2. I don’t think there is a .grad() counterpart for .to_numpy().
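
If you only need the raw values, a minimal workaround sketch (reusing the field names from your snippet, with a simplified stand-in kernel) is to call .to_numpy() only after the Tape block has exited, so the tape records nothing but differentiable kernels:

    import taichi as ti

    ti.init(arch=ti.cpu)

    x = ti.field(float, shape=2, needs_grad=True)
    y = ti.field(float, shape=(), needs_grad=True)

    @ti.kernel
    def compute_y(x: ti.template(), y: ti.template()):
        y[None] = x[0] ** 2 + x[1] ** 2

    x[0], x[1] = 1.1, 1.1
    with ti.Tape(y):
        compute_y(x, y)      # only differentiable kernels inside the tape
    x_np = x.to_numpy()      # safe here: the tape has already exited
    print('dy/dx =', x.grad.to_numpy())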

May I ask whether there is a specific reason you would like to use x.to_numpy() inside an auto-diff process?

The reason I use to_numpy() is that I want to efficiently copy a Taichi field to a torch tensor within the tape, so that I can use intermediate results from the simulation to do something else on the torch side.
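
For that use case, one pattern that might work is wrapping the Taichi computation in a custom torch.autograd.Function, so that all field/tensor copies happen outside the Tape block. A sketch under that assumption, using the field helpers from_torch()/to_torch() (the torch counterparts of to_numpy()) and a simplified stand-in kernel:

    import taichi as ti
    import torch

    ti.init(arch=ti.cpu)

    x = ti.field(float, shape=2, needs_grad=True)
    y = ti.field(float, shape=(), needs_grad=True)

    @ti.kernel
    def compute_y(x: ti.template(), y: ti.template()):
        y[None] = (x[0] - 1.0) ** 2 + (x[1] - 1.0) ** 2

    class TaichiLoss(torch.autograd.Function):
        # Sketch of a bridge: forward runs the Taichi kernel under ti.Tape,
        # backward returns the gradient the tape left in x.grad.
        @staticmethod
        def forward(ctx, inp):
            x.from_torch(inp.detach())   # tensor -> field copy, outside the tape
            with ti.Tape(y):
                compute_y(x, y)          # only kernels inside the tape
            return inp.new_tensor(y[None])

        @staticmethod
        def backward(ctx, grad_output):
            # chain rule: scale dy/dx (left in x.grad by the tape) by the
            # incoming gradient
            return grad_output * x.grad.to_torch()

    a = torch.tensor([1.1, 1.1], requires_grad=True)
    loss = TaichiLoss.apply(a ** 2)      # torch-side intermediate feeds Taichi
    loss.backward(torch.tensor(3.0))
    print('a.grad =', a.grad)

Since ti.Tape(y) seeds y.grad with 1, the y.backward(3) scaling from the snippet above is applied in backward() through grad_output, instead of writing y.grad[None] = 3.0 inside the tape.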