# NOTE(review): this line is a whitespace-mangled paste — multiple Taichi test
# definitions collapsed onto one physical line; it is NOT valid Python as-is
# (compound statements cannot follow `def ...:` on the same line).
# Contents, in order:
#   1. The tail of a kernel-based test whose enclosing `def` begins before this
#      chunk: a @ti.kernel that writes `val[i] = 45 + i` for i in range(n),
#      followed by a host-side loop asserting `val[i] == i + 45`.
#   2. `test_loop_var_life` (complete): decorated @ti.must_throw(Exception) —
#      it calls `print(i)` after a `ti.static(range(8))` loop, so it appears to
#      check that the static loop variable is not usable after the loop ends
#      — TODO confirm intent against upstream Taichi tests.
#   3. The decorators and opening `@ti.kernel` of
#      `test_loop_var_life_double_iters`, cut off mid-definition.
# TODO(review): restore the original line breaks/indentation from the upstream
# file before making any code changes here.
@ti.kernel def test(): for i in range(n): s = 0 s += 45 print(s) val[i] = s + i print(val[i]) test() for i in range(n): assert val[i] == i + 45 @ti.test(arch=ti.get_host_arch_list()) @ti.must_throw(Exception) def test_loop_var_life(): @ti.kernel def test(): for i in ti.static(range(8)): pass print(i) test() @ti.test(arch=ti.get_host_arch_list()) @ti.must_throw(Exception) def test_loop_var_life_double_iters(): @ti.kernel
# NOTE(review): whitespace-mangled paste — Taichi ndarray tests collapsed onto
# one physical line; NOT valid Python as-is. Contents, in order:
#   1. `_test_scalar_ndarray(dtype, shape)` (complete): builds `ti.ndarray` and
#      asserts that `.shape` equals the tuple form of `shape` (a non-tuple
#      `shape` is normalized to a 1-tuple), `.element_shape` is empty for a
#      scalar ndarray, and `.dtype` round-trips.
#   2. `test_scalar_ndarray_torch` (complete): parametrized over `data_types` x
#      `ndarray_shapes`, skipped when PyTorch is unavailable; delegates to the
#      helper above.
#   3. `test_scalar_ndarray` (complete): same parametrization but with
#      `ndarray_use_torch=False`; delegates to the same helper.
#   4. The start of `_test_vector_ndarray(n, dtype, shape)`, cut off
#      mid-`if isinstance(shape, tuple):` — the rest is outside this chunk.
# `data_types` and `ndarray_shapes` are defined elsewhere in the file —
# presumably module-level parametrization lists; verify before editing.
# TODO(review): restore the original line breaks/indentation from the upstream
# file before making any code changes here.
def _test_scalar_ndarray(dtype, shape): x = ti.ndarray(dtype, shape) if isinstance(shape, tuple): assert x.shape == shape else: assert x.shape == (shape, ) assert x.element_shape == () assert x.dtype == dtype @pytest.mark.parametrize('dtype', data_types) @pytest.mark.parametrize('shape', ndarray_shapes) @pytest.mark.skipif(not ti.has_pytorch(), reason='Pytorch not installed.') @ti.test(arch=ti.get_host_arch_list()) def test_scalar_ndarray_torch(dtype, shape): _test_scalar_ndarray(dtype, shape) @pytest.mark.parametrize('dtype', data_types) @pytest.mark.parametrize('shape', ndarray_shapes) @ti.test(arch=ti.get_host_arch_list(), ndarray_use_torch=False) def test_scalar_ndarray(dtype, shape): _test_scalar_ndarray(dtype, shape) def _test_vector_ndarray(n, dtype, shape): x = ti.Vector.ndarray(n, dtype, shape) if isinstance(shape, tuple):