def decl_sparse_matrix(dtype):
    value_type = cook_dtype(dtype)
    ptr_type = cook_dtype(u64)
    # Treat the sparse matrix argument as a scalar since we only need to pass in the base pointer
    arg_id = impl.get_runtime().prog.decl_arg(ptr_type, False)
    return SparseMatrixProxy(
        _ti_core.make_arg_load_expr(arg_id, ptr_type, False), value_type)
def __init__(self, dtype, shape):
    self.host_accessor = None
    if impl.current_cfg().ndarray_use_torch:
        assert has_pytorch(
        ), "PyTorch must be available if you want to create a Taichi ndarray with PyTorch as its underlying storage."
        self.arr = torch.zeros(shape,
                               dtype=to_pytorch_type(cook_dtype(dtype)))
        if impl.current_cfg().arch == _ti_core.Arch.cuda:
            self.arr = self.arr.cuda()
    else:
        self.arr = _ti_core.Ndarray(impl.get_runtime().prog,
                                    cook_dtype(dtype), shape)
def cast(obj, dtype): """Copy and cast a scalar or a matrix to a specified data type. Must be called in Taichi scope. Args: obj (Union[:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`]): \ Input scalar or matrix. dtype (:mod:`~taichi.types.primitive_types`): A primitive type defined in :mod:`~taichi.types.primitive_types`. Returns: A copy of `obj`, casted to the specified data type `dtype`. Example:: >>> @ti.kernel >>> def test(): >>> x = ti.Matrix([0, 1, 2], ti.i32) >>> y = ti.cast(x, ti.f32) >>> print(y) >>> >>> test() [0.0, 1.0, 2.0] """ dtype = cook_dtype(dtype) if is_taichi_class(obj): # TODO: unify with element_wise_unary return obj.cast(dtype) return expr.Expr(_ti_core.value_cast(expr.Expr(obj).ptr, dtype))
def cast(obj, dtype):
    _taichi_skip_traceback = 1
    dtype = cook_dtype(dtype)
    if is_taichi_class(obj):
        # TODO: unify with element_wise_unary
        return obj.cast(dtype)
    return Expr(_ti_core.value_cast(Expr(obj).ptr, dtype))
def bit_cast(obj, dtype): """Copy and cast a scalar to a specified data type with its underlying bits preserved. Must be called in taichi scope. This function is equivalent to `reinterpret_cast` in C++. Args: obj (:mod:`~taichi.types.primitive_types`): Input scalar. dtype (:mod:`~taichi.types.primitive_types`): Target data type, must have \ the same precision bits as the input (hence `f32` -> `f64` is not allowed). Returns: A copy of `obj`, casted to the specified data type `dtype`. Example:: >>> @ti.kernel >>> def test(): >>> x = 3.14 >>> y = ti.bit_cast(x, ti.i32) >>> print(y) # 1078523331 >>> >>> z = ti.bit_cast(y, ti.f32) >>> print(z) # 3.14 """ dtype = cook_dtype(dtype) if is_taichi_class(obj): raise ValueError('Cannot apply bit_cast on Taichi classes') else: return expr.Expr(_ti_core.bits_cast(expr.Expr(obj).ptr, dtype))
def __init__(self, dtype, arr_shape):
    super().__init__()
    self.dtype = cook_dtype(dtype)
    self.arr = impl.get_runtime().prog.create_ndarray(
        self.dtype, arr_shape)
    self.shape = tuple(self.arr.shape)
    self.element_type = dtype
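# Hedged usage sketch (not part of the snippets above): the public `ti.ndarray`
# constructor is what ends up in a ScalarNdarray __init__ like the one above.
# The dtype and shape below are arbitrary illustrative choices.
import taichi as ti

ti.init(arch=ti.cpu)

arr = ti.ndarray(ti.f32, shape=(4, 4))  # runtime-allocated scalar ndarray
arr.fill(1.0)                           # fill every element with 1.0
print(arr.to_numpy().sum())             # expect 16.0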
def bit_cast(obj, dtype):
    _taichi_skip_traceback = 1
    dtype = cook_dtype(dtype)
    if is_taichi_class(obj):
        raise ValueError('Cannot apply bit_cast on Taichi classes')
    else:
        return Expr(_ti_core.bits_cast(Expr(obj).ptr, dtype))
def __init__(self, **kwargs):
    self.members = {}
    for k, dtype in kwargs.items():
        if isinstance(dtype, CompoundType):
            self.members[k] = dtype
        else:
            self.members[k] = cook_dtype(dtype)
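# Hedged usage sketch (not part of the snippets above): in recent Taichi
# releases the members dict built in the __init__ above is populated through
# `ti.types.struct(**kwargs)`; the field names and dtypes below are illustrative.
import taichi as ti

ti.init(arch=ti.cpu)

particle = ti.types.struct(pos=ti.types.vector(3, ti.f32), mass=ti.f32)

@ti.kernel
def run():
    p = particle(pos=ti.Vector([0.0, 0.0, 0.0]), mass=1.0)
    print(p.mass)  # 1.0

run()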
def decl_scalar_arg(dtype):
    is_ref = False
    if isinstance(dtype, RefType):
        is_ref = True
        dtype = dtype.tp
    dtype = cook_dtype(dtype)
    arg_id = impl.get_runtime().prog.decl_arg(dtype, False)
    return Expr(_ti_core.make_arg_load_expr(arg_id, dtype, is_ref))
def decl_any_arr_arg(dtype, dim, element_shape, layout):
    dtype = cook_dtype(dtype)
    element_dim = len(element_shape)
    arg_id = _ti_core.decl_arr_arg(dtype, dim, element_shape)
    if layout == Layout.AOS:
        element_dim = -element_dim
    return AnyArray(
        _ti_core.make_external_tensor_expr(dtype, dim, arg_id, element_dim),
        element_shape, layout)
def decl_ndarray_arg(dtype, dim, element_shape, layout):
    dtype = cook_dtype(dtype)
    element_dim = len(element_shape)
    arg_id = impl.get_runtime().prog.decl_arr_arg(dtype, dim, element_shape)
    if layout == Layout.AOS:
        element_dim = -element_dim
    return AnyArray(
        _ti_core.make_external_tensor_expr(dtype, dim, arg_id, element_dim,
                                           element_shape), element_shape,
        layout)
def __init__(self, **kwargs):
    self.members = {}
    self.methods = {}
    for k, dtype in kwargs.items():
        if k == '__struct_methods':
            self.methods = dtype
        elif isinstance(dtype, CompoundType):
            self.members[k] = dtype
        else:
            self.members[k] = cook_dtype(dtype)
def random(dtype=float): """The random function. Args: dtype (DataType): Type of the random variable. Returns: A random variable whose type is `dtype`. """ dtype = cook_dtype(dtype) x = expr.Expr(_ti_core.make_rand_expr(dtype)) return impl.expr_init(x)
def field(dtype, shape=None, offset=None, needs_grad=False):
    _taichi_skip_traceback = 1
    dtype = cook_dtype(dtype)

    if isinstance(shape, numbers.Number):
        shape = (shape, )

    if isinstance(offset, numbers.Number):
        offset = (offset, )

    if shape is not None and offset is not None:
        assert len(shape) == len(
            offset
        ), f'The dimensionality of shape and offset must be the same ({len(shape)} != {len(offset)})'

    assert (offset is not None and shape is None
            ) == False, f'The shape cannot be None when offset is being set'

    if get_runtime().materialized:
        raise RuntimeError(
            "No new variables can be declared after materialization, i.e. kernel invocations "
            "or Python-scope field accesses. I.e., data layouts must be specified before "
            "any computation. Try appending ti.init() or ti.reset() "
            "right after 'import taichi as ti' if you are using Jupyter notebook or Blender."
        )

    del _taichi_skip_traceback

    # primal
    x = Expr(_ti_core.make_id_expr(""))
    x.declaration_tb = get_traceback(stacklevel=2)
    x.ptr = _ti_core.global_new(x.ptr, dtype)
    x.ptr.set_is_primal(True)
    pytaichi.global_vars.append(x)

    if _ti_core.needs_grad(dtype):
        # adjoint
        x_grad = Expr(_ti_core.make_id_expr(""))
        x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype)
        x_grad.ptr.set_is_primal(False)
        x.set_grad(x_grad)

    if shape is not None:
        dim = len(shape)
        root.dense(index_nd(dim), shape).place(x, offset=offset)
        if needs_grad:
            root.dense(index_nd(dim), shape).place(x.grad)
    return x
def create_field_member(dtype, name, needs_grad, needs_dual):
    dtype = cook_dtype(dtype)

    # primal
    prog = get_runtime().prog
    if prog is None:
        raise TaichiRuntimeError(
            "Cannot create field, maybe you forgot to call `ti.init()` first?"
        )

    x = Expr(prog.make_id_expr(""))
    x.declaration_tb = get_traceback(stacklevel=4)
    x.ptr = _ti_core.global_new(x.ptr, dtype)
    x.ptr.set_name(name)
    x.ptr.set_is_primal(True)
    pytaichi.global_vars.append(x)

    x_grad = None
    x_dual = None
    if _ti_core.is_real(dtype):
        # adjoint
        x_grad = Expr(get_runtime().prog.make_id_expr(""))
        x_grad.declaration_tb = get_traceback(stacklevel=4)
        x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype)
        x_grad.ptr.set_name(name + ".grad")
        x_grad.ptr.set_is_primal(False)
        x.ptr.set_adjoint(x_grad.ptr)
        if needs_grad:
            pytaichi.grad_vars.append(x_grad)

        # dual
        x_dual = Expr(get_runtime().prog.make_id_expr(""))
        x_dual.ptr = _ti_core.global_new(x_dual.ptr, dtype)
        x_dual.ptr.set_name(name + ".dual")
        x_dual.ptr.set_is_primal(False)
        x.ptr.set_dual(x_dual.ptr)
        if needs_dual:
            pytaichi.dual_vars.append(x_dual)
    elif needs_grad or needs_dual:
        raise TaichiRuntimeError(
            f'{dtype} is not supported for field with `needs_grad=True` or `needs_dual=True`.'
        )

    return x, x_grad, x_dual
def field(dtype, shape=None, name="", offset=None, needs_grad=False):
    _taichi_skip_traceback = 1
    dtype = cook_dtype(dtype)

    if isinstance(shape, numbers.Number):
        shape = (shape, )

    if isinstance(offset, numbers.Number):
        offset = (offset, )

    if shape is not None and offset is not None:
        assert len(shape) == len(
            offset
        ), f'The dimensionality of shape and offset must be the same ({len(shape)} != {len(offset)})'

    assert (offset is not None and shape is None
            ) == False, f'The shape cannot be None when offset is being set'

    del _taichi_skip_traceback

    # primal
    x = Expr(_ti_core.make_id_expr(""))
    x.declaration_tb = get_traceback(stacklevel=2)
    x.ptr = _ti_core.global_new(x.ptr, dtype)
    x.ptr.set_name(name)
    x.ptr.set_is_primal(True)
    pytaichi.global_vars.append(x)

    if _ti_core.needs_grad(dtype):
        # adjoint
        x_grad = Expr(_ti_core.make_id_expr(""))
        x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype)
        x_grad.ptr.set_name(name + ".grad")
        x_grad.ptr.set_is_primal(False)
        x.set_grad(x_grad)

    if shape is not None:
        dim = len(shape)
        root.dense(index_nd(dim), shape).place(x, offset=offset)
        if needs_grad:
            root.dense(index_nd(dim), shape).place(x.grad)
    return x
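# Hedged usage sketch (not part of the snippets above): both `field` variants
# above back the public `ti.field` API. The shapes and offset below are
# arbitrary illustrative values.
import taichi as ti

ti.init(arch=ti.cpu)

x = ti.field(ti.f32, shape=(8, 8))                   # dense 8x8 scalar field
y = ti.field(ti.f32, shape=(8, 8), offset=(-4, -4))  # indices range over [-4, 4)

@ti.kernel
def fill():
    for i, j in x:
        x[i, j] = i + j

fill()
print(x.to_numpy()[2, 3])  # 5.0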
def create_field_member(dtype, name):
    dtype = cook_dtype(dtype)

    # primal
    x = Expr(_ti_core.make_id_expr(""))
    x.declaration_tb = get_traceback(stacklevel=4)
    x.ptr = _ti_core.global_new(x.ptr, dtype)
    x.ptr.set_name(name)
    x.ptr.set_is_primal(True)
    pytaichi.global_vars.append(x)

    x_grad = None
    if _ti_core.needs_grad(dtype):
        # adjoint
        x_grad = Expr(_ti_core.make_id_expr(""))
        x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype)
        x_grad.ptr.set_name(name + ".grad")
        x_grad.ptr.set_is_primal(False)
        x.ptr.set_grad(x_grad.ptr)
    return x, x_grad
def random(dtype=float): """Return a single random float/integer according to the specified data type. Must be called in taichi scope. If the required `dtype` is float type, this function returns a random number sampled from the uniform distribution in the half-open interval [0, 1). For integer types this function returns a random integer in the half-open interval [0, 2^32) if a 32-bit integer is required, or a random integer in the half-open interval [0, 2^64) if a 64-bit integer is required. Args: dtype (:mod:`~taichi.types.primitive_types`): Type of the required random value. Returns: A random value with type `dtype`. Example:: >>> @ti.kernel >>> def test(): >>> x = ti.random(float) >>> print(x) # 0.090257 >>> >>> y = ti.random(ti.f64) >>> print(y) # 0.716101627301 >>> >>> i = ti.random(ti.i32) >>> print(i) # -963722261 >>> >>> j = ti.random(ti.i64) >>> print(j) # 73412986184350777 """ dtype = cook_dtype(dtype) x = expr.Expr(_ti_core.make_rand_expr(dtype)) return impl.expr_init(x)
def decl_scalar_ret(dtype):
    dtype = cook_dtype(dtype)
    id = taichi_lang_core.decl_ret(dtype)
    return id
def check_type_match(lhs, rhs):
    if cook_dtype(lhs) == cook_dtype(rhs):
        return True
    return False
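# Hedged usage sketch (assumes the check_type_match helper above is in scope
# and the default precision settings from `ti.init()`): Python builtins cook to
# the default Taichi types, so they compare equal to their cooked counterparts.
import taichi as ti

ti.init(arch=ti.cpu, default_fp=ti.f32, default_ip=ti.i32)

assert check_type_match(float, ti.f32)       # float cooks to default_fp
assert check_type_match(int, ti.i32)         # int cooks to default_ip
assert not check_type_match(ti.f32, ti.f64)  # distinct primitive types differ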
def decl_ret(dtype):
    dtype = cook_dtype(dtype)
    return _ti_core.decl_ret(dtype)
def random(dtype=float):
    dtype = cook_dtype(dtype)
    x = Expr(_ti_core.make_rand_expr(dtype))
    return impl.expr_init(x)
def decl_sparse_matrix():
    ptr_type = cook_dtype(u64)
    # Treat the sparse matrix argument as a scalar since we only need to pass in the base pointer
    arg_id = _ti_core.decl_arg(ptr_type, False)
    return SparseMatrixProxy(_ti_core.make_arg_load_expr(arg_id, ptr_type))
def decl_scalar_arg(dtype):
    dtype = cook_dtype(dtype)
    arg_id = _ti_core.decl_arg(dtype, False)
    return Expr(_ti_core.make_arg_load_expr(arg_id, dtype))
def decl_scalar_arg(dtype):
    dtype = cook_dtype(dtype)
    id = taichi_lang_core.decl_arg(dtype, False)
    return Expr(taichi_lang_core.make_arg_load_expr(id, dtype))
def bit_cast(obj, dtype):
    dtype = cook_dtype(dtype)
    if is_taichi_class(obj):
        raise ValueError('Cannot apply bit_cast on Taichi classes')
    else:
        return expr.Expr(_ti_core.bits_cast(expr.Expr(obj).ptr, dtype))
def cast(obj, dtype):
    dtype = cook_dtype(dtype)
    if is_taichi_class(obj):
        # TODO: unify with element_wise_unary
        return obj.cast(dtype)
    return expr.Expr(_ti_core.value_cast(expr.Expr(obj).ptr, dtype))
def decl_ext_arr_arg(dtype, dim):
    dtype = cook_dtype(dtype)
    id = taichi_lang_core.decl_arg(dtype, True)
    return Expr(taichi_lang_core.make_external_tensor_expr(dtype, dim, id))
def decl_ret(dtype):
    if isinstance(dtype, MatrixType):
        dtype = _ti_core.decl_tensor_type([dtype.n, dtype.m], dtype.dtype)
    else:
        dtype = cook_dtype(dtype)
    return impl.get_runtime().prog.decl_ret(dtype)
def decl_ext_arr_arg(dtype, dim):
    dtype = cook_dtype(dtype)
    arg_id = _ti_core.decl_arg(dtype, True)
    return Expr(_ti_core.make_external_tensor_expr(dtype, dim, arg_id))