def atomic_xor(a, b):
    """Atomically XOR `b` into `a`; returns the result as an initialized Expr."""
    xor_expr = Expr(ti_core.expr_atomic_bit_xor(a.ptr, b.ptr), tb=stack_info())
    return impl.expr_init(xor_expr)
def _ternary_operation(taichi_op, python_op, a, b, c):
    """Dispatch a ternary operation to the Taichi backend when any operand is
    a Taichi expression; otherwise evaluate it in plain Python."""
    _taichi_skip_traceback = 1
    if not (is_taichi_expr(a) or is_taichi_expr(b) or is_taichi_expr(c)):
        # Pure Python operands: no need to build an expression node.
        return python_op(a, b, c)
    a = wrap_if_not_expr(a)
    b = wrap_if_not_expr(b)
    c = wrap_if_not_expr(c)
    return Expr(taichi_op(a.ptr, b.ptr, c.ptr), tb=stack_info())
def _unary_operation(taichi_op, python_op, a):
    """Apply `taichi_op` when `a` is a Taichi expression, else `python_op`."""
    _taichi_skip_traceback = 1
    if not is_taichi_expr(a):
        return python_op(a)
    return Expr(taichi_op(a.ptr), tb=stack_info())
def expr_python_mod(a, b):
    """Build the expression for Python-style modulo: a % b = a - (a // b) * b."""
    # NOTE(review): the original comment read `a % b = (a // b) * b - a`, but
    # the code below computes `a - (a // b) * b`, which is Python's `%`.
    quotient = Expr(ti_core.expr_floordiv(a, b))
    multiply = Expr(ti_core.expr_mul(b, quotient.ptr))
    return ti_core.expr_sub(a, multiply.ptr)
def atomic_max(a, b):
    """Atomic max of `a` with `b`; returns the result as an initialized Expr."""
    max_expr = Expr(_ti_core.expr_atomic_max(a.ptr, b.ptr), tb=stack_info())
    return impl.expr_init(max_expr)
def length(l, indices):
    """Return an Expr for the element count of SNode `l` at `indices`."""
    index_group = make_expr_group(indices)
    return Expr(_ti_core.insert_len(l.snode.ptr, index_group))
def ti_assert(cond, msg, extra_args):
    """Emit an assert statement into the Taichi program.

    Mostly a wrapper converting Python-side `Expr` objects into the
    C++-side `_ti_core.Expr` handles the core API expects.
    """
    cond_ptr = Expr(cond).ptr
    arg_ptrs = [Expr(arg).ptr for arg in extra_args]
    _ti_core.create_assert_stmt(cond_ptr, msg, arg_ptrs)
def ti_assert(cond, msg, extra_args):
    """Insert an assert statement via the current AST builder.

    Converts Python-side `Expr` wrappers into C++-side `_ti_core.Expr`
    handles before handing them to the builder.
    """
    builder = get_runtime().prog.current_ast_builder()
    builder.create_assert_stmt(Expr(cond).ptr, msg,
                               [Expr(v).ptr for v in extra_args])
def assume_in_range(val, base, low, high):
    """Hint the compiler that `val` can be assumed within a range around `base`."""
    # Exact bound semantics (inclusive/exclusive) are defined on the C++ side.
    val_ptr = Expr(val).ptr
    base_ptr = Expr(base).ptr
    return _ti_core.expr_assume_in_range(val_ptr, base_ptr, low, high)
def entry2content(var):
    """Pass strings through unchanged; wrap anything else as an Expr ptr."""
    if isinstance(var, str):
        return var
    return Expr(var).ptr
def clear_gradients(vars: template()):
    """Zero out every gradient field passed in `vars`."""
    for grid_index in ti.grouped(Expr(vars[0])):
        for grad_field in ti.static(vars):
            Expr(grad_field)[grid_index] = 0
def field(dtype, shape=None, name="", offset=None, needs_grad=False):
    """Defines a Taichi field.

    A Taichi field can be viewed as an abstract N-dimensional array, hiding
    away the complexity of how its underlying
    :class:`~taichi.lang.snode.SNode` are actually defined. The data in a
    Taichi field can be directly accessed by a Taichi
    :func:`~taichi.lang.kernel_impl.kernel`.

    See also https://docs.taichi.graphics/docs/lang/articles/basic/field

    Args:
        dtype (DataType): data type of the field.
        shape (Union[int, tuple[int]], optional): shape of the field
        name (str, optional): name of the field
        offset (Union[int, tuple[int]], optional): offset of the field domain
        needs_grad (bool, optional): whether this field participates in
            autodiff and thus needs an adjoint field to store the gradients.

    Example:
        The code below shows how a Taichi field can be declared and defined::

            >>> x1 = ti.field(ti.f32, shape=(16, 8))
            >>>
            >>> # Equivalently
            >>> x2 = ti.field(ti.f32)
            >>> ti.root.dense(ti.ij, shape=(16, 8)).place(x2)
    """
    _taichi_skip_traceback = 1
    dtype = cook_dtype(dtype)

    # Normalize scalar shape/offset into 1-tuples so the code below can
    # uniformly treat them as tuples.
    if isinstance(shape, numbers.Number):
        shape = (shape, )
    if isinstance(offset, numbers.Number):
        offset = (offset, )

    if shape is not None and offset is not None:
        assert len(shape) == len(
            offset
        ), f'The dimensionality of shape and offset must be the same ({len(shape)} != {len(offset)})'

    # FIX: was `assert (...) == False, f'...'` — assert the condition
    # directly and drop the stray `f` prefix (message has no placeholders).
    assert not (offset is not None and shape is None
                ), 'The shape cannot be None when offset is being set'

    del _taichi_skip_traceback

    # primal
    x = Expr(_ti_core.make_id_expr(""))
    x.declaration_tb = get_traceback(stacklevel=2)
    x.ptr = _ti_core.global_new(x.ptr, dtype)
    x.ptr.set_name(name)
    x.ptr.set_is_primal(True)
    pytaichi.global_vars.append(x)

    if _ti_core.needs_grad(dtype):
        # adjoint: a shadow field that stores gradients for autodiff
        x_grad = Expr(_ti_core.make_id_expr(""))
        x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype)
        x_grad.ptr.set_name(name + ".grad")
        x_grad.ptr.set_is_primal(False)
        x.set_grad(x_grad)

    if shape is not None:
        # Eagerly place the field (and its grad) on a dense root layout.
        dim = len(shape)
        root.dense(index_nd(dim), shape).place(x, offset=offset)
        if needs_grad:
            root.dense(index_nd(dim), shape).place(x.grad)
    return x
def make_constant_expr_i32(val):
    """Build a 32-bit integer constant Expr, clamping unsigned values to range."""
    assert isinstance(val, (int, np.integer))
    clamped = _clamp_unsigned_to_range(np.int32, val)
    return Expr(_ti_core.make_const_expr_i32(clamped))
def make_constant_expr_i32(val):
    """Wrap `val` as an i32 constant expression."""
    assert isinstance(val, (int, np.integer))
    const = _ti_core.make_const_expr_int(i32, val)
    return Expr(const)
def make_stride_expr(_var, _indices, shape, stride):
    """Create a strided-access expression over `_var` at `_indices`."""
    index_group = make_expr_group(*_indices)
    return Expr(_ti_core.make_stride_expr(_var, index_group, shape, stride))
def entry2content(_var):
    """Return strings as-is; otherwise convert to the underlying Expr ptr."""
    return _var if isinstance(_var, str) else Expr(_var).ptr
def make_index_expr(_var, _indices):
    """Create an indexing expression for `_var` at `_indices`."""
    group = make_expr_group(*_indices)
    return Expr(_ti_core.make_index_expr(_var, group))
def loop_range(self):
    """Return an Expr over this object's SNode for loop iteration."""
    # NOTE(review): assumes `self.ptr` is an SNode handle — confirm callers.
    snode_expr = _ti_core.global_var_expr_from_snode(self.ptr)
    return Expr(snode_expr)
def create_field_member(dtype, name, needs_grad, needs_dual):
    """Create the primal field expr, plus adjoint/dual exprs for real dtypes.

    Args:
        dtype: field data type (cooked via `cook_dtype`).
        name (str): field name; adjoint/dual get ".grad"/".dual" suffixes.
        needs_grad (bool): also register the adjoint in `pytaichi.grad_vars`.
        needs_dual (bool): also register the dual in `pytaichi.dual_vars`.

    Returns:
        tuple: `(x, x_grad, x_dual)`; the last two are None for non-real
        dtypes.

    Raises:
        TaichiRuntimeError: if no program is active, or `needs_grad` /
            `needs_dual` is requested for a non-real dtype.
    """
    dtype = cook_dtype(dtype)

    # primal
    prog = get_runtime().prog
    if prog is None:
        raise TaichiRuntimeError(
            # FIX: typo "Cannont" -> "Cannot" in the user-facing message.
            "Cannot create field, maybe you forgot to call `ti.init()` first?"
        )
    x = Expr(prog.make_id_expr(""))
    x.declaration_tb = get_traceback(stacklevel=4)
    x.ptr = _ti_core.global_new(x.ptr, dtype)
    x.ptr.set_name(name)
    x.ptr.set_is_primal(True)
    pytaichi.global_vars.append(x)

    x_grad = None
    x_dual = None
    if _ti_core.is_real(dtype):
        # adjoint — always created for real dtypes; only registered for
        # gradient evaluation when needs_grad is set.
        # (Reuse the `prog` local already checked above instead of
        # re-fetching get_runtime().prog.)
        x_grad = Expr(prog.make_id_expr(""))
        x_grad.declaration_tb = get_traceback(stacklevel=4)
        x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype)
        x_grad.ptr.set_name(name + ".grad")
        x_grad.ptr.set_is_primal(False)
        x.ptr.set_adjoint(x_grad.ptr)
        if needs_grad:
            pytaichi.grad_vars.append(x_grad)

        # dual — same pattern as the adjoint.
        x_dual = Expr(prog.make_id_expr(""))
        x_dual.ptr = _ti_core.global_new(x_dual.ptr, dtype)
        x_dual.ptr.set_name(name + ".dual")
        x_dual.ptr.set_is_primal(False)
        x.ptr.set_dual(x_dual.ptr)
        if needs_dual:
            pytaichi.dual_vars.append(x_dual)
    elif needs_grad or needs_dual:
        raise TaichiRuntimeError(
            f'{dtype} is not supported for field with `needs_grad=True` or `needs_dual=True`.'
        )
    return x, x_grad, x_dual
def is_active(l, indices):
    """Return an Expr testing whether SNode `l` is active at `indices`."""
    group = make_expr_group(indices)
    return Expr(_ti_core.insert_is_active(l.snode.ptr, group))
def clear_gradients(_vars: template()):
    """Set every entry of each gradient field in `_vars` to zero."""
    for cell in grouped(ScalarField(Expr(_vars[0]))):
        for grad_field in static(_vars):
            ScalarField(Expr(grad_field))[cell] = 0
def decl_scalar_arg(dtype):
    """Declare a scalar kernel argument and return an Expr that loads it."""
    cooked = cook_dtype(dtype)
    arg_id = _ti_core.decl_arg(cooked, False)
    load = _ti_core.make_arg_load_expr(arg_id, cooked)
    return Expr(load)
def wrap_scalar(x):
    """Wrap plain Python ints/floats as Expr; leave everything else alone."""
    # NOTE(review): the exact-type check (not isinstance) excludes subclasses
    # such as bool — preserved as-is.
    return Expr(x) if type(x) in (int, float) else x
def random(dtype=float):
    """Return an initialized Expr holding a random value of `dtype`."""
    cooked = cook_dtype(dtype)
    rand_expr = Expr(ti_core.make_rand_expr(cooked))
    return impl.expr_init(rand_expr)
def subscript(value, *_indices, skip_reordered=False):
    """Central dispatch for `value[indices]` inside Taichi code.

    Handles numpy arrays, Python containers, Taichi classes, mesh proxies,
    sparse matrices, fields, external arrays (AnyArray) and SNodes, falling
    back to plain Python `__getitem__` for anything else.
    """
    # Plain numpy / Python containers: evaluate directly, no expression built.
    if isinstance(value, np.ndarray):
        return value.__getitem__(*_indices)
    if isinstance(value, (tuple, list, dict)):
        assert len(_indices) == 1
        return value[_indices[0]]

    # Flatten Taichi-class indices (e.g. vectors) into their scalar entries.
    flattened_indices = []
    for _index in _indices:
        if is_taichi_class(_index):
            ind = _index.entries
        else:
            ind = [_index]
        flattened_indices += ind
    _indices = tuple(flattened_indices)
    # A single `None` index means "no index" (0-D access).
    if isinstance(_indices,
                  tuple) and len(_indices) == 1 and _indices[0] is None:
        _indices = ()

    indices_expr_group = make_expr_group(*_indices)
    index_dim = indices_expr_group.size()

    if is_taichi_class(value):
        return value._subscript(*_indices)
    if isinstance(value, MeshElementFieldProxy):
        return value.subscript(*_indices)
    if isinstance(value, MeshRelationAccessProxy):
        return value.subscript(*_indices)
    if isinstance(value,
                  (MeshReorderedScalarFieldProxy,
                   MeshReorderedMatrixFieldProxy)) and not skip_reordered:
        # Reordered mesh fields: translate the global index to the reordered
        # one, then recurse with skip_reordered=True to avoid looping.
        assert index_dim == 1
        reordered_index = tuple([
            Expr(
                _ti_core.get_index_conversion(value.mesh_ptr,
                                              value.element_type,
                                              Expr(_indices[0]).ptr,
                                              ConvType.g2r))
        ])
        return subscript(value, *reordered_index, skip_reordered=True)
    if isinstance(value, SparseMatrixProxy):
        return value.subscript(*_indices)
    if isinstance(value, Field):
        _var = value._get_field_members()[0].ptr
        # A field without an SNode has not been placed in the layout yet.
        if _var.snode() is None:
            if _var.is_primal():
                raise RuntimeError(
                    f"{_var.get_expr_name()} has not been placed.")
            else:
                raise RuntimeError(
                    f"Gradient {_var.get_expr_name()} has not been placed, check whether `needs_grad=True`"
                )
        field_dim = int(_var.get_attribute("dim"))
        if field_dim != index_dim:
            raise IndexError(
                f'Field with dim {field_dim} accessed with indices of dim {index_dim}'
            )
        if isinstance(value, MatrixField):
            return _MatrixFieldElement(value, indices_expr_group)
        if isinstance(value, StructField):
            # Subscript each member field, keeping the struct shape.
            return _IntermediateStruct(
                {k: subscript(v, *_indices)
                 for k, v in value._items})
        return Expr(_ti_core.subscript(_var, indices_expr_group))
    if isinstance(value, AnyArray):
        # TODO: deprecate using get_attribute to get dim
        field_dim = int(value.ptr.get_attribute("dim"))
        element_dim = len(value.element_shape)
        if field_dim != index_dim + element_dim:
            raise IndexError(
                f'Field with dim {field_dim - element_dim} accessed with indices of dim {index_dim}'
            )
        if element_dim == 0:
            return Expr(_ti_core.subscript(value.ptr, indices_expr_group))
        # Vector/matrix elements: build an intermediate matrix of accesses.
        n = value.element_shape[0]
        m = 1 if element_dim == 1 else value.element_shape[1]
        any_array_access = AnyArrayAccess(value, _indices)
        ret = _IntermediateMatrix(n, m, [
            any_array_access.subscript(i, j) for i in range(n)
            for j in range(m)
        ])
        ret.any_array_access = any_array_access
        return ret
    if isinstance(value, SNode):
        # When reading bit structure we only support the 0-D case for now.
        field_dim = 0
        if field_dim != index_dim:
            raise IndexError(
                f'Field with dim {field_dim} accessed with indices of dim {index_dim}'
            )
        return Expr(_ti_core.subscript(value.ptr, indices_expr_group))
    # Directly evaluate in Python for non-Taichi types
    return value.__getitem__(*_indices)
def wrap_if_not_expr(a):
    """Return `a` unchanged if already a Taichi expr; otherwise wrap it."""
    _taichi_skip_traceback = 1
    if is_taichi_expr(a):
        return a
    return Expr(a)
def make_tensor_element_expr(_var, _indices, shape, stride):
    """Create an element-access expression into tensor `_var`."""
    group = make_expr_group(*_indices)
    element = _ti_core.make_tensor_element_expr(_var, group, shape, stride)
    return Expr(element)
def append(l, indices, val):
    """Append `val` to dynamic SNode `l` at `indices`; return the insert Expr."""
    value_ptr = Expr(val).ptr
    insert = ti_core.insert_append(l.snode.ptr, make_expr_group(indices),
                                   value_ptr)
    return impl.expr_init(insert)
def decl_scalar_arg(dtype):
    """Declare a scalar kernel argument on the current program and load it."""
    cooked = cook_dtype(dtype)
    arg_id = impl.get_runtime().prog.decl_arg(cooked, False)
    return Expr(_ti_core.make_arg_load_expr(arg_id, cooked))