Example #1
    def run(self, args):
        # Supports native Python numerical types (int, float) and Ndarray.
        # Taichi Matrix types are flattened into (int, float) arrays.
        # TODO: remove the flattening behavior once Matrix becomes a Taichi native type.
        arg_ptrs = {}
        arg_ints = {}
        arg_floats = {}
        arg_doubles = {}

        for k, v in args.items():
            if isinstance(v, Ndarray):
                arg_ptrs[k] = v.arr
            elif isinstance(v, int):
                arg_ints[k] = v
            elif isinstance(v, float):
                arg_doubles[k] = v
            elif isinstance(v, Matrix):
                mat_val_id = 0
                for a in range(v.n):
                    for b in range(v.m):
                        key = f"{k}_mat_arg_{mat_val_id}"
                        mat_val_id += 1
                        if isinstance(v[a, b], int):
                            arg_ints[key] = int(v[a, b])
                        elif isinstance(v[a, b], float):
                            arg_floats[key] = float(v[a, b])
                        else:
                            raise TaichiRuntimeError(
                                f'Only Python int and float are supported as matrix runtime arguments, but got {type(v[a, b])}'
                            )
            else:
                raise TaichiRuntimeError(
                    f'Only Python int, float and ti.Ndarray are supported as runtime arguments, but got {type(v)}'
                )
        self._compiled_graph.run(arg_ptrs, arg_ints, arg_floats, arg_doubles)
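
A hedged usage sketch of this run() method (illustrative names: `g` is assumed to be the compiled graph, and its dispatches are assumed to declare matching symbolic arguments):

    import taichi as ti

    x = ti.ndarray(ti.f32, shape=16)
    g.run({
        'x': x,        # Ndarray -> arg_ptrs
        'n': 16,       # Python int -> arg_ints
        'dt': 0.016,   # Python float -> arg_doubles
    })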
Example #2
    def get_paddle_callbacks(self, v, has_pp):
        callbacks = []

        def get_call_back(u, v):
            def call_back():
                u.copy_(v, False)

            return call_back

        assert has_pp
        assert isinstance(v, paddle.Tensor)

        tmp = v.value().get_tensor()
        taichi_arch = self.runtime.prog.config.arch

        if v.place.is_gpu_place():
            # External tensor on cuda
            if taichi_arch != _ti_core.Arch.cuda:
                # copy data back to cpu
                host_v = v.cpu()
                tmp = host_v.value().get_tensor()
                callbacks.append(get_call_back(v, host_v))
        elif v.place.is_cpu_place():
            # External tensor on cpu
            if taichi_arch == _ti_core.Arch.cuda:
                gpu_v = v.cuda()
                tmp = gpu_v.value().get_tensor()
                callbacks.append(get_call_back(v, gpu_v))
        else:
            # Paddle does support many other backends, such as XPU, NPU, MLU and IPU.
            raise TaichiRuntimeError(
                f"Taichi does not support backend {v.place}, which Paddle supports."
            )

        return tmp, callbacks
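
The nested get_call_back factory binds (u, v) at definition time, so the copy back into the caller's tensor is deferred until after the kernel launch. A minimal sketch of that round trip, assuming a CPU-only Taichi arch and a GPU-resident Paddle tensor (tensor names are illustrative):

    import paddle

    v = paddle.to_tensor([1.0, 2.0, 3.0])  # caller's tensor, assumed to live on the GPU
    host_v = v.cpu()                       # staging copy the kernel writes into
    tmp = host_v.value().get_tensor()      # raw tensor handed to the launch context
    cb = get_call_back(v, host_v)          # capture the pair now
    # ... kernel launch mutates host_v through tmp ...
    cb()                                   # v.copy_(host_v, False) writes the results back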
Example #3
def ndarray(dtype, shape, layout=Layout.NULL):
    """Defines a Taichi ndarray with scalar elements.

    Args:
        dtype (Union[DataType, MatrixType]): Data type of each element. This can be either a scalar type like ti.f32 or a compound type like ti.types.vector(3, ti.i32).
        shape (Union[int, tuple[int]]): Shape of the ndarray.
        layout (Layout, optional): Layout of the ndarray; only applicable when the element is a non-scalar type. Default is Layout.AOS.

    Example:
        The code below shows how a Taichi ndarray with scalar elements can be declared and defined::

            >>> x = ti.ndarray(ti.f32, shape=(16, 8))  # ndarray of shape (16, 8), each element is ti.f32 scalar.
            >>> vec3 = ti.types.vector(3, ti.i32)
            >>> y = ti.ndarray(vec3, shape=(10, 2))  # ndarray of shape (10, 2), each element is a vector of 3 ti.i32 scalars.
            >>> matrix_ty = ti.types.matrix(3, 4, float)
            >>> z = ti.ndarray(matrix_ty, shape=(4, 5), layout=ti.Layout.SOA)  # ndarray of shape (4, 5), each element is a (3, 4) matrix of float scalars.
    """
    if isinstance(shape, numbers.Number):
        shape = (shape, )
    if dtype in all_types:
        assert layout == Layout.NULL
        return ScalarNdarray(dtype, shape)
    if isinstance(dtype, MatrixType):
        layout = Layout.AOS if layout == Layout.NULL else layout
        return MatrixNdarray(dtype.n, dtype.m, dtype.dtype, shape, layout)

    raise TaichiRuntimeError(
        f'{dtype} is not supported as ndarray element type')
Example #4
File: impl.py Project: k-ye/taichi
def create_field_member(dtype, name, needs_grad, needs_dual):
    dtype = cook_dtype(dtype)

    # primal
    prog = get_runtime().prog
    if prog is None:
        raise TaichiRuntimeError(
            "Cannont create field, maybe you forgot to call `ti.init()` first?"
        )

    x = Expr(prog.make_id_expr(""))
    x.declaration_tb = get_traceback(stacklevel=4)
    x.ptr = _ti_core.global_new(x.ptr, dtype)
    x.ptr.set_name(name)
    x.ptr.set_is_primal(True)
    pytaichi.global_vars.append(x)

    x_grad = None
    x_dual = None
    if _ti_core.is_real(dtype):
        # adjoint
        x_grad = Expr(get_runtime().prog.make_id_expr(""))
        x_grad.declaration_tb = get_traceback(stacklevel=4)
        x_grad.ptr = _ti_core.global_new(x_grad.ptr, dtype)
        x_grad.ptr.set_name(name + ".grad")
        x_grad.ptr.set_is_primal(False)
        x.ptr.set_adjoint(x_grad.ptr)
        if needs_grad:
            pytaichi.grad_vars.append(x_grad)

        # dual
        x_dual = Expr(get_runtime().prog.make_id_expr(""))
        x_dual.ptr = _ti_core.global_new(x_dual.ptr, dtype)
        x_dual.ptr.set_name(name + ".dual")
        x_dual.ptr.set_is_primal(False)
        x.ptr.set_dual(x_dual.ptr)
        if needs_dual:
            pytaichi.dual_vars.append(x_dual)
    elif needs_grad or needs_dual:
        raise TaichiRuntimeError(
            f'{dtype} is not supported for field with `needs_grad=True` or `needs_dual=True`.'
        )

    return x, x_grad, x_dual
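
create_field_member is the internal helper behind ti.field. A hedged sketch of the public call that exercises the adjoint branch (a real dtype is required for gradients):

    import taichi as ti

    ti.init()
    x = ti.field(ti.f32, shape=8, needs_grad=True)  # allocates x together with x.grad
    # An integer dtype with needs_grad=True would hit the TaichiRuntimeError branch above.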
Example #5
 def __init__(self, dtype=f32, solver_type="LLT", ordering="AMD"):
     solver_type_list = ["LLT", "LDLT", "LU"]
     solver_ordering = ['AMD', 'COLAMD']
     if solver_type in solver_type_list and ordering in solver_ordering:
         taichi_arch = taichi.lang.impl.get_runtime().prog.config.arch
         assert taichi_arch == _ti_core.Arch.x64 or taichi_arch == _ti_core.Arch.arm64, "SparseSolver only supports CPU for now."
         self.solver = _ti_core.make_sparse_solver(dtype, solver_type,
                                                   ordering)
     else:
         raise TaichiRuntimeError(
             f"The solver type {solver_type} with {ordering} is not supported for now. Only {solver_type_list} with {solver_ordering} are supported."
         )
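
A hedged construction sketch on one of the CPU backends the assert permits:

    import taichi as ti

    ti.init(arch=ti.cpu)  # SparseSolver asserts x64 or arm64
    solver = ti.linalg.SparseSolver(dtype=ti.f32, solver_type="LLT", ordering="AMD")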
Example #6
    def build_from_ndarray(self, ndarray):
        """Build the sparse matrix from a ndarray.

        Args:
            ndarray (Union[ti.ndarray, ti.Vector.ndarray, ti.Matrix.ndarray]): the ndarray to build the sparse matrix from.

        Raises:
            TaichiRuntimeError: If the input is not an ndarray, or its total number of scalars is not divisible by 3.

        Example::
            >>> N = 5
            >>> triplets = ti.Vector.ndarray(n=3, dtype=ti.f32, shape=10, layout=ti.Layout.AOS)
            >>> @ti.kernel
            >>> def fill(triplets: ti.types.ndarray()):
            >>>     for i in range(N):
            >>>        triplets[i] = ti.Vector([i, (i + 1) % N, i+1], dt=ti.f32)
            >>> fill(triplets)
            >>> A = ti.linalg.SparseMatrix(n=N, m=N, dtype=ti.f32)
            >>> A.build_from_ndarray(triplets)
            >>> print(A)
            [0, 1, 0, 0, 0]
            [0, 0, 2, 0, 0]
            [0, 0, 0, 3, 0]
            [0, 0, 0, 0, 4]
            [5, 0, 0, 0, 0]
        """
        if isinstance(ndarray, Ndarray):
            num_scalars = reduce(lambda x, y: x * y,
                                 ndarray.shape + ndarray.element_shape)
            if num_scalars % 3 != 0:
                raise TaichiRuntimeError(
                    "The number of ndarray elements must have a length that is divisible by 3."
                )
            get_runtime().prog.make_sparse_matrix_from_ndarray(
                self.matrix, ndarray.arr)
        else:
            raise TaichiRuntimeError(
                'Sparse matrix only supports building from [ti.ndarray, ti.Vector.ndarray, ti.Matrix.ndarray]'
            )
Example #7
def Arg(tag,
        name,
        dtype=None,
        field_dim=0,
        element_shape=(),
        channel_format=None,
        shape=(),
        num_channels=None):
    if isinstance(dtype, MatrixType):
        if len(element_shape) > 0:
            raise TaichiRuntimeError(
                f'Element shape for MatrixType argument "{name}" is not supported.'
            )
        mat_type = dtype
        arg_list = []
        i = 0
        for _ in range(mat_type.n):
            arg_sublist = []
            for _ in range(mat_type.m):
                arg_sublist.append(
                    _ti_core.Arg(tag, f'{name}_mat_arg_{i}', dtype.dtype,
                                 field_dim, element_shape))
                i += 1
            arg_list.append(arg_sublist)
        return arg_list

    if tag == ArgKind.TEXTURE or tag == ArgKind.RWTEXTURE:
        if channel_format is None or len(shape) == 0 or num_channels is None:
            raise TaichiRuntimeError(
                'channel_format, num_channels and shape arguments are required for texture arguments'
            )
        return _ti_core.Arg(tag,
                            name,
                            channel_format=channel_format,
                            num_channels=num_channels,
                            shape=shape)
    return _ti_core.Arg(tag, name, dtype, field_dim, element_shape)
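
A hedged sketch of calling this Arg factory directly (it assumes ti and ArgKind are importable in the caller's scope, and that ArgKind has an NDARRAY member alongside the TEXTURE members used above):

    sym_x = Arg(ArgKind.NDARRAY, 'x', dtype=ti.f32, field_dim=1)
    sym_tex = Arg(ArgKind.TEXTURE, 'tex',
                  channel_format=ti.f32, num_channels=4, shape=(128, 128))
    # A MatrixType dtype instead returns an n x m nested list of Args named
    # 'x_mat_arg_0', 'x_mat_arg_1', ..., mirroring the flattening in Graph.run.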
Example #8
    def solve(self, b):  # pylint: disable=R1710
        """Computes the solution of the linear systems.
        Args:
            b (numpy.array or Field): The right-hand side of the linear systems.

        Returns:
            numpy.array: The solution of linear systems.
        """
        if isinstance(b, Field):
            return self.solver.solve(b.to_numpy())
        if isinstance(b, np.ndarray):
            return self.solver.solve(b)
        raise TaichiRuntimeError(
            f"The parameter type: {type(b)} is not supported in linear solvers for now."
        )
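
A hedged end-to-end sketch; A and N are hypothetical, and the solver is assumed to expose the usual analyze_pattern/factorize steps (not shown in this snippet) before solve is called:

    import numpy as np

    solver.analyze_pattern(A)  # A: a prebuilt sparse matrix (assumed)
    solver.factorize(A)
    b = np.ones(N, dtype=np.float32)
    x = solver.solve(b)        # returns a numpy.ndarray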
Example #9
    def run(self, args):
        arg_ptrs = {}
        # Only native Python numerical types (int, float) are supported for now.
        arg_ints = {}
        arg_floats = {}

        for k, v in args.items():
            if isinstance(v, Ndarray):
                arg_ptrs[k] = v.arr
            elif isinstance(v, int):
                arg_ints[k] = v
            elif isinstance(v, float):
                arg_floats[k] = v
            else:
                raise TaichiRuntimeError(
                    'Only Python int, float and ti.Ndarray are supported as runtime arguments'
                )
        self._compiled_graph.run(arg_ptrs, arg_ints, arg_floats)
Example #10
def Arg(tag, name, dtype, element_shape=()):
    if isinstance(dtype, MatrixType):
        if len(element_shape) > 0:
            raise TaichiRuntimeError(
                f'Element shape for MatrixType argument "{name}" is not supported.'
            )
        mat_type = dtype
        arg_list = []
        i = 0
        for _ in range(mat_type.n):
            arg_sublist = []
            for _ in range(mat_type.m):
                arg_sublist.append(
                    _ti_core.Arg(tag, f'{name}_mat_arg_{i}', dtype.dtype,
                                 element_shape))
                i += 1
            arg_list.append(arg_sublist)
        return arg_list

    return _ti_core.Arg(tag, name, dtype, element_shape)
Example #11
    def __matmul__(self, other):
        """Matrix multiplication.

        Args:
            other (SparseMatrix, Field, or numpy.ndarray): the right-hand operand of the multiplication.

        Returns:
            The result of matrix multiplication.
        """
        if isinstance(other, SparseMatrix):
            assert self.m == other.n, f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})"
            sm = self.matrix.matmul(other.matrix)
            return SparseMatrix(sm=sm)
        if isinstance(other, Field):
            assert self.m == other.shape[
                0], f"Dimension mismatch between sparse matrix ({self.n}, {self.m}) and vector ({other.shape})"
            return self.matrix.mat_vec_mul(other.to_numpy())
        if isinstance(other, np.ndarray):
            assert self.m == other.shape[
                0], f"Dimension mismatch between sparse matrix ({self.n}, {self.m}) and vector ({other.shape})"
            return self.matrix.mat_vec_mul(other)
        raise TaichiRuntimeError(
            f"Sparse matrix-matrix/vector multiplication does not support {type(other)} for now. Supported types are SparseMatrix, ti.field, and numpy ndarray."
        )
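
A hedged sketch of the three accepted right-hand operands (A and B are assumed to be dimension-compatible SparseMatrix instances):

    import numpy as np

    C = A @ B                               # SparseMatrix @ SparseMatrix -> SparseMatrix
    y = A @ np.ones(A.m, dtype=np.float32)  # SparseMatrix @ vector -> numpy.ndarray
    # Anything else (e.g. a Python list) raises TaichiRuntimeError.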
Example #12
 def run(self, args):
     # Supports native Python numerical types (int, float), Ndarray and Texture.
     # Taichi Matrix types are flattened into (int, float) arrays.
     # TODO: remove the flattening behavior once Matrix becomes a Taichi native type.
     flattened = {}
     for k, v in args.items():
         if isinstance(v, Ndarray):
             flattened[k] = v.arr
         elif isinstance(v, Texture):
             flattened[k] = v.tex
         elif isinstance(v, Matrix):
             mat_val_id = 0
             for a in range(v.n):
                 for b in range(v.m):
                     key = f"{k}_mat_arg_{mat_val_id}"
                     mat_val_id += 1
                     flattened[key] = v[a, b]
         elif isinstance(v, (int, float)):
             flattened[k] = v
         else:
             raise TaichiRuntimeError(
                 f'Only Python int, float, ti.Matrix, ti.Texture and ti.Ndarray are supported as runtime arguments, but got {type(v)}'
             )
     self._compiled_graph.run(flattened)
Example #13
 def _check_not_finalized(self):
     if self._finalized:
         raise TaichiRuntimeError('FieldsBuilder finalized')
Example #14
 def _type_assert(sparse_matrix):
     raise TaichiRuntimeError(
         f"The parameter type: {type(sparse_matrix)} is not supported in linear solvers for now."
     )
Example #15
        def func__(*args):
            assert len(args) == len(
                self.argument_annotations
            ), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided'

            tmps = []
            callbacks = []
            has_external_arrays = False
            has_torch = has_pytorch()

            actual_argument_slot = 0
            launch_ctx = t_kernel.make_launch_context()
            for i, v in enumerate(args):
                needed = self.argument_annotations[i]
                if isinstance(needed, template):
                    continue
                provided = type(v)
                # Note: do not use something like "needed == f32"; that would be slow.
                if id(needed) in primitive_types.real_type_ids:
                    if not isinstance(v, (float, int)):
                        raise TaichiRuntimeTypeError.get(
                            i, needed.to_string(), provided)
                    launch_ctx.set_arg_float(actual_argument_slot, float(v))
                elif id(needed) in primitive_types.integer_type_ids:
                    if not isinstance(v, int):
                        raise TaichiRuntimeTypeError.get(
                            i, needed.to_string(), provided)
                    launch_ctx.set_arg_int(actual_argument_slot, int(v))
                elif isinstance(needed, sparse_matrix_builder):
                    # Pass only the base pointer of the ti.types.sparse_matrix_builder() argument
                    launch_ctx.set_arg_int(actual_argument_slot, v._get_addr())
                elif isinstance(needed,
                                ndarray_type.NdarrayType) and isinstance(
                                    v, taichi.lang._ndarray.Ndarray):
                    has_external_arrays = True
                    v = v.arr
                    launch_ctx.set_arg_ndarray(actual_argument_slot, v)
                elif isinstance(
                        needed,
                        ndarray_type.NdarrayType) and (self.match_ext_arr(v)):
                    has_external_arrays = True
                    is_numpy = isinstance(v, np.ndarray)
                    if is_numpy:
                        tmp = np.ascontiguousarray(v)
                        # Keep a reference so |tmp| is not garbage-collected before launch.
                        tmps.append(tmp)
                        launch_ctx.set_arg_external_array_with_shape(
                            actual_argument_slot, int(tmp.ctypes.data),
                            tmp.nbytes, v.shape)
                    else:
                        is_ndarray = False
                        tmp, torch_callbacks = self.get_torch_callbacks(
                            v, has_torch, is_ndarray)
                        callbacks += torch_callbacks
                        launch_ctx.set_arg_external_array_with_shape(
                            actual_argument_slot, int(tmp.data_ptr()),
                            tmp.element_size() * tmp.nelement(), v.shape)

                elif isinstance(needed, MatrixType):
                    if id(needed.dtype) in primitive_types.real_type_ids:
                        for a in range(needed.n):
                            for b in range(needed.m):
                                if not isinstance(v[a, b], (int, float)):
                                    raise TaichiRuntimeTypeError.get(
                                        i, needed.dtype.to_string(),
                                        type(v[a, b]))
                                launch_ctx.set_arg_float(
                                    actual_argument_slot, float(v[a, b]))
                                actual_argument_slot += 1
                    elif id(needed.dtype) in primitive_types.integer_type_ids:
                        for a in range(needed.n):
                            for b in range(needed.m):
                                if not isinstance(v[a, b], int):
                                    raise TaichiRuntimeTypeError.get(
                                        i, needed.dtype.to_string(),
                                        type(v[a, b]))
                                launch_ctx.set_arg_int(actual_argument_slot,
                                                       int(v[a, b]))
                                actual_argument_slot += 1
                    else:
                        raise ValueError(
                            f'Matrix dtype {needed.dtype} is neither an integer type nor a real type.'
                        )
                    continue
                else:
                    raise ValueError(
                        f'Argument type mismatch. Expecting {needed}, got {type(v)}.'
                    )
                actual_argument_slot += 1
            # Both the class kernels and the plain-function kernels are unified now.
            # In both cases, |self.grad| is another Kernel instance that computes the
            # gradient. For class kernels, args[0] is always the kernel owner.
            if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced:
                self.runtime.target_tape.insert(self, args)

            if actual_argument_slot > 8 and (
                    impl.current_cfg().arch == _ti_core.opengl
                    or impl.current_cfg().arch == _ti_core.cc):
                raise TaichiRuntimeError(
                    f"The number of elements in kernel arguments is too big! Do not exceed 8 on {_ti_core.arch_name(impl.current_cfg().arch)} backend."
                )

            if actual_argument_slot > 64 and (
                (impl.current_cfg().arch != _ti_core.opengl
                 and impl.current_cfg().arch != _ti_core.cc)):
                raise TaichiRuntimeError(
                    f"The number of elements in kernel arguments is too big! Do not exceed 64 on {_ti_core.arch_name(impl.current_cfg().arch)} backend."
                )

            try:
                t_kernel(launch_ctx)
            except Exception as e:
                e = handle_exception_from_cpp(e)
                raise e from None

            ret = None
            ret_dt = self.return_type
            has_ret = ret_dt is not None

            if has_ret or (impl.current_cfg().async_mode
                           and has_external_arrays):
                runtime_ops.sync()

            if has_ret:
                if id(ret_dt) in primitive_types.integer_type_ids:
                    ret = t_kernel.get_ret_int(0)
                elif id(ret_dt) in primitive_types.real_type_ids:
                    ret = t_kernel.get_ret_float(0)
                elif id(ret_dt.dtype) in primitive_types.integer_type_ids:
                    it = iter(t_kernel.get_ret_int_tensor(0))
                    ret = Matrix([[next(it) for _ in range(ret_dt.m)]
                                  for _ in range(ret_dt.n)])
                else:
                    it = iter(t_kernel.get_ret_float_tensor(0))
                    ret = Matrix([[next(it) for _ in range(ret_dt.m)]
                                  for _ in range(ret_dt.n)])
            if callbacks:
                for c in callbacks:
                    c()

            return ret
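
A hedged sketch of the MatrixType branch above: each matrix entry occupies one scalar argument slot, which is exactly what the 8/64 slot limits count:

    import taichi as ti

    ti.init()
    mat2 = ti.types.matrix(2, 2, ti.f32)

    @ti.kernel
    def trace(m: mat2) -> ti.f32:
        return m[0, 0] + m[1, 1]

    trace(ti.Matrix([[1.0, 2.0], [3.0, 4.0]]))  # fills four float argument slots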
Example #16
 def __getattr__(self, item):
     if item == '__qualname__':
         # For sphinx docstring extraction.
         return '_UninitializedRootFieldsBuilder'
     raise TaichiRuntimeError('Please call init() first')
Example #17
 def destroy(self):
     if self.destroyed:
         raise TaichiRuntimeError('SNode tree has been destroyed')
     self.ptr.destroy_snode_tree(impl.get_runtime().prog)
     self.destroyed = True
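
A hedged lifecycle sketch via FieldsBuilder, the public API that yields an SNodeTree:

    import taichi as ti

    ti.init()
    fb = ti.FieldsBuilder()
    x = ti.field(ti.f32)
    fb.dense(ti.i, 8).place(x)
    tree = fb.finalize()  # returns an SNodeTree handle
    tree.destroy()        # a second destroy() raises 'SNode tree has been destroyed'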
Example #18
 def id(self):
     if self.destroyed:
         raise TaichiRuntimeError('SNode tree has been destroyed')
     return self.ptr.id()