Example #1
def to_torch(self, device=None):
    # torch is imported lazily so it is only required when the conversion is used.
    import torch  # pylint: disable=C0415

    # Allocate a zero-filled torch tensor matching this field's shape;
    # to_pytorch_type (a module-level helper) maps the Taichi dtype to
    # its torch equivalent.
    arr = torch.zeros(size=self.shape,
                      dtype=to_pytorch_type(self.dtype),
                      device=device)
    # Copy the field into the external tensor, then synchronize so the
    # copy has completed before returning.
    taichi.lang.meta.tensor_to_ext_arr(self, arr)
    ti.sync()
    return arr
Example #2
def to_torch(self, device=None):
    # Same conversion as above, but with tensor_to_ext_arr imported
    # explicitly rather than referenced through the taichi.lang.meta path.
    import torch
    from taichi.lang.meta import tensor_to_ext_arr
    arr = torch.zeros(size=self.shape,
                      dtype=to_pytorch_type(self.dtype),
                      device=device)
    tensor_to_ext_arr(self, arr)
    ti.sync()
    return arr
Example #3
    def to_torch(self, device=None):
        import torch  # pylint: disable=C0415

        # pylint: disable=E1101
        arr = torch.zeros(size=self.shape,
                          dtype=to_pytorch_type(self.dtype),
                          device=device)
        # Here the copy kernel is imported from taichi._kernels and
        # synchronization goes through taichi.lang.runtime_ops.
        from taichi._kernels import tensor_to_ext_arr  # pylint: disable=C0415
        tensor_to_ext_arr(self, arr)
        taichi.lang.runtime_ops.sync()
        return arr
Example #4
def to_torch(self, device=None, keep_dims=False):
    import torch
    # A matrix with m == 1 is treated as a vector unless keep_dims is set,
    # in which case the trailing unit dimension is preserved.
    as_vector = self.m == 1 and not keep_dims
    shape_ext = (self.n, ) if as_vector else (self.n, self.m)
    # torch.empty is safe here: every element is overwritten by the copy.
    ret = torch.empty(self.shape + shape_ext,
                      dtype=to_pytorch_type(self.dtype),
                      device=device)
    from taichi.lang.meta import matrix_to_ext_arr
    matrix_to_ext_arr(self, ret, as_vector)
    ti.sync()
    return ret
Example #5
def __init__(self, dtype, shape):
    self.host_accessor = None
    if impl.current_cfg().ndarray_use_torch:
        assert has_pytorch(), (
            "PyTorch must be available if you want to create a Taichi "
            "ndarray with PyTorch as its underlying storage.")
        import torch  # needed only on the torch-backed storage path
        self.arr = torch.zeros(shape,
                               dtype=to_pytorch_type(cook_dtype(dtype)))
        # Move the buffer to the GPU when Taichi itself runs on CUDA.
        if impl.current_cfg().arch == _ti_core.Arch.cuda:
            self.arr = self.arr.cuda()
    else:
        # Otherwise fall back to Taichi's native ndarray storage.
        self.arr = _ti_core.Ndarray(impl.get_runtime().prog,
                                    cook_dtype(dtype), shape)
Example #6
def __init__(self, dtype, shape):
    import numbers  # assumed to be a module-level import in the source
    # Normalize a scalar shape to a 1-tuple.
    if isinstance(shape, numbers.Number):
        shape = (shape, )
    assert has_pytorch(), (
        "PyTorch must be available if you want to create a Taichi ndarray.")
    import torch
    # Place the backing tensor on the first CUDA device when Taichi runs
    # on CUDA, otherwise keep it on the host.
    if impl.current_cfg().arch == _ti_core.Arch.cuda:
        device = 'cuda:0'
    else:
        device = 'cpu'
    self.arr = torch.empty(shape,
                           dtype=to_pytorch_type(dtype),
                           device=device)
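
A minimal usage sketch for a constructor like the two above (assuming it backs Taichi's public ti.ndarray factory; the dtype and shape are illustrative):

import taichi as ti

ti.init(arch=ti.cpu)
# Allocates a 16-element float32 ndarray; depending on the configuration
# shown above, the storage is either torch-backed or Taichi-native.
arr = ti.ndarray(dtype=ti.f32, shape=(16, ))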
Example #7
    def to_torch(self, device=None):
        """Creates a torch tensor containing the same elements, for the case
        where the class internally represents a GlobalVariableExpression
        (field) or an ExternalTensorExpression.

        This is a unified interface matching :func:`taichi.lang.Matrix.to_torch`.

        Args:
            device (DeviceType): The device type, passed through to torch.zeros().

        Returns:
            The torch tensor containing the same elements as this field.
        """
        import torch
        from taichi.lang.meta import tensor_to_ext_arr
        arr = torch.zeros(size=self.shape,
                          dtype=to_pytorch_type(self.dtype),
                          device=device)
        tensor_to_ext_arr(self, arr)
        ti.sync()
        return arr
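
A minimal usage sketch for the method above (the field x and the fill kernel are illustrative, not taken from the source; assumes both Taichi and PyTorch are installed):

import taichi as ti

ti.init(arch=ti.cpu)
x = ti.field(dtype=ti.f32, shape=(4, 4))

@ti.kernel
def fill():
    for i, j in x:
        x[i, j] = i * 4 + j

fill()
t = x.to_torch()  # torch.Tensor with shape (4, 4) and dtype torch.float32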
Example #8
    def to_torch(self, device=None, keep_dims=False):
        """Converts `self` to a torch tensor.

        Args:
            device (torch.device, optional): The desired device of the returned tensor.
            keep_dims (bool, optional): Whether to keep the matrix dimensions after conversion.
                See :meth:`~taichi.lang.field.MatrixField.to_numpy` for a more detailed explanation.

        Returns:
            torch.Tensor: The resulting torch tensor.
        """
        import torch
        as_vector = self.m == 1 and not keep_dims
        shape_ext = (self.n, ) if as_vector else (self.n, self.m)
        arr = torch.empty(self.shape + shape_ext,
                          dtype=to_pytorch_type(self.dtype),
                          device=device)
        from taichi.lang.meta import matrix_to_ext_arr
        matrix_to_ext_arr(self, arr, as_vector)
        ti.sync()
        return arr
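
A minimal usage sketch for the matrix variant (the vector field v is illustrative; assumes Taichi and PyTorch are installed):

import taichi as ti

ti.init(arch=ti.cpu)
v = ti.Vector.field(3, dtype=ti.f32, shape=(8, ))
v.fill(1.0)

t = v.to_torch()                 # shape (8, 3): the unit m-dimension is dropped
t2 = v.to_torch(keep_dims=True)  # shape (8, 3, 1): the (n, m) pair is kept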