Example #1
    def transposed_stride(self,
                          coords_key: CoordsKey,
                          stride: Union[int, Sequence, np.ndarray,
                                        torch.Tensor],
                          kernel_size: Union[int, Sequence, np.ndarray,
                                             torch.Tensor],
                          dilation: Union[int, Sequence, np.ndarray,
                                          torch.Tensor],
                          force_creation: bool = False):
        assert isinstance(coords_key, CoordsKey)
        stride = convert_to_int_list(stride, self.D)
        kernel_size = convert_to_int_list(kernel_size, self.D)
        dilation = convert_to_int_list(dilation, self.D)
        region_type = 0
        region_offset = torch.IntTensor()

        strided_key = CoordsKey(self.D)
        tensor_stride = coords_key.getTensorStride()
        strided_key.setTensorStride(
            [int(t / s) for t, s in zip(tensor_stride, stride)])

        strided_key.setKey(
            self.CPPCoordsManager.createTransposedStridedRegionCoords(
                coords_key.getKey(), coords_key.getTensorStride(), stride,
                kernel_size, dilation, region_type, region_offset,
                force_creation))
        return strided_key
    def forward(ctx,
                input_features,
                kernel,
                tensor_stride=1,
                stride=1,
                kernel_size=-1,
                dilation=1,
                region_type=0,
                region_offset=None,
                generate_new_coords=False,
                in_coords_key=None,
                out_coords_key=None,
                coords_manager=None):
        """
        region_type=0 HyperCube
        """
        # Prep arguments
        # Kernel shape (n_spatial_kernels, in_nfeat, out_nfeat)
        assert input_features.shape[1] == kernel.shape[1], \
            "The input shape " + str(list(input_features.shape)) + \
            " does not match the kernel shape " + str(list(kernel.shape))
        if out_coords_key is None:
            out_coords_key = CoordsKey(in_coords_key.D)
        assert in_coords_key.D == out_coords_key.D
        assert input_features.type() == kernel.type(), \
            f"Type mismatch input: {input_features.type()} != kernel: {kernel.type()}"
        if not input_features.is_contiguous():
            input_features = input_features.contiguous()

        tensor_stride, stride, kernel_size, dilation, region_type = prep_args(
            tensor_stride, stride, kernel_size, dilation, region_type,
            in_coords_key.D)

        if region_offset is None:
            region_offset = torch.IntTensor()

        ctx.in_feat = input_features
        ctx.kernel = kernel
        ctx = save_ctx(ctx, tensor_stride, stride, kernel_size, dilation,
                       region_type, in_coords_key, out_coords_key,
                       coords_manager)

        D = in_coords_key.D
        out_feat = input_features.new()

        fw_fn = getattr(
            MEB, 'ConvolutionTransposeForward' + get_postfix(input_features))
        fw_fn(ctx.in_feat, out_feat, kernel,
              convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), region_type, region_offset,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager, generate_new_coords)
        return out_feat
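In user code this autograd Function is normally reached through the MinkowskiConvolutionTranspose module rather than called directly. A minimal sketch, assuming MinkowskiEngine is importable as ME and that `y` is an existing SparseTensor with 16-dimensional features at tensor stride 2 in D = 3; the channel sizes and variable names are illustrative:

import MinkowskiEngine as ME

# Wraps the ConvolutionTransposeForward/Backward Functions shown above.
conv_tr = ME.MinkowskiConvolutionTranspose(
    in_channels=16, out_channels=8, kernel_size=3, stride=2, dimension=3)
z = conv_tr(y)  # upsamples y back toward tensor stride 1
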
Example #3
    def backward(ctx, grad_out_feat):
        grad_in_feat = grad_out_feat.new()
        D = ctx.in_coords_key.D
        bw_fn = getattr(MEB, 'MaxPoolingBackward' + get_postfix(grad_out_feat))
        bw_fn(D, ctx.in_feat, grad_in_feat, grad_out_feat, ctx.max_index,
              convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), ctx.region_type,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager)
        return grad_in_feat, None, None, None, None, None, None, None, None, None
    def backward(ctx, grad_out_feat):
        grad_in_feat = grad_out_feat.new()
        D = ctx.in_coords_key.D
        bw_fn = get_minkowski_function('PoolingTransposeBackward',
                                       grad_out_feat)
        bw_fn(ctx.in_feat, grad_in_feat, grad_out_feat, ctx.num_nonzero,
              convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), ctx.region_type,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager)
        return grad_in_feat, None, None, None, None, None, None, None, None, None, None
    def get_kernel_map(self,
                       in_key_or_tensor_strides,
                       out_key_or_tensor_strides,
                       stride=1,
                       kernel_size=3,
                       dilation=1,
                       region_type=0,
                       region_offset=None,
                       is_transpose=False,
                       is_pool=False,
                       on_gpu=False):
        r"""Get kernel in-out maps for the specified coords keys or tensor strides.

        """
        # region type 1 iteration with kernel_size 1 is invalid
        assert kernel_size > 0, "Invalid kernel size."
        if kernel_size == 1:
            region_type = 0

        if isinstance(in_key_or_tensor_strides, CoordsKey):
            in_tensor_strides = in_key_or_tensor_strides.getTensorStride()
        else:
            in_tensor_strides = in_key_or_tensor_strides
        if region_offset is None:
            region_offset = torch.IntTensor()

        in_coords_key = self._get_coords_key(in_key_or_tensor_strides)
        out_coords_key = self._get_coords_key(out_key_or_tensor_strides)

        tensor_strides = convert_to_int_tensor(in_tensor_strides, self.D)
        strides = convert_to_int_tensor(stride, self.D)
        kernel_sizes = convert_to_int_tensor(kernel_size, self.D)
        dilations = convert_to_int_tensor(dilation, self.D)
        D = in_coords_key.D
        tensor_strides, strides, kernel_sizes, dilations, region_type = prep_args(
            tensor_strides, strides, kernel_sizes, dilations, region_type, D)
        kernel_map_fn = self.CPPCoordsManager.getKernelMapGPU \
            if on_gpu else self.CPPCoordsManager.getKernelMap
        kernel_map = kernel_map_fn(
            convert_to_int_list(tensor_strides, D),  #
            convert_to_int_list(strides, D),  #
            convert_to_int_list(kernel_sizes, D),  #
            convert_to_int_list(dilations, D),  #
            region_type,
            region_offset,
            in_coords_key.CPPCoordsKey,
            out_coords_key.CPPCoordsKey,
            is_transpose,
            is_pool)

        return kernel_map
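A hedged usage sketch for the method above: after a stride-2 convolution has registered the downsampled coordinates, the shared CoordsManager can report which input rows feed which output rows for every kernel offset. The variables `x` (a stride-1 SparseTensor) and `conv` (a stride-2, kernel-size-3 MinkowskiConvolution) are assumed to exist already; the exact container returned depends on the MinkowskiEngine version.

y = conv(x)            # creates the tensor-stride-2 coordinate set
cm = x.coords_man      # CoordsManager shared by x and y
kernel_map = cm.get_kernel_map(
    1, 2,              # in/out tensor strides (CoordsKey objects also accepted)
    stride=2, kernel_size=3, region_type=0)
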
    def backward(ctx, grad_out_feat):
        if not grad_out_feat.is_contiguous():
            grad_out_feat = grad_out_feat.contiguous()

        grad_in_feat = grad_out_feat.new()
        D = ctx.in_coords_key.D
        bw_fn = getattr(MEB, 'AvgPoolingBackward' + get_postfix(grad_out_feat))
        bw_fn(ctx.in_feat, grad_in_feat, grad_out_feat, ctx.num_nonzero,
              convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), ctx.region_type,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager, ctx.use_avg)
        return grad_in_feat, None, None, None, None, None, None, None, None, None, None
    def backward(ctx, grad_out_feat):
        assert grad_out_feat.type() == ctx.in_feat.type()
        grad_in_feat = grad_out_feat.new()
        grad_kernel = grad_out_feat.new()
        D = ctx.in_coords_key.D
        bw_fn = getattr(MEB,
                        'ConvolutionBackward' + get_postfix(grad_out_feat))
        bw_fn(D, ctx.in_feat, grad_in_feat, grad_out_feat, ctx.kernel,
              grad_kernel, convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), ctx.region_type,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager)
        return grad_in_feat, grad_kernel, None, None, None, None, None, None, None, None, None
    def backward(ctx, grad_out_feat):
        if not grad_out_feat.is_contiguous():
            grad_out_feat = grad_out_feat.contiguous()

        grad_in_feat = grad_out_feat.new()
        D = ctx.in_coords_key.D
        bw_fn = get_minkowski_function('MaxPoolingBackward', grad_out_feat)
        bw_fn(ctx.in_feat, grad_in_feat, grad_out_feat, ctx.max_index,
              convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), ctx.region_type,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager)
        return grad_in_feat, None, None, None, None, None, None, None, None, None
    def forward(ctx,
                input_features,
                kernel,
                tensor_stride=1,
                stride=1,
                kernel_size=-1,
                dilation=1,
                region_type=0,
                region_offset=None,
                in_coords_key=None,
                out_coords_key=None,
                coords_manager=None):
        """
        region_type=0 HyperCube
        """
        # Prep arguments
        # Kernel shape (n_spatial_kernels, in_nfeat, out_nfeat)
        assert input_features.shape[1] == kernel.shape[1]
        if out_coords_key is None:
            out_coords_key = CoordsKey(in_coords_key.D)
        assert in_coords_key.D == out_coords_key.D
        assert input_features.type() == kernel.type()
        tensor_stride, stride, kernel_size, dilation, region_type = prep_args(
            tensor_stride, stride, kernel_size, dilation, region_type,
            in_coords_key.D)

        if region_offset is None:
            region_offset = torch.IntTensor()

        ctx.in_feat = input_features
        ctx.kernel = kernel
        ctx = save_ctx(ctx, tensor_stride, stride, kernel_size, dilation,
                       region_type, in_coords_key, out_coords_key,
                       coords_manager)

        D = in_coords_key.D
        out_feat = input_features.new()

        fw_fn = getattr(MEB,
                        'ConvolutionForward' + get_postfix(input_features))
        fw_fn(D, ctx.in_feat, out_feat, kernel,
              convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), region_type, region_offset,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager)
        return out_feat
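The same pattern applies to the plain convolution: the Function above is wrapped by the MinkowskiConvolution module. A minimal sketch, assuming MinkowskiEngine is importable as ME and `x` is a SparseTensor with 3-dimensional coordinates and 16-dimensional features; the channel sizes are illustrative:

import MinkowskiEngine as ME

conv = ME.MinkowskiConvolution(
    in_channels=16, out_channels=32, kernel_size=3, stride=2, dimension=3)
y = conv(x)  # y.tensor_stride becomes [2, 2, 2]
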
Example #10
    def get_coords_key(self, tensor_strides):
        tensor_strides = convert_to_int_list(tensor_strides, self.D)
        key = self.CPPCoordsManager.getCoordsKey(tensor_strides)
        coords_key = CoordsKey(self.D)
        coords_key.setKey(key)
        coords_key.setTensorStride(tensor_strides)
        return coords_key
    def forward(ctx,
                input_features,
                tensor_stride=1,
                stride=1,
                kernel_size=-1,
                dilation=1,
                region_type=0,
                region_offset=None,
                in_coords_key=None,
                out_coords_key=None,
                coords_manager=None):
        assert isinstance(region_type, RegionType)
        if out_coords_key is None:
            out_coords_key = CoordsKey(in_coords_key.D)
        assert in_coords_key.D == out_coords_key.D
        if not input_features.is_contiguous():
            input_features = input_features.contiguous()

        tensor_stride, stride, kernel_size, dilation, region_type = prep_args(
            tensor_stride, stride, kernel_size, dilation, region_type,
            in_coords_key.D)

        if region_offset is None:
            region_offset = torch.IntTensor()

        ctx.in_feat = input_features
        ctx = save_ctx(ctx, tensor_stride, stride, kernel_size, dilation,
                       region_type, in_coords_key, out_coords_key,
                       coords_manager)

        D = in_coords_key.D
        out_feat = input_features.new()
        max_index = input_features.new().int()

        ctx.max_index = max_index

        fw_fn = getattr(MEB, 'MaxPoolingForward' + get_postfix(input_features))
        fw_fn(input_features, out_feat, max_index,
              convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), region_type, region_offset,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager)
        return out_feat
    def forward(ctx,
                input_features,
                tensor_stride=1,
                stride=1,
                kernel_size=-1,
                dilation=1,
                region_type=0,
                region_offset=None,
                average=True,
                in_coords_key=None,
                out_coords_key=None,
                coords_manager=None):
        assert isinstance(region_type, RegionType)
        if out_coords_key is None:
            out_coords_key = CoordsKey(in_coords_key.D)
        assert in_coords_key.D == out_coords_key.D
        if not input_features.is_contiguous():
            input_features = input_features.contiguous()

        tensor_stride, stride, kernel_size, dilation, region_type = prep_args(
            tensor_stride, stride, kernel_size, dilation, region_type,
            in_coords_key.D)

        if region_offset is None:
            region_offset = torch.IntTensor()

        ctx.in_feat = input_features
        ctx = save_ctx(ctx, tensor_stride, stride, kernel_size, dilation,
                       region_type, in_coords_key, out_coords_key,
                       coords_manager)
        ctx.use_avg = average

        D = in_coords_key.D
        out_feat = input_features.new()
        ctx.num_nonzero = input_features.new()

        fw_fn = get_minkowski_function('AvgPoolingForward', input_features)
        fw_fn(ctx.in_feat, out_feat, ctx.num_nonzero,
              convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), region_type, region_offset,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager, ctx.use_avg)
        return out_feat
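For completeness, a hedged sketch of the module-level counterpart of this Function; `x` is an assumed SparseTensor in D = 3:

import MinkowskiEngine as ME

pool = ME.MinkowskiAvgPooling(kernel_size=2, stride=2, dimension=3)
y = pool(x)  # averages features over each 2x2x2 kernel region (the average=True path above)
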
    def get_kernel_map(self,
                       in_key_or_tensor_strides,
                       out_key_or_tensor_strides,
                       stride=1,
                       kernel_size=3,
                       dilation=1,
                       region_type=0,
                       is_transpose=False,
                       is_pool=False):
        r"""Get kernel in-out maps for the specified coords keys or tensor strides.

        """

        if isinstance(in_key_or_tensor_strides, CoordsKey):
            in_tensor_strides = in_key_or_tensor_strides.getTensorStride()
        else:
            in_tensor_strides = in_key_or_tensor_strides

        in_coords_key = self._get_coords_key(in_key_or_tensor_strides)
        out_coords_key = self._get_coords_key(out_key_or_tensor_strides)

        tensor_strides = convert_to_int_tensor(in_tensor_strides, self.D)
        strides = convert_to_int_tensor(stride, self.D)
        kernel_sizes = convert_to_int_tensor(kernel_size, self.D)
        dilations = convert_to_int_tensor(dilation, self.D)
        D = in_coords_key.D
        tensor_strides, strides, kernel_sizes, dilations, region_type = prep_args(
            tensor_strides, strides, kernel_sizes, dilations, region_type, D)

        kernel_map = self.CPPCoordsManager.getKernelMap(
            convert_to_int_list(tensor_strides, D),  #
            convert_to_int_list(strides, D),  #
            convert_to_int_list(kernel_sizes, D),  #
            convert_to_int_list(dilations, D),  #
            region_type,
            in_coords_key.CPPCoordsKey,
            out_coords_key.CPPCoordsKey,
            is_transpose,
            is_pool)

        return kernel_map
    def forward(ctx,
                input_features,
                tensor_stride=1,
                stride=1,
                kernel_size=-1,
                dilation=1,
                region_type=-1,
                region_offset=None,
                average=False,
                in_coords_key=None,
                out_coords_key=None,
                coords_manager=None):
        assert isinstance(region_type, RegionType)
        if out_coords_key is None:
            out_coords_key = CoordsKey(in_coords_key.D)
        assert in_coords_key.D == out_coords_key.D
        tensor_stride, stride, kernel_size, dilation, region_type = prep_args(
            tensor_stride, stride, kernel_size, dilation, region_type,
            in_coords_key.D)

        if region_offset is None:
            region_offset = torch.IntTensor()

        ctx.in_feat = input_features
        out_feat = input_features.new()
        ctx.num_nonzero = input_features.new()
        ctx = save_ctx(ctx, tensor_stride, stride, kernel_size, dilation,
                       region_type, in_coords_key, out_coords_key,
                       coords_manager)
        D = in_coords_key.D
        fw_fn = getattr(
            MEB, 'PoolingTransposeForward' + get_postfix(input_features))
        fw_fn(ctx.in_feat, out_feat, ctx.num_nonzero,
              convert_to_int_list(ctx.tensor_stride, D),
              convert_to_int_list(ctx.stride, D),
              convert_to_int_list(ctx.kernel_size, D),
              convert_to_int_list(ctx.dilation, D), region_type, region_offset,
              ctx.in_coords_key.CPPCoordsKey, ctx.out_coords_key.CPPCoordsKey,
              ctx.coords_man.CPPCoordsManager)
        return out_feat
Example #15
    def get_kernel_map_by_key(self,
                              in_coords_key,
                              out_coords_key,
                              tensor_strides=1,
                              stride=1,
                              kernel_size=3,
                              dilation=1,
                              region_type=0,
                              is_transpose=False):
        tensor_strides = convert_to_int_list(tensor_strides, self.D)
        strides = convert_to_int_list(stride, self.D)
        kernel_sizes = convert_to_int_list(kernel_size, self.D)
        dilations = convert_to_int_list(dilation, self.D)

        kernel_map = torch.IntTensor()
        self.CPPCoordsManager.getKernelMap(kernel_map, tensor_strides, strides,
                                           kernel_sizes, dilations,
                                           region_type,
                                           in_coords_key.CPPCoordsKey,
                                           out_coords_key.CPPCoordsKey,
                                           is_transpose)
        return kernel_map
Example #16
    def _get_coords_key(self, key_or_tensor_strides):
        assert isinstance(key_or_tensor_strides, CoordsKey) or \
            isinstance(key_or_tensor_strides, (Sequence, np.ndarray, torch.IntTensor, int)), \
            f"The input must be either a CoordsKey or tensor_stride of type (int, list, tuple, array, Tensor). Invalid: {key_or_tensor_strides}"
        if isinstance(key_or_tensor_strides, CoordsKey):
            # Do nothing and return the input
            return key_or_tensor_strides
        else:
            tensor_strides = convert_to_int_list(key_or_tensor_strides, self.D)
            key = self.CPPCoordsManager.getCoordsKey(tensor_strides)
            coords_key = CoordsKey(self.D)
            coords_key.setKey(key)
            coords_key.setTensorStride(tensor_strides)
            return coords_key
    def stride(self,
               coords_key: CoordsKey,
               stride: Union[int, Sequence, np.ndarray, torch.Tensor],
               force_creation: bool = False):
        assert isinstance(coords_key, CoordsKey)
        stride = convert_to_int_list(stride, self.D)

        strided_key = CoordsKey(self.D)
        tensor_stride = coords_key.getTensorStride()
        strided_key.setTensorStride(
            [t * s for t, s in zip(tensor_stride, stride)])

        strided_key.setKey(
            self.CPPCoordsManager.createStridedCoords(coords_key.getKey(),
                                                      tensor_stride, stride,
                                                      force_creation))
        return strided_key
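A short sketch of how stride() composes tensor strides, assuming `cm` is an initialized CoordsManager of dimension 3 and `key` is the CoordsKey of its stride-1 coordinates (for example, obtained via get_coords_key(1) above): createStridedCoords generates or looks up the strided coordinate set, and the returned key carries the element-wise product of the old tensor stride and the requested stride.

strided_key = cm.stride(key, 2)
print(strided_key.getTensorStride())  # [2, 2, 2] when starting from tensor stride [1, 1, 1]
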
    def create_coords_key(self,
                          coords: torch.IntTensor,
                          tensor_stride: int = 1,
                          force_creation: bool = False,
                          force_remap: bool = False,
                          allow_duplicate_coords: bool = False) -> CoordsKey:
        coords_key = CoordsKey(self.D)
        coords_key.setTensorStride(tensor_stride)
        mapping = self.initialize(coords,
                                  coords_key,
                                  force_creation=True,
                                  force_remap=True,
                                  allow_duplicate_coords=True)
        # Set the tensor stride
        tensor_stride = convert_to_int_list(tensor_stride, self.D)
        coords_key.setTensorStride(tensor_stride)

        return coords_key
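A hedged sketch of registering a raw coordinate set through the helper above; `cm` is an assumed CoordsManager with D = 3 and coordinates use the (batch_index, x, y, z) layout:

import torch

coords = torch.IntTensor([[0, 0, 0, 0],
                          [0, 2, 2, 0]])
key = cm.create_coords_key(coords, tensor_stride=2)  # key now addresses these coords at stride [2, 2, 2]
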
Example #19
    def tensor_stride(self, p):
        r"""
        This function is not recommended to be used directly.
        """
        p = convert_to_int_list(p, self.D)
        self.coords_key.setTensorStride(p)
Example #20
    def set_tensor_stride(self, s):
        ss = convert_to_int_list(s, self.D)
        tensor_strides = self.coords_key.getTensorStride()
        self.coords_key.setTensorStride(
            [s * p for s, p in zip(ss, tensor_strides)])
Example #21
    def reduce(self):
        origin_key = CoordsKey(self.D)
        origin_key.setTensorStride(convert_to_int_list(0, self.D))
        origin_key.setKey(self.CPPCoordsManager.createOriginCoords(self.D))
        return origin_key
Example #22
    def __init__(self,
                 feats,
                 coords=None,
                 coords_key=None,
                 coords_manager=None,
                 force_creation=False,
                 allow_duplicate_coords=False,
                 tensor_stride=1):
        r"""

        Args:
            :attr:`feats` (:attr:`torch.FloatTensor`,
            :attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or
            :attr:`torch.cuda.DoubleTensor`): The features of the sparse
            tensor.

            :attr:`coords` (:attr:`torch.IntTensor`): The coordinates
            associated with the features. If not provided, :attr:`coords_key`
            must be provided.

            :attr:`coords_key` (:attr:`MinkowskiEngine.CoordsKey`): When the
            coordinates are already cached in the MinkowskiEngine, we could
            reuse the same coordinates by simply providing the coordinate hash
            key. In most cases, this process is done automatically. If you
            provide one, make sure you understand what you are doing.

            :attr:`coords_manager` (:attr:`MinkowskiEngine.CoordsManager`): The
            MinkowskiEngine creates a dynamic computation graph and all
            coordinates inside the same computation graph are managed by a
            CoordsManager object. If not provided, the MinkowskiEngine will
            create a new computation graph. In most cases, this process is
            handled automatically and you do not need to use this. When you use
            it, make sure you understand what you are doing.

            :attr:`force_creation` (:attr:`bool`): Force creation of the
            coordinates. This allows generating a new set of coordinates even
            when there exists another set of coordinates with the same
            tensor stride. This could happen when you manually feed the same
            :attr:`coords_manager`.

            :attr:`allow_duplicate_coords` (:attr:`bool`): Allow duplicate
            coordinates when creating the sparse tensor. Internally, it will
            generate a new unique set of coordinates and use the features at the
            corresponding unique coordinates. In general, setting
            `allow_duplicate_coords=True` is not recommended as it could hide
            obvious errors in your data loading and preprocessing steps. Please
            refer to the quantization and data loading tutorial `here
            <https://stanfordvl.github.io/MinkowskiEngine/demo/training.html>`_
            for more details.

            :attr:`tensor_stride` (:attr:`int`, :attr:`list`,
            :attr:`numpy.array`, or :attr:`torch.Tensor`): The tensor stride
            of the current sparse tensor. By default, it is 1.

        """
        assert isinstance(feats,
                          torch.Tensor), "Features must be a torch.Tensor"

        if coords is None and coords_key is None:
            raise ValueError('Either coords or coords_key must be provided')

        if coords_key is None:
            assert coords_manager is not None or coords is not None
            D = -1
            if coords_manager is None:
                D = coords.size(1) - 1
            else:
                D = coords_manager.D
            coords_key = CoordsKey(D)
            coords_key.setTensorStride(convert_to_int_list(tensor_stride, D))
        else:
            assert isinstance(coords_key, CoordsKey)

        if coords is not None:
            assert isinstance(coords, torch.Tensor), \
                "Coordinate must be of type torch.Tensor"

            if not isinstance(coords, torch.IntTensor):
                warnings.warn(
                    'Coords implicitly converted to torch.IntTensor. ' +
                    'To remove this warning, use `.int()` to convert the ' +
                    'coords into a torch.IntTensor')
                coords = coords.int()

            if coords.device.type != 'cpu':
                warnings.warn(
                    'Coords implicitly converted to CPU type. ' +
                    'To remove this warning, use `.cpu()` to convert the ' +
                    'coords into a CPU type')
                coords = coords.cpu()

            assert feats.shape[0] == coords.shape[0], \
                "Number of rows in features and coordinates do not match."

            coords = coords.contiguous()

        if coords_manager is None:
            # If set to share the coords man, use the global coords man
            global _sparse_tensor_operation_mode, _global_coords_man
            if _sparse_tensor_operation_mode == SparseTensorOperationMode.SHARE_COORDS_MANAGER:
                if _global_coords_man is None:
                    _global_coords_man = CoordsManager(D=coords.size(1) - 1)
                coords_manager = _global_coords_man
            else:
                assert coords is not None, "Initial coordinates must be given"
                coords_manager = CoordsManager(D=coords.size(1) - 1)

            if not coords_key.isKeySet():
                self.mapping = coords_manager.initialize(
                    coords,
                    coords_key,
                    force_creation=force_creation,
                    force_remap=allow_duplicate_coords,
                    allow_duplicate_coords=allow_duplicate_coords)
                if len(self.mapping) > 0:
                    coords = coords[self.mapping]
                    feats = feats[self.mapping]
        else:
            assert isinstance(coords_manager, CoordsManager)

            if not coords_key.isKeySet():
                assert coords is not None
                self.mapping = coords_manager.initialize(
                    coords,
                    coords_key,
                    force_creation=force_creation,
                    force_remap=allow_duplicate_coords,
                    allow_duplicate_coords=allow_duplicate_coords)
                if len(self.mapping) > 0:
                    coords = coords[self.mapping]
                    feats = feats[self.mapping]

        self._F = feats.contiguous()
        self._C = coords
        self.coords_key = coords_key
        self.coords_man = coords_manager
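A minimal construction sketch for the class above, assuming MinkowskiEngine is importable as ME; coordinates are integer rows of the form (batch_index, x, y, z), so D = 3 here:

import torch
import MinkowskiEngine as ME

coords = torch.IntTensor([[0, 0, 0, 0],
                          [0, 0, 1, 1],
                          [0, 1, 1, 0]])   # three points in batch 0
feats = torch.rand(3, 16)                  # one 16-dimensional feature per point

x = ME.SparseTensor(feats, coords=coords)  # tensor_stride defaults to 1
print(x.tensor_stride, x.F.shape, x.C.shape)
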
Example #23
    def setTensorStride(self, tensor_stride):
        tensor_stride = convert_to_int_list(tensor_stride, self.D)
        self.CPPCoordsKey.setTensorStride(tensor_stride)
    def __init__(self,
                 feats,
                 coords=None,
                 coords_key=None,
                 coords_manager=None,
                 tensor_stride=1):
        r"""

        Args:
            :attr:`feats` (:attr:`torch.FloatTensor`,
            :attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or
            :attr:`torch.cuda.DoubleTensor`): The features of the sparse
            tensor.

            :attr:`coords` (:attr:`torch.IntTensor`): The coordinates
            associated with the features. If not provided, :attr:`coords_key`
            must be provided.

            :attr:`coords_key` (:attr:`MinkowskiEngine.CoordsKey`): When the
            coordinates are already cached in the MinkowskiEngine, we could
            reuse the same coordinates by simply providing the coordinate hash
            key. In most cases, this process is done automatically. If you
            provide one, make sure you understand what you are doing.

            :attr:`coords_manager` (:attr:`MinkowskiEngine.CoordsManager`): The
            MinkowskiEngine creates a dynamic computation graph using the input
            coordinates. If not provided, the MinkowskiEngine will create a new
            computation graph, so make sure to provide the same
            :attr:`CoordsManager` when you want to use the same computation
            graph. To use a sparse tensor within the same computation graph
            that you are using before, feed the :attr:`CoordsManager` of the
            sparse tensor that you want to use by
            :attr:`sparse_tensor.coords_man`. In most cases, this process is
            handled automatically. When you use it, make sure you understand
            what you are doing.

            :attr:`tensor_stride` (:attr:`int`, :attr:`list`,
            :attr:`numpy.array`, or :attr:`torch.Tensor`): The tensor stride
            of the current sparse tensor. By default, it is 1.

        """
        assert isinstance(feats,
                          torch.Tensor), "Features must be a torch.Tensor"

        if coords is None and coords_key is None:
            raise ValueError('Either coords or coords_key must be provided')

        if coords_key is None:
            assert coords_manager is not None or coords is not None
            D = -1
            if coords_manager is None:
                D = coords.size(1) - 1
            else:
                D = coords_manager.D
            coords_key = CoordsKey(D)
            coords_key.setTensorStride(convert_to_int_list(tensor_stride, D))
        else:
            assert isinstance(coords_key, CoordsKey)

        if coords is not None:
            assert isinstance(coords, torch.Tensor), \
                "Coordinate must be of type torch.Tensor"

            if not isinstance(coords, torch.IntTensor):
                warnings.warn(
                    'Coords implicitly converted to torch.IntTensor. ' +
                    'To remove this warning, use `.int()` to convert the ' +
                    'coords into a torch.IntTensor')
                coords = coords.int()

            assert feats.shape[0] == coords.shape[0], \
                "Number of rows in features and coordinates do not match."

            coords = coords.contiguous()

        if coords_manager is None:
            assert coords is not None, "Initial coordinates must be given"
            D = coords.size(1) - 1
            coords_manager = CoordsManager(D=D)
            coords_manager.initialize(coords, coords_key)
        else:
            assert isinstance(coords_manager, CoordsManager)

        self._F = feats.contiguous()
        self._C = coords
        self.coords_key = coords_key
        self.coords_man = coords_manager
    def set_tensor_stride(self, s):
        ss = convert_to_int_list(s, self.D)
        self.coords_key.setTensorStride(ss)
    def __init__(
            self,
            feats,
            coords=None,
            coords_key=None,
            coords_manager=None,
            force_creation=False,
            allow_duplicate_coords=False,
            quantization_mode=SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,
            tensor_stride=1):
        r"""

        Args:
            :attr:`feats` (:attr:`torch.FloatTensor`,
            :attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or
            :attr:`torch.cuda.DoubleTensor`): The features of the sparse
            tensor.

            :attr:`coords` (:attr:`torch.IntTensor`): The coordinates
            associated with the features. If not provided, :attr:`coords_key`
            must be provided.

            :attr:`coords_key` (:attr:`MinkowskiEngine.CoordsKey`): When the
            coordinates are already cached in the MinkowskiEngine, we could
            reuse the same coordinates by simply providing the coordinate hash
            key. In most cases, this process is done automatically. When you
            provide a `coords_key`, all other arguments will be ignored.

            :attr:`coords_manager` (:attr:`MinkowskiEngine.CoordsManager`): The
            MinkowskiEngine creates a dynamic computation graph and all
            coordinates inside the same computation graph are managed by a
            CoordsManager object. If not provided, the MinkowskiEngine will
            create a new computation graph. In most cases, this process is
            handled automatically and you do not need to use this. When you use
            it, make sure you understand what you are doing.

            :attr:`force_creation` (:attr:`bool`): Force creation of the
            coordinates. This allows generating a new set of coordinates even
            when there exists another set of coordinates with the same
            tensor stride. This could happen when you manually feed the same
            :attr:`coords_manager`.

            :attr:`allow_duplicate_coords` (:attr:`bool`): Allow duplicate
            coordinates when creating the sparse tensor. Internally, it will
            generate a new unique set of coordinates and use the features at the
            corresponding unique coordinates. In general, setting
            `allow_duplicate_coords=True` is not recommended as it could hide
            obvious errors in your data loading and preprocessing steps. Please
            refer to the quantization and data loading tutorial `here
            <https://stanfordvl.github.io/MinkowskiEngine/demo/training.html>`_
            for more details.

            :attr:`quantization_mode`
            (:attr:`MinkowskiEngine.SparseTensorQuantizationMode`): Defines the
            quantization method and how to define features of a sparse tensor.
            Please refer to :attr:`SparseTensorQuantizationMode` for details.

            :attr:`tensor_stride` (:attr:`int`, :attr:`list`,
            :attr:`numpy.array`, or :attr:`torch.Tensor`): The tensor stride
            of the current sparse tensor. By default, it is 1.

        """
        assert isinstance(feats,
                          torch.Tensor), "Features must be a torch.Tensor"
        assert feats.ndim == 2, f"The features must be a matrix (2-D tensor); the input feature is an order-{feats.ndim} tensor."
        assert isinstance(quantization_mode, SparseTensorQuantizationMode)
        self.quantization_mode = quantization_mode

        if coords is None and coords_key is None:
            raise ValueError('Either coords or coords_key must be provided')

        if coords_key is None:
            assert coords_manager is not None or coords is not None
            D = -1
            if coords_manager is None:
                D = coords.size(1) - 1
            else:
                D = coords_manager.D
            coords_key = CoordsKey(D)
            coords_key.setTensorStride(convert_to_int_list(tensor_stride, D))
        else:
            assert isinstance(coords_key, CoordsKey)

        if coords is not None:
            assert isinstance(coords, torch.Tensor), \
                "Coordinate must be of type torch.Tensor"

            if not isinstance(coords, torch.IntTensor):
                warnings.warn(
                    'Coords implicitly converted to torch.IntTensor. ' +
                    'To remove this warning, use `.int()` to convert the ' +
                    'coords into a torch.IntTensor')
                coords = torch.floor(coords).int()

            if coords.device.type != 'cpu':
                warnings.warn(
                    'Coords implicitly converted to CPU type. ' +
                    'To remove this warning, use `.cpu()` to convert the ' +
                    'coords into a CPU type')
                coords = coords.cpu()

            assert feats.shape[0] == coords.shape[0], \
                "The number of rows in features and coordinates do not match."

            coords = coords.contiguous()

        ##########################
        # Setup CoordsManager
        ##########################
        if coords_manager is None:
            # If set to share the coords man, use the global coords man
            global _sparse_tensor_operation_mode, _global_coords_man
            if _sparse_tensor_operation_mode == SparseTensorOperationMode.SHARE_COORDS_MANAGER:
                if _global_coords_man is None:
                    _global_coords_man = CoordsManager(D=coords.size(1) - 1)
                coords_manager = _global_coords_man
            else:
                assert coords is not None, "Initial coordinates must be given"
                coords_manager = CoordsManager(D=coords.size(1) - 1)

        else:
            assert isinstance(coords_manager, CoordsManager)

        ##########################
        # Initialize coords
        ##########################
        if not coords_key.isKeySet() and coords is not None and len(
                coords) > 0:
            if quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
                force_remap = True
                return_inverse = False
            elif quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
                force_remap = True
                return_inverse = True

            self.unique_index, self.inverse_mapping = coords_manager.initialize(
                coords,
                coords_key,
                force_creation=force_creation,
                force_remap=force_remap,
                allow_duplicate_coords=allow_duplicate_coords,
                return_inverse=return_inverse)

            if quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
                self._CF = feats
                self._CC = coords
                feats = MEB.quantization_average_features(
                    feats, torch.arange(len(feats)), self.inverse_mapping,
                    len(self.unique_index), 0)
                coords = coords[self.unique_index]
            elif force_remap:
                assert len(self.unique_index) > 0
                self._CC = coords
                self._CF = feats
                coords = coords[self.unique_index]
                feats = feats[self.unique_index]

        elif coords is not None:  # empty / invalid coords
            assert isinstance(coords, torch.IntTensor)
            assert coords.ndim == 2
            coords_manager.initialize(coords,
                                      coords_key,
                                      force_creation=force_creation,
                                      force_remap=False,
                                      allow_duplicate_coords=False,
                                      return_inverse=False)
        elif coords_key is not None:
            assert coords_key.isKeySet()

        self._F = feats.contiguous()
        self._C = coords
        self.coords_key = coords_key
        self.coords_man = coords_manager
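A hedged sketch of the UNWEIGHTED_AVERAGE branch above: duplicate coordinates are collapsed into one row and their features averaged. It assumes the SparseTensorQuantizationMode enum shown here is exported at the package level as ME.SparseTensorQuantizationMode:

import torch
import MinkowskiEngine as ME

coords = torch.IntTensor([[0, 0, 0, 0],
                          [0, 0, 0, 0],   # duplicate of the first row
                          [0, 1, 0, 0]])
feats = torch.FloatTensor([[1.0], [3.0], [5.0]])

x = ME.SparseTensor(
    feats,
    coords=coords,
    quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)
print(x.C)  # two unique coordinates remain
print(x.F)  # the duplicated rows' features are averaged to 2.0
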
Example #27
    def get_kernel_map(self,
                       in_key_or_tensor_strides,
                       out_key_or_tensor_strides,
                       stride=1,
                       kernel_size=3,
                       dilation=1,
                       region_type=0,
                       region_offset=None,
                       is_transpose=False,
                       is_pool=False,
                       on_gpu=False):
        r"""Get kernel in-out maps for the specified coords keys or tensor strides.

        """
        # region type 1 iteration with kernel_size 1 is invalid
        if isinstance(kernel_size, torch.Tensor):
            assert (kernel_size >
                    0).all(), f"Invalid kernel size: {kernel_size}"
            if (kernel_size == 1).all():
                region_type = 0
        elif isinstance(kernel_size, int):
            assert kernel_size > 0, f"Invalid kernel size: {kernel_size}"
            if kernel_size == 1:
                region_type = 0

        if isinstance(in_key_or_tensor_strides, CoordsKey):
            in_tensor_strides = in_key_or_tensor_strides.getTensorStride()
        else:
            in_tensor_strides = in_key_or_tensor_strides
        if region_offset is None:
            region_offset = torch.IntTensor()

        in_coords_key = self._get_coords_key(in_key_or_tensor_strides)
        out_coords_key = self._get_coords_key(out_key_or_tensor_strides)

        tensor_strides = convert_to_int_tensor(in_tensor_strides, self.D)
        strides = convert_to_int_tensor(stride, self.D)
        kernel_sizes = convert_to_int_tensor(kernel_size, self.D)
        dilations = convert_to_int_tensor(dilation, self.D)
        D = in_coords_key.D
        tensor_strides, strides, kernel_sizes, dilations, region_type = prep_args(
            tensor_strides, strides, kernel_sizes, dilations, region_type, D)
        if on_gpu:
            assert hasattr(
                self.CPPCoordsManager, 'getKernelMapGPU'
            ), f"Function getKernelMapGPU not available. Please compile MinkowskiEngine where `torch.cuda.is_available()` is `True`."
            kernel_map_fn = getattr(self.CPPCoordsManager, 'getKernelMapGPU')
        else:
            kernel_map_fn = self.CPPCoordsManager.getKernelMap
        kernel_map = kernel_map_fn(
            convert_to_int_list(tensor_strides, D),  #
            convert_to_int_list(strides, D),  #
            convert_to_int_list(kernel_sizes, D),  #
            convert_to_int_list(dilations, D),  #
            region_type,
            region_offset,
            in_coords_key.CPPCoordsKey,
            out_coords_key.CPPCoordsKey,
            is_transpose,
            is_pool)

        return kernel_map