Example #1
def unique_coordinate_map(
    coordinates: torch.Tensor,
    tensor_stride: Union[int, Sequence, np.ndarray] = 1,
) -> Tuple[torch.IntTensor, torch.IntTensor]:
    r"""Returns the unique indices and the inverse indices of the coordinates.

    :attr:`coordinates`: an integer `torch.Tensor` that defines the
    coordinates. If the tensor resides on `CUDA`, a GPU coordinate map
    manager is used; otherwise the CPU manager is used.

    Example::

       >>> coordinates = torch.IntTensor([[0, 0], [0, 0], [0, 1], [0, 2]])
       >>> unique_map, inverse_map = unique_coordinate_map(coordinates)
       >>> coordinates[unique_map] # unique coordinates
       >>> torch.all(coordinates == coordinates[unique_map][inverse_map]) # True

    """
    assert isinstance(coordinates, torch.Tensor)
    assert coordinates.ndim == 2, "Coordinates must be a matrix"
    if not coordinates.is_cuda:
        manager = MEB.CoordinateMapManagerCPU()
    else:
        manager = MEB.CoordinateMapManagerGPU_c10()
    tensor_stride = convert_to_int_list(tensor_stride,
                                        coordinates.shape[-1] - 1)
    key, (unique_map,
          inverse_map) = manager.insert_and_map(coordinates, tensor_stride, "")
    return unique_map, inverse_map
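
A minimal usage sketch of the function above. The import path is an assumption
(it may differ between MinkowskiEngine versions); the logic mirrors the
docstring example:

import torch
from MinkowskiEngine.utils import unique_coordinate_map  # assumed import path

# First column is the batch index; the rest are the spatial coordinates.
coordinates = torch.IntTensor([[0, 0], [0, 0], [0, 1], [0, 2]])
unique_map, inverse_map = unique_coordinate_map(coordinates)

unique_coords = coordinates[unique_map]    # duplicates removed
restored = unique_coords[inverse_map]      # one row per original coordinate
# (older PyTorch may require unique_map.long() / inverse_map.long() as indices)
assert torch.all(coordinates == restored)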

Example #2

    def test_conv(self):
        IC, OC = 3, 16
        coords, colors, pcd = load_file("1.ply")
        kernel_size = [3, 3, 3]
        kernel_stride = [2, 2, 2]
        kernel_dilation = [1, 1, 1]

        # kernel weights: (kernel volume, in_channels, out_channels)
        kernel = torch.rand(np.prod(kernel_size), IC, OC).to(0)
        kernel_generator = KernelGenerator(
            kernel_size=kernel_size,
            stride=kernel_stride,
            dilation=kernel_dilation,
            expand_coordinates=False,
            dimension=3,
        )

        for batch_size in [1, 5, 10, 20, 40]:
            for voxel_size in [0.05, 0.035, 0.02]:
                min_time = float("inf")  # best (minimum) time over runs

                # quantize, then replicate the same cloud batch_size times
                dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
                bcoords = batched_coordinates(
                    [dcoords for i in range(batch_size)])

                for i in range(10):  # keep the best of 10 timed runs
                    manager = _C.CoordinateMapManagerGPU_c10()

                    # batch insert
                    in_key, (unique_map, inverse_map) = manager.insert_and_map(
                        bcoords.to(0), [1, 1, 1], "")
                    in_feats = torch.rand(manager.size(in_key), IC).to(0)
                    out_key = _C.CoordinateMapKey(4)

                    stime = time.time()
                    out_features = _C.ConvolutionForwardGPU(
                        in_feats,
                        kernel,
                        kernel_generator.kernel_size,
                        kernel_generator.kernel_stride,
                        kernel_generator.kernel_dilation,
                        kernel_generator.region_type,
                        kernel_generator.region_offsets,
                        kernel_generator.expand_coordinates,
                        in_key,
                        out_key,
                        manager,
                    )
                    min_time = min(time.time() - stime, min_time)

                # columns: batch_size, #input coords, #output coords, best time (s)
                print(
                    f"{batch_size}\t{manager.size(in_key)}\t{manager.size(out_key)}\t{min_time}"
                )
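
The benchmark above calls the backend `_C.ConvolutionForwardGPU` directly. For
reference, a hedged sketch of the same convolution through the high-level API,
which wraps the backend call; it assumes a CUDA build of MinkowskiEngine and
uses synthetic inputs:

import torch
import MinkowskiEngine as ME

# Synthetic input: 1000 random points in a 100^3 grid, single batch.
coords = torch.randint(0, 100, (1000, 3))
bcoords = ME.utils.batched_coordinates([coords])
feats = torch.rand(bcoords.shape[0], 3)

conv = ME.MinkowskiConvolution(
    in_channels=3, out_channels=16, kernel_size=3, stride=2, dimension=3
).to(0)
x = ME.SparseTensor(features=feats, coordinates=bcoords, device="cuda:0")
y = conv(x)  # sparse output with tensor stride 2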
Example #3
def sparse_quantize(
    coordinates,
    features=None,
    labels=None,
    ignore_label=-100,
    return_index=False,
    return_inverse=False,
    return_maps_only=False,
    quantization_size=None,
    device="cpu",
):
    r"""Given coordinates, and features (optionally labels), the function
    generates quantized (voxelized) coordinates.

    Args:
        :attr:`coordinates` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
        matrix of size :math:`N \times D` where :math:`N` is the number of
        points in the :math:`D` dimensional space.

        :attr:`features` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`, optional): a
        matrix of size :math:`N \times D_F` where :math:`N` is the number of
        points and :math:`D_F` is the dimension of the features. Must have the
        same container as `coordinates` (i.e. if `coordinates` is a
        torch.Tensor, `features` must also be a torch.Tensor).

        :attr:`labels` (:attr:`numpy.ndarray` or :attr:`torch.IntTensor`,
        optional): integer labels associated with each coordinate. Must have
        the same container as `coordinates` (i.e. if `coordinates` is a
        torch.Tensor, `labels` must also be a torch.Tensor). For
        classification, where a set of points is mapped to one label, do not
        provide labels.

        :attr:`ignore_label` (:attr:`int`, optional): the integer value of the
        ignore label, e.g. the value passed to
        :attr:`torch.nn.CrossEntropyLoss(ignore_index=ignore_label)`.

        :attr:`return_index` (:attr:`bool`, optional): set True if you want the
        indices of the quantized coordinates. False by default.

        :attr:`return_inverse` (:attr:`bool`, optional): set True if you want
        the indices that can recover the discretized original coordinates.
        False by default. `return_index` must be True when `return_inverse` is True.

        :attr:`return_maps_only` (:attr:`bool`, optional): if set, return the
        unique_map (and, if `return_inverse` is set, the inverse_map) but not
        the coordinates. Useful when you do not need the final coordinates, or
        when `device == "cuda"` and you do not need the coordinates on the
        GPU. Returns either unique_map alone or (unique_map, inverse_map).

        :attr:`quantization_size` (:attr:`float`, :attr:`list`, or
        :attr:`numpy.ndarray`, optional): the length of each side of the
        hyperrectangular grid cell; if set, defines the smallest distance
        between quantized coordinates.

        :attr:`device` (:attr:`str`, optional): Either 'cpu' or 'cuda'.

        Example::

           >>> unique_coords, unique_map, inverse_map = sparse_quantize(discrete_coords, return_index=True, return_inverse=True)
           >>> torch.all(unique_coords[inverse_map] == discrete_coords)  # True

    Example::

        >>> # Segmentation
        >>> criterion = torch.nn.CrossEntropyLoss(ignore_index=-100)
        >>> coords, feats, labels = MinkowskiEngine.utils.sparse_quantize(
        >>>     coords, feats, labels, ignore_label=-100, quantization_size=0.1)
        >>> output = net(MinkowskiEngine.SparseTensor(feats, coords))
        >>> loss = criterion(output.F, labels.long())
        >>>
        >>> # Classification (`labels` here is one label per sample, defined elsewhere)
        >>> criterion = torch.nn.CrossEntropyLoss(ignore_index=-100)
        >>> coords, feats = MinkowskiEngine.utils.sparse_quantize(coords, feats)
        >>> output = net(MinkowskiEngine.SparseTensor(feats, coords))
        >>> loss = criterion(output.F, labels.long())

    """
    assert isinstance(
        coordinates,
        (np.ndarray,
         torch.Tensor)), "Coordinates must be either np.ndarray or torch.Tensor."

    use_label = labels is not None
    use_feat = features is not None

    assert (
        coordinates.ndim == 2
    ), "The coordinates must be a 2D matrix. The shape of the input is " + str(
        coordinates.shape)

    if return_inverse:
        assert return_index, "return_index must be True when return_inverse is True"

    if use_feat:
        assert features.ndim == 2
        assert coordinates.shape[0] == features.shape[0]

    if use_label:
        assert coordinates.shape[0] == len(labels)

    dimension = coordinates.shape[1]
    # Quantize the coordinates
    if quantization_size is not None:
        if isinstance(quantization_size, (Sequence, np.ndarray, torch.Tensor)):
            assert (len(quantization_size) == dimension
                    ), "Quantization size and coordinates size mismatch."
            if isinstance(coordinates, np.ndarray):
                quantization_size = np.array(quantization_size)
                discrete_coordinates = np.floor(coordinates /
                                                quantization_size)
            else:
                quantization_size = torch.Tensor(quantization_size)
                discrete_coordinates = (coordinates /
                                        quantization_size).floor()

        elif np.isscalar(quantization_size):
            if quantization_size == 1:
                discrete_coordinates = coordinates
            else:
                discrete_coordinates = np.floor(coordinates /
                                                quantization_size)
        else:
            raise ValueError("Not supported type for quantization_size.")
    else:
        discrete_coordinates = coordinates

    discrete_coordinates = np.floor(discrete_coordinates)
    if isinstance(coordinates, np.ndarray):
        discrete_coordinates = discrete_coordinates.astype(np.int32)
    else:
        discrete_coordinates = discrete_coordinates.int()

    if device == "cpu":
        manager = MEB.CoordinateMapManagerCPU()
    elif "cuda" in device:
        manager = MEB.CoordinateMapManagerGPU_c10()
    else:
        raise ValueError("Invalid device. Only `cpu` or `cuda` supported.")

    # Return values accordingly
    if use_label:
        # unique_map, colabels = quantize_label(discrete_coordinates, labels, ignore_label)
        tensor_stride = [1] * (discrete_coordinates.shape[1] - 1)
        discrete_coordinates = (
            discrete_coordinates.to(device) if isinstance(
                discrete_coordinates, torch.Tensor) else
            torch.from_numpy(discrete_coordinates).to(device))
        _, (unique_map,
            inverse_map) = manager.insert_and_map(discrete_coordinates,
                                                  tensor_stride, "")

        # assert (
        #     device == "cpu"
        # ), "CUDA accelerated quantization with labels not supported currently"

        if return_maps_only:
            if return_inverse:
                return unique_map, inverse_map
            else:
                return unique_map

        return_args = [discrete_coordinates[unique_map]]
        if use_feat:
            return_args.append(features[unique_map])
        return_args.append(labels[unique_map])
        if return_index:
            return_args.append(unique_map)
        if return_inverse:
            return_args.append(inverse_map)

        if len(return_args) == 1:
            return return_args[0]
        else:
            return tuple(return_args)
    else:
        tensor_stride = [1] * (discrete_coordinates.shape[1] - 1)
        discrete_coordinates = (
            discrete_coordinates.to(device) if isinstance(
                discrete_coordinates, torch.Tensor) else
            torch.from_numpy(discrete_coordinates).to(device))
        _, (unique_map,
            inverse_map) = manager.insert_and_map(discrete_coordinates,
                                                  tensor_stride, "")
        if return_maps_only:
            if return_inverse:
                return unique_map, inverse_map
            else:
                return unique_map

        return_args = [discrete_coordinates[unique_map]]
        if use_feat:
            return_args.append(features[unique_map])
        if return_index:
            return_args.append(unique_map)
        if return_inverse:
            return_args.append(inverse_map)

        if len(return_args) == 1:
            return return_args[0]
        else:
            return tuple(return_args)
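
A short, hedged end-to-end sketch of `sparse_quantize` on synthetic numpy
data. With features and both `return_index` and `return_inverse` set (and no
labels), the function returns a 4-tuple, as the branch above shows:

import numpy as np
from MinkowskiEngine.utils import sparse_quantize

coords = np.random.rand(1000, 3)                    # continuous coordinates
feats = np.random.rand(1000, 4).astype(np.float32)  # per-point features

# Voxelize at 0.05; points in the same voxel collapse to a single entry.
# Note: unique_coords comes back as a torch.IntTensor on `device`, while
# the features keep their original container (numpy here).
unique_coords, unique_feats, unique_map, inverse_map = sparse_quantize(
    coords, feats,
    return_index=True,
    return_inverse=True,
    quantization_size=0.05,
)
assert unique_coords.shape[0] == unique_feats.shape[0] == len(unique_map)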