def _get_coordinate_map_key(
    input: SparseTensor,
    coordinates: torch.Tensor = None,
    tensor_stride: StrideType = 1,
    expand_coordinates: bool = False,
):
    r"""Returns the coordinates map key."""
    if coordinates is not None and not expand_coordinates:
        assert isinstance(coordinates,
                          (CoordinateMapKey, torch.Tensor, SparseTensor))
        if isinstance(coordinates, torch.Tensor):
            assert coordinates.ndim == 2
            coordinate_map_key = CoordinateMapKey(
                convert_to_int_list(tensor_stride,
                                    coordinates.size(1) - 1), "")

            (
                coordinate_map_key,
                (unique_index, inverse_mapping),
            ) = input._manager.insert_and_map(coordinates,
                                              *coordinate_map_key.get_key())
        elif isinstance(coordinates, SparseTensor):
            coordinate_map_key = coordinates.coordinate_map_key
        else:  # CoordinateMapKey type due to the previous assertion
            coordinate_map_key = coordinates
    else:  # coordinates is None or expand_coordinates is requested
        coordinate_map_key = CoordinateMapKey(
            input.coordinate_map_key.get_coordinate_size())
    return coordinate_map_key
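# A minimal, illustrative sketch (not from the source) of the coordinate layout
# that _get_coordinate_map_key assumes: a 2D integer tensor of shape (N, D + 1)
# whose first column is the batch index, so coordinates.size(1) - 1 recovers the
# spatial dimension D used to build the CoordinateMapKey.
import torch

N, D = 8, 3
batch_index = torch.zeros(N, 1, dtype=torch.int32)        # all points in batch 0
voxels = torch.randint(0, 16, (N, D), dtype=torch.int32)  # quantized coordinates
coordinates = torch.cat([batch_index, voxels], dim=1)     # shape (N, D + 1)
assert coordinates.ndim == 2 and coordinates.size(1) - 1 == D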
    def forward(
        ctx,
        input_features: torch.Tensor,
        pooling_mode: PoolingMode,
        in_coordinate_map_key: CoordinateMapKey,
        out_coordinate_map_key: CoordinateMapKey = None,
        coordinate_manager: CoordinateManager = None,
    ):
        if out_coordinate_map_key is None:
            out_coordinate_map_key = CoordinateMapKey(
                in_coordinate_map_key.get_coordinate_size())
        input_features = input_features.contiguous()

        ctx.input_features = input_features
        ctx.in_coords_key = in_coordinate_map_key
        ctx.out_coords_key = out_coordinate_map_key
        ctx.coordinate_manager = coordinate_manager
        ctx.pooling_mode = pooling_mode

        fw_fn = get_minkowski_function("GlobalPoolingForward", input_features)
        out_feat, num_nonzero = fw_fn(
            input_features,
            pooling_mode,
            ctx.in_coords_key,
            ctx.out_coords_key,
            ctx.coordinate_manager._manager,
        )
        ctx.num_nonzero = num_nonzero

        return out_feat
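# Dense, illustrative analogue (assuming average pooling mode and a hypothetical
# batch vector) of the global pooling above: GlobalPoolingForward reduces the
# feature rows of each batch instance to a single output row, and num_nonzero
# records how many input rows contributed to each output.
import torch

feats = torch.randn(10, 16)
batch = torch.tensor([0] * 6 + [1] * 4)       # batch index of each input row
pooled = torch.stack([feats[batch == b].mean(0) for b in batch.unique()])
num_nonzero = torch.bincount(batch)           # rows pooled per instance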
    def forward(
        ctx,
        in_feat: torch.Tensor,
        in_coords_key: CoordinateMapKey,
        glob_coords_key: CoordinateMapKey = None,
        coords_manager: CoordinateManager = None,
        gpooling_mode=PoolingMode.GLOBAL_AVG_POOLING_KERNEL,
    ):
        if glob_coords_key is None:
            glob_coords_key = CoordinateMapKey(
                in_coords_key.get_coordinate_size())

        gpool_avg_forward = get_minkowski_function("GlobalPoolingForward",
                                                   in_feat)
        broadcast_forward = get_minkowski_function("BroadcastForward", in_feat)

        mean, num_nonzero = gpool_avg_forward(
            in_feat,
            gpooling_mode,
            in_coords_key,
            glob_coords_key,
            coords_manager._manager,
        )

        # X - \mu
        centered_feat = broadcast_forward(
            in_feat,
            -mean,
            BroadcastMode.ELEMENTWISE_ADDITON,
            in_coords_key,
            glob_coords_key,
            coords_manager._manager,
        )

        # Variance = 1/N \sum (X - \mu) ** 2
        variance, num_nonzero = gpool_avg_forward(
            centered_feat**2,
            gpooling_mode,
            in_coords_key,
            glob_coords_key,
            coords_manager._manager,
        )

        # norm_feat = (X - \mu) / \sigma
        inv_std = 1 / (variance + 1e-8).sqrt()
        norm_feat = broadcast_forward(
            centered_feat,
            inv_std,
            BroadcastMode.ELEMENTWISE_MULTIPLICATION,
            in_coords_key,
            glob_coords_key,
            coords_manager._manager,
        )

        ctx.saved_vars = (in_coords_key, glob_coords_key, coords_manager,
                          gpooling_mode)
        # For GPU tensors, must use save_for_backward.
        ctx.save_for_backward(inv_std, norm_feat)
        return norm_feat
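# A dense torch analogue (illustrative only) of the normalization computed in
# the forward above: a per-instance mean and variance over the rows, followed
# by (X - mu) / sqrt(var + eps), mirroring the pooling/broadcast steps.
import torch

X = torch.randn(100, 16)                        # feature rows of one instance
mu = X.mean(dim=0, keepdim=True)                # global average pooling
centered = X - mu                               # broadcast of -mu (elementwise addition)
var = centered.pow(2).mean(dim=0, keepdim=True)
inv_std = 1.0 / (var + 1e-8).sqrt()
norm_feat = centered * inv_std                  # broadcast multiplication by 1 / sigma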
    def forward(self, input: SparseTensor, mask: torch.Tensor):
        r"""
        Args:
            :attr:`input` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor
            to remove coordinates from.

            :attr:`mask` (:attr:`torch.BoolTensor`): mask vector that specifies
            which coordinates to keep. Coordinates with ``False`` will be removed.

        Returns:
            A :attr:`MinkowskiEngine.SparseTensor` with C = the coordinates
            corresponding to `mask == True` and F = a copy of the features from
            `mask == True`.

        Example::

            >>> # Define inputs
            >>> input = SparseTensor(feats, coordinates=coords)
            >>> # Any boolean tensor can be used as the filter
            >>> mask = torch.rand(feats.size(0)) < 0.5
            >>> pruning = MinkowskiPruning()
            >>> output = pruning(input, mask)

        """
        assert isinstance(input, SparseTensor)

        out_coords_key = CoordinateMapKey(
            input.coordinate_map_key.get_coordinate_size())
        output = self.pruning.apply(input.F, mask, input.coordinate_map_key,
                                    out_coords_key, input._manager)
        return SparseTensor(output,
                            coordinate_map_key=out_coords_key,
                            coordinate_manager=input._manager)
    def _binary_functor(self, other, binary_fn):
        assert isinstance(other, (self.__class__, torch.Tensor))
        if isinstance(other, self.__class__):
            assert self._manager == other._manager, COORDINATE_MANAGER_DIFFERENT_ERROR

            if self.coordinate_map_key == other.coordinate_map_key:
                return self.__class__(
                    binary_fn(self._F, other.F),
                    coordinate_map_key=self.coordinate_map_key,
                    coordinate_manager=self._manager,
                )
            else:
                # Generate union maps
                out_key = CoordinateMapKey(
                    self.coordinate_map_key.get_coordinate_size())
                union_maps = self.coordinate_manager.union_map(
                    [self.coordinate_map_key, other.coordinate_map_key],
                    out_key)
                N_out = self.coordinate_manager.size(out_key)
                out_F = torch.zeros((N_out, self._F.size(1)),
                                    dtype=self.dtype,
                                    device=self.device)
                out_F[union_maps[0][1]] = self._F[union_maps[0][0]]
                out_F[union_maps[1][1]] = binary_fn(out_F[union_maps[1][1]],
                                                    other._F[union_maps[1][0]])
                return self.__class__(out_F,
                                      coordinate_map_key=out_key,
                                      coordinate_manager=self._manager)
        else:  # when it is a torch.Tensor
            return self.__class__(
                binary_fn(self._F, other),
                coordinate_map_key=self.coordinate_map_key,
                coordinate_manager=self._manager,
            )
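# A plain-torch sketch (with made-up index maps) of the union-map branch above:
# both operands are scattered into a zero tensor sized by the union of their
# coordinates, and binary_fn is applied only where the second operand has rows.
import torch

F_a = torch.arange(3.0).reshape(3, 1)            # features of self
F_b = torch.arange(10.0, 12.0).reshape(2, 1)     # features of other
map_a = (torch.tensor([0, 1, 2]), torch.tensor([0, 1, 2]))  # (rows in a, rows in union)
map_b = (torch.tensor([0, 1]), torch.tensor([2, 3]))        # (rows in b, rows in union)
out_F = torch.zeros(4, 1)
out_F[map_a[1]] = F_a[map_a[0]]
out_F[map_b[1]] = out_F[map_b[1]] + F_b[map_b[0]]           # binary_fn == torch.add here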
    def forward(
        self,
        input,
        coordinates: Union[torch.IntTensor, CoordinateMapKey,
                           SparseTensor] = None,
    ):
        # Get a new coordinate map key or extract one from the coordinates
        if isinstance(input, ME.TensorField):
            in_coordinate_map_key = input.coordinate_field_map_key
            out_coordinate_map_key = CoordinateMapKey(
                input.coordinate_field_map_key.get_coordinate_size())
        else:
            in_coordinate_map_key = input.coordinate_map_key
            out_coordinate_map_key = _get_coordinate_map_key(
                input, coordinates)
        output = self.pooling.apply(
            input.F,
            self.pooling_mode,
            in_coordinate_map_key,
            out_coordinate_map_key,
            input._manager,
        )

        return SparseTensor(
            output,
            coordinate_map_key=out_coordinate_map_key,
            coordinate_manager=input.coordinate_manager,
        )
Example #7
    def forward(self, *inputs):
        r"""
        Args:
            A variable number of :attr:`MinkowskiEngine.SparseTensor`'s.

        Returns:
            A :attr:`MinkowskiEngine.SparseTensor` with coordinates = union of all
            input coordinates, and features = sum of all features corresponding to the
            coordinate.

        Example::

            >>> # Define inputs
            >>> input1 = SparseTensor(
            >>>     torch.rand(N, in_channels, dtype=torch.double), coordinates=coords)
            >>> # All inputs must share the same coordinate manager
            >>> input2 = SparseTensor(
            >>>     torch.rand(N, in_channels, dtype=torch.double),
            >>>     coordinates=coords + 1,
            >>>     coordinate_manager=input1.coordinate_manager,  # Must use the same coordinate manager
            >>> )
            >>> union = MinkowskiUnion()
            >>> output = union(input1, input2)

        """
        assert isinstance(inputs,
                          (list, tuple)), "The input must be a list or tuple"
        for s in inputs:
            assert isinstance(s,
                              SparseTensor), "Inputs must be sparse tensors."
        assert len(
            inputs) > 1, "At least two SparseTensors must be provided."
        # Assert the same coordinate manager
        ref_coordinate_manager = inputs[0].coordinate_manager
        for s in inputs:
            assert (
                ref_coordinate_manager == s.coordinate_manager
            ), "Invalid coordinate manager. All inputs must have the same coordinate manager."

        in_coordinate_map_key = inputs[0].coordinate_map_key
        coordinate_manager = inputs[0].coordinate_manager
        out_coordinate_map_key = CoordinateMapKey(
            in_coordinate_map_key.get_coordinate_size())
        output = self.union.apply(
            [input.coordinate_map_key for input in inputs],
            out_coordinate_map_key,
            coordinate_manager,
            *[input.F for input in inputs],
        )
        return SparseTensor(
            output,
            coordinate_map_key=out_coordinate_map_key,
            coordinate_manager=coordinate_manager,
        )
Example #8
    def forward(
        ctx,
        input_features: torch.Tensor,
        kernel_weights: torch.Tensor,
        kernel_generator: KernelGenerator,
        convolution_mode: ConvolutionMode,
        in_coordinate_map_key: CoordinateMapKey,
        out_coordinate_map_key: CoordinateMapKey = None,
        coordinate_manager: CoordinateManager = None,
    ):
        if out_coordinate_map_key is None:
            out_coordinate_map_key = CoordinateMapKey(
                in_coordinate_map_key.get_coordinate_size())

        input_features = input_features.contiguous()

        ctx.input_features = input_features
        ctx.kernel_weights = kernel_weights
        ctx.misc = [
            kernel_generator,
            convolution_mode,
            in_coordinate_map_key,
            out_coordinate_map_key,
            coordinate_manager,
        ]

        fw_fn = get_minkowski_function("ConvolutionForward", input_features)
        return fw_fn(
            ctx.input_features,
            kernel_weights,
            kernel_generator.kernel_size,
            kernel_generator.kernel_stride,
            kernel_generator.kernel_dilation,
            kernel_generator.region_type,
            kernel_generator.region_offsets,
            kernel_generator.expand_coordinates,
            convolution_mode,
            in_coordinate_map_key,
            out_coordinate_map_key,
            coordinate_manager._manager,
        )
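# A generic torch.autograd.Function sketch (not the library's code) showing the
# same ctx pattern used by the forward above: non-tensor state is stashed on
# ctx in forward, callers invoke MyFunction.apply(...), and backward returns
# one gradient (or None) per forward argument.
import torch


class Scale(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, scale):
        ctx.scale = scale                  # non-tensor state kept on ctx
        return x * scale

    @staticmethod
    def backward(ctx, grad_out):
        return grad_out * ctx.scale, None  # grads for (x, scale)


y = Scale.apply(torch.randn(4, requires_grad=True), 2.0)
y.sum().backward()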
    def forward(
        ctx,
        input_features: torch.Tensor,
        pooling_mode: PoolingMode,
        kernel_generator: KernelGenerator,
        in_coordinate_map_key: CoordinateMapKey,
        out_coordinate_map_key: CoordinateMapKey = None,
        coordinate_manager: CoordinateManager = None,
    ):
        if out_coordinate_map_key is None:
            out_coordinate_map_key = CoordinateMapKey(
                in_coordinate_map_key.get_coordinate_size())

        input_features = input_features.contiguous()
        ctx.input_features = input_features
        ctx = save_ctx(
            ctx,
            kernel_generator,
            in_coordinate_map_key,
            out_coordinate_map_key,
            coordinate_manager,
        )
        ctx.pooling_mode = pooling_mode

        fw_fn = get_minkowski_function("LocalPoolingTransposeForward",
                                       input_features)
        out_feat, num_nonzero = fw_fn(
            ctx.input_features,
            kernel_generator.kernel_size,
            kernel_generator.kernel_stride,
            kernel_generator.kernel_dilation,
            kernel_generator.region_type,
            kernel_generator.region_offsets,
            kernel_generator.expand_coordinates,
            pooling_mode,
            ctx.in_coordinate_map_key,
            ctx.out_coordinate_map_key,
            ctx.coordinate_manager._manager,
        )
        ctx.num_nonzero = num_nonzero
        return out_feat
Example #10
    def inverse_mapping(self, sparse_tensor_map_key: CoordinateMapKey):
        if sparse_tensor_map_key not in self._inverse_mapping:
            if not self._manager.exists_field_to_sparse(
                self.coordinate_field_map_key, sparse_tensor_map_key
            ):
                sparse_keys = self.coordinate_manager.field_to_sparse_keys(
                    self.coordinate_field_map_key
                )
                one_key = None
                if len(sparse_keys) > 0:
                    for key in sparse_keys:
                        if np.prod(key.get_tensor_stride()) == 1:
                            one_key = key
                else:
                    one_key = CoordinateMapKey(
                        [
                            1,
                        ]
                        * self.D,
                        "",
                    )

                if one_key not in self._inverse_mapping:
                    (
                        _,
                        self._inverse_mapping[one_key],
                    ) = self._manager.get_field_to_sparse_map(
                        self.coordinate_field_map_key, one_key
                    )
                _, stride_map = self.coordinate_manager.stride_map(
                    one_key, sparse_tensor_map_key
                )
                field_map = self._inverse_mapping[one_key]
                self._inverse_mapping[sparse_tensor_map_key] = stride_map[field_map]
            else:
                # Extract the mapping
                (
                    _,
                    self._inverse_mapping[sparse_tensor_map_key],
                ) = self._manager.get_field_to_sparse_map(
                    self.coordinate_field_map_key, sparse_tensor_map_key
                )
        return self._inverse_mapping[sparse_tensor_map_key]
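# Illustrative only (hypothetical tensors): the inverse mapping computed above
# maps each original field point to its row in the sparse tensor, so per-voxel
# features can be broadcast back to the field by simple row indexing.
import torch

sparse_feats = torch.randn(4, 8)                    # one row per unique voxel
inverse_map = torch.tensor([0, 0, 1, 2, 3, 3, 1])   # field point -> sparse row
field_feats = sparse_feats[inverse_map]             # shape (7, 8)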
    def __init__(
        self,
        features: torch.Tensor,
        coordinates: torch.Tensor = None,
        # optional coordinate related arguments
        tensor_stride: StrideType = 1,
        coordinate_field_map_key: CoordinateMapKey = None,
        coordinate_manager: CoordinateManager = None,
        quantization_mode: SparseTensorQuantizationMode = SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
        # optional manager related arguments
        allocator_type: GPUMemoryAllocatorType = None,
        minkowski_algorithm: MinkowskiAlgorithm = None,
        requires_grad=None,
        device=None,
    ):
        r"""

        Args:
            :attr:`features` (:attr:`torch.FloatTensor`,
            :attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or
            :attr:`torch.cuda.DoubleTensor`): The features of a sparse
            tensor.

            :attr:`coordinates` (:attr:`torch.IntTensor`): The coordinates
            associated to the features. If not provided, :attr:`coordinate_map_key`
            must be provided.

            :attr:`tensor_stride` (:attr:`int`, :attr:`list`,
            :attr:`numpy.array`, or :attr:`torch.Tensor`): The tensor stride
            of the current sparse tensor. By default, it is 1.

            :attr:`coordinate_field_map_key`
            (:attr:`MinkowskiEngine.CoordinateMapKey`): When the coordinates
            are already cached in the MinkowskiEngine, we can reuse the same
            coordinate map by simply providing the coordinate map key. In most
            cases, this process is done automatically. When you provide a
            `coordinate_field_map_key`, `coordinates` will be ignored.

            :attr:`coordinate_manager`
            (:attr:`MinkowskiEngine.CoordinateManager`): The MinkowskiEngine
            manages all coordinate maps using the `_C.CoordinateMapManager`. If
            not provided, the MinkowskiEngine will create a new computation
            graph. In most cases, this process is handled automatically and you
            do not need to use this.

            :attr:`quantization_mode`
            (:attr:`MinkowskiEngine.SparseTensorQuantizationMode`): Defines how
            continuous coordinates will be quantized to define a sparse tensor.
            Please refer to :attr:`SparseTensorQuantizationMode` for details.

            :attr:`allocator_type`
            (:attr:`MinkowskiEngine.GPUMemoryAllocatorType`): Defines the GPU
            memory allocator type. By default, it uses the c10 allocator.

            :attr:`minkowski_algorithm`
            (:attr:`MinkowskiEngine.MinkowskiAlgorithm`): Controls the mode in
            which the Minkowski Engine runs. Use
            :attr:`MinkowskiAlgorithm.MEMORY_EFFICIENT` if you want to reduce
            the memory footprint, or :attr:`MinkowskiAlgorithm.SPEED_OPTIMIZED`
            if you want to make it run faster at the cost of more memory.

            :attr:`requires_grad` (:attr:`bool`): Set the requires_grad flag.

            :attr:`device` (:attr:`torch.device`): Set the device on which the
            sparse tensor is defined.
        """
        # Type checks
        assert isinstance(features,
                          torch.Tensor), "Features must be a torch.Tensor"
        assert (
            features.ndim == 2
        ), f"The features must be a matrix (ndim == 2), but the input is an order-{features.ndim} tensor."
        assert isinstance(quantization_mode, SparseTensorQuantizationMode)
        assert quantization_mode in [
            SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
            SparseTensorQuantizationMode.UNWEIGHTED_SUM,
            SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,
            SparseTensorQuantizationMode.MAX_POOL,
        ], "invalid quantization mode"

        self.quantization_mode = quantization_mode

        if coordinates is not None:
            assert isinstance(coordinates, torch.Tensor)
        if coordinate_field_map_key is not None:
            assert isinstance(coordinate_field_map_key, CoordinateMapKey)
        if coordinate_manager is not None:
            assert isinstance(coordinate_manager, CoordinateManager)
        if coordinates is None and (coordinate_field_map_key is None
                                    or coordinate_manager is None):
            raise ValueError(
                "Either coordinates or (coordinate_field_map_key, coordinate_manager) pair must be provided."
            )

        Tensor.__init__(self)

        # To device
        if device is not None:
            features = features.to(device)
            if coordinates is not None:
                # assertion check for the map key done later
                coordinates = coordinates.to(device)

        self._D = (coordinates.size(1) -
                   1 if coordinates is not None else coordinate_manager.D)
        ##########################
        # Setup CoordsManager
        ##########################
        if coordinate_manager is None:
            # If set to share the coords man, use the global coords man
            if (sparse_tensor_operation_mode() ==
                    SparseTensorOperationMode.SHARE_COORDINATE_MANAGER):
                coordinate_manager = global_coordinate_manager()
                if coordinate_manager is None:
                    coordinate_manager = CoordinateManager(
                        D=self._D,
                        coordinate_map_type=CoordinateMapType.CUDA
                        if coordinates.is_cuda else CoordinateMapType.CPU,
                        allocator_type=allocator_type,
                        minkowski_algorithm=minkowski_algorithm,
                    )
                    set_global_coordinate_manager(coordinate_manager)
            else:
                coordinate_manager = CoordinateManager(
                    D=coordinates.size(1) - 1,
                    coordinate_map_type=CoordinateMapType.CUDA
                    if coordinates.is_cuda else CoordinateMapType.CPU,
                    allocator_type=allocator_type,
                    minkowski_algorithm=minkowski_algorithm,
                )
        self._manager = coordinate_manager

        ##########################
        # Initialize coords
        ##########################
        # Coordinate Management
        if coordinates is not None:
            assert (
                features.shape[0] == coordinates.shape[0]
            ), "The number of rows in features and coordinates must match."

            assert (features.is_cuda == coordinates.is_cuda
                    ), "Features and coordinates must have the same backend."

            coordinate_field_map_key = CoordinateMapKey(
                convert_to_int_list(tensor_stride, self._D), "")
            coordinate_field_map_key = self._manager.insert_field(
                coordinates.float(),
                convert_to_int_list(tensor_stride, self._D), "")
        else:
            assert (coordinate_field_map_key.is_key_set()
                    ), "The coordinate field map key must be valid."

        if requires_grad is not None:
            features.requires_grad_(requires_grad)

        self._F = features
        self._C = coordinates
        self.coordinate_field_map_key = coordinate_field_map_key
        self._batch_rows = None
        self._inverse_mapping = {}
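# A hedged usage sketch of a constructor with the signature above (assuming it
# is MinkowskiEngine's TensorField and that MinkowskiEngine is importable as
# ME): continuous float coordinates with a leading batch-index column are
# inserted as a coordinate field and later quantized per quantization_mode.
import torch
import MinkowskiEngine as ME

coords = torch.FloatTensor([[0, 0.1, 0.2, 0.3],
                            [0, 0.4, 0.5, 0.6],
                            [0, 0.7, 0.8, 0.9]])   # (batch, x, y, z)
feats = torch.rand(coords.shape[0], 8)
tfield = ME.TensorField(features=feats, coordinates=coords)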
    def field_to_sparse_keys(self, field_map_key: CoordinateMapKey):
        return self._manager.field_to_sparse_keys(field_map_key.get_key())
    def __init__(
        self,
        features: torch.Tensor,
        coordinates: torch.Tensor = None,
        # optional coordinate related arguments
        tensor_stride: StrideType = 1,
        coordinate_map_key: CoordinateMapKey = None,
        coordinate_manager: CoordinateManager = None,
        quantization_mode: SparseTensorQuantizationMode = SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,
        # optional manager related arguments
        allocator_type: GPUMemoryAllocatorType = None,
        minkowski_algorithm: MinkowskiAlgorithm = None,
        device=None,
    ):
        r"""

        Args:
            :attr:`features` (:attr:`torch.FloatTensor`,
            :attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or
            :attr:`torch.cuda.DoubleTensor`): The features of a sparse
            tensor.

            :attr:`coordinates` (:attr:`torch.IntTensor`): The coordinates
            associated to the features. If not provided, :attr:`coordinate_map_key`
            must be provided.

            :attr:`coordinate_map_key`
            (:attr:`MinkowskiEngine.CoordinateMapKey`): When the coordinates
            are already cached in the MinkowskiEngine, we can reuse the same
            coordinate map by simply providing the coordinate map key. In most
            cases, this process is done automatically. When you provide a
            `coordinate_map_key`, `coordinates` will be ignored.

            :attr:`coordinate_manager`
            (:attr:`MinkowskiEngine.CoordinateManager`): The MinkowskiEngine
            manages all coordinate maps using the `_C.CoordinateMapManager`. If
            not provided, the MinkowskiEngine will create a new computation
            graph. In most cases, this process is handled automatically and you
            do not need to use this.

            :attr:`quantization_mode`
            (:attr:`MinkowskiEngine.SparseTensorQuantizationMode`): Defines how
            continuous coordinates will be quantized to define a sparse tensor.
            Please refer to :attr:`SparseTensorQuantizationMode` for details.

            :attr:`tensor_stride` (:attr:`int`, :attr:`list`,
            :attr:`numpy.array`, or :attr:`torch.Tensor`): The tensor stride
            of the current sparse tensor. By default, it is 1.

        """
        # Type checks
        assert isinstance(features,
                          torch.Tensor), "Features must be a torch.Tensor"
        assert (
            features.ndim == 2
        ), f"The features must be a matrix (ndim == 2), but the input is an order-{features.ndim} tensor."
        assert isinstance(quantization_mode, SparseTensorQuantizationMode)
        self.quantization_mode = quantization_mode

        if coordinates is not None:
            assert isinstance(coordinates, torch.Tensor)
        if coordinate_map_key is not None:
            assert isinstance(coordinate_map_key, CoordinateMapKey)
        if coordinate_manager is not None:
            assert isinstance(coordinate_manager, CoordinateManager)

        # To device
        if device is not None:
            features = features.to(device)
            if coordinates is not None:
                coordinates = coordinates.to(device)

        # Coordinate Management
        self._D = 0  # coordinate size - 1
        if coordinates is None and (coordinate_map_key is None
                                    or coordinate_manager is None):
            raise ValueError(
                "Either coordinates or (coordinate_map_key, coordinate_manager) pair must be provided."
            )
        elif coordinates is not None:
            assert (
                features.shape[0] == coordinates.shape[0]
            ), "The number of rows in features and coordinates must match."

            assert (features.is_cuda == coordinates.is_cuda
                    ), "Features and coordinates must have the same backend."

            self._D = coordinates.size(1) - 1

            coordinate_map_key = CoordinateMapKey(
                convert_to_int_list(tensor_stride, self._D), "")
        else:
            # not (coordinate_map_key is None or coordinate_manager is None)
            self._D = coordinate_manager.D

        ##########################
        # Setup CoordsManager
        ##########################
        if coordinate_manager is None:
            # If set to share the coords man, use the global coords man
            global _sparse_tensor_operation_mode, _global_coordinate_manager
            if (_sparse_tensor_operation_mode ==
                    SparseTensorOperationMode.SHARE_COORDINATE_MANAGER):
                if _global_coordinate_manager is None:
                    _global_coordinate_manager = CoordinateManager(
                        D=self._D,
                        coordinate_map_type=CoordinateMapType.CUDA
                        if coordinates.is_cuda else CoordinateMapType.CPU,
                        allocator_type=allocator_type,
                        minkowski_algorithm=minkowski_algorithm,
                    )
                coordinate_manager = _global_coordinate_manager
            else:
                coordinate_manager = CoordinateManager(
                    D=coordinates.size(1) - 1,
                    coordinate_map_type=CoordinateMapType.CUDA
                    if coordinates.is_cuda else CoordinateMapType.CPU,
                    allocator_type=allocator_type,
                    minkowski_algorithm=minkowski_algorithm,
                )
        self._manager = coordinate_manager

        ##########################
        # Initialize coords
        ##########################
        if coordinates is not None:
            coordinates, features, coordinate_map_key = self.initialize_coordinates(
                coordinates, features, coordinate_map_key)

        elif coordinate_map_key is not None:
            assert (coordinate_map_key.is_key_set()
                    ), "The coordinate key must be a valid key."
            self.coordinate_map_key = coordinate_map_key

        self._F = features
        self._C = coordinates
        self._batch_rows = None
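# A minimal usage sketch of the constructor above (assuming MinkowskiEngine is
# importable as ME): integer coordinates in the (batch, x, y, z) layout and an
# (N, C) feature matrix, as asserted in __init__.
import torch
import MinkowskiEngine as ME

coords = torch.IntTensor([[0, 0, 0, 0],
                          [0, 0, 0, 1],
                          [0, 1, 1, 0]])
feats = torch.rand(coords.shape[0], 8)
stensor = ME.SparseTensor(features=feats, coordinates=coords)
print(stensor.F.shape, stensor.C.shape)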