def sparse(self):
    r"""Converts the current sparse tensor field to a sparse tensor.

    Aggregates the field features onto the coordinates of
    ``self.coordinate_map_key`` via a sparse-matrix/dense-matrix product.
    When the quantization mode is ``UNWEIGHTED_AVERAGE``, the per-row sums
    are divided by the number of contributing field points.

    Returns:
        :class:`SparseTensor` holding the aggregated features.
    """
    spmm = MinkowskiSPMMFunction()
    N = len(self._F)
    assert N == len(self.inverse_mapping), "invalid inverse mapping"
    # Build a (num_sparse_coords, N) selection matrix with a single 1 per
    # column: row i of the product sums all field features that map to
    # sparse coordinate i.
    cols = torch.arange(
        N,
        dtype=self.inverse_mapping.dtype,
        device=self.inverse_mapping.device,
    )
    vals = torch.ones(N, dtype=self._F.dtype, device=self._F.device)
    size = torch.Size([
        self._manager.size(self.coordinate_map_key),
        len(self.inverse_mapping)
    ])
    features = spmm.apply(self.inverse_mapping, cols, vals, size, self._F)
    if self.quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
        # Per-row contribution counts carry no gradient (`vals` never
        # requires grad), so skip graph construction — consistent with
        # initialize_coordinates.
        with torch.no_grad():
            nums = spmm.apply(
                self.inverse_mapping,
                cols,
                vals,
                size,
                vals.reshape(N, 1),
            )
        features /= nums
    return SparseTensor(
        features,
        coordinate_map_key=self.coordinate_map_key,
        coordinate_manager=self.coordinate_manager,
    )
def sparse(self, tensor_stride: Union[int, Sequence, np.array] = 1, quantization_mode=None):
    r"""Converts the current sparse tensor field to a sparse tensor.

    Args:
        tensor_stride (int or sequence of ints): tensor stride of the
            resulting sparse tensor. Defaults to 1.
        quantization_mode: how features that fall on the same sparse
            coordinate are merged. Defaults to ``self.quantization_mode``.

    Returns:
        :class:`SparseTensor`.

    Raises:
        ValueError: if the quantization mode is not one of the supported
            modes.
    """
    if quantization_mode is None:
        quantization_mode = self.quantization_mode
    tensor_stride = convert_to_int_list(tensor_stride, self.D)
    sparse_tensor_key, (
        unique_index,
        inverse_mapping,
    ) = self._manager.field_to_sparse_insert_and_map(
        self.coordinate_field_map_key,
        tensor_stride,
    )
    self._inverse_mapping[sparse_tensor_key] = inverse_mapping
    # BUG FIX: the comparisons below previously read self.quantization_mode,
    # silently ignoring the `quantization_mode` argument. Use the resolved
    # local instead so the caller's choice is honored.
    if quantization_mode in [
            SparseTensorQuantizationMode.UNWEIGHTED_SUM,
            SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
    ]:
        spmm = MinkowskiSPMMFunction()
        N = len(self._F)
        # Selection matrix (num_unique, N) with one 1 per column: each row
        # accumulates the field features mapped to that sparse coordinate.
        cols = torch.arange(
            N,
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        vals = torch.ones(N, dtype=self._F.dtype, device=self._F.device)
        size = torch.Size([len(unique_index), len(inverse_mapping)])
        features = spmm.apply(inverse_mapping, cols, vals, size, self._F)
        if quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
            # Divide by the number of contributions per row.
            nums = spmm.apply(
                inverse_mapping,
                cols,
                vals,
                size,
                vals.reshape(N, 1),
            )
            features /= nums
    elif quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
        # Keep one representative feature per sparse coordinate.
        features = self._F[unique_index]
    else:
        # No quantization
        raise ValueError("Invalid quantization mode")
    sparse_tensor = SparseTensor(
        features,
        coordinate_map_key=sparse_tensor_key,
        coordinate_manager=self._manager,
    )
    return sparse_tensor
def initialize_coordinates(self, coordinates, features, coordinate_map_key):
    """Insert ``coordinates`` into the manager and quantize ``features``.

    Stores ``self.unique_index`` and ``self.inverse_mapping`` as side
    effects and returns the unique coordinates, the (possibly aggregated)
    features, and the resulting coordinate map key.
    """
    if not isinstance(coordinates, (torch.IntTensor, torch.cuda.IntTensor)):
        warnings.warn(
            "coordinates implicitly converted to torch.IntTensor. "
            "To remove this warning, use `.int()` to convert the "
            "coords into an torch.IntTensor"
        )
        coordinates = torch.floor(coordinates).int()

    coordinate_map_key, (
        unique_index,
        self.inverse_mapping,
    ) = self._manager.insert_and_map(coordinates, *coordinate_map_key.get_key())
    self.unique_index = unique_index.long()
    coordinates = coordinates[self.unique_index]

    mode = self.quantization_mode
    if mode in [
            SparseTensorQuantizationMode.UNWEIGHTED_SUM,
            SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
    ]:
        # Sum features that share a sparse coordinate with an SPMM whose
        # selection matrix has one 1 per input point.
        num_points = len(features)
        col_indices = torch.arange(
            num_points,
            dtype=self.inverse_mapping.dtype,
            device=self.inverse_mapping.device,
        )
        one_vals = torch.ones(
            num_points, dtype=features.dtype, device=features.device
        )
        mat_size = torch.Size(
            [len(self.unique_index), len(self.inverse_mapping)]
        )
        spmm_fn = MinkowskiSPMMFunction()
        features = spmm_fn.apply(
            self.inverse_mapping, col_indices, one_vals, mat_size, features
        )
        if mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
            # Row counts carry no gradient information.
            with torch.no_grad():
                counts = spmm_fn.apply(
                    self.inverse_mapping,
                    col_indices,
                    one_vals,
                    mat_size,
                    one_vals.reshape(num_points, 1),
                )
            features /= counts
    elif mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
        # Keep a single representative feature per unique coordinate.
        features = features[self.unique_index]
    else:
        # No quantization
        pass

    return coordinates, features, coordinate_map_key
def splat(self):
    r"""Splat the tensor field onto the surrounding lattice coordinates.

    For slice, use Y.slice(X) where X is the tensor field and Y is the
    resulting sparse tensor.
    """
    key, _ = self._manager.insert_and_map(create_splat_coordinates(self.C))
    num_rows = self._manager.size(key)
    tensor_map, field_map, weights = self._manager.interpolation_map_weight(
        key, self._C
    )
    # features
    num_cols = len(self._F)
    assert weights.dtype == self._F.dtype
    mat_size = torch.Size([num_rows, num_cols])
    # Cache the interpolation maps so a later slice can reuse them.
    self._splat[key] = (tensor_map, field_map, weights, mat_size)
    splat_features = MinkowskiSPMMFunction().apply(
        tensor_map, field_map, weights, mat_size, self._F
    )
    return SparseTensor(
        splat_features,
        coordinate_map_key=key,
        coordinate_manager=self._manager,
    )
def interpolate(self, X):
    """Interpolate this sparse tensor's features at the field coordinates
    of ``X`` and return them as a :class:`TensorField`."""
    from MinkowskiTensorField import TensorField

    assert isinstance(X, TensorField)
    if self.coordinate_map_key in X._splat:
        # Reuse the cached splat maps; swap the map roles and transpose the
        # matrix size to go from sparse rows back to field points.
        tensor_map, field_map, weights, size = X._splat[self.coordinate_map_key]
        transposed_size = torch.Size([size[1], size[0]])  # transpose
        features = MinkowskiSPMMFunction().apply(
            field_map, tensor_map, weights, transposed_size, self._F
        )
    else:
        # No cached splat for this key: fall back to direct interpolation.
        features = self.features_at_coordinates(X.C)
    return TensorField(
        features=features,
        coordinate_field_map_key=X.coordinate_field_map_key,
        coordinate_manager=X.coordinate_manager,
    )
def sparse(
    self,
    tensor_stride: Union[int, Sequence, np.array] = 1,
    coordinate_map_key: CoordinateMapKey = None,
    quantization_mode=None,
):
    r"""Converts the current sparse tensor field to a sparse tensor.

    Features falling on the same sparse coordinate are merged according to
    ``quantization_mode`` (sum, average, random subsample, or max pool).
    """
    if quantization_mode is None:
        quantization_mode = self.quantization_mode

    if coordinate_map_key is None:
        # Create a new sparse coordinate map from the field coordinates.
        stride = convert_to_int_list(tensor_stride, self.D)
        (
            coordinate_map_key,
            (unique_index, inverse_mapping),
        ) = self._manager.field_to_sparse_insert_and_map(
            self.coordinate_field_map_key, stride
        )
        num_rows = len(unique_index)
    else:
        # Map onto an existing sparse coordinate map.
        # sparse index, field index
        inverse_mapping, unique_index = self._manager.field_to_sparse_map(
            self.coordinate_field_map_key,
            coordinate_map_key,
        )
        num_rows = self._manager.size(coordinate_map_key)

    self._inverse_mapping[coordinate_map_key] = inverse_mapping

    num_points = len(self._F)
    if quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_SUM:
        # Sum the features mapped to each sparse coordinate via SPMM.
        col_indices = torch.arange(
            num_points,
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        one_vals = torch.ones(
            num_points, dtype=self._F.dtype, device=self._F.device
        )
        mat_size = torch.Size([num_rows, len(inverse_mapping)])
        features = MinkowskiSPMMFunction().apply(
            inverse_mapping, col_indices, one_vals, mat_size, self._F
        )
    elif quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
        # Average with the dedicated SPMM-average kernel.
        col_indices = torch.arange(
            num_points,
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        mat_size = torch.Size([num_rows, len(inverse_mapping)])
        features = MinkowskiSPMMAverageFunction().apply(
            inverse_mapping, col_indices, mat_size, self._F
        )
    elif quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
        # Keep one representative feature per sparse coordinate.
        features = self._F[unique_index]
    elif quantization_mode == SparseTensorQuantizationMode.MAX_POOL:
        # Channel-wise max over the features of each sparse coordinate.
        in_map = torch.arange(
            num_points,
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        features = MinkowskiDirectMaxPoolingFunction().apply(
            in_map, inverse_mapping, self._F, num_rows
        )
    else:
        # No quantization
        raise ValueError("Invalid quantization mode")

    sparse_tensor = SparseTensor(
        features,
        coordinate_map_key=coordinate_map_key,
        coordinate_manager=self._manager,
    )
    return sparse_tensor
def initialize_coordinates(self, coordinates, features, coordinate_map_key):
    """Insert ``coordinates`` into the coordinate manager and quantize
    ``features`` according to ``self.quantization_mode``.

    Side effects: sets ``self.unique_index`` (long indices of the unique
    coordinates) and ``self.inverse_mapping`` (per-input-point index into
    the unique coordinates).

    Returns:
        Tuple of (unique coordinates, quantized features, coordinate map
        key returned by the manager).
    """
    if not isinstance(coordinates, (torch.IntTensor, torch.cuda.IntTensor)):
        warnings.warn(
            "coordinates implicitly converted to torch.IntTensor. " +
            "To remove this warning, use `.int()` to convert the " +
            "coords into an torch.IntTensor")
        # Floor before the int cast so fractional coordinates quantize
        # toward negative infinity rather than truncating toward zero.
        coordinates = torch.floor(coordinates).int()
    (
        coordinate_map_key,
        (unique_index, inverse_mapping),
    ) = self._manager.insert_and_map(coordinates, *coordinate_map_key.get_key())
    self.unique_index = unique_index.long()
    coordinates = coordinates[self.unique_index]
    if len(inverse_mapping) == 0:
        # When the input has the same shape as the output
        # (no duplicates were merged), use an identity mapping and return
        # the features untouched.
        self.inverse_mapping = torch.arange(
            len(features),
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        return coordinates, features, coordinate_map_key
    self.inverse_mapping = inverse_mapping
    if self.quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_SUM:
        # Sum features sharing a coordinate: SPMM with a selection matrix
        # of shape (num_unique, num_points) holding one 1 per column.
        spmm = MinkowskiSPMMFunction()
        N = len(features)
        cols = torch.arange(
            N,
            dtype=self.inverse_mapping.dtype,
            device=self.inverse_mapping.device,
        )
        vals = torch.ones(N, dtype=features.dtype, device=features.device)
        size = torch.Size(
            [len(self.unique_index), len(self.inverse_mapping)])
        features = spmm.apply(self.inverse_mapping, cols, vals, size, features)
    elif self.quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
        # Average features sharing a coordinate with the dedicated
        # SPMM-average kernel (no explicit count/divide needed).
        spmm_avg = MinkowskiSPMMAverageFunction()
        N = len(features)
        cols = torch.arange(
            N,
            dtype=self.inverse_mapping.dtype,
            device=self.inverse_mapping.device,
        )
        size = torch.Size(
            [len(self.unique_index), len(self.inverse_mapping)])
        features = spmm_avg.apply(self.inverse_mapping, cols, size, features)
    elif self.quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
        # Keep one representative feature per unique coordinate.
        features = features[self.unique_index]
    else:
        # No quantization
        pass
    return coordinates, features, coordinate_map_key
def sparse(
    self,
    tensor_stride: Union[int, Sequence, np.array] = 1,
    coordinate_map_key: CoordinateMapKey = None,
    quantization_mode: SparseTensorQuantizationMode = None,
):
    r"""Converts the current sparse tensor field to a sparse tensor.

    Args:
        tensor_stride (int or sequence of ints): tensor stride of the
            resulting sparse tensor; used only when ``coordinate_map_key``
            is None. Defaults to 1.
        coordinate_map_key (CoordinateMapKey, optional): an existing sparse
            coordinate map to project onto. When None, a new map is created
            from the field coordinates.
        quantization_mode (SparseTensorQuantizationMode, optional): how
            features falling on the same sparse coordinate are merged.
            Defaults to ``self.quantization_mode``.

    Returns:
        :class:`SparseTensor`.

    Raises:
        ValueError: if the quantization mode is not supported.
    """
    if quantization_mode is None:
        quantization_mode = self.quantization_mode
    assert (
        quantization_mode != SparseTensorQuantizationMode.SPLAT_LINEAR_INTERPOLATION
    ), "Please use .splat() for splat quantization."
    if coordinate_map_key is None:
        tensor_stride = convert_to_int_list(tensor_stride, self.D)
        coordinate_map_key, (
            unique_index,
            inverse_mapping,
        ) = self._manager.field_to_sparse_insert_and_map(
            self.coordinate_field_map_key,
            tensor_stride,
        )
        N_rows = len(unique_index)
    else:
        # sparse index, field index
        inverse_mapping, unique_index = self._manager.field_to_sparse_map(
            self.coordinate_field_map_key,
            coordinate_map_key,
        )
        N_rows = self._manager.size(coordinate_map_key)
    # BUG FIX: the original message referenced the undefined name `N_row`,
    # so a failing assertion raised NameError instead of AssertionError.
    assert N_rows > 0, f"Invalid out coordinate map key. Found {N_rows} elements."
    if len(inverse_mapping) == 0:
        # When the input has the same shape as the output
        # (no duplicates), record an identity mapping and skip aggregation.
        self._inverse_mapping[coordinate_map_key] = torch.arange(
            len(self._F),
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        return SparseTensor(
            self._F,
            coordinate_map_key=coordinate_map_key,
            coordinate_manager=self._manager,
        )

    # Create features
    if quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_SUM:
        # Sum features sharing a sparse coordinate via SPMM with a
        # selection matrix holding one 1 per input point.
        N = len(self._F)
        cols = torch.arange(
            N,
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        vals = torch.ones(N, dtype=self._F.dtype, device=self._F.device)
        size = torch.Size([N_rows, len(inverse_mapping)])
        features = MinkowskiSPMMFunction().apply(
            inverse_mapping, cols, vals, size, self._F
        )
    elif quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
        # Average with the dedicated SPMM-average kernel.
        N = len(self._F)
        cols = torch.arange(
            N,
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        size = torch.Size([N_rows, len(inverse_mapping)])
        features = MinkowskiSPMMAverageFunction().apply(
            inverse_mapping, cols, size, self._F
        )
    elif quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
        # Keep one representative feature per sparse coordinate.
        features = self._F[unique_index]
    elif quantization_mode == SparseTensorQuantizationMode.MAX_POOL:
        # Channel-wise max over the features of each sparse coordinate.
        N = len(self._F)
        in_map = torch.arange(
            N,
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        features = MinkowskiDirectMaxPoolingFunction().apply(
            in_map, inverse_mapping, self._F, N_rows
        )
    else:
        # No quantization
        raise ValueError("Invalid quantization mode")

    self._inverse_mapping[coordinate_map_key] = inverse_mapping
    return SparseTensor(
        features,
        coordinate_map_key=coordinate_map_key,
        coordinate_manager=self._manager,
    )