def forward(self, input: SparseTensor, input_glob: SparseTensor):
    assert isinstance(input, SparseTensor)
    assert isinstance(input_glob, SparseTensor)

    broadcast_feat = input.F.new(len(input), input_glob.size()[1])
    batch_indices, batch_rows = input.coordinate_manager.origin_map(
        input.coordinate_map_key
    )
    # Copy each batch's single global feature row to every coordinate row
    # that belongs to that batch.
    for b, rows in zip(batch_indices, batch_rows):
        broadcast_feat[rows] = input_glob.F[b]
    return SparseTensor(
        broadcast_feat,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

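# Usage sketch for the broadcast above (a hedged example: it assumes the
# enclosing module is exposed as ME.MinkowskiBroadcast and that
# MinkowskiGlobalAvgPooling returns one feature row per batch, as in
# MinkowskiEngine's public API):
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> coords = torch.IntTensor([[0, 0, 0], [0, 1, 0], [1, 2, 2]])  # (batch, x, y)
# >>> sin = ME.SparseTensor(torch.rand(3, 4), coordinates=coords)
# >>> glob = ME.MinkowskiGlobalAvgPooling()(sin)  # one row per batch
# >>> out = ME.MinkowskiBroadcast()(sin, glob)    # row b copied to batch b's rows
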
def splat(self):
    r"""
    For slice, use Y.slice(X) where X is the tensor field and Y is the
    resulting sparse tensor.
    """
    splat_coordinates = create_splat_coordinates(self.C)
    (coordinate_map_key, _) = self._manager.insert_and_map(splat_coordinates)
    N_rows = self._manager.size(coordinate_map_key)

    tensor_map, field_map, weights = self._manager.interpolation_map_weight(
        coordinate_map_key, self._C
    )
    # features
    N = len(self._F)
    assert weights.dtype == self._F.dtype
    size = torch.Size([N_rows, N])
    # Save the results for slice
    self._splat[coordinate_map_key] = (tensor_map, field_map, weights, size)
    features = MinkowskiSPMMFunction().apply(
        tensor_map, field_map, weights, size, self._F
    )
    return SparseTensor(
        features,
        coordinate_map_key=coordinate_map_key,
        coordinate_manager=self._manager,
    )

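# Splat/slice round trip, sketched under the assumption that this is
# TensorField.splat from MinkowskiEngine (splat interpolates field features
# onto a sparse grid; Y.slice(X) maps grid features back to the field points):
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> coords = torch.rand(10, 3) * 4   # (batch, x, y), continuous coordinates
# >>> coords[:, 0] = 0                 # single batch index
# >>> X = ME.TensorField(torch.rand(10, 8), coordinates=coords)
# >>> Y = X.splat()                    # SparseTensor on the splat coordinates
# >>> Z = Y.slice(X)                   # TensorField back on X's points
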
def sigmoid(input, *args, **kwargs):
    output = F.sigmoid(input.F, *args, **kwargs)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

def log_softmax(input, *args, **kwargs):
    output = F.log_softmax(input.F, *args, **kwargs)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

def leaky_relu(input, *args, **kwargs):
    output = F.leaky_relu(input.F, *args, **kwargs)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

def nll_loss(input, target, *args, **kwargs):
    output = F.nll_loss(input.F, target, *args, **kwargs)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

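# The activation and loss wrappers above all follow the same pattern: apply a
# torch.nn.functional op to input.F and re-wrap the result on the input's
# coordinates. A minimal sketch, assuming the wrappers are exported via
# MinkowskiEngine.MinkowskiFunctional as in the library:
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> import MinkowskiEngine.MinkowskiFunctional as MF
# >>> coords = torch.IntTensor([[0, 0, 0], [0, 1, 1], [1, 2, 0]])
# >>> sin = ME.SparseTensor(torch.rand(3, 5), coordinates=coords)
# >>> act = MF.leaky_relu(sin)           # SparseTensor on the same coordinates
# >>> logp = MF.log_softmax(sin, dim=1)  # row-wise log-probabilities
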
def forward(
    self,
    input,
    coordinates: Union[torch.IntTensor, CoordinateMapKey, SparseTensor] = None,
):
    # Get a new coordinate map key or extract one from the coordinates
    if isinstance(input, ME.TensorField):
        in_coordinate_map_key = input.coordinate_field_map_key
        out_coordinate_map_key = CoordinateMapKey(
            input.coordinate_field_map_key.get_coordinate_size()
        )
    else:
        in_coordinate_map_key = input.coordinate_map_key
        out_coordinate_map_key = _get_coordinate_map_key(input, coordinates)

    output = self.pooling.apply(
        input.F,
        self.pooling_mode,
        in_coordinate_map_key,
        out_coordinate_map_key,
        input._manager,
    )
    return SparseTensor(
        output,
        coordinate_map_key=out_coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

def _tuple_operator(*sparse_tensors, operator):
    if len(sparse_tensors) == 1:
        assert isinstance(sparse_tensors[0], (tuple, list))
        sparse_tensors = sparse_tensors[0]

    assert (
        len(sparse_tensors) > 1
    ), "Invalid number of inputs. The input must contain at least two tensors."

    if isinstance(sparse_tensors[0], SparseTensor):
        device = sparse_tensors[0].device
        coordinate_manager = sparse_tensors[0].coordinate_manager
        coordinate_map_key = sparse_tensors[0].coordinate_map_key
        for s in sparse_tensors:
            assert isinstance(
                s, SparseTensor
            ), "Inputs must be either SparseTensors or TensorFields."
            assert (
                device == s.device
            ), f"Device must be the same. {device} != {s.device}"
            assert (
                coordinate_manager == s.coordinate_manager
            ), COORDINATE_MANAGER_DIFFERENT_ERROR
            assert coordinate_map_key == s.coordinate_map_key, (
                COORDINATE_KEY_DIFFERENT_ERROR
                + str(coordinate_map_key)
                + " != "
                + str(s.coordinate_map_key)
            )
        tens = [s.F for s in sparse_tensors]
        return SparseTensor(
            operator(tens),
            coordinate_map_key=coordinate_map_key,
            coordinate_manager=coordinate_manager,
        )
    elif isinstance(sparse_tensors[0], TensorField):
        device = sparse_tensors[0].device
        coordinate_manager = sparse_tensors[0].coordinate_manager
        coordinate_field_map_key = sparse_tensors[0].coordinate_field_map_key
        for s in sparse_tensors:
            assert isinstance(
                s, TensorField
            ), "Inputs must be either SparseTensors or TensorFields."
            assert (
                device == s.device
            ), f"Device must be the same. {device} != {s.device}"
            assert (
                coordinate_manager == s.coordinate_manager
            ), COORDINATE_MANAGER_DIFFERENT_ERROR
            assert coordinate_field_map_key == s.coordinate_field_map_key, (
                COORDINATE_KEY_DIFFERENT_ERROR
                + str(coordinate_field_map_key)
                + " != "
                + str(s.coordinate_field_map_key)
            )
        tens = [s.F for s in sparse_tensors]
        return TensorField(
            operator(tens),
            coordinate_field_map_key=coordinate_field_map_key,
            coordinate_manager=coordinate_manager,
        )
    else:
        raise ValueError(
            "Invalid data type. The input must be either a list of sparse tensors or a list of tensor fields."
        )

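# Sketch of how this helper is typically used. Hedged: in MinkowskiEngine's
# MinkowskiOps, element-wise reductions such as ME.mean are built on top of
# it; the lambda below is an illustrative operator, not library code.
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> coords = torch.IntTensor([[0, 0, 0], [0, 1, 0]])
# >>> a = ME.SparseTensor(torch.rand(2, 4), coordinates=coords)
# >>> b = ME.SparseTensor(torch.rand(2, 4),
# ...                     coordinate_map_key=a.coordinate_map_key,
# ...                     coordinate_manager=a.coordinate_manager)
# >>> avg = _tuple_operator(a, b, operator=lambda tens: sum(tens) / len(tens))
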
def forward(
    self,
    input: SparseTensor,
    coordinates: Union[torch.IntTensor, CoordinateMapKey, SparseTensor] = None,
):
    # Get a new coordinate map key or extract one from the coordinates
    if input._manager.number_of_unique_batch_indices() == 1:
        # Single batch: a plain feature-wise max over all rows suffices.
        out_coordinate_map_key = input._manager.origin()
        output, _ = input.F.max(0, True)
    else:
        out_coordinate_map_key = _get_coordinate_map_key(input, coordinates)
        output = self.pooling.apply(
            input.F,
            self.pooling_mode,
            input.coordinate_map_key,
            out_coordinate_map_key,
            input._manager,
        )
    return SparseTensor(
        output,
        coordinate_map_key=out_coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

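# Usage sketch (hedged: assumes this forward belongs to
# ME.MinkowskiGlobalMaxPooling, which reduces each batch to one feature row):
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> coords = torch.IntTensor([[0, 0, 0], [0, 3, 1], [1, 2, 2]])
# >>> sin = ME.SparseTensor(torch.rand(3, 16), coordinates=coords)
# >>> pool = ME.MinkowskiGlobalMaxPooling()
# >>> out = pool(sin)  # out.F has shape (num_batches, 16)
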
def forward(
    self,
    input: SparseTensor,
    coordinates: Union[torch.IntTensor, CoordinateMapKey, SparseTensor] = None,
):
    r"""
    :attr:`input` (`MinkowskiEngine.SparseTensor`): Input sparse tensor to apply a
    convolution on.

    :attr:`coordinates` ((`torch.IntTensor`, `MinkowskiEngine.CoordinateMapKey`,
    `MinkowskiEngine.SparseTensor`), optional): If provided, generate results on
    the provided coordinates. None by default.
    """
    assert isinstance(input, SparseTensor)
    assert input.D == self.dimension

    # Get a new coordinate map key or extract one from the coordinates
    out_coordinate_map_key = _get_coordinate_map_key(input, coordinates)
    outfeat = self.pooling.apply(
        input.F,
        self.pooling_mode,
        self.kernel_generator,
        input.coordinate_map_key,
        out_coordinate_map_key,
        input._manager,
    )

    return SparseTensor(
        outfeat,
        coordinate_map_key=out_coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

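# Usage sketch (hedged: assumes this is the forward of a kernel-based pooling
# layer such as ME.MinkowskiMaxPooling):
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> coords = torch.IntTensor([[0, 0, 0], [0, 1, 0], [0, 2, 3]])
# >>> sin = ME.SparseTensor(torch.rand(3, 8), coordinates=coords)
# >>> pool = ME.MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=2)
# >>> out = pool(sin)  # tensor_stride of the output doubles to [2, 2]
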
def cross_entropy(input, target, *args, **kwargs):
    output = F.cross_entropy(input.F, target, *args, **kwargs)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

def cat(*sparse_tensors):
    r"""Concatenate sparse tensors

    Concatenate sparse tensor features. All sparse tensors must have the same
    `coordinate_map_key` (the same coordinates). To concatenate sparse tensors
    with different sparsity patterns, use SparseTensor binary operations, or
    :attr:`MinkowskiEngine.MinkowskiUnion`.

    Example::

       >>> import MinkowskiEngine as ME
       >>> sin = ME.SparseTensor(feats, coords)
       >>> sin2 = ME.SparseTensor(feats2, coordinate_map_key=sin.coordinate_map_key, coordinate_manager=sin.coordinate_manager)
       >>> sout = UNet(sin)  # Returns an output sparse tensor on the same coordinates
       >>> sout2 = ME.cat(sin, sin2, sout)  # Can concatenate multiple sparse tensors

    """
    for s in sparse_tensors:
        assert isinstance(s, SparseTensor), "Inputs must be sparse tensors."
    coordinate_manager = sparse_tensors[0].coordinate_manager
    coordinate_map_key = sparse_tensors[0].coordinate_map_key
    for s in sparse_tensors:
        assert (
            coordinate_manager == s.coordinate_manager
        ), COORDINATE_MANAGER_DIFFERENT_ERROR
        assert coordinate_map_key == s.coordinate_map_key, (
            COORDINATE_KEY_DIFFERENT_ERROR
            + str(coordinate_map_key)
            + " != "
            + str(s.coordinate_map_key)
        )
    tens = [s.F for s in sparse_tensors]
    return SparseTensor(
        torch.cat(tens, dim=1),
        coordinate_map_key=coordinate_map_key,
        coordinate_manager=coordinate_manager,
    )

def to_sparse(dense_tensor: torch.Tensor, coordinates: torch.Tensor = None):
    r"""Converts a (differentiable) dense tensor to a sparse tensor.

    Assumes the input has the format B x C x D1 x D2 x ... x DN.

    If the shape of the tensor does not change, use `dense_coordinates` to
    cache the coordinates. Please refer to tests/python/dense.py for usage.

    Example::

       >>> dense_tensor = torch.rand(3, 4, 5, 6, 7, 8)  # BxCxD1xD2xD3xD4
       >>> dense_tensor.requires_grad = True
       >>> stensor = to_sparse(dense_tensor)

    """
    spatial_dim = dense_tensor.ndim - 2
    assert (
        spatial_dim > 0
    ), "Invalid shape. Shape must be batch x channel x spatial dimensions."

    if coordinates is None:
        coordinates = dense_coordinates(dense_tensor.shape)

    # Move channels last so that each row of the flattened tensor is one feature.
    feat_tensor = dense_tensor.permute(0, *(2 + i for i in range(spatial_dim)), 1)
    return SparseTensor(
        feat_tensor.reshape(-1, dense_tensor.size(1)),
        coordinates,
        device=dense_tensor.device,
    )

def forward(self, input: SparseTensor, mask: torch.Tensor):
    r"""
    Args:
        :attr:`input` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor
        to remove coordinates from.

        :attr:`mask` (:attr:`torch.BoolTensor`): mask vector that specifies
        which coordinates to keep. Coordinates with False will be removed.

    Returns:
        A :attr:`MinkowskiEngine.SparseTensor` with C = coordinates
        corresponding to `mask == True`, F = copy of the features from
        `mask == True`.

    Example::

       >>> # Define inputs
       >>> input = SparseTensor(feats, coords=coords)
       >>> # Any boolean tensor can be used as the filter
       >>> mask = torch.rand(feats.size(0)) < 0.5
       >>> pruning = MinkowskiPruning()
       >>> output = pruning(input, mask)

    """
    assert isinstance(input, SparseTensor)

    out_coords_key = CoordinateMapKey(
        input.coordinate_map_key.get_coordinate_size()
    )
    output = self.pruning.apply(
        input.F, mask, input.coordinate_map_key, out_coords_key, input._manager
    )
    return SparseTensor(
        output, coordinate_map_key=out_coords_key, coordinate_manager=input._manager
    )

def normalize(input, *args, **kwargs):
    output = F.normalize(input.F, *args, **kwargs)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

def sparse(self):
    r"""Converts the current sparse tensor field to a sparse tensor."""
    spmm = MinkowskiSPMMFunction()
    N = len(self._F)
    assert N == len(self.inverse_mapping), "invalid inverse mapping"
    cols = torch.arange(
        N,
        dtype=self.inverse_mapping.dtype,
        device=self.inverse_mapping.device,
    )
    vals = torch.ones(N, dtype=self._F.dtype, device=self._F.device)
    size = torch.Size(
        [self._manager.size(self.coordinate_map_key), len(self.inverse_mapping)]
    )
    # Sum the field features of all points that fall into the same cell.
    features = spmm.apply(self.inverse_mapping, cols, vals, size, self._F)
    if self.quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
        # Divide by the number of points per cell to turn the sum into an average.
        nums = spmm.apply(
            self.inverse_mapping,
            cols,
            vals,
            size,
            vals.reshape(N, 1),
        )
        features /= nums
    return SparseTensor(
        features,
        coordinate_map_key=self.coordinate_map_key,
        coordinate_manager=self.coordinate_manager,
    )

def forward(self, input):
    output = self.bn(input.F)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

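# Usage sketch (hedged: assumes self.bn is a torch.nn.BatchNorm1d over the
# feature rows, as in ME.MinkowskiBatchNorm):
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> coords = torch.IntTensor([[0, 0, 0], [0, 1, 2], [1, 3, 1]])
# >>> sin = ME.SparseTensor(torch.rand(3, 8), coordinates=coords)
# >>> bn = ME.MinkowskiBatchNorm(8)
# >>> out = bn(sin)  # normalized features on the same coordinates
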
def binary_cross_entropy_with_logits(input, target, *args, **kwargs):
    output = F.binary_cross_entropy_with_logits(input.F, target, *args, **kwargs)
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

def sparse(
    self,
    tensor_stride: Union[int, Sequence, np.array] = 1,
    quantization_mode=None,
):
    r"""Converts the current sparse tensor field to a sparse tensor."""
    if quantization_mode is None:
        quantization_mode = self.quantization_mode

    tensor_stride = convert_to_int_list(tensor_stride, self.D)

    sparse_tensor_key, (
        unique_index,
        inverse_mapping,
    ) = self._manager.field_to_sparse_insert_and_map(
        self.coordinate_field_map_key,
        tensor_stride,
    )
    self._inverse_mapping[sparse_tensor_key] = inverse_mapping

    if quantization_mode in [
        SparseTensorQuantizationMode.UNWEIGHTED_SUM,
        SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
    ]:
        spmm = MinkowskiSPMMFunction()
        N = len(self._F)
        cols = torch.arange(
            N,
            dtype=inverse_mapping.dtype,
            device=inverse_mapping.device,
        )
        vals = torch.ones(N, dtype=self._F.dtype, device=self._F.device)
        size = torch.Size([len(unique_index), len(inverse_mapping)])
        features = spmm.apply(inverse_mapping, cols, vals, size, self._F)
        if quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
            nums = spmm.apply(
                inverse_mapping,
                cols,
                vals,
                size,
                vals.reshape(N, 1),
            )
            features /= nums
    elif quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
        features = self._F[unique_index]
    else:
        raise ValueError("Invalid quantization mode")

    sparse_tensor = SparseTensor(
        features,
        coordinate_map_key=sparse_tensor_key,
        coordinate_manager=self._manager,
    )
    return sparse_tensor

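# Usage sketch for field-to-sparse conversion with averaging (hedged: assumes
# the TensorField API with quantization_mode, as in MinkowskiEngine):
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> coords = torch.rand(100, 3) * 10  # (batch, x, y), continuous
# >>> coords[:, 0] = 0                  # batch index
# >>> field = ME.TensorField(
# ...     torch.rand(100, 4), coordinates=coords,
# ...     quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)
# >>> stensor = field.sparse(tensor_stride=2)  # points in a cell are averaged
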
def forward(self, *inputs):
    r"""
    Args:
        A variable number of :attr:`MinkowskiEngine.SparseTensor`'s.

    Returns:
        A :attr:`MinkowskiEngine.SparseTensor` with coordinates = union of all
        input coordinates, and features = sum of all features corresponding to
        the coordinate.

    Example::

       >>> # Define inputs
       >>> input1 = SparseTensor(
       >>>     torch.rand(N, in_channels, dtype=torch.double), coords=coords)
       >>> # All inputs must share the same coordinate manager
       >>> input2 = SparseTensor(
       >>>     torch.rand(N, in_channels, dtype=torch.double),
       >>>     coords=coords + 1,
       >>>     coords_manager=input1.coordinate_manager,  # Must use same coords manager
       >>>     force_creation=True  # The tensor stride [1, 1] already exists.
       >>> )
       >>> union = MinkowskiUnion()
       >>> output = union(input1, input2)

    """
    assert isinstance(inputs, (list, tuple)), "The input must be a list or tuple"
    for s in inputs:
        assert isinstance(s, SparseTensor), "Inputs must be sparse tensors."
    assert len(inputs) > 1, "input must be a set with at least 2 SparseTensors"

    # Assert the same coordinate manager
    ref_coordinate_manager = inputs[0].coordinate_manager
    for s in inputs:
        assert (
            ref_coordinate_manager == s.coordinate_manager
        ), "Invalid coordinate manager. All inputs must have the same coordinate manager."

    in_coordinate_map_key = inputs[0].coordinate_map_key
    coordinate_manager = inputs[0].coordinate_manager
    out_coordinate_map_key = CoordinateMapKey(
        in_coordinate_map_key.get_coordinate_size()
    )
    output = self.union.apply(
        [input.coordinate_map_key for input in inputs],
        out_coordinate_map_key,
        coordinate_manager,
        *[input.F for input in inputs],
    )
    return SparseTensor(
        output,
        coordinate_map_key=out_coordinate_map_key,
        coordinate_manager=coordinate_manager,
    )

def forward(self, input: SparseTensor):
    assert isinstance(input, SparseTensor)

    output = self.inst_norm.apply(
        input.F, input.coordinate_map_key, None, input.coordinate_manager
    )
    output = output * self.weight + self.bias
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

def forward(
    self,
    input: SparseTensor,
    coords: Union[torch.IntTensor, CoordinateMapKey, SparseTensor] = None,
):
    r"""
    :attr:`input` (`MinkowskiEngine.SparseTensor`): Input sparse tensor to apply a
    convolution on.

    :attr:`coords` ((`torch.IntTensor`, `MinkowskiEngine.CoordinateMapKey`,
    `MinkowskiEngine.SparseTensor`), optional): If provided, generate results on
    the provided coordinates. None by default.
    """
    assert isinstance(input, SparseTensor)
    assert input.D == self.dimension
    assert (
        self.in_channels == input.shape[1]
    ), f"Channel size mismatch {self.in_channels} != {input.shape[1]}"

    # Create a region_offset
    region_type_, region_offset_, _ = self.kernel_generator.get_kernel(
        input.tensor_stride, False
    )

    cm = input.coordinate_manager
    in_key = input.coordinate_map_key

    out_key = cm.stride(in_key, self.kernel_generator.kernel_stride)
    N_out = cm.size(out_key)
    out_F = input._F.new(N_out, self.in_channels).zero_()

    kernel_map = cm.get_kernel_map(
        in_key,
        out_key,
        self.kernel_generator.kernel_stride,
        self.kernel_generator.kernel_size,
        self.kernel_generator.kernel_dilation,
        region_type=region_type_,
        region_offset=region_offset_,
    )

    # Each kernel offset k maps a set of input rows to output rows; apply the
    # per-channel weights of that offset and accumulate into the output.
    for k, in_out in kernel_map.items():
        in_out = in_out.long().to(input.device)
        out_F[in_out[1]] += input.F[in_out[0]] * self.kernel[k]

    if self.bias is not None:
        out_F += self.bias

    return SparseTensor(out_F, coordinate_map_key=out_key, coordinate_manager=cm)

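# Usage sketch (hedged: this forward matches a channelwise (depthwise)
# convolution where each channel has its own kernel; assumed to be exposed as
# ME.MinkowskiChannelwiseConvolution):
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> coords = torch.IntTensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]])
# >>> sin = ME.SparseTensor(torch.rand(3, 8), coordinates=coords)
# >>> conv = ME.MinkowskiChannelwiseConvolution(8, kernel_size=3, dimension=2)
# >>> out = conv(sin)  # out.F keeps 8 channels (one kernel per channel)
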
def _wrap_tensor(input, F):
    if isinstance(input, TensorField):
        return TensorField(
            F,
            coordinate_field_map_key=input.coordinate_field_map_key,
            coordinate_manager=input.coordinate_manager,
            quantization_mode=input.quantization_mode,
        )
    else:
        return SparseTensor(
            F,
            coordinate_map_key=input.coordinate_map_key,
            coordinate_manager=input.coordinate_manager,
        )

def forward(self, x):
    # Broadcastable per-batch negative mean: -E_b[x]
    neg_mean_in = self.mean_in(
        SparseTensor(
            -x.F,
            coordinate_map_key=x.coordinate_map_key,
            coordinate_manager=x.coordinate_manager,
        )
    )
    centered_in = self.glob_sum(x, neg_mean_in)

    # Per-batch variance: E_b[(x - mean)^2]
    temp = SparseTensor(
        centered_in.F ** 2,
        coordinate_map_key=centered_in.coordinate_map_key,
        coordinate_manager=centered_in.coordinate_manager,
    )
    var_in = self.glob_mean(temp)
    instd_in = SparseTensor(
        1 / (var_in.F + self.eps).sqrt(),
        coordinate_map_key=var_in.coordinate_map_key,
        coordinate_manager=var_in.coordinate_manager,
    )

    x = self.glob_times(self.glob_sum2(x, neg_mean_in), instd_in)
    return SparseTensor(
        x.F * self.weight + self.bias,
        coordinate_map_key=x.coordinate_map_key,
        coordinate_manager=x.coordinate_manager,
    )

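# Dense reference for the computation above (a sketch of the instance-norm
# identity it evaluates with global pooling/broadcast ops: mean = E[x],
# var = E[(x - mean)^2], out = (x - mean) / sqrt(var + eps) * weight + bias,
# where the expectation runs over all coordinates of one batch):
#
# >>> import torch
# >>> x = torch.rand(7, 4)                    # feature rows of one batch
# >>> mean = x.mean(0, keepdim=True)
# >>> var = ((x - mean) ** 2).mean(0, keepdim=True)
# >>> out = (x - mean) / (var + 1e-6).sqrt()  # then scale and shift
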
def forward(self, input):
    output = self.bn(input.F)
    if isinstance(input, TensorField):
        return TensorField(
            output,
            coordinate_field_map_key=input.coordinate_field_map_key,
            coordinate_manager=input.coordinate_manager,
            quantization_mode=input.quantization_mode,
        )
    else:
        return SparseTensor(
            output,
            coordinate_map_key=input.coordinate_map_key,
            coordinate_manager=input.coordinate_manager,
        )

def sparse(self, quantization_mode=None):
    r"""Converts the current sparse tensor field to a sparse tensor."""
    if quantization_mode is None:
        quantization_mode = self.quantization_mode

    sparse_tensor = SparseTensor(
        self._F,
        coordinates=self.coordinates,
        quantization_mode=quantization_mode,
        coordinate_manager=self.coordinate_manager,
    )

    # Save the inverse mapping
    self._inverse_mapping = sparse_tensor.inverse_mapping
    return sparse_tensor

def forward(self, input: SparseTensor, input_glob: SparseTensor):
    assert isinstance(input, SparseTensor)

    output = self.broadcast.apply(
        input.F,
        input_glob.F,
        self.operation_type,
        input.coordinate_map_key,
        input_glob.coordinate_map_key,
        input.coordinate_manager,
    )
    return SparseTensor(
        output,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_manager=input.coordinate_manager,
    )

def forward(self, input: Union[SparseTensor, TensorField]):
    out_F = torch.sin(input.F.mm(self.kernel) + self.bias) * self.coef
    if isinstance(input, TensorField):
        return TensorField(
            out_F,
            coordinate_field_map_key=input.coordinate_field_map_key,
            coordinate_manager=input.coordinate_manager,
            quantization_mode=input.quantization_mode,
        )
    else:
        return SparseTensor(
            out_F,
            coordinate_map_key=input.coordinate_map_key,
            coordinate_manager=input.coordinate_manager,
        )

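# The layer above computes Fourier-style features: sin(x @ W + b) * coef.
# Dense sketch of the same transform (W, b, coef stand in for the layer's
# self.kernel, self.bias, self.coef; shapes are illustrative):
#
# >>> import torch
# >>> x = torch.rand(5, 3)
# >>> W, b, coef = torch.rand(3, 8), torch.rand(8), 1.0
# >>> out = torch.sin(x @ W + b) * coef  # shape (5, 8)
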
def forward(self, input: Union[SparseTensor, TensorField]):
    output = self.linear(input.F)
    if isinstance(input, TensorField):
        return TensorField(
            output,
            coordinate_field_map_key=input.coordinate_field_map_key,
            coordinate_manager=input.coordinate_manager,
            quantization_mode=input.quantization_mode,
        )
    else:
        return SparseTensor(
            output,
            coordinate_map_key=input.coordinate_map_key,
            coordinate_manager=input.coordinate_manager,
        )

def forward(
    self,
    input: SparseTensor,
    coordinates: Union[torch.Tensor, CoordinateMapKey, SparseTensor] = None,
):
    r"""
    :attr:`input` (`MinkowskiEngine.SparseTensor`): Input sparse tensor to apply a
    convolution on.

    :attr:`coordinates` ((`torch.IntTensor`, `MinkowskiEngine.CoordinateMapKey`,
    `MinkowskiEngine.SparseTensor`), optional): If provided, generate results on
    the provided coordinates. None by default.
    """
    assert isinstance(input, SparseTensor)
    assert input.D == self.dimension

    if self.use_mm:
        # If the kernel_size == 1, the convolution is simply a matrix
        # multiplication
        out_coordinate_map_key = input.coordinate_map_key
        outfeat = input.F.mm(self.kernel)
    else:
        # Get a new coordinate_map_key or extract one from the coords
        out_coordinate_map_key = _get_coordinate_map_key(
            input, coordinates, self.kernel_generator.expand_coordinates
        )
        outfeat = self.conv.apply(
            input.F,
            self.kernel,
            self.kernel_generator,
            self.convolution_mode,
            input.coordinate_map_key,
            out_coordinate_map_key,
            input._manager,
        )
    if self.bias is not None:
        outfeat += self.bias

    return SparseTensor(
        outfeat,
        coordinate_map_key=out_coordinate_map_key,
        coordinate_manager=input._manager,
    )

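# Usage sketch (hedged: assumes this is ME.MinkowskiConvolution's forward;
# note that kernel_size == 1 with stride 1 degenerates to a per-row matrix
# multiply, which is the use_mm fast path above):
#
# >>> import torch
# >>> import MinkowskiEngine as ME
# >>> coords = torch.IntTensor([[0, 0, 0], [0, 1, 0], [0, 0, 2]])
# >>> sin = ME.SparseTensor(torch.rand(3, 4), coordinates=coords)
# >>> conv = ME.MinkowskiConvolution(4, 16, kernel_size=3, stride=1, dimension=2)
# >>> out = conv(sin)  # out.F has shape (N_out, 16)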