class TestTensorFieldSplat(unittest.TestCase):
    """Tests for TensorField.splat / interpolate round-trips."""

    def setUp(self):
        """Build a quantized TensorField from the bundled point cloud."""
        xyz, rgb, _pcd = load_file("1.ply")
        quantization = 0.02
        feats = torch.from_numpy(rgb).float()
        batched = batched_coordinates([xyz / quantization], dtype=torch.float32)
        self.tensor_field = TensorField(coordinates=batched, features=feats)

    def test_splat(self):
        """Splatting the fixture field must run without error."""
        self.tensor_field.splat()

    def test_small(self):
        """Splat a tiny 1-D (plus batch) field and interpolate it back."""
        pts = torch.FloatTensor([[0, 0.1], [0, 1.1]])
        vals = torch.FloatTensor([[1], [2]])
        field = TensorField(coordinates=pts, features=vals)
        splatted = field.splat()
        print(field)
        print(splatted)
        print(splatted.interpolate(field))

    def test_small2(self):
        """Splat a tiny 2-D (plus batch) field and interpolate it back."""
        pts = torch.FloatTensor([[0, 0.1, 0.1], [0, 1.1, 1.1]])
        vals = torch.FloatTensor([[1], [2]])
        field = TensorField(coordinates=pts, features=vals)
        splatted = field.splat()
        print(field)
        print(splatted)
        print(splatted.interpolate(field))
def setUp(self):
    """Load the sample point cloud, quantize it, and store a TensorField fixture."""
    xyz, rgb, _pcd = load_file("1.ply")
    quantization = 0.02
    feats = torch.from_numpy(rgb).float()
    batched = batched_coordinates([xyz / quantization], dtype=torch.float32)
    self.tensor_field = TensorField(coordinates=batched, features=feats)
def test_small2(self):
    """Splat a two-point 2-D (plus batch) field and interpolate it back."""
    pts = torch.FloatTensor([[0, 0.1, 0.1], [0, 1.1, 1.1]])
    vals = torch.FloatTensor([[1], [2]])
    field = TensorField(coordinates=pts, features=vals)
    splatted = field.splat()
    print(field)
    print(splatted)
    print(splatted.interpolate(field))
def slice(self, X, slicing_mode=0): r""" Args: :attr:`X` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor that discretized the original input. :attr:`slicing_mode`: For future updates. Returns: :attr:`tensor_field` (:attr:`MinkowskiEngine.TensorField`): the resulting tensor field contains features on the continuous coordinates that generated the input X. Example:: >>> # coords, feats from a data loader >>> print(len(coords)) # 227742 >>> tfield = ME.TensorField(coords=coords, feats=feats, quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE) >>> print(len(tfield)) # 227742 >>> sinput = tfield.sparse() # 161890 quantization results in fewer voxels >>> soutput = MinkUNet(sinput) >>> print(len(soutput)) # 161890 Output with the same resolution >>> ofield = soutput.slice(tfield) >>> assert isinstance(ofield, ME.TensorField) >>> len(ofield) == len(coords) # recovers the original ordering and length >>> assert isinstance(ofield.F, torch.Tensor) # .F returns the features """ # Currently only supports unweighted slice. assert X.quantization_mode in [ SparseTensorQuantizationMode.RANDOM_SUBSAMPLE, SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE, ], "slice only available for sparse tensors with quantization RANDOM_SUBSAMPLE or UNWEIGHTED_AVERAGE" assert ( X.coordinate_map_key == self.coordinate_map_key ), "Slice can only be applied on the same coordinates (coordinate_map_key)" from MinkowskiTensorField import TensorField if isinstance(X, TensorField): return TensorField( self.F[X.inverse_mapping], coordinate_map_key=X.coordinate_map_key, coordinate_field_map_key=X.coordinate_field_map_key, coordinate_manager=X.coordinate_manager, inverse_mapping=X.inverse_mapping, quantization_mode=X.quantization_mode, ) else: return TensorField( self.F[X.inverse_mapping], coordinates=self.C[X.inverse_mapping], coordinate_map_key=X.coordinate_map_key, coordinate_manager=X.coordinate_manager, inverse_mapping=X.inverse_mapping, quantization_mode=X.quantization_mode, )
def test_pcd(self):
    """A TensorField keeps the original point count; sparse() quantizes it."""
    xyz, rgb, _pcd = load_file("1.ply")
    quantization = 0.02
    feats = torch.from_numpy(rgb)
    batched = batched_coordinates([xyz / quantization])
    field = TensorField(feats, batched)
    self.assertTrue(len(field) == len(feats))
    sparse = field.sparse()
    print(sparse)
def cat_slice(self, X): r""" Args: :attr:`X` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor that discretized the original input. Returns: :attr:`tensor_field` (:attr:`MinkowskiEngine.TensorField`): the resulting tensor field contains the concatenation of features on the original continuous coordinates that generated the input X and the self. Example:: >>> # coords, feats from a data loader >>> print(len(coords)) # 227742 >>> sinput = ME.SparseTensor(coordinates=coords, features=feats, quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE) >>> print(len(sinput)) # 161890 quantization results in fewer voxels >>> soutput = network(sinput) >>> print(len(soutput)) # 161890 Output with the same resolution >>> ofield = soutput.cat_slice(sinput) >>> assert soutput.F.size(1) + sinput.F.size(1) == ofield.F.size(1) # concatenation of features """ # Currently only supports unweighted slice. assert X.quantization_mode in [ SparseTensorQuantizationMode.RANDOM_SUBSAMPLE, SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE, ], "slice only available for sparse tensors with quantization RANDOM_SUBSAMPLE or UNWEIGHTED_AVERAGE" from MinkowskiTensorField import TensorField features = torch.cat( (self.F[X.inverse_mapping(self.coordinate_map_key)], X.F), dim=1) if isinstance(X, TensorField): return TensorField( features, coordinate_field_map_key=X.coordinate_field_map_key, coordinate_manager=X.coordinate_manager, quantization_mode=X.quantization_mode, ) elif isinstance(X, SparseTensor): assert ( X.coordinate_map_key == self.coordinate_map_key ), "Slice can only be applied on the same coordinates (coordinate_map_key)" return TensorField( features, coordinates=self.C[X.inverse_mapping(self.coordinate_map_key)], coordinate_manager=self.coordinate_manager, quantization_mode=self.quantization_mode, ) else: raise ValueError( "Invalid input. The input must be an instance of TensorField or SparseTensor." )
def test(self):
    """Default quantization averages the features of duplicated coordinates."""
    pts = torch.IntTensor([[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0],
                           [1, 1]])
    vals = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
    field = TensorField(vals, pts, device=vals.device)
    # Convert to a sparse tensor; duplicates collapse into one voxel each.
    sparse = field.sparse()
    print(sparse)
    self.assertTrue(
        {0.5, 2.5, 5.5, 7} == set(sparse.F.squeeze().numpy()))
def test(self):
    """UNWEIGHTED_AVERAGE quantization averages features of duplicated coordinates."""
    pts = torch.IntTensor([[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0],
                           [1, 1]])
    vals = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
    field = TensorField(vals, pts, device=vals.device)
    # Convert to a sparse tensor with explicit averaging mode.
    sparse = field.sparse(
        quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)
    print(sparse)
    self.assertTrue(
        {0.5, 2.5, 5.5, 7} == set(sparse.F.squeeze().detach().numpy()))
def slice(self):
    """slice/cat_slice restore the field resolution; cat_slice concatenates channels."""
    xyz, rgb, _pcd = load_file("1.ply")
    quantization = 0.02
    feats = torch.from_numpy(rgb).float()
    batched = batched_coordinates([xyz / quantization], dtype=torch.float32)
    field = TensorField(feats, batched)
    net = nn.Sequential(
        MinkowskiLinear(3, 16),
        MinkowskiBatchNorm(16),
        MinkowskiReLU(),
        MinkowskiLinear(16, 32),
        MinkowskiBatchNorm(32),
        MinkowskiReLU(),
        MinkowskiToSparseTensor(),
        MinkowskiConvolution(32, 64, kernel_size=3, stride=2, dimension=3),
        MinkowskiConvolutionTranspose(64, 32, kernel_size=3, stride=2, dimension=3),
    )
    out = net(field)
    sliced = out.slice(field)
    self.assertEqual(len(field), len(sliced))
    self.assertEqual(sliced.F.size(1), out.F.size(1))
    sliced = out.cat_slice(field)
    self.assertEqual(len(field), len(sliced))
    self.assertEqual(sliced.F.size(1), out.F.size(1) + field.F.size(1))
def _tuple_operator(*sparse_tensors, operator):
    """Apply ``operator`` to the features of several tensors on shared coordinates.

    Accepts either multiple SparseTensor/TensorField arguments or a single
    list/tuple of them. All inputs must be of the same class, live on the same
    device, share a coordinate manager, and share the same coordinate
    (field) map key. Returns a new tensor of the same class wrapping
    ``operator(list_of_feature_tensors)``.

    Raises:
        AssertionError: on fewer than two inputs or mismatched inputs.
        ValueError: if the inputs are neither SparseTensors nor TensorFields.
    """
    # Allow a single list/tuple argument in place of varargs.
    if len(sparse_tensors) == 1:
        assert isinstance(sparse_tensors[0], (tuple, list))
        sparse_tensors = sparse_tensors[0]
    assert (
        len(sparse_tensors) > 1
    ), "Invalid number of inputs. The input must be at least two len(sparse_tensors) > 1"

    if isinstance(sparse_tensors[0], SparseTensor):
        device = sparse_tensors[0].device
        coordinate_manager = sparse_tensors[0].coordinate_manager
        coordinate_map_key = sparse_tensors[0].coordinate_map_key
        # Every input must match the first one exactly.
        for s in sparse_tensors:
            assert isinstance(
                s, SparseTensor
            ), "Inputs must be either SparseTensors or TensorFields."
            assert (device == s.device
                    ), f"Device must be the same. {device} != {s.device}"
            assert (coordinate_manager == s.coordinate_manager
                    ), COORDINATE_MANAGER_DIFFERENT_ERROR
            assert coordinate_map_key == s.coordinate_map_key, (
                COORDINATE_KEY_DIFFERENT_ERROR + str(coordinate_map_key) +
                " != " + str(s.coordinate_map_key))
        return SparseTensor(
            operator([s.F for s in sparse_tensors]),
            coordinate_map_key=coordinate_map_key,
            coordinate_manager=coordinate_manager,
        )
    elif isinstance(sparse_tensors[0], TensorField):
        device = sparse_tensors[0].device
        coordinate_manager = sparse_tensors[0].coordinate_manager
        coordinate_field_map_key = sparse_tensors[0].coordinate_field_map_key
        for s in sparse_tensors:
            assert isinstance(
                s, TensorField
            ), "Inputs must be either SparseTensors or TensorFields."
            assert (device == s.device
                    ), f"Device must be the same. {device} != {s.device}"
            assert (coordinate_manager == s.coordinate_manager
                    ), COORDINATE_MANAGER_DIFFERENT_ERROR
            assert coordinate_field_map_key == s.coordinate_field_map_key, (
                COORDINATE_KEY_DIFFERENT_ERROR + str(coordinate_field_map_key)
                + " != " + str(s.coordinate_field_map_key))
        return TensorField(
            operator([s.F for s in sparse_tensors]),
            coordinate_field_map_key=coordinate_field_map_key,
            coordinate_manager=coordinate_manager,
        )
    else:
        raise ValueError(
            "Invalid data type. The input must be either a list of sparse tensors or a list of tensor fields."
        )
def field_to_sparse(self):
    """sparse() with an explicit coordinate_map_key matches the network output size."""
    xyz, rgb, _pcd = load_file("1.ply")
    quantization = 0.02
    feats = torch.from_numpy(rgb).float()
    batched = batched_coordinates([xyz / quantization], dtype=torch.float32)
    field = TensorField(feats, batched)
    net = nn.Sequential(
        MinkowskiToSparseTensor(),
        MinkowskiConvolution(3, 8, kernel_size=3, stride=4, dimension=3),
        MinkowskiReLU(),
        MinkowskiConvolution(8, 16, kernel_size=3, stride=4, dimension=3),
    )
    out = net(field)
    projected = field.sparse(coordinate_map_key=out.coordinate_map_key)
    self.assertTrue(len(projected.F) == len(out))
def test_maxpool(self):
    """MAX_POOL quantization keeps the maximum feature per duplicated coordinate."""
    pts = torch.IntTensor([[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0],
                           [1, 1]])
    vals = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
    field = TensorField(vals, pts)
    # Convert to a sparse tensor on CPU.
    sparse = field.sparse(
        quantization_mode=SparseTensorQuantizationMode.MAX_POOL)
    print(sparse)
    self.assertTrue(
        {1, 3, 6, 7} == set(sparse.F.squeeze().detach().numpy()))
    # Repeat on CUDA when available.
    if not torch.cuda.is_available():
        return
    field = TensorField(vals, pts, device="cuda")
    sparse = field.sparse(
        quantization_mode=SparseTensorQuantizationMode.MAX_POOL)
    print(sparse)
    self.assertTrue(
        {1, 3, 6, 7} == set(sparse.F.squeeze().detach().cpu().numpy()))
def _wrap_tensor(input, F):
    """Wrap the feature matrix ``F`` in the same tensor class as ``input``,
    reusing ``input``'s coordinate metadata."""
    if not isinstance(input, TensorField):
        return SparseTensor(
            F,
            coordinate_map_key=input.coordinate_map_key,
            coordinate_manager=input.coordinate_manager,
        )
    return TensorField(
        F,
        coordinate_field_map_key=input.coordinate_field_map_key,
        coordinate_manager=input.coordinate_manager,
        quantization_mode=input.quantization_mode,
    )
def forward(self, input):
    """Apply the wrapped batch norm to the features and rewrap the result
    in the same tensor class as ``input``."""
    normed = self.bn(input.F)
    if not isinstance(input, TensorField):
        return SparseTensor(
            normed,
            coordinate_map_key=input.coordinate_map_key,
            coordinate_manager=input.coordinate_manager,
        )
    return TensorField(
        normed,
        coordinate_field_map_key=input.coordinate_field_map_key,
        coordinate_manager=input.coordinate_manager,
        quantization_mode=input.quantization_mode,
    )
def stride_slice(self):
    """Slicing a strided network output back onto the input field must run."""
    xyz, rgb, _pcd = load_file("1.ply")
    quantization = 0.02
    feats = torch.from_numpy(rgb).float()
    batched = batched_coordinates([xyz / quantization], dtype=torch.float32)
    field = TensorField(feats, batched)
    net = nn.Sequential(
        MinkowskiToSparseTensor(),
        MinkowskiConvolution(3, 8, kernel_size=3, stride=4, dimension=3),
        MinkowskiReLU(),
        MinkowskiConvolution(8, 16, kernel_size=3, stride=4, dimension=3),
    )
    out = net(field)
    _sliced = out.slice(field)
def forward(self, input: Union[SparseTensor, TensorField]):
    """Apply the wrapped linear layer to the features and rewrap the result
    in the same tensor class as ``input``, carrying over all coordinate keys."""
    projected = self.linear(input.F)
    if not isinstance(input, TensorField):
        return SparseTensor(
            projected,
            coordinate_map_key=input.coordinate_map_key,
            coordinate_manager=input.coordinate_manager,
        )
    return TensorField(
        projected,
        coordinate_map_key=input.coordinate_map_key,
        coordinate_field_map_key=input.coordinate_field_map_key,
        coordinate_manager=input.coordinate_manager,
        inverse_mapping=input.inverse_mapping,
        quantization_mode=input.quantization_mode,
    )
def forward(self, input: Union[SparseTensor, TensorField]):
    """Apply a scaled sinusoidal activation, ``coef * sin(F @ kernel + bias)``,
    and rewrap the result in the same tensor class as ``input``."""
    activated = torch.sin(input.F.mm(self.kernel) + self.bias) * self.coef
    if not isinstance(input, TensorField):
        return SparseTensor(
            activated,
            coordinate_map_key=input.coordinate_map_key,
            coordinate_manager=input.coordinate_manager,
        )
    return TensorField(
        activated,
        coordinate_field_map_key=input.coordinate_field_map_key,
        coordinate_manager=input.coordinate_manager,
        quantization_mode=input.quantization_mode,
    )
def interpolate(self, X):
    """Interpolate this sparse tensor's features onto the continuous
    coordinates of the tensor field ``X`` and return a new TensorField.

    If ``X`` has cached splat maps for this tensor's coordinate map, the
    interpolation is computed as a sparse matrix-matrix product with the
    cached weights; otherwise it falls back to ``features_at_coordinates``.
    """
    # Local import avoids a circular import at module load time.
    from MinkowskiTensorField import TensorField

    assert isinstance(X, TensorField)
    if self.coordinate_map_key in X._splat:
        # Reuse the (tensor_map, field_map, weights, size) tuple cached by a
        # prior splat() call on X for this coordinate map.
        tensor_map, field_map, weights, size = X._splat[
            self.coordinate_map_key]
        size = torch.Size([size[1], size[0]])  # transpose
        # Interpolation is the transpose of splatting: swap the row/column
        # index roles and multiply the weights with this tensor's features.
        features = MinkowskiSPMMFunction().apply(field_map, tensor_map,
                                                 weights, size, self._F)
    else:
        # No cached splat: evaluate features directly at X's coordinates.
        features = self.features_at_coordinates(X.C)
    return TensorField(
        features=features,
        coordinate_field_map_key=X.coordinate_field_map_key,
        coordinate_manager=X.coordinate_manager,
    )
def test_network_device(self):
    """A TensorField placed on GPU 0 must flow through a GPU network."""
    xyz, rgb, _pcd = load_file("1.ply")
    quantization = 0.02
    feats = torch.from_numpy(rgb)
    batched = batched_coordinates([xyz / quantization])
    field = TensorField(feats, batched, device=0).float()
    net = nn.Sequential(
        MinkowskiLinear(3, 16),
        MinkowskiBatchNorm(16),
        MinkowskiReLU(),
        MinkowskiLinear(16, 32),
        MinkowskiBatchNorm(32),
        MinkowskiReLU(),
        MinkowskiToSparseTensor(),
        MinkowskiConvolution(32, 64, kernel_size=3, stride=2, dimension=3),
    ).to(0)
    print(net(field))
def cat(*sparse_tensors):
    r"""Concatenate sparse tensors

    Concatenate sparse tensor features. All sparse tensors must have the same
    `coords_key` (the same coordinates). To concatenate sparse tensors with
    different sparsity patterns, use SparseTensor binary operations, or
    :attr:`MinkowskiEngine.MinkowskiUnion`.

    Example::

       >>> import MinkowskiEngine as ME
       >>> sin = ME.SparseTensor(feats, coords)
       >>> sin2 = ME.SparseTensor(feats2, coordinate_map_key=sin.coordinate_map_key, coords_man=sin.coordinate_manager)
       >>> sout = UNet(sin)  # Returns an output sparse tensor on the same coordinates
       >>> sout2 = ME.cat(sin, sin2, sout)  # Can concatenate multiple sparse tensors

    """
    # All input validation (same class, device, coordinate manager, and
    # coordinate key) and output construction is identical to the other
    # element-wise tuple operations, so delegate to _tuple_operator instead
    # of duplicating it. This also lets cat accept a single list/tuple of
    # tensors in addition to varargs.
    return _tuple_operator(
        *sparse_tensors, operator=lambda feats: torch.cat(feats, dim=1)
    )