def test_union(self):
    coords1 = torch.IntTensor([[0, 0], [0, 1]])
    coords2 = torch.IntTensor([[0, 1], [1, 1]])
    feats1 = torch.DoubleTensor([[1], [2]])
    feats2 = torch.DoubleTensor([[3], [4]])
    union = MinkowskiUnion()
    input1 = SparseTensor(
        coordinates=ME.utils.batched_coordinates([coords1]), features=feats1
    )
    input2 = SparseTensor(
        coordinates=ME.utils.batched_coordinates([coords2]),
        features=feats2,
        coordinate_manager=input1.coordinate_manager,  # Must use same coordinate manager
    )
    input1.requires_grad_()
    input2.requires_grad_()
    output = union(input1, input2)
    print(output)
    self.assertTrue(len(output) == 3)
    self.assertTrue(5 in output.F)
    output.F.sum().backward()
    # Grad of sum feature is 1.
    self.assertTrue(torch.prod(input1.F.grad) == 1)
    self.assertTrue(torch.prod(input2.F.grad) == 1)
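# Note on the semantics exercised above (an added explanation, grounded in the
# assertions themselves): MinkowskiUnion outputs a tensor whose coordinate set
# is the union of the input coordinate sets, with features summed at shared
# coordinates. Here coords1 = {(0, 0), (0, 1)} and coords2 = {(0, 1), (1, 1)}
# overlap only at (0, 1), so the output has 3 coordinates and the feature
# there is 2 + 3 = 5, which is what `len(output) == 3` and `5 in output.F`
# check. Since the union only routes (and sums) features, the gradient of
# `output.F.sum()` with respect to every input feature is 1, verified by the
# `torch.prod(... .grad) == 1` assertions.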
def test_unpool_gpu(self):
    if not torch.cuda.is_available():
        return
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    input = SparseTensor(feats, coords)
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2, dimension=D
    )
    conv = conv.double()
    unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
    input = conv(input)
    output = unpool(input)
    print(output)

    # Check backward
    fn = MinkowskiLocalPoolingTransposeFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                unpool.pooling_mode,
                unpool.kernel_generator,
                input.coordinate_map_key,
                None,
                input.coordinate_manager,
            ),
        )
    )

    with torch.cuda.device(0):
        conv = conv.to("cuda")
        input = SparseTensor(feats, coords, device="cuda")
        input = conv(input)
        input.requires_grad_()
        output = unpool(input)
        print(output)

        # Check backward
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    unpool.pooling_mode,
                    unpool.kernel_generator,
                    input.coordinate_map_key,
                    None,
                    input.coordinate_manager,
                ),
            )
        )
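# Note (added explanation): `gradcheck` compares analytical gradients against
# numerical finite differences, which is why both the features and the
# convolution are cast to double above; float32 is usually too imprecise for
# the numerical comparison to pass. The same function-level check runs twice,
# once on CPU tensors and once after moving the inputs and the convolution to
# CUDA, so both code paths of the transposed pooling backward are covered.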
def test(self):
    print(f"{self.__class__.__name__}: test_dense")
    in_channels, out_channels, D = 2, 3, 2
    coords1 = torch.IntTensor([[0, 0], [0, 1], [1, 1]])
    feats1 = torch.DoubleTensor([[1, 2], [3, 4], [5, 6]])
    coords2 = torch.IntTensor([[1, 1], [1, 2], [2, 1]])
    feats2 = torch.DoubleTensor([[7, 8], [9, 10], [11, 12]])
    coords, feats = ME.utils.sparse_collate([coords1, coords2], [feats1, feats2])
    input = SparseTensor(feats, coords)
    input.requires_grad_()
    dinput, min_coord, tensor_stride = input.dense()
    self.assertTrue(dinput[0, 0, 0, 1] == 3)
    self.assertTrue(dinput[0, 1, 0, 1] == 4)
    self.assertTrue(dinput[0, 0, 1, 1] == 5)
    self.assertTrue(dinput[0, 1, 1, 1] == 6)
    self.assertTrue(dinput[1, 0, 1, 1] == 7)
    self.assertTrue(dinput[1, 1, 1, 1] == 8)
    self.assertTrue(dinput[1, 0, 2, 1] == 11)
    self.assertTrue(dinput[1, 1, 2, 1] == 12)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        bias=True,
        dimension=D,
    )
    conv = conv.double()
    output = conv(input)
    print(input.C, output.C)

    # Convert to a dense tensor
    dense_output, min_coord, tensor_stride = output.dense()
    print(dense_output.shape)
    print(dense_output)
    print(min_coord)
    print(tensor_stride)

    dense_output, min_coord, tensor_stride = output.dense(
        min_coordinate=torch.IntTensor([-2, -2])
    )
    print(dense_output)
    print(min_coord)
    print(tensor_stride)

    print(feats.grad)
    loss = dense_output.sum()
    loss.backward()
    print(feats.grad)
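# Layout note (added explanation, based on the `dense()` call shown above):
# `dense()` returns a (dense_tensor, min_coordinate, tensor_stride) triple,
# with the dense tensor laid out as (batch, channels, *spatial). For example,
# `dinput[0, 0, 0, 1] == 3` reads batch 0, channel 0 at spatial location
# (0, 1), which holds the first component of feats1[1] = [3, 4]. Passing an
# explicit `min_coordinate` such as [-2, -2] shifts the origin of the dense
# volume, so the output tensor is padded to cover those negative coordinates.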
def test_union(self):
    coords1 = torch.IntTensor([[0, 0], [0, 1]])
    coords2 = torch.IntTensor([[0, 1], [1, 1]])
    feats1 = torch.DoubleTensor([[1], [2]])
    feats2 = torch.DoubleTensor([[3], [4]])
    input1 = SparseTensor(
        coords=ME.utils.batched_coordinates([coords1]), feats=feats1
    )
    input2 = SparseTensor(
        feats=feats2,
        coords=ME.utils.batched_coordinates([coords2]),
        coords_manager=input1.coords_man,  # Must use same coords manager
        force_creation=True,  # The tensor stride [1, 1] already exists.
    )
    input1.requires_grad_()
    input2.requires_grad_()
    union = MinkowskiUnion()
    output = union(input1, input2)
    print(output)
    self.assertTrue(len(output) == 3)
    self.assertTrue(5 in output.F)
    output.F.sum().backward()
    # Grad of sum feature is 1.
    self.assertTrue(torch.prod(input1.F.grad) == 1)
    self.assertTrue(torch.prod(input2.F.grad) == 1)

    device = torch.device("cuda")
    with torch.cuda.device(0):
        input1, input2 = input1.to(device), input2.to(device)
        output = union(input1, input2)
        output.F.sum().backward()
        print(output)
        self.assertTrue(len(output) == 3)
        self.assertTrue(5 in output.F)
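# Note on the flags above (added explanation): because input2 reuses input1's
# coordinate manager, a coordinate map with tensor stride [1, 1] already
# exists in that manager; `force_creation=True` forces a new map for input2's
# own coordinates instead of reusing input1's, as the inline comment notes.
# The CUDA block then repeats the same union and backward pass after moving
# both inputs to the GPU, expecting the same assertions to hold.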
def test_operation_mode(self):
    print(f"{self.__class__.__name__}: test_operation_mode")
    # Set to use the global sparse tensor coords manager by default
    set_sparse_tensor_operation_mode(
        SparseTensorOperationMode.SHARE_COORDINATE_MANAGER
    )
    coords, feats, labels = data_loader(nchannel=2)

    # Create a sparse tensor on two different coordinates.
    A = SparseTensor(torch.rand(feats.shape), coordinates=coords)
    B = SparseTensor(
        torch.rand(4, 2),
        coordinates=torch.IntTensor([[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]),
    )
    self.assertTrue(A.coordinate_manager == B.coordinate_manager)

    A.requires_grad_(True)
    B.requires_grad_(True)
    C = A + B
    C.F.sum().backward()
    self.assertTrue(torch.all(A.F.grad == 1).item())
    self.assertTrue(torch.all(B.F.grad == 1).item())

    C = A - B
    C = A * B
    C = A / B

    # Inplace
    A.requires_grad_(False)
    D = SparseTensor(
        torch.rand(feats.shape),
        coordinate_map_key=A.coordinate_map_key,
        coordinate_manager=A.coordinate_manager,
    )
    A -= D
    A *= D
    A /= D

    clear_global_coordinate_manager()
    set_sparse_tensor_operation_mode(
        SparseTensorOperationMode.SEPARATE_COORDINATE_MANAGER
    )
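# Behavior note (added explanation): under SHARE_COORDINATE_MANAGER, every
# SparseTensor created afterwards registers with a single global coordinate
# manager, which is what lets A and B, built on different coordinate sets, be
# combined with binary ops like `A + B`. The test clears the global manager
# and restores SEPARATE_COORDINATE_MANAGER at the end so later tests start
# from the default mode.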
def test_operation_mode(self):
    # Set to use the global sparse tensor coords manager by default
    set_sparse_tensor_operation_mode(
        SparseTensorOperationMode.SHARE_COORDS_MANAGER
    )
    coords, feats, labels = data_loader(nchannel=2)

    # Create a sparse tensor on two different coordinates.
    A = SparseTensor(torch.rand(feats.shape), coords, force_creation=True)
    B = SparseTensor(
        torch.rand(4, 2),
        torch.IntTensor([[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]),
        force_creation=True,
    )
    self.assertTrue(A.coords_man == B.coords_man)

    A.requires_grad_(True)
    B.requires_grad_(True)
    C = A + B
    C.F.sum().backward()
    self.assertTrue(torch.all(A.F.grad == 1).item())
    self.assertTrue(torch.all(B.F.grad == 1).item())

    C = A - B
    C = A * B
    C = A / B

    # Inplace
    A.requires_grad_(False)
    D = SparseTensor(torch.rand(feats.shape), coords_key=A.coords_key)
    A -= D
    A *= D
    A /= D