def test(self):
    """Forward a conv then a conv-transpose and gradcheck the transpose function (legacy API)."""
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    # Double precision is required for a numerically reliable gradcheck.
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        has_bias=True,
        dimension=D)
    conv = conv.double()
    conv_tr = MinkowskiConvolutionTranspose(
        out_channels,
        in_channels,
        kernel_size=2,
        stride=2,
        has_bias=True,
        dimension=D)
    conv_tr = conv_tr.double()

    input = conv(input)
    output = conv_tr(input)
    print(output)

    # Check backward
    fn = MinkowskiConvolutionTransposeFunction()
    self.assertTrue(
        gradcheck(fn, (input.F, conv_tr.kernel, input.tensor_stride,
                       conv_tr.stride, conv_tr.kernel_size, conv_tr.dilation,
                       conv_tr.region_type_, conv_tr.region_offset_,
                       input.coords_key, None, input.coords_man)))
def test(self):
    """Forward a strided convolution, inspect its kernel map, and gradcheck (legacy API)."""
    print(f"{self.__class__.__name__}: test")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    # Double precision is required for a numerically reliable gradcheck.
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        has_bias=True,
        dimension=D)
    conv = conv.double()
    output = conv(input)
    print(output)

    # Inspect the in/out map between tensor stride 1 and 2.
    kernel_map = input.coords_man.get_kernel_map(1, 2, stride=2, kernel_size=3)
    print(kernel_map)

    # Check backward
    fn = MinkowskiConvolutionFunction()
    self.assertTrue(
        gradcheck(fn, (input.F, conv.kernel, input.tensor_stride, conv.stride,
                       conv.kernel_size, conv.dilation, conv.region_type_,
                       conv.region_offset_, input.coords_key, None,
                       input.coords_man)))
def test_analytic(self):
    """Run conv and conv-transpose with hand-set kernels on a tiny fixed input.

    No assertions — outputs are printed for manual/analytic inspection.
    """
    print(f"{self.__class__.__name__}: test")
    in_channels, out_channels, D = 2, 2, 2
    coords = torch.IntTensor([[0, 0, 0], [0, 1, 1], [0, 2, 1]])
    feats = torch.FloatTensor([[0, 1], [1, 0], [1, 1]])
    input = SparseTensor(feats, coordinates=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=2,
        stride=2,
        bias=False,
        dimension=D)
    # Fixed kernel so the output can be verified by hand.
    conv.kernel[:] = torch.FloatTensor([[[1, 2], [2, 1]], [[0, 1], [1, 0]],
                                        [[0, 1], [1, 1]], [[1, 1], [1, 0]]])
    output = conv(input)
    print(output)

    conv_tr = MinkowskiConvolutionTranspose(
        in_channels,
        out_channels,
        kernel_size=2,
        stride=2,
        bias=False,
        dimension=D)
    # Same fixed kernel for the transposed convolution.
    conv_tr.kernel[:] = torch.FloatTensor([[[1, 2], [2, 1]], [[0, 1], [1, 0]],
                                           [[0, 1], [1, 1]], [[1, 1], [1, 0]]])
    output_tr = conv_tr(output)
    print(output_tr)
def test(self):
    """Convolve, convert the sparse output to a dense tensor, and backprop through it."""
    print(f"{self.__class__.__name__}: test_dense")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        has_bias=True,
        dimension=D)
    conv = conv.double()
    output = conv(input)
    print(input.C, output.C)

    # Convert to a dense tensor.
    # NOTE(review): the first dense() result is immediately overwritten —
    # presumably intended to exercise both the default and the bounded call
    # forms; confirm before removing.
    dense_output, min_coord, tensor_stride = output.dense()
    dense_output, min_coord, tensor_stride = output.dense(
        min_coords=torch.IntTensor([-2, -2]),
        max_coords=torch.IntTensor([4, 4]))
    print(dense_output)
    print(min_coord)
    print(tensor_stride)

    # feats.grad is None here (printed for comparison), populated after backward.
    print(feats.grad)
    loss = dense_output.sum()
    loss.backward()
    print(feats.grad)
def test_unpool(self):
    """Forward conv then pooling-transpose (unpool) and gradcheck it (legacy API)."""
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    # Double precision for a numerically reliable gradcheck.
    feats = feats.double()
    input = SparseTensor(feats, coords)
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2, dimension=D
    )
    conv = conv.double()
    unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
    input = conv(input)
    output = unpool(input)
    print(output)

    # Check backward
    fn = MinkowskiPoolingTransposeFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                input.tensor_stride,
                unpool.stride,
                unpool.kernel_size,
                unpool.dilation,
                unpool.region_type_,
                unpool.region_offset_,
                False,
                input.coords_key,
                None,
                input.coords_man,
            ),
        )
    )
def test_unpool(self):
    """Forward conv then pooling-transpose (unpool) and gradcheck it (new API)."""
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    # Double precision for a numerically reliable gradcheck.
    feats = feats.double()
    input = SparseTensor(feats, coords)
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2, dimension=D
    )
    conv = conv.double()
    unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
    input = conv(input)
    output = unpool(input)
    print(output)

    # Check backward
    fn = MinkowskiLocalPoolingTransposeFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                unpool.pooling_mode,
                unpool.kernel_generator,
                input.coordinate_map_key,
                None,
                input.coordinate_manager,
            ),
        )
    )
def test_unpool_gpu(self):
    """Gradcheck the pooling-transpose on CPU, then repeat on CUDA device 0."""
    if not torch.cuda.is_available():
        return

    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    input = SparseTensor(feats, coords)
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2, dimension=D)
    conv = conv.double()
    unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
    input = conv(input)
    output = unpool(input)
    print(output)

    # Check backward (CPU)
    fn = MinkowskiLocalPoolingTransposeFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                unpool.pooling_mode,
                unpool.kernel_generator,
                input.coordinate_map_key,
                None,
                input.coordinate_manager,
            ),
        ))

    with torch.cuda.device(0):
        conv = conv.to("cuda")
        input = SparseTensor(feats, coords, device="cuda")
        input = conv(input)
        input.requires_grad_()
        output = unpool(input)
        print(output)

        # Check backward (GPU)
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    unpool.pooling_mode,
                    unpool.kernel_generator,
                    input.coordinate_map_key,
                    None,
                    input.coordinate_manager,
                ),
            ))
def test(self):
    """Collate a 2-sample batch, assert the dense layout, then conv and backprop."""
    print(f"{self.__class__.__name__}: test_dense")
    in_channels, out_channels, D = 2, 3, 2
    coords1 = torch.IntTensor([[0, 0], [0, 1], [1, 1]])
    feats1 = torch.DoubleTensor([[1, 2], [3, 4], [5, 6]])
    coords2 = torch.IntTensor([[1, 1], [1, 2], [2, 1]])
    feats2 = torch.DoubleTensor([[7, 8], [9, 10], [11, 12]])
    coords, feats = ME.utils.sparse_collate([coords1, coords2],
                                            [feats1, feats2])
    input = SparseTensor(feats, coords)
    input.requires_grad_()
    dinput, min_coord, tensor_stride = input.dense()

    # Verify dense layout: dinput[batch, channel, y, x].
    self.assertTrue(dinput[0, 0, 0, 1] == 3)
    self.assertTrue(dinput[0, 1, 0, 1] == 4)
    self.assertTrue(dinput[0, 0, 1, 1] == 5)
    self.assertTrue(dinput[0, 1, 1, 1] == 6)
    self.assertTrue(dinput[1, 0, 1, 1] == 7)
    self.assertTrue(dinput[1, 1, 1, 1] == 8)
    self.assertTrue(dinput[1, 0, 2, 1] == 11)
    self.assertTrue(dinput[1, 1, 2, 1] == 12)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        bias=True,
        dimension=D,
    )
    conv = conv.double()
    output = conv(input)
    print(input.C, output.C)

    # Convert to a dense tensor
    dense_output, min_coord, tensor_stride = output.dense()
    print(dense_output.shape)
    print(dense_output)
    print(min_coord)
    print(tensor_stride)

    # Same conversion anchored at an explicit minimum coordinate.
    dense_output, min_coord, tensor_stride = output.dense(
        min_coordinate=torch.IntTensor([-2, -2]))
    print(dense_output)
    print(min_coord)
    print(tensor_stride)

    # feats.grad is None until backward runs.
    print(feats.grad)
    loss = dense_output.sum()
    loss.backward()
    print(feats.grad)
def test_with_convtr(self):
    """Upsample with coordinate-generating conv-transposes, prune, and check gradients flow."""
    channels, D = [2, 3, 4], 2
    coords, feats, labels = data_loader(channels[0], batch_size=1)
    feats = feats.double()
    feats.requires_grad_()

    # Create a sparse tensor with large tensor strides for upsampling
    start_tensor_stride = 4
    input = SparseTensor(
        feats,
        coords * start_tensor_stride,
        tensor_stride=start_tensor_stride,
    )
    conv_tr1 = MinkowskiConvolutionTranspose(
        channels[0],
        channels[1],
        kernel_size=3,
        stride=2,
        generate_new_coords=True,
        dimension=D,
    ).double()
    conv1 = MinkowskiConvolution(
        channels[1], channels[1], kernel_size=3, dimension=D).double()
    conv_tr2 = MinkowskiConvolutionTranspose(
        channels[1],
        channels[2],
        kernel_size=3,
        stride=2,
        generate_new_coords=True,
        dimension=D,
    ).double()
    conv2 = MinkowskiConvolution(
        channels[2], channels[2], kernel_size=3, dimension=D).double()
    pruning = MinkowskiPruning()

    out1 = conv_tr1(input)
    # All generated features must be non-zero.
    self.assertTrue(torch.prod(torch.abs(out1.F) > 0).item() == 1)

    out1 = conv1(out1)
    # Randomly keep ~half of the coordinates.
    use_feat = torch.rand(len(out1)) < 0.5
    out1 = pruning(out1, use_feat)

    out2 = conv_tr2(out1)
    self.assertTrue(torch.prod(torch.abs(out2.F) > 0).item() == 1)

    use_feat = torch.rand(len(out2)) < 0.5
    out2 = pruning(out2, use_feat)
    out2 = conv2(out2)
    print(out2)

    out2.F.sum().backward()

    # Check gradient flow
    print(input.F.grad)
def test(self):
    """Convolution forward/backward test (new API): stride check, CPU/GPU parity, gradcheck, leak loop."""
    print(f"{self.__class__.__name__}: test")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coordinates=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        bias=True,
        dimension=D)
    conv = conv.double()
    output = conv(input)
    print(output)

    # The stride-2 convolution must double the tensor stride.
    self.assertEqual(input.coordinate_map_key.get_tensor_stride(), [1, 1])
    self.assertEqual(output.coordinate_map_key.get_tensor_stride(), [2, 2])

    if torch.cuda.is_available():
        # CPU and GPU outputs should agree statistically.
        input_gpu = SparseTensor(feats, coordinates=coords, device="cuda")
        conv_gpu = conv.cuda()
        output_gpu = conv_gpu(input_gpu)
        self.assertTrue(
            torch.allclose(output_gpu.F.var(0).cpu(), output.F.var(0)))
        self.assertTrue(
            torch.allclose(output_gpu.F.mean(0).cpu(), output.F.mean(0)))

    # Check backward
    fn = MinkowskiConvolutionFunction()
    conv = conv.cpu()
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                conv.kernel,
                conv.kernel_generator,
                conv.convolution_mode,
                input.coordinate_map_key,
                output.coordinate_map_key,
                input.coordinate_manager,
            ),
        ))

    # Repeated forward/backward to surface memory leaks.
    for i in range(LEAK_TEST_ITER):
        input = SparseTensor(feats, coordinates=coords)
        conv(input).F.sum().backward()
def test_gpu(self):
    """Run the convolution on CPU then CUDA and gradcheck the GPU path (new API)."""
    print(f"{self.__class__.__name__}: test_gpu")
    if not torch.cuda.is_available():
        return

    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        bias=True,
        dimension=D)
    print(conv)

    input = SparseTensor(feats, coordinates=coords)
    conv = conv.double()
    output = conv(input)
    print(output)

    device = torch.device("cuda")
    input = SparseTensor(feats.to(device), coordinates=coords.to(device))
    conv = conv.to(device)
    output = conv(input)
    print(output)

    # Check backward
    fn = MinkowskiConvolutionFunction()
    grad = output.F.clone().zero_()
    grad[0] = 1
    output.F.backward(grad)
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                conv.kernel,
                conv.kernel_generator,
                conv.convolution_mode,
                input.coordinate_map_key,
                None,
                input.coordinate_manager,
            ),
        ))
def test_network(self):
    """Run a dense->sparse->dense 4D network several times and check the gradient reaches the input."""
    dense_tensor = torch.rand(3, 4, 11, 11, 11, 11)  # BxCxD1xD2x....xDN
    dense_tensor.requires_grad = True

    # Since the shape is fixed, cache the coordinates for faster inference
    coordinates = dense_coordinates(dense_tensor.shape)

    network = nn.Sequential(
        # Add layers that can be applied on a regular pytorch tensor
        nn.ReLU(),
        MinkowskiToSparseTensor(remove_zeros=False, coordinates=coordinates),
        MinkowskiConvolution(4, 5, stride=2, kernel_size=3, dimension=4),
        MinkowskiBatchNorm(5),
        MinkowskiReLU(),
        MinkowskiConvolutionTranspose(5, 6, stride=2, kernel_size=3,
                                      dimension=4),
        MinkowskiToDenseTensor(
            dense_tensor.shape),  # must have the same tensor stride.
    )

    for i in range(5):
        print(f"Iteration: {i}")
        output = network(dense_tensor)
        output.sum().backward()

    assert dense_tensor.grad is not None
def test_kernelmap(self):
    """Print and count the kernel map of a stride-2 convolution (new API)."""
    print(f"{self.__class__.__name__}: test_kernelmap")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coordinates=coords)
    cm = input.coordinate_manager
    print("Input coords: ")
    print("Convolution: ")

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        bias=True,
        dimension=D,
    ).double()
    output = conv(input)

    iC = input.C.numpy()
    oC = output.C.numpy()
    print(iC)
    print(oC)

    # Map between tensor stride 1 (input) and 2 (output).
    in_maps, out_maps = output.coordinate_manager.get_kernel_map(
        1, 2, stride=2, kernel_size=3)
    kernel_index = 0
    for in_map, out_map in zip(in_maps, out_maps):
        for i, o in zip(in_map, out_map):
            print(kernel_index, iC[i], "->", oC[o])
        kernel_index += 1

    # Expected total number of in->out pairs for this fixture.
    self.assertTrue(sum(len(in_map) for in_map in in_maps) == 26)
def test(self):
    """Convolve onto explicitly specified output coordinates (legacy API)."""
    print(f"{self.__class__.__name__}: test")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels, batch_size=2)

    # Create random coordinates with tensor stride == 2
    out_coords, tensor_stride = get_random_coords()

    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)
    cm = input.coords_man
    print(cm._get_coords_key(1))

    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=1,
        bias=False,
        dimension=D).double()

    print('Initial input: ', input)
    print('Specified output coords: ', out_coords)
    output = conv(input, out_coords)

    # To specify the tensor stride
    out_coords_key = cm.create_coords_key(out_coords, tensor_stride=2)
    output = conv(input, out_coords_key)
    print('Conv output: ', output)

    output.F.sum().backward()
    print(input.F.grad)
def test_kernelmap(self):
    """Print and count the kernel map of a stride-2 convolution (legacy API)."""
    print(f"{self.__class__.__name__}: test_kernelmap")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)
    cm = input.coords_man
    ikey = cm.get_coords_key(1)
    print('Input coords: ')
    cm.print_diagnostics(ikey)
    print('Convolution: ')

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        has_bias=True,
        dimension=D).double()
    output = conv(input)

    iC = input.C.numpy()
    oC = output.C.numpy()
    print(iC)
    print(oC)

    # Each row is (kernel_index, in_index, out_index).
    kernel_map = output.coords_man.get_kernel_map(
        1, 2, stride=2, kernel_size=3)
    for row in kernel_map:
        k, i, o = row
        print(k.item(), iC[i], oC[o])

    # Expected total number of in->out pairs for this fixture.
    self.assertTrue(len(kernel_map) == 26)
def test_forward(self):
    """Benchmark DIRECT_GEMM vs COPY_GEMM backward over a parameter sweep and pickle the results.

    For each (in-channels, out-channels, batch size, voxel size) combination,
    records which convolution mode was faster (Y) and by how much (W).
    """
    coords, colors, pcd = load_file("1.ply")
    device = "cuda"
    X = []  # problem descriptors
    Y = []  # index of the faster mode
    W = []  # absolute time difference between the two modes

    for IC in [3, 8, 16, 24, 32, 48, 64, 96, 128]:
        for OC in [3, 8, 16, 24, 32, 48, 64, 96, 128, 192, 256]:
            for batch_size in [1, 5, 10, 15, 20]:
                for voxel_size in [0.2, 0.1, 0.075, 0.05, 0.025]:
                    min_times = []
                    for mode in [
                            _C.ConvolutionMode.DIRECT_GEMM,
                            _C.ConvolutionMode.COPY_GEMM,
                    ]:
                        min_time = 100000
                        # Quantize and replicate the cloud batch_size times.
                        dcoords = torch.from_numpy(
                            np.floor(coords / voxel_size)).int()
                        bcoords = batched_coordinates(
                            [dcoords for i in range(batch_size)])
                        in_feats = torch.rand(len(bcoords), IC).to(0)
                        sinput = SparseTensor(
                            in_feats, coordinates=bcoords, device=device)
                        conv = MinkowskiConvolution(
                            in_channels=IC,
                            out_channels=OC,
                            kernel_size=3,
                            stride=2,
                            convolution_mode=mode,
                            dimension=3,
                        ).to(device)
                        soutput = conv(sinput)
                        loss = soutput.F.sum()
                        # NOTE(review): backward() is called repeatedly on the
                        # same graph without retain_graph — verify this is
                        # supported by the custom autograd functions used here.
                        for i in range(10):
                            stime = time.time()
                            loss.backward()
                            min_time = min(time.time() - stime, min_time)
                        min_times.append(min_time)

                    X.append([
                        IC,
                        OC,
                        len(sinput),
                        len(soutput),
                    ])
                    Y.append(np.argmin(min_times))
                    W.append(np.abs(min_times[0] - min_times[1]))
                    print(X[-1], Y[-1], W[-1])

    import pickle as pkl

    with open("forward-speed.pkl", "wb") as f:
        pkl.dump([X, Y, W], f)
def test_gpu(self):
    """Forward conv then generative conv-transpose on CUDA and gradcheck the transpose (new API)."""
    print(f"{self.__class__.__name__}: test_gpu")
    if not torch.cuda.is_available():
        return

    device = torch.device("cuda")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats.to(device), coordinates=coords.to(device))

    # Initialize context
    conv = (
        MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D,
        )
        .double()
        .to(device)
    )
    conv_tr = (
        MinkowskiGenerativeConvolutionTranspose(
            out_channels,
            in_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D,
        )
        .double()
        .to(device)
    )
    tr_input = conv(input)
    print(tr_input)
    output = conv_tr(tr_input)
    print(output)

    # Check backward
    fn = MinkowskiConvolutionTransposeFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                tr_input.F,
                conv_tr.kernel,
                conv_tr.kernel_generator,
                conv_tr.convolution_mode,
                tr_input.coordinate_map_key,
                output.coordinate_map_key,
                tr_input.coordinate_manager,
            ),
        )
    )
def test_kernelmap(self):
    """Print the kernel map of a stride-2 convolution (legacy API, no assertions)."""
    print(f"{self.__class__.__name__}: test_kernelmap")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        has_bias=True,
        dimension=D)
    conv = conv.double()
    output = conv(input)
    print(input.C, output.C)
    print(output.coords_man.get_kernel_map(1, 2, stride=2, kernel_size=3))
def test_unpooling_gpu(self):
    """Gradcheck the pooling-transpose on CPU, then a pooling function on CUDA (legacy API)."""
    if not torch.cuda.is_available():
        return

    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    input = SparseTensor(feats, coords=coords)
    conv = MinkowskiConvolution(
        in_channels, out_channels, kernel_size=3, stride=2, dimension=D)
    conv = conv.double()
    unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
    input = conv(input)
    output = unpool(input)
    print(output)

    # Check backward (CPU)
    fn = MinkowskiPoolingTransposeFunction()
    self.assertTrue(
        gradcheck(fn,
                  (input.F, input.tensor_stride, unpool.stride,
                   unpool.kernel_size, unpool.dilation, unpool.region_type_,
                   unpool.region_offset_, False, input.coords_key, None,
                   input.coords_man)))

    device = torch.device('cuda')
    with torch.cuda.device(0):
        input = input.to(device)
        output = unpool(input)
        print(output)

        # Check backward (GPU) — note this uses the avg-pooling function
        # with the transpose flag set to True.
        fn = MinkowskiAvgPoolingFunction()
        self.assertTrue(
            gradcheck(fn,
                      (input.F, input.tensor_stride, unpool.stride,
                       unpool.kernel_size, unpool.dilation,
                       unpool.region_type_, unpool.region_offset_, True,
                       input.coords_key, None, input.coords_man)))
def test_kernel_map(self):
    """Chain two strided convolutions on CUDA with the speed-optimized algorithm."""
    print(f"{self.__class__.__name__}: test_gpu")
    if not torch.cuda.is_available():
        return

    in_channels, out_channels, D = 2, 2, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()

    # Initialize context
    conv1 = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=2,
        stride=2,
        bias=True,
        dimension=D).double()
    conv2 = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        bias=True,
        dimension=D).double()

    device = torch.device("cuda")
    input = SparseTensor(
        feats,
        coordinates=coords,
        device=device,
        minkowski_algorithm=MinkowskiAlgorithm.SPEED_OPTIMIZED,
    )
    print(input)

    conv1 = conv1.to(device)
    conv2 = conv2.to(device)
    output = conv2(conv1(input))
    print(output)
def test_gpu(self):
    """Run the convolution on CPU then CUDA and gradcheck the GPU path (legacy API)."""
    print(f"{self.__class__.__name__}: test_gpu")
    if not torch.cuda.is_available():
        return

    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        has_bias=True,
        dimension=D)
    print(conv)
    conv = conv.double()
    output = conv(input)
    print(output)

    device = torch.device('cuda')
    input = input.to(device)
    conv = conv.to(device)
    output = conv(input)
    print(output)
    print(output.F, output.coords)

    # Check backward
    fn = MinkowskiConvolutionFunction()
    grad = output.F.clone().zero_()
    grad[0] = 1
    output.F.backward(grad)
    self.assertTrue(
        gradcheck(fn, (input.F, conv.kernel, input.tensor_stride, conv.stride,
                       conv.kernel_size, conv.dilation, conv.region_type_,
                       conv.region_offset_, input.coords_key, None,
                       input.coords_man)))
def test_expansion(self):
    """Run a coordinate-expanding convolution on CPU, then on CUDA if available."""
    print(f"{self.__class__.__name__}: test_expansion")
    in_channels, out_channels, D = 2, 2, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        bias=False,
        expand_coordinates=True,
        dimension=D,
    ).double()

    input = SparseTensor(
        feats,
        coordinates=coords,
        minkowski_algorithm=MinkowskiAlgorithm.SPEED_OPTIMIZED,
    )
    print(input)
    output = conv(input)
    print(output)

    if not torch.cuda.is_available():
        return

    # Repeat on CUDA with the same settings.
    input = SparseTensor(
        feats,
        coordinates=coords,
        minkowski_algorithm=MinkowskiAlgorithm.SPEED_OPTIMIZED,
        device="cuda",
    )
    conv = conv.to("cuda")
    print(input)
    output = conv(input)
    print(output)
def test_gpu(self):
    """Gradcheck the convolution on CUDA for both DIRECT_GEMM and COPY_GEMM modes."""
    print(f"{self.__class__.__name__}: test_gpu")
    if not torch.cuda.is_available():
        return

    in_channels, out_channels, D = 3, 2, 2
    coords, feats, labels = data_loader(in_channels, batch_size=20)
    feats = feats.double()
    feats.requires_grad_()
    device = torch.device("cuda")
    conv = (
        MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=2,
            stride=1,
            bias=False,
            dimension=D,
        )
        .to(device)
        .double()
    )

    # Initialize context
    for mode in [_C.ConvolutionMode.DIRECT_GEMM, _C.ConvolutionMode.COPY_GEMM]:
        conv.convolution_mode = mode
        input = SparseTensor(feats, coordinates=coords, device=device)
        print(mode, input.F.numel(), len(input), input)
        output = conv(input)
        print(output)

        # Check backward
        fn = MinkowskiConvolutionFunction()
        grad = output.F.clone().zero_()
        grad[0] = 1
        output.F.backward(grad)
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    conv.kernel,
                    conv.kernel_generator,
                    conv.convolution_mode,
                    input.coordinate_map_key,
                    None,
                    input.coordinate_manager,
                ),
            )
        )
def test(self):
    """Forward conv then generative conv-transpose and gradcheck the transpose (new API)."""
    print(f"{self.__class__.__name__}: test")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coordinates=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        bias=True,
        dimension=D).double()
    conv_tr = MinkowskiGenerativeConvolutionTranspose(
        out_channels,
        in_channels,
        kernel_size=3,
        stride=2,
        bias=True,
        dimension=D).double()

    print("Initial input: ", input)
    input = conv(input)
    print("Conv output: ", input)

    output = conv_tr(input)
    print("Conv tr output: ", output)

    # Check backward
    fn = MinkowskiConvolutionTransposeFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                conv_tr.kernel,
                conv_tr.kernel_generator,
                conv_tr.convolution_mode,
                input.coordinate_map_key,
                output.coordinate_map_key,
                input.coordinate_manager,
            ),
        ))
def test_kernelmap_gpu(self):
    """Print and count the kernel map of a stride-2 convolution on CUDA (new API)."""
    print(f"{self.__class__.__name__}: test_kernelmap_gpu")
    if not torch.cuda.is_available():
        return

    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(
        feats,
        coordinates=coords,
        minkowski_algorithm=MinkowskiAlgorithm.SPEED_OPTIMIZED,
        device="cuda",
    )

    # Initialize context
    conv = (
        MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D,
        )
        .double()
        .cuda()
    )
    output = conv(input)

    iC = input.C.cpu().numpy()
    oC = output.C.cpu().numpy()
    print(iC)
    print(oC)

    # kernel_maps maps kernel_index -> (in_indices, out_indices).
    kernel_maps = output.coordinate_manager.kernel_map(
        1,
        2,
        stride=2,
        kernel_size=3,
    )
    for kernel_index, in_out_map in kernel_maps.items():
        for i, o in zip(in_out_map[0], in_out_map[1]):
            print(kernel_index, iC[i], "->", oC[o])

    # Expected total number of in->out pairs for this fixture.
    self.assertTrue(
        sum(len(in_map[0]) for k, in_map in kernel_maps.items()) == 16)
def test_kernelmap_gpu(self):
    """Print and count the GPU kernel map of a stride-2 convolution (legacy API)."""
    print(f"{self.__class__.__name__}: test_kernelmap_gpu")
    if not torch.cuda.is_available():
        return

    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords)
    cm = input.coords_man
    ikey = cm._get_coords_key(1)
    print('Input coords: ')
    cm.print_diagnostics(ikey)
    print('Convolution: ')

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        has_bias=True,
        dimension=D).double()
    output = conv(input)

    iC = input.C.numpy()
    oC = output.C.numpy()
    print(iC)
    print(oC)

    # Request the kernel map computed on the GPU.
    in_maps, out_maps = output.coords_man.get_kernel_map(
        1, 2, stride=2, kernel_size=3, on_gpu=True)
    kernel_index = 0
    for in_map, out_map in zip(in_maps, out_maps):
        for i, o in zip(in_map, out_map):
            print(kernel_index, iC[i], '->', oC[o])
        kernel_index += 1

    # Expected total number of in->out pairs for this fixture.
    self.assertTrue(sum(len(in_map) for in_map in in_maps) == 26)
def test_gpu(self):
    """Forward conv then conv-transpose on CUDA and gradcheck the transpose (legacy API)."""
    print(f"{self.__class__.__name__}: test_gpu")
    if not torch.cuda.is_available():
        return

    device = torch.device('cuda')
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coords=coords).to(device)

    # Initialize context
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        has_bias=True,
        dimension=D).double().to(device)
    conv_tr = MinkowskiConvolutionTranspose(
        out_channels,
        in_channels,
        kernel_size=3,
        stride=2,
        has_bias=True,
        dimension=D).double().to(device)

    tr_input = conv(input)
    print(tr_input)
    output = conv_tr(tr_input)
    print(output)

    # Check backward
    fn = MinkowskiConvolutionTransposeFunction()
    self.assertTrue(
        gradcheck(fn,
                  (tr_input.F, conv_tr.kernel, tr_input.tensor_stride,
                   conv_tr.stride, conv_tr.kernel_size, conv_tr.dilation,
                   conv_tr.region_type_, conv_tr.region_offset_, False,
                   tr_input.coords_key, None, tr_input.coords_man)))
def __init__(self, nchannels, spatial_sigma, chromatic_sigma,
             meanfield_iterations, is_temporal, config, **kwargs):
    """Set up a mean-field CRF wrapper around a Minkowski convolution.

    Args:
        nchannels: number of input/output channels (the wrapper preserves width).
        spatial_sigma: spatial kernel bandwidth, stored for later use.
        chromatic_sigma: chromatic kernel bandwidth, stored for later use.
        meanfield_iterations: number of mean-field update iterations.
        is_temporal: when True, use a 7-D coordinate space (adds time), else 6-D.
        config: project config; provides wrapper_kernel_size / wrapper_region_type.
    """
    # 6 spatial-chromatic dimensions, plus one temporal dimension if requested.
    D = 7 if is_temporal else 6
    self.is_temporal = is_temporal
    # Setup metadata
    super(MeanField, self).__init__(nchannels, nchannels, config, D=D)

    self.spatial_sigma = spatial_sigma
    self.chromatic_sigma = chromatic_sigma
    # temporal sigma is 1
    self.meanfield_iterations = meanfield_iterations

    self.pixel_dist = 1
    self.stride = 1
    self.dilation = 1

    conv = MinkowskiConvolution(
        nchannels,
        nchannels,
        kernel_size=config.wrapper_kernel_size,
        has_bias=False,
        region_type=convert_region_type(config.wrapper_region_type),
        dimension=D)

    # Create a region_offset
    self.region_type_, self.region_offset_, _ = me_convert_region_type(
        conv.region_type, 1, conv.kernel_size, conv.up_stride, conv.dilation,
        conv.region_offset, conv.axis_types, conv.dimension)

    # Check whether the mapping is required
    self.requires_mapping = False

    self.conv = conv
    self.kernel = conv.kernel

    # One softmax and one convolution function per mean-field iteration.
    # NOTE(review): plain dicts (not nn.ModuleDict) — these entries are not
    # registered as submodules; confirm this is intentional.
    self.convs = {}
    self.softmaxes = {}
    for i in range(self.meanfield_iterations):
        self.softmaxes[i] = nn.Softmax(dim=1)
        self.convs[i] = MinkowskiConvolutionFunction()