def test(self):
    """CPU forward/backward check for MinkowskiConvolutionTranspose.

    Runs conv -> conv_tr on a small 2D sparse tensor and verifies the
    transpose-convolution autograd Function with gradcheck.
    """
    print(f"{self.__class__.__name__}: test")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    # Double precision keeps gradcheck's numerical Jacobian stable.
    feats = feats.double()
    feats.requires_grad_()
    input = SparseTensor(feats, coordinates=coords)

    # Initialize context.
    # NOTE(review): migrated from the deprecated 0.4 kwargs
    # (coords=/has_bias=) to the 0.5 API used by the sibling tests.
    conv = MinkowskiConvolution(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=2,
        bias=True,
        dimension=D,
    ).double()
    conv_tr = MinkowskiConvolutionTranspose(
        out_channels,
        in_channels,
        kernel_size=2,
        stride=2,
        bias=True,
        dimension=D,
    ).double()

    input = conv(input)
    output = conv_tr(input)
    print(output)

    # Check backward: call the raw autograd Function with the 0.5-style
    # argument tuple (kernel_generator / convolution_mode / map keys).
    fn = MinkowskiConvolutionTransposeFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                input.F,
                conv_tr.kernel,
                conv_tr.kernel_generator,
                conv_tr.convolution_mode,
                input.coordinate_map_key,
                output.coordinate_map_key,
                input.coordinate_manager,
            ),
        )
    )
def test_gpu(self):
    """GPU forward/backward check for MinkowskiGenerativeConvolutionTranspose.

    Skips silently when CUDA is unavailable; otherwise runs
    conv -> generative conv_tr on the GPU and gradchecks the
    transpose-convolution autograd Function.
    """
    print(f"{self.__class__.__name__}: test_gpu")
    if not torch.cuda.is_available():
        return

    device = torch.device("cuda")
    n_in, n_out, dim = 2, 3, 2
    coords, feats, labels = data_loader(n_in)
    # Double precision for a stable numerical Jacobian in gradcheck.
    feats = feats.double()
    feats.requires_grad_()
    sparse_in = SparseTensor(feats.to(device), coordinates=coords.to(device))

    # Initialize context
    conv = (
        MinkowskiConvolution(
            n_in, n_out, kernel_size=3, stride=2, bias=True, dimension=dim
        )
        .double()
        .to(device)
    )
    conv_tr = (
        MinkowskiGenerativeConvolutionTranspose(
            n_out, n_in, kernel_size=3, stride=2, bias=True, dimension=dim
        )
        .double()
        .to(device)
    )

    tr_input = conv(sparse_in)
    print(tr_input)
    output = conv_tr(tr_input)
    print(output)

    # Check backward
    fn = MinkowskiConvolutionTransposeFunction()
    grad_args = (
        tr_input.F,
        conv_tr.kernel,
        conv_tr.kernel_generator,
        conv_tr.convolution_mode,
        tr_input.coordinate_map_key,
        output.coordinate_map_key,
        tr_input.coordinate_manager,
    )
    self.assertTrue(gradcheck(fn, grad_args))
def test(self):
    """CPU forward/backward check for MinkowskiGenerativeConvolutionTranspose.

    Builds a small 2D sparse tensor, pushes it through conv then the
    generative transpose convolution, and gradchecks the underlying
    autograd Function.
    """
    print(f"{self.__class__.__name__}: test")
    n_in, n_out, dim = 2, 3, 2
    coords, feats, labels = data_loader(n_in)
    # Double precision for a stable numerical Jacobian in gradcheck.
    feats = feats.double()
    feats.requires_grad_()
    sparse_in = SparseTensor(feats, coordinates=coords)

    # Initialize context
    conv = MinkowskiConvolution(
        n_in, n_out, kernel_size=3, stride=2, bias=True, dimension=dim
    ).double()
    conv_tr = MinkowskiGenerativeConvolutionTranspose(
        n_out, n_in, kernel_size=3, stride=2, bias=True, dimension=dim
    ).double()

    print("Initial input: ", sparse_in)
    conv_out = conv(sparse_in)
    print("Conv output: ", conv_out)
    output = conv_tr(conv_out)
    print("Conv tr output: ", output)

    # Check backward
    fn = MinkowskiConvolutionTransposeFunction()
    grad_args = (
        conv_out.F,
        conv_tr.kernel,
        conv_tr.kernel_generator,
        conv_tr.convolution_mode,
        conv_out.coordinate_map_key,
        output.coordinate_map_key,
        conv_out.coordinate_manager,
    )
    self.assertTrue(gradcheck(fn, grad_args))
def test_gpu(self):
    """GPU forward/backward check for MinkowskiConvolutionTranspose.

    Skips silently when CUDA is unavailable; otherwise runs
    conv -> conv_tr on the GPU and gradchecks the transpose-convolution
    autograd Function.
    """
    print(f"{self.__class__.__name__}: test_gpu")
    if not torch.cuda.is_available():
        return

    device = torch.device("cuda")
    in_channels, out_channels, D = 2, 3, 2
    coords, feats, labels = data_loader(in_channels)
    # Double precision keeps gradcheck's numerical Jacobian stable.
    feats = feats.double()
    feats.requires_grad_()
    # NOTE(review): migrated from the deprecated 0.4 API (coords= kwarg,
    # SparseTensor.to(device), has_bias=) to the 0.5 API used by the
    # sibling GPU test: tensors are moved to the device before
    # constructing the SparseTensor.
    input = SparseTensor(feats.to(device), coordinates=coords.to(device))

    # Initialize context
    conv = (
        MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D,
        )
        .double()
        .to(device)
    )
    conv_tr = (
        MinkowskiConvolutionTranspose(
            out_channels,
            in_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D,
        )
        .double()
        .to(device)
    )

    tr_input = conv(input)
    print(tr_input)
    output = conv_tr(tr_input)
    print(output)

    # Check backward: 0.5-style argument tuple (kernel_generator /
    # convolution_mode / coordinate map keys) replaces the old 12-arg form.
    fn = MinkowskiConvolutionTransposeFunction()
    self.assertTrue(
        gradcheck(
            fn,
            (
                tr_input.F,
                conv_tr.kernel,
                conv_tr.kernel_generator,
                conv_tr.convolution_mode,
                tr_input.coordinate_map_key,
                output.coordinate_map_key,
                tr_input.coordinate_manager,
            ),
        )
    )