Example #1
    def test_empty(self):
        in_channels = 2
        coords, feats, labels = data_loader(in_channels, batch_size=1)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords)
        # All-False mask: prune every coordinate so the output is empty.
        use_feat = torch.BoolTensor(len(input))
        use_feat.zero_()
        pruning = MinkowskiPruning()
        output = pruning(input, use_feat)
        print(input)
        print(use_feat)
        print(output)

        # Check backward
        fn = MinkowskiPruningFunction()
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    use_feat,
                    input.coordinate_map_key,
                    output.coordinate_map_key,
                    input.coordinate_manager,
                ),
            ))
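
These snippets come from the MinkowskiEngine test suite, so they depend on the `data_loader` helper and on `gradcheck`, which is also why the features are cast to `double` (finite-difference gradient checks need 64-bit precision). Outside the test harness, pruning only needs a `SparseTensor` and a boolean keep-mask with one entry per coordinate. Below is a minimal standalone sketch, assuming the 0.5-style MinkowskiEngine API used in this example; the coordinate and feature values are made up for illustration.

    import torch
    import MinkowskiEngine as ME

    # Four points in a single batch; the first coordinate column is the batch index.
    coords = torch.IntTensor([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1]])
    feats = torch.rand(4, 2)

    stensor = ME.SparseTensor(feats, coordinates=coords)
    keep = torch.tensor([True, False, True, False])  # one entry per coordinate

    pruning = ME.MinkowskiPruning()
    pruned = pruning(stensor, keep)  # keeps only the rows where `keep` is True
    print(pruned)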
Example #2
    def test_device(self):
        in_channels, D = 2, 2
        device = torch.device("cuda")
        coords, feats, labels = data_loader(in_channels, batch_size=1)
        feats = feats.double()
        feats.requires_grad_()

        use_feat = (torch.rand(feats.size(0)) < 0.5).to(device)
        pruning = MinkowskiPruning()

        input = SparseTensor(feats, coords, device=device)
        output = pruning(input, use_feat)
        print(input)
        print(output)

        fn = MinkowskiPruningFunction()
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    use_feat,
                    input.coordinate_map_key,
                    output.coordinate_map_key,
                    input.coordinate_manager,
                ),
            ))
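
Example #2 runs the same gradient check on the GPU: both the `SparseTensor` and the keep-mask are placed on the CUDA device (note the `.to(device)` on `use_feat`), so the backward pass through `MinkowskiPruningFunction` is verified with device-resident coordinate maps as well.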
Example #3
    def test_pruning(self):
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        use_feat = torch.rand(feats.size(0)) < 0.5
        pruning = MinkowskiPruning(D)
        output = pruning(input, use_feat)
        print(use_feat, output)

        # Check backward
        fn = MinkowskiPruningFunction()
        self.assertTrue(
            gradcheck(fn, (input.F, use_feat, input.coords_key,
                           output.coords_key, input.coords_man)))

        device = torch.device('cuda')
        with torch.cuda.device(0):
            input = input.to(device)
            output = pruning(input, use_feat)
            print(output)

        self.assertTrue(
            gradcheck(fn, (input.F, use_feat, input.coords_key,
                           output.coords_key, input.coords_man)))
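
Example #3 uses the older (pre-0.5) MinkowskiEngine API: `SparseTensor` is constructed with `coords=` and exposes `coords_key` / `coords_man`, and `MinkowskiPruning` takes the spatial dimension `D` as an argument. In the 0.5-style API of Examples #1 and #2, the equivalents are `coordinate_map_key` and `coordinate_manager`, and `MinkowskiPruning()` no longer needs the dimension.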
Example #4
    def test_with_convtr(self):
        channels, D = [2, 3, 4], 2
        coords, feats, labels = data_loader(channels[0], batch_size=1)
        feats = feats.double()
        feats.requires_grad_()
        # Create a sparse tensor with large tensor strides for upsampling
        start_tensor_stride = 4
        input = SparseTensor(
            feats,
            coords * start_tensor_stride,
            tensor_stride=start_tensor_stride,
        )
        conv_tr1 = MinkowskiConvolutionTranspose(
            channels[0],
            channels[1],
            kernel_size=3,
            stride=2,
            generate_new_coords=True,
            dimension=D,
        ).double()
        conv1 = MinkowskiConvolution(channels[1],
                                     channels[1],
                                     kernel_size=3,
                                     dimension=D).double()
        conv_tr2 = MinkowskiConvolutionTranspose(
            channels[1],
            channels[2],
            kernel_size=3,
            stride=2,
            generate_new_coords=True,
            dimension=D,
        ).double()
        conv2 = MinkowskiConvolution(channels[2],
                                     channels[2],
                                     kernel_size=3,
                                     dimension=D).double()
        pruning = MinkowskiPruning()

        out1 = conv_tr1(input)
        # With generate_new_coords=True, every generated coordinate should carry
        # a nonzero feature.
        self.assertTrue(torch.prod(torch.abs(out1.F) > 0).item() == 1)
        out1 = conv1(out1)
        use_feat = torch.rand(len(out1)) < 0.5
        out1 = pruning(out1, use_feat)

        out2 = conv_tr2(out1)
        self.assertTrue(torch.prod(torch.abs(out2.F) > 0).item() == 1)
        use_feat = torch.rand(len(out2)) < 0.5
        out2 = pruning(out2, use_feat)
        out2 = conv2(out2)

        print(out2)

        out2.F.sum().backward()

        # Check gradient flow
        print(input.F.grad)
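
In Example #4 the keep-mask is random, which is enough for a gradient test. In a real upsampling/generative decoder the mask is typically produced by the network itself, e.g. a per-voxel classifier whose score decides which generated coordinates survive. A minimal sketch of that pattern, again assuming the 0.5-style API; the `PruningHead` name and the zero threshold are illustrative, not part of MinkowskiEngine.

    import torch
    import MinkowskiEngine as ME

    class PruningHead(torch.nn.Module):
        """Scores each voxel and prunes the ones with a non-positive score."""

        def __init__(self, channels):
            super().__init__()
            self.classifier = ME.MinkowskiLinear(channels, 1)
            self.pruning = ME.MinkowskiPruning()

        def forward(self, x):
            # Boolean keep-mask with one entry per voxel of the sparse tensor.
            keep = (self.classifier(x).F > 0).squeeze(1)
            return self.pruning(x, keep)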
Example #5
    def test_device(self):
        in_channels = 2
        coords, feats, labels = data_loader(in_channels, batch_size=1)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords, device="cuda")
        use_feat = torch.rand(feats.size(0)) < 0.5
        pruning = MinkowskiPruning()
        output = pruning(input, use_feat.cuda())
        print(input)
        print(use_feat)
        print(output)