Example #1
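The snippets below are collected from the MinkowskiEngine test suite and omit their module-level imports. They also mix two API generations: the pre-0.5 names (coords=, has_bias=, coords_key, coords_man) and the 0.5+ names (coordinates=, bias=, coordinate_map_key, coordinate_manager). A minimal sketch of the imports they assume; exact module paths and the pooling Function names vary between MinkowskiEngine versions, so treat this as illustrative:

    import torch
    from torch.autograd import gradcheck  # some versions use the project's own gradcheck wrapper

    import MinkowskiEngine as ME
    from MinkowskiEngine import (
        SparseTensor,
        MinkowskiConvolution,
        MinkowskiConvolutionFunction,
        MinkowskiConvolutionTranspose,
        MinkowskiConvolutionTransposeFunction,
        MinkowskiPoolingTranspose,
    )
    # The pooling-transpose Function classes differ by version:
    # MinkowskiPoolingTransposeFunction / MinkowskiAvgPoolingFunction pre-0.5,
    # MinkowskiLocalPoolingTransposeFunction in 0.5+.

    # Test-suite helper; the module path depends on the version.
    from tests.python.common import data_loader

    LEAK_TEST_ITER = 100  # module-level constant used by Example #8 (value illustrative)
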
    def test_unpool(self):
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        input = SparseTensor(feats, coords)
        conv = MinkowskiConvolution(
            in_channels, out_channels, kernel_size=3, stride=2, dimension=D
        )
        conv = conv.double()
        unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
        input = conv(input)
        output = unpool(input)
        print(output)

        # Check backward
        fn = MinkowskiLocalPoolingTransposeFunction()

        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    unpool.pooling_mode,
                    unpool.kernel_generator,
                    input.coordinate_map_key,
                    None,
                    input.coordinate_manager,
                ),
            )
        )
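Every example first casts both the features and the module to double precision before calling gradcheck, because gradcheck estimates gradients by finite differences and float32 is too noisy for the comparison to pass. A generic sketch of that pattern in plain PyTorch, independent of MinkowskiEngine:

    import torch
    from torch.autograd import gradcheck

    # gradcheck compares analytic gradients against finite differences,
    # so inputs must be float64 and require gradients.
    x = torch.randn(4, 2, dtype=torch.float64, requires_grad=True)
    w = torch.randn(2, 3, dtype=torch.float64, requires_grad=True)
    assert gradcheck(lambda a, b: (a @ b).sum(), (x, w))
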
Example #2
    def test(self):
        print(f"{self.__class__.__name__}: test")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    has_bias=True,
                                    dimension=D)
        conv = conv.double()
        output = conv(input)
        print(output)

        kernel_map = input.coords_man.get_kernel_map(1,
                                                     2,
                                                     stride=2,
                                                     kernel_size=3)
        print(kernel_map)

        # Check backward
        fn = MinkowskiConvolutionFunction()

        self.assertTrue(
            gradcheck(fn, (input.F, conv.kernel, input.tensor_stride,
                           conv.stride, conv.kernel_size, conv.dilation,
                           conv.region_type_, conv.region_offset_,
                           input.coords_key, None, input.coords_man)))
Example #3
    def test_unpool(self):
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        input = SparseTensor(feats, coords)
        conv = MinkowskiConvolution(
            in_channels, out_channels, kernel_size=3, stride=2, dimension=D
        )
        conv = conv.double()
        unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
        input = conv(input)
        output = unpool(input)
        print(output)

        # Check backward
        fn = MinkowskiPoolingTransposeFunction()

        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    input.tensor_stride,
                    unpool.stride,
                    unpool.kernel_size,
                    unpool.dilation,
                    unpool.region_type_,
                    unpool.region_offset_,
                    False,
                    input.coords_key,
                    None,
                    input.coords_man,
                ),
            )
        )
Example #4
    def test(self):
        print(f"{self.__class__.__name__}: test_dense")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    has_bias=True,
                                    dimension=D)
        conv = conv.double()
        output = conv(input)
        print(input.C, output.C)

        # Convert to a dense tensor
        dense_output, min_coord, tensor_stride = output.dense()

        dense_output, min_coord, tensor_stride = output.dense(
            min_coords=torch.IntTensor([-2, -2]),
            max_coords=torch.IntTensor([4, 4]))

        print(dense_output)
        print(min_coord)
        print(tensor_stride)

        print(feats.grad)

        loss = dense_output.sum()
        loss.backward()

        print(feats.grad)
Example #5
    def test(self):
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    has_bias=True,
                                    dimension=D)
        conv = conv.double()
        conv_tr = MinkowskiConvolutionTranspose(out_channels,
                                                in_channels,
                                                kernel_size=2,
                                                stride=2,
                                                has_bias=True,
                                                dimension=D)
        conv_tr = conv_tr.double()
        input = conv(input)
        output = conv_tr(input)
        print(output)

        # Check backward
        fn = MinkowskiConvolutionTransposeFunction()

        self.assertTrue(
            gradcheck(fn,
                      (input.F, conv_tr.kernel, input.tensor_stride,
                       conv_tr.stride, conv_tr.kernel_size, conv_tr.dilation,
                       conv_tr.region_type_, conv_tr.region_offset_,
                       input.coords_key, None, input.coords_man)))
Example #6
    def test_unpool_gpu(self):
        if not torch.cuda.is_available():
            return

        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        input = SparseTensor(feats, coords)
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    dimension=D)
        conv = conv.double()
        unpool = MinkowskiPoolingTranspose(kernel_size=3,
                                           stride=2,
                                           dimension=D)
        input = conv(input)
        output = unpool(input)
        print(output)
        # Check backward
        fn = MinkowskiLocalPoolingTransposeFunction()

        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    unpool.pooling_mode,
                    unpool.kernel_generator,
                    input.coordinate_map_key,
                    None,
                    input.coordinate_manager,
                ),
            ))

        with torch.cuda.device(0):
            conv = conv.to("cuda")
            input = SparseTensor(feats, coords, device="cuda")
            input = conv(input)
            input.requires_grad_()
            output = unpool(input)
            print(output)

        # Check backward
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    unpool.pooling_mode,
                    unpool.kernel_generator,
                    input.coordinate_map_key,
                    None,
                    input.coordinate_manager,
                ),
            ))
Example #7
    def test(self):
        print(f"{self.__class__.__name__}: test_dense")
        in_channels, out_channels, D = 2, 3, 2
        coords1 = torch.IntTensor([[0, 0], [0, 1], [1, 1]])
        feats1 = torch.DoubleTensor([[1, 2], [3, 4], [5, 6]])

        coords2 = torch.IntTensor([[1, 1], [1, 2], [2, 1]])
        feats2 = torch.DoubleTensor([[7, 8], [9, 10], [11, 12]])
        coords, feats = ME.utils.sparse_collate([coords1, coords2],
                                                [feats1, feats2])
        input = SparseTensor(feats, coords)
        input.requires_grad_()
        dinput, min_coord, tensor_stride = input.dense()
        self.assertTrue(dinput[0, 0, 0, 1] == 3)
        self.assertTrue(dinput[0, 1, 0, 1] == 4)
        self.assertTrue(dinput[0, 0, 1, 1] == 5)
        self.assertTrue(dinput[0, 1, 1, 1] == 6)

        self.assertTrue(dinput[1, 0, 1, 1] == 7)
        self.assertTrue(dinput[1, 1, 1, 1] == 8)
        self.assertTrue(dinput[1, 0, 2, 1] == 11)
        self.assertTrue(dinput[1, 1, 2, 1] == 12)

        # Initialize context
        conv = MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D,
        )
        conv = conv.double()
        output = conv(input)
        print(input.C, output.C)

        # Convert to a dense tensor
        dense_output, min_coord, tensor_stride = output.dense()
        print(dense_output.shape)
        print(dense_output)
        print(min_coord)
        print(tensor_stride)

        dense_output, min_coord, tensor_stride = output.dense(
            min_coordinate=torch.IntTensor([-2, -2]))

        print(dense_output)
        print(min_coord)
        print(tensor_stride)

        print(feats.grad)

        loss = dense_output.sum()
        loss.backward()

        print(feats.grad)
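The assertions at the top of Example #7 rely on dense() returning the features in batch x channel x spatial layout, together with the minimum coordinate and the tensor stride, so dinput is indexed as dinput[batch, channel, x, y]. A small standalone sketch of that indexing using only batch 0 of the data above, assuming the MinkowskiEngine 0.5+ API:

    import torch
    import MinkowskiEngine as ME

    # Three 2D points (batch index first) with 2-channel double features,
    # identical to coords1/feats1 in Example #7.
    coords = torch.IntTensor([[0, 0, 0], [0, 0, 1], [0, 1, 1]])
    feats = torch.DoubleTensor([[1, 2], [3, 4], [5, 6]])
    dense, min_coord, tensor_stride = ME.SparseTensor(feats, coords).dense()
    # dense[batch, channel, x, y]: the point at (0, 1) holds features (3, 4).
    assert dense[0, 0, 0, 1] == 3 and dense[0, 1, 1, 1] == 6
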
Example #8
    def test(self):
        print(f"{self.__class__.__name__}: test")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coordinates=coords)
        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    bias=True,
                                    dimension=D)
        conv = conv.double()
        output = conv(input)
        print(output)

        self.assertEqual(input.coordinate_map_key.get_tensor_stride(), [1, 1])
        self.assertEqual(output.coordinate_map_key.get_tensor_stride(), [2, 2])

        if torch.cuda.is_available():
            input_gpu = SparseTensor(feats, coordinates=coords, device="cuda")
            conv_gpu = conv.cuda()
            output_gpu = conv_gpu(input_gpu)
            self.assertTrue(
                torch.allclose(output_gpu.F.var(0).cpu(), output.F.var(0)))
            self.assertTrue(
                torch.allclose(output_gpu.F.mean(0).cpu(), output.F.mean(0)))

        # kernel_map = input.coords_man.kernel_map(
        #     1, 2, stride=2, kernel_size=3)
        # print(kernel_map)

        # Check backward
        fn = MinkowskiConvolutionFunction()

        conv = conv.cpu()
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    conv.kernel,
                    conv.kernel_generator,
                    conv.convolution_mode,
                    input.coordinate_map_key,
                    output.coordinate_map_key,
                    input.coordinate_manager,
                ),
            ))

        for i in range(LEAK_TEST_ITER):
            input = SparseTensor(feats, coordinates=coords)
            conv(input).F.sum().backward()
Example #9
    def test_gpu(self):
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()

        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    bias=True,
                                    dimension=D)

        print(conv)
        input = SparseTensor(feats, coordinates=coords)
        conv = conv.double()
        output = conv(input)
        print(output)

        device = torch.device("cuda")
        input = SparseTensor(feats.to(device), coordinates=coords.to(device))
        conv = conv.to(device)
        output = conv(input)
        print(output)

        # Check backward
        fn = MinkowskiConvolutionFunction()

        grad = output.F.clone().zero_()
        grad[0] = 1
        output.F.backward(grad)

        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    conv.kernel,
                    conv.kernel_generator,
                    conv.convolution_mode,
                    input.coordinate_map_key,
                    None,
                    input.coordinate_manager,
                ),
            ))
Example #10
    def test_kernelmap(self):
        print(f"{self.__class__.__name__}: test_kernelmap")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    has_bias=True,
                                    dimension=D)
        conv = conv.double()
        output = conv(input)
        print(input.C, output.C)
        print(output.coords_man.get_kernel_map(1, 2, stride=2, kernel_size=3))
Example #11
    def test_unpooling_gpu(self):
        if not torch.cuda.is_available():
            return

        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        input = SparseTensor(feats, coords=coords)
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    dimension=D)
        conv = conv.double()
        unpool = MinkowskiPoolingTranspose(kernel_size=3,
                                           stride=2,
                                           dimension=D)
        input = conv(input)
        output = unpool(input)
        print(output)

        # Check backward
        fn = MinkowskiPoolingTransposeFunction()

        self.assertTrue(
            gradcheck(fn, (input.F, input.tensor_stride, unpool.stride,
                           unpool.kernel_size, unpool.dilation,
                           unpool.region_type_, unpool.region_offset_, False,
                           input.coords_key, None, input.coords_man)))

        device = torch.device('cuda')
        with torch.cuda.device(0):
            input = input.to(device)
            output = unpool(input)
            print(output)

        # Check backward
        fn = MinkowskiAvgPoolingFunction()
        self.assertTrue(
            gradcheck(fn, (input.F, input.tensor_stride, unpool.stride,
                           unpool.kernel_size, unpool.dilation,
                           unpool.region_type_, unpool.region_offset_, True,
                           input.coords_key, None, input.coords_man)))
Example #12
    def test_gpu(self):
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    has_bias=True,
                                    dimension=D)
        print(conv)
        conv = conv.double()
        output = conv(input)
        print(output)

        device = torch.device('cuda')
        input = input.to(device)
        conv = conv.to(device)
        output = conv(input)
        print(output)
        print(output.F, output.coords)

        # Check backward
        fn = MinkowskiConvolutionFunction()

        grad = output.F.clone().zero_()
        grad[0] = 1
        output.F.backward(grad)

        self.assertTrue(
            gradcheck(fn, (input.F, conv.kernel, input.tensor_stride,
                           conv.stride, conv.kernel_size, conv.dilation,
                           conv.region_type_, conv.region_offset_,
                           input.coords_key, None, input.coords_man)))
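To run one of these snippets outside the MinkowskiEngine repository, wrap it in a plain unittest.TestCase and provide the toy data yourself; a self-contained sketch using the 0.5+ API (class name and data are illustrative, not from the original tests):

    import unittest

    import torch

    from MinkowskiEngine import MinkowskiConvolution, SparseTensor


    class ConvolutionGradTest(unittest.TestCase):
        def test(self):
            # Three 2D points (batch index first) with 2-channel double features.
            coords = torch.IntTensor([[0, 0, 0], [0, 0, 1], [0, 1, 1]])
            feats = torch.DoubleTensor([[1, 2], [3, 4], [5, 6]]).requires_grad_()
            input = SparseTensor(feats, coordinates=coords)
            conv = MinkowskiConvolution(
                2, 3, kernel_size=3, stride=2, bias=True, dimension=2
            ).double()
            # Forward, then backpropagate a scalar loss through the sparse convolution.
            conv(input).F.sum().backward()
            self.assertIsNotNone(feats.grad)


    if __name__ == "__main__":
        unittest.main()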