# Example 1
    def test(self):
        """Run a strided convolution followed by its transpose, then
        numerically gradcheck the raw transpose autograd function
        (legacy MinkowskiEngine API: ``coords=`` / ``has_bias=``)."""
        print(f"{self.__class__.__name__}: test")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        # Double precision is required for a reliable numeric gradcheck.
        feats = feats.double()
        feats.requires_grad_()
        x = SparseTensor(feats, coords=coords)

        # Downsample with a strided conv, then upsample back with its transpose.
        conv = MinkowskiConvolution(
            in_channels, out_channels, kernel_size=3, stride=2,
            has_bias=True, dimension=D).double()
        conv_tr = MinkowskiConvolutionTranspose(
            out_channels, in_channels, kernel_size=2, stride=2,
            has_bias=True, dimension=D).double()

        x = conv(x)
        y = conv_tr(x)
        print(y)

        # Check backward through the low-level autograd function.
        fn = MinkowskiConvolutionTransposeFunction()
        self.assertTrue(
            gradcheck(fn, (x.F, conv_tr.kernel, x.tensor_stride,
                           conv_tr.stride, conv_tr.kernel_size,
                           conv_tr.dilation, conv_tr.region_type_,
                           conv_tr.region_offset_, x.coords_key, None,
                           x.coords_man)))
# Example 2
    def test_analytic(self):
        """Apply a convolution and a transpose convolution with hand-set
        kernel weights to a tiny, fully specified 2-D input, so the printed
        outputs can be verified analytically by hand."""
        # Fixed: the message previously said "test", which did not match the
        # method name (cf. test_tr / test_gpu, which print their own names).
        print(f"{self.__class__.__name__}: test_analytic")
        in_channels, out_channels, D = 2, 2, 2
        # Three points given as (batch, x, y) with known 2-channel features.
        coords = torch.IntTensor([[0, 0, 0], [0, 1, 1], [0, 2, 1]])
        feats = torch.FloatTensor([[0, 1], [1, 0], [1, 1]])
        input = SparseTensor(feats, coordinates=coords)

        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=2,
                                    stride=2,
                                    bias=False,
                                    dimension=D)
        # Pin the kernel weights so the expected output is derivable by hand.
        conv.kernel[:] = torch.FloatTensor([[[1, 2], [2, 1]], [[0, 1], [1, 0]],
                                            [[0, 1], [1, 1]], [[1, 1], [1,
                                                                        0]]])
        output = conv(input)
        print(output)

        conv_tr = MinkowskiConvolutionTranspose(in_channels,
                                                out_channels,
                                                kernel_size=2,
                                                stride=2,
                                                bias=False,
                                                dimension=D)
        conv_tr.kernel[:] = torch.FloatTensor([[[1, 2], [2, 1]],
                                               [[0, 1], [1, 0]],
                                               [[0, 1], [1, 1]],
                                               [[1, 1], [1, 0]]])
        output_tr = conv_tr(output)
        print(output_tr)
# Example 3
    def test_with_convtr(self):
        """Alternate transpose-conv / conv / random pruning stages while
        upsampling, then verify gradients flow back to the input features.
        """
        channels, D = [2, 3, 4], 2
        coords, feats, labels = data_loader(channels[0], batch_size=1)
        feats = feats.double()
        feats.requires_grad_()
        # Start from a coarse tensor stride so the transpose convs can upsample.
        base_stride = 4
        x = SparseTensor(
            feats,
            coords * base_stride,
            tensor_stride=base_stride,
        )
        conv_tr1 = MinkowskiConvolutionTranspose(
            channels[0], channels[1], kernel_size=3, stride=2,
            generate_new_coords=True, dimension=D).double()
        conv1 = MinkowskiConvolution(
            channels[1], channels[1], kernel_size=3, dimension=D).double()
        conv_tr2 = MinkowskiConvolutionTranspose(
            channels[1], channels[2], kernel_size=3, stride=2,
            generate_new_coords=True, dimension=D).double()
        conv2 = MinkowskiConvolution(
            channels[2], channels[2], kernel_size=3, dimension=D).double()
        pruning = MinkowskiPruning()

        y = conv_tr1(x)
        # All generated features are expected to be non-zero.
        self.assertTrue(torch.prod(torch.abs(y.F) > 0).item() == 1)
        y = conv1(y)
        keep = torch.rand(len(y)) < 0.5
        y = pruning(y, keep)

        z = conv_tr2(y)
        self.assertTrue(torch.prod(torch.abs(z.F) > 0).item() == 1)
        keep = torch.rand(len(z)) < 0.5
        z = pruning(z, keep)
        z = conv2(z)

        print(z)

        z.F.sum().backward()

        # Check gradient flow
        print(x.F.grad)
# Example 4
    def test_network(self):
        """Mix plain torch layers with Minkowski layers in one nn.Sequential
        that converts dense -> sparse -> dense over a 4-D spatial grid, and
        confirm gradients reach the dense input across repeated iterations."""
        dense_tensor = torch.rand(3, 4, 11, 11, 11, 11)  # BxCxD1xD2x....xDN
        dense_tensor.requires_grad = True

        # The input shape is fixed, so the coordinates are computed once and
        # reused on every forward pass for faster inference.
        coordinates = dense_coordinates(dense_tensor.shape)

        network = nn.Sequential(
            nn.ReLU(),  # regular pytorch layer, applied to the dense tensor
            MinkowskiToSparseTensor(remove_zeros=False,
                                    coordinates=coordinates),
            MinkowskiConvolution(4, 5, stride=2, kernel_size=3, dimension=4),
            MinkowskiBatchNorm(5),
            MinkowskiReLU(),
            MinkowskiConvolutionTranspose(
                5, 6, stride=2, kernel_size=3, dimension=4),
            # must have the same tensor stride.
            MinkowskiToDenseTensor(dense_tensor.shape),
        )

        for i in range(5):
            print(f"Iteration: {i}")
            result = network(dense_tensor)
            result.sum().backward()

        assert dense_tensor.grad is not None
# Example 5
    def test_tr(self):
        """Transpose-convolve onto an explicitly supplied random output
        coordinate set, then run a backward pass to check gradient flow."""
        print(f"{self.__class__.__name__}: test_tr")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels, batch_size=2)
        # tensor stride must be at least 2 for convolution transpose with stride 2
        coords[:, :2] *= 2
        # Build a random target coordinate set.
        target_coords = torch.rand(10, 3)
        target_coords[:, :2] *= 10  # random coords
        target_coords[:, 2] *= 2  # random batch index
        target_coords = target_coords.floor().int()

        feats = feats.double()
        feats.requires_grad_()

        x = SparseTensor(feats, coords=coords, tensor_stride=2)
        manager = x.coords_man
        print(manager._get_coords_key(2))

        conv_tr = MinkowskiConvolutionTranspose(
            in_channels, out_channels, kernel_size=3, stride=2,
            bias=False, dimension=D).double()
        print('Initial input: ', x)
        print('Specified output coords: ', target_coords)
        y = conv_tr(x, target_coords)
        print('Conv output: ', y)

        y.F.sum().backward()
        print(x.F.grad)
# Example 6
    def test_gpu(self):
        """GPU forward/backward test for transpose convolution using the
        newer MinkowskiEngine API (``coordinates=`` / ``bias=``); skipped
        silently when CUDA is unavailable."""
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return  # nothing to test on CPU-only machines

        device = torch.device("cuda")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        # Double precision is needed for the numeric gradcheck below.
        feats = feats.double()
        feats.requires_grad_()
        x = SparseTensor(feats.to(device), coordinates=coords.to(device))

        # Strided conv produces the coarser tensor the transpose then upsamples.
        conv = MinkowskiConvolution(
            in_channels, out_channels, kernel_size=3, stride=2,
            bias=True, dimension=D).double().to(device)
        conv_tr = MinkowskiConvolutionTranspose(
            out_channels, in_channels, kernel_size=3, stride=2,
            bias=True, dimension=D).double().to(device)

        tr_input = conv(x)
        print(tr_input)
        output = conv_tr(tr_input)
        print(output)

        # Check backward through the low-level autograd function.
        fn = MinkowskiConvolutionTransposeFunction()
        self.assertTrue(
            gradcheck(fn, (tr_input.F,
                           conv_tr.kernel,
                           conv_tr.kernel_generator,
                           conv_tr.convolution_mode,
                           tr_input.coordinate_map_key,
                           output.coordinate_map_key,
                           tr_input.coordinate_manager)))
# Example 7
    def test_gpu(self):
        """GPU forward/backward test for transpose convolution using the
        legacy MinkowskiEngine API (``coords=`` / ``has_bias=``); returns
        early when CUDA is unavailable."""
        if not torch.cuda.is_available():
            return  # skip silently without a GPU

        device = torch.device('cuda')
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        # Double precision is required for a reliable numeric gradcheck.
        feats = feats.double()
        feats.requires_grad_()
        x = SparseTensor(feats, coords=coords).to(device)

        # Downsample with a strided conv, then upsample with its transpose.
        conv = MinkowskiConvolution(
            in_channels, out_channels, kernel_size=3, stride=2,
            has_bias=True, dimension=D).to(device)
        conv = conv.double()
        conv_tr = MinkowskiConvolutionTranspose(
            out_channels, in_channels, kernel_size=3, stride=2,
            has_bias=True, dimension=D).to(device)
        conv_tr = conv_tr.double()

        x = conv(x)
        y = conv_tr(x)
        print(y)

        # Check backward through the low-level autograd function.
        fn = MinkowskiConvolutionTransposeFunction()
        self.assertTrue(
            gradcheck(fn, (x.F, conv_tr.kernel, x.tensor_stride,
                           conv_tr.stride, conv_tr.kernel_size,
                           conv_tr.dilation, conv_tr.region_type_,
                           conv_tr.region_offset_, x.coords_key, None,
                           x.coords_man)))
# Example 8
    def test(self):
        """CPU forward/backward test for transpose convolution using the
        newer MinkowskiEngine API (``coordinates=`` / ``bias=``)."""
        print(f"{self.__class__.__name__}: test")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        # Double precision is needed for the numeric gradcheck below.
        feats = feats.double()
        feats.requires_grad_()
        x = SparseTensor(feats, coordinates=coords)

        # Strided conv produces the coarse tensor the transpose then upsamples.
        conv = MinkowskiConvolution(
            in_channels, out_channels, kernel_size=3, stride=2,
            bias=True, dimension=D).double()
        conv_tr = MinkowskiConvolutionTranspose(
            out_channels, in_channels, kernel_size=2, stride=2,
            bias=True, dimension=D).double()

        print("Initial input: ", x)
        x = conv(x)
        print("Conv output: ", x)

        y = conv_tr(x)
        print("Conv tr output: ", y)

        # Check backward through the low-level autograd function.
        fn = MinkowskiConvolutionTransposeFunction()
        self.assertTrue(
            gradcheck(fn, (x.F,
                           conv_tr.kernel,
                           conv_tr.kernel_generator,
                           conv_tr.convolution_mode,
                           x.coordinate_map_key,
                           y.coordinate_map_key,
                           x.coordinate_manager)))