Example #1
0
    def test(self):
        """Forward and backward smoke test for MinkowskiConvolution
        using the legacy (v0.4-era) MinkowskiEngine API.
        """
        print(f"{self.__class__.__name__}: test")
        in_channels, out_channels, D = 2, 3, 2

        # Build a double-precision input with gradients enabled so that
        # gradcheck can numerically verify the backward pass.
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double().requires_grad_()
        sparse_input = SparseTensor(feats, coords=coords)

        # Initialize context
        conv = MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            has_bias=True,
            dimension=D,
        ).double()
        print(conv(sparse_input))

        # Inspect the in->out kernel map between tensor strides 1 and 2.
        kernel_map = sparse_input.coords_man.get_kernel_map(
            1, 2, stride=2, kernel_size=3)
        print(kernel_map)

        # Check backward: numerically verify the raw convolution function.
        fn = MinkowskiConvolutionFunction()
        legacy_args = (sparse_input.F, conv.kernel, sparse_input.tensor_stride,
                       conv.stride, conv.kernel_size, conv.dilation,
                       conv.region_type_, conv.region_offset_,
                       sparse_input.coords_key, None, sparse_input.coords_man)
        self.assertTrue(gradcheck(fn, legacy_args))
Example #2
0
    def test(self):
        """End-to-end check for MinkowskiConvolution: CPU forward pass,
        tensor-stride bookkeeping, CPU/GPU agreement, gradcheck of the raw
        backward function, and a memory-leak smoke loop.
        """
        print(f"{self.__class__.__name__}: test")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        # Renamed from `input` to avoid shadowing the builtin.
        sinput = SparseTensor(feats, coordinates=coords)
        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    bias=True,
                                    dimension=D)
        conv = conv.double()
        output = conv(sinput)
        print(output)

        # A stride-2 convolution must double the tensor stride on each axis.
        self.assertEqual(sinput.coordinate_map_key.get_tensor_stride(), [1, 1])
        self.assertEqual(output.coordinate_map_key.get_tensor_stride(), [2, 2])

        if torch.cuda.is_available():
            # GPU path must agree with the CPU result; compare feature
            # statistics since row order may differ across devices.
            input_gpu = SparseTensor(feats, coordinates=coords, device="cuda")
            conv_gpu = conv.cuda()
            output_gpu = conv_gpu(input_gpu)
            self.assertTrue(
                torch.allclose(output_gpu.F.var(0).cpu(), output.F.var(0)))
            self.assertTrue(
                torch.allclose(output_gpu.F.mean(0).cpu(), output.F.mean(0)))

        # Check backward: numerically verify gradients of the raw function.
        fn = MinkowskiConvolutionFunction()

        conv = conv.cpu()
        self.assertTrue(
            gradcheck(
                fn,
                (
                    sinput.F,
                    conv.kernel,
                    conv.kernel_generator,
                    conv.convolution_mode,
                    sinput.coordinate_map_key,
                    output.coordinate_map_key,
                    sinput.coordinate_manager,
                ),
            ))

        # Leak smoke test: repeated forward+backward must not accumulate
        # coordinate managers or autograd state.
        for i in range(LEAK_TEST_ITER):
            sinput = SparseTensor(feats, coordinates=coords)
            conv(sinput).F.sum().backward()
Example #3
0
    def test_gpu(self):
        """Exercise both GEMM convolution modes on the GPU and gradcheck
        the raw convolution function for each.
        """
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return

        in_channels, out_channels, D = 3, 2, 2
        coords, feats, labels = data_loader(in_channels, batch_size=20)
        feats = feats.double()
        feats.requires_grad_()
        device = torch.device("cuda")

        conv = MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=2,
            stride=1,
            bias=False,
            dimension=D,
        ).to(device).double()

        # Initialize context: try both kernel execution strategies.
        for mode in [_C.ConvolutionMode.DIRECT_GEMM, _C.ConvolutionMode.COPY_GEMM]:
            conv.convolution_mode = mode
            x = SparseTensor(feats, coordinates=coords, device=device)
            print(mode, x.F.numel(), len(x), x)
            y = conv(x)
            print(y)

            # Check backward through the module with a one-hot gradient.
            fn = MinkowskiConvolutionFunction()
            grad = y.F.clone().zero_()
            grad[0] = 1
            y.F.backward(grad)

            # Numerical gradient verification of the raw function.
            self.assertTrue(
                gradcheck(
                    fn,
                    (
                        x.F,
                        conv.kernel,
                        conv.kernel_generator,
                        conv.convolution_mode,
                        x.coordinate_map_key,
                        None,
                        x.coordinate_manager,
                    ),
                )
            )
Example #4
0
    def test_gpu(self):
        """Run the same convolution on CPU and then on GPU, then gradcheck
        the backward pass of the raw function on the GPU tensors.
        """
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return

        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()

        # Initialize context
        conv = MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D,
        )
        print(conv)

        # CPU reference forward pass.
        x = SparseTensor(feats, coordinates=coords)
        conv = conv.double()
        print(conv(x))

        # Repeat the forward pass on the GPU.
        device = torch.device("cuda")
        x = SparseTensor(feats.to(device), coordinates=coords.to(device))
        conv = conv.to(device)
        y = conv(x)
        print(y)

        # Check backward with a one-hot gradient, then verify numerically.
        fn = MinkowskiConvolutionFunction()
        grad = y.F.clone().zero_()
        grad[0] = 1
        y.F.backward(grad)

        self.assertTrue(
            gradcheck(
                fn,
                (
                    x.F,
                    conv.kernel,
                    conv.kernel_generator,
                    conv.convolution_mode,
                    x.coordinate_map_key,
                    None,
                    x.coordinate_manager,
                ),
            ))
Example #5
0
    def test_gpu(self):
        """GPU forward/backward test using the legacy (v0.4-era)
        MinkowskiEngine API.
        """
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return

        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double().requires_grad_()
        x = SparseTensor(feats, coords=coords)

        # Initialize context
        conv = MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            has_bias=True,
            dimension=D,
        )
        print(conv)
        conv = conv.double()
        print(conv(x))

        # Repeat the forward pass on the GPU.
        device = torch.device('cuda')
        x = x.to(device)
        conv = conv.to(device)
        y = conv(x)
        print(y)
        print(y.F, y.coords)

        # Check backward with a one-hot gradient, then verify numerically.
        fn = MinkowskiConvolutionFunction()
        grad = y.F.clone().zero_()
        grad[0] = 1
        y.F.backward(grad)

        legacy_args = (x.F, conv.kernel, x.tensor_stride, conv.stride,
                       conv.kernel_size, conv.dilation, conv.region_type_,
                       conv.region_offset_, x.coords_key, None, x.coords_man)
        self.assertTrue(gradcheck(fn, legacy_args))
Example #6
0
    def __init__(self, nchannels, spatial_sigma, chromatic_sigma,
                 meanfield_iterations, is_temporal, config, **kwargs):
        """Set up a mean-field module built around one MinkowskiConvolution.

        Args:
            nchannels: number of input (and output) feature channels.
            spatial_sigma: bandwidth for the spatial coordinates.
            chromatic_sigma: bandwidth for the chromatic coordinates.
            meanfield_iterations: number of mean-field update steps.
            is_temporal: when True, one extra (time) dimension is added.
            config: project configuration providing wrapper kernel settings.
        """
        # A temporal model carries one extra coordinate dimension.
        D = 7 if is_temporal else 6
        self.is_temporal = is_temporal
        # Setup metadata
        super(MeanField, self).__init__(nchannels, nchannels, config, D=D)

        self.spatial_sigma = spatial_sigma
        self.chromatic_sigma = chromatic_sigma
        # temporal sigma is 1
        self.meanfield_iterations = meanfield_iterations

        self.pixel_dist, self.stride, self.dilation = 1, 1, 1

        conv = MinkowskiConvolution(
            nchannels,
            nchannels,
            kernel_size=config.wrapper_kernel_size,
            has_bias=False,
            region_type=convert_region_type(config.wrapper_region_type),
            dimension=D,
        )

        # Create a region_offset: precompute the kernel region description
        # once so the per-iteration convolution functions can reuse it.
        self.region_type_, self.region_offset_, _ = me_convert_region_type(
            conv.region_type, 1, conv.kernel_size, conv.up_stride,
            conv.dilation, conv.region_offset, conv.axis_types, conv.dimension)

        # Check whether the mapping is required
        self.requires_mapping = False
        self.conv = conv
        self.kernel = conv.kernel

        # One softmax and one raw convolution function per mean-field step.
        self.softmaxes = {i: nn.Softmax(dim=1)
                          for i in range(self.meanfield_iterations)}
        self.convs = {i: MinkowskiConvolutionFunction()
                      for i in range(self.meanfield_iterations)}