# Example 1
    def test_long(self):
        """Gradcheck MinkowskiDirectMaxPoolingFunction on CPU maps, then on GPU.

        Skipped entirely when CUDA is unavailable (the second half requires it).
        """
        if not torch.cuda.is_available():
            return
        pool = MinkowskiDirectMaxPoolingFunction()
        # Random in->out mappings: 10 entries selecting among 5 input rows
        # and 3 output rows.
        in_map = torch.randint(0, 5, (10, ))
        out_map = torch.randint(0, 3, (10, ))
        in_feat = torch.rand(5, 16).double()
        in_feat.requires_grad_()
        out_nrows = 3
        out_feat = pool.apply(in_map, out_map, in_feat, out_nrows)
        print(out_feat)
        out_feat.sum().backward()

        self.assertTrue(
            gradcheck(
                pool,
                (in_map, out_map, in_feat, out_nrows),
            ))

        # FIX: removed a second `torch.cuda.is_available()` check here; the
        # guard at the top already returned, so it was dead code.
        in_map = in_map.cuda()
        out_map = out_map.cuda()
        in_feat = in_feat.cuda()

        out_feat = pool.apply(in_map, out_map, in_feat, out_nrows)
        print(out_feat)

        self.assertTrue(
            gradcheck(
                pool,
                (in_map, out_map, in_feat, out_nrows),
            ))
# Example 2
    def test_average(self):
        """SPMM-average: forward/backward + gradcheck on CPU, then on GPU.

        The GPU half is skipped when CUDA is unavailable.
        """
        rows = torch.Tensor([0, 0, 1, 1]).int()
        cols = torch.Tensor([0, 1, 2, 3]).int()
        size = [2, 4]
        mat = torch.rand(4, 3).double()
        mat.requires_grad_()
        spmm_fn = MinkowskiSPMMAverageFunction()
        out = spmm_fn.apply(rows, cols, size, mat)
        print(out)

        loss = out.sum()
        loss.backward()
        print(mat.grad)
        self.assertTrue(gradcheck(spmm_fn, (rows, cols, size, mat)))

        # FIX: the GPU half previously ran unconditionally and crashed on
        # CUDA-less machines; skip it when no device is present.
        if not torch.cuda.is_available():
            return

        rows = rows.cuda()
        cols = cols.cuda()
        mat = mat.cuda()
        mat.requires_grad_()
        out = spmm_fn.apply(rows, cols, size, mat)
        print(out)

        loss = out.sum()
        loss.backward()
        print(mat.grad)
        self.assertTrue(gradcheck(spmm_fn, (rows, cols, size, mat)))
# Example 3
    def test_global_avgpool(self):
        """Global pooling forward + gradcheck in several pooling modes."""
        in_channels = 2

        def _check_backward(tensor, mode):
            # Gradcheck through the raw global-pooling autograd function.
            fn = MinkowskiGlobalPoolingFunction()
            self.assertTrue(
                gradcheck(fn, (tensor.F, True, mode, tensor.coords_key, None,
                               tensor.coords_man)))

        # Two-sample batch: exercise INDEX_SELECT and SPARSE modes.
        coords, feats, labels = data_loader(in_channels, batch_size=2)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiGlobalPooling()
        print(pool(input))

        _check_backward(input, GlobalPoolingMode.INDEX_SELECT)
        _check_backward(input, GlobalPoolingMode.SPARSE)

        # Single-sample batch: exercise AUTO mode.
        coords, feats, labels = data_loader(in_channels, batch_size=1)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiGlobalPooling()
        print(pool(input))

        _check_backward(input, GlobalPoolingMode.AUTO)
# Example 4
    def test_pruning(self):
        """Pruning with a random keep-mask: gradcheck on CPU, then on GPU."""
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        # Keep roughly half of the points.
        use_feat = torch.rand(feats.size(0)) < 0.5
        pruning = MinkowskiPruning(D)
        output = pruning(input, use_feat)
        print(use_feat, output)

        # Check backward
        fn = MinkowskiPruningFunction()
        self.assertTrue(
            gradcheck(fn, (input.F, use_feat, input.coords_key,
                           output.coords_key, input.coords_man)))

        # FIX: guard the GPU half; it previously ran unconditionally and
        # crashed on machines without CUDA.
        if not torch.cuda.is_available():
            return

        device = torch.device('cuda')
        with torch.cuda.device(0):
            input = input.to(device)
            output = pruning(input, use_feat)
            print(output)

        self.assertTrue(
            gradcheck(fn, (input.F, use_feat, input.coords_key,
                           output.coords_key, input.coords_man)))
# Example 5
    def test_broadcast(self):
        """CPU forward + gradcheck for broadcast addition/multiplication."""
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        coords, feats_glob, labels = data_loader(in_channels)
        feats = feats.double()
        feats_glob = feats_glob.double()
        input = SparseTensor(feats, coords=coords)
        # Globally pooled tensor is the broadcast source.
        pool = MinkowskiGlobalPooling(dimension=D)
        input_glob = pool(input)
        input_glob.F.requires_grad_()
        broadcast = MinkowskiBroadcastAddition(D)
        broadcast_mul = MinkowskiBroadcastMultiplication(D)
        print(broadcast(input, input_glob))
        print(broadcast_mul(input, input_glob))

        # Gradcheck through the raw broadcast function for both op types.
        fn = MinkowskiBroadcastFunction()
        for op in (OperationType.ADDITION, OperationType.MULTIPLICATION):
            self.assertTrue(
                gradcheck(fn, (input.F, input_glob.F, op, input.coords_key,
                               input_glob.coords_key, input.coords_man)))
# Example 6
    def test_unpool_gpu(self):
        """Pooling-transpose forward/backward on CPU, then on the GPU."""
        if not torch.cuda.is_available():
            return

        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        input = SparseTensor(feats, coords)
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    dimension=D).double()
        unpool = MinkowskiPoolingTranspose(kernel_size=3,
                                           stride=2,
                                           dimension=D)
        input = conv(input)
        output = unpool(input)
        print(output)

        fn = MinkowskiLocalPoolingTransposeFunction()

        def _grad_args(x):
            # Argument tuple expected by the raw autograd function.
            return (x.F, unpool.pooling_mode, unpool.kernel_generator,
                    x.coordinate_map_key, None, x.coordinate_manager)

        # CPU backward check.
        self.assertTrue(gradcheck(fn, _grad_args(input)))

        with torch.cuda.device(0):
            conv = conv.to("cuda")
            input = SparseTensor(feats, coords, device="cuda")
            input = conv(input)
            input.requires_grad_()
            output = unpool(input)
            print(output)

        # GPU backward check.
        self.assertTrue(gradcheck(fn, _grad_args(input)))
# Example 7
    def test_maxpooling(self):
        """Max pooling: forward, kernel-map inspection, gradcheck (CPU/GPU)."""
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels, batch_size=2)
        # FIX: cast to double *before* flagging requires_grad; the original
        # called requires_grad_() first and then rebound `feats` to the
        # non-leaf result of .double(). All sibling tests use
        # double-then-requires_grad_ order.
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=D)
        print(pool)
        output = pool(input)
        print(input)
        print(output)
        C = output.coords_man
        print(C.get_coords(2))
        region_type, _, _ = pool.kernel_generator.cache[(1, 1)]
        print(
            C.get_kernel_map(
                1,
                2,
                stride=2,
                kernel_size=2,
                region_type=region_type,
                is_pool=True))
        # Check backward
        fn = MinkowskiMaxPoolingFunction()

        # Even numbered kernel_size error!
        self.assertTrue(
            gradcheck(
                fn,
                (input.F, input.tensor_stride, pool.stride, pool.kernel_size,
                 pool.dilation, pool.region_type_, pool.region_offset_,
                 input.coords_key, None, input.coords_man)))

        if not torch.cuda.is_available():
            return

        device = torch.device('cuda')
        input = input.to(device)
        output = pool(input)
        print(output)

        # Check backward
        self.assertTrue(
            gradcheck(
                fn,
                (input.F, input.tensor_stride, pool.stride, pool.kernel_size,
                 pool.dilation, pool.region_type_, pool.region_offset_,
                 input.coords_key, None, input.coords_man)))
# Example 8
    def test_gpu(self):
        """Global max pooling on CPU then GPU; gradcheck on the GPU tensors."""
        if not torch.cuda.is_available():
            return

        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coordinates=coords)
        pool = MinkowskiGlobalMaxPooling()
        output = pool(input)
        print(output)

        # FIX: removed a second cuda-availability check here; the guard at
        # the top already returned, so it was dead code.
        input = SparseTensor(feats, coordinates=coords, device=0)
        output = pool(input)
        print(output)

        # Check backward
        fn = MinkowskiGlobalPoolingFunction()
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    pool.pooling_mode,
                    input.coordinate_map_key,
                    output.coordinate_map_key,
                    input._manager,
                ),
            )
        )
# Example 9
    def test(self):
        """Conv -> conv-transpose forward pass; gradcheck the transpose."""
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        # Downsampling convolution followed by an upsampling transpose.
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    has_bias=True,
                                    dimension=D).double()
        conv_tr = MinkowskiConvolutionTranspose(out_channels,
                                                in_channels,
                                                kernel_size=2,
                                                stride=2,
                                                has_bias=True,
                                                dimension=D).double()
        input = conv(input)
        output = conv_tr(input)
        print(output)

        # Gradcheck through the raw transpose autograd function.
        fn = MinkowskiConvolutionTransposeFunction()
        args = (input.F, conv_tr.kernel, input.tensor_stride, conv_tr.stride,
                conv_tr.kernel_size, conv_tr.dilation, conv_tr.region_type_,
                conv_tr.region_offset_, input.coords_key, None,
                input.coords_man)
        self.assertTrue(gradcheck(fn, args))
# Example 10
    def test_empty(self):
        """Pruning with an all-False mask must still support backward."""
        in_channels = 2
        coords, feats, labels = data_loader(in_channels, batch_size=1)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords)
        # Keep-mask of all False: every point is pruned away.
        use_feat = torch.zeros(len(input), dtype=torch.bool)
        pruning = MinkowskiPruning()
        output = pruning(input, use_feat)
        print(input)
        print(use_feat)
        print(output)

        # Gradcheck through the raw pruning function.
        fn = MinkowskiPruningFunction()
        args = (input.F, use_feat, input.coordinate_map_key,
                output.coordinate_map_key, input.coordinate_manager)
        self.assertTrue(gradcheck(fn, args))
# Example 11
    def test(self):
        """Strided convolution forward, kernel-map dump, and gradcheck."""
        print(f"{self.__class__.__name__}: test")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    has_bias=True,
                                    dimension=D).double()
        output = conv(input)
        print(output)

        # Inspect the in/out kernel map between tensor strides 1 and 2.
        kernel_map = input.coords_man.get_kernel_map(1,
                                                     2,
                                                     stride=2,
                                                     kernel_size=3)
        print(kernel_map)

        # Gradcheck through the raw convolution function.
        fn = MinkowskiConvolutionFunction()
        args = (input.F, conv.kernel, input.tensor_stride, conv.stride,
                conv.kernel_size, conv.dilation, conv.region_type_,
                conv.region_offset_, input.coords_key, None, input.coords_man)
        self.assertTrue(gradcheck(fn, args))
# Example 12
    def test_unpool(self):
        """Conv then pooling-transpose; gradcheck the transpose function."""
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        input = SparseTensor(feats, coords)
        conv = MinkowskiConvolution(
            in_channels, out_channels, kernel_size=3, stride=2, dimension=D
        ).double()
        unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
        input = conv(input)
        output = unpool(input)
        print(output)

        # Gradcheck through the raw transpose-pooling function.
        # NOTE(review): the False flag presumably disables averaging — confirm
        # against MinkowskiPoolingTransposeFunction's signature.
        fn = MinkowskiPoolingTransposeFunction()
        args = (input.F, input.tensor_stride, unpool.stride,
                unpool.kernel_size, unpool.dilation, unpool.region_type_,
                unpool.region_offset_, False, input.coords_key, None,
                input.coords_man)
        self.assertTrue(gradcheck(fn, args))
# Example 13
    def test_unpool(self):
        """Conv then pooling-transpose; gradcheck the local transpose fn."""
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        input = SparseTensor(feats, coords)
        conv = MinkowskiConvolution(
            in_channels, out_channels, kernel_size=3, stride=2, dimension=D
        ).double()
        unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
        input = conv(input)
        output = unpool(input)
        print(output)

        # Gradcheck through the raw local pooling-transpose function.
        fn = MinkowskiLocalPoolingTransposeFunction()
        args = (input.F, unpool.pooling_mode, unpool.kernel_generator,
                input.coordinate_map_key, None, input.coordinate_manager)
        self.assertTrue(gradcheck(fn, args))
# Example 14
    def test_avgpooling_gpu(self):
        """Average pooling on CPU then GPU; gradcheck on the GPU tensors."""
        if not torch.cuda.is_available():
            return

        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiAvgPooling(kernel_size=3, stride=2, dimension=D)
        print(pool(input))

        device = torch.device('cuda')
        with torch.cuda.device(0):
            input = input.to(device)
            pool = pool.to(device)
            output = pool(input)
            print(output)

        # Gradcheck through the raw avg-pooling function.
        # NOTE(review): the True flag presumably enables averaging — confirm.
        fn = MinkowskiAvgPoolingFunction()
        args = (input.F, input.tensor_stride, pool.stride, pool.kernel_size,
                pool.dilation, pool.region_type_, pool.region_offset_, True,
                input.coords_key, None, input.coords_man)
        self.assertTrue(gradcheck(fn, args))
# Example 15
    def test_device(self):
        """Pruning with features and keep-mask resident on the GPU."""
        # FIX: skip when CUDA is unavailable; the test previously assumed a
        # GPU unconditionally and crashed without one.
        if not torch.cuda.is_available():
            return

        in_channels, D = 2, 2
        device = torch.device("cuda")
        coords, feats, labels = data_loader(in_channels, batch_size=1)
        feats = feats.double()
        feats.requires_grad_()

        # Keep roughly half of the points.
        use_feat = (torch.rand(feats.size(0)) < 0.5).to(device)
        pruning = MinkowskiPruning()

        input = SparseTensor(feats, coords, device=device)
        output = pruning(input, use_feat)
        print(input)
        print(output)

        fn = MinkowskiPruningFunction()
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    use_feat,
                    input.coordinate_map_key,
                    output.coordinate_map_key,
                    input.coordinate_manager,
                ),
            ))
# Example 16
    def test(self):
        """Max-pool forward + gradcheck of the local pooling function."""
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coordinates=coords)
        pool = MinkowskiMaxPooling(kernel_size=3, stride=2, dimension=D)
        output = pool(input)
        print(output)

        # Gradcheck through the raw local-pooling function.
        fn = MinkowskiLocalPoolingFunction()
        args = (input.F, pool.pooling_mode, pool.kernel_generator,
                input.coordinate_map_key, output.coordinate_map_key,
                input._manager)
        self.assertTrue(gradcheck(fn, args))
# Example 17
    def test_broadcast_gpu(self):
        """Broadcast ops must agree between CPU and GPU; gradcheck on GPU."""
        # FIX: guard on CUDA availability (was missing).
        if not torch.cuda.is_available():
            return

        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        coords, feats_glob, labels = data_loader(in_channels)
        feats = feats.double()
        feats_glob = feats_glob.double()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiGlobalPooling()
        input_glob = pool(input)
        input_glob.F.requires_grad_()
        broadcast_add = MinkowskiBroadcastAddition()
        broadcast_mul = MinkowskiBroadcastMultiplication()
        broadcast_cat = MinkowskiBroadcastConcatenation()
        cpu_add = broadcast_add(input, input_glob)
        cpu_mul = broadcast_mul(input, input_glob)
        cpu_cat = broadcast_cat(input, input_glob)

        # Check backward
        fn = MinkowskiBroadcastFunction()

        device = torch.device('cuda')

        input = input.to(device)
        input_glob = input_glob.to(device)
        gpu_add = broadcast_add(input, input_glob)
        gpu_mul = broadcast_mul(input, input_glob)
        gpu_cat = broadcast_cat(input, input_glob)

        # FIX: the original asserted torch.prod(diff < 1e-5) == 1 without an
        # abs(), so any large *negative* deviation passed; compare absolute
        # differences instead.
        self.assertTrue(
            torch.all((gpu_add.F.cpu() - cpu_add.F).abs() < 1e-5).item())
        self.assertTrue(
            torch.all((gpu_mul.F.cpu() - cpu_mul.F).abs() < 1e-5).item())
        self.assertTrue(
            torch.all((gpu_cat.F.cpu() - cpu_cat.F).abs() < 1e-5).item())

        self.assertTrue(
            gradcheck(
                fn,
                (input.F, input_glob.F, OperationType.ADDITION,
                 input.coords_key, input_glob.coords_key, input.coords_man)))

        self.assertTrue(
            gradcheck(
                fn,
                (input.F, input_glob.F, OperationType.MULTIPLICATION,
                 input.coords_key, input_glob.coords_key, input.coords_man)))
# Example 18
    def test_gpu(self):
        """Generative conv-transpose on the GPU: forward and gradcheck."""
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return

        device = torch.device("cuda")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats.to(device), coordinates=coords.to(device))
        # Downsampling conv followed by a generative transposed conv.
        conv = MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D,
        ).double().to(device)
        conv_tr = MinkowskiGenerativeConvolutionTranspose(
            out_channels,
            in_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D,
        ).double().to(device)
        tr_input = conv(input)
        print(tr_input)
        output = conv_tr(tr_input)
        print(output)

        # Gradcheck through the raw transpose function.
        fn = MinkowskiConvolutionTransposeFunction()
        args = (tr_input.F, conv_tr.kernel, conv_tr.kernel_generator,
                conv_tr.convolution_mode, tr_input.coordinate_map_key,
                output.coordinate_map_key, tr_input.coordinate_manager)
        self.assertTrue(gradcheck(fn, args))
# Example 19
    def test_unpooling_gpu(self):
        """Pooling-transpose forward/backward on CPU, then on the GPU.

        Skipped entirely when CUDA is unavailable.
        """
        if not torch.cuda.is_available():
            return

        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        input = SparseTensor(feats, coords=coords)
        # Stride-2 conv to produce a coarser tensor, then transpose-pool it
        # back up.
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    dimension=D)
        conv = conv.double()
        unpool = MinkowskiPoolingTranspose(kernel_size=3,
                                           stride=2,
                                           dimension=D)
        input = conv(input)
        output = unpool(input)
        print(output)

        # Check backward
        fn = MinkowskiPoolingTransposeFunction()

        self.assertTrue(
            gradcheck(fn, (input.F, input.tensor_stride, unpool.stride,
                           unpool.kernel_size, unpool.dilation,
                           unpool.region_type_, unpool.region_offset_, False,
                           input.coords_key, None, input.coords_man)))

        device = torch.device('cuda')
        with torch.cuda.device(0):
            input = input.to(device)
            output = unpool(input)
            print(output)

        # Check backward
        # NOTE(review): the GPU check switches to MinkowskiAvgPoolingFunction
        # (with the flag flipped from False to True) instead of reusing the
        # transpose-pooling function above — looks like a copy-paste; confirm
        # this is intentional before relying on it.
        fn = MinkowskiAvgPoolingFunction()
        self.assertTrue(
            gradcheck(fn, (input.F, input.tensor_stride, unpool.stride,
                           unpool.kernel_size, unpool.dilation,
                           unpool.region_type_, unpool.region_offset_, True,
                           input.coords_key, None, input.coords_man)))
# Example 20
    def test(self):
        """Strided convolution: forward, stride bookkeeping, CPU/GPU
        agreement, gradcheck, and a leak smoke test."""
        print(f"{self.__class__.__name__}: test")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coordinates=coords)
        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    bias=True,
                                    dimension=D).double()
        output = conv(input)
        print(output)

        # A stride-2 conv must double the tensor stride.
        self.assertEqual(input.coordinate_map_key.get_tensor_stride(), [1, 1])
        self.assertEqual(output.coordinate_map_key.get_tensor_stride(), [2, 2])

        if torch.cuda.is_available():
            # Per-channel statistics should match between CPU and GPU runs.
            input_gpu = SparseTensor(feats, coordinates=coords, device="cuda")
            output_gpu = conv.cuda()(input_gpu)
            self.assertTrue(
                torch.allclose(output_gpu.F.var(0).cpu(), output.F.var(0)))
            self.assertTrue(
                torch.allclose(output_gpu.F.mean(0).cpu(), output.F.mean(0)))

        # Gradcheck through the raw convolution function on CPU.
        fn = MinkowskiConvolutionFunction()
        conv = conv.cpu()
        args = (input.F, conv.kernel, conv.kernel_generator,
                conv.convolution_mode, input.coordinate_map_key,
                output.coordinate_map_key, input.coordinate_manager)
        self.assertTrue(gradcheck(fn, args))

        # Repeated forward/backward to surface coordinate-manager leaks.
        for _ in range(LEAK_TEST_ITER):
            input = SparseTensor(feats, coordinates=coords)
            conv(input).F.sum().backward()
# Example 21
    def test_global_maxpool(self):
        """Global max pooling forward and gradcheck (CPU only)."""
        in_channels = 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiGlobalMaxPooling()
        print(pool(input))

        # Gradcheck through the raw global max-pooling function.
        fn = MinkowskiGlobalMaxPoolingFunction()
        args = (input.F, input.coords_key, None, input.coords_man)
        self.assertTrue(gradcheck(fn, args))
# Example 22
    def test_gpu(self):
        """GPU convolution in both DIRECT_GEMM and COPY_GEMM modes."""
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return
        in_channels, out_channels, D = 3, 2, 2
        coords, feats, labels = data_loader(in_channels, batch_size=20)
        feats = feats.double()
        feats.requires_grad_()
        device = torch.device("cuda")
        conv = MinkowskiConvolution(
            in_channels,
            out_channels,
            kernel_size=2,
            stride=1,
            bias=False,
            dimension=D,
        ).to(device).double()
        # Initialize context
        for conv_mode in (_C.ConvolutionMode.DIRECT_GEMM,
                          _C.ConvolutionMode.COPY_GEMM):
            conv.convolution_mode = conv_mode
            input = SparseTensor(feats, coordinates=coords, device=device)
            print(conv_mode, input.F.numel(), len(input), input)
            output = conv(input)
            print(output)

            # Backward: seed a one-hot gradient, then run a full gradcheck.
            fn = MinkowskiConvolutionFunction()
            grad = output.F.clone().zero_()
            grad[0] = 1
            output.F.backward(grad)

            args = (input.F, conv.kernel, conv.kernel_generator,
                    conv.convolution_mode, input.coordinate_map_key, None,
                    input.coordinate_manager)
            self.assertTrue(gradcheck(fn, args))
# Example 23
    def test_gpu(self):
        """Convolution: CPU reference forward, then GPU forward/backward."""
        print(f"{self.__class__.__name__}: test_gpu")
        if not torch.cuda.is_available():
            return
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()

        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    bias=True,
                                    dimension=D).double()
        print(conv)

        # CPU reference run.
        input = SparseTensor(feats, coordinates=coords)
        print(conv(input))

        # GPU run.
        device = torch.device("cuda")
        input = SparseTensor(feats.to(device), coordinates=coords.to(device))
        conv = conv.to(device)
        output = conv(input)
        print(output)

        # Backward: seed a one-hot gradient, then a full gradcheck.
        fn = MinkowskiConvolutionFunction()
        grad = output.F.clone().zero_()
        grad[0] = 1
        output.F.backward(grad)

        args = (input.F, conv.kernel, conv.kernel_generator,
                conv.convolution_mode, input.coordinate_map_key, None,
                input.coordinate_manager)
        self.assertTrue(gradcheck(fn, args))
# Example 24
    def test_inst_norm(self):
        """Instance norm forward + gradcheck on CPU."""
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        input = SparseTensor(feats, coords=coords)
        input.F.requires_grad_()
        norm = MinkowskiInstanceNorm(num_features=in_channels,
                                     dimension=D).double()
        print(norm(input))

        # Gradcheck through the raw instance-norm function.
        fn = MinkowskiInstanceNormFunction()
        args = (input.F, input.coords_key, None, input.coords_man)
        self.assertTrue(gradcheck(fn, args))
# Example 25
    def test(self):
        """Conv then generative conv-transpose; gradcheck the transpose."""
        print(f"{self.__class__.__name__}: test")
        in_channels, out_channels, D = 2, 3, 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coordinates=coords)

        # Initialize context
        conv = MinkowskiConvolution(in_channels,
                                    out_channels,
                                    kernel_size=3,
                                    stride=2,
                                    bias=True,
                                    dimension=D).double()
        conv_tr = MinkowskiGenerativeConvolutionTranspose(
            out_channels,
            in_channels,
            kernel_size=3,
            stride=2,
            bias=True,
            dimension=D).double()

        print("Initial input: ", input)
        input = conv(input)
        print("Conv output: ", input)

        output = conv_tr(input)
        print("Conv tr output: ", output)

        # Gradcheck through the raw transpose function.
        fn = MinkowskiConvolutionTransposeFunction()
        args = (input.F, conv_tr.kernel, conv_tr.kernel_generator,
                conv_tr.convolution_mode, input.coordinate_map_key,
                output.coordinate_map_key, input.coordinate_manager)
        self.assertTrue(gradcheck(fn, args))
# Example 26
    def test_inst_norm_gpu(self):
        """Instance norm on the GPU: forward + gradcheck."""
        # FIX: skip when CUDA is unavailable (guard was missing; the test
        # previously crashed on CUDA-less machines).
        if not torch.cuda.is_available():
            return

        in_channels = 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()

        device = torch.device('cuda')
        input = SparseTensor(feats, coords=coords).to(device)
        input.F.requires_grad_()
        norm = MinkowskiInstanceNorm(
            num_features=in_channels).to(device).double()

        out = norm(input)
        print(out)

        fn = MinkowskiInstanceNormFunction()
        self.assertTrue(
            gradcheck(fn, (input.F, GlobalPoolingMode.AUTO, input.coords_key,
                           None, input.coords_man)))
# Example 27
    def test_gpu(self):
        """Interpolation on the GPU: forward/backward, gradcheck, leak loop."""
        # FIX: skip when CUDA is unavailable; torch.cuda.DoubleTensor below
        # requires a CUDA device and previously crashed without one.
        if not torch.cuda.is_available():
            return

        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels, batch_size=2)
        feats = feats.double()
        # Query coordinates; first column presumably the batch index — confirm.
        tfield = torch.cuda.DoubleTensor(
            [
                [0, 0.1, 2.7],
                [0, 0.3, 2],
                [1, 1.5, 2.5],
            ],
        )
        feats.requires_grad_()
        input = SparseTensor(feats, coordinates=coords, device="cuda")
        interp = MinkowskiInterpolation()
        output = interp(input, tfield)
        print(input)
        print(output)

        output.sum().backward()
        # Check backward
        fn = MinkowskiInterpolationFunction()
        self.assertTrue(
            gradcheck(
                fn,
                (
                    input.F,
                    tfield,
                    input.coordinate_map_key,
                    input._manager,
                ),
            )
        )

        # Repeated runs to surface memory leaks.
        for i in range(LEAK_TEST_ITER):
            input = SparseTensor(feats, coordinates=coords, device="cuda")
            tfield = torch.cuda.DoubleTensor(
                [
                    [0, 0.1, 2.7],
                    [0, 0.3, 2],
                    [1, 1.5, 2.5],
                ],
            )
            output = interp(input, tfield)
            output.sum().backward()
# Example 28
    def test(self):
        """Interpolation with kernel-map return; backward + leak check."""
        in_channels, D = 2, 2
        coords, feats, labels = data_loader(in_channels, batch_size=2)
        feats = feats.double()
        # Query coordinates; first column presumably the batch index — confirm.
        tfield = torch.Tensor(
            [
                [0, 0.1, 2.7],
                [0, 0.3, 2],
                [1, 1.5, 2.5],
            ]
        ).double()
        feats.requires_grad_()
        input = SparseTensor(feats, coordinates=coords)
        interp = MinkowskiInterpolation(return_kernel_map=True, return_weights=False)
        output, (in_map, out_map) = interp(input, tfield)
        print(input)
        print(output)

        # Gradcheck through the raw interpolation function.
        output.sum().backward()
        fn = MinkowskiInterpolationFunction()
        args = (input.F, tfield, input.coordinate_map_key, input._manager)
        self.assertTrue(gradcheck(fn, args))

        # Repeated runs to surface memory leaks.
        for _ in range(LEAK_TEST_ITER):
            input = SparseTensor(feats, coordinates=coords)
            tfield = torch.DoubleTensor(
                [
                    [0, 0.1, 2.7],
                    [0, 0.3, 2],
                    [1, 1.5, 2.5],
                ],
            )
            output, _ = interp(input, tfield)
            output.sum().backward()
# Example 29
    def test_global_maxpool(self):
        """Global max pooling: forward + gradcheck, then CPU/GPU agreement."""
        in_channels = 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()
        feats.requires_grad_()
        input = SparseTensor(feats, coords=coords)
        pool = MinkowskiGlobalMaxPooling()
        output = pool(input)
        print(output)

        # Check backward
        fn = MinkowskiGlobalMaxPoolingFunction()
        self.assertTrue(
            gradcheck(fn, (input.F, input.coords_key, None, input.coords_man)))

        if torch.cuda.is_available():
            input_cuda = input.to(torch.device(0))
            # FIX: pool the CUDA tensor; the original called pool(input)
            # again, so the CPU output was compared against itself and
            # input_cuda was never used.
            output_cuda = pool(input_cuda)
            self.assertTrue(torch.allclose(output_cuda.F.cpu(), output.F))
# Example 30
    def test_inst_norm_gpu(self):
        """Instance norm on the GPU (coordinate-map-key API): forward + gradcheck."""
        # FIX: skip when CUDA is unavailable (guard was missing; the test
        # previously crashed on CUDA-less machines).
        if not torch.cuda.is_available():
            return

        in_channels = 2
        coords, feats, labels = data_loader(in_channels)
        feats = feats.double()

        device = torch.device("cuda")
        input = SparseTensor(feats, coords, device=device)
        input.F.requires_grad_()
        norm = MinkowskiInstanceNorm(num_features=in_channels).to(device).double()

        out = norm(input)
        print(out)

        fn = MinkowskiInstanceNormFunction()
        self.assertTrue(
            gradcheck(
                fn, (input.F, input.coordinate_map_key, None, input.coordinate_manager)
            )
        )