Ejemplo n.º 1
0
    def slice(self):
        """Projecting a network's sparse output back onto a TensorField
        must preserve the point count, and ``cat_slice`` must additionally
        concatenate the field's own features."""
        coords, feats, _ = load_file("1.ply")
        quantization = 0.02
        features = torch.from_numpy(feats).float()
        batch_coords = batched_coordinates([coords / quantization],
                                           dtype=torch.float32)
        field = TensorField(features, batch_coords)

        model = nn.Sequential(
            MinkowskiLinear(3, 16),
            MinkowskiBatchNorm(16),
            MinkowskiReLU(),
            MinkowskiLinear(16, 32),
            MinkowskiBatchNorm(32),
            MinkowskiReLU(),
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(32, 64, kernel_size=3, stride=2, dimension=3),
            MinkowskiConvolutionTranspose(
                64, 32, kernel_size=3, stride=2, dimension=3),
        )

        sparse_out = model(field)

        # slice: one output row per original field point, same feature width
        sliced = sparse_out.slice(field)
        self.assertEqual(len(field), len(sliced))
        self.assertEqual(sliced.F.size(1), sparse_out.F.size(1))

        # cat_slice: same projection with the input features appended
        sliced_cat = sparse_out.cat_slice(field)
        self.assertEqual(len(field), len(sliced_cat))
        self.assertEqual(sliced_cat.F.size(1),
                         (sparse_out.F.size(1) + field.F.size(1)))
Ejemplo n.º 2
0
    def test_pcd(self):
        """A TensorField built from a raw point cloud keeps one entry per
        input point and can be quantized into a SparseTensor."""
        coords, feats, _ = load_file("1.ply")
        resolution = 0.02
        features = torch.from_numpy(feats)
        batched = batched_coordinates([coords / resolution])
        field = TensorField(features, batched)

        # one field entry per input point
        self.assertTrue(len(field) == len(features))
        print(field.sparse())
Ejemplo n.º 3
0
    def test_conv(self):
        """Benchmark the low-level GPU convolution forward pass (raw ``_C``
        extension API) across several batch sizes and voxel resolutions.

        NOTE(review): requires a CUDA device — tensors are moved with
        ``.to(0)`` and a GPU coordinate-map manager is constructed.
        Prints a tab-separated timing line per configuration; no asserts.
        """
        IC, OC = 3, 16  # input / output channel counts
        coords, colors, pcd = load_file("1.ply")
        kernel_size = [3, 3, 3]
        kernel_stride = [2, 2, 2]
        kernel_dilation = [1, 1, 1]

        # size, in, out
        kernel = torch.rand(np.prod(kernel_size), IC, OC).to(0)
        kernel_generator = KernelGenerator(
            kernel_size=kernel_size,
            stride=kernel_stride,
            dilation=kernel_dilation,
            expand_coordinates=False,
            dimension=3,
        )

        for batch_size in [1, 5, 10, 20, 40]:
            for voxel_size in [0.05, 0.035, 0.02]:
                min_time = 100000  # sentinel; replaced by the first timing

                # quantize the cloud and replicate it batch_size times
                dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
                bcoords = batched_coordinates(
                    [dcoords for i in range(batch_size)])

                # best-of-10 to reduce timing noise; a fresh manager per
                # iteration so coordinate-map setup is not reused
                for i in range(10):
                    manager = _C.CoordinateMapManagerGPU_c10()

                    # batch insert
                    in_key, (unique_map, inverse_map) = manager.insert_and_map(
                        bcoords.to(0), [1, 1, 1], "")
                    in_feats = torch.rand(manager.size(in_key), IC).to(0)
                    out_key = _C.CoordinateMapKey(4)

                    # time only the forward call, not the setup above
                    stime = time.time()
                    out_features = _C.ConvolutionForwardGPU(
                        in_feats,
                        kernel,
                        kernel_generator.kernel_size,
                        kernel_generator.kernel_stride,
                        kernel_generator.kernel_dilation,
                        kernel_generator.region_type,
                        kernel_generator.region_offsets,
                        kernel_generator.expand_coordinates,
                        in_key,
                        out_key,
                        manager,
                    )
                    min_time = min(time.time() - stime, min_time)

                print(
                    f"{batch_size}\t{manager.size(in_key)}\t{manager.size(out_key)}\t{min_time}"
                )
Ejemplo n.º 4
0
 def test_decomposition(self):
     """Decomposing a batched SparseTensor must yield exactly one
     coordinate list and one feature list per batch element."""
     coords, feats, _ = load_file("1.ply")
     features = torch.from_numpy(feats)
     for batch_size in [1, 5, 10, 20, 40]:
         for voxel_size in [0.02]:
             # quantize once, then replicate the sample batch_size times
             quantized = torch.from_numpy(np.floor(coords / voxel_size)).int()
             batched = batched_coordinates([quantized] * batch_size)
             stacked = torch.cat([features] * batch_size, 0)
             stensor = SparseTensor(stacked, batched)
             per_coords, per_feats = (
                 stensor.decomposed_coordinates_and_features)
             print([len(c) for c in per_coords])
             print([len(f) for f in per_feats])
             self.assertEqual(len(per_coords), batch_size)
             self.assertEqual(len(per_feats), batch_size)
Ejemplo n.º 5
0
    def test_network_device(self):
        """Smoke test: run a TensorField through a small network with both
        the field and the layers placed on GPU 0."""
        coords, feats, _ = load_file("1.ply")
        resolution = 0.02
        features = torch.from_numpy(feats)
        batched = batched_coordinates([coords / resolution])
        field = TensorField(features, batched, device=0).float()

        # same layer stack as the CPU tests, moved to the GPU
        model = nn.Sequential(
            MinkowskiLinear(3, 16),
            MinkowskiBatchNorm(16),
            MinkowskiReLU(),
            MinkowskiLinear(16, 32),
            MinkowskiBatchNorm(32),
            MinkowskiReLU(),
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(32, 64, kernel_size=3, stride=2, dimension=3),
        ).to(0)

        print(model(field))