Exemplo n.º 1
0
    def slice(self):
        """Slice a network's sparse output back onto the input TensorField.

        Verifies that ``slice`` preserves the per-point count and feature
        width, and that ``cat_slice`` additionally concatenates the original
        field features onto the output features.
        """
        xyz, rgb, _pcd = load_file("1.ply")
        quantization = 0.02
        rgb = torch.from_numpy(rgb).float()
        coords_batched = batched_coordinates([xyz / quantization],
                                             dtype=torch.float32)
        field = TensorField(rgb, coords_batched)

        model = nn.Sequential(
            MinkowskiLinear(3, 16),
            MinkowskiBatchNorm(16),
            MinkowskiReLU(),
            MinkowskiLinear(16, 32),
            MinkowskiBatchNorm(32),
            MinkowskiReLU(),
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(32, 64, kernel_size=3, stride=2, dimension=3),
            MinkowskiConvolutionTranspose(
                64, 32, kernel_size=3, stride=2, dimension=3),
        )

        sparse_out = model(field)
        sliced = sparse_out.slice(field)
        self.assertEqual(len(field), len(sliced))
        self.assertEqual(sliced.F.size(1), sparse_out.F.size(1))
        # cat_slice appends the original field features to the network output.
        concatenated = sparse_out.cat_slice(field)
        self.assertEqual(len(field), len(concatenated))
        self.assertEqual(concatenated.F.size(1),
                         sparse_out.F.size(1) + field.F.size(1))
Exemplo n.º 2
0
 def setUp(self):
     """Build the shared TensorField fixture from the sample point cloud."""
     xyz, rgb, _pcd = load_file("1.ply")
     quantization = 0.02
     features = torch.from_numpy(rgb).float()
     coordinates = batched_coordinates([xyz / quantization],
                                       dtype=torch.float32)
     self.tensor_field = TensorField(coordinates=coordinates,
                                     features=features)
Exemplo n.º 3
0
    def test_forward(self):
        """Benchmark DIRECT_GEMM vs COPY_GEMM convolution backward passes.

        Sweeps input/output channel counts, batch sizes, and voxel sizes,
        records which mode is faster (``Y``) and by how much (``W``) for
        each configuration (``X``), and pickles the dataset to
        ``forward-speed.pkl`` for later analysis.

        Fixes over the original:
        * ``loss.backward()`` was called 10 times on the same autograd
          graph without ``retain_graph=True``; PyTorch frees the graph
          after the first call, so every subsequent iteration raised a
          RuntimeError.
        * input features were hard-coded to device 0 instead of using
          the ``device`` variable, for consistency with the rest of the
          method.
        """
        coords, colors, pcd = load_file("1.ply")
        device = "cuda"

        X = []  # per-configuration descriptors [IC, OC, n_in, n_out]
        Y = []  # index of the faster convolution mode (0 or 1)
        W = []  # absolute timing gap between the two modes
        for IC in [3, 8, 16, 24, 32, 48, 64, 96, 128]:
            for OC in [3, 8, 16, 24, 32, 48, 64, 96, 128, 192, 256]:
                for batch_size in [1, 5, 10, 15, 20]:
                    for voxel_size in [0.2, 0.1, 0.075, 0.05, 0.025]:
                        min_times = []
                        for mode in [
                            _C.ConvolutionMode.DIRECT_GEMM,
                            _C.ConvolutionMode.COPY_GEMM,
                        ]:
                            min_time = 100000
                            dcoords = torch.from_numpy(
                                np.floor(coords / voxel_size)
                            ).int()
                            bcoords = batched_coordinates(
                                [dcoords for i in range(batch_size)]
                            )
                            in_feats = torch.rand(len(bcoords), IC).to(device)
                            sinput = SparseTensor(
                                in_feats, coordinates=bcoords, device=device
                            )
                            conv = MinkowskiConvolution(
                                in_channels=IC,
                                out_channels=OC,
                                kernel_size=3,
                                stride=2,
                                convolution_mode=mode,
                                dimension=3,
                            ).to(device)
                            soutput = conv(sinput)
                            loss = soutput.F.sum()
                            for i in range(10):
                                stime = time.time()
                                # retain_graph keeps the autograd graph
                                # alive so backward can be timed repeatedly.
                                loss.backward(retain_graph=True)
                                min_time = min(time.time() - stime, min_time)
                            min_times.append(min_time)

                        X.append(
                            [
                                IC,
                                OC,
                                len(sinput),
                                len(soutput),
                            ]
                        )
                        Y.append(np.argmin(min_times))
                        W.append(np.abs(min_times[0] - min_times[1]))
                        print(X[-1], Y[-1], W[-1])

        import pickle as pkl

        with open("forward-speed.pkl", "wb") as f:
            pkl.dump([X, Y, W], f)
Exemplo n.º 4
0
    def test_sum(self):
        """Repeatedly run a nested MinkowskiStackSum network on the GPU.

        Builds the same two-branch stack-sum architecture as before, but
        with the nested pieces assigned to named locals for readability.
        """
        xyz, rgb, _pcd = load_file("1.ply")
        device = "cuda"

        D = 3
        batch_size = 16
        voxel_size = 0.02
        channels = [3, 64, 128]
        discrete = torch.from_numpy(np.floor(xyz / voxel_size)).int()
        bcoords = batched_coordinates([discrete for _ in range(batch_size)])
        in_feats = torch.rand(len(bcoords), 3).to(0)

        # Innermost branch: downsample, transpose-convolve, then unpool.
        deep_branch = nn.Sequential(
            ME.MinkowskiConvolution(
                channels[1],
                channels[2],
                kernel_size=3,
                stride=2,
                dimension=3,
            ),
            ME.MinkowskiConvolutionTranspose(
                channels[2],
                channels[1],
                kernel_size=3,
                stride=1,
                dimension=3,
            ),
            ME.MinkowskiPoolingTranspose(
                kernel_size=2, stride=2, dimension=D
            ),
        )
        # Inner stack-sum: identity skip + the deep branch.
        inner_sum = ME.MinkowskiStackSum(nn.Identity(), deep_branch)
        # Strided branch wrapping the inner stack-sum, unpooled at the end.
        strided_branch = nn.Sequential(
            ME.MinkowskiConvolution(
                channels[0],
                channels[1],
                kernel_size=3,
                stride=2,
                dimension=3,
            ),
            inner_sum,
            ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D),
        )
        layer = MinkowskiStackSum(
            ME.MinkowskiConvolution(
                channels[0],
                channels[1],
                kernel_size=3,
                stride=1,
                dimension=3,
            ),
            strided_branch,
        ).cuda()

        for _ in range(1000):
            torch.cuda.empty_cache()
            sinput = ME.SparseTensor(in_feats, coordinates=bcoords, device=device)
            layer(sinput)
Exemplo n.º 5
0
    def test_pcd(self):
        """A TensorField keeps one entry per input point; sparse() quantizes."""
        xyz, rgb, _pcd = load_file("1.ply")
        quantization = 0.02
        rgb = torch.from_numpy(rgb)
        coords_batched = batched_coordinates([xyz / quantization])
        field = TensorField(rgb, coords_batched)

        self.assertTrue(len(field) == len(rgb))
        print(field.sparse())
Exemplo n.º 6
0
    def test_conv(self):
        """Time the low-level _C GPU convolution forward pass.

        For each batch size / voxel size pair, rebuilds the coordinate
        manager 10 times and reports the fastest single forward call.
        """
        IC, OC = 3, 16
        coords, _colors, _pcd = load_file("1.ply")
        ksize = [3, 3, 3]
        kstride = [2, 2, 2]
        kdilation = [1, 1, 1]

        # Random kernel weights: (kernel volume, in channels, out channels).
        weights = torch.rand(np.prod(ksize), IC, OC).to(0)
        generator = KernelGenerator(
            kernel_size=ksize,
            stride=kstride,
            dilation=kdilation,
            expand_coordinates=False,
            dimension=3,
        )

        for batch_size in [1, 5, 10, 20, 40]:
            for vsize in [0.05, 0.035, 0.02]:
                min_time = 100000

                discrete = torch.from_numpy(np.floor(coords / vsize)).int()
                bcoords = batched_coordinates(
                    [discrete for _ in range(batch_size)])

                for _ in range(10):
                    manager = _C.CoordinateMapManagerGPU_c10()

                    # Insert all batched coordinates in one call.
                    in_key, (unique_map, inverse_map) = manager.insert_and_map(
                        bcoords.to(0), [1, 1, 1], "")
                    feats = torch.rand(manager.size(in_key), IC).to(0)
                    out_key = _C.CoordinateMapKey(4)

                    stime = time.time()
                    _C.ConvolutionForwardGPU(
                        feats,
                        weights,
                        generator.kernel_size,
                        generator.kernel_stride,
                        generator.kernel_dilation,
                        generator.region_type,
                        generator.region_offsets,
                        generator.expand_coordinates,
                        in_key,
                        out_key,
                        manager,
                    )
                    min_time = min(time.time() - stime, min_time)

                print(
                    f"{batch_size}\t{manager.size(in_key)}\t{manager.size(out_key)}\t{min_time}"
                )
Exemplo n.º 7
0
    def stride_slice(self):
        """Slice a twice-strided sparse output back onto the input field."""
        xyz, rgb, _pcd = load_file("1.ply")
        quantization = 0.02
        rgb = torch.from_numpy(rgb).float()
        coords_batched = batched_coordinates([xyz / quantization],
                                             dtype=torch.float32)
        field = TensorField(rgb, coords_batched)

        model = nn.Sequential(
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(3, 8, kernel_size=3, stride=4, dimension=3),
            MinkowskiReLU(),
            MinkowskiConvolution(8, 16, kernel_size=3, stride=4, dimension=3),
        )

        sparse_out = model(field)
        sparse_out.slice(field)
Exemplo n.º 8
0
 def test_decomposition(self):
     """decomposed_coordinates_and_features yields one entry per batch item."""
     xyz, rgb, _pcd = load_file("1.ply")
     rgb = torch.from_numpy(rgb)
     for repeat in [1, 5, 10, 20, 40]:
         for quantization in [0.02]:
             discrete = torch.from_numpy(np.floor(xyz / quantization)).int()
             coords_batched = batched_coordinates(
                 [discrete for _ in range(repeat)])
             features = torch.cat([rgb for _ in range(repeat)], 0)
             stensor = SparseTensor(features, coords_batched)
             per_batch_coords, per_batch_feats = (
                 stensor.decomposed_coordinates_and_features
             )
             print([len(c) for c in per_batch_coords])
             print([len(f) for f in per_batch_feats])
             self.assertEqual(len(per_batch_coords), repeat)
             self.assertEqual(len(per_batch_feats), repeat)
Exemplo n.º 9
0
    def test_network_device(self):
        """Run a TensorField through a network with both placed on device 0."""
        xyz, rgb, _pcd = load_file("1.ply")
        quantization = 0.02
        rgb = torch.from_numpy(rgb)
        coords_batched = batched_coordinates([xyz / quantization])
        field = TensorField(rgb, coords_batched, device=0).float()

        model = nn.Sequential(
            MinkowskiLinear(3, 16),
            MinkowskiBatchNorm(16),
            MinkowskiReLU(),
            MinkowskiLinear(16, 32),
            MinkowskiBatchNorm(32),
            MinkowskiReLU(),
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(32, 64, kernel_size=3, stride=2, dimension=3),
        ).to(0)

        print(model(field))
Exemplo n.º 10
0
    def field_to_sparse(self):
        """sparse() with a network's coordinate map key matches its output size."""
        xyz, rgb, _pcd = load_file("1.ply")
        quantization = 0.02
        rgb = torch.from_numpy(rgb).float()
        coords_batched = batched_coordinates([xyz / quantization],
                                             dtype=torch.float32)
        field = TensorField(rgb, coords_batched)

        model = nn.Sequential(
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(3, 8, kernel_size=3, stride=4, dimension=3),
            MinkowskiReLU(),
            MinkowskiConvolution(8, 16, kernel_size=3, stride=4, dimension=3),
        )

        sparse_out = model(field)
        # Re-sparsify the field onto the network output's coordinate map.
        resparsified = field.sparse(
            coordinate_map_key=sparse_out.coordinate_map_key)
        self.assertTrue(len(resparsified.F) == len(sparse_out))
Exemplo n.º 11
0
    def test_decomposition_gpu(self):
        """GPU variant of the decomposition check; no-op without CUDA."""
        print(f"{self.__class__.__name__}: test_decomposition_gpu")
        if not torch.cuda.is_available():
            return

        xyz, rgb, _pcd = load_file("1.ply")
        rgb = torch.from_numpy(rgb)

        for repeat in [5, 10, 20, 40]:
            for quantization in [0.02]:
                discrete = torch.from_numpy(np.floor(xyz / quantization)).int()
                coords_batched = batched_coordinates(
                    [discrete for _ in range(repeat)])
                features = torch.cat([rgb for _ in range(repeat)], 0)
                stensor = SparseTensor(features.to(0), coords_batched.to(0))
                per_batch_coords, per_batch_feats = (
                    stensor.decomposed_coordinates_and_features
                )
                print([len(c) for c in per_batch_coords])
                print([len(f) for f in per_batch_feats])
                self.assertEqual(len(per_batch_coords), repeat)
                self.assertEqual(len(per_batch_feats), repeat)