Example #1
0
    def slice(self):
        """Verify that slicing a sparse network output back onto the input
        TensorField preserves the point count, and that ``cat_slice``
        additionally concatenates the input features."""
        coords, colors, pcd = load_file("1.ply")
        voxel_size = 0.02
        feats = torch.from_numpy(colors).float()
        batch_coords = batched_coordinates(
            [coords / voxel_size], dtype=torch.float32
        )
        in_field = TensorField(feats, batch_coords)

        # MLP head -> sparsify -> strided conv -> transposed conv back up.
        layers = [
            MinkowskiLinear(3, 16),
            MinkowskiBatchNorm(16),
            MinkowskiReLU(),
            MinkowskiLinear(16, 32),
            MinkowskiBatchNorm(32),
            MinkowskiReLU(),
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(32, 64, kernel_size=3, stride=2, dimension=3),
            MinkowskiConvolutionTranspose(
                64, 32, kernel_size=3, stride=2, dimension=3
            ),
        ]
        model = nn.Sequential(*layers)

        out_tensor = model(in_field)

        # slice: one output row per input point, sparse feature width kept.
        out_field = out_tensor.slice(in_field)
        self.assertEqual(len(in_field), len(out_field))
        self.assertEqual(out_field.F.size(1), out_tensor.F.size(1))

        # cat_slice: output features concatenated with the input features.
        out_field = out_tensor.cat_slice(in_field)
        self.assertEqual(len(in_field), len(out_field))
        self.assertEqual(
            out_field.F.size(1), out_tensor.F.size(1) + in_field.F.size(1)
        )
Example #2
0
 def setUp(self):
     """Build a single-batch TensorField fixture from the sample cloud."""
     coords, colors, pcd = load_file("1.ply")
     voxel_size = 0.02
     feats = torch.from_numpy(colors).float()
     batch_coords = batched_coordinates([coords / voxel_size],
                                        dtype=torch.float32)
     self.tensor_field = TensorField(coordinates=batch_coords, features=feats)
Example #3
0
    def test_forward(self):
        """Benchmark DIRECT_GEMM vs COPY_GEMM backward passes over a grid of
        channel counts, batch sizes, and voxel sizes, then dump the per-config
        winner and timing gap to ``forward-speed.pkl``.

        Fixes over the previous version:
        - ``loss.backward()`` was called 10 times on the same graph; without
          ``retain_graph=True`` PyTorch frees the saved buffers after the
          first call and every later call raises RuntimeError.
        - input features go to ``device`` instead of the hard-coded GPU 0.
        - ``torch.cuda.synchronize()`` brackets the timed region so the
          wall-clock time covers the kernels, not only the async launch.
        - coordinate quantization is hoisted out of the mode loop (it does
          not depend on the convolution mode).
        """
        coords, colors, pcd = load_file("1.ply")
        device = "cuda"

        X = []
        Y = []
        W = []
        for IC in [3, 8, 16, 24, 32, 48, 64, 96, 128]:
            for OC in [3, 8, 16, 24, 32, 48, 64, 96, 128, 192, 256]:
                for batch_size in [1, 5, 10, 15, 20]:
                    for voxel_size in [0.2, 0.1, 0.075, 0.05, 0.025]:
                        # Quantized coordinates are identical for both modes.
                        dcoords = torch.from_numpy(
                            np.floor(coords / voxel_size)
                        ).int()
                        bcoords = batched_coordinates(
                            [dcoords for i in range(batch_size)]
                        )
                        min_times = []
                        for mode in [
                            _C.ConvolutionMode.DIRECT_GEMM,
                            _C.ConvolutionMode.COPY_GEMM,
                        ]:
                            min_time = float("inf")
                            in_feats = torch.rand(len(bcoords), IC).to(device)
                            sinput = SparseTensor(
                                in_feats, coordinates=bcoords, device=device
                            )
                            conv = MinkowskiConvolution(
                                in_channels=IC,
                                out_channels=OC,
                                kernel_size=3,
                                stride=2,
                                convolution_mode=mode,
                                dimension=3,
                            ).to(device)
                            soutput = conv(sinput)
                            loss = soutput.F.sum()
                            for i in range(10):
                                torch.cuda.synchronize()
                                stime = time.time()
                                # retain_graph keeps autograd buffers alive
                                # so backward can be re-timed on this graph.
                                loss.backward(retain_graph=True)
                                torch.cuda.synchronize()
                                min_time = min(time.time() - stime, min_time)
                            min_times.append(min_time)

                        X.append(
                            [
                                IC,
                                OC,
                                len(sinput),
                                len(soutput),
                            ]
                        )
                        Y.append(np.argmin(min_times))
                        W.append(np.abs(min_times[0] - min_times[1]))
                        print(X[-1], Y[-1], W[-1])

        import pickle as pkl

        with open("forward-speed.pkl", "wb") as f:
            pkl.dump([X, Y, W], f)
Example #4
0
    def test_sum(self):
        """Stress-test a nested MinkowskiStackSum network by repeatedly
        rebuilding the GPU input tensor and running a forward pass."""
        coords, colors, pcd = load_file("1.ply")
        device = "cuda"

        D = 3
        batch_size = 16
        voxel_size = 0.02
        channels = [3, 64, 128]
        dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
        bcoords = batched_coordinates([dcoords for i in range(batch_size)])
        in_feats = torch.rand(len(bcoords), 3).to(0)

        # NOTE: modules are constructed in the same order as before so the
        # random parameter initialization sequence is unchanged.
        # Top branch: stride-1 conv at full resolution.
        trunk = ME.MinkowskiConvolution(
            channels[0], channels[1], kernel_size=3, stride=1, dimension=3
        )
        # Bottom branch entry: stride-2 downsampling conv.
        downsample = ME.MinkowskiConvolution(
            channels[0], channels[1], kernel_size=3, stride=2, dimension=3
        )
        skip = nn.Identity()
        # Innermost branch: conv down, transposed conv, unpool back up.
        bottleneck = nn.Sequential(
            ME.MinkowskiConvolution(
                channels[1], channels[2], kernel_size=3, stride=2, dimension=3
            ),
            ME.MinkowskiConvolutionTranspose(
                channels[2], channels[1], kernel_size=3, stride=1, dimension=3
            ),
            ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D),
        )
        layer = ME.MinkowskiStackSum(
            trunk,
            nn.Sequential(
                downsample,
                ME.MinkowskiStackSum(skip, bottleneck),
                ME.MinkowskiPoolingTranspose(
                    kernel_size=2, stride=2, dimension=D
                ),
            ),
        ).cuda()

        for i in range(1000):
            torch.cuda.empty_cache()
            sinput = ME.SparseTensor(in_feats, coordinates=bcoords, device=device)
            layer(sinput)
Example #5
0
    def test_pcd(self):
        """A TensorField built from raw points keeps one row per input point
        and can be voxelized into a SparseTensor."""
        coords, colors, pcd = load_file("1.ply")
        voxel_size = 0.02
        feats = torch.from_numpy(colors)
        batch_coords = batched_coordinates([coords / voxel_size])
        field = TensorField(feats, batch_coords)

        self.assertTrue(len(field) == len(feats))
        voxelized = field.sparse()
        print(voxelized)
Example #6
0
    def stride_slice(self):
        """Slice a twice-strided (16x downsampled) sparse output back onto
        the originating TensorField."""
        coords, colors, pcd = load_file("1.ply")
        voxel_size = 0.02
        feats = torch.from_numpy(colors).float()
        batch_coords = batched_coordinates(
            [coords / voxel_size], dtype=torch.float32
        )
        field = TensorField(feats, batch_coords)

        model = nn.Sequential(
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(3, 8, kernel_size=3, stride=4, dimension=3),
            MinkowskiReLU(),
            MinkowskiConvolution(8, 16, kernel_size=3, stride=4, dimension=3),
        )

        out_tensor = model(field)
        out_field = out_tensor.slice(field)
    def setUp(self):
        """Download the sample point cloud if missing, then build a StackUNet
        and a SparseTensor input on the available device."""
        file_name, voxel_size = "1.ply", 0.02
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.net = StackUNet(3, 20, D=3).to(self.device)
        if not os.path.isfile(file_name):
            print('Downloading an example pointcloud...')
            urlretrieve("https://bit.ly/3c2iLhg", file_name)

        cloud = o3d.io.read_point_cloud(file_name)
        points = np.array(cloud.points)
        point_colors = np.array(cloud.colors)

        self.sinput = SparseTensor(
            features=torch.from_numpy(point_colors).float(),
            coordinates=batched_coordinates(
                [points / voxel_size], dtype=torch.float32
            ),
            device=self.device,
        )
 def test_decomposition(self):
     """Decomposing a batched SparseTensor yields one coordinate/feature
     pair per batch element."""
     coords, colors, pcd = load_file("1.ply")
     feats = torch.from_numpy(colors)
     for batch_size in [1, 5, 10, 20, 40]:
         for voxel_size in [0.02]:
             quantized = torch.from_numpy(np.floor(coords / voxel_size)).int()
             batch_coords = batched_coordinates(
                 [quantized for i in range(batch_size)])
             batch_feats = torch.cat([feats for b in range(batch_size)], 0)
             sinput = SparseTensor(batch_feats, batch_coords)
             dec_coords, dec_feats = (
                 sinput.decomposed_coordinates_and_features
             )
             print([len(c) for c in dec_coords])
             print([len(f) for f in dec_feats])
             self.assertEqual(len(dec_coords), batch_size)
             self.assertEqual(len(dec_feats), batch_size)
Example #9
0
    def test_network_device(self):
        """A TensorField placed on GPU 0 runs through a GPU-resident
        Minkowski network end to end."""
        coords, colors, pcd = load_file("1.ply")
        voxel_size = 0.02
        feats = torch.from_numpy(colors)
        batch_coords = batched_coordinates([coords / voxel_size])
        field = TensorField(feats, batch_coords, device=0).float()

        model = nn.Sequential(
            MinkowskiLinear(3, 16),
            MinkowskiBatchNorm(16),
            MinkowskiReLU(),
            MinkowskiLinear(16, 32),
            MinkowskiBatchNorm(32),
            MinkowskiReLU(),
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(32, 64, kernel_size=3, stride=2, dimension=3),
        ).to(0)

        print(model(field))
    def field_to_sparse(self):
        """``TensorField.sparse`` with an explicit coordinate map key produces
        as many rows as the network output at that key."""
        coords, colors, pcd = load_file("1.ply")
        voxel_size = 0.02
        feats = torch.from_numpy(colors).float()
        batch_coords = batched_coordinates(
            [coords / voxel_size], dtype=torch.float32
        )
        field = TensorField(feats, batch_coords)

        model = nn.Sequential(
            MinkowskiToSparseTensor(),
            MinkowskiConvolution(3, 8, kernel_size=3, stride=4, dimension=3),
            MinkowskiReLU(),
            MinkowskiConvolution(8, 16, kernel_size=3, stride=4, dimension=3),
        )

        out_tensor = model(field)
        sparse_from_field = field.sparse(
            coordinate_map_key=out_tensor.coordinate_map_key
        )
        self.assertTrue(len(sparse_from_field.F) == len(out_tensor))
    def test_decomposition_gpu(self):
        """GPU variant of the decomposition check; returns early without CUDA."""
        print(f"{self.__class__.__name__}: test_decomposition_gpu")
        if not torch.cuda.is_available():
            return

        coords, colors, pcd = load_file("1.ply")
        feats = torch.from_numpy(colors)

        for batch_size in [5, 10, 20, 40]:
            for voxel_size in [0.02]:
                quantized = torch.from_numpy(
                    np.floor(coords / voxel_size)
                ).int()
                batch_coords = batched_coordinates(
                    [quantized for i in range(batch_size)]
                )
                batch_feats = torch.cat([feats for b in range(batch_size)], 0)
                sinput = SparseTensor(batch_feats.to(0), batch_coords.to(0))
                dec_coords, dec_feats = (
                    sinput.decomposed_coordinates_and_features
                )
                print([len(c) for c in dec_coords])
                print([len(f) for f in dec_feats])
                self.assertEqual(len(dec_coords), batch_size)
                self.assertEqual(len(dec_feats), batch_size)