def get_dataset(conv_type, task):
    """Build a mock dataset matching the given convolution format and task.

    Args:
        conv_type: convolution format, case-insensitive ("dense", "sparse",
            or anything else for the geometric fallback).
        task: "registration" returns pair datasets; "object_detection"
            enables bounding boxes; anything else yields plain datasets.

    Returns:
        A mock dataset instance suited to the requested format and task.
    """
    features = 2
    include_box = task == "object_detection"
    # Normalize once instead of calling .lower() in every branch below.
    conv = conv_type.lower()

    # Dense models consume fewer, larger samples.
    # (The original code also re-assigned num_points = 2048 a second time in
    # the non-registration dense branch; that dead store is removed here.)
    if conv == "dense":
        num_points = 2048
        batch_size = 1
    else:
        num_points = 1024
        batch_size = 2

    if task == "registration":
        if conv == "dense":
            return PairMockDataset(features, num_points=num_points, batch_size=batch_size)
        if conv == "sparse":
            # Sparse convolutions need quantized coordinates plus XYZ features.
            tr = Compose([XYZFeature(True, True, True), GridSampling3D(size=0.01, quantize_coords=True, mode="last")])
            return PairMockDatasetGeometric(features, transform=tr, num_points=num_points, batch_size=batch_size)
        return PairMockDatasetGeometric(features, batch_size=batch_size)

    if conv == "dense":
        return MockDataset(features, num_points=num_points, include_box=include_box, batch_size=batch_size)
    if conv == "sparse":
        return MockDatasetGeometric(
            features,
            include_box=include_box,
            transform=GridSampling3D(size=0.01, quantize_coords=True, mode="last"),
            num_points=num_points,
            batch_size=batch_size,
        )
    return MockDatasetGeometric(features, batch_size=batch_size)
# Example #2 (score: 0)
    def test_minkowski(self):
        """Minkowski encoder: module counts and output width, with and without an MLP head."""
        from torch_points3d.applications.minkowski import Minkowski

        def _forward_or_report(net, sample, expected_dim=None):
            # Print the failing architecture before re-raising, to ease debugging.
            try:
                out = net.forward(sample)
                if expected_dim is not None:
                    self.assertEqual(out.x.shape[1], expected_dim)
                return out
            except Exception as e:
                print("Model failing:")
                print(net)
                raise e

        input_nc, num_layers, in_feat = 3, 4, 16

        # Case 1: bare encoder -> no MLP head, output width is 8 * in_feat.
        model = Minkowski(
            architecture="encoder", input_nc=input_nc, in_feat=in_feat, num_layers=num_layers, config=None,
        )
        dataset = MockDatasetGeometric(
            input_nc, transform=GridSampling3D(0.01, quantize_coords=True), num_points=128
        )
        self.assertEqual(len(model._modules["down_modules"]), num_layers)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertFalse(model.has_mlp_head)
        self.assertEqual(model.output_nc, 8 * in_feat)
        # Output-width check on the data itself was disabled in the original test.
        _forward_or_report(model, dataset[0])

        # Case 2: output_nc requested -> an MLP head maps features to output_nc.
        in_feat, output_nc, grid_sampling = 32, 5, 0.02
        model = Minkowski(
            architecture="encoder",
            input_nc=input_nc,
            output_nc=output_nc,
            in_feat=in_feat,
            in_grid_size=grid_sampling,
            num_layers=num_layers,
            config=None,
        )
        dataset = MockDatasetGeometric(
            input_nc, transform=GridSampling3D(0.01, quantize_coords=True), num_points=128
        )
        self.assertEqual(len(model._modules["down_modules"]), num_layers)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertTrue(model.has_mlp_head)
        self.assertEqual(model.output_nc, output_nc)
        _forward_or_report(model, dataset[0], expected_dim=output_nc)
# Example #3 (score: 0)
    def test_kpconv(self):
        """KPConv unet: module counts and output width, with and without an MLP head."""
        from torch_points3d.applications.kpconv import KPConv

        def _forward_or_report(net, sample, expected_dim):
            # Print the failing architecture before re-raising, to ease debugging.
            try:
                out = net.forward(sample)
                self.assertEqual(out.x.shape[1], expected_dim)
                return out
            except Exception as e:
                print("Model failing:")
                print(net)
                raise e

        input_nc, num_layers, grid_sampling, in_feat = 3, 4, 0.02, 32

        # Case 1: no output_nc -> no MLP head, the unet outputs in_feat channels.
        model = KPConv(
            architecture="unet",
            input_nc=input_nc,
            in_feat=in_feat,
            in_grid_size=grid_sampling,
            num_layers=num_layers,
            config=None,
        )
        # input_nc + 1: the mock dataset provides one extra feature channel —
        # presumably the constant "ones" feature KPConv consumes; confirm
        # against MockDatasetGeometric.
        dataset = MockDatasetGeometric(input_nc + 1, transform=GridSampling3D(0.01), num_points=128)
        self.assertEqual(len(model._modules["down_modules"]), num_layers + 1)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertEqual(len(model._modules["up_modules"]), 4)
        self.assertFalse(model.has_mlp_head)
        self.assertEqual(model.output_nc, in_feat)
        _forward_or_report(model, dataset[0], in_feat)

        # Case 2: output_nc requested -> an MLP head maps features to output_nc.
        output_nc = 5
        model = KPConv(
            architecture="unet",
            input_nc=input_nc,
            output_nc=output_nc,
            in_feat=in_feat,
            in_grid_size=grid_sampling,
            num_layers=num_layers,
            config=None,
        )
        dataset = MockDatasetGeometric(input_nc + 1, transform=GridSampling3D(0.01), num_points=128)
        self.assertEqual(len(model._modules["down_modules"]), num_layers + 1)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertEqual(len(model._modules["up_modules"]), 4)
        self.assertTrue(model.has_mlp_head)
        self.assertEqual(model.output_nc, output_nc)
        _forward_or_report(model, dataset[0], output_nc)
    def test_multiscaleTransforms(self):
        """MultiScaleTransform should attach per-scale sampled clouds, neighbour
        indices and upsampling maps consistent with the individual operators."""
        # Three scales; a None sampler at scale 1 reuses the previous cloud
        # (verified by the ms[0]/ms[1] position equality below).
        samplers = [GridSampling3D(0.25), None, GridSampling3D(0.5)]
        search = [
            RadiusNeighbourFinder(0.5, 100,
                                  ConvolutionFormat.PARTIAL_DENSE.value),
            RadiusNeighbourFinder(0.5, 150,
                                  ConvolutionFormat.PARTIAL_DENSE.value),
            RadiusNeighbourFinder(1, 200,
                                  ConvolutionFormat.PARTIAL_DENSE.value),
        ]
        upsampler = [KNNInterpolate(1), KNNInterpolate(1)]

        # Regular 10x10 planar grid in the unit square (z = 0), features all ones.
        N = 10
        x = np.linspace(0, 1, N)
        y = np.linspace(0, 1, N)
        xv, yv = np.meshgrid(x, y)

        pos = torch.tensor([xv.flatten(), yv.flatten(), np.zeros(N * N)]).T
        x = torch.ones_like(pos)
        d = Data(pos=pos, x=x).contiguous()
        ms_transform = MultiScaleTransform({
            "sampler": samplers,
            "neighbour_finder": search,
            "upsample_op": upsampler
        })

        transformed = ms_transform(d.clone())
        # The top-level data must be left untouched by the transform.
        npt.assert_almost_equal(transformed.x.numpy(), x.numpy())
        npt.assert_almost_equal(transformed.pos.numpy(), pos.numpy())

        ms = transformed.multiscale
        # Scale 1 has no sampler, so it shares scale 0's positions.
        npt.assert_almost_equal(ms[0].pos.numpy(), ms[1].pos.numpy())
        npt.assert_almost_equal(ms[0].pos.numpy(),
                                samplers[0](d.clone()).pos.numpy())
        # Scale 2 is sampled from scale 0's cloud, not from the raw input.
        npt.assert_almost_equal(ms[2].pos.numpy(),
                                samplers[2](ms[0].clone()).pos.numpy())

        # __inc__ drives index offsetting when batching; neighbour indices must
        # be offset by the support cloud size.
        self.assertEqual(ms[0].__inc__("idx_neighboors", 0), pos.shape[0])
        # Recompute scale-0 neighbourhoods directly and compare as sets, since
        # neighbour ordering within a radius query is not asserted here.
        idx = search[0](
            d.pos,
            ms[0].pos,
            torch.zeros((d.pos.shape[0]), dtype=torch.long),
            torch.zeros((ms[0].pos.shape[0]), dtype=torch.long),
        )
        for i in range(len(ms[0].idx_neighboors)):
            self.assertEqual(set(ms[0].idx_neighboors[i].tolist()),
                             set(idx[i].tolist()))
        # Max-neighbour caps come from the RadiusNeighbourFinder settings above.
        self.assertEqual(ms[1].idx_neighboors.shape[1], 150)
        self.assertEqual(ms[2].idx_neighboors.shape[1], 200)

        # Upsampling maps walk back down the pyramid toward the input cloud.
        upsample = transformed.upsample
        self.assertEqual(upsample[0].num_nodes, ms[1].num_nodes)
        self.assertEqual(upsample[1].num_nodes, pos.shape[0])
        self.assertEqual(upsample[1].x_idx.max(), ms[0].num_nodes - 1)
        self.assertEqual(upsample[1].y_idx.max(), pos.shape[0] - 1)
        self.assertEqual(upsample[1].__inc__("x_idx", 0), ms[0].num_nodes)
        self.assertEqual(upsample[1].__inc__("y_idx", 0), pos.shape[0])
# Example #5 (score: 0)
    def test_sparseconv3d(self):
        """SparseConv3d unet: module counts and output width, with and without
        an MLP head.

        Bug fix: the first forward pass used to only ``print(e)`` on failure
        instead of re-raising, silently swallowing a broken model; it now
        re-raises like every sibling test in this file.
        """
        from torch_points3d.applications.sparseconv3d import SparseConv3d

        # Case 1: no output_nc -> no MLP head. Expected width is 3 * in_feat —
        # presumably from skip-connection concatenation in the unet; confirm
        # against the SparseConv3d architecture.
        input_nc = 3
        num_layers = 4
        in_feat = 32
        out_feat = in_feat * 3
        model = SparseConv3d(
            architecture="unet", input_nc=input_nc, in_feat=in_feat, num_layers=num_layers, config=None,
        )
        dataset = MockDatasetGeometric(input_nc, transform=GridSampling3D(0.01, quantize_coords=True), num_points=128)
        self.assertEqual(len(model._modules["down_modules"]), num_layers + 1)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertEqual(len(model._modules["up_modules"]), 4 + 1)
        self.assertFalse(model.has_mlp_head)
        self.assertEqual(model.output_nc, out_feat)

        try:
            data_out = model.forward(dataset[0])
            self.assertEqual(data_out.x.shape[1], out_feat)
        except Exception as e:
            print("Model failing:")
            print(model)
            # Previously `print(e)` with no raise: the test passed even when
            # the forward pass blew up. Re-raise so failures surface.
            raise e

        # Case 2: output_nc requested -> an MLP head maps features to output_nc.
        input_nc = 3
        num_layers = 4

        output_nc = 5
        model = SparseConv3d(
            architecture="unet", input_nc=input_nc, output_nc=output_nc, num_layers=num_layers, config=None,
        )
        dataset = MockDatasetGeometric(input_nc, transform=GridSampling3D(0.01, quantize_coords=True), num_points=128)
        self.assertEqual(len(model._modules["down_modules"]), num_layers + 1)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertEqual(len(model._modules["up_modules"]), 4 + 1)
        self.assertTrue(model.has_mlp_head)
        self.assertEqual(model.output_nc, output_nc)

        try:
            data_out = model.forward(dataset[0])
            self.assertEqual(data_out.x.shape[1], output_nc)
        except Exception as e:
            print("Model failing:")
            print(model)
            raise e
# Example #6 (score: 0)
    def test_votenet_backbones(self):
        """Run VoteNet with a KPConv backbone on the fixed ScanNet test config
        and check that a loss is produced and every output head has the
        expected shape."""
        from torch_points3d.applications.votenet import VoteNet

        # Object-detection config shipped with the test fixtures.
        cfg = OmegaConf.load(
            os.path.join(DIR_PATH,
                         "data/scannet-fixed/config_object_detection.yaml"))
        config_data = cfg.data
        # NOTE(review): presumably switches the dataset into a lightweight
        # test mode — confirm against ScannetDataset.
        config_data.is_test = True
        dataset = ScannetDataset(config_data)
        model = VoteNet(
            original=False,
            backbone="kpconv",
            input_nc=dataset.feature_dimension,
            num_classes=dataset.num_classes,
            mean_size_arr=dataset.mean_size_arr,
            compute_loss=True,
            in_feat=4,
        )

        dataset.create_dataloaders(model,
                                   batch_size=2,
                                   shuffle=True,
                                   num_workers=0,
                                   precompute_multi_scale=False)

        # Pull a single training batch and coarsen it before the forward pass.
        train_loader = dataset.train_dataloader
        data = next(iter(train_loader))
        data = GridSampling3D(0.1)(data)
        # for key in data.keys:
        #    print(key, data[key].shape, data[key].dtype)
        model.verify_data(data)
        model.forward(data)

        # With compute_loss=True, forward is expected to populate model.loss.
        self.assertEqual(hasattr(model, "loss"), True)

        # Expected per-head output shapes for this config (batch of 2).
        attrs_test = {
            "center": [2, 256, 3],
            "heading_residuals": [2, 256, 1],
            "heading_residuals_normalized": [2, 256, 1],
            "heading_scores": [2, 256, 1],
            "object_assignment": [2, 256],
            "objectness_label": [2, 256],
            "objectness_mask": [2, 256],
            "objectness_scores": [2, 256, 2],
            "sampled_votes": [2, 256, 3],
            "seed_inds": [2048],
            "seed_pos": [2, 1024, 3],
            "seed_votes": [2, 1024, 3],
            "sem_cls_scores": [2, 256, 20],
            "size_residuals_normalized": [2, 256, 18, 3],
            "size_scores": [2, 256, 18],
        }

        # Every head must exist on the output object with the exact shape above.
        output = model.output
        for k, v in attrs_test.items():
            self.assertEqual(hasattr(output, k), True)
            self.assertEqual(getattr(output, k).shape, torch.Size(v))
# Example #7 (score: 0)
 def test_registration_from_pretrained(self):
     """Load the pretrained 3DMatch registration model and run one forward pass on mock data."""
     model = PretainedRegistry.from_pretrained("minkowski-registration-3dmatch", download=True)
     # Sparse input: quantized coordinates via grid sampling, single feature channel.
     dataset = MockDatasetGeometric(1,
                                    transform=GridSampling3D(0.01, quantize_coords=True),
                                    num_points=128)
     model.set_input(dataset[0], device="cpu")
     model.forward(dataset[0])
# Example #8 (score: 0)
    def __init__(self, option, model_type, dataset, modules):
        """Build the PointGroup model: a Minkowski backbone plus semantic,
        offset and cluster-scoring heads.

        Args:
            option: model configuration node; reads "backbone", "scorer_type",
                "cluster_voxel_size", "scorer_unet" and "scorer_encoder".
            model_type: not used in this constructor (kept for the factory signature).
            dataset: provides feature_dimension, num_classes and stuff_classes.
            modules: not used in this constructor (kept for the factory signature).
        """
        super(PointGroup, self).__init__(option)
        # Shared feature extractor; architecture defaults to a U-Net.
        backbone_options = option.get("backbone", {"architecture": "unet"})
        self.Backbone = Minkowski(
            backbone_options.get("architecture", "unet"),
            input_nc=dataset.feature_dimension,
            num_layers=4,
            config=backbone_options.get("config", {}),
        )

        # Which scoring path is used at runtime: "encoder" (default), unet or MLP.
        self._scorer_type = option.get("scorer_type", "encoder")
        # Optional re-voxelisation of candidate clusters before scoring;
        # a falsy cluster_voxel_size disables it.
        cluster_voxel_size = option.get("cluster_voxel_size", 0.05)
        if cluster_voxel_size:
            self._voxelizer = GridSampling3D(cluster_voxel_size,
                                             quantize_coords=True,
                                             mode="mean")
        else:
            self._voxelizer = None
        # All three scorer variants are built; _scorer_type selects among them.
        self.ScorerUnet = Minkowski("unet",
                                    input_nc=self.Backbone.output_nc,
                                    num_layers=4,
                                    config=option.scorer_unet)
        self.ScorerEncoder = Minkowski("encoder",
                                       input_nc=self.Backbone.output_nc,
                                       num_layers=4,
                                       config=option.scorer_encoder)
        self.ScorerMLP = MLP([
            self.Backbone.output_nc, self.Backbone.output_nc,
            self.ScorerUnet.output_nc
        ])
        # Maps scorer features to a single sigmoid score in [0, 1] per cluster.
        self.ScorerHead = Seq().append(
            torch.nn.Linear(self.ScorerUnet.output_nc,
                            1)).append(torch.nn.Sigmoid())

        # Per-point 3D offset regression head (PointGroup predicts offsets
        # toward instance centroids — confirm against the loss computation).
        self.Offset = Seq().append(
            MLP([self.Backbone.output_nc, self.Backbone.output_nc],
                bias=False))
        self.Offset.append(torch.nn.Linear(self.Backbone.output_nc, 3))

        # Per-point semantic classification head emitting log-probabilities.
        self.Semantic = (Seq().append(
            MLP([self.Backbone.output_nc, self.Backbone.output_nc],
                bias=False)).append(
                    torch.nn.Linear(self.Backbone.output_nc,
                                    dataset.num_classes)).append(
                                        torch.nn.LogSoftmax(dim=-1)))
        self.loss_names = [
            "loss", "offset_norm_loss", "offset_dir_loss", "semantic_loss",
            "score_loss"
        ]
        # "Stuff" classes plus the ignore label, stored as one tensor —
        # presumably used later to exclude these labels from instance
        # grouping; verify against the clustering step.
        stuff_classes = dataset.stuff_classes
        if is_list(stuff_classes):
            stuff_classes = torch.Tensor(stuff_classes).long()
        self._stuff_classes = torch.cat(
            [torch.tensor([IGNORE_LABEL]), stuff_classes])
    def test_precompute(self):
        """KNN(1) precompute: near-duplicate points map to the same support index."""
        # Five points; the last two (indices 3 and 4) are 0.1 apart, so they
        # fall in the same unit voxel and share a nearest neighbour.
        positions = torch.tensor([[1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 0], [0.1, 0, 0]])
        feats = torch.tensor([0, 0, 0, 0, 1]).unsqueeze(-1)
        support = Data(x=feats, pos=positions)

        query = GridSampling3D(1)(support.clone())

        up = KNNInterpolate(1).precompute(query, support)
        self.assertEqual(up.num_nodes, 5)
        self.assertEqual(up.x_idx[4], up.x_idx[3])
# Example #10 (score: 0)
 def get_dataset(conv_type, task):
     """Return a mock dataset for the given convolution format and task.

     "registration" yields pair datasets; dense formats use 2048 points,
     sparse formats use 1024 quantized points, and anything else falls back
     to a plain geometric mock dataset.
     """
     features = 2
     fmt = conv_type.lower()

     if task == "registration":
         if fmt == "dense":
             return PairMockDataset(features, num_points=2048)
         if fmt == "sparse":
             # Sparse convolutions need quantized coordinates plus XYZ features.
             tr = Compose(
                 [XYZFeature(True, True, True), GridSampling3D(size=0.01, quantize_coords=True, mode="last")]
             )
             return PairMockDatasetGeometric(features, transform=tr, num_points=1024)
         return PairMockDatasetGeometric(features)

     if fmt == "dense":
         return MockDataset(features, num_points=2048)
     if fmt == "sparse":
         return MockDatasetGeometric(
             features,
             transform=GridSampling3D(size=0.01, quantize_coords=True, mode="last"),
             num_points=1024,
         )
     return MockDatasetGeometric(features)
# Example #11 (score: 0)
 def test_siamese_minkowski(self):
     """MinkUNet_Fragment: one train step should leave gradients on (nearly) all parameters."""
     params = load_model_config("registration", "minkowski", "MinkUNet_Fragment")
     # Sparse registration input: XYZ features plus quantized grid sampling.
     transform = Compose(
         [XYZFeature(True, True, True), GridSampling3D(size=0.01, quantize_coords=True, mode="last")]
     )
     dataset = PairMockDatasetGeometric(5, transform=transform, num_points=1024, is_pair_ind=True)
     model = instantiate_model(params, dataset)
     model.set_input(dataset[0], device)
     model.forward()
     model.backward()
     # Warn (but do not fail) when some parameters received no gradient.
     ratio = test_hasgrad(model)
     if ratio < 1:
         print(
             "Model registration.minkowski.MinkUNet_Fragment has %i%% of parameters with 0 gradient" % (100 * ratio)
         )
# Example #12 (score: 0)
def compute_subsampled_matches(data1,
                               data2,
                               voxel_size=0.1,
                               max_distance_overlap=0.02):
    """Compute matches between a voxel-subsampled copy of ``data1`` and ``data2``.

    ``data1`` is grid-subsampled at ``voxel_size`` before matching; matched
    indices on the ``data1`` side are then remapped to the original
    (pre-subsampling) point indices tracked by ``SaveOriginalPosId``.
    """
    subsample = Compose(
        [SaveOriginalPosId(),
         GridSampling3D(voxel_size, mode='last')])
    coarse = subsample(data1.clone())
    matches = compute_overlap_and_matches(coarse, data2,
                                          max_distance_overlap)['pair']
    # Translate subsampled indices back into indices of the original cloud.
    matches[:, 0] = coarse.origin_id.numpy()[matches[:, 0]]
    return torch.from_numpy(matches.copy())
# Example #13 (score: 0)
    def __init__(
        self,
        down_conv_nn=None,
        grid_size=None,
        prev_grid_size=None,
        sigma=1.0,
        max_num_neighbors=16,
        activation=torch.nn.LeakyReLU(negative_slope=0.1),
        bn_momentum=0.02,
        bn=FastBatchNorm1d,
        deformable=False,
        add_one=False,
        **kwargs,
    ):
        """Simple KPConv block: one KP convolution, optional batch norm,
        activation, and a grid sampler when the block is strided.

        NOTE(review): the ``activation`` default is a module instance shared
        across all blocks that rely on the default — harmless for a stateless
        LeakyReLU, but worth keeping in mind.
        """
        super(SimpleBlock, self).__init__()
        assert len(down_conv_nn) == 2
        num_inputs, num_outputs = down_conv_nn

        # Kernel-point influence scales with the incoming grid resolution.
        point_influence = prev_grid_size * sigma
        if deformable:
            density_parameter = self.DEFORMABLE_DENSITY
            self.kp_conv = KPConvDeformableLayer(
                num_inputs, num_outputs, point_influence=point_influence, add_one=add_one
            )
        else:
            density_parameter = self.RIGID_DENSITY
            self.kp_conv = KPConvLayer(
                num_inputs, num_outputs, point_influence=point_influence, add_one=add_one
            )

        # Neighbourhood radius also scales with sigma and the previous grid size.
        self.neighbour_finder = RadiusNeighbourFinder(
            density_parameter * sigma * prev_grid_size,
            max_num_neighbors,
            conv_type=self.CONV_TYPE,
        )

        self.bn = bn(num_outputs, momentum=bn_momentum) if bn else None
        self.activation = activation

        # Only strided blocks (grid size changed) subsample their output cloud.
        self.sampler = GridSampling3D(grid_size) if prev_grid_size != grid_size else None
# Example #14 (score: 0)
        [
            -6.481465826011e-03, 8.051860151134e-03, -9.999466081774e-01,
            -7.337429464231e-02
        ],
        [
            9.999773098287e-01, -1.805528627661e-03, -6.496203536139e-03,
            -3.339968064433e-01
        ],
    ])
    pcd_s = np.fromfile(path_s, dtype=np.float32).reshape(-1, 4)[:, :3].dot(
        R_calib[:3, :3].T)
    pcd_t = np.fromfile(path_t, dtype=np.float32).reshape(-1, 4)[:, :3].dot(
        R_calib[:3, :3].T)

    transform = Compose([
        GridSampling3D(mode="last", size=0.3, quantize_coords=True),
        AddOnes(),
        AddFeatByKey(add_to_x=True, feat_name="ones"),
    ])

    data_s = transform(
        Batch(pos=torch.from_numpy(pcd_s).float(),
              batch=torch.zeros(pcd_s.shape[0]).long()))
    data_t = transform(
        Batch(pos=torch.from_numpy(pcd_t).float(),
              batch=torch.zeros(pcd_t.shape[0]).long()))

    model = PretainedRegistry.from_pretrained(
        "minkowski-registration-kitti").cuda()

    with torch.no_grad():