def get_dataset(conv_type, task):
    """Return a mock dataset matching the given convolution type and task."""
    features = 2
    if task == "registration":
        if conv_type.lower() == "dense":
            return PairMockDataset(features, num_points=2048)
        if conv_type.lower() == "sparse":
            tr = Compose([
                XYZFeature(True, True, True),
                GridSampling(size=0.01, quantize_coords=True, mode="last"),
            ])
            return PairMockDatasetGeometric(features, transform=tr, num_points=1024)
        return PairMockDatasetGeometric(features)
    else:
        if conv_type.lower() == "dense":
            return MockDataset(features, num_points=2048)
        if conv_type.lower() == "sparse":
            return MockDatasetGeometric(
                features,
                transform=GridSampling(size=0.01, quantize_coords=True, mode="last"),
                num_points=1024,
            )
        return MockDatasetGeometric(features)
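A minimal usage sketch for the factory above; the argument values are illustrative assumptions, and the mock dataset classes are the test helpers referenced in this listing rather than a public API.

# Illustrative calls only; argument values are assumptions for demonstration.
reg_sparse = get_dataset("sparse", "registration")   # PairMockDatasetGeometric with XYZFeature + GridSampling
seg_dense = get_dataset("dense", "segmentation")     # MockDataset with 2048 points
seg_default = get_dataset("partial_dense", "segmentation")  # falls through to plain MockDatasetGeometric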
Example #2
    def test_kpconv(self):
        from torch_points3d.applications.kpconv import KPConv

        input_nc = 3
        num_layers = 4
        grid_sampling = 0.02
        in_feat = 32
        model = KPConv(
            architecture="unet",
            input_nc=input_nc,
            in_feat=in_feat,
            in_grid_size=grid_sampling,
            num_layers=num_layers,
            config=None,
        )
        dataset = MockDatasetGeometric(input_nc + 1, transform=GridSampling(0.01), num_points=128)
        self.assertEqual(len(model._modules["down_modules"]), num_layers + 1)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertEqual(len(model._modules["up_modules"]), 4)
        self.assertFalse(model.has_mlp_head)
        self.assertEqual(model.output_nc, in_feat)

        try:
            data_out = model.forward(dataset[0])
            self.assertEqual(data_out.x.shape[1], in_feat)
        except Exception as e:
            print("Model failing:")
            print(model)
            raise e

        input_nc = 3
        num_layers = 4
        grid_sampling = 0.02
        in_feat = 32
        output_nc = 5
        model = KPConv(
            architecture="unet",
            input_nc=input_nc,
            output_nc=output_nc,
            in_feat=in_feat,
            in_grid_size=grid_sampling,
            num_layers=num_layers,
            config=None,
        )
        dataset = MockDatasetGeometric(input_nc + 1, transform=GridSampling(0.01), num_points=128)
        self.assertEqual(len(model._modules["down_modules"]), num_layers + 1)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertEqual(len(model._modules["up_modules"]), 4)
        self.assertTrue(model.has_mlp_head)
        self.assertEqual(model.output_nc, output_nc)

        try:
            data_out = model.forward(dataset[0])
            self.assertEqual(data_out.x.shape[1], output_nc)
        except Exception as e:
            print("Model failing:")
            print(model)
            raise e
Example #3
    def test_multiscaleTransforms(self):
        samplers = [GridSampling(0.25), None, GridSampling(0.5)]
        search = [
            RadiusNeighbourFinder(0.5, 100,
                                  ConvolutionFormat.PARTIAL_DENSE.value),
            RadiusNeighbourFinder(0.5, 150,
                                  ConvolutionFormat.PARTIAL_DENSE.value),
            RadiusNeighbourFinder(1, 200,
                                  ConvolutionFormat.PARTIAL_DENSE.value),
        ]
        upsampler = [KNNInterpolate(1), KNNInterpolate(1)]

        N = 10
        x = np.linspace(0, 1, N)
        y = np.linspace(0, 1, N)
        xv, yv = np.meshgrid(x, y)

        pos = torch.tensor([xv.flatten(), yv.flatten(), np.zeros(N * N)]).T
        x = torch.ones_like(pos)
        d = Data(pos=pos, x=x).contiguous()
        ms_transform = MultiScaleTransform({
            "sampler": samplers,
            "neighbour_finder": search,
            "upsample_op": upsampler
        })

        transformed = ms_transform(d.clone())
        npt.assert_almost_equal(transformed.x.numpy(), x.numpy())
        npt.assert_almost_equal(transformed.pos.numpy(), pos.numpy())

        ms = transformed.multiscale
        npt.assert_almost_equal(ms[0].pos.numpy(), ms[1].pos.numpy())
        npt.assert_almost_equal(ms[0].pos.numpy(),
                                samplers[0](d.clone()).pos.numpy())
        npt.assert_almost_equal(ms[2].pos.numpy(),
                                samplers[2](ms[0].clone()).pos.numpy())

        self.assertEqual(ms[0].__inc__("idx_neighboors", 0), pos.shape[0])
        idx = search[0](
            d.pos,
            ms[0].pos,
            torch.zeros((d.pos.shape[0]), dtype=torch.long),
            torch.zeros((ms[0].pos.shape[0]), dtype=torch.long),
        )
        for i in range(len(ms[0].idx_neighboors)):
            self.assertEqual(set(ms[0].idx_neighboors[i].tolist()),
                             set(idx[i].tolist()))
        self.assertEqual(ms[1].idx_neighboors.shape[1], 150)
        self.assertEqual(ms[2].idx_neighboors.shape[1], 200)

        upsample = transformed.upsample
        self.assertEqual(upsample[0].num_nodes, ms[1].num_nodes)
        self.assertEqual(upsample[1].num_nodes, pos.shape[0])
        self.assertEqual(upsample[1].x_idx.max(), ms[0].num_nodes - 1)
        self.assertEqual(upsample[1].y_idx.max(), pos.shape[0] - 1)
        self.assertEqual(upsample[1].__inc__("x_idx", 0), ms[0].num_nodes)
        self.assertEqual(upsample[1].__inc__("y_idx", 0), pos.shape[0])
Example #4
    def test_kpconv(self):
        from torch_points3d.applications.kpconv import KPConv

        input_nc = 3
        num_layers = 4
        grid_sampling = 0.02
        model = KPConv(
            architecture="unet",
            input_nc=input_nc,
            output_nc=5,
            in_feat=32,
            in_grid_size=grid_sampling,
            num_layers=num_layers,
            config=None,
        )
        dataset = MockDatasetGeometric(input_nc + 1,
                                       transform=GridSampling(0.01),
                                       num_points=128)
        model.set_input(dataset[0], device)
        self.assertEqual(len(model._modules["down_modules"]), num_layers + 1)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertEqual(len(model._modules["up_modules"]), 4)

        try:
            model.forward()
        except Exception as e:
            print("Model failing:")
            print(model)
            raise e

    def test_precompute(self):
        pos = torch.tensor([[1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 0],
                            [0.1, 0, 0]])
        x = torch.tensor([0, 0, 0, 0, 1]).unsqueeze(-1)
        support = Data(x=x, pos=pos)

        query = GridSampling(1)(support.clone())

        interpolate = KNNInterpolate(1)
        up = interpolate.precompute(query, support)
        self.assertEqual(up.num_nodes, 5)
        self.assertEqual(up.x_idx[4], up.x_idx[3])

    def test_siamese_minkowski(self):
        params = load_model_config("registration", "minkowski", "MinkUNet_Fragment")
        transform = Compose([
            XYZFeature(True, True, True),
            GridSampling(size=0.01, quantize_coords=True, mode="last"),
        ])
        dataset = PairMockDatasetGeometric(5, transform=transform, num_points=1024, is_pair_ind=True)
        model = instantiate_model(params, dataset)
        d = dataset[0]
        model.set_input(d, device)
        model.forward()
        model.backward()
Example #7
def compute_subsampled_matches(data1,
                               data2,
                               voxel_size=0.1,
                               max_distance_overlap=0.02):
    """
    compute matches on subsampled version of data and track ind
    """
    grid_sampling = Compose(
        [SaveOriginalPosId(),
         GridSampling(voxel_size, mode='last')])
    subsampled_data = grid_sampling(data1.clone())
    origin_id = subsampled_data.origin_id.numpy()
    pair = compute_overlap_and_matches(subsampled_data, data2,
                                       max_distance_overlap)['pair']
    pair[:, 0] = origin_id[pair[:, 0]]
    return torch.from_numpy(pair.copy())
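A hedged usage sketch for compute_subsampled_matches; the random fragments below are synthetic and purely illustrative, not data from the original tests.

# Hypothetical call; frag1/frag2 are made-up point clouds for demonstration.
import torch
from torch_geometric.data import Data

frag1 = Data(pos=torch.rand(1000, 3))
frag2 = Data(pos=frag1.pos + 0.005 * torch.randn(1000, 3))
pair = compute_subsampled_matches(frag1, frag2, voxel_size=0.1, max_distance_overlap=0.02)
# Column 0 of `pair` indexes points of the original frag1 thanks to SaveOriginalPosId.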
    def __init__(
        self,
        down_conv_nn=None,
        grid_size=None,
        prev_grid_size=None,
        sigma=1.0,
        max_num_neighbors=16,
        activation=torch.nn.LeakyReLU(negative_slope=0.1),
        bn_momentum=0.02,
        bn=FastBatchNorm1d,
        deformable=False,
        add_one=False,
        **kwargs,
    ):
        super(SimpleBlock, self).__init__()
        assert len(down_conv_nn) == 2
        num_inputs, num_outputs = down_conv_nn
        if deformable:
            density_parameter = self.DEFORMABLE_DENSITY
            self.kp_conv = KPConvDeformableLayer(
                num_inputs, num_outputs, point_influence=prev_grid_size * sigma, add_one=add_one
            )
        else:
            density_parameter = self.RIGID_DENSITY
            self.kp_conv = KPConvLayer(num_inputs, num_outputs, point_influence=prev_grid_size * sigma, add_one=add_one)
        search_radius = density_parameter * sigma * prev_grid_size
        self.neighbour_finder = RadiusNeighbourFinder(search_radius, max_num_neighbors, conv_type=self.CONV_TYPE)

        if bn:
            self.bn = bn(num_outputs, momentum=bn_momentum)
        else:
            self.bn = None
        self.activation = activation

        is_strided = prev_grid_size != grid_size
        if is_strided:
            self.sampler = GridSampling(grid_size)
        else:
            self.sampler = None
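A constructor sketch for the block above, assuming SimpleBlock also defines the class constants its __init__ relies on (RIGID_DENSITY, DEFORMABLE_DENSITY, CONV_TYPE), as it does in torch_points3d's KPConv modules; the hyper-parameter values are illustrative only.

# Hypothetical instantiation; values chosen for illustration.
block = SimpleBlock(
    down_conv_nn=[64, 128],   # input / output feature sizes
    grid_size=0.04,           # differs from prev_grid_size, so a GridSampling sampler is attached
    prev_grid_size=0.02,
    sigma=1.0,
    max_num_neighbors=16,
)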