def test_kpconv(self):
    """Build a 4-layer KPConv unet and verify its module counts and a forward pass."""
    from torch_points3d.applications.kpconv import KPConv

    input_nc = 3
    num_layers = 4
    grid_sampling = 0.02
    model = KPConv(
        architecture="unet",
        input_nc=input_nc,
        output_nc=5,
        in_feat=32,
        in_grid_size=grid_sampling,
        num_layers=num_layers,
        config=None,
    )
    # NOTE(review): the sibling encoder test uses GridSampling3D — confirm which
    # transform name this file actually imports.
    dataset = MockDatasetGeometric(input_nc + 1, transform=GridSampling(0.01), num_points=128)
    # One extra down module beyond num_layers (the stem), a single inner module,
    # and one up module per layer (was hard-coded to 4; tied to num_layers for
    # consistency with the assertions above).
    self.assertEqual(len(model._modules["down_modules"]), num_layers + 1)
    self.assertEqual(len(model._modules["inner_modules"]), 1)
    self.assertEqual(len(model._modules["up_modules"]), num_layers)
    try:
        model.forward(dataset[0])
    except Exception:
        # Print the model for easier debugging, then re-raise with the original
        # traceback intact (bare `raise` instead of `raise e`).
        print("Model failing:")
        print(model)
        raise
def __init__(self, n_feat, in_grid_size):
    """Build a KPConv unet backbone followed by a feature-regression head.

    :param n_feat: number of features produced by the regressor head.
    :param in_grid_size: grid size for the first subsampling level of KPConv.
    """
    super().__init__()
    # Backbone: 4-layer KPConv unet. input_nc=0 because KPConv treats positions
    # separately from features (see the note in the demo script: "Pos aren't
    # features").
    self.unet = KPConv(
        architecture="unet", input_nc=0, num_layers=4, in_grid_size=in_grid_size
    )
    # Head mapping the backbone's output channels down to n_feat features.
    self.feature_regressor = FeatureRegressor(self.unet.output_nc, n_feat)
class KPConvFeatureExtractor(torch.nn.Module):
    """KPConv unet backbone with a feature-regression head on top."""

    def __init__(self, n_feat, in_grid_size):
        super().__init__()
        backbone = KPConv(
            architecture="unet",
            input_nc=0,
            num_layers=4,
            in_grid_size=in_grid_size,
        )
        self.unet = backbone
        self.feature_regressor = FeatureRegressor(backbone.output_nc, n_feat)

    @property
    def conv_type(self):
        """ This is needed by the dataset to infer which batch collate should be used"""
        return self.unet.conv_type

    def forward(self, data):
        # Run the backbone, then regress features from its point-wise outputs;
        # keep the result on self.output as well as returning it.
        backbone_out = self.unet(data)
        self.output = self.feature_regressor(backbone_out.x)
        return self.output

    def get_spatial_ops(self):
        return self.unet.get_spatial_ops()
def test_kpconv(self):
    """Check KPConv encoder construction with and without an explicit output_nc.

    Without output_nc the encoder exposes its raw backbone width (32 * in_feat)
    and no MLP head; with output_nc it appends an MLP head producing output_nc
    channels. The two original copy-pasted halves are factored into one helper.
    """
    from torch_points3d.applications.kpconv import KPConv

    input_nc = 3
    num_layers = 4
    grid_sampling = 0.02

    def run_encoder(in_feat, output_nc, expected_nc, expect_head):
        # Build an encoder with the given head configuration and verify its
        # structure and the channel width of a forward pass.
        kwargs = dict(
            architecture="encoder",
            input_nc=input_nc,
            in_feat=in_feat,
            in_grid_size=grid_sampling,
            num_layers=num_layers,
            config=None,
        )
        if output_nc is not None:
            kwargs["output_nc"] = output_nc
        model = KPConv(**kwargs)
        dataset = MockDatasetGeometric(input_nc + 1, transform=GridSampling3D(0.01), num_points=128)
        self.assertEqual(len(model._modules["down_modules"]), num_layers + 1)
        self.assertEqual(len(model._modules["inner_modules"]), 1)
        self.assertEqual(model.has_mlp_head, expect_head)
        self.assertEqual(model.output_nc, expected_nc)
        try:
            data_out = model.forward(dataset[0])
            self.assertEqual(data_out.x.shape[1], expected_nc)
        except Exception:
            print("Model failing:")
            print(model)
            raise  # bare raise keeps the original traceback (was `raise e`)

    # Encoder without output_nc: raw backbone features, no MLP head.
    run_encoder(in_feat=16, output_nc=None, expected_nc=32 * 16, expect_head=False)
    # Encoder with output_nc: MLP head maps features to output_nc channels.
    run_encoder(in_feat=32, output_nc=5, expected_nc=5, expect_head=True)
import numpy as np
import torch
from torch_points3d.applications.kpconv import KPConv
from torch_geometric.data import Batch, Data

# KPConv is implemented with the PARTIAL_DENSE format, so the data needs a
# `batch` attribute holding the sample index of every point.
input_nc = 0
num_classes = 10
batch_size = 3
num_points_per_sample = [5, 10, 3]

model = KPConv(
    architecture="unet",  # "unet" here so the model can perform segmentation
    # KPConv is particular: positions aren't features. It needs a tensor of
    # ones plus any features available such as rgb or intensity.
    input_nc=input_nc,
    output_nc=num_classes,
    num_layers=4,
)


def _make_sample(nb_points):
    # One point cloud: random positions, random labels, constant "ones" feature.
    return Data(
        pos=torch.randn((nb_points, 3)),
        y=torch.from_numpy(np.random.choice(range(num_classes), nb_points)),
        x=torch.ones((nb_points, 1)),
    )


samples = [_make_sample(n) for n in num_points_per_sample[:batch_size]]
data = Batch.from_data_list(samples)
print(data)
# Batch(batch=[18], pos=[18, 3], x=[18, 1], y=[18])