def __init__(self, checkpoint_dir, model_name, weight_name, feat_name, num_classes=None, mock_dataset=True):
    """Restore a trained model from a checkpoint and switch it to eval mode.

    Args:
        checkpoint_dir: Directory holding the saved checkpoint.
        model_name: Name of the checkpointed model.
        weight_name: Which stored weights to load into the model.
        feat_name: Feature name (kept for interface compatibility; unused here).
        num_classes: Class count used when building the mock dataset.
        mock_dataset: When True, build a lightweight MockDataset instead of
            instantiating the real dataset from the checkpoint's data config.
    """
    # Local imports keep the heavy dataset machinery out of module import time.
    from src.datasets.base_dataset import BaseDataset
    from src.datasets.dataset_factory import instantiate_dataset

    ckpt = model_checkpoint.ModelCheckpoint(checkpoint_dir, model_name, weight_name, strict=True)

    if mock_dataset:
        # A stand-in dataset: only the class count matters for model creation.
        dataset = MockDataset(num_classes)
        dataset.num_classes = num_classes
    else:
        dataset = instantiate_dataset(ckpt.data_config)

    # Apply the checkpoint's data transforms onto this wrapper instance.
    BaseDataset.set_transform(self, ckpt.data_config)

    self.model = ckpt.create_model(dataset, weight_name=weight_name)
    self.model.eval()
def get_dataset(conv_type, task):
    """Build a mock dataset matching the given convolution type and task.

    Args:
        conv_type: Convolution flavour ("dense", "sparse", or anything else
            for the generic geometric path); compared case-insensitively.
        task: Task name; "registration" selects pair datasets and
            "object_detection" enables box annotations.

    Returns:
        A mock dataset instance appropriate for the requested combination.
    """
    features = 2
    conv = conv_type.lower()
    include_box = task == "object_detection"

    # Dense models consume bigger clouds but smaller batches.
    num_points, batch_size = (2048, 1) if conv == "dense" else (1024, 2)

    if task == "registration":
        if conv == "dense":
            return PairMockDataset(features, num_points=num_points, batch_size=batch_size)
        if conv == "sparse":
            tr = Compose(
                [
                    XYZFeature(True, True, True),
                    GridSampling3D(size=0.01, quantize_coords=True, mode="last"),
                ]
            )
            return PairMockDatasetGeometric(features, transform=tr, num_points=num_points, batch_size=batch_size)
        return PairMockDatasetGeometric(features, batch_size=batch_size)

    if conv == "dense":
        return MockDataset(features, num_points=num_points, include_box=include_box, batch_size=batch_size)
    if conv == "sparse":
        return MockDatasetGeometric(
            features,
            include_box=include_box,
            transform=GridSampling3D(size=0.01, quantize_coords=True, mode="last"),
            num_points=num_points,
            batch_size=batch_size,
        )
    return MockDatasetGeometric(features, batch_size=batch_size)
def test_pn2(self):
    """PointNet2 unet: module counts follow num_layers and forward yields output_nc features."""
    from torch_points3d.applications.pointnet2 import PointNet2

    input_nc, output_nc, num_layers = 2, 5, 3
    model = PointNet2(
        architecture="unet",
        input_nc=input_nc,
        output_nc=output_nc,
        num_layers=num_layers,
        multiscale=True,
        config=None,
    )
    dataset = MockDataset(input_nc, num_points=512)

    self.assertEqual(len(model._modules["down_modules"]), num_layers - 1)
    self.assertEqual(len(model._modules["inner_modules"]), 1)
    self.assertEqual(len(model._modules["up_modules"]), num_layers)

    try:
        data_out = model.forward(dataset[0])
        self.assertEqual(data_out.x.shape[1], output_nc)
    except Exception as e:
        # Dump the architecture so a forward failure is diagnosable from the log.
        print("Model failing:")
        print(model)
        raise e
def test_rsconv(self):
    """RSConv encoder: one down module per layer and forward yields output_nc features."""
    from torch_points3d.applications.rsconv import RSConv

    input_nc, output_nc, num_layers = 2, 5, 4
    model = RSConv(
        architecture="encoder",
        input_nc=input_nc,
        output_nc=output_nc,
        num_layers=num_layers,
        multiscale=True,
        config=None,
    )
    dataset = MockDataset(input_nc, num_points=1024)

    self.assertEqual(len(model._modules["down_modules"]), num_layers)
    self.assertEqual(len(model._modules["inner_modules"]), 1)

    try:
        data_out = model.forward(dataset[0])
        self.assertEqual(data_out.x.shape[1], output_nc)
    except Exception as e:
        # Dump the architecture so a forward failure is diagnosable from the log.
        print("Model failing:")
        print(model)
        raise e
def test_resolve_1(self):
    """Every model in the unwrapped-unet config builds with matching module counts.

    Loads the test model configs plus the shared training / s3dis dataset
    configs, resolves them against a mock dataset, then instantiates each
    SegmentationModel and checks that the number of down/up modules matches
    the declared conv layer lists.
    """
    models_conf = os.path.join(ROOT, "test/config_unwrapped_unet_base/test_models.yaml")
    config = os.path.join(ROOT, "conf/config.yaml")
    models_conf = OmegaConf.load(models_conf).models
    config = OmegaConf.load(config)
    cfg_training = config.training
    cfg_dataset = config.data.s3dis
    dataset = MockDataset(6)
    tested_task = "segmentation"
    resolve_model(models_conf, dataset, tested_task)
    for model_name, model_conf in models_conf.items():
        print(model_name)
        model_type = model_conf.type
        module_filename = ".".join(["models", model_type, "modules"])
        modules_lib = importlib.import_module(module_filename)
        # BUG FIX: the original reassigned `cfg_training` here, so each
        # iteration formatted on top of the previous model's result instead
        # of the pristine base training config. Keep the per-model result in
        # its own variable.
        model_training = set_format(model_conf, cfg_training)
        model_conf = merges_in_sub(model_conf, [model_training, cfg_dataset])
        model = SegmentationModel(model_conf, model_type, dataset, modules_lib)
        assert len(model.down_modules) == len(model_conf.down_conv.down_conv_nn)
        assert len(model.up_modules) == len(model_conf.up_conv.up_conv_nn)
def get_dataset(conv_type, task):
    """Return a mock dataset variant for the requested conv type and task.

    Registration tasks get pair datasets; everything else gets plain mocks.
    "dense" conv types use larger clouds, "sparse" ones get a grid-sampling
    transform, and any other conv type falls through to the generic
    geometric dataset.
    """
    features = 2
    conv = conv_type.lower()

    if task == "registration":
        if conv == "dense":
            return PairMockDataset(features, num_points=2048)
        if conv == "sparse":
            transform = Compose(
                [
                    XYZFeature(True, True, True),
                    GridSampling(size=0.01, quantize_coords=True, mode="last"),
                ]
            )
            return PairMockDatasetGeometric(features, transform=transform, num_points=1024)
        return PairMockDatasetGeometric(features)

    if conv == "dense":
        return MockDataset(features, num_points=2048)
    if conv == "sparse":
        return MockDatasetGeometric(
            features,
            transform=GridSampling(size=0.01, quantize_coords=True, mode="last"),
            num_points=1024,
        )
    return MockDatasetGeometric(features)
def test_rsconv(self):
    """RSConv unet: down/inner/up module counts follow num_layers and forward runs."""
    from torch_points3d.applications.rsconv import RSConv

    input_nc, num_layers = 2, 4
    model = RSConv(
        architecture="unet",
        input_nc=input_nc,
        output_nc=5,
        num_layers=num_layers,
        multiscale=True,
        config=None,
    )
    dataset = MockDataset(input_nc, num_points=1024)
    model.set_input(dataset[0], device)

    self.assertEqual(len(model._modules["down_modules"]), num_layers)
    self.assertEqual(len(model._modules["inner_modules"]), 2)
    self.assertEqual(len(model._modules["up_modules"]), num_layers + 1)

    try:
        model.forward()
    except Exception as e:
        # Dump the architecture so a forward failure is diagnosable from the log.
        print("Model failing:")
        print(model)
        raise e
def get_dataset(conv_type):
    """Mock dataset keyed on the convolution flavour (case-insensitive)."""
    features = 2
    conv = conv_type.lower()
    if conv == "dense":
        return MockDataset(features, num_points=2048)
    if conv == "sparse":
        return MockDatasetGeometric(features, transform=ToSparseInput(0.01), num_points=1024)
    # Default: generic geometric dataset with no transform.
    return MockDatasetGeometric(features)
def test_get_by_name(self):
    """get_dataset resolves splits by name; duplicate test names raise ValueError."""
    dataset_opt = MockDatasetConfig()
    setattr(dataset_opt, "dataroot", os.path.join(DIR, "temp_dataset"))
    base = MockBaseDataset(dataset_opt)
    base.test_dataset = [MockDataset(), MockDataset()]
    base.train_dataset = MockDataset()
    base.val_dataset = MockDataset()

    # Auto-generated names: train, val, and indexed test splits.
    for split in ["train", "val", "test_0", "test_1"]:
        self.assertEqual(base.get_dataset(split).name, split)

    # A custom name on the test dataset replaces the auto "test_*" names.
    named = MockDataset()
    setattr(named, "name", "testos")
    base.test_dataset = named
    with self.assertRaises(ValueError):
        base.get_dataset("test_1")
    base.get_dataset("testos")

    # Two test datasets sharing a name must be rejected.
    with self.assertRaises(ValueError):
        base.test_dataset = [named, named]
def test_normal(self):
    """A single test dataset yields exactly one test dataloader."""
    opt = MockDatasetConfig()
    setattr(opt, "dataroot", os.path.join(DIR, "temp_dataset"))
    base = MockBaseDataset(opt)
    base.test_dataset = MockDataset()

    model_config = MockModelConfig()
    setattr(model_config, "conv_type", "dense")
    model = MockModel(model_config)

    base.create_dataloaders(model, 2, True, 0, False)
    self.assertEqual(len(base.test_dataloaders), 1)
def test_pointnet2ms(self):
    """Instantiate pointnet2_largemsg, run one forward/backward, and check gradient flow.

    Warns (without failing) when some parameters received no gradient.
    """
    params = load_model_config("segmentation", "pointnet2", "pointnet2_largemsg")
    params.update("data.use_category", True)
    dataset = MockDataset(5, num_points=2048)
    model = instantiate_model(params, dataset)
    model.set_input(dataset[0], device)
    model.forward()
    model.backward()
    ratio = test_hasgrad(model)
    if ratio < 1:
        # FIX: the warning previously named "pointnet2_largemsgs" (typo) while
        # the loaded config is "pointnet2_largemsg".
        print(
            "Model segmentation.pointnet2.pointnet2_largemsg has %i%% of parameters with 0 gradient"
            % (100 * ratio)
        )
def test_resolve_1(self):
    """resolve_model fills conv sizes so every mock model builds with matching module counts."""
    conf_path = os.path.join(ROOT, "test/config_unwrapped_unet_base/test_models.yaml")
    models_conf = OmegaConf.load(conf_path).models

    dataset = MockDataset(6)
    resolve_model(models_conf, dataset, "segmentation")

    for model_conf in models_conf.values():
        model = MockModel(model_conf, "", dataset, MockModelLib())
        assert len(model.down_modules) == len(model_conf.down_conv.down_conv_nn)
        assert len(model.up_modules) == len(model_conf.up_conv.up_conv_nn)
def __init__(self, dataset_opt):
    """Mock dataset wrapper exposing train/val splits backed by MockDataset.

    Args:
        dataset_opt: Dataset options; only `dataroot` is read here.
    """
    super().__init__(dataset_opt)
    self._data_path = dataset_opt.dataroot
    self.train_dataset, self.val_dataset = MockDataset(), MockDataset()