Пример #1
0
    def predict_original_samples(self, batch, conv_type, output):
        """ Takes the output generated by the NN and upsamples it to the original data
        Arguments:
            batch -- processed batch
            conv_type -- Type of convolution (DENSE, PARTIAL_DENSE, etc...)
            output -- output predicted by the model

        Returns:
            dict mapping each sample's filename to a numpy array whose rows
            are [x, y, z, predicted_label] for every raw point of the sample.
        """
        full_res_results = {}
        num_sample = BaseDataset.get_num_samples(batch, conv_type)
        if conv_type == "DENSE":
            # Dense batches arrive flattened; restore [batch, points, logits].
            output = output.reshape(num_sample, -1,
                                    output.shape[-1])  # [B,N,L]

        # Attach predictions to the batch so BaseDataset.get_sample can slice
        # them per sample with the same machinery as any other batch attribute.
        setattr(batch, "_pred", output)
        for b in range(num_sample):
            sampleid = batch.sampleid[b]
            sample_raw_pos = self.test_dataset[0].get_raw(sampleid).pos.to(
                output.device)
            predicted = BaseDataset.get_sample(batch, "_pred", b, conv_type)
            # Indices of the sub-sampled points within the raw point cloud.
            origindid = BaseDataset.get_sample(batch, SaveOriginalPosId.KEY, b,
                                               conv_type)
            # Upsample sub-sampled predictions back to full resolution using
            # k-nearest-neighbour interpolation over the raw positions.
            full_prediction = knn_interpolate(predicted,
                                              sample_raw_pos[origindid],
                                              sample_raw_pos,
                                              k=3)
            # argmax over the class dimension -> one label column per point
            labels = full_prediction.max(1)[1].unsqueeze(-1)
            full_res_results[self.test_dataset[0].get_filename(
                sampleid)] = np.hstack((
                    sample_raw_pos.cpu().numpy(),
                    labels.cpu().numpy(),
                ))
        return full_res_results
Пример #2
0
def run(model: BaseModel, dataset: BaseDataset, device, cfg):
    """Evaluate a registration model on the first test split and dump metrics.

    For every fragment pair, subsamples ``cfg.data.num_points`` random points,
    computes registration metrics against the ground-truth transform derived
    from the known correspondences, and writes the per-pair results to
    ``<checkpoint_dir>/<data.name>/matches/final_res.csv``.

    Arguments:
        model -- trained registration model (exposes get_input/get_output)
        dataset -- dataset providing the test dataloaders and pair names
        device -- torch device the model runs on
        cfg -- config with ``training`` and ``data`` sections
    """
    dataset.create_dataloaders(
        model,
        1,  # batch size of 1: one fragment pair per iteration
        False,
        cfg.training.num_workers,
        False,
    )
    loader = dataset.test_dataloaders[0]
    list_res = []
    with Ctq(loader) as tq_test_loader:
        for i, data in enumerate(tq_test_loader):
            with torch.no_grad():
                model.set_input(data, device)
                model.forward()

                name_scene, name_pair_source, name_pair_target = dataset.test_dataset[
                    0].get_name(i)
                # "inp" rather than "input" to avoid shadowing the builtin
                inp, inp_target = model.get_input()
                xyz, xyz_target = inp.pos, inp_target.pos
                ind, ind_target = inp.ind, inp_target.ind
                matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)
                feat, feat_target = model.get_output()
                # Random subsample keeps the metric computation tractable.
                rand = torch.randperm(len(feat))[:cfg.data.num_points]
                rand_target = torch.randperm(
                    len(feat_target))[:cfg.data.num_points]
                res = dict(name_scene=name_scene,
                           name_pair_source=name_pair_source,
                           name_pair_target=name_pair_target)
                T_gt = estimate_transfo(xyz[matches_gt[:, 0]],
                                        xyz_target[matches_gt[:, 1]])
                metric = compute_metrics(
                    xyz[rand],
                    xyz_target[rand_target],
                    feat[rand],
                    feat_target[rand_target],
                    T_gt,
                    sym=cfg.data.sym,
                    tau_1=cfg.data.tau_1,
                    tau_2=cfg.data.tau_2,
                    rot_thresh=cfg.data.rot_thresh,
                    trans_thresh=cfg.data.trans_thresh,
                    use_ransac=cfg.data.use_ransac,
                    ransac_thresh=cfg.data.first_subsampling,
                    use_teaser=cfg.data.use_teaser,
                    noise_bound_teaser=cfg.data.noise_bound_teaser,
                )
                list_res.append(dict(**res, **metric))

    df = pd.DataFrame(list_res)
    output_path = os.path.join(cfg.training.checkpoint_dir, cfg.data.name,
                               "matches")
    # exist_ok=True already makes this race-free; a prior exists() check is redundant
    os.makedirs(output_path, exist_ok=True)
    df.to_csv(osp.join(output_path, "final_res.csv"))
    print(df.groupby("name_scene").mean())
Пример #3
0
    def test_multiple_test_datasets(self):
        """A dataset with two test splits exposes one dataloader per split."""
        options = Options()
        test_path = os.path.join(os.getcwd(), "test")
        options.dataset_name = test_path
        options.dataroot = test_path

        class MultiTestDataset(BaseDataset):
            def __init__(self, dataset_opt):
                super(MultiTestDataset, self).__init__(dataset_opt)
                self.train_dataset = CustomMockDataset(10, 1, 3, 10)
                self.val_dataset = CustomMockDataset(10, 1, 3, 10)
                self.test_dataset = [
                    CustomMockDataset(10, 1, 3, 10),
                    CustomMockDataset(10, 1, 3, 20),
                ]

        dataset = MultiTestDataset(options)

        mock_config = MockModelConfig()
        mock_config.conv_type = "dense"
        dataset.create_dataloaders(MockModel(mock_config), 5, True, 0, False)

        loaders = dataset.test_dataloaders
        self.assertEqual(len(loaders), 2)
        self.assertEqual(len(loaders[0].dataset), 10)
        self.assertEqual(len(loaders[1].dataset), 20)
        self.assertEqual(dataset.num_classes, 3)
        self.assertEqual(dataset.is_hierarchical, False)
        self.assertEqual(dataset.has_fixed_points_transform, False)
        self.assertEqual(dataset.has_val_loader, True)
        self.assertEqual(dataset.class_to_segments, None)
        self.assertEqual(dataset.feature_dimension, 1)

        first_batch = next(iter(loaders[0]))
        self.assertEqual(BaseDataset.get_num_samples(first_batch, "dense"), 5)

        # Per-sample extraction: positions are (points, 3), features (points, 1).
        self.assertEqual(
            BaseDataset.get_sample(first_batch, "pos", 1, "dense").shape,
            (10, 3))
        self.assertEqual(
            BaseDataset.get_sample(first_batch, "x", 1, "dense").shape,
            (10, 1))
        self.assertEqual(
            dataset.num_batches,
            {"train": 2, "val": 2, "test_0": 2, "test_1": 4},
        )

        expected_repr = "Dataset: MultiTestDataset \n\x1b[0;95mpre_transform \x1b[0m= None\n\x1b[0;95mtest_transform \x1b[0m= None\n\x1b[0;95mtrain_transform \x1b[0m= None\n\x1b[0;95mval_transform \x1b[0m= None\n\x1b[0;95minference_transform \x1b[0m= None\nSize of \x1b[0;95mtrain_dataset \x1b[0m= 10\nSize of \x1b[0;95mtest_dataset \x1b[0m= 10, 20\nSize of \x1b[0;95mval_dataset \x1b[0m= 10\n\x1b[0;95mBatch size =\x1b[0m 5"
        self.assertEqual(dataset.__repr__(), expected_repr)
Пример #4
0
    def __init__(self, checkpoint_dir, model_name, weight_name, feat_name, num_classes=None, mock_dataset=True):
        """Load a trained model from a checkpoint and put it in eval mode.

        Arguments:
            checkpoint_dir -- directory containing the checkpoint file
            model_name -- name of the checkpoint (without extension)
            weight_name -- which stored weights to load from the checkpoint
            feat_name -- feature name; not used here, presumably consumed
                by subclasses -- TODO confirm
            num_classes -- number of classes for the mocked dataset
                (only relevant when mock_dataset is True)
            mock_dataset -- when True, build the model against a MockDataset
                instead of instantiating the dataset stored in the checkpoint
        """
        # Checkpoint
        from torch_points3d.datasets.base_dataset import BaseDataset
        from torch_points3d.datasets.dataset_factory import instantiate_dataset
        from torch_points3d.utils.mock import MockDataset
        import torch_points3d.metrics.model_checkpoint as model_checkpoint

        checkpoint = model_checkpoint.ModelCheckpoint(checkpoint_dir, model_name, weight_name, strict=True)
        if mock_dataset:
            dataset = MockDataset(num_classes)
            dataset.num_classes = num_classes
        else:
            dataset = instantiate_dataset(checkpoint.data_config)
        # Install the transforms recorded in the checkpoint on this wrapper,
        # then rebuild the model with the matching dataset properties.
        BaseDataset.set_transform(self, checkpoint.data_config)
        self.model = checkpoint.create_model(dataset, weight_name=weight_name)
        self.model.eval()
Пример #5
0
    def from_pretrained(model_tag,
                        download=True,
                        out_file=None,
                        weight_name="latest",
                        mock_dataset=True):
        """Instantiate a pretrained model from the model registry.

        Arguments:
            model_tag -- key into PretainedRegistry.MODELS
            download -- when True, fetch the checkpoint and build the model;
                when False nothing is loaded and None is returned implicitly
            out_file -- NOTE(review): currently ignored; the checkpoint is
                always written to CHECKPOINT_DIR/<model_tag>.pt -- confirm
                whether honoring it is intended
            weight_name -- which stored weights to load ("latest" when None)
            mock_dataset -- when True, rebuild the model from the dataset
                properties stored in the checkpoint (optionally overridden by
                MOCK_USED_PROPERTIES) instead of instantiating the dataset

        Raises:
            Exception -- when model_tag is not a registered model
        """
        # Convert inputs to registry format
        url = PretainedRegistry.MODELS.get(model_tag)
        if url is None:
            raise Exception(
                "model_tag {} doesn't exist within available models. Here is the list of pre-trained models {}"
                .format(model_tag, PretainedRegistry.available_models()))

        checkpoint_name = model_tag + ".pt"
        out_file = os.path.join(CHECKPOINT_DIR, checkpoint_name)

        if download:
            download_file(url, out_file)

            weight_name = weight_name if weight_name is not None else "latest"

            checkpoint: ModelCheckpoint = ModelCheckpoint(
                CHECKPOINT_DIR,
                model_tag,
                weight_name,  # already normalized to "latest" above
                resume=False,
            )
            if mock_dataset:
                # Copy so registry-level overrides don't mutate the checkpoint.
                dataset = checkpoint.dataset_properties.copy()
                mock_props = PretainedRegistry.MOCK_USED_PROPERTIES.get(
                    model_tag)
                if mock_props is not None:
                    for k, v in mock_props.items():
                        dataset[k] = v
            else:
                dataset = instantiate_dataset(checkpoint.data_config)

            model: BaseModel = checkpoint.create_model(dataset,
                                                       weight_name=weight_name)

            Wandb.set_urls_to_model(model, url)

            BaseDataset.set_transform(model, checkpoint.data_config)

            return model
def run(model: BaseModel, dataset: BaseDataset, device, output_path, cfg):
    """Extract per-fragment features with the model and save them.

    When ``cfg.data.is_patch`` is set, each fragment is processed patch by
    patch and the concatenated features are saved once per fragment; otherwise
    every batch of the single test dataloader is saved individually.

    Arguments:
        model -- feature-extraction model
        dataset -- fragment dataset providing dataloaders and fragment names
        device -- torch device the model runs on
        output_path -- directory where the features are written
        cfg -- config providing batch_size, num_workers, and data.is_patch
    """
    # Set dataloaders
    num_fragment = dataset.num_fragment
    if cfg.data.is_patch:
        for i in range(num_fragment):
            dataset.set_patches(i)
            dataset.create_dataloaders(
                model,
                cfg.batch_size,
                False,
                cfg.num_workers,
                False,
            )
            loader = dataset.test_dataloaders()[0]
            features = []
            scene_name, pc_name = dataset.get_name(i)

            with Ctq(loader) as tq_test_loader:
                for data in tq_test_loader:
                    with torch.no_grad():
                        model.set_input(data, device)
                        model.forward()
                        features.append(model.get_output().cpu())
            features = torch.cat(features, 0).numpy()
            log.info("save {} from {} in  {}".format(pc_name, scene_name,
                                                     output_path))
            save(output_path, scene_name, pc_name,
                 dataset.base_dataset[i].to("cpu"), features)
    else:
        dataset.create_dataloaders(
            model,
            1,
            False,
            cfg.num_workers,
            False,
        )
        loader = dataset.test_dataloaders()[0]
        with Ctq(loader) as tq_test_loader:
            for i, data in enumerate(tq_test_loader):
                with torch.no_grad():
                    model.set_input(data, device)
                    model.forward()
                    features = model.get_output()[0]  # batch of 1
                    # BUG FIX: scene_name/pc_name were previously undefined in
                    # this branch (NameError at runtime); fetch them per item.
                    scene_name, pc_name = dataset.get_name(i)
                    save(output_path, scene_name, pc_name, data.to("cpu"),
                         features)
Пример #7
0
    def __init__(self, checkpoint_dir, model_name, weight_name, feat_name, num_classes, mock_dataset=True):
        """Set up a PointNet forward wrapper and strip sampling transforms.

        Delegates checkpoint loading to the base class, remembers the feature
        name to extract, and removes GridSampling/FixedPoints from the
        inference transform so the full point cloud is fed to the network.
        """
        super().__init__(
            checkpoint_dir,
            model_name,
            weight_name,
            feat_name,
            num_classes=num_classes,
            mock_dataset=mock_dataset,
        )
        self.feat_name = feat_name

        from torch_geometric.transforms import FixedPoints, GridSampling
        from torch_points3d.datasets.base_dataset import BaseDataset

        self.inference_transform = BaseDataset.remove_transform(
            self.inference_transform, [GridSampling, FixedPoints]
        )
Пример #8
0
    def test_empty_dataset(self):
        """A BaseDataset built from bare options has no transforms or splits."""
        options = Options()
        test_path = os.path.join(os.getcwd(), "test")
        options.dataset_name = test_path
        options.dataroot = test_path

        dataset = BaseDataset(options)

        for attr in (
            "pre_transform",
            "test_transform",
            "train_transform",
            "val_transform",
            "train_dataset",
            "test_dataset",
            "val_dataset",
        ):
            self.assertIsNone(getattr(dataset, attr))
Пример #9
0
    def from_file(path, weight_name="latest", mock_property=None):
        """
        Load a pretrained model trained with torch-points3d from file.
        return a pretrained model
        Parameters
        ----------
        path: str
            path of a pretrained model
        weight_name: str, optional
            name of the weight
        mock_property: dict, optional
            mock dataset properties merged into the stored data config;
            when None the real dataset is instantiated from the config instead

        """
        weight_name = weight_name if weight_name is not None else "latest"
        path_dir, name = os.path.split(path)
        name = name.split(".")[0]  # ModelCheckpoint will add the extension

        checkpoint: ModelCheckpoint = ModelCheckpoint(
            path_dir,
            name,
            weight_name,  # already normalized to "latest" above
            resume=False,
        )
        # With mock properties we reuse the stored data config (patched with
        # the overrides); otherwise a real dataset is built from that config.
        dataset = checkpoint.data_config
        if mock_property is not None:
            for k, v in mock_property.items():
                dataset[k] = v
        else:
            dataset = instantiate_dataset(checkpoint.data_config)

        model: BaseModel = checkpoint.create_model(dataset,
                                                   weight_name=weight_name)
        BaseDataset.set_transform(model, checkpoint.data_config)
        return model
Пример #10
0
def run(model: BaseModel, dataset: BaseDataset, device, output_path):
    """Run inference on every test dataloader and save full-resolution predictions.

    Arguments:
        model -- trained segmentation model
        dataset -- dataset providing test dataloaders and the upsampling logic
        device -- torch device the model runs on
        output_path -- destination handed to save()
    """
    loaders = dataset.test_dataloaders
    predicted = {}
    # NOTE: a no-op `loader.dataset.name` expression statement was removed
    # from the loop below; it had no observable effect.
    for loader in loaders:
        with Ctq(loader) as tq_test_loader:
            for data in tq_test_loader:
                with torch.no_grad():
                    model.set_input(data, device)
                    model.forward()
                # Upsample batch predictions to the original point clouds and
                # merge them into the running result dict.
                predicted.update(
                    dataset.predict_original_samples(data, model.conv_type,
                                                     model.get_output()))

    save(output_path, predicted)
Пример #11
0
    def test_empty_dataset(self):
        """Transforms declared in the options are compiled into Compose objects."""
        options = Options()
        test_path = os.path.join(os.getcwd(), "test")
        options.dataset_name = test_path
        options.dataroot = test_path
        options.pre_transform = [DictConfig({"transform": "RandomNoise"})]
        options.test_transform = [DictConfig({"transform": "AddOnes"})]
        options.val_transform = [DictConfig({"transform": "Jitter"})]
        options.train_transform = [DictConfig({"transform": "RandomSymmetry"})]
        dataset = BaseDataset(options)

        # Each declared transform must compile to the matching Compose;
        # the inference transform chains pre + test transforms.
        expected = {
            "pre_transform": T.Compose([T3d.RandomNoise()]),
            "test_transform": T.Compose([T3d.AddOnes()]),
            "train_transform": T.Compose([T3d.RandomSymmetry()]),
            "val_transform": T.Compose([T3d.Jitter()]),
            "inference_transform": T.Compose([T3d.RandomNoise(), T3d.AddOnes()]),
        }
        for attr, transform in expected.items():
            self.assertEqual(str(getattr(dataset, attr)), str(transform))

        for attr in ("train_dataset", "test_dataset", "val_dataset"):
            self.assertIsNone(getattr(dataset, attr))
Пример #12
0
def run(model: BaseModel, dataset: BaseDataset, device, cfg):
    """Evaluate a registration model with timing and registration recall.

    Like the basic evaluation loop but also records feature-extraction and
    preparation times per pair and passes the ground-truth correspondences so
    registration recall can be computed. Results go to
    ``<checkpoint_dir>/<data.name>/matches/final_res_<timestamp>.csv``.

    Arguments:
        model -- trained registration model (exposes get_input/get_output)
        dataset -- dataset providing the test dataloaders and pair names
        device -- torch device the model runs on
        cfg -- config with ``training`` and ``data`` sections
    """
    reg_thresh = cfg.data.registration_recall_thresh
    if reg_thresh is None:
        reg_thresh = 0.2  # default registration recall threshold
    print(time.strftime("%Y%m%d-%H%M%S"))
    dataset.create_dataloaders(
        model, 1, False, cfg.training.num_workers, False,
    )
    loader = dataset.test_dataloaders[0]
    list_res = []
    with Ctq(loader) as tq_test_loader:
        for i, data in enumerate(tq_test_loader):
            with torch.no_grad():
                t0 = time.time()
                model.set_input(data, device)
                model.forward()
                t1 = time.time()
                name_scene, name_pair_source, name_pair_target = dataset.test_dataset[0].get_name(i)
                # "inp" rather than "input" to avoid shadowing the builtin
                inp, inp_target = model.get_input()
                xyz, xyz_target = inp.pos, inp_target.pos
                ind, ind_target = inp.ind, inp_target.ind
                matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)
                feat, feat_target = model.get_output()

                # Random subsample keeps the metric computation tractable.
                rand = torch.randperm(len(feat))[: cfg.data.num_points]
                rand_target = torch.randperm(len(feat_target))[: cfg.data.num_points]
                res = dict(name_scene=name_scene, name_pair_source=name_pair_source, name_pair_target=name_pair_target)
                T_gt = estimate_transfo(xyz[matches_gt[:, 0]], xyz_target[matches_gt[:, 1]])
                t2 = time.time()
                metric = compute_metrics(
                    xyz[rand],
                    xyz_target[rand_target],
                    feat[rand],
                    feat_target[rand_target],
                    T_gt,
                    sym=cfg.data.sym,
                    tau_1=cfg.data.tau_1,
                    tau_2=cfg.data.tau_2,
                    rot_thresh=cfg.data.rot_thresh,
                    trans_thresh=cfg.data.trans_thresh,
                    use_ransac=cfg.data.use_ransac,
                    ransac_thresh=cfg.data.first_subsampling,
                    use_teaser=cfg.data.use_teaser,
                    noise_bound_teaser=cfg.data.noise_bound_teaser,
                    xyz_gt=xyz[matches_gt[:, 0]],
                    xyz_target_gt=xyz_target[matches_gt[:, 1]],
                    registration_recall_thresh=reg_thresh,
                )
                res = dict(**res, **metric)
                res["time_feature"] = t1 - t0
                res["time_feature_per_point"] = (t1 - t0) / (len(inp.pos) + len(inp_target.pos))
                res["time_prep"] = t2 - t1

                list_res.append(res)

    df = pd.DataFrame(list_res)
    output_path = os.path.join(cfg.training.checkpoint_dir, cfg.data.name, "matches")
    # exist_ok=True already makes this race-free; a prior exists() check is redundant
    os.makedirs(output_path, exist_ok=True)
    df.to_csv(osp.join(output_path, "final_res_{}.csv".format(time.strftime("%Y%m%d-%H%M%S"))))
    print(df.groupby("name_scene").mean())
Пример #13
0
def run(model: BaseModel, dataset: BaseDataset, device, cfg):
    """Visualize predicted matches for a single fragment pair.

    Runs the model on the pair selected by ``cfg.ind``, computes predicted
    matches and a TEASER++ alignment, then shows the clouds before and after
    alignment together with the matched keypoints.
    """
    print(time.strftime("%Y%m%d-%H%M%S"))
    dataset.create_dataloaders(
        model,
        1,
        False,
        cfg.training.num_workers,
        False,
    )
    test_set = dataset.test_dataset[0]

    # CLI overrides with defaults: pair index, line thickness, keypoint radius.
    pair_ind = cfg.ind if cfg.ind is not None else 0
    t = cfg.t if cfg.t is not None else 5
    r = cfg.r if cfg.r is not None else 0.1
    print(test_set)
    print(pair_ind)
    data = test_set[pair_ind]
    # Single-pair "batch": every point belongs to batch 0.
    data.batch = torch.zeros(len(data.pos)).long()
    data.batch_target = torch.zeros(len(data.pos_target)).long()
    print(data)
    with torch.no_grad():
        model.set_input(data, device)
        model.forward()

        name_scene, name_pair_source, name_pair_target = dataset.test_dataset[0].get_name(pair_ind)
        print(name_scene, name_pair_source, name_pair_target)
        inp, inp_target = model.get_input()
        xyz, xyz_target = inp.pos, inp_target.pos
        src_ind, tgt_ind = inp.ind, inp_target.ind
        matches_gt = torch.stack([src_ind, tgt_ind]).transpose(0, 1)
        feat, feat_target = model.get_output()

        rand = torch.randperm(len(feat))[:cfg.data.num_points]
        rand_target = torch.randperm(len(feat_target))[:cfg.data.num_points]
        T_gt = estimate_transfo(
            xyz[matches_gt[:, 0]].clone(),
            xyz_target[matches_gt[:, 1]].clone(),
        )
        matches_pred = get_matches(
            feat[rand],
            feat_target[rand_target],
            sym=cfg.data.sym,
        )
        # Inlier mask (used for coloring): a predicted match is an inlier when
        # the ground-truth transform maps it within tau_1 of its target.
        inliers = (
            torch.norm(
                xyz[rand][matches_pred[:, 0]] @ T_gt[:3, :3].T + T_gt[:3, 3]
                - xyz_target[rand_target][matches_pred[:, 1]],
                dim=1,
            ) < cfg.data.tau_1
        )
        # Estimate the transformation from the predicted matches.
        T_teaser = teaser_pp_registration(
            xyz[rand][matches_pred[:, 0]],
            xyz_target[rand_target][matches_pred[:, 1]],
            noise_bound=cfg.data.tau_1,
        )
        pcd_source = torch2o3d(inp, [1, 0.7, 0.1])
        pcd_target = torch2o3d(inp_target, [0, 0.15, 0.9])
        # Show unaligned, then TEASER-aligned, then undo the alignment.
        open3d.visualization.draw_geometries([pcd_source, pcd_target])
        pcd_source.transform(T_teaser.cpu().numpy())
        open3d.visualization.draw_geometries([pcd_source, pcd_target])
        pcd_source.transform(np.linalg.inv(T_teaser.cpu().numpy()))
        rand_ind = torch.randperm(len(rand[matches_pred[:, 0]]))[:250]
        pcd_source.transform(T_gt.cpu().numpy())
        kp_s = torch2o3d(inp, ind=rand[matches_pred[:, 0]][rand_ind])
        kp_s.transform(T_gt.cpu().numpy())
        kp_t = torch2o3d(inp_target, ind=rand_target[matches_pred[:, 1]][rand_ind])
        match_visualizer(
            pcd_source,
            kp_s,
            pcd_target,
            kp_t,
            inliers[rand_ind].cpu().numpy(),
            radius=r,
            t=t,
        )