Example #1
def run(model: BaseModel, dataset: BaseDataset, device, output_path, cfg):
    # Set dataloaders
    num_fragment = dataset.num_fragment
    if cfg.data.is_patch:
        for i in range(num_fragment):
            dataset.set_patches(i)
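            # Positional arguments below are assumed to be (batch_size, shuffle,
            # num_workers, precompute_multi_scale); names taken from the
            # torch-points3d BaseDataset API and may differ in other versions.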
            dataset.create_dataloaders(
                model,
                cfg.batch_size,
                False,
                cfg.num_workers,
                False,
            )
            loader = dataset.test_dataloaders()[0]
            features = []
            scene_name, pc_name = dataset.get_name(i)

            with Ctq(loader) as tq_test_loader:
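                # Accumulate the per-point output features for this fragment on
                # the CPU before concatenating and saving them.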
                for data in tq_test_loader:
                    # pcd = open3d.geometry.PointCloud()
                    # pcd.points = open3d.utility.Vector3dVector(data.pos[0].numpy())
                    # open3d.visualization.draw_geometries([pcd])
                    with torch.no_grad():
                        model.set_input(data, device)
                        model.forward()
                        features.append(model.get_output().cpu())
            features = torch.cat(features, 0).numpy()
            log.info("save {} from {} in  {}".format(pc_name, scene_name,
                                                     output_path))
            save(output_path, scene_name, pc_name,
                 dataset.base_dataset[i].to("cpu"), features)
    else:
        dataset.create_dataloaders(
            model,
            1,
            False,
            cfg.num_workers,
            False,
        )
        loader = dataset.test_dataloaders()[0]
        with Ctq(loader) as tq_test_loader:
            for i, data in enumerate(tq_test_loader):
                with torch.no_grad():
                    model.set_input(data, device)
                    model.forward()
                    features = model.get_output()[0]  # batch of 1
                    scene_name, pc_name = dataset.get_name(i)
                    save(output_path, scene_name, pc_name, data.to("cpu"),
                         features)
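
The save helper used in this example is not shown in the snippet. A minimal sketch of what it could look like, assuming it simply serializes the (CPU) point cloud together with its features under output_path/scene_name/ (the helper in the original project may use a different layout or format):

import os
import torch

def save(output_path, scene_name, pc_name, data, features):
    # Hypothetical helper: write the point cloud and its learned descriptors
    # to a single .pt file so they can be reloaded later for matching.
    out_dir = os.path.join(output_path, scene_name)
    os.makedirs(out_dir, exist_ok=True)
    torch.save({"data": data, "feature": features},
               os.path.join(out_dir, "{}.pt".format(pc_name)))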
Example #2
    def track(self, model: BaseModel, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)

        outputs = model.get_output()
        targets = model.get_labels()

        # Mask ignored label
        mask = targets != self._ignore_label
        outputs = outputs[mask]
        targets = targets[mask]

        outputs = self._convert(outputs)
        targets = self._convert(targets)
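        # _convert presumably detaches the tensors and turns them into numpy
        # arrays, which is what np.argmax below expects.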

        if len(targets) == 0:
            return

        assert outputs.shape[0] == len(targets)
        self._confusion_matrix.count_predicted_batch(targets, np.argmax(outputs, 1))

        self._acc = 100 * self._confusion_matrix.get_overall_accuracy()
        self._macc = 100 * self._confusion_matrix.get_mean_class_accuracy()
        self._miou = 100 * self._confusion_matrix.get_average_intersection_union()
Example #3
    def track(self, model: BaseModel, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)

        outputs = model.get_output()
        targets = model.get_labels()

        # Mask ignored label
        mask = targets != self._ignore_label
        outputs = outputs[mask]
        targets = targets[mask]

        outputs = SegmentationTracker.detach_tensor(outputs)
        targets = SegmentationTracker.detach_tensor(targets)
        if not torch.is_tensor(targets):
            targets = torch.from_numpy(targets)
        self._ap_meter.add(outputs,
                           F.one_hot(targets, self._num_classes).bool())
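        # The AP meter accumulates raw class scores against one-hot targets so
        # that mean average precision (_map below) can be reported.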

        outputs = self._convert(outputs)
        targets = self._convert(targets)

        if len(targets) == 0:
            return

        assert outputs.shape[0] == len(targets)
        self._confusion_matrix.count_predicted_batch(targets,
                                                     np.argmax(outputs, 1))

        self._acc = 100 * self._confusion_matrix.get_overall_accuracy()
        self._macc = 100 * self._confusion_matrix.get_mean_class_accuracy()
        self._miou = 100 * self._confusion_matrix.get_average_intersection_union()
        self._map = 100 * self._ap_meter.value().mean().item()
Example #4
    def track(self, model: BaseModel, full_res=False, **kwargs):
        """ Add current model predictions (usually the result of a batch) to the tracking
        """
        super().track(model)

        # Train mode or low res, nothing special to do
        if self._stage == "train" or not full_res:
            return

        # Test mode, compute votes in order to get full res predictions
        if self._test_area is None:
            self._test_area = self._dataset.test_data.clone()
            if self._test_area.y is None:
                raise ValueError("It seems that the test area data does not have labels (attribute y).")
            self._test_area.prediction_count = torch.zeros(self._test_area.y.shape[0], dtype=torch.int)
            self._test_area.votes = torch.zeros((self._test_area.y.shape[0], self._num_classes), dtype=torch.float)
            self._test_area.to(model.device)

        # Gather input to the model and check that it fits with the test set
        inputs = model.get_input()
        if inputs[SaveOriginalPosId.KEY] is None or inputs[SaveOriginalPosId.KEY].max() >= self._test_area.pos.shape[0]:
            raise ValueError(
                "The inputs given to the model do not have a %s attribute or this attribute "
                "does not correspond to the number of points in the test area point cloud."
                % SaveOriginalPosId.KEY
            )

        # Set predictions
        outputs = model.get_output()
        self._test_area.votes[inputs[SaveOriginalPosId.KEY]] += outputs
        self._test_area.prediction_count[inputs[SaveOriginalPosId.KEY]] += 1
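        # Every evaluated sample adds its class scores to the full-resolution
        # points it covers, and the counter records how often each point was
        # seen; full-resolution predictions are presumably derived from these
        # votes in a later step (not shown here).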
Example #5
    def track(self, model: BaseModel):
        """ Add model predictions (accuracy)
        """
        super().track(model)

        outputs = self._convert(model.get_output())
        N = len(outputs) // 2
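        # Assumed layout: the first half of the batch holds source descriptors
        # and the second half their matching target descriptors.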

        self._acc = compute_accuracy(outputs[:N], outputs[N:])
Example #6
def run(model: BaseModel, dataset: BaseDataset, device, cfg):
    dataset.create_dataloaders(
        model,
        1,
        False,
        cfg.training.num_workers,
        False,
    )
    loader = dataset.test_dataloaders[0]
    list_res = []
    with Ctq(loader) as tq_test_loader:
        for i, data in enumerate(tq_test_loader):
            with torch.no_grad():
                model.set_input(data, device)
                model.forward()

                name_scene, name_pair_source, name_pair_target = dataset.test_dataset[0].get_name(i)
                input, input_target = model.get_input()
                xyz, xyz_target = input.pos, input_target.pos
                ind, ind_target = input.ind, input_target.ind
                matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)
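                # Ground-truth correspondences as an (N, 2) tensor of index
                # pairs into the source and target clouds.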
                feat, feat_target = model.get_output()
                rand = torch.randperm(len(feat))[:cfg.data.num_points]
                rand_target = torch.randperm(
                    len(feat_target))[:cfg.data.num_points]
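                # Evaluate on a random subset of cfg.data.num_points descriptors
                # per cloud to keep the matching metrics tractable.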
                res = dict(name_scene=name_scene,
                           name_pair_source=name_pair_source,
                           name_pair_target=name_pair_target)
                T_gt = estimate_transfo(xyz[matches_gt[:, 0]],
                                        xyz_target[matches_gt[:, 1]])
                metric = compute_metrics(
                    xyz[rand],
                    xyz_target[rand_target],
                    feat[rand],
                    feat_target[rand_target],
                    T_gt,
                    sym=cfg.data.sym,
                    tau_1=cfg.data.tau_1,
                    tau_2=cfg.data.tau_2,
                    rot_thresh=cfg.data.rot_thresh,
                    trans_thresh=cfg.data.trans_thresh,
                    use_ransac=cfg.data.use_ransac,
                    ransac_thresh=cfg.data.first_subsampling,
                    use_teaser=cfg.data.use_teaser,
                    noise_bound_teaser=cfg.data.noise_bound_teaser,
                )
                res = dict(**res, **metric)
                list_res.append(res)

    df = pd.DataFrame(list_res)
    output_path = os.path.join(cfg.training.checkpoint_dir, cfg.data.name,
                               "matches")
    os.makedirs(output_path, exist_ok=True)
    df.to_csv(osp.join(output_path, "final_res.csv"))
    print(df.groupby("name_scene").mean())
Example #7
def run(model: BaseModel, dataset: BaseDataset, device, output_path):
    loaders = dataset.test_dataloaders
    predicted = {}
    for loader in loaders:
        loader.dataset.name  # dataset name of this loader (accessed but not used here)
        with Ctq(loader) as tq_test_loader:
            for data in tq_test_loader:
                with torch.no_grad():
                    model.set_input(data, device)
                    model.forward()
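                # predict_original_samples presumably maps the network output
                # back onto the original, full-resolution input samples
                # (behaviour assumed from torch-points3d).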
                predicted = {
                    **predicted,
                    **dataset.predict_original_samples(data, model.conv_type,
                                                       model.get_output())
                }

    save(output_path, predicted)
Example #8
def run(model: BaseModel, dataset: BaseDataset, device, cfg):

    reg_thresh = cfg.data.registration_recall_thresh
    if reg_thresh is None:
        reg_thresh = 0.2
    print(time.strftime("%Y%m%d-%H%M%S"))
    dataset.create_dataloaders(
        model, 1, False, cfg.training.num_workers, False,
    )
    loader = dataset.test_dataloaders[0]
    list_res = []
    with Ctq(loader) as tq_test_loader:
        for i, data in enumerate(tq_test_loader):
            with torch.no_grad():
                t0 = time.time()
                model.set_input(data, device)
                model.forward()
                t1 = time.time()
                name_scene, name_pair_source, name_pair_target = dataset.test_dataset[0].get_name(i)
                input, input_target = model.get_input()
                xyz, xyz_target = input.pos, input_target.pos
                ind, ind_target = input.ind, input_target.ind
                matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)
                feat, feat_target = model.get_output()
                # rand = voxel_selection(xyz, grid_size=0.06, min_points=cfg.data.min_points)
                # rand_target = voxel_selection(xyz_target, grid_size=0.06, min_points=cfg.data.min_points)

                rand = torch.randperm(len(feat))[: cfg.data.num_points]
                rand_target = torch.randperm(len(feat_target))[: cfg.data.num_points]
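                # Random subsets of cfg.data.num_points descriptors keep the
                # correspondence search and the optional RANSAC / TEASER++
                # registration steps tractable.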
                res = dict(name_scene=name_scene, name_pair_source=name_pair_source, name_pair_target=name_pair_target)
                T_gt = estimate_transfo(xyz[matches_gt[:, 0]], xyz_target[matches_gt[:, 1]])
                t2 = time.time()
                metric = compute_metrics(
                    xyz[rand],
                    xyz_target[rand_target],
                    feat[rand],
                    feat_target[rand_target],
                    T_gt,
                    sym=cfg.data.sym,
                    tau_1=cfg.data.tau_1,
                    tau_2=cfg.data.tau_2,
                    rot_thresh=cfg.data.rot_thresh,
                    trans_thresh=cfg.data.trans_thresh,
                    use_ransac=cfg.data.use_ransac,
                    ransac_thresh=cfg.data.first_subsampling,
                    use_teaser=cfg.data.use_teaser,
                    noise_bound_teaser=cfg.data.noise_bound_teaser,
                    xyz_gt=xyz[matches_gt[:, 0]],
                    xyz_target_gt=xyz_target[matches_gt[:, 1]],
                    registration_recall_thresh=reg_thresh,
                )
                res = dict(**res, **metric)
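                # Per-pair wall-clock timings: feature extraction (t1 - t0) and
                # ground-truth / metric preparation (t2 - t1).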
                res["time_feature"] = t1 - t0
                res["time_feature_per_point"] = (t1 - t0) / (len(input.pos) + len(input_target.pos))
                res["time_prep"] = t2 - t1

                list_res.append(res)

    df = pd.DataFrame(list_res)
    output_path = os.path.join(cfg.training.checkpoint_dir, cfg.data.name, "matches")
    os.makedirs(output_path, exist_ok=True)
    df.to_csv(osp.join(output_path, "final_res_{}.csv".format(time.strftime("%Y%m%d-%H%M%S"))))
    print(df.groupby("name_scene").mean())
Example #9
def run(model: BaseModel, dataset: BaseDataset, device, cfg):
    print(time.strftime("%Y%m%d-%H%M%S"))
    dataset.create_dataloaders(
        model,
        1,
        False,
        cfg.training.num_workers,
        False,
    )
    test_dataset = dataset.test_dataset[0]

    ind = 0
    if cfg.ind is not None:
        ind = cfg.ind
    t = 5
    if cfg.t is not None:
        t = cfg.t
    r = 0.1
    if cfg.r is not None:
        r = cfg.r
    print(test_dataset)
    print(ind)
    data = test_dataset[ind]
    data.batch = torch.zeros(len(data.pos)).long()
    data.batch_target = torch.zeros(len(data.pos_target)).long()
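    # A single pair is evaluated, so every point is assigned to batch index 0.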
    print(data)
    with torch.no_grad():
        model.set_input(data, device)
        model.forward()

        name_scene, name_pair_source, name_pair_target = dataset.test_dataset[0].get_name(ind)
        print(name_scene, name_pair_source, name_pair_target)
        input, input_target = model.get_input()
        xyz, xyz_target = input.pos, input_target.pos
        ind, ind_target = input.ind, input_target.ind
        matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)
        feat, feat_target = model.get_output()
        # rand = voxel_selection(xyz, grid_size=0.06, min_points=cfg.data.min_points)
        # rand_target = voxel_selection(xyz_target, grid_size=0.06, min_points=cfg.data.min_points)

        rand = torch.randperm(len(feat))[:cfg.data.num_points]
        rand_target = torch.randperm(len(feat_target))[:cfg.data.num_points]
        T_gt = estimate_transfo(xyz[matches_gt[:, 0]].clone(),
                                xyz_target[matches_gt[:, 1]].clone())
        matches_pred = get_matches(feat[rand],
                                   feat_target[rand_target],
                                   sym=cfg.data.sym)
        # For colouring: mark predicted matches that lie within tau_1 of the
        # ground-truth alignment (inliers).
        inliers = (torch.norm(
            xyz[rand][matches_pred[:, 0]] @ T_gt[:3, :3].T + T_gt[:3, 3] -
            xyz_target[rand_target][matches_pred[:, 1]],
            dim=1,
        ) < cfg.data.tau_1)
        # compute transformation
        T_teaser = teaser_pp_registration(
            xyz[rand][matches_pred[:, 0]],
            xyz_target[rand_target][matches_pred[:, 1]],
            noise_bound=cfg.data.tau_1)
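        # Rigid transform estimated from the predicted matches with TEASER++;
        # it is applied to the source cloud below so the result can be compared
        # visually against the ground-truth alignment T_gt.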
        pcd_source = torch2o3d(input, [1, 0.7, 0.1])

        pcd_target = torch2o3d(input_target, [0, 0.15, 0.9])
        open3d.visualization.draw_geometries([pcd_source, pcd_target])
        pcd_source.transform(T_teaser.cpu().numpy())
        open3d.visualization.draw_geometries([pcd_source, pcd_target])
        pcd_source.transform(np.linalg.inv(T_teaser.cpu().numpy()))
        rand_ind = torch.randperm(len(rand[matches_pred[:, 0]]))[:250]
        pcd_source.transform(T_gt.cpu().numpy())
        kp_s = torch2o3d(input, ind=rand[matches_pred[:, 0]][rand_ind])
        kp_s.transform(T_gt.cpu().numpy())
        kp_t = torch2o3d(input_target,
                         ind=rand_target[matches_pred[:, 1]][rand_ind])
        match_visualizer(pcd_source,
                         kp_s,
                         pcd_target,
                         kp_t,
                         inliers[rand_ind].cpu().numpy(),
                         radius=r,
                         t=t)