Example #1: full test-set evaluation that computes registration metrics for every pair and saves them to a CSV.
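# Assumed context, not shown in the snippet: a torch-points3d style evaluation script,
# with os, os.path as osp, pandas as pd and torch imported at module level, and
# BaseModel, BaseDataset, Ctq (a tqdm wrapper), estimate_transfo and compute_metrics
# provided by the surrounding project.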
def run(model: BaseModel, dataset: BaseDataset, device, cfg):
    # Positional arguments, following the create_dataloaders signature assumed here:
    # batch_size=1, shuffle=False, num_workers, precompute_multi_scale=False.
    dataset.create_dataloaders(
        model,
        1,
        False,
        cfg.training.num_workers,
        False,
    )
    loader = dataset.test_dataloaders[0]
    list_res = []
    with Ctq(loader) as tq_test_loader:
        for i, data in enumerate(tq_test_loader):
            with torch.no_grad():
                model.set_input(data, device)
                model.forward()

                name_scene, name_pair_source, name_pair_target = dataset.test_dataset[
                    0].get_name(i)
                input, input_target = model.get_input()
                xyz, xyz_target = input.pos, input_target.pos
                ind, ind_target = input.ind, input_target.ind
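                # Stack the ground-truth correspondence indices into an (N, 2) match array.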
                matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)
                feat, feat_target = model.get_output()
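                # Randomly subsample both point clouds / feature sets to num_points.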
                rand = torch.randperm(len(feat))[:cfg.data.num_points]
                rand_target = torch.randperm(
                    len(feat_target))[:cfg.data.num_points]
                res = dict(name_scene=name_scene,
                           name_pair_source=name_pair_source,
                           name_pair_target=name_pair_target)
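                # Ground-truth rigid transform estimated from the known correspondences.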
                T_gt = estimate_transfo(xyz[matches_gt[:, 0]],
                                        xyz_target[matches_gt[:, 1]])
                metric = compute_metrics(
                    xyz[rand],
                    xyz_target[rand_target],
                    feat[rand],
                    feat_target[rand_target],
                    T_gt,
                    sym=cfg.data.sym,
                    tau_1=cfg.data.tau_1,
                    tau_2=cfg.data.tau_2,
                    rot_thresh=cfg.data.rot_thresh,
                    trans_thresh=cfg.data.trans_thresh,
                    use_ransac=cfg.data.use_ransac,
                    ransac_thresh=cfg.data.first_subsampling,
                    use_teaser=cfg.data.use_teaser,
                    noise_bound_teaser=cfg.data.noise_bound_teaser,
                )
                res = dict(**res, **metric)
                list_res.append(res)

    df = pd.DataFrame(list_res)
    output_path = osp.join(cfg.training.checkpoint_dir, cfg.data.name, "matches")
    os.makedirs(output_path, exist_ok=True)
    df.to_csv(osp.join(output_path, "final_res.csv"))
    # numeric_only so the string columns (pair names) are skipped by the aggregation
    print(df.groupby("name_scene").mean(numeric_only=True))
Example #2: unit test checking that estimate_transfo recovers a random rigid transform.
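    # Assumed context: torch, numpy as np and numpy.testing as npt are imported in the test
    # module, and euler_angles_to_rotation_matrix / estimate_transfo are project helpers.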
    def test_estimate_transfo(self):

        a = torch.randn(100, 3)

        R_gt = euler_angles_to_rotation_matrix(torch.rand(3) * np.pi)
        t_gt = torch.rand(3)
        T_gt = torch.eye(4)
        T_gt[:3, :3] = R_gt
        T_gt[:3, 3] = t_gt
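        # Apply the ground-truth rotation and translation to build the target cloud.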
        b = a.mm(R_gt.T) + t_gt
        T_pred = estimate_transfo(a, b)

        npt.assert_allclose(T_pred.numpy(), T_gt.numpy(), rtol=1e-3)
Example #3: tracker method that computes matching and registration metrics for each batch outside of training.
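    # Assumed context: a tracker method in a torch-points3d style metrics class; self._hit_ratio,
    # self._trans_error, etc. are running-average meters defined elsewhere in the class, and the
    # helpers (estimate_transfo, get_matches, fast_global_registration, ...) come from the project.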
    def track(self, model: model_interface.TrackerInterface, **kwargs):
        super().track(model)
        if self._stage != "train":
            batch_idx, batch_idx_target = model.get_batch()
            # batch_xyz, batch_xyz_target = model.get_xyz()  # type: ignore
            # batch_ind, batch_ind_target, batch_size_ind = model.get_ind()  # type: ignore
            input, input_target = model.get_input()
            batch_xyz, batch_xyz_target = input.pos, input_target.pos
            batch_ind, batch_ind_target, batch_size_ind = input.ind, input_target.ind, input.size
            batch_feat, batch_feat_target = model.get_output()

            nb_batches = batch_idx.max() + 1
            cum_sum = 0
            cum_sum_target = 0
            begin = 0
            end = batch_size_ind[0].item()
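            # Evaluate each fragment pair of the batch independently.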
            for b in range(nb_batches):
                xyz = batch_xyz[batch_idx == b]
                xyz_target = batch_xyz_target[batch_idx_target == b]
                feat = batch_feat[batch_idx == b]
                feat_target = batch_feat_target[batch_idx_target == b]
                # as we have concatenated ind,
                # we need to subtract the cum_sum because we deal
                # with each batch independently
                # ind = batch_ind[b * len(batch_ind) / nb_batches : (b + 1) * len(batch_ind) / nb_batches] - cum_sum
                # ind_target = (batch_ind_target[b * len(batch_ind_target) / nb_batches : (b + 1) * len(batch_ind_target) / nb_batches]- cum_sum_target)
                ind = batch_ind[begin:end] - cum_sum
                ind_target = batch_ind_target[begin:end] - cum_sum_target
                # print(begin, end)
                if b < nb_batches - 1:
                    begin = end
                    end = begin + batch_size_ind[b + 1].item()
                cum_sum += len(xyz)
                cum_sum_target += len(xyz_target)
                rand = torch.randperm(len(feat))[:self.num_points]
                rand_target = torch.randperm(
                    len(feat_target))[:self.num_points]

                matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)

                # print(matches_gt.max(0), len(xyz), len(xyz_target), len(matches_gt))
                # print(batch_ind.shape, nb_batches)
                T_gt = estimate_transfo(xyz[matches_gt[:, 0]],
                                        xyz_target[matches_gt[:, 1]])

                matches_pred = get_matches(feat[rand],
                                           feat_target[rand_target])
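                # Register the clouds from the predicted matches, then score the hit ratio and pose errors against T_gt.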
                T_pred = fast_global_registration(
                    xyz[rand][matches_pred[:, 0]],
                    xyz_target[rand_target][matches_pred[:, 1]])

                hit_ratio = compute_hit_ratio(
                    xyz[rand][matches_pred[:, 0]],
                    xyz_target[rand_target][matches_pred[:,
                                                         1]], T_gt, self.tau_1)

                trans_error, rot_error = compute_transfo_error(T_pred, T_gt)

                sr_err = compute_scaled_registration_error(xyz, T_gt, T_pred)
                self._hit_ratio.add(hit_ratio.item())
                self._feat_match_ratio.add(
                    float(hit_ratio.item() > self.tau_2))
                self._trans_error.add(trans_error.item())
                self._rot_error.add(rot_error.item())
                self._rre.add(rot_error.item() < self.rot_thresh)
                self._rte.add(trans_error.item() < self.trans_thresh)
                self._sr_err.add(sr_err.item())
Example #4: test-set evaluation like Example #1, extended with timing measurements and a registration-recall threshold.
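# Assumed context: same module-level imports and project helpers as Example #1, plus the
# standard-library time module for the timing measurements below.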
def run(model: BaseModel, dataset: BaseDataset, device, cfg):

    # Fall back to a default registration-recall threshold when the config does not provide one.
    reg_thresh = cfg.data.registration_recall_thresh
    if reg_thresh is None:
        reg_thresh = 0.2
    print(time.strftime("%Y%m%d-%H%M%S"))
    dataset.create_dataloaders(
        model, 1, False, cfg.training.num_workers, False,
    )
    loader = dataset.test_dataloaders[0]
    list_res = []
    with Ctq(loader) as tq_test_loader:
        for i, data in enumerate(tq_test_loader):
            with torch.no_grad():
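                # t0/t1 bracket the forward pass so the per-pair feature time can be logged.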
                t0 = time.time()
                model.set_input(data, device)
                model.forward()
                t1 = time.time()
                name_scene, name_pair_source, name_pair_target = dataset.test_dataset[0].get_name(i)
                input, input_target = model.get_input()
                xyz, xyz_target = input.pos, input_target.pos
                ind, ind_target = input.ind, input_target.ind
                matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)
                feat, feat_target = model.get_output()
                # rand = voxel_selection(xyz, grid_size=0.06, min_points=cfg.data.min_points)
                # rand_target = voxel_selection(xyz_target, grid_size=0.06, min_points=cfg.data.min_points)

                rand = torch.randperm(len(feat))[: cfg.data.num_points]
                rand_target = torch.randperm(len(feat_target))[: cfg.data.num_points]
                res = dict(name_scene=name_scene, name_pair_source=name_pair_source, name_pair_target=name_pair_target)
                T_gt = estimate_transfo(xyz[matches_gt[:, 0]], xyz_target[matches_gt[:, 1]])
                t2 = time.time()
                metric = compute_metrics(
                    xyz[rand],
                    xyz_target[rand_target],
                    feat[rand],
                    feat_target[rand_target],
                    T_gt,
                    sym=cfg.data.sym,
                    tau_1=cfg.data.tau_1,
                    tau_2=cfg.data.tau_2,
                    rot_thresh=cfg.data.rot_thresh,
                    trans_thresh=cfg.data.trans_thresh,
                    use_ransac=cfg.data.use_ransac,
                    ransac_thresh=cfg.data.first_subsampling,
                    use_teaser=cfg.data.use_teaser,
                    noise_bound_teaser=cfg.data.noise_bound_teaser,
                    xyz_gt=xyz[matches_gt[:, 0]],
                    xyz_target_gt=xyz_target[matches_gt[:, 1]],
                    registration_recall_thresh=reg_thresh,
                )
                res = dict(**res, **metric)
                res["time_feature"] = t1 - t0
                res["time_feature_per_point"] = (t1 - t0) / (len(input.pos) + len(input_target.pos))
                res["time_prep"] = t2 - t1

                list_res.append(res)

    df = pd.DataFrame(list_res)
    output_path = osp.join(cfg.training.checkpoint_dir, cfg.data.name, "matches")
    os.makedirs(output_path, exist_ok=True)
    df.to_csv(osp.join(output_path, "final_res_{}.csv".format(time.strftime("%Y%m%d-%H%M%S"))))
    print(df.groupby("name_scene").mean(numeric_only=True))
Example #5: single-pair visualization of predicted matches and the TEASER++ registration with Open3D.
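# Assumed context: same project helpers as the previous examples, plus open3d, numpy as np,
# and the project utilities torch2o3d, get_matches, teaser_pp_registration and match_visualizer
# used for the visualization.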
def run(model: BaseModel, dataset: BaseDataset, device, cfg):
    print(time.strftime("%Y%m%d-%H%M%S"))
    dataset.create_dataloaders(
        model,
        1,
        False,
        cfg.training.num_workers,
        False,
    )
    loader = dataset.test_dataset[0]  # despite the name, this is the test dataset itself, indexed directly below

    # Defaults, optionally overridden from the config: the pair index to display and the
    # t / r parameters forwarded to match_visualizer below.
    ind = 0
    if cfg.ind is not None:
        ind = cfg.ind
    t = 5
    if cfg.t is not None:
        t = cfg.t
    r = 0.1
    if cfg.r is not None:
        r = cfg.r
    print(loader)
    print(ind)
    data = loader[ind]
    data.batch = torch.zeros(len(data.pos)).long()
    data.batch_target = torch.zeros(len(data.pos_target)).long()
    print(data)
    with torch.no_grad():
        model.set_input(data, device)
        model.forward()

        name_scene, name_pair_source, name_pair_target = dataset.test_dataset[
            0].get_name(ind)
        print(name_scene, name_pair_source, name_pair_target)
        input, input_target = model.get_input()
        xyz, xyz_target = input.pos, input_target.pos
        ind, ind_target = input.ind, input_target.ind
        matches_gt = torch.stack([ind, ind_target]).transpose(0, 1)
        feat, feat_target = model.get_output()
        # rand = voxel_selection(xyz, grid_size=0.06, min_points=cfg.data.min_points)
        # rand_target = voxel_selection(xyz_target, grid_size=0.06, min_points=cfg.data.min_points)

        rand = torch.randperm(len(feat))[:cfg.data.num_points]
        rand_target = torch.randperm(len(feat_target))[:cfg.data.num_points]
        T_gt = estimate_transfo(xyz[matches_gt[:, 0]].clone(),
                                xyz_target[matches_gt[:, 1]].clone())
        matches_pred = get_matches(feat[rand],
                                   feat_target[rand_target],
                                   sym=cfg.data.sym)
        # For colour-coding: a predicted match counts as an inlier if, after applying T_gt, it lies within tau_1 of its target point
        inliers = (torch.norm(
            xyz[rand][matches_pred[:, 0]] @ T_gt[:3, :3].T + T_gt[:3, 3] -
            xyz_target[rand_target][matches_pred[:, 1]],
            dim=1,
        ) < cfg.data.tau_1)
        # Estimate the transformation from the predicted matches with TEASER++
        T_teaser = teaser_pp_registration(
            xyz[rand][matches_pred[:, 0]],
            xyz_target[rand_target][matches_pred[:, 1]],
            noise_bound=cfg.data.tau_1)
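        # Convert both clouds to Open3D geometries (with RGB colours) and show them before and after alignment.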
        pcd_source = torch2o3d(input, [1, 0.7, 0.1])

        pcd_target = torch2o3d(input_target, [0, 0.15, 0.9])
        open3d.visualization.draw_geometries([pcd_source, pcd_target])
        pcd_source.transform(T_teaser.cpu().numpy())
        open3d.visualization.draw_geometries([pcd_source, pcd_target])
        pcd_source.transform(np.linalg.inv(T_teaser.cpu().numpy()))
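        # Keep a random subset of 250 predicted matches for the correspondence visualization.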
        rand_ind = torch.randperm(len(rand[matches_pred[:, 0]]))[:250]
        pcd_source.transform(T_gt.cpu().numpy())
        kp_s = torch2o3d(input, ind=rand[matches_pred[:, 0]][rand_ind])
        kp_s.transform(T_gt.cpu().numpy())
        kp_t = torch2o3d(input_target,
                         ind=rand_target[matches_pred[:, 1]][rand_ind])
        match_visualizer(pcd_source,
                         kp_s,
                         pcd_target,
                         kp_t,
                         inliers[rand_ind].cpu().numpy(),
                         radius=r,
                         t=t)