Example 1
def solve(datArray, prm):
    global scnFtArray, scnPcArray, Param
    Param.update(prm)
    scnFtArray = []
    scnPcArray = []
    for dat in datArray:
        pc = fromNumpy(dat)
        scnPcArray.append(pc)
        scnFtArray.append(_get_features(pc))
    t1 = time.time()
    resft = o3.registration_ransac_based_on_feature_matching(
        modPcArray[0], scnPcArray[0], modFtArray[0], scnFtArray[0],
        Param["distance_threshold"],
        o3.TransformationEstimationPointToPoint(False), 4, [
            o3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3.CorrespondenceCheckerBasedOnDistance(
                Param["distance_threshold"])
        ], o3.RANSACConvergenceCriteria(2000000, 500))
    print "time for feature matching", time.time() - t1
    print "feature matching", resft.transformation, resft.fitness
    resicp = o3.registration_icp(modPcArray[0], scnPcArray[0],
                                 Param["icp_threshold"], resft.transformation,
                                 o3.TransformationEstimationPointToPlane())
    return {
        "transform": [resicp.transformation],
        "fitness": [resicp.fitness],
        "rmse": [resicp.inlier_rmse]
    }
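The helpers `fromNumpy` and `_get_features` are defined elsewhere in this project. A minimal sketch of what they plausibly do, using the same legacy module-level Open3D API as the example; the `Param` radius keys are assumptions, not the original code:

def fromNumpy(dat):
    # Wrap an (N, 3) numpy array into a legacy Open3D point cloud.
    pc = o3.PointCloud()
    pc.points = o3.Vector3dVector(dat)
    return pc

def _get_features(pc):
    # Estimate normals, then compute the FPFH features consumed by
    # registration_ransac_based_on_feature_matching.
    # "normal_radius" and "feature_radius" are hypothetical Param keys.
    o3.estimate_normals(
        pc, o3.KDTreeSearchParamHybrid(radius=Param["normal_radius"], max_nn=30))
    return o3.compute_fpfh_feature(
        pc, o3.KDTreeSearchParamHybrid(radius=Param["feature_radius"], max_nn=100))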
Example 2
def execute_global_registration(source_down: op3.PointCloud,
                                target_down: op3.PointCloud,
                                source_fpfh: op3.registration.Feature,
                                target_fpfh: op3.registration.Feature,
                                voxel_size: float,
                                verbose=False):
    """
    Do global matching to find a rough transformation from source to target.
    :param source_down, target_down: Open3D point clouds of the source and target after down-sampling
    :param source_fpfh, target_fpfh: Open3D FPFH feature objects for the source and target
    :param voxel_size: a float giving the down-sampling voxel size, i.e. how sparse the sampled points are
    :param verbose: a boolean; when True, print notifications and show visualizations, otherwise stay silent
    :return: a registration result whose `transformation` field holds the estimated transform
    """
    distance_threshold = voxel_size * 1.5
    if verbose:
        print(":: RANSAC registration on downsampled point clouds.")
        print("   Since the downsampling voxel size is %.3f," % voxel_size)
        print("   we use a liberal distance threshold %.3f." %
              distance_threshold)
    result = op3.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
        op3.TransformationEstimationPointToPoint(False), 4, [
            op3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            op3.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ], op3.RANSACConvergenceCriteria(4000000, 500))
    return result
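A minimal usage sketch for the function above, assuming `op3` is `import open3d as op3` (legacy module-level API) and that the input files exist; the `prepare` helper mirrors the standard Open3D preprocessing pipeline and is not part of the original example:

import open3d as op3

def prepare(pcd, voxel_size):
    # Down-sample, estimate normals, and compute FPFH features (legacy API).
    pcd_down = op3.voxel_down_sample(pcd, voxel_size)
    op3.estimate_normals(
        pcd_down, op3.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
    fpfh = op3.compute_fpfh_feature(
        pcd_down, op3.KDTreeSearchParamHybrid(radius=voxel_size * 5, max_nn=100))
    return pcd_down, fpfh

voxel_size = 0.05
source_down, source_fpfh = prepare(op3.read_point_cloud("source.pcd"), voxel_size)
target_down, target_fpfh = prepare(op3.read_point_cloud("target.pcd"), voxel_size)
result = execute_global_registration(source_down, target_down, source_fpfh,
                                     target_fpfh, voxel_size, verbose=True)
print(result.transformation)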
Example 3
def execute_global_registration(src_keypts, tgt_keypts, src_desc, tgt_desc, distance_threshold):
    result = open3d.registration_ransac_based_on_feature_matching(
        src_keypts, tgt_keypts, src_desc, tgt_desc,
        distance_threshold,
        open3d.TransformationEstimationPointToPoint(False), 4,
        [open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
         open3d.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
        open3d.RANSACConvergenceCriteria(4000000, 500))
    return result
Example 4
def execute_global_registration(kp1, kp2, fpfh1, fpfh2, voxel_size):
    distance_threshold = voxel_size * 2.5
    result = open3d.registration_ransac_based_on_feature_matching(
        kp1, kp2, fpfh1, fpfh2, distance_threshold,
        open3d.TransformationEstimationPointToPoint(False), 4, [
            open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            open3d.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ], open3d.RANSACConvergenceCriteria(500000, 1000))
    return result
Example 5
def match_():
    source = copy.deepcopy(modPcArray[0])
    source_fpfh = modFtArray[0]
    target = scnPcArray[0]
    target_fpfh = scnFtArray[0]
    reg_global = o3d.registration_ransac_based_on_feature_matching(
        source, target, source_fpfh, target_fpfh, param['distance_threshold'],
        o3d.TransformationEstimationPointToPoint(False), 4, [
            o3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3d.CorrespondenceCheckerBasedOnDistance(
                param['distance_threshold'])
        ], o3d.RANSACConvergenceCriteria(4000000, 500))
    source.transform(reg_global.transformation)
    return reg_global, [source]
Example 6
def icp_o3_gicp(file_idx,
                cfg,
                refine=None,
                refine_radius=0.05,
                precomputed_results=None):
    pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
    voxel_size = 0.05
    start = time.time()
    if precomputed_results is None:
        distance_threshold = voxel_size * 1.5
        source_down, target_down, source_fpfh, target_fpfh = ICP._icp_global_prepare_dataset(
            pc1, pc2, voxel_size)
        reg_res = o3.registration_ransac_based_on_feature_matching(
            source_down,
            target_down,
            source_fpfh,
            target_fpfh,
            distance_threshold,
            o3.TransformationEstimationPointToPoint(
                with_constraint=cfg.evaluation.special.icp.with_constraint,
                with_scaling=False),
            4,  # scaling=False
            [
                o3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
                o3.CorrespondenceCheckerBasedOnDistance(distance_threshold)
            ],
            o3.RANSACConvergenceCriteria(4000000, 500))
        transformation = reg_res.transformation
    else:
        precomp_pred_translation, precomp_pred_angle, precomp_pred_center = precomputed_results
        transformation = get_mat_angle(precomp_pred_translation,
                                       precomp_pred_angle, precomp_pred_center)

    if refine is None:
        time_elapsed = time.time() - start
        return transformation, pc1_centroid, time_elapsed
    else:
        if refine == 'p2p':
            reg_p2p = o3.registration_icp(
                pc1, pc2, refine_radius, transformation,
                o3.TransformationEstimationPointToPoint(
                    with_constraint=cfg.evaluation.special.icp.with_constraint,
                    with_scaling=False))
            #  if file_idx == 8019:
            #  print('->', reg_p2p.transformation)
            time_elapsed = time.time() - start
            return reg_p2p.transformation, pc1_centroid, time_elapsed
        else:
            assert False
Example 7
def execute_global_registration(source_down, target_down, source_fpfh,
                                target_fpfh, voxel_size):
    distance_threshold = voxel_size * 1.5
    print(":: RANSAC registration on downsampled point clouds.")
    print("   Since the downsampling voxel size is %.3f," % voxel_size)
    print("   we use a liberal distance threshold %.3f." % distance_threshold)
    result = op3.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
        op3.TransformationEstimationPointToPoint(False), 4, [
            op3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            op3.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ],
        op3.RANSACConvergenceCriteria(4000000, 500)
        # op3.RANSACConvergenceCriteria(10000000, 20000)
    )
    return result
Example 8
    def update_source_to_target_transformation(self):
        """
        Function to update the source to target transformation matrix
        
         
        """
        source = copy.deepcopy(self.source_pcl)
        target = pn.voxel_down_sample(self.target_pcl, target_voxel_size)

        if self.source_to_target_transformation.size > 0:
            # the global registration has already been done; refine it with ICP
            source.transform(self.source_to_target_transformation)
            icp_result = pn.registration_icp(
                source, target, distance_threshold_icp, np.eye(4),
                pn.TransformationEstimationPointToPlane())
            if icp_result.fitness > 0.8:
                self.source_to_target_transformation =\
                    icp_result.transformation.dot(self.source_to_target_transformation)
                return

        # we failed to align source & target with ICP
        # GLOBAL REGISTRATION
        # PREPROCESSING POINT CLOUD
        source = copy.deepcopy(self.source_pcl)

        source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
        target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)

        # GLOBAL REGISTRATION
        ransac_result = pn.registration_ransac_based_on_feature_matching(
            source_down, target_down, source_fpfh, target_fpfh,
            distance_threshold_ransac,
            pn.TransformationEstimationPointToPoint(False), 4, [
                pn.CorrespondenceCheckerBasedOnDistance(
                    distance_threshold_ransac)
            ], pn.RANSACConvergenceCriteria(10000000, 10000))

        source.transform(ransac_result.transformation)

        icp_result = pn.registration_icp(
            source, target, distance_threshold_icp, np.eye(4),
            pn.TransformationEstimationPointToPlane())

        self.source_to_target_transformation = \
            icp_result.transformation.dot(ransac_result.transformation)

        return
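Examples 8 and 12 call `preprocess_point_cloud` without showing it. A plausible implementation, modeled on the standard Open3D tutorial helper and assuming `pn` is the legacy Open3D module (an assumption, not the original author's code):

def preprocess_point_cloud(pcd, voxel_size):
    # Down-sample, estimate normals, then compute FPFH features (legacy API).
    pcd_down = pn.voxel_down_sample(pcd, voxel_size)
    pn.estimate_normals(
        pcd_down, pn.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
    pcd_fpfh = pn.compute_fpfh_feature(
        pcd_down, pn.KDTreeSearchParamHybrid(radius=voxel_size * 5, max_nn=100))
    return pcd_down, pcd_fpfh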
Example 9
    def register(self):
        start = time.time()
        result = open3d.registration_ransac_based_on_feature_matching(
            self.source_open3d, self.target_open3d, self.source_fpfh,
            self.target_fpfh, self.epsilon,
            open3d.TransformationEstimationPointToPoint(False), 4, [
                open3d.CorrespondenceCheckerBasedOnEdgeLength(0.95),
                open3d.CorrespondenceCheckerBasedOnDistance(self.epsilon)
            ],
            open3d.RANSACConvergenceCriteria(self.max_iteration,
                                             self.max_validation))
        end = time.time()

        self.time = end - start
        self.transformation = result.transformation

        return
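`register` reads several attributes set elsewhere in its class. A hypothetical constructor sketch showing what the method assumes; everything beyond the attribute names used above is invented for illustration:

class RansacRegistration:
    def __init__(self, source_open3d, target_open3d, source_fpfh, target_fpfh,
                 epsilon=0.05, max_iteration=4000000, max_validation=500):
        self.source_open3d = source_open3d    # Open3D point clouds
        self.target_open3d = target_open3d
        self.source_fpfh = source_fpfh        # FPFH feature objects
        self.target_fpfh = target_fpfh
        self.epsilon = epsilon                # RANSAC inlier distance threshold
        self.max_iteration = max_iteration    # RANSACConvergenceCriteria args
        self.max_validation = max_validation
        self.time = None
        self.transformation = None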
Example 10
def register2Fragments(id1, id2, keyptspath, descpath, resultpath, logpath, gtLog, desc_name, inlier_ratio, distance_threshold):
    """
    Register point cloud {id1} and {id2} using the keypts location and descriptors.
    """
    cloud_bin_s = f'cloud_bin_{id1}'
    cloud_bin_t = f'cloud_bin_{id2}'
    write_file = f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'
    if os.path.exists(os.path.join(resultpath, write_file)):
        return 0, 0, 0
    source_keypts = get_keypts(keyptspath, cloud_bin_s)
    target_keypts = get_keypts(keyptspath, cloud_bin_t)
    source_desc = get_desc(descpath, cloud_bin_s, desc_name)
    target_desc = get_desc(descpath, cloud_bin_t, desc_name)
    source_desc = np.nan_to_num(source_desc)
    target_desc = np.nan_to_num(target_desc)
    # Select {num_keypts} points based on the scores. The descriptors and keypts are already sorted based on the detection score.
    num_keypts = 250
    source_keypts = source_keypts[-num_keypts:, :]
    source_desc = source_desc[-num_keypts:, :]
    target_keypts = target_keypts[-num_keypts:, :]
    target_desc = target_desc[-num_keypts:, :]
    # Select {num_keypts} points randomly.
    # num_keypts = 250
    # source_indices = np.random.choice(range(source_keypts.shape[0]), num_keypts)
    # target_indices = np.random.choice(range(target_keypts.shape[0]), num_keypts)
    # source_keypts = source_keypts[source_indices, :]
    # source_desc = source_desc[source_indices, :]
    # target_keypts = target_keypts[target_indices, :]
    # target_desc = target_desc[target_indices, :]
    key = f'{cloud_bin_s.split("_")[-1]}_{cloud_bin_t.split("_")[-1]}'
    if key not in gtLog.keys():
        # skip the pairs that have less than 30% overlap.
        num_inliers = 0
        inlier_ratio = 0
        gt_flag = 0
    else:
        # build correspondence set in feature space.
        corr = build_correspondence(source_desc, target_desc)

        # calculate the inlier ratio, this is for Feature Matching Recall.
        gt_trans = gtLog[key]
        frag1 = source_keypts[corr[:, 0]]
        frag2_pc = open3d.PointCloud()
        frag2_pc.points = open3d.utility.Vector3dVector(target_keypts[corr[:, 1]])
        frag2_pc.transform(gt_trans)
        frag2 = np.asarray(frag2_pc.points)
        distance = np.sqrt(np.sum(np.power(frag1 - frag2, 2), axis=1))
        num_inliers = np.sum(distance < distance_threshold)
        if num_inliers / len(distance) < inlier_ratio:
            print(key)
            print("num_corr:", len(corr), "inlier_ratio:", num_inliers / len(distance))
        inlier_ratio = num_inliers / len(distance)
        gt_flag = 1

        # calculate the transformation matrix using RANSAC, this is for Registration Recall.
        source_pcd = open3d.PointCloud()
        source_pcd.points = open3d.utility.Vector3dVector(source_keypts)
        target_pcd = open3d.PointCloud()
        target_pcd.points = open3d.utility.Vector3dVector(target_keypts)
        s_desc = open3d.registration.Feature()
        s_desc.data = source_desc.T
        t_desc = open3d.registration.Feature()
        t_desc.data = target_desc.T
        result = open3d.registration_ransac_based_on_feature_matching(
            source_pcd, target_pcd, s_desc, t_desc,
            0.05,
            open3d.TransformationEstimationPointToPoint(False), 3,
            [open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
             open3d.CorrespondenceCheckerBasedOnDistance(0.05)],
            open3d.RANSACConvergenceCriteria(50000, 1000))

        # write the transformation matrix into .log file for evaluation.
        with open(os.path.join(logpath, f'{desc_name}_{timestr}.log'), 'a+') as f:
            trans = result.transformation
            trans = np.linalg.inv(trans)
            s1 = f'{id1}\t {id2}\t  37\n'
            f.write(s1)
            f.write(f"{trans[0,0]}\t {trans[0,1]}\t {trans[0,2]}\t {trans[0,3]}\t \n")
            f.write(f"{trans[1,0]}\t {trans[1,1]}\t {trans[1,2]}\t {trans[1,3]}\t \n")
            f.write(f"{trans[2,0]}\t {trans[2,1]}\t {trans[2,2]}\t {trans[2,3]}\t \n")
            f.write(f"{trans[3,0]}\t {trans[3,1]}\t {trans[3,2]}\t {trans[3,3]}\t \n")

    # write the result into resultpath so that it can be re-shown.
    s = f"{cloud_bin_s}\t{cloud_bin_t}\t{num_inliers}\t{inlier_ratio:.8f}\t{gt_flag}"
    with open(os.path.join(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'), 'w+') as f:
        f.write(s)
    return num_inliers, inlier_ratio, gt_flag
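`build_correspondence` is called above but not shown. A minimal sketch of a nearest-neighbor matcher in descriptor space; the original may use a different strategy (e.g. mutual-nearest-neighbor checks), so this is an assumption:

from scipy.spatial import cKDTree

def build_correspondence(source_desc, target_desc):
    # For each source descriptor, find its nearest target descriptor and
    # return the matched index pairs as an (N, 2) array.
    tree = cKDTree(target_desc)
    _, nn_idx = tree.query(source_desc, k=1)
    return np.stack([np.arange(len(source_desc)), nn_idx], axis=1)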
Example 11
def main(config):
  test_loader = make_data_loader(
      config, config.test_phase, 1, num_threads=config.test_num_thread, shuffle=True)

  num_feats = 1

  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

  Model = load_model(config.model)
  model = Model(num_feats, config.model_n_out, config=config)
  checkpoint = torch.load(config.save_dir + '/checkpoint.pth')
  model.load_state_dict(checkpoint['state_dict'])
  model = model.to(device)
  model.eval()

  success_meter, loss_meter, rte_meter, rre_meter = \
      AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
  data_timer, feat_timer, reg_timer = Timer(), Timer(), Timer()

  test_iter = test_loader.__iter__()
  N = len(test_iter)
  n_gpu_failures = 0

  # downsample_voxel_size = 2 * config.voxel_size

  for i in range(len(test_iter)):

    data_timer.tic()
    try:
      xyz0, xyz1, coord0, coord1, feats0, feats1, matching01, T_gth, len_01 = \
          test_iter.next()
    except ValueError:
      n_gpu_failures += 1
      logging.info(f"# Erroneous GPU Pair {n_gpu_failures}")
      continue
    data_timer.toc()

    xyz0np, xyz1np = xyz0.numpy(), xyz1.numpy()

    pcd0 = make_open3d_point_cloud(xyz0np)
    pcd1 = make_open3d_point_cloud(xyz1np)

    with torch.no_grad():
      feat_timer.tic()
      sinput0 = ME.SparseTensor(feats0, coords=coord0).to(device)
      F0 = model(sinput0).F.detach()
      sinput1 = ME.SparseTensor(feats1, coords=coord1).to(device)
      F1 = model(sinput1).F.detach()
      feat_timer.toc()

    feat0 = make_open3d_feature(F0, 32, F0.shape[0])
    feat1 = make_open3d_feature(F1, 32, F1.shape[0])

    reg_timer.tic()
    distance_threshold = config.voxel_size * 1.0
    ransac_result = o3d.registration_ransac_based_on_feature_matching(
        pcd0, pcd1, feat0, feat1, distance_threshold,
        o3d.TransformationEstimationPointToPoint(False), 4, [
            o3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3d.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ], o3d.RANSACConvergenceCriteria(4000000, 10000))
    T_ransac = torch.from_numpy(ransac_result.transformation.astype(np.float32))
    reg_timer.toc()

    loss_ransac = corr_dist(T_ransac, T_gth, xyz0, xyz1, weight=None, max_dist=1)

    # Translation error
    rte = np.linalg.norm(T_ransac[:3, 3] - T_gth[:3, 3])
    rre = np.arccos((np.trace(T_ransac[:3, :3].t() @ T_gth[:3, :3]) - 1) / 2)

    # Check if RANSAC was successful: success means RTE < 2 m and RRE < 5°
    # http://openaccess.thecvf.com/content_ECCV_2018/papers/Zi_Jian_Yew_3DFeat-Net_Weakly_Supervised_ECCV_2018_paper.pdf
    if rte < 2:
      rte_meter.update(rte)

    if not np.isnan(rre) and rre < np.pi / 180 * 5:
      rre_meter.update(rre)

    if rte < 2 and not np.isnan(rre) and rre < np.pi / 180 * 5:
      success_meter.update(1)
    else:
      success_meter.update(0)
      logging.info(f"Failed with RTE: {rte}, RRE: {rre}")

    loss_meter.update(loss_ransac)

    if i % 10 == 0:
      logging.info(
          f"{i} / {N}: Data time: {data_timer.avg}, Feat time: {feat_timer.avg}," +
          f" Reg time: {reg_timer.avg}, Loss: {loss_meter.avg}, RTE: {rte_meter.avg}," +
          f" RRE: {rre_meter.avg}, Success: {success_meter.sum} / {success_meter.count}" +
          f" ({success_meter.avg * 100} %)"
      )
      data_timer.reset()
      feat_timer.reset()
      reg_timer.reset()

  logging.info(
      f"Total loss: {loss_meter.avg}, RTE: {rte_meter.avg}, var: {rte_meter.var}," +
      f" RRE: {rre_meter.avg}, var: {rre_meter.var}, Success: {success_meter.sum} " +
      f"/ {success_meter.count} ({success_meter.avg * 100} %)"
  )
Example 12
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)

source_down.paint_uniform_color([0, 0.651, 0.929])
target_down.paint_uniform_color([1, 0.706, 0])
pn.draw_geometries([source_down, target_down])

distance_threshold_ransac = 6 * voxel_size
start_time = time.time()

result_ransac = pn.registration_ransac_based_on_feature_matching(
    source_down, target_down, source_fpfh,
    target_fpfh, distance_threshold_ransac,
    pn.TransformationEstimationPointToPoint(False), 4, [
        pn.CorrespondenceCheckerBasedOnDistance(distance_threshold_ransac),
    ], pn.RANSACConvergenceCriteria(10000000, 10000))

print(result_ransac)
print(time.time() - start_time)
cam_pose_pcl.transform(result_ransac.transformation)
source.transform(result_ransac.transformation)
source_down.transform(result_ransac.transformation)

pn.draw_geometries([source_down, target_down])
print(cam_pose_pcl.points[0])
#%%
target_voxel_size = 1
voxel_size = 5
distance_threshold_icp = 7 * voxel_size