Example #1
def icp_o3_gicp(file_idx,
                cfg,
                refine=None,
                refine_radius=0.05,
                precomputed_results=None):
    pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
    voxel_size = 0.05
    start = time.time()
    if precomputed_results is None:
        distance_threshold = voxel_size * 1.5
        source_down, target_down, source_fpfh, target_fpfh = ICP._icp_global_prepare_dataset(
            pc1, pc2, voxel_size)
        reg_res = o3.registration_ransac_based_on_feature_matching(
            source_down,
            target_down,
            source_fpfh,
            target_fpfh,
            distance_threshold,
            o3.TransformationEstimationPointToPoint(
                with_constraint=cfg.evaluation.special.icp.with_constraint,
                with_scaling=False),
            4,  # ransac_n: number of points sampled per RANSAC iteration
            [
                o3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
                o3.CorrespondenceCheckerBasedOnDistance(distance_threshold)
            ],
            o3.RANSACConvergenceCriteria(4000000, 500))
        transformation = reg_res.transformation
    else:
        precomp_pred_translation, precomp_pred_angle, precomp_pred_center = precomputed_results
        transformation = get_mat_angle(precomp_pred_translation,
                                       precomp_pred_angle, precomp_pred_center)

    if refine is None:
        time_elapsed = time.time() - start
        return transformation, pc1_centroid, time_elapsed
    else:
        if refine == 'p2p':
            reg_p2p = o3.registration_icp(
                pc1, pc2, refine_radius, transformation,
                o3.TransformationEstimationPointToPoint(
                    with_constraint=cfg.evaluation.special.icp.with_constraint,
                    with_scaling=False))
            #  if file_idx == 8019:
            #  print('->', reg_p2p.transformation)
            time_elapsed = time.time() - start
            return reg_p2p.transformation, pc1_centroid, time_elapsed
        else:
            assert False
Example #2
    def _perform_registration(self):
        import open3d

        def array2pcd(xyz):
            pcd = open3d.PointCloud()
            pcd.points = open3d.Vector3dVector(xyz)
            return pcd

        fixed = array2pcd(self.fixed_points)
        moving = array2pcd(self.moving_points)

        init = np.asarray([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                           [0, 0, 0, 1]])
        regp2p = open3d.registration_icp(
            moving, fixed, np.inf, init,
            open3d.TransformationEstimationPointToPoint())
        self.transform = regp2p.transformation

        print(regp2p)

        moving_points = np.concatenate(
            (self.moving_points, np.ones((self.number_of_moving_points, 1))),
            axis=1)
        self.moving_points = moving_points @ np.transpose(
            self.transform)[:, :3]
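
The last two lines above apply the 4x4 transform in homogeneous coordinates: append a column of ones, right-multiply by the transpose of the matrix, and keep the first three columns. A minimal numpy sketch (illustrative names only) showing this is equivalent to rotate-then-translate:

import numpy as np

T = np.eye(4)
T[:3, 3] = [1.0, 2.0, 3.0]       # example translation
points = np.random.rand(10, 3)   # example (N, 3) point array

# Form used in _perform_registration: homogeneous coordinates
homog = np.concatenate((points, np.ones((points.shape[0], 1))), axis=1)
out_homog = (homog @ T.T)[:, :3]

# Equivalent explicit form: rotate, then translate
out_explicit = points @ T[:3, :3].T + T[:3, 3]

assert np.allclose(out_homog, out_explicit)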
Example #3
def icp_registration_error(sourcepc, targetpc, threshold=0.02):

    source = o3d.geometry.PointCloud()
    source.points = o3d.utility.Vector3dVector(sourcepc.position)
    target = o3d.geometry.PointCloud()
    target.points = o3d.utility.Vector3dVector(targetpc.position)

    trans_init = np.eye(4)

    draw_registration_result(source, target, trans_init)
    print("Initial alignment")
    evaluation = o3d.evaluate_registration(source, target, threshold,
                                           trans_init)
    print('evaluation:', evaluation)

    print("Apply point-to-point ICP")
    reg_p2p = o3d.registration_icp(
        source,
        target,
        threshold,
        init=trans_init,
        estimation_method=o3d.TransformationEstimationPointToPoint())
    print(reg_p2p)
    print("reg_p2p Transformation is:")
    print(reg_p2p.transformation)
    print("transformation type is :", type(reg_p2p.transformation))
    draw_registration_result(source, target, reg_p2p.transformation)
Example #4
    def stitch_pointclouds_open3D(self, target, source, threshold=1,
                                  trans_init=np.eye(4), max_iteration=2000,
                                  with_plot=True):
        source = self.check_pointcloud_type(np.array(source))
        target = self.check_pointcloud_type(np.array(target))
        target.paint_uniform_color([0.1, 0.1, 0.7])
        # print(dir(open3d))  # debug output, disabled
        if with_plot:
            self.draw_registration_result(source, target, trans_init)

        print("Initial alignment")
        evaluation = open3d.evaluate_registration(source, target, threshold, trans_init)
        print(evaluation)

        print("Apply point-to-point ICP")
        reg_p2p = open3d.registration_icp(source, target, threshold, trans_init,
                open3d.TransformationEstimationPointToPoint(),
                open3d.ICPConvergenceCriteria(max_iteration = max_iteration))
        print("Transformation is:")
        print(reg_p2p.transformation)
        print("")
        if with_plot:
            self.draw_registration_result(source, target, reg_p2p.transformation)
        
        xyz_source = np.asarray(source.points)
        xyz_target = np.asarray(target.points)
        xyz_global = np.concatenate([xyz_source, xyz_target], axis = 0)
        return xyz_global
Example #5
def execute_global_registration(source_down: op3.PointCloud,
                                target_down: op3.PointCloud,
                                source_fpfh: op3.Feature,
                                target_fpfh: op3.Feature,
                                voxel_size: float,
                                verbose=False):
    """
    Do global matching: find a coarse transformation from source to target.
    :param source_down, target_down: two Open3D point clouds of source and target after down-sampling
    :param source_fpfh, target_fpfh: two Open3D feature objects for source and target
    :param voxel_size: a float controlling how sparse the sampled points are
    :param verbose: a boolean; print notifications when True, stay silent when False
    :return: a registration result object
    """
    distance_threshold = voxel_size * 1.5
    if verbose:
        print(":: RANSAC registration on downsampled point clouds.")
        print("   Since the downsampling voxel size is %.3f," % voxel_size)
        print("   we use a liberal distance threshold %.3f." %
              distance_threshold)
    result = op3.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
        op3.TransformationEstimationPointToPoint(False), 4, [
            op3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            op3.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ], op3.RANSACConvergenceCriteria(4000000, 500))
    return result
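
A hypothetical usage sketch for execute_global_registration, preparing the downsampled clouds and FPFH features it expects with the same legacy open3d calls used in Example #12; the file names and radii here are placeholders, not part of the original example:

voxel_size = 0.05
source = op3.read_point_cloud("source.ply")  # placeholder file names
target = op3.read_point_cloud("target.ply")

def preprocess(pcd):
    # Downsample, estimate normals, then compute FPFH features.
    down = op3.voxel_down_sample(pcd, voxel_size)
    op3.estimate_normals(
        down, op3.KDTreeSearchParamHybrid(radius=voxel_size * 2.0, max_nn=30))
    fpfh = op3.compute_fpfh_feature(
        down,
        search_param=op3.KDTreeSearchParamHybrid(radius=voxel_size * 2.0,
                                                 max_nn=100))
    return down, fpfh

source_down, source_fpfh = preprocess(source)
target_down, target_fpfh = preprocess(target)
result = execute_global_registration(source_down, target_down, source_fpfh,
                                     target_fpfh, voxel_size, verbose=True)
print(result.transformation)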
Example #6
def solve(datArray, prm):
    global scnFtArray, scnPcArray, Param
    Param.update(prm)
    scnFtArray = []
    scnPcArray = []
    for dat in datArray:
        pc = fromNumpy(dat)
        scnPcArray.append(pc)
        scnFtArray.append(_get_features(pc))
    t1 = time.time()
    resft = o3.registration_ransac_based_on_feature_matching(
        modPcArray[0], scnPcArray[0], modFtArray[0], scnFtArray[0],
        Param["distance_threshold"],
        o3.TransformationEstimationPointToPoint(False), 4, [
            o3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3.CorrespondenceCheckerBasedOnDistance(
                Param["distance_threshold"])
        ], o3.RANSACConvergenceCriteria(2000000, 500))
    print "time for feature matching", time.time() - t1
    print "feature matching", resft.transformation, resft.fitness
    resicp = o3.registration_icp(modPcArray[0], scnPcArray[0],
                                 Param["icp_threshold"], resft.transformation,
                                 o3.TransformationEstimationPointToPlane())
    return {
        "transform": [resicp.transformation],
        "fitness": [resicp.fitness],
        "rmse": [resicp.inlier_rmse]
    }
Example #7
    def icp(self, source, target):
        source = source.astype(np.float32)
        target = target.astype(np.float32)
        threshold = self.threshold

        trans_init = np.asarray([[1.0, 0.0, 0.0, 0.2], [0.0, 1.0, 0.0, 0.1],
                                 [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])

        pc_source = open3d.PointCloud()
        pc_source.points = open3d.Vector3dVector(source)

        pc_target = open3d.PointCloud()
        pc_target.points = open3d.Vector3dVector(target)

        evaluation = open3d.evaluate_registration(pc_source, pc_target,
                                                  threshold, trans_init)
        print(evaluation)
        print("Apply point-to-point ICP")
        reg_p2p = open3d.registration_icp(
            pc_source, pc_target, threshold, trans_init,
            open3d.TransformationEstimationPointToPoint())
        print(reg_p2p)
        print("Transformation is:")
        print(reg_p2p.transformation)

        pc_source.transform(reg_p2p.transformation)
        source = np.asarray(pc_source.points)

        return reg_p2p.transformation
Example #8
def refine_registration(scene1, scene2, trans, voxel_size, max_iteration):
    distance_threshold = voxel_size * 0.4
    result = open3d.registration_icp(
        scene1, scene2, distance_threshold, trans,
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(max_iteration=max_iteration))
    return result
Example #9
def getImmovTf(T, p0):
    p1 = transform(T, p0)
    pc0 = fromNumpy(p0)
    pc1 = fromNumpy(p1)
    reg = o3d.registration_icp(pc1, pc0, param['icp_threshold'],
                               np.eye(4, dtype=float),
                               o3d.TransformationEstimationPointToPoint())
    #  print(reg.fitness)
    #  print(reg.transformation)
    return np.dot(reg.transformation, T), p1
Example #10
def execute_global_registration(src_keypts, tgt_keypts, src_desc, tgt_desc, distance_threshold):
    result = open3d.registration_ransac_based_on_feature_matching(
        src_keypts, tgt_keypts, src_desc, tgt_desc,
        distance_threshold,
        open3d.TransformationEstimationPointToPoint(False), 4,
        [open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
         open3d.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
        open3d.RANSACConvergenceCriteria(4000000, 500))
    return result
Example #11
def execute_global_registration(kp1, kp2, fpfh1, fpfh2, voxel_size):
    distance_threshold = voxel_size * 2.5
    result = open3d.registration_ransac_based_on_feature_matching(
        kp1, kp2, fpfh1, fpfh2, distance_threshold,
        open3d.TransformationEstimationPointToPoint(False), 4, [
            open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            open3d.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ], open3d.RANSACConvergenceCriteria(500000, 1000))
    return result
Example #12
def registration_point_to_point(scene1, scene2, voxel_size):

    # scene1 and 2 are point cloud data
    # voxel_size is grid size

    #draw_registration_result(scene1,scene2,np.identity(4))

    # voxel down sampling
    scene1_down = open3d.voxel_down_sample(scene1, voxel_size)
    scene2_down = open3d.voxel_down_sample(scene2, voxel_size)

    #draw_registration_result(scene1_down,scene2_down,np.identity(4))

    # estimate normal with search radius voxel_size*2.0
    radius_normal = voxel_size * 2.0
    open3d.estimate_normals(
        scene1, open3d.KDTreeSearchParamHybrid(radius=radius_normal,
                                               max_nn=30))
    open3d.estimate_normals(
        scene2, open3d.KDTreeSearchParamHybrid(radius=radius_normal,
                                               max_nn=30))
    open3d.estimate_normals(
        scene1_down,
        open3d.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
    open3d.estimate_normals(
        scene2_down,
        open3d.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))

    # compute fpfh feature with search radius voxel_size*2.0
    radius_feature = voxel_size * 2.0
    scene1_fpfh = open3d.compute_fpfh_feature(
        scene1_down,
        search_param=open3d.KDTreeSearchParamHybrid(radius=radius_feature,
                                                    max_nn=100))
    scene2_fpfh = open3d.compute_fpfh_feature(
        scene2_down,
        search_param=open3d.KDTreeSearchParamHybrid(radius=radius_feature,
                                                    max_nn=100))

    # compute ransac registration
    ransac_result = execute_global_registration(scene1_down, scene2_down,
                                                scene1_fpfh, scene2_fpfh,
                                                voxel_size)
    #draw_registration_result(scene1,scene2,ransac_result.transformation)

    # point to point ICP registration
    distance_threshold = voxel_size * 0.4
    result = open3d.registration_icp(
        scene1, scene2, distance_threshold, ransac_result.transformation,
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(max_iteration=1000))

    #draw_registration_result(scene1,scene2,result.transformation)
    print(result)

    return result
Example #13
def refine_registration(source, target, trans, voxel_size):
    global all_transformation
    distance_threshold = voxel_size * 0.4
    print(":: Point-to-point ICP registration is applied on original point")
    print("   clouds to refine the alignment. This time we use a strict")
    print("   distance threshold %.3f." % distance_threshold)
    result = o3.registration_icp(source, target, distance_threshold, trans,
                                 o3.TransformationEstimationPointToPoint())

    return result.transformation
Example #14
def registration_icp(source,
                     target,
                     threshold,
                     transformation=np.eye(4),
                     **kwargs):
    # call the open3d registration_icp
    reg_p2p = open3d.registration_icp(
        source, target, threshold, transformation,
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(max_iteration=30))
    return reg_p2p.transformation
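
Hypothetical call of the wrapper above; pcd_a and pcd_b stand in for two roughly pre-aligned open3d point clouds:

T = registration_icp(pcd_a, pcd_b, threshold=0.02)  # returns a 4x4 matrix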
Example #15
def refine_registration(source, target, corrs):

    start = time.time()

    p2p = o3d.TransformationEstimationPointToPoint()
    result = p2p.compute_transformation(source, target,
                                        o3d.Vector2iVector(corrs))

    elapsed_time = time.time() - start

    return result, elapsed_time
Example #16
def icp_refine(source_pc, target_pc, trans, dis):
    """
    use icp to refine the rigid transformation.
    """
    result = open3d.registration_icp(
        source=source_pc,
        target=target_pc,
        max_correspondence_distance=dis,
        init=trans,
        estimation_method=open3d.TransformationEstimationPointToPoint(False)
    )
    return result
Example #17
    def register(self, iteration=None, voxel_size=None):
        iteration = 100 if iteration is None else iteration
        voxel_size = 0.01 if voxel_size is None else voxel_size

        source, target = self._prepare(voxel_size=voxel_size)
        result = open3d.registration_icp(
            source,  # points_from_depth
            target,  # points_from_cad
            2 * voxel_size,
            tf.inverse_matrix(self._transform),
            open3d.TransformationEstimationPointToPoint(False),
            open3d.ICPConvergenceCriteria(max_iteration=iteration),
        )
        return tf.inverse_matrix(result.transformation)
Example #18
def match_():
    source = copy.deepcopy(modPcArray[0])
    source_fpfh = modFtArray[0]
    target = scnPcArray[0]
    target_fpfh = scnFtArray[0]
    reg_global = o3d.registration_ransac_based_on_feature_matching(
        source, target, source_fpfh, target_fpfh, param['distance_threshold'],
        o3d.TransformationEstimationPointToPoint(False), 4, [
            o3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3d.CorrespondenceCheckerBasedOnDistance(
                param['distance_threshold'])
        ], o3d.RANSACConvergenceCriteria(4000000, 500))
    source.transform(reg_global.transformation)
    return reg_global, [source]
Example #19
def open3d_icp(src, trgt, init_rotation=np.eye(3, 3)):
    source = open3d.PointCloud()
    source.points = open3d.Vector3dVector(src)

    target = open3d.PointCloud()
    target.points = open3d.Vector3dVector(trgt)

    init_rotation_4x4 = np.eye(4, 4)
    init_rotation_4x4[0:3, 0:3] = init_rotation

    threshold = 0.2
    reg_p2p = open3d.registration_icp(source, target, threshold, init_rotation_4x4,
                                    open3d.TransformationEstimationPointToPoint())

    return reg_p2p
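
Hypothetical call of open3d_icp on two numpy arrays; the shift is kept inside the 0.2 correspondence threshold hard-coded above:

import numpy as np

src = np.random.rand(200, 3)
trgt = src + 0.05                 # uniform shift, well within threshold = 0.2
reg = open3d_icp(src, trgt)
print(reg.transformation, reg.fitness)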
Example #20
def icp_two_pc(source, target, draw_result=False, point2plane=False):
    """
    return target * result = source
    :param source:
    :param target:
    :param draw_result:
    :param point2plane:
    :return:
    """
    if isinstance(source, PointCloud):
        tmp = source
        source = o3d.PointCloud()
        source.points = o3d.Vector3dVector(tmp.position)
    elif isinstance(source, np.ndarray):
        tmp = source
        source = o3d.PointCloud()
        source.points = o3d.Vector3dVector(tmp)
    if isinstance(target, PointCloud):
        tmp = target
        target = o3d.PointCloud()
        target.points = o3d.Vector3dVector(tmp.position)
    elif isinstance(target, np.ndarray):
        tmp = target
        target = o3d.PointCloud()
        target.points = o3d.Vector3dVector(tmp)

    if point2plane:
        result = o3d.registration_icp(
            source,
            target,
            0.002,
            estimation_method=o3d.TransformationEstimationPointToPlane())
    else:
        print('using point to point icp method')
        result = o3d.registration_icp(
            source,
            target,
            0.002,
            estimation_method=o3d.TransformationEstimationPointToPoint())
    transformation = result.transformation
    print('result transformation:', transformation)

    if draw_result:
        source.transform(
            transformation
        )  # source point cloud apply the transformation to get target point cloud
        draw_registration_result(source, target)
    return transformation
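
A hypothetical call of icp_two_pc on plain numpy arrays (accepted via the isinstance branches above); note the 0.002 correspondence distance inside the function, so the initial misalignment must be small:

import numpy as np

pts = np.random.rand(500, 3)
offset = np.array([0.001, 0.0, 0.0])   # small shift, within the 0.002 radius
T = icp_two_pc(pts + offset, pts)      # aligns the shifted copy onto pts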
Example #21
def execute_global_registration(source_down, target_down, source_fpfh,
                                target_fpfh, voxel_size):
    distance_threshold = voxel_size * 1.5
    print(":: RANSAC registration on downsampled point clouds.")
    print("   Since the downsampling voxel size is %.3f," % voxel_size)
    print("   we use a liberal distance threshold %.3f." % distance_threshold)
    result = op3.registration_ransac_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
        op3.TransformationEstimationPointToPoint(False), 4, [
            op3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            op3.CorrespondenceCheckerBasedOnDistance(distance_threshold)
        ],
        op3.RANSACConvergenceCriteria(4000000, 500)
        # op3.RANSACConvergenceCriteria(10000000, 20000)
    )
    return result
Example #22
    def update_source_to_target_transformation(self):
        """
        Function to update the source to target transformation matrix
        
         
        """
        source = copy.deepcopy(self.source_pcl)
        target = pn.voxel_down_sample(self.target_pcl, target_voxel_size)

        if self.source_to_target_transformation.size > 0:
            # if the global registration have already been done
            source.transform(self.source_to_target_transformation)
            icp_result = pn.registration_icp(
                source, target, distance_threshold_icp, np.eye(4),
                pn.TransformationEstimationPointToPlane())
            if icp_result.fitness > 0.8:
                self.source_to_target_transformation =\
                    icp_result.transformation.dot(self.source_to_target_transformation)
                return

        # ICP failed to align source & target; fall back to global registration
        # GLOBAL REGISTRATION
        # PREPROCESSING POINT CLOUD
        source = copy.deepcopy(self.source_pcl)

        source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
        target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)

        # GLOBAL REGISTRATION
        ransac_result = pn.registration_ransac_based_on_feature_matching(
            source_down, target_down, source_fpfh, target_fpfh,
            distance_threshold_ransac,
            pn.TransformationEstimationPointToPoint(False), 4, [
                pn.CorrespondenceCheckerBasedOnDistance(
                    distance_threshold_ransac)
            ], pn.RANSACConvergenceCriteria(10000000, 10000))

        source.transform(ransac_result.transformation)

        icp_result = pn.registration_icp(
            source, target, distance_threshold_icp, np.eye(4),
            pn.TransformationEstimationPointToPlane())

        self.source_to_target_transformation = \
            icp_result.transformation.dot(ransac_result.transformation)

        return
Example #23
    def register(self):
        start = time.time()
        result = open3d.registration_ransac_based_on_feature_matching(
            self.source_open3d, self.target_open3d, self.source_fpfh,
            self.target_fpfh, self.epsilon,
            open3d.TransformationEstimationPointToPoint(False), 4, [
                open3d.CorrespondenceCheckerBasedOnEdgeLength(0.95),
                open3d.CorrespondenceCheckerBasedOnDistance(self.epsilon)
            ],
            open3d.RANSACConvergenceCriteria(self.max_iteration,
                                             self.max_validation))
        end = time.time()

        self.time = end - start
        self.transformation = result.transformation

        return
Example #24
def get_evaluation(pcd_temp_,
                   pcd_scene_,
                   inlier_thres,
                   tf,
                   final_th=0,
                   n_iter=5):  #queue
    tf_pcd = np.eye(4)

    reg_p2p = open3d.registration_icp(
        pcd_temp_, pcd_scene_, inlier_thres, np.eye(4),
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(max_iteration=1))  #5?
    tf = np.matmul(reg_p2p.transformation, tf)
    tf_pcd = np.matmul(reg_p2p.transformation, tf_pcd)
    pcd_temp_.transform(reg_p2p.transformation)

    for i in range(n_iter - 1):  # the initial ICP above counts toward n_iter
        inlier_thres = reg_p2p.inlier_rmse * 3
        if inlier_thres == 0:
            continue

        reg_p2p = open3d.registration_icp(
            pcd_temp_, pcd_scene_, inlier_thres, np.eye(4),
            open3d.TransformationEstimationPointToPlane(),
            open3d.ICPConvergenceCriteria(max_iteration=1))  #5?
        tf = np.matmul(reg_p2p.transformation, tf)
        tf_pcd = np.matmul(reg_p2p.transformation, tf_pcd)
        pcd_temp_.transform(reg_p2p.transformation)
    inlier_rmse = reg_p2p.inlier_rmse

    ## Calculate fitness with depth_inlier_th
    if final_th > 0:
        inlier_thres = final_th  #depth_inlier_th*2 #reg_p2p.inlier_rmse*3
        reg_p2p = open3d.registration_icp(
            pcd_temp_, pcd_scene_, inlier_thres, np.eye(4),
            open3d.TransformationEstimationPointToPlane(),
            open3d.ICPConvergenceCriteria(max_iteration=1))  #5?

    if (np.abs(np.linalg.det(tf[:3, :3]) - 1) > 0.001):
        tf[:3, 0] = tf[:3, 0] / np.linalg.norm(tf[:3, 0])
        tf[:3, 1] = tf[:3, 1] / np.linalg.norm(tf[:3, 1])
        tf[:3, 2] = tf[:3, 2] / np.linalg.norm(tf[:3, 2])
    if (np.linalg.det(tf) < 0):
        tf[:3, 2] = -tf[:3, 2]

    return tf, inlier_rmse, tf_pcd, reg_p2p.fitness
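
The final block above re-normalizes the rotation columns when the determinant drifts from 1. A common alternative, not used in this example, is projecting onto the nearest proper rotation with an SVD (orthogonal Procrustes); a minimal sketch:

import numpy as np

def nearest_rotation(R):
    # Project a drifted 3x3 block onto the closest matrix with
    # R_hat @ R_hat.T == I and det(R_hat) == +1.
    U, _, Vt = np.linalg.svd(R)
    D = np.diag([1.0, 1.0, np.sign(np.linalg.det(U @ Vt))])
    return U @ D @ Vt

# tf[:3, :3] = nearest_rotation(tf[:3, :3])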
Example #25
def icp_p2point(file_idx,
                cfg,
                radius=0.2,
                its=30,
                init=None,
                with_constraint=None):
    with_constraint = with_constraint if with_constraint is not None else cfg.evaluation.special.icp.with_constraint
    pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
    if init is None:
        #  init = get_median_init(pc1, pc2)
        init = get_centroid_init(pc1, pc2)
    start = time.time()
    reg_p2p = o3.registration_icp(pc1, pc2, radius, init,
                                  o3.TransformationEstimationPointToPoint(
                                      with_constraint=with_constraint,
                                      with_scaling=False),
                                  o3.registration.ICPConvergenceCriteria(
                                      max_iteration=its))  # Default: 30
    time_elapsed = time.time() - start
    return reg_p2p.transformation, pc1_centroid, time_elapsed
Example #26
    def register_iterative(self, iteration=None, voxel_size=None):
        iteration = 100 if iteration is None else iteration
        voxel_size = 0.01 if voxel_size is None else voxel_size

        yield self._transform

        source, target = self._prepare(voxel_size=voxel_size)
        for i in range(iteration):
            result = open3d.registration_icp(
                source,  # points_from_depth
                target,  # points_from_cad
                2 * voxel_size,
                tf.inverse_matrix(self._transform),
                open3d.TransformationEstimationPointToPoint(False),
                open3d.ICPConvergenceCriteria(max_iteration=1),
            )
            print(f"[{i:08d}] fitness={result.fitness:.2g} "
                  f"inlier_rmse={result.inlier_rmse:.2g}")
            self._transform = tf.inverse_matrix(result.transformation)
            yield self._transform
Example #27
    def _perform_registration(self):

        if self.registration == 0:
            self.registration_name = 'None'
            return

        fixed = open3dmeshopen3dpcd(self.fixed)
        moving = open3dmeshopen3dpcd(self.moving)

        if self.registration == 1:
            estimator = open3d.TransformationEstimationPointToPoint()
        else:
            estimator = open3d.TransformationEstimationPointToPlane()

        init = np.asarray([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                           [0, 0, 0, 1]])
        registration = open3d.registration_icp(moving, fixed, np.inf, init,
                                               estimator)

        self.registration_name = type(estimator).__name__
        self.transform = registration.transformation
        self.moving.transform(self.transform)
Example #28
def transformation_matrix(source_points, target_points):
    """
    Compute the transformation matrix between to set of matching points. 
  
    Parameters: 
        source_points (numpy.arrays): the source points array 
        target_points (numpy.arrays): the target points array matching the 
            source
        
    Returns: 
        transformation_matrix (numpy.arrays) : The 4D transformation matrix from source to target
        
    """
    assert (len(source_points) >= 3
            and len(target_points) == len(source_points))
    source = pn.PointCloud()
    source.points = pn.Vector3dVector(source_points)
    target = pn.PointCloud()
    target.points = pn.Vector3dVector(target_points)
    corr = np.array(2 * [range(len(source_points))]).T
    p2p = pn.TransformationEstimationPointToPoint()
    trans = p2p.compute_transformation(source, target, pn.Vector2iVector(corr))
    return trans
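
A hypothetical sanity check for transformation_matrix: with exact one-to-one correspondences, the point-to-point estimator should recover a known translation:

import numpy as np

src = np.random.rand(10, 3)
tgt = src + np.array([0.5, 0.0, 0.0])   # known ground-truth translation
T = transformation_matrix(src, tgt)
assert np.allclose(T[:3, 3], [0.5, 0.0, 0.0], atol=1e-6)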
Example #29
def icp_o3_gicp_fast(file_idx,
                     cfg,
                     refine=None,
                     refine_radius=0.05,
                     precomputed_results=None):
    pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
    voxel_size = 0.05
    distance_threshold = voxel_size * 0.5
    start = time.time()
    if precomputed_results is None:
        source_down, target_down, source_fpfh, target_fpfh = ICP._icp_global_prepare_dataset(
            pc1, pc2, voxel_size)
        reg_res = o3.registration_fast_based_on_feature_matching(
            source_down, target_down, source_fpfh, target_fpfh,
            o3.FastGlobalRegistrationOption(
                with_constraint=cfg.evaluation.special.icp.with_constraint,
                maximum_correspondence_distance=distance_threshold))
        transformation = reg_res.transformation
    else:
        precomp_pred_translation, precomp_pred_angle, precomp_pred_center = precomputed_results
        transformation = get_mat_angle(precomp_pred_translation,
                                       precomp_pred_angle, precomp_pred_center)

    if refine is None:
        time_elapsed = time.time() - start
        return transformation, pc1_centroid, time_elapsed
    else:
        if refine == 'p2p':
            reg_p2p = o3.registration_icp(
                pc1, pc2, refine_radius, transformation,
                o3.TransformationEstimationPointToPoint(
                    with_constraint=cfg.evaluation.special.icp.with_constraint,
                    with_scaling=False))
            time_elapsed = time.time() - start
            return reg_p2p.transformation, pc1_centroid, time_elapsed
        else:
            assert False
Example #30
# transform target point cloud
th = np.deg2rad(10.0)
target.transform(np.array([[np.cos(th), -np.sin(th), 0.0, 0.0],
                           [np.sin(th), np.cos(th), 0.0, 0.0],
                           [0.0, 0.0, 1.0, 0.0],
                           [0.0, 0.0, 0.0, 1.0]]))

vis = o3.Visualizer()
vis.create_window()
result = copy.deepcopy(source)
source.paint_uniform_color([1, 0, 0])
target.paint_uniform_color([0, 1, 0])
result.paint_uniform_color([0, 0, 1])
vis.add_geometry(source)
vis.add_geometry(target)
vis.add_geometry(result)
threshold = 0.05
icp_iteration = 100
save_image = False

for i in range(icp_iteration):
    reg_p2p = o3.registration_icp(result, target, threshold,
                np.identity(4), o3.TransformationEstimationPointToPoint(),
                o3.ICPConvergenceCriteria(max_iteration=1))
    result.transform(reg_p2p.transformation)
    vis.update_geometry()
    vis.poll_events()
    vis.update_renderer()
    if save_image:
        vis.capture_screen_image("image_%04d.jpg" % i)
vis.run()