Example #1
def refine_registration(source: op3.PointCloud,
                        target: op3.PointCloud,
                        voxel_size: float,
                        gross_matching: np.ndarray,
                        verbose=False):
    """
    Refine the matching
    :param source, target: 2 objects of Open3D, that are point clouds of source and target
    :param voxel_size: a float value, that is how sparse you want the sample points is
    :param gross_matching: a transformation matrix, that grossly matches source to target
    :param verbose: a boolean value, display notification and visualization when True and no notification when False
    :return: a transformation object
    """
    distance_threshold = voxel_size * 0.4
    if verbose:
        print(
            ":: Point-to-plane ICP registration is applied on original point")
        print("   clouds to refine the alignment. This time we use a strict")
        print("   distance threshold %.3f." % distance_threshold)
    result = op3.registration_icp(
        source,
        target,
        distance_threshold,
        gross_matching.transformation,
        op3.TransformationEstimationPointToPlane(),
        # op3.ICPConvergenceCriteria(max_iteration=2000)
    )
    return result
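
A minimal usage sketch for the helper above, assuming the legacy flat Open3D API (aliased as op3, as in the snippet); the input file names are hypothetical, and the SimpleNamespace stand-in only mimics the .transformation attribute of a real global-registration result:

import numpy as np
import open3d as op3  # legacy flat namespace (pre-0.10)
from types import SimpleNamespace

voxel_size = 0.05  # assumed sampling density, in the units of the clouds
source = op3.read_point_cloud("source.pcd")  # hypothetical input files
target = op3.read_point_cloud("target.pcd")

# point-to-plane ICP needs normals, so estimate them on both clouds
for pcd in (source, target):
    op3.estimate_normals(
        pcd, op3.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))

# gross_matching would normally come from a global registration step; any
# object with a .transformation attribute works, so use an identity stand-in
gross_matching = SimpleNamespace(transformation=np.eye(4))

result = refine_registration(source, target, voxel_size, gross_matching,
                             verbose=True)
print(result.transformation)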
Example #2
def solve(datArray, prm):
    global scnFtArray, scnPcArray, Param
    Param.update(prm)
    scnFtArray = []
    scnPcArray = []
    for dat in datArray:
        pc = fromNumpy(dat)
        scnPcArray.append(pc)
        scnFtArray.append(_get_features(pc))
    t1 = time.time()
    resft = o3.registration_ransac_based_on_feature_matching(
        modPcArray[0], scnPcArray[0], modFtArray[0], scnFtArray[0],
        Param["distance_threshold"],
        o3.TransformationEstimationPointToPoint(False), 4, [
            o3.CorrespondenceCheckerBasedOnEdgeLength(0.9),
            o3.CorrespondenceCheckerBasedOnDistance(
                Param["distance_threshold"])
        ], o3.RANSACConvergenceCriteria(2000000, 500))
    print "time for feature matching", time.time() - t1
    print "feature matching", resft.transformation, resft.fitness
    resicp = o3.registration_icp(modPcArray[0], scnPcArray[0],
                                 Param["icp_threshold"], resft.transformation,
                                 o3.TransformationEstimationPointToPlane())
    return {
        "transform": [resicp.transformation],
        "fitness": [resicp.fitness],
        "rmse": [resicp.inlier_rmse]
    }
Example #3
def icp_registration(source, target, distance_threshold):
    # point-to-plane ICP requires normals, at minimum on the target
    pn.estimate_normals(source)
    pn.estimate_normals(target)
    icp_result = pn.registration_icp(source, target, distance_threshold,
                                     np.eye(4),
                                     pn.TransformationEstimationPointToPlane())
    return icp_result
Example #4
    def globalRansacMatchAndICPRefine(self):
        source_down, source_fpfh, target_down, target_fpfh = self.prepareDataset()

        # showPointCloud([target_down])
        # exit()
        # result_ransac = open3d.registration_ransac_based_on_feature_matching(
        #         source_down, target_down, source_fpfh, target_fpfh, self._distance_threshold_ransac_m,
        #         open3d.TransformationEstimationPointToPoint(False), 4, [
        #         open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
        #         open3d.CorrespondenceCheckerBasedOnDistance(
        #         self._distance_threshold_ransac_m)
        # ], open3d.RANSACConvergenceCriteria(4000000, 500))
        result_ransac = open3d.registration_fast_based_on_feature_matching(
                source_down, target_down, source_fpfh, target_fpfh,
                open3d.FastGlobalRegistrationOption(
                maximum_correspondence_distance=self._distance_threshold_ransac_m)
         )
        if self._show_result:
            self.showMatchResult(source_down, target_down, result_ransac.transformation)
        # result_icp = open3d.registration_icp(
        #         self._source, self._target, self._distance_threshold_icp_m, result_ransac.transformation,
        #         open3d.TransformationEstimationPointToPlane())
        result_icp = open3d.registration_icp(
                source_down, target_down, self._distance_threshold_icp_m, result_ransac.transformation,
                open3d.TransformationEstimationPointToPlane())
        if self._show_result:
            self.showMatchResult(self._source, self._target, result_icp.transformation)
        return result_ransac, result_icp
Example #5
    def update_source_to_target_transformation(self):
        """
        Function to update the source to target transformation matrix
        
         
        """
        source = copy.deepcopy(self.source_pcl)
        target = pn.voxel_down_sample(self.target_pcl, target_voxel_size)

        if self.source_to_target_transformation.size > 0:
            # if the global registration has already been done
            source.transform(self.source_to_target_transformation)
            icp_result = pn.registration_icp(
                source, target, distance_threshold_icp, np.eye(4),
                pn.TransformationEstimationPointToPlane())
            if icp_result.fitness > 0.8:
                self.source_to_target_transformation =\
                    icp_result.transformation.dot(self.source_to_target_transformation)
                return

        # we failed to align source & target with ICP
        # GLOBAL REGISTRATION
        # PREPROCESSING POINT CLOUD
        source = copy.deepcopy(self.source_pcl)

        source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
        target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)

        # GLOBAL REGISTRATION
        ransac_result = pn.registration_ransac_based_on_feature_matching(
            source_down, target_down, source_fpfh, target_fpfh,
            distance_threshold_ransac,
            pn.TransformationEstimationPointToPoint(False), 4, [
                pn.CorrespondenceCheckerBasedOnDistance(
                    distance_threshold_ransac)
            ], pn.RANSACConvergenceCriteria(10000000, 10000))

        source.transform(ransac_result.transformation)

        icp_result = pn.registration_icp(
            source, target, distance_threshold_icp, np.eye(4),
            pn.TransformationEstimationPointToPlane())

        self.source_to_target_transformation = \
            icp_result.transformation.dot(ransac_result.transformation)

        return
Example #6
def refine_registration(source, target, source_fpfh, target_fpfh, voxel_size):
    distance_threshold = voxel_size * 1.5
    print(":: Point-to-plane ICP registration is applied on original point")
    print("   clouds to refine the alignment. This time we use a strict")
    print("   distance threshold %.3f." % distance_threshold)
    # NOTE: result_fast is a module-level result from a prior fast global
    # registration step; the fpfh arguments are unused in this variant
    result = open3d.registration_icp(
        source, target, distance_threshold, result_fast.transformation,
        open3d.TransformationEstimationPointToPlane())
    return result
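
The function above references a result_fast that is not defined locally; in the original pipeline it is the module-level result of fast global registration on downsampled clouds with FPFH features (compare Example #4). A sketch of that step, where the 0.5 factor is an assumption to tune:

import open3d  # legacy flat namespace, as used above

def execute_fast_global_registration(source_down, target_down,
                                     source_fpfh, target_fpfh, voxel_size):
    # half the voxel size is a common choice of correspondence distance
    distance_threshold = voxel_size * 0.5
    return open3d.registration_fast_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh,
        open3d.FastGlobalRegistrationOption(
            maximum_correspondence_distance=distance_threshold))

# result_fast = execute_fast_global_registration(source_down, target_down,
#                                                source_fpfh, target_fpfh,
#                                                voxel_size)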
Example #7
def registration_point_to_plane(scene1, scene2, voxel_size):

    # scene1 and 2 are point cloud data
    # voxel_size is grid size

    #draw_registration_result(scene1,scene2,np.identity(4))

    # voxel down sampling
    scene1_down = open3d.voxel_down_sample(scene1, voxel_size)
    scene2_down = open3d.voxel_down_sample(scene2, voxel_size)

    #draw_registration_result(scene1_down,scene2_down,np.identity(4))

    # estimate normal with search radius voxel_size*2.0
    radius_normal = voxel_size * 2.0
    open3d.estimate_normals(
        scene1, open3d.KDTreeSearchParamHybrid(radius=radius_normal,
                                               max_nn=30))
    open3d.estimate_normals(
        scene2, open3d.KDTreeSearchParamHybrid(radius=radius_normal,
                                               max_nn=30))
    open3d.estimate_normals(
        scene1_down,
        open3d.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
    open3d.estimate_normals(
        scene2_down,
        open3d.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))

    # compute FPFH features with search radius voxel_size*2.0
    radius_feature = voxel_size * 2.0
    scene1_fpfh = open3d.compute_fpfh_feature(
        scene1_down,
        search_param=open3d.KDTreeSearchParamHybrid(radius=radius_feature,
                                                    max_nn=100))
    scene2_fpfh = open3d.compute_fpfh_feature(
        scene2_down,
        search_param=open3d.KDTreeSearchParamHybrid(radius=radius_feature,
                                                    max_nn=100))

    # compute ransac registration
    ransac_result = execute_global_registration(scene1_down, scene2_down,
                                                scene1_fpfh, scene2_fpfh,
                                                voxel_size)
    #draw_registration_result(scene1,scene2,ransac_result.transformation)

    # point-to-plane ICP registration
    distance_threshold = voxel_size * 10.0
    result = open3d.registration_icp(
        scene1, scene2, distance_threshold, ransac_result.transformation,
        open3d.TransformationEstimationPointToPlane(),
        open3d.ICPConvergenceCriteria(max_iteration=1000))

    #draw_registration_result(scene1,scene2,result.transformation)
    print(result)

    return result
Example #8
def refine_registration(source, target, voxel_size, gross_matching):
    distance_threshold = voxel_size * 0.4
    print(":: Point-to-plane ICP registration is applied on original point")
    print("   clouds to refine the alignment. This time we use a strict")
    print("   distance threshold %.3f." % distance_threshold)
    result = op3.registration_icp(
        source, target, distance_threshold, gross_matching.transformation,
        op3.TransformationEstimationPointToPlane(),
        op3.ICPConvergenceCriteria(max_iteration=2000))
    return result
Example #9
def icp_two_pc(source, target, draw_result=False, point2plane=False):
    """
    return target * result = source
    :param source:
    :param target:
    :param draw_result:
    :param point2plane:
    :return:
    """
    if isinstance(source, PointCloud):
        tmp = source
        source = o3d.PointCloud()
        source.points = o3d.Vector3dVector(tmp.position)
    elif isinstance(source, np.ndarray):
        tmp = source
        source = o3d.PointCloud()
        source.points = o3d.Vector3dVector(tmp)
    if isinstance(target, PointCloud):
        tmp = target
        target = o3d.PointCloud()
        target.points = o3d.Vector3dVector(tmp.position)
    elif isinstance(target, np.ndarray):
        tmp = target
        target = o3d.PointCloud()
        target.points = o3d.Vector3dVector(tmp)

    if point2plane:
        # point-to-plane ICP requires target normals, which raw numpy input
        # lacks; the search radius here is an assumption to tune to the data
        o3d.estimate_normals(
            target,
            search_param=o3d.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))
        result = o3d.registration_icp(
            source,
            target,
            0.002,
            estimation_method=o3d.TransformationEstimationPointToPlane())
    else:
        print('using point to point icp method')
        result = o3d.registration_icp(
            source,
            target,
            0.002,
            estimation_method=o3d.TransformationEstimationPointToPoint())
    transformation = result.transformation
    print('result transformation:', transformation)

    if draw_result:
        source.transform(
            transformation
        )  # source point cloud apply the transformation to get target point cloud
        draw_registration_result(source, target)
    return transformation
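
A quick usage sketch with synthetic numpy input; the sheet shape, point count, and 1 mm offset are arbitrary choices sized to fit the hard-coded 0.002 correspondence distance:

import numpy as np

# a gently curved sheet, so the geometry is not degenerate
xy = np.random.rand(1000, 2) * 0.1
pts = np.c_[xy, 0.01 * np.sin(xy[:, 0] * 50.0)]
shifted = pts + np.array([0.001, 0.0, 0.0])  # 1 mm offset along x

# T maps the source onto the target, so expect roughly -1 mm along x
T = icp_two_pc(shifted, pts, draw_result=False, point2plane=False)
print(T[:3, 3])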
Example #10
def get_evaluation(pcd_temp_,
                   pcd_scene_,
                   inlier_thres,
                   tf,
                   final_th=0,
                   n_iter=5):  #queue
    tf_pcd = np.eye(4)

    reg_p2p = open3d.registration_icp(
        pcd_temp_, pcd_scene_, inlier_thres, np.eye(4),
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(max_iteration=1))  #5?
    tf = np.matmul(reg_p2p.transformation, tf)
    tf_pcd = np.matmul(reg_p2p.transformation, tf_pcd)
    pcd_temp_.transform(reg_p2p.transformation)

    for i in range(4):  # NOTE: runs a fixed 4 refinements; n_iter is unused
        inlier_thres = reg_p2p.inlier_rmse * 3
        if inlier_thres == 0:
            continue

        reg_p2p = open3d.registration_icp(
            pcd_temp_, pcd_scene_, inlier_thres, np.eye(4),
            open3d.TransformationEstimationPointToPlane(),
            open3d.ICPConvergenceCriteria(max_iteration=1))  #5?
        tf = np.matmul(reg_p2p.transformation, tf)
        tf_pcd = np.matmul(reg_p2p.transformation, tf_pcd)
        pcd_temp_.transform(reg_p2p.transformation)
    inlier_rmse = reg_p2p.inlier_rmse

    # calculate fitness with the final threshold, if one was given
    if final_th > 0:
        inlier_thres = final_th  #depth_inlier_th*2 #reg_p2p.inlier_rmse*3
        reg_p2p = open3d.registration_icp(
            pcd_temp_, pcd_scene_, inlier_thres, np.eye(4),
            open3d.TransformationEstimationPointToPlane(),
            open3d.ICPConvergenceCriteria(max_iteration=1))  #5?

    # re-orthonormalize the rotation block if it drifted from a proper
    # rotation, and flip one axis if the determinant went negative
    if np.abs(np.linalg.det(tf[:3, :3]) - 1) > 0.001:
        tf[:3, 0] = tf[:3, 0] / np.linalg.norm(tf[:3, 0])
        tf[:3, 1] = tf[:3, 1] / np.linalg.norm(tf[:3, 1])
        tf[:3, 2] = tf[:3, 2] / np.linalg.norm(tf[:3, 2])
    if np.linalg.det(tf) < 0:
        tf[:3, 2] = -tf[:3, 2]

    return tf, inlier_rmse, tf_pcd, reg_p2p.fitness
Example #11
def evaluate_calibration(s, t, H):
    """
    Evaluate by ICP
    :param s: source point cloud with reference to camera
    :param t: target point cloud with reference to robot base
    :param H: transformation matrix to be evaluated, camera pose with reference to robot base
    :return:
    (cam',cam)H * cam_P = Base_P * (cam', Base)H
    (cam',cam)H * s = tTs
    """
    tTs = copy.deepcopy(t)
    tTs.transform(np.linalg.inv(H))  # map the target into the camera frame
    reg_p2l = o3d.registration_icp(s, tTs, 1, np.eye(4),
                                   o3d.TransformationEstimationPointToPlane())
    R = reg_p2l.transformation[:3, :3]
    T = reg_p2l.transformation[:3, 3]
    al, be, ga = tf.transformations.euler_from_matrix(R, 'sxyz')
    return T, np.array([al, be, ga]) * 180 / np.pi
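
A hypothetical usage sketch for the evaluation helper above; the file names and the normal-estimation radius are assumptions, and the ROS tf.transformations dependency is required by the function itself:

import numpy as np
import open3d as o3d  # legacy flat namespace, as used above

s = o3d.read_point_cloud("scene_camera.pcd")  # cloud in the camera frame
t = o3d.read_point_cloud("scene_base.pcd")    # cloud in the robot base frame
for pcd in (s, t):  # point-to-plane ICP needs normals
    o3d.estimate_normals(
        pcd, o3d.KDTreeSearchParamHybrid(radius=0.05, max_nn=30))

H = np.eye(4)  # candidate camera-to-base transform under test
residual_t, residual_deg = evaluate_calibration(s, t, H)
print("translation residual:", residual_t)
print("rotation residual (deg):", residual_deg)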
Example #12
    def _perform_registration(self):

        if self.registration == 0:
            self.registration_name = 'None'
            return

        fixed = open3dmeshopen3dpcd(self.fixed)
        moving = open3dmeshopen3dpcd(self.moving)

        if self.registration == 1:
            estimator = open3d.TransformationEstimationPointToPoint()
        else:
            estimator = open3d.TransformationEstimationPointToPlane()

        init = np.asarray([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                           [0, 0, 0, 1]])
        registration = open3d.registration_icp(moving, fixed, np.Inf, init,
                                               estimator)

        self.registration_name = type(estimator).__name__
        self.transform = registration.transformation
        self.moving.transform(self.transform)
Example #13
def solve(datArray,prm):
  global scnFtArray,scnPcArray,Param
  Param.update(prm)
  scnFtArray=[]
  scnPcArray=[]
  t1=time.time()
  for dat in datArray:
    pc=fromNumpy(dat)
    scnPcArray.append(pc)
    scnFtArray.append(_get_features(pc))
  print "time for extracting feature",time.time()-t1
  t1=time.time()
  resft=o3.registration.registration_fast_based_on_feature_matching(
    modPcArray[0],scnPcArray[0],modFtArray[0],scnFtArray[0],
    o3.registration.FastGlobalRegistrationOption(
      maximum_correspondence_distance=Param["distance_threshold"]))
  print "time for feature matching",time.time()-t1
  print "feature matching",resft.transformation
#  return {"transform":[resft.transformation],"fitness":[0]}
  resicp=o3.registration_icp(
    modPcArray[0],scnPcArray[0],
    Param["icp_threshold"],
    resft.transformation,o3.TransformationEstimationPointToPlane())
  return {"transform":[resicp.transformation],"fitness":[resicp.fitness],"rmse":[resicp.inlier_rmse]}        
Example #14
    mesh = open3d.read_triangle_mesh(sfile)
    source = mesh2pcd(mesh)

    open3d.draw_geometries([target])
    open3d.draw_geometries([source])

    trans_init = np.asarray([[1, 0, 0, 0],
                             [0, 1, 0, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, 1]])

    draw_registration_result(source, target, trans_init)
    print("Initial alignment")
    evaluation = open3d.evaluate_registration(source, target, np.Inf, trans_init)
    print(evaluation)

    print("Apply point-to-point ICP")
    reg_p2p = open3d.registration_icp(source, target, np.Inf, trans_init, open3d.TransformationEstimationPointToPoint())
    print(reg_p2p)
    print("Transformation is:")
    print(reg_p2p.transformation)
    print("")
    draw_registration_result(source, target, reg_p2p.transformation)

    print("Apply point-to-plane ICP")
    reg_p2l = open3d.registration_icp(source, target, np.Inf, trans_init, open3d.TransformationEstimationPointToPlane())
    print(reg_p2l)
    print("Transformation is:")
    print(reg_p2l.transformation)
    print("")
    draw_registration_result(source, target, reg_p2l.transformation)
Example #15
def open3d_icp_normal(pa,
                      pb,
                      trans_init=np.eye(4),
                      max_distance=0.02,
                      b_graph=False):
    '''
    (Transformation A->B)

    pa : numpy array
    pb : numpy array

    return: 
        transformation
            - 4x4 numpy array
        result
            - transformation
            - fitness
            - inlier_rmse
            - correspondence_set
    '''

    import open3d as op3
    import copy

    def draw_registration_result(source, target, transformation):
        source_temp = copy.deepcopy(source)
        target_temp = copy.deepcopy(target)
        source_temp.paint_uniform_color([1, 0.706, 0])
        target_temp.paint_uniform_color([0, 0.651, 0.929])
        source_temp.transform(transformation)
        op3.draw_geometries([source_temp, target_temp])

    pc1 = op3.PointCloud()
    pc1.points = op3.Vector3dVector(pa)

    pc2 = op3.PointCloud()
    pc2.points = op3.Vector3dVector(pb)

    op3.estimate_normals(pc1,
                         search_param=op3.KDTreeSearchParamHybrid(radius=10.0,
                                                                  max_nn=30))
    op3.estimate_normals(pc2,
                         search_param=op3.KDTreeSearchParamHybrid(radius=10.0,
                                                                  max_nn=30))

    # op3.draw_geometries([pc1, pc2])

    # max_distance = 0.2
    # trans_init = np.asarray(
    # [[0.862, 0.011, -0.507,  0.5],
    # [-0.139, 0.967, -0.215,  0.7],
    # [0.487, 0.255,  0.835, -1.4],
    # [0.0, 0.0, 0.0, 1.0]])
    # trans_init = np.eye(4)
    # tsl = pb.mean(axis=0) - pa.mean(axis=0)
    # trans_init = TransformationMatrixFromEulerAngleTranslation(euler, tsl)
    # trans_init = transform

    #### # print("Apply point-to-point ICP")
    #### reg_p2p = op3.registration_icp(pc1, pc2, max_distance, trans_init,
    ####         # op3.TransformationEstimationPointToPoint())
    ####         op3.TransformationEstimationPointToPoint(),
    ####         op3.ICPConvergenceCriteria(max_iteration = 2000))
    #### # print(reg_p2p)
    #### # print("Transformation is:")
    #### # print(reg_p2p.transformation)
    #### # print(EulerAngleTranslationFromTransformationMatrix(reg_p2p.transformation))
    #### # print("")

    # print("Apply point-to-plane ICP")
    reg_p2l = op3.registration_icp(
        pc1, pc2, max_distance, trans_init,
        op3.TransformationEstimationPointToPlane(),
        op3.ICPConvergenceCriteria(max_iteration=2000))
    # print(reg_p2l)
    # print("Transformation is:")
    # print(reg_p2l.transformation)
    # print("")

    if b_graph:
        draw_registration_result(pc1, pc2, reg_p2l.transformation)

    # print(reg_p2p.transformation)
    # print(reg_p2p.fitness)
    # print(reg_p2p.inlier_rmse)
    # print(reg_p2p.correspondence_set)

    return reg_p2l.transformation, reg_p2l
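
A synthetic usage sketch for open3d_icp_normal; the sheet shape and the 0.01 offset are arbitrary, chosen so the normals are well defined and the offset fits within max_distance:

import numpy as np

# a gently curved sheet gives meaningful point-to-plane normals
xy = np.random.rand(800, 2)
pa = np.c_[xy, 0.05 * np.sin(xy[:, 0] * 6.0)]
pb = pa + np.array([0.01, 0.0, 0.0])  # pb is pa shifted 0.01 along x

T, result = open3d_icp_normal(pa, pb, max_distance=0.05)
print(T[:3, 3])  # expect roughly (0.01, 0, 0)
print(result.fitness, result.inlier_rmse)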
print("Fast global registration took %.3f sec.\n" % (time.time() - start))
fgr.draw_registration_result(source_down, target_down,
                             result_fast.transformation)

# start accurate registration
threshold = 0.5
trans_init = result_fast.transformation
print("Initial alignment")
evaluation = open3d.evaluate_registration(source, target, threshold,
                                          trans_init)
print(evaluation)

print("Apply point-to-point ICP")
reg_p2p = open3d.registration_icp(
    source, target, threshold, trans_init,
    open3d.TransformationEstimationPointToPoint())
print(reg_p2p)
print("Transformation is:")
print(reg_p2p.transformation)
print("")
fgr.draw_registration_result(source, target, reg_p2p.transformation)

print("Apply point-to-plane ICP")
reg_p2l = open3d.registration_icp(
    source, target, threshold, trans_init,
    open3d.TransformationEstimationPointToPlane())
print(reg_p2l)
print("Transformation is:")
print(reg_p2l.transformation)
print("")
fgr.draw_registration_result(source, target, reg_p2l.transformation)
Example #17
    def registerLocalCloud(self, target, source):
        """
        get local transformation matrix
        :param target:  Target Point Cloud
        :param source: Source Point Cloud
        :return: transformation from target cloud to source cloud
        """

        source_temp = copy.deepcopy(source)
        target_temp = copy.deepcopy(target)

        source_temp = o3d.voxel_down_sample(source_temp, 0.004)
        target_temp = o3d.voxel_down_sample(target_temp, 0.004)

        o3d.estimate_normals(source_temp, search_param=o3d.KDTreeSearchParamHybrid(
            radius=0.1, max_nn=30))
        o3d.estimate_normals(target_temp, search_param=o3d.KDTreeSearchParamHybrid(
            radius=0.1, max_nn=30))

        current_transformation = np.identity(4)
        # use point-to-plane ICP registration to obtain an initial pose guess
        result_icp_p2l = o3d.registration_icp(source_temp, target_temp, 0.02,
                                              current_transformation, o3d.TransformationEstimationPointToPlane())
        # 0.02 is the correspondence search distance

        print("----------------")
        print("initial guess from Point-to-plane ICP registeration")
        print(result_icp_p2l)
        print(result_icp_p2l.transformation)

        p2l_init_trans_guess = result_icp_p2l.transformation
        print("----------------")
        # print("Colored point cloud registration")

    #   Double Normal ICP
        result_icp = o3d.registration_icp(
            source_temp, target_temp, 0.01, p2l_init_trans_guess, o3d.TransformationEstimationPointToPlane())
    #   Color ICP
        # result_icp = o3d.registration_colored_icp(
        #     source_temp, target_temp, 0.01, p2l_init_trans_guess)

        print(result_icp)
        print(result_icp.transformation)
        # print(result_icp.fitness)

        # draw_registration_result_original_color(source, target, result_icp.transformation)

        # write intermediate result for testing, but this was found to make
        # the registration result worse...
        # ---->>>> because source.transform will change the source!!!!!!
        # source.transform(result_icp.transformation)
        # temp_cloud = target + source
        # write_point_cloud("/home/dylan2/catkin_ws/src/temp/pointCloudInRviz/data/result/{}-{}.pcd".format(
        # self.cloud_index,self.cloud_index-1), temp_cloud , write_ascii = False)

        return result_icp.transformation
        ######################################
        #    kick-out rule
        ######################################
        # New rule:
        # 1/ rotation is out of plane (5 degree, or 0.087266 in radians);
        # 2/ too big rotation;
        # 3/ too big translation;

        # first calculate what is the rotation direction and rotation angle
        """
Example #18
def stitch_pc(files_directory, output_dir, r, with_images=False, downsample=2.5,
           cam_settings=(cam_angle_of_view, cam_rot, cam_pos, image_flip_h, image_flip_v),
           omsws_settings=(scan_id, inverse_x, rotation)):
    
    (cam_angle_of_view, cam_rot, cam_pos, image_flip_h, image_flip_v) = cam_settings
    
    # ICP setting: how close a point must be to count as a correspondence
    threshold = 2.5
    
    file_paths = [join(files_directory, path) for path 
                  in os.listdir(files_directory) if path.endswith('.omsws')]
    file_paths.sort()

    target = su.StitchPart(file_paths[0], omsws_settings, cam_settings, r)    
    crawler_position = target.distance_driven * 1000
    
    # create the transformation matrix (identity for now)
    transformation = np.identity(4)
        
    print('base ', split(file_paths[0])[1])
    
    # initialize the straightening rotation, which will centre-correct and
    # rotate the scans so they are parallel to the z axis
    rot_trans_straight = None
    
    # initialize the output point cloud that will save all the measured points
    stitched = su.StitchResult()

    if downsample > 0:
        target.full_copy = o3d.voxel_down_sample(target.full_copy, 
                                                 voxel_size=downsample)
    stitched.Pc = copy.deepcopy(target.full_copy)

    # save the number of points and the path of the current file to the class
    stitched.points_no.append(len(target.full_copy.points))
    stitched.omsws_paths.append(target.omsws.path)
    
    if with_images:
        colors = target.load_images()/255
        stitched.Pc.colors = o3d.Vector3dVector(colors)    

    # skip the stitching loop if we only have one file path
    dont_stitch = len(file_paths) == 1
        
    for i, path in enumerate(file_paths[1:]):
        
        if dont_stitch:
            break
        
        print('stitching {}'.format(split(path)[1]))
        
        # initialize source point cloud
        source = su.StitchPart(path, omsws_settings, cam_settings, r)
#        o3d.draw_geometries([source.full_copy, target.lower])
        if downsample > 0:
            source.full_copy = o3d.voxel_down_sample(source.full_copy, 
                                                     voxel_size = downsample)
        if with_images:
            colors = source.load_images()/255           
            stitched.Pc.colors.extend(o3d.Vector3dVector(colors))

        # calculate the centroids of the point clouds and find the difference
        transition = su.estimate_xy_translation(source.lower, target.lower, 
                                                rot_trans_straight)
        
        # update x and y of translation so the centroids are in the same place
        trans_init = np.identity(4)
        trans_init[:2, 3] = transition[:2]
        
        # update z from the crawler position distance saved in the omsws file
        trans_init[2, 3] = source.omsws.distance_driven * 1000 - crawler_position
        print('crawler position: %.2f' % crawler_position)
        crawler_position = source.omsws.distance_driven * 1000
        
        # stitch the point clouds of the lower part using icp
        reg_p2p = o3d.registration_icp(source.lower, target.lower, threshold, 
                        trans_init, o3d.TransformationEstimationPointToPlane())

        # stitch the full point clouds using the registration matrix from
        # previous stitch
        reg_p2p = o3d.registration_icp(source.full, target.full, threshold,
                                   reg_p2p.transformation,
                                   o3d.TransformationEstimationPointToPlane())
        
        print('fitness: {}'.format(reg_p2p.fitness))
        trans_init = reg_p2p.transformation
        
        # compose the transformation of the current scan (which best fits the
        # target scan placed at the origin) with the accumulated global
        # transformation of the target scan (its real-world placement)
        transformation = np.dot(transformation, reg_p2p.transformation)
        
        # transform our copy with this transformation, which places the
        # source in the correct position
        source.full_copy.transform(transformation)
        
        # add all the points of the scan to the stitched scan
        stitched.Pc.points.extend(source.full_copy.points)
        stitched.points_no.append(len(source.full_copy.points))
        
        # add the omsws path to the list in the stitched result
        stitched.omsws_paths.append(source.omsws.path)
        
        stitched.transformations.append(transformation)
        
        # update target and target full
        target = source
    
    # create output folder
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        
    output_path = join(output_dir, "stitched.pcd")
    # save the point cloud (the display steps below are commented out)
    o3d.write_point_cloud(output_path, stitched.Pc)
    
    stitched.Pc = su.o3d_estimate_normals(stitched.Pc)
    o3d.write_point_cloud(join(files_directory, "stitched_downsampled_colored.pcd"), stitched.Pc)
##    stitched.Pc.paint_uniform_color([1, 0.706, 0])
#    mesh_frame = o3d.create_mesh_coordinate_frame(size = 200, origin = [0, 0, 0])
#    o3d.draw_geometries([stitched.Pc, mesh_frame])
    
    # save transformation of the stitched point clouds to json file
    transformations = [list([list(el) for el in t]) for t in stitched.transformations]
    with open(join(output_dir, 'stitched_data.json'), 'w') as outfile:
        data = {'paths': stitched.omsws_paths, 
                'points_no': stitched.points_no, 
                'transformations': transformations}
        json.dump([data], outfile)

    return (output_path, join(output_dir, 'stitched_data.json'))
Example #19
	def update_keyframe(self, cloud):
		criteria = open3d.ICPConvergenceCriteria(max_iteration=100)
		reg = open3d.registration_icp(cloud, self.keyframes[-1].cloud, 1.0, self.last_frame_transformation, open3d.TransformationEstimationPointToPlane(), criteria=criteria)

		angle = pyquaternion.Quaternion(matrix=reg.transformation[:3, :3]).degrees
		trans = numpy.linalg.norm(reg.transformation[:3, 3])

		if abs(angle) < self.keyframe_angle_thresh_deg and abs(trans) < self.keyframe_trans_thresh_m:
			self.last_frame_transformation = reg.transformation
			return False

		odom = reg.transformation.dot(self.keyframes[-1].odom)
		self.keyframes.append(Keyframe(len(self.keyframes), cloud, odom))
		self.graph.nodes.append(self.keyframes[-1].node)
		self.vis.add_geometry(self.keyframes[-1].transformed)

		self.last_frame_transformation = numpy.identity(4)

		information = open3d.get_information_matrix_from_point_clouds(self.keyframes[-1].cloud, self.keyframes[-2].cloud, 1.0, reg.transformation)
		edge = open3d.PoseGraphEdge(self.keyframes[-1].id, self.keyframes[-2].id, reg.transformation, information, uncertain=False)

		self.graph.edges.append(edge)

		return True
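
For context, once several keyframes and edges have accumulated, the pose graph assembled above could be optimized with the legacy flat API along these lines; the option values here are assumptions to tune:

import open3d

def optimize_pose_graph(graph):
    # graph: the open3d.PoseGraph populated in update_keyframe above
    option = open3d.GlobalOptimizationOption(
        max_correspondence_distance=1.0,  # matches the ICP threshold above
        edge_prune_threshold=0.25,
        reference_node=0)
    open3d.global_optimization(
        graph,
        open3d.GlobalOptimizationLevenbergMarquardt(),
        open3d.GlobalOptimizationConvergenceCriteria(),
        option)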
Example #20
    print("err_R: ", err_R)
    print()
    draw_registration_result_original_color(source, target,
                                            result_icp.transformation)

    # point to plane ICP
    current_transformation = np.identity(4)
    print("Point-to-plane ICP")
    o3.estimate_normals(source,
                        o3.KDTreeSearchParamHybrid(radius=0.4, max_nn=30))
    o3.estimate_normals(target,
                        o3.KDTreeSearchParamHybrid(radius=0.4, max_nn=30))
    t3 = time.time()
    result_icp = o3.registration_icp(source, target, 0.25,
                                     current_transformation,
                                     o3.TransformationEstimationPointToPlane(),
                                     myCriteria)
    t4 = time.time()
    print("Time Consumed: ", t4 - t3)
    print(result_icp)
    print(result_icp.transformation)
    err_t, err_R = eval_err(result_icp.transformation, Tg)
    print("err_t: ", err_t)
    print("err_R: ", err_R)
    print()
    draw_registration_result_original_color(source, target,
                                            result_icp.transformation)

    # colored pointcloud registration
    # coarse to fine
    voxel_radius = [0.4, 0.2, 0.1]