def extract_points_to_pcd_file(filename, by_selection=False, name_starts_with="", name_ends_with=""):
    """
    Extract 3D coordinates of vertices of meshes to a PointCloud (.pcd) file.
    The resulting vertices are sorted by object name.
    
    "filename" : .pcd-file to save to
    "by_selection", "name_starts_with", "name_ends_with" : see documentation of "get_objects()"
    
    Note: currently, colors are not exported.
    """
    
    verts = []
    for ob_name, ob in get_objects(by_selection, name_starts_with, name_ends_with):
        # Blender 2.7x API: matrix-vector multiplication uses "*" ("@" since 2.80)
        verts += [tuple(ob.matrix_world * vertex.co) for vertex in ob.data.vertices]
    verts = np.array(verts)
    
    dataset_tools.save_3D_points_to_pcd_file(filename, verts)
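
# A minimal usage sketch, assuming this module is imported in Blender's Python
# console; the output paths and the name prefix are hypothetical placeholders.
#
#   extract_points_to_pcd_file("/tmp/all_meshes.pcd")
#   extract_points_to_pcd_file("/tmp/selection.pcd", by_selection=True)
#   extract_points_to_pcd_file("/tmp/clouds.pcd", name_starts_with="cloud_")
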
def main():
    traj_to_file, traj_from_file, traj_input_files, map_input_files, at_frame, offset_time = parse_cmd_args()

    print("Calculating transformation...")
    cam_trajectory_from = dataset_tools.load_cam_trajectory_TUM(traj_from_file)
    cam_trajectory_to = dataset_tools.load_cam_trajectory_TUM(traj_to_file)
    transformation = dataset_tools.transform_between_cam_trajectories(
        cam_trajectory_from,
        cam_trajectory_to,
        at_frame=at_frame,
        offset_time=offset_time)

    print("Results:")
    delta_quaternion, delta_scale, delta_location = transformation
    print("delta_quaternion:")
    print("\t %s" % delta_quaternion)
    print("delta_scale:")
    print("\t %s" % delta_scale)
    print("delta_location:")
    print("\t %s" % delta_location)
    print()

    for traj_input_file in traj_input_files:
        print('Transforming traj "%s"...' % traj_input_file)
        dataset_tools.save_cam_trajectory_TUM(
            "%s-trfm%s" % tuple(os.path.splitext(traj_input_file)),
            dataset_tools.transformed_cam_trajectory(
                dataset_tools.load_cam_trajectory_TUM(traj_input_file),
                transformation))

    for map_input_file in map_input_files:
        print('Transforming map "%s"...' % map_input_file)
        points, colors, _ = dataset_tools.load_3D_points_from_pcd_file(
            map_input_file, use_alpha=True)
        dataset_tools.save_3D_points_to_pcd_file(
            "%s-trfm%s" % tuple(os.path.splitext(map_input_file)),
            dataset_tools.transformed_points(points, transformation), colors)

    print("Done.")
def main():
    """
    Create a file with the initial pose and the 3D points of the SVO dataset.
    """
    num_features = 100
    num_iterations = 53
    #corner_quality_level = 0.4805294789864505

    print("Searching for features ( max", num_features, ")...")

    # Load first image
    img = cv2.cvtColor(
        cv2.imread(join_path("sin2_tex2_h1_v8_d", "img",
                             "frame_000002_0.png")), cv2.COLOR_BGR2GRAY)

    # Find just enough features, iterate using bisection to find the best "corner_quality_level" value
    corner_min_dist = 0.
    lower = 0.
    upper = 1.
    for i in range(num_iterations):
        corner_quality_level = (lower + upper) / 2
        imgp = cv2.goodFeaturesToTrack(img, num_features, corner_quality_level,
                                       corner_min_dist)
        if imgp is None or len(imgp) < num_features:
            upper = corner_quality_level
        else:
            lower = corner_quality_level
    # use the highest quality level known to yield enough features; if none
    # did (lower never moved from 0), keep the last bisection midpoint
    corner_quality_level = lower if lower else corner_quality_level
    imgp = cv2.goodFeaturesToTrack(img, num_features, corner_quality_level,
                                   corner_min_dist).reshape((-1, 2))

    print(len(imgp), "features found, corner_quality_level:",
          corner_quality_level)

    # Load camera intrinsics
    cameraMatrix, distCoeffs, imageSize = calibration_tools.load_camera_intrinsics(
        join_path("camera_intrinsics.txt"))

    # Load and save first pose
    timestps, locations, quaternions = dataset_tools.load_cam_trajectory_TUM(
        join_path("sin2_tex2_h1_v8_d", "traj_groundtruth.txt"))
    P = trfm.P_from_pose_TUM(quaternions[0], locations[0])
    np.savetxt(join_path("sin2_tex2_h1_v8_d", "init_pose.txt"), P)

    # Generate 3D points, knowing that they lay at the plane z=0:
    # for each 2D point, the following system of equations is solved for X:
    #     A * X = b
    # where:
    #     X = [x, y, s]^T
    #     [x, y, 0] the 3D point's coords
    #     s is an unknown scalefactor of the normalized 2D point
    #     A = [[ 1  0 |    ],
    #          [ 0  1 | Pu ],
    #          [ 0  0 |    ]]
    #     Pu = - R^(-1) * p
    #     p = [u, v, 1]^T the 2D point's homogeneous coords
    #     b = - R^(-1) * t
    #     R, t = 3x3 rotation matrix and 3x1 translation vector of 3x4 pose matrix P
    # The system of equations is solved in bulk for all points using broadcasted backsubstitution
    objp = np.empty((len(imgp), 3)).T
    objp[2:3, :] = 0.  # z = 0
    imgp_nrml = cv2.undistortPoints(np.array([imgp]), cameraMatrix,
                                    distCoeffs)[0]
    imgp_nrml = np.concatenate((imgp_nrml, np.ones((len(imgp), 1))),
                               axis=1)  # to homogeneous coords
    P_inv = trfm.P_inv(P)
    Pu = P_inv[0:3, 0:3].dot(imgp_nrml.T)
    scale = -P_inv[2:3, 3:4] / Pu[2:3, :]
    objp[0:2, :] = P_inv[0:2, 3:4] + scale * Pu[0:2, :]  # x and y components
    objp = objp.T

    # Save 3D points
    dataset_tools.save_3D_points_to_pcd_file(
        join_path("sin2_tex2_h1_v8_d", "init_points.pcd"), objp)

    print("Done.")