def load_and_create_cam_trajectory(filename,
                                   name_prefix="",
                                   strip_file_extension=True,
                                   start_frame=1,
                                   start_time=None,
                                   fps="data",
                                   no_keyframe_highlighting=True,
                                   goto_last_keyframe=False):
    """
    Load a camera trajectory (in the TUM format, see "dataset_tools" module for more info)
    with filename "filename", and create it in Blender.
    
    "name_prefix", "strip_file_extension" : see documentation of "object_name_from_filename()"
    "start_frame" : Blender's start frame of the trajectory
    "start_time" : if not None, this float will be used as trajectory's timestamp offset to start from
    "fps" : should be one of the following:
            - "blender" : infer the fps from Blender's scene "fps" property
            - "data" : infer the fps from the data (using minimum delta time), Blender's "fps" will be adjusted
            - an integer indicating the data's fps
    "no_keyframe_highlighting" : if True, don't show framenumbers for each keyframe along trajectory
    "goto_last_keyframe" : go to the last keyframe of the generated trajectory
    
    Note: to synchronize multiple trajectories in time,
    it is advised to set:
            - "start_frame" to 0
            - "start_time" to 0.
            - "fps" to the exact fps integer
    """

    timestps, locations, quaternions = dataset_tools.load_cam_trajectory_TUM(
        filename)

    if len(timestps) == 0:
        return
    elif len(timestps) == 1:
        framenrs = [start_frame]
    else:
        if fps == "blender":
            fps = bpy.context.scene.render.fps
        elif fps == "data":
            fps = 1. / np.min(timestps[1:] - timestps[:-1])
            bpy.context.scene.render.fps = int(round(fps))
        if start_time is None:
            start_time = timestps[0]
        framenrs = np.rint(start_frame +
                           (timestps - start_time) * float(fps)).astype(int)

    create_cam_trajectory(object_name_from_filename(filename, name_prefix,
                                                    strip_file_extension),
                          locations,
                          quaternions,
                          start_frame,
                          framenrs,
                          no_keyframe_highlighting,
                          goto_last_keyframe=goto_last_keyframe)
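
# A minimal usage sketch for "load_and_create_cam_trajectory()" (hypothetical
# filenames; assumes this module runs inside Blender, where "bpy" is
# available). As advised in the docstring above, two trajectories are
# synchronized in time by sharing "start_frame", "start_time" and an explicit
# integer "fps".
def _example_sync_two_trajectories():
    load_and_create_cam_trajectory("traj_groundtruth.txt",
                                   start_frame=0, start_time=0., fps=30)
    load_and_create_cam_trajectory("traj_estimated.txt", name_prefix="est-",
                                   start_frame=0, start_time=0., fps=30)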
def main():
    (traj_to_file, traj_from_file, traj_input_files, map_input_files,
     at_frame, offset_time) = parse_cmd_args()

    print("Calculating transformation...")
    cam_trajectory_from = dataset_tools.load_cam_trajectory_TUM(traj_from_file)
    cam_trajectory_to = dataset_tools.load_cam_trajectory_TUM(traj_to_file)
    transformation = dataset_tools.transform_between_cam_trajectories(
        cam_trajectory_from,
        cam_trajectory_to,
        at_frame=at_frame,
        offset_time=offset_time)

    print("Results:")
    delta_quaternion, delta_scale, delta_location = transformation
    print("delta_quaternion:")
    print("\t %s" % delta_quaternion)
    print("delta_scale:")
    print("\t %s" % delta_scale)
    print("delta_location:")
    print("\t %s" % delta_location)
    print()

    for traj_input_file in traj_input_files:
        print('Transforming traj "%s"...' % traj_input_file)
        dataset_tools.save_cam_trajectory_TUM(
            "%s-trfm%s" % tuple(os.path.splitext(traj_input_file)),
            dataset_tools.transformed_cam_trajectory(
                dataset_tools.load_cam_trajectory_TUM(traj_input_file),
                transformation))

    for map_input_file in map_input_files:
        print('Transforming map "%s"...' % map_input_file)
        points, colors, _ = dataset_tools.load_3D_points_from_pcd_file(
            map_input_file, use_alpha=True)
        dataset_tools.save_3D_points_to_pcd_file(
            "%s-trfm%s" % tuple(os.path.splitext(map_input_file)),
            dataset_tools.transformed_points(points, transformation), colors)

    print("Done.")
def main():
    """
    The quaternions of the groundtruth will be normalized,
    and saved to another filename.
    """

    print("Normalizing quaternions of groundtruth camera trajectory...")

    dataset_tools.save_cam_trajectory_TUM(
        join_path("sin2_tex2_h1_v8_d", "traj_groundtruth.txt"),
        dataset_tools.load_cam_trajectory_TUM(join_path("sin2_tex2_h1_v8_d", "trajectory_nominal.txt")),
    )

    print("Done.")
def repair_ICL_NUIM_cam_trajectory(filename_in,
                                   filename_out,
                                   initial_location=None,
                                   rebuild_timestamps=True,
                                   delta_timestamp=0.,
                                   fps=30):
    """
    After this repair, you will be able to use the output in conjunction with
    the original (i.e. non-mirrored) 3D scene of the dataset,
    provided that you don't apply the negative Y-factor of the camera calibration matrix.
    
    "filename_in" : filename of the input camera trajectory
    "filename_out" : filename of the repaired output camera trajectory
    "initial_location" : location of the first camera pose
        Note that all ICL NUIM camera trajectories are only determined up to an unknown translation.
        To find the correct "initial_location", you can do the following:
            Ps = load_cam_poses_POV("traj1.posesRenderingCommands.sh")
            _, locs_exact, _ = dataset_tools.convert_cam_poses_to_cam_trajectory_TUM(Ps)
            initial_location = [-locs_exact[0, 0], locs_exact[0, 1], locs_exact[0, 2]]
    "rebuild_timestamps" :
        if True, regenerate timestamps starting from start-time "delta_timestamp",
        at a rate of "fps"
    
    The new "timestps", "locations", "quaternions" will be returned.
    
    Note: this is not the recommended approach;
    for accuracy, consider using "load_cam_poses_POV()" instead.
    """
    timestps, locations, quaternions = dataset_tools.load_cam_trajectory_TUM(
        filename_in)

    if initial_location is not None:
        delta_location = np.asarray(initial_location) - locations[0]
    else:
        delta_location = np.zeros(3)

    if rebuild_timestamps:
        timestps = delta_timestamp + (1 +
                                      np.arange(len(timestps))) / float(fps)

    # Un-mirror each pose: negate the z component of the location,
    # and permute/negate the quaternion components accordingly.
    for i, (location, quaternion) in enumerate(zip(locations, quaternions)):
        lx, ly, lz = location
        locations[i] = np.array([lx, ly, -lz]) + delta_location

        qx, qy, qz, qw = quaternion
        quaternions[i] = np.array([qw, qz, qy, -qx])

    dataset_tools.save_cam_trajectory_TUM(filename_out,
                                          (timestps, locations, quaternions))
    return timestps, locations, quaternions
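
# A hedged usage sketch (hypothetical filenames), following the docstring's
# recipe to recover "initial_location" from the POV-Ray rendering commands
# before repairing the trajectory.
def _example_repair_trajectory():
    Ps = load_cam_poses_POV("traj1.posesRenderingCommands.sh")
    _, locs_exact, _ = dataset_tools.convert_cam_poses_to_cam_trajectory_TUM(Ps)
    initial_location = [-locs_exact[0, 0], locs_exact[0, 1], locs_exact[0, 2]]
    return repair_ICL_NUIM_cam_trajectory(
            "traj1.txt", "traj1_repaired.txt",
            initial_location=initial_location, fps=30)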
def main():
    """
    A file with the initial pose and 3D points of the SVO dataset will be created.
    """
    num_features = 100
    num_iterations = 53
    #corner_quality_level = 0.4805294789864505

    print("Searching for features ( max", num_features, ")...")

    # Load first image
    img = cv2.cvtColor(
        cv2.imread(join_path("sin2_tex2_h1_v8_d", "img",
                             "frame_000002_0.png")), cv2.COLOR_BGR2GRAY)

    # Find just enough features, iterate using bisection to find the best "corner_quality_level" value
    corner_min_dist = 0.
    lower = 0.
    upper = 1.
    for i in range(num_iterations):
        corner_quality_level = (lower + upper) / 2
        imgp = cv2.goodFeaturesToTrack(img, num_features, corner_quality_level,
                                       corner_min_dist)
        if imgp is None or len(imgp) < num_features:
            upper = corner_quality_level
        else:
            lower = corner_quality_level
    # Keep the largest quality level that still yielded enough features
    corner_quality_level = lower if lower else corner_quality_level
    imgp = cv2.goodFeaturesToTrack(img, num_features, corner_quality_level,
                                   corner_min_dist).reshape((-1, 2))

    print(len(imgp), "features found, corner_quality_level:",
          corner_quality_level)

    # Load camera intrinsics
    cameraMatrix, distCoeffs, imageSize = calibration_tools.load_camera_intrinsics(
        join_path("camera_intrinsics.txt"))

    # Load and save first pose
    timestps, locations, quaternions = dataset_tools.load_cam_trajectory_TUM(
        join_path("sin2_tex2_h1_v8_d", "traj_groundtruth.txt"))
    P = trfm.P_from_pose_TUM(quaternions[0], locations[0])
    np.savetxt(join_path("sin2_tex2_h1_v8_d", "init_pose.txt"), P)

    # Generate 3D points, knowing that they lie on the plane z=0:
    # for each 2D point, the following system of equations is solved for X:
    #     A * X = b
    # where:
    #     X = [x, y, s]^T
    #     [x, y, 0] the 3D point's coords
    #     s is an unknown scalefactor of the normalized 2D point
    #     A = [[ 1  0 |    ],
    #          [ 0  1 | Pu ],
    #          [ 0  0 |    ]]
    #     Pu = - R^(-1) * p
    #     p = [u, v, 1]^T the 2D point's homogeneous coords
    #     b = - R^(-1) * t
    #     R, t = 3x3 rotation matrix and 3x1 translation vector of 3x4 pose matrix P
    # The system of equations is solved in bulk for all points using broadcasted backsubstitution
    objp = np.empty((len(imgp), 3)).T
    objp[2:3, :] = 0.  # z = 0
    imgp_nrml = cv2.undistortPoints(np.array([imgp]), cameraMatrix,
                                    distCoeffs)[0]
    imgp_nrml = np.concatenate((imgp_nrml, np.ones((len(imgp), 1))),
                               axis=1)  # to homogeneous coords
    P_inv = trfm.P_inv(P)
    Pu = P_inv[0:3, 0:3].dot(imgp_nrml.T)
    scale = -P_inv[2:3, 3:4] / Pu[2:3, :]
    objp[0:2, :] = P_inv[0:2, 3:4] + scale * Pu[0:2, :]  # x and y components
    objp = objp.T

    # Save 3D points
    dataset_tools.save_3D_points_to_pcd_file(
        join_path("sin2_tex2_h1_v8_d", "init_points.pcd"), objp)

    print("Done.")