# Assumed shared imports for the scripts below. "dataset_tools",
# "load_cam_poses_POV", "mirror_wavefront_obj_file" and "parse_cmd_args" are
# helpers defined elsewhere in this repository; "join_path" is assumed here to
# be an alias for os.path.join.
import os
import sys
from os.path import join as join_path

import numpy as np

import dataset_tools


def main():
    """
    Normalize the quaternions of the ground-truth camera trajectory,
    and save the result to another filename.
    """

    print("Normalizing quaternions of groundtruth camera trajectory...")

    dataset_tools.save_cam_trajectory_TUM(
        join_path("sin2_tex2_h1_v8_d", "traj_groundtruth.txt"),
        dataset_tools.load_cam_trajectory_TUM(join_path("sin2_tex2_h1_v8_d", "trajectory_nominal.txt")),
    )

    print("Done.")
def repair_ICL_NUIM_cam_trajectory(filename_in,
                                   filename_out,
                                   initial_location=None,
                                   rebuild_timestamps=True,
                                   delta_timestamp=0.,
                                   fps=30):
    """
    After this repair, you will be able to use the output in conjunction with
    the original (i.e. non-mirrored) 3D scene of the dataset,
    provided that you don't apply the negative Y-factor of the camera calibration matrix.
    
    "filename_in" : filename of the input camera trajectory
    "filename_out" : filename of the repaired output camera trajectory
    "initial_location" : location of the first camera pose
        Note that all ICL NUIM camera trajectories are undetermined up to an unknown translation.
        To find the correct "initial_location", you can do the following:
            Ps = load_cam_poses_POV("traj1.posesRenderingCommands.sh")
            _, locs_exact, _ = dataset_tools.convert_cam_poses_to_cam_trajectory_TUM(Ps)
            initial_location = [-locs_exact[0, 0], locs_exact[0, 1], locs_exact[0, 2]]
    "rebuild_timestamps" :
        if True, regenerate timestamps starting from start-time "delta_timestamp",
        at a rate of "fps"
    
    The new "timestps", "locations", "quaternions" will be returned.
    
    Note: this is not the recommended approach;
    for better accuracy, consider using "load_cam_poses_POV()" instead.
    """
    timestps, locations, quaternions = dataset_tools.load_cam_trajectory_TUM(
        filename_in)

    if initial_location is not None:
        delta_location = np.asarray(initial_location) - locations[0]
    else:
        delta_location = np.zeros(3)

    if rebuild_timestamps:
        # Regenerate evenly spaced timestamps at "fps" frames per second,
        # starting at "delta_timestamp".
        timestps = delta_timestamp + (1 + np.arange(len(timestps))) / float(fps)

    for i, (location, quaternion) in enumerate(zip(locations, quaternions)):
        # Flip the Z component of the location (undoing the dataset's mirroring)
        # and shift everything by "delta_location".
        lx, ly, lz = location
        locations[i] = np.array([lx, ly, -lz]) + delta_location

        # Remap the quaternion components to match the un-mirrored coordinate frame.
        qx, qy, qz, qw = quaternion
        quaternions[i] = np.array([qw, qz, qy, -qx])

    dataset_tools.save_cam_trajectory_TUM(filename_out,
                                          (timestps, locations, quaternions))
    return timestps, locations, quaternions
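

# --- Illustrative usage sketch, not part of the original scripts ------------
# Filenames here are hypothetical (taken from the docstring above); main()
# further below performs the same steps for all four Living Room sequences.
# The exact POV-Ray poses provide the otherwise unknown initial location.

def example_repair_usage():
    Ps = load_cam_poses_POV("traj1.posesRenderingCommands.sh")
    _, locs_exact, _ = dataset_tools.convert_cam_poses_to_cam_trajectory_TUM(Ps)
    return repair_ICL_NUIM_cam_trajectory(
            "livingRoom1n.gt.freiburg", "livingRoom1n.gt.freiburg_repaired",
            initial_location=np.array(
                    [-locs_exact[0, 0], locs_exact[0, 1], locs_exact[0, 2]]))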
def main():
    """
    The exact trajectories will be extracted, along with the repaired 3D model.
    For now only the "Living Room Dataset" is supported.
    """

    print("Mirroring 3D model...")

    mirror_wavefront_obj_file(
        join_path("living_room_obj_mtl", "living-room.obj"),
        join_path("living_room_obj_mtl", "living-room_Xmirrored.obj"))

    print("Extracting exact ground-truth camera trajectories...")

    pov_bash_script_filenames = [
        "livingroomlcmlog-2013-08-07.00.posesRenderingCommands.sh",
        "livingroomlcmlog-2013-08-07.01.posesRenderingCommands_copy.sh",
        "livingroomlcmlog-2013-08-07.02.posesRenderingCommands.sh",
        "livingroomlcmlog-2013-08-07.03.posesRenderingCommands.sh"
    ]

    for i, pov_bash_script_filename in enumerate(pov_bash_script_filenames):
        dataset_tools.save_cam_trajectory_TUM(
            join_path("living_room_traj%sn_frei_png" % i,
                      "traj_groundtruth%s.txt" % i),
            dataset_tools.convert_cam_poses_to_cam_trajectory_TUM(
                load_cam_poses_POV(
                    join_path("living_room_code", pov_bash_script_filename))))

    if "--repair-noisy-trajectories" in sys.argv:
        print("Repairing (noisy) camera trajectories...")

        for i, pov_bash_script_filename in enumerate(
                pov_bash_script_filenames):
            _, locs_exact, _ = dataset_tools.convert_cam_poses_to_cam_trajectory_TUM(
                load_cam_poses_POV(
                    join_path("living_room_code", pov_bash_script_filename)))
            repair_ICL_NUIM_cam_trajectory(
                join_path("living_room_traj%sn_frei_png" % i,
                          "livingRoom%sn.gt.freiburg" % i),
                join_path("living_room_traj%sn_frei_png" % i,
                          "livingRoom%sn.gt.freiburg_repaired" % i),
                np.array(
                    [-locs_exact[0, 0], locs_exact[0, 1], locs_exact[0, 2]]))

    print("Done.")
def main():
    (traj_to_file, traj_from_file, traj_input_files,
     map_input_files, at_frame, offset_time) = parse_cmd_args()

    print("Calculating transformation...")
    cam_trajectory_from = dataset_tools.load_cam_trajectory_TUM(traj_from_file)
    cam_trajectory_to = dataset_tools.load_cam_trajectory_TUM(traj_to_file)
    transformation = dataset_tools.transform_between_cam_trajectories(
        cam_trajectory_from,
        cam_trajectory_to,
        at_frame=at_frame,
        offset_time=offset_time)

    print("Results:")
    delta_quaternion, delta_scale, delta_location = transformation
    print("delta_quaternion:")
    print("\t %s" % delta_quaternion)
    print("delta_scale:")
    print("\t %s" % delta_scale)
    print("delta_location:")
    print("\t %s" % delta_location)
    print()

    for traj_input_file in traj_input_files:
        print('Transforming traj "%s"...' % traj_input_file)
        dataset_tools.save_cam_trajectory_TUM(
            "%s-trfm%s" % tuple(os.path.splitext(traj_input_file)),
            dataset_tools.transformed_cam_trajectory(
                dataset_tools.load_cam_trajectory_TUM(traj_input_file),
                transformation))

    for map_input_file in map_input_files:
        print('Transforming map "%s"...' % map_input_file)
        points, colors, _ = dataset_tools.load_3D_points_from_pcd_file(
            map_input_file, use_alpha=True)
        dataset_tools.save_3D_points_to_pcd_file(
            "%s-trfm%s" % tuple(os.path.splitext(map_input_file)),
            dataset_tools.transformed_points(points, transformation), colors)

    print("Done.")