Example #1
def main(opt):
    eval_tool = KittiEvalOdom(plot_keys=opt.plot_keys)

    eval_tool.run(gt_dir=opt.kitti_odo_poses_path,
                  out_dir=opt.out_dir,
                  pred_dir=opt.pred_poses_dir,
                  alignment=opt.align,
                  seqs=opt.seqs)
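
The `opt` namespace consumed by `main()` is not shown in this excerpt. Below is a minimal sketch of an argparse setup that could produce it; the flag names are assumptions that simply mirror the attributes accessed above.

# Hypothetical parser producing the `opt` namespace used in main();
# flag names are assumptions mirroring the attributes accessed above.
import argparse

def parse_opts():
    parser = argparse.ArgumentParser(description="KITTI odometry evaluation")
    parser.add_argument('--kitti_odo_poses_path', type=str,
                        help="GT pose directory containing gt pose txt files")
    parser.add_argument('--pred_poses_dir', type=str,
                        help="directory with predicted poses")
    parser.add_argument('--out_dir', type=str,
                        help="output directory for evaluation results")
    parser.add_argument('--align', type=str,
                        choices=['scale', 'scale_7dof', '7dof', '6dof'],
                        default=None, help="alignment type")
    parser.add_argument('--seqs', nargs="+", default=None,
                        help="sequences to be evaluated")
    parser.add_argument('--plot_keys', nargs="+", default=None,
                        help="which plots to generate")
    return parser.parse_args()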
Example #2
def eval_odometry(seq, seq_pred):
    eval_odom = KittiEvalOdom()

    poses_gt = {i: seq.poses[i] for i in range(len(seq.poses))}
    poses_result = {i: seq_pred.poses[i] for i in range(len(seq_pred.poses))}
    
    result = {}
    
    # Compute sequence errors
    seq_err = eval_odom.calc_sequence_errors(poses_gt, poses_result)

    # Compute segment errors
    avg_segment_errs = eval_odom.compute_segment_error(seq_err)

    # Compute overall error
    ave_t_err, ave_r_err = eval_odom.compute_overall_err(seq_err)
    result['t_err(%)'] = ave_t_err * 100
    result['r_err(deg_per_100m)'] = ave_r_err / np.pi * 180 * 100

    # Compute ATE
    ate = eval_odom.compute_ATE(poses_gt, poses_result)
    result['ATE(m)'] = ate

    # Compute RPE
    rpe_trans, rpe_rot = eval_odom.compute_RPE(poses_gt, poses_result)
    result['RPE(m)'] = rpe_trans
    result['RPE(deg)'] = rpe_rot * 180 / np.pi
    
    return result
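
`eval_odometry()` only needs objects exposing a `poses` list, presumably of 4x4 homogeneous pose matrices as used by the kitti-odom-eval tool. A hedged usage sketch with synthetic stand-in sequences (the `make_seq` helper and the pose values are purely illustrative):

# Illustrative only: SimpleNamespace stands in for the sequence objects
# expected by eval_odometry(); poses are assumed to be 4x4 homogeneous matrices.
from types import SimpleNamespace
import numpy as np

def make_seq(n=200, step=1.0):
    poses = []
    for i in range(n):
        T = np.eye(4)
        T[0, 3] = i * step  # translate along x so the trajectory has some length
        poses.append(T)
    return SimpleNamespace(poses=poses)

seq_gt = make_seq()
seq_pred = make_seq(step=1.02)  # slightly over-scaled prediction

print(eval_odometry(seq_gt, seq_pred))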
Example #3
                        type=str,
                        default="dataset/kitti_odom/gt_poses/",
                        help="GT Pose directory containing gt pose txt files")
    parser.add_argument('--align',
                        type=str,
                        choices=['scale', 'scale_7dof', '7dof', '6dof'],
                        default=None,
                        help="alignment type")
    parser.add_argument('--seqs',
                        nargs="+",
                        help="sequences to be evaluated",
                        default=None)
    args = parser.parse_args()

    return args


if __name__ == '__main__':
    # argument parsing
    args = argument_parsing()

    # initialize evaluation tool
    eval_tool = KittiEvalOdom()

    eval_tool.eval(
        args.gt,
        args.result,
        alignment=args.align,
        seqs=args.seqs,
    )
Example #4
def run(args):
    """Run SLAM system for each frame in the dataset and save depths and poses.

    Args:
        args: command line arguments
    """
    setting_file = args.settings
    if not os.path.exists(setting_file):
        raise ValueError(f"Cannot find setting file at {setting_file}")
    if args.pose_id < -1:
        raise ValueError("Pose index must be -1 or >= 0")

    with open(args.settings) as fs:
        settings_yaml = yaml.safe_load(fs)
        print("\nAlgorithm " + settings_yaml["SLAM.alg"] + " has been set\n")

    print("Dataset selected: " + os.path.basename(args.dataset) + "\n")

    app = slampy.System(setting_file, slampy.Sensor.MONOCULAR_IMU)

    print("\n")

    if args.data_type == "TUM_VI":
        image_filenames, timestamps = load_images_TUM_VI(args.dataset)
    elif args.data_type == "OTHERS":
        image_filenames, timestamps = load_images_OTHERS(args.dataset)

    num_images = len(image_filenames)

    if args.data_type == "TUM_VI":
        acc_data, gyro_data, IMUtimestamps = load_IMU_datas_TUM_VI(
            args.dataset)

    dest_depth = os.path.join(args.dest, "depth")
    dest_pose = os.path.join(args.dest, "pose")

    create_dir(dest_depth)
    create_dir(dest_pose)

    states = []
    errors = []

    # Find the first useful IMU sample, assuming the IMU starts recording
    # well before the camera
    firstIMU = 0
    while IMUtimestamps[firstIMU] <= timestamps[0]:
        firstIMU += 1
    firstIMU -= 1

    # Array of valid IMU measurements; each measurement is 7 floats:
    # [acc x, acc y, acc z, gyro x, gyro y, gyro z, timestamp]
    imu = []

    with tqdm(total=num_images) as pbar:
        for idx, image_name in enumerate(image_filenames):
            # TODO: it is the image loader's duty to provide correct images
            image = cv2.imread(image_name)
            if image is None:
                raise ValueError(f"failed to load image {image_name}")

            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            imu.clear()  # Clear IMU measurements from the previous frame

            if idx > 0:  # Select only the IMU measurements that occur before the current frame
                while IMUtimestamps[firstIMU] <= timestamps[idx]:
                    imu_valid_meas = acc_data[firstIMU] + gyro_data[firstIMU]
                    imu_valid_meas.append(IMUtimestamps[firstIMU])
                    imu.append(imu_valid_meas)
                    firstIMU += 1

            state = app.process_image_imu_mono(image, timestamps[idx],
                                               np.array(imu))

            # NOTE: we build a default invalid depth in case of system failure
            if state == slampy.State.OK:
                depth = app.get_depth()
                pose_past_frame_to_current = app.get_pose_to_target(
                    precedent_frame=args.pose_id)
                name = os.path.splitext(os.path.basename(image_name))[0]

                depth_path = os.path.join(dest_depth, name)
                save_depth(depth_path, depth)

                pose_path = os.path.join(dest_pose, name)
                save_pose(pose_path, pose_past_frame_to_current)

                curr_pose = app.get_pose_to_target(-1)
                if curr_pose is not None:
                    save_pose_txt(args, name, curr_pose)

                if args.is_evaluate_depth:
                    gt_file_path = os.path.join(args.gt_depth,
                                                "{}.png".format(name))
                    err = get_error(args, name, depth, gt_file_path)
                    errors.append(err)

            states.append(state)
            pbar.update(1)

        if args.is_evaluate_depth:
            mean_errors = np.array(errors).mean(0)
            save_results = os.path.join(args.dest, "results.txt")
            save_depth_err_results(save_results, "mean values", mean_errors)

    # NOTE: final dump of log.txt file
    with open(os.path.join(args.dest, "log.txt"), "w") as f:
        for i, state in enumerate(states):
            f.write(f"{i}: {state}\n")

    if args.is_evaluate_pose:
        print("Begin to evaluate predicted pose")
        evaluate_pose(args)
        eval_tool = KittiEvalOdom()
        eval_tool.eval(args)
Example #5
def main():
    
    global folder_with_gt_poses
    global wb
    global args
    global kitti_eval_tool
    global output_folder
    global plot_mode
    global t
    global results
    
    argparser = argparse.ArgumentParser(description='Evaluation of poses')
    argparser.add_argument('--dir_gt', help='directory with gt poses', 
                           default='/media/cds-s/data2/Datasets/Husky-NKBVS/gt_poses_camera(with_cam_lidar_timestamps)')
    
    argparser.add_argument('--dir_result', help='directory with predicted poses', default='/media/cds-s/data2/Datasets/Husky-NKBVS/OpenVSLAM_results')
    
    argparser.add_argument('--dir_output', help='output directory, where results will be', 
                           default='/media/cds-s/data2/Datasets/Husky-NKBVS/result_evaluation')
    
    argparser.add_argument('--gt_format', choices=["kitti", "tum", "euroc"], help='format of gt poses: "kitti" or "tum" or "euroc"', default='kitti', required=False)

    argparser.add_argument('--result_format', choices=["kitti", "tum", "euroc"], help='format of result poses: "kitti" or "tum" or "euroc"', default='kitti', required=False)
    
    argparser.add_argument('--projection', choices=["xy", "yx", "xz", "zx", "yz", "zy"],
                           help='projection onto which the trajectory will be plotted. Possible variants: "xy", "yx", "xz", "zx", "yz", "zy"', default="xz", type=str)

    argparser.add_argument('--max_diff', help="maximum difference between timestamps when both the gt format and the result format are tum. By default it's 1/(2*FPS)=0.05 for FPS=10", 
                           default=10, type=float)

    argparser.add_argument('--alignment', choices=["scale", "6dof", "7dof", "scale_7dof"], help="Type of alignment. Choices are: 'scale', '6dof', '7dof', 'scale_7dof'", 
                           default=None, type=str)
    
    args = argparser.parse_args()
    folder_with_gt_poses = args.dir_gt
    folder_with_predicted_poses = args.dir_result
    output_folder = args.dir_output
    plot_mode = PlotMode(args.projection)
    
    plt.ioff()
    kitti_eval_tool = KittiEvalOdom()
    os.makedirs(output_folder, exist_ok=True)
    
    wb = Workbook()
    for sheet_name in wb.sheetnames:
        del wb[sheet_name]
    sheet1 = wb.create_sheet('sheet1',0)
    
    results = []
    processed_files_in_root_res_dir = False
    noOfFiles = 0
    for base, dirs, files in os.walk(folder_with_predicted_poses):
        noOfFiles += len(files)
    t = tqdm(total=noOfFiles)
    for filename in sorted(os.listdir(folder_with_predicted_poses)):
        if os.path.isfile(os.path.join(folder_with_predicted_poses, filename)) and not processed_files_in_root_res_dir:
            category = folder_with_predicted_poses.rstrip('/')
            category = category.split('/')[-1]
            get_and_save_results_from_folder(folder_with_predicted_poses, category)
            processed_files_in_root_res_dir = True
            output_summary(results)
            results = []
        else:
            if filename.find('.txt') != -1:
                continue
            category = filename.rstrip('/')
            get_and_save_results_from_folder(os.path.join(folder_with_predicted_poses, filename), category)
            output_summary(results)
            results = []
    t.close()
    for seq_results in results:
        if seq_results['metrics'] == {}:
            continue
        if seq_results["metrics"]["Kitti trans err (%)"] == 0:
            print("'Kitti trans err (%)' = 0")
            print("dataset - "+seq_results['name'])
            print()
        if seq_results["num_gt_poses"]*0.5 > seq_results["num_predicted_poses"]:
            print("few predicted poses:")
            print("num predicted poses "+str(seq_results["num_predicted_poses"])+\
                  ", num gt poses "+str(seq_results["num_gt_poses"]))
            print("dataset - "+seq_results['name'])
            print()
Example #6
                    type=str,
                    required=True,
                    help="Result directory")
parser.add_argument('--align',
                    type=str,
                    choices=['scale', 'scale_7dof', '7dof', '6dof'],
                    default=None,
                    help="alignment type")
parser.add_argument('--seqs',
                    nargs="+",
                    type=int,
                    help="sequences to be evaluated",
                    default=None)
args = parser.parse_args()

eval_tool = KittiEvalOdom()
gt_dir = "./kitti-odom-eval/dataset/kitti_odom/gt_poses/"
# gt_dir = "./dataset/kitti_odom/gt_poses/"
result_dir = args.result

continue_flag = input("Evaluate result in {}? [y/n]".format(result_dir))
if continue_flag == "y":
    eval_tool.eval(
        gt_dir,
        result_dir,
        alignment=args.align,
        seqs=args.seqs,
    )
else:
    print("Double check the path!")
Example #7
def run(args):
    """Run SLAM system for each frame in the dataset and save depths and poses.

    Args:
        args: command line arguments
    """
    setting_file = args.settings
    if not os.path.exists(setting_file):
        raise ValueError(f"Cannot find setting file at {setting_file}")
    if args.pose_id < -1:
        raise ValueError("Pose index must be -1 or >= 0")

    with open(args.settings) as fs:
        settings_yaml = yaml.safe_load(fs)
        print("\nAlgorithm " + settings_yaml["SLAM.alg"] + " has been set\n")

    print("Dataset selected: " + os.path.basename(args.dataset) + "\n")

    app = slampy.System(setting_file, slampy.Sensor.MONOCULAR)

    print("\n")

    # TODO: use a generic loader, not a KITTI-specific one

    if args.data_type == "TUM":
        image_filenames, timestamps = load_images_TUM(args.dataset, "rgb.txt")
    elif args.data_type == "KITTI_VO":
        image_filenames, timestamps = load_images_KITTI_VO(args.dataset)
    elif args.data_type == "OTHERS":
        image_filenames, timestamps = load_images_OTHERS(args.dataset)

    num_images = len(image_filenames)

    dest_depth = os.path.join(args.dest, "depth")
    dest_pose = os.path.join(args.dest, "pose")

    create_dir(dest_depth)
    create_dir(dest_pose)

    states = []
    errors = []

    with tqdm(total=num_images) as pbar:
        for idx, image_name in enumerate(image_filenames):
            # TODO: it is the image loader's duty to provide correct images
            # image_name = image_name.replace(".png", ".jpg")
            image = cv2.imread(image_name)
            if image is None:
                raise ValueError(f"failed to load image {image_name}")

            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
           
            state = app.process_image_mono(image, timestamps[idx])

            # NOTE: we build a default invalid depth in case of system failure
            if state == slampy.State.OK:
                depth = app.get_depth() 
                pose_past_frame_to_current = app.get_pose_to_target(
                    precedent_frame=args.pose_id
                )
                name = os.path.splitext(os.path.basename(image_name))[0] 
                
                depth_path = os.path.join(dest_depth, name)  
                save_depth(depth_path, depth) 

                pose_path = os.path.join(dest_pose, name)
                save_pose(pose_path, pose_past_frame_to_current)

                curr_pose = app.get_pose_to_target(-1) 
                if curr_pose is not None:
                    save_pose_txt(args, name, curr_pose)

                if args.is_evaluate_depth:
                    gt_file_path = os.path.join(args.gt_depth, "{}.png".format(name))
                    err = get_error(args, name, depth, gt_file_path)
                    errors.append(err)

            states.append(state)
            pbar.update(1)
        
        if args.is_evaluate_depth: 
            mean_errors = np.array(errors).mean(0) 
            save_results = os.path.join(args.dest, "results.txt")
            save_depth_err_results(save_results, "mean values", mean_errors)
    

    # NOTE: final dump of log.txt file
    with open(os.path.join(args.dest, "log.txt"), "w") as f:
        for i, state in enumerate(states):
            f.write(f"{i}: {state}\n")

    if args.is_evaluate_pose:
        print("Begin to evaluate predicted pose")
        evaluate_pose(args)
        eval_tool = KittiEvalOdom()
        eval_tool.eval(args)