Example #1
                        type=str,
                        default="dataset/kitti_odom/gt_poses/",
                        help="GT Pose directory containing gt pose txt files")
    parser.add_argument('--align',
                        type=str,
                        choices=['scale', 'scale_7dof', '7dof', '6dof'],
                        default=None,
                        help="alignment type")
    parser.add_argument('--seqs',
                        nargs="+",
                        help="sequences to be evaluated",
                        default=None)
    args = parser.parse_args()

    return args


if __name__ == '__main__':
    # argument parsing
    args = argument_parsing()

    # initialize evaluation tool
    eval_tool = KittiEvalOdom()

    eval_tool.eval(
        args.gt,
        args.result,
        alignment=args.align,
        seqs=args.seqs,
    )
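
Example #1 is cut off before the parser is constructed. Below is a minimal sketch of the missing preamble, assuming a standard argparse setup; the '--result' and '--gt' option names are inferred from how args is used in the snippet, not copied from the original:

import argparse

def argument_parsing():
    parser = argparse.ArgumentParser(description="KITTI odometry evaluation")
    parser.add_argument('--result',
                        type=str,
                        required=True,
                        help="Result directory")
    parser.add_argument('--gt',
                        type=str,
                        default="dataset/kitti_odom/gt_poses/",
                        help="GT Pose directory containing gt pose txt files")
    # ...the '--align' and '--seqs' arguments follow as shown in the snippet above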
Example #2
                    type=str,
                    required=True,
                    help="Result directory")
parser.add_argument('--align',
                    type=str,
                    choices=['scale', 'scale_7dof', '7dof', '6dof'],
                    default=None,
                    help="alignment type")
parser.add_argument('--seqs',
                    nargs="+",
                    type=int,
                    help="sequences to be evaluated",
                    default=None)
args = parser.parse_args()

eval_tool = KittiEvalOdom()
gt_dir = "./kitti-odom-eval/dataset/kitti_odom/gt_poses/"
# gt_dir = "./dataset/kitti_odom/gt_poses/"
result_dir = args.result

continue_flag = input("Evaluate result in {}? [y/n]".format(result_dir))
if continue_flag == "y":
    eval_tool.eval(
        gt_dir,
        result_dir,
        alignment=args.align,
        seqs=args.seqs,
    )
else:
    print("Double check the path!")
Example #3
def run(args):
    """Run SLAM system for each frame in the dataset and save depths and poses.

    Args:
        args: command line arguments
    """
    setting_file = args.settings
    if not os.path.exists(setting_file):
        raise ValueError(f"Cannot find setting file at {setting_file}")
    if args.pose_id < -1:
        raise ValueError("Pose index must be -1 or >= 0")

    with open(args.settings) as fs:
        settings_yaml = yaml.safe_load(fs)
        print("\nAlgorithm " + settings_yaml["SLAM.alg"] + " has been set\n")

    print("Dataset selected: " + os.path.basename(args.dataset) + "\n")

    app = slampy.System(setting_file, slampy.Sensor.MONOCULAR_IMU)

    print("\n")

    if args.data_type == "TUM_VI":
        image_filenames, timestamps = load_images_TUM_VI(args.dataset)
    elif args.data_type == "OTHERS":
        image_filenames, timestamps = load_images_OTHERS(args.dataset)

    num_images = len(image_filenames)

    if args.data_type == "TUM_VI":
        acc_data, gyro_data, IMUtimestamps = load_IMU_datas_TUM_VI(
            args.dataset)

    dest_depth = os.path.join(args.dest, "depth")
    dest_pose = os.path.join(args.dest, "pose")

    create_dir(dest_depth)
    create_dir(dest_pose)

    states = []
    errors = []

    # find the first useful IMU sample, assuming the IMU starts recording well before the camera
    firstIMU = 0
    while (IMUtimestamps[firstIMU] <= timestamps[0]):
        firstIMU += 1
    firstIMU -= 1

    # list of valid IMU measurements: each measurement is 7 floats
    # [acc x, acc y, acc z, gyro x, gyro y, gyro z, timestamp]
    imu = []

    with tqdm(total=num_images) as pbar:
        for idx, image_name in enumerate(image_filenames):
            # TODO: it is image loader duty to provide correct images
            image = cv2.imread(image_name)
            if image is None:
                raise ValueError(f"failed to load image {image_name}")

            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            imu.clear()  # clear IMU measurements from the previous frame

            if idx > 0:  # select only the IMU measurements taken at or before the current frame timestamp
                while (IMUtimestamps[firstIMU] <= timestamps[idx]):
                    imu_valid_meas = (acc_data[firstIMU] + gyro_data[firstIMU])
                    imu_valid_meas.append(IMUtimestamps[firstIMU])
                    imu.append(imu_valid_meas)
                    firstIMU += 1

            state = app.process_image_imu_mono(image, timestamps[idx],
                                               np.array(imu))

            # NOTE: we build a default invalid depth in the case of system failure
            if state == slampy.State.OK:
                depth = app.get_depth()
                pose_past_frame_to_current = app.get_pose_to_target(
                    precedent_frame=args.pose_id)
                name = os.path.splitext(os.path.basename(image_name))[0]

                depth_path = os.path.join(dest_depth, name)
                save_depth(depth_path, depth)

                pose_path = os.path.join(dest_pose, name)
                save_pose(pose_path, pose_past_frame_to_current)

                curr_pose = app.get_pose_to_target(-1)
                if curr_pose is not None:
                    save_pose_txt(args, name, curr_pose)

                if args.is_evaluate_depth:
                    gt_file_path = os.path.join(args.gt_depth,
                                                "{}.png".format(name))
                    err = get_error(args, name, depth, gt_file_path)
                    errors.append(err)

            states.append(state)
            pbar.update(1)

        if args.is_evaluate_depth:
            mean_errors = np.array(errors).mean(0)
            save_results = os.path.join(args.dest, "results.txt")
            save_depth_err_results(save_results, "mean values", mean_errors)

    # NOTE: final dump of log.txt file
    with open(os.path.join(args.dest, "log.txt"), "w") as f:
        for i, state in enumerate(states):
            f.write(f"{i}: {state}\n")

    if args.is_evaluate_pose:
        print("Begin to evaluate predicted pose")
        evaluate_pose(args)
        eval_tool = KittiEvalOdom()
        eval_tool.eval(args)
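
The IMU handling in Example #3 is a simple time alignment: skip samples recorded before the first frame, then, for each subsequent frame, consume every sample whose timestamp does not exceed the frame timestamp. A standalone sketch of that logic with hypothetical names (it also adds the bounds check the inline loops omit):

def collect_imu_until(acc_data, gyro_data, imu_timestamps, frame_ts, start_idx):
    """Return IMU measurements with timestamp <= frame_ts, starting at start_idx.

    Each measurement is [acc x, acc y, acc z, gyro x, gyro y, gyro z, timestamp];
    the second return value is the index of the first unconsumed IMU sample.
    """
    measurements = []
    idx = start_idx
    while idx < len(imu_timestamps) and imu_timestamps[idx] <= frame_ts:
        measurements.append(list(acc_data[idx]) + list(gyro_data[idx]) + [imu_timestamps[idx]])
        idx += 1
    return measurements, idx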
Example #4
def run(args):
    """Run SLAM system for each frame in the dataset and save depths and poses.

    Args:
        args: command line arguments
    """
    setting_file = args.settings
    if not os.path.exists(setting_file):
        raise ValueError(f"Cannot find setting file at {setting_file}")
    if args.pose_id < -1:
        raise ValueError("Pose index must be -1 or >= 0")

    with open(args.settings) as fs:
        settings_yaml = yaml.safe_load(fs)
        print("\nAlgorithm " + settings_yaml["SLAM.alg"] + " has been set\n")

    print("Dataset selected: " + os.path.basename(args.dataset) + "\n")

    app = slampy.System(setting_file, slampy.Sensor.MONOCULAR)

    print("\n")

    # TODO: use a generic loader, not only the KITTI one

    if args.data_type == "TUM":
        image_filenames, timestamps = load_images_TUM(args.dataset, "rgb.txt")
    elif args.data_type == "KITTI_VO":
        image_filenames, timestamps = load_images_KITTI_VO(args.dataset)
    elif args.data_type == "OTHERS":
        image_filenames, timestamps = load_images_OTHERS(args.dataset)

    num_images = len(image_filenames)

    dest_depth = os.path.join(args.dest, "depth")
    dest_pose = os.path.join(args.dest, "pose")

    create_dir(dest_depth)
    create_dir(dest_pose)

    states = []
    errors = []

    with tqdm(total=num_images) as pbar:
        for idx, image_name in enumerate(image_filenames):
            # TODO: it is image loader duty to provide correct images
            # image_name = image_name.replace(".png", ".jpg")
            image = cv2.imread(image_name)
            if image is None:
                raise ValueError(f"failed to load image {image_name}")

            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
           
            state = app.process_image_mono(image, timestamps[idx])

            # NOTE: we build a default invalid depth in the case of system failure
            if state == slampy.State.OK:
                depth = app.get_depth() 
                pose_past_frame_to_current = app.get_pose_to_target(
                    precedent_frame=args.pose_id
                )
                name = os.path.splitext(os.path.basename(image_name))[0] 
                
                depth_path = os.path.join(dest_depth, name)  
                save_depth(depth_path, depth) 

                pose_path = os.path.join(dest_pose, name)
                save_pose(pose_path, pose_past_frame_to_current)

                curr_pose = app.get_pose_to_target(-1) 
                if curr_pose is not None:
                    save_pose_txt(args, name, curr_pose)

                if args.is_evaluate_depth:
                    gt_file_path = os.path.join(args.gt_depth, "{}.png".format(name))
                    err = get_error(args, name, depth, gt_file_path)
                    errors.append(err)

            states.append(state)
            pbar.update(1)
        
        if args.is_evaluate_depth: 
            mean_errors = np.array(errors).mean(0) 
            save_results = os.path.join(args.dest, "results.txt")
            save_depth_err_results(save_results, "mean values", mean_errors)
    

    # NOTE: final dump of log.txt file
    with open(os.path.join(args.dest, "log.txt"), "w") as f:
        for i, state in enumerate(states):
            f.write(f"{i}: {state}\n")

    if args.is_evaluate_pose:
        print("Begin to evaluate predicted pose")
        evaluate_pose(args)
        eval_tool = KittiEvalOdom()
        eval_tool.eval(args)
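
Note that in Examples #3 and #4 image_filenames is never assigned when args.data_type does not match one of the handled values, which only surfaces later as a NameError at len(image_filenames). A small dispatch sketch that fails early instead, built from the loader functions referenced in Example #4:

LOADERS = {
    "TUM": lambda d: load_images_TUM(d, "rgb.txt"),
    "KITTI_VO": load_images_KITTI_VO,
    "OTHERS": load_images_OTHERS,
}

def load_frames(data_type, dataset_dir):
    try:
        loader = LOADERS[data_type]
    except KeyError:
        raise ValueError(f"Unsupported data type: {data_type}")
    # each loader returns (image_filenames, timestamps)
    return loader(dataset_dir)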