Code Example #1
def read_kitti_poses_file(file_path):
    """
    parses pose file in KITTI format (first 3 rows of SE(3) matrix per line)
    :param file_path: the trajectory file path (or file handle)
    :return: trajectory.PosePath3D
    """
    print("read kitti_poses_file")
    raw_mat = csv_read_matrix(file_path, delim=" ", comment_str="#")
    error_msg = ("KITTI pose files must have 12 entries per row "
                 "and no trailing delimiter at the end of the rows (space)")
    if len(raw_mat) > 0 and len(raw_mat[0]) != 12:
        raise FileInterfaceException(error_msg)
    try:
        mat = np.array(raw_mat).astype(float)
    except ValueError:
        raise FileInterfaceException(error_msg)
    # yapf: disable
    poses = [np.array([[r[0], r[1], r[2], r[3]],
                       [r[4], r[5], r[6], r[7]],
                       [r[8], r[9], r[10], r[11]],
                       [0, 0, 0, 1]]) for r in mat]
    # yapf: enable
    if not hasattr(file_path, 'read'):  # if not file handle
        logger.debug("Loaded {} poses from: {}".format(len(poses), file_path))
    return PosePath3D(poses_se3=poses)
Code Example #2
def read_kitti_poses_file(file_path):
    """
    parses pose file in KITTI format (first 3 rows of SE(3) matrix per line)
    :param file_path: the trajectory file path (or file handle)
    :return: trajectory.PosePath3D
    """
    mat = np.array(csv_read_matrix(file_path, delim=" ",
                                   comment_str="#")).astype(float)
    if mat.shape[1] != 12:
        raise FileInterfaceException(
            "KITTI pose files must have 12 entries per row")
    poses = [
        np.array([[r[0], r[1], r[2], r[3]], [r[4], r[5], r[6], r[7]],
                  [r[8], r[9], r[10], r[11]], [0, 0, 0, 1]]) for r in mat
    ]
    if not hasattr(file_path, 'read'):  # if not file handle
        logger.debug("Loaded {} poses from: {}".format(len(poses), file_path))
    return PosePath3D(poses_se3=poses)
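
Both snippets above return an evo trajectory object. Below is a minimal usage sketch, assuming the function is imported from evo.tools.file_interface and that a KITTI-format pose file exists at the hypothetical path poses/00.txt:

from evo.tools import file_interface

# Parse a KITTI ground-truth pose file (12 values per row, one pose per line).
traj = file_interface.read_kitti_poses_file("poses/00.txt")

# PosePath3D stores the parsed poses as 4x4 SE(3) matrices.
print(traj.num_poses)      # number of poses in the trajectory
print(traj.poses_se3[0])   # first homogeneous transformation matrix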
Code Example #3
def main():
    hvd.init()
    args = parser.parse_args()

    if hvd.size() > 1:
        printcolor('----------- DISTRIBUTED DATA PARALLEL -----------', 'cyan')
        device_id = hvd.local_rank()
        torch.cuda.set_device(device_id)

    if hvd.rank() == 0:
        os.makedirs(args.output_dir, exist_ok=True)

    depth_net = DispResnet(version='18_pt')
    depth_net.load_state_dict(torch.load(args.depth_model, map_location='cpu'))
    depth_net = depth_net.cuda()  # move to GPU

    keypoint_net = KeypointResnet()
    keypoint_net.load_state_dict(
        torch.load(args.keypoint_model, map_location='cpu'))
    keypoint_net.cuda()

    def _set_seeds(seed=42):
        """Set Python random seeding and PyTorch seeds.
        Parameters
        ----------
        seed: int, default: 42
            Random number generator seeds for PyTorch and python
        """
        np.random.seed(seed)
        random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

    def _worker_init_fn(worker_id):
        """Worker init fn to fix the seed of the workers"""
        _set_seeds(42 + worker_id)

    if args.run_evaluation:
        all_trel = []
        all_rrel = []
    ########## STACK PREDICTED POSES INTO FULL TRAJECTORY  ##########
    printcolor('Evaluation sequences {}'.format(args.sequence))

    for seq in args.sequence:
        dataset = KITTIODOMETRYDataset(
            args.dataset_dir,
            [seq],
            back_context=0,
            forward_context=1,
            image_shape=(args.input_height, args.input_width),
        )
        trajectory_length = sum(len(imgs) for imgs in dataset.img_files)
        printcolor('Evaluating SEQ {}'.format(seq))

        sampler = None if not (hvd.size() > 1) else \
            torch.utils.data.distributed.DistributedSampler(
                dataset, num_replicas=hvd.size(), rank=hvd.rank())

        dl = DataLoader(dataset,
                        batch_size=1,
                        pin_memory=False,
                        shuffle=False,
                        num_workers=48,
                        worker_init_fn=_worker_init_fn,
                        sampler=sampler)

        all_poses = accumulate_pose_single_frame(
            dataloader=dl,
            depth_net=depth_net,
            keypoint_net=keypoint_net,
            trajectory_length=trajectory_length,
            rank=hvd.rank())

        if hvd.rank() == 0:
            np.savetxt(os.path.join(args.output_dir,
                                    str(seq) + '_ori.txt'),
                       all_poses.reshape(trajectory_length, -1)[:, :12])

        if args.align_trajectory:
            if hvd.rank() == 0:
                printcolor('Computing scale alignment.')
                # Load ground truth poses
                ground_truth_trajectory = file_interface.read_kitti_poses_file(
                    os.path.join(os.path.join(args.dataset_dir, 'poses'), seq)
                    + '.txt')
                # Convert predicted trajectory
                predicted_trajectory = PosePath3D(poses_se3=all_poses)
                # Umeyama alignment with scaling only
                predicted_trajectory_aligned = copy.deepcopy(
                    predicted_trajectory)
                predicted_trajectory_aligned.align(ground_truth_trajectory,
                                                   correct_only_scale=True)
                # Save aligned trajectory
                file_interface.write_kitti_poses_file(
                    os.path.join(args.output_dir, seq) + '.txt',
                    predicted_trajectory_aligned)

        if args.run_evaluation and hvd.rank() == 0:
            pose_test_executable = "./kp3d/externals/cpp/evaluate_odometry"
            p = subprocess.Popen(
                [pose_test_executable, args.dataset_dir, args.output_dir, seq],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            p.wait()

            seq_output = os.path.join(args.output_dir, seq + '_stats.txt')
            with open(seq_output, 'r') as f:
                pose_result = f.readlines()[0].split(' ')
                pose_result = [float(p) for p in pose_result]

            traj_pos_error = pose_result[0]
            traj_rot_error = pose_result[1]
            all_trel.append(traj_pos_error)
            all_rrel.append(traj_rot_error)

            printcolor(
                'SEQ {} -- Mean TRAJ (pos: {:.4f}, rot: {:.4f})'.format(
                    seq, traj_pos_error, traj_rot_error), 'green')

    # all_trel / all_rrel only exist when evaluation was requested.
    if args.run_evaluation:
        printcolor(all_trel)
        printcolor(all_rrel)
Code Example #4
File: helpers.py  Project: ToniRV/evo-1
def fake_path(length):
    return PosePath3D(poses_se3=random_se3_list(length))
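
A short sketch of how such a test helper could be used, assuming the evo metrics API is available; the trajectory length and metric choice below are illustrative only:

from evo.core import metrics

# Two synthetic trajectories of equal length for a smoke test.
traj_ref = fake_path(10)
traj_est = fake_path(10)

# Absolute pose error on the translation part, exercising the pipeline end to end.
ape = metrics.APE(metrics.PoseRelation.translation_part)
ape.process_data((traj_ref, traj_est))
print(ape.get_statistic(metrics.StatisticsType.rmse))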