Example #1
def do_vo_mapping(basepath,
                  seq,
                  ref_cond,
                  frames=None,
                  outfile=None,
                  rgb_dir='rgb'):
    ref_data = vkitti.LocalizationDataset(basepath,
                                          seq,
                                          ref_cond,
                                          frames=frames,
                                          rgb_dir=rgb_dir)

    test_im = ref_data.get_gray(0)
    camera = get_camera(seq, test_im)
    camera.maxdepth = 200.

    # Ground truth
    T_w_c_gt = [SE3.from_matrix(p, normalize=True) for p in ref_data.poses]
    T_0_w = T_w_c_gt[0].inv()

    vo = DenseRGBDPipeline(camera, first_pose=T_0_w)
    # vo.keyframe_trans_thresh = 3.  # meters
    vo.keyframe_trans_thresh = 2.  # meters
    vo.keyframe_rot_thresh = 15. * np.pi / 180.  # rad
    vo.depth_stiffness = 1. / 0.01  # 1/meters
    vo.intensity_stiffness = 1. / 0.2  # 1/intensity (intensity is in [0, 1])
    # vo.intensity_stiffness = 1. / 0.1
    vo.use_motion_model_guess = True
    # vo.min_grad = 0.2
    # vo.loss = HuberLoss(5.0)

    print('Mapping using {}/{}'.format(seq, ref_cond))
    vo.set_mode('map')

    start = time.perf_counter()
    for c_idx in range(len(ref_data)):
        image = ref_data.get_gray(c_idx)
        depth = ref_data.get_depth(c_idx)

        depth[depth >= camera.maxdepth] = -1.
        vo.track(image, depth)
        # vo.track(image, depth, guess=T_w_c_gt[c_idx].inv()) # debug
        end = time.perf_counter()
        print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
            c_idx, len(ref_data), 100. * c_idx / len(ref_data), end - start),
              end='\r')
        start = end

    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    return tm, vo
Example #2
def do_tracking(basepath,
                seq,
                track_cond,
                vo,
                frames=None,
                outfile=None,
                rgb_dir='rgb'):
    track_data = tum_rgbd.LocalizationDataset(basepath,
                                              seq,
                                              track_cond,
                                              frames=frames,
                                              rgb_dir=rgb_dir)

    # Ground truth
    T_w_c_gt = [
        SE3(SO3.from_quaternion(p[0], ordering='xyzw'), p[1])
        for p in track_data.poses
    ]
    T_0_w = T_w_c_gt[0].inv()

    print('Tracking using {}/{}'.format(seq, track_cond))
    vo.set_mode('track')

    start = time.perf_counter()
    for c_idx in range(len(track_data)):
        image = track_data.get_gray(c_idx)
        depth = track_data.get_depth(c_idx)
        try:
            vo.track(image, depth)
            # vo.track(image, depth, guess=T_w_c_gt[c_idx].inv()) # debug
            end = time.perf_counter()
            print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
                c_idx, len(track_data), 100. * c_idx / len(track_data),
                end - start),
                  end='\r')

        except Exception as e:
            end = time.perf_counter()  # vo.track may have raised before end was set
            print('Error on {}/{}'.format(seq, track_cond))
            print(e)
            print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
                c_idx, len(track_data), 100. * c_idx / len(track_data),
                end - start))
            break

        start = end

    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    return tm, vo
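A hedged driver sketch for the two functions above: build the map under one condition, then relocalize under another. The run_map_then_track name, out_dir, and the output filenames are illustrative, not part of the original code.

import os

def run_map_then_track(basepath, seq, ref_cond, track_cond, out_dir):
    # Build the keyframe map under the reference condition, then track the
    # same sequence under a different condition against that map.
    tm_map, vo = do_vo_mapping(
        basepath, seq, ref_cond,
        outfile=os.path.join(out_dir, '{}-{}-map.mat'.format(seq, ref_cond)))
    tm_track, vo = do_tracking(
        basepath, seq, track_cond, vo,
        outfile=os.path.join(out_dir, '{}-{}-track.mat'.format(seq, track_cond)))
    return tm_map, tm_track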
Example #3
def process_ground_truth(trial_strs,
                         tm_path,
                         kitti_path,
                         pose_deltas,
                         eval_type='train',
                         add_reverse=False):

    poses_correction = []
    poses_gt = []
    poses_est = []
    image_quad_paths_rgb = []
    image_quad_paths_mono = []

    tm_mat_files = []
    for t_id, trial_str in enumerate(trial_strs):

        drive_folder = KITTI_SEQS_DICT[trial_str][
            'date'] + '_drive_' + KITTI_SEQS_DICT[trial_str]['drive'] + '_sync'
        data_path = os.path.join(kitti_path,
                                 KITTI_SEQS_DICT[trial_str]['date'],
                                 drive_folder)
        tm_mat_file = os.path.join(
            tm_path, KITTI_SEQS_DICT[trial_str]['date'] + '_drive_' +
            KITTI_SEQS_DICT[trial_str]['drive'] + '.mat')

        try:
            tm = TrajectoryMetrics.loadmat(tm_mat_file)
        except FileNotFoundError:
            tm_mat_file = os.path.join(tm_path, trial_str + '.mat')
            tm = TrajectoryMetrics.loadmat(tm_mat_file)

        image_paths_rgb = get_image_paths(data_path, trial_str, pose_deltas,
                                          'rgb', eval_type, add_reverse)
        image_paths_mono = get_image_paths(data_path, trial_str, pose_deltas,
                                           'mono', eval_type, add_reverse)

        (T_corr, T_gt, T_est) = compute_vo_pose_errors(tm, pose_deltas,
                                                       eval_type, add_reverse)

        if len(image_paths_rgb) != len(T_corr):
            raise AssertionError(
                'Number of image paths and number of poses differ. Image quads: {}. Poses: {}.'
                .format(len(image_paths_rgb), len(T_corr)))

        image_quad_paths_rgb.extend(image_paths_rgb)
        image_quad_paths_mono.extend(image_paths_mono)

        poses_correction.extend(T_corr)
        poses_gt.extend(T_gt)
        poses_est.extend(T_est)

        tm_mat_files.append(tm_mat_file)

    return (image_quad_paths_rgb, image_quad_paths_mono, poses_correction,
            poses_gt, poses_est, tm_mat_files)
Example #4
def run_vo_kitti(basedir, date, drive, frames, outfile=None):
    # Load KITTI data
    dataset = pykitti.raw(basedir, date, drive, frames=frames, imformat='cv2')

    first_oxts = dataset.oxts[0]
    T_cam0_imu = SE3.from_matrix(dataset.calib.T_cam0_imu)
    T_cam0_imu.normalize()
    T_0_w = T_cam0_imu.dot(SE3.from_matrix(first_oxts.T_w_imu).inv())
    T_0_w.normalize()

    # Create the camera
    test_im = np.array(next(dataset.cam0))
    fu = dataset.calib.K_cam0[0, 0]
    fv = dataset.calib.K_cam0[1, 1]
    cu = dataset.calib.K_cam0[0, 2]
    cv = dataset.calib.K_cam0[1, 2]
    b = dataset.calib.b_gray
    h, w = test_im.shape
    camera = StereoCamera(cu, cv, fu, fv, b, w, h)

    # Ground truth
    T_w_c_gt = [
        SE3.from_matrix(o.T_w_imu).dot(T_cam0_imu.inv()) for o in dataset.oxts
    ]

    # Pipeline
    vo = DenseStereoPipeline(camera, first_pose=T_0_w)
    # Skip the highest resolution level
    vo.pyrlevel_sequence.pop()
    vo.pyr_cameras.pop()

    start = time.perf_counter()
    for c_idx, impair in enumerate(dataset.gray):
        vo.track(np.array(impair[0]), np.array(impair[1]))
        # vo.track(impair[0], impair[1], guess=T_w_c_gt[c_idx].inv())
        end = time.perf_counter()
        print('Image {}/{} | {:.3f} s'.format(c_idx, len(dataset),
                                              end - start))
        start = end

    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    # Clean up
    del vo

    return tm
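A hedged call sketch for run_vo_kitti; the basedir, date, drive, and output filename below are placeholders for a local KITTI raw download, and mean_err(rot_unit='deg') is the same TrajectoryMetrics call used in Example #9 below.

# Placeholder identifiers; point these at a local KITTI raw download.
basedir = '/path/to/kitti/raw'
date = '2011_09_30'
drive = '0018'
frames = range(0, 100)

tm = run_vo_kitti(basedir, date, drive, frames, outfile='kitti_vo.mat')
trans_err, rot_err = tm.mean_err(rot_unit='deg')
print('Mean trans. err: {:.3f} m | mean rot. err: {:.3f} deg'.format(trans_err, rot_err))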
Example #5
def make_plots(data_dir, file_prefix, segs, topdown_plane='xy', Twv_gt=None):
    file_rgb = file_prefix + '-rgb.mat'
    file_cat = file_prefix + '-cat.mat'

    tm_dict = {
        'Original': TrajectoryMetrics.loadmat(os.path.join(data_dir,
                                                           file_rgb)),
        'CAT': TrajectoryMetrics.loadmat(os.path.join(data_dir, file_cat))
    }

    vis = TrajectoryVisualizer(tm_dict)

    try:
        outfile = os.path.join(data_dir,
                               '{}_{}'.format(file_prefix, 'norm_err.pdf'))
        f, ax = vis.plot_norm_err(outfile=outfile,
                                  figsize=(9, 2.5),
                                  tight_layout=False,
                                  fontsize=12,
                                  err_xlabel='Frame')
        plt.close(f)
    except Exception as e:
        print(e)

    try:
        outfile = os.path.join(data_dir,
                               '{}_{}'.format(file_prefix, 'seg_err.pdf'))
        f, ax = vis.plot_segment_errors(segs,
                                        outfile=outfile,
                                        figsize=(9, 2.5),
                                        tight_layout=False,
                                        fontsize=12,
                                        err_xlabel='Frame')
        plt.close(f)
    except Exception as e:
        print(e)

    try:
        if Twv_gt is not None:
            tm_dict['Original'].Twv_gt = Twv_gt

        outfile = os.path.join(data_dir,
                               '{}_{}'.format(file_prefix, 'topdown.pdf'))
        f, ax = vis.plot_topdown(topdown_plane,
                                 outfile=outfile,
                                 figsize=(4, 3),
                                 tight_layout=False,
                                 use_endpoint_markers=True,
                                 fontsize=12)
        plt.close(f)
    except Exception as e:
        print(e)
Example #6
def generate_trajectory_metrics(gt_traj, est_traj, name='', seq='', mode=''):
    gt_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]
    est_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in est_traj]
    tm = TrajectoryMetrics(gt_traj_se3, est_traj_se3, convention='Twv')

    est_mATE_trans, est_mATE_rot = tm.mean_err()
    est_mATE_rot = est_mATE_rot * 180 / np.pi
    print("{} ({}) mean trans. error: {} | mean rot. error: {}".format(
        name, mode, est_mATE_trans, est_mATE_rot))

    seg_lengths = list(range(100, 801, 100))
    _, est_seg_errs = tm.segment_errors(seg_lengths, rot_unit='rad')
    est_seg_err_trans = np.mean(est_seg_errs[:, 1]) * 100
    est_seg_err_rot = 100 * np.mean(est_seg_errs[:, 2]) * 180 / np.pi
    print("{} ({}) mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(
        name, mode, est_seg_err_trans, est_seg_err_rot))
    return tm, (seq, name, mode, est_mATE_trans.round(3), est_mATE_rot.round(3),
                est_seg_err_trans.round(3), est_seg_err_rot.round(3))
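A hedged, self-contained sketch of generate_trajectory_metrics on a synthetic straight-line trajectory (long enough for the 100-800 m segment lengths); the name/seq/mode strings are illustrative, and comparing the ground truth against itself should report near-zero errors.

# Synthetic ground truth: 1001 poses spaced 1 m apart along x (1000 m total).
gt_traj = []
for i in range(1001):
    T = np.eye(4)
    T[0, 3] = float(i)
    gt_traj.append(T)

tm, summary = generate_trajectory_metrics(gt_traj, gt_traj,
                                           name='demo', seq='00', mode='test')
# summary = (seq, name, mode, mATE trans, mATE rot, seg. trans %, seg. rot deg/100m)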
Example #7
def compute_trajectory(pose_vec, gt_traj, method='odom'):
    est_traj = [gt_traj[0]]
    cum_dist = [0]
    for i in range(0, pose_vec.shape[0]):
        # classically estimated traj
        dT = SE3.exp(pose_vec[i])
        new_est = SE3.as_matrix(
            (dT.dot(SE3.from_matrix(est_traj[i], normalize=True).inv())).inv())
        est_traj.append(new_est)
        cum_dist.append(cum_dist[i] + np.linalg.norm(dT.trans))

    gt_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]
    est_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in est_traj]

    tm_est = TrajectoryMetrics(gt_traj_se3, est_traj_se3, convention='Twv')
    est_mean_trans, est_mean_rot = tm_est.mean_err()
    est_mean_rot = (est_mean_rot * 180 / np.pi).round(3)
    est_mean_trans = est_mean_trans.round(3)

    seg_lengths = list(range(100, 801, 100))
    _, seg_errs_est = tm_est.segment_errors(seg_lengths, rot_unit='rad')

    rot_seg_err = (100 * np.mean(seg_errs_est[:, 2]) * 180 / np.pi).round(3)
    trans_seg_err = (np.mean(seg_errs_est[:, 1]) * 100).round(3)

    if np.isnan(trans_seg_err):
        max_dist = cum_dist[-1] - cum_dist[-1] % 100 + 1 - 100
        seg_lengths = list(range(100, int(max_dist), 100))
        _, seg_errs_est = tm_est.segment_errors(seg_lengths, rot_unit='rad')

        rot_seg_err = (100 * np.mean(seg_errs_est[:, 2]) * 180 /
                       np.pi).round(3)
        trans_seg_err = (np.mean(seg_errs_est[:, 1]) * 100).round(3)

    print("{} mean trans. error: {} | mean rot. error: {}".format(
        method, est_mean_trans, est_mean_rot))
    print("{} mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(
        method, trans_seg_err, rot_seg_err))

    errors = (est_mean_trans, est_mean_rot, trans_seg_err, rot_seg_err)

    return np.array(est_traj), np.array(gt_traj), errors, np.array(cum_dist)
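A hedged, self-contained sketch of compute_trajectory's inputs: an N x 6 array of se(3) increments (liegroups ordering, translation first) and N+1 ground-truth 4x4 poses. Because the loop above composes est_traj[i+1] = est_traj[i] * SE3.exp(pose_vec[i]).inv(), a -1 m step in the increment advances the estimate by +1 m, so this synthetic case should report near-zero errors.

# Synthetic ground truth: 1001 poses spaced 1 m apart along x.
gt_traj = []
for i in range(1001):
    T = np.eye(4)
    T[0, 3] = float(i)
    gt_traj.append(T)

# 1000 identical se(3) increments, [rho, phi] with rho = [-1, 0, 0].
pose_vec = np.tile(np.array([-1., 0., 0., 0., 0., 0.]), (1000, 1))
est, gt, errors, cum_dist = compute_trajectory(pose_vec, gt_traj, method='odom')
print(errors)  # (mean trans, mean rot, seg. trans %, seg. rot deg/100m)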
Example #8
def process_ground_truth(trial_strs,
                         tm_path,
                         pose_deltas,
                         eval_type='train',
                         add_reverse=False,
                         min_turning_angle=0.):

    T_21_gt_all = []
    T_21_est_all = []
    pose_ids = []
    sequences = []
    tm_mat_files = []

    for t_id, trial_str in enumerate(trial_strs):

        tm_mat_file = os.path.join(
            tm_path, KITTI_SEQS_DICT[trial_str]['date'] + '_drive_' +
            KITTI_SEQS_DICT[trial_str]['drive'] + '.mat')

        try:
            tm = TrajectoryMetrics.loadmat(tm_mat_file)
        except FileNotFoundError:
            tm_mat_file = os.path.join(tm_path, trial_str + '.mat')
            tm = TrajectoryMetrics.loadmat(tm_mat_file)

        (T_21_gt, T_21_est, pair_pose_ids,
         seqs) = compute_vo_pose_errors(tm, pose_deltas, trial_str, eval_type,
                                        add_reverse, min_turning_angle)

        T_21_gt_all.extend(T_21_gt)
        T_21_est_all.extend(T_21_est)
        pose_ids.extend(pair_pose_ids)
        sequences.extend(seqs)
        tm_mat_files.append(tm_mat_file)  # append the path itself; extend() would add its characters

    return (pose_ids, sequences, T_21_gt_all, T_21_est_all, tm_mat_files)
Example #9
def entry_from_file(data_dir, file):
    tm = TrajectoryMetrics.loadmat(os.path.join(data_dir, file))

    tokens = re.split(r'; |-|\.', file)
    trans_err, rot_err = tm.mean_err(rot_unit='deg')
    length = tm.cum_dists[-1]

    ref_seq = tokens[0]
    track_seq = ref_seq if 'vo' in tokens else tokens[1]

    if 'cat' in tokens:
        ref_seq += ' (CAT)'
        track_seq += ' (CAT)'

    line = '{},{},{},{},{},{}'.format(ref_seq, track_seq, tm.num_poses, length,
                                      trans_err, rot_err)
    return line
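entry_from_file assumes result files named like '<ref_seq>-vo-<rgb|cat>.mat' for VO runs and '<ref_seq>-<track_seq>-<rgb|cat>.mat' for relocalization runs (the convention produced by main() below); a hedged illustration with hypothetical paths and filenames:

# Hypothetical directory and filenames; the .mat files must exist on disk
# for TrajectoryMetrics.loadmat to succeed.
print(entry_from_file('/path/to/results', '0001-vo-rgb.mat'))    # VO on seq 0001, original images
print(entry_from_file('/path/to/results', '0001-0002-cat.mat'))  # relocalize 0002 against 0001, CAT images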
Example #10
            ## Compute Trajectories
            gt_traj = test_dset_loaders.dataset.raw_gt_trials[0]
            est, gt, errors, cum_dist = tt(unscaled_pose_vec,
                                           gt_traj,
                                           method='unscaled')
            scaled_est, gt, errors, cum_dist = tt(scaled_pose_vec,
                                                  gt_traj,
                                                  method='scaled')

            gt_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]
            est_se3 = [SE3.from_matrix(T, normalize=True) for T in est]
            scaled_se3 = [
                SE3.from_matrix(T, normalize=True) for T in scaled_est
            ]

            est_tm = TrajectoryMetrics(gt_traj_se3, est_se3, convention='Twv')
            scaled_tm = TrajectoryMetrics(gt_traj_se3,
                                          scaled_se3,
                                          convention='Twv')

            tm_dict[method] = {
                'unscaled': est_tm,
                'scaled': scaled_tm,
            }

        plotting_dict = {
            'Ours': tm_dict['scaled']['unscaled'],
            'DNet': tm_dict['unscaled']['scaled']
        }

        est_vis = visualizers.TrajectoryVisualizer(plotting_dict)
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset',
                        type=str,
                        help='which dataset?',
                        choices=['ethl_dataset', 'virtual-kitti'])
    parser.add_argument('--no_vo', action='store_true', help='skip VO?')
    parser.add_argument('--no_reloc',
                        action='store_true',
                        help='skip relocalization?')
    args = parser.parse_args()

    data_dir = '/media/raid5-array/experiments/cat-net/{}/pyslam'.format(
        args.dataset)

    if args.dataset == 'ethl_dataset':
        topdown_plane = 'xy'
        segs = [0.25, 0.5, 1., 2., 3.]  # meters
    elif args.dataset == 'virtual-kitti':
        topdown_plane = 'xz'
        segs = [25., 50., 100., 200., 300.]  # meters
    elif args.dataset == 'affine-kitti':
        topdown_plane = 'xy'
        segs = [100., 200., 400., 600., 800.]  # meters

    file_tags = ['rgb', 'cat']
    csv_header = [
        'Reference', 'Tracking', 'Poses', 'Length', 'Avg Trans Error (m)',
        'Avg Rot Error (deg)'
    ]

    seq_dirs = [os.path.join(data_dir, d) for d in os.listdir(data_dir)]

    for seq_dir in seq_dirs:
        rgb_files = [
            f for f in os.listdir(seq_dir)
            if f.split('.')[-1] == 'mat' and f[0] != '.' and 'rgb' in f
        ]
        file_prefixes = ['-'.join(f.split('-')[:-1]) for f in rgb_files]
        vo_file_prefixes = sorted([f for f in file_prefixes if '-vo' in f])
        reloc_file_prefixes = sorted(
            [f for f in file_prefixes if '-vo' not in f])

        if not args.no_vo:
            outfile = os.path.join(seq_dir, 'vo.csv')
            print(outfile)
            with open(outfile, 'w') as f:
                f.write(','.join(csv_header) + '\n')
                for vo_file_prefix in vo_file_prefixes:
                    for tag in file_tags:
                        vo_file = '{}-{}.mat'.format(vo_file_prefix, tag)
                        line = entry_from_file(seq_dir, vo_file)
                        f.write(line + '\n')

                    make_plots(seq_dir, vo_file_prefix, segs, topdown_plane)

        if not args.no_reloc:
            outfile = os.path.join(seq_dir, 'reloc.csv')
            print(outfile)
            with open(outfile, 'w') as f:
                f.write(','.join(csv_header) + '\n')
                for reloc_file_prefix in reloc_file_prefixes:
                    for tag in file_tags:
                        reloc_file = '{}-{}.mat'.format(reloc_file_prefix, tag)
                        line = entry_from_file(seq_dir, reloc_file)
                        f.write(line + '\n')

                    gt_seq_file = '{}-vo-rgb.mat'.format(
                        reloc_file_prefix.split('-')[0])
                    tm_gt = TrajectoryMetrics.loadmat(
                        os.path.join(seq_dir, gt_seq_file))
                    make_plots(seq_dir, reloc_file_prefix, segs, topdown_plane,
                               tm_gt.Twv_gt)
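The excerpt above defines main() without an entry-point guard; a conventional one (with a hypothetical script name in the comment) would be:

if __name__ == '__main__':
    # e.g. python summarize_pyslam_results.py virtual-kitti --no_reloc
    main()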
Example #12
def test_trajectory(device, pose_model, spatial_trans, dset, epoch):
    # Put both networks in evaluation mode (eval() disables dropout and uses
    # the running batch-norm statistics).
    pose_model.train(False)
    pose_model.eval()
    spatial_trans.train(False)
    spatial_trans.eval()
    
    # Initialize the relevant outputs
    full_corr_lie_alg_stacked, rot_corr_lie_alg_stacked, gt_lie_alg_stacked, \
        vo_lie_alg_stacked, corrections_stacked, gt_corrections_stacked = \
        np.empty((0, 6)), np.empty((0, 6)), np.empty((0, 6)), \
        np.empty((0, 6)), np.empty((0, 6)), np.empty((0, 6))

    for data in dset:
        imgs, gt_lie_alg, intrinsics, vo_lie_alg, gt_correction = data
        gt_lie_alg = gt_lie_alg.type(torch.FloatTensor).to(device)
        vo_lie_alg = vo_lie_alg.type(torch.FloatTensor).to(device)
        img_list = []
        for im in imgs:
            img_list.append(im.to(device))

        corr, exp_mask, disp = pose_model(img_list[0:3], vo_lie_alg)
        exp_mask, disp = exp_mask[0], disp[0][:, 0]
        corr_rot = torch.clone(corr)
        corr_rot[:, 0:3] = 0

        corrected_pose = se3_log_exp(corr, vo_lie_alg)
        corrected_pose_rot_only = se3_log_exp(corr_rot, vo_lie_alg)

        corrections_stacked = np.vstack((corrections_stacked, corr.cpu().detach().numpy()))
        gt_corrections_stacked = np.vstack((gt_corrections_stacked, gt_correction.cpu().detach().numpy()))
        full_corr_lie_alg_stacked = np.vstack((full_corr_lie_alg_stacked, corrected_pose.cpu().detach().numpy()))
        rot_corr_lie_alg_stacked = np.vstack((rot_corr_lie_alg_stacked, corrected_pose_rot_only.cpu().detach().numpy()))
        gt_lie_alg_stacked = np.vstack((gt_lie_alg_stacked, gt_lie_alg.cpu().detach().numpy()))
        vo_lie_alg_stacked = np.vstack((vo_lie_alg_stacked, vo_lie_alg.cpu().detach().numpy()))

    est_traj, corr_traj, corr_traj_rot, gt_traj = [], [], [], []
    gt_traj = dset.dataset.raw_gt_trials[0]
    est_traj.append(gt_traj[0])
    corr_traj.append(gt_traj[0])
    corr_traj_rot.append(gt_traj[0])

    cum_dist = [0]
    for i in range(0, full_corr_lie_alg_stacked.shape[0]):
        # classically estimated traj
        dT = SE3.exp(vo_lie_alg_stacked[i])
        new_est = SE3.as_matrix((dT.dot(SE3.from_matrix(est_traj[i], normalize=True).inv())).inv())
        est_traj.append(new_est)
        cum_dist.append(cum_dist[i] + np.linalg.norm(dT.trans))

        # corrected traj (rotation only)
        dT = SE3.exp(rot_corr_lie_alg_stacked[i])
        new_est = SE3.as_matrix((dT.dot(SE3.from_matrix(corr_traj_rot[i], normalize=True).inv())).inv())
        corr_traj_rot.append(new_est)

        # corrected traj (full pose)
        dT = SE3.exp(full_corr_lie_alg_stacked[i])
        new_est = SE3.as_matrix((dT.dot(SE3.from_matrix(corr_traj[i], normalize=True).inv())).inv())
        corr_traj.append(new_est)

    gt_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]
    est_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in est_traj]
    corr_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in corr_traj]
    corr_traj_rot_se3 = [SE3.from_matrix(T, normalize=True) for T in corr_traj_rot]

    tm_est = TrajectoryMetrics(gt_traj_se3, est_traj_se3, convention='Twv')
    tm_corr = TrajectoryMetrics(gt_traj_se3, corr_traj_se3, convention='Twv')
    tm_corr_rot = TrajectoryMetrics(gt_traj_se3, corr_traj_rot_se3, convention='Twv')
    
    # Always compute the mean and segment errors (the return values below
    # depend on them); only print the summary when epoch >= 0.
    est_mean_trans, est_mean_rot = tm_est.mean_err()
    corr_mean_trans, corr_mean_rot = tm_corr.mean_err()
    corr_rot_mean_trans, corr_rot_mean_rot = tm_corr_rot.mean_err()

    seg_lengths = list(range(100, 801, 100))
    _, seg_errs_est = tm_est.segment_errors(seg_lengths, rot_unit='rad')
    _, seg_errs_corr = tm_corr.segment_errors(seg_lengths, rot_unit='rad')
    _, seg_errs_corr_rot = tm_corr_rot.segment_errors(seg_lengths, rot_unit='rad')

    if epoch >= 0:
        print("Odom. mean trans. error: {} | mean rot. error: {}".format(est_mean_trans, est_mean_rot * 180 / np.pi))
        print("Corr. mean trans. error: {} | mean rot. error: {}".format(corr_mean_trans, corr_mean_rot * 180 / np.pi))
        print("Corr. (rot. only) mean trans. error: {} | mean rot. error: {}".format(corr_rot_mean_trans, corr_rot_mean_rot * 180 / np.pi))
        print("Odom. mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(np.mean(seg_errs_est[:, 1]) * 100, 100 * np.mean(seg_errs_est[:, 2]) * 180 / np.pi))
        print("Corr. mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(np.mean(seg_errs_corr[:, 1]) * 100, 100 * np.mean(seg_errs_corr[:, 2]) * 180 / np.pi))
        print("Corr. (rot. only) mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(np.mean(seg_errs_corr_rot[:, 1]) * 100, 100 * np.mean(seg_errs_corr_rot[:, 2]) * 180 / np.pi))

    rot_seg_err = 100 * np.mean(seg_errs_corr_rot[:, 2]) * 180 / np.pi

    return corrections_stacked, gt_corrections_stacked, full_corr_lie_alg_stacked, vo_lie_alg_stacked, gt_lie_alg_stacked, \
        np.array(corr_traj), np.array(corr_traj_rot), np.array(est_traj), np.array(gt_traj), rot_seg_err, corr_rot_mean_trans, np.array(cum_dist)
Example #13
eval_dsets = {'test': test_dset_loaders}
Reconstructor = stn.Reconstructor().to(device)
model = mono_model_joint.joint_model(
    num_img_channels=(6 + 2 * config['use_flow']),
    output_exp=args.exploss,
    dropout_prob=config['dropout_prob']).to(device)
model.load_state_dict(torch.load(pretrained_path))

_, _, _, _, _, corr_traj, corr_traj_rot, est_traj, gt_traj, _, _, _ = test_trajectory(
    device, model, Reconstructor, test_dset_loaders, 0)

est_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in est_traj]
corr_traj_rot_se3 = [SE3.from_matrix(T, normalize=True) for T in corr_traj_rot]
gt_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]

dense_tm = TrajectoryMetrics(gt_traj_se3, gt_traj_se3, convention='Twv')
est_tm = TrajectoryMetrics(gt_traj_se3, est_traj_se3, convention='Twv')
corr_tm = TrajectoryMetrics(gt_traj_se3, corr_traj_rot_se3, convention='Twv')

tm_dict = {
    'Dense': dense_tm,
    'libviso2-s': est_tm,
    'Ours (Gen.)': corr_tm,
}
est_vis = visualizers.TrajectoryVisualizer(tm_dict)
fig, ax = est_vis.plot_topdown(
    which_plane='xy',
    plot_gt=False,
    outfile='paper_plots_and_data/figs/{}.pdf'.format(seq[0]),
    title=r'{}'.format(seq[0]))