def compute_target(self, idx, target_idx, source_idx):
        # Compute the timestamp difference dt between target and source frames
        # dt = np.abs(self.ts_samples[idx, target_idx] - self.ts_samples[idx, source_idx])
        dt = self.ts_samples[idx, target_idx] - self.ts_samples[idx, source_idx]

        #compute Tau_gt
        T2 = SE3.from_matrix(self.gt_samples[idx, target_idx, :, :],
                             normalize=True).inv()
        T1 = SE3.from_matrix(self.gt_samples[idx, source_idx, :, :],
                             normalize=True)
        # Pose change from source to target (used to reconstruct the source frame from the target)
        dT_gt = T2.dot(T1)
        gt_lie_alg = dT_gt.log()

        #compute Tau_vo
        T2 = SE3.from_matrix(self.vo_samples[idx, target_idx, :, :],
                             normalize=True).inv()
        T1 = SE3.from_matrix(self.vo_samples[idx, source_idx, :, :],
                             normalize=True)
        dT_vo = T2.dot(T1)
        vo_lie_alg = dT_vo.log()

        if self.config['estimator_type'] == 'mono':
            if np.linalg.norm(vo_lie_alg[0:3]) >= 1e-8:
                # Rescale the monocular VO translation to the ground-truth scale
                scale = np.linalg.norm(gt_lie_alg[0:3]) / np.linalg.norm(
                    vo_lie_alg[0:3])
                vo_lie_alg[0:3] = scale * vo_lie_alg[0:3]
                dT_vo = SE3.exp(vo_lie_alg)

        gt_correction = (dT_gt.dot(dT_vo.inv())).log()
        return gt_lie_alg, vo_lie_alg, gt_correction, dt
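# Standalone sanity-check sketch (not part of the class above): it assumes
# SE3 is the numpy SE3 class from the liegroups package, and verifies the
# correction convention used here, gt_correction = log(dT_gt . dT_vo^-1),
# so that SE3.exp(gt_correction).dot(dT_vo) recovers dT_gt.
import numpy as np
from liegroups import SE3

dT_gt = SE3.exp(np.array([0.50, 0.00, 0.10, 0.010, 0.020, 0.030]))  # hypothetical ground-truth increment
dT_vo = SE3.exp(np.array([0.48, 0.01, 0.09, 0.012, 0.018, 0.031]))  # hypothetical VO increment
gt_correction = (dT_gt.dot(dT_vo.inv())).log()
assert np.allclose(SE3.exp(gt_correction).dot(dT_vo).as_matrix(), dT_gt.as_matrix())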
Example #2
def load_brookshire_data(data_filename):
    data_dict = sio.loadmat(data_filename)
    pose_array = data_dict['T_v_rel_list']
    T_v_rel = [SE3.from_matrix(pose_array[:, :, i]) for i in range(pose_array.shape[2])]
    pose_array = data_dict['T_c_rel_list']
    T_c_rel = [SE3.from_matrix(pose_array[:, :, i]) for i in range(pose_array.shape[2])]
    extcal = data_dict['extcal']
    return T_v_rel, T_c_rel, extcal
Example #3
def load_data_1(data_filename):
    # Data loader for Emmett's synthetic datasets
    data_dict = sio.loadmat(data_filename)
    pose_array = data_dict['T_vki_list']
    T_vki = [SE3.from_matrix(pose_array[i, :, :]) for i in range(pose_array.shape[0])]
    pose_array = data_dict['T_ci_list']
    T_ci = [SE3.from_matrix(pose_array[i, :, :]) for i in range(pose_array.shape[0])]
    extcal = data_dict['extcal']
    return T_vki, T_ci, extcal
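# The two loaders above differ only in how the pose stack is indexed: the
# Brookshire-style .mat files store poses as a 4x4xN array (last axis indexes
# poses), while the synthetic datasets store them as Nx4x4. A minimal sketch of
# that difference, assuming SE3 is the numpy SE3 from the liegroups package:
import numpy as np
from liegroups import SE3

stack_last = np.stack([np.eye(4)] * 3, axis=2)   # 4x4x3, Brookshire-style layout
stack_first = np.stack([np.eye(4)] * 3, axis=0)  # 3x4x4, synthetic-dataset layout
poses_a = [SE3.from_matrix(stack_last[:, :, i]) for i in range(stack_last.shape[2])]
poses_b = [SE3.from_matrix(stack_first[i, :, :]) for i in range(stack_first.shape[0])]
assert len(poses_a) == len(poses_b) == 3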
Example #4
def run_vo_kitti(basedir, date, drive, frames, outfile=None):
    # Load KITTI data
    dataset = pykitti.raw(basedir, date, drive, frames=frames, imformat='cv2')

    first_oxts = dataset.oxts[0]
    T_cam0_imu = SE3.from_matrix(dataset.calib.T_cam0_imu)
    T_cam0_imu.normalize()
    T_0_w = T_cam0_imu.dot(SE3.from_matrix(first_oxts.T_w_imu).inv())
    T_0_w.normalize()

    # Create the camera
    test_im = np.array(next(dataset.cam0))
    fu = dataset.calib.K_cam0[0, 0]
    fv = dataset.calib.K_cam0[1, 1]
    cu = dataset.calib.K_cam0[0, 2]
    cv = dataset.calib.K_cam0[1, 2]
    b = dataset.calib.b_gray
    h, w = test_im.shape
    camera = StereoCamera(cu, cv, fu, fv, b, w, h)

    # Ground truth
    T_w_c_gt = [
        SE3.from_matrix(o.T_w_imu).dot(T_cam0_imu.inv()) for o in dataset.oxts
    ]

    # Pipeline
    vo = DenseStereoPipeline(camera, first_pose=T_0_w)
    # Skip the highest resolution level
    vo.pyrlevel_sequence.pop()
    vo.pyr_cameras.pop()

    start = time.perf_counter()
    for c_idx, impair in enumerate(dataset.gray):
        vo.track(np.array(impair[0]), np.array(impair[1]))
        # vo.track(impair[0], impair[1], guess=T_w_c_gt[c_idx].inv())
        end = time.perf_counter()
        print('Image {}/{} | {:.3f} s'.format(c_idx, len(dataset),
                                              end - start))
        start = end

    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    # Clean up
    del vo

    return tm
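# Hypothetical driver for run_vo_kitti; the base directory and output filename
# are placeholders, and numpy is assumed to be imported as np in this module,
# as elsewhere above.
if __name__ == '__main__':
    tm = run_vo_kitti('/path/to/KITTI/raw/', '2011_09_30', '0018',
                      frames=range(0, 100), outfile='kitti_0018_vo.mat')
    mean_trans, mean_rot = tm.mean_err()
    print('mean trans. error: {:.3f} m | mean rot. error: {:.3f} deg'.format(
        mean_trans, mean_rot * 180 / np.pi))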
Example #5
    def compute_target(self, poses, target_idx, source_idx):
        T_c2_w = SE3.from_matrix(poses[target_idx], normalize=True)
        T_c1_w = SE3.from_matrix(poses[source_idx], normalize=True)
        

        T_c2_c1 = T_c2_w.dot(T_c1_w.inv()) 
        gt_lie_alg = T_c2_c1.log()
        vo_lie_alg = np.copy(gt_lie_alg)
        gt_correction = np.zeros(gt_lie_alg.shape)
        dt = 0

        return gt_lie_alg, vo_lie_alg, gt_correction, dt
def generate_trajectory_metrics(gt_traj, est_traj, name='',seq='', mode=''):
    gt_traj_se3 = [SE3.from_matrix(T,normalize=True) for T in gt_traj]
    est_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in est_traj]
    tm = TrajectoryMetrics(gt_traj_se3, est_traj_se3, convention = 'Twv')
    
    est_mATE_trans, est_mATE_rot = tm.mean_err()
    est_mATE_rot = est_mATE_rot*180/np.pi
    print("{} ({}) mean trans. error: {} | mean rot. error: {}".format(name, mode, est_mATE_trans, est_mATE_rot))
    
    seg_lengths = list(range(100,801,100))
    _, est_seg_errs = tm.segment_errors(seg_lengths, rot_unit='rad')
    est_seg_err_trans = np.mean(est_seg_errs[:,1])*100
    est_seg_err_rot = 100*np.mean(est_seg_errs[:,2])*180/np.pi
    print("{} ({}) mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(name, mode, est_seg_err_trans, est_seg_err_rot) )
    return tm, (seq, name, mode, est_mATE_trans.round(3), est_mATE_rot.round(3), est_seg_err_trans.round(3), est_seg_err_rot.round(3))
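# Toy sanity check for generate_trajectory_metrics: a synthetic 1 km straight-line
# trajectory (1 m steps) with the estimate equal to ground truth, so every
# reported error should be ~0. Assumes numpy is imported as np, as above.
def _toy_metrics_check():
    gt = []
    for k in range(1001):
        T = np.eye(4)
        T[0, 3] = float(k)  # advance 1 m along x per frame
        gt.append(T)
    est = [T.copy() for T in gt]
    return generate_trajectory_metrics(gt, est, name='toy', seq='00', mode='sanity')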
def find_loop_closures(traj, cum_dist):
    num_loop_closures = 0
    filtered_loop_closures = 0
    idx_list = []
    for i in range(
            0, traj.shape[0], 8
    ):  # check for loop closures: compare every 8th frame against all future frames
        current_pose = traj[i]
        current_trans = current_pose[0:3, 3]
        current_rot = SO3.from_matrix(current_pose[0:3, 0:3],
                                      normalize=True).to_rpy()
        current_yaw = current_rot[2]

        current_cum_dist = cum_dist[i]
        loop_closure_idx = np.linalg.norm(
            np.abs(current_trans[0:3] - traj[i + 1:, 0:3, 3]), axis=1) <= 7
        dist_idx = (cum_dist[i + 1:] - current_cum_dist) >= 10
        loop_closure_idx = loop_closure_idx & dist_idx

        idx = np.where(loop_closure_idx)[0]

        if idx.size > 0:
            for pose_idx in idx:
                T = traj[i + 1:][pose_idx]
                yaw = SE3.from_matrix(T, normalize=True).rot.to_rpy()[2]
                yaw_diff = np.abs(np.abs(current_yaw) - np.abs(yaw))
                in_range = (yaw_diff <= 0.15) or (np.abs(np.pi - yaw_diff) <= 0.15)
                filtered_loop_closures += in_range
                if in_range:
                    idx_list.append(pose_idx + i)

        num_loop_closures += np.sum(loop_closure_idx)

    return num_loop_closures, filtered_loop_closures, idx_list
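# Toy check for find_loop_closures: an out-and-back path along x revisits its
# earlier positions with the heading reversed by pi, so (filtered) loop closures
# should be reported. Assumes numpy is imported as np, as above.
def _toy_loop_closure_check():
    N = 120
    traj = np.tile(np.eye(4), (N, 1, 1))
    R_yaw_pi = np.array([[-1., 0., 0.],
                         [0., -1., 0.],
                         [0., 0., 1.]])
    for k in range(N):
        if k < 60:
            traj[k, 0, 3] = float(k)        # outbound leg, yaw = 0
        else:
            traj[k, 0, 3] = float(120 - k)  # return leg, yaw = pi
            traj[k, :3, :3] = R_yaw_pi
    cum_dist = np.arange(N, dtype=float)
    return find_loop_closures(traj, cum_dist)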
Example #8
def compute_trajectory(pose_vec, gt_traj, method='odom'):
    est_traj = [gt_traj[0]]
    cum_dist = [0]
    for i in range(0, pose_vec.shape[0]):
        #classically estimated traj
        dT = SE3.exp(pose_vec[i])
        new_est = SE3.as_matrix(
            (dT.dot(SE3.from_matrix(est_traj[i], normalize=True).inv())).inv())
        est_traj.append(new_est)
        cum_dist.append(cum_dist[i] + np.linalg.norm(dT.trans))

    gt_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]
    est_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in est_traj]

    tm_est = TrajectoryMetrics(gt_traj_se3, est_traj_se3, convention='Twv')
    est_mean_trans, est_mean_rot = tm_est.mean_err()
    est_mean_rot = (est_mean_rot * 180 / np.pi).round(3)
    est_mean_trans = est_mean_trans.round(3)

    seg_lengths = list(range(100, 801, 100))
    _, seg_errs_est = tm_est.segment_errors(seg_lengths, rot_unit='rad')

    print('trans. rel. err: {}, rot. rel. err: {}'.format(
        np.mean(tm_est.rel_errors()[0]), np.mean(tm_est.rel_errors()[1])))

    rot_seg_err = (100 * np.mean(seg_errs_est[:, 2]) * 180 / np.pi).round(3)
    trans_seg_err = (np.mean(seg_errs_est[:, 1]) * 100).round(3)

    if np.isnan(trans_seg_err):
        # Trajectory is shorter than the longest segment length; fall back to
        # segment lengths the trajectory actually covers
        max_dist = cum_dist[-1] - cum_dist[-1] % 100 + 1 - 100
        print('max dist', max_dist)
        seg_lengths = list(range(100, int(max_dist), 100))
        _, seg_errs_est = tm_est.segment_errors(seg_lengths, rot_unit='rad')

        rot_seg_err = (100 * np.mean(seg_errs_est[:, 2]) * 180 /
                       np.pi).round(3)
        trans_seg_err = (np.mean(seg_errs_est[:, 1]) * 100).round(3)

    print("{} mean trans. error: {} | mean rot. error: {}".format(
        method, est_mean_trans, est_mean_rot))
    print("{} mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(
        method, trans_seg_err, rot_seg_err))

    errors = (est_mean_trans, est_mean_rot, trans_seg_err, rot_seg_err)

    return np.array(est_traj), np.array(gt_traj), errors, np.array(cum_dist)
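# The trajectory update above composes poses as (dT . T_prev^-1)^-1, which is
# algebraically the same as T_prev . dT^-1; a quick identity check, assuming
# SE3 is the numpy SE3 from the liegroups package:
import numpy as np
from liegroups import SE3

T_prev = SE3.exp(np.array([1.0, 2.0, 0.5, 0.10, -0.20, 0.30]))
dT = SE3.exp(np.array([0.3, 0.0, 0.1, 0.02, 0.01, -0.03]))
lhs = (dT.dot(T_prev.inv())).inv().as_matrix()
rhs = T_prev.dot(dT.inv()).as_matrix()
assert np.allclose(lhs, rhs)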
Example #9
def do_vo_mapping(basepath,
                  seq,
                  ref_cond,
                  frames=None,
                  outfile=None,
                  rgb_dir='rgb'):
    ref_data = vkitti.LocalizationDataset(basepath,
                                          seq,
                                          ref_cond,
                                          frames=frames,
                                          rgb_dir=rgb_dir)

    test_im = ref_data.get_gray(0)
    camera = get_camera(seq, test_im)
    camera.maxdepth = 200.

    # Ground truth
    T_w_c_gt = [SE3.from_matrix(p, normalize=True) for p in ref_data.poses]
    T_0_w = T_w_c_gt[0].inv()

    vo = DenseRGBDPipeline(camera, first_pose=T_0_w)
    # vo.keyframe_trans_thresh = 3.  # meters
    vo.keyframe_trans_thresh = 2.  # meters
    vo.keyframe_rot_thresh = 15. * np.pi / 180.  # rad
    vo.depth_stiffness = 1. / 0.01  # 1/meters
    vo.intensity_stiffness = 1. / 0.2  # 1/ (intensity is in [0,1] )
    # vo.intensity_stiffness = 1. / 0.1
    vo.use_motion_model_guess = True
    # vo.min_grad = 0.2
    # vo.loss = HuberLoss(5.0)

    print('Mapping using {}/{}'.format(seq, ref_cond))
    vo.set_mode('map')

    start = time.perf_counter()
    for c_idx in range(len(ref_data)):
        image = ref_data.get_gray(c_idx)
        depth = ref_data.get_depth(c_idx)

        depth[depth >= camera.maxdepth] = -1.
        vo.track(image, depth)
        # vo.track(image, depth, guess=T_w_c_gt[c_idx].inv()) # debug
        end = time.perf_counter()
        print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
            c_idx, len(ref_data), 100. * c_idx / len(ref_data), end - start),
              end='\r')
        start = end

    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    return tm, vo
Example #10
def do_tracking(basepath,
                seq,
                track_cond,
                vo,
                frames=None,
                outfile=None,
                rgb_dir='rgb'):
    track_data = vkitti.LocalizationDataset(basepath,
                                            seq,
                                            track_cond,
                                            frames=frames,
                                            rgb_dir=rgb_dir)

    # Ground truth
    T_w_c_gt = [SE3.from_matrix(p, normalize=True) for p in track_data.poses]
    T_0_w = T_w_c_gt[0].inv()

    print('Tracking using {}/{}'.format(seq, track_cond))
    vo.set_mode('track')

    start = time.perf_counter()
    for c_idx in range(len(track_data)):
        image = track_data.get_gray(c_idx)
        depth = track_data.get_depth(c_idx)
        try:
            depth[depth >= vo.camera.maxdepth] = -1.
            vo.track(image, depth)
            # vo.track(image, depth, guess=T_w_c_gt[c_idx].inv()) # debug
            end = time.perf_counter()
            print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
                c_idx, len(track_data), 100. * c_idx / len(track_data),
                end - start),
                  end='\r')

        except Exception as e:
            print('Error on {}/{}'.format(seq, track_cond))
            print(e)
            print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
                c_idx, len(track_data), 100. * c_idx / len(track_data),
                end - start))
            break

        start = end

    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    return tm, vo
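# Hypothetical Virtual KITTI driver chaining the two functions above: build the
# keyframe map under a reference condition, then relocalize a different
# condition against it. The path, sequence and condition names are placeholders.
if __name__ == '__main__':
    basepath = '/path/to/vkitti/'
    tm_ref, vo = do_vo_mapping(basepath, '0001', 'clone',
                               outfile='vkitti_0001_map.mat')
    tm_track, vo = do_tracking(basepath, '0001', 'fog', vo,
                               outfile='vkitti_0001_track_fog.mat')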
Example #11
    def perform_ransac(self):
        """Main RANSAC Routine"""
        max_inliers = 0

        # Select random ids for minimal sets
        rand_idx = np.random.randint(self.num_pts,
                                     size=(self.ransac_iters,
                                           self.num_min_set_pts))
        pts_1_sample_stacked = self.pts_1[rand_idx]
        pts_2_sample_stacked = self.pts_2[rand_idx]

        # Compute candidate transforms for all minimal sets in parallel
        # start = time.perf_counter()
        T_21_stacked = compute_transform_fast(pts_1_sample_stacked,
                                              pts_2_sample_stacked, SE3_SHAPE)
        # end = time.perf_counter()
        # print('comp, transform | {}'.format(end - start))

        # Parallel cost computation
        # start = time.perf_counter()
        # inlier_masks_stacked = compute_ransac_cost_fast(
        #     T_21_stacked, self.pts_1, self.obs_2, cam_params, inlier_thresh)
        inlier_masks_stacked = self.compute_ransac_cost(
            T_21_stacked, self.pts_1, self.obs_2, self.camera,
            self.ransac_thresh)

        # end = time.perf_counter()
        # print('comp, masks | {}'.format(end - start))

        # start = time.perf_counter()
        inlier_nums = np.sum(inlier_masks_stacked, axis=1)
        most_inliers_idx = np.argmax(inlier_nums)
        T_21_best = SE3.from_matrix(T_21_stacked[most_inliers_idx, :, :])
        max_inliers = inlier_nums[most_inliers_idx]
        inlier_indices_best = np.where(
            inlier_masks_stacked[most_inliers_idx, :])[0]
        # end = time.perf_counter()
        # print('comp, rest | {}'.format(end - start))

        if max_inliers < 5:
            raise ValueError(
                " RANSAC failed to find more than 5 inliers. Try adjusting the thresholds."
            )

        # print('After {} RANSAC iters, found best transform with {} / {} inliers.'.format(
        #     self.ransac_iters, max_inliers, self.num_pts))

        obs_1_inliers = self.obs_1[inlier_indices_best]
        obs_2_inliers = self.obs_2[inlier_indices_best]

        return (T_21_best, obs_1_inliers, obs_2_inliers, inlier_indices_best)
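# The inlier bookkeeping above reduces to an argmax over per-hypothesis inlier
# counts; a minimal standalone sketch of that pattern with made-up masks:
import numpy as np

masks = np.array([[1, 0, 1, 0],
                  [1, 1, 1, 0],
                  [0, 0, 1, 0]], dtype=bool)  # (num hypotheses) x (num points)
inlier_nums = np.sum(masks, axis=1)
best = np.argmax(inlier_nums)
inlier_indices_best = np.where(masks[best, :])[0]
assert best == 1 and inlier_indices_best.tolist() == [0, 1, 2]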
Example #12
def matlab_comparison():
    data = sio.loadmat('simple_case.mat')
    T1 = data['T1']
    T2 = data['T2']
    T_v_rel = [SE3.from_matrix(T1[:, :, 0]), SE3.from_matrix(T1[:, :, 1])]
    T_c_rel = [SE3.from_matrix(T2[:, :, 0]), SE3.from_matrix(T2[:, :, 1])]

    my_solver = solver()
    my_solver.set_T_v_rel(T_v_rel)
    my_solver.set_T_c_rel(T_c_rel)

    dual_time, dual_gt, dual_primal, dual_gap, dual_opt, dual_solution, dual_flag = my_solver.dual_solve(
        'R', verbose=True)
    rel_time, rel_gt, rel_primal, rel_gap, relax_opt, relax_solution, rel_flag = my_solver.relax_solve(
        'R')

    print(1 / dual_opt[3])
    print(1 / relax_opt[3])

    print(dual_solution)
    print(relax_solution)
    return
Example #13
    def compute_target(self, idx):
        #compute Tau_gt
        T2 = SE3.from_matrix(self.gt_samples[idx, 1, :, :],
                             normalize=True).inv()
        T1 = SE3.from_matrix(self.gt_samples[idx, 0, :, :], normalize=True)
        dT_gt = T2.dot(T1)
        gt_lie_alg = dT_gt.log()

        #compute Tau_vo
        T2 = SE3.from_matrix(self.vo_samples[idx, 1, :, :],
                             normalize=True).inv()
        T1 = SE3.from_matrix(self.vo_samples[idx, 0, :, :], normalize=True)
        dT_vo = T2.dot(T1)
        vo_lie_alg = dT_vo.log()

        if self.config['estimator_type'] == 'mono':
            if np.linalg.norm(vo_lie_alg[0:3]) >= 1e-8:
                scale = np.linalg.norm(gt_lie_alg[0:3]) / np.linalg.norm(
                    vo_lie_alg[0:3])
                vo_lie_alg[0:3] = scale * vo_lie_alg[0:3]

        gt_correction = (dT_gt.dot(dT_vo.inv())).log()
        return gt_lie_alg, vo_lie_alg, gt_correction
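# Numeric check of the monocular rescaling above: after scaling, the VO
# translation norm matches the ground-truth translation norm. Assumes numpy is
# imported as np, as above; the vectors are made up for illustration.
gt_t = np.array([1.0, 0.0, 0.0])
vo_t = np.array([0.4, 0.0, 0.0])
scale = np.linalg.norm(gt_t) / np.linalg.norm(vo_t)
assert np.isclose(np.linalg.norm(scale * vo_t), np.linalg.norm(gt_t))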
Example #14
from pyslam.losses import HuberLoss
import time

# Load KITTI data
basedir = '/Users/leeclement/Desktop/KITTI/raw/'
date = '2011_09_30'
drive = '0018'
frame_range = [0, 1]

dataset = pykitti.raw(basedir, date, drive, frame_range)
dataset.load_calib()
dataset.load_gray(format='cv2')
dataset.load_oxts()

# Parameters to estimate
T_cam0_imu = SE3.from_matrix(dataset.calib.T_cam0_imu)
T_cam0_imu.normalize()
T_0_w = T_cam0_imu * \
    SE3.from_matrix(dataset.oxts[0].T_w_imu).inv()
T_0_w.normalize()
T_1_w = T_cam0_imu * \
    SE3.from_matrix(dataset.oxts[1].T_w_imu).inv()
T_1_w.normalize()
T_1_0_true = T_1_w * T_0_w.inv()

# params_init = {'T_1_0': T_1_0_true}
params_init = {'T_1_0': SE3.identity()}

# Scaling parameters
pyrlevels = [3, 2, 1]
Example #15
    odom_pose_vec = matfile['odom_pose_vecs'].item()  # original odometry 6x1 vecs (seq_size-1 x 6)
    corr_pose_vec = matfile['corr_pose_vecs'].item()  # corrected 6x1 vecs (seq_size-1 x 6)
    if estimator == 'stereo':  #use rotation corrections only.
        corr_pose_vec[:, :, 0:3] = odom_pose_vec[:, 0:3]
    best_loop_closure_pose_vec = corr_pose_vec[best_loop_closure_epoch]

    est_traj, avg_corr_traj, dense_traj, dense_gt = [], [], [], []
    est_traj.append(gt_traj[0])
    avg_corr_traj.append(gt_traj[0])

    for i in range(0, odom_pose_vec.shape[0]):
        #classically estimated traj
        dT = SE3.exp(odom_pose_vec[i])
        new_est = SE3.as_matrix(
            (dT.dot(SE3.from_matrix(est_traj[i], normalize=True).inv())).inv())
        est_traj.append(new_est)

        dT_corr = SE3.exp(best_loop_closure_pose_vec[i])
        new_est = SE3.as_matrix((dT_corr.dot(
            SE3.from_matrix(avg_corr_traj[i], normalize=True).inv())).inv())
        avg_corr_traj.append(new_est)

    est_tm, est_metrics = generate_trajectory_metrics(gt_traj,
                                                      est_traj,
                                                      name='libviso2',
                                                      seq=seq)
    corr_tm, avg_corr_metrics = generate_trajectory_metrics(gt_traj,
                                                            avg_corr_traj,
                                                            name='ss-dpcnet',
                                                            seq='')
Example #16
    if estimator == 'stereo':  #use rotation corrections only.
        corr_pose_vec[:, :, 0:3] = odom_pose_vec[:, 0:3]

    ###Use the epoch with the lowest loss, or most loop closures:
    best_loss_pose_vec = corr_pose_vec[best_loss_epoch]
    best_loop_closure_pose_vec = corr_pose_vec[best_loop_closure_epoch]

    est_traj, best_loss_traj, best_loop_closure_traj = [], [], []
    est_traj.append(gt_traj[0])
    best_loss_traj.append(gt_traj[0])
    best_loop_closure_traj.append(gt_traj[0])

    for i in range(0, odom_pose_vec.shape[0]):
        #classically estimated traj
        dT = SE3.exp(odom_pose_vec[i])
        new_est = SE3.as_matrix(
            (dT.dot(SE3.from_matrix(est_traj[i], normalize=True).inv())).inv())
        est_traj.append(new_est)

        #best validation loss traj
        dT_corr = SE3.exp(best_loss_pose_vec[i])
        new_est = SE3.as_matrix((dT_corr.dot(
            SE3.from_matrix(best_loss_traj[i], normalize=True).inv())).inv())
        best_loss_traj.append(new_est)

        #best loop closure traj
        dT_corr = SE3.exp(best_loop_closure_pose_vec[i])
        new_est = SE3.as_matrix((dT_corr.dot(
            SE3.from_matrix(best_loop_closure_traj[i],
                            normalize=True).inv())).inv())
        best_loop_closure_traj.append(new_est)
def eval_with_noise_andreff(my_solver,
                            T_v_rel,
                            T_c_rel,
                            scale,
                            num_tests,
                            trans_noise_per,
                            rot_noise_per,
                            cons='RCH'):
    #Initialize Storage variables
    #Extrinsic calibration Errors
    dual_trans_error = np.empty((num_tests))
    rel_trans_error = np.empty((num_tests))
    andreff_trans_error = np.empty((num_tests))
    dual_rot_error = np.empty((num_tests))
    rel_rot_error = np.empty((num_tests))
    andreff_rot_error = np.empty((num_tests))

    #Scale errors
    dual_alpha_error = np.empty((num_tests))
    rel_alpha_error = np.empty((num_tests))
    andreff_alpha_error = np.empty((num_tests))
    #Optimization Results
    dual_primal = np.empty((num_tests))
    rel_primal = np.empty((num_tests))
    dual_gap = np.empty((num_tests))
    rel_gap = np.empty((num_tests))

    #Algorithm Evaluation
    dual_time = np.empty((num_tests))
    rel_time = np.empty((num_tests))
    andreff_time = np.empty((num_tests))

    #Sanity Check
    dual_gt_value = np.empty((num_tests))
    rel_gt_value = np.empty((num_tests))

    #Extrinsic calibration inverse for evaluating error
    ext_cal_trans = SE3.from_matrix(my_solver.extcal).trans
    ext_cal_rot_inv = SE3.from_matrix(my_solver.extcal).rot.inv()

    #Choose multiple subsets of the data
    for i in range(num_tests):
        print("Solving set: {}".format(i))

        # Add noise to the measurements
        print("Adding noise to data")
        T_v_rel_noisy = utils.add_noise(T_v_rel, trans_noise_per,
                                        rot_noise_per)
        T_c_rel_noisy = utils.add_noise(T_c_rel, trans_noise_per,
                                        rot_noise_per)

        one_result_dict = evaluate_results_with_andreff(
            my_solver, T_v_rel_noisy, T_c_rel_noisy, scale, cons)

        #Store the estimation errors
        print("Storing results")
        dual_time[i] = one_result_dict['dual_time']
        dual_gt_value[i] = one_result_dict['dual_gt_value']
        dual_primal[i] = one_result_dict['dual_primal']
        dual_gap[i] = one_result_dict['dual_gap']
        dual_trans_error[i] = one_result_dict['dual_trans_error']
        dual_rot_error[i] = one_result_dict['dual_rot_error']
        dual_alpha_error[i] = one_result_dict['dual_alpha_error']
        rel_time[i] = one_result_dict['rel_time']
        rel_gt_value[i] = one_result_dict['rel_gt_value']
        rel_primal[i] = one_result_dict['rel_primal']
        rel_gap[i] = one_result_dict['rel_gap']
        rel_trans_error[i] = one_result_dict['rel_trans_error']
        rel_rot_error[i] = one_result_dict['rel_rot_error']
        rel_alpha_error[i] = one_result_dict['rel_alpha_error']
        andreff_trans_error[i] = one_result_dict['andreff_trans_error']
        andreff_rot_error[i] = one_result_dict['andreff_rot_error']
        andreff_alpha_error[i] = one_result_dict['andreff_alpha_error']
        andreff_time[i] = one_result_dict['andreff_time']

    #Store data
    results_dict = {
        'dual_time': dual_time,
        'dual_gt_value': dual_gt_value,
        'dual_primal': dual_primal,
        'dual_gap': dual_gap,
        'dual_trans_error': dual_trans_error,
        'dual_rot_error': dual_rot_error,
        'dual_alpha_error': dual_alpha_error,
        'rel_time': rel_time,
        'rel_gt_value': rel_gt_value,
        'rel_primal': rel_primal,
        'rel_gap': rel_gap,
        'rel_trans_error': rel_trans_error,
        'rel_rot_error': rel_rot_error,
        'rel_alpha_error': rel_alpha_error,
        'andreff_trans_error': andreff_trans_error,
        'andreff_rot_error': andreff_rot_error,
        'andreff_alpha_error': andreff_alpha_error,
        'andreff_time': andreff_time
    }

    return results_dict
def evaluate_results_with_andreff(my_solver, T_v_rel, T_c_rel, scale, cons):
    # Solve with the Andreff method
    fail = 1000.

    # Extrinsic calibration inverse for evaluating error
    ext_cal_trans = SE3.from_matrix(my_solver.extcal).trans
    ext_cal_rot_inv = SE3.from_matrix(my_solver.extcal).rot.inv()

    # Set the pose data
    my_solver.set_T_v_rel(T_v_rel)
    T_c_rel_scaled = my_solver.set_T_c_rel(T_c_rel, scale)
    my_solver.andreff_solver.set_vals(T_v_rel, T_c_rel_scaled, None)
    X_andreff, scale_andreff, runtime_andreff = my_solver.andreff_solver.solve(
        schur=True)

    # Solve for the extrinsic calibration
    dual_time, dual_gt_value, dual_primal, dual_gap, dual_opt, dual_solution, dual_flag = my_solver.dual_solve(
        cons)
    rel_time, rel_gt_value, rel_primal, rel_gap, relax_opt, relax_solution, rel_flag = my_solver.relax_solve(
        cons)

    if dual_flag:  #Solution was found
        # Store the estimation errors
        dual_trans_error = np.linalg.norm(ext_cal_trans - dual_solution[:3, 3])
        dual_rot_error = np.linalg.norm((ext_cal_rot_inv.dot(
            SO3.from_matrix(dual_solution[:3, :3], normalize=True))).log())
        dual_alpha_error = np.abs(scale - 1 / dual_opt[3])
        dual_results_dict = {
            'dual_time': dual_time,
            'dual_gt_value': np.asscalar(dual_gt_value),
            'dual_primal': np.asscalar(dual_primal),
            'dual_gap': np.asscalar(dual_gap),
            'dual_trans_error': dual_trans_error,
            'dual_rot_error': dual_rot_error,
            'dual_alpha_error': np.asscalar(dual_alpha_error)
        }
        print("DUAL    SCALE: {:}".format(1 / dual_opt[3]))
    else:  #Solution was not found
        dual_results_dict = {
            'dual_time': fail,
            'dual_gt_value': fail,
            'dual_primal': fail,
            'dual_gap': fail,
            'dual_trans_error': fail,
            'dual_rot_error': fail,
            'dual_alpha_error': fail
        }
    if rel_flag:
        rel_trans_error = np.linalg.norm(ext_cal_trans - relax_solution[:3, 3])
        rel_rot_error = np.linalg.norm((ext_cal_rot_inv.dot(
            SO3.from_matrix(relax_solution[:3, :3], normalize=True))).log())
        rel_alpha_error = np.abs(scale - 1 / relax_opt[3])
        rel_results_dict = {
            'rel_time': rel_time,
            'rel_gt_value': np.asscalar(rel_gt_value),
            'rel_primal': np.asscalar(rel_primal),
            'rel_gap': np.asscalar(rel_gap),
            'rel_trans_error': rel_trans_error,
            'rel_rot_error': rel_rot_error,
            'rel_alpha_error': np.asscalar(rel_alpha_error)
        }
    else:  #Solution was not found
        rel_results_dict = {
            'rel_time': fail,
            'rel_gt_value': fail,
            'rel_primal': fail,
            'rel_gap': fail,
            'rel_trans_error': fail,
            'rel_rot_error': fail,
            'rel_alpha_error': fail
        }
    rel_results_dict.update(dual_results_dict)

    # Store Andreff estimation errors
    andreff_trans_error = np.linalg.norm(ext_cal_trans - X_andreff[0:3, 3])
    andreff_rot_error = np.linalg.norm(
        (ext_cal_rot_inv.dot(SO3.from_matrix(X_andreff[:3, :3],
                                             normalize=True))).log())
    andreff_alpha_error = np.abs(scale - 1 / scale_andreff)
    print("ANDREFF SCALE: {:}".format(scale_andreff))

    # Store data
    and_dict = {
        "andreff_trans_error": andreff_trans_error,
        "andreff_rot_error": andreff_rot_error,
        "andreff_alpha_error": andreff_alpha_error,
        'andreff_time': runtime_andreff
    }
    rel_results_dict.update(and_dict)
    return rel_results_dict
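# The rotation error used above is the norm of the SO(3) log of R_gt^-1 . R_est,
# i.e. the geodesic angle between the two rotations; a small check, assuming
# SO3 is the numpy SO3 from the liegroups package:
import numpy as np
from liegroups import SO3

R_gt = SO3.rotz(0.30)
R_est = SO3.rotz(0.25)
err = np.linalg.norm((R_gt.inv().dot(R_est)).log())
assert np.isclose(err, 0.05)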
Example #19
                                       int(img_num) - 1,
                                       int(img_num) + 2))
            else:
                data = pykitti.raw(args.source_dir,
                                   date,
                                   drive,
                                   frames=range(int(img_num),
                                                int(img_num) + 1))
            # print(data.cam2_files)

            filenames_i = []
            if cam_num == 'l':
                img_files = mult * data.cam2_files
                intrinsics_i = data.calib.K_cam2
                num = '02'
                T_cam_imu = SE3.from_matrix(data.calib.T_cam2_imu,
                                            normalize=True)
            if cam_num == 'r':
                img_files = mult * data.cam3_files
                intrinsics_i = data.calib.K_cam3
                num = '03'
                T_cam_imu = SE3.from_matrix(data.calib.T_cam3_imu,
                                            normalize=True)

            for img_file in img_files:
                img, zoomx, zoomy, orig_img_width, orig_img_height = load_image(
                    img_file)
                new_filename = img_file.split(args.source_dir)[1].replace(
                    '/image_03/data', '').replace('.png', '.jpg')
                new_filename = os.path.join(seq_target_dir, new_filename)
                new_filename = new_filename.replace(
                    'image_{}/data/'.format(num),
Example #20
    gt_traj = matfile['gt_traj'].item()
    odom_pose_vec = matfile['odom_pose_vecs'].item() #original odometry 6x1 vecs (seq_size-1 x 6)
    corr_pose_vec = matfile['corr_pose_vecs'].item() # corrected 6x1 vecs (seq_size-1 x 6)
    if estimator == 'stereo': #use rotation corrections only.
        corr_pose_vec[:,:,0:3] = odom_pose_vec[:,0:3]
    best_loop_closure_pose_vec = corr_pose_vec[best_loop_closure_epoch]

    
    est_traj, avg_corr_traj, dense_traj, dense_gt = [],[],[],[]
    est_traj.append(gt_traj[0])
    avg_corr_traj.append(gt_traj[0])
 
    for i in range(0,odom_pose_vec.shape[0]):
        #classically estimated traj
        dT = SE3.exp(odom_pose_vec[i])
        new_est = SE3.as_matrix((dT.dot(SE3.from_matrix(est_traj[i], normalize=True).inv())).inv())
        est_traj.append(new_est)

        dT_corr = SE3.exp(best_loop_closure_pose_vec[i])
        new_est = SE3.as_matrix((dT_corr.dot(SE3.from_matrix(avg_corr_traj[i], normalize=True).inv())).inv())
        avg_corr_traj.append(new_est)    
 
    est_tm, est_metrics =  generate_trajectory_metrics(gt_traj, est_traj, name='libviso2', seq=seq)
    corr_tm, avg_corr_metrics = generate_trajectory_metrics(gt_traj, avg_corr_traj, name='ss-dpcnet', seq='')
    saved_traj[seq]['ours'] = corr_tm
    saved_traj[seq]['libviso2'] = est_tm



for seq in ['00', '02', '05']:
    tm_dict = {'Libviso2-s': saved_traj[seq]['libviso2'],
    if estimator == 'stereo': #use rotation corrections only.
        corr_pose_vec[:,:,0:3] = odom_pose_vec[:,0:3]
        
    ###Use the epoch with the lowest loss, or most loop closures:
    best_loss_pose_vec = corr_pose_vec[best_loss_epoch]
    best_loop_closure_pose_vec = corr_pose_vec[best_loop_closure_epoch]

    est_traj, best_loss_traj, best_loop_closure_traj = [],[],[]
    est_traj.append(gt_traj[0])
    best_loss_traj.append(gt_traj[0])
    best_loop_closure_traj.append(gt_traj[0])
    
    for i in range(0,odom_pose_vec.shape[0]):
        #classically estimated traj
        dT = SE3.exp(odom_pose_vec[i])
        new_est = SE3.as_matrix((dT.dot(SE3.from_matrix(est_traj[i], normalize=True).inv())).inv())
        est_traj.append(new_est)
        
        #best validation loss traj
        dT_corr = SE3.exp(best_loss_pose_vec[i])
        new_est = SE3.as_matrix((dT_corr.dot(SE3.from_matrix(best_loss_traj[i], normalize=True).inv())).inv())
        best_loss_traj.append(new_est)         

        #best loop closure traj
        dT_corr = SE3.exp(best_loop_closure_pose_vec[i])
        new_est = SE3.as_matrix((dT_corr.dot(SE3.from_matrix(best_loop_closure_traj[i], normalize=True).inv())).inv())
        best_loop_closure_traj.append(new_est)  

    est_tm, est_metrics =  generate_trajectory_metrics(gt_traj, est_traj, name='libviso2', seq=seq, mode='---')
    best_loss_tm, best_loss_metrics = generate_trajectory_metrics(gt_traj, best_loss_traj, name='Ours', seq='', mode='best loss')
    best_loop_closure_tm, best_loop_closure_metrics = generate_trajectory_metrics(gt_traj, best_loop_closure_traj, name='Ours', seq='', mode='loop closure')
Example #22
test_dset_loaders = torch.utils.data.DataLoader(test_dset,
                                                batch_size=config['minibatch'],
                                                shuffle=False,
                                                num_workers=4)
eval_dsets = {'test': test_dset_loaders}
Reconstructor = stn.Reconstructor().to(device)
model = mono_model_joint.joint_model(
    num_img_channels=(6 + 2 * config['use_flow']),
    output_exp=args.exploss,
    dropout_prob=config['dropout_prob']).to(device)
model.load_state_dict(torch.load(pretrained_path))

_, _, _, _, _, corr_traj, corr_traj_rot, est_traj, gt_traj, _, _, _ = test_trajectory(
    device, model, Reconstructor, test_dset_loaders, 0)

est_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in est_traj]
corr_traj_rot_se3 = [SE3.from_matrix(T, normalize=True) for T in corr_traj_rot]
gt_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]

dense_tm = TrajectoryMetrics(gt_traj_se3, gt_traj_se3, convention='Twv')
est_tm = TrajectoryMetrics(gt_traj_se3, est_traj_se3, convention='Twv')
corr_tm = TrajectoryMetrics(gt_traj_se3, corr_traj_rot_se3, convention='Twv')

tm_dict = {
    'Dense': dense_tm,
    'libviso2-s': est_tm,
    'Ours (Gen.)': corr_tm,
}
est_vis = visualizers.TrajectoryVisualizer(tm_dict)
fig, ax = est_vis.plot_topdown(
    which_plane='xy',
Example #23
def test_trajectory(device, pose_model, spatial_trans, dset, epoch):
    pose_model.train(False)  # Set model to evaluate mode
    pose_model.eval()        # eval() puts batch-norm (and dropout) layers in inference mode
    spatial_trans.train(False)  
    spatial_trans.eval()     
    
    #initialize the relevant outputs
    full_corr_lie_alg_stacked, rot_corr_lie_alg_stacked, gt_lie_alg_stacked, vo_lie_alg_stacked, corrections_stacked, gt_corrections_stacked= \
            np.empty((0,6)), np.empty((0,6)), np.empty((0,6)), np.empty((0,6)), np.empty((0,6)), np.empty((0,6))

    for data in dset:
        imgs, gt_lie_alg, intrinsics, vo_lie_alg, gt_correction = data
        gt_lie_alg = gt_lie_alg.type(torch.FloatTensor).to(device)   
        vo_lie_alg = vo_lie_alg.type(torch.FloatTensor).to(device)
        img_list = []
        for im in imgs:              
            img_list.append(im.to(device))

        corr, exp_mask, disp = pose_model(img_list[0:3], vo_lie_alg)
        exp_mask, disp = exp_mask[0], disp[0][:,0]
        corr_rot = torch.clone(corr)
        corr_rot[:,0:3]=0

        corrected_pose = se3_log_exp(corr, vo_lie_alg)
        corrected_pose_rot_only = se3_log_exp(corr_rot, vo_lie_alg)
        
        
        corrections_stacked = np.vstack((corrections_stacked, corr.cpu().detach().numpy()))
        gt_corrections_stacked = np.vstack((gt_corrections_stacked, gt_correction.cpu().detach().numpy()))
        full_corr_lie_alg_stacked = np.vstack((full_corr_lie_alg_stacked, corrected_pose.cpu().detach().numpy()))
        rot_corr_lie_alg_stacked = np.vstack((rot_corr_lie_alg_stacked, corrected_pose_rot_only.cpu().detach().numpy()))
        gt_lie_alg_stacked = np.vstack((gt_lie_alg_stacked, gt_lie_alg.cpu().detach().numpy()))
        vo_lie_alg_stacked = np.vstack((vo_lie_alg_stacked, vo_lie_alg.cpu().detach().numpy()))

    est_traj, corr_traj, corr_traj_rot, gt_traj = [],[],[],[]
    gt_traj = dset.dataset.raw_gt_trials[0]
    est_traj.append(gt_traj[0])
    corr_traj.append(gt_traj[0])
    corr_traj_rot.append(gt_traj[0])

    cum_dist = [0]
    for i in range(0,full_corr_lie_alg_stacked.shape[0]):
        #classically estimated traj
        dT = SE3.exp(vo_lie_alg_stacked[i])
        new_est = SE3.as_matrix((dT.dot(SE3.from_matrix(est_traj[i],normalize=True).inv())).inv())
        est_traj.append(new_est)
        cum_dist.append(cum_dist[i]+np.linalg.norm(dT.trans))

        #corrected traj (rotation only)
        dT = SE3.exp(rot_corr_lie_alg_stacked[i])
        new_est = SE3.as_matrix((dT.dot(SE3.from_matrix(corr_traj_rot[i],normalize=True).inv())).inv())
        corr_traj_rot.append(new_est)
        #corrected traj (full pose)
        dT = SE3.exp(full_corr_lie_alg_stacked[i])
        new_est = SE3.as_matrix((dT.dot(SE3.from_matrix(corr_traj[i],normalize=True).inv())).inv())
        corr_traj.append(new_est)

    gt_traj_se3 = [SE3.from_matrix(T,normalize=True) for T in gt_traj]
    est_traj_se3 = [SE3.from_matrix(T,normalize=True) for T in est_traj]
    corr_traj_se3 = [SE3.from_matrix(T,normalize=True) for T in corr_traj]
    corr_traj_rot_se3 = [SE3.from_matrix(T,normalize=True) for T in corr_traj_rot]
    
    tm_est = TrajectoryMetrics(gt_traj_se3, est_traj_se3, convention = 'Twv')
    tm_corr = TrajectoryMetrics(gt_traj_se3, corr_traj_se3, convention = 'Twv')
    tm_corr_rot = TrajectoryMetrics(gt_traj_se3, corr_traj_rot_se3, convention = 'Twv')
    
    if epoch >= 0:
        est_mean_trans, est_mean_rot = tm_est.mean_err()
        corr_mean_trans, corr_mean_rot = tm_corr.mean_err()
        corr_rot_mean_trans, corr_rot_mean_rot = tm_corr_rot.mean_err()
        print("Odom. mean trans. error: {} | mean rot. error: {}".format(est_mean_trans, est_mean_rot*180/np.pi))
        print("Corr. mean trans. error: {} | mean rot. error: {}".format(corr_mean_trans, corr_mean_rot*180/np.pi))
        print("Corr. (rot. only) mean trans. error: {} | mean rot. error: {}".format(corr_rot_mean_trans, corr_rot_mean_rot*180/np.pi))
        
        seg_lengths = list(range(100,801,100))
        _, seg_errs_est = tm_est.segment_errors(seg_lengths, rot_unit='rad')
        _, seg_errs_corr = tm_corr.segment_errors(seg_lengths, rot_unit='rad')
        _, seg_errs_corr_rot = tm_corr_rot.segment_errors(seg_lengths, rot_unit='rad')
        print("Odom. mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(np.mean(seg_errs_est[:,1])*100, 100*np.mean(seg_errs_est[:,2])*180/np.pi))
        print("Corr. mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(np.mean(seg_errs_corr[:,1])*100, 100*np.mean(seg_errs_corr[:,2])*180/np.pi))
        print("Corr. (rot. only) mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(np.mean(seg_errs_corr_rot[:,1])*100, 100*np.mean(seg_errs_corr_rot[:,2])*180/np.pi)) 
        
    rot_seg_err = 100*np.mean(seg_errs_corr_rot[:,2])*180/np.pi

    return corrections_stacked, gt_corrections_stacked, full_corr_lie_alg_stacked, vo_lie_alg_stacked, gt_lie_alg_stacked, \
        np.array(corr_traj), np.array(corr_traj_rot), np.array(est_traj), np.array(gt_traj), rot_seg_err, corr_rot_mean_trans, np.array(cum_dist)
        
Example #24
from pyslam.problem import Options, Problem
from pyslam.sensors import StereoCamera
from pyslam.residuals import PhotometricResidualSE3
from pyslam.losses import HuberLoss
import time

# Load KITTI data
basedir = '/path/to/KITTI/raw/'
date = '2011_09_30'
drive = '0016'
frame_range = range(0, 2)

dataset = pykitti.raw(basedir, date, drive, frames=frame_range)

# Parameters to estimate
T_cam0_imu = SE3.from_matrix(dataset.calib.T_cam0_imu)
T_cam0_imu.normalize()
T_0_w = T_cam0_imu.dot(SE3.from_matrix(dataset.oxts[0].T_w_imu).inv())
T_0_w.normalize()
T_1_w = T_cam0_imu.dot(SE3.from_matrix(dataset.oxts[1].T_w_imu).inv())
T_1_w.normalize()
T_1_0_true = T_1_w.dot(T_0_w.inv())

# params_init = {'T_1_0': T_1_0_true}
params_init = {'T_1_0': SE3.identity()}

# Scaling parameters
pyrlevels = [3, 2, 1]

params = params_init
Example #25
                unscaled_pose_vec[:, 3:6] = gt_pose_vec[:, 3:6]

            scaled_pose_vec = np.array(unscaled_pose_vec)
            scaled_pose_vec[:, 0:3] = scaled_pose_vec[:, 0:3] * np.repeat(
                data['dnet_scale_factor'], 3, axis=1)

            ## Compute Trajectories
            gt_traj = test_dset_loaders.dataset.raw_gt_trials[0]
            est, gt, errors, cum_dist = tt(unscaled_pose_vec,
                                           gt_traj,
                                           method='unscaled')
            scaled_est, gt, errors, cum_dist = tt(scaled_pose_vec,
                                                  gt_traj,
                                                  method='scaled')

            gt_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]
            est_se3 = [SE3.from_matrix(T, normalize=True) for T in est]
            scaled_se3 = [
                SE3.from_matrix(T, normalize=True) for T in scaled_est
            ]

            est_tm = TrajectoryMetrics(gt_traj_se3, est_se3, convention='Twv')
            scaled_tm = TrajectoryMetrics(gt_traj_se3,
                                          scaled_se3,
                                          convention='Twv')

            tm_dict[method] = {
                'unscaled': est_tm,
                'scaled': scaled_tm,
            }
Example #26
        ### store the VO pose estimates to extract
        mono_seq_info['sparse_vo'] = mono_traj
        stereo_seq_info['sparse_vo'] = stereo_traj

        ###filter out frames with low rotational or translational velocities
        for seq_info in [mono_seq_info, stereo_seq_info]:
            if args.remove_static:
                print("Removing Static frames from {}".format(drive))
                deleting = True

                while deleting:
                    idx_list = []
                    sparse_traj = np.copy(seq_info['sparse_vo'])
                    for i in range(0, sparse_traj.shape[0] - 1, 2):
                        T2 = SE3.from_matrix(sparse_traj[i + 1, :, :],
                                             normalize=True).inv()
                        T1 = SE3.from_matrix(sparse_traj[i, :, :],
                                             normalize=True)
                        dT = T2.dot(T1)
                        pose_vec = dT.log()
                        trans_norm = np.linalg.norm(pose_vec[0:3])
                        rot_norm = np.linalg.norm(pose_vec[3:6])
                        if trans_norm < 1.5 and rot_norm < 0.007:  #0.007
                            idx_list.append(i)
#                            os.remove(seq_info['cam_02'][i])
#                            os.remove(seq_info['cam_03'][i])
                    if len(idx_list) == 0:
                        deleting = False

                    print('deleting {} frames'.format(len(idx_list)))
                    print('original length: {}'.format(
def evaluate_results(my_solver, T_v_rel, T_c_rel, scale, cons):
    # Sentinel value returned when a solver fails
    fail = 1000

    #Extrinsic calibration inverse for evaluating error
    ext_cal_trans = SE3.from_matrix(my_solver.extcal).trans
    ext_cal_rot_inv = SE3.from_matrix(my_solver.extcal).rot.inv()

    # Set the pose data
    my_solver.set_T_v_rel(T_v_rel)
    my_solver.set_T_c_rel(T_c_rel, scale)

    # Solve for the extrinsic calibration
    dual_time, dual_gt_value, dual_primal, dual_gap, dual_opt, dual_solution, dualflag = my_solver.dual_solve(
        cons)
    rel_time, rel_gt_value, rel_primal, rel_gap, relax_opt, relax_solution, relflag = my_solver.relax_solve(
        cons)
    #and_extcal, and_scale, and_runtime =  my_solver.andreff_solver.solve()

    if dualflag:  #Solution was found
        # Store the estimation errors
        dual_trans_error = np.linalg.norm(ext_cal_trans - dual_solution[:3, 3])
        dual_rot_error = np.linalg.norm((ext_cal_rot_inv.dot(
            SO3.from_matrix(dual_solution[:3, :3], normalize=True))).log())
        dual_alpha_error = np.abs(scale - 1 / dual_opt[3])
        dual_results_dict = {
            'dual_time': dual_time,
            'dual_gt_value': np.asscalar(dual_gt_value),
            'dual_primal': np.asscalar(dual_primal),
            'dual_gap': np.asscalar(dual_gap),
            'dual_trans_error': dual_trans_error,
            'dual_rot_error': dual_rot_error,
            'dual_alpha_error': np.asscalar(dual_alpha_error)
        }
    else:  #Solution was not found
        dual_results_dict = {
            'dual_time': fail,
            'dual_gt_value': fail,
            'dual_primal': fail,
            'dual_gap': fail,
            'dual_trans_error': fail,
            'dual_rot_error': fail,
            'dual_alpha_error': fail
        }
    if relflag:
        rel_trans_error = np.linalg.norm(ext_cal_trans - relax_solution[:3, 3])
        rel_rot_error = np.linalg.norm((ext_cal_rot_inv.dot(
            SO3.from_matrix(relax_solution[:3, :3], normalize=True))).log())
        rel_alpha_error = np.abs(scale - 1 / relax_opt[3])
        rel_results_dict = {
            'rel_time': rel_time,
            'rel_gt_value': np.asscalar(rel_gt_value),
            'rel_primal': np.asscalar(rel_primal),
            'rel_gap': np.asscalar(rel_gap),
            'rel_trans_error': rel_trans_error,
            'rel_rot_error': rel_rot_error,
            'rel_alpha_error': np.asscalar(rel_alpha_error)
        }
    else:  #Solution was not found
        rel_results_dict = {
            'rel_time': fail,
            'rel_gt_value': fail,
            'rel_primal': fail,
            'rel_gap': fail,
            'rel_trans_error': fail,
            'rel_rot_error': fail,
            'rel_alpha_error': fail
        }
    rel_results_dict.update(dual_results_dict)

    return rel_results_dict