def do_vo_mapping(basepath, seq, ref_cond, frames=None, outfile=None, rgb_dir='rgb'):
    """Build a keyframe map by running dense RGB-D VO over a reference sequence.

    Loads the vkitti localization dataset for (seq, ref_cond), configures a
    DenseRGBDPipeline in 'map' mode, tracks every frame, and compares the
    estimated trajectory against ground truth.

    Returns:
        (tm, vo): TrajectoryMetrics for the run and the pipeline (which now
        holds the keyframe map). If outfile is given, metrics are also saved
        to it via savemat.
    """
    ref_data = vkitti.LocalizationDataset(
        basepath, seq, ref_cond, frames=frames, rgb_dir=rgb_dir)

    test_im = ref_data.get_gray(0)
    camera = get_camera(seq, test_im)
    camera.maxdepth = 200.

    # Ground-truth poses (normalized on load); first pose anchors the pipeline.
    T_w_c_gt = [SE3.from_matrix(p, normalize=True) for p in ref_data.poses]
    T_0_w = T_w_c_gt[0].inv()

    vo = DenseRGBDPipeline(camera, first_pose=T_0_w)
    # vo.keyframe_trans_thresh = 3.  # meters
    vo.keyframe_trans_thresh = 2.  # meters
    vo.keyframe_rot_thresh = 15. * np.pi / 180.  # rad
    vo.depth_stiffness = 1. / 0.01  # 1/meters
    vo.intensity_stiffness = 1. / 0.2  # 1/ (intensity is in [0,1] )
    # vo.intensity_stiffness = 1. / 0.1
    vo.use_motion_model_guess = True
    # vo.min_grad = 0.2
    # vo.loss = HuberLoss(5.0)

    print('Mapping using {}/{}'.format(seq, ref_cond))
    vo.set_mode('map')

    n_frames = len(ref_data)
    t_prev = time.perf_counter()
    for i in range(n_frames):
        im_gray = ref_data.get_gray(i)
        depth_map = ref_data.get_depth(i)
        # Mark returns at or beyond the camera's max depth as invalid.
        depth_map[depth_map >= camera.maxdepth] = -1.

        vo.track(im_gray, depth_map)
        # vo.track(im_gray, depth_map, guess=T_w_c_gt[i].inv())  # debug

        t_now = time.perf_counter()
        print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
            i, n_frames, 100. * i / n_frames, t_now - t_prev), end='\r')
        t_prev = t_now

    # Compare estimated trajectory (invert camera-from-world) against GT.
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    return tm, vo
def do_tracking(basepath, seq, track_cond, vo, frames=None, outfile=None, rgb_dir='rgb'):
    """Relocalize against an existing map by tracking a (possibly different-
    condition) sequence with an already-initialized pipeline.

    Args:
        basepath, seq, track_cond: identify the tum_rgbd localization dataset.
        vo: a DenseRGBDPipeline that already holds a keyframe map (e.g. from
            do_vo_mapping); it is switched to 'track' mode here.
        frames, rgb_dir: forwarded to the dataset loader.
        outfile: optional path; if given, metrics are saved via savemat.

    Returns:
        (tm, vo): TrajectoryMetrics (up to the first tracking failure, if any)
        and the pipeline.
    """
    track_data = tum_rgbd.LocalizationDataset(
        basepath, seq, track_cond, frames=frames, rgb_dir=rgb_dir)

    # Ground-truth poses: quaternion (xyzw) + translation pairs.
    T_w_c_gt = [
        SE3(SO3.from_quaternion(p[0], ordering='xyzw'), p[1])
        for p in track_data.poses
    ]
    # NOTE: the original also computed T_w_c_gt[0].inv() here, but it was
    # never used (the pipeline's first pose was fixed during mapping), so the
    # dead local has been removed.

    print('Tracking using {}/{}'.format(seq, track_cond))
    vo.set_mode('track')

    start = time.perf_counter()
    for c_idx in range(len(track_data)):
        image = track_data.get_gray(c_idx)
        depth = track_data.get_depth(c_idx)

        # Keep the try body minimal: only the tracking call can legitimately
        # fail; timing and progress output are handled outside it.
        try:
            vo.track(image, depth)
            # vo.track(image, depth, guess=T_w_c_gt[c_idx].inv())  # debug
        except Exception as e:
            # BUGFIX: 'end' was previously only assigned after a successful
            # track, so a failure on the first frame raised a NameError here
            # (and later failures reported a stale timestamp). Take a fresh
            # timestamp before reporting.
            end = time.perf_counter()
            print('Error on {}/{}'.format(seq, track_cond))
            print(e)
            print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
                c_idx, len(track_data),
                100. * c_idx / len(track_data), end - start))
            break

        end = time.perf_counter()
        print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
            c_idx, len(track_data),
            100. * c_idx / len(track_data), end - start), end='\r')
        start = end

    # Compute errors against ground truth for the frames that were tracked.
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    return tm, vo
def run_vo_kitti(basedir, date, drive, frames, outfile=None):
    """Run dense stereo VO on a KITTI raw drive and return trajectory metrics.

    Loads the drive with pykitti, builds a StereoCamera from the grayscale
    calibration, tracks every stereo pair, and scores the estimated
    trajectory against the OXTS ground truth. If outfile is given, the
    TrajectoryMetrics are also saved via savemat.
    """
    dataset = pykitti.raw(basedir, date, drive, frames=frames, imformat='cv2')

    # First pose: world -> cam0 at frame 0, from the IMU extrinsics + OXTS.
    oxts0 = dataset.oxts[0]
    T_cam0_imu = SE3.from_matrix(dataset.calib.T_cam0_imu)
    T_cam0_imu.normalize()
    T_0_w = T_cam0_imu.dot(SE3.from_matrix(oxts0.T_w_imu).inv())
    T_0_w.normalize()

    # Stereo camera model from the grayscale (cam0) intrinsics + baseline.
    test_im = np.array(next(dataset.cam0))
    K = dataset.calib.K_cam0
    h, w = test_im.shape
    camera = StereoCamera(
        K[0, 2], K[1, 2],          # cu, cv
        K[0, 0], K[1, 1],          # fu, fv
        dataset.calib.b_gray, w, h)

    # Ground-truth cam0 trajectory in the world frame.
    T_w_c_gt = [
        SE3.from_matrix(o.T_w_imu).dot(T_cam0_imu.inv())
        for o in dataset.oxts
    ]

    vo = DenseStereoPipeline(camera, first_pose=T_0_w)
    # Skip the highest resolution pyramid level.
    vo.pyrlevel_sequence.pop()
    vo.pyr_cameras.pop()

    t_prev = time.perf_counter()
    for idx, (im_left, im_right) in enumerate(dataset.gray):
        vo.track(np.array(im_left), np.array(im_right))
        # vo.track(im_left, im_right, guess=T_w_c_gt[idx].inv())
        t_now = time.perf_counter()
        print('Image {}/{} | {:.3f} s'.format(idx, len(dataset), t_now - t_prev))
        t_prev = t_now

    # Score the estimated trajectory against ground truth.
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    # Free the pipeline (and its pyramids) before returning.
    del vo
    return tm