def ape(traj_ref: PosePath3D, traj_est: PosePath3D,
        pose_relation: metrics.PoseRelation, align: bool = False,
        correct_scale: bool = False, n_to_align: int = -1,
        align_origin: bool = False, ref_name: str = "reference",
        est_name: str = "estimate") -> Result:
    """
    Computes the absolute pose error (APE) between two trajectories,
    optionally after aligning the estimate to the reference,
    and returns a metrics result object.
    """
    # Align the trajectories.
    only_scale = correct_scale and not align
    alignment_transformation = None
    if align or correct_scale:
        logger.debug(SEP)
        alignment_transformation = lie_algebra.sim3(
            *traj_est.align(traj_ref, correct_scale, only_scale,
                            n=n_to_align))
    elif align_origin:
        logger.debug(SEP)
        alignment_transformation = traj_est.align_origin(traj_ref)

    # Calculate APE.
    logger.debug(SEP)
    data = (traj_ref, traj_est)
    ape_metric = metrics.APE(pose_relation)
    ape_metric.process_data(data)

    title = str(ape_metric)
    if align and not correct_scale:
        title += "\n(with SE(3) Umeyama alignment)"
    elif align and correct_scale:
        title += "\n(with Sim(3) Umeyama alignment)"
    elif only_scale:
        title += "\n(scale corrected)"
    elif align_origin:
        title += "\n(with origin alignment)"
    else:
        title += "\n(not aligned)"
    if (align or correct_scale) and n_to_align != -1:
        title += " (aligned poses: {})".format(n_to_align)

    ape_result = ape_metric.get_result(ref_name, est_name)
    ape_result.info["title"] = title
    logger.debug(SEP)
    logger.info(ape_result.pretty_str())

    ape_result.add_trajectory(ref_name, traj_ref)
    ape_result.add_trajectory(est_name, traj_est)
    if isinstance(traj_est, PoseTrajectory3D):
        seconds_from_start = np.array(
            [t - traj_est.timestamps[0] for t in traj_est.timestamps])
        ape_result.add_np_array("seconds_from_start", seconds_from_start)
        ape_result.add_np_array("timestamps", traj_est.timestamps)
    if alignment_transformation is not None:
        ape_result.add_np_array("alignment_transformation_sim3",
                                alignment_transformation)

    return ape_result
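
# A minimal usage sketch (added for illustration, not part of the module):
# computes the translational APE with SE(3) Umeyama alignment. The file names
# "ref.txt" and "est.txt" are hypothetical TUM-format trajectory files.
def _example_ape_usage():
    from evo.core import metrics
    from evo.tools import file_interface
    traj_ref = file_interface.read_tum_trajectory_file("ref.txt")
    traj_est = file_interface.read_tum_trajectory_file("est.txt")
    result = ape(traj_ref, traj_est,
                 pose_relation=metrics.PoseRelation.translation_part,
                 align=True)
    print(result.stats["rmse"])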
def trajectory_stats_to_df(traj: trajectory.PosePath3D,
                           name: typing.Optional[str] = None) -> pd.DataFrame:
    """
    Builds a single-row DataFrame from a trajectory's scalar infos
    and statistics.
    """
    if not isinstance(traj, trajectory.PosePath3D):
        raise TypeError("trajectory.PosePath3D or derived required")
    data_dict = {k: v for k, v in traj.get_infos().items() if np.isscalar(v)}
    data_dict.update(traj.get_statistics())
    index = [name] if name else ['0']
    return pd.DataFrame(data=data_dict, index=index)
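
# Illustrative sketch (assumes the random_se3_list test helper, the same one
# used by fake_path() further below): tabulate the stats of a synthetic path.
def _example_stats_df():
    from evo.core.trajectory import PosePath3D
    traj = PosePath3D(poses_se3=random_se3_list(10))
    df = trajectory_stats_to_df(traj, name="random_path")
    print(df.T)  # transpose: one row per statistic, one column per trajectory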
def read_kitti_poses_file(file_path):
    """
    parses pose file in KITTI format (first 3 rows of SE(3) matrix per line)
    :param file_path: the trajectory file path (or file handle)
    :return: trajectory.PosePath3D
    """
    raw_mat = csv_read_matrix(file_path, delim=" ", comment_str="#")
    error_msg = ("KITTI pose files must have 12 entries per row "
                 "and no trailing delimiter at the end of the rows (space)")
    if len(raw_mat) > 0 and len(raw_mat[0]) != 12:
        raise FileInterfaceException(error_msg)
    try:
        mat = np.array(raw_mat).astype(float)
    except ValueError:
        raise FileInterfaceException(error_msg)
    # yapf: disable
    poses = [np.array([[r[0], r[1], r[2], r[3]],
                       [r[4], r[5], r[6], r[7]],
                       [r[8], r[9], r[10], r[11]],
                       [0, 0, 0, 1]]) for r in mat]
    # yapf: enable
    if not hasattr(file_path, 'read'):  # if not file handle
        logger.debug("Loaded {} poses from: {}".format(len(poses), file_path))
    return PosePath3D(poses_se3=poses)
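
# Usage sketch (hypothetical file path): load a KITTI ground truth file and
# inspect the resulting path object.
def _example_read_kitti():
    traj = read_kitti_poses_file("poses/00.txt")  # assumed to exist
    print(traj.num_poses, "poses,", traj.path_length, "m path length")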
def traj_rpy(axarr: np.ndarray, traj: trajectory.PosePath3D,
             style: str = '-', color: str = 'black', label: str = "",
             alpha: float = 1.0,
             start_timestamp: typing.Optional[float] = None) -> None:
    """
    plot a path/trajectory's Euler RPY angles into an axis
    :param axarr: an axis array (for R, P & Y)
                  e.g. from 'fig, axarr = plt.subplots(3)'
    :param traj: trajectory.PosePath3D or trajectory.PoseTrajectory3D object
    :param style: matplotlib line style
    :param color: matplotlib color
    :param label: label (for legend)
    :param alpha: alpha value for transparency
    :param start_timestamp: optional start time of the reference
                            (for x-axis alignment)
    """
    if len(axarr) != 3:
        raise PlotException("expected an axis array with 3 subplots - got " +
                            str(len(axarr)))
    angles = traj.get_orientations_euler(SETTINGS.euler_angle_sequence)
    if isinstance(traj, trajectory.PoseTrajectory3D):
        if start_timestamp:
            x = traj.timestamps - start_timestamp
        else:
            x = traj.timestamps
        xlabel = "$t$ (s)"
    else:
        x = range(0, len(angles))
        xlabel = "index"
    ylabels = ["$roll$ (deg)", "$pitch$ (deg)", "$yaw$ (deg)"]
    for i in range(0, 3):
        axarr[i].plot(x, np.rad2deg(angles[:, i]), style, color=color,
                      label=label, alpha=alpha)
        axarr[i].set_ylabel(ylabels[i])
    axarr[2].set_xlabel(xlabel)
    if label:
        axarr[0].legend(frameon=True)
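
# Plotting sketch (matplotlib assumed available): three stacked subplots for
# roll, pitch and yaw, as suggested in the docstring above.
def _example_traj_rpy(traj):
    import matplotlib.pyplot as plt
    fig, axarr = plt.subplots(3, sharex=True)
    traj_rpy(axarr, traj, style='-', color='blue', label="estimate")
    plt.show()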
def main():
    hvd.init()
    args = parser.parse_args()
    if hvd.size() > 1:
        printcolor('----------- DISTRIBUTED DATA PARALLEL -----------', 'cyan')
        device_id = hvd.local_rank()
        torch.cuda.set_device(device_id)
    if hvd.rank() == 0:
        os.makedirs(args.output_dir, exist_ok=True)

    depth_net = DispResnet(version='18_pt')
    depth_net.load_state_dict(torch.load(args.depth_model,
                                         map_location='cpu'))
    depth_net = depth_net.cuda()  # move to GPU

    keypoint_net = KeypointResnet()
    keypoint_net.load_state_dict(
        torch.load(args.keypoint_model, map_location='cpu'))
    keypoint_net.cuda()

    def _set_seeds(seed=42):
        """Set Python random seeding and PyTorch seeds.

        Parameters
        ----------
        seed: int, default: 42
            Random number generator seeds for PyTorch and python
        """
        np.random.seed(seed)
        random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

    def _worker_init_fn(worker_id):
        """Worker init fn to fix the seed of the workers"""
        _set_seeds(42 + worker_id)

    if args.run_evaluation:
        all_trel = []
        all_rrel = []

    ########## STACK PREDICTED POSES INTO FULL TRAJECTORY ##########
    printcolor('Evaluation sequences {}'.format(args.sequence))
    for seq in args.sequence:
        dataset = KITTIODOMETRYDataset(
            args.dataset_dir,
            [seq],
            back_context=0,
            forward_context=1,
            image_shape=(args.input_height, args.input_width),
        )
        trajectory_length = sum(len(imgs) for imgs in dataset.img_files)
        printcolor('Evaluating SEQ {}'.format(seq))
        sampler = None
        if hvd.size() > 1:
            sampler = torch.utils.data.distributed.DistributedSampler(
                dataset, num_replicas=hvd.size(), rank=hvd.rank())
        dl = DataLoader(dataset, batch_size=1, pin_memory=False,
                        shuffle=False, num_workers=48,
                        worker_init_fn=_worker_init_fn, sampler=sampler)
        all_poses = accumulate_pose_single_frame(
            dataloader=dl, depth_net=depth_net, keypoint_net=keypoint_net,
            trajectory_length=trajectory_length, rank=hvd.rank())
        if hvd.rank() == 0:
            np.savetxt(os.path.join(args.output_dir, str(seq) + '_ori.txt'),
                       all_poses.reshape(trajectory_length, -1)[:, :12])

        if args.align_trajectory and hvd.rank() == 0:
            printcolor('Computing scale alignment.')
            # Load ground truth poses
            ground_truth_trajectory = file_interface.read_kitti_poses_file(
                os.path.join(args.dataset_dir, 'poses', seq) + '.txt')
            # Convert predicted trajectory
            predicted_trajectory = PosePath3D(poses_se3=all_poses)
            # Umeyama alignment with scaling only
            predicted_trajectory_aligned = copy.deepcopy(predicted_trajectory)
            predicted_trajectory_aligned.align(ground_truth_trajectory,
                                               correct_only_scale=True)
            # Save aligned trajectory
            file_interface.write_kitti_poses_file(
                os.path.join(args.output_dir, seq) + '.txt',
                predicted_trajectory_aligned)

        if args.run_evaluation and hvd.rank() == 0:
            pose_test_executable = "./kp3d/externals/cpp/evaluate_odometry"
            p = subprocess.Popen(
                [pose_test_executable, args.dataset_dir, args.output_dir, seq],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p.wait()
            seq_output = os.path.join(args.output_dir, seq + '_stats.txt')
            with open(seq_output, 'r') as f:
                pose_result = f.readlines()[0].split(' ')
            pose_result = [float(p) for p in pose_result]
            traj_pos_error = pose_result[0]
            traj_rot_error = pose_result[1]
            all_trel.append(traj_pos_error)
            all_rrel.append(traj_rot_error)
            printcolor(
                'SEQ {} -- Mean TRAJ (pos: {:.4f}, rot: {:.4f})'.format(
                    seq, traj_pos_error, traj_rot_error), 'green')

    if args.run_evaluation and hvd.rank() == 0:
        printcolor(all_trel)
        printcolor(all_rrel)
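
# The alignment step above, isolated as a sketch (assumed file paths):
# scale-only Umeyama alignment of a predicted KITTI trajectory against
# ground truth via evo's PosePath3D.align(correct_only_scale=True).
def _example_scale_alignment(pred_file, gt_file):
    import copy
    from evo.tools import file_interface
    traj_pred = file_interface.read_kitti_poses_file(pred_file)
    traj_gt = file_interface.read_kitti_poses_file(gt_file)
    aligned = copy.deepcopy(traj_pred)
    aligned.align(traj_gt, correct_only_scale=True)
    return aligned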
def rpe(traj_ref: PosePath3D, traj_est: PosePath3D,
        pose_relation: metrics.PoseRelation, delta: float,
        delta_unit: metrics.Unit, rel_delta_tol: float = 0.1,
        all_pairs: bool = False, align: bool = False,
        correct_scale: bool = False, n_to_align: int = -1,
        align_origin: bool = False, ref_name: str = "reference",
        est_name: str = "estimate", support_loop: bool = False) -> Result:
    """
    Computes the relative pose error (RPE) between two trajectories,
    optionally after aligning the estimate to the reference,
    and returns a metrics result object.
    """
    # Align the trajectories.
    only_scale = correct_scale and not align
    if align or correct_scale:
        logger.debug(SEP)
        traj_est.align(traj_ref, correct_scale, only_scale, n=n_to_align)
    elif align_origin:
        logger.debug(SEP)
        traj_est.align_origin(traj_ref)

    # Calculate RPE.
    logger.debug(SEP)
    data = (traj_ref, traj_est)
    rpe_metric = metrics.RPE(pose_relation, delta, delta_unit, rel_delta_tol,
                             all_pairs)
    rpe_metric.process_data(data)

    title = str(rpe_metric)
    if align and not correct_scale:
        title += "\n(with SE(3) Umeyama alignment)"
    elif align and correct_scale:
        title += "\n(with Sim(3) Umeyama alignment)"
    elif only_scale:
        title += "\n(scale corrected)"
    elif align_origin:
        title += "\n(with origin alignment)"
    else:
        title += "\n(not aligned)"
    if (align or correct_scale) and n_to_align != -1:
        title += " (aligned poses: {})".format(n_to_align)

    rpe_result = rpe_metric.get_result(ref_name, est_name)
    rpe_result.info["title"] = title
    logger.debug(SEP)
    logger.info(rpe_result.pretty_str())

    # Restrict trajectories to delta ids for further processing steps.
    if support_loop:
        # Avoid overwriting if called repeatedly e.g. in Jupyter notebook.
        import copy
        traj_ref = copy.deepcopy(traj_ref)
        traj_est = copy.deepcopy(traj_est)
    traj_ref.reduce_to_ids(rpe_metric.delta_ids)
    traj_est.reduce_to_ids(rpe_metric.delta_ids)
    rpe_result.add_trajectory(ref_name, traj_ref)
    rpe_result.add_trajectory(est_name, traj_est)
    if isinstance(traj_est, PoseTrajectory3D):
        seconds_from_start = np.array(
            [t - traj_est.timestamps[0] for t in traj_est.timestamps])
        rpe_result.add_np_array("seconds_from_start", seconds_from_start)
        rpe_result.add_np_array("timestamps", traj_est.timestamps)

    return rpe_result
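
# Usage sketch (trajectories assumed loaded elsewhere): RPE over a 1 m
# displacement delta, a common setting for KITTI-style drift evaluation.
def _example_rpe(traj_ref, traj_est):
    from evo.core import metrics
    result = rpe(traj_ref, traj_est,
                 pose_relation=metrics.PoseRelation.translation_part,
                 delta=1.0, delta_unit=metrics.Unit.meters, align=True)
    print(result.stats["mean"])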
def fake_path(length):
    return PosePath3D(poses_se3=random_se3_list(length))
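
# Test sketch: fake_path() makes it easy to exercise the metric pipeline
# without real data (assuming the ape() function above is importable).
def _example_fake_path():
    from evo.core import metrics
    traj_ref = fake_path(100)
    traj_est = fake_path(100)
    result = ape(traj_ref, traj_est,
                 pose_relation=metrics.PoseRelation.translation_part)
    print(result.pretty_str())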