def np_traj_to_kitti(working_dir):
    """Convert saved numpy pose trajectories to KITTI-format text files.

    Reads ``<working_dir>/est_poses/<seq>.npy`` and the matching
    ``<working_dir>/gt_poses/<seq>.npy``, and writes one KITTI-style text
    file per trajectory into ``<working_dir>/kitti``.

    Args:
        working_dir: directory containing ``est_poses`` and ``gt_poses``.
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ CONVERT TO KITTI ================")
    logger.print("Working on directory:", working_dir)

    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    kitti_traj_output = os.path.join(working_dir, "kitti")
    Logger.make_dir_if_not_exist(kitti_traj_output)

    pose_est_files = sorted(os.listdir(pose_est_dir))
    logger.print("Found pose estimate files: \n" + "\n".join(pose_est_files))

    for i, pose_est_file in enumerate(pose_est_files):
        sequence = os.path.splitext(pose_est_file)[0]
        traj_est = np.load(os.path.join(pose_est_dir, "%s.npy" % sequence))
        traj_gt = np.load(os.path.join(pose_gt_dir, "%s.npy" % sequence))
        # est and gt trajectories must be frame-aligned
        assert (traj_est.shape[0] == traj_gt.shape[0])

        # BUG FIX: use context managers so both output files are closed even
        # if write_trj raises (previously plain open()/close() pairs).
        with open(os.path.join(kitti_traj_output, "%s_est.txt" % sequence), "w") as kitti_est_file, \
                open(os.path.join(kitti_traj_output, "%s_gt.txt" % sequence), "w") as kitti_gt_file:
            for j in range(0, traj_est.shape[0]):
                write_trj(kitti_est_file, traj_est[j, :, :])
                write_trj(kitti_gt_file, traj_gt[j, :, :])

    logger.print("All Done.")
def test_ekf_all_plotted(self):
    """Run the EKF over four KITTI sequences and plot the resulting states."""
    out_path = os.path.join(par.results_coll_dir, "test_ekf_all_plotted")
    logger.initialize(out_path, use_tensorboard=False)

    sequences = ["K01", "K06", "K07", "K10"]
    dev = "cpu"
    requires_grad = False

    # IMU process noise diagonal
    imu_noise = torch.diag(
        torch.tensor([
            1e-5, 1e-5, 1e-5, 1e-8, 1e-8, 1e-8, 1e-1, 1e-1, 1e-1, 1e-3,
            1e-3, 1e-3
        ])).to(dev)
    # visual measurement noise diagonal
    vis_noise = torch.diag(
        torch.tensor([1e-2, 1e-2, 1e-2, 1e0, 1e0, 1e0])).to(dev)

    # initial 18x18 state covariance, tiled to one copy per sequence
    initial_P = np.eye(18, 18)
    initial_P[0:3, 0:3] = np.eye(3, 3) * 1e-4  # g
    initial_P[3:9, 3:9] = np.zeros([6, 6])  # C,r
    initial_P[9:12, 9:12] = np.eye(3, 3) * 1e-2  # v
    initial_P[12:15, 12:15] = np.eye(3, 3) * 1e-8  # bw
    # initial_P[15:18, 15:18] = np.eye(3, 3) * 1e-2  # ba
    initial_P = np.tile(initial_P, (
        len(sequences),
        1,
        1,
    ))

    timestamps, gt_poses, gt_vels, poses, states, covars = \
        self.ekf_test_case(sequences, [0, 1091], initial_P, imu_noise,
                           vis_noise, dev, requires_grad)

    for idx, seq in enumerate(sequences):
        plot_ekf_data(os.path.join(out_path, seq), timestamps[idx],
                      gt_poses[idx], gt_vels[idx], poses[idx], states[idx])
def euroc_eval(working_dir, seqs):
    """Evaluate estimated EuRoC trajectories with the RMSE APE metric.

    Loads ``<working_dir>/est_poses/<seq>.npy`` for every requested sequence,
    accumulates the per-sequence error, and prints a summary table.
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ Evaluate EUROC ================")
    logger.print("Working on directory:", working_dir)

    pose_est_dir = os.path.join(working_dir, "est_poses")
    est_files = sorted(os.listdir(pose_est_dir))
    logger.print("Evaluating seqs: %s" % ", ".join(seqs))

    available_seqs = [f.replace(".npy", "") for f in est_files]

    table = prettytable.PrettyTable()
    table.field_names = ["Seq.", "RMSE APE Err"]
    table.align["Seq."] = "l"
    table.align["RMSE APE Err"] = "r"

    assert set(seqs).issubset(available_seqs), "est file is not available, have seqs: %s" % \
                                               ", ".join(list(available_seqs))

    error_calc = EurocErrorCalc(seqs)
    stats = []
    for seq in seqs:
        # defensive re-check; the assert above already guarantees membership
        if seq not in available_seqs:
            raise RuntimeError("File for seq %s not available" % seq)
        est_poses = np.load(os.path.join(pose_est_dir, "%s.npy" % seq))
        seq_stat = error_calc.accumulate_error(seq, est_poses)
        table.add_row([seq, "%.6f" % seq_stat])
        stats.append(seq_stat)

    table.add_row(["Ave.", "%.6f" % error_calc.get_average_error()])
    logger.print(table)
    logger.print("Copy to Google Sheets:",
                 ",".join([str(s) for s in stats]))
def plot_ekf_states(working_dir):
    """Plot the saved EKF states of every sequence found under working_dir."""
    figures_dir = os.path.join(working_dir, "figures")
    timestamps_dir = os.path.join(working_dir, "timestamps")
    poses_dir = os.path.join(working_dir, "ekf_states", "poses")
    states_dir = os.path.join(working_dir, "ekf_states", "states")
    gt_velocities_dir = os.path.join(working_dir, "ekf_states", "gt_velocities")
    gt_poses_dir = os.path.join(working_dir, "gt_poses")

    pose_files = sorted(os.listdir(poses_dir))
    # all per-sequence directories must contain exactly the same file names
    assert sorted(os.listdir(poses_dir)) == sorted(os.listdir(states_dir)) == \
           sorted(os.listdir(gt_velocities_dir)) == sorted(os.listdir(timestamps_dir))

    # NOTE(review): this directory is created but the plots below go to
    # <working_dir>/ekf_states/figures — confirm "figures" is still needed.
    Logger.make_dir_if_not_exist(figures_dir)

    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ PLOT EKF States ================")
    logger.print("Working on directory:", working_dir)
    logger.print("Found pose estimate files: \n" + "\n".join(pose_files))

    for pose_file in pose_files:
        seq = os.path.splitext(pose_file)[0]
        plot_ekf_data(os.path.join(working_dir, "ekf_states", "figures", seq),
                      np.load(os.path.join(timestamps_dir, seq + ".npy")),
                      np.load(os.path.join(gt_poses_dir, seq + ".npy")),
                      np.load(os.path.join(gt_velocities_dir, seq + ".npy")),
                      np.load(os.path.join(poses_dir, seq + ".npy")),
                      np.load(os.path.join(states_dir, seq + ".npy")))
        logger.print("Plot saved for sequence %s" % seq)
def kitti_eval_simple(working_dir, seqs):
    """Print per-sequence and overall KITTI translation & rotation errors.

    Args:
        working_dir: directory containing ``est_poses`` and ``gt_poses``.
        seqs: sequences to evaluate, or None to evaluate every estimate file.
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ Evaluate KITTI SIMPLE ================")
    logger.print("Working on directory:", working_dir)

    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    assert sorted(os.listdir(pose_est_dir)) == sorted(os.listdir(pose_gt_dir))
    est_files = sorted(os.listdir(pose_est_dir))

    if seqs is None:
        # default to every sequence that has an estimate file
        seqs = [os.path.splitext(f)[0] for f in est_files]

    all_errs = []
    for seq in seqs:
        est_poses = np.load(os.path.join(pose_est_dir, "%s.npy" % seq))
        gt_poses = np.load(os.path.join(pose_gt_dir, "%s.npy" % seq))
        seq_errs = kitti_eval_pyimpl.calc_kitti_seq_errors(gt_poses, est_poses)
        all_errs += seq_errs

        seq_errs = np.array(seq_errs)
        if seq_errs.any():
            logger.print("Seq %s trans & rot error:" % seq)
            logger.print("%.6f %.6f\n" %
                         (np.average(seq_errs[:, 0]),
                          np.average(seq_errs[:, 1]) * 180 / np.pi))
        else:
            logger.print("Error cannot be computed for seq %s\n" % seq)

    all_errs = np.array(all_errs)
    logger.print("Ave trans & rot error:")
    logger.print("%.6f %.6f\n" %
                 (np.average(all_errs[:, 0]),
                  np.average(all_errs[:, 1]) * 180 / np.pi))
def imu_predict_euroc(self):
    """IMU-only dead-reckoning on EuRoC sequences; saves poses, prints APE.

    For each sequence, runs the predictor, saves the inverted predicted poses
    as ``est.npy`` under a per-sequence directory, and accumulates the EuRoC
    RMSE APE error.
    """
    # BUG FIX: the results directory was "imu_predict_kitti" — a copy-paste
    # from the KITTI variant that made both tests write to the same place.
    output_dir = os.path.join(par.results_coll_dir, "imu_predict_euroc")
    seqs = ['MH_05', 'V1_03', 'V2_02']
    error_calc = EurocErrorCalc(seqs)
    for i in range(0, len(seqs)):
        logger.initialize(os.path.join(output_dir, seqs[i]),
                          use_tensorboard=False)
        timestamps, gt_poses, gt_vels, poses, states, covars, precomp_covars = \
            self.predict_test_case([seqs[i]], None, "cpu", False)
        # invert the predicted poses before saving / evaluating, matching the
        # convention used by the KITTI variant of this test
        poses_np = np.linalg.inv(poses[0].numpy().astype(np.float64))
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(output_dir, seqs[i], "est.npy")), poses_np)
        # plot_ekf_data(os.path.join(output_dir, seqs[i]),
        #               timestamps[0], gt_poses[0], gt_vels[0], poses[0], states[0])
        err = error_calc.accumulate_error(seqs[i], poses_np)
        logger.print("Processed seq %s" % seqs[i])
        logger.print("Errors trans & rot")
        logger.print("%.6f" % err)
    logger.print("Ave Trans Errors: ")
    logger.print(np.average(np.array(error_calc.errors)))
def test_ekf_K06_with_artificial_biases_plotted(self):
    """Run the EKF on K06 with artificially injected IMU biases and plot.

    Three scenarios are exercised: accel bias only, gyro bias only, and both
    at once. For each, the initial covariance is opened up (1e2) on the bias
    states that the scenario injects.
    """
    output_dir = os.path.join(
        par.results_coll_dir, "test_ekf_K06_with_artificial_biases_plotted")
    logger.initialize(output_dir, use_tensorboard=False)
    device = "cpu"
    req_grad = False
    imu_covar = torch.diag(
        torch.tensor([
            1e-5, 1e-5, 1e-5, 1e-8, 1e-8, 1e-8, 1e-1, 1e-1, 1e-1, 1e-3,
            1e-3, 1e-3
        ])).to(device)
    vis_meas_covar = torch.diag(
        torch.tensor([1e-2, 1e-2, 1e-2, 1e0, 1e0, 1e0])).to(device)

    def make_init_covar(bw_var, ba_var):
        # 18x18 initial covariance; only the bias sub-blocks vary by scenario
        # (previously this block was duplicated three times inline)
        p0 = np.eye(18, 18)
        p0[0:3, 0:3] = np.eye(3, 3) * 1e-8  # g
        p0[3:9, 3:9] = np.zeros([6, 6])  # C,r
        p0[9:12, 9:12] = np.eye(3, 3) * 1e-2  # v
        p0[12:15, 12:15] = np.eye(3, 3) * bw_var  # bw
        p0[15:18, 15:18] = np.eye(3, 3) * ba_var  # ba
        return p0.reshape(1, 18, 18)

    # scenario 1: accel bias injected, accel bias covariance opened up
    timestamps, gt_poses, gt_vels, poses, states, covars = \
        self.ekf_test_case(["K06"], [0, 1091, ], make_init_covar(1e-8, 1e2),
                           imu_covar, vis_meas_covar, device, req_grad,
                           accel_bias_inject=np.array([0.1, -0.2, 0.3]).reshape(1, 3))
    plot_ekf_data(os.path.join(output_dir, "K06_accel_bias"), timestamps[0],
                  gt_poses[0], gt_vels[0], poses[0], states[0])

    # scenario 2: gyro bias injected, gyro bias covariance opened up
    timestamps, gt_poses, gt_vels, poses, states, covars = \
        self.ekf_test_case(["K06"], [0, 1091, ], make_init_covar(1e2, 1e-2),
                           imu_covar, vis_meas_covar, device, req_grad,
                           gyro_bias_inject=np.array([-0.1, 0.2, -0.3]).reshape(1, 3))
    plot_ekf_data(os.path.join(output_dir, "K06_gyro_bias"), timestamps[0],
                  gt_poses[0], gt_vels[0], poses[0], states[0])

    # scenario 3: both biases injected, both covariances opened up
    timestamps, gt_poses, gt_vels, poses, states, covars = \
        self.ekf_test_case(["K06"], [0, 1091, ], make_init_covar(1e2, 1e2),
                           imu_covar, vis_meas_covar, device, req_grad,
                           gyro_bias_inject=np.array([-0.05, 0.06, -0.07]).reshape(1, 3),
                           accel_bias_inject=np.array([-0.1, 0.3, -0.2]).reshape(1, 3))
    plot_ekf_data(os.path.join(output_dir, "K06_both_gyro_accel_bias"),
                  timestamps[0], gt_poses[0], gt_vels[0], poses[0], states[0])
def calc_error(working_dir):
    """Compute absolute, relative, and vis-meas errors for every sequence.

    Reads est/gt trajectories and visual measurements from working_dir and
    saves three error arrays per sequence under ``<working_dir>/errors``.
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ CALCULATE ERRORS ================")
    logger.print("Working on directory:", working_dir)

    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    vis_meas_dir = os.path.join(working_dir, "vis_meas")
    errors_output_dir = os.path.join(working_dir, "errors")
    Logger.make_dir_if_not_exist(errors_output_dir)

    pose_est_files = sorted(os.listdir(pose_est_dir))
    logger.print("Found pose estimate files: \n" + "\n".join(pose_est_files))

    def to_err_vec(T):
        # 6-vector [so3 log of rotation, translation] of a 4x4 transform
        return np.concatenate([log_SO3(C_from_T(T)), r_from_T(T)])

    for pose_est_file in pose_est_files:
        sequence = os.path.splitext(pose_est_file)[0]
        traj_est = np.load(os.path.join(pose_est_dir, "%s.npy" % sequence))
        traj_gt = np.load(os.path.join(pose_gt_dir, "%s.npy" % sequence))
        vis_meas = np.load(os.path.join(vis_meas_dir, "meas", "%s.npy" % sequence))
        assert (traj_est.shape[0] == traj_gt.shape[0])

        # absolute pose error at every frame: inv(est) * gt
        abs_traj_error = [to_err_vec(np.linalg.inv(traj_est[k]).dot(traj_gt[k]))
                          for k in range(0, traj_est.shape[0])]

        # frame-to-frame errors; the first entry is zero by definition
        rel_traj_error = [np.zeros(6)]
        vis_meas_error = [np.zeros(6)]
        for k in range(1, traj_est.shape[0]):
            rel_pose_gt = np.linalg.inv(traj_gt[k - 1]).dot(traj_gt[k])
            rel_pose_est = np.linalg.inv(traj_est[k - 1]).dot(traj_est[k])
            rel_traj_error.append(
                to_err_vec(np.linalg.inv(rel_pose_est).dot(rel_pose_gt)))
            # visual measurement error: gt relative motion minus the measurement
            vis_meas_error.append(to_err_vec(rel_pose_gt) - vis_meas[k - 1])

        np.save(logger.ensure_file_dir_exists(
            os.path.join(errors_output_dir, "abs", sequence + ".npy")),
            np.array(abs_traj_error))
        np.save(logger.ensure_file_dir_exists(
            os.path.join(errors_output_dir, "rel", sequence + ".npy")),
            np.array(rel_traj_error))
        np.save(logger.ensure_file_dir_exists(
            os.path.join(errors_output_dir, "vis_meas", sequence + ".npy")),
            np.array(vis_meas_error))
        logger.print("Error saved for sequence %s" % sequence)

    logger.print("All Done.")
def test_predict_plotted(self):
    """IMU-only prediction over two KITTI sequences, with state plots."""
    out_dir = os.path.join(par.results_coll_dir, "ekf_test_predict_plotted")
    logger.initialize(out_dir, use_tensorboard=False)
    sequences = [
        "K01",
        "K06",
    ]
    timestamps, gt_poses, gt_vels, poses, states, covars, precomp_covars = \
        self.predict_test_case(sequences, (0, 1091,), "cpu", False)
    for idx, seq in enumerate(sequences):
        plot_ekf_data(os.path.join(out_dir, seq), timestamps[idx],
                      gt_poses[idx], gt_vels[idx], poses[idx], states[idx])
def imu_predict_kitti(self):
    """IMU-only dead-reckoning on KITTI sequences; saves poses, prints errors.

    For each sequence, runs the predictor, saves the inverted predicted poses
    as ``est.npy``, prints per-sequence KITTI errors, and accumulates them
    for the final averages.
    """
    output_dir = os.path.join(par.results_coll_dir, "imu_predict_kitti")
    seqs = ["K01", "K04", "K06", "K07", "K08", "K09", "K10"]
    error_calc = KittiErrorCalc(seqs)
    for i in range(0, len(seqs)):
        logger.initialize(os.path.join(output_dir, seqs[i]),
                          use_tensorboard=False)
        timestamps, gt_poses, gt_vels, poses, states, covars, precomp_covars = \
            self.predict_test_case([seqs[i]], None, "cpu", False)
        poses_np = np.linalg.inv(poses[0].numpy().astype(np.float64))
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(output_dir, seqs[i], "est.npy")), poses_np)
        # plot_ekf_data(os.path.join(output_dir, seqs[i]),
        #               timestamps[0], gt_poses[0], gt_vels[0], poses[0], states[0])
        logger.print("Processed seq %s" % seqs[i])
        # FIX: reuse poses_np instead of recomputing the identical
        # np.linalg.inv(poses[0].numpy().astype(np.float64)) a second time
        err = kitti_eval_pyimpl.calc_kitti_seq_errors(gt_poses[0], poses_np)
        err = err[0]
        err = np.array(err)
        logger.print("Errors trans & rot")
        logger.print("%.6f" % np.average(err[:, 0]))
        logger.print("%.6f" % (np.average(err[:, 1]) * 180 / np.pi))
        error_calc.accumulate_error(seqs[i], poses_np)
    logger.print("Ave Trans Errors: ")
    logger.print(np.average(np.array(error_calc.errors)[:, 0]))
    logger.print("Ave Rot Errors: ")
    logger.print(np.average(np.array(error_calc.errors)[:, 1]))
def plot_trajectory(working_dir):
    """Plot estimated vs ground-truth trajectories for every sequence.

    Produces XY/XZ/YZ plane plots plus per-axis translation and rotation
    progression plots under ``<working_dir>/figures``.
    """
    output_dir = os.path.join(working_dir, "figures")
    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    assert sorted(os.listdir(pose_est_dir)) == sorted(os.listdir(pose_gt_dir))
    pose_est_files = sorted(os.listdir(pose_est_dir))
    Logger.make_dir_if_not_exist(output_dir)

    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ PLOT TRAJECTORY ================")
    logger.print("Working on directory:", working_dir)
    logger.print("Found pose estimate files: \n" + "\n".join(pose_est_files))

    for i, pose_est_file in enumerate(pose_est_files):
        sequence = os.path.splitext(pose_est_file)[0]
        trajectory = np.load(os.path.join(pose_est_dir, "%s.npy" % sequence))
        trajectory_gt = np.load(os.path.join(pose_gt_dir, "%s.npy" % sequence))

        # split each 4x4 pose into translation and so3-log rotation
        trans_xyz = np.array([se3.r_from_T(T) for T in trajectory])
        rot_xyz = np.array([se3.log_SO3(se3.C_from_T(T)) for T in trajectory])
        trans_xyz_gt = np.array([se3.r_from_T(T) for T in trajectory_gt])
        rot_xyz_gt = np.array(
            [se3.log_SO3(se3.C_from_T(T)) for T in trajectory_gt])

        trans_x = trans_xyz[:, 0]
        trans_y = trans_xyz[:, 1]
        trans_z = trans_xyz[:, 2]
        rot_x = rot_xyz[:, 0]
        rot_y = rot_xyz[:, 1]
        rot_z = rot_xyz[:, 2]
        trans_x_gt = trans_xyz_gt[:, 0]
        trans_y_gt = trans_xyz_gt[:, 1]
        trans_z_gt = trans_xyz_gt[:, 2]
        rot_x_gt = rot_xyz_gt[:, 0]
        rot_y_gt = rot_xyz_gt[:, 1]
        rot_z_gt = rot_xyz_gt[:, 2]

        # ########## XYZ Planes Plots ########## #
        plt.figure()
        plt.clf()
        plt.plot(trans_x, trans_y, linewidth=1.0, color="r", label="Estimate")
        plt.plot(trans_x_gt, trans_y_gt, linewidth=1.0, color="b",
                 label="Ground Truth")
        plt.axis("equal")
        plt.xlabel("x [m]")
        plt.ylabel("y [m]")
        plt.title("Seq. %s Trajectory XY Plane" % sequence)
        plt.legend()
        plt.savefig(
            os.path.join(output_dir, "seq_%s_00_xy_traj.png" % sequence))

        plt.clf()
        plt.plot(trans_x, trans_z, linewidth=1.0, color="r", label="Estimate")
        plt.plot(trans_x_gt, trans_z_gt, linewidth=1.0, color="b",
                 label="Ground Truth")
        plt.axis("equal")
        plt.xlabel("x [m]")
        plt.ylabel("z [m]")
        plt.title("Seq. %s Trajectory XZ Plane" % sequence)
        plt.legend()
        plt.savefig(
            os.path.join(output_dir, "seq_%s_01_xz_traj.png" % sequence))

        plt.clf()
        plt.plot(trans_y, trans_z, linewidth=1.0, color="r", label="Estimate")
        plt.plot(trans_y_gt, trans_z_gt, linewidth=1.0, color="b",
                 label="Ground Truth")
        plt.axis("equal")
        plt.xlabel("y [m]")
        plt.ylabel("z [m]")
        plt.title("Seq. %s Trajectory YZ Plane" % sequence)
        plt.legend()
        plt.savefig(
            os.path.join(output_dir, "seq_%s_02_yz_traj.png" % sequence))

        # ########## Progression Throughout Planes Plot ##########
        labels = ["Trans X", "Trans Y", "Trans Z", "Rot X", "Rot Y", "Rot Z"]
        data = [trans_x, trans_y, trans_z, rot_x, rot_y, rot_z]
        data_gt = [
            trans_x_gt, trans_y_gt, trans_z_gt, rot_x_gt, rot_y_gt, rot_z_gt
        ]
        for j in range(0, len(labels)):
            plt.clf()
            plt.plot(data[j], linewidth=1.0, color="r", label="Estimate")
            plt.plot(data_gt[j], linewidth=1.0, color="b",
                     label="Ground Truth")
            plt.xlabel("frame # []")
            plt.ylabel(labels[j].lower())
            plt.title("Seq. %s %s" % (sequence, labels[j]))
            plt.legend()
            plt.savefig(
                os.path.join(
                    output_dir, "seq_%s_%02d_%s_plt.png" %
                    (sequence, j + 3,
                     "_".join(labels[j].lower().split()))))

        # BUG FIX: close the figure created at the top of this iteration;
        # previously one figure per sequence accumulated in memory.
        plt.close()
        logger.print("Plot saved for sequence %s" % sequence)
def gen_trajectory(model_file_path, sequences, seq_len, prop_lstm_states):
    """Generate and save trajectories for the given sequences using a trained model.

    Loads the E2EVIO model from model_file_path, then for each sequence runs
    either the EKF pipeline (when par.enable_ekf) or the relative-pose
    pipeline, and saves visual measurements, covariances, estimated poses,
    ground-truth poses, and timestamps under <model_file_path>.traj.

    Args:
        model_file_path: path to the saved model state dict.
        sequences: list of sequence names to process.
        seq_len: subsequence length used to window each sequence.
        prop_lstm_states: whether to propagate LSTM states across subsequences
            (only used by the non-EKF pipeline).

    Returns:
        The working directory the trajectories were written to.
    """
    # Path
    model_file_path = os.path.abspath(model_file_path)
    assert (os.path.exists(model_file_path))
    # all outputs go next to the model file, in "<model name>.traj"
    working_dir = os.path.join(os.path.dirname(model_file_path),
                               os.path.basename(model_file_path) + ".traj")
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ GENERATE TRAJECTORY REL ================")

    # Load model
    logger.print("Constructing model...")
    model = E2EVIO()
    model = model.cuda()
    logger.print("Loading model from: ", model_file_path)
    model.load_state_dict(
        logger.clean_state_dict_key(torch.load(model_file_path)))
    model.eval()  # inference only; disables dropout/batch-norm updates

    logger.log_parameters()
    logger.print("Using sequence length:", seq_len)
    logger.print("Prop LSTM states:", prop_lstm_states)
    logger.print("Sequences: \n" + "\n".join(sequences))

    for seq in sequences:
        logger.print("Generating trajectory for seq...", seq)
        start_time = time.time()
        # window the sequence into non-overlapping subsequences (overlap=1
        # frame so consecutive windows share a boundary frame)
        subseqs = get_subseqs([seq], seq_len, overlap=1, sample_times=1,
                              training=False)
        dataset = SubseqDataset(subseqs, (par.img_h, par.img_w),
                                par.img_means, par.img_stds,
                                par.minus_point_5, training=False)
        # batch_size=1 and shuffle=False to preserve temporal order
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                                 shuffle=False, num_workers=4)
        seq_data = SequenceData(seq)
        gt_abs_poses = seq_data.get_poses()
        timestamps = seq_data.get_timestamps()

        if par.enable_ekf:
            logger.print("With EKF enabled ...")
            est_vis_meas_dict, vis_meas_covar_dict, est_poses_dict, est_states_dict, est_covars_dict = \
                gen_trajectory_abs_iter(model, {seq: dataloader})
            est_vis_meas = est_vis_meas_dict[seq]
            vis_meas_covar = vis_meas_covar_dict[seq]
            est_states = est_states_dict[seq]
            est_covars = est_covars_dict[seq]
            est_poses = est_poses_dict[seq]
            # persist all EKF intermediate outputs for later plotting/analysis
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "vis_meas",
                                 seq + ".npy")), est_vis_meas)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "vis_meas_covar",
                                 seq + ".npy")), vis_meas_covar)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "poses",
                                 seq + ".npy")), est_poses)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "states",
                                 seq + ".npy")), est_states)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "covars",
                                 seq + ".npy")), est_covars)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "gt_velocities",
                                 seq + ".npy")), seq_data.get_velocities())
            # invert before saving as the final estimate
            # NOTE(review): presumably converts between world-from-body and
            # body-from-world conventions — confirm against the consumers
            est_poses = np.linalg.inv(
                np.array(est_poses_dict[seq]).astype(np.float64))
        else:
            logger.print("Without EKF enabled ...")
            est_poses, est_vis_meas, vis_meas_covar = gen_trajectory_rel_iter(
                model, dataloader, prop_lstm_states,
                initial_pose=gt_abs_poses[0, :, :])

        # common outputs saved for both pipelines
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "vis_meas", "meas", seq + ".npy")),
            est_vis_meas)
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "vis_meas", "covar", seq + ".npy")),
            vis_meas_covar)
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "est_poses", seq + ".npy")),
            est_poses)
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "gt_poses", seq + ".npy")),
            gt_abs_poses[:len(est_poses)])  # ensure same length as est poses
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "timestamps", seq + ".npy")),
            timestamps[:len(est_poses)])

        logger.print("Done, took %.2f seconds" % (time.time() - start_time))

    return working_dir
def plot_errors(working_dir):
    """Plot relative/absolute pose errors and vis-meas error vs covariance.

    Reads the arrays written by calc_error() plus the visual-measurement
    covariances, and writes per-axis figures and histograms under
    ``<working_dir>/errors``.
    """
    errors_dir = os.path.join(working_dir, "errors")
    rel_errors_dir = os.path.join(errors_dir, "rel")
    abs_errors_dir = os.path.join(errors_dir, "abs")
    vis_meas_errors_dir = os.path.join(errors_dir, "vis_meas")
    vis_meas_covars_dir = os.path.join(working_dir, "vis_meas", "covar")

    abs_errors_files = sorted(os.listdir(abs_errors_dir))
    rel_errors_files = sorted(os.listdir(rel_errors_dir))
    vis_meas_errors_files = sorted(os.listdir(vis_meas_errors_dir))
    vis_meas_covars_files = sorted(os.listdir(vis_meas_covars_dir))
    # every error/covar directory must cover the same sequences
    assert (abs_errors_files == rel_errors_files == vis_meas_errors_files ==
            vis_meas_covars_files)
    sequences = [f[:-4] for f in abs_errors_files]  # strip ".npy"

    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ PLOT ERROR ================")
    logger.print("Working on directory:", working_dir)
    logger.print("Found sequences: [%s]" % ", ".join(sequences))

    for i, sequence in enumerate(sequences):
        error_rel = np.load(os.path.join(rel_errors_dir, "%s.npy" % sequence))
        error_abs = np.load(os.path.join(abs_errors_dir, "%s.npy" % sequence))
        error_vis_meas = np.load(
            os.path.join(vis_meas_errors_dir, "%s.npy" % sequence))
        covar_vis_meas = np.load(
            os.path.join(vis_meas_covars_dir, "%s.npy" % sequence))

        labels = ["Rot X", "Rot Y", "Rot Z", "Trans X", "Trans Y", "Trans Z"]

        # relative error plots + histograms with fitted normal pdf
        for j in range(0, 6):
            err = error_rel[:, j]
            plt.clf()
            plt.plot(err, color="r")
            plt.xlabel("frame # []")
            plt.ylabel(labels[j].lower())
            plt.title("Seq. %s %s Rel Error" % (sequence, labels[j]))
            plt.savefig(Logger.ensure_file_dir_exists(
                os.path.join(errors_dir, "rel_error_figures",
                             "seq_%s_%02d_%s_rel_err_plt.png" % (
                                 sequence, j,
                                 "_".join(labels[j].lower().split())))))

            plt.clf()
            # BUG FIX: `normed` was removed from matplotlib's hist() (3.1+);
            # `density=True` is the equivalent normalization.
            plt.hist(err, bins=np.linspace(start=np.min(err), stop=np.max(err),
                                           num=100), density=True)
            ticks = plt.xticks()[0]
            lnspc = np.linspace(min(ticks), max(ticks), len(err))
            mean, std_dev = stats.norm.fit(err)
            pdf_g = stats.norm.pdf(lnspc, mean, std_dev)
            plt.plot(lnspc, pdf_g, color="r")
            plt.text(0.05, 0.9,
                     r"$\mu$=%.4g" % mean + "\n" + r"$\sigma$=%.4g" % std_dev,
                     transform=plt.gca().transAxes)
            plt.title("Seq. %s %s Rel Error Hist." % (sequence, labels[j]))
            plt.savefig(Logger.ensure_file_dir_exists(
                os.path.join(errors_dir, "rel_error_histograms",
                             "fig_%s_%02d_%s_rel_err_hist.png" % (
                                 sequence, j,
                                 "_".join(labels[j].lower().split())))))

        # vis-meas error vs covariance envelope
        for j in range(0, 6):
            err = np.abs(error_vis_meas[:, j])
            covar = covar_vis_meas[:, j, j]
            if j in [0, 1, 2]:
                # rotation covariances are rescaled by par.k1 — presumably the
                # rotation loss weighting; TODO confirm
                covar = covar / par.k1
            covar_sig = np.sqrt(covar)
            # NOTE(review): the legend says "3sig" but covar_sig is 1-sigma —
            # confirm whether 3 * sqrt(covar) was intended.
            plt.clf()
            plt.plot(err, color="r", label="Err")
            plt.plot(-err, color="r", label="Err")
            plt.plot(covar_sig, color="b", label="3sig")
            plt.plot(-covar_sig, color="b", label="3sig")
            # (removed a stray no-op plt.plot() call here)
            plt.xlabel("frame # []")
            plt.ylabel(labels[j].lower())
            plt.title("Seq. %s %s Vis Meas Error & Covar" %
                      (sequence, labels[j]))
            plt.legend()
            plt.savefig(Logger.ensure_file_dir_exists(
                os.path.join(errors_dir, "vis_meas_error_covar_figures",
                             "fig_%s_%02d_%s_vis_meas_error_covar.png" % (
                                 sequence, j,
                                 "_".join(labels[j].lower().split())))))

        # absolute error plots
        for j in range(0, 6):
            err = error_abs[:, j]
            plt.clf()
            plt.plot(err, color="r")
            plt.xlabel("frame # []")
            plt.ylabel(labels[j].lower())
            plt.title("Seq. %s %s Abs Error" % (sequence, labels[j]))
            plt.savefig(Logger.ensure_file_dir_exists(
                os.path.join(errors_dir, "abs_error_figures",
                             "seq_%s_%02d_%s_abs_err_plt.png" % (
                                 sequence, j,
                                 "_".join(labels[j].lower().split())))))

        logger.print("Plots saved for sequence %s" % sequence)
def preprocess_kitti_raw(raw_seq_dir, output_dir, cam_subset_range, plot_figures=True): logger.initialize(working_dir=output_dir, use_tensorboard=False) logger.print("================ PREPROCESS KITTI RAW ================") logger.print("Preprocessing %s" % raw_seq_dir) logger.print("Output to: %s" % output_dir) logger.print("Camera images: %d => %d" % (cam_subset_range[0], cam_subset_range[1])) oxts_dir = os.path.join(raw_seq_dir, "oxts") image_dir = os.path.join(raw_seq_dir, "image_02") gps_poses = np.loadtxt(os.path.join(oxts_dir, "poses.txt")) gps_poses = np.array([np.vstack([np.reshape(p, [3, 4]), [0, 0, 0, 1]]) for p in gps_poses]) T_velo_imu = np.loadtxt(os.path.join(raw_seq_dir, "../T_velo_imu.txt")) T_cam_velo = np.loadtxt(os.path.join(raw_seq_dir, '../T_cam_velo.txt')) T_cam_imu = T_cam_velo.dot(T_velo_imu) # imu timestamps imu_timestamps = read_timestamps(os.path.join(oxts_dir, "timestamps.txt")) assert (len(imu_timestamps) == len(gps_poses)) # load image data cam_timestamps = read_timestamps(os.path.join(image_dir, "timestamps.txt")) image_paths = [os.path.join(image_dir, "data", p) for p in sorted(os.listdir(os.path.join(image_dir, "data")))] assert (len(cam_timestamps) == len(image_paths)) assert (cam_subset_range[0] >= 0 and cam_subset_range[1] < len(image_paths)) # the first camera timestamps must be between IMU timestamps assert (cam_timestamps[cam_subset_range[0]] >= imu_timestamps[0]) assert (cam_timestamps[cam_subset_range[1]] <= imu_timestamps[-1]) # take subset of the camera images int the range of images we are interested in image_paths = image_paths[cam_subset_range[0]: cam_subset_range[1] + 1] cam_timestamps = cam_timestamps[cam_subset_range[0]: cam_subset_range[1] + 1] # convert to local time reference in seconds cam_timestamps = (cam_timestamps - imu_timestamps[0]) / np.timedelta64(1, 's') imu_timestamps = (imu_timestamps - imu_timestamps[0]) / np.timedelta64(1, 's') # take a subset of imu data corresponds to camera images 
idx_imu_data_start = find_timestamps_in_between(cam_timestamps[0], imu_timestamps)[0] idx_imu_data_end = find_timestamps_in_between(cam_timestamps[-1], imu_timestamps)[1] imu_timestamps = imu_timestamps[idx_imu_data_start:idx_imu_data_end + 1] gps_poses = gps_poses[idx_imu_data_start:idx_imu_data_end + 1] # load IMU data from list of text files imu_data = [] imu_data_files = sorted(os.listdir(os.path.join(oxts_dir, "data"))) start_time = time.time() for i in range(idx_imu_data_start, idx_imu_data_end + 1): print("Loading IMU data files %d/%d (%.2f%%)" % (i + 1 - idx_imu_data_start, len(imu_timestamps), 100 * (i + 1 - idx_imu_data_start) / len(imu_timestamps)), end='\r') imu_data.append(np.loadtxt(os.path.join(oxts_dir, "data", imu_data_files[i]))) imu_data = np.array(imu_data) logger.print("\nLoading IMU data took %.2fs" % (time.time() - start_time)) assert (len(imu_data) == len(gps_poses)) # imu_data = imu_data[idx_imu_data_start:idx_imu_data_end + 1] imu_timestamps, imu_data, gps_poses = remove_negative_timesteps(imu_timestamps, imu_data, gps_poses) data_frames = [] start_time = time.time() idx_imu_slice_start = 0 idx_imu_slice_end = 0 for k in range(0, len(cam_timestamps) - 1): print("Processing IMU data files %d/%d (%.2f%%)" % ( k + 1, len(cam_timestamps), 100 * (k + 1) / len(cam_timestamps)), end='\r') t_k = cam_timestamps[k] t_kp1 = cam_timestamps[k + 1] # the start value does not need to be recomputed, since you can get that from the previous time step, but # i am a lazy person, this will work while imu_timestamps[idx_imu_slice_start] < t_k: idx_imu_slice_start += 1 assert (imu_timestamps[idx_imu_slice_start - 1] <= t_k <= imu_timestamps[idx_imu_slice_start]) # interpolate tk_i = imu_timestamps[idx_imu_slice_start - 1] tk_j = imu_timestamps[idx_imu_slice_start] alpha_k = (t_k - tk_i) / (tk_j - tk_i) T_i_vk, v_vk, w_vk, a_vk = \ interpolate(imu_data[idx_imu_slice_start - 1], imu_data[idx_imu_slice_start], gps_poses[idx_imu_slice_start - 1], 
gps_poses[idx_imu_slice_start], alpha_k) while imu_timestamps[idx_imu_slice_end] < t_kp1: idx_imu_slice_end += 1 assert (imu_timestamps[idx_imu_slice_end - 1] <= t_kp1 <= imu_timestamps[idx_imu_slice_end]) # interpolate tkp1_i = imu_timestamps[idx_imu_slice_end - 1] tkp1_j = imu_timestamps[idx_imu_slice_end] alpha_kp1 = (t_kp1 - tkp1_i) / (tkp1_j - tkp1_i) T_i_vkp1, v_vkp1, w_vkp1, a_vkp1 = \ interpolate(imu_data[idx_imu_slice_end - 1], imu_data[idx_imu_slice_end], gps_poses[idx_imu_slice_end - 1], gps_poses[idx_imu_slice_end], alpha_kp1) imu_timestamps_k_kp1 = np.concatenate( [[t_k], imu_timestamps[idx_imu_slice_start:idx_imu_slice_end - 1], [t_kp1]]) imu_poses = np.concatenate([[T_i_vk], gps_poses[idx_imu_slice_start:idx_imu_slice_end - 1], [T_i_vkp1]]) accel_measurements_k_kp1 = np.concatenate([[a_vk], imu_data[idx_imu_slice_start: idx_imu_slice_end - 1, ax:az + 1], [a_vkp1]]) gyro_measurements_k_kp1 = np.concatenate([[w_vk], imu_data[idx_imu_slice_start: idx_imu_slice_end - 1, wx:wz + 1], [w_vkp1]]) frame_k = SequenceData.Frame(image_paths[k], t_k, T_i_vk, v_vk, imu_poses, imu_timestamps_k_kp1, accel_measurements_k_kp1, gyro_measurements_k_kp1) data_frames.append(frame_k) # assertions for sanity check assert (np.allclose(data_frames[-1].timestamp, data_frames[-1].imu_timestamps[0], atol=1e-13)) assert (np.allclose(data_frames[-1].T_i_vk, data_frames[-1].imu_poses[0], atol=1e-13)) if len(data_frames) > 1: assert (np.allclose(data_frames[-1].timestamp, data_frames[-2].imu_timestamps[-1], atol=1e-13)) assert (np.allclose(data_frames[-1].T_i_vk, data_frames[-2].imu_poses[-1], atol=1e-13)) assert ( np.allclose(data_frames[-1].accel_measurements[0], data_frames[-2].accel_measurements[-1], atol=1e-13)) assert ( np.allclose(data_frames[-1].accel_measurements[0], data_frames[-2].accel_measurements[-1], atol=1e-13)) # add the last frame without any IMU data data_frames.append(SequenceData.Frame(image_paths[-1], t_kp1, T_i_vkp1, v_vkp1, np.zeros([0, 4, 4]), np.zeros([0]), 
np.zeros([0, 3]), np.zeros([0, 3]))) logger.print("\nProcessing data took %.2fs" % (time.time() - start_time)) df = SequenceData.save_as_pd(data_frames, np.array([0, 0, 9.808679801065017]), np.zeros(3), T_cam_imu, output_dir) data = df.to_dict("list") if not plot_figures: logger.print("All done!") return # ============================== FIGURES FOR SANITY TESTS ============================== # plot trajectory start_time = time.time() plotter = Plotter(output_dir) p_poses = np.array(data["T_i_vk"]) p_timestamps = np.array(data["timestamp"]) p_velocities = np.array(data["v_vk_i_vk"]) p_imu_timestamps = np.concatenate([d[:-1] for d in data['imu_timestamps']]) p_gyro_measurements = np.concatenate([d[:-1] for d in data['gyro_measurements']]) p_accel_measurements = np.concatenate([d[:-1] for d in data["accel_measurements"]]) p_imu_poses = np.concatenate([d[:-1, :, :] for d in data["imu_poses"]]) assert (len(p_imu_timestamps) == len(p_gyro_measurements)) assert (len(p_imu_timestamps) == len(p_accel_measurements)) assert (len(p_imu_timestamps) == len(p_imu_poses)) # integrate accel to compare against velocity p_accel_int = [p_velocities[0, :]] p_accel_int_int = [p_poses[0, :3, 3]] # g = np.array([0, 0, 9.80665]) g = np.array([0, 0, 9.808679801065017]) # g = np.array([0, 0, 9.8096]) for i in range(0, len(p_imu_timestamps) - 1): dt = p_imu_timestamps[i + 1] - p_imu_timestamps[i] C_i_vk = p_imu_poses[i, :3, :3] C_vkp1_vk = p_imu_poses[i + 1, :3, :3].transpose().dot(p_imu_poses[i, :3, :3]) v_vk_i_vk = p_accel_int[-1] v_vkp1_vk_vk = dt * (p_accel_measurements[i] - C_i_vk.transpose().dot(g)) v_vkp1_i_vk = v_vk_i_vk + v_vkp1_vk_vk p_accel_int.append(C_vkp1_vk.dot(v_vkp1_i_vk)) p_accel_int_int.append(p_accel_int_int[-1] + p_imu_poses[i, :3, :3].dot(p_accel_int[-1]) * dt) p_accel_int = np.array(p_accel_int) p_accel_int_int = np.array(p_accel_int_int) # poses from integrating velocity p_vel_int_poses = [p_poses[0, :3, 3]] for i in range(0, len(p_velocities) - 1): dt = p_timestamps[i 
+ 1] - p_timestamps[i] dp = p_poses[i, :3, :3].dot(p_velocities[i]) * dt p_vel_int_poses.append(p_vel_int_poses[-1] + dp) p_vel_int_poses = np.array(p_vel_int_poses) plotter.plot(([p_poses[:, 0, 3], p_poses[:, 1, 3]], [p_vel_int_poses[:, 0], p_vel_int_poses[:, 1]], [p_accel_int_int[:, 0], p_accel_int_int[:, 1]],), "x [m]", "Y [m]", "XY Plot", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"], equal_axes=True) plotter.plot(([p_poses[:, 0, 3], p_poses[:, 2, 3]], [p_vel_int_poses[:, 0], p_vel_int_poses[:, 2]], [p_accel_int_int[:, 0], p_accel_int_int[:, 2]],), "X [m]", "Z [m]", "XZ Plot", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"], equal_axes=True) plotter.plot(([p_poses[:, 1, 3], p_poses[:, 2, 3]], [p_vel_int_poses[:, 1], p_vel_int_poses[:, 2]], [p_accel_int_int[:, 1], p_accel_int_int[:, 2]],), "Y [m]", "Z [m]", "YZ Plot", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"], equal_axes=True) plotter.plot(([p_timestamps, p_poses[:, 0, 3]], [p_timestamps, p_vel_int_poses[:, 0]], [p_imu_timestamps, p_accel_int_int[:, 0]],), "t [s]", "Y [m]", "X Plot From Zero", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"]) plotter.plot(([p_timestamps, p_poses[:, 1, 3]], [p_timestamps, p_vel_int_poses[:, 1]], [p_imu_timestamps, p_accel_int_int[:, 1]],), "t [s]", "Z [m]", "Y Plot From Zero", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"]) plotter.plot(([p_timestamps, p_poses[:, 2, 3]], [p_timestamps, p_vel_int_poses[:, 2]], [p_imu_timestamps, p_accel_int_int[:, 2]],), "t [s]", "Z [m]", "Z Plot From Zero", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"]) # plot trajectory rotated wrt to the first frame p_poses_from_I = np.array([np.linalg.inv(p_poses[0]).dot(p) for p in p_poses]) plotter.plot(([p_poses_from_I[:, 0, 3], p_poses_from_I[:, 1, 3]],), "x [m]", "Y [m]", "XY Plot From Identity", equal_axes=True) plotter.plot(([p_poses_from_I[:, 0, 3], p_poses_from_I[:, 2, 3]],), "X [m]", "Z [m]", "XZ Plot From Identity", equal_axes=True) 
plotter.plot(([p_poses_from_I[:, 1, 3], p_poses_from_I[:, 2, 3]],), "Y [m]", "Z [m]", "YZ Plot From Identity", equal_axes=True) # plot p_velocities plotter.plot(([p_timestamps, p_velocities[:, 0]], [p_timestamps, p_velocities[:, 1]], [p_timestamps, p_velocities[:, 2]]), "t [s]", "v [m/s]", "YZ Plot", labels=["dat_vx", "dat_vy", "dat_vz"]) # make sure the interpolated acceleration and gyroscope measurements are the same plotter.plot(([p_imu_timestamps, p_gyro_measurements[:, 0]], [imu_timestamps, imu_data[:, wx]],), "t [s]", "w [rad/s]", "Rot Vel X Verification") plotter.plot(([p_imu_timestamps, p_gyro_measurements[:, 1]], [imu_timestamps, imu_data[:, wy]],), "t [s]", "w [rad/s]", "Rot Vel Y Verification") plotter.plot(([p_imu_timestamps, p_gyro_measurements[:, 2]], [imu_timestamps, imu_data[:, wz]],), "t [s]", "w [rad/s]", "Rot Vel Z Verification") plotter.plot(([p_imu_timestamps, p_accel_measurements[:, 0]], [imu_timestamps, imu_data[:, ax]],), "t [s]", "a [m/s^2]", "Accel X Verification") plotter.plot(([p_imu_timestamps, p_accel_measurements[:, 1]], [imu_timestamps, imu_data[:, ay]],), "t [s]", "a [m/s^2]", "Accel Y Verification") plotter.plot(([p_imu_timestamps, p_accel_measurements[:, 2]], [imu_timestamps, imu_data[:, az]],), "t [s]", "a [m/s^2]", "Accel Z Verification") # integrate gyroscope to compare against rotation p_gyro_int = [data["T_i_vk"][0][:3, :3]] for i in range(0, len(p_imu_timestamps) - 1): dt = p_imu_timestamps[i + 1] - p_imu_timestamps[i] p_gyro_int.append(p_gyro_int[-1].dot(exp_SO3(dt * p_gyro_measurements[i]))) p_gyro_int = np.array([log_SO3(o) for o in p_gyro_int]) p_orientation = np.array([log_SO3(p[:3, :3]) for p in data["T_i_vk"]]) plotter.plot(([p_imu_timestamps, np.unwrap(p_gyro_int[:, 0])], [p_timestamps, np.unwrap(p_orientation[:, 0])],), "t [s]", "rot [rad/s]", "Theta X Cmp Plot", labels=["gyro_int", "dat_pose"]) plotter.plot(([p_imu_timestamps, np.unwrap(p_gyro_int[:, 1])], [p_timestamps, np.unwrap(p_orientation[:, 1])],), "t [s]", 
"rot [rad/s]", "Theta Y Cmp Plot", labels=["gyro_int", "dat_pose"]) plotter.plot(([p_imu_timestamps, np.unwrap(p_gyro_int[:, 2])], [p_timestamps, np.unwrap(p_orientation[:, 2])],), "t [s]", "rot [rad/s]", "Theta Z Cmp Plot", labels=["gyro_int", "dat_pose"]) vel_from_gps_rel_poses = [] for k in range(0, len(gps_poses) - 1): dt = imu_timestamps[k + 1] - imu_timestamps[k] T_i_vk = gps_poses[k] T_i_vkp1 = gps_poses[k + 1] T_vk_vkp1 = np.linalg.inv(T_i_vk).dot(T_i_vkp1) vel_from_gps_rel_poses.append(T_vk_vkp1[0:3, 3] / dt) # vel_from_gps_rel_poses.append(log_SE3(T_vk_vkp1)[0:3] / dt) vel_from_gps_rel_poses = np.array(vel_from_gps_rel_poses) plotter.plot(([imu_timestamps[1:], vel_from_gps_rel_poses[:, 0]], [p_timestamps, p_velocities[:, 0]], [p_imu_timestamps, p_accel_int[:, 0]],), "t [s]", "v [m/s]", "Velocity X Cmp Plot", labels=["gps_rel", "dat_vel", "dat_accel_int"]) plotter.plot(([imu_timestamps[1:], vel_from_gps_rel_poses[:, 1]], [p_timestamps, p_velocities[:, 1]], [p_imu_timestamps, p_accel_int[:, 1]],), "t [s]", "v [m/s]", "Velocity Y Cmp Plot", labels=["gps_rel", "dat_vel", "dat_accel_int"]) plotter.plot(([imu_timestamps[1:], vel_from_gps_rel_poses[:, 2]], [p_timestamps, p_velocities[:, 2]], [p_imu_timestamps, p_accel_int[:, 2]],), "t [s]", "v [m/s]", "Velocity Z Cmp Plot", labels=["gps_rel", "dat_vel", "dat_accel_int"]) logger.print("Generating figures took %.2fs" % (time.time() - start_time)) logger.print("All done!")
def test_ekf_euroc(self):
    """Run the IMU Kalman filter over the EuRoC sequences end to end.

    For each sequence, feeds length-2 subsequences through IMUKalmanFilter,
    using ground-truth relative poses as the visual measurement, chains the
    filter state between subsequences, accumulates trajectory error with
    EurocErrorCalc, and plots the EKF outputs per sequence.
    """
    output_dir = os.path.join(par.results_coll_dir, "test_ekf_euroc")
    logger.initialize(output_dir, use_tensorboard=False)
    # seqs = ["MH_01"]
    seqs = [
        "MH_01", "MH_02", "MH_03", "MH_04", "MH_05", "V1_01", "V1_02",
        "V1_03", "V2_01", "V2_02", "V2_03"
    ]
    # seqs2 = ["MH_01_eval", "MH_02_eval", "MH_03_eval", "MH_04_eval", "MH_05_eval",
    #          "V1_01_eval", "V1_02_eval", "V1_03_eval", "V2_01_eval", "V2_02_eval", "V2_03_eval"]
    # seqs = seqs + seqs2
    error_calc = EurocErrorCalc(seqs)

    # fixed (non-learned) IMU noise covariance used for the whole test
    imu_covar = torch.diag(
        torch.tensor([
            1e-3, 1e-3, 1e-3, 1e-5, 1e-5, 1e-5, 1e-1, 1e-1, 1e-1, 1e-2,
            1e-2, 1e-2
        ]))
    # fixed visual measurement covariance, shaped for one batch / one timestep
    vis_meas_covar = torch.diag(
        torch.tensor([1e-3, 1e-3, 1e-3, 1e-3, 1e-3, 1e-3])).view(1, 1, 6, 6)

    # initial 18x18 state covariance; block meanings per the inline tags
    init_covar = np.eye(18, 18)
    init_covar[0:3, 0:3] = np.eye(3, 3) * 1e-2  # g
    init_covar[3:9, 3:9] = np.zeros([6, 6])  # C,r
    init_covar[9:12, 9:12] = np.eye(3, 3) * 1e-2  # v
    init_covar[12:15, 12:15] = np.eye(3, 3) * 1e-2  # bw
    init_covar[15:18, 15:18] = np.eye(3, 3) * 1e2  # ba
    init_covar = torch.tensor(init_covar, dtype=torch.float32).view(1, 18, 18)

    ekf = IMUKalmanFilter()
    for seq in seqs:
        # subsequence length 2 with overlap 1 => consecutive frame pairs
        subseqs = get_subseqs([seq], 2, overlap=1, sample_times=1,
                              training=False)
        dataset = SubseqDataset(subseqs, (par.img_h, par.img_w), 0, 1, True,
                                training=False, no_image=True)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                                 shuffle=False, num_workers=0)
        seq_data = SequenceData(seq)
        est_poses = []
        est_ekf_states = []
        est_ekf_covars = []
        for i, data in enumerate(dataloader):
            # print('%d/%d (%.2f%%)' % (i, len(dataloader), i * 100 / len(dataloader)), end="\n")
            _, _, imu_data, init_state, _, gt_poses_inv, gt_rel_poses = data
            # reshape to (batch, timestep, 6, 1) as the filter's measurement
            gt_rel_poses = gt_rel_poses.view(1, 1, 6, 1)
            if i == 0:
                # first pair: seed the filter with ground-truth initial pose,
                # the dataset-provided initial state, and init_covar; keep
                # BOTH the initial and the propagated estimate
                pose, ekf_state, ekf_covar = ekf.forward(
                    imu_data, imu_covar, gt_poses_inv[:, 0], init_state,
                    init_covar, gt_rel_poses, vis_meas_covar,
                    torch.eye(4, 4).view(1, 4, 4))
                est_poses.append(pose[:, 0])
                est_ekf_states.append(ekf_state[:, 0])
                est_ekf_covars.append(ekf_covar[:, 0])
                est_poses.append(pose[:, -1])
                est_ekf_states.append(ekf_state[:, -1])
                est_ekf_covars.append(ekf_covar[:, -1])
            else:
                # subsequent pairs: chain from the previous estimate so the
                # filter runs statefully across the whole sequence
                pose, ekf_state, ekf_covar = ekf.forward(
                    imu_data, imu_covar, est_poses[-1], est_ekf_states[-1],
                    est_ekf_covars[-1], gt_rel_poses, vis_meas_covar,
                    torch.eye(4, 4).view(1, 4, 4))
                est_poses.append(pose[:, -1])
                est_ekf_states.append(ekf_state[:, -1])
                est_ekf_covars.append(ekf_covar[:, -1])
        est_poses_np = torch.stack(
            est_poses, 1).squeeze().detach().cpu().numpy().astype("float64")
        # poses are stored inverted; invert back before error computation
        err = error_calc.accumulate_error(seq, np.linalg.inv(est_poses_np))
        logger.print("Error: %.5f" % err)
        logger.print("Plotting figures...")
        plot_ekf_data(os.path.join(output_dir, seq),
                      seq_data.get_timestamps(), seq_data.get_poses(),
                      seq_data.get_velocities(),
                      torch.stack(est_poses, 1).squeeze(),
                      torch.stack(est_ekf_states, 1).squeeze())
        logger.print("Finished Sequence %s" % seq)
    logger.print("Done! Ave Error: %.5f" % error_calc.get_average_error())
def calc_image_mean_std(sequences):
    """Compute and log per-channel mean and standard deviation of all images
    in the given sequences.

    Two-pass algorithm: pass one averages the per-image channel means, pass
    two accumulates squared deviations over every pixel. Honors
    par.minus_point_5, which shifts pixel values from [0, 1] to [-0.5, 0.5].

    Args:
        sequences: iterable of sequence names understood by SequenceData.
    """
    logger.initialize(working_dir=par.data_dir, use_tensorboard=False)
    logger.print("================ CALC IMAGE MEAN STD ================")
    logger.print("Sequences: [%s]" % ",".join(sequences))

    to_tensor = torchvision.transforms.ToTensor()

    def _load(img_path):
        # (3, H, W) float array; optionally re-centered per par.minus_point_5
        arr = np.array(to_tensor(Image.open(img_path)))
        return arr - 0.5 if par.minus_point_5 else arr

    # gather every image path across all sequences
    paths = []
    n_seq = len(sequences)
    for idx, seq in enumerate(sequences):
        print("Collecting image paths %d/%d (%.2f%%)" %
              (idx + 1, n_seq, (idx + 1) * 100 / n_seq),
              end="\r")
        paths.extend(SequenceData(seq).get_images_paths())
    print()
    n_img = len(paths)

    # pass 1: mean of per-image channel means
    channel_mean_sum = np.array([0.0, 0.0, 0.0])
    t0 = time.time()
    for idx, img_path in enumerate(paths):
        print("Computing mean %d/%d (%.2f%%)" %
              (idx + 1, n_img, (idx + 1) * 100 / n_img),
              end="\r")
        channel_mean_sum += np.mean(_load(img_path), (
            1,
            2,
        ))
    print("\nTook %.2fs" % (time.time() - t0))
    mean = channel_mean_sum / n_img

    # pass 2: squared deviations from the mean, accumulated per channel
    channel_sq_sum = np.array([0.0, 0.0, 0.0])
    pixel_count = 0
    t0 = time.time()
    for idx, img_path in enumerate(paths):
        print("Computing standard deviation %d/%d (%.2f%%)" %
              (idx + 1, n_img, (idx + 1) * 100 / n_img),
              end="\r")
        img = _load(img_path)
        # in-place channel-wise centering (keeps the image array's dtype)
        for c in range(3):
            img[c, :, :] = img[c, :, :] - mean[c]
        channel_sq_sum += np.sum(np.square(img), (
            1,
            2,
        ))
        pixel_count += img.shape[1] * img.shape[2]
    print("\nTook %.2fs" % (time.time() - t0))
    # sample standard deviation (Bessel's correction over all pixels)
    std = np.sqrt(channel_sq_sum / (pixel_count - 1))

    logger.print("Mean: [%f, %f, %f]" % (mean[0], mean[1], mean[2]))
    logger.print("Std: [%f, %f, %f]" % (std[0], std[1], std[2]))
import torch train_subseqs = get_subseqs(["K06"], 1090, overlap=1, sample_times=par.sample_times, training=True) train_dataset = SubseqDataset(train_subseqs, (par.img_h, par.img_w), par.img_means, par.img_stds, par.minus_point_5) train_dl = DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=0, pin_memory=True, drop_last=False) train_dl_iter = iter(train_dl) e2e_vio_model = E2EVIO() resume_path = "/home/cs4li/Dev/deep_ekf_vio/results/train_20190324-16-19-02_all_aug_stateful_lstm/saved_model.eval" save_path = "/home/cs4li/Dev/deep_ekf_vio/results/ekf_total_test" e2e_vio_model.load_state_dict(logger.clean_state_dict_key(torch.load(resume_path))) e2e_vio_model = e2e_vio_model.cuda() e2e_vio_ta = _TrainAssistant(e2e_vio_model) logger.initialize(save_path, use_tensorboard=True) for i in range(0, 1): data = next(train_dl_iter) meta_data, images, imu_data, prev_state, T_imu_cam, gt_poses, gt_rel_poses = data vis_meas, vis_meas_covar, lstm_states, poses, ekf_states, ekf_covars = \ e2e_vio_ta.model.forward(images.cuda(), imu_data.cuda(), None, gt_poses[:, 0].cuda(), prev_state.cuda(), T_imu_cam.cuda()) plot_ekf_data(save_path, imu_data[0, :, 0, 0].detach().cpu(), torch.inverse(gt_poses[0]), torch.zeros(poses.shape[1], 3),
def train(resume_model_path, resume_optimizer_path, train_description):
    """Full training loop for the end-to-end VIO model.

    Prepares train/validation dataloaders, optionally initializes the CNN
    from pretrained FlowNet weights, builds the optimizer with regex-matched
    per-parameter learning rates, optionally resumes model/optimizer state,
    then trains for par.epochs epochs, logging losses to tensorboard and
    checkpointing the best train/valid/eval models.

    Args:
        resume_model_path: path to a saved model state dict, or falsy to
            train from scratch (possibly with FlowNet initialization).
        resume_optimizer_path: path to a saved optimizer state dict, or falsy.
        train_description: free-text run description; prompted interactively
            when falsy.
    """
    logger.initialize(working_dir=par.results_dir, use_tensorboard=True)
    logger.print("================ TRAIN ================")
    if not train_description:
        train_description = input("Enter a description of this training run: ")
    logger.print("Train description: ", train_description)
    logger.tensorboard.add_text("description", train_description)
    logger.log_parameters()
    logger.log_source_files()

    # Prepare Data
    train_subseqs = get_subseqs(par.train_seqs,
                                par.seq_len,
                                overlap=1,
                                sample_times=par.sample_times,
                                training=True)
    # persist the exact subsequence split for later reproducibility/debugging
    convert_subseqs_list_to_panda(train_subseqs).to_pickle(
        os.path.join(par.results_dir, "train_df.pickle"))
    train_dataset = SubseqDataset(train_subseqs, (par.img_h, par.img_w),
                                  par.img_means, par.img_stds,
                                  par.minus_point_5)
    train_dl = DataLoader(train_dataset,
                          batch_size=par.batch_size,
                          shuffle=True,
                          num_workers=par.n_processors,
                          pin_memory=par.pin_mem,
                          drop_last=False)
    logger.print('Number of samples in training dataset: %d' %
                 len(train_subseqs))

    valid_subseqs = get_subseqs(par.valid_seqs,
                                par.seq_len,
                                overlap=1,
                                sample_times=1,
                                training=False)
    convert_subseqs_list_to_panda(valid_subseqs).to_pickle(
        os.path.join(par.results_dir, "valid_df.pickle"))
    valid_dataset = SubseqDataset(valid_subseqs, (par.img_h, par.img_w),
                                  par.img_means,
                                  par.img_stds,
                                  par.minus_point_5,
                                  training=False)
    valid_dl = DataLoader(valid_dataset,
                          batch_size=par.batch_size,
                          shuffle=False,
                          num_workers=par.n_processors,
                          pin_memory=par.pin_mem,
                          drop_last=False)
    logger.print('Number of samples in validation dataset: %d' %
                 len(valid_subseqs))

    # Model
    e2e_vio_model = E2EVIO()
    e2e_vio_model = e2e_vio_model.cuda()
    online_evaluator = _OnlineDatasetEvaluator(e2e_vio_model, par.valid_seqs,
                                               50)

    # Load FlowNet weights pretrained with FlyingChairs
    # NOTE: the pretrained model assumes image rgb values in range [-0.5, 0.5]
    if par.pretrained_flownet and not resume_model_path:
        pretrained_w = torch.load(par.pretrained_flownet)
        logger.print('Load FlowNet pretrained model')
        # Use only conv-layer-part of FlowNet as CNN for DeepVO
        vo_model_dict = e2e_vio_model.vo_module.state_dict()
        update_dict = {
            k: v
            for k, v in pretrained_w['state_dict'].items() if k in vo_model_dict
        }
        assert (len(update_dict) > 0)
        vo_model_dict.update(update_dict)
        e2e_vio_model.vo_module.load_state_dict(vo_model_dict)

    # Create optimizer
    logger.print("Optimizing on parameters:")
    optimizer_params = []
    for p_name, p in e2e_vio_model.named_parameters():
        specific_lr_found = False
        # parameters whose name matches a configured regex get a specific lr
        for k in par.param_specific_lr:
            regex = re.compile(k)
            if regex.match(p_name):
                optimizer_params.append({
                    "params": p,
                    "lr": par.param_specific_lr[k]
                })
                logger.print("%s, lr:%f" % (p_name, par.param_specific_lr[k]))
                specific_lr_found = True
        if not specific_lr_found:
            optimizer_params.append({"params": p})
            logger.print(p_name)
    # every parameter must have been assigned to exactly one group
    assert (len(optimizer_params) == sum(1 for _ in e2e_vio_model.parameters()))
    optimizer = par.optimizer(optimizer_params, **par.optimizer_args)

    # Load trained DeepVO model and optimizer
    if resume_model_path:
        state_dict_update = logger.clean_state_dict_key(
            torch.load(resume_model_path))
        # drop weights the config explicitly excludes from resuming
        state_dict_update = {
            key: state_dict_update[key]
            for key in state_dict_update
            if key not in par.exclude_resume_weights
        }
        state_dict = e2e_vio_model.state_dict()
        state_dict.update(state_dict_update)
        e2e_vio_model.load_state_dict(state_dict)
        logger.print('Load model from: %s' % resume_model_path)
    if resume_optimizer_path:
        optimizer.load_state_dict(torch.load(resume_optimizer_path))
        logger.print('Load optimizer from: %s' % resume_optimizer_path)

    # if to use more than one GPU
    if par.n_gpu > 1:
        assert (torch.cuda.device_count() == par.n_gpu)
        e2e_vio_model = torch.nn.DataParallel(e2e_vio_model)

    e2e_vio_ta = _TrainAssistant(e2e_vio_model)

    # Train
    min_loss_t = 1e10
    min_loss_v = 1e10
    min_err_eval = 1e10
    for epoch in range(par.epochs):
        e2e_vio_ta.epoch = epoch
        st_t = time.time()
        logger.print('=' * 50)
        # Train
        e2e_vio_model.train()
        loss_mean = 0
        t_loss_list = []
        count = 0
        for data in train_dl:
            print("%d/%d (%.2f%%)" %
                  (count, len(train_dl), 100 * count / len(train_dl)),
                  end='\r')
            ls = e2e_vio_ta.step(data, optimizer).data.cpu().numpy()
            t_loss_list.append(float(ls))
            loss_mean += float(ls)
            count += 1
        logger.print('Train take {:.1f} sec'.format(time.time() - st_t))
        loss_mean /= len(train_dl)
        logger.tensorboard.add_scalar("epoch/train_loss", loss_mean, epoch)

        # Validation
        st_t = time.time()
        e2e_vio_model.eval()
        loss_mean_valid = 0
        v_loss_list = []
        for data in valid_dl:
            v_ls = e2e_vio_ta.get_loss(data).data.cpu().numpy()
            v_loss_list.append(float(v_ls))
            loss_mean_valid += float(v_ls)
        logger.print('Valid take {:.1f} sec'.format(time.time() - st_t))
        loss_mean_valid /= len(valid_dl)
        logger.tensorboard.add_scalar("epoch/val_loss", loss_mean_valid, epoch)

        logger.print(
            'Epoch {}\ntrain loss mean: {}, std: {}\nvalid loss mean: {}, std: {}\n'
            .format(epoch + 1, loss_mean, np.std(t_loss_list), loss_mean_valid,
                    np.std(v_loss_list)))

        err_eval = online_evaluator.evaluate()
        logger.tensorboard.add_scalar("epoch/eval_loss", err_eval, epoch)

        # Save model: periodic checkpoints plus best-so-far snapshots for
        # training loss, validation loss, and the online evaluation metric
        if (epoch + 1) % 5 == 0:
            logger.log_training_state("checkpoint", epoch + 1,
                                      e2e_vio_model.state_dict(),
                                      optimizer.state_dict())
        if loss_mean_valid < min_loss_v:
            min_loss_v = loss_mean_valid
            logger.log_training_state("valid", epoch + 1,
                                      e2e_vio_model.state_dict())
        if loss_mean < min_loss_t:
            min_loss_t = loss_mean
            logger.log_training_state("train", epoch + 1,
                                      e2e_vio_model.state_dict())
        if err_eval < min_err_eval:
            min_err_eval = err_eval
            logger.log_training_state("eval", epoch + 1,
                                      e2e_vio_model.state_dict())
        logger.print(
            "Latest saves:", " ".join([
                "%s: %s" % (k, v)
                for k, v in logger.log_training_state_latest_epoch.items()
            ]))
def kitti_eval(working_dir, train_sequences, val_sequences, min_num_frames=200):
    """Run the compiled KITTI odometry evaluation on exported trajectories.

    Invokes the external ``evaluate_odometry`` binary on the KITTI-format
    pose files in ``<working_dir>/kitti`` (as produced by np_traj_to_kitti),
    once for the training split and once for the validation split, deletes
    the bulky plot artifacts the tool emits, and prints per-sequence error
    tables.

    Args:
        working_dir: directory containing the "kitti" subdirectory of
            ``*_est.txt`` / ``*_gt.txt`` trajectory files.
        train_sequences: sequence names of the training split.
        val_sequences: sequence names of the validation split.
        min_num_frames: sequences with this many pose lines or fewer are
            skipped (the KITTI metric needs sufficiently long trajectories).
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ Evaluate KITTI ================")
    logger.print("Working on directory:", working_dir)

    executable = os.path.join(par.project_dir, "eval", "kitti_eval", "cpp",
                              "evaluate_odometry")
    kitti_dir = os.path.join(working_dir, "kitti")

    def _num_lines(path):
        # count pose entries without leaking the file handle
        with open(path) as f:
            return sum(1 for _ in f)

    available_seqs = []
    for f in sorted(os.listdir(kitti_dir)):
        # must have more than min_num_frames entries for evaluation
        if f.endswith(".txt") and _num_lines(os.path.join(kitti_dir,
                                                          f)) > min_num_frames:
            available_seqs.append(
                f.replace("_est.txt", "").replace("_gt.txt", ""))
    # every usable sequence must appear exactly twice (one _est, one _gt file)
    assert (len(available_seqs) // 2 == len(set(available_seqs)))
    available_seqs = list(set(available_seqs))

    print(
        "Poses not available for these sequences:", ", ".join(
            list(set(available_seqs) ^ set(train_sequences + val_sequences))))

    if train_sequences:
        logger.print("Evaluating training sequences...")
        logger.print(
            "Training sequences: ",
            ", ".join(list(set(available_seqs) & set(train_sequences))))
        cmd = [
            executable, kitti_dir,
            Logger.make_dir_if_not_exist(os.path.join(kitti_dir, "train"))
        ] + list(set(available_seqs) & set(train_sequences))
        for line in execute(cmd):
            logger.print(line.strip())

    if val_sequences:
        logger.print("Evaluating validation sequences...")
        logger.print("Validation sequences:",
                     ", ".join(list(set(available_seqs) & set(val_sequences))))
        cmd = [
            executable, kitti_dir,
            Logger.make_dir_if_not_exist(os.path.join(kitti_dir, "valid"))
        ] + list(set(available_seqs) & set(val_sequences))
        # fix: a stray extra execute(cmd) call here previously ran the slow
        # external evaluation twice for the validation split
        for line in execute(cmd):
            logger.print(line.strip())

    logger.print("Deleting useless files...")
    selected_files = list(glob.iglob(os.path.join(kitti_dir, "train", "**"), recursive=True)) + \
                     list(glob.iglob(os.path.join(kitti_dir, "valid", "**"), recursive=True))
    for filename in selected_files:
        # the evaluation tool dumps per-sequence plot sources we do not need
        if filename.endswith((".tex", ".eps", ".pdf", ".gp")):
            os.remove(filename)
    logger.print("Finished running KITTI evaluation!")

    # print the errors
    logger.print("Training errors are:")
    if list(set(available_seqs) & set(train_sequences)):
        train_errors, ave_train_errors = compute_error_for_each_seq(
            os.path.join(kitti_dir, "train"))
        print_error_table(train_errors, ave_train_errors)
    else:
        logger.print("No training errors data")

    logger.print("Validation errors are:")
    if list(set(available_seqs) & set(val_sequences)):
        val_errors, ave_val_errors = compute_error_for_each_seq(
            os.path.join(kitti_dir, "valid"))
        print_error_table(val_errors, ave_val_errors)
    else:
        logger.print("No validation errors data")
def preprocess_euroc(seq_dir, output_dir, cam_still_range):
    """Preprocess one raw EuRoC MAV sequence into the project's data format.

    Reads the camera, IMU, and ground-truth CSVs, time-aligns them,
    estimates the gyro bias and gravity vector from the initial stand-still
    interval, and writes two packaged datasets:
      * ``output_dir``          — ground-truth-aligned data for training;
      * ``output_dir + "_eval"``— camera/IMU-aligned data with zeroed
        (identity-orientation) ground truth, for evaluation.

    Args:
        seq_dir: path to the raw EuRoC sequence (contains cam0/, imu0/,
            state_groundtruth_estimate0/).
        output_dir: destination directory for the training dataset.
        cam_still_range: (start, end) camera-frame indices during which the
            sensor is known to be stationary.
    """
    logger.initialize(working_dir=output_dir, use_tensorboard=False)
    logger.print("================ PREPROCESS EUROC ================")
    logger.print("Preprocessing %s" % seq_dir)
    logger.print("Output to: %s" % output_dir)
    logger.print("Camera still range [%d -> %d]" %
                 (cam_still_range[0], cam_still_range[1]))

    # camera extrinsics: T_BS maps camera -> body(IMU); invert for cam<-imu
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input; sensor.yaml is local dataset data, but SafeLoader
    # would be preferable — confirm no custom tags before switching.
    with open(os.path.join(seq_dir, "cam0", "sensor.yaml")) as yaml_file:
        cam_sensor_yaml_config = yaml.load(yaml_file)
    T_cam_imu = np.linalg.inv(
        np.array(cam_sensor_yaml_config["T_BS"]["data"]).reshape(4, 4))

    cam_timestamps = []
    imu_timestamps = []
    imu_data = []
    gt_timestamps = []
    gt_data = []

    with open(os.path.join(seq_dir, 'cam0', 'data.csv'), 'r') as left_cam_csv:
        left_cam_csv.readline()  # skip header
        for line in left_cam_csv:
            line = line.split(",")
            timestamp_str = line[0]
            # fix: the original `assert str(cond)` asserted a non-empty string
            # (always true); actually check that the filename column matches
            # the timestamp
            assert timestamp_str + ".png" == line[1].strip()
            cam_timestamps.append(int(timestamp_str))

    with open(os.path.join(seq_dir, 'imu0', "data.csv"), 'r') as imu_csv:
        imu_csv.readline()  # skip header
        for line in imu_csv:
            line = line.split(",")
            timestamp = int(line[0])
            data = [float(line[i + 1]) for i in range(0, 6)]
            imu_timestamps.append(timestamp)
            imu_data.append(data)

    with open(
            os.path.join(seq_dir, "state_groundtruth_estimate0", "data.csv"),
            "r") as gt_csv:
        gt_csv.readline()  # skip header
        for line in gt_csv:
            line = line.split(",")
            timestamp = int(line[0])
            data = [float(line[i + 1]) for i in range(0, 16)]
            gt_timestamps.append(timestamp)
            gt_data.append(data)

    imu_data = np.array(imu_data)
    gt_data = np.array(gt_data)

    assert np.all(np.diff(cam_timestamps) > 0), "nonmonotonic timestamp"
    assert np.all(np.diff(imu_timestamps) > 0), "nonmonotonic timestamp"
    assert np.all(np.diff(gt_timestamps) > 0), "nonmonotonic timestamp"

    # align imu and ground truth timestamps, and the time difference should
    # not be more than 1 us
    assert (gt_timestamps[0] > imu_timestamps[0]
            and gt_timestamps[-1] < imu_timestamps[-1])
    imu_gt_aligned_start_idx = (np.abs(
        np.array(imu_timestamps) - gt_timestamps[0])).argmin()
    # gta = gt aligned
    imu_timestamps_gt_aligned = imu_timestamps[
        imu_gt_aligned_start_idx:imu_gt_aligned_start_idx + len(gt_timestamps)]
    imu_data_gt_aligned = imu_data[
        imu_gt_aligned_start_idx:imu_gt_aligned_start_idx + len(gt_timestamps)]
    gt_align_time_sync_diff = np.array(gt_timestamps) - np.array(
        imu_timestamps_gt_aligned)
    assert np.all(np.abs(gt_align_time_sync_diff) <
                  1000), "timestamp out of sync by > 1 us"

    # use sets for membership tests: the original list "in" checks were
    # O(n) per frame over ~tens of thousands of IMU samples
    imu_timestamp_set = set(imu_timestamps)

    # first/last camera frames that share a timestamp with the IMU stream
    cam_imu_aligned_start_idx = -1
    for i in range(0, len(cam_timestamps)):
        if cam_timestamps[i] in imu_timestamp_set:
            cam_imu_aligned_start_idx = i
            break
    assert cam_imu_aligned_start_idx >= 0

    cam_imu_aligned_end_idx = -1
    for i in range(len(cam_timestamps) - 1, -1, -1):
        if cam_timestamps[i] in imu_timestamp_set:
            cam_imu_aligned_end_idx = i
            break
    assert cam_imu_aligned_end_idx >= 0
    assert cam_imu_aligned_start_idx < cam_still_range[
        1], "sanity check that the alignment is at the start"

    # estimate gyro bias and gravity from the stand-still interval
    imu_still_idx_start = imu_timestamps.index(
        cam_timestamps[max(cam_still_range[0], cam_imu_aligned_start_idx)])
    imu_still_idx_end = imu_timestamps.index(
        cam_timestamps[cam_still_range[1]])
    gyro_bias_from_still = np.mean(
        imu_data[imu_still_idx_start:imu_still_idx_end, [wx, wy, wz]], axis=0)
    gravity_from_still = np.mean(
        imu_data[imu_still_idx_start:imu_still_idx_end, [ax, ay, az]], axis=0)
    print("still estimated g_b0:", gravity_from_still)
    print("still estimated g_b0 norm: ", np.linalg.norm(gravity_from_still))

    # for training / validation
    logger.print("Processing data for training...")
    every_N_frames = 10
    rounded_length = len(
        imu_timestamps_gt_aligned) // every_N_frames * every_N_frames
    gravity_from_gt = find_initial_gravity(
        imu_timestamps_gt_aligned[0:rounded_length],
        imu_data_gt_aligned[0:rounded_length], gt_timestamps[0:rounded_length],
        gt_data[0:rounded_length], 10)

    imu_gt_aligned_timestamp_set = set(imu_timestamps_gt_aligned)
    cam_gt_aligned_start_idx = -1
    for i in range(0, len(cam_timestamps)):
        if cam_timestamps[i] in imu_gt_aligned_timestamp_set:
            cam_gt_aligned_start_idx = i
            break
    assert cam_gt_aligned_start_idx >= 0

    cam_gt_aligned_end_idx = -1
    for i in range(len(cam_timestamps) - 1, -1, -1):
        if cam_timestamps[i] in imu_gt_aligned_timestamp_set:
            cam_gt_aligned_end_idx = i
            break
    assert cam_gt_aligned_end_idx >= 0

    gt_cam_aligned_start_idx = imu_timestamps_gt_aligned.index(
        cam_timestamps[cam_gt_aligned_start_idx])
    gt_cam_aligned_end_idx = imu_timestamps_gt_aligned.index(
        cam_timestamps[cam_gt_aligned_end_idx])

    logger.print("Camera index [%d -> %d]" %
                 (cam_gt_aligned_start_idx, cam_gt_aligned_end_idx))
    data_frames = package_euroc_data(
        seq_dir,
        cam_timestamps[cam_gt_aligned_start_idx:cam_gt_aligned_end_idx + 1],
        imu_timestamps_gt_aligned[
            gt_cam_aligned_start_idx:gt_cam_aligned_end_idx + 1],
        imu_data_gt_aligned[gt_cam_aligned_start_idx:gt_cam_aligned_end_idx +
                            1],
        gt_timestamps[gt_cam_aligned_start_idx:gt_cam_aligned_end_idx + 1],
        gt_data[gt_cam_aligned_start_idx:gt_cam_aligned_end_idx + 1])
    SequenceData.save_as_pd(data_frames, gravity_from_gt, gyro_bias_from_still,
                            T_cam_imu, output_dir)
    copyfile(
        os.path.join(seq_dir, "state_groundtruth_estimate0", "data.csv"),
        os.path.join(output_dir, "groundtruth.csv"))

    # for evaluation
    logger.print("Processing data for evaluation...")
    imu_cam_aligned_start_idx = imu_timestamps.index(
        cam_timestamps[cam_still_range[1]])
    imu_cam_aligned_end_idx = imu_timestamps.index(
        cam_timestamps[cam_imu_aligned_end_idx])
    imu_timestamps_cam_aligned = imu_timestamps[
        imu_cam_aligned_start_idx:imu_cam_aligned_end_idx + 1]
    imu_data_cam_aligned = imu_data[
        imu_cam_aligned_start_idx:imu_cam_aligned_end_idx + 1]
    zeros_gt = np.zeros([len(imu_timestamps_cam_aligned), 16])
    # fix: set the quaternion-w COLUMN to 1 (identity orientation placeholder);
    # the original `zeros_gt[:qw] = 1` filled the first qw ROWS entirely with 1
    zeros_gt[:, qw] = 1

    logger.print("Camera index [%d -> %d]" %
                 (cam_still_range[1], cam_imu_aligned_end_idx))
    eval_output_dir = output_dir + "_eval"
    data_frames = package_euroc_data(
        seq_dir,
        cam_timestamps[cam_still_range[1]:cam_imu_aligned_end_idx + 1],
        imu_timestamps_cam_aligned, imu_data_cam_aligned,
        imu_timestamps_cam_aligned, zeros_gt)
    logger.make_dir_if_not_exist(eval_output_dir)
    SequenceData.save_as_pd(data_frames, gravity_from_still,
                            gyro_bias_from_still, T_cam_imu, eval_output_dir)
    copyfile(
        os.path.join(seq_dir, "state_groundtruth_estimate0", "data.csv"),
        os.path.join(eval_output_dir, "groundtruth.csv"))
type=str, help="path of model state to resume from") arg_parser.add_argument('--resume_optimizer_from', type=str, help="path of optimizer state to resume from") arg_parsed = arg_parser.parse_args() gpu_ids = arg_parsed.gpu_id resume_model_path = os.path.abspath( arg_parsed.resume_model_from) if arg_parsed.resume_model_from else None resume_optimizer_path = os.path.abspath( arg_parsed.resume_optimizer_from ) if arg_parsed.resume_optimizer_from else None results_dir = os.path.abspath( os.path.dirname(__file__)) if arg_parsed.run_eval_only else par.results_dir logger.initialize(working_dir=results_dir, use_tensorboard=True) # set the visible GPUs if gpu_ids: os.environ["CUDA_VISIBLE_DEVICES"] = ", ".join([str(i) for i in gpu_ids]) logger.print("CUDA_VISIVLE_DEVICES: %s" % os.environ["CUDA_VISIBLE_DEVICES"]) if not arg_parsed.run_eval_only: start_t = time.time() trainer.train(resume_model_path, resume_optimizer_path, arg_parsed.description) logger.print("Training took %.2fs" % (time.time() - start_t)) for tag in ["valid", "train", "checkpoint", "eval"]: seq_results_dir = gen_trajectory(