def calc_error(working_dir):
    """Compute per-sequence trajectory errors and save them as .npy files.

    For every estimated trajectory in ``<working_dir>/est_poses`` this
    computes, against the matching ground truth in ``<working_dir>/gt_poses``:
      * absolute pose errors (inv(est) * gt) at every frame,
      * relative frame-to-frame pose errors,
      * visual-measurement errors (gt relative pose minus stored measurement),
    each as an Nx6 array of [so3 rotation, translation] vectors, written to
    ``<working_dir>/errors/{abs,rel,vis_meas}/<sequence>.npy``.
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ CALCULATE ERRORS ================")
    logger.print("Working on directory:", working_dir)

    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    vis_meas_dir = os.path.join(working_dir, "vis_meas")
    errors_output_dir = os.path.join(working_dir, "errors")
    Logger.make_dir_if_not_exist(errors_output_dir)

    pose_est_files = sorted(os.listdir(pose_est_dir))
    logger.print("Found pose estimate files: \n" + "\n".join(pose_est_files))

    def pose_error_vec(T):
        # 6-vector [log SO(3) rotation, translation] of a 4x4 transform.
        return np.concatenate([log_SO3(C_from_T(T)), r_from_T(T)])

    for pose_est_file in pose_est_files:
        sequence = os.path.splitext(pose_est_file)[0]
        traj_est = np.load(os.path.join(pose_est_dir, "%s.npy" % sequence))
        traj_gt = np.load(os.path.join(pose_gt_dir, "%s.npy" % sequence))
        vis_meas = np.load(os.path.join(vis_meas_dir, "meas", "%s.npy" % sequence))
        assert (traj_est.shape[0] == traj_gt.shape[0])

        # absolute error at every frame: inv(est) * gt
        abs_traj_error = [pose_error_vec(np.linalg.inv(T_est).dot(T_gt))
                          for T_est, T_gt in zip(traj_est, traj_gt)]

        # relative (frame-to-frame) errors; entry 0 is zero by convention
        # so the arrays stay aligned with the trajectory length
        rel_traj_error = [np.zeros(6)]
        vis_meas_error = [np.zeros(6)]
        for j in range(1, traj_est.shape[0]):
            rel_pose_gt = np.linalg.inv(traj_gt[j - 1]).dot(traj_gt[j])
            rel_pose_est = np.linalg.inv(traj_est[j - 1]).dot(traj_est[j])
            rel_pose_err = np.linalg.inv(rel_pose_est).dot(rel_pose_gt)
            rel_traj_error.append(pose_error_vec(rel_pose_err))
            vis_meas_error.append(pose_error_vec(rel_pose_gt) - vis_meas[j - 1])

        for sub_dir, errors in (("abs", abs_traj_error),
                                ("rel", rel_traj_error),
                                ("vis_meas", vis_meas_error)):
            out_path = os.path.join(errors_output_dir, sub_dir, sequence + ".npy")
            np.save(logger.ensure_file_dir_exists(out_path), np.array(errors))
        logger.print("Error saved for sequence %s" % sequence)
    logger.print("All Done.")
def gen_trajectory_rel_iter(model, dataloader, prop_lstm_states, initial_pose=None):
    """Run the VO front end of ``model`` over ``dataloader`` and chain the
    predicted relative poses into an absolute trajectory.

    Args:
        model: network whose ``forward`` returns (vis_meas, vis_meas_covar,
            lstm_states, ...); only the VO front-end outputs are used here.
        dataloader: yields (meta_data, images, imu_data, prev_state,
            T_imu_cam, gt_poses, gt_rel_poses) batches.
        prop_lstm_states: if True, LSTM states are propagated between
            batches; otherwise they are reset (None -> zeros) every batch.
        initial_pose: optional 4x4 starting pose; defaults to identity.
            (A ``None`` sentinel replaces the original mutable default
            ``np.eye(4, 4)``.)

    Returns:
        (predicted_abs_poses, predicted_rel_poses, vis_meas_covars)
    """
    if initial_pose is None:
        initial_pose = np.eye(4, 4)
    predicted_abs_poses = [np.array(initial_pose), ]
    predicted_rel_poses = []
    vis_meas_covars = []
    lstm_states = None  # none defaults to zero

    for i, data in enumerate(dataloader):
        print('%d/%d (%.2f%%)' % (i, len(dataloader), i * 100 / len(dataloader)), end="\r")
        meta_data, images, imu_data, prev_state, T_imu_cam, gt_poses, gt_rel_poses = data
        lstm_states = lstm_states if prop_lstm_states else None

        # we only care about the results from the VO front ends here
        vis_meas, vis_meas_covar, lstm_states, _, _, _ = model.forward(
            images.cuda(), imu_data.cuda(), lstm_states,
            gt_poses[:, 0].inverse().cuda(), prev_state.cuda(), None, T_imu_cam.cuda())
        lstm_states = lstm_states.detach()
        vis_meas = vis_meas.detach().cpu().numpy()
        vis_meas_covar = vis_meas_covar.detach().cpu().numpy()

        # use a distinct index `k` so the outer progress counter `i`
        # is not shadowed (the original reused `i` here)
        for k, rel_pose in enumerate(vis_meas[-1]):  # select the only batch
            T_vkm1_vk = se3.T_from_Ct(se3.exp_SO3(rel_pose[0:3]), rel_pose[3:6])
            T_i_vk = predicted_abs_poses[-1].dot(T_vkm1_vk)
            se3.log_SO3(T_i_vk[0:3, 0:3])  # just here to check for warnings
            predicted_abs_poses.append(T_i_vk)
            vis_meas_covars.append(vis_meas_covar[-1][k])
            predicted_rel_poses.append(rel_pose)

    return predicted_abs_poses, predicted_rel_poses, vis_meas_covars
def ekf_test_case(self, seqs, seqs_range, init_covar, imu_covar, vis_meas_covar, device, req_grad,
                  accel_bias_inject=None, gyro_bias_inject=None):
    """Build batched EKF inputs from recorded sequences and run the filter.

    Loads ``seqs`` (restricted to frame range ``seqs_range``), assembles
    timestamps, ground-truth poses/velocities, padded IMU windows and
    noise-free visual measurements derived from the ground truth, then runs
    ``IMUKalmanFilter.forward`` once and times it.

    Args:
        seqs: sequence names consumed by ``SequenceData``.
        seqs_range: (start, end) slice applied to every sequence; all sliced
            frames must have equal length.
        init_covar, imu_covar, vis_meas_covar: covariance inputs to the EKF.
        device: torch device for all tensors.
        req_grad: whether gradients are required on the differentiable inputs.
        accel_bias_inject, gyro_bias_inject: optional per-sequence bias
            offsets added to the IMU measurements; default to zeros.

    Returns:
        (timestamps, gt_poses, gt_vels, poses, states, covars)
    """
    seqs_data = [SequenceData(seq) for seq in seqs]
    data_frames = [d.df[seqs_range[0]:seqs_range[1]] for d in seqs_data]
    data_frames_lengths = [len(d) for d in data_frames]
    assert (all(data_frames_lengths[0] == l for l in data_frames_lengths))

    timestamps = np.array([list(df.loc[:, "timestamp"].values) for df in data_frames])
    gt_poses = np.array([list(df.loc[:, "T_i_vk"].values) for df in data_frames])
    gt_vels = np.array([list(df.loc[:, "v_vk_i_vk"].values) for df in data_frames])
    T_imu_cam = np.array([d.T_cam_imu for d in seqs_data])

    if accel_bias_inject is None:
        accel_bias_inject = np.zeros([len(seqs_data), 3])
    if gyro_bias_inject is None:
        gyro_bias_inject = np.zeros([len(seqs_data), 3])

    imu_timestamps = np.array([df.loc[:, "imu_timestamps"].values for df in data_frames])
    gyro_measurements = np.array([df.loc[:, "gyro_measurements"].values for df in data_frames])
    accel_measurements = np.array([df.loc[:, "accel_measurements"].values for df in data_frames])

    ekf = IMUKalmanFilter()

    self.assertEqual(timestamps.shape[0:2], gt_poses.shape[0:2])
    self.assertEqual(timestamps.shape[0:2], gt_vels.shape[0:2])
    self.assertEqual(timestamps.shape[0], T_imu_cam.shape[0])

    # initial state: gravity rotated into the body frame, identity relative
    # rotation/translation, ground-truth initial velocity, zero biases
    g = np.array([0, 0, 9.808679801065017])
    init_state = []
    for i in range(0, len(gt_poses)):
        init_state.append(IMUKalmanFilter.encode_state(
            torch.tensor(gt_poses[i, 0, 0:3, 0:3].transpose().dot(g), dtype=torch.float32),  # g
            torch.eye(3, 3),  # C
            torch.zeros(3),  # r
            torch.tensor(gt_vels[i, 0], dtype=torch.float32),  # v
            torch.zeros(3),  # bw
            torch.zeros(3)).to(device))  # ba
    init_state = torch.stack(init_state)
    # NOTE: initial pose is stored as the INVERSE of the first gt pose
    init_pose = torch.tensor([np.linalg.inv(p[0]) for p in gt_poses], dtype=torch.float32).to(device)
    init_covar = torch.tensor(init_covar, dtype=torch.float32).to(device)
    imu_covar = torch.tensor(imu_covar, dtype=torch.float32).to(device)

    # collect the data
    imu_data = []
    imu_max_length = 12  # pad/truncate every inter-frame IMU window to this length
    for i in range(0, timestamps.shape[1]):
        imu_data_time_k = self.concat_imu_data_at_time_k(imu_max_length,
                                                         imu_timestamps[:, i],
                                                         gyro_measurements[:, i],
                                                         accel_measurements[:, i],
                                                         gyro_bias_inject,
                                                         accel_bias_inject)
        imu_data.append(imu_data_time_k)
    imu_data = torch.tensor(np.stack(imu_data, 1), dtype=torch.float32).to(device)

    # synthesize perfect visual measurements from ground-truth relative poses,
    # expressed in the camera frame via T_imu_cam
    vis_meas = []
    for i in range(0, timestamps.shape[1] - 1):
        T_rel = np.matmul(np.linalg.inv(gt_poses[:, i]), gt_poses[:, i + 1])
        T_rel_vis = np.matmul(np.matmul(np.linalg.inv(T_imu_cam), T_rel), T_imu_cam)
        vis_meas.append(np.concatenate([np.array([log_SO3(T[:3, :3]) for T in T_rel_vis]),
                                        T_rel_vis[:, 0:3, 3]], -1))
    vis_meas = np.expand_dims(np.stack(vis_meas, 1), -1)
    vis_meas = torch.tensor(vis_meas, dtype=torch.float32).to(device)
    vis_meas_covars = vis_meas_covar.repeat(vis_meas.shape[0], vis_meas.shape[1], 1, 1).to(device)

    vis_meas_covars.requires_grad = req_grad
    imu_covar.requires_grad = req_grad
    init_covar.requires_grad = req_grad
    init_state.requires_grad = req_grad
    init_pose.requires_grad = req_grad  # fixed: the original set this twice

    start_time = time.time()
    poses, states, covars = ekf.forward(imu_data, imu_covar, init_pose, init_state, init_covar,
                                        vis_meas, vis_meas_covars,
                                        torch.tensor(T_imu_cam, dtype=torch.float32).to(device))
    print("ekf.forward elapsed %.5f" % (time.time() - start_time))

    return timestamps, gt_poses, gt_vels, poses, states, covars
def plot_ekf_data(output_dir, timestamps, gt_poses, gt_vels, est_poses, est_states, g_const=9.80665):
    """Plot EKF estimates against ground truth and write the figures to
    ``output_dir``.

    Produces XY/XZ/YZ trajectory plots plus per-axis time series for
    position, velocity, orientation, gravity and the estimated IMU biases.

    Args:
        output_dir: directory the ``Plotter`` writes figures into.
        timestamps, gt_poses, gt_vels: ground-truth time series.
        est_poses: estimated poses; stored inverted, so each is inverted
            back before reading position/rotation.
        est_states: encoded EKF states, decoded via
            ``IMUKalmanFilter.decode_state``.
        g_const: gravity magnitude used for the ground-truth gravity vector.

    Fixes the original's Rot Z legend typo (``"set"`` -> ``"est"``).
    """
    # convert all to np
    timestamps = np.array([np.array(item) for item in timestamps], dtype=np.float64)
    gt_poses = np.array([np.array(item) for item in gt_poses], dtype=np.float64)
    gt_vels = np.array([np.array(item) for item in gt_vels], dtype=np.float64)
    est_poses = np.array([np.array(item) for item in est_poses], dtype=np.float64)
    est_states = np.array([np.array(item) for item in est_states], dtype=np.float64)

    est_positions = []
    est_vels = []
    est_rots = []
    est_gravities = []
    est_ba = []
    est_bw = []
    for i in range(0, len(est_poses)):
        # est_poses are stored inverted; invert back to get world-frame pose
        pose = np.linalg.inv(est_poses[i])
        g, C, r, v, bw, ba = IMUKalmanFilter.decode_state(torch.tensor(est_states[i]))
        est_positions.append(pose[0:3, 3])
        est_rots.append(log_SO3(pose[0:3, 0:3]))
        est_vels.append(np.array(v))
        est_gravities.append(np.array(g))
        est_bw.append(np.array(bw))
        est_ba.append(np.array(ba))
    est_positions = np.squeeze(est_positions)
    est_vels = np.squeeze(est_vels)
    est_rots = np.squeeze(est_rots)
    est_gravities = np.squeeze(est_gravities)
    est_bw = np.squeeze(est_bw)
    est_ba = np.squeeze(est_ba)

    gt_rots = np.array([log_SO3(p[:3, :3]) for p in gt_poses])
    gt_gravities = np.array([gt_poses[i, 0:3, 0:3].transpose().dot([0, 0, g_const])
                             for i in range(0, len(gt_poses))])

    # unwrap angles so the time-series plots have no 2*pi jumps
    for a in range(3):
        est_rots[:, a] = np.unwrap(est_rots[:, a])
        gt_rots[:, a] = np.unwrap(gt_rots[:, a])

    plotter = Plotter(output_dir)

    # trajectory projections onto the three coordinate planes
    for a, b, xlabel, ylabel, title in [(0, 1, "x [m]", "y [m]", "XY Plot"),
                                        (0, 2, "x [m]", "z [m]", "XZ Plot"),
                                        (1, 2, "y [m]", "z [m]", "YZ Plot")]:
        plotter.plot(([gt_poses[:, a, 3], gt_poses[:, b, 3]],
                      [est_positions[:, a], est_positions[:, b]],),
                     xlabel, ylabel, title, labels=["gt_poses", "est_pose"], equal_axes=True)

    axis_names = ["X", "Y", "Z"]
    for a, name in enumerate(axis_names):
        plotter.plot(([timestamps, gt_poses[:, a, 3]], [timestamps, est_positions[:, a]],),
                     "t [s]", "p [m]", "Pos %s" % name, labels=["gt", "est"])
    for a, name in enumerate(axis_names):
        plotter.plot(([timestamps, gt_vels[:, a]], [timestamps, est_vels[:, a]],),
                     "t [s]", "v [m/s]", "Vel %s" % name, labels=["gt", "est"])
    for a, name in enumerate(axis_names):
        # fixed: the original Rot Z plot used labels=["gt", "set"]
        plotter.plot(([timestamps, gt_rots[:, a]], [timestamps, est_rots[:, a]],),
                     "t [s]", "rot [rad]", "Rot %s" % name, labels=["gt", "est"])
    for a, name in enumerate(axis_names):
        plotter.plot(([timestamps, gt_gravities[:, a]], [timestamps, est_gravities[:, a]],),
                     "t [s]", "accel [m/s^2]", "Gravity %s" % name, labels=["gt", "est"])
    for a, name in enumerate(axis_names):
        plotter.plot(([timestamps, est_bw[:, a]],),
                     "t [s]", "w [rad/s]", "Gyro Bias %s" % name)
    for a, name in enumerate(axis_names):
        plotter.plot(([timestamps, est_ba[:, a]],),
                     "t [s]", "a [m/s^2]", "Accel Bias %s" % name)
def preprocess_kitti_raw(raw_seq_dir, output_dir, cam_subset_range, plot_figures=True):
    """Preprocess one KITTI raw sequence into the project's frame format.

    Loads GPS/IMU (oxts) data and camera timestamps, interpolates IMU pose /
    velocity / gyro / accel at each camera timestamp, slices the IMU stream
    between consecutive camera frames, and saves the resulting frames via
    ``SequenceData.save_as_pd``. Optionally generates a set of sanity-check
    figures (integrated accel vs velocity, integrated gyro vs orientation,
    interpolation consistency, ...).

    Args:
        raw_seq_dir: KITTI raw sequence directory (contains ``oxts`` and
            ``image_02``; calibration files one level up).
        output_dir: directory for the processed output and figures.
        cam_subset_range: (first, last) camera frame indices, inclusive.
        plot_figures: skip figure generation when False.

    Fixes vs original: the duplicated frame-continuity assert now checks
    ``gyro_measurements`` (the original asserted ``accel_measurements``
    twice); the velocity time-series figure is titled "Velocity Plot"
    (the original reused "YZ Plot", colliding with the position figure);
    the "X/Y Plot From Zero" y-axis labels are corrected.
    """
    logger.initialize(working_dir=output_dir, use_tensorboard=False)
    logger.print("================ PREPROCESS KITTI RAW ================")
    logger.print("Preprocessing %s" % raw_seq_dir)
    logger.print("Output to: %s" % output_dir)
    logger.print("Camera images: %d => %d" % (cam_subset_range[0], cam_subset_range[1]))

    oxts_dir = os.path.join(raw_seq_dir, "oxts")
    image_dir = os.path.join(raw_seq_dir, "image_02")

    # GPS poses as 4x4 homogeneous transforms
    gps_poses = np.loadtxt(os.path.join(oxts_dir, "poses.txt"))
    gps_poses = np.array([np.vstack([np.reshape(p, [3, 4]), [0, 0, 0, 1]]) for p in gps_poses])
    T_velo_imu = np.loadtxt(os.path.join(raw_seq_dir, "../T_velo_imu.txt"))
    T_cam_velo = np.loadtxt(os.path.join(raw_seq_dir, '../T_cam_velo.txt'))
    T_cam_imu = T_cam_velo.dot(T_velo_imu)

    # imu timestamps
    imu_timestamps = read_timestamps(os.path.join(oxts_dir, "timestamps.txt"))
    assert (len(imu_timestamps) == len(gps_poses))

    # load image data
    cam_timestamps = read_timestamps(os.path.join(image_dir, "timestamps.txt"))
    image_paths = [os.path.join(image_dir, "data", p)
                   for p in sorted(os.listdir(os.path.join(image_dir, "data")))]
    assert (len(cam_timestamps) == len(image_paths))
    assert (cam_subset_range[0] >= 0 and cam_subset_range[1] < len(image_paths))

    # the first camera timestamps must be between IMU timestamps
    assert (cam_timestamps[cam_subset_range[0]] >= imu_timestamps[0])
    assert (cam_timestamps[cam_subset_range[1]] <= imu_timestamps[-1])

    # take subset of the camera images in the range of images we are interested in
    image_paths = image_paths[cam_subset_range[0]: cam_subset_range[1] + 1]
    cam_timestamps = cam_timestamps[cam_subset_range[0]: cam_subset_range[1] + 1]

    # convert to local time reference in seconds
    cam_timestamps = (cam_timestamps - imu_timestamps[0]) / np.timedelta64(1, 's')
    imu_timestamps = (imu_timestamps - imu_timestamps[0]) / np.timedelta64(1, 's')

    # take a subset of imu data that corresponds to the camera images
    idx_imu_data_start = find_timestamps_in_between(cam_timestamps[0], imu_timestamps)[0]
    idx_imu_data_end = find_timestamps_in_between(cam_timestamps[-1], imu_timestamps)[1]
    imu_timestamps = imu_timestamps[idx_imu_data_start:idx_imu_data_end + 1]
    gps_poses = gps_poses[idx_imu_data_start:idx_imu_data_end + 1]

    # load IMU data from list of text files
    imu_data = []
    imu_data_files = sorted(os.listdir(os.path.join(oxts_dir, "data")))
    start_time = time.time()
    for i in range(idx_imu_data_start, idx_imu_data_end + 1):
        print("Loading IMU data files %d/%d (%.2f%%)" %
              (i + 1 - idx_imu_data_start, len(imu_timestamps),
               100 * (i + 1 - idx_imu_data_start) / len(imu_timestamps)), end='\r')
        imu_data.append(np.loadtxt(os.path.join(oxts_dir, "data", imu_data_files[i])))
    imu_data = np.array(imu_data)
    logger.print("\nLoading IMU data took %.2fs" % (time.time() - start_time))
    assert (len(imu_data) == len(gps_poses))

    imu_timestamps, imu_data, gps_poses = remove_negative_timesteps(imu_timestamps, imu_data, gps_poses)

    data_frames = []
    start_time = time.time()
    idx_imu_slice_start = 0
    idx_imu_slice_end = 0
    for k in range(0, len(cam_timestamps) - 1):
        print("Processing IMU data files %d/%d (%.2f%%)" %
              (k + 1, len(cam_timestamps), 100 * (k + 1) / len(cam_timestamps)), end='\r')

        t_k = cam_timestamps[k]
        t_kp1 = cam_timestamps[k + 1]

        # the start value does not need to be recomputed, since you can get that from the
        # previous time step, but i am a lazy person, this will work
        while imu_timestamps[idx_imu_slice_start] < t_k:
            idx_imu_slice_start += 1
        assert (imu_timestamps[idx_imu_slice_start - 1] <= t_k <= imu_timestamps[idx_imu_slice_start])
        # interpolate IMU pose/velocity/gyro/accel at the camera timestamp t_k
        tk_i = imu_timestamps[idx_imu_slice_start - 1]
        tk_j = imu_timestamps[idx_imu_slice_start]
        alpha_k = (t_k - tk_i) / (tk_j - tk_i)
        T_i_vk, v_vk, w_vk, a_vk = \
            interpolate(imu_data[idx_imu_slice_start - 1], imu_data[idx_imu_slice_start],
                        gps_poses[idx_imu_slice_start - 1], gps_poses[idx_imu_slice_start], alpha_k)

        while imu_timestamps[idx_imu_slice_end] < t_kp1:
            idx_imu_slice_end += 1
        assert (imu_timestamps[idx_imu_slice_end - 1] <= t_kp1 <= imu_timestamps[idx_imu_slice_end])
        # interpolate at t_{k+1}
        tkp1_i = imu_timestamps[idx_imu_slice_end - 1]
        tkp1_j = imu_timestamps[idx_imu_slice_end]
        alpha_kp1 = (t_kp1 - tkp1_i) / (tkp1_j - tkp1_i)
        T_i_vkp1, v_vkp1, w_vkp1, a_vkp1 = \
            interpolate(imu_data[idx_imu_slice_end - 1], imu_data[idx_imu_slice_end],
                        gps_poses[idx_imu_slice_end - 1], gps_poses[idx_imu_slice_end], alpha_kp1)

        # IMU samples strictly between t_k and t_{k+1}, bracketed by the
        # interpolated boundary values
        imu_timestamps_k_kp1 = np.concatenate(
            [[t_k], imu_timestamps[idx_imu_slice_start:idx_imu_slice_end - 1], [t_kp1]])
        imu_poses = np.concatenate(
            [[T_i_vk], gps_poses[idx_imu_slice_start:idx_imu_slice_end - 1], [T_i_vkp1]])
        accel_measurements_k_kp1 = np.concatenate(
            [[a_vk], imu_data[idx_imu_slice_start: idx_imu_slice_end - 1, ax:az + 1], [a_vkp1]])
        gyro_measurements_k_kp1 = np.concatenate(
            [[w_vk], imu_data[idx_imu_slice_start: idx_imu_slice_end - 1, wx:wz + 1], [w_vkp1]])
        frame_k = SequenceData.Frame(image_paths[k], t_k, T_i_vk, v_vk,
                                     imu_poses, imu_timestamps_k_kp1,
                                     accel_measurements_k_kp1, gyro_measurements_k_kp1)
        data_frames.append(frame_k)

        # assertions for sanity check: consecutive frames share boundary samples
        assert (np.allclose(data_frames[-1].timestamp, data_frames[-1].imu_timestamps[0], atol=1e-13))
        assert (np.allclose(data_frames[-1].T_i_vk, data_frames[-1].imu_poses[0], atol=1e-13))
        if len(data_frames) > 1:
            assert (np.allclose(data_frames[-1].timestamp, data_frames[-2].imu_timestamps[-1], atol=1e-13))
            assert (np.allclose(data_frames[-1].T_i_vk, data_frames[-2].imu_poses[-1], atol=1e-13))
            assert (np.allclose(data_frames[-1].accel_measurements[0],
                                data_frames[-2].accel_measurements[-1], atol=1e-13))
            # fixed: the original duplicated the accel assert; gyro was
            # clearly the intended second check
            assert (np.allclose(data_frames[-1].gyro_measurements[0],
                                data_frames[-2].gyro_measurements[-1], atol=1e-13))

    # add the last frame without any IMU data
    data_frames.append(SequenceData.Frame(image_paths[-1], t_kp1, T_i_vkp1, v_vkp1,
                                          np.zeros([0, 4, 4]), np.zeros([0]),
                                          np.zeros([0, 3]), np.zeros([0, 3])))
    logger.print("\nProcessing data took %.2fs" % (time.time() - start_time))

    df = SequenceData.save_as_pd(data_frames, np.array([0, 0, 9.808679801065017]),
                                 np.zeros(3), T_cam_imu, output_dir)
    data = df.to_dict("list")

    if not plot_figures:
        logger.print("All done!")
        return

    # ============================== FIGURES FOR SANITY TESTS ==============================
    # plot trajectory
    start_time = time.time()
    plotter = Plotter(output_dir)

    p_poses = np.array(data["T_i_vk"])
    p_timestamps = np.array(data["timestamp"])
    p_velocities = np.array(data["v_vk_i_vk"])
    # drop the last (boundary) sample of every window so the concatenated
    # streams contain each IMU sample exactly once
    p_imu_timestamps = np.concatenate([d[:-1] for d in data['imu_timestamps']])
    p_gyro_measurements = np.concatenate([d[:-1] for d in data['gyro_measurements']])
    p_accel_measurements = np.concatenate([d[:-1] for d in data["accel_measurements"]])
    p_imu_poses = np.concatenate([d[:-1, :, :] for d in data["imu_poses"]])
    assert (len(p_imu_timestamps) == len(p_gyro_measurements))
    assert (len(p_imu_timestamps) == len(p_accel_measurements))
    assert (len(p_imu_timestamps) == len(p_imu_poses))

    # integrate accel to compare against velocity
    p_accel_int = [p_velocities[0, :]]
    p_accel_int_int = [p_poses[0, :3, 3]]
    # gravity constant matching save_as_pd above (alternatives 9.80665 /
    # 9.8096 were tried in earlier revisions)
    g = np.array([0, 0, 9.808679801065017])
    for i in range(0, len(p_imu_timestamps) - 1):
        dt = p_imu_timestamps[i + 1] - p_imu_timestamps[i]
        C_i_vk = p_imu_poses[i, :3, :3]
        C_vkp1_vk = p_imu_poses[i + 1, :3, :3].transpose().dot(p_imu_poses[i, :3, :3])
        v_vk_i_vk = p_accel_int[-1]
        v_vkp1_vk_vk = dt * (p_accel_measurements[i] - C_i_vk.transpose().dot(g))
        v_vkp1_i_vk = v_vk_i_vk + v_vkp1_vk_vk
        p_accel_int.append(C_vkp1_vk.dot(v_vkp1_i_vk))
        p_accel_int_int.append(p_accel_int_int[-1] + p_imu_poses[i, :3, :3].dot(p_accel_int[-1]) * dt)
    p_accel_int = np.array(p_accel_int)
    p_accel_int_int = np.array(p_accel_int_int)

    # poses from integrating velocity
    p_vel_int_poses = [p_poses[0, :3, 3]]
    for i in range(0, len(p_velocities) - 1):
        dt = p_timestamps[i + 1] - p_timestamps[i]
        dp = p_poses[i, :3, :3].dot(p_velocities[i]) * dt
        p_vel_int_poses.append(p_vel_int_poses[-1] + dp)
    p_vel_int_poses = np.array(p_vel_int_poses)

    plotter.plot(([p_poses[:, 0, 3], p_poses[:, 1, 3]],
                  [p_vel_int_poses[:, 0], p_vel_int_poses[:, 1]],
                  [p_accel_int_int[:, 0], p_accel_int_int[:, 1]],),
                 "x [m]", "Y [m]", "XY Plot",
                 labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"], equal_axes=True)
    plotter.plot(([p_poses[:, 0, 3], p_poses[:, 2, 3]],
                  [p_vel_int_poses[:, 0], p_vel_int_poses[:, 2]],
                  [p_accel_int_int[:, 0], p_accel_int_int[:, 2]],),
                 "X [m]", "Z [m]", "XZ Plot",
                 labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"], equal_axes=True)
    plotter.plot(([p_poses[:, 1, 3], p_poses[:, 2, 3]],
                  [p_vel_int_poses[:, 1], p_vel_int_poses[:, 2]],
                  [p_accel_int_int[:, 1], p_accel_int_int[:, 2]],),
                 "Y [m]", "Z [m]", "YZ Plot",
                 labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"], equal_axes=True)
    # fixed: y-axis labels matched the wrong axes in the original
    plotter.plot(([p_timestamps, p_poses[:, 0, 3]],
                  [p_timestamps, p_vel_int_poses[:, 0]],
                  [p_imu_timestamps, p_accel_int_int[:, 0]],),
                 "t [s]", "X [m]", "X Plot From Zero",
                 labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"])
    plotter.plot(([p_timestamps, p_poses[:, 1, 3]],
                  [p_timestamps, p_vel_int_poses[:, 1]],
                  [p_imu_timestamps, p_accel_int_int[:, 1]],),
                 "t [s]", "Y [m]", "Y Plot From Zero",
                 labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"])
    plotter.plot(([p_timestamps, p_poses[:, 2, 3]],
                  [p_timestamps, p_vel_int_poses[:, 2]],
                  [p_imu_timestamps, p_accel_int_int[:, 2]],),
                 "t [s]", "Z [m]", "Z Plot From Zero",
                 labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"])

    # plot trajectory rotated wrt to the first frame
    p_poses_from_I = np.array([np.linalg.inv(p_poses[0]).dot(p) for p in p_poses])
    plotter.plot(([p_poses_from_I[:, 0, 3], p_poses_from_I[:, 1, 3]],),
                 "x [m]", "Y [m]", "XY Plot From Identity", equal_axes=True)
    plotter.plot(([p_poses_from_I[:, 0, 3], p_poses_from_I[:, 2, 3]],),
                 "X [m]", "Z [m]", "XZ Plot From Identity", equal_axes=True)
    plotter.plot(([p_poses_from_I[:, 1, 3], p_poses_from_I[:, 2, 3]],),
                 "Y [m]", "Z [m]", "YZ Plot From Identity", equal_axes=True)

    # plot p_velocities
    # fixed: the original titled this "YZ Plot", colliding with the position figure
    plotter.plot(([p_timestamps, p_velocities[:, 0]],
                  [p_timestamps, p_velocities[:, 1]],
                  [p_timestamps, p_velocities[:, 2]]),
                 "t [s]", "v [m/s]", "Velocity Plot", labels=["dat_vx", "dat_vy", "dat_vz"])

    # make sure the interpolated acceleration and gyroscope measurements are the same
    plotter.plot(([p_imu_timestamps, p_gyro_measurements[:, 0]],
                  [imu_timestamps, imu_data[:, wx]],),
                 "t [s]", "w [rad/s]", "Rot Vel X Verification")
    plotter.plot(([p_imu_timestamps, p_gyro_measurements[:, 1]],
                  [imu_timestamps, imu_data[:, wy]],),
                 "t [s]", "w [rad/s]", "Rot Vel Y Verification")
    plotter.plot(([p_imu_timestamps, p_gyro_measurements[:, 2]],
                  [imu_timestamps, imu_data[:, wz]],),
                 "t [s]", "w [rad/s]", "Rot Vel Z Verification")
    plotter.plot(([p_imu_timestamps, p_accel_measurements[:, 0]],
                  [imu_timestamps, imu_data[:, ax]],),
                 "t [s]", "a [m/s^2]", "Accel X Verification")
    plotter.plot(([p_imu_timestamps, p_accel_measurements[:, 1]],
                  [imu_timestamps, imu_data[:, ay]],),
                 "t [s]", "a [m/s^2]", "Accel Y Verification")
    plotter.plot(([p_imu_timestamps, p_accel_measurements[:, 2]],
                  [imu_timestamps, imu_data[:, az]],),
                 "t [s]", "a [m/s^2]", "Accel Z Verification")

    # integrate gyroscope to compare against rotation
    p_gyro_int = [data["T_i_vk"][0][:3, :3]]
    for i in range(0, len(p_imu_timestamps) - 1):
        dt = p_imu_timestamps[i + 1] - p_imu_timestamps[i]
        p_gyro_int.append(p_gyro_int[-1].dot(exp_SO3(dt * p_gyro_measurements[i])))
    p_gyro_int = np.array([log_SO3(o) for o in p_gyro_int])
    p_orientation = np.array([log_SO3(p[:3, :3]) for p in data["T_i_vk"]])

    plotter.plot(([p_imu_timestamps, np.unwrap(p_gyro_int[:, 0])],
                  [p_timestamps, np.unwrap(p_orientation[:, 0])],),
                 "t [s]", "rot [rad/s]", "Theta X Cmp Plot", labels=["gyro_int", "dat_pose"])
    plotter.plot(([p_imu_timestamps, np.unwrap(p_gyro_int[:, 1])],
                  [p_timestamps, np.unwrap(p_orientation[:, 1])],),
                 "t [s]", "rot [rad/s]", "Theta Y Cmp Plot", labels=["gyro_int", "dat_pose"])
    plotter.plot(([p_imu_timestamps, np.unwrap(p_gyro_int[:, 2])],
                  [p_timestamps, np.unwrap(p_orientation[:, 2])],),
                 "t [s]", "rot [rad/s]", "Theta Z Cmp Plot", labels=["gyro_int", "dat_pose"])

    # velocities from differencing consecutive GPS poses
    vel_from_gps_rel_poses = []
    for k in range(0, len(gps_poses) - 1):
        dt = imu_timestamps[k + 1] - imu_timestamps[k]
        T_i_vk = gps_poses[k]
        T_i_vkp1 = gps_poses[k + 1]
        T_vk_vkp1 = np.linalg.inv(T_i_vk).dot(T_i_vkp1)
        vel_from_gps_rel_poses.append(T_vk_vkp1[0:3, 3] / dt)
        # vel_from_gps_rel_poses.append(log_SE3(T_vk_vkp1)[0:3] / dt)
    vel_from_gps_rel_poses = np.array(vel_from_gps_rel_poses)

    plotter.plot(([imu_timestamps[1:], vel_from_gps_rel_poses[:, 0]],
                  [p_timestamps, p_velocities[:, 0]],
                  [p_imu_timestamps, p_accel_int[:, 0]],),
                 "t [s]", "v [m/s]", "Velocity X Cmp Plot",
                 labels=["gps_rel", "dat_vel", "dat_accel_int"])
    plotter.plot(([imu_timestamps[1:], vel_from_gps_rel_poses[:, 1]],
                  [p_timestamps, p_velocities[:, 1]],
                  [p_imu_timestamps, p_accel_int[:, 1]],),
                 "t [s]", "v [m/s]", "Velocity Y Cmp Plot",
                 labels=["gps_rel", "dat_vel", "dat_accel_int"])
    plotter.plot(([imu_timestamps[1:], vel_from_gps_rel_poses[:, 2]],
                  [p_timestamps, p_velocities[:, 2]],
                  [p_imu_timestamps, p_accel_int[:, 2]],),
                 "t [s]", "v [m/s]", "Velocity Z Cmp Plot",
                 labels=["gps_rel", "dat_vel", "dat_accel_int"])

    logger.print("Generating figures took %.2fs" % (time.time() - start_time))
    logger.print("All done!")
def plot_trajectory(working_dir):
    """Plot estimated vs ground-truth trajectories for every sequence.

    For each ``<sequence>.npy`` pose file in ``<working_dir>/est_poses``
    (with a matching file in ``<working_dir>/gt_poses``), saves XY/XZ/YZ
    plane plots and per-component translation/rotation progression plots
    into ``<working_dir>/figures``.
    """
    output_dir = os.path.join(working_dir, "figures")
    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    assert sorted(os.listdir(pose_est_dir)) == sorted(os.listdir(pose_gt_dir))
    pose_est_files = sorted(os.listdir(pose_est_dir))
    Logger.make_dir_if_not_exist(output_dir)

    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ PLOT TRAJECTORY ================")
    logger.print("Working on directory:", working_dir)
    logger.print("Found pose estimate files: \n" + "\n".join(pose_est_files))

    for pose_est_file in pose_est_files:
        sequence = os.path.splitext(pose_est_file)[0]
        trajectory = np.load(os.path.join(pose_est_dir, "%s.npy" % sequence))
        trajectory_gt = np.load(os.path.join(pose_gt_dir, "%s.npy" % sequence))

        # decompose 4x4 poses into translation and so3 rotation components
        trans_xyz = np.array([se3.r_from_T(T) for T in trajectory])
        rot_xyz = np.array([se3.log_SO3(se3.C_from_T(T)) for T in trajectory])
        trans_xyz_gt = np.array([se3.r_from_T(T) for T in trajectory_gt])
        rot_xyz_gt = np.array([se3.log_SO3(se3.C_from_T(T)) for T in trajectory_gt])

        # ########## XYZ Planes Plots ########## #
        planes = [(0, 1, "x", "y", "XY", "seq_%s_00_xy_traj.png"),
                  (0, 2, "x", "z", "XZ", "seq_%s_01_xz_traj.png"),
                  (1, 2, "y", "z", "YZ", "seq_%s_02_yz_traj.png")]
        plt.figure()
        for h_axis, v_axis, h_name, v_name, plane_name, fname in planes:
            plt.clf()
            plt.plot(trans_xyz[:, h_axis], trans_xyz[:, v_axis],
                     linewidth=1.0, color="r", label="Estimate")
            plt.plot(trans_xyz_gt[:, h_axis], trans_xyz_gt[:, v_axis],
                     linewidth=1.0, color="b", label="Ground Truth")
            plt.axis("equal")
            plt.xlabel("%s [m]" % h_name)
            plt.ylabel("%s [m]" % v_name)
            plt.title("Seq. %s Trajectory %s Plane" % (sequence, plane_name))
            plt.legend()
            plt.savefig(os.path.join(output_dir, fname % sequence))

        # ########## Progression Throughout Planes Plot ##########
        labels = ["Trans X", "Trans Y", "Trans Z", "Rot X", "Rot Y", "Rot Z"]
        series = [trans_xyz[:, 0], trans_xyz[:, 1], trans_xyz[:, 2],
                  rot_xyz[:, 0], rot_xyz[:, 1], rot_xyz[:, 2]]
        series_gt = [trans_xyz_gt[:, 0], trans_xyz_gt[:, 1], trans_xyz_gt[:, 2],
                     rot_xyz_gt[:, 0], rot_xyz_gt[:, 1], rot_xyz_gt[:, 2]]
        for j, label in enumerate(labels):
            plt.clf()
            plt.plot(series[j], linewidth=1.0, color="r", label="Estimate")
            plt.plot(series_gt[j], linewidth=1.0, color="b", label="Ground Truth")
            plt.xlabel("frame # []")
            plt.ylabel(label.lower())
            plt.title("Seq. %s %s" % (sequence, label))
            plt.legend()
            plt.savefig(os.path.join(
                output_dir,
                "seq_%s_%02d_%s_plt.png" % (sequence, j + 3, "_".join(label.lower().split()))))

        logger.print("Plot saved for sequence %s" % sequence)
def ekf_loss(self, est_poses, gt_poses, ekf_states, gt_rel_poses, vis_meas, vis_meas_covar):
    """Combined training loss: absolute pose error blended with the visual
    measurement loss, weighted by the scheduled factor k3.

    Also logs per-component error statistics (and, while training, the
    learned IMU noise covariance diagonal) to tensorboard.

    Returns:
        (loss, loss_abs, loss_vis_meas)
    """
    # NOTE(review): no inverse is taken here, so this only yields an error
    # transform (near identity) if est_poses are stored as inverse poses —
    # the test helper builds init_pose as inv(gt); confirm the EKF output
    # follows the same convention.
    abs_errors = torch.matmul(est_poses[:, 1:], gt_poses[:, 1:])
    # 1/t weighting so late-trajectory (accumulated) errors don't dominate
    length_div = torch.arange(start=1, end=abs_errors.size(1) + 1, device=abs_errors.device, dtype=torch.float32).view(1, -1, 1)

    # calculate the F norm squared from identity
    I_minus_angle_errors = (torch.eye(3, 3, device=abs_errors.device) - abs_errors[:, :, 0:3, 0:3]) / length_div.view(1, -1, 1, 1)
    I_minus_angle_errors_sq = torch.matmul(I_minus_angle_errors, I_minus_angle_errors.transpose(-2, -1))
    # trace of (I-R)(I-R)^T = ||I - R||_F^2
    abs_angle_errors_sq = torch.sum(torch.diagonal(I_minus_angle_errors_sq, dim1=-2, dim2=-1), dim=-1)
    # abs_angle_errors = torch.squeeze(torch_se3.log_SO3_b(abs_errors[:, :, 0:3, 0:3]), -1) / length_div
    # abs_angle_errors_sq = torch.sum(abs_angle_errors ** 2, dim=-1)

    # norm squared
    abs_trans_errors_sq = torch.sum((abs_errors[:, :, 0:3, 3] / length_div)**2, dim=-1)
    abs_angle_loss = torch.mean(abs_angle_errors_sq)
    abs_trans_loss = torch.mean(abs_trans_errors_sq)

    # _, C_rel, r_rel, _, _, _ = IMUKalmanFilter.decode_state_b(ekf_states)
    # rel_angle_errors = (gt_rel_poses[:, :, 0:3] - torch.squeeze(torch_se3.log_SO3_b(C_rel[:, 1:]), -1)) ** 2
    # rel_angle_errors_sq = torch.sum(rel_angle_errors ** 2, dim=-1)
    # rel_trans_error_sq = torch.sum((gt_rel_poses[:, :, 3:6] - torch.squeeze(r_rel[:, 1:], -1)) ** 2, dim=-1)
    # rel_angle_loss = torch.mean(rel_angle_errors_sq)
    # rel_trans_loss = torch.mean(rel_trans_error_sq)

    # k3 trades off visual-measurement loss vs absolute-pose loss
    k3 = self.schedule(par.k3)
    # NOTE: only the translation term enters loss_abs here (abs_angle_loss
    # is computed and logged but not part of the loss)
    loss_abs = abs_trans_loss * par.k4**2
    # loss_rel = (par.k1 * rel_angle_loss + rel_trans_loss)
    # loss = k3 * loss_rel + (1 - k3) * loss_abs
    loss_vis_meas = self.vis_meas_loss(vis_meas, vis_meas_covar, gt_rel_poses)
    loss = k3 * loss_vis_meas + (1 - k3) * loss_abs
    assert not torch.any(torch.isnan(loss))

    # add to tensorboard
    trans_errors = abs_errors[:, :, 0:3, 3].detach().cpu().numpy()
    angle_errors_np = []
    errors_np = abs_errors.detach().cpu().numpy()
    # per-batch, per-timestep rotation error vectors via the SO(3) log map
    for i in range(0, abs_errors.size(0)):
        angle_errors_over_ts = []
        for j in range(0, abs_errors.size(1)):
            angle_errors_over_ts.append(se3.log_SO3(errors_np[i, j, 0:3, 0:3]))
        angle_errors_np.append(np.stack(angle_errors_over_ts))
    angle_errors_np = np.stack(angle_errors_np)

    # mean absolute error at the LAST timestep, per axis
    last_rot_x_loss = np.mean(np.abs(angle_errors_np[:, -1, 0]))
    last_rot_y_loss = np.mean(np.abs(angle_errors_np[:, -1, 1]))
    last_rot_z_loss = np.mean(np.abs(angle_errors_np[:, -1, 2]))
    last_trans_x_loss = np.mean(np.abs(trans_errors[:, -1, 0]))
    last_trans_y_loss = np.mean(np.abs(trans_errors[:, -1, 1]))
    last_trans_z_loss = np.mean(np.abs(trans_errors[:, -1, 2]))

    tag_name = "train" if self.model.training else "val"
    iterations = self.num_train_iterations if self.model.training else self.num_val_iterations
    add_scalar = logger.tensorboard.add_scalar
    add_scalar(tag_name + "_abs/abs_total_loss", loss_abs, iterations)
    add_scalar(tag_name + "_abs/abs_rot_loss", abs_angle_loss, iterations)
    add_scalar(tag_name + "_abs/last_rot_loss/x", last_rot_x_loss, iterations)
    add_scalar(tag_name + "_abs/last_rot_loss/y", last_rot_y_loss, iterations)
    add_scalar(tag_name + "_abs/last_rot_loss/z", last_rot_z_loss, iterations)
    add_scalar(tag_name + "_abs/abs_loss", abs_trans_loss, iterations)
    add_scalar(tag_name + "_abs/last_trans_loss/x", last_trans_x_loss, iterations)
    add_scalar(tag_name + "_abs/last_trans_loss/y", last_trans_y_loss, iterations)
    add_scalar(tag_name + "_abs/last_trans_loss/z", last_trans_z_loss, iterations)

    if self.model.training:
        # unwrap DataParallel to reach the underlying module's parameters
        model = self.model.module if isinstance(self.model, torch.nn.DataParallel) else self.model
        imu_noise_covar = torch.diagonal(model.get_imu_noise_covar())
        # diagonal layout: [0:3] gyro, [3:6] gyro bias, [6:9] accel, [9:12] accel bias
        add_scalar("imu_noise_diag/w_x", imu_noise_covar[0], iterations)
        add_scalar("imu_noise_diag/w_y", imu_noise_covar[1], iterations)
        add_scalar("imu_noise_diag/w_z", imu_noise_covar[2], iterations)
        add_scalar("imu_noise_diag/bw_x", imu_noise_covar[3], iterations)
        add_scalar("imu_noise_diag/bw_y", imu_noise_covar[4], iterations)
        add_scalar("imu_noise_diag/bw_z", imu_noise_covar[5], iterations)
        add_scalar("imu_noise_diag/a_x", imu_noise_covar[6], iterations)
        add_scalar("imu_noise_diag/a_y", imu_noise_covar[7], iterations)
        add_scalar("imu_noise_diag/a_z", imu_noise_covar[8], iterations)
        add_scalar("imu_noise_diag/ba_x", imu_noise_covar[9], iterations)
        add_scalar("imu_noise_diag/ba_y", imu_noise_covar[10], iterations)
        add_scalar("imu_noise_diag/ba_z", imu_noise_covar[11], iterations)
        add_scalar("params/k3", k3, iterations)

    return loss, loss_abs, loss_vis_meas
def ekf_loss(est_poses, gt_poses, ekf_states, gt_rel_poses, vis_meas, vis_meas_covar):
    """Compute and print absolute-pose error diagnostics for EKF estimates.

    Args:
        est_poses: estimated SE(3) poses; assumed shape (batch, seq, 4, 4) — TODO confirm.
        gt_poses: ground-truth poses, same shape as est_poses.
        ekf_states, gt_rel_poses, vis_meas, vis_meas_covar: accepted for interface
            compatibility; not used by the active code path.

    Side effects: prints the component losses and last-timestep per-axis errors.
    Returns None.
    """
    # Absolute pose error T_err = T_est^-1 * T_gt for every pose after the first.
    abs_errors = torch.matmul(torch.inverse(est_poses[:, 1:]), gt_poses[:, 1:])

    # Rotation error as squared Frobenius norm of (I - R_err):
    # tr((I - R)(I - R)^T) summed over the diagonal.
    rot_residual = torch.eye(3, 3, device=abs_errors.device) - abs_errors[:, :, 0:3, 0:3]
    rot_residual_gram = torch.matmul(rot_residual, rot_residual.transpose(-2, -1))
    abs_angle_errors_sq = torch.sum(torch.diagonal(rot_residual_gram, dim1=-2, dim2=-1), dim=-1)

    # Translation error as squared Euclidean norm of the error translation.
    abs_trans_errors_sq = torch.sum((abs_errors[:, :, 0:3, 3]) ** 2, dim=-1)

    abs_angle_loss = torch.mean(abs_angle_errors_sq)
    abs_trans_loss = torch.mean(abs_trans_errors_sq)

    # Weighted combination; k2 balances rotation vs translation, k4 is a global scale.
    loss_abs = (par.k2 * abs_angle_loss + abs_trans_loss) * par.k4 ** 2

    # Convert to numpy and extract per-axis log-SO3 angle errors for reporting.
    trans_errors = abs_errors[:, :, 0:3, 3].detach().cpu().numpy()
    errors_np = abs_errors.detach().cpu().numpy()
    angle_errors_np = np.stack([
        np.stack([se3.log_SO3(errors_np[b, t, 0:3, 0:3])
                  for t in range(abs_errors.size(1))])
        for b in range(abs_errors.size(0))
    ])

    # Mean absolute error of the final timestep, per rotation / translation axis.
    last_rot_x_loss = np.mean(np.abs(angle_errors_np[:, -1, 0]))
    last_rot_y_loss = np.mean(np.abs(angle_errors_np[:, -1, 1]))
    last_rot_z_loss = np.mean(np.abs(angle_errors_np[:, -1, 2]))
    last_trans_x_loss = np.mean(np.abs(trans_errors[:, -1, 0]))
    last_trans_y_loss = np.mean(np.abs(trans_errors[:, -1, 1]))
    last_trans_z_loss = np.mean(np.abs(trans_errors[:, -1, 2]))

    print("abs_angle_loss", abs_angle_loss)
    print("abs_trans_loss", abs_trans_loss)
    print("loss_abs", loss_abs)
    print("last_rot_x_loss", last_rot_x_loss)
    print("last_rot_y_loss", last_rot_y_loss)
    print("last_rot_z_loss", last_rot_z_loss)
    print("last_trans_x_loss", last_trans_x_loss)
    print("last_trans_y_loss", last_trans_y_loss)
    print("last_trans_z_loss", last_trans_z_loss)
def __getitem__(self, index):
    """Assemble one training sample for the subsequence at `index`.

    Builds relative ground-truth poses between consecutive frames, pads the
    per-interval IMU packets to a fixed length, encodes the initial EKF state,
    and (unless `self.no_image`) loads and augments the frame images.

    Returns:
        (meta, images, imu_data, init_state, T_imu_cam, gt_poses, gt_rel_poses)
        where `meta` is a tuple of subsequence identifiers (plus `invalid_imu`
        in the image-loading branch — NOTE(review): the `no_image` branch omits
        that flag, so the two meta tuples differ in length; callers must handle
        both shapes).
    """
    subseq = self.subseqs[index]

    # Relative ground-truth poses between consecutive frames, as [phi, r]
    # (log-SO3 rotation followed by translation).
    gt_rel_poses = []
    imu_data = []
    for i in range(1, len(subseq.gt_poses)):
        T_i_vkm1 = subseq.gt_poses[i - 1]
        T_i_vk = subseq.gt_poses[i]
        # Re-orthogonalize to counter numerical drift in the rotation part.
        T_vkm1_vk = se3.reorthogonalize_SE3(np.linalg.inv(T_i_vkm1).dot(T_i_vk))
        r_vk_vkm1_vkm1 = T_vkm1_vk[0:3, 3]  # translation component of T
        phi_vkm1_vk = se3.log_SO3(T_vkm1_vk[0:3, 0:3])
        gt_rel_poses.append(np.concatenate([phi_vkm1_vk, r_vk_vkm1_vkm1]))

    # Pack [timestamp, gyro(3), accel(3)] per frame, zero-padded to a fixed
    # length; the timestamp column of the padding repeats the last real
    # timestamp so downstream dt computations see no spurious jumps.
    for i in range(0, len(subseq.gt_poses)):
        imu_dat_concat = np.concatenate([
            np.expand_dims(subseq.imu_timestamps[i], 1),
            subseq.gyro_measurements[i],
            subseq.accel_measurements[i]
        ], axis=1)
        imu_dat_padded = np.zeros([self.max_imu_data_length, 7])
        imu_dat_padded[0:len(imu_dat_concat), :] = imu_dat_concat
        imu_dat_padded[len(imu_dat_concat):, 0] = imu_dat_concat[-1, 0] if len(imu_dat_concat) > 0 else 0
        imu_data.append(imu_dat_padded)

    gt_rel_poses = torch.tensor(gt_rel_poses, dtype=torch.float32)
    gt_poses = torch.tensor(subseq.gt_poses, dtype=torch.float32)
    gt_velocities = torch.tensor(subseq.gt_velocities, dtype=torch.float32)
    imu_data = torch.tensor(imu_data, dtype=torch.float32)
    # Gravity expressed in the first body frame: C_0^T * g_i.
    init_g = torch.tensor(subseq.gt_poses[0, 0:3, 0:3].transpose().dot(subseq.g_i),
                          dtype=torch.float32)
    bw_0 = torch.tensor(subseq.bw_0, dtype=torch.float32)
    ba_0 = torch.tensor(subseq.ba_0, dtype=torch.float32)

    if par.cal_override_enable:
        T_imu_cam = torch.tensor(par.T_imu_cam_override, dtype=torch.float32)
    else:
        # EKF takes T_imu_cam, dataset provides T_cam_imu — invert.
        T_imu_cam = torch.tensor(np.linalg.inv(subseq.T_cam_imu), dtype=torch.float32)

    init_state = model.IMUKalmanFilter.encode_state(
        init_g,
        torch.eye(3, 3),     # C
        torch.zeros(3),      # r
        gt_velocities[0],    # v
        bw_0,                # bw
        ba_0)                # ba

    if self.no_image:
        return (subseq.length, subseq.seq, subseq.type, subseq.id, subseq.id_next), \
               torch.zeros(1), imu_data, init_state, T_imu_cam, gt_poses, gt_rel_poses

    # Load, augment and normalize the frame images.
    images = []
    for img_path in subseq.image_paths:
        if par.cache_image:
            image = SubseqDataset.__cache[img_path]
        else:
            image = self.load_image_func(img_path)

        # BUGFIX: test the most specific tag first. "flippedlr" is a substring
        # of "flippedlrud", so the old `if "flippedlr" ... elif "flippedlrud"`
        # ordering made the lr+ud branch unreachable and such images only
        # received the left-right flip.
        if "flippedlrud" in subseq.type:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            image = image.transpose(Image.FLIP_TOP_BOTTOM)
        elif "flippedlr" in subseq.type:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
        elif "flippedud" in subseq.type:
            image = image.transpose(Image.FLIP_TOP_BOTTOM)

        image = self.runtime_transformer(image)
        if self.minus_point_5:
            image = image - 0.5  # from [0, 1] -> [-0.5, 0.5]
        image = self.normalizer(image)

        # If monochrome, repeat the channel 3 times to fake RGB.
        if image.shape[0] == 1:
            image = image.repeat(3, 1, 1)
        images.append(image)
    images = torch.stack(images, 0)

    # Flipped/reversed augmentations break raw IMU consistency; flag them.
    invalid_imu = "flipped" in subseq.type or "reversed" in subseq.type

    return (subseq.length, subseq.seq, subseq.type, subseq.id, subseq.id_next, invalid_imu), \
           images, imu_data, init_state, T_imu_cam, gt_poses, gt_rel_poses