예제 #1
0
    def __init__(self, model, sequences, eval_length):
        """Prepare one DataLoader per sequence for online evaluation.

        The model is stored by reference (not copied) so the evaluator
        always sees the latest weights; `sequences` and `eval_length`
        define the evaluation data to preload.
        """
        self.model = model  # reference, not a copy

        # Pick the error metric matching the configured dataset.
        if par.dataset() == "KITTI":
            self.error_calc = KittiErrorCalc(sequences)
        elif par.dataset() == "EUROC":
            self.error_calc = EurocErrorCalc(sequences)

        logger.print("Loading data for the online dataset evaluator...")
        self.dataloaders = {}
        for sequence in sequences:
            subsequences = get_subseqs([sequence],
                                       eval_length,
                                       overlap=1,
                                       sample_times=1,
                                       training=False)
            eval_dataset = SubseqDataset(subsequences,
                                         (par.img_h, par.img_w),
                                         par.img_means,
                                         par.img_stds,
                                         par.minus_point_5,
                                         training=False)
            self.dataloaders[sequence] = DataLoader(eval_dataset,
                                                    batch_size=1,
                                                    shuffle=False,
                                                    num_workers=4)
예제 #2
0
    def imu_predict_euroc(self):
        """Run prediction on selected EUROC sequences and report APE errors.

        For each sequence, runs predict_test_case on CPU, saves the inverted
        estimated poses to "<output_dir>/<seq>/est.npy", accumulates the
        EUROC APE error, and finally prints the average error.
        """
        # Bug fix: the output directory was named "imu_predict_kitti" even
        # though this routine processes EUROC sequences.
        output_dir = os.path.join(par.results_coll_dir, "imu_predict_euroc")
        seqs = ['MH_05', 'V1_03', 'V2_02']
        error_calc = EurocErrorCalc(seqs)

        for i in range(0, len(seqs)):
            logger.initialize(os.path.join(output_dir, seqs[i]),
                              use_tensorboard=False)

            timestamps, gt_poses, gt_vels, poses, states, covars, precomp_covars = \
                self.predict_test_case([seqs[i]], None, "cpu", False)

            # Estimates come out as inverse transforms; invert before saving.
            poses_np = np.linalg.inv(poses[0].numpy().astype(np.float64))
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(output_dir, seqs[i], "est.npy")), poses_np)

            err = error_calc.accumulate_error(seqs[i], poses_np)
            logger.print("Processed seq %s" % seqs[i])
            # Bug fix: EurocErrorCalc yields a single APE value, not separate
            # translation & rotation errors, so report it as one error.
            logger.print("Error: %.6f" % err)

        logger.print("Ave Error: ")
        logger.print(np.average(np.array(error_calc.errors)))
예제 #3
0
def euroc_eval(working_dir, seqs):
    """Evaluate saved EUROC pose estimates for every requested sequence.

    Loads "<working_dir>/est_poses/<seq>.npy" per sequence, accumulates the
    RMSE APE through EurocErrorCalc, prints a summary table, and emits a
    comma-separated line for spreadsheet import.
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ Evaluate EUROC ================")
    logger.print("Working on directory:", working_dir)

    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_est_files = sorted(os.listdir(pose_est_dir))

    logger.print("Evaluating seqs: %s" % ", ".join(seqs))
    available_seqs = [f.replace(".npy", "") for f in pose_est_files]

    # Summary table: one row per sequence plus a final average row.
    table = prettytable.PrettyTable()
    table.field_names = ["Seq.", "RMSE APE Err"]
    table.align["Seq."] = "l"
    table.align["RMSE APE Err"] = "r"

    assert set(seqs).issubset(available_seqs), "est file is not available, have seqs: %s" % \
                                               ", ".join(list(available_seqs))
    error_calc = EurocErrorCalc(seqs)
    stats = []

    for seq in seqs:
        # Belt-and-braces re-check of the subset assertion above.
        if seq not in available_seqs:
            raise RuntimeError("File for seq %s not available" % seq)

        est = np.load(os.path.join(pose_est_dir, "%s.npy" % seq))
        seq_stat = error_calc.accumulate_error(seq, est)
        stats.append(seq_stat)
        table.add_row([seq, "%.6f" % seq_stat])

    table.add_row(["Ave.", "%.6f" % error_calc.get_average_error()])
    logger.print(table)

    logger.print("Copy to Google Sheets:",
                 ",".join(str(s) for s in stats))
예제 #4
0
    def save_as_pd(data_frames,
                   g_i,
                   bw_0,
                   T_cam_imu,
                   output_dir,
                   ba_0=np.zeros(3)):
        """Serialize a list of Frame objects plus calibration constants.

        Writes "<output_dir>/data.pickle" (pandas DataFrame, one row per
        frame) and "<output_dir>/constants.npy" (dict with g_i, T_cam_imu,
        bw_0, ba_0), then returns the DataFrame.
        """
        start_time = time.time()

        # One column per Frame attribute, one row per frame.
        column_names = ["image_path", "timestamp", "T_i_vk", "v_vk_i_vk",
                        "imu_timestamps", "imu_poses", "accel_measurements",
                        "gyro_measurements", "timestamp_raw"]
        data = {name: [getattr(frame, name) for frame in data_frames]
                for name in column_names}
        df = pd.DataFrame(data, columns=data.keys())
        df.to_pickle(os.path.join(output_dir, "data.pickle"))

        np.save(os.path.join(output_dir, "constants.npy"),
                {"g_i": g_i,
                 "T_cam_imu": T_cam_imu,
                 "bw_0": bw_0,
                 "ba_0": ba_0})

        logger.print("Saving pandas took %.2fs" % (time.time() - start_time))

        return df
예제 #5
0
def plot_ekf_states(working_dir):
    """Plot saved EKF estimates against ground truth for every sequence.

    Expects matching per-sequence .npy files under working_dir in
    "timestamps", "ekf_states/poses", "ekf_states/states",
    "ekf_states/gt_velocities" and "gt_poses".

    NOTE(review): output_dir (working_dir/figures) is created but the plots
    are written to working_dir/ekf_states/figures/<seq> by plot_ekf_data --
    confirm which location is intended.
    """
    output_dir = os.path.join(working_dir, "figures")

    timestamps_dir = os.path.join(working_dir, "timestamps")
    poses_dir = os.path.join(working_dir, "ekf_states", "poses")
    states_dir = os.path.join(working_dir, "ekf_states", "states")
    gt_velocities_dir = os.path.join(working_dir, "ekf_states",
                                     "gt_velocities")
    gt_poses_dir = os.path.join(working_dir, "gt_poses")

    pose_files = sorted(os.listdir(poses_dir))
    # Every directory must contain the same set of sequence files.
    assert sorted(os.listdir(poses_dir)) == sorted(os.listdir(states_dir)) == sorted(os.listdir(gt_velocities_dir)) == \
           sorted(os.listdir(timestamps_dir))

    Logger.make_dir_if_not_exist(output_dir)
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ PLOT EKF States ================")
    logger.print("Working on directory:", working_dir)
    logger.print("Found pose estimate files: \n" + "\n".join(pose_files))

    for i, pose_est_file in enumerate(pose_files):
        seq = os.path.splitext(pose_est_file)[0]
        plot_ekf_data(os.path.join(working_dir, "ekf_states", "figures", seq),
                      np.load(os.path.join(timestamps_dir, seq + ".npy")),
                      np.load(os.path.join(gt_poses_dir, seq + ".npy")),
                      np.load(os.path.join(gt_velocities_dir, seq + ".npy")),
                      np.load(os.path.join(poses_dir, seq + ".npy")),
                      np.load(os.path.join(states_dir, seq + ".npy")))
        logger.print("Plot saved for sequence %s" % seq)
예제 #6
0
def np_traj_to_kitti(working_dir):
    """Convert saved numpy trajectories to KITTI text format.

    Reads "<working_dir>/est_poses/<seq>.npy" and the matching ground truth
    from "<working_dir>/gt_poses", and writes "<working_dir>/kitti/<seq>_est.txt"
    and "<seq>_gt.txt" via write_trj.
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ CONVERT TO KITTI ================")
    logger.print("Working on directory:", working_dir)

    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    kitti_traj_output = os.path.join(working_dir, "kitti")
    Logger.make_dir_if_not_exist(kitti_traj_output)
    pose_est_files = sorted(os.listdir(pose_est_dir))

    logger.print("Found pose estimate files: \n" + "\n".join(pose_est_files))

    for pose_est_file in pose_est_files:
        sequence = os.path.splitext(pose_est_file)[0]

        traj_est = np.load(os.path.join(pose_est_dir, "%s.npy" % sequence))
        traj_gt = np.load(os.path.join(pose_gt_dir, "%s.npy" % sequence))
        assert (traj_est.shape[0] == traj_gt.shape[0])

        # Bug fix: the output files were opened without context managers and
        # leaked on any exception from write_trj; `with` guarantees closure.
        with open(os.path.join(kitti_traj_output, "%s_est.txt" % sequence), "w") as kitti_est_file, \
                open(os.path.join(kitti_traj_output, "%s_gt.txt" % sequence), "w") as kitti_gt_file:
            for j in range(0, traj_est.shape[0]):
                write_trj(kitti_est_file, traj_est[j, :, :])
                write_trj(kitti_gt_file, traj_gt[j, :, :])

    logger.print("All Done.")
예제 #7
0
def calc_error(working_dir):
    """Compute and save per-sequence trajectory errors.

    For each "<seq>.npy" in "<working_dir>/est_poses", computes three error
    series and saves them under "<working_dir>/errors":
      * abs/:      per-frame absolute pose error inv(est).gt, encoded as a
                   6-vector [log_SO3(C) | r]
      * rel/:      frame-to-frame relative pose error in the same encoding
                   (first entry is zero)
      * vis_meas/: ground-truth relative motion minus the stored visual
                   measurement (first entry is zero)
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ CALCULATE ERRORS ================")
    logger.print("Working on directory:", working_dir)

    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    vis_meas_dir = os.path.join(working_dir, "vis_meas")
    errors_output_dir = os.path.join(working_dir, "errors")
    Logger.make_dir_if_not_exist(errors_output_dir)
    pose_est_files = sorted(os.listdir(pose_est_dir))

    logger.print("Found pose estimate files: \n" + "\n".join(pose_est_files))

    for i, pose_est_file in enumerate(pose_est_files):
        sequence = os.path.splitext(pose_est_file)[0]

        traj_est = np.load(os.path.join(pose_est_dir, "%s.npy" % sequence))
        traj_gt = np.load(os.path.join(pose_gt_dir, "%s.npy" % sequence))
        vis_meas = np.load(os.path.join(vis_meas_dir, "meas", "%s.npy" % sequence))
        assert (traj_est.shape[0] == traj_gt.shape[0])

        # Leading zeros: rel/vis_meas errors are undefined for the first frame.
        abs_traj_error = []
        rel_traj_error = [np.zeros(6)]
        vis_meas_error = [np.zeros(6)]

        # Absolute error of every frame w.r.t. ground truth.
        for j in range(0, traj_est.shape[0]):
            pose_est = traj_est[j]
            pose_gt = traj_gt[j]

            abs_pose_err = np.linalg.inv(pose_est).dot(pose_gt)
            abs_traj_error.append(np.concatenate([log_SO3(C_from_T(abs_pose_err)), r_from_T(abs_pose_err)]))

        # Relative (frame-to-frame) error, plus the error of the stored
        # visual measurement against the ground-truth relative motion.
        for j in range(1, traj_est.shape[0]):
            rel_pose_gt = np.linalg.inv(traj_gt[j - 1]).dot(traj_gt[j])
            rel_pose_est = np.linalg.inv(traj_est[j - 1]).dot(traj_est[j])
            rel_pose_err = np.linalg.inv(rel_pose_est).dot(rel_pose_gt)
            rel_traj_error.append(np.concatenate([log_SO3(C_from_T(rel_pose_err)), r_from_T(rel_pose_err)]))
            vis_meas_error.append(np.concatenate([log_SO3(C_from_T(rel_pose_gt)), r_from_T(rel_pose_gt)]) -
                                  vis_meas[j - 1])

        np.save(logger.ensure_file_dir_exists(os.path.join(errors_output_dir, "abs", sequence + ".npy")),
                np.array(abs_traj_error))
        np.save(logger.ensure_file_dir_exists(os.path.join(errors_output_dir, "rel", sequence + ".npy")),
                np.array(rel_traj_error))
        np.save(logger.ensure_file_dir_exists(os.path.join(errors_output_dir, "vis_meas", sequence + ".npy")),
                np.array(vis_meas_error))
        logger.print("Error saved for sequence %s" % sequence)

    logger.print("All Done.")
예제 #8
0
def remove_negative_timesteps(imu_timestamps, imu_data, gps_poses):
    """Drop duplicate IMU timestamps and keep all three arrays time-sorted.

    np.unique deduplicates the timestamps (returning them sorted) and the
    matching rows of imu_data / gps_poses are re-ordered accordingly.  A
    final sanity pass asserts strictly increasing time and warns on gaps
    larger than 1.5x the nominal 100 Hz period.
    """
    imu_timestamps, first_occurrence = np.unique(imu_timestamps, return_index=True)
    time_order = np.argsort(imu_timestamps)
    imu_data = imu_data[first_occurrence, :][time_order, :]
    gps_poses = gps_poses[first_occurrence, :][time_order, :]

    # Sanity pass: time must be strictly increasing; log unusually big gaps.
    for i, (t_prev, t_next) in enumerate(zip(imu_timestamps[:-1],
                                             imu_timestamps[1:]), start=1):
        dt = t_next - t_prev
        if dt > (1.5 / 100):
            logger.print("WARNING: Larger than usual timestep of %.5fs detected time [%5.2fs -> %5.2fs] idx [%d -> %d]"
                         % (dt, t_prev, t_next, i - 1, i))
        assert (dt > 0)

    return imu_timestamps, imu_data, gps_poses
예제 #9
0
def log_SO3_b(C, raise_exeption=True):
    """Batched logarithm map of SO(3): rotation matrices to axis-angle.

    C: tensor of shape (..., 3, 3); returns phi of shape (..., 3, 1) on the
    same device as C.

    raise_exeption: raise ValueError when any rotation angle is too close
    to pi (where the log map is ill-conditioned); otherwise only warn.
    NOTE(review): the misspelled keyword name is part of the public
    interface and is therefore kept.
    """
    eps = 1e-6
    eps_pi = 1e-4  # strict eps_pi

    ret_sz = list(C.shape[:-2]) + [3, 1]
    phi = torch.zeros(*ret_sz, device=C.device)
    # cos(theta) = (trace(C) - 1) / 2 for a rotation matrix C.
    trace = torch.sum(torch.diagonal(C, dim1=-2, dim2=-1),
                      dim=-1,
                      keepdim=True)
    acos_ratio = torch.unsqueeze((trace - 1) / 2, -1)

    # Angles near pi (cos(theta) close to -1) are ill-conditioned: dump the
    # offending entries and optionally raise.
    if torch.any(acos_ratio + 1.0 < eps_pi):
        sel_invalid = torch.sum(acos_ratio + 1.0 < eps_pi, (-2, -1)) > 0
        logger.print(sel_invalid)
        logger.print(C[sel_invalid])
        logger.print("Warn: log_SO3_b acos_ratio close to -1")
        if raise_exeption:
            raise ValueError("Warn: log_SO3_b acos_ratio close to -1")

    # sel: generic angles; not_sel: angles near zero (cos(theta) ~ 1) where
    # the first-order approximation 0.5 * unskew(C - C^T) is used instead.
    sel = ((acos_ratio - 1.0 < -eps) & ~(acos_ratio + 1.0 < eps_pi)).view(
        ret_sz[:-2])
    not_sel = (~(acos_ratio - 1.0 < -eps) & ~(acos_ratio + 1.0 < eps_pi)).view(
        ret_sz[:-2])
    phi_norm_sel = torch.acos(acos_ratio[sel])
    C_sel = C[sel]
    C_not_sel = C[not_sel]

    # Inverse Rodrigues formula: phi = theta * unskew(C - C^T) / (2 sin(theta)).
    phi[sel] = phi_norm_sel * unskew3_b(C_sel - C_sel.transpose(-2, -1)) / (
        2 * torch.sin(phi_norm_sel))
    phi[not_sel] = 0.5 * unskew3_b(C_not_sel - C_not_sel.transpose(-2, -1))

    return phi
예제 #10
0
def print_error_table(errors, ave_errors):
    """Print per-sequence translation/rotation errors and their average.

    errors maps sequence name -> (trans_err, rot_err_rad); ave_errors is
    the average (trans, rot) pair.  Rotation is reported in degrees.  Also
    prints a flat comma-separated line for pasting into Google Sheets.
    """
    table = prettytable.PrettyTable()
    table.field_names = ["Seq.", "Trans. Err", "Rot. Error"]
    table.align["Seq."] = "l"
    table.align["Trans. Err"] = "r"
    table.align["Rot. Error"] = "r"

    for seq in sorted(errors):
        trans_err, rot_err = errors[seq]
        table.add_row([seq,
                       "%.6f" % trans_err,
                       "%.6f" % (rot_err * 180 / np.pi)])
    table.add_row(["Ave.",
                   "%.6f" % ave_errors[0],
                   "%.6f" % (ave_errors[1] * 180 / np.pi)])
    logger.print(table)

    # Flat per-sequence (trans, rot_deg) pairs followed by the averages.
    sheet_cells = []
    for seq in sorted(errors):
        sheet_cells.append(str(errors[seq][0]))
        sheet_cells.append(str(errors[seq][1] * 180 / np.pi))
    sheet_cells.append(str(ave_errors[0]))
    sheet_cells.append(str(ave_errors[1] * 180 / np.pi))
    logger.print("Copy to Google Sheets:", ",".join(sheet_cells))
예제 #11
0
    def evaluate_abs(self):
        """Evaluate absolute-pose trajectories online; return the average error.

        Runs the model over every prepared dataloader, feeds the inverted
        estimated poses to the error calculator, prints per-sequence errors,
        and clears the accumulator before returning the average.
        """
        start_time = time.time()
        _, _, est_poses_dict, _, _ = gen_trajectory_abs_iter(
            self.model, self.dataloaders)
        for seq, est_poses in est_poses_dict.items():
            err = self.error_calc.accumulate_error(
                seq, np.linalg.inv(np.array(est_poses, dtype=np.float64)))
            logger.print("%s: %.5f" % (seq, err), end=" ")
        logger.print()
        ave_err = self.error_calc.get_average_error()
        self.error_calc.clear()
        logger.print("Online evaluation abs took %.2fs, err %.6f" %
                     (time.time() - start_time, ave_err))

        return ave_err
예제 #12
0
    def evaluate_rel(self):
        """Evaluate relative-pose trajectories online; return the average error.

        For each sequence (in sorted order) integrates the model's relative
        predictions into absolute poses, accumulates the error, prints the
        per-sequence values, and clears the accumulator before returning
        the average.
        """
        start_time = time.time()
        for seq in sorted(self.dataloaders):
            abs_poses, _, _ = gen_trajectory_rel_iter(
                self.model, self.dataloaders[seq], True)
            err = self.error_calc.accumulate_error(seq, np.array(abs_poses))
            logger.print("%s: %.5f" % (seq, err), end=" ")
        logger.print()
        ave_err = self.error_calc.get_average_error()
        self.error_calc.clear()
        logger.print("Online evaluation took %.2fs, err %.6f" %
                     (time.time() - start_time, ave_err))

        return ave_err
예제 #13
0
    def test_ekf_euroc(self):
        """Sanity-check the IMU Kalman filter on EUROC sequences.

        Feeds ground-truth relative poses as the visual measurement, so the
        filtered trajectory should track ground truth closely; per-sequence
        and average APE errors are reported and EKF state figures plotted
        under results_coll_dir/test_ekf_euroc.
        """
        output_dir = os.path.join(par.results_coll_dir, "test_ekf_euroc")
        logger.initialize(output_dir, use_tensorboard=False)

        # seqs = ["MH_01"]
        seqs = [
            "MH_01", "MH_02", "MH_03", "MH_04", "MH_05", "V1_01", "V1_02",
            "V1_03", "V2_01", "V2_02", "V2_03"
        ]
        # seqs2 = ["MH_01_eval", "MH_02_eval", "MH_03_eval", "MH_04_eval", "MH_05_eval",
        #          "V1_01_eval", "V1_02_eval", "V1_03_eval", "V2_01_eval", "V2_02_eval", "V2_03_eval"]
        # seqs = seqs + seqs2

        error_calc = EurocErrorCalc(seqs)

        # Fixed IMU process noise (diagonal) and visual measurement noise
        # used for every sequence.
        imu_covar = torch.diag(
            torch.tensor([
                1e-3, 1e-3, 1e-3, 1e-5, 1e-5, 1e-5, 1e-1, 1e-1, 1e-1, 1e-2,
                1e-2, 1e-2
            ]))
        vis_meas_covar = torch.diag(
            torch.tensor([1e-3, 1e-3, 1e-3, 1e-3, 1e-3,
                          1e-3])).view(1, 1, 6, 6)
        # Initial 18x18 state covariance, block-diagonal per state component.
        init_covar = np.eye(18, 18)
        init_covar[0:3, 0:3] = np.eye(3, 3) * 1e-2  # g
        init_covar[3:9, 3:9] = np.zeros([6, 6])  # C,r
        init_covar[9:12, 9:12] = np.eye(3, 3) * 1e-2  # v
        init_covar[12:15, 12:15] = np.eye(3, 3) * 1e-2  # bw
        init_covar[15:18, 15:18] = np.eye(3, 3) * 1e2  # ba
        init_covar = torch.tensor(init_covar,
                                  dtype=torch.float32).view(1, 18, 18)
        ekf = IMUKalmanFilter()

        for seq in seqs:
            # Length-2 subsequences with overlap 1 -> one filter step per
            # consecutive frame pair.
            subseqs = get_subseqs([seq],
                                  2,
                                  overlap=1,
                                  sample_times=1,
                                  training=False)
            dataset = SubseqDataset(subseqs, (par.img_h, par.img_w),
                                    0,
                                    1,
                                    True,
                                    training=False,
                                    no_image=True)
            dataloader = torch.utils.data.DataLoader(dataset,
                                                     batch_size=1,
                                                     shuffle=False,
                                                     num_workers=0)
            seq_data = SequenceData(seq)

            est_poses = []
            est_ekf_states = []
            est_ekf_covars = []

            for i, data in enumerate(dataloader):
                # print('%d/%d (%.2f%%)' % (i, len(dataloader), i * 100 / len(dataloader)), end="\n")

                _, _, imu_data, init_state, _, gt_poses_inv, gt_rel_poses = data

                gt_rel_poses = gt_rel_poses.view(1, 1, 6, 1)

                if i == 0:
                    # First window: seed the filter from the ground-truth
                    # initial pose/state and the fixed initial covariance;
                    # keep both the initial and final filtered entries.
                    pose, ekf_state, ekf_covar = ekf.forward(
                        imu_data, imu_covar, gt_poses_inv[:, 0], init_state,
                        init_covar, gt_rel_poses, vis_meas_covar,
                        torch.eye(4, 4).view(1, 4, 4))
                    est_poses.append(pose[:, 0])
                    est_ekf_states.append(ekf_state[:, 0])
                    est_ekf_covars.append(ekf_covar[:, 0])
                    est_poses.append(pose[:, -1])
                    est_ekf_states.append(ekf_state[:, -1])
                    est_ekf_covars.append(ekf_covar[:, -1])
                else:
                    # Subsequent windows continue from the last filtered
                    # pose/state/covariance.
                    pose, ekf_state, ekf_covar = ekf.forward(
                        imu_data, imu_covar, est_poses[-1], est_ekf_states[-1],
                        est_ekf_covars[-1], gt_rel_poses, vis_meas_covar,
                        torch.eye(4, 4).view(1, 4, 4))

                    est_poses.append(pose[:, -1])
                    est_ekf_states.append(ekf_state[:, -1])
                    est_ekf_covars.append(ekf_covar[:, -1])

            # Stack the filtered poses; they are accumulated in inverse form
            # (cf. gt_poses_inv), so invert before computing the APE error.
            est_poses_np = torch.stack(
                est_poses,
                1).squeeze().detach().cpu().numpy().astype("float64")
            err = error_calc.accumulate_error(seq, np.linalg.inv(est_poses_np))

            logger.print("Error: %.5f" % err)

            logger.print("Plotting figures...")
            plot_ekf_data(os.path.join(output_dir, seq),
                          seq_data.get_timestamps(), seq_data.get_poses(),
                          seq_data.get_velocities(),
                          torch.stack(est_poses, 1).squeeze(),
                          torch.stack(est_ekf_states, 1).squeeze())

            logger.print("Finished Sequence %s" % seq)

        logger.print("Done! Ave Error: %.5f" % error_calc.get_average_error())
예제 #14
0
def preprocess_kitti_raw(raw_seq_dir, output_dir, cam_subset_range, plot_figures=True):
    """Preprocess a raw KITTI sequence into per-frame camera/IMU data.

    Loads OXTS GPS/IMU data and camera timestamps from raw_seq_dir, trims
    everything to the camera index range cam_subset_range, interpolates the
    IMU stream to the exact camera timestamps, and saves the resulting
    frames plus calibration constants via SequenceData.save_as_pd into
    output_dir.  When plot_figures is True, a series of sanity-check
    figures is generated (integrated accelerometer/gyroscope compared
    against GPS poses and velocities).
    """
    logger.initialize(working_dir=output_dir, use_tensorboard=False)
    logger.print("================ PREPROCESS KITTI RAW ================")
    logger.print("Preprocessing %s" % raw_seq_dir)
    logger.print("Output to: %s" % output_dir)
    logger.print("Camera images: %d => %d" % (cam_subset_range[0], cam_subset_range[1]))

    oxts_dir = os.path.join(raw_seq_dir, "oxts")
    image_dir = os.path.join(raw_seq_dir, "image_02")
    gps_poses = np.loadtxt(os.path.join(oxts_dir, "poses.txt"))
    gps_poses = np.array([np.vstack([np.reshape(p, [3, 4]), [0, 0, 0, 1]]) for p in gps_poses])
    T_velo_imu = np.loadtxt(os.path.join(raw_seq_dir, "../T_velo_imu.txt"))
    T_cam_velo = np.loadtxt(os.path.join(raw_seq_dir, '../T_cam_velo.txt'))
    T_cam_imu = T_cam_velo.dot(T_velo_imu)

    # imu timestamps
    imu_timestamps = read_timestamps(os.path.join(oxts_dir, "timestamps.txt"))
    assert (len(imu_timestamps) == len(gps_poses))

    # load image data
    cam_timestamps = read_timestamps(os.path.join(image_dir, "timestamps.txt"))
    image_paths = [os.path.join(image_dir, "data", p) for p in sorted(os.listdir(os.path.join(image_dir, "data")))]
    assert (len(cam_timestamps) == len(image_paths))
    assert (cam_subset_range[0] >= 0 and cam_subset_range[1] < len(image_paths))

    # the first camera timestamps must be between IMU timestamps
    assert (cam_timestamps[cam_subset_range[0]] >= imu_timestamps[0])
    assert (cam_timestamps[cam_subset_range[1]] <= imu_timestamps[-1])

    # take subset of the camera images int the range of images we are interested in
    image_paths = image_paths[cam_subset_range[0]: cam_subset_range[1] + 1]
    cam_timestamps = cam_timestamps[cam_subset_range[0]: cam_subset_range[1] + 1]

    # convert to local time reference in seconds
    cam_timestamps = (cam_timestamps - imu_timestamps[0]) / np.timedelta64(1, 's')
    imu_timestamps = (imu_timestamps - imu_timestamps[0]) / np.timedelta64(1, 's')

    # take a subset of imu data corresponds to camera images
    idx_imu_data_start = find_timestamps_in_between(cam_timestamps[0], imu_timestamps)[0]
    idx_imu_data_end = find_timestamps_in_between(cam_timestamps[-1], imu_timestamps)[1]
    imu_timestamps = imu_timestamps[idx_imu_data_start:idx_imu_data_end + 1]
    gps_poses = gps_poses[idx_imu_data_start:idx_imu_data_end + 1]

    # load IMU data from list of text files
    imu_data = []
    imu_data_files = sorted(os.listdir(os.path.join(oxts_dir, "data")))
    start_time = time.time()
    for i in range(idx_imu_data_start, idx_imu_data_end + 1):
        print("Loading IMU data files %d/%d (%.2f%%)"
              % (i + 1 - idx_imu_data_start, len(imu_timestamps),
                 100 * (i + 1 - idx_imu_data_start) / len(imu_timestamps)), end='\r')
        imu_data.append(np.loadtxt(os.path.join(oxts_dir, "data", imu_data_files[i])))
    imu_data = np.array(imu_data)
    logger.print("\nLoading IMU data took %.2fs" % (time.time() - start_time))
    assert (len(imu_data) == len(gps_poses))

    # imu_data = imu_data[idx_imu_data_start:idx_imu_data_end + 1]
    imu_timestamps, imu_data, gps_poses = remove_negative_timesteps(imu_timestamps, imu_data, gps_poses)

    data_frames = []
    start_time = time.time()
    idx_imu_slice_start = 0
    idx_imu_slice_end = 0
    for k in range(0, len(cam_timestamps) - 1):
        print("Processing IMU data files %d/%d (%.2f%%)" % (
            k + 1, len(cam_timestamps), 100 * (k + 1) / len(cam_timestamps)), end='\r')

        t_k = cam_timestamps[k]
        t_kp1 = cam_timestamps[k + 1]

        # the start value does not need to be recomputed, since you can get that from the previous time step, but
        # i am a lazy person, this will work
        while imu_timestamps[idx_imu_slice_start] < t_k:
            idx_imu_slice_start += 1

        assert (imu_timestamps[idx_imu_slice_start - 1] <= t_k <= imu_timestamps[idx_imu_slice_start])
        # interpolate
        tk_i = imu_timestamps[idx_imu_slice_start - 1]
        tk_j = imu_timestamps[idx_imu_slice_start]
        alpha_k = (t_k - tk_i) / (tk_j - tk_i)
        T_i_vk, v_vk, w_vk, a_vk = \
            interpolate(imu_data[idx_imu_slice_start - 1], imu_data[idx_imu_slice_start],
                        gps_poses[idx_imu_slice_start - 1], gps_poses[idx_imu_slice_start], alpha_k)

        while imu_timestamps[idx_imu_slice_end] < t_kp1:
            idx_imu_slice_end += 1
        assert (imu_timestamps[idx_imu_slice_end - 1] <= t_kp1 <= imu_timestamps[idx_imu_slice_end])
        # interpolate
        tkp1_i = imu_timestamps[idx_imu_slice_end - 1]
        tkp1_j = imu_timestamps[idx_imu_slice_end]
        alpha_kp1 = (t_kp1 - tkp1_i) / (tkp1_j - tkp1_i)
        T_i_vkp1, v_vkp1, w_vkp1, a_vkp1 = \
            interpolate(imu_data[idx_imu_slice_end - 1], imu_data[idx_imu_slice_end],
                        gps_poses[idx_imu_slice_end - 1], gps_poses[idx_imu_slice_end], alpha_kp1)

        imu_timestamps_k_kp1 = np.concatenate(
                [[t_k], imu_timestamps[idx_imu_slice_start:idx_imu_slice_end - 1], [t_kp1]])
        imu_poses = np.concatenate([[T_i_vk], gps_poses[idx_imu_slice_start:idx_imu_slice_end - 1], [T_i_vkp1]])
        accel_measurements_k_kp1 = np.concatenate([[a_vk],
                                                   imu_data[idx_imu_slice_start: idx_imu_slice_end - 1, ax:az + 1],
                                                   [a_vkp1]])
        gyro_measurements_k_kp1 = np.concatenate([[w_vk],
                                                  imu_data[idx_imu_slice_start: idx_imu_slice_end - 1, wx:wz + 1],
                                                  [w_vkp1]])
        frame_k = SequenceData.Frame(image_paths[k], t_k, T_i_vk, v_vk,
                                     imu_poses, imu_timestamps_k_kp1, accel_measurements_k_kp1, gyro_measurements_k_kp1)
        data_frames.append(frame_k)

        # assertions for sanity check
        assert (np.allclose(data_frames[-1].timestamp, data_frames[-1].imu_timestamps[0], atol=1e-13))
        assert (np.allclose(data_frames[-1].T_i_vk, data_frames[-1].imu_poses[0], atol=1e-13))
        if len(data_frames) > 1:
            assert (np.allclose(data_frames[-1].timestamp, data_frames[-2].imu_timestamps[-1], atol=1e-13))
            assert (np.allclose(data_frames[-1].T_i_vk, data_frames[-2].imu_poses[-1], atol=1e-13))
            assert (
                np.allclose(data_frames[-1].accel_measurements[0], data_frames[-2].accel_measurements[-1], atol=1e-13))
            # Bug fix: this assertion previously re-checked accel_measurements
            # a second time; the intended check is gyro continuity.
            assert (
                np.allclose(data_frames[-1].gyro_measurements[0], data_frames[-2].gyro_measurements[-1], atol=1e-13))

    # add the last frame without any IMU data
    data_frames.append(SequenceData.Frame(image_paths[-1], t_kp1, T_i_vkp1, v_vkp1,
                                          np.zeros([0, 4, 4]), np.zeros([0]), np.zeros([0, 3]), np.zeros([0, 3])))

    logger.print("\nProcessing data took %.2fs" % (time.time() - start_time))

    df = SequenceData.save_as_pd(data_frames, np.array([0, 0, 9.808679801065017]), np.zeros(3), T_cam_imu, output_dir)
    data = df.to_dict("list")

    if not plot_figures:
        logger.print("All done!")
        return

    # ============================== FIGURES FOR SANITY TESTS ==============================
    # plot trajectory
    start_time = time.time()
    plotter = Plotter(output_dir)
    p_poses = np.array(data["T_i_vk"])
    p_timestamps = np.array(data["timestamp"])
    p_velocities = np.array(data["v_vk_i_vk"])

    p_imu_timestamps = np.concatenate([d[:-1] for d in data['imu_timestamps']])
    p_gyro_measurements = np.concatenate([d[:-1] for d in data['gyro_measurements']])
    p_accel_measurements = np.concatenate([d[:-1] for d in data["accel_measurements"]])
    p_imu_poses = np.concatenate([d[:-1, :, :] for d in data["imu_poses"]])
    assert (len(p_imu_timestamps) == len(p_gyro_measurements))
    assert (len(p_imu_timestamps) == len(p_accel_measurements))
    assert (len(p_imu_timestamps) == len(p_imu_poses))

    # integrate accel to compare against velocity
    p_accel_int = [p_velocities[0, :]]
    p_accel_int_int = [p_poses[0, :3, 3]]
    # g = np.array([0, 0, 9.80665])
    g = np.array([0, 0, 9.808679801065017])
    # g = np.array([0, 0, 9.8096])
    for i in range(0, len(p_imu_timestamps) - 1):
        dt = p_imu_timestamps[i + 1] - p_imu_timestamps[i]
        C_i_vk = p_imu_poses[i, :3, :3]
        C_vkp1_vk = p_imu_poses[i + 1, :3, :3].transpose().dot(p_imu_poses[i, :3, :3])

        v_vk_i_vk = p_accel_int[-1]
        v_vkp1_vk_vk = dt * (p_accel_measurements[i] - C_i_vk.transpose().dot(g))
        v_vkp1_i_vk = v_vk_i_vk + v_vkp1_vk_vk
        p_accel_int.append(C_vkp1_vk.dot(v_vkp1_i_vk))
        p_accel_int_int.append(p_accel_int_int[-1] + p_imu_poses[i, :3, :3].dot(p_accel_int[-1]) * dt)
    p_accel_int = np.array(p_accel_int)
    p_accel_int_int = np.array(p_accel_int_int)

    # poses from integrating velocity
    p_vel_int_poses = [p_poses[0, :3, 3]]
    for i in range(0, len(p_velocities) - 1):
        dt = p_timestamps[i + 1] - p_timestamps[i]
        dp = p_poses[i, :3, :3].dot(p_velocities[i]) * dt
        p_vel_int_poses.append(p_vel_int_poses[-1] + dp)
    p_vel_int_poses = np.array(p_vel_int_poses)

    plotter.plot(([p_poses[:, 0, 3], p_poses[:, 1, 3]],
                  [p_vel_int_poses[:, 0], p_vel_int_poses[:, 1]],
                  [p_accel_int_int[:, 0], p_accel_int_int[:, 1]],),
                 "x [m]", "Y [m]", "XY Plot", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"], equal_axes=True)
    plotter.plot(([p_poses[:, 0, 3], p_poses[:, 2, 3]],
                  [p_vel_int_poses[:, 0], p_vel_int_poses[:, 2]],
                  [p_accel_int_int[:, 0], p_accel_int_int[:, 2]],),
                 "X [m]", "Z [m]", "XZ Plot", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"], equal_axes=True)
    plotter.plot(([p_poses[:, 1, 3], p_poses[:, 2, 3]],
                  [p_vel_int_poses[:, 1], p_vel_int_poses[:, 2]],
                  [p_accel_int_int[:, 1], p_accel_int_int[:, 2]],),
                 "Y [m]", "Z [m]", "YZ Plot", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"], equal_axes=True)

    plotter.plot(([p_timestamps, p_poses[:, 0, 3]],
                  [p_timestamps, p_vel_int_poses[:, 0]],
                  [p_imu_timestamps, p_accel_int_int[:, 0]],),
                 # Bug fix: the y-axis label said "Y [m]" on the X plot.
                 "t [s]", "X [m]", "X Plot From Zero", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"])
    plotter.plot(([p_timestamps, p_poses[:, 1, 3]],
                  [p_timestamps, p_vel_int_poses[:, 1]],
                  [p_imu_timestamps, p_accel_int_int[:, 1]],),
                 # Bug fix: the y-axis label said "Z [m]" on the Y plot.
                 "t [s]", "Y [m]", "Y Plot From Zero", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"])
    plotter.plot(([p_timestamps, p_poses[:, 2, 3]],
                  [p_timestamps, p_vel_int_poses[:, 2]],
                  [p_imu_timestamps, p_accel_int_int[:, 2]],),
                 "t [s]", "Z [m]", "Z Plot From Zero", labels=["dat_poses", "dat_vel_int", "dat_acc_int^2"])

    # plot trajectory rotated wrt to the first frame
    p_poses_from_I = np.array([np.linalg.inv(p_poses[0]).dot(p) for p in p_poses])
    plotter.plot(([p_poses_from_I[:, 0, 3], p_poses_from_I[:, 1, 3]],),
                 "x [m]", "Y [m]", "XY Plot From Identity", equal_axes=True)
    plotter.plot(([p_poses_from_I[:, 0, 3], p_poses_from_I[:, 2, 3]],),
                 "X [m]", "Z [m]", "XZ Plot From Identity", equal_axes=True)
    plotter.plot(([p_poses_from_I[:, 1, 3], p_poses_from_I[:, 2, 3]],),
                 "Y [m]", "Z [m]", "YZ Plot From Identity", equal_axes=True)

    # plot p_velocities
    # Bug fix: this figure was titled "YZ Plot", colliding with (and
    # overwriting) the trajectory YZ plot generated above.
    plotter.plot(([p_timestamps, p_velocities[:, 0]], [p_timestamps, p_velocities[:, 1]],
                  [p_timestamps, p_velocities[:, 2]]), "t [s]", "v [m/s]", "Velocity Plot",
                 labels=["dat_vx", "dat_vy", "dat_vz"])

    # make sure the interpolated acceleration and gyroscope measurements are the same
    plotter.plot(([p_imu_timestamps, p_gyro_measurements[:, 0]], [imu_timestamps, imu_data[:, wx]],),
                 "t [s]", "w [rad/s]", "Rot Vel X Verification")
    plotter.plot(([p_imu_timestamps, p_gyro_measurements[:, 1]], [imu_timestamps, imu_data[:, wy]],),
                 "t [s]", "w [rad/s]", "Rot Vel Y Verification")
    plotter.plot(([p_imu_timestamps, p_gyro_measurements[:, 2]], [imu_timestamps, imu_data[:, wz]],),
                 "t [s]", "w [rad/s]", "Rot Vel Z Verification")
    plotter.plot(([p_imu_timestamps, p_accel_measurements[:, 0]], [imu_timestamps, imu_data[:, ax]],),
                 "t [s]", "a [m/s^2]", "Accel X Verification")
    plotter.plot(([p_imu_timestamps, p_accel_measurements[:, 1]], [imu_timestamps, imu_data[:, ay]],),
                 "t [s]", "a [m/s^2]", "Accel Y Verification")
    plotter.plot(([p_imu_timestamps, p_accel_measurements[:, 2]], [imu_timestamps, imu_data[:, az]],),
                 "t [s]", "a [m/s^2]", "Accel Z Verification")

    # integrate gyroscope to compare against rotation
    p_gyro_int = [data["T_i_vk"][0][:3, :3]]
    for i in range(0, len(p_imu_timestamps) - 1):
        dt = p_imu_timestamps[i + 1] - p_imu_timestamps[i]
        p_gyro_int.append(p_gyro_int[-1].dot(exp_SO3(dt * p_gyro_measurements[i])))
    p_gyro_int = np.array([log_SO3(o) for o in p_gyro_int])
    p_orientation = np.array([log_SO3(p[:3, :3]) for p in data["T_i_vk"]])

    plotter.plot(([p_imu_timestamps, np.unwrap(p_gyro_int[:, 0])], [p_timestamps, np.unwrap(p_orientation[:, 0])],),
                 "t [s]", "rot [rad/s]", "Theta X Cmp Plot", labels=["gyro_int", "dat_pose"])
    plotter.plot(([p_imu_timestamps, np.unwrap(p_gyro_int[:, 1])], [p_timestamps, np.unwrap(p_orientation[:, 1])],),
                 "t [s]", "rot [rad/s]", "Theta Y Cmp Plot", labels=["gyro_int", "dat_pose"])
    plotter.plot(([p_imu_timestamps, np.unwrap(p_gyro_int[:, 2])], [p_timestamps, np.unwrap(p_orientation[:, 2])],),
                 "t [s]", "rot [rad/s]", "Theta Z Cmp Plot", labels=["gyro_int", "dat_pose"])

    vel_from_gps_rel_poses = []
    for k in range(0, len(gps_poses) - 1):
        dt = imu_timestamps[k + 1] - imu_timestamps[k]
        T_i_vk = gps_poses[k]
        T_i_vkp1 = gps_poses[k + 1]
        T_vk_vkp1 = np.linalg.inv(T_i_vk).dot(T_i_vkp1)
        vel_from_gps_rel_poses.append(T_vk_vkp1[0:3, 3] / dt)
        # vel_from_gps_rel_poses.append(log_SE3(T_vk_vkp1)[0:3] / dt)
    vel_from_gps_rel_poses = np.array(vel_from_gps_rel_poses)

    plotter.plot(([imu_timestamps[1:], vel_from_gps_rel_poses[:, 0]],
                  [p_timestamps, p_velocities[:, 0]],
                  [p_imu_timestamps, p_accel_int[:, 0]],),
                 "t [s]", "v [m/s]", "Velocity X Cmp Plot", labels=["gps_rel", "dat_vel", "dat_accel_int"])
    plotter.plot(([imu_timestamps[1:], vel_from_gps_rel_poses[:, 1]],
                  [p_timestamps, p_velocities[:, 1]],
                  [p_imu_timestamps, p_accel_int[:, 1]],),
                 "t [s]", "v [m/s]", "Velocity Y Cmp Plot", labels=["gps_rel", "dat_vel", "dat_accel_int"])
    plotter.plot(([imu_timestamps[1:], vel_from_gps_rel_poses[:, 2]],
                  [p_timestamps, p_velocities[:, 2]],
                  [p_imu_timestamps, p_accel_int[:, 2]],),
                 "t [s]", "v [m/s]", "Velocity Z Cmp Plot", labels=["gps_rel", "dat_vel", "dat_accel_int"])

    logger.print("Generating figures took %.2fs" % (time.time() - start_time))
    logger.print("All done!")
예제 #15
0
def package_euroc_data(seq_dir, cam_timestamps, imu_timestamps, imu_data, gt_timestamps, gt_data):
    """Package aligned EuRoC camera/IMU/ground-truth streams into SequenceData.Frame objects.

    One frame is emitted per ~100 ms camera interval; each frame carries the
    IMU poses and measurements (subsampled by ``imu_skip``) spanning that
    interval.  A terminal frame with empty IMU arrays is appended for the last
    camera image.

    The column-index constants used below (px/py/pz, qw/qx/qy/qz, vx/vy/vz,
    ax/ay/az, wx/wy/wz) are module-level globals defined elsewhere in this file.

    :param seq_dir: raw EuRoC sequence directory; images are read from cam0/data
    :param cam_timestamps: camera timestamps [ns]; must start/end exactly on an IMU sample
    :param imu_timestamps: IMU timestamps [ns], aligned one-to-one with gt_timestamps
    :param imu_data: IMU measurement rows matching imu_timestamps
    :param gt_timestamps: ground-truth timestamps [ns]
    :param gt_data: ground-truth state rows matching gt_timestamps
    :return: list of SequenceData.Frame
    """
    # IMU and ground truth must be sample-aligned (timestamps are nanoseconds,
    # so < 1000 means within 1 us), and the camera stream must start and end
    # exactly on an IMU sample.
    assert len(gt_timestamps) == len(imu_timestamps)
    assert len(gt_timestamps) == len(imu_data)
    assert np.max(np.abs(np.array(imu_timestamps) - np.array(gt_timestamps))) < 1000
    assert cam_timestamps[0] == imu_timestamps[0]
    assert cam_timestamps[-1] == imu_timestamps[-1]

    data_frames = []
    # all per-frame timestamps below are stored as seconds relative to this reference
    ref_time = np.datetime64(int(min([cam_timestamps[0], imu_timestamps[0], ])), "ns")

    cam_period = 100 * 10 ** 6  # nanoseconds
    imu_skip = 2  # keep every 2nd IMU sample within an interval
    i_start = 0
    # for i in range(0, len(cam_timestamps) - 1):
    while i_start < len(cam_timestamps) - 1:
        t_k = cam_timestamps[i_start]
        # pick the camera frame whose timestamp is closest to one cam_period later
        i_end = (np.abs(np.array(cam_timestamps) - (cam_timestamps[i_start] + cam_period))).argmin()
        t_kp1 = cam_timestamps[i_end]

        imu_start_idx = imu_timestamps.index(t_k)
        imu_end_idx = imu_timestamps.index(t_kp1)

        if not t_kp1 - t_k == cam_period:
            # ignore the last frame if it does not arrive at the desired rate
            if i_end == len(cam_timestamps) - 1:
                break

            logger.print("WARN imu_end_idx - imu_start_idx != %.5s, "
                         "image: [%d -> %d] imu: [%d -> %d] time: [%d -> %d] diff %.5f"
                         % (cam_period / 10 ** 9, i_start, i_end, imu_start_idx, imu_end_idx, t_k, t_kp1,
                            (t_kp1 - t_k) / 10 ** 9))

        # collect the (subsampled) IMU poses/measurements spanning [t_k, t_kp1]
        imu_poses = []
        imu_timestamps_k_kp1 = []
        accel_measurements_k_kp1 = []
        gyro_measurements_k_kp1 = []
        for j in range(imu_start_idx, imu_end_idx + 1, imu_skip):
            # ground-truth pose of the IMU at sample j (rotation from quaternion, then position)
            imu_pose = transformations.quaternion_matrix(gt_data[j, [qw, qx, qy, qz]])
            imu_pose[0:3, 3] = gt_data[j, [px, py, pz]]
            imu_poses.append(imu_pose)
            imu_timestamps_k_kp1.append((np.datetime64(imu_timestamps[j], "ns") - ref_time) / np.timedelta64(1, "s"))
            accel_measurements_k_kp1.append(imu_data[j, [ax, ay, az]])
            gyro_measurements_k_kp1.append(imu_data[j, [wx, wy, wz]])

        T_i_vk = imu_poses[0]
        # velocity is rotated into the body frame: C_vk_i * v_i
        frame_k = SequenceData.Frame(os.path.join(seq_dir, "cam0", "data", "%09d.png" % t_k),
                                     (np.datetime64(t_k, "ns") - ref_time) / np.timedelta64(1, "s"),
                                     T_i_vk,
                                     T_i_vk[0:3, 0:3].transpose().dot(gt_data[imu_start_idx, [vx, vy, vz]]),  # v_vk
                                     imu_poses,
                                     imu_timestamps_k_kp1,
                                     accel_measurements_k_kp1,
                                     gyro_measurements_k_kp1,
                                     timestamp_raw=t_k)

        data_frames.append(frame_k)
        i_start = i_end

    # NOTE(review): the lines below rely on the while-loop having executed at
    # least once; otherwise imu_poses / imu_end_idx / t_kp1 are undefined.
    # Terminal frame for the last image, with empty IMU arrays.
    T_i_vkp1 = imu_poses[-1]
    data_frames.append(SequenceData.Frame(os.path.join(seq_dir, "cam0", "data", "%09d.png" % t_kp1),
                                          (np.datetime64(t_kp1, "ns") - ref_time) / np.timedelta64(1, "s"),
                                          T_i_vkp1,
                                          T_i_vkp1[0:3, 0:3].transpose().dot(gt_data[imu_end_idx, [vx, vy, vz]]),
                                          np.zeros([0, 4, 4]), np.zeros([0]), np.zeros([0, 3]), np.zeros([0, 3]),
                                          timestamp_raw=t_kp1))

    return data_frames
예제 #16
0
def calc_image_mean_std(sequences):
    """Compute per-channel mean and standard deviation over all images of the given sequences.

    Two passes over the images: the first accumulates per-image channel means,
    the second accumulates squared deviations from that mean.  Results are
    printed via the logger.

    :param sequences: iterable of sequence names understood by SequenceData
    """
    logger.initialize(working_dir=par.data_dir, use_tensorboard=False)
    logger.print("================ CALC IMAGE MEAN STD ================")
    logger.print("Sequences: [%s]" % ",".join(sequences))
    to_tensor = torchvision.transforms.ToTensor()

    # Gather every image path across all sequences.
    image_paths = []
    num_seqs = len(sequences)
    for idx, seq in enumerate(sequences):
        print("Collecting image paths %d/%d (%.2f%%)" %
              (idx + 1, num_seqs, (idx + 1) * 100 / num_seqs),
              end="\r")
        image_paths.extend(SequenceData(seq).get_images_paths())
    print()

    def _load(p):
        # Image as a CxHxW float array in [0, 1]; optionally shifted to [-0.5, 0.5].
        arr = np.array(to_tensor(Image.open(p)))
        return arr - 0.5 if par.minus_point_5 else arr

    n_images = len(image_paths)

    # Pass 1: per-channel mean (mean of the per-image channel means).
    mean_sum = np.zeros(3)
    t0 = time.time()
    for idx, path in enumerate(image_paths):
        print("Computing mean %d/%d (%.2f%%)" %
              (idx + 1, n_images, (idx + 1) * 100 / n_images),
              end="\r")
        mean_sum = mean_sum + np.mean(_load(path), (1, 2))
    print("\nTook %.2fs" % (time.time() - t0))

    mean = mean_sum / n_images

    # Pass 2: per-channel variance around the global mean, over all pixels.
    var_sum = np.zeros(3)
    num_pixels = 0
    t0 = time.time()
    for idx, path in enumerate(image_paths):
        print("Computing standard deviation %d/%d (%.2f%%)" %
              (idx + 1, n_images, (idx + 1) * 100 / n_images),
              end="\r")
        centered = _load(path) - mean[:, np.newaxis, np.newaxis]
        var_sum = var_sum + np.sum(np.square(centered), (1, 2))
        num_pixels += centered.shape[1] * centered.shape[2]
    print("\nTook %.2fs" % (time.time() - t0))

    # Bessel-corrected sample standard deviation.
    std = np.sqrt(var_sum / (num_pixels - 1))

    logger.print("Mean: [%f, %f, %f]" % (mean[0], mean[1], mean[2]))
    logger.print("Std: [%f, %f, %f]" % (std[0], std[1], std[2]))
예제 #17
0
def plot_errors(working_dir):
    """Plot relative/absolute pose errors and visual-measurement error/covariance figures.

    Expects working_dir to contain errors/{rel,abs,vis_meas} and
    vis_meas/covar directories, each with one .npy file per sequence
    (identical file sets).  Figures are written under working_dir/errors/.

    :param working_dir: trajectory output directory produced by gen_trajectory
    """
    errors_dir = os.path.join(working_dir, "errors")
    rel_errors_dir = os.path.join(errors_dir, "rel")
    abs_errors_dir = os.path.join(errors_dir, "abs")
    vis_meas_errors_dir = os.path.join(errors_dir, "vis_meas")
    vis_meas_covars_dir = os.path.join(working_dir, "vis_meas", "covar")
    abs_errors_files = sorted(os.listdir(abs_errors_dir))
    rel_errors_files = sorted(os.listdir(rel_errors_dir))
    vis_meas_errors_files = sorted(os.listdir(vis_meas_errors_dir))
    vis_meas_covars_files = sorted(os.listdir(vis_meas_covars_dir))

    # all four directories must describe the same set of sequences
    assert (abs_errors_files == rel_errors_files == vis_meas_errors_files == vis_meas_covars_files)
    sequences = [f[:-4] for f in abs_errors_files]  # strip ".npy"

    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ PLOT ERROR ================")
    logger.print("Working on directory:", working_dir)
    logger.print("Found sequences: [%s]" % ", ".join(sequences))

    for i, sequence in enumerate(sequences):
        error_rel = np.load(os.path.join(rel_errors_dir, "%s.npy" % sequence))
        error_abs = np.load(os.path.join(abs_errors_dir, "%s.npy" % sequence))
        error_vis_meas = np.load(os.path.join(vis_meas_errors_dir, "%s.npy" % sequence))
        covar_vis_meas = np.load(os.path.join(vis_meas_covars_dir, "%s.npy" % sequence))

        labels = ["Rot X", "Rot Y", "Rot Z", "Trans X", "Trans Y", "Trans Z"]

        # relative error time series, plus histogram with a fitted Gaussian overlay
        for j in range(0, 6):
            err = error_rel[:, j]
            plt.clf()
            plt.plot(err, color="r")
            plt.xlabel("frame # []")
            plt.ylabel(labels[j].lower())
            plt.title("Seq. %s %s Rel Error" % (sequence, labels[j]))
            plt.savefig(Logger.ensure_file_dir_exists(
                    os.path.join(errors_dir, "rel_error_figures",
                                 "seq_%s_%02d_%s_rel_err_plt.png" % (
                                     sequence, j, "_".join(labels[j].lower().split())))))
            plt.clf()
            # `normed` was removed in Matplotlib 3.1; `density` is the supported equivalent
            plt.hist(err, bins=np.linspace(start=np.min(err), stop=np.max(err), num=100), density=True)

            ticks = plt.xticks()[0]
            lnspc = np.linspace(min(ticks), max(ticks), len(err))
            mean, std_dev = stats.norm.fit(err)
            pdf_g = stats.norm.pdf(lnspc, mean, std_dev)
            plt.plot(lnspc, pdf_g, color="r")
            plt.text(0.05, 0.9, r"$\mu$=%.4g" % mean + "\n" + r"$\sigma$=%.4g" % std_dev,
                     transform=plt.gca().transAxes)
            plt.title("Seq. %s %s Rel Error Hist." % (sequence, labels[j]))
            plt.savefig(Logger.ensure_file_dir_exists(
                    os.path.join(errors_dir, "rel_error_histograms",
                                 "fig_%s_%02d_%s_rel_err_hist.png" % (
                                     sequence, j, "_".join(labels[j].lower().split())))))

        # visual-measurement error envelope vs. its covariance
        for j in range(0, 6):
            err = np.abs(error_vis_meas[:, j])
            covar = covar_vis_meas[:, j, j]
            # rotation covariances are scaled by par.k1 elsewhere; undo that scale for plotting
            if j in [0, 1, 2]: covar = covar / par.k1

            covar_sig = np.sqrt(covar)
            plt.clf()
            plt.plot(err, color="r", label="Err")
            plt.plot(-err, color="r", label="Err")
            # NOTE(review): label says "3sig" but the curve is 1*sigma (sqrt of
            # the variance, unscaled) — confirm which envelope was intended.
            plt.plot(covar_sig, color="b", label="3sig")
            plt.plot(-covar_sig, color="b", label="3sig")
            plt.xlabel("frame # []")
            plt.ylabel(labels[j].lower())
            plt.title("Seq. %s %s Vis Meas Error & Covar" % (sequence, labels[j]))
            plt.legend()
            plt.savefig(Logger.ensure_file_dir_exists(
                    os.path.join(errors_dir, "vis_meas_error_covar_figures",
                                 "fig_%s_%02d_%s_vis_meas_error_covar.png" % (
                                     sequence, j, "_".join(labels[j].lower().split())))))

        # absolute error time series
        for j in range(0, 6):
            err = error_abs[:, j]
            plt.clf()
            plt.plot(err, color="r")
            plt.xlabel("frame # []")
            plt.ylabel(labels[j].lower())
            plt.title("Seq. %s %s Abs Error" % (sequence, labels[j]))
            plt.savefig(Logger.ensure_file_dir_exists(
                    os.path.join(errors_dir, "abs_error_figures",
                                 "seq_%s_%02d_%s_abs_err_plt.png" % (
                                     sequence, j, "_".join(labels[j].lower().split())))))

        logger.print("Plots saved for sequence %s" % sequence)
예제 #18
0
def preprocess_euroc(seq_dir, output_dir, cam_still_range):
    """Preprocess a raw EuRoC MAV sequence into the project's packaged format.

    Reads the camera/IMU/ground-truth CSVs from seq_dir, aligns the three
    streams by timestamp, estimates gyro bias and gravity from the still
    period, and writes two packaged datasets:

    * ``output_dir`` — ground-truth-aligned data for training/validation,
      with gravity refined from ground truth;
    * ``output_dir + "_eval"`` — IMU-aligned data for evaluation, with a
      zero placeholder ground truth (identity quaternion).

    :param seq_dir: raw EuRoC sequence directory (contains cam0, imu0,
                    state_groundtruth_estimate0)
    :param output_dir: destination directory for the packaged training data
    :param cam_still_range: (start, end) camera frame indices during which the
                            sensor is known to be still; used for bias/gravity
                            estimation
    """
    logger.initialize(working_dir=output_dir, use_tensorboard=False)
    logger.print("================ PREPROCESS EUROC ================")
    logger.print("Preprocessing %s" % seq_dir)
    logger.print("Output to: %s" % output_dir)
    logger.print("Camera still range [%d -> %d]" % (cam_still_range[0], cam_still_range[1]))

    # camera extrinsics: sensor.yaml stores T_BS (body->sensor); invert it
    with open(os.path.join(seq_dir, "cam0", "sensor.yaml")) as yaml_file:
        cam_sensor_yaml_config = yaml.load(yaml_file, Loader=yaml.SafeLoader)
    T_cam_imu = np.linalg.inv(np.array(cam_sensor_yaml_config["T_BS"]["data"]).reshape(4, 4))

    cam_timestamps = []
    imu_timestamps = []
    imu_data = []
    gt_timestamps = []
    gt_data = []

    # camera timestamps (file handles are closed via the with-blocks; the
    # original code leaked them)
    with open(os.path.join(seq_dir, 'cam0', 'data.csv'), 'r') as left_cam_csv:
        left_cam_csv.readline()  # skip header
        for line in left_cam_csv:
            line = line.split(",")
            timestamp_str = line[0]
            # the image filename must match its timestamp; the original
            # `assert str(...)` wrapped the comparison in str() and could never
            # fire (and line[1] keeps its trailing newline, hence the strip)
            assert line[1].strip() == timestamp_str + ".png"
            cam_timestamps.append(int(timestamp_str))

    # IMU: timestamp + 6 measurement columns
    with open(os.path.join(seq_dir, 'imu0', "data.csv"), 'r') as imu_csv:
        imu_csv.readline()  # skip header
        for line in imu_csv:
            line = line.split(",")
            imu_timestamps.append(int(line[0]))
            imu_data.append([float(line[i + 1]) for i in range(0, 6)])

    # ground truth: timestamp + 16 state columns
    with open(os.path.join(seq_dir, "state_groundtruth_estimate0", "data.csv"), "r") as gt_csv:
        gt_csv.readline()  # skip header
        for line in gt_csv:
            line = line.split(",")
            gt_timestamps.append(int(line[0]))
            gt_data.append([float(line[i + 1]) for i in range(0, 16)])

    imu_data = np.array(imu_data)
    gt_data = np.array(gt_data)

    assert np.all(np.diff(cam_timestamps) > 0), "nonmonotonic timestamp"
    assert np.all(np.diff(imu_timestamps) > 0), "nonmonotonic timestamp"
    assert np.all(np.diff(gt_timestamps) > 0), "nonmonotonic timestamp"

    # align imu and ground truth timestamps, and the time difference should not be more than 1 us
    assert (gt_timestamps[0] > imu_timestamps[0] and gt_timestamps[-1] < imu_timestamps[-1])
    imu_gt_aligned_start_idx = (np.abs(np.array(imu_timestamps) - gt_timestamps[0])).argmin()  # gta = gt aligned
    imu_timestamps_gt_aligned = imu_timestamps[imu_gt_aligned_start_idx:imu_gt_aligned_start_idx + len(gt_timestamps)]
    imu_data_gt_aligned = imu_data[imu_gt_aligned_start_idx:imu_gt_aligned_start_idx + len(gt_timestamps)]

    gt_align_time_sync_diff = np.array(gt_timestamps) - np.array(imu_timestamps_gt_aligned)
    assert np.all(np.abs(gt_align_time_sync_diff) < 1000), "timestamp out of sync by > 1 us"

    # first camera frame whose timestamp coincides with an IMU sample
    cam_imu_aligned_start_idx = -1
    for i in range(0, len(cam_timestamps)):
        if cam_timestamps[i] in imu_timestamps:
            cam_imu_aligned_start_idx = i
            break
    assert cam_imu_aligned_start_idx >= 0

    # last camera frame whose timestamp coincides with an IMU sample
    cam_imu_aligned_end_idx = -1
    for i in range(len(cam_timestamps) - 1, -1, -1):
        if cam_timestamps[i] in imu_timestamps:
            cam_imu_aligned_end_idx = i
            break
    assert cam_imu_aligned_end_idx >= 0

    assert cam_imu_aligned_start_idx < cam_still_range[1], "sanity check that the alignment is at the start"

    imu_still_idx_start = imu_timestamps.index(cam_timestamps[max(cam_still_range[0], cam_imu_aligned_start_idx)])
    imu_still_idx_end = imu_timestamps.index(cam_timestamps[cam_still_range[1]])

    # while the sensor is still, the mean gyro reading is the bias and the
    # mean accelerometer reading is gravity expressed in the body frame
    gyro_bias_from_still = np.mean(imu_data[imu_still_idx_start:imu_still_idx_end, [wx, wy, wz]], axis=0)
    gravity_from_still = np.mean(imu_data[imu_still_idx_start:imu_still_idx_end, [ax, ay, az]], axis=0)

    print("still estimated g_b0:", gravity_from_still)
    print("still estimated g_b0 norm: ", np.linalg.norm(gravity_from_still))

    # for training /validation
    logger.print("Processing data for training...")
    every_N_frames = 10
    rounded_length = len(imu_timestamps_gt_aligned) // every_N_frames * every_N_frames
    gravity_from_gt = find_initial_gravity(imu_timestamps_gt_aligned[0:rounded_length],
                                           imu_data_gt_aligned[0:rounded_length],
                                           gt_timestamps[0:rounded_length], gt_data[0:rounded_length], 10)

    # first/last camera frames covered by the ground-truth-aligned IMU stream
    cam_gt_aligned_start_idx = -1
    for i in range(0, len(cam_timestamps)):
        if cam_timestamps[i] in imu_timestamps_gt_aligned:
            cam_gt_aligned_start_idx = i
            break
    assert cam_gt_aligned_start_idx >= 0

    cam_gt_aligned_end_idx = -1
    for i in range(len(cam_timestamps) - 1, -1, -1):
        if cam_timestamps[i] in imu_timestamps_gt_aligned:
            cam_gt_aligned_end_idx = i
            break
    assert cam_gt_aligned_end_idx >= 0

    gt_cam_aligned_start_idx = imu_timestamps_gt_aligned.index(cam_timestamps[cam_gt_aligned_start_idx])
    gt_cam_aligned_end_idx = imu_timestamps_gt_aligned.index(cam_timestamps[cam_gt_aligned_end_idx])

    logger.print("Camera index [%d -> %d]" % (cam_gt_aligned_start_idx, cam_gt_aligned_end_idx))
    data_frames = package_euroc_data(seq_dir, cam_timestamps[cam_gt_aligned_start_idx:cam_gt_aligned_end_idx + 1],
                                     imu_timestamps_gt_aligned[gt_cam_aligned_start_idx:gt_cam_aligned_end_idx + 1],
                                     imu_data_gt_aligned[gt_cam_aligned_start_idx:gt_cam_aligned_end_idx + 1],
                                     gt_timestamps[gt_cam_aligned_start_idx:gt_cam_aligned_end_idx + 1],
                                     gt_data[gt_cam_aligned_start_idx:gt_cam_aligned_end_idx + 1])

    SequenceData.save_as_pd(data_frames, gravity_from_gt, gyro_bias_from_still, T_cam_imu, output_dir)
    copyfile(os.path.join(seq_dir, "state_groundtruth_estimate0", "data.csv"),
             os.path.join(output_dir, "groundtruth.csv"))

    # for evaluation: no ground truth available; use IMU timestamps and a
    # zero state with identity orientation as placeholder
    logger.print("Processing data for evaluation...")
    imu_cam_aligned_start_idx = imu_timestamps.index(cam_timestamps[cam_still_range[1]])
    imu_cam_aligned_end_idx = imu_timestamps.index(cam_timestamps[cam_imu_aligned_end_idx])
    imu_timestamps_cam_aligned = imu_timestamps[imu_cam_aligned_start_idx:imu_cam_aligned_end_idx + 1]
    imu_data_cam_aligned = imu_data[imu_cam_aligned_start_idx:imu_cam_aligned_end_idx + 1]
    zeros_gt = np.zeros([len(imu_timestamps_cam_aligned), 16])
    # identity quaternion: set the qw COLUMN to 1 for every row (the original
    # `zeros_gt[: qw] = 1` filled the first qw ROWS entirely with ones)
    zeros_gt[:, qw] = 1

    logger.print("Camera index [%d -> %d]" % (cam_still_range[1], cam_imu_aligned_end_idx))
    eval_output_dir = output_dir + "_eval"
    data_frames = package_euroc_data(seq_dir, cam_timestamps[cam_still_range[1]:cam_imu_aligned_end_idx + 1],
                                     imu_timestamps_cam_aligned,
                                     imu_data_cam_aligned,
                                     imu_timestamps_cam_aligned,
                                     zeros_gt)
    logger.make_dir_if_not_exist(eval_output_dir)
    SequenceData.save_as_pd(data_frames, gravity_from_still, gyro_bias_from_still, T_cam_imu, eval_output_dir)
    copyfile(os.path.join(seq_dir, "state_groundtruth_estimate0", "data.csv"),
             os.path.join(eval_output_dir, "groundtruth.csv"))
예제 #19
0
def interpolate_SE3(T1, T2, alpha):
    """Interpolate between two SE(3) poses: returns (T2 * T1^-1)^alpha * T1.

    alpha=0 yields T1 and alpha=1 yields T2.  fractional_matrix_power can
    return complex entries; a large imaginary residue is reported as a bad
    interpolation before the real part is returned.
    """
    delta = T2.dot(np.linalg.inv(T1))
    interpolated = scipy.linalg.fractional_matrix_power(delta, alpha).dot(T1)
    if np.linalg.norm(np.imag(interpolated)) > 1e-10:
        logger.print("Bad SE(3) interp:")
        logger.print(interpolated)
    return np.real(interpolated)
예제 #20
0
def plot_trajectory(working_dir):
    """Plot estimated vs ground-truth trajectories for every sequence in working_dir.

    For each sequence found in working_dir/est_poses (with a matching file in
    working_dir/gt_poses), saves the XY/XZ/YZ plane trajectories plus
    per-component translation/rotation progression plots into
    working_dir/figures.

    :param working_dir: trajectory output directory produced by gen_trajectory
    """
    output_dir = os.path.join(working_dir, "figures")
    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    assert sorted(os.listdir(pose_est_dir)) == sorted(os.listdir(pose_gt_dir))
    pose_est_files = sorted(os.listdir(pose_est_dir))

    Logger.make_dir_if_not_exist(output_dir)
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ PLOT TRAJECTORY ================")
    logger.print("Working on directory:", working_dir)
    logger.print("Found pose estimate files: \n" + "\n".join(pose_est_files))

    for pose_est_file in pose_est_files:
        sequence = os.path.splitext(pose_est_file)[0]

        est_T = np.load(os.path.join(pose_est_dir, "%s.npy" % sequence))
        gt_T = np.load(os.path.join(pose_gt_dir, "%s.npy" % sequence))

        # decompose each 4x4 pose into translation and SO(3) log-map components
        est_trans = np.array([se3.r_from_T(T) for T in est_T])
        est_rot = np.array([se3.log_SO3(se3.C_from_T(T)) for T in est_T])
        gt_trans = np.array([se3.r_from_T(T) for T in gt_T])
        gt_rot = np.array([se3.log_SO3(se3.C_from_T(T)) for T in gt_T])

        # ########## XYZ Planes Plots ##########
        # (horizontal axis name, vertical axis name, column indices, title tag, file tag)
        planes = [("x", "y", 0, 1, "XY", "00_xy"),
                  ("x", "z", 0, 2, "XZ", "01_xz"),
                  ("y", "z", 1, 2, "YZ", "02_yz")]
        for ax_h, ax_v, ih, iv, plane_tag, file_tag in planes:
            plt.clf()
            plt.plot(est_trans[:, ih], est_trans[:, iv],
                     linewidth=1.0, color="r", label="Estimate")
            plt.plot(gt_trans[:, ih], gt_trans[:, iv],
                     linewidth=1.0, color="b", label="Ground Truth")
            plt.axis("equal")
            plt.xlabel("%s [m]" % ax_h)
            plt.ylabel("%s [m]" % ax_v)
            plt.title("Seq. %s Trajectory %s Plane" % (sequence, plane_tag))
            plt.legend()
            plt.savefig(
                os.path.join(output_dir, "seq_%s_%s_traj.png" % (sequence, file_tag)))

        # ########## Progression Throughout Planes Plot ##########
        labels = ["Trans X", "Trans Y", "Trans Z", "Rot X", "Rot Y", "Rot Z"]
        est_series = [est_trans[:, 0], est_trans[:, 1], est_trans[:, 2],
                      est_rot[:, 0], est_rot[:, 1], est_rot[:, 2]]
        gt_series = [gt_trans[:, 0], gt_trans[:, 1], gt_trans[:, 2],
                     gt_rot[:, 0], gt_rot[:, 1], gt_rot[:, 2]]

        for j, label in enumerate(labels):
            plt.clf()
            plt.plot(est_series[j], linewidth=1.0, color="r", label="Estimate")
            plt.plot(gt_series[j], linewidth=1.0, color="b", label="Ground Truth")
            plt.xlabel("frame # []")
            plt.ylabel(label.lower())
            plt.title("Seq. %s %s" % (sequence, label))
            plt.legend()
            plt.savefig(
                os.path.join(
                    output_dir, "seq_%s_%02d_%s_plt.png" %
                    (sequence, j + 3, "_".join(label.lower().split()))))

        logger.print("Plot saved for sequence %s" % sequence)
예제 #21
0
def gen_trajectory(model_file_path, sequences, seq_len, prop_lstm_states):
    """Generate estimated trajectories for the given sequences using a trained E2EVIO model.

    Loads the model checkpoint, runs inference sequence by sequence (with or
    without the EKF depending on par.enable_ekf), and saves visual
    measurements, covariances, estimated poses, ground-truth poses and
    timestamps as .npy files under "<model_file_path>.traj".

    :param model_file_path: path to the saved model state dict
    :param sequences: list of sequence names to process
    :param seq_len: subsequence length used to batch the data
    :param prop_lstm_states: whether to propagate LSTM states across
                             subsequences (non-EKF path only)
    :return: the working directory all outputs were written to
    """
    # Path
    model_file_path = os.path.abspath(model_file_path)
    assert (os.path.exists(model_file_path))
    # outputs go to a sibling directory named after the checkpoint
    working_dir = os.path.join(os.path.dirname(model_file_path),
                               os.path.basename(model_file_path) + ".traj")
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ GENERATE TRAJECTORY REL ================")

    # Load model
    logger.print("Constructing model...")
    model = E2EVIO()
    model = model.cuda()
    logger.print("Loading model from: ", model_file_path)
    model.load_state_dict(
        logger.clean_state_dict_key(torch.load(model_file_path)))
    model.eval()

    logger.log_parameters()
    logger.print("Using sequence length:", seq_len)
    logger.print("Prop LSTM states:", prop_lstm_states)
    logger.print("Sequences: \n" + "\n".join(sequences))

    for seq in sequences:
        logger.print("Generating trajectory for seq...", seq)
        start_time = time.time()

        # non-overlapping subsequences, in order, one pass
        subseqs = get_subseqs([seq],
                              seq_len,
                              overlap=1,
                              sample_times=1,
                              training=False)
        dataset = SubseqDataset(subseqs, (par.img_h, par.img_w),
                                par.img_means,
                                par.img_stds,
                                par.minus_point_5,
                                training=False)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=4)
        seq_data = SequenceData(seq)
        gt_abs_poses = seq_data.get_poses()
        timestamps = seq_data.get_timestamps()

        if par.enable_ekf:
            logger.print("With EKF enabled ...")
            # the EKF path returns per-sequence dicts; pick out this sequence
            est_vis_meas_dict, vis_meas_covar_dict, est_poses_dict, est_states_dict, est_covars_dict = \
                gen_trajectory_abs_iter(model, {seq: dataloader})
            est_vis_meas = est_vis_meas_dict[seq]
            vis_meas_covar = vis_meas_covar_dict[seq]
            est_states = est_states_dict[seq]
            est_covars = est_covars_dict[seq]
            est_poses = est_poses_dict[seq]
            # dump the full EKF state history alongside the visual measurements
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "vis_meas",
                                 seq + ".npy")), est_vis_meas)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "vis_meas_covar",
                                 seq + ".npy")), vis_meas_covar)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "poses",
                                 seq + ".npy")), est_poses)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "states",
                                 seq + ".npy")), est_states)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "covars",
                                 seq + ".npy")), est_covars)
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(working_dir, "ekf_states", "gt_velocities",
                                 seq + ".npy")), seq_data.get_velocities())

            # invert the EKF pose matrices before saving as the trajectory estimate
            est_poses = np.linalg.inv(
                np.array(est_poses_dict[seq]).astype(np.float64))
        else:
            logger.print("Without EKF enabled ...")
            # relative-pose integration seeded with the first ground-truth pose
            est_poses, est_vis_meas, vis_meas_covar = gen_trajectory_rel_iter(
                model,
                dataloader,
                prop_lstm_states,
                initial_pose=gt_abs_poses[0, :, :])

        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "vis_meas", "meas", seq + ".npy")),
            est_vis_meas)
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "vis_meas", "covar", seq + ".npy")),
            vis_meas_covar)
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "est_poses", seq + ".npy")),
            est_poses)
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "gt_poses", seq + ".npy")),
            gt_abs_poses[:len(est_poses)])  # ensure same length as est poses
        np.save(
            logger.ensure_file_dir_exists(
                os.path.join(working_dir, "timestamps", seq + ".npy")),
            timestamps[:len(est_poses)])
        logger.print("Done, took %.2f seconds" % (time.time() - start_time))

    return working_dir
예제 #22
0
    def __init__(self,
                 subseqs,
                 img_size=None,
                 img_mean=None,
                 img_std=(1, 1, 1),
                 minus_point_5=False,
                 training=True,
                 no_image=False):
        """Dataset over fixed-length subsequences of images + IMU data.

        :param subseqs: list of subsequence objects (must expose .length,
                        .image_paths and .imu_timestamps)
        :param img_size: (height, width) every image is resized to.
                        NOTE(review): the default None is dereferenced below
                        (img_size[0]) and would raise — callers must pass it.
        :param img_mean: per-channel normalization mean.
                        NOTE(review): default None would raise inside
                        transforms.Normalize — callers must pass it.
        :param img_std: per-channel normalization std
        :param minus_point_5: shift image values by -0.5 before normalizing
        :param training: enables the random color-jitter augmentation
                         (when par.data_aug_rand_color.enable is set)
        :param no_image: flag stored for use elsewhere in the class
        """
        # Transforms
        # resize happens once per image (optionally cached); jitter/ToTensor at runtime
        self.pre_runtime_transformer = transforms.Compose(
            [transforms.Resize((img_size[0], img_size[1]))])

        if training:
            transform_ops = []
            if par.data_aug_rand_color.enable:
                transform_ops.append(
                    transforms.ColorJitter(**par.data_aug_rand_color.params))
            transform_ops.append(transforms.ToTensor())
            self.runtime_transformer = transforms.Compose(transform_ops)
        else:
            self.runtime_transformer = transforms.ToTensor()

        # Normalization
        self.minus_point_5 = minus_point_5
        self.normalizer = transforms.Normalize(mean=img_mean, std=img_std)
        self.no_image = no_image

        # log
        # logger.print("Transform parameters: ")
        # logger.print("pre_runtime_transformer:", self.pre_runtime_transformer)
        # logger.print("runtime_transformer:", self.runtime_transformer)
        # logger.print("minus_point_5:", self.minus_point_5)
        # logger.print("normalizer:", self.normalizer)

        self.subseqs = subseqs
        self.load_image_func = lambda p: self.pre_runtime_transformer(
            Image.open(p))

        if par.cache_image:
            # pre-resize every image once into the class-level cache shared by
            # all dataset instances
            # NOTE(review): total_images assumes every subsequence has the same
            # length as the first — only used for the progress display
            total_images = self.subseqs[0].length * len(subseqs)
            counter = 0
            start_t = time.time()
            for subseq in self.subseqs:
                for path in subseq.image_paths:
                    if path not in SubseqDataset.__cache:
                        SubseqDataset.__cache[path] = self.load_image_func(
                            path)
                    counter += 1
                    print(
                        "Processed %d/%d (%.2f%%)" %
                        (counter, total_images, counter / total_images * 100),
                        end="\r")
            logger.print("Image preprocessing took %.2fs" %
                         (time.time() - start_t))

        # since IMU data might have different length, find the longest length of IMU data so we can pad the
        # the rest with zeros
        imu_data_lengths = []
        for s in self.subseqs:
            imu_data_lengths += [len(t) for t in s.imu_timestamps]
        self.max_imu_data_length = max(imu_data_lengths)
예제 #23
0
    def imu_predict_kitti(self):
        """Run IMU-only prediction on the KITTI test sequences and report KITTI errors.

        For each sequence the estimated poses are inverted, saved to
        <results>/imu_predict_kitti/<seq>/est.npy, and evaluated with the
        KITTI error metrics; per-sequence and averaged errors are logged.
        """
        output_dir = os.path.join(par.results_coll_dir, "imu_predict_kitti")
        seqs = ["K01", "K04", "K06", "K07", "K08", "K09", "K10"]
        error_calc = KittiErrorCalc(seqs)

        for seq in seqs:
            logger.initialize(os.path.join(output_dir, seq),
                              use_tensorboard=False)

            timestamps, gt_poses, gt_vels, poses, states, covars, precomp_covars = \
                self.predict_test_case([seq], None, "cpu", False)

            # invert the estimated pose matrices before saving/evaluating,
            # mirroring what the error accumulation below expects
            poses_np = np.linalg.inv(poses[0].numpy().astype(np.float64))
            np.save(
                logger.ensure_file_dir_exists(
                    os.path.join(output_dir, seq, "est.npy")), poses_np)

            # plot_ekf_data(os.path.join(output_dir, seq),
            #               timestamps[0], gt_poses[0], gt_vels[0], poses[0], states[0])

            logger.print("Processed seq %s" % seq)
            seq_errs = np.array(
                kitti_eval_pyimpl.calc_kitti_seq_errors(gt_poses[0], poses_np)[0])
            logger.print("Errors trans & rot")
            logger.print("%.6f" % np.average(seq_errs[:, 0]))
            logger.print("%.6f" % (np.average(seq_errs[:, 1]) * 180 / np.pi))

            error_calc.accumulate_error(seq, poses_np)

        accumulated = np.array(error_calc.errors)
        logger.print("Ave Trans Errors: ")
        logger.print(np.average(accumulated[:, 0]))
        logger.print("Ave Rot Errors: ")
        logger.print(np.average(accumulated[:, 1]))
예제 #24
0
def kitti_eval_simple(working_dir, seqs):
    """Evaluate estimated KITTI trajectories against ground truth (pure Python metrics).

    Reads <working_dir>/est_poses/<seq>.npy and <working_dir>/gt_poses/<seq>.npy
    for each sequence, logs per-sequence translation/rotation errors, then the
    average over all sequences.

    :param seqs: sequence names to evaluate, or None to evaluate every file
                 found in est_poses
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ Evaluate KITTI SIMPLE ================")
    logger.print("Working on directory:", working_dir)

    pose_est_dir = os.path.join(working_dir, "est_poses")
    pose_gt_dir = os.path.join(working_dir, "gt_poses")
    assert sorted(os.listdir(pose_est_dir)) == sorted(os.listdir(pose_gt_dir))
    pose_est_files = sorted(os.listdir(pose_est_dir))

    if seqs is None:
        seqs = [os.path.splitext(f)[0] for f in pose_est_files]

    errs = []

    for seq in seqs:
        est_poses = np.load(os.path.join(pose_est_dir, "%s.npy" % seq))
        gt_poses = np.load(os.path.join(pose_gt_dir, "%s.npy" % seq))
        err = kitti_eval_pyimpl.calc_kitti_seq_errors(gt_poses, est_poses)
        errs += err
        err = np.array(err)

        if err.any():
            logger.print("Seq %s trans & rot error:" % seq)
            logger.print(
                "%.6f   %.6f\n" %
                (np.average(err[:, 0]), np.average(err[:, 1]) * 180 / np.pi))
        else:
            logger.print("Error cannot be computed for seq %s\n" % seq)

    errs = np.array(errs)
    # guard: with no error segments at all, errs has shape (0,) and the column
    # indexing below would raise an IndexError
    if errs.size:
        logger.print("Ave trans & rot error:")
        logger.print(
            "%.6f   %.6f\n" %
            (np.average(errs[:, 0]), np.average(errs[:, 1]) * 180 / np.pi))
    else:
        logger.print("Error cannot be computed for any sequence\n")
예제 #25
0
def kitti_eval(working_dir,
               train_sequences,
               val_sequences,
               min_num_frames=200):
    """Run the official KITTI odometry C++ evaluation tool on the pose files
    under <working_dir>/kitti and print per-split error tables.

    Args:
        working_dir: results directory containing a "kitti" subdirectory with
            "<seq>_est.txt" / "<seq>_gt.txt" pose files.
        train_sequences: sequence names to evaluate as the training split.
        val_sequences: sequence names to evaluate as the validation split.
        min_num_frames: pose files with no more than this many lines are
            skipped (the KITTI tool needs long enough trajectories).
    """
    logger.initialize(working_dir=working_dir, use_tensorboard=False)
    logger.print("================ Evaluate KITTI ================")
    logger.print("Working on directory:", working_dir)

    executable = os.path.join(par.project_dir, "eval", "kitti_eval", "cpp",
                              "evaluate_odometry")
    kitti_dir = os.path.join(working_dir, "kitti")

    available_seqs = []
    for f in sorted(os.listdir(kitti_dir)):
        # must have more than min_num_frames entries for evaluation
        if f.endswith(".txt") and sum(
                1
                for line in open(os.path.join(kitti_dir, f))) > min_num_frames:
            available_seqs.append(
                f.replace("_est.txt", "").replace("_gt.txt", ""))

    # every usable sequence must appear exactly twice (once est, once gt)
    assert (len(available_seqs) // 2 == len(set(available_seqs)))
    available_seqs = list(set(available_seqs))

    print(
        "Poses not available for these sequences:", ", ".join(
            list(set(available_seqs) ^ set(train_sequences + val_sequences))))

    if train_sequences:
        logger.print("Evaluating training sequences...")
        logger.print(
            "Training sequences: ",
            ", ".join(list(set(available_seqs) & set(train_sequences))))
        cmd = [
            executable, kitti_dir,
            Logger.make_dir_if_not_exist(os.path.join(kitti_dir, "train"))
        ] + list(set(available_seqs) & set(train_sequences))
        for line in execute(cmd):
            logger.print(line.strip())

    if val_sequences:
        logger.print("Evaluating validation sequences...")
        # typo fix in the log message: ", ".join (was " ,".join)
        logger.print("Validation sequences:",
                     ", ".join(list(set(available_seqs) & set(val_sequences))))
        # bug fix: the evaluator used to be launched twice here (a bare
        # execute(cmd) followed by the logging loop); run it exactly once,
        # mirroring the training branch above
        cmd = [
            executable, kitti_dir,
            Logger.make_dir_if_not_exist(os.path.join(kitti_dir, "valid"))
        ] + list(set(available_seqs) & set(val_sequences))
        for line in execute(cmd):
            logger.print(line.strip())

    # the C++ tool emits plot sources/figures we do not need; remove them
    logger.print("Deleting useless files...")
    selected_files = list(glob.iglob(os.path.join(kitti_dir, "train", "**"), recursive=True)) + \
                     list(glob.iglob(os.path.join(kitti_dir, "valid", "**"), recursive=True))
    for filename in selected_files:
        if filename.endswith(".tex") or filename.endswith(".eps") or \
                filename.endswith(".pdf") or filename.endswith(".gp"):
            os.remove(filename)

    logger.print("Finished running KITTI evaluation!")

    # print the errors
    logger.print("Training errors are:")
    if list(set(available_seqs) & set(train_sequences)):
        train_errors, ave_train_errors = compute_error_for_each_seq(
            os.path.join(kitti_dir, "train"))
        print_error_table(train_errors, ave_train_errors)
    else:
        logger.print("No training errors data")

    logger.print("Validation errors are:")
    if list(set(available_seqs) & set(val_sequences)):
        val_errors, ave_val_errors = compute_error_for_each_seq(
            os.path.join(kitti_dir, "valid"))
        print_error_table(val_errors, ave_val_errors)
    else:
        logger.print("No validation errors data")
예제 #26
0
def _flipped_subseqs(subseqs, H, type_suffix):
    """Return deep copies of *subseqs* whose ground-truth poses are mirrored
    by the reflection matrix *H* (C -> H C H^T, t -> H t) and whose type tag
    has *type_suffix* appended. Shared by the flip augmentations below."""
    flipped_buffer = []
    for subseq in subseqs:
        subseq_flipped = copy.deepcopy(subseq)
        subseq_flipped.gt_poses = \
            np.array([se3.T_from_Ct(H.dot(T[0:3, 0:3].dot(H.transpose())), H.dot(T[0:3, 3]))
                      for T in subseq.gt_poses])
        subseq_flipped.type = subseq.type + type_suffix
        flipped_buffer.append(subseq_flipped)
    return flipped_buffer


def get_subseqs(sequences, seq_len, overlap, sample_times, training):
    """Cut every sequence into fixed-length subsequences, optionally applying
    flip/reverse data augmentation during training.

    Args:
        sequences: iterable of sequence names, each loaded via SequenceData.
        seq_len: number of frames per subsequence.
        overlap: number of frames shared by consecutive subsequences.
        sample_times: when > 1, extra passes with staggered start frames are
            added so different cuts of the sequence are sampled.
        training: when True (and enabled in par.data_aug_transforms), the
            augmentations are applied.

    Returns:
        A flat list of Subsequence objects.
    """
    subseq_list = []

    for seq in sequences:
        start_t = time.time()
        seq_data = SequenceData(seq)
        frames = seq_data.as_frames()

        # staggered start offsets so multiple passes sample different cuts
        if sample_times > 1:
            sample_interval = int(np.ceil(seq_len / sample_times))
            start_frames = list(range(0, seq_len, sample_interval))
            logger.print('Sample start from frame {}'.format(start_frames))
        else:
            start_frames = [0]

        for st in start_frames:
            jump = seq_len - overlap
            subseqs_buffer = []
            # The original image and data (no augmentation)
            sub_seqs_vanilla = []
            for i in range(st, len(frames), jump):
                # this will discard a few frames at the end of the sequence
                if i + seq_len <= len(frames):
                    subseq = Subsequence(frames[i:i + seq_len],
                                         seq_data.g_i,
                                         seq_data.bw_0,
                                         seq_data.ba_0,
                                         seq_data.T_cam_imu,
                                         length=seq_len,
                                         seq=seq,
                                         type="vanilla",
                                         idx=i,
                                         idx_next=i + jump)
                    sub_seqs_vanilla.append(subseq)
            subseqs_buffer += sub_seqs_vanilla

            if training and par.data_aug_transforms.enable:
                # assert not par.enable_ekf, "Data aug transforms not compatible with EKF"
                if par.data_aug_transforms.lr_flip:
                    # reflection matrix: flip y, across the xz plane
                    subseqs_buffer += _flipped_subseqs(
                        sub_seqs_vanilla, np.diag([1, -1, 1]), "_flippedlr")

                if par.data_aug_transforms.ud_flip:
                    assert par.dataset(
                    ) == "EUROC", "up down flips only supported for EUROC"
                    # reflection matrix: flip x, across the yz plane
                    subseqs_buffer += _flipped_subseqs(
                        sub_seqs_vanilla, np.diag([-1, 1, 1]), "_flippedud")

                if par.data_aug_transforms.lrud_flip:
                    assert par.dataset(
                    ) == "EUROC", "left right up down flips only supported for EUROC"
                    # reflection matrix: flips BOTH x and y (the old comment
                    # incorrectly claimed only x was flipped)
                    subseqs_buffer += _flipped_subseqs(
                        sub_seqs_vanilla, np.diag([-1, -1, 1]), "_flippedlrud")

                # Reverse, effectively doubles the number of examples
                if par.data_aug_transforms.reverse:
                    subseqs_rev_buffer = []
                    for subseq in subseqs_buffer:
                        subseq_rev = copy.deepcopy(subseq)
                        subseq_rev.image_paths = list(
                            reversed(subseq.image_paths))
                        subseq_rev.gt_poses = np.flip(subseq.gt_poses,
                                                      axis=0).copy()
                        subseq_rev.type = subseq.type + "_reversed"
                        # NOTE(review): the constructor uses idx/idx_next but
                        # this swap writes id/id_next — confirm the attribute
                        # names against the Subsequence class.
                        subseq_rev.id = subseq.id_next
                        subseq_rev.id_next = subseq.id
                        subseqs_rev_buffer.append(subseq_rev)
                    subseqs_buffer += subseqs_rev_buffer

            # collect the sub-sequences
            subseq_list += subseqs_buffer

        logger.print('Folder %s finish in %.2g sec' %
                     (seq, time.time() - start_t))

    return subseq_list
예제 #27
0
def interpolate_SO3(C1, C2, alpha):
    """Geodesic interpolation between two rotation matrices on SO(3).

    Computes (C2 C1^T)^alpha C1, i.e. rotates from C1 towards C2 by the
    fraction alpha of the relative rotation. Logs a warning if the matrix
    power produces a non-negligible imaginary component, then returns the
    real part either way.
    """
    relative_rotation = C2.dot(C1.transpose())
    C_interp = scipy.linalg.fractional_matrix_power(relative_rotation,
                                                    alpha).dot(C1)
    imag_magnitude = np.linalg.norm(np.imag(C_interp))
    if imag_magnitude > 1e-10:
        logger.print("Bad SO(3) interp:")
        logger.print(C_interp)
    return np.real(C_interp)
예제 #28
0
def train(resume_model_path, resume_optimizer_path, train_description):
    """Run the end-to-end VIO training loop.

    Args:
        resume_model_path: path to a saved model state dict to resume from;
            falsy to start fresh (optionally seeded from pretrained FlowNet).
        resume_optimizer_path: path to a saved optimizer state dict; only
            loaded when resume_model_path is also given.
        train_description: free-form run description logged to tensorboard;
            if falsy, the user is prompted for one interactively.

    Side effects: writes logs, pickled subsequence dataframes, and model
    checkpoints under par.results_dir.
    """
    logger.initialize(working_dir=par.results_dir, use_tensorboard=True)
    logger.print("================ TRAIN ================")

    if not train_description:
        train_description = input("Enter a description of this training run: ")
    logger.print("Train description: ", train_description)
    logger.tensorboard.add_text("description", train_description)

    logger.log_parameters()
    logger.log_source_files()

    # Prepare Data
    # Training split: augmented (training=True) and sampled par.sample_times
    # passes; subsequence metadata is pickled alongside the results.
    train_subseqs = get_subseqs(par.train_seqs,
                                par.seq_len,
                                overlap=1,
                                sample_times=par.sample_times,
                                training=True)
    convert_subseqs_list_to_panda(train_subseqs).to_pickle(
        os.path.join(par.results_dir, "train_df.pickle"))
    train_dataset = SubseqDataset(train_subseqs, (par.img_h, par.img_w),
                                  par.img_means, par.img_stds,
                                  par.minus_point_5)
    train_dl = DataLoader(train_dataset,
                          batch_size=par.batch_size,
                          shuffle=True,
                          num_workers=par.n_processors,
                          pin_memory=par.pin_mem,
                          drop_last=False)
    logger.print('Number of samples in training dataset: %d' %
                 len(train_subseqs))

    # Validation split: no augmentation, single sampling pass, no shuffling.
    valid_subseqs = get_subseqs(par.valid_seqs,
                                par.seq_len,
                                overlap=1,
                                sample_times=1,
                                training=False)
    convert_subseqs_list_to_panda(valid_subseqs).to_pickle(
        os.path.join(par.results_dir, "valid_df.pickle"))
    valid_dataset = SubseqDataset(valid_subseqs, (par.img_h, par.img_w),
                                  par.img_means,
                                  par.img_stds,
                                  par.minus_point_5,
                                  training=False)
    valid_dl = DataLoader(valid_dataset,
                          batch_size=par.batch_size,
                          shuffle=False,
                          num_workers=par.n_processors,
                          pin_memory=par.pin_mem,
                          drop_last=False)
    logger.print('Number of samples in validation dataset: %d' %
                 len(valid_subseqs))

    # Model
    e2e_vio_model = E2EVIO()
    e2e_vio_model = e2e_vio_model.cuda()
    # Evaluates full-trajectory error on the validation sequences each epoch
    # (subsequence length 50).
    online_evaluator = _OnlineDatasetEvaluator(e2e_vio_model, par.valid_seqs,
                                               50)

    # Load FlowNet weights pretrained with FlyingChairs
    # NOTE: the pretrained model assumes image rgb values in range [-0.5, 0.5]
    if par.pretrained_flownet and not resume_model_path:
        pretrained_w = torch.load(par.pretrained_flownet)
        logger.print('Load FlowNet pretrained model')
        # Use only conv-layer-part of FlowNet as CNN for DeepVO
        vo_model_dict = e2e_vio_model.vo_module.state_dict()
        update_dict = {
            k: v
            for k, v in pretrained_w['state_dict'].items()
            if k in vo_model_dict
        }
        assert (len(update_dict) > 0)
        vo_model_dict.update(update_dict)
        e2e_vio_model.vo_module.load_state_dict(vo_model_dict)

    # Create optimizer
    # Parameters whose names match a par.param_specific_lr regex get that
    # learning rate; all others use the optimizer default.
    logger.print("Optimizing on parameters:")
    optimizer_params = []
    for p_name, p in e2e_vio_model.named_parameters():
        specific_lr_found = False
        for k in par.param_specific_lr:
            regex = re.compile(k)
            if regex.match(p_name):
                optimizer_params.append({
                    "params": p,
                    "lr": par.param_specific_lr[k]
                })
                logger.print("%s, lr:%f" % (p_name, par.param_specific_lr[k]))
                specific_lr_found = True

        if not specific_lr_found:
            optimizer_params.append({"params": p})
            logger.print(p_name)
    # NOTE(review): a parameter matching several regexes is appended more
    # than once and would break this count — assumes patterns are disjoint.
    assert (len(optimizer_params) == sum(1
                                         for _ in e2e_vio_model.parameters()))

    optimizer = par.optimizer(optimizer_params, **par.optimizer_args)

    # Load trained DeepVO model and optimizer
    if resume_model_path:
        state_dict_update = logger.clean_state_dict_key(
            torch.load(resume_model_path))
        # drop weights the configuration explicitly excludes from resuming
        state_dict_update = {
            key: state_dict_update[key]
            for key in state_dict_update
            if key not in par.exclude_resume_weights
        }
        state_dict = e2e_vio_model.state_dict()
        state_dict.update(state_dict_update)
        e2e_vio_model.load_state_dict(state_dict)
        logger.print('Load model from: %s' % resume_model_path)
        if resume_optimizer_path:
            optimizer.load_state_dict(torch.load(resume_optimizer_path))
            logger.print('Load optimizer from: %s' % resume_optimizer_path)

    # if to use more than one GPU
    if par.n_gpu > 1:
        assert (torch.cuda.device_count() == par.n_gpu)
        e2e_vio_model = torch.nn.DataParallel(e2e_vio_model)

    e2e_vio_ta = _TrainAssistant(e2e_vio_model)

    # Train
    # Track the best (lowest) metrics seen so far to decide when to save.
    min_loss_t = 1e10
    min_loss_v = 1e10
    min_err_eval = 1e10
    for epoch in range(par.epochs):
        e2e_vio_ta.epoch = epoch
        st_t = time.time()
        logger.print('=' * 50)
        # Train
        e2e_vio_model.train()
        loss_mean = 0
        t_loss_list = []
        count = 0
        for data in train_dl:
            # in-place progress line (carriage return, no newline)
            print("%d/%d (%.2f%%)" %
                  (count, len(train_dl), 100 * count / len(train_dl)),
                  end='\r')
            ls = e2e_vio_ta.step(data, optimizer).data.cpu().numpy()
            t_loss_list.append(float(ls))
            loss_mean += float(ls)
            count += 1
        logger.print('Train take {:.1f} sec'.format(time.time() - st_t))
        loss_mean /= len(train_dl)
        logger.tensorboard.add_scalar("epoch/train_loss", loss_mean, epoch)

        # Validation
        st_t = time.time()
        e2e_vio_model.eval()
        loss_mean_valid = 0
        v_loss_list = []
        for data in valid_dl:
            v_ls = e2e_vio_ta.get_loss(data).data.cpu().numpy()
            v_loss_list.append(float(v_ls))
            loss_mean_valid += float(v_ls)
        logger.print('Valid take {:.1f} sec'.format(time.time() - st_t))
        loss_mean_valid /= len(valid_dl)
        logger.tensorboard.add_scalar("epoch/val_loss", loss_mean_valid, epoch)

        logger.print(
            'Epoch {}\ntrain loss mean: {}, std: {}\nvalid loss mean: {}, std: {}\n'
            .format(epoch + 1, loss_mean, np.std(t_loss_list), loss_mean_valid,
                    np.std(v_loss_list)))

        # full-trajectory evaluation on the held-out sequences
        err_eval = online_evaluator.evaluate()
        logger.tensorboard.add_scalar("epoch/eval_loss", err_eval, epoch)

        # Save model
        # Periodic checkpoint every 5 epochs, plus best-so-far snapshots for
        # each of the three tracked metrics.
        if (epoch + 1) % 5 == 0:
            logger.log_training_state("checkpoint", epoch + 1,
                                      e2e_vio_model.state_dict(),
                                      optimizer.state_dict())
        if loss_mean_valid < min_loss_v:
            min_loss_v = loss_mean_valid
            logger.log_training_state("valid", epoch + 1,
                                      e2e_vio_model.state_dict())
        if loss_mean < min_loss_t:
            min_loss_t = loss_mean
            logger.log_training_state("train", epoch + 1,
                                      e2e_vio_model.state_dict())
        if err_eval < min_err_eval:
            min_err_eval = err_eval
            logger.log_training_state("eval", epoch + 1,
                                      e2e_vio_model.state_dict())

        logger.print(
            "Latest saves:", " ".join([
                "%s: %s" % (k, v)
                for k, v in logger.log_training_state_latest_epoch.items()
            ]))
예제 #29
0
arg_parsed = arg_parser.parse_args()
gpu_ids = arg_parsed.gpu_id
# Resolve optional resume paths to absolute paths (None when not supplied).
resume_model_path = os.path.abspath(
    arg_parsed.resume_model_from) if arg_parsed.resume_model_from else None
resume_optimizer_path = os.path.abspath(
    arg_parsed.resume_optimizer_from
) if arg_parsed.resume_optimizer_from else None

# In eval-only mode work out of this script's directory; otherwise use the
# configured results directory.
results_dir = os.path.abspath(
    os.path.dirname(__file__)) if arg_parsed.run_eval_only else par.results_dir
logger.initialize(working_dir=results_dir, use_tensorboard=True)

# set the visible GPUs
if gpu_ids:
    # plain comma-separated list (no spaces) — the conventional form for
    # CUDA_VISIBLE_DEVICES
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(i) for i in gpu_ids)
    # typo fix in the log message: was "CUDA_VISIVLE_DEVICES"
    logger.print("CUDA_VISIBLE_DEVICES: %s" %
                 os.environ["CUDA_VISIBLE_DEVICES"])

if not arg_parsed.run_eval_only:
    start_t = time.time()
    trainer.train(resume_model_path, resume_optimizer_path,
                  arg_parsed.description)
    logger.print("Training took %.2fs" % (time.time() - start_t))

# Generate, plot and score trajectories for every saved-model variant.
for tag in ["valid", "train", "checkpoint", "eval"]:
    seq_results_dir = gen_trajectory(
        os.path.join(results_dir, "saved_model.%s" % tag),
        par.valid_seqs + par.train_seqs, 2, True)
    plot_trajectory(seq_results_dir)
    calc_error(seq_results_dir)
    plot_errors(seq_results_dir)