Code example #1
 def load_vio(self, dataset, args):
     """
     load ts, p, q, v from vio states, load ba and bg from calibration states
     """
     logging.info("loading vio states from " +
                  osp.join(args.root_dir, dataset, "evolving_state.txt"))
     vio_states = np.loadtxt(osp.join(args.root_dir, dataset,
                                      "evolving_state.txt"),
                             delimiter=",")
     vio_calibs = np.loadtxt(osp.join(args.root_dir, dataset,
                                      "calib_state.txt"),
                             delimiter=",")
     self.vio_ts = vio_states[:, 0] * 1e-6
     self.vio_p = vio_states[:, 5:8]
     self.vio_v = vio_states[:, 8:11]
     self.vio_rq = vio_states[:, 1:5]
     vio_r = Rotation.from_quat(
         np.concatenate([
             self.vio_rq[:, 1:4],
             np.expand_dims(self.vio_rq[:, 0], axis=1)
         ],
                        axis=1))
     self.vio_eul = vio_r.as_euler("xyz", degrees=True)
     self.vio_R = vio_r.as_matrix()
     self.vio_calib_ts = vio_calibs[:, 0] * 1e-6
     self.vio_ba = vio_calibs[:, 28:31]
     self.vio_bg = vio_calibs[:, 31:34]
     self.vio_accelScaleInv = vio_calibs[:, 1:10].reshape((-1, 3, 3))
     self.vio_gyroScaleInv = vio_calibs[:, 10:19].reshape((-1, 3, 3))
     self.vio_gyroGSense = vio_calibs[:, 19:28].reshape((-1, 3, 3))
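The concatenation above reorders the quaternion because evolving_state.txt stores it scalar-first (w, x, y, z), while SciPy's Rotation.from_quat expects scalar-last (x, y, z, w). A minimal sketch of the same reordering on a dummy array:

import numpy as np
from scipy.spatial.transform import Rotation

# dummy quaternion in (w, x, y, z) order: the identity rotation
q_wxyz = np.array([[1.0, 0.0, 0.0, 0.0]])

# reorder to SciPy's (x, y, z, w) convention before building the Rotation
q_xyzw = np.concatenate([q_wxyz[:, 1:4], q_wxyz[:, 0:1]], axis=1)
r = Rotation.from_quat(q_xyzw)
print(r.as_euler("xyz", degrees=True))  # [[0. 0. 0.]]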
Code example #2
File: plot_filter_state.py Project: ori-drs/TLIO
def run_on_each_dataset_and_gather_metrics(args, data_names):
    all_metrics = {}
    # Retrieve metrics from old logs; entries are overwritten when recomputed.
    # The presence of .svg or .png files is used as the flag for already-processed datasets.
    if osp.exists(args.log_dir + "/metrics.json"):
        with open(args.log_dir + "/metrics.json", "r") as f:
            logging.info(f"Loading old metric file at {args.log_dir + '/metrics.json'}")
            all_metrics = json.load(f)

    for dataset in progressbar.progressbar(data_names, redirect_stdout=True):
        logging.info(f"Plotting dataset {dataset}")
        try:
            results_folder = os.path.join(args.log_dir, dataset)
            # if osp.exists(osp.join(results_folder, "position-3d.png")):
            #     logging.info(f"Skipping {dataset} because alraedy processed")
            #     continue
            metric_map = run(args, dataset)
            all_metrics[dataset] = metric_map
            with open(args.log_dir + "/metrics.json", "w") as f:
                json.dump(all_metrics, f, indent=1)
        except OSError as e:
            # missing result files: report and move on; other exceptions propagate
            print(e)
            continue
Code example #3
    def do_synthesis(self, input_text):
        input_text = self.tts_pause.add_pause(input_text)
        print("input_text>>>>", input_text)
        logging.info(
            "[TTSModel] [do_synthesis] input_text:{}".format(input_text))
        input_ids = self.processor.text_to_sequence(input_text, inference=True)
        # input_ids = np.concatenate([input_ids, [219 - 1]], -1)
        self.interpreter.resize_tensor_input(self.input_details[0]['index'],
                                             [1, len(input_ids)])

        self.interpreter.allocate_tensors()
        input_data = self.prepare_input(input_ids)
        for i, detail in enumerate(self.input_details):
            input_shape = detail['shape']
            self.interpreter.set_tensor(detail['index'], input_data[i])
        self.interpreter.invoke()  # run inference before reading the output tensors
        decoder_output_tflite, mel_outputs = self.interpreter.get_tensor(
            self.output_details[0]['index']), self.interpreter.get_tensor(
                self.output_details[1]['index'])

        remove_end = 1024
        audio = self.mb_melgan.inference(mel_outputs)[0, :-remove_end, 0]

        return mel_outputs.numpy(), decoder_output_tflite.numpy(), audio.numpy()
Code example #4
    def __init__(self, model_path, force_cpu=False):
        # load trained network model
        if not torch.cuda.is_available() or force_cpu:
            self.device = torch.device("cpu")
            self.net = torch.jit.load(model_path, map_location="cpu")
        else:
            self.device = torch.device("cuda:0")
            self.net = torch.jit.load(model_path)

        self.net.to(self.device)
        logging.info("Model {} loaded to device {}.".format(
            model_path, self.device))
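A hedged usage sketch: a TorchScript module loaded with torch.jit.load is called like a regular nn.Module (the path and the (batch, channels, window) input shape below are hypothetical, not from the source):

import torch

net = torch.jit.load("model.pt", map_location="cpu")  # hypothetical path
net.eval()
with torch.no_grad():
    out = net(torch.zeros(1, 6, 200))  # hypothetical input shape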
Code example #5
    def load_sim_data(self, args):
        """
        This loads simulation data from an imu.csv file containing
        perfect imu data.
        """
        logging.info("loading simulation data from " + args.sim_data_path)
        sim_data = np.loadtxt(
            args.sim_data_path,
            delimiter=",",
            usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
        )
        ts_all = sim_data[:, 0]
        vio_p = sim_data[:, 1:4]
        vio_rq = sim_data[:, 4:8]
        acc_all = sim_data[:, 8:11]
        vio_v = sim_data[:, 11:14]
        gyr_all = sim_data[:, 14:17]

        # add sim noise and bias
        if args.add_sim_imu_noise:
            wa = args.sim_sigma_na * np.random.normal(0, 1, acc_all.shape)
            wg = args.sim_sigma_ng * np.random.normal(0, 1, gyr_all.shape)
            acc_all = acc_all + wa
            gyr_all = gyr_all + wg

            sim_ba = np.array([0.3, -0.2, 0.4])
            sim_bg = np.array([0.0005, 0.002, -0.001])
            acc_all = acc_all + sim_ba
            gyr_all = gyr_all + sim_bg

        if args.start_from_ts is not None:
            idx_start = np.where(ts_all >= args.start_from_ts * 1e-6)[0][0]
        else:
            idx_start = 50

        self.ts_all = ts_all[idx_start:] * 1e6
        self.acc_all = 0.5 * (acc_all[idx_start:, :] +
                              acc_all[idx_start - 1:-1, :])
        self.gyr_all = 0.5 * (gyr_all[idx_start:, :] +
                              gyr_all[idx_start - 1:-1, :])
        # self.acc_all = acc_all[idx_start:,:]
        # self.gyr_all = gyr_all[idx_start:,:]
        self.vio_ts = ts_all[idx_start - 1:]
        self.vio_p = vio_p[idx_start - 1:, :]
        self.vio_v = vio_v[idx_start - 1:, :]
        self.vio_rq = vio_rq[idx_start - 1:, :]

        vio_r = Rotation.from_quat(self.vio_rq)
        self.vio_eul = vio_r.as_euler("xyz", degrees=True)
        self.vio_R = vio_r.as_matrix()

        self.dataset_size = self.ts_all.shape[0]
        self.init_ts = self.ts_all[0]
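The 0.5 * (x[i] + x[i-1]) step above is a midpoint average of consecutive IMU samples, which presumably aligns the measurement stream with the VIO states taken from index idx_start - 1. A tiny numeric sketch of that step:

import numpy as np

acc_all = np.array([[0.0], [2.0], [4.0], [6.0]])
idx_start = 1
acc_mid = 0.5 * (acc_all[idx_start:, :] + acc_all[idx_start - 1:-1, :])
print(acc_mid.ravel())  # [1. 3. 5.], the midpoints between consecutive samples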
Code example #6
File: train.py Project: zeta1999/TLIO
def save_model(args, epoch, network, optimizer, interrupt=False):
    if interrupt:
        model_path = osp.join(args.out_dir, "checkpoints", "checkpoint_latest.pt")
    else:
        model_path = osp.join(args.out_dir, "checkpoints", "checkpoint_%d.pt" % epoch)
    state_dict = {
        "model_state_dict": network.state_dict(),
        "epoch": epoch,
        "optimizer_state_dict": optimizer.state_dict(),
        "args": vars(args),
    }
    torch.save(state_dict, model_path)
    logging.info(f"Model saved to {model_path}")
Code example #7
File: plot_filter_state.py Project: ori-drs/TLIO
def compare_biases(args):
    with open(args.data_list) as f:
        data_names = [
            s.strip().split("," or " ")[0]
            for s in f.readlines()
            if len(s) > 0 and s[0] != "#"
        ]
    # get headset name
    headset_names = set()
    for dataset in data_names:
        headset_name = dataset.split("_")[1].strip("hidacori")
        headset_names.add(headset_name)
    logging.info(f"Got {len(headset_names)} different hw")
    # assign color to each
    headset_names_colors = {}
    for i, hw in enumerate(headset_names):
        headset_names_colors[hw] = "C" + str(i)

    for dataset in data_names:
        headset_name = dataset.split("_")[1].strip("hidacori")
        results_folder = os.path.join(args.log_dir, dataset)
        states = np.load(os.path.join(results_folder, args.log_filename + ".npy"))
        ts = states[:, 27]
        ts = ts - ts[0]
        ba = states[:, 15:18]
        bg = states[:, 18:21]
        sigma_bg = np.sqrt(states[:, 37:40])
        sigma_ba = np.sqrt(states[:, 40:43])

        plot_state_euclidean(
            "accel bias",
            dataset,
            ["ba_x", "ba_y", "ba_z"],
            ts,
            ba,
            sigma_ba,
            color=headset_names_colors[headset_name],
        )
        plt.legend([])
        plot_state_euclidean(
            "gyro bias",
            dataset,
            ["bg_x", "bg_y", "bg_z"],
            ts,
            np.rad2deg(bg) * 3600,
            np.rad2deg(sigma_bg) * 3600,
            color=headset_names_colors[headset_name],
        )
        plt.legend([])
    plt.show()
Code example #8
    def post(self):

        if not request.json or 'content' not in request.json :
            res = { "code": "400", "data": {}, "message": "request is not json or content not in json" }
            return jsonify ( res )

        else:
            logging.info( "[QDNetInference] [post] request.json:{}".format( request.json ) )
            url = request.json["content"]
            logging.info( "[QDNetInference] [post] url:{}".format( url ) )
            data = url2imgcv2(url)
            pre = qdnet_model.predict(data)
            res = { "code": "200", "data": pre, "message": "" }
            return jsonify ( res )
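A hedged client-side sketch for exercising this handler (the host and route are hypothetical; only the "content" field is taken from the handler above):

import requests

resp = requests.post(
    "http://localhost:5000/predict",  # hypothetical host and route
    json={"content": "http://example.com/image.jpg"},
)
print(resp.json())  # e.g. {"code": "200", "data": ..., "message": ""}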
Code example #9
 def text_to_pinyin_sequence(self, text):
     # pinyin = self.processor.pinyin_parser(text, style=Style.TONE3, errors="ignore")
     pinyin, text = self.tts_py.get_pyin(text)
     new_pinyin = []
     for x in str(pinyin).split(" "):
         if "#" not in x:
             new_pinyin.append(x)
     phonemes = self.processor.get_phoneme_from_char_and_pinyin(
         text, new_pinyin)
     text = " ".join(phonemes)
     print("phoneme seq: {}".format(text))
     logging.info(
         "[TTSModel] [text_to_pinyin_sequence] phoneme seq:{}".format(text))
     input_ids = self.processor.text_to_sequence(text, inference=False)
     return input_ids
Code example #10
    def do_synthesis(self, input_text):
        input_text = self.tts_pause.add_pause(input_text)
        print("input_text>>>>", input_text)
        logging.info(
            "[TTSModel] [do_synthesis] input_text:{}".format(input_text))
        input_ids = self.processor.text_to_sequence(input_text, inference=True)

        _, mel_outputs, stop_token_prediction, alignment_history = self.tacotron2.inference(
            tf.expand_dims(tf.convert_to_tensor(input_ids, dtype=tf.int32), 0),
            tf.convert_to_tensor([len(input_ids)], tf.int32),
            tf.convert_to_tensor([0], dtype=tf.int32))

        remove_end = 1024
        audio = self.mb_melgan.inference(mel_outputs)[0, :-remove_end, 0]

        return mel_outputs.numpy(), alignment_history.numpy(), audio.numpy()
Code example #11
File: meas_source_network.py Project: ori-drs/TLIO
    def __init__(self, model_path, arch, net_config, force_cpu=False):
        # network
        self.net = get_model(arch, net_config, 6, 3)

        # load trained network model
        if not torch.cuda.is_available() or force_cpu:
            self.device = torch.device("cpu")
            checkpoint = torch.load(
                model_path, map_location=lambda storage, location: storage)
        else:
            self.device = torch.device("cuda:0")
            checkpoint = torch.load(model_path)

        self.net.load_state_dict(checkpoint["model_state_dict"])
        self.net.eval().to(self.device)
        logging.info("Model {} loaded to device {}.".format(
            model_path, self.device))
Code example #12
    def post(self):

        if not request.json or 'content' not in request.json:
            res = {
                "code": "400",
                "data": {},
                "message": "request is not json or content not in json"
            }
            return jsonify(res)

        else:
            logging.info("[TTSInference] [post] request.json:{}".format(
                request.json))
            text = request.json["content"]
            logging.info("[TTSInference] [post] text:{}".format(text))
            mels, alignment_history, audios = tts_model.do_synthesis(text)
            res = {"code": "200", "data": audios.tolist(), "message": ""}
            return jsonify(res)
Code example #13
File: train.py Project: zeta1999/TLIO
def write_summary(summary_writer, attr_dict, epoch, optimizer, mode):
    """ Given the attr_dict write summary and log the losses """

    mse_loss = np.mean((attr_dict["targets"] - attr_dict["preds"]) ** 2, axis=0)
    ml_loss = np.average(attr_dict["losses"])
    sigmas = np.exp(attr_dict["preds_cov"])
    summary_writer.add_scalar(f"{mode}_loss/loss_x", mse_loss[0], epoch)
    summary_writer.add_scalar(f"{mode}_loss/loss_y", mse_loss[1], epoch)
    summary_writer.add_scalar(f"{mode}_loss/loss_z", mse_loss[2], epoch)
    summary_writer.add_scalar(f"{mode}_loss/avg", np.mean(mse_loss), epoch)
    summary_writer.add_scalar(f"{mode}_dist/loss_full", ml_loss, epoch)
    summary_writer.add_histogram(f"{mode}_hist/sigma_x", sigmas[:, 0], epoch)
    summary_writer.add_histogram(f"{mode}_hist/sigma_y", sigmas[:, 1], epoch)
    summary_writer.add_histogram(f"{mode}_hist/sigma_z", sigmas[:, 2], epoch)
    if epoch > 0:
        summary_writer.add_scalar(
            "optimizer/lr", optimizer.param_groups[0]["lr"], epoch - 1
        )
    logging.info(
        f"{mode}: average ml loss: {ml_loss}, average mse loss: {mse_loss}/{np.mean(mse_loss)}"
    )
Code example #14
File: imu_tracker.py Project: zeta1999/TLIO
    def on_imu_measurement(self, t_us, gyr_raw, acc_raw):
        assert isinstance(t_us, int)
        if self.filter.initialized:
            return self._on_imu_measurement_after_init(t_us, gyr_raw, acc_raw)
        else:
            logging.info(f"Initializing filter at time {t_us*1e-6}")
            if self.icalib:
                logging.info(f"Using bias from initial calibration")
                init_ba = self.icalib.accelBias
                init_bg = self.icalib.gyroBias
                # calibrate raw imu data
                acc_biascpst, gyr_biascpst = self.icalib.calibrate_raw(
                    acc_raw, gyr_raw)  # removed offline bias and scaled
            else:
                logging.info(f"Using zero bias")
                init_ba = np.zeros((3, 1))
                init_bg = np.zeros((3, 1))
                acc_biascpst, gyr_biascpst = acc_raw, gyr_raw

            self.filter.initialize(acc_biascpst, t_us, init_ba, init_bg)
            self.next_interp_t_us = t_us
            self.next_aug_t_us = t_us
            self._add_interpolated_imu_to_buffer(acc_biascpst, gyr_biascpst,
                                                 t_us)
            self.next_aug_t_us = t_us + self.dt_update_us
            self.last_t_us, self.last_acc, self.last_gyr = (
                t_us,
                acc_biascpst,
                gyr_biascpst,
            )
            return False
Code example #15
def arg_conversion(args):
    """ Conversions from time arguments to data size """

    if not (args.past_time * args.imu_freq).is_integer():
        raise ValueError(
            "past_time cannot be represented by integer number of IMU data.")
    if not (args.window_time * args.imu_freq).is_integer():
        raise ValueError(
            "window_time cannot be represented by integer number of IMU data.")
    if not (args.future_time * args.imu_freq).is_integer():
        raise ValueError(
            "future_time cannot be represented by integer number of IMU data.")
    if not (args.imu_freq / args.sample_freq).is_integer():
        raise ValueError("imu_freq must be divisible by sample_freq.")

    data_window_config = dict([
        ("past_data_size", int(args.past_time * args.imu_freq)),
        ("window_size", int(args.window_time * args.imu_freq)),
        ("future_data_size", int(args.future_time * args.imu_freq)),
        ("step_size", int(args.imu_freq / args.sample_freq)),
    ])
    net_config = {
        "in_dim": (data_window_config["past_data_size"] +
                   data_window_config["window_size"] +
                   data_window_config["future_data_size"]) // 32 + 1
    }

    # Display
    np.set_printoptions(formatter={"all": "{:.6f}".format})
    logging.info(f"Training/testing with {args.imu_freq} Hz IMU data")
    logging.info("Size: " + str(data_window_config["past_data_size"]) + "+" +
                 str(data_window_config["window_size"]) + "+" +
                 str(data_window_config["future_data_size"]) + ", " +
                 "Time: " + str(args.past_time) + "+" + str(args.window_time) +
                 "+" + str(args.future_time))
    logging.info("Perturb on bias: %s" % args.do_bias_shift)
    logging.info("Perturb on gravity: %s" % args.perturb_gravity)
    logging.info("Sample frequency: %s" % args.sample_freq)
    return data_window_config, net_config
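As a concrete illustration of the size arithmetic above, a hedged sketch of what arg_conversion returns for one plausible configuration (the argument values are illustrative, not from the source, and arg_conversion is assumed importable from the snippet above):

from types import SimpleNamespace

# hypothetical setup: 200 Hz IMU, 1 s window, no past/future context, 20 Hz sampling
args = SimpleNamespace(past_time=0.0, window_time=1.0, future_time=0.0,
                       imu_freq=200.0, sample_freq=20.0,
                       do_bias_shift=False, perturb_gravity=False)

data_window_config, net_config = arg_conversion(args)
# past_data_size=0, window_size=200, future_data_size=0, step_size=10
# in_dim = (0 + 200 + 0) // 32 + 1 = 7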
Code example #16
def pose_integrate(args, dataset, preds):
    """
    Concatenate predicted velocity to reconstruct sequence trajectory
    """
    dp_t = args.window_time
    pred_vels = preds / dp_t

    ind = np.array([i[1] for i in dataset.index_map], dtype=int)  # np.int was removed in NumPy 1.24
    delta_int = int(args.window_time * args.imu_freq /
                    2.0)  # velocity as the middle of the segment
    if not (args.window_time * args.imu_freq / 2.0).is_integer():
        logging.info("Trajectory integration point is not centered.")
    ind_intg = ind + delta_int  # the indices of doing integral

    ts = dataset.ts[0]
    dts = np.mean(ts[ind_intg[1:]] - ts[ind_intg[:-1]])
    pos_intg = np.zeros([pred_vels.shape[0] + 1, args.output_dim])
    pos_intg[0] = dataset.gt_pos[0][ind_intg[0], :]
    pos_intg[1:] = np.cumsum(pred_vels[:, :] * dts, axis=0) + pos_intg[0]
    ts_intg = np.append(ts[ind_intg], ts[ind_intg[-1]] + dts)

    ts_in_range = ts[ind_intg[0]:ind_intg[-1]]  # s
    pos_pred = interp1d(ts_intg, pos_intg, axis=0)(ts_in_range)
    pos_gt = dataset.gt_pos[0][ind_intg[0]:ind_intg[-1], :]
    ori_pred = dataset.orientations[0][ind_intg[0]:ind_intg[-1], :]
    ori_gt = dataset.gt_ori[0][ind_intg[0]:ind_intg[-1], :]
    eul_pred = Rotation.from_quat(ori_pred).as_euler("xyz", degrees=True)
    eul_gt = Rotation.from_quat(ori_gt).as_euler("xyz", degrees=True)

    traj_attr_dict = {
        "ts": ts_in_range,
        "pos_pred": pos_pred,
        "pos_gt": pos_gt,
        "eul_pred": eul_pred,
        "eul_gt": eul_gt,
    }

    return traj_attr_dict
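The core of pose_integrate is a dead-reckoning step: each window's predicted displacement becomes a mean velocity (preds / dp_t), and positions are rebuilt by cumulatively summing velocity times the mean sample spacing. A self-contained sketch of that step with made-up numbers:

import numpy as np

dp_t = 1.0                          # window_time in seconds (hypothetical)
preds = np.array([[1.0, 0.0, 0.0],  # per-window displacement predictions (m)
                  [1.0, 1.0, 0.0],
                  [0.0, 1.0, 0.0]])
pred_vels = preds / dp_t            # displacement over the window -> mean velocity
dts = 1.0                           # mean time between integration points
pos_intg = np.zeros([preds.shape[0] + 1, 3])
pos_intg[0] = np.zeros(3)           # initial position (from ground truth in the source)
pos_intg[1:] = np.cumsum(pred_vels * dts, axis=0) + pos_intg[0]
print(pos_intg)  # rows: (0,0,0), (1,0,0), (2,1,0), (2,2,0)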
Code example #17
def plot_autocorellation(args, log_folder, n, dataset):
    results_folder = log_folder + "/" + dataset
    filename = os.path.join(results_folder, args.log_filename + ".npy")
    try:
        logging.info(f"Loading {filename}")
        states = np.load(filename)
    except OSError:
        logging.error(
            f"{filename} was not found. Likely means the filter did not finish"
        )
        raise FileNotFoundError(filename)

    # load all states
    meas = states[:, 46:49]
    if os.path.exists(os.path.join(results_folder, "vio_states.npy")):
        vio_states = np.load(os.path.join(results_folder, "vio_states.npy"))
        ref_disp = vio_states[:, 15:18]
    else:
        logging.error(
            "vio_states.npy was not found. You should create it with plot_state.py first... sorry :("
        )
        raise FileNotFoundError(os.path.join(results_folder, "vio_states.npy"))

    fig = plt.figure("autocorellation " + dataset)
    meas_err = meas - ref_disp
    meas_err_update = meas_err[~np.isnan(meas_err).any(axis=1)]
    logging.warning("We assume update frequency at 20hz for autocorrelation")
    for i in range(3):
        plt.subplot(3, 1, i + 1)
        plt.acorr(meas_err_update[:, i], maxlags=100, lw=2, usevlines=False, label=n)
        locs, labels = plt.xticks()  # Get locations and labels
        for (l, t) in zip(locs, labels):
            t.set_text(str(l / 20.0) + "s")
        plt.xticks(locs, labels)  # Set locations and labels
        plt.xlim(left=0)
        plt.grid()
    plt.legend()
Code example #18
File: plot_filter_state.py Project: ori-drs/TLIO
 def compute_rpe_ronin(ns_rpe):
     rpe_rmse, rpe_rmse_z, relative_yaw_rmse = compute_rpe(
         ns_rpe, ps_ronin, ps_gt, euls_aekf[:, [2]], euls_gt[:, [2]]
     )
     logging.info(f"RPE RMSE of ronin over 1s: {rpe_rmse}")
     logging.info(f"RPE RMSE Z of ronin over 1s: {rpe_rmse_z}")
     logging.info(f"RPE RMSE Yaw of ronin over 1s: {relative_yaw_rmse}")
     metric_map["ronin"]["rpe_rmse_" + str(ns_rpe)] = rpe_rmse
     metric_map["ronin"]["rpe_rmse_z_" + str(ns_rpe)] = rpe_rmse_z
     metric_map["ronin"]["relative_yaw_rmse_" + str(ns_rpe)] = relative_yaw_rmse
Code example #19
File: plot_filter_state.py Project: ori-drs/TLIO
 def compute_rpe_filter(ns_rpe):
     rpe_rmse, rpe_rmse_z, relative_yaw_rmse = compute_rpe_distance(
         ns_rpe, ps_filter, ps_gt, euls_filter[:, [2]], euls_gt[:, [2]]
     )
     logging.info(f"RPE RMSE of filter over {1e-3*ns_rpe}s: {rpe_rmse}")
     logging.info(f"RPE RMSE Z of filter over {1e-3*ns_rpe}s: {rpe_rmse_z}")
     logging.info(f"RPE RMSE Yaw of filter over {1e-3*ns_rpe}s: {relative_yaw_rmse}")
     metric_map["filter"]["rpe_rmse_" + str(ns_rpe)] = rpe_rmse
     metric_map["filter"]["rpe_rmse_z_" + str(ns_rpe)] = rpe_rmse_z
     metric_map["filter"]["relative_yaw_rmse_" + str(ns_rpe)] = relative_yaw_rmse
Code example #20
        help="Path to dataset directory",
    )
    io_groups.add_argument("--save_plot", action="store_true")
    io_groups.add_argument("--concatenate", action="store_true")

    args = parser.parse_args()

    all_models = list(Path.cwd().glob(args.model_globbing))

    def save_plot_arg(yes):
        if yes:
            return "--save_plot"
        else:
            return "--no-save_plot"

    logging.info(f"Found {len(all_models)} models")
    logging.info(f"Found {all_models}")
    for m in all_models:
        base_folder = Path(m).parent
        logging.info(base_folder)
        name_run = str(Path(m).parents[1].name) + "-" + str(
            Path(m).parents[0].name)
        if not osp.exists(f"./{args.out_dir}/{name_run}/"):
            os.mkdir(f"./{args.out_dir}/{name_run}/")
        # read parameters.json
        with open(str(base_folder) + "/parameters.json", "r") as f:
            conf = json.load(f)
        print(conf)

        sample_freq = 20.0  # no concatenation
        if args.concatenate:
Code example #21
File: plot_filter_state.py Project: ori-drs/TLIO
def run(args, dataset):
    plt.close("all")
    if args.dir is not None:
        results_folder = args.dir
    else:
        results_folder = os.path.join(args.log_dir, dataset)

    try:
        logging.info(f"Loading {os.path.join(results_folder, args.log_filename + '.npy')}")
        states = np.load(os.path.join(results_folder, args.log_filename + ".npy"))
        save_vio_states = True  # traj is done
    except OSError:
        logging.warning(
            "Relying on .txt file because .npy was not found. Likely means the filter did not finish"
        )
        states = np.loadtxt(
            os.path.join(results_folder, args.log_filename), delimiter=","
        )  # traj is still processing
        save_vio_states = False

    R_init = states[0, :9].reshape(-1, 3, 3)
    r_init = Rotation.from_matrix(R_init)
    Rs = states[:, :9].reshape(-1, 3, 3)
    rs = Rotation.from_matrix(Rs)
    euls = rs.as_euler("xyz", degrees=True)
    vs = states[:, 9:12]
    ps = states[:, 12:15]
    ps_dr = np.cumsum(
        states[:, 9:12] * np.diff(states[:, 27:28], prepend=states[0, 27], axis=0),
        axis=0,
    )
    ba = states[:, 15:18]
    bg = states[:, 18:21]
    accs = states[:, 21:24]  # offline calib compensated, scale+bias
    gyrs = states[:, 24:27]  # offline calib compensated, scale+bias
    ts = states[:, 27]
    sigma_r = np.sqrt(states[:, 28:31]) * 180.0 / np.pi
    sigma_v = np.sqrt(states[:, 31:34])
    sigma_p = np.sqrt(states[:, 34:37])
    sigma_bg = np.sqrt(states[:, 37:40])
    sigma_ba = np.sqrt(states[:, 40:43])
    innos = states[:, 43:46]
    meas = states[:, 46:49]
    pred = states[:, 49:52]
    meas_sigma = states[:, 52:55]
    inno_sigma = states[:, 55:58]
    nobs_sigma = states[:, 58 : 58 + 16]

    N = ts.shape[0]

    # get RoNIN concatenation results
    if args.ronin_dir is not None:
        try:
            ronin = np.loadtxt(
                osp.join(args.ronin_dir, dataset + ".txt"), delimiter=","
            )
            logging.info(
                f"Reading ronin data from {osp.join(args.ronin_dir, dataset)}.txt"
            )
        except OSError:
            ronin = np.loadtxt(
                osp.join(args.ronin_dir, dataset, "trajectory.txt"), delimiter=","
            )
            logging.info(
                f"Reading ronin data from {osp.join(args.ronin_dir, dataset,  'trajectory.txt')}"
            )

        ronin_ts = ronin[:, 0]
        ronin_p = ronin[:, 1:4]
        if ronin_ts[0] > ts[0]:
            ronin_ts = np.insert(ronin_ts, 0, ts[0])
            ronin_p = np.concatenate([ronin_p[0].reshape(1, 3), ronin_p], axis=0)
        if ronin_ts[-1] < ts[-1]:
            ronin_ts = np.append(ronin_ts, ts[-1])  # np.insert(..., -1, ...) would land before the last element
            ronin_p = np.concatenate([ronin_p, ronin_p[-1].reshape(1, 3)], axis=0)
        ronin_p = interp1d(ronin_ts, ronin_p, axis=0)(ts)

    # get vio states
    if args.plot_sim is False:
        if os.path.exists(os.path.join(results_folder, "vio_states.npy")):
            vio_states = np.load(os.path.join(results_folder, "vio_states.npy"))
            vio_euls = vio_states[:, :3]
            vio_p = vio_states[:, 3:6]
            vio_v = vio_states[:, 6:9]
            vio_ba = vio_states[:, 9:12]
            vio_bg = vio_states[:, 12:15]
            vio_disp = vio_states[:, 15:18]
            vio_ba_b = vio_states[:, 18:21]
            vio_bg_b = vio_states[:, 21:24]
            vio_accelScaleInv_flat = vio_states[:, 24:33]
            vio_gyroScaleInv_flat = vio_states[:, 33:42]
            vio_accelScaleInv = vio_accelScaleInv_flat.reshape((-1, 3, 3))
            vio_gyroScaleInv = vio_gyroScaleInv_flat.reshape((-1, 3, 3))
        else:
            vio_states = np.loadtxt(
                os.path.join(args.root_dir, dataset, "evolving_state.txt"),
                delimiter=",",
            )
            vio_calibs = np.loadtxt(
                os.path.join(args.root_dir, dataset, "calib_state.txt"), delimiter=","
            )
            vio_ts = vio_states[:, 0] * 1e-6
            vio_rq = vio_states[:, 1:5]
            vio_p = vio_states[:, 5:8]
            vio_v = vio_states[:, 8:11]
            vio_r = Rotation.from_quat(
                np.concatenate(
                    [vio_rq[:, 1:4], np.expand_dims(vio_rq[:, 0], axis=1)], axis=1
                )
            )
            vio_euls = vio_r.as_euler("xyz", degrees=True)
            vio_calib_ts = vio_calibs[:, 0] * 1e-6
            vio_accelScaleInv = vio_calibs[:, 1:10].reshape((-1, 3, 3))
            vio_gyroScaleInv = vio_calibs[:, 10:19].reshape((-1, 3, 3))
            vio_gyroGSense = vio_calibs[:, 19:28].reshape((-1, 3, 3))
            vio_ba = vio_calibs[:, 28:31]
            vio_bg = vio_calibs[:, 31:34]

            vio_pj_idx = np.searchsorted(vio_ts, ts) - 1
            vio_pi_idx = np.searchsorted(vio_ts, ts - args.displacement_time) - 1
            vio_pj = vio_p[vio_pj_idx, :]
            vio_pi = vio_p[vio_pi_idx, :]
            vio_disp = vio_pj - vio_pi

            vio_Ri = vio_r[vio_pi_idx].as_matrix()
            ri_z = Rotation.from_matrix(vio_Ri).as_euler("xyz")[:, 2]
            vio_Riz = Rotation.from_euler("z", ri_z).as_matrix()
            vio_Rizt = np.transpose(vio_Riz, (0, 2, 1))
            vio_disp = np.squeeze(
                np.matmul(vio_Rizt, np.expand_dims(vio_disp, axis=-1))
            )

            vio_uw_euls = unwrap_rpy(vio_euls)
            if vio_ts[0] > ts[0]:
                vio_ts = np.insert(vio_ts, 0, ts[0])
                vio_uw_euls = np.concatenate(
                    [vio_uw_euls[0].reshape(1, 3), vio_uw_euls], axis=0
                )
                vio_p = np.concatenate([vio_p[0].reshape(1, 3), vio_p], axis=0)
                vio_v = np.concatenate([vio_v[0].reshape(1, 3), vio_v], axis=0)
            if vio_ts[-1] < ts[-1]:
                vio_ts = np.append(vio_ts, ts[-1])
                vio_uw_euls = np.concatenate(
                    [vio_uw_euls, vio_uw_euls[-1].reshape(1, 3)], axis=0
                )
                vio_p = np.concatenate([vio_p, vio_p[-1].reshape(1, 3)], axis=0)
                vio_v = np.concatenate([vio_v, vio_v[-1].reshape(1, 3)], axis=0)
            vio_uw_euls_interp = interp1d(vio_ts, vio_uw_euls, axis=0)(ts)
            vio_euls = wrap_rpy(vio_uw_euls_interp)
            vio_p = interp1d(vio_ts, vio_p, axis=0)(ts)
            vio_v = interp1d(vio_ts, vio_v, axis=0)(ts)

            # This computes the bias in the non-scaled sensor frame (I think)
            vio_accelScaleInv_flat = vio_accelScaleInv.reshape((-1, 9))
            vio_gyroScaleInv_flat = vio_gyroScaleInv.reshape((-1, 9))
            if vio_calib_ts[0] > ts[0]:
                vio_calib_ts = np.insert(vio_calib_ts, 0, ts[0])
                vio_ba = np.concatenate([vio_ba[0].reshape(1, 3), vio_ba], axis=0)
                vio_bg = np.concatenate([vio_bg[0].reshape(1, 3), vio_bg], axis=0)
                vio_accelScaleInv_flat = np.concatenate(
                    [vio_accelScaleInv_flat[0].reshape(1, 9), vio_accelScaleInv_flat],
                    axis=0,
                )
                vio_gyroScaleInv_flat = np.concatenate(
                    [vio_gyroScaleInv_flat[0].reshape(1, 9), vio_gyroScaleInv_flat],
                    axis=0,
                )
            if vio_calib_ts[-1] < ts[-1]:
                vio_calib_ts = np.append(vio_calib_ts, ts[-1])
                vio_ba = np.concatenate([vio_ba, vio_ba[-1].reshape(1, 3)], axis=0)
                vio_bg = np.concatenate([vio_bg, vio_bg[-1].reshape(1, 3)], axis=0)
                vio_accelScaleInv_flat = np.concatenate(
                    [vio_accelScaleInv_flat, vio_accelScaleInv_flat[-1].reshape(1, 9)],
                    axis=0,
                )
                vio_gyroScaleInv_flat = np.concatenate(
                    [vio_gyroScaleInv_flat, vio_gyroScaleInv_flat[-1].reshape(1, 9)],
                    axis=0,
                )

            vio_ba = interp1d(vio_calib_ts, vio_ba, axis=0)(ts)
            vio_bg = interp1d(vio_calib_ts, vio_bg, axis=0)(ts)
            vio_accelScaleInv_flat = interp1d(
                vio_calib_ts, vio_accelScaleInv_flat, axis=0
            )(ts)
            vio_gyroScaleInv_flat = interp1d(
                vio_calib_ts, vio_gyroScaleInv_flat, axis=0
            )(ts)
            vio_accelScaleInv = vio_accelScaleInv_flat.reshape((-1, 3, 3))
            vio_gyroScaleInv = vio_gyroScaleInv_flat.reshape((-1, 3, 3))

            vio_ba_temp = np.expand_dims(vio_ba, axis=-1)
            vio_bg_temp = np.expand_dims(vio_bg, axis=-1)
            vio_ba_b = np.squeeze(
                np.matmul(np.linalg.inv(vio_accelScaleInv), vio_ba_temp)
            )
            vio_bg_b = np.squeeze(
                np.matmul(np.linalg.inv(vio_gyroScaleInv), vio_bg_temp)
            )

            if save_vio_states:
                vio_states = np.concatenate(
                    [
                        vio_euls,
                        vio_p,
                        vio_v,
                        vio_ba,
                        vio_bg,
                        vio_disp,
                        vio_ba_b,
                        vio_bg_b,
                        vio_accelScaleInv_flat,
                        vio_gyroScaleInv_flat,
                    ],
                    axis=1,
                )
                np.save(os.path.join(results_folder, "vio_states.npy"), vio_states)
            else:
                logging.warning(
                    "Not saving vio_states.npy because traj is still processing"
                )

    # get simulation states
    if args.plot_sim:
        sim_data = np.loadtxt(
            args.sim_data_path,
            delimiter=",",
            usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16),
        )
        sim_ts = sim_data[:, 0]
        sim_p = sim_data[:, 1:4]
        sim_rq = sim_data[:, 4:8]
        sim_v = sim_data[:, 11:14]
        # acc_all = sim_data[:,8:11]
        # gyr_all = sim_data[:,14:17]
        sim_r = Rotation.from_quat(sim_rq)
        sim_euls = sim_r.as_euler("xyz", degrees=True)

        sim_pj_idx = np.searchsorted(sim_ts, ts) - 1
        sim_pi_idx = np.searchsorted(sim_ts, ts - args.displacement_time) - 1
        sim_pj = sim_p[sim_pj_idx, :]
        sim_pi = sim_p[sim_pi_idx, :]
        sim_disp = sim_pj - sim_pi

        ri_z = sim_r[sim_pi_idx].as_euler("xyz")[:, 2]  # as_matrix() returns an ndarray, which has no as_euler
        sim_Riz = Rotation.from_euler("z", ri_z).as_matrix()
        sim_Rizt = np.transpose(sim_Riz, (0, 2, 1))
        sim_disp = np.squeeze(np.matmul(sim_Rizt, np.expand_dims(sim_disp, axis=-1)))

        sim_uw_euls = unwrap_rpy(sim_euls)
        if sim_ts[0] > ts[0]:
            sim_ts = np.insert(sim_ts, 0, ts[0])
            sim_uw_euls = np.concatenate(
                [sim_uw_euls[0].reshape(1, 3), sim_uw_euls], axis=0
            )
            sim_p = np.concatenate([sim_p[0].reshape(1, 3), sim_p], axis=0)
            sim_v = np.concatenate([sim_v[0].reshape(1, 3), sim_v], axis=0)
        if sim_ts[-1] < ts[-1]:
            sim_ts = np.append(sim_ts, ts[-1])
            sim_uw_euls = np.concatenate(
                [sim_uw_euls, sim_uw_euls[-1].reshape(1, 3)], axis=0
            )
            sim_p = np.concatenate([sim_p, sim_p[-1].reshape(1, 3)], axis=0)
            sim_v = np.concatenate([sim_v, sim_v[-1].reshape(1, 3)], axis=0)
        sim_uw_euls_interp = interp1d(sim_ts, sim_uw_euls, axis=0)(ts)
        sim_euls = wrap_rpy(sim_uw_euls_interp)
        sim_p = interp1d(sim_ts, sim_p, axis=0)(ts)
        sim_v = interp1d(sim_ts, sim_v, axis=0)(ts)
        sim_ba = np.zeros((ts.shape[0], 3)) + np.array([0.3, -0.2, 0.4])
        sim_bg = np.zeros((ts.shape[0], 3)) + np.array([0.0005, 0.002, -0.001])

    if args.plot_sim:
        ref_type = "sim"
        ref_p = sim_p
        ref_v = sim_v
        ref_bg = sim_bg
        ref_ba = sim_ba
        ref_euls = sim_euls
        ref_disp = sim_disp
    else:
        ref_type = "vio"
        ref_p = vio_p
        ref_v = vio_v
        ref_bg = vio_bg
        ref_ba = vio_ba
        ref_euls = vio_euls
        ref_disp = vio_disp

    # obtain biases in the body frame in the same unit
    attitude_filter_path = osp.join(args.root_dir, dataset, "calib_state.txt")
    (
        init_gyroScaleInv,
        init_gyroBias,
        init_gyroGSense,
        init_accelScaleInv,
        init_accelBias,
    ) = load_aekf_calibration(attitude_filter_path)

    if args.body_bias:
        ba_temp = np.expand_dims(ba, axis=-1)
        bg_temp = np.expand_dims(bg, axis=-1)
        ba_b = np.squeeze(np.matmul(np.linalg.inv(init_accelScaleInv), ba_temp))
        bg_b = np.squeeze(np.matmul(np.linalg.inv(init_gyroScaleInv), bg_temp))
        ba = ba_b
        bg = bg_b

    # load aekf rotation
    aekf_ts, aekf_R = load_aekf_rotation(attitude_filter_path)
    aekf_euls = unwrap_rpy(aekf_R.as_euler("xyz", degrees=True))
    # plotting
    N = ts.shape[0]
    start_idx = 2000  # 2 s
    end_idx = N - 1
    start_ts = ts[start_idx]
    end_ts = ts[end_idx]

    # align diverse trajectory sources
    ts = ts - start_ts
    aekf_ts = aekf_ts - start_ts

    try:
        # align at start_ts
        aekf_euls[:, 2] -= (
            interp1d(aekf_ts, aekf_euls[:, 2])(ts[start_idx]) - ref_euls[start_idx, 2]
        )
    except ValueError:
        # if we can't align at first aekf timestamp
        aekf_euls[:, 2] -= aekf_euls[0, 2] - interp1d(ts, ref_euls[:, 2])(aekf_ts[0])

    aekf_euls_time_aligned = interp1d(
        aekf_ts, aekf_euls, axis=0, bounds_error=False, fill_value=np.nan
    )(ts)
    aekf_euls_time_aligned = wrap_rpy(aekf_euls_time_aligned)

    if args.ronin_dir is not None:
        ronin_p = ronin_p - (ronin_p[start_idx, :] - ref_p[start_idx, :])

    ps = ps - (ps[start_idx, :] - ref_p[start_idx, :])
    euls[:, 2] = euls[:, 2] - (euls[start_idx, 2] - ref_euls[start_idx, 2])
    euls = wrap_rpy(euls)

    ps_gt = ref_p[start_idx:end_idx, :]
    euls_gt = ref_euls[start_idx:end_idx, :]
    euls_aekf = aekf_euls_time_aligned[start_idx:end_idx, :]

    # metrics computation
    metric_map = {"filter": {}, "ronin": {}}

    # align on first error, WHY IS THAT?
    ps_filter = ps[start_idx:end_idx, :]
    euls_filter = euls[start_idx:end_idx, :]
    ps_filter = ps_filter - (ps_filter[0, :] - ps_gt[0, :])

    # get drift and ATE
    ps_diff = ps_gt[1:, :] - ps_gt[:-1, :]
    traj_length = np.sum(np.linalg.norm(ps_diff, axis=1))
    drift_filter = np.linalg.norm(ps_filter[-1, :] - ps_gt[-1, :])
    angular_drift_filter = np.linalg.norm(euls[-1, 2] - ref_euls[-1, 2])

    filter_heading_error = wrap_rpy(euls - ref_euls)[:, 2]
    ate_filter = np.sqrt(np.mean(np.linalg.norm(ps_filter - ps_gt, axis=1) ** 2))
    metric_map["filter"]["drift_ratio"] = drift_filter / traj_length
    metric_map["filter"]["ate"] = ate_filter
    metric_map["filter"]["mhe"] = np.sqrt(
        np.nansum(filter_heading_error ** 2)
        / np.count_nonzero(~(np.isnan(filter_heading_error)))
    )
    metric_map["filter"]["angular_drift_deg_hour"] = (
        angular_drift_filter / ts.max() * 3600
    )
    logging.info(f"drift of filter {metric_map['filter']['drift_ratio']}")
    logging.info(f"ATE of filter {metric_map['filter']['ate']}")
    logging.info(f"Mean Heading error of filter {metric_map['filter']['mhe']}")

    def compute_rpe_filter(ns_rpe):
        rpe_rmse, rpe_rmse_z, relative_yaw_rmse = compute_rpe_distance(
            ns_rpe, ps_filter, ps_gt, euls_filter[:, [2]], euls_gt[:, [2]]
        )
        logging.info(f"RPE RMSE of filter over {1e-3*ns_rpe}s: {rpe_rmse}")
        logging.info(f"RPE RMSE Z of filter over {1e-3*ns_rpe}s: {rpe_rmse_z}")
        logging.info(f"RPE RMSE Yaw of filter over {1e-3*ns_rpe}s: {relative_yaw_rmse}")
        metric_map["filter"]["rpe_rmse_" + str(ns_rpe)] = rpe_rmse
        metric_map["filter"]["rpe_rmse_z_" + str(ns_rpe)] = rpe_rmse_z
        metric_map["filter"]["relative_yaw_rmse_" + str(ns_rpe)] = relative_yaw_rmse

    if args.rpe_1 is True:
        compute_rpe_filter(1000)  # 1 s
    if args.rpe_10 is True:
        compute_rpe_filter(10000)  # 10 s
    if args.rpe_100 is True:
        compute_rpe_filter(100000)  # 100 s

    if args.ronin_dir is not None:
        ps_ronin = ronin_p[start_idx:end_idx, :]
        ps_ronin = ps_ronin - (ps_ronin[0, :] - ps_gt[0, :])
        drift_ronin = np.linalg.norm(ps_ronin[-1, :] - ps_gt[-1, :])
        ate_ronin = np.sqrt(np.mean(np.linalg.norm(ps_ronin - ps_gt, axis=1) ** 2))
        angular_drift_ronin = np.linalg.norm(
            aekf_euls_time_aligned[-1, 2] - ref_euls[-1, 2]
        )
        heading_error_ronin = wrap_rpy(aekf_euls_time_aligned - ref_euls)[:, 2]
        metric_map["ronin"]["drift_ratio"] = drift_ronin / traj_length
        metric_map["ronin"]["ate"] = ate_ronin

        metric_map["ronin"]["mhe"] = np.sqrt(
            np.nansum(heading_error_ronin ** 2)
            / np.count_nonzero(~(np.isnan(heading_error_ronin)))
        )
        metric_map["ronin"]["angular_drift_deg_hour"] = (
            angular_drift_ronin / ts.max() * 3600
        )
        logging.info(f"drift of ronin {metric_map['ronin']['drift_ratio']}")
        logging.info(f"ATE of ronin {metric_map['ronin']['ate']}")
        logging.info(f"Mean Heading error of ronin {metric_map['ronin']['mhe']}")

        def compute_rpe_ronin(ns_rpe):
            rpe_rmse, rpe_rmse_z, relative_yaw_rmse = compute_rpe(
                ns_rpe, ps_ronin, ps_gt, euls_aekf[:, [2]], euls_gt[:, [2]]
            )
            logging.info(f"RPE RMSE of ronin over 1s: {rpe_rmse}")
            logging.info(f"RPE RMSE Z of ronin over 1s: {rpe_rmse_z}")
            logging.info(f"RPE RMSE Yaw of ronin over 1s: {relative_yaw_rmse}")
            metric_map["ronin"]["rpe_rmse_" + str(ns_rpe)] = rpe_rmse
            metric_map["ronin"]["rpe_rmse_z_" + str(ns_rpe)] = rpe_rmse_z
            metric_map["ronin"]["relative_yaw_rmse_" + str(ns_rpe)] = relative_yaw_rmse

        if args.rpe_1 is True:
            compute_rpe_ronin(1000)
        if args.rpe_10 is True:
            compute_rpe_ronin(10000)
        if args.rpe_100 is True:
            compute_rpe_ronin(100000)

    if args.make_plots is False:
        return metric_map

    # Plot results
    idxs = slice(start_idx, end_idx)

    if not args.plot_sim:
        plt.figure("Calibration accelerometer vio vs init")
        plt.plot(ts, vio_accelScaleInv[:, 0, 0], label="1")
        plt.plot(ts, vio_accelScaleInv[:, 1, 1], label="2")
        plt.plot(ts, vio_accelScaleInv[:, 2, 2], label="3")
        plt.plot(ts, vio_accelScaleInv[:, 0, 1], label="12")
        plt.plot(ts, vio_accelScaleInv[:, 0, 2], label="13")
        plt.plot(ts, vio_accelScaleInv[:, 1, 2], label="23")
        plt.plot(
            ts,
            np.ones_like(vio_accelScaleInv[:, 0, 0]) * init_accelScaleInv[0, 0],
            label="1",
        )
        plt.plot(
            ts,
            np.ones_like(vio_accelScaleInv[:, 1, 1]) * init_accelScaleInv[1, 1],
            label="2",
        )
        plt.plot(
            ts,
            np.ones_like(vio_accelScaleInv[:, 2, 2]) * init_accelScaleInv[2, 2],
            label="2",
        )
        plt.plot(
            ts,
            np.ones_like(vio_accelScaleInv[:, 0, 1]) * init_accelScaleInv[0, 1],
            label="12",
        )
        plt.plot(
            ts,
            np.ones_like(vio_accelScaleInv[:, 0, 2]) * init_accelScaleInv[0, 2],
            label="13",
        )
        plt.plot(
            ts,
            np.ones_like(vio_accelScaleInv[:, 1, 2]) * init_accelScaleInv[1, 2],
            label="23",
        )
        plt.grid(True)
        plt.legend(loc="upper center")
        plt.figure()
        g = np.einsum("kip,p->ki", vio_accelScaleInv, [0, 9.81, 0])
        plt.plot(ts, g[:, 1])
        plt.figure("Calibration gyrometer vio vs init")
        plt.plot(ts, vio_gyroScaleInv[:, 0, 0], label="1")
        plt.plot(ts, vio_gyroScaleInv[:, 1, 1], label="2")
        plt.plot(ts, vio_gyroScaleInv[:, 2, 2], label="3")
        plt.plot(ts, vio_gyroScaleInv[:, 0, 1], label="12")
        plt.plot(ts, vio_gyroScaleInv[:, 0, 2], label="13")
        plt.plot(ts, vio_gyroScaleInv[:, 1, 2], label="23")
        plt.plot(
            ts,
            np.ones_like(vio_gyroScaleInv[:, 0, 0]) * init_gyroScaleInv[0, 0],
            label="1",
        )
        plt.plot(
            ts,
            np.ones_like(vio_gyroScaleInv[:, 1, 1]) * init_gyroScaleInv[1, 1],
            label="2",
        )
        plt.plot(
            ts,
            np.ones_like(vio_gyroScaleInv[:, 2, 2]) * init_gyroScaleInv[2, 2],
            label="2",
        )
        plt.plot(
            ts,
            np.ones_like(vio_gyroScaleInv[:, 0, 1]) * init_gyroScaleInv[0, 1],
            label="12",
        )
        plt.plot(
            ts,
            np.ones_like(vio_gyroScaleInv[:, 0, 2]) * init_gyroScaleInv[0, 2],
            label="13",
        )
        plt.plot(
            ts,
            np.ones_like(vio_gyroScaleInv[:, 1, 2]) * init_gyroScaleInv[1, 2],
            label="23",
        )
        plt.grid(True)
        plt.legend(loc="upper center")

    fig14 = plt.figure("position-2d")
    plt.plot(ps[idxs, 0], ps[idxs, 1], label="filter", color=color_filter)
    # plt.plot(ps_dr[idxs, 0], ps_dr[idxs, 1], label="filter_dr")
    plt.plot(ref_p[idxs, 0], ref_p[idxs, 1], label=ref_type, color=color_vio)
    if args.ronin_dir is not None:
        plt.plot(ronin_p[idxs, 0], ronin_p[idxs, 1], label="ronin", color=color_ronin)
    plt.legend(loc="upper center")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.grid(True)
    plt.title("position-2d")
    plt.gca().set_aspect("equal")

    fig15 = plt.figure("position-3d")
    ax = fig15.add_subplot(111, projection="3d")
    plt.plot(ps[idxs, 0], ps[idxs, 1], ps[idxs, 2], label="filter", color=color_filter)
    # plt.plot(ps_dr[idxs, 0], ps_dr[idxs, 1], ps_dr[idxs, 2], label="filter_dr")
    plt.plot(
        ref_p[idxs, 0], ref_p[idxs, 1], ref_p[idxs, 2], label="vio", color=color_vio
    )
    if args.ronin_dir is not None:
        plt.plot(
            ronin_p[idxs, 0],
            ronin_p[idxs, 1],
            ronin_p[idxs, 2],
            label="ronin",
            color=color_ronin,
        )
    set_axes_equal(ax)

    fig1 = plot_state_euclidean(
        "position",
        "filter",
        ["p_x", "p_y", "p_z"],
        ts[idxs],
        ps[idxs, :],
        sigma_p[idxs, :],
        color=color_filter,
    )
    fig2 = plot_state_euclidean(
        "velocity",
        "filter",
        ["v_x", "v_y", "v_z"],
        ts[idxs],
        vs[idxs, :],
        sigma_v[idxs, :],
        color=color_filter,
    )
    fig3 = plot_state_euclidean(
        "attitude",
        "filter",
        ["eul_x", "eul_y", "eul_z"],
        ts[idxs],
        euls[idxs, :],
        sigma_r[idxs, :],
        color=color_filter,
    )
    fig4 = plot_state_euclidean(
        "accel bias",
        "filter",
        ["ba_x", "ba_y", "ba_z"],
        ts[idxs],
        ba[idxs, :],
        sigma_ba[idxs, :],
        color=color_filter,
    )

    fig5 = plot_state_euclidean(
        "gyro bias",
        "filter",
        ["bg_x", "bg_y", "bg_z"],
        ts[idxs],
        bg[idxs, :],
        sigma_bg[idxs, :],
        color=color_filter,
    )
    # plot integrated speed output
    # plot_state_euclidean(
    #     "position",
    #     "filter_dr",
    #     ["p_x", "p_y", "p_z"],
    #     ts[idxs],
    #     ps_dr[idxs, :]
    # )
    # plot reference traj
    plot_state_euclidean(
        "position",
        ref_type,
        ["p_x", "p_y", "p_z"],
        ts[idxs],
        ref_p[idxs, :],
        color=color_vio,
    )
    plot_state_euclidean(
        "velocity",
        ref_type,
        ["v_x", "v_y", "v_z"],
        ts[idxs],
        ref_v[idxs, :],
        color=color_vio,
    )
    plot_state_euclidean(
        "attitude",
        ref_type,
        ["eul_x", "eul_y", "eul_z"],
        ts[idxs],
        ref_euls[idxs, :],
        color=color_vio,
    )
    plot_state_euclidean(
        "accel bias",
        ref_type,
        ["ba_x", "ba_y", "ba_z"],
        ts[idxs],
        ref_ba[idxs, :],
        color=color_vio,
    )
    plot_state_euclidean(
        "accel bias",
        ref_type + "b",
        ["ba_x", "ba_y", "ba_z"],
        ts[idxs],
        vio_ba_b[idxs, :],
        color=color_vio,
    )
    plot_state_euclidean(
        "gyro bias",
        ref_type,
        ["bg_x", "bg_y", "bg_z"],
        ts[idxs],
        ref_bg[idxs, :],
        color=color_vio,
    )
    plot_state_euclidean(
        "gyro bias",
        ref_type + "b",
        ["bg_x", "bg_y", "bg_z"],
        ts[idxs],
        vio_bg_b[idxs, :],
        color=color_vio,
    )
    # plot RONIN
    if args.ronin_dir is not None:
        plot_state_euclidean(
            "position",
            "ronin",
            ["p_x", "p_y", "p_z"],
            ts[idxs],
            ronin_p[idxs, :],
            color=color_ronin,
        )
        plot_state_euclidean(
            "attitude",
            "aekf",
            ["eul_x", "eul_y", "eul_z"],
            ts[idxs],
            aekf_euls_time_aligned[idxs, :],
            color=color_ronin,
        )

    # compute values at update rate
    idx_update = ~np.isnan(innos[idxs, :]).any(axis=1)
    ts_update = ts[idxs][idx_update]
    innos_update = innos[idxs, :][idx_update, :]
    innos_sigma_update = inno_sigma[idxs, :][idx_update, :]
    meas_update = meas[idxs, :][idx_update, :]
    meas_sigma_update = meas_sigma[idxs, :][idx_update, :]
    pred_update = pred[idxs, :][idx_update, :]
    ref_disp_update = ref_disp[idxs, :][idx_update, :]
    fig6 = plot_error_euclidean(
        "innovation (m)",
        "filter",
        ["x", "y", "z"],
        ts_update,
        innos_update,
        sigma=innos_sigma_update,
    )

    # plot measurement
    # compute norm for visualization
    meas_plus_nom = np.hstack(
        (meas_update, np.atleast_2d(np.linalg.norm(meas_update, axis=1)).T)
    )
    pred_plus_nom = np.hstack(
        (pred_update, np.atleast_2d(np.linalg.norm(pred_update, axis=1)).T)
    )
    ref_disp_plus_norm = np.hstack(
        (ref_disp_update, np.atleast_2d(np.linalg.norm(ref_disp_update, axis=1)).T)
    )
    meas_sigma_plus_norm = np.hstack(
        (meas_sigma_update, np.zeros((meas_update.shape[0], 1)) * np.nan)
    )

    fig7 = plot_state_euclidean(
        "displacement measurement vs target",
        "meas",
        ["x", "y", "z", "norm"],
        ts_update,
        meas_plus_nom,
        sigma=meas_sigma_plus_norm,
        color=color_ronin,
    )
    plot_state_euclidean(
        "displacement measurement vs target",
        "pred",
        ["x", "y", "z", "norm"],
        ts_update,
        pred_plus_nom,
        color=color_filter,
    )
    plot_state_euclidean(
        "displacement measurement vs target",
        ref_type,
        ["x", "y", "z", "norm"],
        ts_update,
        ref_disp_plus_norm,
        color=color_vio,
    )

    # Plot the error in covariance

    meas_err_update = meas_update - ref_disp_update
    fig8 = plot_error_euclidean(
        "displace measurement errors",
        "meas-err",
        ["x", "y", "z"],
        ts_update,
        meas_err_update,
        meas_sigma_update,
    )
    plot_error_euclidean(
        "displace measurement errors",
        "pred-err",
        ["x", "y", "z"],
        ts_update,
        pred_update - ref_disp_update,
        meas_sigma_update,
    )
    for ax in fig8.axes:
        ax.set_ylim([-0.5, 0.5])

    # display measurement error autocorrelation (SLOW)
    fig8b = plt.figure("autocorrelation of measurement error")
    logging.warning("We assume an update frequency of 20 Hz for the autocorrelation")
    for i in range(3):
        plt.subplot(3, 1, i + 1)
        plt.acorr(meas_err_update[:, i], maxlags=100, lw=2)
        locs, labels = plt.xticks()  # Get locations and labels
        for (l, t) in zip(locs, labels):
            t.set_text(str(l / 20.0) + "s")
        plt.xticks(locs, labels)  # Set locations and labels
        plt.xlim(left=0)
        plt.grid()

    fig9 = plot_error_euclidean(
        "position error",
        ref_type,
        ["p_x", "p_y", "p_z"],
        ts[idxs],
        ps[idxs, :] - ref_p[idxs, :],
        sigma_p[idxs, :],
    )
    fig10 = plot_error_euclidean(
        "velocity error",
        ref_type,
        ["v_x", "v_y", "v_z"],
        ts[idxs],
        vs[idxs, :] - ref_v[idxs, :],
        sigma_v[idxs, :],
    )

    euls_err = wrap_rpy(euls - ref_euls)
    fig11 = plot_error_euclidean(
        "attitude error",
        ref_type,
        ["eulx", "euly", "eulz"],
        ts[idxs],
        euls_err[idxs, :],
        sigma_r[idxs, :],
    )

    fig12 = plot_error_euclidean(
        "accel bias error",
        ref_type,
        ["ba_x", "ba_y", "ba_z"],
        ts[idxs],
        ba[idxs, :] - ref_ba[idxs, :],
        sigma_ba[idxs, :],
    )
    fig13 = plot_error_euclidean(
        "gyros bias error",
        ref_type,
        ["bg_x", "bg_y", "bg_z"],
        ts[idxs],
        bg[idxs, :] - ref_bg[idxs, :],
        sigma_bg[idxs, :],
    )

    if args.save_fig:
        if not save_vio_states:
            logging.warning("Not saving figure because, the filter did not finish")
        else:
            logging.info("Saving figures")
            for save_as in ["png", "svg"]:
                fig1.savefig(osp.join(results_folder, "position." + save_as))
                fig2.savefig(osp.join(results_folder, "velocity." + save_as))
                fig3.savefig(osp.join(results_folder, "rotation." + save_as))
                fig4.savefig(osp.join(results_folder, "acc-bias." + save_as))
                fig5.savefig(osp.join(results_folder, "gyr-bias." + save_as))
                fig6.savefig(osp.join(results_folder, "innovation." + save_as))
                fig7.savefig(osp.join(results_folder, "displacement." + save_as))
                fig8.savefig(osp.join(results_folder, "displacement-err." + save_as))
                fig9.savefig(osp.join(results_folder, "position-err." + save_as))
                fig10.savefig(osp.join(results_folder, "velocity-err." + save_as))
                fig11.savefig(osp.join(results_folder, "rotation-err." + save_as))
                fig12.savefig(osp.join(results_folder, "acc-bias-err." + save_as))
                fig13.savefig(osp.join(results_folder, "gyr-bias-err." + save_as))
                fig14.savefig(osp.join(results_folder, "position-2d." + save_as))
                fig15.savefig(osp.join(results_folder, "position-3d." + save_as))
            # # also save as pickle just in case (TOO LARGE)
            # import pickle as pl
            # pl.dump(fig1, open(osp.join(results_folder, "position.pickle"),"wb"))
            # pl.dump(fig2, open(osp.join(results_folder, "velocity.pickle"),"wb"))
            # pl.dump(fig3, open(osp.join(results_folder, "rotation.pickle"),"wb"))
            # pl.dump(fig4, open(osp.join(results_folder, "acc-bias.pickle"),"wb"))
            # pl.dump(fig5, open(osp.join(results_folder, "gyr-bias.pickle"),"wb"))
            # pl.dump(fig6, open(osp.join(results_folder, "innovation.pickle"),"wb"))
            # pl.dump(fig7, open(osp.join(results_folder, "displacement.pickle"),"wb"))
            # pl.dump(fig8, open(osp.join(results_folder, "displacement-err.pickle"),"wb"))
            # pl.dump(fig9, open(osp.join(results_folder, "position-err.pickle"),"wb"))
            # pl.dump(fig10, open(osp.join(results_folder, "velocity-err.pickle"),"wb"))
            # pl.dump(fig11, open(osp.join(results_folder, "rotation-err.pickle"),"wb"))
            # pl.dump(fig12, open(osp.join(results_folder, "acc-bias-err.pickle"),"wb"))
            # pl.dump(fig13, open(osp.join(results_folder, "gyr-bias-err.pickle"),"wb"))
            # pl.dump(fig14, open(osp.join(results_folder, "position-2d.pickle"),"wb"))
            # pl.dump(fig15, open(osp.join(results_folder, "position-3d.pickle"),"wb"))

    else:
        logging.warning("Not saving figure")

    if args.display_fig:
        fig_state = [fig1, fig2, fig3, fig4, fig5, fig14, fig15]
        fig_innovation = [fig6, fig7, fig8]
        fig_err = [fig9, fig10, fig11, fig12, fig13]

        # [plt.close(f) for f in fig_state]
        # [plt.close(f) for f in fig_innovation]
        # [plt.close(f) for f in fig_err]
        import matplotlib

        fig_button = plt.figure("continue")
        axcontinue = plt.axes([0, 0.0, 0.5, 0.5])
        bcontinue = matplotlib.widgets.Button(axcontinue, "Continue")
        bcontinue.on_clicked(lambda d: plt.close("all"))
        ax3d = plt.axes([0.5, 0.5, 0.5, 0.5])
        b3d = matplotlib.widgets.Button(ax3d, "state")
        b3d.on_clicked(lambda d: [f.show() for f in fig_state])
        axerror = plt.axes([0, 0.5, 0.5, 0.5])
        berror = matplotlib.widgets.Button(axerror, "error")
        berror.on_clicked(lambda d: [f.show() for f in fig_err])
        axinnov = plt.axes([0.5, 0, 0.5, 0.5])
        binnov = matplotlib.widgets.Button(axinnov, "innov")
        binnov.on_clicked(lambda d: [f.show() for f in fig_innovation])
        fig15.show()

        plt.show()

    return metric_map
Code example #22
 def add_watch(self, dir):
     if self.fun is not None:
         logging.info('Adding watcher for : %s', dir)
         observer = Observer()
         observer.schedule(MyHandler(self.fun), path=dir, recursive=True)
         self.__observers.append(observer)
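For context, a minimal hedged sketch of how such watchdog observers are typically driven once add_watch has scheduled them (the MyHandler wrapper mirrors what the snippet implies but is an assumption, since its definition is not shown):

import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

class MyHandler(FileSystemEventHandler):
    """Forward every filesystem event to a user callback (assumed shape)."""
    def __init__(self, fun):
        self.fun = fun

    def on_any_event(self, event):
        self.fun(event)

observer = Observer()
observer.schedule(MyHandler(print), path=".", recursive=True)
observer.start()   # observers run in their own thread
try:
    time.sleep(5)  # ... application work ...
finally:
    observer.stop()
    observer.join()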
Code example #23
File: plot_filter_state.py Project: ori-drs/TLIO
    parser.add_argument("--rpe_10", type=bool, default=True)
    parser.add_argument("--rpe_100", type=bool, default=False)

    subparsers = parser.add_subparsers()
    parser_bias = subparsers.add_parser("bias")
    parser_bias.set_defaults(func=compare_biases)

    args = parser.parse_args()
    print(vars(args))

    try:
        args.func(args)
    except AttributeError:
        pass

    # default plot
    if args.plot_sim is False:
        with open(args.data_list) as f:
            data_names = [
                s.strip().split("," or " ")[0]
                for s in f.readlines()
                if len(s) > 0 and s[0] != "#"
            ]
        if args.dataset_number is not None:
            logging.info(f"Plotting dataset {data_names[args.dataset_number]}")
            run(args, data_names[args.dataset_number])
        else:
            run_on_each_dataset_and_gather_metrics(args, data_names)
    else:
        pass
Code example #24
File: imu_tracker.py Project: zeta1999/TLIO
    def __init__(
        self,
        model_path,
        model_param_path,
        update_freq,
        filter_tuning,
        imu_calib: Optional[ImuCalib] = None,
        force_cpu=False,
    ):

        config_from_network = dotdict({})
        with open(model_param_path) as json_file:
            data_json = json.load(json_file)
            config_from_network["imu_freq_net"] = data_json["imu_freq"]
            config_from_network["past_time"] = data_json["past_time"]
            config_from_network["window_time"] = data_json["window_time"]
            config_from_network["arch"] = data_json["arch"]

        # frequencies and sizes conversion
        if not (config_from_network.past_time *
                config_from_network.imu_freq_net).is_integer():
            raise ValueError(
                "past_time cannot be represented by integer number of IMU data."
            )
        if not (config_from_network.window_time *
                config_from_network.imu_freq_net).is_integer():
            raise ValueError(
                "window_time cannot be represented by integer number of IMU data."
            )
        # imu frequency as input to the network
        self.imu_freq_net = config_from_network.imu_freq_net
        self.past_data_size = int(config_from_network.past_time *
                                  config_from_network.imu_freq_net)
        self.disp_window_size = int(config_from_network.window_time *
                                    config_from_network.imu_freq_net)
        self.net_input_size = self.disp_window_size + self.past_data_size

        # EXAMPLE:
        # with 200 samples per window and a step size of 10 samples, inference runs
        # at 20 Hz and each update relates clones separated by
        # update_distance_num_clone - 1 = 19 intermediate clones; the same holds for
        # 400 samples with 200 past samples and clone_every_n_netimu_sample = 10.
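        # A worked check with illustrative numbers (not from any specific config):
        #   imu_freq_net = 200 Hz, update_freq = 20 Hz, window_time = 1.0 s
        #   clone_every_n_netimu_sample = 200 / 20 = 10 interpolated samples per update
        #   update_distance_num_clone   = 1.0 * 20 = 20 clones spanned by one window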
        if not (config_from_network.imu_freq_net / update_freq).is_integer():
            raise ValueError("imu_freq_net must be a multiple of update_freq.")
        if not (config_from_network.window_time * update_freq).is_integer():
            raise ValueError(
                "window_time cannot be represented by integer number of updates."
            )
        self.update_freq = update_freq
        self.clone_every_n_netimu_sample = int(
            config_from_network.imu_freq_net /
            update_freq)  # network inference/filter update interval
        assert (config_from_network.imu_freq_net % update_freq == 0
                )  # imu frequency must be a multiple of update frequency
        self.update_distance_num_clone = int(config_from_network.window_time *
                                             update_freq)

        # time
        self.dt_interp_us = int(1.0 / self.imu_freq_net * 1e6)
        self.dt_update_us = int(1.0 / self.update_freq *
                                1e6)  # multiple of interpolation interval

        # logging
        logging.info(
            f"Network Input Time: {config_from_network.past_time + config_from_network.window_time} = {config_from_network.past_time} + {config_from_network.window_time} (s)"
        )
        logging.info(
            f"Network Input size: {self.net_input_size} = {self.past_data_size} + {self.disp_window_size} (samples)"
        )
        logging.info("IMU interpolation frequency: %s (Hz)" %
                     self.imu_freq_net)
        logging.info("Measurement update frequency: %s (Hz)" %
                     self.update_freq)
        logging.info("Filter update stride state number: %i" %
                     self.update_distance_num_clone)
        logging.info(
            f"Interpolating IMU measurement every {self.dt_interp_us}us for the network input"
        )

        # IMU initial calibration
        self.icalib = imu_calib
        # MSCKF
        self.filter = ImuMSCKF(filter_tuning)

        net_config = {
            "in_dim": (self.past_data_size + self.disp_window_size) // 32 + 1
        }
        self.meas_source = MeasSourceNetwork(model_path,
                                             config_from_network["arch"],
                                             net_config, force_cpu)
        # self.meas_source = MeasSourceTorchScript(model_path, force_cpu)

        self.imu_buffer = ImuBuffer()

        # This callback is called at the first update, if set
        self.callback_first_update = None
        # This callback can be used to bypass the network when producing measurements
        self.debug_callback_get_meas = None

        # keep track of past timestamp and measurement
        self.last_t_us, self.last_acc, self.last_gyr = -1, None, None
        self.next_interp_t_us = None
        self.next_aug_t_us = None
        self.has_done_first_update = False
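For reference, a construction sketch using only the signature shown above; the paths, update rate, and tuning object are placeholders, not values taken from the TLIO project:

filter_tuning = ...                      # an ImuMSCKF tuning object, defined elsewhere in TLIO

tracker = ImuTracker(
    model_path="network.pt",             # trained displacement network (placeholder path)
    model_param_path="parameters.json",  # JSON providing imu_freq/past_time/window_time/arch
    update_freq=20,                      # Hz; imu_freq_net must be a multiple of this
    filter_tuning=filter_tuning,
    imu_calib=None,                      # or an ImuCalib instance
    force_cpu=True,
)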
Code example #25
File: train.py    Project: zeta1999/TLIO
def stop_signal_handler(args, epoch, network, optimizer, signal, frame):
    logging.info("-" * 30)
    logging.info("Early terminate")
    save_model(args, epoch, network, optimizer, interrupt=True)
    sys.exit()
Code example #26
File: train.py    Project: zeta1999/TLIO
def net_train(args):
    """
    Main function for network training
    """

    try:
        if args.root_dir is None:
            raise ValueError("root_dir must be specified.")
        if args.train_list is None:
            raise ValueError("train_list must be specified.")
        if args.out_dir is not None:
            if not osp.isdir(args.out_dir):
                os.makedirs(args.out_dir)
            if not osp.isdir(osp.join(args.out_dir, "checkpoints")):
                os.makedirs(osp.join(args.out_dir, "checkpoints"))
            if not osp.isdir(osp.join(args.out_dir, "logs")):
                os.makedirs(osp.join(args.out_dir, "logs"))
            with open(
                os.path.join(args.out_dir, "parameters.json"), "w"
            ) as parameters_file:
                parameters_file.write(json.dumps(vars(args), sort_keys=True, indent=4))
            logging.info(f"Training output writes to {args.out_dir}")
        else:
            raise ValueError("out_dir must be specified.")
        if args.val_list is None:
            logging.warning("val_list is not specified.")
        if args.continue_from is not None:
            if osp.exists(args.continue_from):
                logging.info(
                    f"Continue training from existing model {args.continue_from}"
                )
            else:
                raise ValueError(
                    f"continue_from model file path {args.continue_from} does not exist"
                )
        data_window_config, net_config = arg_conversion(args)
    except ValueError as e:
        logging.error(e)
        return

    # Display
    np.set_printoptions(formatter={"all": "{:.6f}".format})
    logging.info(f"Training/testing with {args.imu_freq} Hz IMU data")
    logging.info(
        f"Size: {data_window_config['past_data_size']}"
        f"+{data_window_config['window_size']}"
        f"+{data_window_config['future_data_size']}, "
        f"Time: {args.past_time}+{args.window_time}+{args.future_time}"
    )
    logging.info("Perturb on bias: %s" % args.do_bias_shift)
    logging.info("Perturb on gravity: %s" % args.perturb_gravity)
    logging.info("Sample frequency: %s" % args.sample_freq)

    train_loader, val_loader = None, None
    start_t = time.time()
    train_list = get_datalist(args.train_list)
    try:
        train_dataset = FbSequenceDataset(
            args.root_dir, train_list, args, data_window_config, mode="train"
        )
        train_loader = DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=True
        )
    except OSError as e:
        logging.error(e)
        return
    end_t = time.time()
    logging.info(f"Training set loaded. Loading time: {end_t - start_t:.3f}s")
    logging.info(f"Number of train samples: {len(train_dataset)}")

    if args.val_list is not None:
        val_list = get_datalist(args.val_list)
        try:
            val_dataset = FbSequenceDataset(
                args.root_dir, val_list, args, data_window_config, mode="val"
            )
            val_loader = DataLoader(val_dataset, batch_size=512, shuffle=True)
        except OSError as e:
            logging.error(e)
            return
        logging.info("Validation set loaded.")
        logging.info(f"Number of val samples: {len(val_dataset)}")

    device = torch.device(
        "cuda:0" if torch.cuda.is_available() and not args.cpu else "cpu"
    )
    network = get_model(args.arch, net_config, args.input_dim, args.output_dim).to(
        device
    )
    total_params = network.get_num_params()
    logging.info(f'Network "{args.arch}" loaded to device {device}')
    logging.info(f"Total number of parameters: {total_params}")

    optimizer = torch.optim.Adam(network.parameters(), args.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, factor=0.1, patience=10, verbose=True, eps=1e-12
    )
    logging.info(f"Optimizer: {optimizer}, Scheduler: {scheduler}")

    start_epoch = 0
    if args.continue_from is not None:
        checkpoints = torch.load(args.continue_from, map_location=device)
        start_epoch = checkpoints.get("epoch", 0)
        network.load_state_dict(checkpoints.get("model_state_dict"))
        optimizer.load_state_dict(checkpoints.get("optimizer_state_dict"))
        logging.info(f"Continue from epoch {start_epoch}")
    else:
        # default starting from latest checkpoint from interruption
        latest_pt = os.path.join(args.out_dir, "checkpoints", "checkpoint_latest.pt")
        if os.path.isfile(latest_pt):
            checkpoints = torch.load(latest_pt, map_location=device)
            start_epoch = checkpoints.get("epoch", 0)
            network.load_state_dict(checkpoints.get("model_state_dict"))
            optimizer.load_state_dict(checkpoints.get("optimizer_state_dict"))
            logging.info(
                f"Detected saved checkpoint, starting from epoch {start_epoch}"
            )

    summary_writer = SummaryWriter(osp.join(args.out_dir, "logs"))
    summary_writer.add_text("info", f"total_param: {total_params}")

    logging.info(f"-------------- Init, Epoch {start_epoch} --------------")
    attr_dict = get_inference(network, train_loader, device, start_epoch)
    write_summary(summary_writer, attr_dict, start_epoch, optimizer, "train")
    if val_loader is not None:
        attr_dict = get_inference(network, val_loader, device, start_epoch)
        write_summary(summary_writer, attr_dict, start_epoch, optimizer, "val")

    def stop_signal_handler(args, epoch, network, optimizer, signal, frame):
        logging.info("-" * 30)
        logging.info("Early terminate")
        save_model(args, epoch, network, optimizer, interrupt=True)
        sys.exit()

    best_val_loss = np.inf
    for epoch in range(start_epoch + 1, args.epochs):
        signal.signal(
            signal.SIGINT, partial(stop_signal_handler, args, epoch, network, optimizer)
        )
        signal.signal(
            signal.SIGTERM,
            partial(stop_signal_handler, args, epoch, network, optimizer),
        )

        logging.info(f"-------------- Training, Epoch {epoch} ---------------")
        start_t = time.time()
        train_attr_dict = do_train(network, train_loader, device, epoch, optimizer)
        write_summary(summary_writer, train_attr_dict, epoch, optimizer, "train")
        end_t = time.time()
        logging.info(f"time usage: {end_t - start_t:.3f}s")

        if val_loader is not None:
            val_attr_dict = get_inference(network, val_loader, device, epoch)
            write_summary(summary_writer, val_attr_dict, epoch, optimizer, "val")
            val_loss = np.mean(val_attr_dict["losses"])
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                save_model(args, epoch, network, optimizer)
        else:
            save_model(args, epoch, network, optimizer)

    logging.info("Training complete.")

    return
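save_model itself is not included in this snippet; below is a minimal sketch consistent with the checkpoint keys the resume logic above reads back (epoch, model_state_dict, optimizer_state_dict). The function name is hypothetical:

import torch

def save_model_sketch(path, epoch, network, optimizer):
    # Persist exactly the fields net_train() expects when resuming.
    torch.save(
        {
            "epoch": epoch,
            "model_state_dict": network.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
        },
        path,
    )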
Code example #27
    parser_bias.set_defaults(func=plot_position)
    parser_bias = subparsers.add_parser("plot_autocorrelation")
    parser_bias.set_defaults(func=plot_position)

    args = parser.parse_args()
    print(vars(args))

    with open(args.data_list) as f:
        data_names = [
            # `split("," or " ")` only ever split on ","; handle both separators
            s.strip().replace(",", " ").split()[0]
            for s in f.readlines()
            if s.strip() and s[0] != "#"
        ]
    if args.dataset_number is not None:
        dataset = data_names[args.dataset_number]
        logging.info(f"Plotting dataset {dataset}")
        log_folders = glob.glob(args.root_dir_regexp)
        logging.info(f"{log_folders}")
        for f in log_folders:
            if f == "./output_b1":
                continue
            logging.info(f"Processing run {f}")
            n = f
            try:
                args.func(args, f, n, dataset)
            except Exception as e:
                print(e)
                logging.warning(f"Something went wrong with {dataset}")
                continue
        plt.show()
    else:
Code example #28
def plot_position(args, log_folder, n, dataset):
    results_folder = log_folder + "/" + dataset
    filename = os.path.join(results_folder, args.log_filename + ".npy")
    try:
        logging.info(f"Loading {filename}")
        states = np.load(filename)
    except OSError:
        logging.error(
            f"{filename} was not found. This most likely means the filter did not finish."
        )
        raise FileNotFoundError(filename)

    # load all states
    R_init = states[0, :9].reshape(-1, 3, 3)
    Rs = states[:, :9].reshape(-1, 3, 3)
    rs = Rotation.from_matrix(Rs)
    ps = states[:, 12:15]
    ts = states[:, 27]
    sigma_p = np.sqrt(states[:, 34:37])

    if os.path.exists(os.path.join(results_folder, "vio_states.npy")):
        vio_states = np.load(os.path.join(results_folder, "vio_states.npy"))
        ref_p = vio_states[:, 3:6]
        ref_accelScaleInv_flat = vio_states[:, 24:33]
        ref_gyroScaleInv_flat = vio_states[:, 33:42]
    else:
        logging.error(
            "vio_states.npy was not found. You should create it with plot_state.py first."
        )
        raise FileNotFoundError("vio_states.npy")

    fig14 = plt.figure("position-2d " + dataset)
    plt.plot(ps[:, 0], ps[:, 1], label=n)
    plt.plot(
        ref_p[:, 0], ref_p[:, 1], label="vio", color=plot_state.color_vio, linestyle=":"
    )
    plt.legend(loc="upper center")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.grid(True)
    plt.title("position-2d")
    plt.gca().set_aspect("equal")

    fig1 = plot_state.plot_state_euclidean(
        "position " + dataset,
        n,
        ["p_x", "p_y", "p_z"],
        ts,
        ps,
        linestyle=":",
        color="black",
    )
    fig9 = plot_state.plot_error_euclidean(
        "position error " + dataset,
        n,
        ["p_x", "p_y", "p_z"],
        ts,
        ps - ref_p,
        sigma=sigma_p,
        linestyle=":",
        color="black",
    )
Code example #29
    io_groups.add_argument(
        "--ronin_dir",
        type=str,
        help="folder names should match those in filter_dir")
    io_groups.add_argument(
        "--window_time",
        type=float,
        default=1.0,
        help="window time when no ronin dir exists",
    )
    io_groups.add_argument("--no_make_plots", action="store_true")

    args = parser.parse_args()

    all_outputs_filter = list(Path.cwd().glob(args.filter_dir))
    logging.info(f"Found {len(all_outputs_filter)} runs")

    for m in all_outputs_filter:
        base_folder = Path(m)
        logging.info(base_folder)
        name_run = "/tlio_output/" + base_folder.name
        # read parameters
        with open(base_folder.joinpath("./tlio_output/parameters.json"),
                  "r") as f:
            conf_filter = json.load(f)
        pprint(conf_filter)
        if "window_time" in conf_filter.keys():
            disp_time = float(conf_filter["window_time"])
        else:
            disp_time = args.window_time  # This is default
Code example #30
    io_groups.add_argument("--model_path",
                           required=True,
                           type=str,
                           default=None)
    io_groups.add_argument("--model_param_path",
                           required=True,
                           type=str,
                           default=None)
    io_groups.add_argument("--out_dir", type=str, default="./")

    parser.add_argument("--cpu", type=bool, default=True)
    args = parser.parse_args()

    # overwrite network model parameters from the json file
    # (the guard is redundant here since --model_param_path is required)
    if args.model_param_path is not None:
        with open(args.model_param_path) as json_file:
            data_json = json.load(json_file)
            args.imu_freq_net = data_json["imu_freq"]
            args.past_time = data_json["past_time"]
            args.window_time = data_json["window_time"]
            args.arch = data_json["arch"]

    np.set_printoptions(linewidth=2000)

    logging.info("Program options:")
    pprint(vars(args))  # pprint() prints and returns None, so don't pass its result to logging
    # run filter
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    load_and_convert(args)