Example #1
def plot_autocorellation(args, log_folder, n, dataset):
    results_folder = os.path.join(log_folder, dataset)
    filename = os.path.join(results_folder, args.log_filename + ".npy")
    try:
        logging.info(f"Loading {filename}")
        states = np.load(filename)
    except OSError:
        logging.error(
            f"{filename} was not found, which likely means the filter did not finish."
        )
        raise FileNotFoundError(filename)

    # load all states
    meas = states[:, 46:49]
    if os.path.exists(os.path.join(results_folder, "vio_states.npy")):
        vio_states = np.load(os.path.join(results_folder, "vio_states.npy"))
        ref_disp = vio_states[:, 15:18]
    else:
        logging.error(
            "vio_states.npy was not found. Create it with plot_state.py first."
        )
        raise FileNotFoundError("vio_states.npy")

    fig = plt.figure("autocorrelation " + dataset)
    meas_err = meas - ref_disp
    meas_err_update = meas_err[~np.isnan(meas_err).any(axis=1)]
    logging.warning("We assume update frequency at 20hz for autocorrelation")
    for i in range(3):
        plt.subplot(3, 1, i + 1)
        plt.acorr(meas_err_update[:, i], maxlags=100, lw=2, usevlines=False, label=n)
        locs, _ = plt.xticks()  # tick locations are in samples
        plt.xticks(locs, [f"{loc / 20.0}s" for loc in locs])  # relabel in seconds (20 Hz)
        plt.xlim(left=0)
        plt.grid()
    plt.legend()
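
A minimal driver sketch for the helper above, assuming an argparse namespace with a log_filename field and a <log_folder>/<dataset>/ layout (both inferred from this excerpt, not a confirmed interface). The helper itself also relies on os, logging, numpy as np, and matplotlib.pyplot as plt imports not shown in the excerpt:

import argparse

import matplotlib.pyplot as plt

parser = argparse.ArgumentParser()
parser.add_argument("--log_filename", type=str, default="states")  # hypothetical default
args = parser.parse_args()
plot_autocorellation(args, "logs", n="run-0", dataset="seq01")  # illustrative values
plt.show()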
Example #2
        command = [
            "python3",
            "main_net.py",
            "--mode",
            "test",
            "--test_list",
            f"{args.data_list}",
            "--root_dir",
            f"{args.root_dir}",
            "--model_path",
            f"{m}",
            "--out_dir",
            f"./{args.out_dir}/{name_run}/",
            "--imu_freq",
            f'{conf["imu_freq"]}',
            "--past_time",
            f'{conf["past_time"]}',
            "--window_time",
            f'{conf["window_time"]}',
            "--future_time",
            f'{conf["future_time"]}',
            "--sample_freq",
            f"{sample_freq}",
            f"{save_plot_arg(args.save_plot)}",
        ]
        logging.info(" ".join(command))
        try:
            sp.run(command, check=True)
        except Exception as e:
            logging.error(e)
            continue
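
save_plot_arg is called above but not defined in this excerpt. A plausible minimal definition, assuming main_net.py accepts a plain --save_plot flag (the flag name is a guess from the surrounding command):

def save_plot_arg(save_plot):
    # Hypothetical: map the boolean onto a CLI flag for main_net.py.
    # Note the caller passes the result through f"{...}", so an empty
    # string still ends up in the argument list when save_plot is False.
    return "--save_plot" if save_plot else ""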
Example #3
def net_train(args):
    """
    Main function for network training
    """

    try:
        if args.root_dir is None:
            raise ValueError("root_dir must be specified.")
        if args.train_list is None:
            raise ValueError("train_list must be specified.")
        if args.out_dir is not None:
            if not osp.isdir(args.out_dir):
                os.makedirs(args.out_dir)
            if not osp.isdir(osp.join(args.out_dir, "checkpoints")):
                os.makedirs(osp.join(args.out_dir, "checkpoints"))
            if not osp.isdir(osp.join(args.out_dir, "logs")):
                os.makedirs(osp.join(args.out_dir, "logs"))
            with open(
                os.path.join(args.out_dir, "parameters.json"), "w"
            ) as parameters_file:
                parameters_file.write(json.dumps(vars(args), sort_keys=True, indent=4))
            logging.info(f"Training output writes to {args.out_dir}")
        else:
            raise ValueError("out_dir must be specified.")
        if args.val_list is None:
            logging.warning("val_list is not specified.")
        if args.continue_from is not None:
            if osp.exists(args.continue_from):
                logging.info(
                    f"Continue training from existing model {args.continue_from}"
                )
            else:
                raise ValueError(
                    f"continue_from model file path {args.continue_from} does not exist"
                )
        data_window_config, net_config = arg_conversion(args)
    except ValueError as e:
        logging.error(e)
        return

    # Display
    np.set_printoptions(formatter={"all": "{:.6f}".format})
    logging.info(f"Training/testing with {args.imu_freq} Hz IMU data")
    logging.info(
        f"Size: {data_window_config['past_data_size']}"
        f"+{data_window_config['window_size']}"
        f"+{data_window_config['future_data_size']}, "
        f"Time: {args.past_time}+{args.window_time}+{args.future_time}"
    )
    logging.info("Perturb on bias: %s" % args.do_bias_shift)
    logging.info("Perturb on gravity: %s" % args.perturb_gravity)
    logging.info("Sample frequency: %s" % args.sample_freq)

    train_loader, val_loader = None, None
    start_t = time.time()
    train_list = get_datalist(args.train_list)
    try:
        train_dataset = FbSequenceDataset(
            args.root_dir, train_list, args, data_window_config, mode="train"
        )
        train_loader = DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=True
        )
    except OSError as e:
        logging.error(e)
        return
    end_t = time.time()
    logging.info(f"Training set loaded. Loading time: {end_t - start_t:.3f}s")
    logging.info(f"Number of train samples: {len(train_dataset)}")

    if args.val_list is not None:
        val_list = get_datalist(args.val_list)
        try:
            val_dataset = FbSequenceDataset(
                args.root_dir, val_list, args, data_window_config, mode="val"
            )
            val_loader = DataLoader(val_dataset, batch_size=512, shuffle=True)
        except OSError as e:
            logging.error(e)
            return
        logging.info("Validation set loaded.")
        logging.info(f"Number of val samples: {len(val_dataset)}")

    device = torch.device(
        "cuda:0" if torch.cuda.is_available() and not args.cpu else "cpu"
    )
    network = get_model(args.arch, net_config, args.input_dim, args.output_dim).to(
        device
    )
    total_params = network.get_num_params()
    logging.info(f'Network "{args.arch}" loaded to device {device}')
    logging.info(f"Total number of parameters: {total_params}")

    optimizer = torch.optim.Adam(network.parameters(), args.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, factor=0.1, patience=10, verbose=True, eps=1e-12
    )
    logging.info(f"Optimizer: {optimizer}, Scheduler: {scheduler}")

    start_epoch = 0
    if args.continue_from is not None:
        checkpoints = torch.load(args.continue_from, map_location=device)
        start_epoch = checkpoints.get("epoch", 0)
        network.load_state_dict(checkpoints.get("model_state_dict"))
        optimizer.load_state_dict(checkpoints.get("optimizer_state_dict"))
        logging.info(f"Continue from epoch {start_epoch}")
    else:
        # default starting from latest checkpoint from interruption
        latest_pt = os.path.join(args.out_dir, "checkpoints", "checkpoint_latest.pt")
        if os.path.isfile(latest_pt):
            checkpoints = torch.load(latest_pt, map_location=device)
            start_epoch = checkpoints.get("epoch", 0)
            network.load_state_dict(checkpoints.get("model_state_dict"))
            optimizer.load_state_dict(checkpoints.get("optimizer_state_dict"))
            logging.info(
                f"Detected saved checkpoint, starting from epoch {start_epoch}"
            )

    summary_writer = SummaryWriter(osp.join(args.out_dir, "logs"))
    summary_writer.add_text("info", f"total_param: {total_params}")

    logging.info(f"-------------- Init, Epoch {start_epoch} --------------")
    attr_dict = get_inference(network, train_loader, device, start_epoch)
    write_summary(summary_writer, attr_dict, start_epoch, optimizer, "train")
    if val_loader is not None:
        attr_dict = get_inference(network, val_loader, device, start_epoch)
        write_summary(summary_writer, attr_dict, start_epoch, optimizer, "val")

    def stop_signal_handler(args, epoch, network, optimizer, sig, frame):
        logging.info("-" * 30)
        logging.info("Early terminate")
        save_model(args, epoch, network, optimizer, interrupt=True)
        sys.exit()

    best_val_loss = np.inf
    for epoch in range(start_epoch + 1, args.epochs):
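        # Re-bind the handlers every epoch so an interrupt checkpoints the
        # epoch currently in progress (partial captures the loop variables).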
        signal.signal(
            signal.SIGINT, partial(stop_signal_handler, args, epoch, network, optimizer)
        )
        signal.signal(
            signal.SIGTERM,
            partial(stop_signal_handler, args, epoch, network, optimizer),
        )

        logging.info(f"-------------- Training, Epoch {epoch} ---------------")
        start_t = time.time()
        train_attr_dict = do_train(network, train_loader, device, epoch, optimizer)
        write_summary(summary_writer, train_attr_dict, epoch, optimizer, "train")
        end_t = time.time()
        logging.info(f"time usage: {end_t - start_t:.3f}s")

        if val_loader is not None:
            val_attr_dict = get_inference(network, val_loader, device, epoch)
            write_summary(summary_writer, val_attr_dict, epoch, optimizer, "val")
            if np.mean(val_attr_dict["losses"]) < best_val_loss:
                best_val_loss = np.mean(val_attr_dict["losses"])
                save_model(args, epoch, network, optimizer)
        else:
            save_model(args, epoch, network, optimizer)

    logging.info("Training complete.")

    return
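
save_model is referenced above but not shown. A minimal sketch consistent with its call sites and with the resume logic in net_train (the checkpoint keys and the checkpoints/checkpoint_latest.pt path come from the loading code above; the per-epoch file name is an assumption):

def save_model(args, epoch, network, optimizer, interrupt=False):
    # The keys mirror those read back by the resume logic in net_train.
    state = {
        "epoch": epoch,
        "model_state_dict": network.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
    }
    # checkpoint_latest.pt matches the interruption-resume path above;
    # the per-epoch file name is a guess, not confirmed by this excerpt.
    name = "checkpoint_latest.pt" if interrupt else f"checkpoint_{epoch}.pt"
    torch.save(state, osp.join(args.out_dir, "checkpoints", name))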
Example #4
def plot_position(args, log_folder, n, dataset):
    results_folder = os.path.join(log_folder, dataset)
    filename = os.path.join(results_folder, args.log_filename + ".npy")
    try:
        logging.info(f"Loading {filename}")
        states = np.load(filename)
    except OSError:
        logging.error(
            f"{filename} was not found, which likely means the filter did not finish."
        )
        raise FileNotFoundError(filename)

    # load all states
    R_init = states[0, :9].reshape(-1, 3, 3)
    Rs = states[:, :9].reshape(-1, 3, 3)
    rs = Rotation.from_matrix(Rs)
    ps = states[:, 12:15]
    ts = states[:, 27]
    sigma_p = np.sqrt(states[:, 34:37])

    if os.path.exists(os.path.join(results_folder, "vio_states.npy")):
        vio_states = np.load(os.path.join(results_folder, "vio_states.npy"))
        ref_p = vio_states[:, 3:6]
        ref_accelScaleInv_flat = vio_states[:, 24:33]
        ref_gyroScaleInv_flat = vio_states[:, 33:42]
    else:
        logging.error(
            "vio_states.npy was not found. Create it with plot_state.py first."
        )
        raise FileNotFoundError("vio_states.npy")

    fig14 = plt.figure("position-2d " + dataset)
    plt.plot(ps[:, 0], ps[:, 1], label=n)
    plt.plot(
        ref_p[:, 0], ref_p[:, 1], label="vio", color=plot_state.color_vio, linestyle=":"
    )
    plt.legend(loc="upper center")
    plt.xlabel("x")
    plt.ylabel("y")
    plt.grid(True)
    plt.title("position-2d")
    plt.gca().set_aspect("equal")

    fig1 = plot_state.plot_state_euclidean(
        "position " + dataset,
        n,
        ["p_x", "p_y", "p_z"],
        ts,
        ps,
        linestyle=":",
        color="black",
    )
    fig9 = plot_state.plot_error_euclidean(
        "position error " + dataset,
        n,
        ["p_x", "p_y", "p_z"],
        ts,
        ps - ref_p,
        sigma=sigma_p,
        linestyle=":",
        color="black",
    )
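
For reference, the column offsets this excerpt reads from the two state arrays (only the fields actually used above; the full layouts are not shown here):

# states[:, :9]    rotation matrix, row-major, reshaped to 3x3
# states[:, 12:15] position p
# states[:, 27]    timestamp t
# states[:, 34:37] position variance (sqrt gives sigma_p)
# states[:, 46:49] displacement measurement (used by plot_autocorellation)
# vio_states[:, 3:6]    reference position
# vio_states[:, 15:18]  reference displacement
# vio_states[:, 24:33]  reference accel scale-inverse, flattened
# vio_states[:, 33:42]  reference gyro scale-inverse, flattened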
Example #5
def net_test(args):
    """
    Main function for network testing.
    Generates trajectories, plots, and a metrics.json file.
    """

    try:
        if args.root_dir is None:
            raise ValueError("root_dir must be specified.")
        if args.test_list is None:
            raise ValueError("test_list must be specified.")
        if args.out_dir is not None:
            if not osp.isdir(args.out_dir):
                os.makedirs(args.out_dir)
            logging.info(f"Testing output writes to {args.out_dir}")
        else:
            raise ValueError("out_dir must be specified.")
        data_window_config, net_config = arg_conversion(args)
    except ValueError as e:
        logging.error(e)
        return

    test_list = get_datalist(args.test_list)

    device = torch.device(
        "cuda:0" if torch.cuda.is_available() and not args.cpu else "cpu")
    checkpoint = torch.load(args.model_path, map_location=device)
    network = get_model(args.arch, net_config, args.input_dim,
                        args.output_dim).to(device)
    network.load_state_dict(checkpoint["model_state_dict"])
    network.eval()
    logging.info(f"Model {args.model_path} loaded to device {device}.")

    # initialize containers
    all_metrics = {}

    for data in test_list:
        logging.info(f"Processing {data}...")
        try:
            seq_dataset = FbSequenceDataset(args.root_dir, [data],
                                            args,
                                            data_window_config,
                                            mode="test")
            seq_loader = DataLoader(seq_dataset,
                                    batch_size=1024,
                                    shuffle=False)
        except OSError as e:
            logging.error(e)
            continue

        # Obtain trajectory
        net_attr_dict = get_inference(network, seq_loader, device, epoch=50)
        traj_attr_dict = pose_integrate(args, seq_dataset,
                                        net_attr_dict["preds"])
        outdir = osp.join(args.out_dir, data)
        if not osp.exists(outdir):
            os.mkdir(outdir)
        outfile = osp.join(outdir, "trajectory.txt")
        trajectory_data = np.concatenate(
            [
                traj_attr_dict["ts"].reshape(-1, 1),
                traj_attr_dict["pos_pred"],
                traj_attr_dict["pos_gt"],
            ],
            axis=1,
        )
        np.savetxt(outfile, trajectory_data, delimiter=",")

        # obtain metrics
        metrics, plot_dict = compute_metrics_and_plotting(
            args, net_attr_dict, traj_attr_dict)
        logging.info(metrics)
        all_metrics[data] = metrics

        outfile_net = osp.join(outdir, "net_outputs.txt")
        net_outputs_data = np.concatenate(
            [
                plot_dict["pred_ts"].reshape(-1, 1),
                plot_dict["preds"],
                plot_dict["targets"],
                plot_dict["pred_sigmas"],
            ],
            axis=1,
        )
        np.savetxt(outfile_net, net_outputs_data, delimiter=",")

        if args.save_plot:
            make_plots(args, plot_dict, outdir)

        try:
            with open(osp.join(args.out_dir, "metrics.json"), "w") as f:
                json.dump(all_metrics, f, indent=1)
        except OSError as e:
            logging.error(e)
            continue

    return
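
Each trajectory.txt row written above is: timestamp, predicted position (3), ground-truth position (3), comma-separated. A minimal sketch for reading one back (the path is illustrative):

import numpy as np

traj = np.loadtxt("out/seq01/trajectory.txt", delimiter=",")  # illustrative path
ts, pos_pred, pos_gt = traj[:, 0], traj[:, 1:4], traj[:, 4:7]
drift = np.linalg.norm(pos_pred - pos_gt, axis=1)  # per-sample position error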
Example #6
def net_eval(args):
    """
    Main function for network evaluation.
    Generates a pickle file containing all per-sample network results.
    """

    try:
        if args.root_dir is None:
            raise ValueError("root_dir must be specified.")
        if args.test_list is None:
            raise ValueError("test_list must be specified.")
        if args.out_dir is not None:
            if not osp.isdir(args.out_dir):
                os.makedirs(args.out_dir)
            logging.info(f"Testing output writes to {args.out_dir}")
        else:
            raise ValueError("out_dir must be specified.")
        data_window_config, net_config = arg_conversion(args)
    except ValueError as e:
        logging.error(e)
        return

    device = torch.device(
        "cuda:0" if torch.cuda.is_available() and not args.cpu else "cpu")
    checkpoint = torch.load(args.model_path, map_location=device)
    network = get_model(args.arch, net_config, args.input_dim,
                        args.output_dim).to(device)
    network.load_state_dict(checkpoint["model_state_dict"])
    network.eval()
    logging.info(f"Model {args.model_path} loaded to device {device}.")

    all_targets, all_errors, all_sigmas = [], [], []
    all_norm_targets, all_angle_targets, all_norm_errors, all_angle_errors = (
        [],
        [],
        [],
        [],
    )
    mse_losses, likelihood_losses, avg_mse_losses, avg_likelihood_losses = (
        [],
        [],
        [],
        [],
    )
    all_mahalanobis = []

    test_list = get_datalist(args.test_list)
    blacklist = ["loop_hidacori058_20180519_1525"]
    # blacklist = []
    for data in test_list:
        if data in blacklist:
            logging.info(f"skipping blacklist {data}")
            continue
        logging.info(f"Processing {data}...")

        try:
            seq_dataset = FbSequenceDataset(args.root_dir, [data],
                                            args,
                                            data_window_config,
                                            mode="eval")
            seq_loader = DataLoader(seq_dataset,
                                    batch_size=1024,
                                    shuffle=False)
        except OSError as e:
            logging.error(e)
            continue

        attr_dict = get_inference(network, seq_loader, device, epoch=50)
        norm_targets = np.linalg.norm(attr_dict["targets"][:, :2], axis=1)
        angle_targets = np.arctan2(attr_dict["targets"][:, 0],
                                   attr_dict["targets"][:, 1])
        norm_preds = np.linalg.norm(attr_dict["preds"][:, :2], axis=1)
        angle_preds = np.arctan2(attr_dict["preds"][:, 0],
                                 attr_dict["preds"][:, 1])
        norm_errors = norm_preds - norm_targets
        angle_errors = angle_preds - angle_targets
        sigmas = np.exp(attr_dict["preds_cov"])
        errors = attr_dict["preds"] - attr_dict["targets"]
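        # Squared Mahalanobis distance per sample under a diagonal covariance:
        # sum_i errors_i**2 / sigmas_i, computed batched with einsum below.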
        a1 = np.expand_dims(errors, axis=1)
        a2 = np.expand_dims(np.multiply(np.reciprocal(sigmas), errors),
                            axis=-1)
        mahalanobis_dists = np.einsum("tip,tpi->t", a1, a2)

        all_targets.append(attr_dict["targets"])
        all_errors.append(errors)
        all_sigmas.append(sigmas)
        mse_losses.append(errors**2)
        avg_mse_losses.append(np.mean(errors**2, axis=1).reshape(-1, 1))
        likelihood_losses.append(attr_dict["losses"])
        avg_likelihood_losses.append(
            np.mean(attr_dict["losses"], axis=1).reshape(-1, 1))
        all_norm_targets.append(norm_targets.reshape(-1, 1))
        all_angle_targets.append(angle_targets.reshape(-1, 1))
        all_norm_errors.append(norm_errors.reshape(-1, 1))
        all_angle_errors.append(angle_errors.reshape(-1, 1))
        all_mahalanobis.append(mahalanobis_dists.reshape(-1, 1))

    arr_targets = np.concatenate(all_targets, axis=0)
    arr_errors = np.concatenate(all_errors, axis=0)
    arr_sigmas = np.concatenate(all_sigmas, axis=0)
    arr_mse_losses = np.concatenate(mse_losses, axis=0)
    arr_avg_mse_losses = np.concatenate(avg_mse_losses, axis=0)
    arr_likelihood_losses = np.concatenate(likelihood_losses, axis=0)
    arr_avg_likelihood_losses = np.concatenate(avg_likelihood_losses, axis=0)
    arr_norm_targets = np.concatenate(all_norm_targets, axis=0)
    arr_norm_errors = np.concatenate(all_norm_errors, axis=0)
    arr_angle_targets = np.concatenate(all_angle_targets, axis=0)
    arr_angle_errors = np.concatenate(all_angle_errors, axis=0)
    arr_mahalanobis = np.concatenate(all_mahalanobis, axis=0)

    arr_data = np.concatenate(
        (
            arr_targets,
            arr_errors,
            arr_sigmas,
            arr_mse_losses,
            arr_avg_mse_losses,
            arr_likelihood_losses,
            arr_avg_likelihood_losses,
            arr_norm_targets,
            arr_norm_errors,
            arr_angle_targets,
            arr_angle_errors,
            arr_mahalanobis,
        ),
        axis=1,
    )

    dataset = pd.DataFrame(
        arr_data,
        index=range(arr_mahalanobis.shape[0]),
        columns=[
            "targets_x",
            "targets_y",
            "targets_z",
            "errors_x",
            "errors_y",
            "errors_z",
            "sigmas_x",
            "sigmas_y",
            "sigmas_z",
            "mse_losses_x",
            "mse_losses_y",
            "mse_losses_z",
            "avg_mse_losses",
            "likelihood_losses_x",
            "likelihood_losses_y",
            "likelihood_losses_z",
            "avg_likelihood_losses",
            "norm_targets",
            "norm_errors",
            "angle_targets",
            "angle_errors",
            "mahalanobis",
        ],
    )

    dstr = "d"
    if args.do_bias_shift:
        dstr = f"{dstr}-bias-{args.accel_bias_range}-{args.gyro_bias_range}"
    if args.perturb_gravity:
        dstr = f"{dstr}-grav-{args.perturb_gravity_theta_range}"
    dstr = f"{dstr}.pkl"
    if args.out_name is not None:
        dstr = args.out_name
    outfile = os.path.join(args.out_dir, dstr)
    dataset.to_pickle(outfile)
    logging.info(f"Data saved to {outfile}")

    return
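
The pickle written above is a plain pandas DataFrame with the columns listed in the constructor. A minimal sketch for loading and summarizing it (the file name is illustrative; see the dstr logic above):

import pandas as pd

df = pd.read_pickle("out/d.pkl")  # illustrative name
print(df[["norm_errors", "angle_errors", "mahalanobis"]].describe())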