Example #1
def test_forecasting_metrics(
    forecasted_trajectories,
    ground_truth_trajectories,
    city_names,
    max_n_guesses,
    horizon,
    miss_threshold,
    forecasted_probabilities,
    expected_metrics,
) -> None:
    metrics = compute_forecasting_metrics(
        forecasted_trajectories,
        ground_truth_trajectories,
        city_names,
        max_n_guesses,
        horizon,
        miss_threshold,
        forecasted_probabilities,
    )

    assert_metrics_almost_equal(expected_metrics, metrics)
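The arguments above (including city_names, horizon, and miss_threshold) would normally arrive through pytest parametrization. A minimal, hypothetical sketch of that wiring, using placeholder arrays rather than anything from the original test suite, and applied as @parametrize_cases directly above the test (assuming assert_metrics_almost_equal compares only the keys present in expected_metrics), could look like this:

import numpy as np
import pytest

# Placeholder trajectories; a realistic case would use map-valid coordinates
# such as those in Example #4 below.
_guesses = {1: [np.ones((30, 2))]}
_truth = {1: np.ones((30, 2))}

parametrize_cases = pytest.mark.parametrize(
    "forecasted_trajectories, ground_truth_trajectories, city_names, "
    "max_n_guesses, horizon, miss_threshold, forecasted_probabilities, expected_metrics",
    [
        (
            _guesses,                # id -> list of (horizon, 2) guesses
            _truth,                  # id -> (horizon, 2) ground truth
            {1: "MIA"},              # id -> city name
            1,                       # K, the number of guesses scored
            30,                      # prediction horizon in timesteps
            2.0,                     # miss threshold in meters
            {1: [1.0]},              # probability assigned to each guess
            {"minADE": 0.0, "minFDE": 0.0, "MR": 0.0},  # guess equals truth, so zero error
        ),
    ],
)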
Example #2
def main():
    # Import all settings for experiment.
    args = parser.parse_args()
    model = import_module(args.model)
    config, _, collate_fn, net, loss, post_process, opt = model.get_model()

    # load the pretrained model
    ckpt_path = args.weight
    if not os.path.isabs(ckpt_path):
        ckpt_path = os.path.join(config["save_dir"], ckpt_path)
    ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
    load_pretrain(net, ckpt["state_dict"])
    net.eval()

    # Data loader for evaluation
    dataset = ArgoTestDataset(args.split, config, train=False)
    data_loader = DataLoader(
        dataset,
        batch_size=config["val_batch_size"],
        num_workers=config["val_workers"],
        collate_fn=collate_fn,
        shuffle=True,
        pin_memory=True,
    )

    # begin inference
    preds = {}
    gts = {}
    cities = {}
    for ii, data in tqdm(enumerate(data_loader)):
        data = dict(data)
        with torch.no_grad():
            output = net(data)
            results = [x[0:1].detach().cpu().numpy() for x in output["reg"]]
        for i, (argo_idx, pred_traj) in enumerate(zip(data["argo_id"], results)):
            preds[argo_idx] = pred_traj.squeeze()
            cities[argo_idx] = data["city"][i]
            gts[argo_idx] = data["gt_preds"][i][0] if "gt_preds" in data else None

    # save for further visualization
    res = dict(
        preds=preds,
        gts=gts,
        cities=cities,
    )
    # torch.save(res,f"{config['save_dir']}/results.pkl")

    # evaluate or submit
    if args.split == "val":
        # for val set: compute metric
        from argoverse.evaluation.eval_forecasting import (
            compute_forecasting_metrics, )
        # Max #guesses (K): 6
        _ = compute_forecasting_metrics(preds, gts, cities, 6, 30, 20)
        # Max #guesses (K): 1
        _ = compute_forecasting_metrics(preds, gts, cities, 1, 30, 20)
    else:
        # for test set: save as h5 for submission in evaluation server
        from argoverse.evaluation.competition_util import generate_forecasting_h5
        generate_forecasting_h5(
            preds, f"{config['save_dir']}/submit.h5")  # this might take a while
    import ipdb
    ipdb.set_trace()
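The preds, gts, and cities dictionaries assembled in the loop above are keyed by Argoverse sequence id and are exactly the structures compute_forecasting_metrics consumes (compare Example #4). A standalone toy call with made-up trajectories, not actual LaneGCN outputs, would look like:

import numpy as np
from argoverse.evaluation.eval_forecasting import compute_forecasting_metrics

seq_id = 42          # illustrative sequence id
horizon = 30
# Ground truth: straight line at x = 93 heading down in y (coordinates valid for the MIA map).
gt = np.column_stack([np.full(horizon, 93.0), 3979.0 - np.arange(horizon, dtype=float)])
guess_a = gt + np.array([1.0, 0.0])   # constant 1 m lateral offset
guess_b = gt + np.array([2.0, 0.0])   # constant 2 m lateral offset

metrics = compute_forecasting_metrics(
    {seq_id: [guess_a, guess_b]},     # forecasted trajectories: id -> list of (horizon, 2) arrays
    {seq_id: gt},                     # ground truth: id -> (horizon, 2) array
    {seq_id: "MIA"},                  # city names: id -> city (used for the DAC map lookup)
    2,                                # max_n_guesses (K)
    horizon,
    2.0,                              # miss threshold in meters
)
print(metrics["minADE"], metrics["minFDE"], metrics["MR"])   # best guess: 1.0, 1.0, 0.0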
Example #3
        # Get displacement error and dac on multiple guesses along each centerline
        if args.n_guesses_cl:
            forecasted_trajectories = get_m_trajectories_along_n_cl(
                forecasted_trajectories)
            num_trajectories = args.n_cl * args.n_guesses_cl

        # Get displacement error and dac on pruned guesses
        elif args.prune_n_guesses:
            forecasted_trajectories = get_pruned_guesses(
                forecasted_trajectories, city_names, gt_trajectories)
            num_trajectories = args.prune_n_guesses

        # Normal case
        else:
            num_trajectories = args.max_n_guesses

        compute_forecasting_metrics(
            forecasted_trajectories,
            gt_trajectories,
            city_names,
            num_trajectories,
            args.horizon,
            args.miss_threshold,
        )

    if args.viz:
        id_for_viz = None
        if args.viz_seq_id:
            with open(args.viz_seq_id, "rb") as f:
                id_for_viz = pkl.load(f)
        viz_predictions_helper(forecasted_trajectories, gt_trajectories,
                               features_df, id_for_viz)
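The fragment above branches on several evaluation options read from an argparse namespace. The flag names below are inferred from the attribute accesses in the snippet, and the defaults are hypothetical rather than copied from the original script:

import argparse

parser = argparse.ArgumentParser(description="Motion forecasting evaluation (illustrative flags).")
parser.add_argument("--max_n_guesses", type=int, default=6)       # K used in the normal case
parser.add_argument("--prune_n_guesses", type=int, default=0)     # if nonzero, prune guesses before scoring
parser.add_argument("--n_guesses_cl", type=int, default=0)        # guesses kept per candidate centerline
parser.add_argument("--n_cl", type=int, default=0)                # number of candidate centerlines
parser.add_argument("--horizon", type=int, default=30)            # prediction horizon in timesteps
parser.add_argument("--miss_threshold", type=float, default=2.0)  # miss threshold in meters
parser.add_argument("--viz", action="store_true")                 # visualize predictions
parser.add_argument("--viz_seq_id", type=str, default=None)       # pickle file with sequence ids to visualize
args = parser.parse_args()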
Example #4
def test_compute_forecasting_metric():
    """Test computation of motion forecasting metrics."""
    # Test Case:

    #   x: Ground Truth Trajectory
    #   *: Predicted Trajectory 1
    #   o: Predicted Trajectory 2

    #   90   91   92   93   94   95   96   97   98   99   100  101  102

    # 3980       |       x   *   o            |
    #            |       x   *   o            |
    #            |       x   *   o            |
    #            |       x   *   o            |\
    # 3975       |       x   *   o            | \
    #            |       x   *   o            |  \
    #            |       x   *   o            |   ---------------------------
    #            |       x   *   o            |
    # 3970       |       x   *   o            |
    #            |         x   *   o          |
    #            |           x   *   o        |
    #            |           x   *   o        |
    # 3965       |           x   *   o        |
    #            |           x   *      o     |
    #            |           x   *          o |  o   o   o   o   o   o   o   o
    #            |           x   *            |
    # 3960       |             x   *          |\
    #            |               x   *        | \
    #            |               x   *        |  \
    #            |               x   *        |   -----------------------------
    # 3955       |               x   *        |
    #            |               x   *        |
    #            |               x   *        |
    #            |               x   *        |
    # 3950       |               x   *        |

    ground_truth = np.array([
        [93, 3979],
        [93, 3978],
        [93, 3977],
        [93, 3976],
        [93, 3976],
        [93, 3975],
        [93, 3974],
        [93, 3973],
        [93, 3972],
        [93, 3971],
        [93, 3970],
        [94, 3969],
        [94, 3969],
        [94, 3968],
        [94, 3967],
        [94, 3966],
        [94, 3966],
        [94, 3965],
        [94, 3964],
        [94, 3963],
        [94, 3962],
        [95, 3961],
        [95, 3960],
        [95, 3959],
        [94, 3957],
        [94, 3957],
        [94, 3956],
        [94, 3955],
        [95, 3953],
        [95, 3952],
    ])

    forecasted_1 = np.array([
        [94, 3979],
        [94, 3978],
        [94, 3977],
        [94, 3976],
        [94, 3976],
        [94, 3975],
        [94, 3974],
        [94, 3973],
        [94, 3972],
        [94, 3971],
        [94, 3970],
        [95, 3969],
        [95, 3969],
        [95, 3968],
        [95, 3967],
        [95, 3966],
        [95, 3966],
        [95, 3965],
        [95, 3964],
        [95, 3963],
        [95, 3962],
        [96, 3961],
        [96, 3960],
        [96, 3959],
        [95, 3957],
        [95, 3957],
        [95, 3956],
        [95, 3955],
        [96, 3953],
        [96, 3952],
    ])

    forecasted_2 = np.array([
        [95, 3979],
        [95, 3978],
        [95, 3977],
        [95, 3976],
        [95, 3976],
        [95, 3975],
        [95, 3974],
        [95, 3973],
        [95, 3972],
        [95, 3971],
        [95, 3970],
        [96, 3969],
        [96, 3969],
        [96, 3968],
        [96, 3967],
        [96, 3966],
        [96, 3966],
        [96, 3965],
        [96, 3964],
        [96, 3963],
        [96, 3962],
        [97, 3961],
        [97, 3960],
        [98, 3960],
        [98, 3960],
        [99, 3960],
        [100, 3960],
        [101, 3960],
        [102, 3960],
        [103, 3960],
    ])

    city_names = {1: "MIA"}
    max_n_guesses = 2
    horizon = 30
    miss_threshold = 1.0

    # Case 1
    forecasted_trajectories = {1: [forecasted_1, forecasted_2]}
    forecasted_probabilities = {1: [0.80, 0.20]}
    ground_truth_trajectories = {1: ground_truth}

    metrics = compute_forecasting_metrics(
        forecasted_trajectories,
        ground_truth_trajectories,
        city_names,
        max_n_guesses,
        horizon,
        miss_threshold,
        forecasted_probabilities,
    )

    expected_min_ade = 1.0
    expected_min_fde = 1.0
    expected_dac = 1.0
    expected_miss_rate = 0.0
    expected_p_min_ade = 1.22
    expected_p_min_fde = 1.22
    expected_p_miss_rate = 0.20
    assert_almost_equal(expected_min_ade, round(metrics["minADE"], 2), 2)
    assert_almost_equal(expected_min_fde, round(metrics["minFDE"], 2), 2)
    assert_almost_equal(expected_dac, round(metrics["DAC"], 2), 2)
    assert_almost_equal(expected_miss_rate, round(metrics["MR"], 2), 2)
    assert_almost_equal(expected_p_min_ade, round(metrics["p-minADE"], 2), 2)
    assert_almost_equal(expected_p_min_fde, round(metrics["p-minFDE"], 2), 2)
    assert_almost_equal(expected_p_miss_rate, round(metrics["p-MR"], 2), 2)

    # Case 2
    forecasted_trajectories = {1: [forecasted_2]}
    forecasted_probabilities = {1: [1.0]}
    ground_truth_trajectories = {1: ground_truth}

    metrics = compute_forecasting_metrics(
        forecasted_trajectories,
        ground_truth_trajectories,
        city_names,
        max_n_guesses,
        horizon,
        miss_threshold,
        forecasted_probabilities,
    )

    expected_min_ade = 3.23
    expected_min_fde = 11.31
    expected_dac = 1.0
    expected_miss_rate = 1.0
    expected_p_min_ade = 3.23
    expected_p_min_fde = 11.31
    expected_p_miss_rate = 1.0

    assert_almost_equal(expected_min_ade, round(metrics["minADE"], 2), 2)
    assert_almost_equal(expected_min_fde, round(metrics["minFDE"], 2), 2)
    assert_almost_equal(expected_dac, round(metrics["DAC"], 2), 2)
    assert_almost_equal(expected_miss_rate, round(metrics["MR"], 2), 2)
    assert_almost_equal(expected_p_min_ade, round(metrics["p-minADE"], 2), 2)
    assert_almost_equal(expected_p_min_fde, round(metrics["p-minFDE"], 2), 2)
    assert_almost_equal(expected_p_miss_rate, round(metrics["p-MR"], 2), 2)
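The probability-weighted expectations in Case 1 are consistent with adding min(-log p, -log 0.05) to the best guess's displacement error and counting (1 - p) toward the miss rate when the best guess lands within the threshold; this mirrors the Argoverse evaluation code these examples target, though the 0.05 floor is an assumption worth checking against the installed version:

import numpy as np

p_best = 0.80                    # probability assigned to forecasted_1, the closest guess
min_ade = min_fde = 1.0          # forecasted_1 is a constant 1 m lateral offset from ground truth

penalty = min(-np.log(p_best), -np.log(0.05))    # ~0.223; 0.05 acts as a low-probability floor
print(round(min_ade + penalty, 2))               # 1.22 -> expected p-minADE
print(round(min_fde + penalty, 2))               # 1.22 -> expected p-minFDE
print(round(1.0 - p_best, 2))                    # 0.2  -> expected p-MR (the best guess is not a miss)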