Example #1
    def validation_epoch_end(self, outputs):
        timestamps = np.concatenate(self.timestamps_list)
        track_ids = np.concatenate(self.track_id_list)
        coords = np.concatenate(self.pred_coords_list)
        confs = np.concatenate(self.confidences_list)

        self.timestamps_list = []
        self.track_id_list = []
        self.pred_coords_list = []
        self.confidences_list = []

        csv_path = "val.csv"
        write_pred_csv(csv_path,
                       timestamps=timestamps,
                       track_ids=track_ids,
                       coords=coords,
                       confs=confs)

        try:
            metrics = compute_metrics_csv(eval_gt_path, csv_path,
                                          [neg_multi_log_likelihood])
            target_metric = 0
            for metric_name, metric_mean in metrics.items():
                target_metric = metric_mean
                break
        except Exception:
            # fall back to a large sentinel value if metric computation fails
            target_metric = 1000

        print('got target metric', target_metric)
        tensorboard_logs = {'val/_metric': target_metric}

        return {'avg_val_loss': target_metric, 'log': tensorboard_logs}
Example #2
def predict_and_save(predictor,
                     test_loader,
                     convert_world_from_agent,
                     out_dir,
                     model_mode,
                     feat_mode: str = "none"):
    # --- Inference ---
    timestamps, track_ids, coords, confs = run_prediction(
        predictor, test_loader, convert_world_from_agent, feat_mode)

    num_modes = confs.shape[-1]
    prediction_out_dir = out_dir / f"prediction_{model_mode}{debug_str}"
    os.makedirs(str(prediction_out_dir), exist_ok=True)

    if num_modes == 3:
        csv_path = prediction_out_dir / "submission.csv"
        write_pred_csv(csv_path,
                       timestamps=timestamps,
                       track_ids=track_ids,
                       coords=coords,
                       confs=confs)
        print(f"Saved to {csv_path}")

    # --- Save to npz format, for future analysis purpose ---
    npz_path = prediction_out_dir / "submission.npz"
    np.savez_compressed(npz_path,
                        timestamps=timestamps,
                        track_ids=track_ids,
                        coords=coords,
                        confs=confs)
    print(f"Saved to {npz_path}")
Example #3
    def evaluate(self, data_path, file_name="submission.csv"):

        # set env variable for data
        os.environ["L5KIT_DATA_FOLDER"] = data_path
        dm = LocalDataManager(None)

        cfg = self.cfg

        # ===== INIT DATASET
        test_cfg = cfg["test_data_loader"]

        # Rasterizer
        rasterizer = build_rasterizer(cfg, dm)

        # Test dataset/dataloader
        test_zarr = ChunkedDataset(dm.require(test_cfg["key"])).open()
        test_mask = np.load(f"{data_path}/scenes/mask.npz")["arr_0"]
        test_dataset = AgentDataset(cfg,
                                    test_zarr,
                                    rasterizer,
                                    agents_mask=test_mask)
        test_dataloader = DataLoader(test_dataset,
                                     shuffle=test_cfg["shuffle"],
                                     batch_size=test_cfg["batch_size"],
                                     num_workers=test_cfg["num_workers"])
        print(test_dataloader)

        # ==== EVAL LOOP
        self.model.eval()
        torch.set_grad_enabled(False)
        criterion = nn.MSELoss(reduction="none")

        # store information for evaluation
        future_coords_offsets_pd = []
        timestamps = []
        pred_coords = []
        confidences_list = []

        agent_ids = []
        progress_bar = tqdm(test_dataloader)
        for data in progress_bar:
            _, pred, confidences = self.forward(data, criterion)

            # future_coords_offsets_pd.append(outputs.cpu().numpy().copy())
            timestamps.append(data["timestamp"].numpy().copy())
            agent_ids.append(data["track_id"].numpy().copy())
            #
            # pred, confidences = predictor(image)

            pred_coords.append(pred.cpu().numpy().copy())
            confidences_list.append(confidences.cpu().numpy().copy())

        # ==== Save Results
        pred_path = f"{os.getcwd()}/{file_name}"
        write_pred_csv(pred_path,
                       timestamps=np.concatenate(timestamps),
                       track_ids=np.concatenate(agent_ids),
                       coords=np.concatenate(pred_coords),
                       confs=np.concatenate(confidences_list))
Example #4
def validation(model, device):

    model.eval()
    torch.set_grad_enabled(False)

    # store information for evaluation
    future_coords_offsets_pd = []
    timestamps = []
    agent_ids = []
    confidences_list = []

    val_dataloader = load_val_data()
    # num_iter = iter(val_dataloader)
    # progress_bar = tqdm(range(cfg["train_params"]["val_num_steps"]))
    progress_bar = tqdm(val_dataloader)

    for data in progress_bar:
        # data = next(num_iter)
        preds, confs = forward(data, model, device)

        # convert agent coordinates into world offsets
        preds = preds.cpu().numpy()

        confs = confs.cpu().numpy()
        world_from_agents = data["world_from_agent"].numpy()
        centroids = data["centroid"].numpy()
        coords_offset = []

        for pred, world_from_agent, centroid in zip(preds, world_from_agents,
                                                    centroids):
            for mode in range(3):
                pred[mode] = transform_points(pred[mode],
                                              world_from_agent) - centroid[:2]
            coords_offset.append(pred)

        confidences_list.append(confs)
        future_coords_offsets_pd.append(np.stack(coords_offset))
        timestamps.append(data["timestamp"].numpy().copy())
        agent_ids.append(data["track_id"].numpy().copy())

    pred_path = f"{gettempdir()}/pred.csv"
    write_pred_csv(pred_path,
                   timestamps=np.concatenate(timestamps),
                   track_ids=np.concatenate(agent_ids),
                   coords=np.concatenate(future_coords_offsets_pd),
                   confs=np.concatenate(confidences_list))

    eval_base_path = '/home/axot/lyft/data/scenes/validate_chopped_31'
    eval_gt_path = str(Path(eval_base_path) / "gt.csv")

    metrics = compute_metrics_csv(eval_gt_path, pred_path,
                                  [neg_multi_log_likelihood, time_displace])
    for metric_name, metric_mean in metrics.items():
        print(metric_name, metric_mean)
    return metrics['neg_multi_log_likelihood']
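
The per-sample agent-to-world conversion loop above recurs throughout these examples (Example #28 calls a helper of this shape); a minimal sketch of such a helper, assuming l5kit's transform_points semantics:

import numpy as np
from l5kit.geometry import transform_points

def convert_agent_coordinates_to_world_offsets(preds, world_from_agents, centroids):
    # preds: (batch, modes, timesteps, 2) in agent frame; returns
    # world-frame coordinates expressed as offsets from each centroid.
    out = np.empty_like(preds)
    for idx, (pred, matrix, centroid) in enumerate(
            zip(preds, world_from_agents, centroids)):
        for mode in range(pred.shape[0]):
            out[idx, mode] = transform_points(pred[mode], matrix) - centroid[:2]
    return out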
Example #5
    def test_epoch_end(self, outputs):
        write_pred_csv('submission.csv',
                       timestamps=np.concatenate(
                           [output["timestamps"] for output in outputs]),
                       track_ids=np.concatenate(
                           [output["agent_ids"] for output in outputs]),
                       coords=np.concatenate([
                           output["future_coords_offsets_pd"]
                           for output in outputs
                       ]))
        return {}
Example #6
def test_compute_mse_error(tmp_path: Path, zarr_dataset: ChunkedDataset, cfg: dict) -> None:
    render_context = RenderContext(
        np.asarray((10, 10)),
        np.asarray((0.25, 0.25)),
        np.asarray((0.5, 0.5)),
        set_origin_to_bottom=cfg["raster_params"]["set_origin_to_bottom"],
    )
    rast = StubRasterizer(render_context)
    dataset = AgentDataset(cfg, zarr_dataset, rast)

    gt_coords = []
    gt_avails = []
    timestamps = []
    track_ids = []

    for idx, el in enumerate(dataset):  # type: ignore
        gt_coords.append(el["target_positions"])
        gt_avails.append(el["target_availabilities"])
        timestamps.append(el["timestamp"])
        track_ids.append(el["track_id"])
        if idx == 100:
            break  # speed up test

    gt_coords = np.asarray(gt_coords)
    gt_avails = np.asarray(gt_avails)
    timestamps = np.asarray(timestamps)
    track_ids = np.asarray(track_ids)

    # test same values error
    write_gt_csv(str(tmp_path / "gt1.csv"), timestamps, track_ids, gt_coords, gt_avails)
    write_pred_csv(str(tmp_path / "pred1.csv"), timestamps, track_ids, gt_coords, confs=None)

    metrics = compute_metrics_csv(str(tmp_path / "gt1.csv"), str(tmp_path / "pred1.csv"), [neg_multi_log_likelihood])
    for metric_value in metrics.values():
        assert np.all(metric_value == 0.0)

    # test different values error
    pred_coords = gt_coords.copy()
    pred_coords += np.random.randn(*pred_coords.shape)
    write_pred_csv(str(tmp_path / "pred3.csv"), timestamps, track_ids, pred_coords, confs=None)

    metrics = compute_metrics_csv(str(tmp_path / "gt1.csv"), str(tmp_path / "pred3.csv"), [neg_multi_log_likelihood])
    for metric_value in metrics.values():
        assert np.any(metric_value > 0.0)

    # test invalid conf by removing lines in gt1
    with open(str(tmp_path / "pred4.csv"), "w") as fp:
        lines = open(str(tmp_path / "pred1.csv")).readlines()
        fp.writelines(lines[:-10])

    with pytest.raises(ValueError):
        compute_metrics_csv(str(tmp_path / "gt1.csv"), str(tmp_path / "pred4.csv"), [neg_multi_log_likelihood])
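
For reference, the neg_multi_log_likelihood metric exercised above scores K weighted trajectory hypotheses against the ground truth; as documented for the Lyft competition (notation ours), with mode confidences c_k and per-timestep availabilities a_t:

\mathrm{NLL} = -\log \sum_{k=1}^{K} c_k \exp\left(-\frac{1}{2} \sum_{t} a_t \left\lVert x_t - \hat{x}^{(k)}_t \right\rVert^2\right)

Identical predictions therefore score 0, which is what the first assertion in this test checks.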
Example #7
def test_write_pred_csv(tmpdir: Path) -> None:
    dump_path = str(tmpdir / "pred_pred.csv")
    num_example, num_modes, future_len, num_coords = 100, 3, 12, 2

    timestamps = np.zeros(num_example)
    track_ids = np.zeros(num_example)

    # test some invalid shapes for coords and confidences
    with pytest.raises(AssertionError):
        coords = np.zeros((num_example, future_len,
                           num_coords))  # pred with 1 mode and confidence 1
        confs = np.ones((num_example, ))
        write_pred_csv(dump_path, timestamps, track_ids, coords, confs)
    with pytest.raises(AssertionError):
        coords = np.zeros((num_example, num_modes, future_len, num_coords))
        confs = np.ones((num_example, 1))  # no modes
        write_pred_csv(dump_path, timestamps, track_ids, coords, confs)

    # test a valid single-mode configuration
    coords = np.zeros((num_example, future_len, num_coords))
    confs = None
    dump_path = str(tmpdir / "pred_pred_uni.csv")
    write_pred_csv(dump_path, timestamps, track_ids, coords, confs)
    assert Path(dump_path).exists()

    # test a valid multi-mode configuration
    coords = np.zeros((num_example, num_modes, future_len, num_coords))
    confs = np.ones((num_example, num_modes))
    dump_path = str(tmpdir / "pred_pred_multi.csv")
    write_pred_csv(dump_path, timestamps, track_ids, coords, confs)
    assert Path(dump_path).exists()
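
Summarizing the contract these assertions exercise, a minimal usage sketch (file names and values are placeholders):

import numpy as np
from l5kit.evaluation import write_pred_csv

N, M, T = 4, 3, 50
timestamps = np.arange(N)
track_ids = np.ones(N, dtype=np.int64)

# Single mode: coords of shape (N, T, 2) with confs=None.
write_pred_csv("uni.csv", timestamps, track_ids, np.zeros((N, T, 2)), confs=None)

# Multi mode: coords (N, M, T, 2) plus confidences (N, M) summing to 1 per row.
coords = np.zeros((N, M, T, 2))
confs = np.full((N, M), 1.0 / M)
write_pred_csv("multi.csv", timestamps, track_ids, coords, confs)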
Example #8
    def run(self):
        model = TopkNet(self.cfg).to(Global.DEVICE)
        Global.load_weight(model, self.W_PATH)
        model.eval()

        future_coords_offsets_pd = []
        timestamps = []
        agent_ids = []
        confs = []

        with torch.no_grad():
            dataiter = tqdm(self.test_dataloader)

            for data in dataiter:
                inputs = data["image"].to(Global.DEVICE)
                images = []
                samples_means, means, mixture_weights = model(inputs)

                fit_outputs = torch.stack([mean for mean in means], dim=1)

                fit_confidences = torch.stack(
                    [mixture_weight for mixture_weight in mixture_weights],
                    dim=1).squeeze()
                outputs = torch.zeros(fit_outputs.size(0), fit_outputs.size(1),
                                      fit_outputs.size(2), 2).to(Global.DEVICE)

                conf = []
                one_hot_el = torch.eye(3, 3)
                for i in range(fit_confidences.size(0)):
                    idx = torch.argmax(fit_confidences[i]).item()
                    conf.append(one_hot_el[idx])

                outputs[:, 0] = Utils.map_writer_from_image_to_world(
                    data, fit_outputs[:, 0, :, :], self.cfg)
                outputs[:, 1] = Utils.map_writer_from_image_to_world(
                    data, fit_outputs[:, 1, :, :], self.cfg)
                outputs[:, 2] = Utils.map_writer_from_image_to_world(
                    data, fit_outputs[:, 2, :, :], self.cfg)

                future_coords_offsets_pd.append(outputs.cpu().numpy().copy())
                timestamps.append(data["timestamp"].numpy().copy())
                agent_ids.append(data["track_id"].numpy().copy())
                confs.append(fit_confidences.cpu().numpy().copy())
                if len(confs) == 10:  # stop early after 10 batches
                    break
        write_pred_csv(f'{Global.MULTI_MODE_SUBMISSION}',
                       timestamps=np.concatenate(timestamps),
                       track_ids=np.concatenate(agent_ids),
                       coords=np.concatenate(future_coords_offsets_pd),
                       confs=np.concatenate(confs))
Example #9
def generate_subs_from_val(val_path):

    if 'pkl' in os.path.splitext(val_path)[-1]:

        # val_path is a dictionary
        val_sub_path = val_path.replace('pkl', 'csv')

        if not os.path.exists(val_sub_path):
            # Create sub from dict
            val_dict = load_from_pickle(val_path)
            write_pred_csv(val_sub_path, val_dict['timestamps'], val_dict['track_ids'], val_dict['preds'], val_dict['conf'])
    else:

        val_sub_path = val_path

    return val_sub_path
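
load_from_pickle (and save_as_pickle in Example #13) are project-local helpers not shown in these snippets; minimal sketches under that assumption:

import pickle

def load_from_pickle(path):
    with open(path, "rb") as f:
        return pickle.load(f)

def save_as_pickle(path, obj):
    with open(path, "wb") as f:
        pickle.dump(obj, f)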
Example #10
def test_e2e_multi_pred_csv(tmpdir: Path) -> None:
    dump_path = str(tmpdir / "pred_out.csv")
    num_example, num_modes, future_len, num_coords = 100, 3, 12, 2

    timestamps = np.random.randint(1000, 2000, num_example)
    track_ids = np.random.randint(0, 200, num_example)

    coords = np.random.randn(num_example, num_modes, future_len, num_coords)
    confs = np.random.rand(num_example, num_modes)
    confs /= confs.sum(axis=-1, keepdims=True)  # per-row confidences must sum to 1
    write_pred_csv(dump_path, timestamps, track_ids, coords, confs)

    # read and check values
    for idx, el in enumerate(read_pred_csv(dump_path)):
        assert int(el["track_id"]) == track_ids[idx]
        assert int(el["timestamp"]) == timestamps[idx]
        assert np.allclose(el["coords"], coords[idx], atol=1e-4)
        assert np.allclose(el["conf"], confs[idx], atol=1e-4)
Example #11
def model_validation_score(model, pred_path):
    # ==== EVAL LOOP
    model.eval()
    torch.set_grad_enabled(False)

    # store information for evaluation
    future_coords_offsets_pd = []
    timestamps = []
    confidences_list = []
    agent_ids = []
    progress_bar = tqdm(eval_dataloader)

    for data in progress_bar:

        _, preds, confidences = forward(data, model, device)

        #fix for the new environment
        preds = preds.cpu().numpy()
        world_from_agents = data["world_from_agent"].numpy()
        centroids = data["centroid"].numpy()
        coords_offset = []

        # convert into world coordinates and compute offsets
        for idx in range(len(preds)):
            for mode in range(3):
                preds[idx, mode, :, :] = transform_points(
                    preds[idx, mode, :, :],
                    world_from_agents[idx]) - centroids[idx][:2]

        future_coords_offsets_pd.append(preds.copy())
        confidences_list.append(confidences.cpu().numpy().copy())
        timestamps.append(data["timestamp"].numpy().copy())
        agent_ids.append(data["track_id"].numpy().copy())

    write_pred_csv(
        pred_path,
        timestamps=np.concatenate(timestamps),
        track_ids=np.concatenate(agent_ids),
        coords=np.concatenate(future_coords_offsets_pd),
        confs=np.concatenate(confidences_list),
    )

    metrics = compute_metrics_csv(eval_gt_path, pred_path,
                                  [neg_multi_log_likelihood, time_displace])
    for metric_name, metric_mean in metrics.items():
        print(metric_name, metric_mean)
Example #12
def inference(model, exp_id):
    device = get_device()
    # predict
    model.eval()
    torch.set_grad_enabled(False)
    test_dataloader = load_test_data()
    # store information for evaluation
    future_coords_offsets_pd = []
    timestamps = []
    confidences_list = []
    agent_ids = []

    progress_bar = tqdm(test_dataloader)
    with torch.no_grad():
        for data in progress_bar:
            preds, confidences = forward(data, model, device)

            # fix for the new environment
            preds = preds.cpu().numpy()
            world_from_agents = data["world_from_agent"].numpy()
            centroids = data["centroid"].numpy()
            coords_offset = []

            # convert into world coordinates and compute offsets
            for idx in range(len(preds)):
                for mode in range(3):
                    preds[idx, mode, :, :] = transform_points(
                        preds[idx, mode, :, :],
                        world_from_agents[idx]) - centroids[idx][:2]

            future_coords_offsets_pd.append(preds.copy())
            confidences_list.append(confidences.cpu().numpy().copy())
            timestamps.append(data["timestamp"].numpy().copy())
            agent_ids.append(data["track_id"].numpy().copy())

    # create submission to submit to Kaggle
    pred_path = f'experiment/{exp_id}/submission.csv'
    write_pred_csv(pred_path,
                   timestamps=np.concatenate(timestamps),
                   track_ids=np.concatenate(agent_ids),
                   coords=np.concatenate(future_coords_offsets_pd),
                   confs=np.concatenate(confidences_list))
Example #13
def generate_ensemble_submission(submission_paths, weights):

    subs = generate_subs(submission_paths)

    assert check_validity(subs)

    predictions = {'preds': np.stack([np.concatenate([sub[conf_names[j]].values.reshape(-1, 1, 50, 2) for j in range(3)], axis=1) for sub in subs], axis=0),
                    'confs': np.stack([sub[confs].values for sub in subs], axis=0)}

    ensembled_predictions = combine_predictions(deepcopy(predictions), weights)

    # Save weights and submission
    sub_name = '_'.join(('submission', datetime.now().strftime('%Y%m%d%H%M%S')))

    save_as_pickle(os.path.join(SUBMISSIONS_DIR, '_'.join(('weights', sub_name, '.pkl'))), weights)

    write_pred_csv(os.path.join(SUBMISSIONS_DIR, '_'.join((sub_name, '.csv'))), 
                    subs[0]['timestamp'].values, 
                    subs[0]['track_id'].values, 
                    ensembled_predictions['preds'], 
                    ensembled_predictions['confs'])
Example #14
def eval(model, eval_dataloader, eval_dataset_ego, eval_dataset, criterion, device, eval_gt_path, cfg):
    # ===== EVAL Loop
    model.eval()
    torch.set_grad_enabled(False)
    # store information for evaluation
    future_coords_offsets_pd = []
    timestamps = []
    confidences_list = []
    agent_ids = []
    progress_bar = tqdm(eval_dataloader)
    for data, indices in progress_bar:
        loss, preds, confidences = mode.run(data, indices, model, eval_dataset_ego, eval_dataset, criterion, device = device)
        #fix for the new environment
        preds = preds.cpu().numpy()
        world_from_agents = data["world_from_agent"].numpy()
        centroids = data["centroid"].numpy()
        coords_offset = []

        # convert into world coordinates and compute offsets
        for idx in range(len(preds)):
            for mode_idx in range(cfg["model_params"]["num_trajectories"]):  # avoid shadowing the `mode` module used above
                preds[idx, mode_idx, :, :] = transform_points(preds[idx, mode_idx, :, :], world_from_agents[idx]) - centroids[idx][:2]

        future_coords_offsets_pd.append(preds.copy())
        confidences_list.append(confidences.cpu().numpy().copy())
        timestamps.append(data["timestamp"].numpy().copy())
        agent_ids.append(data["track_id"].numpy().copy()) 
    pred_path = f"{gettempdir()}/pred.csv"

    write_pred_csv(pred_path,
                   timestamps=np.concatenate(timestamps),
                   track_ids=np.concatenate(agent_ids),
                   coords=np.concatenate(future_coords_offsets_pd),
                   confs=np.concatenate(confidences_list))
    metrics = compute_metrics_csv(eval_gt_path, pred_path, [neg_multi_log_likelihood, time_displace])
    for metric_name, metric_mean in metrics.items():
        print(metric_name, metric_mean)
Example #15
    def test_epoch_end(self, outputs):
        """from https://www.kaggle.com/pestipeti/pytorch-baseline-inference"""
        pred_coords_list = []
        confidences_list = []
        timestamps_list = []
        track_id_list = []

        # convert into world coordinates and compute offsets
        for preds, confidences, batch in outputs:  # avoid shadowing the epoch-level `outputs`
            preds = preds.cpu().numpy()

            world_from_agents = batch["world_from_agent"].cpu().numpy()
            centroids = batch["centroid"].cpu().numpy()
            for idx in range(len(preds)):
                for mode in range(3):
                    preds[idx, mode, :, :] = (transform_points(
                        preds[idx, mode, :, :], world_from_agents[idx]) -
                                              centroids[idx][:2])
            pred_coords_list.append(preds)

            confidences_list.append(confidences)
            timestamps_list.append(batch["timestamp"])
            track_id_list.append(batch["track_id"])

        coords = np.concatenate(pred_coords_list)
        confs = torch.cat(confidences_list).cpu().numpy()
        track_ids = torch.cat(track_id_list).cpu().numpy()
        timestamps = torch.cat(timestamps_list).cpu().numpy()

        write_pred_csv(
            CSV_PATH,
            timestamps=timestamps,
            track_ids=track_ids,
            coords=coords,
            confs=confs,
        )
        print(f"Saved to {CSV_PATH}")
Example #16
def run():
    utils.seed_everything(seed=config.SEED)

    train_dataset = dataset.Lyft2ndLevelDataset(config.PRED_PATHS +
                                                [config.MODE_16_PATH])
    data_loader = torch.utils.data.DataLoader(train_dataset,
                                              batch_size=config.BATCH_SIZE,
                                              num_workers=4,
                                              shuffle=False)

    device = torch.device('cuda')
    model = models.SetTransformer(**config.MODEL_PARAMS)
    model = model.to(device)
    model.load_state_dict(torch.load(config.MODEL_PATH + 'transformer.bin'))

    coords, confs = engine.infer_fn(data_loader, model, device)

    sample_sub = pd.read_csv(config.SAMPLE_SUB_PATH)

    write_pred_csv(config.INFER_SAVE_PATH + 'submission.csv',
                   timestamps=sample_sub['timestamp'],
                   track_ids=sample_sub['track_id'],
                   coords=coords,
                   confs=confs)
Example #17
    # store information for evaluation
    future_coords_offsets_pd = []
    timestamps = []
    confidences_list = []
    agent_ids = []

    progress_bar = tqdm(test_dataloader)
    
    for data in progress_bar:
        
        _, preds, confidences = forward(data, model, device)
    
        #fix for the new environment
        preds = preds.cpu().numpy()
        world_from_agents = data["world_from_agent"].numpy()
        centroids = data["centroid"].numpy()
        coords_offset = []
        
        # convert into world coordinates and compute offsets
        for idx in range(len(preds)):
            for mode in range(3):
                preds[idx, mode, :, :] = transform_points(preds[idx, mode, :, :], world_from_agents[idx]) - centroids[idx][:2]
    
        future_coords_offsets_pd.append(preds.copy())
        confidences_list.append(confidences.cpu().numpy().copy())
        timestamps.append(data["timestamp"].numpy().copy())
        agent_ids.append(data["track_id"].numpy().copy()) 
pred_path = 'submission.csv'
write_pred_csv(pred_path,
               timestamps=np.concatenate(timestamps),
               track_ids=np.concatenate(agent_ids),
               coords=np.concatenate(future_coords_offsets_pd),
               confs=np.concatenate(confidences_list))
Example #18
def prepare_submission(experiment_name, epoch, stage):
    model_str = experiment_name
    cfg = load_config_data(experiment_name)
    pprint.pprint(cfg)

    checkpoints_dir = f"./checkpoints/{model_str}"
    print("\n", experiment_name, "\n")

    model_info = DotDict(cfg["model_params"])
    model = build_model(model_info, cfg)
    model = model.cuda()
    checkpoint = torch.load(f"{checkpoints_dir}/{epoch:03}.pt")
    model.load_state_dict(checkpoint["model_state_dict"])

    model.eval()
    torch.set_grad_enabled(False)

    eval_dataset = dataset.LyftDatasetPrerendered(dset_name=stage,
                                                  cfg_data=cfg)
    # eval_dataset[0]

    eval_dataloader = DataLoader(eval_dataset,
                                 shuffle=False,
                                 batch_size=32,
                                 num_workers=16)

    # print(eval_dataset.agent_dataset)

    def run_prediction(predictor, data_loader):
        predictor.eval()

        pred_coords_list = []
        confidences_list = []
        timestamps_list = []
        track_id_list = []

        with torch.no_grad():
            for data in tqdm(data_loader):
                image = data["image"].cuda()
                # agent_state = data["agent_state"].float().cuda()
                agent_state = None

                pred, confidences = predictor(image, agent_state)
                confidences = torch.exp(confidences)

                pred_world = []
                pred = pred.cpu().numpy().copy()
                if model_info.target_space == "image":
                    if model_info.target_space == "image":
                        world_from_agents = data["world_from_agent"].numpy()
                        centroids = data["centroid"].numpy()
                        for idx in range(pred.shape[0]):
                            pred[idx] = (transform_points(
                                pred[idx].copy().reshape(-1, 2),
                                world_from_agents[idx],
                            ) - centroids[idx]).reshape(-1, 50, 2)

                for img_idx in range(pred.shape[0]):
                    pred_world.append(pred[img_idx])

                pred_coords_list.append(np.array(pred_world))
                confidences_list.append(confidences.cpu().numpy().copy())
                timestamps_list.append(data["timestamp"].numpy().copy())
                track_id_list.append(data["track_id"].numpy().copy())

        timestamps = np.concatenate(timestamps_list)
        track_ids = np.concatenate(track_id_list)
        coords = np.concatenate(pred_coords_list)
        confs = np.concatenate(confidences_list)
        return timestamps, track_ids, coords, confs

    timestamps, track_ids, coords, confs = run_prediction(
        model, eval_dataloader)
    os.makedirs("submissions", exist_ok=True)
    pred_path = f"submissions/sub_{experiment_name}_{epoch}_{stage}.csv"

    print(f"Coords: {coords.shape} conf: {confs.shape}")
    np.savez_compressed(
        f"submissions/sub_{experiment_name}_{epoch}_{stage}.npz",
        timestamps=timestamps,
        track_ids=track_ids,
        coords=coords,
        confs=confs)

    write_pred_csv(
        pred_path,
        timestamps=timestamps,
        track_ids=track_ids,
        coords=coords,
        confs=confs,
    )
    print(f"Saved to {pred_path}")
Example #19
def valid_fn(model, loader, device, ground_truth_file, logdir, verbose=True):
    """Validation step.

    Args:
        model (nn.Module): model to evaluate
        loader (DataLoader): loader with data
        device (str or torch.device): device to use for placing batches
        ground_truth_file (str): path to the ground-truth csv
        logdir (Path): directory where the prediction csv is written
        verbose (bool, optional): verbosity mode.
            Default is True.

    Returns:
        dict with metrics computed during the validation on loader
    """
    model.eval()

    future_coords_offsets_pd = []
    timestamps = []
    confidences_list = []
    agent_ids = []

    with torch.no_grad(), tqdm(
        total=len(loader), desc="valid", disable=not verbose
    ) as progress:
        for batch_idx, batch in enumerate(loader):
            images, acceleration = t2d(
                [batch["image"], batch["xy_acceleration"]], device
            )

            predictions, confidences = model(images, acceleration)

            _gt = batch["target_positions"].cpu().numpy().copy()
            predictions = predictions.cpu().numpy().copy()
            world_from_agents = batch["world_from_agent"].numpy()
            centroids = batch["centroid"].numpy()

            for idx in range(len(predictions)):
                for mode in range(3):
                    # FIX
                    predictions[idx, mode, :, :] = (
                        transform_points(
                            predictions[idx, mode, :, :], world_from_agents[idx]
                        )
                        - centroids[idx][:2]
                    )
                _gt[idx, :, :] = (
                    transform_points(_gt[idx, :, :], world_from_agents[idx])
                    - centroids[idx][:2]
                )

            future_coords_offsets_pd.append(predictions.copy())
            confidences_list.append(confidences.cpu().numpy().copy())
            timestamps.append(batch["timestamp"].numpy().copy())
            agent_ids.append(batch["track_id"].numpy().copy())

            progress.update(1)

            if batch_idx == DEBUG:  # the inner sample loop reuses `idx`, so compare the batch index
                break

    predictions_file = str(logdir / "preds_validate_chopped.csv")
    write_pred_csv(
        predictions_file,
        timestamps=np.concatenate(timestamps),
        track_ids=np.concatenate(agent_ids),
        coords=np.concatenate(future_coords_offsets_pd),
        confs=np.concatenate(confidences_list),
    )

    metrics = compute_metrics_csv(
        ground_truth_file,
        predictions_file,
        [neg_multi_log_likelihood],
    )

    return {"score": metrics["neg_multi_log_likelihood"]}
Example #20
def ensemble_batch_core(flags: EnsembleFlags):
    assert len(flags.weight) == len(flags.file_list)

    coords_all, confs_all = load_predictions(flags.file_list, flags.weight)

    num_modes_total = confs_all.shape[1]
    n_example = len(coords_all)

    assert coords_all.shape == (n_example, num_modes_total, 50, 2)

    np_random = np.random.RandomState(SEED)

    ens_confs = np.zeros((n_example, 3))
    ens_coords = np.zeros((n_example, 3, 50, 2))
    ens_log_probs = np.zeros((n_example, ))
    time_store = {}

    noise = np_random.normal(0,
                             scale=flags.sigma,
                             size=(flags.batch_size, flags.N_sample, 100))

    for idx in tqdm(range(0, n_example, flags.batch_size)):
        confidences = confs_all[idx:idx + flags.batch_size]
        confidences = confidences / confidences.sum(axis=1, keepdims=True)
        size = confidences.shape[0]
        assert confidences.shape == (size, num_modes_total)

        coords = coords_all[idx:idx + flags.batch_size]
        coords = coords.reshape(size * num_modes_total, 50 * 2)

        # TODO: remove for-loop
        with add_time("choice", time_store):
            indices = np.stack([
                np_random.choice(
                    num_modes_total, size=flags.N_sample, p=confidences[j])
                for j in range(size)
            ], axis=0)
        indices = (np.arange(size)[:, np.newaxis] * num_modes_total +
                   indices).reshape(-1)

        X = coords[indices]
        X = X.reshape(size, flags.N_sample, 100)
        X = X + noise[:len(X)]

        if flags.give_centroids_init:
            flags.gmm_kwargs["centroids_init"] = coords.reshape(
                size, num_modes_total, 100)[:, :3, :]

        if flags.precisions_init_sigma is not None:
            flags.gmm_kwargs["precisions_init"] = np.full(
                (size, 3), flags.precisions_init_sigma)

        with add_time("fit", time_store):
            gmm = BatchSphericalGMM(n_components=3,
                                    device=flags.device,
                                    seed=SEED,
                                    **flags.gmm_kwargs)
            weights, means, _, log_probs = gmm.fit(X)

        ens_confs[idx:idx + flags.batch_size] = weights
        ens_coords[idx:idx + flags.batch_size] = means.reshape(
            means.shape[0], 3, 50, 2)
        ens_log_probs[idx:idx + flags.batch_size] = log_probs

    print(time_store)

    output_path = Path(flags.output_path)
    output_path.parent.mkdir(exist_ok=True, parents=True)

    if output_path.name.endswith(".csv"):
        save_format = "csv"
    else:
        save_format = "npz"

    timestamps, track_ids, targets, target_availabilities = load_metadata(
        flags.file_list)

    if targets is not None:
        assert target_availabilities is not None
        errors = pytorch_neg_multi_log_likelihood_batch(
            torch.as_tensor(targets),
            torch.as_tensor(ens_coords),
            torch.as_tensor(ens_confs),
            torch.as_tensor(target_availabilities),
        )
        print("errors", errors.shape, torch.mean(errors))

    if save_format == "csv":
        write_pred_csv(str(output_path), timestamps, track_ids, ens_coords,
                       ens_confs)
    else:
        np.savez_compressed(
            str(output_path),
            timestamps=timestamps,
            track_ids=track_ids,
            coords=ens_coords,
            confs=ens_confs,
            targets=targets,
            target_availabilities=target_availabilities,
            log_probs=ens_log_probs,
        )
        print(f"Saved to {output_path}")
Example #21
def evaluate(model, device, data_path):

    # set env variable for data
    os.environ["L5KIT_DATA_FOLDER"] = data_path
    dm = LocalDataManager(None)

    cfg = model.cfg

    # ===== INIT DATASET
    test_cfg = cfg["test_data_loader"]

    # Rasterizer
    rasterizer = build_rasterizer(cfg, dm)

    # Test dataset/dataloader
    test_zarr = ChunkedDataset(dm.require(test_cfg["key"])).open()
    test_mask = np.load(f"{data_path}/scenes/mask.npz")["arr_0"]
    test_dataset = AgentDataset(cfg,
                                test_zarr,
                                rasterizer,
                                agents_mask=test_mask)
    test_dataloader = DataLoader(test_dataset,
                                 shuffle=test_cfg["shuffle"],
                                 batch_size=test_cfg["batch_size"],
                                 num_workers=test_cfg["num_workers"])
    print(test_dataloader)

    # ==== EVAL LOOP
    model.eval()
    torch.set_grad_enabled(False)
    criterion = nn.MSELoss(reduction="none")

    # store information for evaluation
    future_coords_offsets_pd = []
    timestamps = []

    agent_ids = []
    progress_bar = tqdm(test_dataloader)
    for data in progress_bar:
        _, outputs, _ = model.forward(data, device, criterion)
        future_coords_offsets_pd.append(outputs.cpu().numpy().copy())
        timestamps.append(data["timestamp"].numpy().copy())
        agent_ids.append(data["track_id"].numpy().copy())

    # ==== Save Results
    pred_path = "./submission.csv"
    write_pred_csv(pred_path,
                   timestamps=np.concatenate(timestamps),
                   track_ids=np.concatenate(agent_ids),
                   coords=np.concatenate(future_coords_offsets_pd))

    # ===== GENERATE AND LOAD CHOPPED DATASET
    num_frames_to_chop = 56
    test_cfg = cfg["test_data_loader"]
    test_base_path = create_chopped_dataset(
        zarr_path=dm.require(test_cfg["key"]),
        th_agent_prob=cfg["raster_params"]["filter_agents_threshold"],
        num_frames_to_copy=num_frames_to_chop,
        num_frames_gt=cfg["model_params"]["future_num_frames"],
        min_frame_future=MIN_FUTURE_STEPS)

    eval_zarr_path = str(
        Path(test_base_path) / Path(dm.require(test_cfg["key"])).name)
    print(eval_zarr_path)
    test_mask_path = str(Path(test_base_path) / "mask.npz")
    test_gt_path = str(Path(test_base_path) / "gt.csv")

    test_zarr = ChunkedDataset(eval_zarr_path).open()
    test_mask = np.load(test_mask_path)["arr_0"]

    # ===== INIT DATASET AND LOAD MASK
    test_dataset = AgentDataset(cfg,
                                test_zarr,
                                rasterizer,
                                agents_mask=test_mask)
    test_dataloader = DataLoader(test_dataset,
                                 shuffle=test_cfg["shuffle"],
                                 batch_size=test_cfg["batch_size"],
                                 num_workers=test_cfg["num_workers"])
    print(test_dataset)

    # ==== Perform Evaluation
    print(test_gt_path)
    metrics = compute_metrics_csv(test_gt_path, pred_path,
                                  [neg_multi_log_likelihood, time_displace])
    for metric_name, metric_mean in metrics.items():
        print(metric_name, metric_mean)
Example #22
def prepare_submission(rank, experiment_name, epoch, stage, dist_url, world_size):
    print(f"Running rank {rank}/{world_size} dist url: {dist_url}.")
    # setup(rank, world_size)

    dist.init_process_group(backend="nccl", init_method=dist_url,
                            world_size=world_size, rank=rank)
    torch.cuda.set_device(rank)

    distributed = True

    model_str = experiment_name
    cfg = load_config_data(experiment_name)
    pprint.pprint(cfg)

    checkpoints_dir = f"./checkpoints/{model_str}"
    print("\n", experiment_name, "\n")

    model_info = DotDict(cfg["model_params"])
    model_orig = build_model(model_info, cfg).cuda(rank)
    model = nn.SyncBatchNorm.convert_sync_batchnorm(model_orig)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank], find_unused_parameters=True)
    model.eval()

    # if rank == 0:
    fn = f"{checkpoints_dir}/{epoch:03}.pt"
    print(f'loading {fn}...')

    map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
    checkpoint = torch.load(fn, map_location=map_location)

    model.module.load_state_dict(checkpoint["model_state_dict"])
    print(f'loaded {fn}')

    model.eval()
    torch.set_grad_enabled(False)

    eval_dataset = dataset.LyftDatasetPrerendered(stage=stage, cfg_data=cfg)
    # eval_dataset[0]

    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=32, num_workers=16)

    # print(eval_dataset.agent_dataset)

    def run_prediction(predictor, data_loader):
        predictor.eval()

        pred_coords_list = []
        confidences_list = []
        timestamps_list = []
        track_id_list = []

        with torch.no_grad():
            for data in tqdm(data_loader):
                image = data["image"].cuda()
                # agent_state = data["agent_state"].float().cuda()
                agent_state = None

                pred, confidences = predictor(image, agent_state)
                confidences = torch.exp(confidences)

                pred_world = []
                pred = pred.cpu().numpy().copy()
                if model_info.target_space == "image":
                    if model_info.target_space == "image":
                        world_from_agents = data["world_from_agent"].numpy()
                        centroids = data["centroid"].numpy()
                        for idx in range(pred.shape[0]):
                            pred[idx] = (
                                    transform_points(
                                        pred[idx].copy().reshape(-1, 2),
                                        world_from_agents[idx],
                                    )
                                    - centroids[idx]
                            ).reshape(-1, 50, 2)

                for img_idx in range(pred.shape[0]):
                    pred_world.append(pred[img_idx])

                pred_coords_list.append(np.array(pred_world))
                confidences_list.append(confidences.cpu().numpy().copy())
                timestamps_list.append(data["timestamp"].numpy().copy())
                track_id_list.append(data["track_id"].numpy().copy())

        timestamps = np.concatenate(timestamps_list)
        track_ids = np.concatenate(track_id_list)
        coords = np.concatenate(pred_coords_list)
        confs = np.concatenate(confidences_list)
        return timestamps, track_ids, coords, confs

    timestamps, track_ids, coords, confs = run_prediction(model, eval_dataloader)
    os.makedirs("submissions", exist_ok=True)
    pred_path = f"submissions/sub_{experiment_name}_{epoch}_{stage}.csv"

    print(f"Coords: {coords.shape} conf: {confs.shape}")
    np.savez_compressed(f"submissions/sub_{experiment_name}_{epoch}_{stage}.npz",
                        timestamps=timestamps,
                        track_ids=track_ids,
                        coords=coords,
                        confs=confs)

    write_pred_csv(
        pred_path,
        timestamps=timestamps,
        track_ids=track_ids,
        coords=coords,
        confs=confs,
    )
    print(f"Saved to {pred_path}")
Example #23
        # TODO: fix coordinates
        _gt = batch["target_positions"].cpu().numpy().copy()
        preds = preds.cpu().numpy().copy()
        world_from_agents = batch["world_from_agent"].numpy()
        centroids = batch["centroid"].numpy()
        for idx in range(len(preds)):
            for mode in range(n_trajectories):
                # FIX
                preds[idx, mode, :, :] = (transform_points(
                    preds[idx, mode, :, :], world_from_agents[idx]) -
                                          centroids[idx][:2])
            _gt[idx, :, :] = (
                transform_points(_gt[idx, :, :], world_from_agents[idx]) -
                centroids[idx][:2])

        future_coords_offsets_pd.append(preds.copy())
        confidences_list.append(confidences.cpu().numpy().copy())
        timestamps.append(batch["timestamp"].numpy().copy())
        agent_ids.append(batch["track_id"].numpy().copy())

        progress.update(1)

write_pred_csv(
    submission_file,
    timestamps=np.concatenate(timestamps),
    track_ids=np.concatenate(agent_ids),
    coords=np.concatenate(future_coords_offsets_pd),
    confs=np.concatenate(confidences_list),
)
                             shuffle=test_cfg["shuffle"],
                             batch_size=test_cfg["batch_size"],
                             num_workers=test_cfg["num_workers"])

print(test_dataloader)

model.eval()

future_coords_offsets_pd = []
timestamps = []
agent_ids = []

with torch.no_grad():
    dataiter = tqdm(test_dataloader)

    for data in dataiter:
        inputs = data["image"].to(device)
        target_availabilities = data["target_availabilities"].unsqueeze(-1).to(
            device)
        targets = data["target_positions"].to(device)

        _, outputs = forward(data, model, device, criterion)

        future_coords_offsets_pd.append(outputs.cpu().numpy().copy())
        timestamps.append(data["timestamp"].numpy().copy())
        agent_ids.append(data["track_id"].numpy().copy())

write_pred_csv('submission.csv',
               timestamps=np.concatenate(timestamps),
               track_ids=np.concatenate(agent_ids),
               coords=np.concatenate(future_coords_offsets_pd))
Example #25
def evaluate(cfg, model, dm, rasterizer, first_time, iters, eval_dataloader, eval_gt_path):
    if first_time:
        num_frames_to_chop = 100
        print("min_future_steps: ",MIN_FUTURE_STEPS)
        eval_cfg = cfg["val_data_loader"]
        eval_base_path = create_chopped_dataset(dm.require(eval_cfg["key"]), cfg["raster_params"]["filter_agents_threshold"], 
                                    num_frames_to_chop, cfg["model_params"]["future_num_frames"], MIN_FUTURE_STEPS)
        eval_zarr_path = str(Path(eval_base_path) / Path(dm.require(eval_cfg["key"])).name)
        eval_mask_path = str(Path(eval_base_path) / "mask.npz")
        eval_gt_path = str(Path(eval_base_path) / "gt.csv")
        eval_zarr = ChunkedDataset(eval_zarr_path).open()
        eval_mask = np.load(eval_mask_path)["arr_0"]
        eval_dataset = AgentDataset(cfg, eval_zarr, rasterizer, agents_mask=eval_mask)
        eval_dataloader = DataLoader(eval_dataset, shuffle=eval_cfg["shuffle"], batch_size=eval_cfg["batch_size"], 
                                    num_workers=eval_cfg["num_workers"])
        print(eval_dataset)
        first_time = False

    model.eval()
    torch.set_grad_enabled(False)

    future_coords_offsets_pd = []
    timestamps = []
    confidences_list = []
    agent_ids = []
    progress_bar = tqdm(eval_dataloader)
    for data in progress_bar:
        _, preds, confidences = forward(data, model)
        
        # convert agent coordinates into world offsets
        preds = preds.cpu().numpy()
        world_from_agents = data["world_from_agent"].numpy()
        centroids = data["centroid"].numpy()
        coords_offset = []
        
        for idx in range(len(preds)):
            for mode in range(3):
                preds[idx, mode, :, :] = transform_points(preds[idx, mode, :, :], world_from_agents[idx]) - centroids[idx][:2]
        
        future_coords_offsets_pd.append(preds.copy())
        confidences_list.append(confidences.cpu().numpy().copy())
        timestamps.append(data["timestamp"].numpy().copy())
        agent_ids.append(data["track_id"].numpy().copy())
    
    model.train()
    torch.set_grad_enabled(True)

    pred_path = os.path.join(cfg["save_path"],f"pred_{iters}.csv")

    write_pred_csv(pred_path,
                   timestamps=np.concatenate(timestamps),
                   track_ids=np.concatenate(agent_ids),
                   coords=np.concatenate(future_coords_offsets_pd),
                   confs=np.concatenate(confidences_list))

    metrics = compute_metrics_csv(eval_gt_path, pred_path, [neg_multi_log_likelihood, time_displace])
    for metric_name, metric_mean in metrics.items():
        print(metric_name, metric_mean)

    return first_time, eval_dataloader, eval_gt_path
Example #26
def main():

    cfg = {
        'save_path': "./regnetx/",
        'seed': 39,
        'data_path': "kagglepath",
        'stem_type':'simple_stem_in',
        'stem_w':32,
        'block_type':'res_bottleneck_block',
        'ds':[2,5,13,1],
        'ws':[72,216,576,1512],
        'ss':[2,2,2,2],
        'bms':[1.0,1.0,1.0,1.0],
        'gws':[24,24,24,24],
        'se_r':0.25,
        'model_params': {
            'history_num_frames': 10, #3+20+2=25 25*h*w
            'history_step_size': 1,
            'history_delta_time': 0.1,
            'future_num_frames': 50, # 1512 -> 50*2*3+3=303 nn.linear
            'future_step_size': 1,
            'future_delta_time': 0.1,
            'opt_type' : 'adam',
            'lr': 3e-4,
            'w_decay': 0,
            'reduce_type':'stone',
            'r_factor': 0.5,
            'r_step' : [200_000, 300_000, 360_000, 420_000, 480_000, 540_000],
            'weight_path': './050.pth',
        },

        'raster_params': {
            'raster_size': [224, 224],
            'pixel_size': [0.5, 0.5],
            'ego_center': [0.25, 0.5],
            'map_type': 'py_semantic',
            'satellite_map_key': 'aerial_map/aerial_map.png',
            'semantic_map_key': 'semantic_map/semantic_map.pb',
            'dataset_meta_key': 'meta.json',
            'filter_agents_threshold': 0.5
        },

        'train_data_loader': {
            'key': 'scenes/train.zarr',
            'batch_size': 16,
            'shuffle': True,
            'num_workers': 20
        },

        'val_data_loader': {
            'key': "scenes/validate.zarr",
            'batch_size': 16,
            'shuffle': False,
            'num_workers': 20
        },

        'test_data_loader': {
            'key': 'scenes/test.zarr',
            'batch_size': 32,
            'shuffle': False,
            'num_workers': 4
        },

        'train_params': {
            'max_num_steps': 600_000,
            'checkpoint_every_n_steps': 100_000,
            'eval_every_n_steps' : 100_000,
        }
    }
    
    writer = self_mkdir(cfg)
    set_seed(cfg["seed"])
    os.environ["L5KIT_DATA_FOLDER"] = cfg["data_path"]
    dm = LocalDataManager(None)

    model = LyftMultiModel(cfg)#1
    weight_path = cfg["model_params"]["weight_path"]
    if weight_path:
        model.load_state_dict(torch.load(weight_path))
    else:
        print('no checkpoint found')
    model.cuda()
    
    m_params = cfg["model_params"]
    if m_params['opt_type'] == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=m_params["lr"], weight_decay=m_params['w_decay'],)
    elif m_params['opt_type'] == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=m_params["lr"], weight_decay=m_params['w_decay'],)
    else:
        assert False, 'cfg opt_type error'

    if m_params['reduce_type'] == 'stone':
        lr_sche = optim.lr_scheduler.MultiStepLR(optimizer, m_params['r_step'], 
            gamma=m_params['r_factor'], last_epoch=-1)
    else:
        assert False, 'cfg reduce_type error'

    Training = False
    if Training:
        train_cfg = cfg["train_data_loader"]
        rasterizer = build_rasterizer(cfg, dm)
        train_zarr = ChunkedDataset(dm.require(train_cfg["key"])).open()
        train_dataset = AgentDataset(cfg, train_zarr, rasterizer)
        train_dataloader = DataLoader(train_dataset, shuffle=train_cfg["shuffle"], batch_size=train_cfg["batch_size"], 
                                    num_workers=train_cfg["num_workers"])
        print(train_dataset)
        
        tr_it = iter(train_dataloader)
        progress_bar = tqdm(range(cfg["train_params"]["max_num_steps"]))
        
        
        model_name = cfg["model_params"].get("model_name", "lyft_multi_model")  # "model_name" is absent from the cfg above; default assumed
        first_time = True
        eval_dataloader = None
        eval_gt_path = None
        model.train()
        torch.set_grad_enabled(True)
        loss_ten = 0
        for i in progress_bar:
            try:
                data = next(tr_it)
            except StopIteration:
                tr_it = iter(train_dataloader)
                data = next(tr_it)
            
            loss, _, _ = forward(data, model)#2
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            lr_sche.step()
            
            writer.add_scalar('train_loss', loss.item(), i)
            writer.add_scalar('lr', optimizer.param_groups[0]['lr'], i)
            if i == 10:
                loss_ten = loss.item()
            progress_bar.set_description(f"loss: {loss.item()} and {loss_ten}")

            if (i+1) % cfg['train_params']['checkpoint_every_n_steps'] == 0:
                torch.save(model.state_dict(), os.path.join(cfg['save_path'],f'{model_name}_{i}.pth'))
            
            if (i+1) % cfg['train_params']['eval_every_n_steps'] == 0: #3
                first_time, eval_dataloader, eval_gt_path = evaluate(cfg, model, dm, rasterizer, first_time, i+1, eval_dataloader, eval_gt_path)


    test_cfg = cfg["test_data_loader"]
    rasterizer = build_rasterizer(cfg, dm)
    test_zarr = ChunkedDataset(dm.require(test_cfg["key"])).open()
    test_mask = np.load(os.path.join(cfg["data_path"],'scenes/mask.npz'))["arr_0"]
    test_dataset = AgentDataset(cfg, test_zarr, rasterizer, agents_mask=test_mask)
    test_dataloader = DataLoader(test_dataset,shuffle=test_cfg["shuffle"],batch_size=test_cfg["batch_size"],
                                num_workers=test_cfg["num_workers"])
    print(test_dataset)

    model.eval()
    torch.set_grad_enabled(False)

    # store information for evaluation
    future_coords_offsets_pd = []
    timestamps = []
    confidences_list = []
    agent_ids = []

    progress_bar = tqdm(test_dataloader)
    
    for data in progress_bar:
        
        _, preds, confidences = forward(data, model)
    
        #fix for the new environment
        preds = preds.cpu().numpy()
        world_from_agents = data["world_from_agent"].numpy()
        centroids = data["centroid"].numpy()
        coords_offset = []
        
        # convert into world coordinates and compute offsets
        for idx in range(len(preds)):
            for mode in range(3):
                preds[idx, mode, :, :] = transform_points(preds[idx, mode, :, :], world_from_agents[idx]) - centroids[idx][:2]
    
        future_coords_offsets_pd.append(preds.copy())
        confidences_list.append(confidences.cpu().numpy().copy())
        timestamps.append(data["timestamp"].numpy().copy())
        agent_ids.append(data["track_id"].numpy().copy())

    pred_path = 'submission.csv'
    write_pred_csv(pred_path,
                   timestamps=np.concatenate(timestamps),
                   track_ids=np.concatenate(agent_ids),
                   coords=np.concatenate(future_coords_offsets_pd),
                   confs=np.concatenate(confidences_list))
Example #27
    write_gt_csv(csv_path=eval_gt_path,
                 timestamps=timestamps_concat,
                 track_ids=track_ids_concat,
                 coords=gt_valid_final,
                 avails=target_avail_concat.squeeze(-1))

    num_examples = gt_valid_final.shape[0]
    confidence = np.array([0.33, 0.33, 0.34])
    confidences = np.tile(confidence, (num_examples, 1))

    # submission.csv
    write_pred_csv(pred_path,
                   timestamps=timestamps_concat,
                   track_ids=track_ids_concat,
                   coords=coords_concat,
                   confs=confidences)

    metrics = compute_metrics_csv(eval_gt_path, pred_path,
                                  [neg_multi_log_likelihood, time_displace])
    for metric_name, metric_mean in metrics.items():
        print(metric_name, metric_mean)

    # Save Metric
    np.save(metric_path, metrics)

if test == TEST_CONF[2]:

    # generate ground truth csv
    write_gt_csv(csv_path=eval_gt_path,
Example #28
torch.set_grad_enabled(False)

pred_coords_list = []
confidences_list = []
timestamps_list = []
track_id_list = []

for data in tqdm(val_dataloader):
    pred, confidences = model(data["image"].to(device))
    pred = convert_agent_coordinates_to_world_offsets(
        pred.detach().cpu().numpy(),
        data["world_from_agent"].numpy(),
        data["centroid"].numpy(),
    )
    pred_coords_list.append(pred)
    confidences_list.append(confidences.detach().cpu().numpy())
    timestamps_list.append(data["timestamp"].detach().numpy())
    track_id_list.append(data["track_id"].detach().numpy())

timestamps = np.concatenate(timestamps_list)
track_ids = np.concatenate(track_id_list)
coords = np.concatenate(pred_coords_list)
confs = np.concatenate(confidences_list)

write_pred_csv(
    "predictions.csv",
    timestamps=timestamps,
    track_ids=track_ids,
    coords=coords,
    confs=confs,
)
Example #29
test_dataset = test_agent_dataset

test_loader = DataLoader(
    test_dataset,
    shuffle=test_cfg["shuffle"],
    batch_size=test_cfg["batch_size"],
    num_workers=test_cfg["num_workers"],
    pin_memory=True,
)

print(test_agent_dataset)
print("# AgentDataset test:", len(test_agent_dataset))
print("# ActualDataset test:", len(test_dataset))

args = parser.parse_args()
cfg = load_config_data(args.cfg)

module = LyftModule(args.cfg)
module.load_state_dict(torch.load(args.resume, map_location='cpu'))
predictor = module.predictor

timestamps, track_ids, coords, confs = run_prediction(predictor, test_loader)

csv_path = "submission.csv"
write_pred_csv(csv_path,
               timestamps=timestamps,
               track_ids=track_ids,
               coords=coords,
               confs=confs)
print(f"Saved to {csv_path}")
Example #30
    def evaluate(self, data_path, file_name="submission.csv"):

        # set env variable for data
        os.environ["L5KIT_DATA_FOLDER"] = data_path
        dm = LocalDataManager(None)

        cfg = self.cfg

        # ===== INIT DATASET
        test_cfg = cfg["test_data_loader"]

        # Rasterizer
        rasterizer = build_rasterizer(cfg, dm)

        # Test dataset/dataloader
        test_zarr = ChunkedDataset(dm.require(test_cfg["key"])).open()
        test_mask = np.load(f"{data_path}/scenes/mask.npz")["arr_0"]
        # test_dataset = AgentDataset(cfg, test_zarr, rasterizer, agents_mask=test_mask)
        test_dataset = KineticDataset(cfg,
                                      test_zarr,
                                      rasterizer,
                                      agents_mask=test_mask)
        test_dataloader = DataLoader(test_dataset,
                                     shuffle=test_cfg["shuffle"],
                                     batch_size=test_cfg["batch_size"],
                                     num_workers=test_cfg["num_workers"])
        print(test_dataloader)

        # ==== EVAL LOOP
        self.model.eval()
        torch.set_grad_enabled(False)
        criterion = nn.MSELoss(reduction="none")

        # store information for evaluation
        future_coords_offsets_pd = []
        timestamps = []
        pred_coords = []
        confidences_list = []

        agent_ids = []
        progress_bar = tqdm(test_dataloader)
        for data in progress_bar:

            # ids = data["track_id"]
            # position_tensor = data["target_positions"].to(self.device)
            # velocity_tensor = data["target_velocities"].to(self.device)
            # acceleration_tensor = data["target_accelerations"].to(self.device)
            # yaw_tensor = data["target_yaws"].to(self.device)

            history_position_tensor = data["history_positions"].to(self.device)
            estimated_future_positions = data["estimated_future_positions"].to(
                self.device)
            # history_velocity_tensor = data["history_velocities"].to(self.device)
            # history_acceleration_tensor = data["history_accelerations"].to(self.device)
            # history_yaw_tensor = data["history_yaws"].to(self.device)
            # history_availability = data["history_availabilities"].to(self.device)

            imageTensor = data["image"].to(self.device)
            if self.verbose:
                print("Image Tensor: ", imageTensor.shape)

            # state_vector = torch.cat([history_position_tensor, history_velocity_tensor, history_acceleration_tensor,
            #                           history_yaw_tensor], 2).to(self.device)

            state_vector = torch.cat(
                [estimated_future_positions, history_position_tensor],
                1).to(self.device)

            state_vector = torch.flatten(state_vector, 1).to(self.device)
            # print(state_vector)
            if self.verbose:
                print("State Vector: ", state_vector.shape)

            pred, confidences = self.model.forward(imageTensor, state_vector)

            # future_coords_offsets_pd.append(outputs.cpu().numpy().copy())
            timestamps.append(data["timestamp"].numpy().copy())
            agent_ids.append(data["track_id"].numpy().copy())
            #
            # pred, confidences = predictor(image)

            pred_coords.append(pred.cpu().numpy().copy())
            confidences_list.append(confidences.cpu().numpy().copy())

        # ==== Save Results
        pred_path = f"{os.getcwd()}/{file_name}"
        write_pred_csv(pred_path,
                       timestamps=np.concatenate(timestamps),
                       track_ids=np.concatenate(agent_ids),
                       coords=np.concatenate(pred_coords),
                       confs=np.concatenate(confidences_list))

        torch.cuda.empty_cache()