Code Example #1
# Imports used by this snippet; `cfg` and `get_dm()` come from the surrounding project.
from collections import OrderedDict
from pathlib import Path

import numpy as np
from torch.utils.data import DataLoader

from l5kit.data import ChunkedDataset
from l5kit.dataset import AgentDataset
from l5kit.evaluation import read_gt_csv
from l5kit.rasterization import build_rasterizer


def load_tune_data():
    dm = get_dm()

    eval_cfg = cfg["val_data_loader"]

    eval_base_path = '/home/axot/lyft/data/scenes/validate_chopped_31'

    eval_zarr_path = str(Path(eval_base_path) /
                         Path(dm.require(eval_cfg["key"])).name)
    eval_mask_path = str(Path(eval_base_path) / "mask.npz")
    eval_gt_path = str(Path(eval_base_path) / "gt.csv")

    rasterizer = build_rasterizer(cfg, dm)
    eval_zarr = ChunkedDataset(eval_zarr_path).open()
    eval_mask = np.load(eval_mask_path)["arr_0"]
    # ===== INIT DATASET AND LOAD MASK
    eval_dataset = AgentDataset(
        cfg, eval_zarr, rasterizer, agents_mask=eval_mask)

    # index GT rows by track_id+timestamp (both strings, as yielded by read_gt_csv)
    gt_dict = OrderedDict()
    for el in read_gt_csv(eval_gt_path):
        gt_dict[el["track_id"] + el["timestamp"]] = el
    
    eval_dataloader = DataLoader(eval_dataset,
                                 shuffle=eval_cfg["shuffle"],
                                 batch_size=eval_cfg["batch_size"],
                                 num_workers=eval_cfg["num_workers"])

    return eval_dataloader, gt_dict
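The returned `gt_dict` is keyed by concatenating the string `track_id` and `timestamp` fields exactly as `read_gt_csv` yields them. A minimal lookup sketch, assuming batches from `eval_dataloader` carry numeric "track_id" and "timestamp" fields as l5kit's AgentDataset samples do:

# Hedged usage sketch: match each dataloader sample to its ground-truth row.
for batch in eval_dataloader:
    for track_id, timestamp in zip(batch["track_id"].numpy(),
                                   batch["timestamp"].numpy()):
        el = gt_dict[str(track_id) + str(timestamp)]
        gt_coords, gt_avail = el["coord"], el["avail"]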
Code Example #2
def load_gt(self):
    # Cache ground truth
    if self.gt_path is not None:
        gt = {}
        for row in read_gt_csv(self.gt_path):
            gt[row["track_id"] + row["timestamp"]] = [row["coord"], row["avail"]]
        self.gt = gt
    else:
        self.gt = None
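A hedged sketch of how the cached pairs could be consumed, e.g. an availability-masked displacement error (`masked_ade`, the key format, and the `pred` shape are illustrative assumptions; assumes numpy as np):

def masked_ade(self, key, pred):
    # pred: (future_len, 2) predicted coordinates for one agent (assumed shape)
    coord, avail = self.gt[key]            # as cached in load_gt
    err = np.linalg.norm(pred - coord, axis=-1)  # per-step displacement
    return float((err * avail).sum() / max(avail.sum(), 1))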
Code Example #3
File: run_ensemble.py Project: piantic/Kaggle_Lyft
def generate_ground_truth(timestamps_trackid, gt_path):
    # assumes: from tqdm import tqdm; import numpy as np;
    # from l5kit.evaluation import read_gt_csv
    gt = {}
    for row in tqdm(read_gt_csv(gt_path)):
        gt[str(row['timestamp']) + str(row['track_id'])] = {
            'coords': row["coord"], 'avails': row['avail']}

    truth, avails = [], []
    for i in tqdm(range(timestamps_trackid.shape[0])):
        timestamp, track_id = int(timestamps_trackid[i, 0]), int(timestamps_trackid[i, 1])
        truth.append(gt[str(timestamp) + str(track_id)]['coords'])
        avails.append(gt[str(timestamp) + str(track_id)]['avails'])

    gt_out = {'truth': np.stack(truth, axis=0), 'avails': np.stack(avails, axis=0)}

    return gt_out
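Because `truth` and `avails` are stacked in the same order as `timestamps_trackid`, the output can be scored row-by-row with l5kit's per-sample metrics. A sketch, where `preds` (num_rows, num_modes, future_len, 2) and `confs` (num_rows, num_modes, rows summing to 1) are assumed model outputs aligned the same way:

from l5kit.evaluation.metrics import neg_multi_log_likelihood

gt_out = generate_ground_truth(timestamps_trackid, gt_path)
scores = [neg_multi_log_likelihood(gt_out['truth'][i], preds[i],
                                   confs[i], gt_out['avails'][i])
          for i in range(len(gt_out['truth']))]
print(np.mean(scores))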
Code Example #4
# assumes: import numpy as np; from pathlib import Path;
# from l5kit.evaluation import write_gt_csv, read_gt_csv
def test_e2e_gt_csv(tmpdir: Path) -> None:
    dump_path = str(tmpdir / "gt_out.csv")
    num_example, future_len, num_coords = 100, 12, 2

    timestamps = np.random.randint(1000, 2000, num_example)
    track_ids = np.random.randint(0, 200, num_example)
    coords = np.random.randn(num_example, future_len, num_coords)
    avails = np.random.randint(0, 2, (num_example, future_len))
    write_gt_csv(dump_path, timestamps, track_ids, coords, avails)

    # read and check values
    for idx, el in enumerate(read_gt_csv(dump_path)):
        assert int(el["track_id"]) == track_ids[idx]
        assert int(el["timestamp"]) == timestamps[idx]
        assert np.allclose(el["coord"], coords[idx], atol=1e-4)
        assert np.allclose(el["avail"], avails[idx])
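The assertions above double as documentation for the record layout: each element yielded by `read_gt_csv` looks like this (values illustrative):

# one record yielded by read_gt_csv:
# {
#     "track_id":  "17",                      # str
#     "timestamp": "1571581677404103240",     # str
#     "coord":     ndarray (future_len, 2),   # float coordinates
#     "avail":     ndarray (future_len,),     # 0/1 availability flags
# }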
Code Example #5
            # (continuation of an inference loop; earlier lines accumulate
            # future_coords_offsets_pd from the model output)
            confidences_list.append(confidences.cpu().numpy().copy())
            timestamps.append(data["timestamp"].numpy().copy())
            agent_ids.append(data["track_id"].numpy().copy())
        pred_path = 'submission1.csv'
        write_pred_csv(pred_path,
                       timestamps=np.concatenate(timestamps),
                       track_ids=np.concatenate(agent_ids),
                       coords=np.concatenate(future_coords_offsets_pd),
                       confs=np.concatenate(confidences_list))
        metrics = compute_metrics_csv(
            eval_gt_path, pred_path, [neg_multi_log_likelihood, time_displace])
        for metric_name, metric_mean in metrics.items():
            print(metric_name, metric_mean)

        gt_rows = {}
        for row in read_gt_csv(eval_gt_path):
            gt_rows[row["track_id"] + row["timestamp"]] = row["coord"]

        eval_ego_dataset = EgoDataset(cfg, eval_dataset.dataset, rasterizer)

        for frame_number in range(
                99, len(eval_zarr.frames),
                100):  # start from last frame of scene_0 and increase by 100
            agent_indices = eval_dataset.get_frame_indices(frame_number)
            if not len(agent_indices):
                continue

            # get AV point-of-view frame
            data_ego = eval_ego_dataset[frame_number]
            im_ego = rasterizer.to_rgb(data_ego["image"].transpose(1, 2, 0))
            center = (np.asarray(cfg["raster_params"]["ego_center"])
                      * cfg["raster_params"]["raster_size"])
            # (snippet truncated in the source; Code Example #6 shows the full loop)
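For reference, `write_pred_csv` as used above expects the concatenated arrays in l5kit's multi-modal submission layout (a sketch of the shapes; `num_modes` etc. are named for illustration):

# timestamps: (num_examples,)
# track_ids:  (num_examples,)
# coords:     (num_examples, num_modes, future_len, 2)
# confs:      (num_examples, num_modes), each row summing to 1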
Code Example #6
# assumes: import torch; import numpy as np; import matplotlib.pyplot as plt;
# from l5kit.dataset import EgoDataset; from l5kit.geometry import transform_points;
# from l5kit.visualization import draw_trajectory, PREDICTED_POINTS_COLOR, TARGET_POINTS_COLOR;
# display_CSPforward and device come from the surrounding project
def visualize_output(cfg, model, eval_gt_path, eval_dataset, rasterizer,
                     eval_zarr):
    model.eval()
    torch.set_grad_enabled(False)

    # build a dict to retrieve future trajectories from GT
    gt_rows = {}
    for row in read_gt_csv(eval_gt_path):
        gt_rows[row["track_id"] + row["timestamp"]] = row["coord"]

    eval_ego_dataset = EgoDataset(cfg, eval_dataset.dataset, rasterizer)
    for frame_number in range(
            99, len(eval_zarr.frames),
            100):  # start from last frame of scene_0 and increase by 100
        plt.figure(figsize=(10, 10))
        agent_indices = eval_dataset.get_frame_indices(frame_number)
        if not len(agent_indices):
            continue

        # get AV point-of-view frame
        data_ego, index = eval_ego_dataset[frame_number]
        im_ego = rasterizer.to_rgb(data_ego["image"].transpose(1, 2, 0))
        center = np.asarray(cfg["raster_params"]["ego_center"]
                            ) * cfg["raster_params"]["raster_size"]

        predicted_positions = []
        target_positions = []

        for v_index in agent_indices:
            data_agent, index = eval_dataset[v_index]

            predictions, confidences = display_CSPforward(data_agent,
                                                          index,
                                                          model,
                                                          eval_ego_dataset,
                                                          eval_dataset,
                                                          device=device)
            #         out_net = model(torch.from_numpy(data_agent["image"]).unsqueeze(0).to(device))
            #         _, preds, confidences = forward(data, model, device, criterion)
            best_prediction_idx = torch.argmax(confidences)
            out_pos = predictions[:, best_prediction_idx, :, :]
            out_pos = out_pos.reshape(-1, 2).detach().cpu().numpy()

            # store absolute world coordinates
            predicted_positions.append(
                transform_points(out_pos, data_agent["world_from_agent"]))
            # retrieve target positions from the GT and store as absolute coordinates
            track_id = data_agent["track_id"]
            timestamp = data_agent["timestamp"]
            target_positions.append(gt_rows[str(track_id) + str(timestamp)] +
                                    data_agent["centroid"][:2])

        # convert coordinates to AV point-of-view so we can draw them
        predicted_positions = transform_points(
            np.concatenate(predicted_positions), data_ego["raster_from_world"])
        target_positions = transform_points(np.concatenate(target_positions),
                                            data_ego["raster_from_world"])

        draw_trajectory(im_ego, predicted_positions, PREDICTED_POINTS_COLOR)
        draw_trajectory(im_ego, target_positions, TARGET_POINTS_COLOR)
        plt.imshow(im_ego)
        plt.show()
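The drawing above depends on two successive coordinate transforms; as a compact reference (a sketch, not project code), the chain from agent-local predictions to raster pixels is:

from l5kit.geometry import transform_points

# agent-local -> world -> raster (AV point-of-view) coordinates
world_pts = transform_points(out_pos, data_agent["world_from_agent"])
raster_pts = transform_points(world_pts, data_ego["raster_from_world"])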
Code Example #7
    def __init__(
        self,
        dset_name=None,
        cfg_path="./agent_motion_config.yaml",
        cfg_data=None,
        stage=None,
    ):
        print(f"Initializing LyftDataset {dset_name}...")
        if stage is not None:
            print(
                'DEPRECATION WARNING! LyftDataset:: argument "stage=" is deprecated, use "dset_name=" instead'
            )
            if dset_name is None:
                dset_name = stage
            else:
                raise ValueError(
                    'LyftDataset::Please use only "dset_name" argument')
        assert dset_name is not None
        self.dm = LocalDataManager(None)
        self.dset_name = dset_name
        if cfg_data is None:
            self.cfg = utils.DotDict(load_config_data(cfg_path))
        else:
            self.cfg = utils.DotDict(cfg_data)

        self.dset_cfg = self.cfg[
            LyftDataset.name_2_dataloader_key[dset_name]].copy()

        if self.cfg["raster_params"]["map_type"] == "py_satellite":
            print("WARNING! USING SLOW RASTERIZER!!! py_satellite")
            self.rasterizer = build_rasterizer(self.cfg, self.dm)
        else:
            self.rasterizer = build_custom_rasterizer(self.cfg, self.dm)

        if dset_name == LyftDataset.DSET_VALIDATION_CHOPPED:
            eval_base_path = Path(
                "/opt/data3/lyft_motion_prediction/prediction_dataset/scenes/validate_chopped_100"
            )
            eval_zarr_path = str(
                Path(eval_base_path) /
                Path(self.dm.require(self.dset_cfg["key"])).name)
            eval_mask_path = str(Path(eval_base_path) / "mask.npz")
            self.eval_gt_path = str(Path(eval_base_path) / "gt.csv")
            self.zarr_dataset = ChunkedDataset(eval_zarr_path).open(
                cached=False)
            self.agent_dataset = AgentDataset(
                self.cfg,
                self.zarr_dataset,
                self.rasterizer,
                agents_mask=np.load(eval_mask_path)["arr_0"],
            )

            self.val_chopped_gt = {}  # plain dict; entries are assigned directly below
            for el in read_gt_csv(self.eval_gt_path):
                self.val_chopped_gt[el["track_id"] + el["timestamp"]] = el
        elif dset_name == LyftDataset.DSET_TEST:
            self.zarr_dataset = ChunkedDataset(
                self.dm.require(self.dset_cfg["key"])).open(cached=False)
            test_mask = np.load(
                f"{config.L5KIT_DATA_FOLDER}/scenes/mask.npz")["arr_0"]
            self.agent_dataset = AgentDataset(self.cfg,
                                              self.zarr_dataset,
                                              self.rasterizer,
                                              agents_mask=test_mask)
        else:
            zarr_path = self.dm.require(self.dset_cfg["key"])
            print(f"Opening Chunked Dataset {zarr_path}...")
            self.zarr_dataset = ChunkedDataset(zarr_path).open(cached=False)
            print("Creating Agent Dataset...")
            self.agent_dataset = AgentDataset(
                self.cfg,
                self.zarr_dataset,
                self.rasterizer,
                min_frame_history=0,
                min_frame_future=10,
            )
            print("Creating Agent Dataset... [OK]")

        if dset_name == LyftDataset.DSET_VALIDATION:
            mask_frame100 = np.zeros(
                shape=self.agent_dataset.agents_mask.shape, dtype=bool)
            for scene in self.agent_dataset.dataset.scenes:
                frame_interval = scene["frame_index_interval"]
                agent_index_interval = self.agent_dataset.dataset.frames[
                    frame_interval[0] + 99]["agent_index_interval"]
                mask_frame100[
                    agent_index_interval[0]:agent_index_interval[1]] = True

            prev_agents_num = np.sum(self.agent_dataset.agents_mask)
            self.agent_dataset.agents_mask = self.agent_dataset.agents_mask * mask_frame100
            print(
                f"nb agent: orig {prev_agents_num} filtered {np.sum(self.agent_dataset.agents_mask)}"
            )
            # store the valid agents indexes
            self.agent_dataset.agents_indices = np.nonzero(
                self.agent_dataset.agents_mask)[0]

        self.w, self.h = self.cfg["raster_params"]["raster_size"]

        self.add_agent_state = self.cfg["model_params"]["add_agent_state"]
        self.agent_state = None
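A hedged instantiation sketch; `DSET_VALIDATION_CHOPPED` and `name_2_dataloader_key` are class attributes referenced above but not shown in this excerpt:

dset = LyftDataset(dset_name=LyftDataset.DSET_VALIDATION_CHOPPED,
                   cfg_path="./agent_motion_config.yaml")
print(len(dset.agent_dataset), dset.eval_gt_path)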