# Example #1
def test_agents_layer_out(hist_data: tuple, dmg: LocalDataManager, cfg: dict) -> None:
    """Agents channel must be blank at filter threshold 1.0 and populated at 0.0."""
    cfg["raster_params"]["map_type"] = "box_debug"

    for threshold, expect_empty in ((1.0, True), (0.0, False)):
        cfg["raster_params"]["filter_agents_threshold"] = threshold
        rasterizer = build_rasterizer(cfg, dmg)

        channel_sum = rasterizer.rasterize(hist_data[0][:1], hist_data[1][:1], [])[..., 0].sum()
        if expect_empty:
            assert channel_sum == 0
        else:
            assert channel_sum > 0
# Example #2
def main(model_name, plot_loss=True):
    """Train the CSP model, persist artifacts, evaluate, and optionally plot loss.

    Args:
        model_name: stem used for the files written under ``models/``.
        plot_loss: if True, display a matplotlib plot of the training losses.
    """
    # point l5kit at the dataset root before building any dataset objects
    DIR_INPUT = cfg["data_path"]
    os.environ["L5KIT_DATA_FOLDER"] = DIR_INPUT
    dm = LocalDataManager(None)
    rasterizer = build_rasterizer(cfg, dm)
    # NOTE(review): train_dataloader_ego is built but never used below
    train_dataset, train_dataset_ego, train_dataloader, train_dataloader_ego = get_train_dataloaders(
        cfg, dm)
    criterion = pytorch_neg_multi_log_likelihood_batch
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = CSP(cfg, device).to(device)
    epochs = 10
    optimizer = optim.Adam(model.parameters(), lr=1e-2)
    train_losses = train(model, train_dataloader, train_dataset_ego,
                         train_dataset, criterion, device, epochs, optimizer)
    # persist the full model object and the loss history for later inspection
    torch.save(model, "models/{}.pt".format(model_name))
    np.save("models/training_loss_{}.npy".format(model_name), train_losses)

    eval_dataset, eval_dataloader, eval_dataset_ego, eval_gt_path = generate_eval_dataset(
        cfg, dm, rasterizer)

    # `eval` here is the project's evaluation routine (shadows the builtin)
    eval(model, eval_dataloader, eval_dataset_ego, eval_dataset, criterion,
         device, eval_gt_path, cfg)

    if plot_loss:
        plt.plot(np.arange(len(train_losses)),
                 train_losses,
                 label="train loss")
        plt.legend()
        plt.show()
    def setup(self):
        """Build the rasterizer and load zarr datasets for the current mode.

        In test mode only the test split is loaded; otherwise train and val
        splits are loaded, the train dataset is plotted, and both train
        (optionally) and val datasets are downsampled to agent subsets.
        """
        self.rasterizer = build_rasterizer(self.cfg, self.dm)

        # called on every GPU
        if self.is_test:
            print("test mode setup")
            self.test_path, test_zarr, self.test_dataset = self.load_zarr_dataset(
                loader_name="test_data_loader")
        else:
            print("train mode setup")
            self.train_path, train_zarr, self.train_dataset = self.load_zarr_dataset(
                loader_name="train_data_loader")
            self.val_path, val_zarr, self.val_dataset = self.load_zarr_dataset(
                loader_name="val_data_loader")
            self.plot_dataset(self.train_dataset)

            if self.downsample_train:
                print(
                    "downsampling agents, using only {} frames from each scene"
                    .format(len(lyft_utils.TRAIN_DSAMPLE_FRAMES)))
                # keep only agents belonging to the selected frames of each scene
                train_agents_list = lyft_utils.downsample_agents(
                    train_zarr,
                    self.train_dataset,
                    selected_frames=lyft_utils.TRAIN_DSAMPLE_FRAMES,
                )
                self.train_dataset = torch.utils.data.Subset(
                    self.train_dataset, train_agents_list)
            # downsampling the validation dataset same as test dataset or
            # l5kit.evaluation.create_chopped_dataset
            val_agents_list = lyft_utils.downsample_agents(
                val_zarr, self.val_dataset, selected_frames=VAL_SELECTED_FRAME)
            self.val_dataset = torch.utils.data.Subset(self.val_dataset,
                                                       val_agents_list)
def test_simulation_agents_mock_insert(dmg: LocalDataManager, cfg: dict,
                                       tmp_path: Path) -> None:
    """An agent inserted with ``set_agents`` must be returned by later
    ``rasterise_agents_frame_batch`` calls with its pose preserved."""
    zarr_dataset = _mock_dataset()
    rasterizer = build_rasterizer(cfg, dmg)

    ego_dataset = EgoDataset(cfg, zarr_dataset, rasterizer)
    sim_cfg = SimulationConfig(use_ego_gt=True,
                               use_agents_gt=True,
                               disable_new_agents=True,
                               distance_th_far=100,
                               distance_th_close=10)
    dataset = SimulationDataset.from_dataset_indices(ego_dataset, [0], sim_cfg)

    # prime the tracking state on frame 0 before inserting anything
    _ = dataset.rasterise_agents_frame_batch(0)

    # insert (0, 1) in following frames
    next_agent = np.zeros(1, dtype=AGENT_DTYPE)
    next_agent["centroid"] = (-1, -1)
    next_agent["yaw"] = -0.5
    next_agent["track_id"] = 1
    next_agent["extent"] = (1, 1, 1)
    next_agent["label_probabilities"][:, 3] = 1

    for frame_idx in [1, 2, 3]:
        dataset.set_agents(frame_idx, {(0, 1): next_agent})

        # the inserted agent must be the only one present and keep its pose
        agents_dict = dataset.rasterise_agents_frame_batch(frame_idx)
        assert len(agents_dict) == 1 and (0, 1) in agents_dict
        assert np.allclose(agents_dict[(0, 1)]["centroid"], (-1, -1))
        assert np.allclose(agents_dict[(0, 1)]["yaw"], -0.5)
# Example #5
def load_tune_data():
    """Return a validation DataLoader and a ground-truth lookup dict for tuning."""
    dm = get_dm()

    loader_cfg = cfg["val_data_loader"]

    # pre-chopped validation data already generated on disk
    base_dir = Path('/home/axot/lyft/data/scenes/validate_chopped_31')
    zarr_path = str(base_dir / Path(dm.require(loader_cfg["key"])).name)
    mask_path = str(base_dir / "mask.npz")
    gt_path = str(base_dir / "gt.csv")

    rasterizer = build_rasterizer(cfg, dm)
    chunked = ChunkedDataset(zarr_path).open()
    agents_mask = np.load(mask_path)["arr_0"]
    # ===== INIT DATASET AND LOAD MASK
    dataset = AgentDataset(
        cfg, chunked, rasterizer, agents_mask=agents_mask)

    # ground-truth rows keyed by the string concatenation track_id + timestamp
    gt_dict = OrderedDict(
        (row["track_id"] + row["timestamp"], row)
        for row in read_gt_csv(gt_path))

    dataloader = DataLoader(dataset,
                            shuffle=loader_cfg["shuffle"],
                            batch_size=loader_cfg["batch_size"],
                            num_workers=loader_cfg["num_workers"])

    return dataloader, gt_dict
def test_simulation_agents_mock_disable(dmg: LocalDataManager, cfg: dict,
                                        tmp_path: Path) -> None:
    """With ``disable_new_agents=True``, only agents seen in frame 0 are
    tracked, and a tracked agent is dropped once it leaves the data."""
    zarr_dataset = _mock_dataset()
    rasterizer = build_rasterizer(cfg, dmg)

    ego_dataset = EgoDataset(cfg, zarr_dataset, rasterizer)
    sim_cfg = SimulationConfig(use_ego_gt=True,
                               use_agents_gt=True,
                               disable_new_agents=True,
                               distance_th_far=100,
                               distance_th_close=10)
    dataset = SimulationDataset.from_dataset_indices(ego_dataset, [0], sim_cfg)

    # nothing should be tracked
    assert len(dataset._agents_tracked) == 0

    agents_dict = dataset.rasterise_agents_frame_batch(0)

    # only (0, 1) should be in
    assert len(agents_dict) == 1 and (0, 1) in agents_dict
    assert len(dataset._agents_tracked) == 1

    agents_dict = dataset.rasterise_agents_frame_batch(1)

    # again, only (0, 1) should be in
    assert len(agents_dict) == 1
    assert (0, 1) in agents_dict
    assert len(dataset._agents_tracked) == 1

    # the agent is gone from the mock data at frame 2, so tracking empties out
    agents_dict = dataset.rasterise_agents_frame_batch(2)
    assert len(agents_dict) == 0
    assert len(dataset._agents_tracked) == 0
def test_simulation_dataset_build(zarr_cat_dataset: ChunkedDataset,
                                  dmg: LocalDataManager, cfg: dict,
                                  tmp_path: Path) -> None:
    """Constructor and factory must build equivalent SimulationDatasets.

    Each scene's first ego frame is perturbed so that the later frame-by-frame
    comparison is meaningful even if the concatenated scenes start identical.
    """
    # modify one frame to ensure everything works also when scenes are different
    zarr_cat_dataset.frames = np.asarray(zarr_cat_dataset.frames)
    for scene_idx in range(len(zarr_cat_dataset.scenes)):
        # BUGFIX: index the current scene; previously the whole `scenes` array
        # was passed and the loop variable was unused, so only one slice
        # (of all scenes) was ever perturbed.
        frame_slice = get_frames_slice_from_scenes(
            zarr_cat_dataset.scenes[scene_idx])
        zarr_cat_dataset.frames[
            frame_slice.start]["ego_translation"] += np.random.randn(3)

    rasterizer = build_rasterizer(cfg, dmg)
    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, rasterizer)
    sim_cfg = SimulationConfig(use_ego_gt=True,
                               use_agents_gt=True,
                               disable_new_agents=False,
                               distance_th_far=30,
                               distance_th_close=10)
    # we should be able to create the same object by using both constructor and factory
    scene_indices = list(range(len(zarr_cat_dataset.scenes)))

    scene_dataset_batch: Dict[int, EgoDataset] = {}
    for scene_idx in scene_indices:
        scene_dataset = ego_dataset.get_scene_dataset(scene_idx)
        scene_dataset_batch[scene_idx] = scene_dataset
    sim_1 = SimulationDataset(scene_dataset_batch, sim_cfg)

    sim_2 = SimulationDataset.from_dataset_indices(ego_dataset, scene_indices,
                                                   sim_cfg)

    # both builds must agree scene-by-scene on the (perturbed) ego translations
    for (k_1, v_1), (k_2, v_2) in zip(sim_1.scene_dataset_batch.items(),
                                      sim_2.scene_dataset_batch.items()):
        assert k_1 == k_2
        assert np.allclose(v_1.dataset.frames["ego_translation"],
                           v_2.dataset.frames["ego_translation"])
def test_simulation_agents(zarr_cat_dataset: ChunkedDataset,
                           dmg: LocalDataManager, cfg: dict,
                           tmp_path: Path) -> None:
    """Rasterising frame 0 over concatenated scenes tracks every agent once."""
    rasterizer = build_rasterizer(cfg, dmg)

    scene_indices = list(range(len(zarr_cat_dataset.scenes)))

    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, rasterizer)
    sim_cfg = SimulationConfig(use_ego_gt=True,
                               use_agents_gt=True,
                               disable_new_agents=False,
                               distance_th_far=100,
                               distance_th_close=30)
    dataset = SimulationDataset.from_dataset_indices(ego_dataset,
                                                     scene_indices, sim_cfg)

    # nothing should be tracked
    assert len(dataset._agents_tracked) == 0

    agents_dict = dataset.rasterise_agents_frame_batch(0)

    # we should have the same agents in each scene
    # (scenes are copies of each other, so agent k[1] of any scene exists in scene 0)
    for k in agents_dict:
        assert (0, k[1]) in agents_dict

    # now everything should be tracked
    assert len(dataset._agents_tracked) == len(agents_dict)
    def __init__(self,
                 data_root: str,
                 config_path: str,
                 split: str,
                 show_progress=True,
                 turn_thresh=3.,
                 speed_thresh=0.5,
                 static_thresh=1.,
                 output_folder='preprocess',
                 autosave=True,
                 cache_size=1e9):
        """Load the config, open the requested zarr split and build the dataset.

        Args:
            data_root: root folder handed to ``LocalDataManager``.
            config_path: path of the YAML config read by ``load_config_data``.
            split: data-manager key of the zarr split to open.
            show_progress: whether to display progress while processing.
            turn_thresh: threshold for classifying turning trajectories
                (units not visible here -- confirm against usage).
            speed_thresh: threshold for classifying slow/moving agents
                (units not visible here -- confirm against usage).
            static_thresh: threshold for classifying static agents
                (units not visible here -- confirm against usage).
            output_folder: folder name for preprocessed output.
            autosave: if True, results are saved automatically by the caller.
            cache_size: zarr cache size in bytes (cast to int before use).
        """
        self.autosave = autosave
        self.show_progress = show_progress
        self.turn_thresh = turn_thresh
        self.speed_thresh = speed_thresh
        self.static_thresh = static_thresh
        self.split = split
        self.config = load_config_data(config_path)
        self.output_folder = output_folder

        self.data_manager = LocalDataManager(data_root)
        self.rasterizer = build_rasterizer(self.config, self.data_manager)
        self.data_zarr = ChunkedDataset(self.data_manager.require(split)).open(
            cache_size_bytes=int(cache_size))
        self.dataset = AgentDataset(self.config, self.data_zarr,
                                    self.rasterizer)

        # per-category accumulators filled during preprocessing
        self.data = defaultdict(list)
        self.junk = defaultdict(list)

        # progress bar/indicator, created lazily when processing starts
        self.progress = None
# Example #10
def test_build_dataloader(scene_indices: tuple, dataset_cls: Callable) -> None:
    """Smoke test: a train dataloader can be built and yields a first batch."""
    artefacts_path = "./l5kit/tests/artefacts/"
    cfg = load_config_data(artefacts_path + "config.yaml")
    cfg["train_data_loader"]["datasets"][0]["scene_indices"] = scene_indices
    dm = LocalDataManager(artefacts_path)
    rasterizer = build_rasterizer(cfg, dm)
    loader = build_dataloader(cfg, "train", dm, dataset_cls, rasterizer)
    next(iter(loader))
# Example #11
    def val_dataloader(self):
        """Create a chopped validation dataset and return its DataLoader.

        Side effect: stores the ground-truth CSV path on ``self.eval_gt_path``
        for later evaluation.
        """
        # created chopped dataset

        rasterizer = build_rasterizer(cfg, dm)
        eval_cfg = cfg["valid_data_loader"]
        num_frames_to_chop = 100
        eval_base_path = create_chopped_dataset(
            dm.require(eval_cfg["key"]),
            cfg["raster_params"]["filter_agents_threshold"],
            num_frames_to_chop, cfg["model_params"]["future_num_frames"],
            MIN_FUTURE_STEPS)

        # chopped zarr, mask and ground truth all live under eval_base_path
        eval_zarr_path = str(
            Path(eval_base_path) / Path(dm.require(eval_cfg["key"])).name)
        eval_mask_path = str(Path(eval_base_path) / "mask.npz")
        eval_gt_path = str(Path(eval_base_path) / "gt.csv")
        self.eval_gt_path = eval_gt_path

        eval_zarr = ChunkedDataset(eval_zarr_path).open(cache_size_bytes=10e9)
        eval_mask = np.load(eval_mask_path)["arr_0"]

        eval_dataset = AgentDataset(cfg,
                                    eval_zarr,
                                    rasterizer,
                                    agents_mask=eval_mask)
        # shuffle disabled so predictions line up with the chopped ground truth
        eval_dataloader = DataLoader(eval_dataset,
                                     shuffle=False,
                                     batch_size=eval_cfg["batch_size"],
                                     num_workers=8)

        return eval_dataloader
# Example #12
def load_val_data():
    """Build and return the validation DataLoader from a pre-chopped dataset.

    The chopping step is commented out below because the chopped data has
    already been generated at ``eval_base_path``.
    """
    dm = get_dm()

    eval_cfg = cfg["val_data_loader"]

    # MIN_FUTURE_STEPS = 10
    # num_frames_to_chop = cfg['model_params']['history_num_frames']+1

    # eval_base_path = create_chopped_dataset(dm.require(eval_cfg["key"]),
    #                                         cfg["raster_params"]["filter_agents_threshold"],
    #                                         num_frames_to_chop,
    #                                         cfg["model_params"]["future_num_frames"],
    #                                         MIN_FUTURE_STEPS)

    eval_base_path = '/home/axot/lyft/data/scenes/validate_chopped_31'

    eval_zarr_path = str(
        Path(eval_base_path) / Path(dm.require(eval_cfg["key"])).name)
    eval_mask_path = str(Path(eval_base_path) / "mask.npz")
    # NOTE(review): eval_gt_path is computed but not used/returned here
    eval_gt_path = str(Path(eval_base_path) / "gt.csv")

    rasterizer = build_rasterizer(cfg, dm)
    eval_zarr = ChunkedDataset(eval_zarr_path).open()
    eval_mask = np.load(eval_mask_path)["arr_0"]
    # ===== INIT DATASET AND LOAD MASK
    eval_dataset = AgentDataset(cfg,
                                eval_zarr,
                                rasterizer,
                                agents_mask=eval_mask)
    eval_dataloader = DataLoader(eval_dataset,
                                 shuffle=eval_cfg["shuffle"],
                                 batch_size=eval_cfg["batch_size"],
                                 num_workers=eval_cfg["num_workers"])

    return eval_dataloader
# Example #13
def test_agents_layer_out(hist_data: tuple) -> None:
    """Agents channel must be blank at filter threshold 1.0 and populated at 0.0."""
    artefacts_path = "./l5kit/tests/artefacts/"
    cfg = load_config_data(artefacts_path + "config.yaml")
    cfg["raster_params"]["map_type"] = "box_debug"
    dm = LocalDataManager(artefacts_path)

    for threshold, expect_empty in ((1.0, True), (0.0, False)):
        cfg["raster_params"]["filter_agents_threshold"] = threshold
        rasterizer = build_rasterizer(cfg, dm)

        channel_sum = rasterizer.rasterize(hist_data[0][:1], hist_data[1][:1])[..., 0].sum()
        if expect_empty:
            assert channel_sum == 0
        else:
            assert channel_sum > 0
# Example #14
def test_rasterizer_created_from_config(map_type: str,
                                        dataset: ChunkedStateDataset) -> None:
    """Every configured map_type must produce a rasterizer that passes checks."""
    artefacts_dir = "./l5kit/tests/artefacts/"
    cfg = load_config_data(artefacts_dir + "config.yaml")
    cfg["raster_params"]["map_type"] = map_type
    rasterizer = build_rasterizer(cfg, LocalDataManager(artefacts_dir))
    check_rasterizer(cfg, rasterizer, dataset)
# Example #15
def test_ego_layer_out_center_configs(ego_center: tuple, hist_data: tuple, dmg: LocalDataManager, cfg: dict) -> None:
    """The ego channel must be non-empty for any configured ego_center."""
    cfg["raster_params"]["map_type"] = "box_debug"
    cfg["raster_params"]["ego_center"] = np.asarray(ego_center)

    rasterizer = build_rasterizer(cfg, dmg)
    raster = rasterizer.rasterize(hist_data[0][:1], hist_data[1][:1], [])
    # the last channel holds the ego layer
    assert raster[..., -1].sum() > 0
# Example #16
def get_loaders(train_batch_size=32, valid_batch_size=64):
    """Prepare loaders.

    Args:
        train_batch_size (int, optional): batch size for training dataset.
            Default is `32`.
        valid_batch_size (int, optional): batch size for validation dataset.
            Default is `64`.

    Returns:
        train and validation data loaders (the validation loader is
        currently ``None``).
    """
    # NOTE(review): valid_batch_size is currently unused -- no validation
    # loader is built below.
    rasterizer = build_rasterizer(cfg, dm)

    train_zarr = ChunkedDataset(dm.require("scenes/train.zarr")).open()
    train_dataset = CubicAgentDataset(cfg, train_zarr, rasterizer)
    # keep only the first fifth of the samples to shorten an epoch
    n_samples = len(train_dataset) // 5
    # n_samples = 100
    train_dataset = Subset(train_dataset, list(range(n_samples)))
    train_loader = DataLoader(
        train_dataset,
        batch_size=train_batch_size,
        num_workers=NUM_WORKERS,
        shuffle=True,
        worker_init_fn=seed_all,
        drop_last=True,
    )
    print(f" * Number of elements in train dataset - {len(train_dataset)}")
    print(f" * Number of elements in train loader - {len(train_loader)}")
    return train_loader, None
# Example #17
def test_rasterizer_created_from_config(map_type: str,
                                        zarr_dataset: ChunkedDataset,
                                        dmg: LocalDataManager,
                                        cfg: dict) -> None:
    """Any supported map_type should build a rasterizer that passes checks."""
    cfg["raster_params"]["map_type"] = map_type
    check_rasterizer(cfg, build_rasterizer(cfg, dmg), zarr_dataset)
# Example #18
    def evaluate(self, data_path, file_name="submission.csv"):
        """Run the model over the test split and write predictions to a CSV.

        Args:
            data_path: dataset root, exported as L5KIT_DATA_FOLDER for l5kit.
            file_name: name of the CSV written into the current working dir.
        """
        # set env variable for data
        os.environ["L5KIT_DATA_FOLDER"] = data_path
        dm = LocalDataManager(None)

        cfg = self.cfg

        # ===== INIT DATASET
        test_cfg = cfg["test_data_loader"]

        # Rasterizer
        rasterizer = build_rasterizer(cfg, dm)

        # Test dataset/dataloader (masked to the agents in scenes/mask.npz)
        test_zarr = ChunkedDataset(dm.require(test_cfg["key"])).open()
        test_mask = np.load(f"{data_path}/scenes/mask.npz")["arr_0"]
        test_dataset = AgentDataset(cfg,
                                    test_zarr,
                                    rasterizer,
                                    agents_mask=test_mask)
        test_dataloader = DataLoader(test_dataset,
                                     shuffle=test_cfg["shuffle"],
                                     batch_size=test_cfg["batch_size"],
                                     num_workers=test_cfg["num_workers"])
        print(test_dataloader)

        # ==== EVAL LOOP
        self.model.eval()
        torch.set_grad_enabled(False)
        criterion = nn.MSELoss(reduction="none")

        # store information for evaluation
        # (removed the unused future_coords_offsets_pd accumulator and the
        #  no-op `test_dataloader = test_dataloader` self-assignment)
        timestamps = []
        pred_coords = []
        confidences_list = []
        agent_ids = []

        progress_bar = tqdm(test_dataloader)
        for data in progress_bar:
            _, pred, confidences = self.forward(data, criterion)

            timestamps.append(data["timestamp"].numpy().copy())
            agent_ids.append(data["track_id"].numpy().copy())
            pred_coords.append(pred.cpu().numpy().copy())
            confidences_list.append(confidences.cpu().numpy().copy())

        # ==== Save Results
        pred_path = f"{os.getcwd()}/{file_name}"
        write_pred_csv(pred_path,
                       timestamps=np.concatenate(timestamps),
                       track_ids=np.concatenate(agent_ids),
                       coords=np.concatenate(pred_coords),
                       confs=np.concatenate(confidences_list))
# Example #19
 def prepare_train_data(self):
     """Build and return the training DataLoader from the module-level cfg/dm."""
     train_cfg = cfg["train_data_loader"]
     rasterizer = build_rasterizer(cfg, dm)
     train_zarr = ChunkedDataset(dm.require(train_cfg["key"])).open()
     train_dataset = AgentDataset(cfg, train_zarr, rasterizer)
     train_dataloader = DataLoader(train_dataset, shuffle=train_cfg["shuffle"], batch_size=train_cfg["batch_size"], 
                          num_workers=train_cfg["num_workers"])
     return train_dataloader
# Example #20
def test_agent_as_ego(hist_data: tuple, dmg: LocalDataManager, cfg: dict) -> None:
    """Rasterising with each agent as ego must draw that agent in the output."""
    cfg["raster_params"]["map_type"] = "box_debug"
    cfg["raster_params"]["filter_agents_threshold"] = -1  # take everything
    rasterizer = build_rasterizer(cfg, dmg)

    frames, agents_frames = hist_data[0][:1], hist_data[1][:1]
    for agent in hist_data[1][0]:
        raster = rasterizer.rasterize(frames, agents_frames, [], agent)
        assert raster[..., -1].sum() > 0
def test_rasterizer_created_from_config(map_type: str,
                                        dataset: ChunkedStateDataset) -> None:
    """Build a rasterizer from the default config inside a temp artifact dir."""
    cfg = load_config_data("./l5kit/configs/default.yaml")
    cfg["raster_params"]["map_type"] = map_type
    with tempfile.TemporaryDirectory("", "rasterizer-test") as tmpdir:
        # materialise the artifacts the rasterizer needs, then point l5kit at them
        setup_rasterizer_artifacts_and_config(tmpdir, cfg)
        rasterizer = build_rasterizer(cfg, LocalDataManager(tmpdir))
        check_rasterizer(cfg, rasterizer, dataset)
# Example #22
def test_out_shape(hist_data: tuple, dmg: LocalDataManager, cfg: dict) -> None:
    """Output raster must have two channels (agents + ego) per history frame."""
    num_history = 5
    cfg["raster_params"]["map_type"] = "box_debug"
    cfg["model_params"]["history_num_frames"] = num_history

    rasterizer = build_rasterizer(cfg, dmg)

    frames = hist_data[0][: num_history + 1]
    agents = hist_data[1][: num_history + 1]
    out = rasterizer.rasterize(frames, agents, [])
    assert out.shape == (224, 224, (num_history + 1) * 2)
def test_get_frame_indices_ego(frame_idx: int, zarr_dataset: ChunkedDataset,
                               dmg: LocalDataManager, cfg: dict) -> None:
    """For an EgoDataset, the dataset index equals the frame index."""
    cfg["raster_params"]["map_type"] = "box_debug"
    rasterizer = build_rasterizer(cfg, dmg)
    dataset = EgoDataset(cfg, zarr_dataset, rasterizer)

    frame_indices = dataset.get_frame_indices(frame_idx)
    # this should be only one and match the index of the frame (i.e. it should be frame_idx)
    assert frame_indices[0] == frame_idx
def test_get_scene_indices_ego(scene_idx: int, zarr_dataset: ChunkedDataset,
                               dmg: LocalDataManager, cfg: dict) -> None:
    """A scene's dataset indices must span exactly that scene's frame slice."""
    cfg["raster_params"]["map_type"] = "box_debug"
    rasterizer = build_rasterizer(cfg, dmg)
    dataset = EgoDataset(cfg, zarr_dataset, rasterizer)

    scene_indices = dataset.get_scene_indices(scene_idx)
    frame_slice = get_frames_slice_from_scenes(zarr_dataset.scenes[scene_idx])
    # first and last dataset indices coincide with the scene's frame range
    assert scene_indices[0] == frame_slice.start
    assert scene_indices[-1] == frame_slice.stop - 1
# Example #25
def test_ego_layer_out_center_configs(ego_center: tuple,
                                      hist_data: tuple) -> None:
    """The ego channel must contain pixels for any configured ego_center."""
    artefacts_path = "./l5kit/tests/artefacts/"
    cfg = load_config_data(artefacts_path + "config.yaml")
    cfg["raster_params"]["map_type"] = "box_debug"
    cfg["raster_params"]["ego_center"] = np.asarray(ego_center)

    rasterizer = build_rasterizer(cfg, LocalDataManager(artefacts_path))
    raster = rasterizer.rasterize(hist_data[0][:1], hist_data[1][:1])
    # the last channel holds the ego layer
    assert raster[..., -1].sum() > 0
# Example #26
 def __init__(self, args, data_root):
     """Select the train or test config from args.mode and build the rasterizer.

     Args:
         args: namespace with at least a ``mode`` attribute ("train" selects
             the training config; anything else selects the test config).
         data_root: root folder handed to ``LocalDataManager``.
     """
     super().__init__()
     self.args = args
     if args.mode == "train":
         self.cfg = utils.get_train_cfg(args)
     else:
         self.cfg = utils.get_test_cfg(args)
     self.data_root = data_root
     self.dm = LocalDataManager(data_root)
     self.rast = build_rasterizer(self.cfg, self.dm)
def test_dataset_rasterizer(rast_name: str, dataset_cls: Callable,
                            zarr_dataset: ChunkedDataset,
                            dmg: LocalDataManager, cfg: dict) -> None:
    """Each dataset class must yield valid, torch-loadable samples with a
    config-built rasterizer."""
    rasterizer = build_rasterizer(cfg, dmg)

    dataset = dataset_cls(cfg=cfg,
                          zarr_dataset=zarr_dataset,
                          rasterizer=rasterizer,
                          perturbation=None)
    check_sample(cfg, dataset)
    check_torch_loading(dataset)
def check_performance_default(num_samples=64 * 20):
    """
    Default dataset from l5kit w/o any optimizations.

    NOTE(review): ``num_samples`` is accepted but never used in this body;
    presumably ``check_performance`` limits the sample count itself -- confirm.
    """
    scene_name = "train"
    cfg_data = get_dataset_cfg(scene_name=scene_name, map_type="py_semantic")
    dm = LocalDataManager(None)
    rasterizer = build_rasterizer(cfg_data, dm)
    zarr_dataset = ChunkedDataset(dm.require(f"scenes/{scene_name}.zarr")).open()
    dataset = AgentDataset(cfg_data, zarr_dataset, rasterizer)
    check_performance(dataset, "default")
# Example #29
def test_dataset_rasterizer(rast_name: str, dataset_cls: Callable,
                            zarr_dataset: ChunkedStateDataset) -> None:
    """Each dataset class must work with a rasterizer built from the test config."""
    artefacts_path = "./l5kit/tests/artefacts/"
    cfg = load_config_data(artefacts_path + "config.yaml")
    rasterizer = build_rasterizer(cfg, LocalDataManager(artefacts_path))
    dataset = dataset_cls(cfg=cfg,
                          zarr_dataset=zarr_dataset,
                          rasterizer=rasterizer,
                          perturbation=None)
    check_sample(cfg, dataset)
    check_torch_loading(dataset)
# Example #30
def test_agent_as_ego(hist_data: tuple) -> None:
    """Each agent, used as the ego, must appear in the rasterised output."""
    artefacts_path = "./l5kit/tests/artefacts/"
    cfg = load_config_data(artefacts_path + "config.yaml")
    cfg["raster_params"]["map_type"] = "box_debug"
    cfg["raster_params"]["filter_agents_threshold"] = -1  # take everything
    rasterizer = build_rasterizer(cfg, LocalDataManager(artefacts_path))

    for agent in hist_data[1][0]:
        raster = rasterizer.rasterize(hist_data[0][:1], hist_data[1][:1], agent)
        assert raster[..., -1].sum() > 0