Example #1
def test_graph_rasterizer_no_error():
    # contain traffic light
    index = 150

    cfg = load_config_data(config_file)
    cfg["raster_params"]["map_type"] = "semantic_graph"

    data_loader_conf = cfg.get(f"val_data_loader")
    dm = LocalDataManager()
    dataset_path = dm.require(data_loader_conf.get("key"))

    zarr_dataset = ChunkedDataset(dataset_path)
    zarr_dataset.open()

    rasterizer = build_rasterizer(cfg=cfg, data_manager=dm)
    dataset = AgentGraphDataset(cfg=cfg,
                                zarr_dataset=zarr_dataset,
                                rasterizer=rasterizer)
    data_point = dataset[index]

    assert "graph" in data_point
    assert "lanes" in data_point["graph"]
    assert isinstance(data_point["graph"]["lanes"], List)

    print()
    print(data_point.keys())
    element_types = SemGraphRasterizer.keys
    for e in element_types:
        print(f"---- {e} ----")
        if len(data_point["graph"][e]) > 0:
            print(data_point["graph"][e][0])
Example #2
def test_require_existing_file(tmp_path: Path) -> None:
    print(tmp_path)
    p = tmp_path / "my_file.txt"
    p.write_text("hello")

    dm_local = LocalDataManager(tmp_path)
    assert dm_local.require("my_file.txt") == str(p)
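
A complementary negative-path sketch (assumption: LocalDataManager.require raises FileNotFoundError when a key is not present under its root folder):

import pytest


def test_require_missing_file(tmp_path: Path) -> None:
    dm_local = LocalDataManager(tmp_path)
    # "missing_file.txt" was never created under tmp_path, so require()
    # is expected to fail (assumed behaviour: FileNotFoundError)
    with pytest.raises(FileNotFoundError):
        dm_local.require("missing_file.txt")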
Example #3
def check_performance_default(num_samples=64 * 20):
    """
    Default dataset from l5kit without any optimizations.
    """
    scene_name = "train"
    cfg_data = get_dataset_cfg(scene_name=scene_name, map_type="py_semantic")
    dm = LocalDataManager(None)
    rasterizer = build_rasterizer(cfg_data, dm)
    zarr_dataset = ChunkedDataset(dm.require(f"scenes/{scene_name}.zarr")).open()
    dataset = AgentDataset(cfg_data, zarr_dataset, rasterizer)
    check_performance(dataset, "default")
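
The check_performance helper is not shown above; a minimal hypothetical version (name and signature assumed) would simply time iteration over a fixed number of samples:

import time


def check_performance(dataset, label: str, num_samples: int = 64 * 20) -> None:
    # hypothetical sketch: pull samples to force rasterization and report
    # throughput; the real helper in the source repository may differ
    num_samples = min(num_samples, len(dataset))
    start = time.perf_counter()
    for idx in range(num_samples):
        _ = dataset[idx]
    elapsed = time.perf_counter() - start
    print(f"{label}: {num_samples / elapsed:.1f} samples/s")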
Example #4
def main():
    dm = LocalDataManager(None)
    rasterizer = build_rasterizer(cfg, dm)

    num_frames_to_chop = 10
    eval_base_path = create_chopped_dataset(
        dm.require("scenes/validate.zarr"),
        cfg["raster_params"]["filter_agents_threshold"],
        num_frames_to_chop,
        cfg["model_params"]["future_num_frames"],
        MIN_FUTURE_STEPS,
    )

    print("Path:", eval_base_path)
Example #5
    def setup(self, stage=None):
        if self.data_manager is None:
            self.data_manager = LocalDataManager(self.data_root)
        if self.rasterizer is None:
            self.rasterizer = build_rasterizer(self.config, self.data_manager)
        if stage == 'fit' or stage is None:
            train_zarr = ChunkedDataset(
                self.data_manager.require(self.train_split)).open(
                    cache_size_bytes=int(self.cache_size))
            train_data = AgentDataset(self.config, train_zarr, self.rasterizer)

            if self.train_idxs is not None:
                train_data = Subset(train_data, self.train_idxs)
            if self.val_split is None or self.val_split == self.train_split:
                tl = len(train_data)
                vl = int(tl * self.val_proportion)
                self.train_data, self.val_data = random_split(
                    train_data, [tl - vl, vl])
            else:
                self.train_data = train_data
                val_zarr = ChunkedDataset(
                    self.data_manager.require(self.val_split)).open(
                        cache_size_bytes=int(self.cache_size))
                self.val_data = AgentDataset(self.config, val_zarr,
                                             self.rasterizer)
                if self.val_idxs is not None:
                    self.val_data = Subset(self.val_data, self.val_idxs)
            if self.raster_cache_size:
                self.train_data = CachedDataset(self.train_data,
                                                self.raster_cache_size)
                self.val_data = CachedDataset(self.val_data,
                                              self.raster_cache_size)
        if stage == 'test' or stage is None:
            test_zarr = ChunkedDataset(
                self.data_manager.require(self.test_split)).open(
                    cache_size_bytes=int(self.cache_size))
            if self.test_mask is not None:
                test_data = AgentDataset(self.config,
                                         test_zarr,
                                         self.rasterizer,
                                         agents_mask=self.test_mask)
            else:
                test_data = AgentDataset(self.config, test_zarr,
                                         self.rasterizer)
            if self.test_idxs is not None:
                test_data = Subset(test_data, self.test_idxs)
            else:
                self.test_idxs = np.arange(start=1, stop=len(test_data) + 1)
            self.test_data = IndexedDataset(test_data, self.test_idxs)
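
Hypothetical companion dataloader hooks for the setup() above (assumptions: the class is a pytorch_lightning LightningDataModule exposing batch_size and num_workers attributes, neither of which is shown in the snippet; DataLoader comes from torch.utils.data):

    def train_dataloader(self) -> DataLoader:
        # shuffle only the training split
        return DataLoader(self.train_data, batch_size=self.batch_size,
                          shuffle=True, num_workers=self.num_workers)

    def val_dataloader(self) -> DataLoader:
        return DataLoader(self.val_data, batch_size=self.batch_size,
                          shuffle=False, num_workers=self.num_workers)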
Example #6
def main(model_name, plot_loss=True):
    DIR_INPUT = cfg["data_path"]
    os.environ["L5KIT_DATA_FOLDER"] = DIR_INPUT
    dm = LocalDataManager(None)
    rasterizer = build_rasterizer(cfg, dm)
    train_dataset, train_dataset_ego, train_dataloader, train_dataloader_ego = get_train_dataloaders(
        cfg, dm)
    criterion = pytorch_neg_multi_log_likelihood_batch
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = CSP(cfg, device).to(device)
    epochs = 10
    optimizer = optim.Adam(model.parameters(), lr=1e-2)
    train_losses = train(model, train_dataloader, train_dataset_ego,
                         train_dataset, criterion, device, epochs, optimizer)
    torch.save(model, "models/{}.pt".format(model_name))
    np.save("models/training_loss_{}.npy".format(model_name), train_losses)

    eval_dataset, eval_dataloader, eval_dataset_ego, eval_gt_path = generate_eval_dataset(
        cfg, dm, rasterizer)

    eval(model, eval_dataloader, eval_dataset_ego, eval_dataset, criterion,
         device, eval_gt_path, cfg)

    if plot_loss:
        plt.plot(np.arange(len(train_losses)),
                 train_losses,
                 label="train loss")
        plt.legend()
        plt.show()
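
Follow-up usage sketch (assumption: since the whole model object is saved with torch.save above, torch.load restores it directly as long as the CSP class is importable; model_name and device are reused from main()):

# reload the trained model and its recorded loss curve
model = torch.load("models/{}.pt".format(model_name), map_location=device)
model.eval()
train_losses = np.load("models/training_loss_{}.npy".format(model_name))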
Example #7
def test_rasterizer_created_from_config(map_type: str,
                                        dataset: ChunkedStateDataset) -> None:
    cfg = load_config_data("./l5kit/tests/artefacts/config.yaml")
    cfg["raster_params"]["map_type"] = map_type
    dm = LocalDataManager("./l5kit/tests/artefacts/")
    rasterizer = build_rasterizer(cfg, dm)
    check_rasterizer(cfg, rasterizer, dataset)
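
The map_type argument implies pytest parametrisation (the dataset argument would come from a fixture); a plausible decorator, with the exact value list being an assumption, would be:

@pytest.mark.parametrize("map_type", ["py_semantic", "py_satellite", "box_debug"])
def test_rasterizer_created_from_config(map_type: str,
                                        dataset: ChunkedStateDataset) -> None:
    ...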
Example #8
def test_zarr_scenes_chunk(dmg: LocalDataManager, tmp_path: Path,
                           zarr_dataset: ChunkedDataset,
                           num_frames_to_copy: int) -> None:
    # first let's concat so we have multiple scenes
    concat_count = 10
    zarr_input_path = dmg.require("single_scene.zarr")
    zarr_concatenated_path = str(tmp_path / f"{uuid4()}.zarr")
    zarr_concat([zarr_input_path] * concat_count, zarr_concatenated_path)

    # now let's chop it
    zarr_chopped_path = str(tmp_path / f"{uuid4()}.zarr")
    zarr_scenes_chop(zarr_concatenated_path,
                     zarr_chopped_path,
                     num_frames_to_copy=num_frames_to_copy)

    # open both and compare
    zarr_concatenated = ChunkedDataset(zarr_concatenated_path)
    zarr_concatenated.open()
    zarr_chopped = ChunkedDataset(zarr_chopped_path)
    zarr_chopped.open()

    assert len(zarr_concatenated.scenes) == len(zarr_chopped.scenes)
    assert len(
        zarr_chopped.frames) == num_frames_to_copy * len(zarr_chopped.scenes)

    for idx in range(len(zarr_concatenated.scenes)):
        scene_cat = zarr_concatenated.scenes[idx]
        scene_chopped = zarr_chopped.scenes[idx]

        frames_cat = zarr_concatenated.frames[
            scene_cat["frame_index_interval"][0]:
            scene_cat["frame_index_interval"][0] + num_frames_to_copy]

        frames_chopped = zarr_chopped.frames[get_frames_slice_from_scenes(
            scene_chopped)]

        agents_cat = zarr_concatenated.agents[get_agents_slice_from_frames(
            *frames_cat[[0, -1]])]
        tl_faces_cat = zarr_concatenated.tl_faces[
            get_tl_faces_slice_from_frames(*frames_cat[[0, -1]])]

        agents_chopped = zarr_chopped.agents[get_agents_slice_from_frames(
            *frames_chopped[[0, -1]])]
        tl_faces_chopped = zarr_chopped.tl_faces[
            get_tl_faces_slice_from_frames(*frames_chopped[[0, -1]])]

        assert scene_chopped["host"] == scene_cat["host"]
        assert scene_chopped["start_time"] == scene_cat["start_time"]
        assert scene_chopped["end_time"] == scene_cat["end_time"]

        assert len(frames_chopped) == num_frames_to_copy

        assert np.all(frames_chopped["ego_translation"] ==
                      frames_cat["ego_translation"][:num_frames_to_copy])
        assert np.all(frames_chopped["ego_rotation"] ==
                      frames_cat["ego_rotation"][:num_frames_to_copy])

        assert np.all(agents_chopped == agents_cat)
        assert np.all(tl_faces_chopped == tl_faces_cat)
Example #9
def test_zarr_split(dmg: LocalDataManager, tmp_path: Path,
                    zarr_dataset: ChunkedDataset) -> None:
    concat_count = 10
    zarr_input_path = dmg.require("single_scene.zarr")
    zarr_concatenated_path = str(tmp_path / f"{uuid4()}.zarr")
    zarr_concat([zarr_input_path] * concat_count, zarr_concatenated_path)

    split_infos = [
        {
            "name": f"{uuid4()}.zarr",
            "split_size_GB": 0.002
        },  # cut around 2MB
        {
            "name": f"{uuid4()}.zarr",
            "split_size_GB": 0.001
        },  # cut around 0.5MB
        {
            "name": f"{uuid4()}.zarr",
            "split_size_GB": -1
        },
    ]  # everything else

    scene_splits = zarr_split(zarr_concatenated_path, str(tmp_path),
                              split_infos)

    # load the zarrs and check elements
    zarr_concatenated = ChunkedDataset(zarr_concatenated_path)
    zarr_concatenated.open()

    for scene_split, split_info in zip(scene_splits, split_infos):
        zarr_out = ChunkedDataset(str(tmp_path / str(split_info["name"])))
        zarr_out.open()

        # compare elements at the start and end of each scene in both zarrs
        for idx_scene in range(len(zarr_out.scenes)):
            # compare elements in the scene
            input_scene = zarr_concatenated.scenes[scene_split[0] + idx_scene]
            input_frames = zarr_concatenated.frames[
                get_frames_slice_from_scenes(input_scene)]
            input_agents = zarr_concatenated.agents[
                get_agents_slice_from_frames(*input_frames[[0, -1]])]
            input_tl_faces = zarr_concatenated.tl_faces[
                get_tl_faces_slice_from_frames(*input_frames[[0, -1]])]

            output_scene = zarr_out.scenes[idx_scene]
            output_frames = zarr_out.frames[get_frames_slice_from_scenes(
                output_scene)]
            output_agents = zarr_out.agents[get_agents_slice_from_frames(
                *output_frames[[0, -1]])]
            output_tl_faces = zarr_out.tl_faces[get_tl_faces_slice_from_frames(
                *output_frames[[0, -1]])]

            assert np.all(input_frames["ego_translation"] ==
                          output_frames["ego_translation"])
            assert np.all(
                input_frames["ego_rotation"] == output_frames["ego_rotation"])
            assert np.all(input_agents == output_agents)
            assert np.all(input_tl_faces == output_tl_faces)