# Shared imports for the examples below. The l5kit module paths match recent
# releases but may vary by version; MockModel is a helper defined alongside
# l5kit's simulation tests.
import warnings
from pathlib import Path
from typing import Dict, Optional
from uuid import uuid4

import numpy as np
import torch

from l5kit.data import (ChunkedDataset, LocalDataManager,
                        filter_agents_by_frames, filter_agents_by_track_id,
                        filter_tl_faces_by_frames,
                        get_agents_slice_from_frames,
                        get_frames_slice_from_scenes,
                        get_tl_faces_slice_from_frames)
from l5kit.data.zarr_dataset import FRAME_ARRAY_KEY, SCENE_ARRAY_KEY
from l5kit.data.zarr_utils import zarr_concat, zarr_scenes_chop, zarr_split
from l5kit.dataset import EgoDataset
from l5kit.geometry import rotation33_as_yaw, yaw_as_rotation33
from l5kit.rasterization import build_rasterizer
from l5kit.sampling.agent_sampling import get_agent_context
from l5kit.simulation.dataset import SimulationConfig, SimulationDataset
from l5kit.simulation.unroll import ClosedLoopSimulator, TrajectoryStateIndices

Example 1

def test_zarr_split(dmg: LocalDataManager, tmp_path: Path,
                    zarr_dataset: ChunkedDataset) -> None:
    concat_count = 10
    zarr_input_path = dmg.require("single_scene.zarr")
    zarr_concatenated_path = str(tmp_path / f"{uuid4()}.zarr")
    zarr_concat([zarr_input_path] * concat_count, zarr_concatenated_path)

    split_infos = [
        {"name": f"{uuid4()}.zarr", "split_size_GB": 0.002},  # cut around 2MB
        {"name": f"{uuid4()}.zarr", "split_size_GB": 0.001},  # cut around 1MB
        {"name": f"{uuid4()}.zarr", "split_size_GB": -1},  # everything else
    ]

    scene_splits = zarr_split(zarr_concatenated_path, str(tmp_path),
                              split_infos)

    # load the zarrs and check elements
    zarr_concatenated = ChunkedDataset(zarr_concatenated_path)
    zarr_concatenated.open()

    for scene_split, split_info in zip(scene_splits, split_infos):
        zarr_out = ChunkedDataset(str(tmp_path / str(split_info["name"])))
        zarr_out.open()

        # compare elements of each scene in both zarrs
        for idx_scene in range(len(zarr_out.scenes)):
            input_scene = zarr_concatenated.scenes[scene_split[0] + idx_scene]
            input_frames = zarr_concatenated.frames[
                get_frames_slice_from_scenes(input_scene)]
            input_agents = zarr_concatenated.agents[
                get_agents_slice_from_frames(*input_frames[[0, -1]])]
            input_tl_faces = zarr_concatenated.tl_faces[
                get_tl_faces_slice_from_frames(*input_frames[[0, -1]])]

            output_scene = zarr_out.scenes[idx_scene]
            output_frames = zarr_out.frames[get_frames_slice_from_scenes(
                output_scene)]
            output_agents = zarr_out.agents[get_agents_slice_from_frames(
                *output_frames[[0, -1]])]
            output_tl_faces = zarr_out.tl_faces[get_tl_faces_slice_from_frames(
                *output_frames[[0, -1]])]

            assert np.all(input_frames["ego_translation"] ==
                          output_frames["ego_translation"])
            assert np.all(
                input_frames["ego_rotation"] == output_frames["ego_rotation"])
            assert np.all(input_agents == output_agents)
            assert np.all(input_tl_faces == output_tl_faces)
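
The slice helpers used above read index intervals stored on each record. A minimal
sketch of the idea with a toy structured array (the field name matches l5kit's scene
dtype; the helper body shown is an assumption, not the library source):

import numpy as np

scenes = np.zeros(1, dtype=[("frame_index_interval", np.int64, (2,))])
scenes[0]["frame_index_interval"] = (10, 25)

# get_frames_slice_from_scenes(scene) effectively returns slice(start, stop)
frame_slice = slice(*scenes[0]["frame_index_interval"])
assert (frame_slice.start, frame_slice.stop) == (10, 25)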

Example 2

def test_get_frames_slice_from_scenes(zarr_dataset: ChunkedDataset) -> None:
    # the zarr_dataset fixture holds a single scene, so its frame slice spans
    # every frame in the dataset
    scene_a = zarr_dataset.scenes[0]
    frame_slice = get_frames_slice_from_scenes(scene_a)
    assert len(zarr_dataset.frames) == len(zarr_dataset.frames[frame_slice])

    # test e2e starting from scene
    frame_range = get_frames_slice_from_scenes(zarr_dataset.scenes[0])
    agents_range = get_agents_slice_from_frames(
        *zarr_dataset.frames[frame_range][[0, -1]])
    tl_faces_range = get_tl_faces_slice_from_frames(
        *zarr_dataset.frames[frame_range][[0, -1]])
    agents = zarr_dataset.agents[agents_range]
    tl_faces = zarr_dataset.tl_faces[tl_faces_range]

    assert len(agents) == len(zarr_dataset.agents)
    assert len(tl_faces) == len(zarr_dataset.tl_faces)
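
The `*frames[[0, -1]]` idiom above passes the first and last frame of a scene; the
agents slice then spans from the first frame's start index to the last frame's stop
index. A toy sketch under the same assumption (l5kit's frame dtype stores an
`agent_index_interval` pair):

import numpy as np

frames = np.zeros(3, dtype=[("agent_index_interval", np.int64, (2,))])
frames["agent_index_interval"] = [(0, 2), (2, 5), (5, 9)]

first, last = frames[[0, -1]]
agents_slice = slice(first["agent_index_interval"][0],
                     last["agent_index_interval"][1])
assert (agents_slice.start, agents_slice.stop) == (0, 9)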

Example 3

def test_simulation_dataset_build(zarr_cat_dataset: ChunkedDataset,
                                  dmg: LocalDataManager, cfg: dict,
                                  tmp_path: Path) -> None:
    # modify the first frame of each scene to ensure everything works even
    # when scenes differ; np.asarray loads the zarr-backed frames into memory
    # so they can be mutated in place
    zarr_cat_dataset.frames = np.asarray(zarr_cat_dataset.frames)
    for scene_idx in range(len(zarr_cat_dataset.scenes)):
        frame_slice = get_frames_slice_from_scenes(
            zarr_cat_dataset.scenes[scene_idx])
        zarr_cat_dataset.frames[
            frame_slice.start]["ego_translation"] += np.random.randn(3)

    rasterizer = build_rasterizer(cfg, dmg)
    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, rasterizer)
    sim_cfg = SimulationConfig(use_ego_gt=True,
                               use_agents_gt=True,
                               disable_new_agents=False,
                               distance_th_far=30,
                               distance_th_close=10)
    # we should be able to create the same object by using both constructor and factory
    scene_indices = list(range(len(zarr_cat_dataset.scenes)))

    scene_dataset_batch: Dict[int, EgoDataset] = {}
    for scene_idx in scene_indices:
        scene_dataset = ego_dataset.get_scene_dataset(scene_idx)
        scene_dataset_batch[scene_idx] = scene_dataset
    sim_1 = SimulationDataset(scene_dataset_batch, sim_cfg)

    sim_2 = SimulationDataset.from_dataset_indices(ego_dataset, scene_indices,
                                                   sim_cfg)

    for (k_1, v_1), (k_2, v_2) in zip(sim_1.scene_dataset_batch.items(),
                                      sim_2.scene_dataset_batch.items()):
        assert k_1 == k_2
        assert np.allclose(v_1.dataset.frames["ego_translation"],
                           v_2.dataset.frames["ego_translation"])
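
The `np.asarray` call at the top of the test matters: the zarr-backed `frames`
array does not support the in-place mutation that follows, so it is first
materialised as a plain numpy array. A runnable toy version of that pattern
(hypothetical data, not the full l5kit dtype):

import numpy as np

frames = np.zeros(4, dtype=[("ego_translation", np.float64, (3,))])
frames[0]["ego_translation"] += np.random.randn(3)  # in-place edit on in-memory data
assert np.any(frames[0]["ego_translation"] != 0.0)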

Example 4

def test_zarr_scenes_chunk(dmg: LocalDataManager, tmp_path: Path,
                           zarr_dataset: ChunkedDataset,
                           num_frames_to_copy: int) -> None:
    # first let's concat so we have multiple scenes
    concat_count = 10
    zarr_input_path = dmg.require("single_scene.zarr")
    zarr_concatenated_path = str(tmp_path / f"{uuid4()}.zarr")
    zarr_concat([zarr_input_path] * concat_count, zarr_concatenated_path)

    # now let's chop it, keeping only the first frames of each scene
    zarr_chopped_path = str(tmp_path / f"{uuid4()}.zarr")
    zarr_scenes_chop(zarr_concatenated_path,
                     zarr_chopped_path,
                     num_frames_to_copy=num_frames_to_copy)

    # open both and compare
    zarr_concatenated = ChunkedDataset(zarr_concatenated_path)
    zarr_concatenated.open()
    zarr_chopped = ChunkedDataset(zarr_chopped_path)
    zarr_chopped.open()

    assert len(zarr_concatenated.scenes) == len(zarr_chopped.scenes)
    assert (len(zarr_chopped.frames) ==
            num_frames_to_copy * len(zarr_chopped.scenes))

    for idx in range(len(zarr_concatenated.scenes)):
        scene_cat = zarr_concatenated.scenes[idx]
        scene_chopped = zarr_chopped.scenes[idx]

        frames_cat = zarr_concatenated.frames[
            scene_cat["frame_index_interval"][0]:
            scene_cat["frame_index_interval"][0] + num_frames_to_copy]

        frames_chopped = zarr_chopped.frames[get_frames_slice_from_scenes(
            scene_chopped)]

        agents_cat = zarr_concatenated.agents[get_agents_slice_from_frames(
            *frames_cat[[0, -1]])]
        tl_faces_cat = zarr_concatenated.tl_faces[
            get_tl_faces_slice_from_frames(*frames_cat[[0, -1]])]

        agents_chopped = zarr_chopped.agents[get_agents_slice_from_frames(
            *frames_chopped[[0, -1]])]
        tl_faces_chopped = zarr_chopped.tl_faces[
            get_tl_faces_slice_from_frames(*frames_chopped[[0, -1]])]

        assert scene_chopped["host"] == scene_cat["host"]
        assert scene_chopped["start_time"] == scene_cat["start_time"]
        assert scene_chopped["end_time"] == scene_cat["end_time"]

        assert len(frames_chopped) == num_frames_to_copy

        assert np.all(frames_chopped["ego_translation"] ==
                      frames_cat["ego_translation"][:num_frames_to_copy])
        assert np.all(frames_chopped["ego_rotation"] ==
                      frames_cat["ego_rotation"][:num_frames_to_copy])

        assert np.all(agents_chopped == agents_cat)
        assert np.all(tl_faces_chopped == tl_faces_cat)
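
Judging from the assertions above, chopping keeps the first `num_frames_to_copy`
frames of every scene (this summary is inferred from the test, not from the
`zarr_scenes_chop` source). A toy sketch of the kept frame indices:

import numpy as np

num_frames_to_copy = 2
frame_intervals = [(0, 5), (5, 9)]  # two scenes' [start, stop) frame ranges
kept = [list(np.arange(start, start + num_frames_to_copy))
        for start, _ in frame_intervals]
assert kept == [[0, 1], [5, 6]]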

Example 5

    def get_frame(self,
                  scene_index: int,
                  state_index: int,
                  track_id: Optional[int] = None) -> dict:
        """
        A utility function to get the rasterisation and trajectory target for a given agent in a given frame

        Args:
            scene_index (int): the index of the scene in the zarr
            state_index (int): a relative frame index in the scene
            track_id (Optional[int]): the agent to rasterize or None for the AV
        Returns:
            dict: the rasterised image, the target trajectory (position and yaw) along with their availability,
            the 2D matrix to center that agent, the agent track (-1 if ego) and the timestamp

        """
        frames = self.dataset.frames[get_frames_slice_from_scenes(
            self.dataset.scenes[scene_index])]
        data = self.sample_function(state_index, frames, self.dataset.agents,
                                    self.dataset.tl_faces, track_id)
        # 0,1,C -> C,0,1
        image = data["image"].transpose(2, 0, 1)

        target_positions = np.array(data["target_positions"], dtype=np.float32)
        # target_velocities = np.array(data["target_velocities"], dtype=np.float32)
        # target_accelerations = np.array(data["target_accelerations"], dtype=np.float32)
        target_yaws = np.array(data["target_yaws"], dtype=np.float32)

        history_positions = np.array(data["history_positions"],
                                     dtype=np.float32)
        # history_velocities = np.array(data["history_velocities"], dtype=np.float32)
        # history_accelerations = np.array(data["history_accelerations"], dtype=np.float32)
        # history_yaws = np.array(data["history_yaws"], dtype=np.float32)

        estimated_positions = np.array(data["estimated_future_positions"],
                                       dtype=np.float32)

        timestamp = frames[state_index]["timestamp"]
        # always a number to avoid crashing torch
        track_id = np.int64(-1 if track_id is None else track_id)

        return {
            "image": image,
            "target_positions": target_positions,
            "target_yaws": target_yaws,
            "target_availabilities": data["target_availabilities"],
            "history_positions": history_positions,
            "history_availabilities": data["history_availabilities"],
            "estimated_future_positions": estimated_positions,
            "world_to_image": data["world_to_image"],
            "track_id": track_id,
            "timestamp": timestamp,
            "centroid": data["centroid"],
            "yaw": data["yaw"],
            "extent": data["extent"],
        }
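
The `transpose(2, 0, 1)` above converts the rasterised image from HWC to the CHW
layout torch models expect. A self-contained sketch of that step (toy raster
shape, hypothetical sizes):

import numpy as np
import torch

image_hwc = np.zeros((224, 224, 5), dtype=np.float32)  # H, W, C from the rasterizer
image_chw = image_hwc.transpose(2, 0, 1)
batch = torch.from_numpy(image_chw).unsqueeze(0)  # (1, C, H, W) for a model
assert batch.shape == (1, 5, 224, 224)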

Example 6

    def get_frame(self, scene_index: int, state_index: int, track_id: Optional[int] = None) -> dict:
        """
        A utility function to get the rasterisation and trajectory target for a given agent in a given frame

        Args:
            scene_index (int): the index of the scene in the zarr
            state_index (int): a relative frame index in the scene
            track_id (Optional[int]): the agent to rasterize or None for the AV
        Returns:
            dict: the rasterised image, the target trajectory (position and yaw) along with their availability,
            the 2D matrix to center that agent, the agent track (-1 if ego) and the timestamp

        """
        frames = self.dataset.frames[get_frames_slice_from_scenes(self.dataset.scenes[scene_index])]

        tl_faces = self.dataset.tl_faces
        try:
            if self.cfg["raster_params"]["disable_traffic_light_faces"]:
                tl_faces = np.empty(0, dtype=self.dataset.tl_faces.dtype)  # completely disable traffic light faces
        except KeyError:
            warnings.warn(
                "disable_traffic_light_faces not found in config, this will raise an error in the future",
                RuntimeWarning,
                stacklevel=2,
            )
        data = self.sample_function(state_index, frames, self.dataset.agents, tl_faces, track_id)

        graph = data["image"]

        target_positions = np.array(data["target_positions"], dtype=np.float32)
        target_yaws = np.array(data["target_yaws"], dtype=np.float32)

        history_positions = np.array(data["history_positions"], dtype=np.float32)
        history_yaws = np.array(data["history_yaws"], dtype=np.float32)

        timestamp = frames[state_index]["timestamp"]
        track_id = np.int64(-1 if track_id is None else track_id)  # always a number to avoid crashing torch

        return {
            "graph": graph,
            "target_positions": target_positions,
            "target_yaws": target_yaws,
            "target_availabilities": data["target_availabilities"],
            "history_positions": history_positions,
            "history_yaws": history_yaws,
            "history_availabilities": data["history_availabilities"],
            "raster_from_world": data["raster_from_world"],
            "raster_from_agent": data["raster_from_agent"],
            "agent_from_world": data["agent_from_world"],
            "world_from_agent": data["world_from_agent"],
            "track_id": track_id,
            "timestamp": timestamp,
            "centroid": data["centroid"],
            "yaw": data["yaw"],
            "extent": data["extent"],
            "scene_index": scene_index
        }
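
The try/except above reads a `raster_params.disable_traffic_light_faces` config
key; when set, traffic lights are swapped for an empty array of the same dtype so
downstream code can index it uniformly. A toy sketch of that trick (the dtype
here is a stand-in, not l5kit's full TL_FACE_DTYPE):

import numpy as np

tl_dtype = np.dtype([("face_id", "U16"), ("status", np.float32)])  # stand-in dtype
tl_faces = np.empty(0, dtype=tl_dtype)  # "disabled": zero rows, same schema
assert len(tl_faces) == 0 and tl_faces.dtype == tl_dtype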

Example 7

def test_get_scene_indices_ego(scene_idx: int, zarr_dataset: ChunkedDataset,
                               dmg: LocalDataManager, cfg: dict) -> None:
    cfg["raster_params"]["map_type"] = "box_debug"
    rasterizer = build_rasterizer(cfg, dmg)
    dataset = EgoDataset(cfg, zarr_dataset, rasterizer)

    scene_indices = dataset.get_scene_indices(scene_idx)
    frame_slice = get_frames_slice_from_scenes(zarr_dataset.scenes[scene_idx])
    assert scene_indices[0] == frame_slice.start
    assert scene_indices[-1] == frame_slice.stop - 1
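
For the ego dataset there is one sample per frame, so a scene's sample indices
are exactly the frame indices of its slice, which is what the two assertions
check. A minimal sketch of that equivalence:

import numpy as np

frame_slice = slice(10, 15)  # toy frame range for one scene
scene_indices = np.arange(frame_slice.start, frame_slice.stop)
assert scene_indices[0] == frame_slice.start
assert scene_indices[-1] == frame_slice.stop - 1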

Example 8

def get_frame_custom(self,
                     scene_index: int,
                     state_index: int,
                     track_id: Optional[int] = None) -> dict:
    """Customized `get_frame` function, which returns all `data` entries of `sample_function`.
    A utility function to get the rasterisation and trajectory target for a given agent in a given frame

    Args:
        self: Ego Dataset
        scene_index (int): the index of the scene in the zarr
        state_index (int): a relative frame index in the scene
        track_id (Optional[int]): the agent to rasterize or None for the AV
    Returns:
        dict: the rasterised image, the target trajectory (position and yaw) along with their availability,
        the 2D matrix to center that agent, the agent track (-1 if ego) and the timestamp

    """
    frames = self.dataset.frames[get_frames_slice_from_scenes(
        self.dataset.scenes[scene_index])]

    tl_faces = self.dataset.tl_faces
    try:
        if self.cfg["raster_params"]["disable_traffic_light_faces"]:
            # completely disable traffic light faces
            tl_faces = np.empty(0, dtype=self.dataset.tl_faces.dtype)
    except KeyError:
        warnings.warn(
            "disable_traffic_light_faces not found in config, this will raise an error in the future",
            RuntimeWarning,
            stacklevel=2,
        )
    data = self.sample_function(state_index, frames, self.dataset.agents,
                                tl_faces, track_id)
    # 0,1,C -> C,0,1
    image = data["image"].transpose(2, 0, 1)

    target_positions = np.array(data["target_positions"], dtype=np.float32)
    target_yaws = np.array(data["target_yaws"], dtype=np.float32)

    history_positions = np.array(data["history_positions"], dtype=np.float32)
    history_yaws = np.array(data["history_yaws"], dtype=np.float32)

    timestamp = frames[state_index]["timestamp"]
    # always a number to avoid crashing torch
    track_id = np.int64(-1 if track_id is None else track_id)

    data["image"] = image
    data["target_positions"] = target_positions
    data["target_yaws"] = target_yaws
    data["history_positions"] = history_positions
    data["history_yaws"] = history_yaws
    data["track_id"] = track_id
    data["timestamp"] = timestamp
    return data
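
Since `get_frame_custom` is defined at module level with an explicit `self`, one
way to use it is to bind it onto the dataset class; this wiring is an assumption
about how the snippet is meant to be consumed, not something the source states:

# hypothetical wiring: replace EgoDataset.get_frame with the custom variant,
# so existing callers (e.g. __getitem__) pick up the extra entries
EgoDataset.get_frame = get_frame_custom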

Example 9

def test_get_agent_context(zarr_dataset: ChunkedDataset, state_index: int,
                           history_steps: int, future_steps: int) -> None:

    scene = zarr_dataset.scenes[0]
    frames = zarr_dataset.frames[get_frames_slice_from_scenes(scene)]
    agents = zarr_dataset.agents[get_agents_slice_from_frames(
        *frames[[0, -1]])]
    tls = zarr_dataset.tl_faces[get_tl_faces_slice_from_frames(
        *frames[[0, -1]])]

    frames_his_f, frames_fut_f, agents_his_f, agents_fut_f, tls_his_f, tls_fut_f = get_agent_context(
        state_index, frames, agents, tls, history_steps, future_steps)

    # test future using timestamp
    first_idx = state_index + 1
    last_idx = state_index + 1 + future_steps

    frames_fut = frames[first_idx:last_idx]
    agents_fut = filter_agents_by_frames(frames_fut, zarr_dataset.agents)
    tls_fut = filter_tl_faces_by_frames(frames_fut, zarr_dataset.tl_faces)

    assert np.all(frames_fut_f["timestamp"] == frames_fut["timestamp"])

    assert len(agents_fut) == len(agents_fut_f)
    for idx in range(len(agents_fut)):
        assert np.all(agents_fut_f[idx] == agents_fut[idx])

    assert len(tls_fut) == len(tls_fut_f)
    for idx in range(len(tls_fut)):
        assert np.all(tls_fut_f[idx] == tls_fut[idx])

    # test the past (which is reversed and includes the present)
    first_idx = max(state_index - history_steps, 0)
    last_idx = state_index + 1

    frames_his = frames[first_idx:last_idx]
    agents_his = filter_agents_by_frames(frames_his, zarr_dataset.agents)
    tls_his = filter_tl_faces_by_frames(frames_his, zarr_dataset.tl_faces)

    assert np.all(frames_his_f["timestamp"] == frames_his["timestamp"][::-1])

    assert len(agents_his) == len(agents_his_f)
    for idx in range(len(agents_his)):
        assert np.all(
            agents_his_f[idx] == agents_his[len(agents_his) - idx - 1])

    assert len(tls_his) == len(tls_his_f)
    for idx in range(len(tls_his)):
        assert np.all(tls_his_f[idx] == tls_his[len(tls_his) - idx - 1])
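
The helper returns history newest-first (present frame included), which is why
the test compares against the chronological slice reversed. A runnable toy
version of that ordering:

import numpy as np

timestamps = np.array([100, 101, 102, 103])
state_index, history_steps = 3, 2
chronological = timestamps[max(state_index - history_steps, 0):state_index + 1]
newest_first = chronological[::-1]  # present frame first, as in frames_his_f
assert list(newest_first) == [103, 102, 101]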

Example 10

    def get_frame(self, scene_index: int, state_index: int) -> dict:
        """
        A utility function to get the ego state for a given frame (plus the
        traffic lights and agents, past or present depending on `with_history`)
        Args:
            scene_index (int): the index of the scene in the zarr
            state_index (int): a relative frame index in the scene
        Returns:
            dict: the timestamp plus the ego/agents/traffic-light entries
            selected by the `with_history` and `return_indices` flags
        """
        frames_slice = get_frames_slice_from_scenes(
            self.zarr_root[SCENE_ARRAY_KEY][scene_index])
        frames = self.zarr_root[FRAME_ARRAY_KEY][frames_slice]
        timestamp = frames[state_index]["timestamp"]
        data = self.sample_function(state_index, frames)

        results = {
            "timestamp": timestamp,
        }
        if self.with_history:
            results.update({
                "ego_centroid": data["ego_centroid"],
                "ego_speed": data["ego_speed"],
                "history_tl_faces": data["history_tl_faces"],
                "history_agents": data["history_agents"],
            })
        else:
            results.update({
                "ego_centroid": data["ego_centroid"],
                "ego_speed": data["ego_speed"],
                "ego_yaw": data["ego_yaw"],
                "tl_faces": data["tl_faces"],
                "agents": data["agents"],
            })
        if self.return_indices:
            results.update({
                "scene_index": scene_index,
                "state_index": state_index
            })

        return results

Example 11

def test_unroll(zarr_cat_dataset: ChunkedDataset, dmg: LocalDataManager,
                cfg: dict) -> None:
    rasterizer = build_rasterizer(cfg, dmg)

    # change the first yaw of scene 1
    # this will break if some broadcasting happens
    zarr_cat_dataset.frames = np.asarray(zarr_cat_dataset.frames)
    slice_frames = get_frames_slice_from_scenes(zarr_cat_dataset.scenes[1])
    rot = zarr_cat_dataset.frames[slice_frames.start]["ego_rotation"].copy()
    # add 0.75 rad to the yaw (not to the raw matrix) before writing it back
    zarr_cat_dataset.frames[
        slice_frames.start]["ego_rotation"] = yaw_as_rotation33(
            rotation33_as_yaw(rot) + 0.75)

    scene_indices = list(range(len(zarr_cat_dataset.scenes)))
    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, rasterizer)

    # control only agents at T0, control them forever
    sim_cfg = SimulationConfig(use_ego_gt=False,
                               use_agents_gt=False,
                               disable_new_agents=True,
                               distance_th_close=1000,
                               distance_th_far=1000,
                               num_simulation_steps=10)

    # ego will move by 1 each time
    ego_model = MockModel(advance_x=1.0)

    # agents will move by 0.5 each time
    agents_model = MockModel(advance_x=0.5)

    sim = ClosedLoopSimulator(sim_cfg, ego_dataset, torch.device("cpu"),
                              ego_model, agents_model)
    sim_outputs = sim.unroll(scene_indices)

    # check ego movement
    for sim_output in sim_outputs:
        ego_tr = sim_output.simulated_ego[
            "ego_translation"][:sim_cfg.num_simulation_steps, :2]
        ego_dist = np.linalg.norm(np.diff(ego_tr, axis=0), axis=-1)
        assert np.allclose(ego_dist, 1.0)

        ego_tr = sim_output.simulated_ego_states[
            :sim_cfg.num_simulation_steps,
            TrajectoryStateIndices.X:TrajectoryStateIndices.Y + 1]
        ego_dist = np.linalg.norm(np.diff(ego_tr.numpy(), axis=0), axis=-1)
        assert np.allclose(ego_dist, 1.0, atol=1e-3)

        # all rotations should be the same as the first one as the MockModel outputs 0 for that
        rots_sim = sim_output.simulated_ego[
            "ego_rotation"][:sim_cfg.num_simulation_steps]
        r_rep = sim_output.recorded_ego["ego_rotation"][0]
        for r_sim in rots_sim:
            assert np.allclose(rotation33_as_yaw(r_sim),
                               rotation33_as_yaw(r_rep),
                               atol=1e-2)

        # all rotations should be the same as the first one as the MockModel outputs 0 for that
        rots_sim = sim_output.simulated_ego_states[
            :sim_cfg.num_simulation_steps, TrajectoryStateIndices.THETA]
        r_rep = sim_output.recorded_ego_states[0, TrajectoryStateIndices.THETA]
        for r_sim in rots_sim:
            assert np.allclose(r_sim, r_rep, atol=1e-2)

    # check agents movements
    for sim_output in sim_outputs:
        # we need to know which agents were controlled during simulation
        # TODO: this is not ideal, we should keep track of them through the simulation
        sim_dataset = SimulationDataset.from_dataset_indices(
            ego_dataset, [sim_output.scene_id], sim_cfg)
        # this fills the set of agents tracked during the simulation
        sim_dataset.rasterise_agents_frame_batch(0)

        agents_tracks = [el[1] for el in sim_dataset._agents_tracked]
        for track_id in agents_tracks:
            states = sim_output.simulated_agents
            agents = filter_agents_by_track_id(
                states, track_id)[:sim_cfg.num_simulation_steps]
            agent_dist = np.linalg.norm(np.diff(agents["centroid"], axis=0),
                                        axis=-1)
            assert np.allclose(agent_dist, 0.5)
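
The movement checks above reduce a trajectory to per-step displacements: diff
consecutive positions along time, then take the Euclidean norm of each step. A
minimal, self-contained version of that computation:

import numpy as np

positions = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])  # x advances 1 m per step
step_dist = np.linalg.norm(np.diff(positions, axis=0), axis=-1)
assert np.allclose(step_dist, 1.0)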