def test_get_agent_context(zarr_dataset: ChunkedDataset, state_index: int, history_steps: int, future_steps: int) -> None:
    """Compare get_agent_context output against a manual slicing of the same zarr."""
    scene = zarr_dataset.scenes[0]
    frames = zarr_dataset.frames[get_frames_slice_from_scenes(scene)]
    agents = zarr_dataset.agents[get_agents_slice_from_frames(*frames[[0, -1]])]
    tls = zarr_dataset.tl_faces[get_tl_faces_slice_from_frames(*frames[[0, -1]])]
    (frames_his_f, frames_fut_f, agents_his_f, agents_fut_f,
     tls_his_f, tls_fut_f) = get_agent_context(state_index, frames, agents, tls, history_steps, future_steps)

    # future: frames strictly after the current one, capped at future_steps
    fut_slice = slice(state_index + 1, state_index + 1 + future_steps)
    frames_fut = frames[fut_slice]
    agents_fut = filter_agents_by_frames(frames_fut, zarr_dataset.agents)
    tls_fut = filter_tl_faces_by_frames(frames_fut, zarr_dataset.tl_faces)

    assert np.all(frames_fut_f["timestamp"] == frames_fut["timestamp"])
    assert len(agents_fut) == len(agents_fut_f)
    for got, expected in zip(agents_fut_f, agents_fut):
        assert np.all(got == expected)
    assert len(tls_fut) == len(tls_fut_f)
    for got, expected in zip(tls_fut_f, tls_fut):
        assert np.all(got == expected)

    # past: returned reversed (most recent first) and includes the current frame
    his_slice = slice(max(state_index - history_steps, 0), state_index + 1)
    frames_his = frames[his_slice]
    agents_his = filter_agents_by_frames(frames_his, zarr_dataset.agents)
    tls_his = filter_tl_faces_by_frames(frames_his, zarr_dataset.tl_faces)

    assert np.all(frames_his_f["timestamp"] == frames_his["timestamp"][::-1])
    assert len(agents_his) == len(agents_his_f)
    for got, expected in zip(agents_his_f, reversed(agents_his)):
        assert np.all(got == expected)
    assert len(tls_his) == len(tls_his_f)
    for got, expected in zip(tls_his_f, reversed(tls_his)):
        assert np.all(got == expected)
def _rasterise_agents_frame(
        self, scene_index: int, state_index: int) -> Dict[Tuple[int, int], Dict[str, np.ndarray]]:
    """Rasterise agents of interest for a given frame in a given scene.

    :param scene_index: index of the scene
    :param state_index: frame index
    :return: a dict mapping [scene_idx, agent_idx] to dict
    """
    dataset = self.scene_dataset_batch[scene_index]
    frame = dataset.dataset.frames[state_index]

    # keep only agents close enough to ego, according to distance and threshold
    frame_agents = filter_agents_by_frames(frame, dataset.dataset.agents)[0]
    frame_agents = self._filter_agents(scene_index, frame_agents, frame["ego_translation"][:2])

    # rasterise each surviving agent individually
    agents_dict: Dict[Tuple[int, int], Dict[str, np.ndarray]] = {}
    for frame_agent in frame_agents:
        track_id = int(frame_agent["track_id"])
        el = dataset.get_frame(scene_index=0, state_index=state_index, track_id=track_id)
        # the per-scene sub-dataset always reports scene 0; restore the real index
        el["scene_index"] = scene_index
        agents_dict[scene_index, track_id] = el

    self._update_agent_infos(scene_index, frame_agents["track_id"])
    return agents_dict
def test_frame_trajectories_mock() -> None:
    """Build a tiny synthetic dataset and check _get_frame_trajectories output."""
    ego_xyz = np.asarray([(10, 10, 10), (20, 10, 20), (30, 30, 10)])
    agent_1_xy = np.asarray([(10, 10), (20, 20), (30, 30)])

    frames = np.zeros(3, dtype=FRAME_DTYPE)
    frames["agent_index_interval"] = [(0, 2), (2, 4), (4, 5)]
    frames["ego_translation"] = ego_xyz

    agents = np.zeros(5, dtype=AGENT_DTYPE)
    agents["track_id"] = [1, 2, 1, 2, 1]
    agents["centroid"][[0, 2, 4]] = agent_1_xy  # agent 1 appears in all three frames

    agents_frames = filter_agents_by_frames(frames, agents)
    trajs = _get_frame_trajectories(frames, agents_frames, np.asarray([1]), 0)

    assert len(trajs) == 2  # the tracked agent plus ego
    agent_traj, ego_traj = trajs
    assert np.allclose(agent_traj.xs, agent_1_xy[:, 0])
    assert np.allclose(agent_traj.ys, agent_1_xy[:, 1])
    assert np.allclose(agent_traj.track_id, 1)
    assert np.allclose(ego_traj.xs, ego_xyz[:, 0])
    assert np.allclose(ego_traj.ys, ego_xyz[:, 1])
    assert np.allclose(ego_traj.track_id, -1)  # ego is flagged with the -1 sentinel
def check_rasterizer(cfg: dict, rasterizer: Rasterizer, zarr_dataset: ChunkedDataset) -> None:
    """Rasterise a few frames and sanity-check the output image and its RGB conversion."""
    frames = zarr_dataset.frames[:]  # load all frames into memory
    history_num_frames = cfg["model_params"]["history_num_frames"]
    for current_frame in (0, 50, len(frames) - 1):
        s = get_history_slice(current_frame, history_num_frames, 1, include_current_state=True)
        frames_to_rasterize = frames[s]
        agents = filter_agents_by_frames(frames_to_rasterize, zarr_dataset.agents)
        tl_faces = [np.empty(0, dtype=TL_FACE_DTYPE) for _ in agents]  # TODO TR_FACES
        im = rasterizer.rasterize(frames_to_rasterize, agents, tl_faces)

        assert im.ndim == 3
        assert im.shape[-1] == rasterizer.num_channels()
        assert im.shape[:2] == tuple(cfg["raster_params"]["raster_size"])
        assert im.max() <= 1 and im.min() >= 0  # values stay normalised
        assert im.dtype == np.float32

        rgb_im = rasterizer.to_rgb(im)
        assert rgb_im.shape[:2] == im.shape[:2]
        assert rgb_im.shape[2] == 3  # RGB has three channels
        assert rgb_im.dtype == np.uint8
def check_rasterizer(cfg: dict, rasterizer: Rasterizer, dataset: ChunkedStateDataset) -> None:
    """Rasterise a few frames and sanity-check the output image and its RGB conversion."""
    frames = dataset.frames[:]  # load all frames into memory
    model_params = cfg["model_params"]
    for current_frame in (0, 50, len(frames) - 1):
        s = get_history_slice(current_frame, model_params["history_num_frames"],
                              model_params["history_step_size"], include_current_state=True)
        frames_to_rasterize = frames[s]
        agents = filter_agents_by_frames(frames_to_rasterize, dataset.agents)
        im = rasterizer.rasterize(frames_to_rasterize, agents)

        assert im.ndim == 3
        assert im.shape[:2] == tuple(cfg["raster_params"]["raster_size"])
        assert im.shape[2] >= 3
        assert im.max() <= 1 and im.min() >= 0  # values stay normalised
        assert im.dtype == np.float32

        rgb_im = rasterizer.to_rgb(im)
        assert rgb_im.shape[:2] == im.shape[:2]
        assert rgb_im.shape[2] == 3  # RGB has three channels
        assert rgb_im.dtype == np.uint8
def hist_data() -> tuple:
    """Open the single-scene artefact and return (history frames, history agents)."""
    zarr_dataset = ChunkedStateDataset(path="./l5kit/tests/artefacts/single_scene.zarr")
    zarr_dataset.open()
    # frames 100..110, reversed so they are ordered as history (most recent first)
    hist_frames = zarr_dataset.frames[100:111][::-1]
    hist_agents = filter_agents_by_frames(hist_frames, zarr_dataset.agents)
    return hist_frames, hist_agents
def __init__(self, *args, **kwargs):  # type: ignore
    """Open the single-scene artefact and cache a history frame plus its agents."""
    super(BoxRasterizerTest, self).__init__(*args, **kwargs)
    self.dataset = ChunkedStateDataset(path="./l5kit/tests/data/single_scene.zarr")
    self.dataset.open()
    # frame 100 is known to contain agents
    self.hist_frames = self.dataset.frames[100:101]
    self.hist_agents = filter_agents_by_frames(self.hist_frames, self.dataset.agents)
def test_shape(zarr_dataset: ChunkedDataset, dmg: LocalDataManager, cfg: dict) -> None:
    """Satellite raster must stack 2 channels per history frame plus 3 map channels."""
    hist_length = 10
    cfg["raster_params"]["map_type"] = "py_satellite"
    cfg["raster_params"]["filter_agents_threshold"] = 1.0
    cfg["model_params"]["history_num_frames"] = hist_length
    rasterizer = build_rasterizer(cfg, dmg)

    # first hist_length + 1 frames, reversed into history order (newest first)
    frames = zarr_dataset.frames[:hist_length + 1][::-1]
    agents = filter_agents_by_frames(frames, zarr_dataset.agents)
    out = rasterizer.rasterize(frames, agents, [])  # TODO TR_FACES
    assert out.shape == (224, 224, (hist_length + 1) * 2 + 3)
def __init__(self, scene_dataset_batch: Dict[int, EgoDataset], sim_cfg: SimulationConfig) -> None:
    """This class allows to:
    - rasterise the same frame across multiple scenes for ego;
    - rasterise the same frame across multiple scenes for multiple agents;
    - filter agents based on distance to ego;
    - set ego in future frames;
    - set agents in future frames;

    .. note:: only vehicles (car label) are picked as agents

    :param scene_dataset_batch: a mapping from scene index to EgoDataset
    :param sim_cfg: the simulation config
    :raises ValueError: if the batch is empty or the requested simulation steps exceed the shortest scene
    """
    if not len(scene_dataset_batch):
        raise ValueError("can't build a simulation dataset with an empty batch")
    self.scene_dataset_batch: Dict[int, EgoDataset] = scene_dataset_batch
    self.sim_cfg = sim_cfg

    # we must limit the scenes to the part which will be simulated
    # we cut each scene so that it starts from there and ends after `num_simulation_steps`
    start_frame_idx = self.sim_cfg.start_frame_index
    if self.sim_cfg.num_simulation_steps is None:
        # no explicit horizon: simulate until the shortest scene in the batch ends
        end_frame_idx = self.get_min_len()
    else:
        end_frame_idx = start_frame_idx + self.sim_cfg.num_simulation_steps
        if end_frame_idx > self.get_min_len():
            raise ValueError(f"can't unroll until frame {end_frame_idx}, length is {self.get_min_len()}")

    # NOTE: this mutates the EgoDatasets passed in — each one's zarr is replaced by its cut subset
    for scene_idx in scene_dataset_batch:
        zarr_dt = self.scene_dataset_batch[scene_idx].dataset
        self.scene_dataset_batch[scene_idx].dataset = get_frames_subset(zarr_dt, start_frame_idx, end_frame_idx)
        # this is the only stateful field we need to change for EgoDataset, it's used in bisect
        frame_index_ends = self.scene_dataset_batch[scene_idx].dataset.scenes["frame_index_interval"][:, 1]
        self.scene_dataset_batch[scene_idx].cumulative_sizes = frame_index_ends

    # buffer used to keep track of tracked agents during unroll as tuples of scene_idx, agent_idx
    self._agents_tracked: Set[Tuple[int, int]] = set()

    if self.sim_cfg.disable_new_agents:
        # we disable all agents that wouldn't be picked at frame 0
        for scene_idx, dt_ego in self.scene_dataset_batch.items():
            dataset_zarr = dt_ego.dataset
            frame = dataset_zarr.frames[0]
            ego_pos = frame["ego_translation"][:2]
            agents = dataset_zarr.agents
            # agents near ego at frame 0 form the allowlist; everyone else is disabled
            frame_agents = filter_agents_by_frames(frame, agents)[0]
            frame_agents = self._filter_agents(scene_idx, frame_agents, ego_pos)
            disable_agents(dataset_zarr, allowlist=frame_agents["track_id"])

    # keep track of original dataset
    self.recorded_scene_dataset_batch = deepcopy(self.scene_dataset_batch)
def test_shape(dataset: ChunkedStateDataset) -> None:
    """Satellite raster must stack 2 channels per history frame plus 3 map channels."""
    hist_length = 10
    cfg = load_config_data("./l5kit/tests/artefacts/config.yaml")
    cfg["raster_params"]["map_type"] = "py_satellite"
    cfg["raster_params"]["filter_agents_threshold"] = 1.0
    cfg["model_params"]["history_num_frames"] = hist_length
    rasterizer = build_rasterizer(cfg, LocalDataManager("./l5kit/tests/artefacts/"))

    # first hist_length + 1 frames, reversed into history order (newest first)
    frames = dataset.frames[:hist_length + 1][::-1]
    agents = filter_agents_by_frames(frames, dataset.agents)
    out = rasterizer.rasterize(frames, agents)
    assert out.shape == (224, 224, (hist_length + 1) * 2 + 3)
def test_get_frames_agents_single_frame(frame_idx: int, zarr_dataset: ChunkedDataset) -> None:
    """A single frame record must yield a one-element list holding an agent array."""
    frame = zarr_dataset.frames[frame_idx]
    agents = filter_agents_by_frames(frame, zarr_dataset.agents)
    assert len(agents) == 1
    assert isinstance(agents[0], np.ndarray)
def test_get_frames_agents_ret(frame_bound: int, zarr_dataset: ChunkedDataset) -> None:
    """At least one agent must exist across the first `frame_bound` frames.

    :param frame_bound: number of frames (from the start of the zarr) to inspect
    :param zarr_dataset: dataset fixture
    """
    agents = filter_agents_by_frames(zarr_dataset.frames[0:frame_bound], zarr_dataset.agents)
    # generator expression: no need to materialise an intermediate list inside sum()
    assert sum(len(agents_fr) for agents_fr in agents) > 0
def test_get_frames_agents_shape(frame_bound: int, zarr_dataset: ChunkedDataset) -> None:
    """filter_agents_by_frames must return exactly one agent array per input frame."""
    frames = zarr_dataset.frames[0:frame_bound]
    per_frame_agents = filter_agents_by_frames(frames, zarr_dataset.agents)
    assert len(per_frame_agents) == frame_bound