def test_simulation_agents_mock_insert(dmg: LocalDataManager, cfg: dict, tmp_path: Path) -> None:
    """Injecting a synthetic agent (0, 1) must make it the only agent in later frames."""
    ego_dataset = EgoDataset(cfg, _mock_dataset(), build_rasterizer(cfg, dmg))
    sim_cfg = SimulationConfig(use_ego_gt=True, use_agents_gt=True, disable_new_agents=True,
                               distance_th_far=100, distance_th_close=10)
    dataset = SimulationDataset.from_dataset_indices(ego_dataset, [0], sim_cfg)
    _ = dataset.rasterise_agents_frame_batch(0)

    # build the agent we will inject as (scene 0, track 1)
    injected = np.zeros(1, dtype=AGENT_DTYPE)
    injected["centroid"] = (-1, -1)
    injected["yaw"] = -0.5
    injected["track_id"] = 1
    injected["extent"] = (1, 1, 1)
    injected["label_probabilities"][:, 3] = 1

    for frame_idx in (1, 2, 3):
        dataset.set_agents(frame_idx, {(0, 1): injected})
        agents_dict = dataset.rasterise_agents_frame_batch(frame_idx)
        # only the injected agent should come back, carrying the values we set
        assert len(agents_dict) == 1 and (0, 1) in agents_dict
        assert np.allclose(agents_dict[(0, 1)]["centroid"], (-1, -1))
        assert np.allclose(agents_dict[(0, 1)]["yaw"], -0.5)
def test_simulation_agents_mock_disable(dmg: LocalDataManager, cfg: dict, tmp_path: Path) -> None:
    """With new agents disabled, only the agent visible at frame 0 is ever tracked."""
    ego_dataset = EgoDataset(cfg, _mock_dataset(), build_rasterizer(cfg, dmg))
    sim_cfg = SimulationConfig(use_ego_gt=True, use_agents_gt=True, disable_new_agents=True,
                               distance_th_far=100, distance_th_close=10)
    dataset = SimulationDataset.from_dataset_indices(ego_dataset, [0], sim_cfg)

    # before any rasterisation nothing is tracked
    assert len(dataset._agents_tracked) == 0

    # frame 0: only (0, 1) enters the tracked set
    agents_dict = dataset.rasterise_agents_frame_batch(0)
    assert len(agents_dict) == 1 and (0, 1) in agents_dict
    assert len(dataset._agents_tracked) == 1

    # frame 1: still only (0, 1)
    agents_dict = dataset.rasterise_agents_frame_batch(1)
    assert len(agents_dict) == 1
    assert (0, 1) in agents_dict
    assert len(dataset._agents_tracked) == 1

    # frame 2: the agent disappears and is dropped from tracking
    agents_dict = dataset.rasterise_agents_frame_batch(2)
    assert len(agents_dict) == 0
    assert len(dataset._agents_tracked) == 0
def test_simulation_dataset_build(zarr_cat_dataset: ChunkedDataset, dmg: LocalDataManager,
                                  cfg: dict, tmp_path: Path) -> None:
    """SimulationDataset built via constructor and via factory must hold identical scenes.

    One frame per scene is perturbed first so that scenes are distinguishable and
    a broadcasting/aliasing bug between the two construction paths would show up.
    """
    # modify one frame PER SCENE to ensure everything works also when scenes are different
    zarr_cat_dataset.frames = np.asarray(zarr_cat_dataset.frames)
    for scene_idx in range(len(zarr_cat_dataset.scenes)):
        # FIX: slice the frames of THIS scene (scenes[scene_idx]); passing the whole
        # scenes array made every iteration perturb only the very first frame
        frame_slice = get_frames_slice_from_scenes(zarr_cat_dataset.scenes[scene_idx])
        zarr_cat_dataset.frames[frame_slice.start]["ego_translation"] += np.random.randn(3)

    rasterizer = build_rasterizer(cfg, dmg)
    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, rasterizer)
    sim_cfg = SimulationConfig(use_ego_gt=True, use_agents_gt=True, disable_new_agents=False,
                               distance_th_far=30, distance_th_close=10)

    # we should be able to create the same object by using both constructor and factory
    scene_indices = list(range(len(zarr_cat_dataset.scenes)))
    scene_dataset_batch: Dict[int, EgoDataset] = {}
    for scene_idx in scene_indices:
        scene_dataset = ego_dataset.get_scene_dataset(scene_idx)
        scene_dataset_batch[scene_idx] = scene_dataset
    sim_1 = SimulationDataset(scene_dataset_batch, sim_cfg)
    sim_2 = SimulationDataset.from_dataset_indices(ego_dataset, scene_indices, sim_cfg)

    for (k_1, v_1), (k_2, v_2) in zip(sim_1.scene_dataset_batch.items(), sim_2.scene_dataset_batch.items()):
        assert k_1 == k_2
        assert np.allclose(v_1.dataset.frames["ego_translation"], v_2.dataset.frames["ego_translation"])
def test_simulation_agents(zarr_cat_dataset: ChunkedDataset, dmg: LocalDataManager,
                           cfg: dict, tmp_path: Path) -> None:
    """Agents returned by the first rasterisation must appear in scene 0 and be tracked."""
    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, build_rasterizer(cfg, dmg))
    all_scenes = list(range(len(zarr_cat_dataset.scenes)))
    sim_cfg = SimulationConfig(use_ego_gt=True, use_agents_gt=True, disable_new_agents=False,
                               distance_th_far=100, distance_th_close=30)
    dataset = SimulationDataset.from_dataset_indices(ego_dataset, all_scenes, sim_cfg)

    # nothing should be tracked before the first rasterisation
    assert len(dataset._agents_tracked) == 0

    agents_dict = dataset.rasterise_agents_frame_batch(0)
    # the same track ids should show up in each scene (check against scene 0)
    for scene_idx, track_id in agents_dict:
        assert (0, track_id) in agents_dict
    # now everything should be tracked
    assert len(dataset._agents_tracked) == len(agents_dict)
def test_same_displacement(
    cfg: dict,
    zarr_dataset: ChunkedDataset,
    base_displacement: np.ndarray,
    raster_size: tuple,
    ego_center: tuple,
    pixel_size: tuple,
) -> None:
    """Target displacements must not depend on the raster geometry."""
    # override the raster geometry under test
    for param, value in (
        ("raster_size", raster_size),
        ("pixel_size", np.asarray(pixel_size)),
        ("ego_center", np.asarray(ego_center)),
    ):
        cfg["raster_params"][param] = value
    render_context = RenderContext(
        np.asarray(raster_size),
        np.asarray(pixel_size),
        np.asarray(ego_center),
        set_origin_to_bottom=cfg["raster_params"]["set_origin_to_bottom"],
    )
    dataset = EgoDataset(cfg, zarr_dataset, StubRasterizer(render_context))
    first_sample = dataset[0]
    assert np.allclose(first_sample["target_positions"], base_displacement)
def test_get_frame_indices_ego(frame_idx: int, zarr_dataset: ChunkedDataset,
                               dmg: LocalDataManager, cfg: dict) -> None:
    """For an EgoDataset, get_frame_indices of frame i must return index i first."""
    cfg["raster_params"]["map_type"] = "box_debug"
    dataset = EgoDataset(cfg, zarr_dataset, build_rasterizer(cfg, dmg))
    indices = dataset.get_frame_indices(frame_idx)
    # one ego sample per frame -> the returned index matches the frame index
    assert indices[0] == frame_idx
def test_perturbation_is_applied(perturb_prob: float, dmg: LocalDataManager,
                                 cfg: dict, zarr_dataset: ChunkedDataset) -> None:
    """A dataset built with an Ackerman perturbation must yield different targets."""
    rasterizer = build_rasterizer(cfg, dmg)
    # baseline sample, no perturbation
    clean = EgoDataset(cfg, zarr_dataset, rasterizer, None)[0]
    # the sample partial is bound at init time, so a fresh dataset is required
    perturbation = AckermanPerturbation(ReplayRandomGenerator(np.asarray([[4.0, 0.33]])),
                                        perturb_prob=perturb_prob)
    perturbed = EgoDataset(cfg, zarr_dataset, rasterizer, perturbation)[0]
    assert np.linalg.norm(clean["target_positions"] - perturbed["target_positions"]) > 0
    assert np.linalg.norm(clean["target_yaws"] - perturbed["target_yaws"]) > 0
def test_perturbation_is_applied(perturb_prob: float) -> None:
    """Self-contained variant: loads config and zarr from the test artefacts folder.

    NOTE(review): this name duplicates another `test_perturbation_is_applied` seen
    in this source; if both live in the same module only the later definition is
    collected by pytest — confirm they come from different files.
    """
    cfg = load_config_data("./l5kit/tests/artefacts/config.yaml")
    zarr_dataset = ChunkedDataset(path="./l5kit/tests/artefacts/single_scene.zarr")
    zarr_dataset.open()
    dm = LocalDataManager("./l5kit/tests/artefacts/")
    rasterizer = build_rasterizer(cfg, dm)
    # baseline sample, no perturbation
    clean = EgoDataset(cfg, zarr_dataset, rasterizer, None)[0]
    # the sample partial is bound at construction, so build a new dataset to perturb
    perturbation = AckermanPerturbation(ReplayRandomGenerator(np.asarray([[4.0, 0.33]])),
                                        perturb_prob=perturb_prob)
    perturbed = EgoDataset(cfg, zarr_dataset, rasterizer, perturbation)[0]
    assert np.linalg.norm(clean["target_positions"] - perturbed["target_positions"]) > 0
    assert np.linalg.norm(clean["target_yaws"] - perturbed["target_yaws"]) > 0
def test_get_scene_indices_ego(scene_idx: int, zarr_dataset: ChunkedDataset,
                               dmg: LocalDataManager, cfg: dict) -> None:
    """Scene indices of an EgoDataset must span exactly that scene's frame slice."""
    cfg["raster_params"]["map_type"] = "box_debug"
    dataset = EgoDataset(cfg, zarr_dataset, build_rasterizer(cfg, dmg))
    indices = dataset.get_scene_indices(scene_idx)
    expected = get_frames_slice_from_scenes(zarr_dataset.scenes[scene_idx])
    # first/last dataset index line up with the scene's frame slice bounds
    assert indices[0] == expected.start
    assert indices[-1] == expected.stop - 1
def get_train_dataloaders(cfg, dm):
    """Modified from L5Kit: build agent & ego train datasets plus their dataloaders."""
    loader_cfg = cfg["train_data_loader"]
    rasterizer = build_rasterizer(cfg, dm)
    train_zarr = ChunkedDataset(dm.require(loader_cfg["key"])).open()

    def _make_loader(dataset):
        # shared DataLoader settings for both datasets
        return DataLoader(dataset,
                          shuffle=loader_cfg["shuffle"],
                          batch_size=loader_cfg["batch_size"],
                          num_workers=loader_cfg["num_workers"])

    train_dataset = AgentDataset(cfg, train_zarr, rasterizer)
    train_dataset_ego = EgoDataset(cfg, train_zarr, rasterizer)
    return train_dataset, train_dataset_ego, _make_loader(train_dataset), _make_loader(train_dataset_ego)
def __init__(self):
    """Load the visualisation config, open the validation zarr, and build the EgoDataset."""
    print("Visualization Class initialized.")
    # config lives at a hard-coded location
    self.cfg = load_config_data("/mnt/extra/kaggle/competitions/2020lyft/ProjectLyft/Modules/visualisation_config.yaml")
    print(self.cfg)
    data_manager = LocalDataManager()
    # resolve and open the validation dataset
    self.dataset_path = data_manager.require(self.cfg["val_data_loader"]["key"])
    self.zarr_dataset = ChunkedDataset(self.dataset_path)
    self.zarr_dataset.open()
    # rasterizer + ego dataset used by the visualisation helpers
    self.rast = build_rasterizer(self.cfg, data_manager)
    self.dataset = EgoDataset(self.cfg, self.zarr_dataset, self.rast)
def test_invalid_simulation_dataset(zarr_cat_dataset: ChunkedDataset, dmg: LocalDataManager,
                                    cfg: dict, tmp_path: Path) -> None:
    """An out-of-range scene index must make the factory raise ValueError."""
    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, build_rasterizer(cfg, dmg))
    # len(scenes) is one past the last valid scene index
    bad_indices = [0, len(zarr_cat_dataset.scenes)]
    sim_cfg = SimulationConfig(use_ego_gt=True, use_agents_gt=True, disable_new_agents=False,
                               distance_th_far=30, distance_th_close=10)
    with pytest.raises(ValueError):
        SimulationDataset.from_dataset_indices(ego_dataset, bad_indices, sim_cfg)
def base_displacement(zarr_dataset: ChunkedStateDataset) -> np.ndarray:
    """Fixture: target displacements for a fixed reference raster geometry."""
    cfg = load_config_data("./l5kit/tests/artefacts/config.yaml")
    raster_params = cfg["raster_params"]
    raster_params["raster_size"] = (100, 100)
    raster_params["ego_center"] = np.asarray((0.5, 0.5))
    raster_params["pixel_size"] = np.asarray((0.25, 0.25))
    stub = StubRasterizer(
        raster_params["raster_size"],
        raster_params["pixel_size"],
        raster_params["ego_center"],
        0.5,
    )
    # only the displacements of the first sample are needed
    return EgoDataset(cfg, zarr_dataset, stub)[0]["target_positions"]
def test_simulation_ego(zarr_cat_dataset: ChunkedDataset, dmg: LocalDataManager,
                        cfg: dict, tmp_path: Path) -> None:
    """rasterise_frame_batch and set_ego must work across every scene of the batch."""
    scene_indices = list(range(len(zarr_cat_dataset.scenes)))
    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, build_rasterizer(cfg, dmg))
    sim_cfg = SimulationConfig(use_ego_gt=True, use_agents_gt=True, disable_new_agents=False,
                               distance_th_far=30, distance_th_close=10)
    dataset = SimulationDataset.from_dataset_indices(ego_dataset, scene_indices, sim_cfg)

    # scene order must be preserved by the factory
    assert list(dataset.scene_dataset_batch.keys()) == scene_indices

    # the aggregated get-frame works for the first and last frames...
    assert len(dataset.rasterise_frame_batch(0)) == len(scene_indices)
    assert len(dataset.rasterise_frame_batch(len(dataset) - 1)) == len(scene_indices)
    # ...and raises past the end
    with pytest.raises(IndexError):
        _ = dataset.rasterise_frame_batch(len(dataset))

    # setting the ego must write translation and yaw into every scene's zarr
    for frame_idx in np.random.randint(0, len(dataset), 10):
        mock_tr = np.random.rand(len(scene_indices), 12, 2)
        mock_yaw = np.random.rand(len(scene_indices), 12)
        dataset.set_ego(frame_idx, 0, mock_tr, mock_yaw)
        for scene_idx in scene_indices:
            frames = dataset.scene_dataset_batch[scene_idx].dataset.frames
            written_tr = frames["ego_translation"][frame_idx]
            written_yaw = rotation33_as_yaw(frames["ego_rotation"][frame_idx])
            assert np.allclose(mock_tr[scene_idx, 0], written_tr[:2])
            assert np.allclose(mock_yaw[scene_idx, 0], written_yaw)
def test_coordinates_straight_road(zarr_dataset: ChunkedDataset, cfg: dict) -> None:
    """On a straight road `target_positions` should vary along x only."""
    render_context = RenderContext(
        np.asarray(cfg["raster_params"]["raster_size"]),
        np.asarray(cfg["raster_params"]["pixel_size"]),
        np.asarray(cfg["raster_params"]["ego_center"]),
    )
    dataset = EgoDataset(cfg, zarr_dataset, StubRasterizer(render_context, 0.5))

    # predictions of the first sample (agent frame and mapped via agent_from_world)
    first = dataset[0]
    preds = first["target_positions"]
    preds_world = transform_points(preds, np.linalg.inv(first["agent_from_world"]))
    # first 50 centroids along the road
    centroids = np.stack([dataset[idx]["centroid"][:2] for idx in range(50)])

    # compute XY variances for preds and centroids
    var_preds = np.var(preds, 0, ddof=1)
    var_centroids = np.var(centroids, 0, ddof=1)
    # predictions: variance on Y is way lower than on X
    assert var_preds[1] / var_preds[0] < 0.001
    # centroids: variance on Y is similar to X
    assert var_centroids[1] / var_centroids[0] > 0.9

    # predicted positions line up with the subsequent centroids
    assert np.allclose(preds_world[:-1], centroids[1:])
def generate_eval_dataset(cfg, dm, rasterizer):
    """Build the chopped evaluation dataset, its dataloader, an ego dataset and the GT path.

    Copies the test zarr to /tmp, chops each scene to `num_frames_to_chop` frames,
    then loads the resulting zarr together with its agents mask.
    """
    # FIX: eval_cfg was assigned twice with the same value; read it once
    eval_cfg = cfg["test_data_loader"]
    eval_dir = shutil.copytree(dm.require(eval_cfg["key"]), '/tmp/lyft/test.zarr')
    num_frames_to_chop = 50
    eval_base_path = create_chopped_dataset(eval_dir,
                                            cfg["raster_params"]["filter_agents_threshold"],
                                            num_frames_to_chop,
                                            cfg["model_params"]["future_num_frames"],
                                            MIN_FUTURE_STEPS)
    # chopping writes the zarr, the agents mask and the ground-truth csv side by side
    eval_zarr_path = str(Path(eval_base_path) / "test.zarr")
    eval_mask_path = str(Path(eval_base_path) / "mask.npz")
    eval_gt_path = str(Path(eval_base_path) / "gt.csv")
    eval_zarr = ChunkedDataset(eval_zarr_path).open()
    eval_mask = np.load(eval_mask_path)["arr_0"]
    # ===== INIT DATASET AND LOAD MASK
    eval_dataset = AgentDataset(cfg, eval_zarr, rasterizer, agents_mask=eval_mask)
    eval_dataloader = DataLoader(eval_dataset,
                                 shuffle=eval_cfg["shuffle"],
                                 batch_size=eval_cfg["batch_size"],
                                 num_workers=eval_cfg["num_workers"])
    eval_dataset_ego = EgoDataset(cfg, eval_zarr, rasterizer)
    return eval_dataset, eval_dataloader, eval_dataset_ego, eval_gt_path
def __init__(
    self,
    cfg: dict,
    zarr_dataset: ChunkedDataset,
    rasterizer: Rasterizer,
    perturbation: Optional[Perturbation] = None,
    agents_mask: Optional[np.ndarray] = None,
    min_frame_history: int = MIN_FRAME_HISTORY,
    min_frame_future: int = MIN_FRAME_FUTURE,
    override_sample_function_name: str = "",
):
    """Wrap an EgoDataset and optionally swap its sample function by name."""
    assert perturbation is None, "AgentDataset does not support perturbation (yet)"
    self.cfg = cfg
    self.ego_dataset = EgoDataset(cfg, zarr_dataset, rasterizer, perturbation)
    self.get_frame_arguments = self.load_get_frame_arguments(agents_mask, min_frame_history, min_frame_future)
    if override_sample_function_name != "":
        print("override_sample_function_name", override_sample_function_name)
        # map known override names to their partial factories; unknown names are ignored
        factories = {
            "generate_agent_sample_tl_history": create_generate_agent_sample_tl_history_partial,
            "generate_agent_sample_fixing_yaw": create_generate_agent_sample_fixing_yaw_partial,
        }
        factory = factories.get(override_sample_function_name)
        if factory is not None:
            self.ego_dataset.sample_function = factory(cfg, rasterizer)
def test_same_displacement(
    zarr_dataset: ChunkedStateDataset,
    base_displacement: np.ndarray,
    raster_size: tuple,
    ego_center: tuple,
    pixel_size: tuple,
) -> None:
    """Displacements must match the baseline regardless of the raster geometry."""
    cfg = load_config_data("./l5kit/tests/artefacts/config.yaml")
    raster_params = cfg["raster_params"]
    raster_params["raster_size"] = raster_size
    raster_params["ego_center"] = np.asarray(ego_center)
    raster_params["pixel_size"] = np.asarray(pixel_size)
    stub = StubRasterizer(
        raster_params["raster_size"],
        raster_params["pixel_size"],
        raster_params["ego_center"],
        0.5,
    )
    sample = EgoDataset(cfg, zarr_dataset, stub)[0]
    assert np.allclose(sample["target_positions"], base_displacement)
def test_same_displacement(
    cfg: dict,
    zarr_dataset: ChunkedDataset,
    base_displacement: np.ndarray,
    raster_size: tuple,
    ego_center: tuple,
    pixel_size: tuple,
) -> None:
    """Displacements must match the baseline for any raster geometry (fixture cfg)."""
    raster_params = cfg["raster_params"]
    raster_params["raster_size"] = raster_size
    raster_params["ego_center"] = np.asarray(ego_center)
    raster_params["pixel_size"] = np.asarray(pixel_size)
    stub = StubRasterizer(
        raster_params["raster_size"],
        raster_params["pixel_size"],
        raster_params["ego_center"],
        0.5,
    )
    sample = EgoDataset(cfg, zarr_dataset, stub)[0]
    assert np.allclose(sample["target_positions"], base_displacement)
# Open the zarr and plot the ego translations of every frame, then build a
# semantic-rasterized EgoDataset for the trajectory visualisation helper below.
zarr_dataset.open()
print(zarr_dataset)
frames = zarr_dataset.frames  # This is much faster!
coords = frames["ego_translation"][:, :2]
plt.scatter(coords[:, 0], coords[:, 1], marker='.')
axes = plt.gca()
axes.set_xlim([-2500, 1600])
axes.set_ylim([-2500, 1600])
plt.title("ego_translation of frames")

semantic_rasterizer = build_rasterizer(cfg, dm)
semantic_dataset = EgoDataset(cfg, zarr_dataset, semantic_rasterizer)


def visualize_trajectory(dataset, index, title="target_positions movement with draw_trajectory"):
    # Render one sample as RGB and overlay its future trajectory.
    data = dataset[index]
    im = data["image"].transpose(1, 2, 0)
    im = dataset.rasterizer.to_rgb(im)
    # project the agent-relative targets into raster pixels
    # NOTE(review): uses the "world_to_image" sample key — renamed in newer l5kit
    # versions; confirm against the installed version.
    target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2], data["world_to_image"])
    draw_trajectory(im, target_positions_pixels, data["target_yaws"], TARGET_POINTS_COLOR)
def ego_cat_dataset(cfg: dict, dmg: LocalDataManager, zarr_cat_dataset: ChunkedDataset) -> EgoDataset:
    """Fixture: EgoDataset over the concatenated zarr with a freshly built rasterizer."""
    rast = build_rasterizer(cfg, dmg)
    ego_ds = EgoDataset(cfg, zarr_cat_dataset, rast)
    return ego_ds
# Write predictions to csv, score them against the ground truth, then walk
# selected frames from the AV point of view to visualise results.
pred_path = 'submission1.csv'
write_pred_csv(pred_path,
               timestamps=np.concatenate(timestamps),
               track_ids=np.concatenate(agent_ids),
               coords=np.concatenate(future_coords_offsets_pd),
               confs=np.concatenate(confidences_list))
metrics = compute_metrics_csv(eval_gt_path, pred_path, [neg_multi_log_likelihood, time_displace])
for metric_name, metric_mean in metrics.items():
    print(metric_name, metric_mean)

# index ground-truth coordinates by (track_id + timestamp) for fast lookup
gt_rows = {}
for row in read_gt_csv(eval_gt_path):
    gt_rows[row["track_id"] + row["timestamp"]] = row["coord"]
eval_ego_dataset = EgoDataset(cfg, eval_dataset.dataset, rasterizer)
# start from last frame of scene_0 and increase by 100
for frame_number in range(99, len(eval_zarr.frames), 100):
    agent_indices = eval_dataset.get_frame_indices(frame_number)
    if not len(agent_indices):
        continue
    # get AV point-of-view frame
    data_ego = eval_ego_dataset[frame_number]
    im_ego = rasterizer.to_rgb(data_ego["image"].transpose(1, 2, 0))
    center = np.asarray(cfg["raster_params"]["ego_center"]) * cfg["raster_params"]["raster_size"]
    predicted_positions = []
def test_unroll_subset(zarr_cat_dataset: ChunkedDataset, dmg: LocalDataManager, cfg: dict,
                       frame_range: Tuple[int, int]) -> None:
    """Closed-loop unroll over a frame sub-range: mock ego advances 1.0/step and
    mock agents 0.5/step; recorded/simulated outputs must reflect exactly that."""
    rasterizer = build_rasterizer(cfg, dmg)
    scene_indices = list(range(len(zarr_cat_dataset.scenes)))
    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, rasterizer)
    # control only agents at T0, control them forever
    sim_cfg = SimulationConfig(use_ego_gt=False, use_agents_gt=False, disable_new_agents=True,
                               distance_th_close=1000, distance_th_far=1000,
                               num_simulation_steps=frame_range[1],
                               start_frame_index=frame_range[0])
    # ego will move by 1 each time
    ego_model = MockModel(advance_x=1.0)
    # agents will move by 0.5 each time
    agents_model = MockModel(advance_x=0.5)
    sim = ClosedLoopSimulator(sim_cfg, ego_dataset, torch.device("cpu"), ego_model, agents_model)
    sim_outputs = sim.unroll(scene_indices)
    for sim_out in sim_outputs:
        # the unrolled copies must not alias the source zarr's first frame
        assert zarr_cat_dataset.frames[0] != sim_out.recorded_dataset.dataset.frames[0]
        assert zarr_cat_dataset.frames[0] != sim_out.simulated_dataset.dataset.frames[0]
        # every ego step predicted exactly +1.0 on x, 0.0 on y
        for ego_in_out in sim_out.ego_ins_outs:
            assert "positions" in ego_in_out.outputs and "yaws" in ego_in_out.outputs
            assert np.allclose(ego_in_out.outputs["positions"][:, 0], 1.0)
            assert np.allclose(ego_in_out.outputs["positions"][:, 1], 0.0)
        # every agent step predicted exactly +0.5 on x, 0.0 on y
        for agents_in_out in sim_out.agents_ins_outs:
            for agent_in_out in agents_in_out:
                assert "positions" in agent_in_out.outputs and "yaws" in agent_in_out.outputs
                assert np.allclose(agent_in_out.outputs["positions"][:, 0], 0.5)
                assert np.allclose(agent_in_out.outputs["positions"][:, 1], 0.0)
        # length checks only apply when the range is fully specified
        if None not in frame_range:
            assert len(sim_out.recorded_dataset.dataset.frames) == frame_range[1]
            assert len(sim_out.simulated_dataset.dataset.frames) == frame_range[1]
            assert len(sim_out.simulated_ego_states) == frame_range[1]
            assert len(sim_out.recorded_ego_states) == frame_range[1]
            assert len(sim_out.recorded_ego) == frame_range[1]
            assert len(sim_out.simulated_ego) == frame_range[1]
            assert len(sim_out.ego_ins_outs) == len(sim_out.agents_ins_outs) == frame_range[1]
        # consecutive simulated ego translations must be exactly 1.0 apart
        ego_tr = sim_out.simulated_ego["ego_translation"][:sim_cfg.num_simulation_steps, :2]
        ego_dist = np.linalg.norm(np.diff(ego_tr, axis=0), axis=-1)
        assert np.allclose(ego_dist, 1.0)
        'max_num_steps': 1000,
    }
}
# NOTE(review): the head of the config dict closed above lies outside this snippet.
zarr_loc = cfg["train_data_loader"]["key"]
dm = LocalDataManager()
train_zarr = ChunkedDataset(dm.require(zarr_loc)).open()
# let's see what one of the objects looks like
print(train_zarr)
# Let us visualize the EGO DATASET
rast = build_rasterizer(cfg, dm)  # Rasterisation is one of the typical techniques of rendering 3D models
dataset = EgoDataset(cfg, train_zarr, rast)  # For visualizing the AV (Autonomous Vehicle)
# Let's get a sample from the dataset and use our rasterizer to get an RGB image we can plot
data = dataset[70]
im = data['image'].transpose(1, 2, 0)
im = dataset.rasterizer.to_rgb(im)
# project agent-relative targets into raster pixels and draw the trajectory
target_positions_pixels = transform_points(data["target_positions"] + data["centroid"][:2], data["world_to_image"])
draw_trajectory(im, target_positions_pixels, data["target_yaws"], TARGET_POINTS_COLOR)
plt.imshow(im[::-1])
plt.show()
# FROM WHAT I CAN DEDUCE THE RED BOX YOU SEE IS THE intersection entry wait line for ego car.
# If it is Yellow it means an intersection entry wait line for other cars, while Green is an intersection exit line.
# Let's visualize other images
# NOTE(review): this snippet starts mid-call — the opening of the csv-writing call
# whose keyword arguments follow is outside the visible chunk.
               timestamps=np.concatenate(timestamps),
               track_ids=np.concatenate(agent_ids),
               coords=np.concatenate(future_coords_offsets_pd),
               )
metrics = compute_metrics_csv(eval_gt_path, pred_path, [neg_multi_log_likelihood, time_displace])
for metric_name, metric_mean in metrics.items():
    print(metric_name, metric_mean)

# switch to inference mode before the visualisation pass
model.eval()
torch.set_grad_enabled(False)

# index ground-truth coordinates by (track_id + timestamp) for fast lookup
gt_rows = {}
for row in read_gt_csv(eval_gt_path):
    gt_rows[row["track_id"] + row["timestamp"]] = row["coord"]
eval_ego_dataset = EgoDataset(training_cfg, eval_dataset.dataset, rasterizer)
# start from last frame of scene_0 and increase by 1000
for frame_number in range(99, len(eval_zarr.frames), 1000):
    agent_indices = eval_dataset.get_frame_indices(frame_number)
    if not len(agent_indices):
        continue
    # get AV point-of-view frame
    data_ego = eval_ego_dataset[frame_number]
    im_ego = rasterizer.to_rgb(data_ego["image"].transpose(1, 2, 0))
    center = np.asarray(training_cfg["raster_params"]["ego_center"]) * training_cfg["raster_params"]["raster_size"]
    predicted_positions = []
    target_positions = []
    for v_index in agent_indices:
def test_unroll(zarr_cat_dataset: ChunkedDataset, dmg: LocalDataManager, cfg: dict) -> None:
    """Full closed-loop unroll: mock ego moves 1.0/step, mock agents 0.5/step;
    check translations, rotations (both zarr and state-tensor views) and agent motion."""
    rasterizer = build_rasterizer(cfg, dmg)
    # change the first yaw of scene 1
    # this will break if some broadcasting happens
    zarr_cat_dataset.frames = np.asarray(zarr_cat_dataset.frames)
    slice_frames = get_frames_slice_from_scenes(zarr_cat_dataset.scenes[1])
    rot = zarr_cat_dataset.frames[slice_frames.start]["ego_rotation"].copy()
    zarr_cat_dataset.frames[slice_frames.start]["ego_rotation"] = yaw_as_rotation33(rotation33_as_yaw(rot + 0.75))

    scene_indices = list(range(len(zarr_cat_dataset.scenes)))
    ego_dataset = EgoDataset(cfg, zarr_cat_dataset, rasterizer)
    # control only agents at T0, control them forever
    sim_cfg = SimulationConfig(use_ego_gt=False, use_agents_gt=False, disable_new_agents=True,
                               distance_th_close=1000, distance_th_far=1000,
                               num_simulation_steps=10)
    # ego will move by 1 each time
    ego_model = MockModel(advance_x=1.0)
    # agents will move by 0.5 each time
    agents_model = MockModel(advance_x=0.5)
    sim = ClosedLoopSimulator(sim_cfg, ego_dataset, torch.device("cpu"), ego_model, agents_model)
    sim_outputs = sim.unroll(scene_indices)

    # check ego movement
    for sim_output in sim_outputs:
        # consecutive ego translations in the simulated zarr are exactly 1.0 apart
        ego_tr = sim_output.simulated_ego["ego_translation"][:sim_cfg.num_simulation_steps, :2]
        ego_dist = np.linalg.norm(np.diff(ego_tr, axis=0), axis=-1)
        assert np.allclose(ego_dist, 1.0)
        # same check on the trajectory-state tensor view (X/Y columns)
        ego_tr = sim_output.simulated_ego_states[:sim_cfg.num_simulation_steps,
                                                 TrajectoryStateIndices.X:TrajectoryStateIndices.Y + 1]
        ego_dist = np.linalg.norm(np.diff(ego_tr.numpy(), axis=0), axis=-1)
        assert np.allclose(ego_dist, 1.0, atol=1e-3)

        # all rotations should be the same as the first one as the MockModel outputs 0 for that
        rots_sim = sim_output.simulated_ego["ego_rotation"][:sim_cfg.num_simulation_steps]
        r_rep = sim_output.recorded_ego["ego_rotation"][0]
        for r_sim in rots_sim:
            assert np.allclose(rotation33_as_yaw(r_sim), rotation33_as_yaw(r_rep), atol=1e-2)

        # all rotations should be the same as the first one as the MockModel outputs 0 for that
        rots_sim = sim_output.simulated_ego_states[:sim_cfg.num_simulation_steps,
                                                   TrajectoryStateIndices.THETA]
        r_rep = sim_output.recorded_ego_states[0, TrajectoryStateIndices.THETA]
        for r_sim in rots_sim:
            assert np.allclose(r_sim, r_rep, atol=1e-2)

    # check agents movements
    for sim_output in sim_outputs:
        # we need to know which agents were controlled during simulation
        # TODO: this is not ideal, we should keep track of them through the simulation
        sim_dataset = SimulationDataset.from_dataset_indices(ego_dataset, [sim_output.scene_id], sim_cfg)
        sim_dataset.rasterise_agents_frame_batch(0)  # this will fill agents_tracked
        agents_tracks = [el[1] for el in sim_dataset._agents_tracked]
        for track_id in agents_tracks:
            states = sim_output.simulated_agents
            agents = filter_agents_by_track_id(states, track_id)[:sim_cfg.num_simulation_steps]
            # consecutive centroids must be exactly 0.5 apart
            agent_dist = np.linalg.norm(np.diff(agents["centroid"], axis=0), axis=-1)
            assert np.allclose(agent_dist, 0.5)
def __init__(self, env_config_path: Optional[str] = None, dmg: Optional[LocalDataManager] = None,
             sim_cfg: Optional[SimulationConfig] = None, train: bool = True,
             reward: Optional[Reward] = None, cle: bool = True, rescale_action: bool = True,
             use_kinematic: bool = False, kin_model: Optional[KinematicModel] = None,
             reset_scene_id: Optional[int] = None, return_info: bool = False,
             randomize_start: bool = True) -> None:
    """Build the L5 gym environment: config, rasterizer, dataset, spaces and simulator.

    When `env_config_path` is None the constructor returns immediately: the bare
    instance exists only so the environment can be registered with gym.
    """
    super(L5Env, self).__init__()
    # Required to register environment
    if env_config_path is None:
        return

    # env config
    dm = dmg if dmg is not None else LocalDataManager(None)
    cfg = load_config_data(env_config_path)
    self.step_time = cfg["model_params"]["step_time"]

    # rasterisation
    rasterizer = build_rasterizer(cfg, dm)
    raster_size = cfg["raster_params"]["raster_size"][0]
    n_channels = rasterizer.num_channels()

    # load dataset of environment; overfit mode always uses the training split
    self.train = train
    self.overfit = cfg["gym_params"]["overfit"]
    self.randomize_start_frame = randomize_start
    if self.train or self.overfit:
        loader_key = cfg["train_data_loader"]["key"]
    else:
        loader_key = cfg["val_data_loader"]["key"]
    dataset_zarr = ChunkedDataset(dm.require(loader_key)).open()
    self.dataset = EgoDataset(cfg, dataset_zarr, rasterizer)

    # Define action and observation space
    # Continuous Action Space: gym.spaces.Box (X, Y, Yaw * number of future states)
    self.action_space = spaces.Box(low=-1, high=1, shape=(3, ))
    # Observation Space: gym.spaces.Dict (image: [n_channels, raster_size, raster_size])
    obs_shape = (n_channels, raster_size, raster_size)
    self.observation_space = spaces.Dict({
        'image': spaces.Box(low=0, high=1, shape=obs_shape, dtype=np.float32)
    })

    # Simulator Config within Gym
    self.sim_cfg = sim_cfg if sim_cfg is not None else SimulationConfigGym()
    self.simulator = ClosedLoopSimulator(self.sim_cfg, self.dataset, device=torch.device("cpu"),
                                         mode=ClosedLoopSimulatorModes.GYM)

    self.reward = reward if reward is not None else L2DisplacementYawReward()

    self.max_scene_id = cfg["gym_params"]["max_scene_id"]
    if not self.train:
        # validation uses its own scene cap and a deterministic start frame
        self.max_scene_id = cfg["gym_params"]["max_val_scene_id"]
        self.randomize_start_frame = False
    if self.overfit:
        # overfit mode pins a single scene and a deterministic start frame
        self.overfit_scene_id = cfg["gym_params"]["overfit_id"]
        self.randomize_start_frame = False

    self.cle = cle
    self.rescale_action = rescale_action
    self.use_kinematic = use_kinematic

    if self.use_kinematic:
        self.kin_model = kin_model if kin_model is not None else UnicycleModel()
        self.kin_rescale = self._get_kin_rescale_params()
    else:
        self.non_kin_rescale = self._get_non_kin_rescale_params()

    # If not None, reset_scene_id is the scene_id that will be rolled out when reset is called
    self.reset_scene_id = reset_scene_id
    if self.overfit:
        self.reset_scene_id = self.overfit_scene_id

    # flag to decide whether to return any info at end of episode
    # helps to limit the IPC
    self.return_info = return_info

    self.seed()
# Save Metric np.save(metric_path, metrics) ######################## Plot Prediction Tractories ################################## model.eval() torch.set_grad_enabled(False) # Uncomment to choose satelliter or semantic rasterizer # validate_cfg["raster_params"]["map_type"] = "py_satellite" validate_cfg["raster_params"]["map_type"] = "py_semantic" rast = build_rasterizer(validate_cfg, dm) eval_ego_dataset = EgoDataset(validate_cfg, valid_dataset.dataset, rast) num_frames = 2 # randomly pick _ frames random_frames = np.random.randint(0, len(eval_ego_dataset) - 1, (num_frames, )) for frame_number in random_frames: agent_indices = valid_dataset.get_frame_indices(frame_number) if not len(agent_indices): continue # get AV point-of-view frame data_ego = eval_ego_dataset[frame_number] im_ego = rasterizer.to_rgb(data_ego["image"].transpose(1, 2, 0)) center = np.asarray(validate_cfg["raster_params"]["ego_center"] ) * validate_cfg["raster_params"]["raster_size"] predicted_positions = []
# Prepare all rasterizer and EgoDataset for each rasterizer rasterizer_dict = {} dataset_dict = {} rasterizer_type_list = [ "py_satellite", "satellite_debug", "py_semantic", "semantic_debug", "box_debug", "stub_debug" ] for i, key in enumerate(rasterizer_type_list): # print("key", key) cfg["raster_params"]["map_type"] = key rasterizer_dict[key] = build_rasterizer(cfg, dm) dataset_dict[key] = EgoDataset(cfg, zarr_dataset, rasterizer_dict[key]) # default lane color is "light yellow" (255, 217, 82). # green, yellow, red color on lane is to show trafic light condition. # orange box represents crosswalk fig, axes = plt.subplots(2, 3, figsize=(15, 10)) axes = axes.flatten() for i, key in enumerate([ "stub_debug", "satellite_debug", "semantic_debug", "box_debug", "py_satellite", "py_semantic" ]): visualize_rgb_image(dataset_dict[key], index=0, title=f"{key}: {type(rasterizer_dict[key]).__name__}", ax=axes[i])
# Unpack CLI arguments and build the filtered EgoDataset used by the lane plots.
vis_inputs = args.vis_inputs
vis_predictions = args.vis_predictions
start_scene = args.start_scene
end_scene = args.end_scene
scenes_per_video = args.scenes_per_video

dataset_path = f"input/scenes/{dataset_basename}_filtered_min_frame_history_4_min_frame_future_1_with_mask_idx.zarr"
zarr_dataset_filtered = ChunkedDataset(dataset_path)
zarr_dataset_filtered.open()

cfg = load_config_data("input/visualisation_config.yaml")
cfg["raster_params"]["map_type"] = "py_semantic"
dm = LocalDataManager()
rast = build_rasterizer(cfg, dm)
dataset_filtered = EgoDataset(cfg, zarr_dataset_filtered, rast)
frame_dataset = FramesDataset(dataset_path)


def plot_line(
    ax,
    line_id,
    speed=None,
    completion=None,
    speed_min=0,
    speed_max=12.295,
    color=None,
    text1="",
    text2="",
):
    # NOTE(review): the remainder of this function lies outside the visible chunk.
    lane_center_line = get_lane_center_line(line_id)