# Imports are not part of the original excerpt; the module paths below assume l5kit's
# public API layout (l5kit.data, l5kit.dataset, l5kit.evaluation, l5kit.rasterization).
from pathlib import Path

import numpy as np
import pytest

from l5kit.data import ChunkedDataset
from l5kit.dataset import AgentDataset
from l5kit.evaluation import compute_metrics_csv, read_gt_csv, write_gt_csv, write_pred_csv
from l5kit.evaluation.metrics import neg_multi_log_likelihood
from l5kit.rasterization import RenderContext, StubRasterizer


def test_compute_mse_error(tmp_path: Path, zarr_dataset: ChunkedDataset, cfg: dict) -> None:
    render_context = RenderContext(
        np.asarray((10, 10)),
        np.asarray((0.25, 0.25)),
        np.asarray((0.5, 0.5)),
        set_origin_to_bottom=cfg["raster_params"]["set_origin_to_bottom"],
    )
    rast = StubRasterizer(render_context)
    dataset = AgentDataset(cfg, zarr_dataset, rast)

    gt_coords = []
    gt_avails = []
    timestamps = []
    track_ids = []

    for idx, el in enumerate(dataset):  # type: ignore
        gt_coords.append(el["target_positions"])
        gt_avails.append(el["target_availabilities"])
        timestamps.append(el["timestamp"])
        track_ids.append(el["track_id"])
        if idx == 100:
            break  # speed up test

    gt_coords = np.asarray(gt_coords)
    gt_avails = np.asarray(gt_avails)
    timestamps = np.asarray(timestamps)
    track_ids = np.asarray(track_ids)

    # identical gt and predictions -> error must be exactly 0
    write_gt_csv(str(tmp_path / "gt1.csv"), timestamps, track_ids, gt_coords, gt_avails)
    write_pred_csv(str(tmp_path / "pred1.csv"), timestamps, track_ids, gt_coords, confs=None)
    metrics = compute_metrics_csv(str(tmp_path / "gt1.csv"), str(tmp_path / "pred1.csv"), [neg_multi_log_likelihood])
    for metric_value in metrics.values():
        assert np.all(metric_value == 0.0)

    # perturbed predictions -> error must be strictly positive somewhere
    pred_coords = gt_coords.copy()
    pred_coords += np.random.randn(*pred_coords.shape)
    write_pred_csv(str(tmp_path / "pred3.csv"), timestamps, track_ids, pred_coords, confs=None)
    metrics = compute_metrics_csv(str(tmp_path / "gt1.csv"), str(tmp_path / "pred3.csv"), [neg_multi_log_likelihood])
    for metric_value in metrics.values():
        assert np.any(metric_value > 0.0)

    # truncated submission (last 10 lines of pred1 dropped) -> must raise ValueError
    with open(str(tmp_path / "pred1.csv")) as fp_in, open(str(tmp_path / "pred4.csv"), "w") as fp_out:
        fp_out.writelines(fp_in.readlines()[:-10])

    with pytest.raises(ValueError):
        compute_metrics_csv(str(tmp_path / "gt1.csv"), str(tmp_path / "pred4.csv"), [neg_multi_log_likelihood])
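# A minimal sketch, not part of the original tests: write_pred_csv is also used later in
# this section with multi-modal predictions and per-mode confidences. The (N, modes, T, 2)
# coords layout and the "confidences sum to 1 per row" constraint are assumptions here,
# not something the tests above verify.
def _example_write_multi_modal_pred(tmp_path: Path) -> None:
    num_example, num_modes, future_len, num_coords = 10, 3, 12, 2
    timestamps = np.zeros(num_example)
    track_ids = np.zeros(num_example)
    coords = np.zeros((num_example, num_modes, future_len, num_coords))  # assumed multi-modal layout
    confs = np.full((num_example, num_modes), 1.0 / num_modes)  # uniform confidences, each row sums to 1
    write_pred_csv(str(tmp_path / "pred_multi.csv"), timestamps, track_ids, coords, confs=confs)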
def test_write_gt_csv(tmpdir: Path) -> None:
    dump_path = str(tmpdir / "gt_out.csv")
    num_example, future_len, num_coords = 100, 12, 2
    timestamps = np.zeros(num_example)
    track_ids = np.zeros(num_example)

    # test some invalid shapes for coords and avails
    with pytest.raises(AssertionError):
        coords = np.zeros((num_example, 2, future_len, num_coords))  # multi-modal coords are not valid gt
        avails = np.zeros((num_example, future_len))
        write_gt_csv(dump_path, timestamps, track_ids, coords, avails)

    with pytest.raises(AssertionError):
        coords = np.zeros((num_example, future_len, num_coords))
        avails = np.zeros((num_example, future_len + 5))  # avails length mismatch with coords
        write_gt_csv(dump_path, timestamps, track_ids, coords, avails)

    with pytest.raises(AssertionError):
        coords = np.zeros((num_example, future_len, num_coords))
        avails = np.zeros((num_example, future_len, num_coords))  # avails per coordinate instead of per timestep
        write_gt_csv(dump_path, timestamps, track_ids, coords, avails)

    # test a valid configuration
    coords = np.zeros((num_example, future_len, num_coords))
    avails = np.zeros((num_example, future_len))
    write_gt_csv(dump_path, timestamps, track_ids, coords, avails)
    assert Path(dump_path).exists()
def test_e2e_gt_csv(tmpdir: Path) -> None:
    dump_path = str(tmpdir / "gt_out.csv")
    num_example, future_len, num_coords = 100, 12, 2

    timestamps = np.random.randint(1000, 2000, num_example)
    track_ids = np.random.randint(0, 200, num_example)
    coords = np.random.randn(*(num_example, future_len, num_coords))
    avails = np.random.randint(0, 2, (num_example, future_len))

    write_gt_csv(dump_path, timestamps, track_ids, coords, avails)

    # read back and check values
    for idx, el in enumerate(read_gt_csv(dump_path)):
        assert int(el["track_id"]) == track_ids[idx]
        assert int(el["timestamp"]) == timestamps[idx]
        assert np.allclose(el["coord"], coords[idx], atol=1e-4)
        assert np.allclose(el["avail"], avails[idx])
    # (inside the per-batch inference loop; the loop header is not part of this excerpt)
    valid_coords_gts.append(data["target_positions"].numpy().copy())
    target_avail_pd.append(data["target_availabilities"].unsqueeze(-1).numpy().copy())

# after the loop: stack the per-batch lists into flat arrays
timestamps_concat = np.concatenate(timestamps)
track_ids_concat = np.concatenate(agent_ids)
coords_concat = np.concatenate(future_coords_offsets_pd)
gt_valid_final = np.concatenate(valid_coords_gts)
target_avail_concat = np.concatenate(target_avail_pd)

if test == TEST_CONF[0] or test == TEST_CONF[1]:
    # generate ground truth csv
    write_gt_csv(
        csv_path=eval_gt_path,
        timestamps=timestamps_concat,
        track_ids=track_ids_concat,
        coords=gt_valid_final,
        avails=target_avail_concat.squeeze(-1),
    )

num_examples = gt_valid_final.shape[0]
# uniform confidences over 3 modes (each row sums to 1.0)
confidence = np.array([0.33, 0.33, 0.34])
confidences = np.tile(confidence, (num_examples, 1))

# submission.csv
write_pred_csv(
    pred_path,
    timestamps=timestamps_concat,
    track_ids=track_ids_concat,
    coords=coords_concat,
    confs=confidences,
)
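# A minimal sketch, not part of the original script: the csvs written above can be scored
# with the same helpers exercised in test_compute_mse_error. It assumes eval_gt_path was
# actually written (i.e. one of the TEST_CONF branches above ran).
metrics = compute_metrics_csv(eval_gt_path, pred_path, [neg_multi_log_likelihood])
for metric_name, metric_value in metrics.items():
    print(metric_name, np.mean(metric_value))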