def test_remove_duplicate_instances_ground_truth():
    """Ensure that if an extra duplicate cuboid is present in ground truth, it would be ignored."""
    detections_dir = TEST_DATA_LOC / "remove_duplicates_detections"
    ground_truth_dir = TEST_DATA_LOC / "remove_duplicates_ground_truth"
    figures_dir = TEST_DATA_LOC / "test_figures"

    # Run the evaluation; duplicates in ground truth should not penalize AP.
    results = DetectionEvaluator(detections_dir, ground_truth_dir, figures_dir).evaluate()

    assert results.AP.loc["Vehicle"] == 1.0
    assert results.AP.loc["Pedestrian"] == 1.0
def test_AP_on_filtered_instances() -> None:
    """Verify AP when ROI-based instance filtering is enabled.

    With ``eval_only_roi_instances=True``, instances outside the region of
    interest should be filtered out before evaluation, so the remaining
    matched detections yield a perfect Vehicle AP of 1.0.
    """
    dt_fpath = TEST_DATA_LOC / "remove_nonroi_detections"
    gt_fpath = TEST_DATA_LOC / "remove_nonroi_ground_truth"
    fig_fpath = TEST_DATA_LOC / "test_figures"

    # Enable ROI filtering so non-ROI instances are excluded from scoring.
    cfg = DetectionCfg(eval_only_roi_instances=True)
    evaluator = DetectionEvaluator(dt_fpath, gt_fpath, fig_fpath, cfg)
    metrics = evaluator.evaluate()

    assert metrics.AP.loc["Vehicle"] == 1.0
def evaluator() -> DetectionEvaluator:
    """Define an evaluator that compares a set of detections with known error to the ground truth."""
    detection_cfg = DetectionCfg(dt_classes=["VEHICLE"])
    return DetectionEvaluator(
        TEST_DATA_LOC / "detections",
        TEST_DATA_LOC,
        TEST_DATA_LOC / "test_figures",
        detection_cfg,
    )
def evaluator_assignment() -> DetectionEvaluator:
    """Define an evaluator that compares a set of results to one with an extra detection to check assignment."""
    cfg = DetectionCfg(dt_classes=["VEHICLE"])
    dt_root = TEST_DATA_LOC / "detections_assignment"
    fig_root = TEST_DATA_LOC / "test_figures"
    return DetectionEvaluator(dt_root, TEST_DATA_LOC, fig_root, cfg)
def evaluator_identity() -> DetectionEvaluator:
    """Define an evaluator that compares a set of results to itself."""
    cfg = DetectionCfg(dt_classes=["VEHICLE"])
    dt_root = TEST_DATA_LOC / "detections_identity"
    fig_root = TEST_DATA_LOC / "test_figures"
    return DetectionEvaluator(dt_root, TEST_DATA_LOC, fig_root, cfg)
def metrics(evaluator: DetectionEvaluator) -> DataFrame:
    """Get the metrics for an evaluator with known error."""
    results = evaluator.evaluate()
    return results
def metrics_assignment(evaluator_assignment: DetectionEvaluator) -> DataFrame:
    """Get the metrics for an evaluator that has extra detections to test for assignment errors."""
    results = evaluator_assignment.evaluate()
    return results
def metrics_identity(evaluator_identity: DetectionEvaluator) -> DataFrame:
    """Get the metrics for an evaluator that compares a set of results to itself."""
    results = evaluator_identity.evaluate()
    return results