def test_duplicate_detections(self):
    """Recall stays 1.0 when the single ground-truth box is predicted twice."""
    annotation = make_representation(["0 0 0 5 5"], is_ground_truth=True)
    prediction = make_representation(["0 0 0 5 5; 0 0 0 5 5"], score=1)
    recall = _test_metric_wrapper(Recall, single_class_dataset())
    # Duplicate detections of the same object must not be counted as misses.
    assert recall(annotation, prediction)[0] == 1
    assert recall.meta.get('names') == ['dog']
def test_false_negative(self):
    """Recall drops to 0.5 when one of two ground-truth objects is missed."""
    annotation = make_representation(["0 10 10 20 20; 0 0 0 5 5"], is_ground_truth=True)
    prediction = make_representation(["0 0 0 5 5"], score=1)
    recall = _test_metric_wrapper(Recall, single_class_dataset())
    # Only one of the two annotated boxes is detected -> recall is 1/2.
    assert recall(annotation, prediction)[0] == 0.5
    assert recall.meta.get('names') == ['dog']
def test_selects_all_detections(self):
    """DetectionMAP keeps its default configuration after an evaluation run."""
    annotation = make_representation(["0 0 0 5 5"], is_ground_truth=True)
    prediction = make_representation(["0 0 0 5 5; 0 0 0 5 5"], score=1)
    map_metric = _test_metric_wrapper(DetectionMAP, single_class_dataset())
    map_metric(annotation, prediction)
    # Evaluating must not alter the metric's configured defaults.
    assert not map_metric.distinct_conf
    assert map_metric.overlap_threshold == 0.5
    assert map_metric.ignore_difficult
    assert map_metric.meta.get('names') == ['dog']
def test_false_positive(self):
    """Recall is 0 when no ground truth is matched, and unaffected by extra false positives."""
    recall = _test_metric_wrapper(Recall, single_class_dataset())
    # Case 1: the only prediction does not overlap the ground truth -> recall 0.
    missed_annotation = make_representation(["0 10 10 20 20"], is_ground_truth=True)
    non_overlapping_prediction = make_representation(["0 0 0 5 5"], score=1)
    assert recall(missed_annotation, non_overlapping_prediction)[0] == 0
    assert recall.meta.get('names') == ['dog']
    # Case 2: the ground truth is matched; an extra false positive does not lower recall.
    matched_annotation = make_representation(["0 0 0 5 5"], is_ground_truth=True)
    prediction_with_extra = make_representation(["0 0 0 5 5; 0 10 10 20 20"], score=1)
    assert recall(matched_annotation, prediction_with_extra)[0] == 1
    assert recall.meta.get('names') == ['dog']
def test_two_objects(self):
    """Recall is 1.0 when both ground-truth objects are detected exactly."""
    annotation = make_representation(["0 0 0 5 5; 0 10 10 20 20"], is_ground_truth=True)
    prediction = make_representation(["0 0 0 5 5; 0 10 10 20 20"], score=1)
    recall = _test_metric_wrapper(Recall, single_class_dataset())
    assert recall(annotation, prediction)[0] == 1