def test_multiple_detections_with_ignore_difficult_and_not_allow_multiple_matches_per_ignored(self):
    # One difficult ground-truth box matched by two predictions: with ignore_difficult=True and
    # allow_multiple_matches_per_ignored=False, only the first (highest-scoring) match is ignored,
    # so the second prediction is counted as a false positive and no valid ground truth remains (n == 0).
    gt = make_representation("0 0 0 5 5", is_ground_truth=True)
    pred = make_representation("1 0 0 0 5 5; 0.9 0 0 0 5 5")
    gt[0].metadata['difficult_boxes'] = [0]
    overlap_evaluator = IOU({})

    tp, fp, _, n = bbox_match(
        gt, pred, 0, overlap_evaluator, ignore_difficult=True, allow_multiple_matches_per_ignored=False
    )[:4]

    assert n == 0
    assert tp[0] == 0
    assert tp[1] == 0
    assert fp[0] == 0
    assert fp[1] == 1

def test_two_objects(self):
    gt = make_representation(["0 0 0 5 5; 0 10 10 20 20"], is_ground_truth=True)
    pred = make_representation(["0 0 0 5 5; 0 10 10 20 20"], score=1)

    assert 1 == _test_metric_wrapper(Recall, single_class_dataset())(gt, pred)[0]

def test_one_object(self):
    # A single ground-truth box with a matching prediction gives recall 1.
    gt = make_representation(["0 0 0 5 5"], is_ground_truth=True)
    pred = make_representation(["0 0 0 5 5"], score=1)
    metric = _test_metric_wrapper(Recall, single_class_dataset())

    assert 1 == metric(gt, pred)[0]
    assert metric.meta.get('names') == ['dog']

def test_false_negative(self):
    # Only one of the two ground-truth boxes is detected, so recall is 0.5.
    gt = make_representation(["0 10 10 20 20; 0 0 0 5 5"], is_ground_truth=True)
    pred = make_representation(["0 0 0 5 5"], score=1)
    metric = _test_metric_wrapper(Recall, single_class_dataset())

    assert 0.5 == metric(gt, pred)[0]
    assert metric.meta.get('names') == ['dog']