Example #1
def test_metrics_perfect_prediction():
    bb = BBox(0, 5, 0, 5).normalize(10, 10)
    annotations = [
        Annotation.ground_truth("a", bb),
        Annotation.prediction("a", bb, 0.9),
    ]
    metrics = get_metrics([annotations])
    assert metrics.mAP == 1.0
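
These tests exercise BBox, Annotation, and get_metrics without showing their definitions. A minimal sketch of the interface the tests appear to assume follows; the (xmin, xmax, ymin, ymax) argument order, the field names, and the factory signatures are all inferred from usage in these examples, not taken from the actual implementation:

# Hypothetical stand-ins inferred from the tests; not the real library code.
from dataclasses import dataclass
from typing import Optional

@dataclass(frozen=True)
class BBox:
    xmin: float
    xmax: float
    ymin: float
    ymax: float

    def normalize(self, width: float, height: float) -> "BBox":
        # Scale absolute pixel coordinates into the [0, 1] range.
        return BBox(self.xmin / width, self.xmax / width,
                    self.ymin / height, self.ymax / height)

    def move(self, dx: float, dy: float) -> "BBox":
        # Translate the box by (dx, dy).
        return BBox(self.xmin + dx, self.xmax + dx,
                    self.ymin + dy, self.ymax + dy)

@dataclass(frozen=True)
class Annotation:
    label: str
    bbox: BBox
    confidence: Optional[float] = None  # None marks a ground-truth box

    @classmethod
    def ground_truth(cls, label: str, bbox: BBox) -> "Annotation":
        return cls(label, bbox)

    @classmethod
    def prediction(cls, label: str, bbox: BBox, confidence: float) -> "Annotation":
        return cls(label, bbox, confidence)

get_metrics then takes a list of per-image annotation lists plus an optional iou_threshold, and returns an object exposing mAP and a per_class mapping whose entries carry AP, total_TP, and total_FP, as the assertions below suggest.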
Example #2
def test_metrics_missing_gt():
    bb = BBox(0, 5, 0, 5).normalize(10, 10)
    annotations = [
        Annotation.prediction("a", bb, 0.9),
        Annotation.prediction("b", bb, 0.9),
        Annotation.prediction("b", bb, 0.8),
    ]
    metrics = get_metrics([annotations])
    assert metrics.per_class["a"].total_FP == 1
    assert metrics.per_class["b"].total_FP == 2
    assert metrics.mAP == 0.0
Example #3
def test_per_class_map():
    annotations = [
        Annotation.ground_truth("a", BBox(0, 5, 0, 5)),
        Annotation.prediction("a", BBox(0, 5, 0, 5), 0.9),
        Annotation.ground_truth("b", BBox(0, 5, 0, 5)),
        Annotation.prediction("b", BBox(5, 6, 5, 6), 0.9),
    ]
    metrics = get_metrics([annotations], iou_threshold=0.9)
    assert metrics.per_class["a"].AP == 1
    assert metrics.per_class["b"].AP == 0
    assert metrics.mAP == 0.5
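
Why 0.5: the prediction for "a" matches its ground truth exactly (AP = 1), while the prediction for "b" only touches its ground truth at a corner (IoU ≈ 0, far below the 0.9 threshold), so its AP is 0. Assuming mAP is the unweighted mean of per-class AP, the expected value is plain arithmetic:

# Hedged check of the expected value, assuming unweighted averaging:
ap_a, ap_b = 1.0, 0.0
assert (ap_a + ap_b) / 2 == 0.5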
Example #4
def test_metrics_two_predictions_one_gt_2():
    width, height = 10, 10
    bbox = BBox(0, 2, 0, 2).normalize(width, height)
    annotations = [
        Annotation.ground_truth("a", bbox),
        Annotation.prediction("a", bbox, 0.9),
        Annotation.prediction("a", bbox.move(0.5, 0.5), 0.5),
    ]
    metrics = get_metrics([annotations])
    assert metrics.per_class["a"].total_FP == 1
    assert metrics.per_class["a"].total_TP == 1
    assert metrics.mAP == 1
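
Here both predictions target the same single ground truth. The higher-confidence one claims it (TP); the second is left unmatched (and its shifted box barely overlaps anyway), so it counts as a FP. mAP can still be 1 because AP ranks predictions by confidence: full recall is reached while precision is still 1.0. A rough recomputation under that assumption (the exact interpolation scheme get_metrics uses is not shown here):

# TP/FP flags per prediction, sorted by descending confidence.
tps = [1, 0]
num_gt = 1
ap, cum_tp, prev_recall = 0.0, 0, 0.0
for i, tp in enumerate(tps, start=1):
    cum_tp += tp
    precision = cum_tp / i
    recall = cum_tp / num_gt
    ap += precision * (recall - prev_recall)  # area under the PR curve
    prev_recall = recall
assert ap == 1.0  # the trailing FP adds no recall, so it cannot lower AP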
Example #5
def test_metrics_multiple_images_perfect_prediction():
    width, height = 10, 10
    bbox = BBox(0, 5, 0, 5).normalize(width, height)
    image_a = [
        Annotation.ground_truth("a", bbox),
        Annotation.prediction("a", bbox, 0.9),
    ]
    image_b = [
        Annotation.ground_truth("a", bbox),
        Annotation.prediction("a", bbox, 0.9),
    ]
    metrics = get_metrics([image_a, image_b])
    assert metrics.mAP == 1.0
Example #6
def test_iou_threshold():
    bbox = BBox(0, 5, 0, 5)
    annotations = [
        Annotation.ground_truth("a", bbox),
        Annotation.prediction("a", bbox.move(2.5, 0), 0.9),
    ]
    metrics = get_metrics([annotations], iou_threshold=0.9)
    assert metrics.per_class["a"].total_FP == 1
    assert metrics.per_class["a"].total_TP == 0
    assert metrics.mAP == 0

    metrics = get_metrics([annotations], iou_threshold=0.2)
    assert metrics.per_class["a"].total_FP == 0
    assert metrics.per_class["a"].total_TP == 1
    assert metrics.mAP == 1
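
The flip between the two runs follows from the overlap arithmetic: shifting the 5x5 box right by 2.5 leaves half of its width overlapping the original, so IoU = 12.5 / 37.5 = 1/3, which fails the 0.9 threshold (FP) but passes 0.2 (TP). A quick check, assuming the (xmin, xmax, ymin, ymax) layout used above:

# Intersection of (0, 5) x (0, 5) with (2.5, 7.5) x (0, 5):
inter = (5.0 - 2.5) * (5.0 - 0.0)   # 12.5
union = 25.0 + 25.0 - inter         # 37.5
iou = inter / union                 # 0.333...
assert 0.2 < iou < 0.9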
Example #7
def test_metrics_missing_prediction():
    bb = BBox(0, 5, 0, 5).normalize(10, 10)
    annotations = [Annotation.ground_truth("a", bb)]
    metrics = get_metrics([annotations])
    assert metrics.mAP == 0.0