Example No. 1
def test_metrics_perfect_prediction():
    # One prediction exactly matching its ground truth: mAP should be 1.0.
    bb = BBox(0, 5, 0, 5).normalize(10, 10)
    annotations = [
        AnnotatedBBox.ground_truth("a", bb),
        AnnotatedBBox.prediction("a", bb, 0.9),
    ]
    metrics = get_metrics([annotations])
    assert metrics.mAP == 1.0
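
These snippets exercise a small detection-metrics API (BBox, AnnotatedBBox, get_metrics) whose definitions the listing never shows. The sketch below is a guess at their shape, inferred only from how the tests call them, with the coordinate order assumed to be (xmin, xmax, ymin, ymax); it is illustrative, not the project's actual code.

# Hedged sketch of the API these tests assume; not the real implementation.
from dataclasses import dataclass
from typing import Optional


@dataclass(frozen=True)
class BBox:
    # Coordinate order assumed from BBox(0, 5, 0, 5): (xmin, xmax, ymin, ymax).
    xmin: float
    xmax: float
    ymin: float
    ymax: float

    def normalize(self, width: float, height: float) -> "BBox":
        # Scale pixel coordinates into the [0, 1] range.
        return BBox(self.xmin / width, self.xmax / width,
                    self.ymin / height, self.ymax / height)

    def move(self, dx: float, dy: float) -> "BBox":
        # Translate the box by (dx, dy).
        return BBox(self.xmin + dx, self.xmax + dx,
                    self.ymin + dy, self.ymax + dy)


@dataclass(frozen=True)
class AnnotatedBBox:
    class_name: str
    bbox: BBox
    score: Optional[float] = None  # None marks a ground-truth box

    @classmethod
    def ground_truth(cls, class_name: str, bbox: BBox) -> "AnnotatedBBox":
        return cls(class_name, bbox)

    @classmethod
    def prediction(cls, class_name: str, bbox: BBox, score: float) -> "AnnotatedBBox":
        return cls(class_name, bbox, score)
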
Example No. 2
def test_metrics_missing_gt():
    # Predictions with no ground truth at all: every one is a false positive.
    bb = BBox(0, 5, 0, 5).normalize(10, 10)
    annotations = [
        AnnotatedBBox.prediction("a", bb, 0.9),
        AnnotatedBBox.prediction("b", bb, 0.9),
        AnnotatedBBox.prediction("b", bb, 0.8),
    ]
    metrics = get_metrics([annotations])
    assert metrics.per_class["a"].total_FP == 1
    assert metrics.per_class["b"].total_FP == 2
    assert metrics.mAP == 0.0
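
With no ground-truth boxes to match, every prediction is a false positive, precision is zero at every rank, and per-class AP (and therefore mAP) collapses to 0.
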
Example No. 3
def test_per_class_map():
    # Per-class AP: class "a" matches perfectly, class "b" misses entirely.
    annotations = [
        AnnotatedBBox.ground_truth("a", BBox(0, 5, 0, 5)),
        AnnotatedBBox.prediction("a", BBox(0, 5, 0, 5), 0.9),
        AnnotatedBBox.ground_truth("b", BBox(0, 5, 0, 5)),
        AnnotatedBBox.prediction("b", BBox(5, 6, 5, 6), 0.9),
    ]
    metrics = get_metrics([annotations], iou_threshold=0.9)
    assert metrics.per_class["a"].AP == 1
    assert metrics.per_class["b"].AP == 0
    assert metrics.mAP == 0.5
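
The final number is just the unweighted mean over classes: mAP = (AP_a + AP_b) / 2 = (1 + 0) / 2 = 0.5.
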
Example No. 4
def test_metrics_two_predictions_one_gt_2():
    # Two predictions for one ground truth: the shifted lower-confidence
    # duplicate no longer overlaps the GT and counts as a FP, yet AP stays 1.
    width, height = 10, 10
    bbox = BBox(0, 2, 0, 2).normalize(width, height)
    annotations = [
        AnnotatedBBox.ground_truth("a", bbox),
        AnnotatedBBox.prediction("a", bbox, 0.9),
        AnnotatedBBox.prediction("a", bbox.move(0.5, 0.5), 0.5),
    ]
    metrics = get_metrics([annotations])
    assert metrics.per_class["a"].total_FP == 1
    assert metrics.per_class["a"].total_TP == 1
    assert metrics.mAP == 1
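
Why the duplicate does not hurt mAP: ranked by confidence, the true positive comes first and already reaches full recall, so the trailing false positive only adds a point at an already-covered recall level. A self-contained re-derivation under that assumption (the real get_metrics may interpolate differently, but the outcome here is the same):

# Hedged re-derivation of the precision/recall curve for this test.
matches = [True, False]  # predictions sorted by confidence: 0.9 (TP), 0.5 (FP)
total_gt = 1

tp = fp = 0
precision, recall = [], []
for is_tp in matches:
    tp += is_tp
    fp += not is_tp
    precision.append(tp / (tp + fp))  # [1.0, 0.5]
    recall.append(tp / total_gt)      # [1.0, 1.0]

# All-point interpolation: at each recall level, take the best precision
# achieved at that recall or higher. Full recall is hit with precision 1.0,
# so the trailing FP does not reduce the area under the curve.
ap = 0.0
prev_recall = 0.0
for r, p in zip(recall, precision):
    best_p = max(pp for rr, pp in zip(recall, precision) if rr >= r)
    ap += (r - prev_recall) * best_p
    prev_recall = r
assert ap == 1.0
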
Example No. 5
def test_metrics_multiple_images_perfect_prediction():
    # Perfect predictions on each of two images still give mAP = 1.0.
    width, height = 10, 10
    bbox = BBox(0, 5, 0, 5).normalize(width, height)
    image_a = [
        AnnotatedBBox.ground_truth("a", bbox),
        AnnotatedBBox.prediction("a", bbox, 0.9),
    ]
    image_b = [
        AnnotatedBBox.ground_truth("a", bbox),
        AnnotatedBBox.prediction("a", bbox, 0.9),
    ]
    metrics = get_metrics([image_a, image_b])
    assert metrics.mAP == 1.0
Example No. 6
def test_iou_threshold():
    # The same shifted prediction (IoU = 1/3) is a FP at threshold 0.9
    # and a TP at threshold 0.2.
    bbox = BBox(0, 5, 0, 5)
    annotations = [
        AnnotatedBBox.ground_truth("a", bbox),
        AnnotatedBBox.prediction("a", bbox.move(2.5, 0), 0.9),
    ]
    metrics = get_metrics([annotations], iou_threshold=0.9)
    assert metrics.per_class["a"].total_FP == 1
    assert metrics.per_class["a"].total_TP == 0
    assert metrics.mAP == 0

    metrics = get_metrics([annotations], iou_threshold=0.2)
    assert metrics.per_class["a"].total_FP == 0
    assert metrics.per_class["a"].total_TP == 1
    assert metrics.mAP == 1
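
The geometry behind the flip: shifting the 5x5 box right by 2.5 leaves a 2.5x5 overlap, so IoU = 12.5 / (25 + 25 - 12.5) = 1/3, below 0.9 but above 0.2. A standalone check (iou is an illustrative helper, not part of the library under test):

def iou(a, b):
    # Boxes as (xmin, xmax, ymin, ymax), matching the assumed BBox order.
    ix = max(0.0, min(a[1], b[1]) - max(a[0], b[0]))
    iy = max(0.0, min(a[3], b[3]) - max(a[2], b[2]))
    inter = ix * iy
    area_a = (a[1] - a[0]) * (a[3] - a[2])
    area_b = (b[1] - b[0]) * (b[3] - b[2])
    return inter / (area_a + area_b - inter)

assert abs(iou((0, 5, 0, 5), (2.5, 7.5, 0, 5)) - 1 / 3) < 1e-9
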
Example No. 7
import numpy as np


def test_metrics_do_not_contain_numpy_type():
    # Reported metrics should be plain Python numbers, not numpy scalar types.
    annotations = [
        AnnotatedBBox.ground_truth("a", BBox(0, 5, 0, 5)),
        AnnotatedBBox.prediction("a", BBox(0, 5, 0, 5), 0.9),
        AnnotatedBBox.ground_truth("b", BBox(0, 5, 0, 5)),
        AnnotatedBBox.prediction("b", BBox(5, 6, 5, 6), 0.9),
    ]
    metrics = get_metrics([annotations], iou_threshold=0.9)
    assert not isinstance(metrics.mAP, np.floating)
    for value in metrics.per_class.values():
        for item in value.precision:
            assert not isinstance(item, np.floating)
        for item in value.recall:
            assert not isinstance(item, np.floating)
        for item in value.interpolated_precision:
            assert not isinstance(item, np.floating)
        for item in value.interpolated_recall:
            assert not isinstance(item, np.floating)
        assert not isinstance(value.AP, np.floating)
        assert not isinstance(value.total_GT, np.integer)
        assert not isinstance(value.total_TP, np.integer)
        assert not isinstance(value.total_FP, np.integer)
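
If a metrics implementation fails this test, the standard fix is numpy's scalar .item() conversion, which returns the matching built-in Python type:

import numpy as np

# .item() on any numpy scalar returns the equivalent built-in Python value.
assert type(np.float64(0.5).item()) is float
assert type(np.int64(3).item()) is int
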
Example No. 8
def test_metrics_missing_prediction():
    # A ground truth with no prediction at all: the object is never detected.
    bb = BBox(0, 5, 0, 5).normalize(10, 10)
    annotations = [AnnotatedBBox.ground_truth("a", bb)]
    metrics = get_metrics([annotations])
    assert metrics.mAP == 0.0
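
With a ground truth present and no prediction for it, recall never rises above zero, so the AP for the class, and hence mAP, is 0.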