Example #1
def test_missing_gt():
    """The symmetric case of test_missing_pred.

    One good detection, one false positive. Map should be lower than 1. Actually it is 0.5, but the exact value depends
    on where we are sampling (i.e. recall's values)
    """
    gts = [
        dict(boxes=torch.Tensor([[10, 20, 15, 25]]),
             labels=torch.IntTensor([0])),
        dict(boxes=torch.Tensor([]), labels=torch.IntTensor([])),
    ]
    preds = [
        dict(boxes=torch.Tensor([[10, 20, 15, 25]]),
             scores=torch.Tensor([0.9]),
             labels=torch.IntTensor([0])),
        dict(boxes=torch.Tensor([[10, 20, 15, 25]]),
             scores=torch.Tensor([0.95]),
             labels=torch.IntTensor([0])),
    ]

    metric = MeanAveragePrecision()
    metric.update(preds, gts)
    result = metric.compute()
    assert result["map"] < 1, (
        "MAP cannot be 1, as there is an image with no ground truth, but some predictions.")
Example #2
def test_missing_pred():
    """One good detection, one false negative.

    Map should be lower than 1. Actually it is 0.5, but the exact value depends on where we are sampling (i.e. recall's
    values)
    """
    gts = [
        dict(boxes=torch.Tensor([[10, 20, 15, 25]]),
             labels=torch.IntTensor([0])),
        dict(boxes=torch.Tensor([[10, 20, 15, 25]]),
             labels=torch.IntTensor([0])),
    ]
    preds = [
        dict(boxes=torch.Tensor([[10, 20, 15, 25]]),
             scores=torch.Tensor([0.9]),
             labels=torch.IntTensor([0])),
        # Empty prediction
        dict(boxes=torch.Tensor([]),
             scores=torch.Tensor([]),
             labels=torch.IntTensor([])),
    ]
    metric = MeanAveragePrecision()
    metric.update(preds, gts)
    result = metric.compute()
    assert result["map"] < 1, (
        "MAP cannot be 1, as there is a missing prediction.")
Example #3
def test_error_on_wrong_init():
    """Test class raises the expected errors."""
    MeanAveragePrecision()  # no error

    with pytest.raises(
            ValueError,
            match="Expected argument `class_metrics` to be a boolean"):
        MeanAveragePrecision(class_metrics=0)
Example #4
def test_map_gpu():
    """Test predictions on single gpu."""
    metric = MeanAveragePrecision()
    metric = metric.to("cuda")
    preds = _inputs.preds[0]
    targets = _inputs.target[0]

    metric.update(_move_to_gpu(preds), _move_to_gpu(targets))
    metric.compute()
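
The fixtures `_inputs` and `_move_to_gpu` are defined at module level and are not part of this excerpt. A minimal sketch of what a helper like `_move_to_gpu` typically does (hypothetical signature, not the module's actual implementation): move every tensor inside each prediction/target dict onto the GPU.

from typing import Dict, List

import torch


def _move_to_gpu(inputs: List[Dict[str, torch.Tensor]]) -> List[Dict[str, torch.Tensor]]:
    """Move every tensor in a list of prediction/target dicts to the GPU."""
    return [{key: value.to("cuda") for key, value in sample.items()}
            for sample in inputs]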
Example #5
def test_empty_preds():
    """Test empty predictions."""
    metric = MeanAveragePrecision()

    metric.update(
        [
            dict(boxes=torch.Tensor([]),
                 scores=torch.Tensor([]),
                 labels=torch.IntTensor([])),
        ],
        [
            dict(boxes=torch.Tensor([[214.1500, 41.2900, 562.4100, 285.0700]]),
                 labels=torch.IntTensor([4])),
        ],
    )
    metric.compute()
Example #6
import torch
from torchmetrics.detection.mean_ap import MeanAveragePrecision

# Preds should be a list of elements, where each element is a dict
# containing 3 keys: boxes, scores and labels
preds = [
    dict(
        # The boxes keyword should contain an [N, 4] tensor, where N is the
        # number of detected boxes in the format [xmin, ymin, xmax, ymax]
        # in absolute image coordinates (the coordinates below are example values)
        boxes=torch.Tensor([[258.0, 41.0, 606.0, 285.0]]),
        # The scores keyword should contain an [N,] tensor where
        # each element is a confidence score between 0 and 1
        scores=torch.Tensor([0.536]),
        # The labels keyword should contain an [N,] tensor
        # with integers of the predicted classes
        labels=torch.IntTensor([0]),
    )
]

# Target should be a list of elements, where each element is a dict
# containing 2 keys: boxes and labels. Each keyword should be formatted
# similarly to the preds argument. The number of elements in preds and
# target must match.
target = [
    dict(
        boxes=torch.Tensor([[214.0, 41.0, 562.0, 285.0]]),
        labels=torch.IntTensor([0]),
    )
]

if __name__ == "__main__":
    # Initialize metric
    metric = MeanAveragePrecision()

    # Update metric with predictions and respective ground truth
    metric.update(preds, target)

    # Compute the results
    result = metric.compute()
    print(result)
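
    # `result` is a dict of scalar tensors. The exact keys depend on the
    # installed torchmetrics version, but typically include the overall "map"
    # (averaged over IoU thresholds 0.50:0.95), fixed-threshold values such as
    # "map_50" and "map_75", and recall statistics such as "mar_100".
    print(f"Overall mAP: {result['map'].item():.4f}")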
Example #7
def test_error_on_wrong_input():
    """Test class input validation."""
    metric = MeanAveragePrecision()

    metric.update([], [])  # no error

    with pytest.raises(
            ValueError,
            match="Expected argument `preds` to be of type Sequence"):
        metric.update(torch.Tensor(), [])  # type: ignore

    with pytest.raises(
            ValueError,
            match="Expected argument `target` to be of type Sequence"):
        metric.update([], torch.Tensor())  # type: ignore

    with pytest.raises(
            ValueError,
            match="Expected argument `preds` and `target` to have the same length"):
        metric.update([dict()], [dict(), dict()])

    with pytest.raises(
            ValueError,
            match="Expected all dicts in `preds` to contain the `boxes` key"):
        metric.update(
            [dict(scores=torch.Tensor(), labels=torch.IntTensor())],
            [dict(boxes=torch.Tensor(), labels=torch.IntTensor())],
        )

    with pytest.raises(
            ValueError,
            match="Expected all dicts in `preds` to contain the `scores` key"):
        metric.update(
            [dict(boxes=torch.Tensor(), labels=torch.IntTensor())],
            [dict(boxes=torch.Tensor(), labels=torch.IntTensor())],
        )

    with pytest.raises(
            ValueError,
            match="Expected all dicts in `preds` to contain the `labels` key"):
        metric.update(
            [dict(boxes=torch.Tensor(), scores=torch.IntTensor())],
            [dict(boxes=torch.Tensor(), labels=torch.IntTensor())],
        )

    with pytest.raises(
            ValueError,
            match="Expected all dicts in `target` to contain the `boxes` key"):
        metric.update(
            [
                dict(boxes=torch.Tensor(),
                     scores=torch.IntTensor(),
                     labels=torch.IntTensor())
            ],
            [dict(labels=torch.IntTensor())],
        )

    with pytest.raises(
            ValueError,
            match="Expected all dicts in `target` to contain the `labels` key"
    ):
        metric.update(
            [
                dict(boxes=torch.Tensor(),
                     scores=torch.IntTensor(),
                     labels=torch.IntTensor())
            ],
            [dict(boxes=torch.IntTensor())],
        )

    with pytest.raises(
            ValueError,
            match="Expected all boxes in `preds` to be of type Tensor"):
        metric.update(
            [dict(boxes=[], scores=torch.Tensor(), labels=torch.IntTensor())],
            [dict(boxes=torch.Tensor(), labels=torch.IntTensor())],
        )

    with pytest.raises(
            ValueError,
            match="Expected all scores in `preds` to be of type Tensor"):
        metric.update(
            [dict(boxes=torch.Tensor(), scores=[], labels=torch.IntTensor())],
            [dict(boxes=torch.Tensor(), labels=torch.IntTensor())],
        )

    with pytest.raises(
            ValueError,
            match="Expected all labels in `preds` to be of type Tensor"):
        metric.update(
            [dict(boxes=torch.Tensor(), scores=torch.Tensor(), labels=[])],
            [dict(boxes=torch.Tensor(), labels=torch.IntTensor())],
        )

    with pytest.raises(
            ValueError,
            match="Expected all boxes in `target` to be of type Tensor"):
        metric.update(
            [
                dict(boxes=torch.Tensor(),
                     scores=torch.Tensor(),
                     labels=torch.IntTensor())
            ],
            [dict(boxes=[], labels=torch.IntTensor())],
        )

    with pytest.raises(
            ValueError,
            match="Expected all labels in `target` to be of type Tensor"):
        metric.update(
            [
                dict(boxes=torch.Tensor(),
                     scores=torch.Tensor(),
                     labels=torch.IntTensor())
            ],
            [dict(boxes=torch.Tensor(), labels=[])],
        )
Example #8
def test_empty_metric():
    """Test empty metric."""
    metric = MeanAveragePrecision()
    metric.compute()
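
Depending on the torchmetrics version, calling compute on a metric that has seen no samples returns sentinel values rather than raising. A minimal sketch of how one might inspect that; the import path and the -1 sentinel are assumptions, not something this test asserts:

from torchmetrics.detection.mean_ap import MeanAveragePrecision

result = MeanAveragePrecision().compute()
# With no updates, the full result dict is still returned; values are
# typically sentinels (e.g. -1.0) rather than meaningful scores.
print(result["map"])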