Example 1
    def compute_confusion_matrix(
        self,
        predictions: List[Union[str, int, bool, float]],
        targets: List[Union[str, int, bool, float]],
        scores: List[float] = None,
        target_field: str = None,
        prediction_field: str = None,
        score_field: str = None,
    ):
        """
        computes the confusion_matrix, if one is already present merges to old one.

        Args:
            predictions (List[Union[str, int, bool]]):
            targets (List[Union[str, int, bool]]):
            scores (List[float], optional):
            target_field (str, optional):
            prediction_field (str, optional):
            score_field (str, optional):
        """
        labels = sorted(list(set(targets + predictions)))
        confusion_matrix = ConfusionMatrix(
            labels,
            target_field=target_field,
            prediction_field=prediction_field,
            score_field=score_field,
        )
        confusion_matrix.add(predictions, targets, scores)

        if (self.confusion_matrix is None
                or self.confusion_matrix.labels is None
                or self.confusion_matrix.labels == []):
            self.confusion_matrix = confusion_matrix
        else:
            self.confusion_matrix = self.confusion_matrix.merge(confusion_matrix)
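A minimal usage sketch, assuming the method above belongs to a ModelMetrics-like object (the class name is not shown in the excerpt) that can be constructed without arguments, as in Example 13. The point is that a second call merges into the existing matrix rather than replacing it:

metrics = ModelMetrics()
metrics.compute_confusion_matrix(
    predictions=["cat", "dog", "dog"],
    targets=["cat", "dog", "pig"],
    scores=[0.9, 0.8, 0.6],
)
# A later call with new data is merged into the existing confusion matrix.
metrics.compute_confusion_matrix(
    predictions=["pig"],
    targets=["pig"],
    scores=[0.7],
)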
Example 2
def test_large_confusion_matrix():
    targets, predictions = _generateTestTargetsAndPredictionsForEachLabel(
        65, 65, 0.0)
    labels = set(targets)
    confusion_matrix = ConfusionMatrix(labels)
    confusion_matrix.add(predictions, targets, [1.0 for _ in labels])
    assert len(confusion_matrix.confusion_matrix) == 65
Example 3
def test_merged_labels_over_threshold_confusion_matrix():
    labels = range(200)
    more_labels = range(201, 300)
    matrix_1 = ConfusionMatrix(labels)
    matrix_2 = ConfusionMatrix(more_labels)

    with pytest.raises(ValueError):
        matrix_1.merge(matrix_2)
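A hedged sketch of how calling code might guard against this failure mode; the try/except fallback below is illustrative and not part of the library:

try:
    merged = matrix_1.merge(matrix_2)
except ValueError:
    # The union of label sets exceeds the supported maximum; keep the matrices separate.
    merged = None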
Example 4
def test_merge_with_none():
    targets_1 = ["cat", "dog", "pig"]
    predictions_1 = ["cat", "dog", "dog"]
    scores_1 = [0.1, 0.2, 0.4]

    labels_1 = ["cat", "dog", "pig"]
    matrix = ConfusionMatrix(labels_1)
    matrix.add(predictions_1, targets_1, scores_1)

    res = matrix.merge(other_cm=None)
    assert res.target_field is None
    assert res.prediction_field is None
    assert res.score_field is None
Example 5
    @classmethod
    def from_protobuf(
        cls,
        message,
    ):
        return ModelMetrics(
            confusion_matrix=ConfusionMatrix.from_protobuf(message.scoreMatrix),
            regression_metrics=RegressionMetrics.from_protobuf(message.regressionMetrics),
            model_type=message.modelType,
        )
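A hedged round-trip sketch. It assumes ModelMetrics also exposes a to_protobuf counterpart producing the message consumed above; that serializer is not shown in these excerpts:

message = metrics.to_protobuf()  # assumed counterpart serializer (not shown above)
restored = ModelMetrics.from_protobuf(message)
# Per Example 7, the labels of a deserialized confusion matrix match the original's.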
Example 6
def test_merge_conf_matrix():
    """
    tests merging two confusion matrices of different sizes
    """
    targets_1 = ["cat", "dog", "pig"]
    targets_2 = ["cat", "dog"]

    predictions_1 = ["cat", "dog", "dog"]
    predictions_2 = ["cat", "cat"]

    scores_1 = [0.1, 0.2, 0.4]
    scores_2 = [0.4, 0.3]

    expected_1 = [[1, 0, 0], [0, 1, 1], [0, 0, 0]]
    expected_2 = [[1, 1], [0, 0]]
    expected_merge = [[2, 1, 0], [0, 1, 1], [0, 0, 0]]

    labels_1 = ["cat", "dog", "pig"]
    conf_M_1 = ConfusionMatrix(labels_1)
    conf_M_1.add(predictions_1, targets_1, scores_1)

    for idx, value in enumerate(conf_M_1.labels):
        for jdx, value_2 in enumerate(conf_M_1.labels):
            assert conf_M_1.confusion_matrix[
                idx, jdx].floats.count == expected_1[idx][jdx]
    labels_2 = ["cat", "dog"]
    conf_M_2 = ConfusionMatrix(labels_2)
    conf_M_2.add(predictions_2, targets_2, scores_2)

    for idx, value in enumerate(conf_M_2.labels):
        for jdx, value_2 in enumerate(conf_M_2.labels):
            assert conf_M_2.confusion_matrix[idx, jdx].floats.count == \
                   expected_2[idx][jdx]

    new_conf = conf_M_1.merge(conf_M_2)

    for idx, value in enumerate(new_conf.labels):
        for jdx, value_2 in enumerate(new_conf.labels):
            assert new_conf.confusion_matrix[
                idx, jdx].floats.count == expected_merge[idx][jdx]
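A conceptual sketch of what the merge above does with the counts; this is an illustration, not the library's implementation. The label sets are unioned and the per-cell counts summed, which is why the merged matrix counts 2 in the (cat, cat) cell:

import numpy as np

def merge_counts(labels_a, counts_a, labels_b, counts_b):
    # Union the label sets, then sum per-cell counts from both matrices.
    merged_labels = sorted(set(labels_a) | set(labels_b))
    index = {label: i for i, label in enumerate(merged_labels)}
    merged = np.zeros((len(merged_labels), len(merged_labels)), dtype=int)
    for labels, counts in ((labels_a, counts_a), (labels_b, counts_b)):
        for i, row_label in enumerate(labels):
            for j, col_label in enumerate(labels):
                merged[index[row_label], index[col_label]] += counts[i][j]
    return merged_labels, merged

merged_labels, merged = merge_counts(
    ["cat", "dog", "pig"], [[1, 0, 0], [0, 1, 1], [0, 0, 0]],
    ["cat", "dog"], [[1, 1], [0, 0]],
)
assert merged.tolist() == [[2, 1, 0], [0, 1, 1], [0, 0, 0]]  # matches expected_merge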
Example 7
def test_confusion_matrix_to_protobuf():
    targets_1 = ["cat", "dog", "pig"]
    predictions_1 = ["cat", "dog", "dog"]
    scores_1 = [0.1, 0.2, 0.4]

    labels_1 = ["cat", "dog", "pig"]
    conf_M_1 = ConfusionMatrix(labels_1)
    conf_M_1.add(predictions_1, targets_1, scores_1)
    message = conf_M_1.to_protobuf()

    expected_1 = [[1, 0, 0], [0, 1, 1], [0, 0, 0]]

    new_conf = ConfusionMatrix.from_protobuf(message)
    for idx, value in enumerate(new_conf.labels):
        assert value == conf_M_1.labels[idx]

    for idx, value in enumerate(new_conf.labels):
        for jdx, value_2 in enumerate(new_conf.labels):
            assert new_conf.confusion_matrix[
                idx, jdx].floats.count == expected_1[idx][jdx]
Example 8
def test_positive_count():
    """
    check the tp and fp totals from the confusion matrix
    """
    targets = [["cat", "dog", "pig"], ["cat", "dog"], [0.1, 0.4], [0, 1, 3],
               [True, False, True]]
    predictions = [
        ["cat", "dog", "dog"],
        ["cat", "cat"],
        [0.3, 0.5],
        [0, 2, 3],
        [False, False, True],
    ]

    scores = [[0.1, 0.2, 0.4], None, [0.4, 0.3], [0.3, 0.1, 0.9],
              [0.2, 0.1, 0.2]]

    expected_tp_counts = [
        {"cat": 1, "dog": 1, "pig": 0},
        {"cat": 1, "dog": 0},
        None,
        {0: 1, 1: 0, 2: 0, 3: 1},
        {True: 1, False: 1},
    ]

    expected_fp_counts = [
        {"cat": 0, "dog": 1, "pig": 0},
        {"cat": 1, "dog": 0},
        None,
        {0: 0, 1: 0, 2: 1, 3: 0},
        {True: 0, False: 1},
    ]

    expected_tn_counts = [
        {"cat": 1, "dog": 0, "pig": 0},
        {"cat": 1, "dog": 0},
        None,
        {0: 1, 1: 0, 2: 0, 3: 1},
        {True: 1, False: 1},
    ]

    for indx, each_targets in enumerate(targets):
        target_type = type_of_target(each_targets)
        labels = set(each_targets + predictions[indx])
        if target_type not in SUPPORTED_TYPES:
            continue
        conf_M = ConfusionMatrix(labels)
        conf_M.add(predictions[indx], each_targets, scores[indx])
        for each_ind, label in enumerate(conf_M.labels):
            # check that the number of TP is correct
            assert conf_M.confusion_matrix[
                each_ind,
                each_ind].floats.count == expected_tp_counts[indx][label]
            # check the number of FP: everything in this label's row except the TP
            sum_fp = np.sum([
                nt.floats.count for nt in conf_M.confusion_matrix[each_ind, :]
            ])
            assert (sum_fp - expected_tp_counts[indx][label]
                    ) == expected_fp_counts[indx][label]
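A worked check of the row-sum arithmetic used above, for the first case (labels sort to cat, dog, pig; per the expected matrices in the other examples, rows correspond to predictions). The numbers are derived from the test data by hand, not by running the library:

import numpy as np

# Expected counts for targets ["cat", "dog", "pig"] vs predictions ["cat", "dog", "dog"].
first_case = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 0]])
dog_row = first_case[1, :]        # everything predicted as "dog"
tp_dog = first_case[1, 1]         # predicted "dog", target "dog"
fp_dog = dog_row.sum() - tp_dog   # predicted "dog", target was something else
assert tp_dog == 1 and fp_dog == 1  # matches expected_tp_counts / expected_fp_counts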
Example 9
def test_parse_empty_protobuf_should_return_none():
    empty_message = ScoreMatrixMessage()
    assert ConfusionMatrix.from_protobuf(empty_message) is None
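Building on this test, a hedged sketch of defensive deserialization (the empty-list fallback is illustrative, not library behavior):

restored = ConfusionMatrix.from_protobuf(empty_message)
labels = restored.labels if restored is not None else []
assert labels == []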
Example 10
def test_model_metrics_init():
    reg_met = RegressionMetrics()
    conf_ma = ConfusionMatrix()
    with pytest.raises(NotImplementedError):
        metrics = ModelMetrics(confusion_matrix=conf_ma, regression_metrics=reg_met)
Example 11
def test_over_threshold_confusion_matrix():
    with pytest.raises(ValueError):
        ConfusionMatrix(range(257))
Example 12
    @classmethod
    def from_protobuf(
        cls,
        message,
    ):
        return ModelMetrics(
            confusion_matrix=ConfusionMatrix.from_protobuf(message.scoreMatrix))
Example 13
    def __init__(self, confusion_matrix: ConfusionMatrix = None):
        if confusion_matrix is None:
            confusion_matrix = ConfusionMatrix()
        self.confusion_matrix = confusion_matrix