# NOTE: import paths are assumed from the whylogs v0-style API; adjust to your installed version.
from whylogs.core.metrics.model_metrics import ModelMetrics
from whylogs.proto import ModelType


def tests_model_metrics_to_protobuf_classification():
    mod_met = ModelMetrics(model_type=ModelType.CLASSIFICATION)

    targets_1 = ["cat", "dog", "pig"]
    predictions_1 = ["cat", "dog", "dog"]
    scores_1 = [0.1, 0.2, 0.4]

    # populate the confusion matrix from one batch of predictions, targets, and scores
    mod_met.compute_confusion_matrix(predictions_1, targets_1, scores_1)

    message = mod_met.to_protobuf()

    # round-trip through protobuf and verify the model type and labels survive
    model_metrics = ModelMetrics.from_protobuf(message)
    assert model_metrics.model_type == ModelType.CLASSIFICATION
    assert model_metrics.confusion_matrix.labels == ["cat", "dog", "pig"]
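

# A minimal sketch (not part of the original tests) of persisting these metrics as bytes,
# assuming to_protobuf() returns a standard protobuf message, so the usual
# SerializeToString()/ParseFromString() methods apply. The helper name is illustrative.
def roundtrip_model_metrics_through_bytes(mod_met):
    message = mod_met.to_protobuf()
    payload = message.SerializeToString()  # bytes suitable for storage or transport

    restored_message = type(message)()     # instantiate the same protobuf message class
    restored_message.ParseFromString(payload)
    return ModelMetrics.from_protobuf(restored_message)
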
def tests_model_metrics_to_protobuf():
    mod_met = ModelMetrics()

    targets_1 = ["cat", "dog", "pig"]
    predictions_1 = ["cat", "dog", "dog"]
    scores_1 = [0.1, 0.2, 0.4]

    expected_1 = [[1, 0, 0], [0, 1, 1], [0, 0, 0]]  # expected counts (not asserted in this round-trip test)

    mod_met.compute_confusion_matrix(predictions_1, targets_1, scores_1)

    message = mod_met.to_protobuf()

    # round-trip smoke check: deserialization should not raise
    ModelMetrics.from_protobuf(message)


def tests_model_metrics():
    mod_met = ModelMetrics(model_type=ModelType.CLASSIFICATION)

    targets_1 = ["cat", "dog", "pig"]
    predictions_1 = ["cat", "dog", "dog"]
    scores_1 = [0.1, 0.2, 0.4]

    expected_1 = [[1, 0, 0], [0, 1, 1], [0, 0, 0]]

    mod_met.compute_confusion_matrix(predictions_1, targets_1, scores_1)

    assert mod_met.model_type == ModelType.CLASSIFICATION

    # every cell of the confusion matrix should hold the expected count
    labels = mod_met.confusion_matrix.labels
    for idx in range(len(labels)):
        for jdx in range(len(labels)):
            assert mod_met.confusion_matrix.confusion_matrix[idx, jdx].floats.count == expected_1[idx][jdx]
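

# A self-contained sketch (plain Python, no whylogs imports) of how the expected_1
# matrices above can be derived. Judging by expected_1, labels are the sorted set of
# observed values, rows are indexed by prediction and columns by target; that
# orientation is inferred from the tests, not a statement about library internals.
def build_expected_confusion_matrix(predictions, targets):
    labels = sorted(set(predictions) | set(targets))
    index = {label: i for i, label in enumerate(labels)}
    matrix = [[0] * len(labels) for _ in labels]
    for prediction, target in zip(predictions, targets):
        matrix[index[prediction]][index[target]] += 1
    return labels, matrix


# For predictions ["cat", "dog", "dog"] and targets ["cat", "dog", "pig"] this yields
# [[1, 0, 0], [0, 1, 1], [0, 0, 0]], matching expected_1 above.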