Example No. 1
def test_y_arguments():
    "Test exception is raised when user provides invalid vectors"
    try:
        scoring(y_target=[1, 2], y_predicted=[1])
        assert False
    except AttributeError:
        assert True
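These snippets call a `scoring` function without showing where it comes from; the keyword signature (`y_target`, `y_predicted`, `metric`) and the `AttributeError` raised on invalid input are consistent with `mlxtend.evaluate.scoring`, but treat that import as an assumption. Under that assumption, the same check reads more idiomatically with `pytest.raises`:

import pytest
from mlxtend.evaluate import scoring  # assumed origin of `scoring`

def test_y_arguments_idiomatic():
    # Vectors of different lengths should raise, as in the example above.
    with pytest.raises(AttributeError):
        scoring(y_target=[1, 2], y_predicted=[1])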
Example No. 2
def test_metric_argument():
    "Test exception is raised when user provides invalid metric argument"
    try:
        scoring(y_target=[1], y_predicted=[1], metric='test')
        assert False
    except AttributeError:
        assert True
Example No. 3
def test_binary():
    "Test exception is raised if label is not binary in f1"
    try:
        y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
        y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
        scoring(y_target=y_targ, y_predicted=y_pred, metric='f1')
        assert False
    except AttributeError:
        assert True
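Why this raises: the target vector contains four distinct labels (0, 1, 2, 3), while f1 here is only defined for binary problems. A quick way to see the label count (a standalone check, not part of the API under test):

import numpy as np
labels = np.unique([1, 1, 1, 0, 0, 2, 0, 3])
print(labels)  # [0 1 2 3] -- more than two classes, so a binary f1 is undefined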
Example No. 4
def _get_scores(target, predicted):
    """
    Helper function to get the recall, precision, accuracy and f1 scores
    :param target: a numpy array containing the target values
    :param predicted: a numpy array containing the predicted values
    :return: a list with the scores in the order [recall, precision, accuracy, f1]
    """
    recall = scoring(target, predicted, metric="recall")
    precision = scoring(target, predicted, metric="precision")
    accuracy = scoring(target, predicted, metric="accuracy")
    f_score = scoring(target, predicted, metric="f1")

    return [recall, precision, accuracy, f_score]
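A usage sketch for the helper above, reusing the binary vectors from the later examples (the expected values follow from the confusion counts worked out below):

import numpy as np
target = np.array([1, 1, 1, 0, 0, 1, 0, 1])
predicted = np.array([1, 0, 1, 0, 0, 0, 1, 1])
recall, precision, accuracy, f_score = _get_scores(target, predicted)
print(recall, precision, accuracy)  # 0.6 0.75 0.625
print(round(f_score, 3))            # 0.667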
Example No. 5
def test_avg_perclass_error():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='per-class error')
    assert round(res, 3) == 0.333, res
Example No. 6
def test_avg_perclass_accuracy():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='per-class accuracy')
    assert round(res, 3) == 0.667, res
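The two assertions above (Examples 5 and 6) can be reproduced by hand: binarize the problem per class (that class vs. the rest), take each binary accuracy, and average. A sketch, assuming that definition of the metric:

import numpy as np
y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
# One-vs-rest accuracy for each class, then the mean over classes.
per_class = [float(np.mean((y_targ == c) == (y_pred == c))) for c in np.unique(y_targ)]
print(per_class)                         # [0.7, 0.5, 0.8]
print(round(np.mean(per_class), 3))      # 0.667 -> 'per-class accuracy'
print(round(1 - np.mean(per_class), 3))  # 0.333 -> 'per-class error'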
Example No. 7
def test_matthews_corr_coef():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='matthews_corr_coef')
    assert round(res, 3) == 0.258, res
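The 0.258 follows from the standard Matthews correlation formula applied to the confusion counts of these vectors, with 1 as the positive label: TP=3, FN=2, TN=2, FP=1. Checked by hand, independently of the scoring API:

import math

tp, fp, tn, fn = 3, 1, 2, 2  # counted from y_targ/y_pred above (positive label = 1)
mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
print(round(mcc, 3))  # (6 - 2) / sqrt(4 * 5 * 3 * 4) = 0.258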
Example No. 8
def test_falsepositiverate():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='false_positive_rate')
    assert round(res, 3) == 0.333, res
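Using the same confusion counts as in the Matthews check above (FP=1, TN=2), the false positive rate is FP / (FP + TN) = 1 / 3 ≈ 0.333:

fp, tn = 1, 2
print(round(fp / (fp + tn), 3))  # 0.333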
Example No. 9
def test_avg_perclass_error():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='average per-class error')
    assert round(res, 3) == 0.333, res
Example No. 10
def test_balanced_accuracy():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='balanced accuracy')
    assert round(res, 3) == 0.578, res
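Balanced accuracy here matches the mean of the per-class recalls: class 0 is recovered in 1/3 of its cases, class 1 in 2/5, and class 2 in 2/2, so (1/3 + 2/5 + 1) / 3 ≈ 0.578. A sketch under that assumed definition:

import numpy as np
y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
recalls = [float(np.mean(y_pred[y_targ == c] == c)) for c in np.unique(y_targ)]
print(recalls)                                # [0.333..., 0.4, 1.0]
print(round(sum(recalls) / len(recalls), 3))  # 0.578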
Example No. 11
def test_avg_perclass_accuracy():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='average per-class accuracy')
    assert round(res, 3) == 0.667, res
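Note that 'per-class error'/'per-class accuracy' (Examples 5 and 6) and 'average per-class error'/'average per-class accuracy' (Examples 9 and 11) assert identical results on identical data, so the two spellings appear to be interchangeable names for the same metrics.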
Example No. 12
def test_precision():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='precision')
    assert round(res, 3) == 0.75, res
Example No. 13
def test_f1():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='f1')
    assert round(res, 3) == 0.667, res
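Both values follow from the binary confusion counts TP=3, FP=1, FN=2: precision = 3/4 = 0.75, recall = 3/5 = 0.6, and f1 is their harmonic mean:

tp, fp, fn = 3, 1, 2
precision = tp / (tp + fp)  # 0.75
recall = tp / (tp + fn)     # 0.6
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 3))  # 0.667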
Example No. 14
def test_error():
    "Test error metric"
    y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
    y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='error')
    assert res == 0.25
Example No. 15
def test_accuracy():
    "Test accuracy metric"
    y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
    y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='accuracy')
    assert res == 0.75
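Error and accuracy are complements: the prediction disagrees with the target at 2 of the 8 positions (indices 1 and 6), so error = 2/8 = 0.25 and accuracy = 6/8 = 0.75. A one-line check:

y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
error = sum(t != p for t, p in zip(y_targ, y_pred)) / len(y_targ)
print(error, 1 - error)  # 0.25 0.75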
Example No. 16
def test_specificity():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='specificity')
    assert round(res, 3) == 0.667, res
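Specificity is the true negative rate on the same counts: TN / (TN + FP) = 2 / 3 ≈ 0.667.

tn, fp = 2, 1
print(round(tn / (tn + fp), 3))  # 0.667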
Example No. 17
def test_recall():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='recall')
    assert round(res, 3) == 0.6, res
Example No. 18
def test_sensitivity():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='sensitivity')
    assert round(res, 3) == 0.6, res
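Recall and sensitivity are two names for the same ratio, TP / (TP + FN) = 3 / 5 = 0.6, which is why Examples 17 and 18 assert the same value:

tp, fn = 3, 2
print(tp / (tp + fn))  # 0.6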
Example No. 19
def test_truepositiverate():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric="true_positive_rate")
    assert round(res, 3) == 0.6, res
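'true_positive_rate' is a third name for the same quantity, so this last example asserts the same 0.6 as the recall and sensitivity examples above.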