Example #1
def test_y_arguments():
    "Test exception is raised when user provides invalid vectors"
    try:
        scoring(y_target=[1, 2], y_predicted=[1])
        assert False
    except AttributeError:
        assert True
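These snippets call scoring and (later) np without showing their imports. The signature scoring(y_target, y_predicted, metric=...) matches mlxtend's scoring function, so the examples most likely assume something like the following sketch; mlxtend is an assumption on my part, so adjust if the tests come from a different code base:

import numpy as np
# Assumed: scoring is mlxtend.evaluate.scoring, which raises
# AttributeError for mismatched vectors or unknown metric names.
from mlxtend.evaluate import scoring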
Example #2
def test_metric_argument():
    "Test exception is raised when user provides invalid metric argument"
    try:
        scoring(y_target=[1], y_predicted=[1], metric='test')
        assert False
    except AttributeError:
        assert True
Example #3
def test_binary():
    "Test exception is raised if label is not binary in f1"
    try:
        y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
        y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
        scoring(y_target=y_targ, y_predicted=y_pred, metric='f1')
        assert False
    except AttributeError:
        assert True
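A plausible sketch of the kind of guard that would make this test pass; the helper name is hypothetical, and this is not necessarily how the library implements the check:

import numpy as np

def _check_binary(y_target, y_predicted):
    # Hypothetical guard: f1 and the other binary metrics only make
    # sense when all labels are drawn from {0, 1}.
    labels = np.union1d(np.unique(y_target), np.unique(y_predicted))
    if not np.all(np.isin(labels, [0, 1])):
        raise AttributeError('f1 requires binary labels (0/1)')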
Example #4
    def _get_scores(target, predicted):
        """
        Helper method to get the recall, precision, accuracy and f1 scores
        :param target: a numpy array containing the target values
        :param predicted: a numpy array containing the predicted values
        :return: a list containing the recall, precision, accuracy and f1 scores
        """
        recall = scoring(target, predicted, metric="recall")
        precision = scoring(target, predicted, metric="precision")
        accuracy = scoring(target, predicted, metric="accuracy")
        f_score = scoring(target, predicted, metric="f1")

        return [recall, precision, accuracy, f_score]
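The indentation and the "method" wording suggest _get_scores lives inside a test class; dedented to module level (and with scoring available, see the imports above), it could be exercised like this sketch, reusing the binary vectors from the examples below:

y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
recall, precision, accuracy, f_score = _get_scores(y_targ, y_pred)
# With these vectors: recall 0.6, precision 0.75, accuracy 0.625, f1 ~0.667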
Example #5
def test_avg_perclass_error():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='average per-class error')
    assert round(res, 3) == 0.333, res
Example #6
def test_avg_perclass_accuracy():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='average per-class accuracy')
    assert round(res, 3) == 0.667, res
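Both constants in the last two examples can be reproduced by hand: binarize the task once per class (one-vs-rest), take the plain accuracy of each binary problem, and average; the error variant is one minus that. A reconstruction of the arithmetic, not the library's code:

import numpy as np

y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])

# One-vs-rest accuracies: 0.7 (class 0), 0.5 (class 1), 0.8 (class 2)
per_class = [np.mean((y_targ == c) == (y_pred == c)) for c in np.unique(y_targ)]
avg_acc = float(np.mean(per_class))  # (0.7 + 0.5 + 0.8) / 3 = 0.667
avg_err = 1.0 - avg_acc              # 0.333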
Example #7
def test_matthews_corr_coef():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='matthews_corr_coef')
    assert round(res, 3) == 0.258, res
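The 0.258 follows from the confusion counts TP=3, FP=1, TN=2, FN=2: MCC = (3*2 - 1*2) / sqrt((3+1)(3+2)(2+1)(2+2)) = 4 / sqrt(240) ≈ 0.258. A quick check of that arithmetic with the textbook formula, not the library's implementation:

import math
import numpy as np

y_targ = np.array([1, 1, 1, 0, 0, 1, 0, 1])
y_pred = np.array([1, 0, 1, 0, 0, 0, 1, 1])

tp = int(np.sum((y_targ == 1) & (y_pred == 1)))  # 3
fp = int(np.sum((y_targ == 0) & (y_pred == 1)))  # 1
tn = int(np.sum((y_targ == 0) & (y_pred == 0)))  # 2
fn = int(np.sum((y_targ == 1) & (y_pred == 0)))  # 2
mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
print(round(mcc, 3))  # 0.258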
Example #8
def test_falsepositiverate():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='false_positive_rate')
    assert round(res, 3) == 0.333, res
Example #9
def test_balanced_accuracy():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='balanced accuracy')
    assert round(res, 3) == 0.578, res
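The 0.578 is consistent with reading balanced accuracy as macro-averaged recall: the per-class recalls are 1/3, 2/5, and 2/2, and their mean is about 0.578. A sketch of that reading; the definition is an assumption on my part, chosen because it matches the asserted value:

import numpy as np

y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])

recalls = [np.mean(y_pred[y_targ == c] == c) for c in np.unique(y_targ)]
print(round(float(np.mean(recalls)), 3))  # (1/3 + 2/5 + 1) / 3 -> 0.578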
Example #10
def test_precision():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='precision')
    assert round(res, 3) == 0.75, res
Example #11
def test_f1():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='f1')
    assert round(res, 3) == 0.667, res
Example #12
def test_error():
    "Test error metric"
    y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
    y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='error')
    assert res == 0.25
Example #13
def test_accuracy():
    "Test accuracy metric"
    y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
    y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='accuracy')
    assert res == 0.75
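The error and accuracy constants come from the same count: the vectors agree in 6 of 8 positions, so accuracy is 6/8 = 0.75 and error is 1 - 0.75 = 0.25. A one-line check in plain NumPy:

import numpy as np

y_targ = np.array([1, 1, 1, 0, 0, 2, 0, 3])
y_pred = np.array([1, 0, 1, 0, 0, 2, 1, 3])
acc = float(np.mean(y_targ == y_pred))  # 6/8 = 0.75
err = 1.0 - acc                         # 0.25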
Example #14
def test_specificity():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='specificity')
    assert round(res, 3) == 0.667, res
Example #15
def test_recall():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='recall')
    assert round(res, 3) == 0.6, res
Example #16
def test_sensitivity():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='sensitivity')
    assert round(res, 3) == 0.6, res
Example #17
def test_truepositiverate():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric="true_positive_rate")
    assert round(res, 3) == 0.6, res
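The precision, f1, specificity, recall, sensitivity, and rate examples above all reuse the same binary vectors, so every asserted constant falls out of a single confusion matrix (TP=3, FP=1, TN=2, FN=2). A sketch checking them all at once with the textbook definitions:

import numpy as np

y_targ = np.array([1, 1, 1, 0, 0, 1, 0, 1])
y_pred = np.array([1, 0, 1, 0, 0, 0, 1, 1])

tp = np.sum((y_targ == 1) & (y_pred == 1))  # 3
fp = np.sum((y_targ == 0) & (y_pred == 1))  # 1
tn = np.sum((y_targ == 0) & (y_pred == 0))  # 2
fn = np.sum((y_targ == 1) & (y_pred == 0))  # 2

precision = tp / (tp + fp)                          # 3/4 = 0.75
recall = tp / (tp + fn)                             # 3/5 = 0.6 (= sensitivity = TPR)
specificity = tn / (tn + fp)                        # 2/3 ~ 0.667
fpr = fp / (fp + tn)                                # 1/3 ~ 0.333
f1 = 2 * precision * recall / (precision + recall)  # 0.667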