Example #1
def test_y_arguments():
    "Test exception is raised when user provides invalid vectors"
    try:
        scoring(y_target=[1, 2], y_predicted=[1])
        assert False
    except AttributeError:
        assert True
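
These snippets call a scoring(y_target, y_predicted, metric=...) function without showing its import. The test names and metric strings match mlxtend.evaluate.scoring, so that import is assumed in the sketch below; treat the import, and the use of pytest.raises in place of the try/except pattern, as an illustration rather than part of the original listing.

import pytest
from mlxtend.evaluate import scoring  # assumed source of scoring; the listing omits the import


def test_y_arguments_with_pytest_raises():
    "Equivalent check to the test above, written with pytest.raises"
    with pytest.raises(AttributeError):
        scoring(y_target=[1, 2], y_predicted=[1])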
Example #2
def test_metric_argument():
    "Test exception is raised when user provides invalid metric argument"
    try:
        scoring(y_target=[1], y_predicted=[1], metric='test')
        assert False
    except AttributeError:
        assert True
Example #3
def test_binary():
    "Test exception is raised if label is not binary in f1"
    try:
        y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
        y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
        scoring(y_target=y_targ, y_predicted=y_pred, metric='f1')
        assert False
    except AttributeError:
        assert True
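
For reference, the vectors in this test contain four distinct labels, which is what the test relies on to trigger the error for the 'f1' metric (the docstring states that 'f1' expects binary labels):

# The combined label set has four values, so the inputs are not binary,
# which is the condition this test expects to raise the exception.
y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
print(sorted(set(y_targ) | set(y_pred)))  # [0, 1, 2, 3]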
Example #4
def _get_scores(target, predicted):
    """
    Helper to compute the recall, precision, accuracy, and f1 scores
    :param target: a numpy array containing the target values
    :param predicted: a numpy array containing the predicted values
    :return: a list with the scores in the order [recall, precision, accuracy, f1]
    """
    recall = scoring(target, predicted, metric="recall")
    precision = scoring(target, predicted, metric="precision")
    accuracy = scoring(target, predicted, metric="accuracy")
    f_score = scoring(target, predicted, metric="f1")

    return [recall, precision, accuracy, f_score]
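
A hypothetical call to the helper above; the arrays are illustrative only, and the import of scoring is the same assumption as in the earlier sketch:

import numpy as np
from mlxtend.evaluate import scoring  # assumed source of the scoring function used by the helper

target = np.array([1, 1, 1, 0, 0, 1, 0, 1])
predicted = np.array([1, 0, 1, 0, 0, 0, 1, 1])

recall, precision, accuracy, f_score = _get_scores(target, predicted)
print(recall, precision, accuracy, f_score)  # roughly 0.6 0.75 0.625 0.667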
Example #5
def test_avg_perclass_error():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='per-class error')
    assert round(res, 3) == 0.333, res
Example #6
def test_avg_perclass_accuracy():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='per-class accuracy')
    assert round(res, 3) == 0.667, res
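
The expected 0.333 and 0.667 in the two tests above can be reproduced by hand, assuming the per-class metrics average one-vs-rest accuracy over the classes (that assumption is not stated in the listing, but it matches the expected values):

import numpy as np

y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])

# One-vs-rest accuracy for each class, then averaged across the classes.
per_class = [np.mean((y_targ == c) == (y_pred == c)) for c in np.unique(y_targ)]
print(per_class)               # [0.7, 0.5, 0.8]
print(np.mean(per_class))      # 0.666... -> rounds to 0.667
print(1 - np.mean(per_class))  # 0.333... -> rounds to 0.333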
Example #7
def test_matthews_corr_coef():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='matthews_corr_coef')
    assert round(res, 3) == 0.258, res
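
The expected 0.258 follows from the standard Matthews correlation coefficient formula applied to the confusion counts of these vectors; the sketch below recomputes it directly instead of going through the scoring function:

import numpy as np

y_targ = np.array([1, 1, 1, 0, 0, 1, 0, 1])
y_pred = np.array([1, 0, 1, 0, 0, 0, 1, 1])

tp = np.sum((y_targ == 1) & (y_pred == 1))  # 3
tn = np.sum((y_targ == 0) & (y_pred == 0))  # 2
fp = np.sum((y_targ == 0) & (y_pred == 1))  # 1
fn = np.sum((y_targ == 1) & (y_pred == 0))  # 2

mcc = (tp * tn - fp * fn) / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
print(round(float(mcc), 3))  # 0.258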
Example #8
def test_falsepositiverate():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='false_positive_rate')
    assert round(res, 3) == 0.333, res
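
Using the confusion counts computed in the Matthews example above (tp=3, tn=2, fp=1, fn=2), the false positive rate is fp / (fp + tn), which matches the expected 0.333:

# False positive rate from the confusion counts of the vectors above.
fp, tn = 1, 2
print(round(fp / (fp + tn), 3))  # 0.333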
Example #9
def test_avg_perclass_error():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='average per-class error')
    assert round(res, 3) == 0.333, res
Example #10
def test_balanced_accuracy():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='balanced accuracy')
    assert round(res, 3) == 0.578, res
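
Balanced accuracy is commonly defined as the mean of the per-class recalls; under that definition (an assumption here, but one that reproduces the expected value) the 0.578 can be checked directly:

import numpy as np

y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])

# Recall of each class, then averaged: (1/3 + 2/5 + 2/2) / 3.
recalls = [np.mean(y_pred[y_targ == c] == c) for c in np.unique(y_targ)]
print(recalls)                     # [0.333..., 0.4, 1.0]
print(round(np.mean(recalls), 3))  # 0.578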
Example #11
def test_avg_perclass_accuracy():
    y_targ = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
    res = scoring(y_target=y_targ,
                  y_predicted=y_pred,
                  metric='average per-class accuracy')
    assert round(res, 3) == 0.667, res
Example #12
def test_precision():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='precision')
    assert round(res, 3) == 0.75, res
Example #13
def test_f1():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='f1')
    assert round(res, 3) == 0.667, res
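
The expected 0.667 for f1 follows from the precision and recall of these vectors (tp=3, fp=1, fn=2), using the usual harmonic-mean formula:

# f1 from the confusion counts of the vectors above.
tp, fp, fn = 3, 1, 2
precision = tp / (tp + fp)  # 0.75
recall = tp / (tp + fn)     # 0.6
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 3))         # 0.667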
Example #14
def test_error():
    "Test error metric"
    y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
    y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='error')
    assert res == 0.25
Example #15
def test_accuracy():
    "Test accuracy metric"
    y_targ = [1, 1, 1, 0, 0, 2, 0, 3]
    y_pred = [1, 0, 1, 0, 0, 2, 1, 3]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='accuracy')
    assert res == 0.75
Example #16
def test_specificity():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='specificity')
    assert round(res, 3) == 0.667, res
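
Specificity is the true negative rate tn / (tn + fp); with tn=2 and fp=1 for these vectors that gives the expected 0.667:

# Specificity (true negative rate) from the confusion counts above.
tn, fp = 2, 1
print(round(tn / (tn + fp), 3))  # 0.667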
Example #17
def test_recall():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='recall')
    assert round(res, 3) == 0.6, res
Example #18
def test_sensitivity():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric='sensitivity')
    assert round(res, 3) == 0.6, res
Example #19
def test_truepositiverate():
    y_targ = [1, 1, 1, 0, 0, 1, 0, 1]
    y_pred = [1, 0, 1, 0, 0, 0, 1, 1]
    res = scoring(y_target=y_targ, y_predicted=y_pred, metric="true_positive_rate")
    assert round(res, 3) == 0.6, res
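
Sensitivity, recall, and the true positive rate all name the same quantity, tp / (tp + fn), which is why the recall, sensitivity, and true-positive-rate tests expect the identical value of 0.6:

# Sensitivity == recall == true positive rate for these vectors (tp=3, fn=2).
tp, fn = 3, 2
print(tp / (tp + fn))  # 0.6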