# --- Beispiel #1 (scraped example separator; vote count: 0) ---
def gender_performance(df_test_encoded, predictions, print_=False):
    """Compute confusion-matrix rates (TPR/FPR/TNR/FNR) split by sex.

    Rows whose encoded ``sex`` column equals 1 are grouped as male; all
    other rows as female.  Ground truth is the ``earnings`` column and
    ``predictions`` is a sequence aligned with the DataFrame's rows.

    Parameters
    ----------
    df_test_encoded : mapping with 'sex' and 'earnings' columns
        (e.g. a pandas DataFrame) — TODO confirm exact encoding of 'sex'.
    predictions : sequence
        Model outputs, same length/order as the rows of ``df_test_encoded``.
    print_ : bool
        If True, print all eight per-group rates.

    Returns
    -------
    dict
        Maps 'true_positive_m', ..., 'false_negative_f' to their values.
        (The original discarded the computed rates; returning them is
        backward-compatible since callers previously received None.)
    """
    truth_m, preds_m = [], []
    truth_f, preds_f = [], []
    # Iterate the columns directly with zip instead of positional .iloc
    # lookups, which cost a pandas call per element.
    for sex, earned, pred in zip(df_test_encoded['sex'],
                                 df_test_encoded['earnings'],
                                 predictions):
        if sex == 1:
            truth_m.append(earned)
            preds_m.append(pred)
        else:
            truth_f.append(earned)
            preds_f.append(pred)

    rates = {
        'true_positive_m': true_positive_rate(truth_m, preds_m, pos_label=1),
        'false_positive_m': false_positive_rate(truth_m, preds_m, pos_label=1),
        'true_negative_m': true_negative_rate(truth_m, preds_m, pos_label=1),
        'false_negative_m': false_negative_rate(truth_m, preds_m, pos_label=1),
        'true_positive_f': true_positive_rate(truth_f, preds_f, pos_label=1),
        'false_positive_f': false_positive_rate(truth_f, preds_f, pos_label=1),
        'true_negative_f': true_negative_rate(truth_f, preds_f, pos_label=1),
        'false_negative_f': false_negative_rate(truth_f, preds_f, pos_label=1),
    }

    if print_:
        print("True Positive Rate for Male:", rates['true_positive_m'])
        print("True Positive Rate for Female:", rates['true_positive_f'])
        print("False Positive Rate for Male:", rates['false_positive_m'])
        print("False Positive Rate for Female:", rates['false_positive_f'])
        print("True Negative Rate for Male:", rates['true_negative_m'])
        print("True Negative Rate for Female:", rates['true_negative_f'])
        print("False Negative Rate for Male:", rates['false_negative_m'])
        print("False Negative Rate for Female:", rates['false_negative_f'])

    return rates
    def test_against_sklearn_weighted(self):
        """Weighted TNR must agree with sklearn's weighted confusion matrix."""
        y_true = [0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1,
                  0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0]
        y_pred = [0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0,
                  1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1]
        weights = [1, 2, 3, 5, 3, 2, 5, 3, 5, 1, 5, 3, 5, 2, 3,
                   2, 5, 2, 3, 1, 5, 3, 2, 1, 1, 5, 2, 3, 5, 1]

        # Default pos_label=1: TNR is tn / (tn + fp) from sklearn's matrix.
        result = metrics.true_negative_rate(y_true, y_pred,
                                            sample_weight=weights)
        tn, fp, _, _ = skm.confusion_matrix(
            y_true, y_pred, sample_weight=weights).ravel()
        assert result == (tn / (tn + fp))

        # pos_label=0: flip the label order so class 0 is "positive".
        result = metrics.true_negative_rate(y_true, y_pred, pos_label=0,
                                            sample_weight=weights)
        tn, fp, _, _ = skm.confusion_matrix(
            y_true, y_pred, labels=[1, 0], sample_weight=weights).ravel()
        assert result == (tn / (tn + fp))
# --- Beispiel #3 (scraped example separator; vote count: 0) ---
    def test_some_correct_other_labels(self):
        """String labels: the TNR depends on which class is positive."""
        truth = ['b', 'b', 'b', 'b', 'a']
        guess = ['b', 'b', 'b', 'a', 'a']

        # With 'b' positive, the lone 'a' negative is predicted correctly.
        assert metrics.true_negative_rate(truth, guess, pos_label='b') == 1
        # With 'a' positive, 3 of the 4 'b' negatives are predicted correctly.
        assert metrics.true_negative_rate(truth, guess, pos_label='a') == 0.75
# --- Beispiel #4 (scraped example separator; vote count: 0) ---
    def test_some_correct(self):
        """±1 labels: the default positive class is 1."""
        truth = [-1, -1, -1, -1, 1]
        guess = [-1, -1, -1, 1, 1]

        # Default pos_label=1: one of the four -1 negatives is mislabelled.
        assert metrics.true_negative_rate(truth, guess) == 0.75
        # pos_label=-1: the single 1 is the only negative, and it is correct.
        assert metrics.true_negative_rate(truth, guess, pos_label=-1) == 1
# --- Beispiel #5 (scraped example separator; vote count: 0) ---
    def test_some_correct_other_labels(self):
        """String labels: check the TNR from each class's point of view."""
        truth = ["b", "b", "b", "b", "a"]
        guess = ["b", "b", "b", "a", "a"]

        # ('b' positive -> TNR 1; 'a' positive -> TNR 0.75)
        for positive, expected in (("b", 1), ("a", 0.75)):
            assert metrics.true_negative_rate(
                truth, guess, pos_label=positive) == expected
# --- Beispiel #6 (scraped example separator; vote count: 0) ---
    def test_none_correct(self):
        """Every prediction flipped: the TNR is zero for either pos_label."""
        truth = [0, 0, 0, 0, 1]
        guess = [1, 1, 1, 1, 0]

        # Default pos_label=1: all four 0-negatives are misclassified.
        assert metrics.true_negative_rate(truth, guess) == 0

        # pos_label=0: the single 1-negative is also misclassified.
        assert metrics.true_negative_rate(truth, guess, pos_label=0) == 0
# --- Beispiel #7 (scraped example separator; vote count: 0) ---
    def test_against_sklearn(self):
        """Unweighted TNR must match sklearn's confusion matrix."""
        y_true = [0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1,
                  0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0]
        y_pred = [0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0,
                  1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1]

        # Default pos_label=1: TNR is tn / (tn + fp).
        tn, fp, _, _ = skm.confusion_matrix(y_true, y_pred).ravel()
        assert metrics.true_negative_rate(y_true, y_pred) == (tn / (tn + fp))

        # pos_label=0: reverse the label order so class 0 is "positive".
        tn, fp, _, _ = skm.confusion_matrix(
            y_true, y_pred, labels=[1, 0]).ravel()
        assert metrics.true_negative_rate(
            y_true, y_pred, pos_label=0) == (tn / (tn + fp))
# --- Beispiel #8 (scraped example separator; vote count: 0) ---
    def test_all_negative_ones(self):
        """All labels -1: the default must behave exactly like pos_label=1."""
        neg_ones = -1 * np.ones(10)

        cases = [
            # (kwargs, expected TPR, FPR, TNR, FNR)
            ({}, 0, 0, 1, 0),                 # implicit pos_label=1
            ({'pos_label': 1}, 0, 0, 1, 0),   # explicit, same result
            ({'pos_label': -1}, 1, 0, 0, 0),  # everything becomes positive
        ]
        for kwargs, tpr, fpr, tnr, fnr in cases:
            assert metrics.true_positive_rate(neg_ones, neg_ones, **kwargs) == tpr
            assert metrics.false_positive_rate(neg_ones, neg_ones, **kwargs) == fpr
            assert metrics.true_negative_rate(neg_ones, neg_ones, **kwargs) == tnr
            assert metrics.false_negative_rate(neg_ones, neg_ones, **kwargs) == fnr
# --- Beispiel #9 (scraped example separator; vote count: 0) ---
    def test_all_ones(self):
        """All labels 1: the default must behave exactly like pos_label=1."""
        ones = np.ones(10)

        cases = [
            # (kwargs, expected TPR, FPR, TNR, FNR)
            ({}, 1, 0, 0, 0),                # implicit pos_label=1
            ({'pos_label': 1}, 1, 0, 0, 0),  # explicit, same result
            ({'pos_label': 0}, 0, 0, 1, 0),  # everything becomes negative
        ]
        for kwargs, tpr, fpr, tnr, fnr in cases:
            assert metrics.true_positive_rate(ones, ones, **kwargs) == tpr
            assert metrics.false_positive_rate(ones, ones, **kwargs) == fpr
            assert metrics.true_negative_rate(ones, ones, **kwargs) == tnr
            assert metrics.false_negative_rate(ones, ones, **kwargs) == fnr
# --- Beispiel #10 (scraped example separator; vote count: 0) ---
    def test_all_zeros(self):
        """All labels 0: the default must behave exactly like pos_label=1."""
        zeros = np.zeros(10)

        cases = [
            # (kwargs, expected TPR, FPR, TNR, FNR)
            ({}, 0, 0, 1, 0),                # implicit pos_label=1
            ({'pos_label': 1}, 0, 0, 1, 0),  # explicit, same result
            ({'pos_label': 0}, 1, 0, 0, 0),  # everything becomes positive
        ]
        for kwargs, tpr, fpr, tnr, fnr in cases:
            assert metrics.true_positive_rate(zeros, zeros, **kwargs) == tpr
            assert metrics.false_positive_rate(zeros, zeros, **kwargs) == fpr
            assert metrics.true_negative_rate(zeros, zeros, **kwargs) == tnr
            assert metrics.false_negative_rate(zeros, zeros, **kwargs) == fnr
# --- Beispiel #11 (scraped example separator; vote count: 0) ---
    def test_tnr_some_correct_with_false_negative(self):
        """A false negative on the positive class must not affect the TNR."""
        truth = [0, 0, 0, 0, 1]
        guess = [0, 0, 1, 0, 0]

        # 3 of the 4 true negatives are predicted as negative.
        assert metrics.true_negative_rate(truth, guess) == 0.75
def test_tnr_all_correct():
    """Perfect predictions give a TNR of exactly 1."""
    truth = [0, 0, 0, 0, 1]
    guess = list(truth)

    assert metrics.true_negative_rate(truth, guess) == 1