Example #1
    def test_specific_metrics(self):
        y_t = [0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1]
        y_p = [1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0]
        s_f = [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]

        exp_acc = accuracy_score_group_summary(y_t,
                                               y_p,
                                               sensitive_features=s_f)
        exp_roc = roc_auc_score_group_summary(y_t, y_p, sensitive_features=s_f)

        predictions = {"some model": y_p}
        sensitive_feature = {"my sf": s_f}

        actual = _create_group_metric_set(y_t, predictions, sensitive_feature,
                                          'binary_classification')

        # Do some sanity checks
        validate_dashboard_dictionary(actual)
        assert actual['trueY'] == y_t
        assert actual['predictedY'][0] == y_p
        assert actual['precomputedFeatureBins'][0]['binVector'] == s_f
        assert len(actual['precomputedMetrics'][0][0]) == 11

        # Cross check the two metrics we computed
        # Comparisons simplified because s_f was already {0,1}
        actual_acc = actual['precomputedMetrics'][0][0]['accuracy_score']
        assert actual_acc['global'] == exp_acc.overall
        assert actual_acc['bins'] == list(exp_acc.by_group.values())

        # The dashboard dictionary stores the ROC AUC summary under the
        # 'balanced_accuracy_score' key
        actual_roc = actual['precomputedMetrics'][0][0][
            'balanced_accuracy_score']
        assert actual_roc['global'] == exp_roc.overall
        assert actual_roc['bins'] == list(exp_roc.by_group.values())
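
For orientation, the assertions above imply the following layout for the dictionary returned by _create_group_metric_set. This is only a sketch reconstructed from the checks in the test, with placeholder values; the exact schema is an assumption, not the library's documented format.

# Sketch of the dashboard dictionary checked above (placeholder values only)
expected_shape = {
    'trueY': [0, 1, 1],                        # y_t as passed in
    'predictedY': [[1, 1, 1]],                 # one entry per model in `predictions`
    'precomputedFeatureBins': [{'binVector': [0, 1, 0]}],
    'precomputedMetrics': [[{                  # indexed [sensitive feature][model]
        'accuracy_score': {'global': 0.67, 'bins': [0.5, 1.0]},
        'balanced_accuracy_score': {'global': 0.75, 'bins': [0.5, 1.0]},
        # ... 11 metric entries in total for 'binary_classification'
    }]],
}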

Example #2

def test_group_roc_auc_score_max_fpr():
    # max_fpr should be forwarded to the underlying roc_auc_score,
    # so the overall value matches sklearn's result
    result = metrics.roc_auc_score_group_summary(Y_true,
                                                 Y_pred,
                                                 sensitive_features=groups,
                                                 max_fpr=0.5)
    expected_overall = skm.roc_auc_score(Y_true, Y_pred, max_fpr=0.5)

    assert expected_overall == result.overall

Example #3

def test_group_roc_auc_score_average():
    # the average argument should likewise match sklearn's roc_auc_score
    result = metrics.roc_auc_score_group_summary(Y_true,
                                                 Y_pred,
                                                 sensitive_features=groups,
                                                 average='samples')
    expected_overall = skm.roc_auc_score(Y_true, Y_pred, average='samples')

    assert expected_overall == result.overall
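
The two tests above rely on module-level fixtures and imports defined elsewhere in the test module. A minimal sketch of that setup, assuming fairlearn's pre-0.5 metrics API (where the *_group_summary helpers live in fairlearn.metrics) and using purely illustrative data, could be:

import sklearn.metrics as skm            # `skm` alias used in the tests
from fairlearn import metrics            # assumption: provides roc_auc_score_group_summary

# Illustrative fixtures; the real test module defines its own Y_true, Y_pred, groups
Y_true = [0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1]
Y_pred = [1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0]
groups = [0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1]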