Example #1
from fairlearn.metrics import equalized_odds_difference, equalized_odds_ratio

def equalized_odds(df_test_encoded, predictions, print_=False):
    # Equalized-odds difference and ratio across sex, against the true labels.
    eod_sex = equalized_odds_difference(df_test_encoded.earnings, predictions,
                                        sensitive_features=df_test_encoded.sex)
    eor_sex = equalized_odds_ratio(df_test_encoded.earnings, predictions,
                                   sensitive_features=df_test_encoded.sex)

    if print_:
        print(f"equalised odds difference sex: {eod_sex:.3f}")
        print(f"equalised odds ratio sex: {eor_sex:.3f}")
    return eod_sex, eor_sex
Example #2
# Imports inferred from the aliases and keyword arguments used below;
# `helper` is a project-local module providing is_tutorial_running().
import sklearn.metrics as sk_metric
import aif360.sklearn.metrics as aif_mtrc
import fairlearn.metrics as fl_mtrc

def __binary_group_fairness_measures(X,
                                     prtc_attr,
                                     y_true,
                                     y_pred,
                                     y_prob=None,
                                     priv_grp=1):
    """[summary]

    Args:
        X (pandas DataFrame): Sample features
        prtc_attr (named array-like): values for the protected attribute
            (note: protected attribute may also be present in X)
        y_true (pandas DataFrame): Sample targets
        y_pred (pandas DataFrame): Sample target predictions
        y_prob (pandas DataFrame, optional): Sample target probabilities. Defaults
            to None.

    Returns:
        [type]: [description]
    """
    pa_names = prtc_attr.columns.tolist()
    gf_vals = {}
    gf_key = 'Group Fairness'
    gf_vals['Statistical Parity Difference'] = \
        aif_mtrc.statistical_parity_difference(y_true, y_pred, prot_attr=pa_names)
    gf_vals['Disparate Impact Ratio'] = \
        aif_mtrc.disparate_impact_ratio(y_true, y_pred, prot_attr=pa_names)
    # Only computed for a single protected attribute, outside the tutorial.
    if not helper.is_tutorial_running() and len(pa_names) <= 1:
        gf_vals['Demographic Parity Difference'] = \
            fl_mtrc.demographic_parity_difference(y_true, y_pred,
                                                  sensitive_features=prtc_attr)
        gf_vals['Demographic Parity Ratio'] = \
            fl_mtrc.demographic_parity_ratio(y_true, y_pred,
                                             sensitive_features=prtc_attr)
    gf_vals['Average Odds Difference'] = \
        aif_mtrc.average_odds_difference(y_true, y_pred, prot_attr=pa_names)
    gf_vals['Equal Opportunity Difference'] = \
        aif_mtrc.equal_opportunity_difference(y_true, y_pred, prot_attr=pa_names)
    # Same guard as above: single protected attribute, non-tutorial runs only.
    if not helper.is_tutorial_running() and len(pa_names) <= 1:
        gf_vals['Equalized Odds Difference'] = \
            fl_mtrc.equalized_odds_difference(y_true, y_pred,
                                              sensitive_features=prtc_attr)
        gf_vals['Equalized Odds Ratio'] = \
            fl_mtrc.equalized_odds_ratio(y_true, y_pred,
                                         sensitive_features=prtc_attr)
    gf_vals['Positive Predictive Parity Difference'] = \
        aif_mtrc.difference(sk_metric.precision_score, y_true,
                            y_pred, prot_attr=pa_names, priv_group=priv_grp)
    gf_vals['Balanced Accuracy Difference'] = \
        aif_mtrc.difference(sk_metric.balanced_accuracy_score, y_true,
                            y_pred, prot_attr=pa_names, priv_group=priv_grp)
    if y_prob is not None:
        gf_vals['AUC Difference'] = \
            aif_mtrc.difference(sk_metric.roc_auc_score, y_true, y_prob,
                                prot_attr=pa_names, priv_group=priv_grp)
    return (gf_key, gf_vals)
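A hedged usage sketch from inside the same module (aif360's sklearn-compatible metrics expect the protected attribute to appear in the index of y_true/y_pred; probs and the pandas alias pd are assumptions):

key, vals = __binary_group_fairness_measures(X, prtc_attr, y_true, y_pred,
                                             y_prob=probs, priv_grp=1)
report = pd.DataFrame.from_dict(vals, orient='index', columns=[key])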
Example #3
# y_t, y_p, and g_1 are module-level fixtures (labels, predictions, groups);
# agg_method is a pytest parameter selecting the aggregation method.
def test_equalized_odds_ratio(agg_method):
    actual = equalized_odds_ratio(y_t,
                                  y_p,
                                  method=agg_method,
                                  sensitive_features=g_1)

    metrics = {'tpr': true_positive_rate, 'fpr': false_positive_rate}
    gm = MetricFrame(metrics, y_t, y_p, sensitive_features=g_1)

    ratios = gm.ratio(method=agg_method)
    assert actual == ratios.min()
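The test holds because equalized_odds_ratio computes, for each of TPR and FPR, the between-group ratio (smallest group value divided by largest, aggregated per method), and then returns the smaller of the two; gm.ratio(method=agg_method) yields exactly those two per-metric ratios, so their min() reproduces the metric.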
Example #4
# As above, but with per-sample weights s_w (module-level fixture).
def test_equalized_odds_ratio_weighted(agg_method):
    actual = equalized_odds_ratio(y_t,
                                  y_p,
                                  method=agg_method,
                                  sensitive_features=g_1,
                                  sample_weight=s_w)

    metrics = {'tpr': true_positive_rate, 'fpr': false_positive_rate}
    sw = {'sample_weight': s_w}
    sp = {'tpr': sw, 'fpr': sw}
    gm = MetricFrame(metrics,
                     y_t,
                     y_p,
                     sensitive_features=g_1,
                     sample_params=sp)

    ratios = gm.ratio(method=agg_method)
    assert actual == ratios.min()
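MetricFrame takes no top-level sample_weight argument; per-metric keyword arguments are routed through sample_params, hence s_w is wrapped once per metric. Passing sample_weight=s_w directly to equalized_odds_ratio weights both underlying rates the same way, so the two computations agree.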
Example #5
import torch
import sklearn.metrics as skm
import fairlearn.metrics as flm

def evaluate_model(model, device, criterion, data_loader):
    model.eval()
    y_true = []
    y_pred = []
    y_out = []
    sensitives = []
    for i, data in enumerate(data_loader):
        x, y, sensitive_features = data
        x = x.to(device)
        y = y.to(device)
        sensitive_features = sensitive_features.to(device)
        with torch.no_grad():
            logit = model(x)
        # logit: raw scores of shape (b, 1); threshold the sigmoid at 0.5
        # for the binary prediction
        bina = (torch.sigmoid(logit) > 0.5).float()
        y_true += y.cpu().tolist()
        y_pred += bina.cpu().tolist()
        y_out += torch.sigmoid(logit).cpu().tolist()
        sensitives += sensitive_features.cpu().tolist()
    result = {}
    result["acc"] = skm.accuracy_score(y_true, y_pred)
    result["f1score"] = skm.f1_score(y_true, y_pred)
    result["AUC"] = skm.roc_auc_score(y_true, y_out)
    # Use the accumulated per-sample groups (`sensitives`), not the tensor
    # left over from the last batch, so lengths match y_true/y_pred.
    result["DP"] = {
        "diff":
        flm.demographic_parity_difference(
            y_true, y_pred, sensitive_features=sensitives),
        "ratio":
        flm.demographic_parity_ratio(y_true,
                                     y_pred,
                                     sensitive_features=sensitives),
    }
    result["EO"] = {
        "diff":
        flm.equalized_odds_difference(y_true,
                                      y_pred,
                                      sensitive_features=sensitives),
        "ratio":
        flm.equalized_odds_ratio(y_true,
                                 y_pred,
                                 sensitive_features=sensitives),
    }
    return result
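A minimal driver sketch (names are hypothetical; note that criterion is accepted but never used inside evaluate_model):

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
criterion = torch.nn.BCEWithLogitsLoss()  # placeholder; unused by evaluate_model
metrics = evaluate_model(model, device, criterion, test_loader)
print(metrics["acc"], metrics["AUC"], metrics["EO"]["diff"])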