def get_classifier_metrics(test_list, prediction_list):
    privileged_groups = [{'sex': 1}]
    unprivileged_groups = [{'sex': 0}]
    acc_list = []
    bal_acc_list = []
    avg_odds_list = []
    recall_diff_list = []
    precision_diff_list = []
    for test_, pred_ in zip(test_list, prediction_list):
        model_metric = ClassificationMetric(
            test_,
            pred_,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        acc_list.append(model_metric.accuracy().round(3))
        bal_acc_list.append(((model_metric.true_positive_rate() +
                              model_metric.true_negative_rate()) / 2).round(3))
        avg_odds_list.append(model_metric.average_odds_difference().round(3))
        recall_diff_list.append(
            model_metric.equal_opportunity_difference().round(3))
        precision_diff_list.append(
            (model_metric.precision(privileged=False) -
             model_metric.precision(privileged=True)).round(3))
    return acc_list, bal_acc_list, avg_odds_list, recall_diff_list, precision_diff_list
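A minimal sketch of how get_classifier_metrics might be driven, assuming aif360 and pandas are installed and the snippet's own imports (ClassificationMetric) are in scope; the toy frames and the single test/prediction pair are illustrative only (the helper hardcodes 'sex' as the protected attribute):

import pandas as pd
from aif360.datasets import BinaryLabelDataset

df_true = pd.DataFrame({'sex': [0, 0, 1, 1], 'label': [1, 0, 1, 0]})
df_pred = df_true.copy()
df_pred['label'] = [1, 1, 1, 0]  # hypothetical classifier output

test_ds = BinaryLabelDataset(df=df_true, label_names=['label'],
                             protected_attribute_names=['sex'])
pred_ds = BinaryLabelDataset(df=df_pred, label_names=['label'],
                             protected_attribute_names=['sex'])

acc, bal_acc, avg_odds, rec_diff, prec_diff = get_classifier_metrics(
    [test_ds], [pred_ds])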
Example #2
def get_metric_reports(true_dataset, classified_dataset, privileged_groups, unprivileged_groups):
    # Attach the ground-truth labels to a deep copy of the classified dataset
    # so ClassificationMetric can compare predictions against the true labels.
    mirror_dataset = classified_dataset.copy(deepcopy=True)
    mirror_dataset.labels = copy.deepcopy(true_dataset.labels)

    metric = ClassificationMetric(
        dataset=mirror_dataset,
        classified_dataset=classified_dataset,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)

    report = OrderedDict()
    report['TPR'] = metric.true_positive_rate()
    report['TNR'] = metric.true_negative_rate()
    report['FPR'] = metric.false_positive_rate()
    report['FNR'] = metric.false_negative_rate()
    report['Balanced_Acc'] = 0.5 * (report['TPR'] + report['TNR'])
    report['Acc'] = metric.accuracy()
    report["Statistical parity difference"] = metric.statistical_parity_difference()
    report["Disparate impact"] = metric.disparate_impact()
    report["Equal opportunity difference"] = metric.equal_opportunity_difference()
    report["Average odds difference"] = metric.average_odds_difference()
    report["Theil index"] = metric.theil_index()
    report["United Fairness"] = metric.generalized_entropy_index()

    return report
Example #3
def compute_metrics(dataset_true,
                    dataset_pred,
                    unprivileged_groups,
                    privileged_groups,
                    disp=True):
    """ Compute the key metrics """
    classified_metric_pred = ClassificationMetric(
        dataset_true,
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    metrics = OrderedDict()
    metrics["Balanced accuracy"] = 0.5 * (
        classified_metric_pred.true_positive_rate() +
        classified_metric_pred.true_negative_rate())
    metrics["Statistical parity difference"] = classified_metric_pred.statistical_parity_difference()
    metrics["Disparate impact"] = classified_metric_pred.disparate_impact()
    metrics["Average odds difference"] = classified_metric_pred.average_odds_difference()
    metrics["Equal opportunity difference"] = classified_metric_pred.equal_opportunity_difference()
    metrics["Theil index"] = classified_metric_pred.theil_index()

    if disp:
        for k in metrics:
            print("%s = %.4f" % (k, metrics[k]))

    return metrics
def test(dataset, model, x_test, thresh_arr, unprivileged_groups,
         privileged_groups):
    # NOTE: `k`, `model_AIF`, `y_test`, and `A_test` are expected to be
    # defined in the enclosing scope of the original script.
    bld = BinaryLabelDataset(df=dataset,
                             label_names=['labels'],
                             protected_attribute_names=['age'])

    if np.isin(k, model_AIF):
        y_val_pred_prob = model.predict_proba(bld)
    else:
        y_val_pred_prob, A_val_pred_prob = model.predict_proba(x_test)

    # Metrics are collected as a flat array: eight values per threshold.
    metric_arrs = np.empty(0)
    for thresh in thresh_arr:
        if np.isin(k, model_AIF):
            y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
        else:
            # Non-AIF models return a torch tensor here.
            y_val_pred = (y_val_pred_prob.numpy() > thresh).astype(np.float64)

        metric_arrs = np.append(metric_arrs,
                                roc_auc_score(y_test, y_val_pred_prob))

        if np.isin(k, model_AIF):
            metric_arrs = np.append(metric_arrs, 0)
        else:
            metric_arrs = np.append(metric_arrs,
                                    roc_auc_score(A_test, A_val_pred_prob))

        # Overwrite the label *column* on the DataFrame copy (assigning to a
        # `.labels` attribute would leave the column unchanged).
        dataset_pred = dataset.copy()
        dataset_pred['labels'] = np.ravel(y_val_pred)
        bld2 = BinaryLabelDataset(df=dataset_pred,
                                  label_names=['labels'],
                                  protected_attribute_names=['age'])

        metric = ClassificationMetric(bld,
                                      bld2,
                                      unprivileged_groups=unprivileged_groups,
                                      privileged_groups=privileged_groups)

        metric_arrs = np.append(
            metric_arrs,
            ((metric.true_positive_rate() + metric.true_negative_rate()) / 2))
        metric_arrs = np.append(metric_arrs, metric.average_odds_difference())
        metric_arrs = np.append(metric_arrs, metric.disparate_impact())
        metric_arrs = np.append(metric_arrs,
                                metric.statistical_parity_difference())
        metric_arrs = np.append(metric_arrs,
                                metric.equal_opportunity_difference())
        metric_arrs = np.append(metric_arrs, metric.theil_index())

    return metric_arrs
Example #5
        def get_cm_metrics():
            # `X`, `df`, `ypred_class`, `dataset`, `privileged_group`, and
            # `unprivileged_group` come from the enclosing scope.
            df_pred = X.copy()
            df_pred[df.columns[-1]] = np.expand_dims(ypred_class, axis=1)

            dataset_pred = BinaryLabelDataset(
                df=df_pred,
                label_names=['action_taken_name'],
                protected_attribute_names=['applicant_sex_name_Female'])

            metric_CM = ClassificationMetric(
                dataset,
                dataset_pred,
                privileged_groups=privileged_group,
                unprivileged_groups=unprivileged_group)

            return {
                "Equal Opportunity Difference": metric_CM.equal_opportunity_difference(),
                "Average Odds Difference": metric_CM.average_odds_difference(),
                "Accuracy Male": metric_CM.accuracy(privileged=True),
                "Accuracy Female": metric_CM.accuracy(privileged=False)
            }
Example #6
def fairness_IBM(y_pred, Ztr, ytr, verbose=0):
    from aif360.datasets import BinaryLabelDataset
    from aif360.metrics import ClassificationMetric

    assert np.array_equal(np.unique(Ztr),
                          np.array([0, 1])), "Ztr must contain only 0s and 1s"

    Ztr = np.squeeze(Ztr)
    if verbose:
        print(ytr.shape)
        print(Ztr.shape)
    unprivileged_groups = [{"zs": [0]}]
    privileged_groups = [{"zs": [1]}]
    metric_arrs = defaultdict(list)
    dict_ = {"y_true": ytr, "zs": Ztr}
    df = pd.DataFrame(dict_)
    dataset = BinaryLabelDataset(df=df,
                                 label_names=["y_true"],
                                 protected_attribute_names=["zs"],
                                 unprivileged_protected_attributes=[[0]],
                                 privileged_protected_attributes=[[1]])

    dataset_pred = dataset.copy()
    # aif360 stores labels as an (n, 1) column vector.
    dataset_pred.labels = np.asarray(y_pred).reshape(-1, 1)
    metric = ClassificationMetric(dataset,
                                  dataset_pred,
                                  unprivileged_groups=unprivileged_groups,
                                  privileged_groups=privileged_groups)

    # metric_arrs['bal_acc'].append((metric.true_positive_rate()
    #                              + metric.true_negative_rate()) / 2)
    metric_arrs["EA"].append(
        metric.accuracy(privileged=False) - metric.accuracy(privileged=True))
    # ASSUMING ALL OTHER METRICS RETURN U - P
    metric_arrs['EO'].append(metric.average_odds_difference())
    # The ideal value of this metric is 1.0
    # A value < 1 implies higher benefit for the privileged group
    # and a value >1 implies a higher
    metric_arrs['DI'].append(metric.disparate_impact() - 1)
    metric_arrs['DP'].append(metric.statistical_parity_difference())
    metric_arrs['EQ'].append(metric.equal_opportunity_difference())
    metric_arrs['TH'].append(metric.between_group_theil_index() * 10)
    results = pd.DataFrame(metric_arrs)
    return results
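A minimal, hypothetical call of fairness_IBM on synthetic arrays, assuming the module-level imports the snippet relies on (pandas as pd, collections.defaultdict) are present; the sizes and seed are illustrative, and y_pred is shaped as the (n, 1) column aif360 uses:

import numpy as np

rng = np.random.default_rng(0)
Ztr = rng.integers(0, 2, size=200)           # binary protected attribute
ytr = rng.integers(0, 2, size=200)           # ground-truth labels
y_pred = rng.integers(0, 2, size=(200, 1))   # predictions as an (n, 1) column

print(fairness_IBM(y_pred, Ztr, ytr, verbose=1))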
Example #7
    def metrics_form(y_val_pred_prob, y_test, A_prob, A_test, bld, dataset):
        # NOTE: `k`, `model_AIF`, `thresh`, `protected`,
        # `unprivileged_groups`, and `privileged_groups` come from the
        # enclosing scope. Metrics are collected as a flat array of eight
        # values.
        metric_arrs = np.empty(0)

        y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
        if not np.isin(k, model_AIF):
            A_pred = (A_prob > thresh).astype(np.float64)  # computed but unused below

        metric_arrs = np.append(metric_arrs,
                                roc_auc_score(y_test, y_val_pred_prob))
        print("y {}".format(roc_auc_score(y_test, y_val_pred_prob)))
        metric_arrs = np.append(metric_arrs,
                                accuracy_score(y_test, y_val_pred))

        if np.isin(k, model_AIF):
            metric_arrs = np.append(metric_arrs, 0)
        else:
            metric_arrs = np.append(metric_arrs, roc_auc_score(A_test, A_prob))
            print("A {}".format(roc_auc_score(A_test, A_prob)))

        # Overwrite the label *column* on the DataFrame copy before
        # rebuilding the BinaryLabelDataset.
        dataset_pred = dataset.copy()
        dataset_pred['labels'] = np.ravel(y_val_pred)

        bld2 = BinaryLabelDataset(df=dataset_pred,
                                  label_names=['labels'],
                                  protected_attribute_names=protected)

        metric = ClassificationMetric(bld,
                                      bld2,
                                      unprivileged_groups=unprivileged_groups,
                                      privileged_groups=privileged_groups)

        metric_arrs = np.append(
            metric_arrs,
            ((metric.true_positive_rate() + metric.true_negative_rate()) / 2))
        metric_arrs = np.append(metric_arrs,
                                np.abs(metric.average_odds_difference()))
        metric_arrs = np.append(metric_arrs, metric.disparate_impact())
        metric_arrs = np.append(metric_arrs,
                                np.abs(metric.statistical_parity_difference()))
        metric_arrs = np.append(metric_arrs,
                                np.abs(metric.equal_opportunity_difference()))

        return metric_arrs
def compute_aif_metrics(dataset_true, dataset_pred, unprivileged_groups,
                        privileged_groups, ret_eval_dict=True):

    metrics_cls = ClassificationMetric(dataset_true, dataset_pred,
                                       unprivileged_groups=unprivileged_groups,
                                       privileged_groups=privileged_groups)
    metrics_dict = {}
    metrics_dict["BA"] = 0.5 * (metrics_cls.true_positive_rate() +
                                metrics_cls.true_negative_rate())
    metrics_dict["SPD"] = metrics_cls.statistical_parity_difference()
    metrics_dict["DI"] = metrics_cls.disparate_impact()
    metrics_dict["AOD"] = metrics_cls.average_odds_difference()
    metrics_dict["EOD"] = metrics_cls.equal_opportunity_difference()
    metrics_dict["DFBA"] = metrics_cls.differential_fairness_bias_amplification()
    metrics_dict["TI"] = metrics_cls.theil_index()

    if ret_eval_dict:
        return metrics_dict, metrics_cls
    else:
        return metrics_cls
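The abbreviated keys stand for balanced accuracy (BA), statistical parity difference (SPD), disparate impact (DI), average odds difference (AOD), equal opportunity difference (EOD), differential fairness bias amplification (DFBA), and Theil index (TI). A hypothetical call, with placeholder dataset names and groups:

mdict, mcls = compute_aif_metrics(dataset_orig_test, dataset_pred_test,
                                  unprivileged_groups=[{'sex': 0}],
                                  privileged_groups=[{'sex': 1}])
for name, value in mdict.items():
    print("%s = %.4f" % (name, value))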
Example #9
        "#### Plain model - without debiasing - classification metrics on test set"
    )
    # print("Test set: Classification accuracy = %f" % classified_metric_nodebiasing_test.accuracy())
    # print("Test set: Balanced classification accuracy = %f" % bal_acc_nodebiasing_test)
    # print("Test set: Statistical parity difference = %f" % classified_metric_nodebiasing_test.statistical_parity_difference())
    # print("Test set: Disparate impact = %f" % classified_metric_nodebiasing_test.disparate_impact())
    # print("Test set: Equal opportunity difference = %f" % classified_metric_nodebiasing_test.equal_opportunity_difference())
    # print("Test set: Average odds difference = %f" % classified_metric_nodebiasing_test.average_odds_difference())
    # print("Test set: Theil index = %f" % classified_metric_nodebiasing_test.theil_index())
    # print("Test set: False negative rate difference = %f" % classified_metric_nodebiasing_test.false_negative_rate_difference())

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    print("metrics: ", metrics)
Example #10
# `accuracy`, `mean_diff`, and `average_odds_diff` are initialized alongside
# this list in the original script.
accuracy = []
mean_diff = []
average_odds_diff = []
equal_opp_diff = []
for c in C:
    predictions, _ = train_and_predict(X_train, y_train, X_test, c, norm_type)

    ds_te_pred = ds_te.copy()
    ds_te_pred.labels = predictions

    metric_te = ClassificationMetric(ds_te,
                                     ds_te_pred,
                                     unprivileged_groups=unpriv,
                                     privileged_groups=priv)

    BACC = 0.5 * (metric_te.true_positive_rate() +
                  metric_te.true_negative_rate())
    metric_1 = metric_te.statistical_parity_difference()
    metric_2 = metric_te.average_odds_difference()
    metric_3 = metric_te.equal_opportunity_difference()

    accuracy.append(BACC)
    mean_diff.append(metric_1)
    average_odds_diff.append(metric_2)
    equal_opp_diff.append(metric_3)

# save plots
plot_results(C, norm_type, accuracy, mean_diff, average_odds_diff,
             equal_opp_diff, name + '_all_metrics_' + norm_type)


def results_table(C, accuracy, mean_diff, avg_odds_diff, equal_opp_diff):
    results = pd.DataFrame()
    results['c'] = C
Example #11
classification_metric = ClassificationMetric(
    dataset_ground_truth,
    dataset_classifier,
    unprivileged_groups=unprivileged_groups,
    privileged_groups=privileged_groups)

TPR = classification_metric.true_positive_rate()
TNR = classification_metric.true_negative_rate()
bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)

metrics = {
    "classification_accuracy": classification_metric.accuracy(),
    "balanced_classification_accuracy": bal_acc_nodebiasing_test,
    "statistical_parity_difference": classification_metric.statistical_parity_difference(),
    "disparate_impact": classification_metric.disparate_impact(),
    "equal_opportunity_difference": classification_metric.equal_opportunity_difference(),
    "average_odds_difference": classification_metric.average_odds_difference(),
    "theil_index": classification_metric.theil_index(),
    "false_negative_rate_difference": classification_metric.false_negative_rate_difference()
}

sys.stdout.write(json.dumps(metrics))
Example #12
#%%
# =============================================================================
# FAIRNESS METRICS (FOR REAL THIS TIME)
# =============================================================================
# TODO: (1) Redo the previous cell for gender bias and recompute the
# corresponding fairness metrics
# (2) Collect these values in a table (see the sketch below)
# (3) Think about a way to visualize these values
# Statistical Parity difference (SPD)
spd_pre_race = fairness_metrics.statistical_parity_difference()

# Disparate Impact Ratio
dir_pre_race = fairness_metrics.disparate_impact()

# Average Odds Difference and Average absolute odds difference
aod_pre_race = fairness_metrics.average_odds_difference()
aaod_pre_race = fairness_metrics.average_abs_odds_difference()

# Equal Opportunity Difference aka true positive rate difference
eod_pre_race = fairness_metrics.equal_opportunity_difference()

# Generalized entropy index with various alphas
fairness_metrics.between_all_groups_generalized_entropy_index(alpha=2)

ClassificationMetric(dataset=bld_true,
                     classified_dataset=bld_pred,
                     unprivileged_groups=None,
                     privileged_groups=None).false_positive_rate()
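One hypothetical way to carry out TODO step (2), collecting the race-bias values above into the df_fm table (a gender column would be added after redoing the cell for gender bias):

import pandas as pd

df_fm = pd.DataFrame(
    {'race': [spd_pre_race, dir_pre_race, aod_pre_race, aaod_pre_race, eod_pre_race]},
    index=['SPD', 'DIR', 'AOD', 'AAOD', 'EOD'])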

df_fm.head()
Example #13
def fairness_check(s3_url, bucket_name, s3_username, s3_password, training_id):

    cos = boto3.resource("s3",
                         endpoint_url=s3_url,
                         aws_access_key_id=s3_username,
                         aws_secret_access_key=s3_password)

    y_test_out = 'y_test.out'
    p_test_out = 'p_test.out'
    y_pred_out = 'y_pred.out'
    get_s3_item(cos, bucket_name, training_id + '/' + y_test_out, y_test_out)
    get_s3_item(cos, bucket_name, training_id + '/' + p_test_out, p_test_out)
    get_s3_item(cos, bucket_name, training_id + '/' + y_pred_out, y_pred_out)


    """Need to generalize the protected features"""

    unprivileged_groups = [{'race': 4.0}]
    privileged_groups = [{'race': 0.0}]
    favorable_label = 0.0
    unfavorable_label = 1.0

    """Load the necessary labels and protected features for fairness check"""

    y_test = np.loadtxt(y_test_out)
    p_test = np.loadtxt(p_test_out)
    y_pred = np.loadtxt(y_pred_out)

    """Calculate the fairness metrics"""

    original_test_dataset = dataset_wrapper(outcome=y_test, protected=p_test,
                                            unprivileged_groups=unprivileged_groups,
                                            privileged_groups=privileged_groups,
                                            favorable_label=favorable_label,
                                            unfavorable_label=unfavorable_label)
    plain_predictions_test_dataset = dataset_wrapper(outcome=y_pred, protected=p_test,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups,
                                                     favorable_label=favorable_label,
                                                     unfavorable_label=unfavorable_label)

    classified_metric_nodebiasing_test = ClassificationMetric(original_test_dataset,
                                                              plain_predictions_test_dataset,
                                                              unprivileged_groups=unprivileged_groups,
                                                              privileged_groups=privileged_groups)
    TPR = classified_metric_nodebiasing_test.true_positive_rate()
    TNR = classified_metric_nodebiasing_test.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5*(TPR+TNR)

    print("#### Plain model - without debiasing - classification metrics on test set")

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    print("metrics: ", metrics)
    return metrics
Example #14
def comb_algorithm(l, m, n, dataset_original1, privileged_groups1,
                   unprivileged_groups1, optim_options1):

    dataset_original2 = copy.deepcopy(dataset_original1)
    privileged_groups2 = copy.deepcopy(privileged_groups1)
    unprivileged_groups2 = copy.deepcopy(unprivileged_groups1)
    optim_options2 = copy.deepcopy(optim_options1)

    print(l, m, n)
    dataset_original_train, dataset_original_vt = dataset_original2.split(
        [0.7], shuffle=True)
    dataset_original_valid, dataset_original_test = dataset_original_vt.split(
        [0.5], shuffle=True)
    print('=======================')
    #print(dataset_original_test.labels)
    dataset_orig_train = copy.deepcopy(dataset_original_train)
    dataset_orig_valid = copy.deepcopy(dataset_original_valid)
    dataset_orig_test = copy.deepcopy(dataset_original_test)

    if l == 0:
        dataset_transfer_train = copy.deepcopy(dataset_original_train)
        dataset_transfer_valid = copy.deepcopy(dataset_original_valid)
        dataset_transfer_test = copy.deepcopy(dataset_original_test)
        #dataset_transf_train, dataset_transf_valid, dataset_transf_test = dataset_orig_train, dataset_orig_valid, dataset_orig_test
    else:
        pre_used = preAlgorithm[l - 1]
        dataset_transfer_train, dataset_transfer_valid, dataset_transfer_test = Pre(
            pre_used, dataset_orig_train, dataset_orig_valid,
            dataset_orig_test, privileged_groups2, unprivileged_groups2,
            optim_options2)

    dataset_transf_train = copy.deepcopy(dataset_transfer_train)
    dataset_transf_valid = copy.deepcopy(dataset_transfer_valid)
    dataset_transf_test = copy.deepcopy(dataset_transfer_test)
    if m == 0:
        dataset_transfer_valid_pred, dataset_transfer_test_pred = plain_model(
            dataset_transf_train, dataset_transf_valid, dataset_transf_test,
            privileged_groups2, unprivileged_groups2)
    else:
        in_used = inAlgorithm[m - 1]
        if in_used == "adversarial_debiasing":
            dataset_transfer_valid_pred, dataset_transfer_test_pred = adversarial_debiasing(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "art_classifier":
            dataset_transfer_valid_pred, dataset_transfer_test_pred = art_classifier(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "prejudice_remover":
            for key, value in privileged_groups2[0].items():
                sens_attr = key
            dataset_transfer_valid_pred, dataset_transfer_test_pred = prejudice_remover(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2,
                sens_attr)

    dataset_transf_valid_pred = copy.deepcopy(dataset_transfer_valid_pred)
    dataset_transf_test_pred = copy.deepcopy(dataset_transfer_test_pred)
    if n == 0:
        dataset_transf_test_pred_transf = copy.deepcopy(
            dataset_transfer_test_pred)

    else:
        post_used = postAlgorithm[n - 1]
        if post_used == "calibrated_eqodds":
            cpp = CalibratedEqOddsPostprocessing(
                privileged_groups=privileged_groups2,
                unprivileged_groups=unprivileged_groups2,
                cost_constraint=cost_constraint)
            cpp = cpp.fit(dataset_transfer_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = cpp.predict(
                dataset_transf_test_pred)

        elif post_used == "eqodds":
            EO = EqOddsPostprocessing(unprivileged_groups=unprivileged_groups2,
                                      privileged_groups=privileged_groups2)
            EO = EO.fit(dataset_transfer_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = EO.predict(
                dataset_transf_test_pred)

        elif post_used == "reject_option":
            #dataset_transf_test_pred_transf = reject_option(dataset_transf_valid, dataset_transf_valid_pred, dataset_transf_test, dataset_transf_test_pred, privileged_groups2, unprivileged_groups2)

            ROC = RejectOptionClassification(
                unprivileged_groups=unprivileged_groups2,
                privileged_groups=privileged_groups2)
            ROC = ROC.fit(dataset_transfer_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = ROC.predict(
                dataset_transf_test_pred)

    org_labels = dataset_orig_test.labels
    pred_labels = dataset_transf_test_pred.labels

    true_pred = org_labels == pred_labels
    # Accuracy of the in-processing predictions; labels have shape
    # (n_samples, 1), so divide by the number of rows.
    print("acc after in: ", float(np.sum(true_pred)) / pred_labels.shape[0])

    metric = ClassificationMetric(dataset_transfer_test,
                                  dataset_transf_test_pred_transf,
                                  unprivileged_groups=unprivileged_groups2,
                                  privileged_groups=privileged_groups2)

    metrics = OrderedDict()
    metrics["Classification accuracy"] = metric.accuracy()
    TPR = metric.true_positive_rate()
    TNR = metric.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)
    metrics["Balanced classification accuracy"] = bal_acc_nodebiasing_test
    metrics["Statistical parity difference"] = metric.statistical_parity_difference()
    metrics["Disparate impact"] = metric.disparate_impact()
    metrics["Equal opportunity difference"] = metric.equal_opportunity_difference()
    metrics["Average odds difference"] = metric.average_odds_difference()
    metrics["Theil index"] = metric.theil_index()
    metrics["United Fairness"] = metric.generalized_entropy_index()

    feature = []
    feature_str = "["
    for name in metrics:
        data = round(metrics[name], 4)
        feature.append(data)
        feature_str = feature_str + str(data) + " "
    feature_str = feature_str + "]"

    return feature, feature_str
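A hypothetical driver for the (l, m, n) grid, assuming the preAlgorithm, inAlgorithm, and postAlgorithm lists and the dataset/options objects are defined as in the surrounding script:

for l in range(len(preAlgorithm) + 1):            # 0 = no pre-processing
    for m in range(len(inAlgorithm) + 1):         # 0 = plain in-processing model
        for n in range(len(postAlgorithm) + 1):   # 0 = no post-processing
            feature, feature_str = comb_algorithm(
                l, m, n, dataset_orig, privileged_groups,
                unprivileged_groups, optim_options)
            print((l, m, n), feature_str)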
Example #15
def compute_metrics(model, X_test, y_test, X_train, y_train, dataset_test,
                    dataset_name, model_name, unprivileged_groups,
                    privileged_groups, position):
    """
    Calculate and return: model accuracy and fairness metrics
    
    Parameters
    ----------
    model: scikit-learn classifier    
    X_test: numpy 2d array
    y_test: numpy 1d array
    X_train: numpy 2d array
    y_train: numpy 1d array
    dataset_test: aif360.datasets.BinaryLabelDataset
    dataset_name: string
        Dataset name used in the analysis
    model_name: string
    unprivileged_groups: list<dict>
        Dictionary where the key is the name of the sensitive column in the 
        dataset, and the value is the value of the unprivileged group in the
        dataset
    privileged_groups: list<dict>
        Dictionary where the key is the name of the sensitive column in the 
        dataset, and the value is the value of the privileged group in the
        dataset
    position: int
        Column position of the sensitive group in the dataset 
    """

    y_pred_test = model.predict(X_test)
    acc_test = accuracy_score(y_true=y_test, y_pred=y_pred_test)
    print("Test accuracy: ", acc_test)
    y_pred_train = model.predict(X_train)
    acc_train = accuracy_score(y_true=y_train, y_pred=y_pred_train)
    print("Train accuracy: ", acc_train)

    dataset_pred = dataset_test.copy()
    dataset_pred.labels = y_pred_test

    bin_metric = BinaryLabelDatasetMetric(
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    disparate_impact_bin = bin_metric.disparate_impact()
    print('Disparate impact: ', disparate_impact_bin)
    mean_difference = bin_metric.mean_difference()
    print('Mean difference: ', mean_difference)

    classif_metric = ClassificationMetric(
        dataset_test,
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    classif_disparate_impact = classif_metric.disparate_impact()
    avg_odds = classif_metric.average_odds_difference()
    print('Average odds difference:', avg_odds)
    equal_opport = classif_metric.equal_opportunity_difference()
    print('Equality of opportunity:', equal_opport)
    false_discovery_rate = classif_metric.false_discovery_rate_difference()
    print('False discovery rate difference:', false_discovery_rate)
    entropy_index = classif_metric.generalized_entropy_index()
    print('Generalized entropy index:', entropy_index)

    cons_comp = consitency_mod(bin_metric, position, n_neighbors=5)  # helper defined elsewhere in the original source
    print('Consistency: ', cons_comp)

    result = (dataset_name, model_name, acc_test, disparate_impact_bin,
              mean_difference, classif_disparate_impact, avg_odds,
              equal_opport, false_discovery_rate, entropy_index, cons_comp)

    return result
Example #16
def fairness_check(label_dir, model_dir):
    """Need to generalize the protected features"""

    # races_to_consider = [0,4]
    unprivileged_groups = [{'race': 4.0}]
    privileged_groups = [{'race': 0.0}]
    favorable_label = 0.0
    unfavorable_label = 1.0

    """Load the necessary labels and protected features for fairness check"""

    # y_train = np.loadtxt(label_dir + '/y_train.out')
    # p_train = np.loadtxt(label_dir + '/p_train.out')
    y_test = np.loadtxt(label_dir + '/y_test.out')
    p_test = np.loadtxt(label_dir + '/p_test.out')
    y_pred = np.loadtxt(label_dir + '/y_pred.out')

    """Calculate the fairness metrics"""

    # original_traning_dataset = dataset_wrapper(outcome=y_train, protected=p_train,
    #                                            unprivileged_groups=unprivileged_groups,
    #                                            privileged_groups=privileged_groups,
    #                                            favorable_label=favorable_label,
    #                                            unfavorable_label=unfavorable_label)
    original_test_dataset = dataset_wrapper(outcome=y_test, protected=p_test,
                                            unprivileged_groups=unprivileged_groups,
                                            privileged_groups=privileged_groups,
                                            favorable_label=favorable_label,
                                            unfavorable_label=unfavorable_label)
    plain_predictions_test_dataset = dataset_wrapper(outcome=y_pred, protected=p_test,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups,
                                                     favorable_label=favorable_label,
                                                     unfavorable_label=unfavorable_label)

    classified_metric_nodebiasing_test = ClassificationMetric(original_test_dataset,
                                                              plain_predictions_test_dataset,
                                                              unprivileged_groups=unprivileged_groups,
                                                              privileged_groups=privileged_groups)
    TPR = classified_metric_nodebiasing_test.true_positive_rate()
    TNR = classified_metric_nodebiasing_test.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5*(TPR+TNR)

    print("#### Plain model - without debiasing - classification metrics on test set")
    # print("Test set: Classification accuracy = %f" % classified_metric_nodebiasing_test.accuracy())
    # print("Test set: Balanced classification accuracy = %f" % bal_acc_nodebiasing_test)
    # print("Test set: Statistical parity difference = %f" % classified_metric_nodebiasing_test.statistical_parity_difference())
    # print("Test set: Disparate impact = %f" % classified_metric_nodebiasing_test.disparate_impact())
    # print("Test set: Equal opportunity difference = %f" % classified_metric_nodebiasing_test.equal_opportunity_difference())
    # print("Test set: Average odds difference = %f" % classified_metric_nodebiasing_test.average_odds_difference())
    # print("Test set: Theil index = %f" % classified_metric_nodebiasing_test.theil_index())
    # print("Test set: False negative rate difference = %f" % classified_metric_nodebiasing_test.false_negative_rate_difference())

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    return {"metrics": metrics}
    # This `fit` belongs to a RejectOptionClassification class (shown here
    # detached from it).
    def fit(self, dataset_true, dataset_pred):
        """Estimates the optimal classification threshold and margin for reject
        option classification that optimizes the metric provided.

        Note:
            The `fit` function is a no-op for this algorithm.

        Args:
            dataset_true (BinaryLabelDataset): Dataset containing the true
                `labels`.
            dataset_pred (BinaryLabelDataset): Dataset containing the predicted
                `scores`.

        Returns:
            RejectOptionClassification: Returns self.
        """

        fair_metric_arr = np.zeros(self.num_class_thresh*self.num_ROC_margin)
        balanced_acc_arr = np.zeros_like(fair_metric_arr)
        ROC_margin_arr = np.zeros_like(fair_metric_arr)
        class_thresh_arr = np.zeros_like(fair_metric_arr)

        cnt = 0
        # Iterate through class thresholds
        for class_thresh in np.linspace(self.low_class_thresh,
                                        self.high_class_thresh,
                                        self.num_class_thresh):

            self.classification_threshold = class_thresh
            if class_thresh <= 0.5:
                low_ROC_margin = 0.0
                high_ROC_margin = class_thresh
            else:
                low_ROC_margin = 0.0
                high_ROC_margin = (1.0-class_thresh)

            # Iterate through ROC margins. This loop runs for every class
            # threshold, not only in the else branch above.
            for ROC_margin in np.linspace(low_ROC_margin,
                                          high_ROC_margin,
                                          self.num_ROC_margin):
                self.ROC_margin = ROC_margin

                # Predict using the current threshold and margin
                dataset_transf_pred = self.predict(dataset_pred)

                dataset_transf_metric_pred = BinaryLabelDatasetMetric(
                    dataset_transf_pred,
                    unprivileged_groups=self.unprivileged_groups,
                    privileged_groups=self.privileged_groups)
                classified_transf_metric = ClassificationMetric(
                    dataset_true,
                    dataset_transf_pred,
                    unprivileged_groups=self.unprivileged_groups,
                    privileged_groups=self.privileged_groups)

                ROC_margin_arr[cnt] = self.ROC_margin
                class_thresh_arr[cnt] = self.classification_threshold

                # Balanced accuracy and fairness metric computations
                balanced_acc_arr[cnt] = 0.5 * (
                    classified_transf_metric.true_positive_rate() +
                    classified_transf_metric.true_negative_rate())
                if self.metric_name == "Statistical parity difference":
                    fair_metric_arr[cnt] = dataset_transf_metric_pred.mean_difference()
                elif self.metric_name == "Average odds difference":
                    fair_metric_arr[cnt] = classified_transf_metric.average_odds_difference()
                elif self.metric_name == "Equal opportunity difference":
                    fair_metric_arr[cnt] = classified_transf_metric.equal_opportunity_difference()

                cnt += 1

        rel_inds = np.logical_and(fair_metric_arr >= self.metric_lb,
                                  fair_metric_arr <= self.metric_ub)
        if any(rel_inds):
            best_ind = np.where(balanced_acc_arr[rel_inds]
                                == np.max(balanced_acc_arr[rel_inds]))[0][0]
        else:
            warn("Unable to satisy fairness constraints")
            rel_inds = np.ones(len(fair_metric_arr), dtype=bool)
            best_ind = np.where(fair_metric_arr[rel_inds]
                                == np.min(fair_metric_arr[rel_inds]))[0][0]

        self.ROC_margin = ROC_margin_arr[rel_inds][best_ind]
        self.classification_threshold = class_thresh_arr[rel_inds][best_ind]

        return self
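For context, a minimal sketch of how a RejectOptionClassification post-processor such as the one whose fit method appears above is typically used with aif360; the group definitions and the validation/test dataset variables are placeholders:

from aif360.algorithms.postprocessing import RejectOptionClassification

# dataset_valid carries true labels; dataset_valid_pred carries classifier scores.
roc = RejectOptionClassification(unprivileged_groups=[{'sex': 0}],
                                 privileged_groups=[{'sex': 1}],
                                 metric_name="Statistical parity difference")
roc = roc.fit(dataset_valid, dataset_valid_pred)
dataset_test_pred_transf = roc.predict(dataset_test_pred)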
def compute_metrics(model, X_test, y_test, X_train, y_train, dataset_test, 
                    unprivileged_groups, privileged_groups, protect_attribute, 
                    print_result):
    """
    Calculate and return: model accuracy and fairness metrics
    
    Parameters
    ----------
    model: scikit-learn classifier    
    X_test: numpy 2d array
    y_test: numpy 1d array
    X_train: numpy 2d array
    y_train: numpy 1d array
    dataset_test: aif360.datasets.BinaryLabelDataset
    unprivileged_groups: list<dict>
        Dictionary where the key is the name of the sensitive column in the 
        dataset, and the value is the value of the unprivileged group in the
        dataset
    privileged_groups: list<dict>
        Dictionary where the key is the name of the sensitive column in the 
        dataset, and the value is the value of the privileged group in the
        dataset
    protect_attribute:
        Protected-attribute specification passed to the `consitency` and
        `counterfactual` helpers
    print_result: bool
        If True, print each computed metric
    """
    result = {}
    
    y_pred_test = model.predict(X_test)
    result['acc_test'] = accuracy_score(y_true=y_test, y_pred=y_pred_test)
    y_pred_train = model.predict(X_train)
    result['acc_train'] = accuracy_score(y_true=y_train, y_pred=y_pred_train)
    
    dataset_pred = dataset_test.copy()
    dataset_pred.labels = y_pred_test

    bin_metric = BinaryLabelDatasetMetric(dataset_pred, 
                                          unprivileged_groups=unprivileged_groups,
                                          privileged_groups=privileged_groups)
    result['disp_impact'] = bin_metric.disparate_impact()
    result['stat_parity'] = bin_metric.mean_difference()

    classif_metric = ClassificationMetric(dataset_test, dataset_pred, 
                                          unprivileged_groups=unprivileged_groups,
                                          privileged_groups=privileged_groups)
    result['avg_odds'] = classif_metric.average_odds_difference()
    result['equal_opport'] = classif_metric.equal_opportunity_difference()
    result['false_discovery_rate'] = classif_metric.false_discovery_rate_difference()
    result['entropy_index'] = classif_metric.generalized_entropy_index()
    result['acc_test_clf'] = classif_metric.accuracy(privileged=None)
    result['acc_test_priv'] = classif_metric.accuracy(privileged=True)
    result['acc_test_unpriv'] = classif_metric.accuracy(privileged=False)
    
    result['consistency'] = consitency(X_test, y_pred_test, protect_attribute, n_neighbors=5)
    result['counterfactual'] = counterfactual(X_test, model, protect_attribute)
    
    if print_result:
        print("Train accuracy: ", result['acc_train'])
        print("Test accuracy: ", result['acc_test'])
        print("Test accuracy clf: ", result['acc_test_clf'])
        print("Test accuracy priv.: ", result['acc_test_priv'])
        print("Test accuracy unpriv.: ", result['acc_test_unpriv'])
        print('Disparate impact: ', result['disp_impact'])
        print('Mean difference: ', result['stat_parity'])
        print('Average odds difference:', result['avg_odds'])
        print('Equality of opportunity:', result['equal_opport'])
        print('False discovery rate difference:', result['false_discovery_rate'])
        print('Generalized entropy index:', result['entropy_index'])
        print('Consistency: ', result['consistency'])
        print('Counterfactual fairness: ', result['counterfactual'])

    return result
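The consitency and counterfactual helpers are defined elsewhere in the original source. As a rough illustration of the counterfactual idea only, a hypothetical helper (not the author's implementation) that flips a binary protected column and measures how often predictions stay the same:

import numpy as np

def counterfactual_flip_rate(X, model, protect_idx):
    # Hypothetical: assumes X is a numpy array whose column `protect_idx`
    # holds a binary 0/1 protected attribute.
    X_flipped = X.copy()
    X_flipped[:, protect_idx] = 1 - X_flipped[:, protect_idx]
    # Fraction of samples whose prediction is unchanged by the flip.
    return np.mean(model.predict(X) == model.predict(X_flipped))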
Example #19
def fairness_check(object_storage_url,
                   object_storage_username,
                   object_storage_password,
                   data_bucket_name,
                   result_bucket_name,
                   model_id,
                   feature_testset_path='processed_data/X_test.npy',
                   label_testset_path='processed_data/y_test.npy',
                   protected_label_testset_path='processed_data/p_test.npy',
                   model_class_file='model.py',
                   model_class_name='model',
                   favorable_label=0.0,
                   unfavorable_label=1.0,
                   privileged_groups=[{
                       'race': 0.0
                   }],
                   unprivileged_groups=[{
                       'race': 4.0
                   }]):

    url = re.compile(r"https?://")
    cos = Minio(url.sub('', object_storage_url),
                access_key=object_storage_username,
                secret_key=object_storage_password,
                secure=False)  # Local Minio server won't have HTTPS

    dataset_filenamex = "X_test.npy"
    dataset_filenamey = "y_test.npy"
    dataset_filenamep = "p_test.npy"
    weights_filename = "model.pt"
    model_files = model_id + '/_submitted_code/model.zip'

    cos.fget_object(data_bucket_name, feature_testset_path, dataset_filenamex)
    cos.fget_object(data_bucket_name, label_testset_path, dataset_filenamey)
    cos.fget_object(data_bucket_name, protected_label_testset_path,
                    dataset_filenamep)
    cos.fget_object(result_bucket_name, model_id + '/' + weights_filename,
                    weights_filename)
    cos.fget_object(result_bucket_name, model_files, 'model.zip')

    # Load PyTorch model definition from the source code.
    zip_ref = zipfile.ZipFile('model.zip', 'r')
    zip_ref.extractall('model_files')
    zip_ref.close()

    modulename = 'model_files.' + model_class_file.split('.')[0].replace(
        '-', '_')
    '''
    We require users to define where the model class is located, or to follow
    the naming convention we have provided.
    '''
    model_class = getattr(importlib.import_module(modulename),
                          model_class_name)

    # load & compile model
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = model_class().to(device)
    model.load_state_dict(torch.load(weights_filename, map_location=device))
    """Load the necessary labels and protected features for fairness check"""

    x_test = np.load(dataset_filenamex)
    y_test = np.load(dataset_filenamey)
    p_test = np.load(dataset_filenamep)

    _, y_pred = evaluate(model, x_test, y_test)
    """Calculate the fairness metrics"""

    original_test_dataset = dataset_wrapper(
        outcome=y_test,
        protected=p_test,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label)
    plain_predictions_test_dataset = dataset_wrapper(
        outcome=y_pred,
        protected=p_test,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label)

    classified_metric_nodebiasing_test = ClassificationMetric(
        original_test_dataset,
        plain_predictions_test_dataset,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    TPR = classified_metric_nodebiasing_test.true_positive_rate()
    TNR = classified_metric_nodebiasing_test.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)

    print(
        "#### Plain model - without debiasing - classification metrics on test set"
    )

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    print("metrics: ", metrics)
    return metrics
Example #20
def comb_algorithm(l, m, n, dataset_original1, privileged_groups1,
                   unprivileged_groups1, optim_options1):

    dataset_original2 = copy.deepcopy(dataset_original1)
    privileged_groups2 = copy.deepcopy(privileged_groups1)
    unprivileged_groups2 = copy.deepcopy(unprivileged_groups1)
    optim_options2 = copy.deepcopy(optim_options1)

    print(l, m, n)
    dataset_orig_train, dataset_orig_vt = dataset_original2.split([0.7],
                                                                  shuffle=True)
    dataset_orig_valid, dataset_orig_test = dataset_orig_vt.split([0.5],
                                                                  shuffle=True)

    if l == 0:
        dataset_transf_train, dataset_transf_valid, dataset_transf_test = dataset_orig_train, dataset_orig_valid, dataset_orig_test
    else:
        pre_used = preAlgorithm[l - 1]
        dataset_transf_train, dataset_transf_valid, dataset_transf_test = Pre(
            pre_used, dataset_orig_train, dataset_orig_valid,
            dataset_orig_test, privileged_groups2, unprivileged_groups2,
            optim_options2)

    #assert (l,m,n)!=(2,0,0)
    #assert not np.all(dataset_transf_train.labels.flatten()==1.0)

    if m == 0:
        dataset_transf_valid_pred, dataset_transf_test_pred = train(
            dataset_transf_train, dataset_transf_valid, dataset_transf_test,
            privileged_groups2, unprivileged_groups2)
    else:
        in_used = inAlgorithm[m - 1]
        if in_used == "adversarial_debiasing":
            dataset_transf_valid_pred, dataset_transf_test_pred = adversarial_debiasing(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "art_classifier":
            dataset_transf_valid_pred, dataset_transf_test_pred = art_classifier(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "prejudice_remover":
            for key, value in privileged_groups2[0].items():
                sens_attr = key
            dataset_transf_valid_pred, dataset_transf_test_pred = prejudice_remover(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2,
                sens_attr)

    if n == 0:
        dataset_transf_test_pred_transf = dataset_transf_test_pred

    else:
        post_used = postAlgorithm[n - 1]
        if post_used == "calibrated_eqodds":
            cpp = CalibratedEqOddsPostprocessing(
                privileged_groups=privileged_groups2,
                unprivileged_groups=unprivileged_groups2,
                cost_constraint=cost_constraint,
                seed=1)
            cpp = cpp.fit(dataset_transf_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = cpp.predict(
                dataset_transf_test_pred)

        elif post_used == "eqodds":
            EO = EqOddsPostprocessing(unprivileged_groups=unprivileged_groups2,
                                      privileged_groups=privileged_groups2,
                                      seed=1)
            EO = EO.fit(dataset_transf_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = EO.predict(
                dataset_transf_test_pred)

        elif post_used == "reject_option":
            ROC = RejectOptionClassification(
                unprivileged_groups=unprivileged_groups2,
                privileged_groups=privileged_groups2,
                low_class_thresh=0.01,
                high_class_thresh=0.99,
                num_class_thresh=100,
                num_ROC_margin=50,
                metric_name=allowed_metrics[0],
                metric_ub=metric_ub,
                metric_lb=metric_lb)
            ROC = ROC.fit(dataset_transf_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = ROC.predict(
                dataset_transf_test_pred)

    metric = ClassificationMetric(dataset_transf_test,
                                  dataset_transf_test_pred_transf,
                                  unprivileged_groups=unprivileged_groups2,
                                  privileged_groups=privileged_groups2)

    metrics = OrderedDict()
    metrics["Classification accuracy"] = metric.accuracy()
    TPR = metric.true_positive_rate()
    TNR = metric.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)
    metrics["Balanced classification accuracy"] = bal_acc_nodebiasing_test
    metrics["Statistical parity difference"] = metric.statistical_parity_difference()
    metrics["Disparate impact"] = metric.disparate_impact()
    metrics["Equal opportunity difference"] = metric.equal_opportunity_difference()
    metrics["Average odds difference"] = metric.average_odds_difference()
    metrics["Theil index"] = metric.theil_index()
    metrics["United Fairness"] = metric.generalized_entropy_index()
    # print(metrics)

    feature = "["
    for m in metrics:
        feature = feature + " " + str(round(metrics[m], 4))
    feature = feature + "]"

    return feature