Example #1
def calculate_bias_measures(data_orig_train, data_orig_vt, unprivileged_groups,
                            privileged_groups):
    model = RandomForestClassifier().fit(
        data_orig_train.features,
        data_orig_train.labels.ravel(),
        sample_weight=data_orig_train.instance_weights)
    dataset = data_orig_vt
    dataset_pred = dataset.copy()
    dataset_pred.labels = model.predict(data_orig_vt.features)
    classified_metric_race = ClassificationMetric(
        dataset,
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    metric_pred_race = BinaryLabelDatasetMetric(
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    print("Mean difference {}".format(metric_pred_race.mean_difference()))
    print("Disparate Metric {}".format(metric_pred_race.disparate_impact()))
    print("Equal Opportunity Difference {}".format(
        classified_metric_race.equal_opportunity_difference()))
    print("Average Abs Odds Difference {}".format(
        classified_metric_race.average_abs_odds_difference()))
    print("Theil index {}".format(classified_metric_race.theil_index()))
Example #2
def compute_metrics(dataset_true,
                    dataset_pred,
                    unprivileged_groups,
                    privileged_groups,
                    disp=True):
    """ Compute the key metrics """
    classified_metric_pred = ClassificationMetric(
        dataset_true,
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    metrics = OrderedDict()
    metrics["Balanced accuracy"] = 0.5 * (
        classified_metric_pred.true_positive_rate() +
        classified_metric_pred.true_negative_rate())
    metrics["Statistical parity difference"] = classified_metric_pred.statistical_parity_difference()
    metrics["Disparate impact"] = classified_metric_pred.disparate_impact()
    metrics["Average odds difference"] = classified_metric_pred.average_odds_difference()
    metrics["Equal opportunity difference"] = classified_metric_pred.equal_opportunity_difference()
    metrics["Theil index"] = classified_metric_pred.theil_index()

    if disp:
        for k in metrics:
            print("%s = %.4f" % (k, metrics[k]))

    return metrics
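A hedged call sketch, assuming `dataset_test` holds the ground truth and `dataset_test_pred` is a copy carrying the model's predictions in its labels:

# Hypothetical usage (dataset names are assumptions):
metrics = compute_metrics(dataset_test, dataset_test_pred,
                          unprivileged_groups=[{'sex': 0.0}],
                          privileged_groups=[{'sex': 1.0}],
                          disp=True)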
Example #3
def get_metric_reports(true_dataset, classified_dataset, privileged_groups, unprivileged_groups):

	mirror_dataset = classified_dataset.copy(deepcopy=True)
	mirror_dataset.labels = copy.deepcopy(true_dataset.labels)

	metric = ClassificationMetric(
		dataset=mirror_dataset,
		classified_dataset=classified_dataset,
		unprivileged_groups=unprivileged_groups,
		privileged_groups=privileged_groups)
	# measure unfairness
	
	report=OrderedDict()
	report['TPR']=metric.true_positive_rate()
	report['TNR']=metric.true_negative_rate()
	report['FPR']=metric.false_positive_rate()
	report['FNR']=metric.false_negative_rate()
	report['Balanced_Acc']=0.5*(report['TPR']+report['TNR'])
	report['Acc']=metric.accuracy()
	report["Statistical parity difference"]=metric.statistical_parity_difference()
	report["Disparate impact"]=metric.disparate_impact()
	report["Equal opportunity difference"]=metric.equal_opportunity_difference()
	report["Average odds difference"]=metric.average_odds_difference()
	report["Theil index"]=metric.theil_index()
	report["United Fairness"]=metric.generalized_entropy_index()

	return report
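The `mirror_dataset` copy exists because `ClassificationMetric` requires the ground-truth and classified datasets to be structurally identical (same features and instance ordering); cloning the classified dataset and swapping in the true labels guarantees that alignment.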
Example #4
def fair_metrics(dataset, pred, pred_is_dataset=False):
    if pred_is_dataset:
        dataset_pred = pred
    else:
        dataset_pred = dataset.copy()
        dataset_pred.labels = pred

    cols = [
        'statistical_parity_difference', 'equal_opportunity_difference',
        'average_abs_odds_difference', 'disparate_impact', 'theil_index'
    ]
    obj_fairness = [[0, 0, 0, 1, 0]]

    fair_metrics = pd.DataFrame(data=obj_fairness,
                                index=['objective'],
                                columns=cols)

    for attr in dataset_pred.protected_attribute_names:
        idx = dataset_pred.protected_attribute_names.index(attr)
        privileged_groups = [{attr: dataset_pred.privileged_protected_attributes[idx][0]}]
        unprivileged_groups = [{attr: dataset_pred.unprivileged_protected_attributes[idx][0]}]

        classified_metric = ClassificationMetric(
            dataset,
            dataset_pred,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        metric_pred = BinaryLabelDatasetMetric(
            dataset_pred,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        row = pd.DataFrame([[
            metric_pred.mean_difference(),
            classified_metric.equal_opportunity_difference(),
            classified_metric.average_abs_odds_difference(),
            metric_pred.disparate_impact(),
            classified_metric.theil_index()
        ]],
                           columns=cols,
                           index=[attr])
        # DataFrame.append was removed in pandas 2.0; concat is the replacement
        fair_metrics = pd.concat([fair_metrics, row])

    fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)

    return fair_metrics
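A minimal sketch of invoking this helper, assuming `test_bld` is an AIF360 BinaryLabelDataset and `y_pred` a NumPy array of predicted labels:

# Hypothetical usage (names are assumptions); AIF360 stores labels as an
# (n, 1) column, hence the reshape.
report = fair_metrics(test_bld, y_pred.reshape(-1, 1))
print(report)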
Example #5
def test(dataset, model, x_test, thresh_arr, unprivileged_groups,
         privileged_groups):
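    # Note: `k`, `model_AIF`, `y_test`, and `A_test` are module-level globals
    # in the original source; they are not defined in this snippet.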

    bld = BinaryLabelDataset(df=dataset,
                             label_names=['labels'],
                             protected_attribute_names=['age'])

    if np.isin(k, model_AIF):
        y_val_pred_prob = model.predict_proba(bld)
    else:
        y_val_pred_prob, A_val_pred_prob = model.predict_proba(x_test)

    metric_arrs = np.empty([0, 8])
    for thresh in thresh_arr:
        if np.isin(k, model_AIF):
            y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
        else:
            y_val_pred = (y_val_pred_prob.numpy() > thresh).astype(np.float64)

        metric_arrs = np.append(metric_arrs,
                                roc_auc_score(y_test, y_val_pred_prob))

        if np.isin(k, model_AIF):
            metric_arrs = np.append(metric_arrs, 0)
        else:
            metric_arrs = np.append(metric_arrs,
                                    roc_auc_score(A_test, A_val_pred_prob))

        dataset_pred = dataset.copy()
        dataset_pred.labels = y_val_pred
        bld2 = BinaryLabelDataset(df=dataset_pred,
                                  label_names=['labels'],
                                  protected_attribute_names=['age'])

        metric = ClassificationMetric(bld,
                                      bld2,
                                      unprivileged_groups=unprivileged_groups,
                                      privileged_groups=privileged_groups)

        metric_arrs = np.append(
            metric_arrs,
            ((metric.true_positive_rate() + metric.true_negative_rate()) / 2))
        metric_arrs = np.append(metric_arrs, metric.average_odds_difference())
        metric_arrs = np.append(metric_arrs, metric.disparate_impact())
        metric_arrs = np.append(metric_arrs,
                                metric.statistical_parity_difference())
        metric_arrs = np.append(metric_arrs,
                                metric.equal_opportunity_difference())
        metric_arrs = np.append(metric_arrs, metric.theil_index())

    return metric_arrs
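Note that `np.append` without an `axis` argument flattens its inputs, so despite the `np.empty([0, 8])` initialization this function returns a flat 1-D array of length 8 * len(thresh_arr); callers may want to `reshape(-1, 8)` the result.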
Example #6
def nondebiased_classifier(train, test, privileged_groups,
                           unprivileged_groups):
    sess = tf.Session()
    # AdversarialDebiasing takes unprivileged_groups before privileged_groups;
    # keyword arguments avoid swapping them by accident.
    NN_model = AdversarialDebiasing(unprivileged_groups=unprivileged_groups,
                                    privileged_groups=privileged_groups,
                                    scope_name='nondebiased_classifier',
                                    debias=False,
                                    sess=sess)
    NN_model.fit(train)

    # predict outcome using the test set
    pred_NNmodel = NN_model.predict(test)
    sess.close()
    tf.reset_default_graph()

    # calculate accuracy
    accuracy = accuracy_score(y_true=test.labels, y_pred=pred_NNmodel.labels)

    # calculate fairness metrics
    metric_test = BinaryLabelDatasetMetric(
        pred_NNmodel,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    acc_test = ClassificationMetric(test,
                                    pred_NNmodel,
                                    unprivileged_groups=unprivileged_groups,
                                    privileged_groups=privileged_groups)
    equal_opportunity_difference = equal_opp_diff(test,
                                                  pred_NNmodel,
                                                  'sex',
                                                  privileged=1,
                                                  unprivileged=0,
                                                  favourable=1,
                                                  unfavourable=0)
    average_odds_difference = avg_odds_diff(test,
                                            pred_NNmodel,
                                            'sex',
                                            privileged=1,
                                            unprivileged=0,
                                            favourable=1,
                                            unfavourable=0)

    metrics = [
        metric_test.mean_difference(),
        acc_test.disparate_impact(), equal_opportunity_difference,
        average_odds_difference,
        acc_test.theil_index()
    ]

    return pred_NNmodel, accuracy, metrics
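`AdversarialDebiasing` is built on the TensorFlow 1.x graph/session API, which is why the example creates an explicit `tf.Session` and calls `tf.reset_default_graph()` between runs; under TensorFlow 2.x it is typically accessed through `tf.compat.v1` with eager execution disabled.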
Example #7
def test_theil_index():
    data = np.array([[0, 1], [0, 0], [1, 0], [1, 1], [1, 0], [1, 0], [2, 1],
                     [2, 0], [2, 1], [2, 1]])
    pred = data.copy()
    pred[[3, 9], -1] = 0
    pred[[4, 5], -1] = 1
    df = pd.DataFrame(data, columns=['feat', 'label'])
    df2 = pd.DataFrame(pred, columns=['feat', 'label'])
    bld = BinaryLabelDataset(df=df,
                             label_names=['label'],
                             protected_attribute_names=['feat'])
    bld2 = BinaryLabelDataset(df=df2,
                              label_names=['label'],
                              protected_attribute_names=['feat'])
    cm = ClassificationMetric(bld, bld2)

    assert cm.theil_index() == 4 * np.log(2) / 10
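To see where 4 * log(2) / 10 comes from: the Theil index is the generalized entropy index with alpha = 1 over per-instance benefits b_i = y_pred_i - y_true_i + 1. Six predictions are unchanged (b = 1), two labels flip 1 -> 0 (b = 0), and two flip 0 -> 1 (b = 2), so the mean benefit is mu = 1 and the index is (1/10) * sum((b_i/mu) * ln(b_i/mu)) = (1/10) * 2 * (2 ln 2) = 4 ln 2 / 10, with 0 * ln 0 taken as 0.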
Example #8
def prejudice(train, test, unprivileged_groups, privileged_groups):
    prejudice_model = PrejudiceRemover(eta=100, sensitive_attr='sex')
    prejudice_model.fit(train)

    # predict outcome using the test set
    pred_prejudice = prejudice_model.predict(test)

    # calculate accuracy
    accuracy = accuracy_score(y_true=test.labels, y_pred=pred_prejudice.labels)

    # calculate fairness metrics
    metric_test = BinaryLabelDatasetMetric(
        pred_prejudice,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    acc_test = ClassificationMetric(test,
                                    pred_prejudice,
                                    unprivileged_groups=unprivileged_groups,
                                    privileged_groups=privileged_groups)
    equal_opportunity_difference = equal_opp_diff(test,
                                                  pred_prejudice,
                                                  'sex',
                                                  privileged=1,
                                                  unprivileged=0,
                                                  favourable=1,
                                                  unfavourable=0)
    average_odds_difference = avg_odds_diff(test,
                                            pred_prejudice,
                                            'sex',
                                            privileged=1,
                                            unprivileged=0,
                                            favourable=1,
                                            unfavourable=0)

    disparate_impact = acc_test.disparate_impact()
    if disparate_impact == math.inf:
        # cap an infinite ratio so the metrics list stays finite
        disparate_impact = 5.0

    metrics = [
        metric_test.mean_difference(), disparate_impact,
        equal_opportunity_difference, average_odds_difference,
        acc_test.theil_index()
    ]

    return pred_prejudice, accuracy, metrics
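Disparate impact is a ratio of favorable-outcome rates, so it diverges to infinity when the privileged group receives no favorable outcomes; capping it at an arbitrary finite sentinel (5.0 here) keeps the metrics list numeric, much as Example #4 replaces +/-inf with 2.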
Example #9
def ensemble(test, pred_adversarial, pred_prejudice, pred_nondebiased,
             unprivileged_groups, privileged_groups):
    pred_labels = []
    for i in range(0, len(test.features)):
        arr = mode([
            pred_adversarial.labels[i], pred_prejudice.labels[i],
            pred_nondebiased.labels[i]
        ])
        pred_labels.append(arr[0][0])

    pred_ensemble = test.copy()
    pred_ensemble.labels = np.array(pred_labels)

    accuracy = accuracy_score(y_true=test.labels, y_pred=pred_ensemble.labels)

    metric_test = BinaryLabelDatasetMetric(
        pred_ensemble,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    acc_test = ClassificationMetric(test,
                                    pred_ensemble,
                                    unprivileged_groups=unprivileged_groups,
                                    privileged_groups=privileged_groups)
    equal_opportunity_difference = equal_opp_diff(test,
                                                  pred_ensemble,
                                                  'sex',
                                                  privileged=1,
                                                  unprivileged=0,
                                                  favourable=1,
                                                  unfavourable=0)
    average_odds_difference = avg_odds_diff(test,
                                            pred_ensemble,
                                            'sex',
                                            privileged=1,
                                            unprivileged=0,
                                            favourable=1,
                                            unfavourable=0)

    metrics = [
        metric_test.mean_difference(),
        acc_test.disparate_impact(), equal_opportunity_difference,
        average_odds_difference,
        acc_test.theil_index()
    ]

    return accuracy, metrics
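`scipy.stats.mode` returns a result whose first element holds the modal values, which is what `arr[0][0]` extracts; newer SciPy releases changed the default result shape (the `keepdims` argument), so that indexing may need adjusting there.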
Example #10
def compute_aif_metrics(dataset_true, dataset_pred, unprivileged_groups,
                        privileged_groups, ret_eval_dict=True):

    metrics_cls = ClassificationMetric(dataset_true, dataset_pred,
                                       unprivileged_groups=unprivileged_groups,
                                       privileged_groups=privileged_groups)
    metrics_dict = {}
    metrics_dict["BA"] = 0.5 * (metrics_cls.true_positive_rate() +
                                metrics_cls.true_negative_rate())
    metrics_dict["SPD"] = metrics_cls.statistical_parity_difference()
    metrics_dict["DI"] = metrics_cls.disparate_impact()
    metrics_dict["AOD"] = metrics_cls.average_odds_difference()
    metrics_dict["EOD"] = metrics_cls.equal_opportunity_difference()
    metrics_dict["DFBA"] = metrics_cls.differential_fairness_bias_amplification()
    metrics_dict["TI"] = metrics_cls.theil_index()
    
    if ret_eval_dict:
        return metrics_dict, metrics_cls
    else:
        return metrics_cls
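A hedged call sketch (the dataset names are assumptions):

# Hypothetical usage of compute_aif_metrics:
metrics_dict, metrics_cls = compute_aif_metrics(
    dataset_test, dataset_test_pred,
    unprivileged_groups=[{'race': 0.0}],
    privileged_groups=[{'race': 1.0}])
print(metrics_dict)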
Example #11
        "#### Plain model - without debiasing - classification metrics on test set"
    )
    # print("Test set: Classification accuracy = %f" % classified_metric_nodebiasing_test.accuracy())
    # print("Test set: Balanced classification accuracy = %f" % bal_acc_nodebiasing_test)
    # print("Test set: Statistical parity difference = %f" % classified_metric_nodebiasing_test.statistical_parity_difference())
    # print("Test set: Disparate impact = %f" % classified_metric_nodebiasing_test.disparate_impact())
    # print("Test set: Equal opportunity difference = %f" % classified_metric_nodebiasing_test.equal_opportunity_difference())
    # print("Test set: Average odds difference = %f" % classified_metric_nodebiasing_test.average_odds_difference())
    # print("Test set: Theil index = %f" % classified_metric_nodebiasing_test.theil_index())
    # print("Test set: False negative rate difference = %f" % classified_metric_nodebiasing_test.false_negative_rate_difference())

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    print("metrics: ", metrics)
Example #12
def get_fair_metrics(dataset, pred, pred_is_dataset=False):
    """
    Measure fairness metrics.
    
    Parameters: 
    dataset (pandas dataframe): Dataset
    pred (array): Model predictions
    pred_is_dataset (bool, optional): True if `pred` is already an AIF360 dataset whose labels hold the predictions.
    
    Returns:
    fair_metrics: Fairness metrics.
    """
    if pred_is_dataset:
        dataset_pred = pred
    else:
        dataset_pred = dataset.copy()
        dataset_pred.labels = pred

    cols = [
        'statistical_parity_difference', 'equal_opportunity_difference',
        'average_abs_odds_difference', 'disparate_impact', 'theil_index'
    ]
    obj_fairness = [[0, 0, 0, 1, 0]]

    fair_metrics = pd.DataFrame(data=obj_fairness,
                                index=['objective'],
                                columns=cols)

    for attr in dataset_pred.protected_attribute_names:
        idx = dataset_pred.protected_attribute_names.index(attr)
        privileged_groups = [{attr: dataset_pred.privileged_protected_attributes[idx][0]}]
        unprivileged_groups = [{attr: dataset_pred.unprivileged_protected_attributes[idx][0]}]

        classified_metric = ClassificationMetric(
            dataset,
            dataset_pred,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        metric_pred = BinaryLabelDatasetMetric(
            dataset_pred,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        row = pd.DataFrame([[
            metric_pred.mean_difference(),
            classified_metric.equal_opportunity_difference(),
            classified_metric.average_abs_odds_difference(),
            metric_pred.disparate_impact(),
            classified_metric.theil_index()
        ]],
                           columns=cols,
                           index=[attr])
        # DataFrame.append was removed in pandas 2.0; concat is the replacement
        fair_metrics = pd.concat([fair_metrics, row])

    fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)

    return fair_metrics
Example #13
def fairness_check(s3_url, bucket_name, s3_username, s3_password, training_id):

    cos = boto3.resource("s3",
                         endpoint_url=s3_url,
                         aws_access_key_id=s3_username,
                         aws_secret_access_key=s3_password)

    y_test_out = 'y_test.out'
    p_test_out = 'p_test.out'
    y_pred_out = 'y_pred.out'
    get_s3_item(cos, bucket_name, training_id + '/' + y_test_out, y_test_out)
    get_s3_item(cos, bucket_name, training_id + '/' + p_test_out, p_test_out)
    get_s3_item(cos, bucket_name, training_id + '/' + y_pred_out, y_pred_out)


    """Need to generalize the protected features"""

    unprivileged_groups = [{'race': 4.0}]
    privileged_groups = [{'race': 0.0}]
    favorable_label = 0.0
    unfavorable_label = 1.0

    """Load the necessary labels and protected features for fairness check"""

    y_test = np.loadtxt(y_test_out)
    p_test = np.loadtxt(p_test_out)
    y_pred = np.loadtxt(y_pred_out)

    """Calculate the fairness metrics"""

    original_test_dataset = dataset_wrapper(outcome=y_test, protected=p_test,
                                            unprivileged_groups=unprivileged_groups,
                                            privileged_groups=privileged_groups,
                                            favorable_label=favorable_label,
                                            unfavorable_label=unfavorable_label)
    plain_predictions_test_dataset = dataset_wrapper(outcome=y_pred, protected=p_test,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups,
                                                     favorable_label=favorable_label,
                                                     unfavorable_label=unfavorable_label)

    classified_metric_nodebiasing_test = ClassificationMetric(original_test_dataset,
                                                              plain_predictions_test_dataset,
                                                              unprivileged_groups=unprivileged_groups,
                                                              privileged_groups=privileged_groups)
    TPR = classified_metric_nodebiasing_test.true_positive_rate()
    TNR = classified_metric_nodebiasing_test.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5*(TPR+TNR)

    print("#### Plain model - without debiasing - classification metrics on test set")

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    print("metrics: ", metrics)
    return metrics
Example #14
def comb_algorithm(l, m, n, dataset_original1, privileged_groups1,
                   unprivileged_groups1, optim_options1):

    dataset_original2 = copy.deepcopy(dataset_original1)
    privileged_groups2 = copy.deepcopy(privileged_groups1)
    unprivileged_groups2 = copy.deepcopy(unprivileged_groups1)
    optim_options2 = copy.deepcopy(optim_options1)

    print(l, m, n)
    dataset_original_train, dataset_original_vt = dataset_original2.split(
        [0.7], shuffle=True)
    dataset_original_valid, dataset_original_test = dataset_original_vt.split(
        [0.5], shuffle=True)
    print('=======================')
    dataset_orig_train = copy.deepcopy(dataset_original_train)
    dataset_orig_valid = copy.deepcopy(dataset_original_valid)
    dataset_orig_test = copy.deepcopy(dataset_original_test)

    if l == 0:
        dataset_transfer_train = copy.deepcopy(dataset_original_train)
        dataset_transfer_valid = copy.deepcopy(dataset_original_valid)
        dataset_transfer_test = copy.deepcopy(dataset_original_test)
        #dataset_transf_train, dataset_transf_valid, dataset_transf_test = dataset_orig_train, dataset_orig_valid, dataset_orig_test
    else:
        pre_used = preAlgorithm[l - 1]
        dataset_transfer_train, dataset_transfer_valid, dataset_transfer_test = Pre(
            pre_used, dataset_orig_train, dataset_orig_valid,
            dataset_orig_test, privileged_groups2, unprivileged_groups2,
            optim_options2)

    dataset_transf_train = copy.deepcopy(dataset_transfer_train)
    dataset_transf_valid = copy.deepcopy(dataset_transfer_valid)
    dataset_transf_test = copy.deepcopy(dataset_transfer_test)
    if m == 0:
        dataset_transfer_valid_pred, dataset_transfer_test_pred = plain_model(
            dataset_transf_train, dataset_transf_valid, dataset_transf_test,
            privileged_groups2, unprivileged_groups2)
    else:
        in_used = inAlgorithm[m - 1]
        if in_used == "adversarial_debiasing":
            dataset_transfer_valid_pred, dataset_transfer_test_pred = adversarial_debiasing(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "art_classifier":
            dataset_transfer_valid_pred, dataset_transfer_test_pred = art_classifier(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "prejudice_remover":
            for key, value in privileged_groups2[0].items():
                sens_attr = key
            dataset_transfer_valid_pred, dataset_transfer_test_pred = prejudice_remover(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2,
                sens_attr)

    dataset_transf_valid_pred = copy.deepcopy(dataset_transfer_valid_pred)
    dataset_transf_test_pred = copy.deepcopy(dataset_transfer_test_pred)
    if n == 0:
        dataset_transf_test_pred_transf = copy.deepcopy(
            dataset_transfer_test_pred)

    else:
        post_used = postAlgorithm[n - 1]
        if post_used == "calibrated_eqodds":
            cpp = CalibratedEqOddsPostprocessing(
                privileged_groups=privileged_groups2,
                unprivileged_groups=unprivileged_groups2,
                cost_constraint=cost_constraint)
            cpp = cpp.fit(dataset_transfer_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = cpp.predict(
                dataset_transf_test_pred)

        elif post_used == "eqodds":
            EO = EqOddsPostprocessing(unprivileged_groups=unprivileged_groups2,
                                      privileged_groups=privileged_groups2)
            EO = EO.fit(dataset_transfer_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = EO.predict(
                dataset_transf_test_pred)

        elif post_used == "reject_option":
            #dataset_transf_test_pred_transf = reject_option(dataset_transf_valid, dataset_transf_valid_pred, dataset_transf_test, dataset_transf_test_pred, privileged_groups2, unprivileged_groups2)

            ROC = RejectOptionClassification(
                unprivileged_groups=unprivileged_groups2,
                privileged_groups=privileged_groups2)
            ROC = ROC.fit(dataset_transfer_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = ROC.predict(
                dataset_transf_test_pred)

    org_labels = dataset_orig_test.labels
    print(dataset_orig_test.labels)
    pred_labels = dataset_transf_test_pred.labels
    print(dataset_transf_test_pred.labels)

    true_pred = org_labels == pred_labels
    # labels are an (n, 1) column, so accuracy divides by the number of rows
    print("acc after in: ", float(np.sum(true_pred)) / pred_labels.shape[0])

    metric = ClassificationMetric(dataset_transfer_test,
                                  dataset_transf_test_pred_transf,
                                  unprivileged_groups=unprivileged_groups2,
                                  privileged_groups=privileged_groups2)

    metrics = OrderedDict()
    metrics["Classification accuracy"] = metric.accuracy()
    TPR = metric.true_positive_rate()
    TNR = metric.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)
    metrics["Balanced classification accuracy"] = bal_acc_nodebiasing_test
    metrics["Statistical parity difference"] = metric.statistical_parity_difference()
    metrics["Disparate impact"] = metric.disparate_impact()
    metrics["Equal opportunity difference"] = metric.equal_opportunity_difference()
    metrics["Average odds difference"] = metric.average_odds_difference()
    metrics["Theil index"] = metric.theil_index()
    metrics["United Fairness"] = metric.generalized_entropy_index()

    feature = []
    feature_str = "["
    for m in metrics:
        data = round(metrics[m], 4)
        feature.append(data)
        feature_str = feature_str + str(data) + " "
    feature_str = feature_str + "]"

    return feature, feature_str
Example #15
def fairness_check(label_dir, model_dir):
    """Need to generalize the protected features"""

    # races_to_consider = [0,4]
    unprivileged_groups = [{'race': 4.0}]
    privileged_groups = [{'race': 0.0}]
    favorable_label = 0.0
    unfavorable_label = 1.0

    """Load the necessary labels and protected features for fairness check"""

    # y_train = np.loadtxt(label_dir + '/y_train.out')
    # p_train = np.loadtxt(label_dir + '/p_train.out')
    y_test = np.loadtxt(label_dir + '/y_test.out')
    p_test = np.loadtxt(label_dir + '/p_test.out')
    y_pred = np.loadtxt(label_dir + '/y_pred.out')

    """Calculate the fairness metrics"""

    # original_traning_dataset = dataset_wrapper(outcome=y_train, protected=p_train,
    #                                            unprivileged_groups=unprivileged_groups,
    #                                            privileged_groups=privileged_groups,
    #                                            favorable_label=favorable_label,
    #                                            unfavorable_label=unfavorable_label)
    original_test_dataset = dataset_wrapper(outcome=y_test, protected=p_test,
                                            unprivileged_groups=unprivileged_groups,
                                            privileged_groups=privileged_groups,
                                            favorable_label=favorable_label,
                                            unfavorable_label=unfavorable_label)
    plain_predictions_test_dataset = dataset_wrapper(outcome=y_pred, protected=p_test,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups,
                                                     favorable_label=favorable_label,
                                                     unfavorable_label=unfavorable_label)

    classified_metric_nodebiasing_test = ClassificationMetric(original_test_dataset,
                                                              plain_predictions_test_dataset,
                                                              unprivileged_groups=unprivileged_groups,
                                                              privileged_groups=privileged_groups)
    TPR = classified_metric_nodebiasing_test.true_positive_rate()
    TNR = classified_metric_nodebiasing_test.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5*(TPR+TNR)

    print("#### Plain model - without debiasing - classification metrics on test set")
    # print("Test set: Classification accuracy = %f" % classified_metric_nodebiasing_test.accuracy())
    # print("Test set: Balanced classification accuracy = %f" % bal_acc_nodebiasing_test)
    # print("Test set: Statistical parity difference = %f" % classified_metric_nodebiasing_test.statistical_parity_difference())
    # print("Test set: Disparate impact = %f" % classified_metric_nodebiasing_test.disparate_impact())
    # print("Test set: Equal opportunity difference = %f" % classified_metric_nodebiasing_test.equal_opportunity_difference())
    # print("Test set: Average odds difference = %f" % classified_metric_nodebiasing_test.average_odds_difference())
    # print("Test set: Theil index = %f" % classified_metric_nodebiasing_test.theil_index())
    # print("Test set: False negative rate difference = %f" % classified_metric_nodebiasing_test.false_negative_rate_difference())

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    return {"metrics": metrics}
Example #16
classification_metric = ClassificationMetric(
    dataset_ground_truth,
    dataset_classifier,
    unprivileged_groups=unprivileged_groups,
    privileged_groups=privileged_groups)

TPR = classification_metric.true_positive_rate()
TNR = classification_metric.true_negative_rate()
bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)

metrics = {
    "classification_accuracy": classification_metric.accuracy(),
    "balanced_classification_accuracy": bal_acc_nodebiasing_test,
    "statistical_parity_difference": classification_metric.statistical_parity_difference(),
    "disparate_impact": classification_metric.disparate_impact(),
    "equal_opportunity_difference": classification_metric.equal_opportunity_difference(),
    "average_odds_difference": classification_metric.average_odds_difference(),
    "theil_index": classification_metric.theil_index(),
    "false_negative_rate_difference": classification_metric.false_negative_rate_difference()
}

sys.stdout.write(json.dumps(metrics))
Example #17
def fairness_check(object_storage_url,
                   object_storage_username,
                   object_storage_password,
                   data_bucket_name,
                   result_bucket_name,
                   model_id,
                   feature_testset_path='processed_data/X_test.npy',
                   label_testset_path='processed_data/y_test.npy',
                   protected_label_testset_path='processed_data/p_test.npy',
                   model_class_file='model.py',
                   model_class_name='model',
                   favorable_label=0.0,
                   unfavorable_label=1.0,
                   privileged_groups=[{
                       'race': 0.0
                   }],
                   unprivileged_groups=[{
                       'race': 4.0
                   }]):

    url = re.compile(r"https?://")
    cos = Minio(url.sub('', object_storage_url),
                access_key=object_storage_username,
                secret_key=object_storage_password,
                secure=False)  # Local Minio server won't have HTTPS

    dataset_filenamex = "X_test.npy"
    dataset_filenamey = "y_test.npy"
    dataset_filenamep = "p_test.npy"
    weights_filename = "model.pt"
    model_files = model_id + '/_submitted_code/model.zip'

    cos.fget_object(data_bucket_name, feature_testset_path, dataset_filenamex)
    cos.fget_object(data_bucket_name, label_testset_path, dataset_filenamey)
    cos.fget_object(data_bucket_name, protected_label_testset_path,
                    dataset_filenamep)
    cos.fget_object(result_bucket_name, model_id + '/' + weights_filename,
                    weights_filename)
    cos.fget_object(result_bucket_name, model_files, 'model.zip')

    # Load PyTorch model definition from the source code.
    zip_ref = zipfile.ZipFile('model.zip', 'r')
    zip_ref.extractall('model_files')
    zip_ref.close()

    modulename = 'model_files.' + model_class_file.split('.')[0].replace(
        '-', '_')
    '''
    We require users to specify where the model class is located, or to
    follow the naming convention we provide.
    '''
    model_class = getattr(importlib.import_module(modulename),
                          model_class_name)

    # load & compile model
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = model_class().to(device)
    model.load_state_dict(torch.load(weights_filename, map_location=device))
    """Load the necessary labels and protected features for fairness check"""

    x_test = np.load(dataset_filenamex)
    y_test = np.load(dataset_filenamey)
    p_test = np.load(dataset_filenamep)

    _, y_pred = evaluate(model, x_test, y_test)
    """Calculate the fairness metrics"""

    original_test_dataset = dataset_wrapper(
        outcome=y_test,
        protected=p_test,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label)
    plain_predictions_test_dataset = dataset_wrapper(
        outcome=y_pred,
        protected=p_test,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label)

    classified_metric_nodebiasing_test = ClassificationMetric(
        original_test_dataset,
        plain_predictions_test_dataset,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    TPR = classified_metric_nodebiasing_test.true_positive_rate()
    TNR = classified_metric_nodebiasing_test.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)

    print(
        "#### Plain model - without debiasing - classification metrics on test set"
    )

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    print("metrics: ", metrics)
    return metrics
Example #18
def comb_algorithm(l, m, n, dataset_original1, privileged_groups1,
                   unprivileged_groups1, optim_options1):

    dataset_original2 = copy.deepcopy(dataset_original1)
    privileged_groups2 = copy.deepcopy(privileged_groups1)
    unprivileged_groups2 = copy.deepcopy(unprivileged_groups1)
    optim_options2 = copy.deepcopy(optim_options1)

    print(l, m, n)
    dataset_orig_train, dataset_orig_vt = dataset_original2.split([0.7],
                                                                  shuffle=True)
    dataset_orig_valid, dataset_orig_test = dataset_orig_vt.split([0.5],
                                                                  shuffle=True)

    if l == 0:
        dataset_transf_train, dataset_transf_valid, dataset_transf_test = dataset_orig_train, dataset_orig_valid, dataset_orig_test
    else:
        pre_used = preAlgorithm[l - 1]
        dataset_transf_train, dataset_transf_valid, dataset_transf_test = Pre(
            pre_used, dataset_orig_train, dataset_orig_valid,
            dataset_orig_test, privileged_groups2, unprivileged_groups2,
            optim_options2)

    #assert (l,m,n)!=(2,0,0)
    #assert not np.all(dataset_transf_train.labels.flatten()==1.0)

    if m == 0:
        dataset_transf_valid_pred, dataset_transf_test_pred = train(
            dataset_transf_train, dataset_transf_valid, dataset_transf_test,
            privileged_groups2, unprivileged_groups2)
    else:
        in_used = inAlgorithm[m - 1]
        if in_used == "adversarial_debiasing":
            dataset_transf_valid_pred, dataset_transf_test_pred = adversarial_debiasing(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "art_classifier":
            dataset_transf_valid_pred, dataset_transf_test_pred = art_classifier(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "prejudice_remover":
            for key, value in privileged_groups2[0].items():
                sens_attr = key
            dataset_transf_valid_pred, dataset_transf_test_pred = prejudice_remover(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2,
                sens_attr)

    if n == 0:
        dataset_transf_test_pred_transf = dataset_transf_test_pred

    else:
        post_used = postAlgorithm[n - 1]
        if post_used == "calibrated_eqodds":
            cpp = CalibratedEqOddsPostprocessing(
                privileged_groups=privileged_groups2,
                unprivileged_groups=unprivileged_groups2,
                cost_constraint=cost_constraint,
                seed=1)
            cpp = cpp.fit(dataset_transf_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = cpp.predict(
                dataset_transf_test_pred)

        elif post_used == "eqodds":
            EO = EqOddsPostprocessing(unprivileged_groups=unprivileged_groups2,
                                      privileged_groups=privileged_groups2,
                                      seed=1)
            EO = EO.fit(dataset_transf_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = EO.predict(
                dataset_transf_test_pred)

        elif post_used == "reject_option":
            ROC = RejectOptionClassification(
                unprivileged_groups=unprivileged_groups2,
                privileged_groups=privileged_groups2,
                low_class_thresh=0.01,
                high_class_thresh=0.99,
                num_class_thresh=100,
                num_ROC_margin=50,
                metric_name=allowed_metrics[0],
                metric_ub=metric_ub,
                metric_lb=metric_lb)
            ROC = ROC.fit(dataset_transf_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = ROC.predict(
                dataset_transf_test_pred)

    metric = ClassificationMetric(dataset_transf_test,
                                  dataset_transf_test_pred_transf,
                                  unprivileged_groups=unprivileged_groups2,
                                  privileged_groups=privileged_groups2)

    metrics = OrderedDict()
    metrics["Classification accuracy"] = metric.accuracy()
    TPR = metric.true_positive_rate()
    TNR = metric.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)
    metrics["Balanced classification accuracy"] = bal_acc_nodebiasing_test
    metrics["Statistical parity difference"] = metric.statistical_parity_difference()
    metrics["Disparate impact"] = metric.disparate_impact()
    metrics["Equal opportunity difference"] = metric.equal_opportunity_difference()
    metrics["Average odds difference"] = metric.average_odds_difference()
    metrics["Theil index"] = metric.theil_index()
    metrics["United Fairness"] = metric.generalized_entropy_index()
    # print(metrics)

    feature = "["
    for m in metrics:
        feature = feature + " " + str(round(metrics[m], 4))
    feature = feature + "]"

    return feature
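A driver that sweeps every (pre-, in-, post-processing) combination might look like the sketch below; `preAlgorithm`, `inAlgorithm`, and `postAlgorithm` are the module-level lists the function already references, and the remaining names are assumptions. Index 0 at any stage means "skip that stage", matching the checks inside comb_algorithm.

# Hypothetical driver loop (dataset and option names are assumptions):
for l in range(len(preAlgorithm) + 1):
    for m in range(len(inAlgorithm) + 1):
        for n in range(len(postAlgorithm) + 1):
            feature = comb_algorithm(l, m, n, dataset_orig,
                                     privileged_groups,
                                     unprivileged_groups, optim_options)
            print(l, m, n, feature)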