Code example #1
def equal_opportunity(random_data, predicted_data, target_variable, protected_variable, unprivileged_input):
    random_data['Pred'] = np.random.binomial(1, .5, 1000)  # random baseline predictions; assumes random_data has exactly 1000 rows
    dataset = BinaryLabelDataset(df=random_data, label_names=[target_variable], protected_attribute_names=[protected_variable])
    classified_dataset = BinaryLabelDataset(df=predicted_data, label_names=[target_variable], protected_attribute_names=[protected_variable])
    privileged_group = []
    for v in predicted_data[protected_variable].unique()[predicted_data[protected_variable].unique() != unprivileged_input]:
        privileged_group.append({protected_variable: v})
    unprivileged_group = [{protected_variable: unprivileged_input}] #female=0
    metric = ClassificationMetric(dataset, classified_dataset, unprivileged_group, privileged_group)
    print(metric.equal_opportunity_difference())
    if abs(metric.equal_opportunity_difference().round(3)) < 0.2:
        print('The algorithm can be considered unbiased')
    else:
        print('There is a potential bias')
Code example #2
def get_metric_reports(true_dataset, classified_dataset, privileged_groups, unprivileged_groups):

	# copy the classified dataset but restore the true labels, so the metric
	# compares ground truth against predictions
	mirror_dataset=classified_dataset.copy(deepcopy=True)
	mirror_dataset.labels=copy.deepcopy(true_dataset.labels)

	metric=ClassificationMetric(
		dataset=mirror_dataset,
		classified_dataset=classified_dataset,
		unprivileged_groups=unprivileged_groups,
		privileged_groups=privileged_groups)
	# measuring unfairness: collect the individual metrics below
	
	report=OrderedDict()
	report['TPR']=metric.true_positive_rate()
	report['TNR']=metric.true_negative_rate()
	report['FPR']=metric.false_positive_rate()
	report['FNR']=metric.false_negative_rate()
	report['Balanced_Acc']=0.5*(report['TPR']+report['TNR'])
	report['Acc']=metric.accuracy()
	report["Statistical parity difference"]=metric.statistical_parity_difference()
	report["Disparate impact"]=metric.disparate_impact()
	report["Equal opportunity difference"]=metric.equal_opportunity_difference()
	report["Average odds difference"]=metric.average_odds_difference()
	report["Theil index"]=metric.theil_index()
	report["United Fairness"]=metric.generalized_entropy_index()

	return report
Code example #3
def compute_metrics(dataset_true,
                    dataset_pred,
                    unprivileged_groups,
                    privileged_groups,
                    disp=True):
    """ Compute the key metrics """
    classified_metric_pred = ClassificationMetric(
        dataset_true,
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    metrics = OrderedDict()
    metrics["Balanced accuracy"] = 0.5 * (classified_metric_pred.true_positive_rate() +
                                          classified_metric_pred.true_negative_rate())
    metrics["Statistical parity difference"] = classified_metric_pred.statistical_parity_difference()
    metrics["Disparate impact"] = classified_metric_pred.disparate_impact()
    metrics["Average odds difference"] = classified_metric_pred.average_odds_difference()
    metrics["Equal opportunity difference"] = classified_metric_pred.equal_opportunity_difference()
    metrics["Theil index"] = classified_metric_pred.theil_index()

    if disp:
        for k in metrics:
            print("%s = %.4f" % (k, metrics[k]))

    return metrics
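For context, a minimal sketch of how compute_metrics might be driven. It assumes dataset_test is an existing AIF360 BinaryLabelDataset, clf is a fitted scikit-learn classifier, and 'sex' (privileged value 1) is the protected attribute; all of these names are assumptions, not part of the snippet above.

# Sketch under the stated assumptions.
dataset_pred = dataset_test.copy(deepcopy=True)
dataset_pred.labels = clf.predict(dataset_test.features).reshape(-1, 1)

metrics = compute_metrics(dataset_test, dataset_pred,
                          unprivileged_groups=[{'sex': 0}],
                          privileged_groups=[{'sex': 1}],
                          disp=True)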
Code example #4
def get_classifier_metrics(test_list, prediction_list):
    privileged_groups = [{'sex': 1}]
    unprivileged_groups = [{'sex': 0}]
    acc_list = []
    bal_acc_list = []
    avg_odds_list = []
    recall_diff_list = []
    precision_diff_list = []
    for test_, pred_ in zip(test_list, prediction_list):
        model_metric = ClassificationMetric(
            test_,
            pred_,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        acc_list.append(model_metric.accuracy().round(3))
        bal_acc_list.append(((model_metric.true_positive_rate() +
                              model_metric.true_negative_rate()) / 2).round(3))
        avg_odds_list.append(model_metric.average_odds_difference().round(3))
        recall_diff_list.append(
            model_metric.equal_opportunity_difference().round(3))
        precision_diff_list.append(
            (model_metric.precision(privileged=False) -
             model_metric.precision(privileged=True)).round(3))
    return acc_list, bal_acc_list, avg_odds_list, recall_diff_list, precision_diff_list
Code example #5
def show_classifier_metrics(test_list, prediction_list):
    privileged_groups = [{'sex': 1}]
    unprivileged_groups = [{'sex': 0}]

    counter = 1
    for test_, pred_ in zip(test_list, prediction_list):

        display(Markdown("#### Model {}  dataset metrics".format(counter)))

        model_metric = ClassificationMetric(
            test_,
            pred_,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        ex_model_metric = MetricTextExplainer(model_metric)
        print(ex_model_metric.average_odds_difference())

        print(
            'Difference in Recall between Unprivileged and Privileged: {:.3f}'.
            format(model_metric.equal_opportunity_difference()))

        print(
            'Difference in Precision between Unprivileged and Privileged: {:.3f}.'
            .format(
                model_metric.precision(privileged=False) -
                model_metric.precision(privileged=True)))
        counter += 1
Code example #6
def calculate_bias_measures(data_orig_train, data_orig_vt, unprivileged_groups,
                            privileged_groups):
    model = RandomForestClassifier().fit(
        data_orig_train.features,
        data_orig_train.labels.ravel(),
        sample_weight=data_orig_train.instance_weights)
    dataset = data_orig_vt
    dataset_pred = dataset.copy()
    dataset_pred.labels = model.predict(data_orig_vt.features)
    classified_metric_race = ClassificationMetric(
        dataset,
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    metric_pred_race = BinaryLabelDatasetMetric(
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    print("Mean difference {}".format(metric_pred_race.mean_difference()))
    print("Disparate impact {}".format(metric_pred_race.disparate_impact()))
    print("Equal Opportunity Difference {}".format(
        classified_metric_race.equal_opportunity_difference()))
    print("Average Abs Odds Difference {}".format(
        classified_metric_race.average_abs_odds_difference()))
    print("Theil index {}".format(classified_metric_race.theil_index()))
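A brief sketch of how calculate_bias_measures might be invoked, assuming dataset_orig is an AIF360 dataset (for example aif360.datasets.AdultDataset()) with 'sex' as the protected attribute and that RandomForestClassifier is imported in the surrounding module; these are illustrative assumptions.

# Sketch under the stated assumptions.
train, vt = dataset_orig.split([0.7], shuffle=True)
calculate_bias_measures(train, vt,
                        unprivileged_groups=[{'sex': 0}],
                        privileged_groups=[{'sex': 1}])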
Code example #7
def equal_ops_values(random_data, predicted_data, target_variable, protected_variable, unprivileged_input):
    random_data['Pred'] = np.random.binomial(1, .5, 1000)  # random baseline predictions; assumes random_data has exactly 1000 rows
    dataset = BinaryLabelDataset(df=random_data, label_names=[target_variable], protected_attribute_names=[protected_variable])
    classified_dataset = BinaryLabelDataset(df=predicted_data, label_names=[target_variable], protected_attribute_names=[protected_variable])
    privileged_group = []
    for v in predicted_data[protected_variable].unique()[predicted_data[protected_variable].unique() != unprivileged_input]:
        privileged_group.append({protected_variable: v})
    unprivileged_group = [{protected_variable: unprivileged_input}] #female=0
    metric = ClassificationMetric(dataset, classified_dataset, unprivileged_group, privileged_group)
    return abs(metric.equal_opportunity_difference())
Code example #8
def fair_metrics(dataset, pred, pred_is_dataset=False):
    if pred_is_dataset:
        dataset_pred = pred
    else:
        dataset_pred = dataset.copy()
        dataset_pred.labels = pred

    cols = [
        'statistical_parity_difference', 'equal_opportunity_difference',
        'average_abs_odds_difference', 'disparate_impact', 'theil_index'
    ]
    obj_fairness = [[0, 0, 0, 1, 0]]  # ideal values per metric: 0 for the differences and Theil index, 1 for disparate impact

    fair_metrics = pd.DataFrame(data=obj_fairness,
                                index=['objective'],
                                columns=cols)

    for attr in dataset_pred.protected_attribute_names:
        idx = dataset_pred.protected_attribute_names.index(attr)
        privileged_groups = [{
            attr:
            dataset_pred.privileged_protected_attributes[idx][0]
        }]
        unprivileged_groups = [{
            attr:
            dataset_pred.unprivileged_protected_attributes[idx][0]
        }]

        classified_metric = ClassificationMetric(
            dataset,
            dataset_pred,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        metric_pred = BinaryLabelDatasetMetric(
            dataset_pred,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        acc = classified_metric.accuracy()

        row = pd.DataFrame([[
            metric_pred.mean_difference(),
            classified_metric.equal_opportunity_difference(),
            classified_metric.average_abs_odds_difference(),
            metric_pred.disparate_impact(),
            classified_metric.theil_index()
        ]],
                           columns=cols,
                           index=[attr])
        fair_metrics = pd.concat([fair_metrics, row])

    fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)  # cap infinite ratios at 2

    return fair_metrics
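A hedged usage sketch, assuming dataset is an AIF360 BinaryLabelDataset with ground-truth labels and y_pred is a 1-D numpy array of predicted labels for the same rows (both names are assumptions). When pred_is_dataset=True, the second argument is instead a BinaryLabelDataset already carrying the predicted labels.

# Sketch under the stated assumptions.
report = fair_metrics(dataset, y_pred.reshape(-1, 1))
print(report)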
Code example #9
def fit_classifier(classifier, weights, lambda_values, X_train, y_train, X_test, y_test, test_pred):
    '''
    Function to fit classifiers for range of Lambda values
    
    Args:
        classifier: SVM or Logistic regression
        weights: weights for each sample
        lambda_values: range of lambda values to assess
        X_train: training data
        y_train: training labels
        X_test: test data
        y_test: test labels
        test_pred: prepared format to store predictions

    Returns: 
        accuracy_list: test accuracy for each model
        equal_opp_list: Equal Opportunity difference for each model
        stat_parity_list: Statistical Parity difference for each model
    '''

    accuracy_list = []
    equal_opp_list = []
    stat_parity_list = []

    for l in lambda_values:
        print("-------- \n", 'Lambda: ', "{0:.2f}".format(l))
        if classifier == "Logistic Regression":
            learner = LogisticRegression(solver='liblinear', random_state=1, penalty='l2', C=1/l)  
        else:
            learner = svm.SVC(C=1/l)  
        learner.fit(X_train,y_train, sample_weight=weights)
        test_pred.labels = learner.predict(X_test)
        # `test` (the ground-truth BinaryLabelDataset), `unprivileged_groups` and
        # `privileged_groups` are assumed to be defined in the enclosing scope.
        metric = ClassificationMetric(test, test_pred, unprivileged_groups=unprivileged_groups,
                                      privileged_groups=privileged_groups)
        print("Equal opportunity:", "{0:.3f}".format(metric.equal_opportunity_difference()))
        print("Statistical parity:", "{0:.3f}".format(metric.statistical_parity_difference()))
        print("Accuracy:", "{0:.3f}".format(metric.accuracy()))
        accuracy_list.append(metric.accuracy())
        equal_opp_list.append(metric.equal_opportunity_difference())
        stat_parity_list.append(metric.statistical_parity_difference())

    return accuracy_list, equal_opp_list, stat_parity_list
Code example #10
def test(dataset, model, x_test, thresh_arr, unprivileged_groups,
         privileged_groups):

    bld = BinaryLabelDataset(df=dataset,
                             label_names=['labels'],
                             protected_attribute_names=['age'])

    # `k`, `model_AIF`, `y_test` and `A_test` are assumed to be defined in the enclosing scope.
    if np.isin(k, model_AIF):
        y_val_pred_prob = model.predict_proba(bld)
    else:
        y_val_pred_prob, A_val_pred_prob = model.predict_proba(x_test)

    metric_arrs = np.empty([0, 8])
    for thresh in thresh_arr:
        if np.isin(k, model_AIF):
            y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
        else:
            y_val_pred = (y_val_pred_prob.numpy() > thresh).astype(np.float64)

        metric_arrs = np.append(metric_arrs,
                                roc_auc_score(y_test, y_val_pred_prob))

        if np.isin(k, model_AIF):
            metric_arrs = np.append(metric_arrs, 0)
        else:
            metric_arrs = np.append(metric_arrs,
                                    roc_auc_score(A_test, A_val_pred_prob))

        dataset_pred = dataset.copy()
        dataset_pred.labels = y_val_pred
        bld2 = BinaryLabelDataset(df=dataset_pred,
                                  label_names=['labels'],
                                  protected_attribute_names=['age'])

        metric = ClassificationMetric(bld,
                                      bld2,
                                      unprivileged_groups=unprivileged_groups,
                                      privileged_groups=privileged_groups)

        metric_arrs = np.append(
            metric_arrs,
            ((metric.true_positive_rate() + metric.true_negative_rate()) / 2))
        metric_arrs = np.append(metric_arrs, metric.average_odds_difference())
        metric_arrs = np.append(metric_arrs, metric.disparate_impact())
        metric_arrs = np.append(metric_arrs,
                                metric.statistical_parity_difference())
        metric_arrs = np.append(metric_arrs,
                                metric.equal_opportunity_difference())
        metric_arrs = np.append(metric_arrs, metric.theil_index())

    return metric_arrs
Code example #11
        def get_cm_metrics():
            # X, df, ypred_class, dataset, privileged_group and unprivileged_group
            # come from the enclosing scope.
            df_pred = X.copy()
            df_pred[df.columns[-1]] = np.expand_dims(ypred_class, axis=1)

            dataset_pred = BinaryLabelDataset(df=df_pred, label_names=[
                'action_taken_name'], protected_attribute_names=['applicant_sex_name_Female'])

            metric_CM = ClassificationMetric(
                dataset, dataset_pred, privileged_groups=privileged_group, unprivileged_groups=unprivileged_group)

            return {
                "Equal Opportunity Difference":   metric_CM.equal_opportunity_difference(),
                'Average Odds Difference': metric_CM.average_odds_difference(),
                "Accuracy Male": metric_CM.accuracy(privileged=True),
                "Accuracy Female":  metric_CM.accuracy(privileged=False)
            }
Code example #12
    def metrics_form(y_val_pred_prob, y_test, A_prob, A_test, bld, dataset):

        metric_arrs = np.empty([0, 8])

        # `k`, `model_AIF`, `thresh`, `protected` and the group definitions are
        # assumed to come from the enclosing scope.
        if np.isin(k, model_AIF):
            y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
        else:
            y_val_pred = (y_val_pred_prob > thresh).astype(np.float64)
            A_pred = (A_prob > thresh).astype(np.float64)

        metric_arrs = np.append(metric_arrs,
                                roc_auc_score(y_test, y_val_pred_prob))
        print("y {}".format(roc_auc_score(y_test, y_val_pred_prob)))
        metric_arrs = np.append(metric_arrs,
                                accuracy_score(y_test, y_val_pred))

        if np.isin(k, model_AIF):
            metric_arrs = np.append(metric_arrs, 0)
        else:
            metric_arrs = np.append(metric_arrs, roc_auc_score(A_test, A_prob))
            print("A {}".format(roc_auc_score(A_test, A_prob)))

        dataset_pred = dataset.copy()
        dataset_pred.labels = y_val_pred

        bld2 = BinaryLabelDataset(df=dataset_pred,
                                  label_names=['labels'],
                                  protected_attribute_names=protected)

        metric = ClassificationMetric(bld,
                                      bld2,
                                      unprivileged_groups=unprivileged_groups,
                                      privileged_groups=privileged_groups)

        metric_arrs = np.append(
            metric_arrs,
            ((metric.true_positive_rate() + metric.true_negative_rate()) / 2))
        metric_arrs = np.append(metric_arrs,
                                np.abs(metric.average_odds_difference()))
        metric_arrs = np.append(metric_arrs, metric.disparate_impact())
        metric_arrs = np.append(metric_arrs,
                                np.abs(metric.statistical_parity_difference()))
        metric_arrs = np.append(metric_arrs,
                                np.abs(metric.equal_opportunity_difference()))

        return metric_arrs
Code example #13
def fairness_IBM(y_pred, Ztr, ytr, verbose=0):
    from aif360.datasets import BinaryLabelDataset
    from aif360.metrics import ClassificationMetric

    assert np.array_equal(np.unique(Ztr),
                          np.array([0, 1])), "Z must contain exactly the values 0 and 1"
    # if len(ytr.shape) == 1:
    # ytr = np.expand_dims(ytr, -1)

    Ztr = np.squeeze(Ztr)
    if verbose:
        print(ytr.shape)
        print(Ztr.shape)
    unprivileged_groups = [{"zs": [0]}]
    privileged_groups = [{"zs": [1]}]
    metric_arrs = defaultdict(list)
    dict_ = {"y_true": ytr, "zs": Ztr}
    df = pd.DataFrame(dict_)
    dataset = BinaryLabelDataset(df=df,
                                 label_names=["y_true"],
                                 protected_attribute_names=["zs"],
                                 unprivileged_protected_attributes=[[0]],
                                 privileged_protected_attributes=[[1]])

    dataset_pred = dataset.copy()
    dataset_pred.labels = y_pred
    metric = ClassificationMetric(dataset,
                                  dataset_pred,
                                  unprivileged_groups=unprivileged_groups,
                                  privileged_groups=privileged_groups)

    # metric_arrs['bal_acc'].append((metric.true_positive_rate()
    #                              + metric.true_negative_rate()) / 2)
    metric_arrs["EA"].append(
        metric.accuracy(privileged=False) - metric.accuracy(privileged=True))
    # ASSUMING ALL OTHER METRICS RETURN U - P
    metric_arrs['EO'].append(metric.average_odds_difference())
    # The ideal value of this metric is 1.0.
    # A value < 1 implies higher benefit for the privileged group
    # and a value > 1 implies a higher benefit for the unprivileged group.
    metric_arrs['DI'].append(metric.disparate_impact() - 1)
    metric_arrs['DP'].append(metric.statistical_parity_difference())
    metric_arrs['EQ'].append(metric.equal_opportunity_difference())
    metric_arrs['TH'].append(metric.between_group_theil_index() * 10)
    results = pd.DataFrame(metric_arrs)
    return results
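A minimal synthetic-data sketch of how fairness_IBM might be exercised; the array sizes, the alternating protected attribute, and the random labels are purely illustrative assumptions, and the module-level imports the snippet relies on (numpy, pandas, collections.defaultdict) are assumed present.

# Sketch under the stated assumptions.
import numpy as np

rng = np.random.default_rng(0)
z = np.tile([0, 1], 250)                   # binary protected attribute, both values present
y_true = rng.integers(0, 2, size=500)      # ground-truth labels
y_pred = rng.integers(0, 2, size=500)      # stand-in model predictions

print(fairness_IBM(y_pred.reshape(-1, 1), z, y_true, verbose=1))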
Code example #14
def compute_aif_metrics(dataset_true, dataset_pred, unprivileged_groups, privileged_groups,\
                        ret_eval_dict=True):

    metrics_cls = ClassificationMetric(dataset_true, dataset_pred, 
                                                 unprivileged_groups=unprivileged_groups,
                                                 privileged_groups=privileged_groups)
    metrics_dict = {}
    metrics_dict["BA"] = 0.5*(metrics_cls.true_positive_rate()+
                                             metrics_cls.true_negative_rate())
    metrics_dict["SPD"] = metrics_cls.statistical_parity_difference()
    metrics_dict["DI"] = metrics_cls.disparate_impact()
    metrics_dict["AOD"] = metrics_cls.average_odds_difference()
    metrics_dict["EOD"] = metrics_cls.equal_opportunity_difference()
    metrics_dict["DFBA"] = metrics_cls.differential_fairness_bias_amplification()
    metrics_dict["TI"] = metrics_cls.theil_index()
    
    if ret_eval_dict:
        return metrics_dict, metrics_cls
    else:
        return metrics_cls
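A short usage sketch, assuming dataset_test and dataset_pred are compatible AIF360 BinaryLabelDataset objects (same instances, labels differing only by the model's predictions) and 'race' is the protected attribute; these names are assumptions.

# Sketch under the stated assumptions.
metrics_dict, metrics_cls = compute_aif_metrics(
    dataset_test, dataset_pred,
    unprivileged_groups=[{'race': 0}],
    privileged_groups=[{'race': 1}])
print(metrics_dict["EOD"])  # equal opportunity difference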
Code example #15
def fairness_check(s3_url, bucket_name, s3_username, s3_password, training_id):

    cos = boto3.resource("s3",
                         endpoint_url=s3_url,
                         aws_access_key_id=s3_username,
                         aws_secret_access_key=s3_password)

    y_test_out = 'y_test.out'
    p_test_out = 'p_test.out'
    y_pred_out = 'y_pred.out'
    get_s3_item(cos, bucket_name, training_id + '/' + y_test_out, y_test_out)
    get_s3_item(cos, bucket_name, training_id + '/' + p_test_out, p_test_out)
    get_s3_item(cos, bucket_name, training_id + '/' + y_pred_out, y_pred_out)


    """Need to generalize the protected features"""

    unprivileged_groups = [{'race': 4.0}]
    privileged_groups = [{'race': 0.0}]
    favorable_label = 0.0
    unfavorable_label = 1.0

    """Load the necessary labels and protected features for fairness check"""

    y_test = np.loadtxt(y_test_out)
    p_test = np.loadtxt(p_test_out)
    y_pred = np.loadtxt(y_pred_out)

    """Calculate the fairness metrics"""

    original_test_dataset = dataset_wrapper(outcome=y_test, protected=p_test,
                                            unprivileged_groups=unprivileged_groups,
                                            privileged_groups=privileged_groups,
                                            favorable_label=favorable_label,
                                            unfavorable_label=unfavorable_label)
    plain_predictions_test_dataset = dataset_wrapper(outcome=y_pred, protected=p_test,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups,
                                                     favorable_label=favorable_label,
                                                     unfavorable_label=unfavorable_label)

    classified_metric_nodebiasing_test = ClassificationMetric(original_test_dataset,
                                                              plain_predictions_test_dataset,
                                                              unprivileged_groups=unprivileged_groups,
                                                              privileged_groups=privileged_groups)
    TPR = classified_metric_nodebiasing_test.true_positive_rate()
    TNR = classified_metric_nodebiasing_test.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5*(TPR+TNR)

    print("#### Plain model - without debiasing - classification metrics on test set")

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    print("metrics: ", metrics)
    return metrics
Code example #16
def k_fold_statistics(k_folds, classifier, lambda_values, dataset, unprivileged_groups, privileged_groups):
    '''
    Function to fit classifier to k number of random train/test splits
    
    Args:
        k_folds: number of random train/test splits
        classifier: SVM or Logistic regression
        lambda_values: range of lambda values (regularisation strengths) to assess
        dataset: dataset to be used
        unprivileged_groups: unprivileged group definitions
        privileged_groups: privileged group definitions

    Returns: 
        accuracy_list: test accuracy for each model
        equal_opp_list: Equal Opportunity difference for each model
        stat_parity_list: Statistical Parity difference for each model
    '''

    accuracy_list = []
    equal_opp_list = []
    stat_parity_list = []

    for k in range(k_folds):
        train, test = dataset.split([0.8], shuffle=True)
        train, validation = train.split([0.8], shuffle=True)
        scale_orig = StandardScaler()
        X_train = scale_orig.fit_transform(train.features)
        y_train = train.labels.ravel()
        X_test = scale_orig.transform(test.features)
        y_test = test.labels.ravel()
        X_valid = scale_orig.transform(validation.features)
        y_valid = validation.labels.ravel()
        test_pred = test.copy() 
        valid_pred = validation.copy()

        RW = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
        
        best_mean_statistic = 0
        
        # fit all candidate models
        for lambda_value in lambda_values:
            train = RW.fit_transform(train)
            if classifier == "Logistic Regression":
                learner = LogisticRegression(solver='liblinear', random_state=1, penalty='l2', C=1/lambda_value)  
            else:
                learner = svm.SVC(C=1/lambda_value)  
            learner.fit(X_train,y_train, sample_weight=train.instance_weights)
            valid_pred.labels = learner.predict(X_valid)
            metric = ClassificationMetric(validation, valid_pred, unprivileged_groups=unprivileged_groups,
                                        privileged_groups=privileged_groups)
            mean_statistic = (1-abs(metric.equal_opportunity_difference())+metric.accuracy())/2
            if mean_statistic > best_mean_statistic:
                best_mean_statistic = mean_statistic
                best_learner = learner

        test_pred.labels = best_learner.predict(X_test)
        metric = ClassificationMetric(test, test_pred, unprivileged_groups=unprivileged_groups,
                                        privileged_groups=privileged_groups)
        print("----------------")
        print("Split {}/{}".format(k + 1, k_folds))
        print("Equal opportunity:", "{0:.3f}".format(metric.equal_opportunity_difference()))
        print("Statistical parity:", "{0:.3f}".format(metric.statistical_parity_difference()))
        print("Accuracy:", "{0:.3f}".format(metric.accuracy()))
        accuracy_list.append(metric.accuracy())
        equal_opp_list.append(metric.equal_opportunity_difference())
        stat_parity_list.append(metric.statistical_parity_difference())

    accuracy_list = np.array(accuracy_list)
    equal_opp_list = np.array(equal_opp_list)
    stat_parity_list = np.array(stat_parity_list)
    print('The mean statistics for {} folds is:'.format(k_folds))
    print("Mean Accuracy: {0:.3f},".format(np.mean(accuracy_list)), "Std: {0:.3f}".format(np.std(accuracy_list)))
    print("Mean Equal Opportunity: {0:.3f},".format(np.mean(equal_opp_list)), "Std: {0:.3f}".format( np.std(equal_opp_list))) 
    print("Mean Statistical Parity: {0:.3f},".format(np.mean(stat_parity_list)), "Std: {0:.3f}".format(np.std(stat_parity_list)))
    
    return accuracy_list, equal_opp_list, stat_parity_list
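A hedged example of how k_fold_statistics might be called, assuming dataset_orig is an AIF360 StandardDataset with 'sex' as the protected attribute and that Reweighing, StandardScaler, LogisticRegression and svm are imported in the surrounding module; all of these are assumptions for illustration.

# Sketch under the stated assumptions.
acc, eod, spd = k_fold_statistics(
    k_folds=5,
    classifier="Logistic Regression",
    lambda_values=np.logspace(-2, 2, 5),
    dataset=dataset_orig,
    unprivileged_groups=[{'sex': 0}],
    privileged_groups=[{'sex': 1}])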
Code example #17
def fairness_check(label_dir, model_dir):
    """Need to generalize the protected features"""

    # races_to_consider = [0,4]
    unprivileged_groups = [{'race': 4.0}]
    privileged_groups = [{'race': 0.0}]
    favorable_label = 0.0
    unfavorable_label = 1.0

    """Load the necessary labels and protected features for fairness check"""

    # y_train = np.loadtxt(label_dir + '/y_train.out')
    # p_train = np.loadtxt(label_dir + '/p_train.out')
    y_test = np.loadtxt(label_dir + '/y_test.out')
    p_test = np.loadtxt(label_dir + '/p_test.out')
    y_pred = np.loadtxt(label_dir + '/y_pred.out')

    """Calculate the fairness metrics"""

    # original_traning_dataset = dataset_wrapper(outcome=y_train, protected=p_train,
    #                                            unprivileged_groups=unprivileged_groups,
    #                                            privileged_groups=privileged_groups,
    #                                            favorable_label=favorable_label,
    #                                            unfavorable_label=unfavorable_label)
    original_test_dataset = dataset_wrapper(outcome=y_test, protected=p_test,
                                            unprivileged_groups=unprivileged_groups,
                                            privileged_groups=privileged_groups,
                                            favorable_label=favorable_label,
                                            unfavorable_label=unfavorable_label)
    plain_predictions_test_dataset = dataset_wrapper(outcome=y_pred, protected=p_test,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups,
                                                     favorable_label=favorable_label,
                                                     unfavorable_label=unfavorable_label)

    classified_metric_nodebiasing_test = ClassificationMetric(original_test_dataset,
                                                              plain_predictions_test_dataset,
                                                              unprivileged_groups=unprivileged_groups,
                                                              privileged_groups=privileged_groups)
    TPR = classified_metric_nodebiasing_test.true_positive_rate()
    TNR = classified_metric_nodebiasing_test.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5*(TPR+TNR)

    print("#### Plain model - without debiasing - classification metrics on test set")
    # print("Test set: Classification accuracy = %f" % classified_metric_nodebiasing_test.accuracy())
    # print("Test set: Balanced classification accuracy = %f" % bal_acc_nodebiasing_test)
    # print("Test set: Statistical parity difference = %f" % classified_metric_nodebiasing_test.statistical_parity_difference())
    # print("Test set: Disparate impact = %f" % classified_metric_nodebiasing_test.disparate_impact())
    # print("Test set: Equal opportunity difference = %f" % classified_metric_nodebiasing_test.equal_opportunity_difference())
    # print("Test set: Average odds difference = %f" % classified_metric_nodebiasing_test.average_odds_difference())
    # print("Test set: Theil index = %f" % classified_metric_nodebiasing_test.theil_index())
    # print("Test set: False negative rate difference = %f" % classified_metric_nodebiasing_test.false_negative_rate_difference())

    metrics = {
        "Classification accuracy": classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy": bal_acc_nodebiasing_test,
        "Statistical parity difference": classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact": classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference": classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference": classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index": classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference": classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    return {"metrics": metrics}
Code example #18
class FairnessBoundsWarning:
    """Raise warnings if classifier misses specified fairness bounds.

    Bounds are checked using AIF360's ClassificationMetric whenever the
    corresponding bound is not None.
    """

    DISPARATE_IMPACT_RATIO_BOUND = 0.8
    FPR_RATIO_BOUND = 0.8
    FNR_RATIO_BOUND = 0.8
    ERROR_RATIO_BOUND = 0.8

    EO_DIFFERENCE_BOUND = 0.1

    FPR_DIFFERENCE_BOUND = None
    FNR_DIFFERENCE_BOUND = None
    ERROR_DIFFERENCE_BOUND = None

    def __init__(
        self,
        raw_dataset: BinaryLabelDataset,
        predicted_dataset: BinaryLabelDataset,
        privileged_groups=None,
        unprivileged_groups=None,
    ):
        """
        Args:
            raw_dataset (BinaryLabelDataset): Dataset with ground-truth labels.
            predicted_dataset (BinaryLabelDataset): Dataset after predictions.
            privileged_groups (list(dict)): Privileged groups. Format is a list
                of `dicts` where the keys are `protected_attribute_names` and
                the values are values in `protected_attributes`. Each `dict`
                element describes a single group.
            unprivileged_groups (list(dict)): Unprivileged groups. Same format
                as privileged_groups.
        """
        self._raw_dataset = raw_dataset
        self._predicted_dataset = predicted_dataset

        if privileged_groups is None:
            privileged_groups = [
                dict(
                    zip(
                        predicted_dataset.protected_attribute_names,
                        predicted_dataset.privileged_protected_attributes,
                    )
                )
            ]

        if unprivileged_groups is None:
            unprivileged_groups = [
                dict(
                    zip(
                        predicted_dataset.protected_attribute_names,
                        predicted_dataset.unprivileged_protected_attributes,
                    )
                )
            ]

        self._classification_metric = ClassificationMetric(
            raw_dataset,
            predicted_dataset,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups,
        )

    def check_bounds(self):
        """Run methods checking each bound."""
        self._check_disparate_impact()
        self._check_fpr_bound()
        self._check_fnr_bound()
        self._check_all_errors_bound()
        self._check_eo_bound()

    @staticmethod
    def _warn_bound(metric_name, computed_ratio, tolerated_ratio):
        """Raise warning with default message."""
        warning_msg = (
            "Classifier has "
            + metric_name
            + " of "
            + str(computed_ratio)
            + ", violating the tolerated bound of "
            + str(tolerated_ratio)
        )

        warnings.warn(warning_msg)

    @staticmethod
    def _maybe_scale(num):
        """Return inverse of num if num is larger than one."""
        return num if num < 1 else 1 / num

    def _check_disparate_impact(self):
        """Raise warning if disparate impact bound is breached."""
        if self.DISPARATE_IMPACT_RATIO_BOUND is not None:
            dsp_im = self._maybe_scale(
                self._classification_metric.disparate_impact()
            )

            # _maybe_scale maps the ratio into (0, 1]; values below the bound signal disparity.
            if dsp_im < self.DISPARATE_IMPACT_RATIO_BOUND:
                self._warn_bound(
                    "disparate impact",
                    dsp_im,
                    self.DISPARATE_IMPACT_RATIO_BOUND,
                )

    def _check_fpr_bound(self):
        """Raise warning if false positive bound is breached."""
        if self.FPR_RATIO_BOUND is not None:
            fprr = self._maybe_scale(
                self._classification_metric.false_positive_rate_ratio()
            )

            if fprr < self.FPR_RATIO_BOUND:
                self._warn_bound(
                    "false positive rate ratio", fprr, self.FPR_RATIO_BOUND
                )

        if self.FPR_DIFFERENCE_BOUND is not None:
            fprd = self._classification_metric.false_positive_rate_difference()

            if fprd > self.FPR_DIFFERENCE_BOUND:
                self._warn_bound(
                    "false positive rate difference",
                    fprd,
                    self.FPR_DIFFERENCE_BOUND,
                )

    def _check_fnr_bound(self):
        """Raise warning if false negative bound is breached."""
        if self.FNR_RATIO_BOUND is not None:
            fnrr = self._maybe_scale(
                self._classification_metric.false_negative_rate_ratio()
            )

            if fnrr < self.FNR_RATIO_BOUND:
                self._warn_bound(
                    "false negative rate ratio", fnrr, self.FNR_RATIO_BOUND
                )

        if self.FNR_DIFFERENCE_BOUND is not None:
            fnrd = self._classification_metric.false_negative_rate_difference()
            if fnrd > self.FNR_DIFFERENCE_BOUND:
                self._warn_bound(
                    "false negative rate difference",
                    fnrd,
                    self.FNR_DIFFERENCE_BOUND,
                )

    def _check_all_errors_bound(self):
        """Raise warning if overall error bound is breached."""
        if self.ERROR_RATIO_BOUND is not None:
            err = self._maybe_scale(
                self._classification_metric.error_rate_ratio()
            )

            if err < self.ERROR_RATIO_BOUND:
                self._warn_bound("error rate ratio", err, self.ERROR_RATIO_BOUND)

        if self.ERROR_DIFFERENCE_BOUND is not None:
            errd = self._classification_metric.error_rate_difference()

            if errd > self.ERROR_DIFFERENCE_BOUND:
                self._warn_bound(
                    "error rate difference", errd, self.ERROR_DIFFERENCE_BOUND
                )

    def _check_eo_bound(self):
        """Raise warning if equal opportunity difference bound is breached."""
        if self.EO_DIFFERENCE_BOUND is not None:
            eo = self._classification_metric.equal_opportunity_difference()

            if eo > self.EO_DIFFERENCE_BOUND:
                self._warn_bound(
                    "equal opportunity difference", eo, self.EO_DIFFERENCE_BOUND
                )
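A sketch of how this warning class might be used, assuming dataset_true and dataset_pred are compatible BinaryLabelDataset objects for the same test instances and 'sex' is the protected attribute (illustrative assumptions):

# Sketch under the stated assumptions.
warner = FairnessBoundsWarning(
    dataset_true,
    dataset_pred,
    privileged_groups=[{'sex': 1}],
    unprivileged_groups=[{'sex': 0}],
)
warner.check_bounds()  # emits a warnings.warn(...) message for each breached bound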
Code example #19
    else:
        priv.append([])

stdDs = StandardDataset(validation_comp, 'is_violent_recid', [0], prot, priv)
stdPred = StandardDataset(validation_pred, 'is_violent_recid', [0], prot, priv)
bi_met = BinaryLabelDatasetMetric(stdDs,
                                  privileged_groups=[priv_dict],
                                  unprivileged_groups=[unpriv_dict])
class_met = ClassificationMetric(stdDs,
                                 stdPred,
                                 unprivileged_groups=[unpriv_dict],
                                 privileged_groups=[priv_dict])

disparate_impact = bi_met.disparate_impact()
error_rate_ratio = class_met.error_rate_ratio()  # needed below for er_y
eq_diff = class_met.equal_opportunity_difference()

#Create 2 Bar Graphs
x = [1]
di_y = [disparate_impact]
er_y = [error_rate_ratio]
plt.ylim(bottom=0, top=2)
plt.xlim(left=0, right=2)

ax = plt.gca()
ax.axes.xaxis.set_visible(False)

plt.bar(x, di_y, width=0.6)
plt.axhline(y=1.25, xmin=0, xmax=2, linestyle='--', color='black')
plt.axhline(y=1, xmin=0, xmax=2, linestyle='--', color='green')
plt.axhline(y=0.75, xmin=0, xmax=2, linestyle='--', color='black')
Code example #20
        "#### Plain model - without debiasing - classification metrics on test set"
    )
    # print("Test set: Classification accuracy = %f" % classified_metric_nodebiasing_test.accuracy())
    # print("Test set: Balanced classification accuracy = %f" % bal_acc_nodebiasing_test)
    # print("Test set: Statistical parity difference = %f" % classified_metric_nodebiasing_test.statistical_parity_difference())
    # print("Test set: Disparate impact = %f" % classified_metric_nodebiasing_test.disparate_impact())
    # print("Test set: Equal opportunity difference = %f" % classified_metric_nodebiasing_test.equal_opportunity_difference())
    # print("Test set: Average odds difference = %f" % classified_metric_nodebiasing_test.average_odds_difference())
    # print("Test set: Theil index = %f" % classified_metric_nodebiasing_test.theil_index())
    # print("Test set: False negative rate difference = %f" % classified_metric_nodebiasing_test.false_negative_rate_difference())

    metrics = {
        "Classification accuracy":
        classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy":
        bal_acc_nodebiasing_test,
        "Statistical parity difference":
        classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact":
        classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference":
        classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference":
        classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index":
        classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference":
        classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    print("metrics: ", metrics)
Code example #21
def compute_metrics(model, X_test, y_test, X_train, y_train, dataset_test,
                    dataset_name, model_name, unprivileged_groups,
                    privileged_groups, position):
    """
    Calculate and return: model accuracy and fairness metrics
    
    Parameters
    ----------
    model: scikit-learn classifier    
    X_test: numpy 2d array
    y_test: numpy 1d array
    X_train: numpy 2d array
    y_train: numpy 1d array
    dataset_test: aif360.datasets.BinaryLabelDataset
    dataset_name: string
        Dataset name used in the analysis
    model_name: string
    unprivileged_groups: list<dict>
        Dictionary where the key is the name of the sensitive column in the 
        dataset, and the value is the value of the unprivileged group in the
        dataset
    privileged_groups: list<dict>
        Dictionary where the key is the name of the sensitive column in the 
        dataset, and the value is the value of the privileged group in the
        dataset
    position: int
        Column position of the sensitive group in the dataset 
    """

    y_pred_test = model.predict(X_test)
    acc_test = accuracy_score(y_true=y_test, y_pred=y_pred_test)
    print("Test accuracy: ", acc_test)
    y_pred_train = model.predict(X_train)
    acc_train = accuracy_score(y_true=y_train, y_pred=y_pred_train)
    print("Train accuracy: ", acc_train)

    dataset_pred = dataset_test.copy()
    dataset_pred.labels = y_pred_test

    bin_metric = BinaryLabelDatasetMetric(
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    disparate_impact_bin = bin_metric.disparate_impact()
    print('Disparate impact: ', disparate_impact_bin)
    mean_difference = bin_metric.mean_difference()
    print('Mean difference: ', mean_difference)

    classif_metric = ClassificationMetric(
        dataset_test,
        dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    classif_disparate_impact = classif_metric.disparate_impact()
    avg_odds = classif_metric.average_odds_difference()
    print('Average odds difference:', avg_odds)
    equal_opport = classif_metric.equal_opportunity_difference()
    print('Equality of opportunity:', equal_opport)
    false_discovery_rate = classif_metric.false_discovery_rate_difference()
    print('False discovery rate difference:', false_discovery_rate)
    entropy_index = classif_metric.generalized_entropy_index()
    print('Generalized entropy index:', entropy_index)

    cons_comp = consitency_mod(bin_metric, position, n_neighbors=5)
    print('Consistency: ', cons_comp)

    result = (dataset_name, model_name, acc_test, disparate_impact_bin,
              mean_difference, classif_disparate_impact, avg_odds,
              equal_opport, false_discovery_rate, entropy_index, cons_comp)

    return result
Code example #22
def comb_algorithm(l, m, n, dataset_original1, privileged_groups1,
                   unprivileged_groups1, optim_options1):

    dataset_original2 = copy.deepcopy(dataset_original1)
    privileged_groups2 = copy.deepcopy(privileged_groups1)
    unprivileged_groups2 = copy.deepcopy(unprivileged_groups1)
    optim_options2 = copy.deepcopy(optim_options1)

    # preAlgorithm, inAlgorithm, postAlgorithm, cost_constraint, allowed_metrics,
    # metric_ub, metric_lb and the helper training functions are assumed to be
    # defined at module level.
    print(l, m, n)
    dataset_orig_train, dataset_orig_vt = dataset_original2.split([0.7],
                                                                  shuffle=True)
    dataset_orig_valid, dataset_orig_test = dataset_orig_vt.split([0.5],
                                                                  shuffle=True)

    if l == 0:
        dataset_transf_train, dataset_transf_valid, dataset_transf_test = dataset_orig_train, dataset_orig_valid, dataset_orig_test
    else:
        pre_used = preAlgorithm[l - 1]
        dataset_transf_train, dataset_transf_valid, dataset_transf_test = Pre(
            pre_used, dataset_orig_train, dataset_orig_valid,
            dataset_orig_test, privileged_groups2, unprivileged_groups2,
            optim_options2)

    #assert (l,m,n)!=(2,0,0)
    #assert not np.all(dataset_transf_train.labels.flatten()==1.0)

    if m == 0:
        dataset_transf_valid_pred, dataset_transf_test_pred = train(
            dataset_transf_train, dataset_transf_valid, dataset_transf_test,
            privileged_groups2, unprivileged_groups2)
    else:
        in_used = inAlgorithm[m - 1]
        if in_used == "adversarial_debiasing":
            dataset_transf_valid_pred, dataset_transf_test_pred = adversarial_debiasing(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "art_classifier":
            dataset_transf_valid_pred, dataset_transf_test_pred = art_classifier(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "prejudice_remover":
            for key, value in privileged_groups2[0].items():
                sens_attr = key
            dataset_transf_valid_pred, dataset_transf_test_pred = prejudice_remover(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2,
                sens_attr)

    if n == 0:
        dataset_transf_test_pred_transf = dataset_transf_test_pred

    else:
        post_used = postAlgorithm[n - 1]
        if post_used == "calibrated_eqodds":
            cpp = CalibratedEqOddsPostprocessing(
                privileged_groups=privileged_groups2,
                unprivileged_groups=unprivileged_groups2,
                cost_constraint=cost_constraint,
                seed=1)
            cpp = cpp.fit(dataset_transf_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = cpp.predict(
                dataset_transf_test_pred)

        elif post_used == "eqodds":
            EO = EqOddsPostprocessing(unprivileged_groups=unprivileged_groups2,
                                      privileged_groups=privileged_groups2,
                                      seed=1)
            EO = EO.fit(dataset_transf_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = EO.predict(
                dataset_transf_test_pred)

        elif post_used == "reject_option":
            ROC = RejectOptionClassification(
                unprivileged_groups=unprivileged_groups2,
                privileged_groups=privileged_groups2,
                low_class_thresh=0.01,
                high_class_thresh=0.99,
                num_class_thresh=100,
                num_ROC_margin=50,
                metric_name=allowed_metrics[0],
                metric_ub=metric_ub,
                metric_lb=metric_lb)
            ROC = ROC.fit(dataset_transf_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = ROC.predict(
                dataset_transf_test_pred)

    metric = ClassificationMetric(dataset_transf_test,
                                  dataset_transf_test_pred_transf,
                                  unprivileged_groups=unprivileged_groups2,
                                  privileged_groups=privileged_groups2)

    metrics = OrderedDict()
    metrics["Classification accuracy"] = metric.accuracy()
    TPR = metric.true_positive_rate()
    TNR = metric.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)
    metrics["Balanced classification accuracy"] = bal_acc_nodebiasing_test
    metrics["Statistical parity difference"] = metric.statistical_parity_difference()
    metrics["Disparate impact"] = metric.disparate_impact()
    metrics["Equal opportunity difference"] = metric.equal_opportunity_difference()
    metrics["Average odds difference"] = metric.average_odds_difference()
    metrics["Theil index"] = metric.theil_index()
    metrics["United Fairness"] = metric.generalized_entropy_index()
    # print(metrics)

    feature = "["
    for m in metrics:
        feature = feature + " " + str(round(metrics[m], 4))
    feature = feature + "]"

    return feature
Code example #23
def compute_metrics(model, X_test, y_test, X_train, y_train, dataset_test, 
                    unprivileged_groups, privileged_groups, protect_attribute, 
                    print_result):
    """
    Calculate and return: model accuracy and fairness metrics
    
    Parameters
    ----------
    model: scikit-learn classifier    
    X_test: numpy 2d array
    y_test: numpy 1d array
    X_train: numpy 2d array
    y_train: numpy 1d array
    dataset_test: aif360.datasets.BinaryLabelDataset
    unprivileged_groups: list<dict>
        Dictionary where the key is the name of the sensitive column in the 
        dataset, and the value is the value of the unprivileged group in the
        dataset
    privileged_groups: list<dict>
        Dictionary where the key is the name of the sensitive column in the 
        dataset, and the value is the value of the privileged group in the
        dataset
    protect_attribute
    print_result
    """
    result = {}
    
    y_pred_test = model.predict(X_test)
    result['acc_test'] = accuracy_score(y_true=y_test, y_pred=y_pred_test)
    y_pred_train = model.predict(X_train)
    result['acc_train'] = accuracy_score(y_true=y_train, y_pred=y_pred_train)
    
    dataset_pred = dataset_test.copy()
    dataset_pred.labels = y_pred_test

    bin_metric = BinaryLabelDatasetMetric(dataset_pred, 
                                          unprivileged_groups=unprivileged_groups,
                                          privileged_groups=privileged_groups)
    result['disp_impact'] = bin_metric.disparate_impact()
    result['stat_parity'] = bin_metric.mean_difference()

    classif_metric = ClassificationMetric(dataset_test, dataset_pred, 
                                          unprivileged_groups=unprivileged_groups,
                                          privileged_groups=privileged_groups)
    result['avg_odds'] = classif_metric.average_odds_difference()
    result['equal_opport'] = classif_metric.equal_opportunity_difference()
    result['false_discovery_rate'] = classif_metric.false_discovery_rate_difference()
    result['entropy_index'] = classif_metric.generalized_entropy_index()
    result['acc_test_clf'] = classif_metric.accuracy(privileged=None)
    result['acc_test_priv'] = classif_metric.accuracy(privileged=True)
    result['acc_test_unpriv'] = classif_metric.accuracy(privileged=False)
    
    result['consistency'] = consitency(X_test, y_pred_test, protect_attribute, n_neighbors=5)
    result['counterfactual'] = counterfactual(X_test, model, protect_attribute)
    
    if print_result:
        print("Train accuracy: ", result['acc_train'])
        print("Test accuracy: ", result['acc_test'])
        print("Test accuracy clf: ", result['acc_test_clf'])
        print("Test accuracy priv.: ", result['acc_test_priv'])
        print("Test accuracy unpriv.: ", result['acc_test_unpriv'])
        print('Disparate impact: ', result['disp_impact'])
        print('Mean difference: ', result['stat_parity'])
        print('Average odds difference:', result['avg_odds'])
        print('Equality of opportunity:', result['equal_opport'])
        print('False discovery rate difference:', result['false_discovery_rate'])
        print('Generalized entropy index:', result['entropy_index'])
        print('Consistency: ', result['consistency'])
        print('Counterfactual fairness: ', result['counterfactual'])

    return result
Code example #24
File: fairness.py  Project: Jdorri/ML-coursework
for c in C:
    predictions, _ = train_and_predict(X_train, y_train, X_test, c, norm_type)

    ds_te_pred = ds_te.copy()
    ds_te_pred.labels = predictions

    metric_te = ClassificationMetric(ds_te,
                                     ds_te_pred,
                                     unprivileged_groups=unpriv,
                                     privileged_groups=priv)

    BACC = 0.5*(metric_te.true_positive_rate()\
        +metric_te.true_negative_rate())
    metric_1 = metric_te.statistical_parity_difference()
    metric_2 = metric_te.average_odds_difference()
    metric_3 = metric_te.equal_opportunity_difference()

    accuracy.append(BACC)
    mean_diff.append(metric_1)
    average_odds_diff.append(metric_2)
    equal_opp_diff.append(metric_3)

# save plots
plot_results(C, norm_type, accuracy, mean_diff, average_odds_diff, \
    equal_opp_diff, name+'_all_metrics_'+norm_type)


def results_table(C, accuracy, mean_diff, avg_odds_diff, equal_opp_diff):
    results = pd.DataFrame()
    results['c'] = C
    results['bACC'] = accuracy
Code example #25
classification_metric = ClassificationMetric(
    dataset_ground_truth,
    dataset_classifier,
    unprivileged_groups=unprivileged_groups,
    privileged_groups=privileged_groups)

TPR = classification_metric.true_positive_rate()
TNR = classification_metric.true_negative_rate()
bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)

metrics = {
    "classification_accuracy": classification_metric.accuracy(),
    "balanced_classification_accuracy": bal_acc_nodebiasing_test,
    "statistical_parity_difference": classification_metric.statistical_parity_difference(),
    "disparate_impact": classification_metric.disparate_impact(),
    "equal_opportunity_difference": classification_metric.equal_opportunity_difference(),
    "average_odds_difference": classification_metric.average_odds_difference(),
    "theil_index": classification_metric.theil_index(),
    "false_negative_rate_difference": classification_metric.false_negative_rate_difference()
}

sys.stdout.write(json.dumps(metrics))
Code example #26
def comb_algorithm(l, m, n, dataset_original1, privileged_groups1,
                   unprivileged_groups1, optim_options1):

    dataset_original2 = copy.deepcopy(dataset_original1)
    privileged_groups2 = copy.deepcopy(privileged_groups1)
    unprivileged_groups2 = copy.deepcopy(unprivileged_groups1)
    optim_options2 = copy.deepcopy(optim_options1)

    print(l, m, n)
    dataset_original_train, dataset_original_vt = dataset_original2.split(
        [0.7], shuffle=True)
    dataset_original_valid, dataset_original_test = dataset_original_vt.split(
        [0.5], shuffle=True)
    print('=======================')
    #print(dataset_original_test.labels)
    dataset_orig_train = copy.deepcopy(dataset_original_train)
    dataset_orig_valid = copy.deepcopy(dataset_original_valid)
    dataset_orig_test = copy.deepcopy(dataset_original_test)

    if l == 0:
        dataset_transfer_train = copy.deepcopy(dataset_original_train)
        dataset_transfer_valid = copy.deepcopy(dataset_original_valid)
        dataset_transfer_test = copy.deepcopy(dataset_original_test)
        #dataset_transf_train, dataset_transf_valid, dataset_transf_test = dataset_orig_train, dataset_orig_valid, dataset_orig_test
    else:
        pre_used = preAlgorithm[l - 1]
        dataset_transfer_train, dataset_transfer_valid, dataset_transfer_test = Pre(
            pre_used, dataset_orig_train, dataset_orig_valid,
            dataset_orig_test, privileged_groups2, unprivileged_groups2,
            optim_options2)

    dataset_transf_train = copy.deepcopy(dataset_transfer_train)
    dataset_transf_valid = copy.deepcopy(dataset_transfer_valid)
    dataset_transf_test = copy.deepcopy(dataset_transfer_test)
    if m == 0:
        dataset_transfer_valid_pred, dataset_transfer_test_pred = plain_model(
            dataset_transf_train, dataset_transf_valid, dataset_transf_test,
            privileged_groups2, unprivileged_groups2)
    else:
        in_used = inAlgorithm[m - 1]
        if in_used == "adversarial_debiasing":
            dataset_transfer_valid_pred, dataset_transfer_test_pred = adversarial_debiasing(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "art_classifier":
            dataset_transfer_valid_pred, dataset_transfer_test_pred = art_classifier(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2)
        elif in_used == "prejudice_remover":
            for key, value in privileged_groups2[0].items():
                sens_attr = key
            dataset_transfer_valid_pred, dataset_transfer_test_pred = prejudice_remover(
                dataset_transf_train, dataset_transf_valid,
                dataset_transf_test, privileged_groups2, unprivileged_groups2,
                sens_attr)

    dataset_transf_valid_pred = copy.deepcopy(dataset_transfer_valid_pred)
    dataset_transf_test_pred = copy.deepcopy(dataset_transfer_test_pred)
    if n == 0:
        dataset_transf_test_pred_transf = copy.deepcopy(
            dataset_transfer_test_pred)

    else:
        post_used = postAlgorithm[n - 1]
        if post_used == "calibrated_eqodds":
            cpp = CalibratedEqOddsPostprocessing(
                privileged_groups=privileged_groups2,
                unprivileged_groups=unprivileged_groups2,
                cost_constraint=cost_constraint)
            cpp = cpp.fit(dataset_transfer_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = cpp.predict(
                dataset_transf_test_pred)

        elif post_used == "eqodds":
            EO = EqOddsPostprocessing(unprivileged_groups=unprivileged_groups2,
                                      privileged_groups=privileged_groups2)
            EO = EO.fit(dataset_transfer_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = EO.predict(
                dataset_transf_test_pred)

        elif post_used == "reject_option":
            #dataset_transf_test_pred_transf = reject_option(dataset_transf_valid, dataset_transf_valid_pred, dataset_transf_test, dataset_transf_test_pred, privileged_groups2, unprivileged_groups2)

            ROC = RejectOptionClassification(
                unprivileged_groups=unprivileged_groups2,
                privileged_groups=privileged_groups2)
            ROC = ROC.fit(dataset_transfer_valid, dataset_transf_valid_pred)
            dataset_transf_test_pred_transf = ROC.predict(
                dataset_transf_test_pred)

    #print('=======================')
    org_labels = dataset_orig_test.labels
    print(dataset_orig_test.labels)
    #print(dataset_transf_test.labels)
    #print('=======================')
    pred_labels = dataset_transf_test_pred.labels
    print(dataset_transf_test_pred.labels)

    true_pred = org_labels == pred_labels
    print("acc after in: ", float(np.sum(true_pred)) / pred_labels.shape[1])
    #print('=======================')
    #print(dataset_transf_test_pred_transf.labels)
    #print(dataset_transf_test_pred_transf.labels.shape)

    metric = ClassificationMetric(dataset_transfer_test,
                                  dataset_transf_test_pred_transf,
                                  unprivileged_groups=unprivileged_groups2,
                                  privileged_groups=privileged_groups2)

    metrics = OrderedDict()
    metrics["Classification accuracy"] = metric.accuracy()
    TPR = metric.true_positive_rate()
    TNR = metric.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)
    metrics["Balanced classification accuracy"] = bal_acc_nodebiasing_test
    metrics["Statistical parity difference"] = metric.statistical_parity_difference()
    metrics["Disparate impact"] = metric.disparate_impact()
    metrics["Equal opportunity difference"] = metric.equal_opportunity_difference()
    metrics["Average odds difference"] = metric.average_odds_difference()
    metrics["Theil index"] = metric.theil_index()
    metrics["United Fairness"] = metric.generalized_entropy_index()

    feature = []
    for name in metrics:  # use a fresh loop variable so the parameter `m` is not shadowed
        feature.append(round(metrics[name], 4))
    feature_str = "[" + " ".join(str(v) for v in feature) + "]"

    return feature, feature_str
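
A hedged usage sketch for comb_algorithm: sweep every combination of pre-, in-, and post-processing stages and collect one feature vector per run. The stage-name lists below are assumptions inferred from the function body (index 0 means "skip this stage"); dataset_orig, the group definitions, and optim_options stand for whatever was prepared earlier in the pipeline.

# Assumed stage lists; index 0 in comb_algorithm means "no algorithm at this stage".
preAlgorithm = ["reweighing", "disparate_impact_remover", "optimized_preprocessing"]
inAlgorithm = ["adversarial_debiasing", "art_classifier", "prejudice_remover"]
postAlgorithm = ["calibrated_eqodds", "eqodds", "reject_option"]

results = {}
for l in range(len(preAlgorithm) + 1):
    for m in range(len(inAlgorithm) + 1):
        for n in range(len(postAlgorithm) + 1):
            feature, feature_str = comb_algorithm(l, m, n, dataset_orig,
                                                  privileged_groups,
                                                  unprivileged_groups,
                                                  optim_options)
            results[(l, m, n)] = feature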
Code example #27
    def fit(self, dataset_true, dataset_pred):
        """Estimates the optimal classification threshold and margin for reject
        option classification that optimizes the metric provided.

        Note:
            The search is a brute-force grid over `num_class_thresh`
            classification thresholds and `num_ROC_margin` ROC margins; the
            combination with the highest balanced accuracy that satisfies the
            fairness constraint is retained.

        Args:
            dataset_true (BinaryLabelDataset): Dataset containing the true
                `labels`.
            dataset_pred (BinaryLabelDataset): Dataset containing the predicted
                `scores`.

        Returns:
            RejectOptionClassification: Returns self.
        """

        fair_metric_arr = np.zeros(self.num_class_thresh*self.num_ROC_margin)
        balanced_acc_arr = np.zeros_like(fair_metric_arr)
        ROC_margin_arr = np.zeros_like(fair_metric_arr)
        class_thresh_arr = np.zeros_like(fair_metric_arr)

        cnt = 0
        # Iterate through class thresholds
        for class_thresh in np.linspace(self.low_class_thresh,
                                        self.high_class_thresh,
                                        self.num_class_thresh):

            self.classification_threshold = class_thresh
            if class_thresh <= 0.5:
                low_ROC_margin = 0.0
                high_ROC_margin = class_thresh
            else:
                low_ROC_margin = 0.0
                high_ROC_margin = (1.0-class_thresh)

            # Iterate through ROC margins
            for ROC_margin in np.linspace(
                                low_ROC_margin,
                                high_ROC_margin,
                                self.num_ROC_margin):
                self.ROC_margin = ROC_margin

                # Predict using the current threshold and margin
                dataset_transf_pred = self.predict(dataset_pred)

                dataset_transf_metric_pred = BinaryLabelDatasetMetric(
                                             dataset_transf_pred,
                                             unprivileged_groups=self.unprivileged_groups,
                                             privileged_groups=self.privileged_groups)
                classified_transf_metric = ClassificationMetric(
                                             dataset_true,
                                             dataset_transf_pred,
                                             unprivileged_groups=self.unprivileged_groups,
                                             privileged_groups=self.privileged_groups)

                ROC_margin_arr[cnt] = self.ROC_margin
                class_thresh_arr[cnt] = self.classification_threshold

                # Balanced accuracy and fairness metric computations
                balanced_acc_arr[cnt] = 0.5*(classified_transf_metric.true_positive_rate()\
                                       +classified_transf_metric.true_negative_rate())
                if self.metric_name == "Statistical parity difference":
                    fair_metric_arr[cnt] = dataset_transf_metric_pred.mean_difference()
                elif self.metric_name == "Average odds difference":
                    fair_metric_arr[cnt] = classified_transf_metric.average_odds_difference()
                elif self.metric_name == "Equal opportunity difference":
                    fair_metric_arr[cnt] = classified_transf_metric.equal_opportunity_difference()

                cnt += 1

        rel_inds = np.logical_and(fair_metric_arr >= self.metric_lb,
                                  fair_metric_arr <= self.metric_ub)
        if any(rel_inds):
            best_ind = np.where(balanced_acc_arr[rel_inds]
                                == np.max(balanced_acc_arr[rel_inds]))[0][0]
        else:
            warn("Unable to satisy fairness constraints")
            rel_inds = np.ones(len(fair_metric_arr), dtype=bool)
            best_ind = np.where(fair_metric_arr[rel_inds]
                                == np.min(fair_metric_arr[rel_inds]))[0][0]

        self.ROC_margin = ROC_margin_arr[rel_inds][best_ind]
        self.classification_threshold = class_thresh_arr[rel_inds][best_ind]

        return self
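
A minimal, self-contained sketch of how this fit/predict pair is typically driven: train any scorer, attach its scores to copies of the validation and test splits, fit RejectOptionClassification on the validation split against the true labels, then post-process the test predictions. The synthetic data below is purely illustrative.

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from aif360.datasets import BinaryLabelDataset
from aif360.algorithms.postprocessing import RejectOptionClassification

# Synthetic stand-in data; replace with a real dataset in practice.
rng = np.random.default_rng(0)
df = pd.DataFrame({'feat': rng.normal(size=2000),
                   'sex': rng.integers(0, 2, size=2000),
                   'label': rng.integers(0, 2, size=2000)})
dataset = BinaryLabelDataset(df=df, label_names=['label'],
                             protected_attribute_names=['sex'])
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]

train, vt = dataset.split([0.7], shuffle=True)
valid, test = vt.split([0.5], shuffle=True)

clf = LogisticRegression().fit(train.features, train.labels.ravel())

valid_pred, test_pred = valid.copy(deepcopy=True), test.copy(deepcopy=True)
for ds in (valid_pred, test_pred):
    scores = clf.predict_proba(ds.features)[:, 1].reshape(-1, 1)
    ds.scores = scores                        # RejectOptionClassification reads scores
    ds.labels = (scores > 0.5).astype(float)

roc = RejectOptionClassification(unprivileged_groups=unprivileged_groups,
                                 privileged_groups=privileged_groups,
                                 metric_name="Statistical parity difference")
roc = roc.fit(valid, valid_pred)              # the grid search shown above
test_pred_transf = roc.predict(test_pred)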
Code example #28
# TODO: (1) Redo the previous cell for gender bias and recompute the corresponding
# fairness metrics
# (2) collect these values in a table (a hedged sketch follows this cell)
# (3) think about a way to visualize these values
# Statistical Parity difference (SPD)
spd_pre_race = fairness_metrics.statistical_parity_difference()

# Disparate Impact Ratio
dir_pre_race = fairness_metrics.disparate_impact()

# Average Odds Difference and Average absolute odds difference
aod_pre_race = fairness_metrics.average_odds_difference()
aaod_pre_race = fairness_metrics.average_abs_odds_difference()

# Equal Opportunity Difference aka true positive rate difference
eod_pre_race = fairness_metrics.equal_opportunity_difference()

# Generalized entropy index with various alphas
fairness_metrics.between_all_groups_generalized_entropy_index(alpha=2)

ClassificationMetric(dataset=bld_true,
                     classified_dataset=bld_pred,
                     unprivileged_groups=None,
                     privileged_groups=None).false_positive_rate()

df_fm.head()
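
Toward the TODO above, a hedged sketch that recomputes the same metrics for a gender attribute and collects both runs in a single table. bld_true and bld_pred are the datasets already used above; the 'sex' group definitions and the table layout are illustrative assumptions.

import pandas as pd
from aif360.metrics import ClassificationMetric

# Assumed gender groups; adjust to the actual encoding of the dataset.
fairness_metrics_gender = ClassificationMetric(bld_true, bld_pred,
                                               unprivileged_groups=[{'sex': 0}],
                                               privileged_groups=[{'sex': 1}])

def metric_row(fm):
    """Collect the metrics computed above into one dictionary."""
    return {'SPD': fm.statistical_parity_difference(),
            'DIR': fm.disparate_impact(),
            'AOD': fm.average_odds_difference(),
            'AAOD': fm.average_abs_odds_difference(),
            'EOD': fm.equal_opportunity_difference()}

metrics_by_attr = pd.DataFrame([metric_row(fairness_metrics),
                                metric_row(fairness_metrics_gender)],
                               index=['race', 'sex'])
print(metrics_by_attr)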

Code example #29
def fairness_check(object_storage_url,
                   object_storage_username,
                   object_storage_password,
                   data_bucket_name,
                   result_bucket_name,
                   model_id,
                   feature_testset_path='processed_data/X_test.npy',
                   label_testset_path='processed_data/y_test.npy',
                   protected_label_testset_path='processed_data/p_test.npy',
                   model_class_file='model.py',
                   model_class_name='model',
                   favorable_label=0.0,
                   unfavorable_label=1.0,
                   privileged_groups=[{
                       'race': 0.0
                   }],
                   unprivileged_groups=[{
                       'race': 4.0
                   }]):

    url = re.compile(r"https?://")
    cos = Minio(url.sub('', object_storage_url),
                access_key=object_storage_username,
                secret_key=object_storage_password,
                secure=False)  # Local Minio server won't have HTTPS

    dataset_filenamex = "X_test.npy"
    dataset_filenamey = "y_test.npy"
    dataset_filenamep = "p_test.npy"
    weights_filename = "model.pt"
    model_files = model_id + '/_submitted_code/model.zip'

    cos.fget_object(data_bucket_name, feature_testset_path, dataset_filenamex)
    cos.fget_object(data_bucket_name, label_testset_path, dataset_filenamey)
    cos.fget_object(data_bucket_name, protected_label_testset_path,
                    dataset_filenamep)
    cos.fget_object(result_bucket_name, model_id + '/' + weights_filename,
                    weights_filename)
    cos.fget_object(result_bucket_name, model_files, 'model.zip')

    # Load PyTorch model definition from the source code.
    zip_ref = zipfile.ZipFile('model.zip', 'r')
    zip_ref.extractall('model_files')
    zip_ref.close()

    modulename = 'model_files.' + model_class_file.split('.')[0].replace(
        '-', '_')
    '''
    We require users to define where the model class is located or to follow
    the naming convention we have provided.
    '''
    model_class = getattr(importlib.import_module(modulename),
                          model_class_name)

    # load & compile model
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = model_class().to(device)
    model.load_state_dict(torch.load(weights_filename, map_location=device))
    """Load the necessary labels and protected features for fairness check"""

    x_test = np.load(dataset_filenamex)
    y_test = np.load(dataset_filenamey)
    p_test = np.load(dataset_filenamep)

    _, y_pred = evaluate(model, x_test, y_test)
    """Calculate the fairness metrics"""

    original_test_dataset = dataset_wrapper(
        outcome=y_test,
        protected=p_test,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label)
    plain_predictions_test_dataset = dataset_wrapper(
        outcome=y_pred,
        protected=p_test,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups,
        favorable_label=favorable_label,
        unfavorable_label=unfavorable_label)

    classified_metric_nodebiasing_test = ClassificationMetric(
        original_test_dataset,
        plain_predictions_test_dataset,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)
    TPR = classified_metric_nodebiasing_test.true_positive_rate()
    TNR = classified_metric_nodebiasing_test.true_negative_rate()
    bal_acc_nodebiasing_test = 0.5 * (TPR + TNR)

    print(
        "#### Plain model - without debiasing - classification metrics on test set"
    )

    metrics = {
        "Classification accuracy":
        classified_metric_nodebiasing_test.accuracy(),
        "Balanced classification accuracy":
        bal_acc_nodebiasing_test,
        "Statistical parity difference":
        classified_metric_nodebiasing_test.statistical_parity_difference(),
        "Disparate impact":
        classified_metric_nodebiasing_test.disparate_impact(),
        "Equal opportunity difference":
        classified_metric_nodebiasing_test.equal_opportunity_difference(),
        "Average odds difference":
        classified_metric_nodebiasing_test.average_odds_difference(),
        "Theil index":
        classified_metric_nodebiasing_test.theil_index(),
        "False negative rate difference":
        classified_metric_nodebiasing_test.false_negative_rate_difference()
    }
    print("metrics: ", metrics)
    return metrics
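
fairness_check relies on a dataset_wrapper helper that is not shown here. Below is a minimal sketch of what it is assumed to do, packing a label vector and one protected attribute into an aif360 BinaryLabelDataset; the 'outcome' column name is an illustrative assumption.

import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset

def dataset_wrapper(outcome, protected, unprivileged_groups, privileged_groups,
                    favorable_label, unfavorable_label):
    """Assumed helper: wrap a label vector plus a single protected attribute."""
    protected_attribute = list(privileged_groups[0].keys())[0]   # e.g. 'race'
    df = pd.DataFrame({'outcome': np.asarray(outcome).reshape(-1),
                       protected_attribute: np.asarray(protected).reshape(-1)})
    return BinaryLabelDataset(df=df,
                              label_names=['outcome'],
                              protected_attribute_names=[protected_attribute],
                              favorable_label=favorable_label,
                              unfavorable_label=unfavorable_label)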
Code example #30
def get_fair_metrics(dataset, pred, pred_is_dataset=False):
    """
    Measure fairness metrics.
    
    Parameters: 
    dataset (pandas dataframe): Dataset
    pred (array): Model predictions
    pred_is_dataset, optional (bool): True if prediction is already part of the dataset, column name 'labels'.
    
    Returns:
    fair_metrics: Fairness metrics.
    """
    if pred_is_dataset:
        dataset_pred = pred
    else:
        dataset_pred = dataset.copy()
        dataset_pred.labels = pred

    cols = [
        'statistical_parity_difference', 'equal_opportunity_difference',
        'average_abs_odds_difference', 'disparate_impact', 'theil_index'
    ]
    obj_fairness = [[0, 0, 0, 1, 0]]

    fair_metrics = pd.DataFrame(data=obj_fairness,
                                index=['objective'],
                                columns=cols)

    for attr in dataset_pred.protected_attribute_names:
        idx = dataset_pred.protected_attribute_names.index(attr)
        privileged_groups = [{
            attr:
            dataset_pred.privileged_protected_attributes[idx][0]
        }]
        unprivileged_groups = [{
            attr:
            dataset_pred.unprivileged_protected_attributes[idx][0]
        }]

        classified_metric = ClassificationMetric(
            dataset,
            dataset_pred,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        metric_pred = BinaryLabelDatasetMetric(
            dataset_pred,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)

        acc = classified_metric.accuracy()

        row = pd.DataFrame([[
            metric_pred.mean_difference(),
            classified_metric.equal_opportunity_difference(),
            classified_metric.average_abs_odds_difference(),
            metric_pred.disparate_impact(),
            classified_metric.theil_index()
        ]],
                           columns=cols,
                           index=[attr])
        fair_metrics = pd.concat([fair_metrics, row])

    fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)

    return fair_metrics
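
A hedged usage sketch for get_fair_metrics on a synthetic aif360 dataset; the privileged/unprivileged attribute values are passed explicitly so the per-attribute group lookup inside the function is well defined.

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from aif360.datasets import BinaryLabelDataset

rng = np.random.default_rng(1)
df = pd.DataFrame({'x': rng.normal(size=500),
                   'sex': rng.integers(0, 2, size=500),
                   'label': rng.integers(0, 2, size=500)})
data = BinaryLabelDataset(df=df, label_names=['label'],
                          protected_attribute_names=['sex'],
                          privileged_protected_attributes=[[1.0]],
                          unprivileged_protected_attributes=[[0.0]])

clf = LogisticRegression().fit(data.features, data.labels.ravel())
pred = clf.predict(data.features).reshape(-1, 1)

print(get_fair_metrics(data, pred))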