def compute_aif_metrics(dataset_true, dataset_pred, unprivileged_groups,
                        privileged_groups, ret_eval_dict=True):
    """Compute a standard panel of AIF360 fairness metrics.

    Parameters
    ----------
    dataset_true : aif360 BinaryLabelDataset
        Dataset with the ground-truth labels.
    dataset_pred : aif360 BinaryLabelDataset
        Same dataset with predicted labels substituted in.
    unprivileged_groups, privileged_groups : list of dict
        Group definitions passed straight through to ``ClassificationMetric``.
    ret_eval_dict : bool, optional (default True)
        When True, return ``(metrics_dict, metrics_cls)``; when False,
        return only the ``ClassificationMetric`` instance.

    Returns
    -------
    (dict, ClassificationMetric) or ClassificationMetric
        The dict maps short metric codes (BA, SPD, DI, AOD, EOD, DFBA, TI)
        to their values.
    """
    metrics_cls = ClassificationMetric(
        dataset_true, dataset_pred,
        unprivileged_groups=unprivileged_groups,
        privileged_groups=privileged_groups)

    # Only pay for the metric computations when the caller asked for them;
    # the False branch previously computed (and discarded) all of them.
    if not ret_eval_dict:
        return metrics_cls

    metrics_dict = {}
    # Balanced accuracy: mean of TPR and TNR.
    metrics_dict["BA"] = 0.5 * (metrics_cls.true_positive_rate()
                                + metrics_cls.true_negative_rate())
    metrics_dict["SPD"] = metrics_cls.statistical_parity_difference()
    metrics_dict["DI"] = metrics_cls.disparate_impact()
    metrics_dict["AOD"] = metrics_cls.average_odds_difference()
    metrics_dict["EOD"] = metrics_cls.equal_opportunity_difference()
    metrics_dict["DFBA"] = metrics_cls.differential_fairness_bias_amplification()
    metrics_dict["TI"] = metrics_cls.theil_index()
    return metrics_dict, metrics_cls
def test_bias_amplification_binary_groups():
    """Check that bias amplification equals the classifier's smoothed EDF
    minus the data's smoothed EDF, per its definition in AIF360.

    NOTE(review): ``adult_test``/``adult_pred`` and ``dataset_metric``/
    ``classifier_metric`` are not defined in this function — they are
    presumably module-level fixtures; confirm the local ``metric`` object
    is intended to be consistent with ``classifier_metric``.
    """
    import math

    metric = ClassificationMetric(adult_test, adult_pred)
    bias_amp = metric.differential_fairness_bias_amplification()
    eps_data = dataset_metric.smoothed_empirical_differential_fairness()
    eps_clf = classifier_metric.smoothed_empirical_differential_fairness()
    # Float result: exact == is fragile; compare with a tolerance instead.
    assert math.isclose(bias_amp, eps_clf - eps_data,
                        rel_tol=1e-9, abs_tol=1e-12)