def show_classifier_metrics(test_list, prediction_list):
    """Print fairness metrics for each (test, prediction) dataset pair.

    Groups are keyed on the 'sex' attribute: privileged = 1, unprivileged = 0.
    For every pair this displays a Markdown heading, the text-explained
    average-odds difference, and the recall and precision gaps between the
    unprivileged and privileged groups.

    Args:
        test_list: sequence of ground-truth datasets (aif360 dataset objects
            accepted by ClassificationMetric — TODO confirm against caller).
        prediction_list: sequence of prediction datasets, aligned with
            ``test_list``.

    Returns:
        None; output goes to the notebook display and stdout.
    """
    privileged_groups = [{'sex': 1}]
    unprivileged_groups = [{'sex': 0}]
    # enumerate(..., start=1) replaces the original hand-rolled `counter`.
    for counter, (test_, pred_) in enumerate(
            zip(test_list, prediction_list), start=1):
        display(Markdown("#### Model {} dataset metrics".format(counter)))
        model_metric = ClassificationMetric(
            test_, pred_,
            unprivileged_groups=unprivileged_groups,
            privileged_groups=privileged_groups)
        # Human-readable explanation of the average-odds difference.
        ex_model_metric = MetricTextExplainer(model_metric)
        print(ex_model_metric.average_odds_difference())
        # equal_opportunity_difference is the TPR (recall) gap between groups.
        print(
            'Difference in Recall between Unprivileged and Privileged: {:.3f}'.
            format(model_metric.equal_opportunity_difference()))
        print(
            'Difference in Precision between Unprivileged and Privileged: {:.3f}.'
            .format(
                model_metric.precision(privileged=False) -
                model_metric.precision(privileged=True)))
def get_classifier_metrics(test_list, prediction_list):
    """Compute fairness metrics for each (test, prediction) dataset pair.

    Groups are keyed on the 'sex' attribute: privileged = 1, unprivileged = 0.

    Args:
        test_list: sequence of ground-truth datasets (aif360 dataset objects
            accepted by ClassificationMetric — TODO confirm against caller).
        prediction_list: sequence of prediction datasets, aligned with
            ``test_list``.

    Returns:
        Five parallel lists (one entry per pair, each rounded to 3 decimals):
        accuracy, balanced accuracy, average-odds difference, recall
        difference, and precision difference between unprivileged and
        privileged groups.
    """
    priv = [{'sex': 1}]
    unpriv = [{'sex': 0}]
    rows = []
    for actual, predicted in zip(test_list, prediction_list):
        metric = ClassificationMetric(
            actual, predicted,
            unprivileged_groups=unpriv,
            privileged_groups=priv)
        # Balanced accuracy: mean of TPR and TNR.
        balanced = (metric.true_positive_rate() +
                    metric.true_negative_rate()) / 2
        precision_gap = (metric.precision(privileged=False) -
                         metric.precision(privileged=True))
        rows.append((
            metric.accuracy().round(3),
            balanced.round(3),
            metric.average_odds_difference().round(3),
            metric.equal_opportunity_difference().round(3),
            precision_gap.round(3),
        ))
    if not rows:
        # Empty input yields five empty lists, matching per-metric accumulators.
        return [], [], [], [], []
    acc, bal_acc, avg_odds, recall_diff, precision_diff = (
        list(col) for col in zip(*rows))
    return acc, bal_acc, avg_odds, recall_diff, precision_diff