def fairness(df):
    """Run the full Aequitas fairness ("Equidad") module and print its reports.

    Computes group crosstabs on the ``delegacion`` attribute, disparity
    metrics against the IZTAPALAPA reference group, and the resulting
    parity determinations, printing each table to stdout.

    Args:
        df (pandas.DataFrame): Aequitas-compliant frame — assumed to carry
            ``score``, ``label_value`` and a ``delegacion`` column
            (TODO confirm against caller).

    Returns:
        None. All results are printed.
    """
    print("Módulo de Equidad")
    print("-" * 30)
    f = Fairness()
    bias_ = Bias()
    g = Group()
    # Confusion-matrix counts per delegacion group.
    xtab, atts = g.get_crosstabs(df, attr_cols=["delegacion"])
    absolute_metrics = g.list_absolute_metrics(xtab)
    # Disparities are measured relative to the IZTAPALAPA reference group.
    bdf = bias_.get_disparity_predefined_groups(
        xtab,
        original_df=df,
        ref_groups_dict={'delegacion': 'IZTAPALAPA'},
        alpha=0.05,
    )
    fdf = f.get_group_value_fairness(bdf)
    parity_determinations = f.list_parities(fdf)
    print("Imprimiendo tabla de métricas (conteos en frecuencias):")
    print(fdf[['attribute_name', 'attribute_value']
              + absolute_metrics
              + bias_.list_disparities(fdf)
              + parity_determinations].round(2))
    # Fixed typo in the original message ("Impriendo" -> "Imprimiendo").
    print("Imprimiendo métricas generales")
    gof = f.get_overall_fairness(fdf)
    print(gof)
    print("Aequitas analysis completed.")
    print("-" * 30)
def fairnessf(bdf, absolute_metrics, bias):
    """Derive fairness determinations from a pre-computed Aequitas bias frame.

    Args:
        bdf (pandas.DataFrame): Output of ``Bias.get_disparity_*`` with
            disparity columns already attached.
        absolute_metrics (list[str]): Absolute metric column names, as
            returned by ``Group.list_absolute_metrics``.
        bias (aequitas.bias.Bias): Bias instance used to list the disparity
            columns present in ``bdf``.

    Returns:
        tuple: ``(fairness_table, gaf, gof)`` — the rounded per-group
        metrics/disparities/parities DataFrame, the group-attribute
        fairness frame, and the overall fairness summary.

    Side effects:
        Writes ``v_group``, ``FOR_p`` and ``FNR_p`` into ``aq_metadata``.
        NOTE(review): assumes ``aq_metadata`` is a module-level dict
        defined elsewhere in this file — confirm.
    """
    fair = Fairness()
    fdf = fair.get_group_value_fairness(bdf)
    parity_determinations = fair.list_parities(fdf)
    # Renamed local from `fairness` to avoid shadowing the module-level
    # `fairness` function.
    fairness_table = fdf[['attribute_name', 'attribute_value']
                         + absolute_metrics
                         + bias.list_disparities(fdf)
                         + parity_determinations].round(2)
    # Store metadata of the first group row for later reporting.
    aq_metadata["v_group"] = str(fdf.loc[0, "attribute_value"])
    aq_metadata["FOR_p"] = str(fdf.loc[0, "FOR Parity"])
    aq_metadata["FNR_p"] = str(fdf.loc[0, "FNR Parity"])
    gaf = fair.get_group_attribute_fairness(fdf)
    gof = fair.get_overall_fairness(fdf)
    return fairness_table, gaf, gof
def run_aequitas(predictions_data_path):
    """Audit model predictions for bias/fairness with the Aequitas toolkit.

    Transforms the prediction frame into an Aequitas-compliant layout
    (binary ``score``/``label_value``, a majority-rule ``race`` class and a
    tercile ``income`` class), then computes group metrics, disparities
    versus a White/rich reference group, and fairness determinations.

    Args:
        predictions_data_path (str): CSV path with the selected model's
            predictions (unzip the selected file before running).

    Returns:
        None. Plots are saved in the ``charts/`` folder.
    """
    best_model_pred = pd.read_csv(predictions_data_path)

    # Human-readable race classes used for plotting.
    aqc = ['Other', 'White', 'African American', 'Asian', 'Hispanic',
           'American Indian']
    # Scaled race-proportion feature columns.
    aqcol = ['White alone_scale', 'Black/AfAmer alone_scale',
             'AmInd/Alaskn alone_scale', 'Asian alone_scale',
             'HI alone_scale', 'Some other race alone_scale',
             'Hispanic or Latino_scale']
    display(aqcol)
    aqcol_label = ['no_renew_nextpd', 'pred_class_10%',
                   'Median household income (1999 dollars)_scale'] + aqcol
    # .copy() prevents chained assignment on a view of best_model_pred
    # (SettingWithCopyWarning / silently lost writes under copy-on-write).
    aqus = best_model_pred[aqcol_label].copy()

    print('Creating classes for racial and income distribution', '\n')
    # Convert boolean label/prediction columns to 0/1.
    bin_var = ['no_renew_nextpd', 'pred_class_10%']
    for var in bin_var:
        aqus[var] = np.where(aqus[var] == True, 1, 0)

    # Aequitas expects `label_value` and `score` columns.
    aqus.rename(columns={'no_renew_nextpd': 'label_value',
                         'pred_class_10%': 'score'},
                inplace=True)

    print('Define majority rule defined on relative proportion of the class',
          '\n')
    # Majority rule: the race column with the largest proportion wins.
    aqus['race'] = aqus[aqcol].idxmax(axis=1)
    # Income terciles.
    aqus['income'] = pd.qcut(
        aqus['Median household income (1999 dollars)_scale'],
        3, labels=["rich", "median", "poor"])

    # Drop the raw feature columns now that the classes exist.
    aqus.drop(aqcol, axis=1, inplace=True)
    aqus.drop(['Median household income (1999 dollars)_scale'],
              axis=1, inplace=True)
    aq = aqus.reset_index()
    aq.rename(columns={'index': 'entity_id'}, inplace=True)
    # NOTE(review): 'HI alone_scale' is mapped to 'Hispanic' while
    # 'Hispanic or Latino_scale' is never mapped (rows where it wins keep
    # the raw column name and are excluded from the aqc-filtered plots) —
    # confirm this mapping is intentional.
    aq['race'] = aq['race'].replace({
        'Some other race alone_scale': 'Other',
        'White alone_scale': 'White',
        'Black/AfAmer alone_scale': 'African American',
        'Asian alone_scale': 'Asian',
        'HI alone_scale': 'Hispanic',
        'AmInd/Alaskn alone_scale': 'American Indian'})

    # Consolidate types: Aequitas wants object dtype on these columns.
    for col in ('income', 'entity_id', 'score', 'label_value'):
        aq[col] = aq[col].astype(object)

    # Distribution of categories.
    aq_palette = sns.diverging_palette(225, 35, n=2)
    by_race = sns.countplot(x="race", data=aq[aq.race.isin(aqc)])
    by_race.set_xticklabels(by_race.get_xticklabels(), rotation=40,
                            ha="right")
    plt.savefig('charts/Racial distribution in data.png')

    # Primary distribution against score — race, then income.
    aq_palette = sns.diverging_palette(225, 35, n=2)
    by_race = sns.countplot(x="race", hue="score",
                            data=aq[aq.race.isin(aqc)],
                            palette=aq_palette)
    by_race.set_xticklabels(by_race.get_xticklabels(), rotation=40,
                            ha="right")
    plt.savefig('charts/race_score.png')
    by_inc = sns.countplot(x="income", hue="score", data=aq,
                           palette=aq_palette)
    plt.savefig('charts/income_score.png')

    # Group crosstabs and metric plots (incl. false-negative rates).
    g = Group()
    xtab, _ = g.get_crosstabs(aq)
    aqp = Plot()
    fnr = aqp.plot_group_metric(xtab, 'fnr', min_group_size=0.05)
    p = aqp.plot_group_metric_all(xtab,
                                  metrics=['ppr', 'pprev', 'fnr', 'fpr'],
                                  ncols=4)
    p.savefig('charts/eth_metrics.png')

    # Bias with respect to the White/rich reference category.
    b = Bias()
    bdf = b.get_disparity_predefined_groups(
        xtab, original_df=aq,
        ref_groups_dict={'race': 'White', 'income': 'rich'},
        alpha=0.05, mask_significance=True)
    bdf.style
    calculated_disparities = b.list_disparities(bdf)
    disparity_significance = b.list_significance(bdf)
    aqp.plot_disparity(bdf, group_metric='fpr_disparity',
                       attribute_name='race', significance_alpha=0.05)
    plt.savefig('charts/disparity.png')

    # Alternative reference framings (computed for inspection, not plotted).
    hbdf = b.get_disparity_predefined_groups(
        xtab, original_df=aq,
        ref_groups_dict={'race': 'African American', 'income': 'poor'},
        alpha=0.05, mask_significance=False)
    majority_bdf = b.get_disparity_major_group(xtab, original_df=aq,
                                               mask_significance=True)
    min_metric_bdf = b.get_disparity_min_metric(df=xtab, original_df=aq)

    # Fairness determinations and summary plots.
    f = Fairness()
    fdf = f.get_group_value_fairness(bdf)
    # Fixed misspelled local name ("detrminations").
    parity_determinations = f.list_parities(fdf)
    gaf = f.get_group_attribute_fairness(fdf)
    gof = f.get_overall_fairness(fdf)
    z = aqp.plot_fairness_group(fdf, group_metric='ppr')
    plt.savefig('charts/fairness_overall.png')
    # Checking for False Omission Rate and False Negative Rates.
    fg = aqp.plot_fairness_group_all(fdf, metrics=['for', 'fnr'], ncols=2)
    fg.savefig('charts/fairness_metrics.png')
    return None
# NOTE(review): this run of statements sits at module level and references
# names (b, xtab, df, aqp, bdf, absolute_metrics, calculated_disparities,
# disparity_significance) that are not defined at module scope in this file —
# it would raise NameError at import time. It looks like notebook residue;
# consider wrapping it in a function or removing it. Left byte-identical here.

# Disparities relative to a predefined WHITE/MALE reference group.
# NOTE(review): alpha=0.5 differs from the alpha=0.05 used everywhere else
# in this file — confirm this is intentional.
hbdf = b.get_disparity_predefined_groups(xtab,
                                         original_df=df,
                                         ref_groups_dict={
                                             'race': 'WHITE',
                                             'gender': 'MALE'
                                         },
                                         alpha=0.5,
                                         mask_significance=False)
# Disparities relative to each attribute's majority group.
majority_bdf = b.get_disparity_major_group(xtab, original_df=df,
                                           mask_significance=False)
# Bare expression: renders only in a notebook; a no-op when run as a script.
majority_bdf[['attribute_name', 'attribute_value']
             + calculated_disparities + disparity_significance]
tm_capped = aqp.plot_disparity_all(hbdf,
                                   attributes=['gender', 'race'],
                                   metrics='all',
                                   significance_alpha=0.05)
f = Fairness()
fdf = f.get_group_value_fairness(bdf)
parity_detrminations = f.list_parities(fdf)
# Bare .style expression: notebook display only.
fdf[['attribute_name', 'attribute_value'] + absolute_metrics
    + calculated_disparities + parity_detrminations].style
fg = aqp.plot_fairness_group_all(fdf, ncols=5, metrics="all")
n_tm = aqp.plot_fairness_disparity_all(fdf,
                                       attributes=['race', 'gender'],
                                       significance_alpha=0.05)
def fun_bias_fair(a_zip, a_type, fea_eng, model):
    """Audit model predictions for FOR/FNR bias and fairness by zone and
    facility group using Aequitas.

    Args:
        a_zip (pandas.DataFrame): zip-to-zone lookup (has a ``zip`` column).
        a_type (pandas.DataFrame): facility_type-to-facility_group lookup.
        fea_eng (pandas.DataFrame): engineered feature frame including the
            raw ``zip``, ``facility_type`` and ``pass`` columns.
        model: fitted estimator exposing ``predict``.

    Returns:
        pandas.DataFrame: per-group table with for/fnr metrics, their
        disparities and the FOR/FNR parity determinations (also printed).
    """
    # Predict on the feature matrix (non-feature columns dropped).
    X = fea_eng.drop(['aka_name', 'facility_type', 'address',
                      'inspection_date', 'inspection_type', 'violations',
                      'results', 'pass'], axis=1)
    y_pred = model.predict(X)

    # Entity frame: zip, facility_type, true label, prediction.
    xt = pd.DataFrame([fea_eng['zip'].astype(float),
                       fea_eng['facility_type'],
                       fea_eng['pass'],
                       y_pred]).transpose()
    a_zip['zip'] = a_zip['zip'].astype(float)
    compas = pd.merge(left=xt, right=a_zip, how='left',
                      left_on='zip', right_on='zip')
    compas = pd.merge(left=compas, right=a_type, how='left',
                      left_on='facility_type', right_on='facility_type')
    # Aequitas requires `score` and `label_value` columns. The original
    # rename target for 'pass' was corrupted ('******'); it must be
    # 'label_value', which is cast and consumed just below.
    # NOTE(review): confirm 'Unnamed 0' matches the prediction column name
    # produced by the transpose above.
    compas = compas.rename(columns={'Unnamed 0': 'score',
                                    'pass': 'label_value'})
    compas.pop('zip')
    compas.pop('facility_type')
    compas['zone'] = compas['zone'].astype(str)
    compas['score'] = compas['score'].astype(int)
    compas['label_value'] = compas['label_value'].astype(int)

    from aequitas.group import Group
    from aequitas.bias import Bias
    from aequitas.fairness import Fairness

    # Group metrics.
    g = Group()
    xtab, attrbs = g.get_crosstabs(compas)
    absolute_metrics = g.list_absolute_metrics(xtab)

    # Bias vs. the West/grocery reference group.
    # (Unused exploratory frames from the original — majority-group and
    # min-metric disparities, duplicate metric tables — were dead locals
    # and have been removed.)
    bias = Bias()
    bdf = bias.get_disparity_predefined_groups(
        xtab, original_df=compas,
        ref_groups_dict={'zone': 'West', 'facility_group': 'grocery'},
        alpha=0.05)

    # Fairness determinations.
    fair = Fairness()
    fdf = fair.get_group_value_fairness(bdf)
    parity_determinations = fair.list_parities(fdf)
    fair_fdf = fdf[['attribute_name', 'attribute_value'] + absolute_metrics
                   + bias.list_disparities(fdf)
                   + parity_determinations].round(2)

    # Final report: FOR/FNR metrics, disparities and parities per group.
    # .copy() so the in-place rename below does not mutate a slice view.
    tab_bias_fair = fair_fdf[['attribute_name', 'attribute_value', 'for',
                              'fnr', 'for_disparity', 'fnr_disparity',
                              'FOR Parity', 'FNR Parity']].copy()
    tab_bias_fair.rename(columns={'attribute_value': 'group_name',
                                  'FOR Parity': 'for_parity',
                                  'FNR Parity': 'fnr_parity',
                                  'for': 'for_'},
                         inplace=True)
    print(tab_bias_fair)
    return tab_bias_fair