Example #1
def biasf(df_aeq, xtab):
    """
     args:
         df (dataframe): Recibe el data frame que tiene los features sobre los que queremos medir la disparidad
     returns:
         -
    """
    bias = Bias()
    bdf = bias.get_disparity_predefined_groups(
        xtab,
        original_df=df_aeq,
        ref_groups_dict={'reference_group': 'High'},
        alpha=0.05,
        check_significance=True,
        mask_significance=True)

    ## Store metadata: the crosstab's k value (module-level aq_metadata dict)
    aq_metadata["value_k"] = list(bdf["k"])[0]

    disparities = bdf[['attribute_name', 'attribute_value'] +
                      bias.list_disparities(bdf)].round(2)

    majority_bdf = bias.get_disparity_major_group(xtab, original_df=df_aeq)
    disparities_majority = majority_bdf[
        ['attribute_name', 'attribute_value'] +
        bias.list_disparities(majority_bdf)].round(2)

    min_bdf = bias.get_disparity_min_metric(xtab, original_df=df_aeq)
    disparities_min = min_bdf[['attribute_name', 'attribute_value'] +
                              bias.list_disparities(min_bdf)].round(2)

    return bdf, disparities, disparities_majority, disparities_min, bias
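A minimal usage sketch for biasf, assuming df_aeq is an Aequitas-compliant frame (score, label_value, and a reference_group attribute column with a 'High' category, per the ref_groups_dict above) and that a module-level aq_metadata dict exists:

from aequitas.group import Group

aq_metadata = {}                   # dict biasf writes 'value_k' into (assumption)

g = Group()
xtab, _ = g.get_crosstabs(df_aeq)  # df_aeq: hypothetical compliant frame
bdf, disparities, disparities_majority, disparities_min, bias = biasf(df_aeq, xtab)
print(disparities.head())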
Example #2
def audit(df, configs, model_id=1, preprocessed=False):
    """

    :param df:
    :param ref_groups_method:
    :param model_id:
    :param configs:
    :param report:
    :param preprocessed:
    :return:
    """
    if not preprocessed:
        df, attr_cols_input = preprocess_input_df(df)
        if not configs.attr_cols:
            configs.attr_cols = attr_cols_input
    g = Group()
    print('Welcome to Aequitas-Audit')
    print('Fairness measures requested:',
          ','.join(configs.fair_measures_requested))
    groups_model, attr_cols = g.get_crosstabs(
        df,
        score_thresholds=configs.score_thresholds,
        model_id=model_id,
        attr_cols=configs.attr_cols)
    print('audit: df shape from the crosstabs:', groups_model.shape)
    b = Bias()
    # TODO: move this to the new configs object; attr_cols are now passed through the configs object.
    ref_groups_method = configs.ref_groups_method
    if ref_groups_method == 'predefined' and configs.ref_groups:
        bias_df = b.get_disparity_predefined_groups(groups_model,
                                                    configs.ref_groups)
    elif ref_groups_method == 'majority':
        bias_df = b.get_disparity_major_group(groups_model)
    else:
        bias_df = b.get_disparity_min_metric(groups_model)
    print('Any NaN?: ', bias_df.isnull().values.any())
    print('bias_df shape:', bias_df.shape)
    f = Fairness(tau=configs.fairness_threshold)
    print('Fairness Threshold:', configs.fairness_threshold)
    print('Fairness Measures:', configs.fair_measures_requested)
    group_value_df = f.get_group_value_fairness(
        bias_df, fair_measures_requested=configs.fair_measures_requested)
    group_attribute_df = f.get_group_attribute_fairness(
        group_value_df,
        fair_measures_requested=configs.fair_measures_requested)
    fair_results = f.get_overall_fairness(group_attribute_df)
    print(fair_results)
    report = None
    if configs.report is True:
        report = audit_report_markdown(configs, group_value_df,
                                       f.fair_measures_depend, fair_results)
    return group_value_df, report
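The configs argument is only read for a handful of attributes here; a minimal stand-in sketch, with field names taken from the lookups in the function body, values illustrative, and input_df hypothetical:

from types import SimpleNamespace

configs = SimpleNamespace(
    attr_cols=None,                # filled from preprocess_input_df() when falsy
    score_thresholds=None,
    fair_measures_requested=['Statistical Parity', 'Impact Parity'],
    ref_groups_method='majority',  # 'predefined' / 'majority' / min-metric fallback
    ref_groups=None,
    fairness_threshold=0.8,
    report=False)

group_value_df, report = audit(input_df, configs)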
Example #3
    def get_model_fairness(self, level='model'):
        g = Group()
        xtab, _ = g.get_crosstabs(self.df)

        b = Bias()
        majority_bdf = b.get_disparity_major_group(xtab,
                                                   original_df=self.df,
                                                   mask_significance=True)

        f = Fairness()
        fdf = f.get_group_value_fairness(majority_bdf)
        f_res = fdf
        if level == 'model':
            f_res = f.get_overall_fairness(fdf)
        elif level == 'attribute':
            f_res = f.get_group_attribute_fairness(fdf)

        return f_res
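A sketch of how the three level values might be used, assuming ModelAuditor is the (hypothetical) class this method belongs to and aequitas_df is a compliant frame bound to self.df:

auditor = ModelAuditor(aequitas_df)                   # hypothetical class and frame
print(auditor.get_model_fairness(level='model'))      # overall fairness summary
print(auditor.get_model_fairness(level='attribute'))  # parity rolled up per attribute
print(auditor.get_model_fairness(level='group'))      # any other level: group-value frame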
Example #4
def get_fpr_disparity(df, protected_attribute):
    g = Group()
    xtab, _ = g.get_crosstabs(df)

    # Use the most frequent 'distance' category as its reference group
    df_count = df[['distance', 'score']].groupby(['distance']).agg(['count'])
    get_largest_cat = df_count['score'].sort_values(['count'],
                                                    ascending=False).index[0]

    b = Bias()
    bdf = b.get_disparity_predefined_groups(
        xtab,
        original_df=df,
        ref_groups_dict={'originwac': df[:1].originwac[0],
                         'distance': get_largest_cat},
        alpha=0.05,
        mask_significance=True)
    calculated_disparities = b.list_disparities(bdf)
    disparity_significance = b.list_significance(bdf)
    majority_bdf = b.get_disparity_major_group(xtab, original_df=df,
                                               mask_significance=True)
    results = majority_bdf[['attribute_name', 'attribute_value'] +
                           calculated_disparities + disparity_significance]

    res_distance = results[results.attribute_name == protected_attribute]
    values = res_distance.attribute_value.tolist()
    disparity = res_distance.fpr_disparity.tolist()
    bias_result = values + disparity
    return bias_result
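The returned list is the group labels followed by their FPR disparities, in the same order; a usage sketch with trips_df hypothetical:

bias_result = get_fpr_disparity(trips_df, 'distance')  # trips_df: hypothetical compliant frame
n = len(bias_result) // 2
groups, fpr_disparities = bias_result[:n], bias_result[n:]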
Example #5
def bias(df):
    """
    Function to print bias metrics. 
    :param df: Aequitas-compliant dataframe.  
    """
    
    print("Módulo de Sesgo")
    print("-"*30)
    bias_ = Bias()
    g = Group()
    xtab, atts = g.get_crosstabs(df, attr_cols=["delegacion"])
    absolute_metrics = g.list_absolute_metrics(xtab)
    bdf = bias_.get_disparity_predefined_groups(xtab, original_df=df,
                                                ref_groups_dict={'delegacion': 'IZTAPALAPA'},
                                                alpha=0.05)
    print("Disparities:")
    print(bdf[['attribute_name', 'attribute_value'] + bias_.list_disparities(bdf)].round(2))
    print("Min-Metric Analysis:")
    min_bdf = bias_.get_disparity_min_metric(xtab, original_df=df)
    print(min_bdf[['attribute_name', 'attribute_value'] + bias_.list_disparities(min_bdf)].round(2))
    print("Majority Analysis:")
    majority_bdf = bias_.get_disparity_major_group(xtab, original_df=df)
    print(majority_bdf[['attribute_name', 'attribute_value'] + bias_.list_disparities(majority_bdf)].round(2))
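A self-contained sketch of the compliant frame this function expects; rows are illustrative only:

import pandas as pd

df = pd.DataFrame({
    'score':       [1, 0, 1, 1, 0, 1],
    'label_value': [1, 0, 0, 1, 1, 0],
    'delegacion':  ['IZTAPALAPA', 'COYOACAN', 'IZTAPALAPA',
                    'COYOACAN', 'IZTAPALAPA', 'COYOACAN'],
})
bias(df)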
Example #6
    def _write_audit_to_db(self, model_id, protected_df, predictions_proba,
                           labels, tie_breaker, subset_hash, matrix_type,
                           evaluation_start_time, evaluation_end_time,
                           matrix_uuid):
        """
        Runs the bias audit and saves the result in the bias table.

        Args:
            model_id (int) primary key of the model
            protected_df (pandas.DataFrame) A dataframe with protected group attributes
            predictions_proba (np.array) List of prediction probabilities
            labels (pandas.Series): List of labels
            tie_breaker: 'best' or 'worst' case tiebreaking rule that the predictions and labels were sorted by
            subset_hash (str) the hash of the subset, if any, that the
                evaluation is made on
            matrix_type (triage.component.catwalk.storage.MatrixType)
                The type of matrix used
            evaluation_start_time (pandas._libs.tslibs.timestamps.Timestamp)
                first as_of_date included in the evaluation period
            evaluation_end_time (pandas._libs.tslibs.timestamps.Timestamp) last
                as_of_date included in the evaluation period
            matrix_uuid: the uuid of the matrix
        Returns:
            None; results are written to the bias table.
        """
        if protected_df.empty:
            return

        # To preprocess, Aequitas requires the following columns:
        # score, label_value, model_id, and the protected attributes.
        # Fill out protected_df, which only has protected attributes at this point.
        protected_df = protected_df.copy()
        protected_df['model_id'] = model_id
        protected_df['score'] = predictions_proba
        protected_df['label_value'] = labels
        aequitas_df, attr_cols_input = preprocess_input_df(protected_df)

        # create group crosstabs
        g = Group()
        score_thresholds = {}
        score_thresholds['rank_abs'] = self.bias_config['thresholds'].get(
            'top_n', [])
        # convert 0-100 percentile to 0-1 that Aequitas expects
        score_thresholds['rank_pct'] = [
            value / 100.0
            for value in self.bias_config['thresholds'].get('percentiles', [])
        ]
        groups_model, attr_cols = g.get_crosstabs(
            aequitas_df,
            score_thresholds=score_thresholds,
            attr_cols=attr_cols_input)
        # analyze bias from reference groups
        bias = Bias()
        ref_groups_method = self.bias_config.get('ref_groups_method', None)
        if ref_groups_method == 'predefined' and self.bias_config['ref_groups']:
            bias_df = bias.get_disparity_predefined_groups(
                groups_model, aequitas_df, self.bias_config['ref_groups'])
        elif ref_groups_method == 'majority':
            bias_df = bias.get_disparity_major_group(groups_model, aequitas_df)
        else:
            bias_df = bias.get_disparity_min_metric(groups_model, aequitas_df)

        # analyze fairness for each group
        f = Fairness(tau=0.8)  # the default fairness threshold is 0.8
        group_value_df = f.get_group_value_fairness(bias_df)
        group_value_df['subset_hash'] = subset_hash
        group_value_df['tie_breaker'] = tie_breaker
        group_value_df['evaluation_start_time'] = evaluation_start_time
        group_value_df['evaluation_end_time'] = evaluation_end_time
        group_value_df['matrix_uuid'] = matrix_uuid
        group_value_df = group_value_df.rename(
            index=str, columns={"score_threshold": "parameter"})
        if group_value_df.empty:
            raise ValueError(f"""
            Bias audit: aequitas_audit() failed.
            Returned empty dataframe for model_id = {model_id}, and subset_hash = {subset_hash}
            and matrix_type = {matrix_type}""")
        with scoped_session(self.db_engine) as session:
            for index, row in group_value_df.iterrows():
                session.query(matrix_type.aequitas_obj).filter_by(
                    model_id=row['model_id'],
                    evaluation_start_time=row['evaluation_start_time'],
                    evaluation_end_time=row['evaluation_end_time'],
                    subset_hash=row['subset_hash'],
                    parameter=row['parameter'],
                    tie_breaker=row['tie_breaker'],
                    matrix_uuid=row['matrix_uuid'],
                    attribute_name=row['attribute_name'],
                    attribute_value=row['attribute_value']).delete()
            session.bulk_insert_mappings(
                matrix_type.aequitas_obj,
                group_value_df.to_dict(orient="records"))
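A sketch of the bias_config shape this method reads; key names come from the lookups above, values are illustrative:

bias_config = {
    'thresholds': {
        'top_n': [100],             # absolute-rank cutoffs -> 'rank_abs'
        'percentiles': [1.0, 5.0],  # 0-100 percentiles, converted to 0-1 'rank_pct'
    },
    'ref_groups_method': 'predefined',  # or 'majority'; anything else -> min metric
    'ref_groups': {'race': 'white', 'sex': 'male'},  # illustrative attribute/value pairs
}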
Example #7
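This example is a notebook fragment: xtab, df, bdf, absolute_metrics, b, and aqp must already exist. A sketch of the assumed setup, reusing the WHITE/MALE reference groups from the hbdf call below (df hypothetical):

from aequitas.group import Group
from aequitas.bias import Bias
from aequitas.fairness import Fairness
from aequitas.plotting import Plot

g = Group()
xtab, _ = g.get_crosstabs(df)      # df: hypothetical compliant frame
absolute_metrics = g.list_absolute_metrics(xtab)

b = Bias()
bdf = b.get_disparity_predefined_groups(xtab,
                                        original_df=df,
                                        ref_groups_dict={'race': 'WHITE',
                                                         'gender': 'MALE'},
                                        alpha=0.05,
                                        mask_significance=True)
calculated_disparities = b.list_disparities(bdf)
disparity_significance = b.list_significance(bdf)

aqp = Plot()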
calculated_disparities
disparity_significance
bdf[['attribute_name', 'attribute_value'] + calculated_disparities +
    disparity_significance]

hbdf = b.get_disparity_predefined_groups(xtab,
                                         original_df=df,
                                         ref_groups_dict={
                                             'race': 'WHITE',
                                             'gender': 'MALE'
                                         },
                                         alpha=0.5,
                                         mask_significance=False)

majority_bdf = b.get_disparity_major_group(xtab,
                                           original_df=df,
                                           mask_significance=False)
majority_bdf[['attribute_name', 'attribute_value'] + calculated_disparities +
             disparity_significance]

tm_capped = aqp.plot_disparity_all(hbdf,
                                   attributes=['gender', 'race'],
                                   metrics='all',
                                   significance_alpha=0.05)

f = Fairness()
fdf = f.get_group_value_fairness(bdf)
parity_determinations = f.list_parities(fdf)
fdf[['attribute_name', 'attribute_value'] + absolute_metrics +
    calculated_disparities + parity_determinations].style
fg = aqp.plot_fairness_group_all(fdf, ncols=5, metrics="all")
Example #8
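Another notebook fragment; a sketch of the setup it assumes, where tabla is the (hypothetical) Aequitas-compliant input frame:

from aequitas.group import Group
from aequitas.bias import Bias
from aequitas.fairness import Fairness
from aequitas.plotting import Plot

g = Group()
xtab, _ = g.get_crosstabs(tabla)   # tabla: hypothetical compliant frame
absolute_metrics = g.list_absolute_metrics(xtab)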
xtab[[col for col in xtab.columns if col not in absolute_metrics]]

aqp = Plot()

a = aqp.plot_group_metric_all(xtab, ncols=3)

###### What is the level of disparity between the population groups (reference categories)? ######

b = Bias()

bdf = b.get_disparity_predefined_groups(xtab, original_df=tabla,
                                        ref_groups_dict={'borough':'borough_brooklyn', 'programtype':'programtype_preschool'},
                                        alpha=0.05,
                                        mask_significance=True)

hbdf = b.get_disparity_predefined_groups(xtab, original_df=tabla,
                                         ref_groups_dict={'borough':'borough_brooklyn', 'programtype':'programtype_preschool'},
                                         alpha=0.05,
                                         mask_significance=False)


majority_bdf = b.get_disparity_major_group(xtab, original_df=tabla, mask_significance=True)

f = Fairness()

fdf = f.get_group_value_fairness(bdf)

fg = aqp.plot_fairness_group_all(fdf, ncols=5, metrics="all")

a_tm = aqp.plot_fairness_disparity_all(fdf, attributes=['borough'], metrics='all',
                                       significance_alpha=0.05)


def run_aequitas(predictions_data_path):
    '''
    Checks the false negative rate, i.e. the chance of certain groups missing
    out on assistance, using the Aequitas toolkit.
    The function transforms the data to make it Aequitas-compliant and checks a
    series of bias and fairness metrics.
    Input: model prediction path for the selected model (unzip the selected file to run)
    Output: plots saved in the charts folder
    '''

    best_model_pred = pd.read_csv(predictions_data_path)

    # Transform data for aequitas module compliance
    aqc = [
        'Other', 'White', 'African American', 'Asian', 'Hispanic',
        'American Indian'
    ]
    aqcol = [
        'White alone_scale', 'Black/AfAmer alone_scale',
        'AmInd/Alaskn alone_scale', 'Asian alone_scale', 'HI alone_scale',
        'Some other race alone_scale', 'Hispanic or Latino_scale'
    ]
    display(aqcol)
    aqcol_label = [
        'no_renew_nextpd', 'pred_class_10%',
        'Median household income (1999 dollars)_scale'
    ] + aqcol
    aqus = best_model_pred[aqcol_label]
    print('Creating classes for racial and income distribution', '\n')

    # Convert to binary
    bin_var = [
        'no_renew_nextpd',
        'pred_class_10%',
    ]
    for var in bin_var:
        aqus[var] = np.where(aqus[var] == True, 1, 0)
    # Rename
    aqus.rename(columns={
        'no_renew_nextpd': 'label_value',
        'pred_class_10%': 'score'
    },
                inplace=True)

    print('Defining majority rule based on relative proportion of the class',
          '\n')
    aqus['race'] = aqus[aqcol].idxmax(axis=1)
    # Use quantile income distribution
    aqus['income'] = pd.qcut(
        aqus['Median household income (1999 dollars)_scale'],
        3,
        labels=["rich", "median", "poor"])

    # Final form
    aqus.drop(aqcol, axis=1, inplace=True)
    aqus.drop(['Median household income (1999 dollars)_scale'],
              axis=1,
              inplace=True)
    aq = aqus.reset_index()
    aq.rename(columns={'index': 'entity_id'}, inplace=True)
    aq['race'] = aq['race'].replace({
        'Some other race alone_scale':
        'Other',
        'White alone_scale':
        'White',
        'Black/AfAmer alone_scale':
        'African American',
        'Asian alone_scale':
        'Asian',
        'HI alone_scale':
        'Hispanic',
        'AmInd/Alaskn alone_scale':
        'American Indian'
    })

    # Consolidate types
    aq['income'] = aq['income'].astype(object)
    aq['entity_id'] = aq['entity_id'].astype(object)
    aq['score'] = aq['score'].astype(object)
    aq['label_value'] = aq['label_value'].astype(object)

    # Distribution of categories
    aq_palette = sns.diverging_palette(225, 35, n=2)
    by_race = sns.countplot(x="race", data=aq[aq.race.isin(aqc)])
    by_race.set_xticklabels(by_race.get_xticklabels(), rotation=40, ha="right")
    plt.savefig('charts/Racial distribution in data.png')

    # Primary distribution against score
    aq_palette = sns.diverging_palette(225, 35, n=2)
    by_race = sns.countplot(x="race",
                            hue="score",
                            data=aq[aq.race.isin(aqc)],
                            palette=aq_palette)
    by_race.set_xticklabels(by_race.get_xticklabels(), rotation=40, ha="right")
    # Race
    plt.savefig('charts/race_score.png')
    # Income
    by_inc = sns.countplot(x="income",
                           hue="score",
                           data=aq,
                           palette=aq_palette)
    plt.savefig('charts/income_score.png')

    # Set Group
    g = Group()
    xtab, _ = g.get_crosstabs(aq)

    # False Negative Rates
    aqp = Plot()
    fnr = aqp.plot_group_metric(xtab, 'fnr', min_group_size=0.05)
    p = aqp.plot_group_metric_all(xtab,
                                  metrics=['ppr', 'pprev', 'fnr', 'fpr'],
                                  ncols=4)
    p.savefig('charts/eth_metrics.png')

    # Bias with respect to white rich category
    b = Bias()
    bdf = b.get_disparity_predefined_groups(xtab,
                                            original_df=aq,
                                            ref_groups_dict={
                                                'race': 'White',
                                                'income': 'rich'
                                            },
                                            alpha=0.05,
                                            mask_significance=True)
    bdf.style
    calculated_disparities = b.list_disparities(bdf)
    disparity_significance = b.list_significance(bdf)
    aqp.plot_disparity(bdf,
                       group_metric='fpr_disparity',
                       attribute_name='race',
                       significance_alpha=0.05)
    plt.savefig('charts/disparity.png')

    # Fairness
    hbdf = b.get_disparity_predefined_groups(xtab,
                                             original_df=aq,
                                             ref_groups_dict={
                                                 'race': 'African American',
                                                 'income': 'poor'
                                             },
                                             alpha=0.05,
                                             mask_significance=False)
    majority_bdf = b.get_disparity_major_group(xtab,
                                               original_df=aq,
                                               mask_significance=True)
    min_metric_bdf = b.get_disparity_min_metric(df=xtab, original_df=aq)
    f = Fairness()
    fdf = f.get_group_value_fairness(bdf)
    parity_detrminations = f.list_parities(fdf)
    gaf = f.get_group_attribute_fairness(fdf)
    gof = f.get_overall_fairness(fdf)
    z = aqp.plot_fairness_group(fdf, group_metric='ppr')
    plt.savefig('charts/fairness_overall.png')
    # Checking for False Omission Rate and False Negative Rates
    fg = aqp.plot_fairness_group_all(fdf, metrics=['for', 'fnr'], ncols=2)
    fg.savefig('charts/fairness_metrics.png')

    return None
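run_aequitas leans on a notebook environment; a sketch of the imports it assumes, plus an illustrative call (the path is hypothetical):

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import display
from aequitas.group import Group
from aequitas.bias import Bias
from aequitas.fairness import Fairness
from aequitas.plotting import Plot

run_aequitas('predictions/best_model_predictions.csv')  # hypothetical path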
def fun_bias_fair(a_zip, a_type, fea_eng, model):

    X = fea_eng.drop([
        'aka_name', 'facility_type', 'address', 'inspection_date',
        'inspection_type', 'violations', 'results', 'pass'
    ],
                     axis=1)
    y_pred = model.predict(X)

    xt = pd.DataFrame([
        fea_eng['zip'].astype(float), fea_eng['facility_type'],
        fea_eng['pass'], y_pred
    ]).transpose()
    a_zip['zip'] = a_zip['zip'].astype(float)
    compas = pd.merge(left=xt,
                      right=a_zip,
                      how='left',
                      left_on='zip',
                      right_on='zip')
    compas = pd.merge(left=compas,
                      right=a_type,
                      how='left',
                      left_on='facility_type',
                      right_on='facility_type')
    compas = compas.rename(columns={
        'Unnamed 0': 'score',
        'pass': 'label_value'
    })

    compas.pop('zip')
    compas.pop('facility_type')

    compas['zone'] = compas['zone'].astype(str)
    compas['score'] = compas['score'].astype(int)
    compas['label_value'] = compas['label_value'].astype(int)

    from aequitas.group import Group
    from aequitas.bias import Bias
    from aequitas.fairness import Fairness

    #Group
    g = Group()
    xtab, attrbs = g.get_crosstabs(compas)
    absolute_metrics = g.list_absolute_metrics(xtab)
    xtab[[col for col in xtab.columns if col not in absolute_metrics]]
    group_df = xtab[['attribute_name', 'attribute_value'] +
                    [col for col in xtab.columns
                     if col in absolute_metrics]].round(4)
    abs_gpo = xtab[['attribute_name', 'attribute_value'] +
                   [col for col in xtab.columns
                    if col in absolute_metrics]].round(4)

    #Bias
    bias = Bias()
    bdf = bias.get_disparity_predefined_groups(xtab,
                                               original_df=compas,
                                               ref_groups_dict={
                                                   'zone': 'West',
                                                   'facility_group': 'grocery'
                                               },
                                               alpha=0.05)
    # View disparity metrics added to dataframe
    bias_bdf = bdf[['attribute_name', 'attribute_value'] +
                   bias.list_disparities(bdf)].round(2)
    majority_bdf = bias.get_disparity_major_group(xtab, original_df=compas)
    bias_maj_bdf = majority_bdf[['attribute_name', 'attribute_value'] +
                                bias.list_disparities(majority_bdf)].round(2)
    min_bdf = bias.get_disparity_min_metric(xtab, original_df=compas)
    bias_min_bdf = min_bdf[['attribute_name', 'attribute_value'] +
                           bias.list_disparities(min_bdf)].round(2)
    min_bdf[['attribute_name', 'attribute_value'] +
            bias.list_disparities(min_bdf)].round(2)

    #Fairness
    fair = Fairness()
    fdf = fair.get_group_value_fairness(bdf)
    parity_determinations = fair.list_parities(fdf)
    fair_fdf = fdf[['attribute_name', 'attribute_value'] + absolute_metrics +
                   bias.list_disparities(fdf) + parity_determinations].round(2)
    gaf = fair.get_group_attribute_fairness(fdf)
    fairness_df = fdf.copy()
    gof = fair.get_overall_fairness(fdf)

    tab_bias_fair = fair_fdf[[
        'attribute_name', 'attribute_value', 'for', 'fnr', 'for_disparity',
        'fnr_disparity', 'FOR Parity', 'FNR Parity'
    ]]
    tab_bias_fair.rename(columns={
        'attribute_value': 'group_name',
        'FOR Parity': 'for_parity',
        'FNR Parity': 'fnr_parity',
        'for': 'for_'
    },
                         inplace=True)

    print(tab_bias_fair)

    return tab_bias_fair
Example #11
def audit(df, configs, preprocessed=False):
    """

    :param df:
    :param configs:
    :param preprocessed:
    :return:
    """
    if not preprocessed:
        df, attr_cols_input = preprocess_input_df(df)
        if not configs.attr_cols:
            configs.attr_cols = attr_cols_input
    g = Group()
    print('Welcome to Aequitas-Audit')
    print('Fairness measures requested:',
          ','.join(configs.fair_measures_requested))
    groups_model, attr_cols = g.get_crosstabs(
        df,
        score_thresholds=configs.score_thresholds,
        attr_cols=configs.attr_cols)
    print('audit: df shape from the crosstabs:', groups_model.shape)
    b = Bias()

    # TODO: move this to the new configs object; attr_cols are now passed through the configs object.
    ref_groups_method = configs.ref_groups_method
    if ref_groups_method == 'predefined' and configs.ref_groups:
        bias_df = b.get_disparity_predefined_groups(
            groups_model,
            df,
            configs.ref_groups,
            check_significance=configs.check_significance,
            alpha=configs.alpha,
            selected_significance=configs.selected_significance,
            mask_significance=configs.mask_significance)
    elif ref_groups_method == 'majority':
        bias_df = b.get_disparity_major_group(
            groups_model,
            df,
            check_significance=configs.check_significance,
            alpha=configs.alpha,
            selected_significance=configs.selected_significance,
            mask_significance=configs.mask_significance)
    else:
        bias_df = b.get_disparity_min_metric(
            df=groups_model,
            original_df=df,
            check_significance=configs.check_significance,
            alpha=configs.alpha,
            label_score_ref='fpr',
            selected_significance=configs.selected_significance,
            mask_significance=configs.mask_significance)

    print('Any NaN?: ', bias_df.isnull().values.any())
    print('bias_df shape:', bias_df.shape)

    aqp = Plot()

    if configs.plot_bias_metrics:
        if len(configs.plot_bias_metrics) == 1:
            fig1 = aqp.plot_group_metric(
                bias_df, group_metric=configs.plot_bias_metrics[0])

        elif len(configs.plot_bias_metrics) > 1:
            fig1 = aqp.plot_group_metric_all(
                bias_df, metrics=configs.plot_bias_metrics)

        if (len(configs.plot_bias_disparities) == 1) and (len(
                configs.plot_disparity_attributes) == 1):
            fig2 = aqp.plot_disparity(
                bias_df,
                group_metric=configs.plot_bias_disparities[0],
                attribute_name=configs.plot_disparity_attributes[0])

        elif (len(configs.plot_bias_disparities) > 1) or (len(
                configs.plot_disparity_attributes) > 1):
            fig2 = aqp.plot_disparity_all(
                bias_df,
                metrics=configs.plot_bias_disparities,
                attributes=configs.plot_disparity_attributes)

    f = Fairness(tau=configs.fairness_threshold)
    print('Fairness Threshold:', configs.fairness_threshold)
    print('Fairness Measures:', configs.fair_measures_requested)
    group_value_df = f.get_group_value_fairness(
        bias_df, fair_measures_requested=configs.fair_measures_requested)
    group_attribute_df = f.get_group_attribute_fairness(
        group_value_df,
        fair_measures_requested=configs.fair_measures_requested)
    fair_results = f.get_overall_fairness(group_attribute_df)

    if configs.plot_bias_metrics:
        if len(configs.plot_bias_metrics) == 1:
            fig3 = aqp.plot_fairness_group(
                group_value_df, group_metric=configs.plot_bias_metrics[0])
        elif len(configs.plot_bias_metrics) > 1:
            fig3 = aqp.plot_fairness_group_all(
                group_value_df, metrics=configs.plot_bias_metrics)

        if (len(configs.plot_bias_disparities) == 1) and (len(
                configs.plot_disparity_attributes) == 1):
            fig4 = aqp.plot_fairness_disparity(
                group_value_df,
                group_metric=configs.plot_bias_disparities[0],
                attribute_name=configs.plot_disparity_attributes[0])
        elif (len(configs.plot_bias_disparities) > 1) or (len(
                configs.plot_disparity_attributes) > 1):
            fig4 = aqp.plot_fairness_disparity_all(
                group_value_df,
                metrics=configs.plot_bias_disparities,
                attributes=configs.plot_disparity_attributes)

    print(fair_results)
    report = None
    if configs.report is True:
        report = audit_report_markdown(configs, group_value_df,
                                       f.fair_measures_depend, fair_results)
    return group_value_df, report
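This variant reads more off configs than Example #2 (significance options plus plot selections); a matching stand-in sketch, with values illustrative and input_df hypothetical:

from types import SimpleNamespace

configs = SimpleNamespace(
    attr_cols=None, score_thresholds=None,
    fair_measures_requested=['Statistical Parity', 'FPR Parity'],
    ref_groups_method='majority', ref_groups=None,
    check_significance=True, alpha=0.05,
    selected_significance=False, mask_significance=True,
    plot_bias_metrics=['fpr'],
    plot_bias_disparities=['fpr_disparity'],
    plot_disparity_attributes=['race'],
    fairness_threshold=0.8, report=False)

group_value_df, report = audit(input_df, configs)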