def test_demographic_parity_ratio(agg_method):
    """The convenience wrapper must match a manual MetricFrame computation.

    ``demographic_parity_ratio`` is expected to equal the ratio of
    selection rates across sensitive-feature groups, aggregated with
    the given method.
    """
    frame = MetricFrame(selection_rate, y_t, y_p, sensitive_features=g_1)
    expected = frame.ratio(method=agg_method)
    result = demographic_parity_ratio(
        y_t, y_p, sensitive_features=g_1, method=agg_method
    )
    assert result == expected
def test_equalized_odds_ratio(agg_method):
    """The wrapper must equal the worse (minimum) of the TPR and FPR ratios.

    Equalized odds considers both the true-positive and the
    false-positive rate; the reported ratio is the smaller of the two
    per-metric group ratios.
    """
    metric_fns = {'tpr': true_positive_rate, 'fpr': false_positive_rate}
    frame = MetricFrame(metric_fns, y_t, y_p, sensitive_features=g_1)
    expected = frame.ratio(method=agg_method).min()
    result = equalized_odds_ratio(
        y_t, y_p, method=agg_method, sensitive_features=g_1
    )
    assert result == expected
def test_demographic_parity_ratio_weighted(agg_method):
    """Sample weights must be forwarded to the underlying selection-rate metric.

    Mirrors the unweighted test, but passes ``sample_weight`` through
    both the convenience wrapper and the reference MetricFrame.
    """
    frame = MetricFrame(
        selection_rate,
        y_t,
        y_p,
        sensitive_features=g_1,
        sample_params={'sample_weight': s_w},
    )
    expected = frame.ratio(method=agg_method)
    result = demographic_parity_ratio(
        y_t, y_p, sensitive_features=g_1, sample_weight=s_w, method=agg_method
    )
    assert result == expected
def test_equalized_odds_ratio_weighted(agg_method):
    """Sample weights must flow into both the TPR and the FPR computations.

    The reference MetricFrame routes the same ``sample_weight`` array to
    each underlying metric via per-metric ``sample_params``.
    """
    result = equalized_odds_ratio(
        y_t, y_p, method=agg_method, sensitive_features=g_1, sample_weight=s_w
    )
    weight_param = {'sample_weight': s_w}
    frame = MetricFrame(
        {'tpr': true_positive_rate, 'fpr': false_positive_rate},
        y_t,
        y_p,
        sensitive_features=g_1,
        sample_params={'tpr': weight_param, 'fpr': weight_param},
    )
    assert result == frame.ratio(method=agg_method).min()
# This can be quantified in terms of a difference between the subgroup with # the highest value of the metric, and the subgroup with the lowest value. # For this, we provide the method ``difference(method='between_groups)``: grouped_on_race.difference(method='between_groups') # %% # We can also evaluate the difference relative to the corresponding overall # value of the metric. In this case we take the absolute value, so that the # result is always positive: grouped_on_race.difference(method='to_overall') # %% # There are situations where knowing the ratios of the metrics evaluated on # the subgroups is more useful. For this we have the ``ratio()`` method. # We can take the ratios between the minimum and maximum values of each metric: grouped_on_race.ratio(method='between_groups') # %% # We can also compute the ratios relative to the overall value for each # metric. Analogous to the differences, the ratios are always in the range # :math:`[0,1]`: grouped_on_race.ratio(method='to_overall') # %% # Intersections of Features # ========================= # # So far we have only considered a single sensitive feature at a time, # and we have already found some serious issues in our example data. # However, sometimes serious issues can be hiding in intersections of # features. For example, the
# This can be quantified in terms of a difference between the subgroup with # the highest value of the metric, and the subgroup with the lowest value. # For this, we provide the method ``difference(method='between_groups)``: grouped_on_race.difference(method="between_groups") # %% # We can also evaluate the difference relative to the corresponding overall # value of the metric. In this case we take the absolute value, so that the # result is always positive: grouped_on_race.difference(method="to_overall") # %% # There are situations where knowing the ratios of the metrics evaluated on # the subgroups is more useful. For this we have the ``ratio()`` method. # We can take the ratios between the minimum and maximum values of each metric: grouped_on_race.ratio(method="between_groups") # %% # We can also compute the ratios relative to the overall value for each # metric. Analogous to the differences, the ratios are always in the range # :math:`[0,1]`: grouped_on_race.ratio(method="to_overall") # %% # Intersections of Features # ========================= # # So far we have only considered a single sensitive feature at a time, # and we have already found some serious issues in our example data. # However, sometimes serious issues can be hiding in intersections of # features. For example, the