def train_test_classifier():
    """Fit the disparate-mistreatment-constrained model and report test metrics.

    Reads training/test data and solver settings (x_train, y_train,
    x_control_train, loss_function, EPS, cons_params, ...) from the
    enclosing scope.

    Returns:
        tuple: (weights, test accuracy, per-sensitive-attr FP/FN rates on test).
    """
    weights = fdm.train_model_disp_mist(
        x_train, y_train, x_control_train, loss_function, EPS, cons_params)

    (train_score, test_score,
     cov_all_train, cov_all_test,
     s_attr_to_fp_fn_train, s_attr_to_fp_fn_test) = fdm.get_clf_stats(
        weights, x_train, y_train, x_control_train,
        x_test, y_test, x_control_test, sensitive_attrs)

    # Accuracy and FPR/FN rates are taken from the test split because the
    # caller needs them for plotting.
    return weights, test_score, s_attr_to_fp_fn_test
            def train_test_classifier():
                """Train the constrained model and collect train/test statistics.

                Uses data and solver settings from the enclosing scope.

                Returns:
                    tuple: (weights, train accuracy, test accuracy,
                    test FP/FN rates, train FP/FN rates, train covariance).
                """
                weights = fdm.train_model_disp_mist(
                    x_train, y_train, x_control_train,
                    loss_function, EPS, cons_params)

                stats = fdm.get_clf_stats(
                    weights, x_train, y_train, x_control_train,
                    x_test, y_test, x_control_test, sensitive_attrs)
                (train_score, test_score,
                 cov_all_train, cov_all_test,
                 s_attr_to_fp_fn_train, s_attr_to_fp_fn_test) = stats

                # Accuracy and FPR are reported on the test split (needed for
                # plotting); the covariance is reported on the train split
                # because it is used for setting the thresholds.
                return (weights, train_score, test_score,
                        s_attr_to_fp_fn_test, s_attr_to_fp_fn_train,
                        cov_all_train)
def train_classifier(x, y, control, cons_params):
    """Train a logistic-regression model under disparate-mistreatment constraints.

    Args:
        x: feature matrix.
        y: label vector.
        control: sensitive-attribute values per example.
        cons_params: constraint configuration passed through to the solver.

    Returns:
        The learned weight vector.
    """
    # Only logistic regression is supported by the underlying solver;
    # the convergence tolerance is fixed at 1e-4.
    return fdm.train_model_disp_mist(
        x, y, control, "logreg", 0.0001, cons_params)
# Example #4
# 0
def train_classifier(x, y, control, cons_params, eps):
    """Train a fairness-constrained logistic-regression model.

    Args:
        x: feature matrix.
        y: label vector.
        control: sensitive-attribute values per example.
        cons_params: constraint configuration passed through to the solver.
        eps: convergence tolerance for the solver.

    Returns:
        The learned weight vector.
    """
    # Only logistic regression is implemented by the solver.
    return fdm.train_model_disp_mist(x, y, control, "logreg", eps, cons_params)
# Example #5
# 0
def _run_test_Zafar(test, data, sensible_name, learner, creteria):
    '''
    Invoking Zafar's algorithm.

    Trains either the disparate-mistreatment ('EO') or the
    disparate-impact formulation, then measures accuracy and the
    fairness disparity on both splits.

    Args:
        test: experiment configuration; only test['eps'] is read here.
        data: 9-tuple of raw arrays (full / train / test features,
            labels and sensitive attributes).
        sensible_name: name of the sensitive attribute.
        learner: unused in this function.
        creteria: 'EO' selects the mistreatment formulation; anything
            else selects the impact formulation.

    Returns:
        dict with keys disp_train, disp_test, error_train, error_test.
    '''
    (_x_raw, _y_raw, _a_raw,
     _x_tr_raw, y_tr_raw, a_tr_raw,
     _x_te_raw, y_te_raw, a_te_raw) = data
    (x, y, x_control,
     x_train, y_train, x_control_train,
     x_test, y_test, x_control_test) = _convert_data_format_Zafar(
        data, sensible_name)

    w = None
    if creteria == 'EO':
        # Disparate-mistreatment formulation, solved with logistic
        # regression. cons_type=1 means an FPR constraint; only the
        # cons_type needs changing to switch constraint, the rest of the
        # parameters stay the same.
        eps = test['eps']
        # Covariance thresholds per group: a zero threshold would mean
        # "find the fairest solution"; here the FPR entry is set to eps.
        cov_thresh = {
            sensible_name: {group: {0: 0, 1: eps} for group in (0, 1, 2)}
        }
        cons_params = {
            "cons_type": 1,  # FPR constraint
            "tau": 5.0,
            "mu": 1.2,
            "sensitive_attrs_to_cov_thresh": cov_thresh,
        }
        w = fdm.train_model_disp_mist(x, y, x_control, "logreg", 1e-6,
                                      cons_params)
    else:
        # Disparate-impact formulation: optimize accuracy subject to
        # fairness constraints (flags: fairness on, accuracy constraint
        # off, separate constraint off).
        w = ut.train_model(x, y, x_control, lf._logistic_loss,
                           1,    # apply_fairness_constraints
                           0,    # apply_accuracy_constraint
                           0,    # sep_constraint
                           [sensible_name],
                           {sensible_name: test['eps']},
                           None)  # gamma

    # Evaluate on the train split.
    preds_train = np.sign(np.dot(x_train, w))
    disp_train = fair_measure(preds_train, a_tr_raw, y_tr_raw, creteria)
    acc_train = accuracy_score(y_train, preds_train)

    # Evaluate on the test split.
    preds_test = np.sign(np.dot(x_test, w))
    disp_test = fair_measure(preds_test, a_te_raw, y_te_raw, creteria)
    acc_test = accuracy_score(y_test, preds_test)

    return {
        "disp_train": disp_train,
        "disp_test": disp_test,
        "error_train": 1 - acc_train,
        "error_test": 1 - acc_test,
    }