Example #1
    def test_add_fairness_metrics_baseline_model(self):
        config = tf_estimator.RunConfig(model_dir=self.model_dir,
                                        save_checkpoints_steps=2)
        feature_columns, _, _, label_column_name = self.load_dataset.get_feature_columns(include_sensitive_columns=True)  # pylint: disable=line-too-long
        estimator = baseline_model.get_estimator(
            feature_columns=feature_columns,
            label_column_name=label_column_name,
            config=config,
            model_dir=self.model_dir,
            hidden_units=self.hidden_units,
            batch_size=self.batch_size)
        self.assertIsInstance(estimator, tf_estimator.Estimator)

        # Adds additional fairness metrics to estimator
        eval_metrics_fn = self.fairness_metrics.create_fairness_metrics_fn()
        estimator = tf_estimator.add_metrics(estimator, eval_metrics_fn)

        train_input_fn, test_input_fn = self._get_train_test_input_fn()
        estimator.train(input_fn=train_input_fn, steps=self.train_steps)
        eval_results = estimator.evaluate(input_fn=test_input_fn,
                                          steps=self.test_steps)
        self.assertNotEmpty(eval_results)
        # Checks if the auc metric is computed for all subgroups
        for subgroup in self.subgroups:
            self.assertIn('auc subgroup {}'.format(subgroup), eval_results)

    def test_threshold_metrics(self):
        # Instantiates a robust estimator
        estimator = self._get_estimator()
        self.assertIsInstance(estimator, tf_estimator.Estimator)

        # Adds additional fairness metrics to estimator
        eval_metrics_fn = self.fairness_metrics.create_fairness_metrics_fn(
            num_thresholds=self.num_thresholds)
        estimator = tf_estimator.add_metrics(estimator, eval_metrics_fn)

        # Trains and evaluates the robust model
        train_input_fn, test_input_fn = self._get_train_test_input_fn()
        estimator.train(input_fn=train_input_fn, steps=self.train_steps)
        eval_results = estimator.evaluate(input_fn=test_input_fn,
                                          steps=self.test_steps)

        # Checks if tp, tn, fp, fn metrics are computed at thresholds
        self.assertIn('fp_th', eval_results)
        self.assertIn('fn_th', eval_results)
        self.assertIn('tp_th', eval_results)
        self.assertIn('tn_th', eval_results)

        # Checks if the length of tp_th matches self.num_thresholds
        self.assertLen(eval_results['tp_th'], self.num_thresholds)

        # Checks if threshold metrics are computed for protected_groups
        self.assertIn('fp_th subgroup {}'.format(self.subgroups[0]),
                      eval_results)
        self.assertIn('fp_th {} group 0'.format(self.protected_groups[0]),
                      eval_results)
        self.assertIn('fp_th {} group 1'.format(self.protected_groups[0]),
                      eval_results)

    def test_subgroup_metrics(self):
        # Instantiates a robust estimator
        estimator = self._get_estimator()
        self.assertIsInstance(estimator, tf_estimator.Estimator)

        # Adds additional fairness metrics to estimator
        eval_metrics_fn = self.fairness_metrics.create_fairness_metrics_fn(
            num_thresholds=self.num_thresholds)
        estimator = tf_estimator.add_metrics(estimator, eval_metrics_fn)

        # Trains and evaluates the robust model
        train_input_fn, test_input_fn = self._get_train_test_input_fn()
        estimator.train(input_fn=train_input_fn, steps=self.train_steps)
        eval_results = estimator.evaluate(input_fn=test_input_fn,
                                          steps=self.test_steps)

        # Checks if eval_results are computed
        self.assertNotEmpty(eval_results)

        # Checks if auc, fpr, and fnr metrics are computed for all subgroups
        for subgroup in self.subgroups:
            self.assertIn('auc subgroup {}'.format(subgroup), eval_results)
            self.assertIn('fpr subgroup {}'.format(subgroup), eval_results)
            self.assertIn('fnr subgroup {}'.format(subgroup), eval_results)

    def test_create_and_add_fairness_metrics_with_print_dir(self):
        # Instantiates a robust estimator
        estimator = self._get_estimator()
        self.assertIsInstance(estimator, tf_estimator.Estimator)

        # Adds additional fairness metrics to estimator
        self.fairness_metrics_with_print = RobustFairnessMetrics(
            label_column_name=self.label_column_name,
            protected_groups=self.protected_groups,
            subgroups=self.subgroups,
            print_dir=self.print_dir)
        eval_metrics_fn = (
            self.fairness_metrics_with_print.create_fairness_metrics_fn(
                num_thresholds=self.num_thresholds))
        estimator = tf_estimator.add_metrics(estimator, eval_metrics_fn)

        # Trains and evaluates the robust model
        train_input_fn, test_input_fn = self._get_train_test_input_fn()
        estimator.train(input_fn=train_input_fn, steps=self.train_steps)
        eval_results = estimator.evaluate(input_fn=test_input_fn,
                                          steps=self.test_steps)

        # Checks if eval_results are computed
        self.assertNotEmpty(eval_results)
        for key in self.eval_metric_keys:
            self.assertIn(key, eval_results)

    def test_create_and_add_fairness_metrics(self):
        # Instantiates a robust estimator
        estimator = self._get_estimator()
        self.assertIsInstance(estimator, tf_estimator.Estimator)

        # Adds additional fairness metrics to estimator
        eval_metrics_fn = self.fairness_metrics.create_fairness_metrics_fn(
            num_thresholds=self.num_thresholds)
        estimator = tf_estimator.add_metrics(estimator, eval_metrics_fn)

        # Trains and evaluates the robust model
        train_input_fn, test_input_fn = self._get_train_test_input_fn()
        estimator.train(input_fn=train_input_fn, steps=self.train_steps)
        eval_results = estimator.evaluate(input_fn=test_input_fn,
                                          steps=self.test_steps)

        # Checks if eval_results are computed
        self.assertNotEmpty(eval_results)

        for key in self.eval_metric_keys:
            self.assertIn(key, eval_results)
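
The tests above rely on two helpers, _get_estimator and _get_train_test_input_fn, that are defined elsewhere in the test class. Below is a minimal sketch of what they might look like, assuming a setUp that stores the dataset loader and hyperparameters on self, and reusing only constructions already shown in this example; the repository's actual helpers (the comments describe the result as a "robust" estimator, so it likely is not the baseline model used here) may differ:

    def _get_train_test_input_fn(self):
        # Builds train/eval input_fns from the dataset loader created in setUp
        # (assumed attribute names: self.load_dataset, self.batch_size).
        train_input_fn = self.load_dataset.get_input_fn(
            mode=tf_estimator.ModeKeys.TRAIN, batch_size=self.batch_size)
        test_input_fn = self.load_dataset.get_input_fn(
            mode=tf_estimator.ModeKeys.EVAL, batch_size=self.batch_size)
        return train_input_fn, test_input_fn

    def _get_estimator(self):
        # Illustrative sketch only: mirrors the baseline construction from
        # test_add_fairness_metrics_baseline_model above.
        config = tf_estimator.RunConfig(model_dir=self.model_dir,
                                        save_checkpoints_steps=2)
        feature_columns, _, _, label_column_name = (
            self.load_dataset.get_feature_columns(
                include_sensitive_columns=True))
        return baseline_model.get_estimator(
            feature_columns=feature_columns,
            label_column_name=label_column_name,
            config=config,
            model_dir=self.model_dir,
            hidden_units=self.hidden_units,
            batch_size=self.batch_size)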
Example #6
def run_model():
    """Instantiate and run model.

  Raises:
    ValueError: if model_name is not implemented.
    ValueError: if dataset is not implemented.
  """
    if FLAGS.model_name not in MODEL_KEYS:
        raise ValueError("Model {} is not implemented.".format(
            FLAGS.model_name))
    else:
        model_dir, model_name, print_dir = _initialize_model_dir()

    tf.logging.info(
        "Creating experiment, storing model files in {}".format(model_dir))

    # Instantiates dataset and gets input_fn
    if FLAGS.dataset == "law_school":
        load_dataset = LawSchoolInput(dataset_base_dir=FLAGS.dataset_base_dir,
                                      train_file=FLAGS.train_file,
                                      test_file=FLAGS.test_file)
    elif FLAGS.dataset == "compas":
        load_dataset = CompasInput(dataset_base_dir=FLAGS.dataset_base_dir,
                                   train_file=FLAGS.train_file,
                                   test_file=FLAGS.test_file)
    elif FLAGS.dataset == "uci_adult":
        load_dataset = UCIAdultInput(dataset_base_dir=FLAGS.dataset_base_dir,
                                     train_file=FLAGS.train_file,
                                     test_file=FLAGS.test_file)
    else:
        raise ValueError("Input_fn for {} dataset is not implemented.".format(
            FLAGS.dataset))

    train_input_fn = load_dataset.get_input_fn(
        mode=tf_estimator.ModeKeys.TRAIN, batch_size=FLAGS.batch_size)
    test_input_fn = load_dataset.get_input_fn(mode=tf_estimator.ModeKeys.EVAL,
                                              batch_size=FLAGS.batch_size)

    feature_columns, _, protected_groups, label_column_name = (
        load_dataset.get_feature_columns(
            embedding_dimension=FLAGS.embedding_dimension,
            include_sensitive_columns=FLAGS.include_sensitive_columns))

    # Constructs an int list enumerating the subgroups in the dataset.
    # For example, if the dataset has two (binary) protected_groups, it has
    # 2^2 = 4 subgroups, which we enumerate as [0, 1, 2, 3].
    # If the dataset has two protected features ["race", "sex"] cast as binary
    # features race = ["White" (0), "Black" (1)] and sex = ["Male" (0),
    # "Female" (1)], we call their Cartesian product ["White Male" (00),
    # "White Female" (01), "Black Male" (10), "Black Female" (11)] the
    # subgroups, enumerated as [0, 1, 2, 3].
    subgroups = np.arange(
        len(protected_groups) *
        2)  # Assumes each protected_group has two possible values.
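    # Illustrative assumption (not taken from this script): with the binary
    # encoding above, a record's subgroup index can be read as a two-bit
    # number, e.g. subgroup = race * 2 + sex, so (race=1, sex=1) maps to
    # subgroup 3 ("Black Female").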

    # Instantiates a tf.estimator.Estimator object
    estimator = get_estimator(model_dir,
                              model_name,
                              feature_columns=feature_columns,
                              label_column_name=label_column_name)

    # Adds additional fairness metrics
    fairness_metrics = RobustFairnessMetrics(
        label_column_name=label_column_name,
        protected_groups=protected_groups,
        subgroups=subgroups,
        print_dir=print_dir)
    eval_metrics_fn = fairness_metrics.create_fairness_metrics_fn()
    estimator = tf_estimator.add_metrics(estimator, eval_metrics_fn)

    # Creates training and evaluation specifications
    train_steps = int(FLAGS.total_train_steps / FLAGS.batch_size)
    train_spec = tf_estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=train_steps)
    eval_spec = tf_estimator.EvalSpec(input_fn=test_input_fn,
                                      steps=FLAGS.test_steps)

    tf_estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    tf.logging.info("Training completed.")

    eval_results = estimator.evaluate(input_fn=test_input_fn,
                                      steps=FLAGS.test_steps)

    eval_results_path = os.path.join(model_dir, FLAGS.output_file_name)
    write_to_output_file(eval_results, eval_results_path)
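
For context, a FLAGS-driven script like this is typically invoked through a small entry point. The sketch below assumes absl-style flags and is not necessarily the repository's actual main:

from absl import app


def main(argv):
    del argv  # Unused; configuration comes from FLAGS.
    run_model()


if __name__ == "__main__":
    app.run(main)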