Example #1
def _setup_directories(config):
    file_util.makedirs(config['results_dir'])
    with file_util.open(
            os.path.join(config['results_dir'],
                         config['experiment_name'] + '_info.txt'),
            'w') as outfile:
        outfile.write(fg_core.to_json(config))
Example #2
    def test_json_encode_function(self):
        def my_function(x):
            return x

        self.assertIn('my_function',
                      core.to_json({'params': {
                          'function': my_function
                      }}))
Example #3
    def test_json_encode_function(self):
        def my_function(x):
            return x

        self.assertIn("my_function",
                      core.to_json({"params": {
                          "function": my_function
                      }}))
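
Both tests above assert that the name of a Python function survives JSON encoding. A minimal sketch of that behaviour, assuming the encoder simply falls back to repr() for objects the json module cannot serialize natively; to_json_sketch is an illustrative stand-in, not the library's actual implementation:

import json

def to_json_sketch(obj, **kwargs):
    # Fall back to repr() for values json cannot encode natively
    # (functions, custom objects), so their names survive in the output.
    return json.dumps(obj, default=repr, **kwargs)

def my_function(x):
    return x

# repr(my_function) looks like '<function my_function at 0x...>',
# so the substring 'my_function' appears in the encoded string.
assert 'my_function' in to_json_sketch({'params': {'function': my_function}})
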
Example #4
    def run(self):
        """Run a lending experiment.

    Returns:
      A json encoding of the experiment result.
    """

        env, agent = self.scenario_builder()
        metrics = {
            "initial_credit_distribution":
            lending_metrics.CreditDistribution(env, step=0),
            "final_credit_distributions":
            lending_metrics.CreditDistribution(env, step=-1),
            "recall":
            error_metrics.RecallMetric(
                env,
                prediction_fn=lambda x: x.action,
                ground_truth_fn=lambda x: not x.state.will_default,
                stratify_fn=lambda x: str(x.state.group_id),
            ),
            "precision":
            error_metrics.PrecisionMetric(
                env,
                prediction_fn=lambda x: x.action,
                ground_truth_fn=lambda x: not x.state.will_default,
                stratify_fn=lambda x: str(x.state.group_id),
            ),
            "profit rate":
            value_tracking_metrics.ValueChange(env, state_var="bank_cash"),
        }

        if self.include_cumulative_loans:
            metrics["cumulative_loans"] = lending_metrics.CumulativeLoans(env)
            metrics["cumulative_recall"] = lending_metrics.CumulativeRecall(
                env)

        metric_results = run_util.run_simulation(env, agent, metrics,
                                                 self.num_steps, self.seed)
        report = {
            "environment": {
                "name": env.__class__.__name__,
                "params": env.initial_params,
                "history": env.history,
            },
            "agent": {
                "name": agent.__class__.__name__,
                "params": agent.params,
                "debug_string": agent.debug_string(),
                "threshold_history": agent.group_specific_threshold_history,
                "tpr_targets": agent.target_recall_history,
            },
            "experiment_params": self,
            "metric_results": metric_results,
        }
        if self.return_json:
            return core.to_json(report, indent=4)
        return report
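
The return_json flag at the end switches between returning the report as a dict and returning its JSON encoding. A small, self-contained sketch of that toggle using only the standard library; the class ReportingExample and its contents are hypothetical, not part of the experiment code:

import json

class ReportingExample:
    """Illustrates returning either a dict or its JSON encoding."""

    def __init__(self, return_json=True):
        self.return_json = return_json

    def run(self):
        report = {'metric_results': {'recall': 1.0, 'precision': 0.8}}
        if self.return_json:
            return json.dumps(report, indent=4)  # pretty-printed string
        return report  # plain dict

print(ReportingExample(return_json=True).run())
print(type(ReportingExample(return_json=False).run()))
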
Example #5
def run_baseline_experiment(feature_mu=None):
  """Run baseline experiment without noise."""
  results = {}
  agent_types = ['fixed', 'static', 'continuous', 'robust']
  thresholds = np.arange(0, 1, THRESHOLD_SPACING)
  if feature_mu is None:
    feature_mu = [0.5, 0.5]
  if len(feature_mu) != 2:
    raise ValueError('Expected feature_mu to be of length 2.')
  env_config_params = copy.deepcopy(ENV_PARAMS)

  env_config_params.update({
      'feature_params':
          params.GMM(mix_weight=[0.5, 0.5], mu=feature_mu,
                     sigma=[0.1, 0.1])
  })

  for agent_type in agent_types:
    results[agent_type] = {}
    for threshold in thresholds:
      results[agent_type][threshold] = {}
      if agent_type != 'fixed' and threshold > 0:
        continue
      num_steps = FIXED_AGENT_NUMSTEPS if agent_type == 'fixed' else FLAGS.num_steps
      college_experiment = college_admission.CollegeExperiment(
          num_steps=num_steps,
          env_config=env_config_params,
          agent_type=agent_type,
          agent_threshold=threshold,
          burnin=FLAGS.burnin,
          epsilon_greedy=FLAGS.epsilon_greedy,
          initial_epsilon_prob=FLAGS.initial_epsilon_prob)
      json_dump = college_experiment.run_experiment()
      exp_result = json.loads(json_dump)
      exp_params = copy.deepcopy(attr.asdict(college_experiment))
      exp_result.update({'exp_params': exp_params})
      if FLAGS.verbose:
        log_results(exp_result)
      with open(
          os.path.join(FLAGS.output_dir, 'experiment_results.json'), 'a+') as f:
        core.to_json(exp_result, f)
        f.write('\n---------------------------------------\n')
      results[agent_type][threshold] = exp_result
  return results, thresholds
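
run_baseline_experiment returns a dict keyed by agent type and then by threshold; combinations that are skipped (non-'fixed' agents with threshold > 0) are left as empty dicts. A hedged helper showing one way to flatten that structure for later analysis; flatten_results is hypothetical and not part of the experiment code:

def flatten_results(results):
    """Turn results[agent_type][threshold] -> exp_result into flat rows."""
    rows = []
    for agent_type, by_threshold in results.items():
        for threshold, exp_result in by_threshold.items():
            if not exp_result:  # skipped agent/threshold combinations
                continue
            rows.append({'agent_type': agent_type,
                         'threshold': threshold,
                         'result': exp_result})
    return rows
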
Example #6
    def run(self):
        """Run a lending experiment.

    Returns:
      A json encoding of the experiment result.
    """

        env, agent = self.scenario_builder()
        metrics = {
            'initial_credit_distribution':
            lending_metrics.CreditDistribution(env, step=0),
            'final_credit_distributions':
            lending_metrics.CreditDistribution(env, step=-1),
            'recall':
            error_metrics.RecallMetric(
                env,
                prediction_fn=lambda x: x.action,
                ground_truth_fn=lambda x: not x.state.will_default,
                stratify_fn=lambda x: str(x.state.group_id)),
            'precision':
            error_metrics.PrecisionMetric(
                env,
                prediction_fn=lambda x: x.action,
                ground_truth_fn=lambda x: not x.state.will_default,
                stratify_fn=lambda x: str(x.state.group_id)),
            'profit rate':
            value_tracking_metrics.ValueChange(env, state_var='bank_cash'),
        }

        if self.include_cumulative_loans:
            metrics['cumulative_loans'] = lending_metrics.CumulativeLoans(env)
            metrics['cumulative_recall'] = lending_metrics.CumulativeRecall(
                env)

        metric_results = run_util.run_simulation(env, agent, metrics,
                                                 self.num_steps, self.seed)
        report = {
            'environment': {
                'name': env.__class__.__name__,
                'params': env.initial_params,
                'history': env.history,
                'env': env
            },
            'agent': {
                'name': agent.__class__.__name__,
                'params': agent.params,
                'debug_string': agent.debug_string(),
                'threshold_history': agent.group_specific_threshold_history,
                'tpr_targets': agent.target_recall_history,
            },
            'experiment_params': self,
            'metric_results': metric_results,
        }
        if self.return_json:
            return core.to_json(report, indent=4)
        return report
Example #7
def _setup_directories(config):
    try:
        file_util.makedirs(config['results_dir'])
        file_util.makedirs('./runs/{}'.format(config['experiment_name']))
    except FileExistsError:
        pass
    with file_util.open(
            os.path.join(config['results_dir'],
                         config['experiment_name'] + '_info.txt'),
            'w') as outfile:
        outfile.write(fg_core.to_json(config))
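
file_util and fg_core here are project-specific modules. For comparison, a standard-library sketch of the same idea, using os.makedirs(exist_ok=True) instead of the try/except and json.dumps(default=repr) as a stand-in for fg_core.to_json; setup_directories_sketch is hypothetical:

import json
import os

def setup_directories_sketch(config):
    # exist_ok=True makes the calls idempotent, replacing the try/except above.
    os.makedirs(config['results_dir'], exist_ok=True)
    os.makedirs(os.path.join('runs', config['experiment_name']), exist_ok=True)
    info_path = os.path.join(config['results_dir'],
                             config['experiment_name'] + '_info.txt')
    with open(info_path, 'w') as outfile:
        outfile.write(json.dumps(config, default=repr))
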
Example #8
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    gin.parse_config_file(FLAGS.gin_config_path)
    runner = runner_lib.Runner()

    results = runner.run()
    logging.info('Results: %s', results)

    with open(FLAGS.output_path, 'w') as f:
        f.write(core.to_json(results))
Example #9
    def run_experiment(self):
        """Main experiment runner."""
        env, agent = self.build_scenario()

        social_burden = value_tracking_metrics.AggregatorMetric(
            env=env,
            selection_fn=self.selection_fn_social_burden_eligible_auditor,
            modifier_fn=None,
            stratify_fn=self.stratify_by_group,
            realign_fn=self.realign_history,
            calc_mean=True)
        accuracy = error_metrics.AccuracyMetric(
            env=env,
            numerator_fn=self.accuracy_nr_fn,
            denominator_fn=None,
            stratify_fn=self.stratify_by_group,
            realign_fn=self.realign_history)
        overall_accuracy = error_metrics.AccuracyMetric(
            env=env,
            numerator_fn=self.accuracy_nr_fn,
            denominator_fn=None,
            # pylint: disable=g-long-lambda
            stratify_fn=lambda x:
            [1 for _ in range(env.initial_params.num_applicants)],
            realign_fn=self.realign_history)
        overall_social_burden = value_tracking_metrics.AggregatorMetric(
            env=env,
            selection_fn=self.selection_fn_social_burden_eligible_auditor,
            modifier_fn=None,
            # pylint: disable=g-long-lambda
            stratify_fn=lambda x:
            [1 for _ in range(env.initial_params.num_applicants)],
            realign_fn=self.realign_history,
            calc_mean=True)
        final_threshold = value_tracking_metrics.FinalValueMetric(
            env=env,
            state_var='decision_threshold',
            realign_fn=self.realign_history)

        metrics = [
            social_burden, accuracy, overall_accuracy, overall_social_burden,
            final_threshold
        ]
        metric_names = [
            'social_burden', 'accuracy', 'overall_accuracy',
            'overall_social_burden', 'final_threshold'
        ]
        metric_results = run_util.run_stackelberg_simulation(
            env, agent, metrics, self.num_steps, self.seed)
        return core.to_json({
            'metric_results':
            dict(zip(metric_names, metric_results)),
        })
Example #10
def report(experiment, named_metric_results):
    """Report results as a json string."""
    return core.to_json({
        'metrics': named_metric_results,
        'env_class': experiment.env_class.__name__,
        'agent_class': experiment.agent_class.__name__,
        'env_params': experiment.env_params,
        'agent_params': experiment.agent_params,
        'num_runs': experiment.num_runs,
        'num_steps': experiment.num_steps,
        'seed': experiment.seed
    })
Example #11
def report(experiment, named_metric_results):
    """Report results as a json string."""
    return core.to_json({
        "metrics": named_metric_results,
        "env_class": experiment.env_class.__name__,
        "agent_class": experiment.agent_class.__name__,
        "env_params": experiment.env_params,
        "agent_params": experiment.agent_params,
        "num_runs": experiment.num_runs,
        "num_steps": experiment.num_steps,
        "seed": experiment.seed,
    })
Example #12
def train(config):
    """Trains and returns an Safe RNN agent."""
    _set_experiment_name(config)
    logging.info('Launching experiment id: %s', config['experiment_name'])
    _setup_directories(config)
    envs = _envs_builder(config, config['num_episodes_per_update'])
    config_str = fg_core.to_json(config)
    config = types.SimpleNamespace(**config)
    pkl.dump(config, open('config.pkl', 'wb'))
    json.dump(config_str, open('config_.json', 'w'))
    print("Config has been saved")
    agent = _agent_builder(envs[0], config)
    writer = tf.summary.FileWriter(
        os.path.join(config.results_dir,
                     'runs/{}'.format(config.experiment_name)))
    while config.warm_start.initial_batch < config.num_updates:
        last_checkpoint = _training_loop(envs, agent, config, writer)
        agent.set_batch_size(1)
        metrics = evaluation.evaluate_agent(
            agent,
            envs[0],
            alpha=config.alpha,
            num_users=config.num_users_eval,
            deterministic=config.eval_deterministic)
        agent.set_batch_size(len(envs))
        step = config.warm_start.initial_batch * config.num_episodes_per_update
        print(step, last_checkpoint, metrics)
        log_tboard(metrics, writer, step)
        summary = tf.Summary(value=[
            tf.Summary.Value(tag='training_var', simple_value=agent.var)
        ])
        writer.add_summary(summary, step)

    # Do one final eval at the end.
    agent.set_batch_size(1)
    metrics = evaluation.evaluate_agent(
        agent,
        envs[0],
        alpha=config.alpha,
        num_users=config.num_users_eval_final,
        deterministic=config.eval_deterministic)
    agent.set_batch_size(len(envs))
    return agent
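
train() serializes the config while it is still a dict, then wraps it in types.SimpleNamespace so the rest of the function can use attribute access (config.results_dir rather than config['results_dir']). A minimal illustration of that conversion; the keys shown are examples only:

import types

config = {'results_dir': '/tmp/results', 'experiment_name': 'demo'}
config = types.SimpleNamespace(**config)
assert config.results_dir == '/tmp/results'
assert config.experiment_name == 'demo'
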
Example #13
def run_noisy_experiment(noise_dist='gaussian',
                         noisy_features=False,
                         noisy_threshold=False,
                         feature_mu=None,
                         stdevs=None):
  """Noisy experiment runs."""
  results = {}
  deltas = {}
  agent_types = ['fixed', 'static', 'continuous', 'robust']
  thresholds = np.arange(0, 1, THRESHOLD_SPACING)
  if noise_dist == 'beta':
    logging.info('Using Beta Noise Distribution.')
    stdevs = np.arange(2, 9, 1)
    mu = 2
    max_value = 0.7
    min_value = 0
  else:
    logging.info('Using Gaussian Noise Distribution.')
    mu = 0
    max_value = 0.35
    min_value = -0.35
  if feature_mu is None:
    feature_mu = [0.5, 0.5]
  if len(feature_mu) != 2:
    raise ValueError('Expected feature_mu to be of length 2.')
  if stdevs is None:
    stdevs = STDEV_RANGE_DEFAULTS
  for sd in stdevs:
    env_config_params = copy.deepcopy(ENV_PARAMS)
    env_config_params.update({
        'noise_dist':
            noise_dist,
        'noise_params':
            params.BoundedGaussian(
                max=max_value, min=min_value, mu=mu, sigma=sd),
        'noisy_features':
            noisy_features,
        'noisy_threshold':
            noisy_threshold,
        'feature_params':
            params.GMM(mix_weight=[0.5, 0.5], mu=feature_mu, sigma=[0.1, 0.1]),
    })
    logging.info('Stdev %f', sd)
    results[sd] = {}
    for agent_type in agent_types:
      results[sd][agent_type] = {}
      for threshold in thresholds:
        results[sd][agent_type][threshold] = {}
        if agent_type != 'fixed' and threshold > 0:
          continue
        num_steps = FIXED_AGENT_NUMSTEPS if agent_type == 'fixed' else FLAGS.num_steps
        college_experiment = college_admission.CollegeExperiment(
            num_steps=num_steps,
            env_config=env_config_params,
            agent_type=agent_type,
            agent_threshold=threshold,
            burnin=FLAGS.burnin,
            epsilon_greedy=FLAGS.epsilon_greedy,
            initial_epsilon_prob=FLAGS.initial_epsilon_prob)
        json_dump = college_experiment.run_experiment()
        exp_result = json.loads(json_dump)
        exp_params = copy.deepcopy(attr.asdict(college_experiment))
        exp_result.update({'exp_params': exp_params})
        if FLAGS.verbose:
          log_results(exp_result)
        with open(
            os.path.join(FLAGS.output_dir, 'experiment_results.json'),
            'w+') as f:
          core.to_json(exp_result, f)
          f.write('\n---------------------------------------\n')
        results[sd][agent_type][threshold] = exp_result
    deltas[sd] = (
        results[sd]['continuous'][0.0]['metric_results']['final_threshold'] -
        results[sd]['static'][0.0]['metric_results']['final_threshold'])
  return results, thresholds, deltas, stdevs
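
The deltas dict at the end records, for each noise standard deviation, the gap between the final decision thresholds reached by the 'continuous' and 'static' agents at an agent_threshold of 0.0. A small hedged sketch of that lookup pattern on a hand-built results dict; the numbers are illustrative only:

results = {
    0.1: {
        'continuous': {0.0: {'metric_results': {'final_threshold': 0.75}}},
        'static': {0.0: {'metric_results': {'final_threshold': 0.5}}},
    },
}

deltas = {
    sd: (by_agent['continuous'][0.0]['metric_results']['final_threshold'] -
         by_agent['static'][0.0]['metric_results']['final_threshold'])
    for sd, by_agent in results.items()
}
# deltas == {0.1: 0.25}
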
Example #14
    def test_to_json_with_indent(self):
        self.assertNotIn('\n', core.to_json({'a': 5, 'b': [1, 2, 3]}))
        self.assertIn('\n', core.to_json({'a': 5, 'b': [1, 2, 3]}, indent=4))
Example #15
    def test_confusion_matrix_serializes(self):
        confusion_matrix = error_metrics.ConfusionMatrix()
        confusion_matrix.update(prediction=1,
                                truth=0)  # Add one false positive
        confusion_matrix.update(prediction=0, truth=0)  # Add one true negative
        core.to_json(confusion_matrix)
Example #16
    def test_to_json_with_indent(self):
        self.assertNotIn("\n", core.to_json({"a": 5, "b": [1, 2, 3]}))
        self.assertIn("\n", core.to_json({"a": 5, "b": [1, 2, 3]}, indent=4))