def run(self):
    """Run a lending experiment and assemble its results.

    Builds the environment/agent pair from ``self.scenario_builder``,
    wires up credit-distribution, recall, precision and profit metrics,
    runs the simulation, and packages everything into a report.

    Returns:
      The report dict, or its JSON encoding when ``self.return_json``
      is true.
    """
    env, agent = self.scenario_builder()

    def _group_of(step):
        # Stratify per-step metrics by the applicant's group id.
        return str(step.state.group_id)

    def _repaid(step):
        # Ground truth: a step is "positive" when the loan is repaid.
        return not step.state.will_default

    metrics = {
        "initial_credit_distribution":
            lending_metrics.CreditDistribution(env, step=0),
        "final_credit_distributions":
            lending_metrics.CreditDistribution(env, step=-1),
        "recall": error_metrics.RecallMetric(
            env,
            prediction_fn=lambda x: x.action,
            ground_truth_fn=_repaid,
            stratify_fn=_group_of,
        ),
        "precision": error_metrics.PrecisionMetric(
            env,
            prediction_fn=lambda x: x.action,
            ground_truth_fn=_repaid,
            stratify_fn=_group_of,
        ),
        "profit rate":
            value_tracking_metrics.ValueChange(env, state_var="bank_cash"),
    }
    if self.include_cumulative_loans:
        metrics["cumulative_loans"] = lending_metrics.CumulativeLoans(env)
        metrics["cumulative_recall"] = lending_metrics.CumulativeRecall(env)

    metric_results = run_util.run_simulation(
        env, agent, metrics, self.num_steps, self.seed)

    report = {
        "environment": {
            "name": env.__class__.__name__,
            "params": env.initial_params,
            "history": env.history,
        },
        "agent": {
            "name": agent.__class__.__name__,
            "params": agent.params,
            "debug_string": agent.debug_string(),
            "threshold_history": agent.group_specific_threshold_history,
            "tpr_targets": agent.target_recall_history,
        },
        "experiment_params": self,
        "metric_results": metric_results,
    }
    return core.to_json(report, indent=4) if self.return_json else report
def run(self):
    """Run a lending experiment.

    Constructs the environment and agent, registers the lending
    metrics, executes the simulation, and bundles the outcome — this
    variant also keeps a direct reference to the environment object in
    the report under ``'environment' -> 'env'``.

    Returns:
      The report dict, or its JSON encoding when ``self.return_json``
      is true.
    """
    env, agent = self.scenario_builder()

    # Recall and precision share the same prediction / ground-truth /
    # stratification functions, so build the kwargs once.
    shared_kwargs = dict(
        prediction_fn=lambda x: x.action,
        ground_truth_fn=lambda x: not x.state.will_default,
        stratify_fn=lambda x: str(x.state.group_id))

    metrics = {
        'initial_credit_distribution':
            lending_metrics.CreditDistribution(env, step=0),
        'final_credit_distributions':
            lending_metrics.CreditDistribution(env, step=-1),
        'recall': error_metrics.RecallMetric(env, **shared_kwargs),
        'precision': error_metrics.PrecisionMetric(env, **shared_kwargs),
        'profit rate':
            value_tracking_metrics.ValueChange(env, state_var='bank_cash'),
    }
    if self.include_cumulative_loans:
        metrics.update(
            cumulative_loans=lending_metrics.CumulativeLoans(env),
            cumulative_recall=lending_metrics.CumulativeRecall(env))

    metric_results = run_util.run_simulation(env, agent, metrics,
                                             self.num_steps, self.seed)

    report = {
        'environment': {
            'name': env.__class__.__name__,
            'params': env.initial_params,
            'history': env.history,
            'env': env,
        },
        'agent': {
            'name': agent.__class__.__name__,
            'params': agent.params,
            'debug_string': agent.debug_string(),
            'threshold_history': agent.group_specific_threshold_history,
            'tpr_targets': agent.target_recall_history,
        },
        'experiment_params': self,
        'metric_results': metric_results,
    }
    if self.return_json:
        return core.to_json(report, indent=4)
    return report
def run_single_simulation(experiment):
    """Create env, agent and metric objects and run one experiment run.

    Builds summing metrics over incidents seen and incidents occurred,
    plus plain and weighted ratio metrics over the (seen, occurred)
    pair, runs the simulation, and appends the relevant environment
    history to the metric results.

    Args:
      experiment: An experiment object providing ``build_scenario()``,
        ``num_steps`` and ``seed``.

    Returns:
      The list of metric results with the environment history appended
      as the final element.
    """
    env, agent = experiment.build_scenario()

    # Each selection fn unpacks a (state, action) history step and
    # extracts the quantity the metric should track.
    def _seen(history_step):
        state, _ = history_step
        return state.incidents_seen

    def _occurred(history_step):
        state, _ = history_step
        return state.incidents_occurred

    def _seen_and_occurred(history_step):
        state, _ = history_step
        return state.incidents_seen, state.incidents_occurred

    metrics = [
        value_tracking_metrics.SummingMetric(env, _seen),
        value_tracking_metrics.SummingMetric(env, _occurred),
        RatioMetric(env, _seen_and_occurred),
        WeightedRatioMetric(env, _seen_and_occurred),
    ]

    metric_results = run_util.run_simulation(env, agent, metrics,
                                             experiment.num_steps,
                                             experiment.seed)
    metric_results.append(_get_relevant_history(env))
    return metric_results
def run_simulation(env, agent, metrics, num_steps):
    """Run a single simulation and return metrics.

    Thin pass-through to ``run_util.run_simulation``. Unlike the other
    call sites in this file, no seed is forwarded here, so whatever
    default seeding the underlying helper applies is used.
    NOTE(review): confirm that the default seed is intended for this
    entry point.
    """
    results = run_util.run_simulation(env, agent, metrics, num_steps)
    return results