def __call__(self, server_state, metrics, round_num):
  """A function suitable for passing as an eval hook to the training_loop.

  Evaluates the current global model on `self.eval_dataset`, merges the
  evaluation metrics into `metrics`, prints them, writes them to the
  TensorBoard summary log, and appends a row to the accumulated results
  CSV on disk.

  Args:
    server_state: A `ServerState`.
    metrics: A dict of metrics computed in TFF.
    round_num: The current round number.
  """
  tff.learning.assign_weights_to_keras_model(self.model, server_state.model)
  eval_metrics = self.model.evaluate(self.eval_dataset, verbose=0)
  metrics['eval'] = collections.OrderedDict(
      zip(['loss', 'sparse_categorical_accuracy'], eval_metrics))

  flat_metrics = collections.OrderedDict(
      nest.flatten_with_joined_string_paths(metrics))

  # Use a DataFrame just to get nice formatting.
  df = pd.DataFrame.from_dict(flat_metrics, orient='index', columns=['value'])
  print(df)

  # Also write metrics to a tf.summary logdir
  with self.summary_writer.as_default():
    for name, value in flat_metrics.items():
      tf.compat.v2.summary.scalar(name, value, step=round_num)

  # `DataFrame.append` was deprecated in pandas 1.4 and removed in 2.0;
  # `pd.concat` with a single-row frame is the supported replacement and
  # produces the same result (ignore_index renumbers the rows).
  self.results = pd.concat(
      [self.results, pd.DataFrame([flat_metrics])], ignore_index=True)
  utils.atomic_write_to_csv(self.results, self.results_file)
def build(cls, exp_name, output_dir, eval_dataset, hparam_dict):
  """Constructs the MetricsHook.

  Creates the per-experiment summary log directory (failing if it already
  exists), records the hyperparameters to TensorBoard and to a .csv file,
  and builds the compiled Keras model used for evaluation.

  Args:
    exp_name: A unique filesystem-friendly name for the experiment.
    output_dir: A root output directory used for all experiment runs in a
      grid. The MetricsHook will combine this with exp_name to form suitable
      output directories for this run.
    eval_dataset: Evaluation dataset.
    hparam_dict: A dictionary of hyperparameters to be recorded to .csv and
      exported to TensorBoard. The caller's dict is not modified.

  Returns:
    The `MetricsHook` object.
  """
  summary_logdir = os.path.join(output_dir, 'logdir/{}'.format(exp_name))
  _check_not_exists(summary_logdir)
  tf.io.gfile.makedirs(summary_logdir)

  summary_writer = tf.compat.v2.summary.create_file_writer(
      summary_logdir, name=exp_name)
  with summary_writer.as_default():
    hp.hparams(hparam_dict)

  # Using .bz2 rather than .zip due to
  # https://github.com/pandas-dev/pandas/issues/26023
  results_file = os.path.join(output_dir,
                              '{}.results.csv.bz2'.format(exp_name))

  # Also write the hparam_dict to a CSV. Copy first so that adding the
  # results-file path does not mutate the caller's dict as a side effect.
  hparam_dict = dict(hparam_dict)
  hparam_dict['results_file'] = results_file
  hparams_file = os.path.join(output_dir, '{}.hparams.csv'.format(exp_name))
  utils.atomic_write_to_csv(pd.Series(hparam_dict), hparams_file)

  model = create_compiled_keras_model()

  logging.info('Writing ...')
  logging.info(' result csv to: %s', results_file)
  logging.info(' summaries to: %s', summary_logdir)

  return cls(
      results_file=results_file,
      summary_writer=summary_writer,
      eval_dataset=eval_dataset,
      model=model)