Example #1
0
  def __call__(self, server_state, metrics, round_num):
    """A function suitable for passing as an eval hook to the training_loop.

    Copies the server model weights into the local Keras model, evaluates it
    on `self.eval_dataset`, merges the results into `metrics`, prints them,
    writes them to TensorBoard, and appends a row to the CSV results file.

    Args:
      server_state: A `ServerState`; `server_state.model` holds the weights
        to evaluate.
      metrics: A dict of metrics computed in TFF. Mutated in place: an
        'eval' entry is added.
      round_num: The current round number, used as the `tf.summary` step.
    """
    tff.learning.assign_weights_to_keras_model(self.model, server_state.model)
    eval_metrics = self.model.evaluate(self.eval_dataset, verbose=0)

    # Keras `evaluate` returns metrics positionally; pair them with names.
    metrics['eval'] = collections.OrderedDict(
        zip(['loss', 'sparse_categorical_accuracy'], eval_metrics))

    flat_metrics = collections.OrderedDict(
        nest_fork.flatten_with_joined_string_paths(metrics))

    # Use a DataFrame just to get nice formatting.
    df = pd.DataFrame.from_dict(flat_metrics, orient='index', columns=['value'])
    print(df)

    # Also write metrics to a tf.summary logdir.
    with self.summary_writer.as_default():
      for name, value in flat_metrics.items():
        tf.compat.v2.summary.scalar(name, value, step=round_num)

    # `DataFrame.append` was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat with a one-row frame is the supported equivalent.
    self.results = pd.concat(
        [self.results, pd.DataFrame([flat_metrics])], ignore_index=True)
    utils_impl.atomic_write_to_csv(self.results, self.results_file)
Example #2
0
    def __call__(self, server_state, train_metrics, round_num):
        """A function suitable for passing as an eval hook to the training_loop.

        Copies the server model weights into the local Keras model, evaluates
        it on `self.eval_dataset`, merges train and eval metrics, logs them,
        writes them to TensorBoard, and appends a row to the CSV results file.

        Args:
          server_state: A `ServerState`; `server_state.model` holds the
            weights to evaluate.
          train_metrics: A `dict` of training metrics computed in TFF.
          round_num: The current round number, used as the `tf.summary` step
            and recorded in the 'round' column of the results.
        """
        tff.learning.assign_weights_to_keras_model(self.model,
                                                   server_state.model)
        eval_metrics = self.model.evaluate(self.eval_dataset, verbose=0)

        # Keras `evaluate` returns metrics positionally; pair them with names.
        metrics = {
            'train':
            train_metrics,
            'eval':
            collections.OrderedDict(
                zip(['loss', 'sparse_categorical_accuracy'], eval_metrics))
        }
        flat_metrics = collections.OrderedDict(
            nest.flatten_with_joined_string_paths(metrics))
        flat_metrics['round'] = round_num

        # Lazy %-style args avoid formatting when the log level is disabled.
        logging.info('Evaluation at round %d:\n%s', round_num,
                     pprint.pformat(flat_metrics))

        # Also write metrics to a tf.summary logdir.
        with self.summary_writer.as_default():
            for name, value in flat_metrics.items():
                tf.compat.v2.summary.scalar(name, value, step=round_num)

        # `DataFrame.append` was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat with a one-row frame is the supported equivalent.
        self.results = pd.concat(
            [self.results, pd.DataFrame([flat_metrics])], ignore_index=True)
        utils_impl.atomic_write_to_csv(self.results, self.results_file)
Example #3
0
 def testFlattenWithStringPaths(self, inputs, expected):
     """Flattening `inputs` with '/'-joined paths yields `expected`."""
     actual = nest.flatten_with_joined_string_paths(inputs, separator="/")
     self.assertEqual(actual, expected)