def test_federated_cifar_structure(self):
    """Checks that train/test client batches have the requested crop shape."""
    crop_shape = (28, 28, 3)
    cifar_train, cifar_test = cifar100_dataset.get_federated_datasets(
        train_client_batch_size=3,
        test_client_batch_size=5,
        crop_shape=crop_shape)

    sample_train_ds = cifar_train.create_tf_dataset_for_client(
        cifar_train.client_ids[0])
    train_batch = next(iter(sample_train_ds))
    train_batch_shape = tuple(train_batch[0].shape)
    self.assertEqual(train_batch_shape, (3, 28, 28, 3))

    sample_test_ds = cifar_test.create_tf_dataset_for_client(
        cifar_test.client_ids[0])
    test_batch = next(iter(sample_test_ds))
    test_batch_shape = tuple(test_batch[0].shape)
    self.assertEqual(test_batch_shape, (5, 28, 28, 3))
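# A hedged aside (not from the original source): the same structural check can
# be written against `element_spec`, which avoids materializing a batch. This
# assumes, as the test above does, that the datasets yield (image, label)
# tuples with the batch dimension first.
def check_structure_via_element_spec():
  cifar_train, _ = cifar100_dataset.get_federated_datasets(
      train_client_batch_size=3, crop_shape=(28, 28, 3))
  sample_ds = cifar_train.create_tf_dataset_for_client(
      cifar_train.client_ids[0])
  image_spec = sample_ds.element_spec[0]
  # Drop the batch dimension and compare the per-image shape.
  assert tuple(image_spec.shape)[1:] == (28, 28, 3)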
Example #2
  def test_preprocess_applied(self, mock_load_data):
    if tf.config.list_logical_devices('GPU'):
      self.skipTest('skip GPU test')
    # Mock out the actual data loading from disk. Assert that the preprocessing
    # function is applied to the client data, and that only the expected
    # ClientData objects are used.
    #
    # The correctness of the preprocessing function is tested in other tests.
    mock_train = mock.create_autospec(tff.simulation.ClientData)
    mock_test = mock.create_autospec(tff.simulation.ClientData)
    mock_load_data.return_value = (mock_train, mock_test)

    _, _ = cifar100_dataset.get_federated_datasets()

    mock_load_data.assert_called_once()

    # Assert the training and testing data are preprocessed.
    self.assertEqual(mock_train.mock_calls,
                     mock.call.preprocess(mock.ANY).call_list())
    self.assertEqual(mock_test.mock_calls,
                     mock.call.preprocess(mock.ANY).call_list())
  def test_raises_negative_epochs(self):
    with self.assertRaisesRegex(
        ValueError, 'client_epochs_per_round must be a positive integer.'):
      cifar100_dataset.get_federated_datasets(train_client_epochs_per_round=-1)

  def test_raises_length_2_crop(self):
    with self.assertRaises(ValueError):
      cifar100_dataset.get_federated_datasets(crop_shape=(32, 32))
    with self.assertRaises(ValueError):
      cifar100_dataset.get_centralized_datasets(crop_shape=(32, 32))
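# A minimal sketch (assumed, inferred from the tests above) of the argument
# validation that `get_federated_datasets` performs. The first error message
# is taken verbatim from the test; the second is hypothetical, since the test
# only checks that a ValueError is raised for a length-2 crop shape.
def _validate_dataset_args(client_epochs_per_round, crop_shape):
  if client_epochs_per_round < 1:
    raise ValueError('client_epochs_per_round must be a positive integer.')
  if len(crop_shape) != 3:
    raise ValueError(
        'crop_shape must have length 3, in (height, width, channels) form.')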
Example #5
import functools
from typing import Callable, Optional

from absl import logging
import tensorflow as tf
import tensorflow_federated as tff

# Project-internal modules (import paths assumed): cifar100_dataset,
# resnet_models, training_loop, and training_utils come from the
# federated_research codebase.

NUM_CLASSES = 100  # CIFAR-100 has 100 label classes.


def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    client_datasets_random_seed: Optional[int] = None,
    crop_size: Optional[int] = 24,
    total_rounds: Optional[int] = 1500,
    experiment_name: Optional[str] = 'federated_cifar100',
    root_output_dir: Optional[str] = '/tmp/fed_opt',
    max_eval_batches: Optional[int] = None,
    **kwargs):
  """Runs an iterative process on the CIFAR-100 classification task.

  This method will load and pre-process the dataset and construct a model for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  The iterative process must also have a callable attribute `get_model_weights`
  that takes as input the state of the iterative process and returns a
  `tff.learning.ModelWeights` object. A minimal builder satisfying this
  contract is sketched after this function.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    crop_size: An optional integer representing the resulting size of input
      images after preprocessing.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to None or a nonpositive
      integer, the full evaluation datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """

  crop_shape = (crop_size, crop_size, 3)

  cifar_train, _ = cifar100_dataset.get_federated_datasets(
      train_client_epochs_per_round=client_epochs_per_round,
      train_client_batch_size=client_batch_size,
      crop_shape=crop_shape)

  # Only the centralized test split is used here; the train split is discarded.
  _, cifar_test = cifar100_dataset.get_centralized_datasets(
      train_batch_size=client_batch_size,
      crop_shape=crop_shape)
  if max_eval_batches and max_eval_batches >= 1:
    cifar_test = cifar_test.take(max_eval_batches)

  input_spec = cifar_train.create_tf_dataset_for_client(
      cifar_train.client_ids[0]).element_spec

  model_builder = functools.partial(
      resnet_models.create_resnet18,
      input_shape=crop_shape,
      num_classes=NUM_CLASSES)

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  training_process = iterative_process_builder(tff_model_fn)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      dataset=cifar_train,
      clients_per_round=clients_per_round,
      random_seed=client_datasets_random_seed)

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=cifar_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  # `summary()` prints and returns None, so pass `print_fn` to route the
  # summary through the logger instead of logging `None`.
  model_builder().summary(print_fn=logging.info)

  training_loop.run(
      iterative_process=training_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      test_fn=evaluate_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
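# A minimal sketch (assumptions: `tff.learning.build_federated_averaging_process`
# is available and the returned process exposes a `get_model_weights` attribute,
# as in recent TFF releases) of an `iterative_process_builder` satisfying the
# contract documented in `run_federated`'s docstring. Optimizer choices here
# are illustrative, not from the original source.
def example_iterative_process_builder(
    model_fn: Callable[[], tff.learning.Model]
) -> tff.templates.IterativeProcess:
  return tff.learning.build_federated_averaging_process(
      model_fn=model_fn,
      client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1),
      server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))


# Hypothetical usage:
# run_federated(
#     iterative_process_builder=example_iterative_process_builder,
#     client_epochs_per_round=1,
#     client_batch_size=20,
#     clients_per_round=10,
#     total_rounds=10)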