Example #1
def run_centralized(optimizer: tf.keras.optimizers.Optimizer,
                    experiment_name: str,
                    root_output_dir: str,
                    num_epochs: int,
                    batch_size: int,
                    decay_epochs: Optional[int] = None,
                    lr_decay: Optional[float] = None,
                    hparams_dict: Optional[Mapping[str, Any]] = None,
                    crop_size: Optional[int] = 24,
                    max_batches: Optional[int] = None):
    """Trains a ResNet-18 on CIFAR-10 using a given optimizer.

    Args:
      optimizer: A `tf.keras.optimizers.Optimizer` used to perform training.
      experiment_name: The name of the experiment. Part of the output directory.
      root_output_dir: The top-level output directory for experiment runs. The
        `experiment_name` argument will be appended, and the directory will
        contain tensorboard logs, metrics written as CSVs, and a CSV of
        hyperparameter choices (if `hparams_dict` is used).
      num_epochs: The number of training epochs.
      batch_size: The batch size, used for train, validation, and test.
      decay_epochs: The number of epochs of training before decaying the
        learning rate. If `None`, no decay occurs.
      lr_decay: The amount to decay the learning rate by after `decay_epochs`
        training epochs have occurred.
      hparams_dict: A mapping with string keys representing the hyperparameters
        and their values. If not `None`, this is written to CSV.
      crop_size: The crop size used for CIFAR-10 preprocessing.
      max_batches: If set to a positive integer, datasets are capped to at most
        that many batches. If set to `None` or a nonpositive integer, the full
        datasets are used.
    """

    # CIFAR-10 preprocessing crops images to `crop_size` x `crop_size`.
    crop_shape = (crop_size, crop_size, NUM_CHANNELS)

    cifar_train, cifar_test = cifar10_dataset.get_centralized_datasets(
        train_batch_size=batch_size, crop_shape=crop_shape)

    # Optionally cap both datasets, which is useful for quick debugging runs.
    if max_batches and max_batches >= 1:
        cifar_train = cifar_train.take(max_batches)
        cifar_test = cifar_test.take(max_batches)

    model = resnet_models.create_resnet18(input_shape=crop_shape,
                                          num_classes=NUM_CLASSES)
    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  optimizer=optimizer,
                  metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

    centralized_training_loop.run(keras_model=model,
                                  train_dataset=cifar_train,
                                  validation_dataset=cifar_test,
                                  experiment_name=experiment_name,
                                  root_output_dir=root_output_dir,
                                  num_epochs=num_epochs,
                                  hparams_dict=hparams_dict,
                                  decay_epochs=decay_epochs,
                                  lr_decay=lr_decay)
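
A minimal usage sketch, assuming `run_centralized` and its module-level dependencies (`cifar10_dataset`, `resnet_models`, `centralized_training_loop`, `NUM_CHANNELS`, `NUM_CLASSES`) are importable; the optimizer settings, paths, and hyperparameter values below are illustrative placeholders, not taken from the source:

# Hypothetical invocation; all values are placeholders.
import tensorflow as tf

optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
run_centralized(optimizer=optimizer,
                experiment_name='cifar10_sgd_baseline',
                root_output_dir='/tmp/centralized_opt',
                num_epochs=50,
                batch_size=32,
                decay_epochs=25,
                lr_decay=0.1,
                hparams_dict={'learning_rate': 0.1, 'momentum': 0.9})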
Example #2
def test_centralized_cifar_structure(self):
    crop_shape = (24, 24, 3)
    cifar_train, cifar_test = cifar10_dataset.get_centralized_datasets(
        train_batch_size=20, test_batch_size=100, crop_shape=crop_shape)
    # Each batch is an (images, labels) tuple; check the image tensor shape.
    train_batch = next(iter(cifar_train))
    train_batch_shape = tuple(train_batch[0].shape)
    self.assertEqual(train_batch_shape, (20, 24, 24, 3))
    test_batch = next(iter(cifar_test))
    test_batch_shape = tuple(test_batch[0].shape)
    self.assertEqual(test_batch_shape, (100, 24, 24, 3))
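
The same module also exposes a federated variant, `cifar10_dataset.get_federated_datasets`, which Example #4 below relies on. A short sketch of inspecting its per-client structure, using only the attributes that appear in Example #4; the argument values are illustrative:

# Hypothetical inspection; argument names follow Example #4.
cifar_train_fed, _ = cifar10_dataset.get_federated_datasets(
    train_client_epochs_per_round=1,
    train_client_batch_size=20,
    crop_shape=(24, 24, 3))
# Each client id indexes that client's local tf.data.Dataset.
first_client_id = cifar_train_fed.client_ids[0]
client_dataset = cifar_train_fed.create_tf_dataset_for_client(first_client_id)
print(client_dataset.element_spec)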
Example #3
def test_raises_length_2_crop(self):
    # `crop_shape` must be a length-3 (height, width, channels) tuple, so a
    # length-2 shape should raise.
    with self.assertRaises(ValueError):
        cifar10_dataset.get_centralized_datasets(crop_shape=(32, 32))
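
For contrast, the accepted form is a length-3 `(height, width, channels)` tuple, as in this sketch (values are illustrative):

# Hypothetical passing counterpart to the test above.
cifar_train, cifar_test = cifar10_dataset.get_centralized_datasets(
    crop_shape=(32, 32, 3))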
Example #4
def run_federated(
        iterative_process_builder: Callable[...,
                                            tff.templates.IterativeProcess],
        client_epochs_per_round: int,
        client_batch_size: int,
        clients_per_round: int,
        client_datasets_random_seed: Optional[int] = None,
        crop_size: Optional[int] = 24,
        total_rounds: Optional[int] = 1500,
        experiment_name: Optional[str] = 'federated_cifar10',
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        uniform_weighting: Optional[bool] = False,
        **kwargs):
    """Runs an iterative process on the CIFAR-10 classification task.

    This method will load and pre-process the dataset and construct a model
    used for the task. It then uses `iterative_process_builder` to create an
    iterative process that it applies to the task, using
    `federated_research.utils.training_loop`.

    We assume that the iterative process has the following functional type
    signatures:

      *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
      *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
          represents the server state, `{B*}` represents the client datasets,
          and `T` represents a Python `Mapping` object.

    The iterative process must also have a callable attribute
    `get_model_weights` that takes as input the state of the iterative process
    and returns a `tff.learning.ModelWeights` object.

    Args:
      iterative_process_builder: A function that accepts a no-arg `model_fn`
        and a `tff.learning.ClientWeighting` value, and returns a
        `tff.templates.IterativeProcess`. The `model_fn` must return a
        `tff.learning.Model`.
      client_epochs_per_round: An integer representing the number of epochs of
        training performed per client in each training round.
      client_batch_size: An integer representing the batch size used on
        clients.
      clients_per_round: An integer representing the number of clients
        participating in each round.
      client_datasets_random_seed: An optional int used to seed which clients
        are sampled at each round. If `None`, no seed is used.
      crop_size: An optional integer representing the resulting size of input
        images after preprocessing.
      total_rounds: The number of federated training rounds.
      experiment_name: The name of the experiment being run. This will be
        appended to the `root_output_dir` for purposes of writing outputs.
      root_output_dir: The name of the root output directory for writing
        experiment outputs.
      uniform_weighting: Whether to weight clients uniformly. If `False`,
        clients are weighted by the number of samples.
      **kwargs: Additional arguments configuring the training loop. For details
        on supported arguments, see
        `federated_research/utils/training_utils.py`.
    """

    crop_shape = (crop_size, crop_size, 3)

    cifar_train, _ = cifar10_dataset.get_federated_datasets(
        train_client_epochs_per_round=client_epochs_per_round,
        train_client_batch_size=client_batch_size,
        crop_shape=crop_shape)

    _, cifar_test = cifar10_dataset.get_centralized_datasets(
        crop_shape=crop_shape)

    # Use the element spec of one client's dataset as the model's input spec.
    input_spec = cifar_train.create_tf_dataset_for_client(
        cifar_train.client_ids[0]).element_spec

    model_builder = functools.partial(resnet_models.create_resnet18,
                                      input_shape=crop_shape,
                                      num_classes=NUM_CLASSES)

    loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    if uniform_weighting:
        client_weighting = tff.learning.ClientWeighting.UNIFORM
    else:
        client_weighting = tff.learning.ClientWeighting.NUM_EXAMPLES

    training_process = iterative_process_builder(tff_model_fn,
                                                 client_weighting)

    # Sample `clients_per_round` client datasets uniformly at random each round.
    client_datasets_fn = tff.simulation.build_uniform_client_sampling_fn(
        dataset=cifar_train,
        clients_per_round=clients_per_round,
        random_seed=client_datasets_random_seed)

    evaluate_fn = tff.learning.build_federated_evaluation(tff_model_fn)

    def validation_fn(model_weights, round_num):
        del round_num
        return evaluate_fn(model_weights, [cifar_test])

    def test_fn(model_weights):
        return evaluate_fn(model_weights, [cifar_test])

    logging.info('Training model:')
    # `Model.summary()` prints and returns None, so route its output through
    # `logging.info` via `print_fn` instead of logging the return value.
    model_builder().summary(print_fn=logging.info)

    training_loop.run(iterative_process=training_process,
                      train_client_datasets_fn=client_datasets_fn,
                      evaluation_fn=validation_fn,
                      test_fn=test_fn,
                      total_rounds=total_rounds,
                      experiment_name=experiment_name,
                      root_output_dir=root_output_dir,
                      **kwargs)
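
A sketch of how `run_federated` might be driven end to end, assuming a TFF version in which `tff.learning.build_federated_averaging_process` accepts a `client_weighting` argument (consistent with the `tff.learning.ClientWeighting` usage above); the optimizers and hyperparameter values are placeholders:

# Hypothetical driver; assumes `tff.learning.build_federated_averaging_process`
# accepts `client_weighting`, matching the two-argument builder call above.
import tensorflow as tf
import tensorflow_federated as tff

def fed_avg_builder(model_fn, client_weighting):
    return tff.learning.build_federated_averaging_process(
        model_fn=model_fn,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1),
        server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
        client_weighting=client_weighting)

run_federated(fed_avg_builder,
              client_epochs_per_round=1,
              client_batch_size=20,
              clients_per_round=10,
              client_datasets_random_seed=1,
              total_rounds=100)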