def test_build_sample_fn_without_random_seed(self, a, replace):
    """Tests that sampling without a random seed yields differing samples."""
    size = 10
    round_num = 5

    sample_fn_1 = training_utils.build_sample_fn(a, size, replace=replace)
    sample_1 = sample_fn_1(round_num)

    sample_fn_2 = training_utils.build_sample_fn(a, size, replace=replace)
    sample_2 = sample_fn_2(round_num)

    self.assertNotAllEqual(sample_1, sample_2)
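
# A companion sketch (an assumption, not part of the original snippet): with a
# fixed `random_seed`, the same round number should reproduce the same sample
# from `training_utils.build_sample_fn`.
def test_build_sample_fn_with_random_seed(self, a, replace):
    size = 10
    round_num = 5
    random_seed = 781

    sample_fn_1 = training_utils.build_sample_fn(
        a, size, replace=replace, random_seed=random_seed)
    sample_1 = sample_fn_1(round_num)

    sample_fn_2 = training_utils.build_sample_fn(
        a, size, replace=replace, random_seed=random_seed)
    sample_2 = sample_fn_2(round_num)

    self.assertAllEqual(sample_1, sample_2)
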
def configure_training(
    task_spec: training_specs.TaskSpec) -> training_specs.RunnerSpec:
  """Configures training for the EMNIST autoencoder task.

  This method will load and pre-process datasets and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """

  emnist_task = 'autoencoder'
  emnist_train, _ = tff.simulation.datasets.emnist.load_data(only_digits=False)
  _, emnist_test = emnist_dataset.get_centralized_datasets(
      only_digits=False, emnist_task=emnist_task)

  train_preprocess_fn = emnist_dataset.create_preprocess_fn(
      num_epochs=task_spec.client_epochs_per_round,
      batch_size=task_spec.client_batch_size,
      emnist_task=emnist_task)

  input_spec = train_preprocess_fn.type_signature.result.element

  model_builder = emnist_ae_models.create_autoencoder_model
  loss_builder = functools.partial(
      tf.keras.losses.MeanSquaredError, reduction=tf.keras.losses.Reduction.SUM)
  metrics_builder = lambda: [tf.keras.metrics.MeanSquaredError()]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  iterative_process = task_spec.iterative_process_builder(tff_model_fn)

  if hasattr(emnist_train, 'dataset_computation'):

    @tff.tf_computation(tf.string)
    def build_train_dataset_from_client_id(client_id):
      client_dataset = emnist_train.dataset_computation(client_id)
      return train_preprocess_fn(client_dataset)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        build_train_dataset_from_client_id, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        emnist_train.client_ids,
        size=task_spec.clients_per_round,
        replace=False,
        random_seed=task_spec.sampling_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))

  else:
    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_preprocess_fn, iterative_process)
    client_sampling_fn = training_utils.build_client_datasets_fn(
        dataset=emnist_train,
        clients_per_round=task_spec.clients_per_round,
        random_seed=task_spec.sampling_random_seed)

  training_process.get_model_weights = iterative_process.get_model_weights

  test_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=emnist_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  validation_fn = lambda model_weights, round_num: test_fn(model_weights)

  return training_specs.RunnerSpec(
      iterative_process=training_process,
      client_datasets_fn=client_sampling_fn,
      validation_fn=validation_fn,
      test_fn=test_fn)
Example #3
def configure_training(
        task_spec: training_specs.TaskSpec,
        crop_size: int = 24,
        distort_train_images: bool = True) -> training_specs.RunnerSpec:
    """Configures training for the CIFAR-100 classification task.

  This method will load and pre-process datasets and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    crop_size: An optional integer representing the resulting size of input
      images after preprocessing.
    distort_train_images: A boolean indicating whether to distort training
      images during preprocessing via random crops, as opposed to simply
      resizing the image.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """
    crop_shape = (crop_size, crop_size, 3)

    cifar_train, _ = tff.simulation.datasets.cifar100.load_data()
    _, cifar_test = cifar100_dataset.get_centralized_datasets(
        train_batch_size=task_spec.client_batch_size, crop_shape=crop_shape)

    train_preprocess_fn = cifar100_dataset.create_preprocess_fn(
        num_epochs=task_spec.client_epochs_per_round,
        batch_size=task_spec.client_batch_size,
        crop_shape=crop_shape,
        distort_image=distort_train_images)
    input_spec = train_preprocess_fn.type_signature.result.element

    model_builder = functools.partial(resnet_models.create_resnet18,
                                      input_shape=crop_shape,
                                      num_classes=NUM_CLASSES)

    loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    iterative_process = task_spec.iterative_process_builder(tff_model_fn)

    @tff.tf_computation(tf.string)
    def build_train_dataset_from_client_id(client_id):
        client_dataset = cifar_train.dataset_computation(client_id)
        return train_preprocess_fn(client_dataset)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        build_train_dataset_from_client_id, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        cifar_train.client_ids,
        size=task_spec.clients_per_round,
        replace=False,
        random_seed=task_spec.client_datasets_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))

    training_process.get_model_weights = iterative_process.get_model_weights

    centralized_eval_fn = training_utils.build_centralized_evaluate_fn(
        eval_dataset=cifar_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    def test_fn(state):
        return centralized_eval_fn(iterative_process.get_model_weights(state))

    def validation_fn(state, round_num):
        del round_num
        return test_fn(state)

    return training_specs.RunnerSpec(iterative_process=training_process,
                                     client_datasets_fn=client_sampling_fn,
                                     validation_fn=validation_fn,
                                     test_fn=test_fn)
Example #4
def configure_training(task_spec: training_specs.TaskSpec,
                       sequence_length: int = 80) -> training_specs.RunnerSpec:
    """Configures training for the Shakespeare next-character prediction task.

  This method will load and pre-process datasets and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    sequence_length: An int specifying the length of the character sequences
      used for prediction.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """

    shakespeare_train, _ = tff.simulation.datasets.shakespeare.load_data()
    _, shakespeare_test = shakespeare_dataset.get_centralized_datasets(
        sequence_length=sequence_length)

    train_preprocess_fn = shakespeare_dataset.create_preprocess_fn(
        num_epochs=task_spec.client_epochs_per_round,
        batch_size=task_spec.client_batch_size,
        sequence_length=sequence_length)
    input_spec = train_preprocess_fn.type_signature.result.element

    model_builder = functools.partial(create_shakespeare_model,
                                      sequence_length=sequence_length)
    loss_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)
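
    # NOTE: `metrics_builder` is referenced in `tff_model_fn` below but is not
    # defined anywhere in this snippet. The line below is a minimal placeholder
    # (an assumption, not the original definition); the actual task likely
    # masks special tokens such as padding when computing accuracy.
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]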

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    iterative_process = task_spec.iterative_process_builder(tff_model_fn)

    if hasattr(shakespeare_train, 'dataset_computation'):

        @tff.tf_computation(tf.string)
        def build_train_dataset_from_client_id(client_id):
            client_dataset = shakespeare_train.dataset_computation(client_id)
            return train_preprocess_fn(client_dataset)

        training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
            build_train_dataset_from_client_id, iterative_process)
        client_ids_fn = training_utils.build_sample_fn(
            shakespeare_train.client_ids,
            size=task_spec.clients_per_round,
            replace=False,
            random_seed=task_spec.client_datasets_random_seed)
        # We convert the output to a list (instead of an np.ndarray) so that it can
        # be used as input to the iterative process.
        client_sampling_fn = lambda x: list(client_ids_fn(x))

    else:
        training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
            train_preprocess_fn, iterative_process)
        client_sampling_fn = training_utils.build_client_datasets_fn(
            dataset=shakespeare_train,
            clients_per_round=task_spec.clients_per_round,
            random_seed=task_spec.client_datasets_random_seed)

    training_process.get_model_weights = iterative_process.get_model_weights

    test_fn = training_utils.build_centralized_evaluate_fn(
        eval_dataset=shakespeare_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    validation_fn = lambda model_weights, round_num: test_fn(model_weights)

    return training_specs.RunnerSpec(iterative_process=training_process,
                                     client_datasets_fn=client_sampling_fn,
                                     validation_fn=validation_fn,
                                     test_fn=test_fn)
Example #5
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    max_elements_per_user: int,
    image_size: int,
    num_groups: int = 8,
    total_rounds: int = 3000,
    dataset_type: dataset.DatasetType = dataset.DatasetType.GLD23K,
    experiment_name: str = 'federated_gld23k',
    root_output_dir: str = '/tmp/fedopt_guide',
    dropout_prob: Optional[float] = None,
    client_datasets_random_seed: Optional[int] = None,
    **kwargs) -> None:
  """Runs an iterative process on the Google Landmark dataset.

  This method will load and pre-process dataset and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research/fedopt_guide/training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  The iterative process must also have a callable attribute `get_model_weights`
  that takes as input the state of the iterative process, and returns a
  `tff.learning.ModelWeights` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, a
      `client_weight_fn` and returns a `tff.templates.IterativeProcess`. The
      `model_fn` must return a `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset. This has to be a positive value or -1 (which means that
      all elements are taken for training).
    image_size: The height and width of images after preprocessing.
    num_groups: The number of groups in the GroupNorm layers of MobilenetV2.
    total_rounds: The number of federated training rounds.
    dataset_type: A `dataset.DatasetType` specifying which dataset is used for
      experiments.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    dropout_prob: Probability of setting a weight to zero in the dropout layer
      of MobilenetV2. Must be in the range [0, 1). Setting it to None (default)
      or zero means no dropout.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    **kwargs: Additional arguments configuring the training loop. For details on
      supported arguments, see
      `federated_research/fedopt_guide/training_utils.py`.
  """
  num_classes, shuffle_buffer_size = dataset.get_dataset_stats(dataset_type)

  train_data, _ = tff.simulation.datasets.gldv2.load_data(
      gld23k=(dataset_type == dataset.DatasetType.GLD23K))
  _, test_data = dataset.get_centralized_datasets(
      image_size=image_size,
      batch_size=client_batch_size,
      dataset_type=dataset_type)

  if dropout_prob and (dropout_prob < 0 or dropout_prob >= 1):
    raise ValueError(
        f'Expected a value in [0, 1) for `dropout_prob`, found {dropout_prob}.')

  def model_builder() -> tf.keras.Model:
    return mobilenet_v2.create_mobilenet_v2(
        input_shape=(image_size, image_size, 3),
        num_groups=num_groups,
        num_classes=num_classes,
        dropout_prob=dropout_prob)

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]
  input_spec = test_data.element_spec

  def model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  training_process = iterative_process_builder(
      model_fn=model_fn, client_weight_fn=None)

  preprocessing_fn = dataset.get_preprocessing_fn(
      image_size=image_size,
      batch_size=client_batch_size,
      num_epochs=client_epochs_per_round,
      max_elements=max_elements_per_user,
      shuffle_buffer_size=shuffle_buffer_size)

  @tff.tf_computation(tf.string)
  def train_dataset_computation(client_id):
    client_train_data = train_data.dataset_computation(client_id)
    return preprocessing_fn(client_train_data)

  trainer = tff.simulation.compose_dataset_computation_with_iterative_process(
      dataset_computation=train_dataset_computation, process=training_process)

  # `compose_dataset_computation_with_iterative_process` does not inherit the
  # `get_model_weights` attribute from the `training_process`.
  if not hasattr(training_process, 'get_model_weights'):
    raise ValueError(
        'The `iterative_process_builder` must create an iterative process '
        'that has an attribute `get_model_weights`. It is a `tff.Computation` '
        'that accepts as input the state of an iterative process, and returns '
        'the model weights part from the state. If you use '
        '`tff.learning.build_federated_averaging_process`, it should already '
        'satisfy this requirement.')
  else:
    trainer.get_model_weights = training_process.get_model_weights

  client_ids_fn = training_utils.build_sample_fn(
      train_data.client_ids,
      size=clients_per_round,
      replace=False,
      random_seed=client_datasets_random_seed)
  # We convert the output to a list (instead of an np.ndarray) so that it can
  # be used as input to the iterative process.
  client_ids_fn_as_list = lambda x: list(client_ids_fn(x))

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      eval_dataset=test_data,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  training_loop.run(
      iterative_process=trainer,
      train_client_datasets_fn=client_ids_fn_as_list,
      evaluation_fn=lambda model, _: evaluate_fn(model),
      test_fn=evaluate_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
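Example #6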
def configure_training(
        task_spec: training_specs.TaskSpec,
        vocab_size: int = 10000,
        num_oov_buckets: int = 1,
        sequence_length: int = 20,
        max_elements_per_user: int = 1000,
        num_validation_examples: int = 10000,
        embedding_size: int = 96,
        latent_size: int = 670,
        num_layers: int = 1,
        shared_embedding: bool = False) -> training_specs.RunnerSpec:
    """Configures training for Stack Overflow next-word prediction.

  This method will load and pre-process datasets and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    vocab_size: Integer dictating the number of most frequent words to use in
      the vocabulary.
    num_oov_buckets: The number of out-of-vocabulary buckets to use.
    sequence_length: The maximum number of words to take for each sequence.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset.
    num_validation_examples: The number of test examples to use for validation.
    embedding_size: The dimension of the word embedding layer.
    latent_size: The dimension of the latent units in the recurrent layers.
    num_layers: The number of stacked recurrent layers to use.
    shared_embedding: Boolean indicating whether to tie input and output
      embeddings.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """

    model_builder = functools.partial(
        stackoverflow_models.create_recurrent_model,
        vocab_size=vocab_size,
        num_oov_buckets=num_oov_buckets,
        embedding_size=embedding_size,
        latent_size=latent_size,
        num_layers=num_layers,
        shared_embedding=shared_embedding)

    loss_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

    special_tokens = stackoverflow_word_prediction.get_special_tokens(
        vocab_size, num_oov_buckets)
    pad_token = special_tokens.pad
    oov_tokens = special_tokens.oov
    eos_token = special_tokens.eos

    def metrics_builder():
        return [
            keras_metrics.MaskedCategoricalAccuracy(name='accuracy_with_oov',
                                                    masked_tokens=[pad_token]),
            keras_metrics.MaskedCategoricalAccuracy(name='accuracy_no_oov',
                                                    masked_tokens=[pad_token] +
                                                    oov_tokens),
            # Notice BOS never appears in ground truth.
            keras_metrics.MaskedCategoricalAccuracy(
                name='accuracy_no_oov_or_eos',
                masked_tokens=[pad_token, eos_token] + oov_tokens),
            keras_metrics.NumBatchesCounter(),
            keras_metrics.NumTokensCounter(masked_tokens=[pad_token])
        ]

    train_clientdata, _, _ = tff.simulation.datasets.stackoverflow.load_data()

    # TODO(b/161914546): consider moving evaluation to use
    # `tff.learning.build_federated_evaluation` to get metrics over client
    # distributions, as well as the example weight means from this centralized
    # evaluation.
    _, validation_dataset, test_dataset = stackoverflow_word_prediction.get_centralized_datasets(
        vocab_size=vocab_size,
        max_sequence_length=sequence_length,
        num_validation_examples=num_validation_examples,
        num_oov_buckets=num_oov_buckets)

    train_dataset_preprocess_comp = stackoverflow_word_prediction.create_preprocess_fn(
        vocab=stackoverflow_word_prediction.create_vocab(vocab_size),
        num_oov_buckets=num_oov_buckets,
        client_batch_size=task_spec.client_batch_size,
        client_epochs_per_round=task_spec.client_epochs_per_round,
        max_sequence_length=sequence_length,
        max_elements_per_client=max_elements_per_user)

    input_spec = train_dataset_preprocess_comp.type_signature.result.element

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    iterative_process = task_spec.iterative_process_builder(tff_model_fn)

    @tff.tf_computation(tf.string)
    def train_dataset_computation(client_id):
        client_train_data = train_clientdata.dataset_computation(client_id)
        return train_dataset_preprocess_comp(client_train_data)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_dataset_computation, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        train_clientdata.client_ids,
        size=task_spec.clients_per_round,
        replace=False,
        random_seed=task_spec.client_datasets_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))

    training_process.get_model_weights = iterative_process.get_model_weights

    centralized_validation_fn = training_utils.build_centralized_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=validation_dataset,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    def validation_fn(server_state, round_num):
        del round_num
        return centralized_validation_fn(
            iterative_process.get_model_weights(server_state))

    centralized_test_fn = training_utils.build_centralized_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=validation_dataset.concatenate(test_dataset),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    def test_fn(server_state):
        return centralized_test_fn(
            iterative_process.get_model_weights(server_state))

    return training_specs.RunnerSpec(iterative_process=training_process,
                                     client_datasets_fn=client_sampling_fn,
                                     validation_fn=validation_fn,
                                     test_fn=test_fn)
Example #7
def configure_training(task_spec: training_specs.TaskSpec,
                       model: str = 'cnn',
                       cache_dir: str = '~') -> training_specs.RunnerSpec:
  """Configures training for the EMNIST character recognition task.

  This method will load and pre-process datasets and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    model: A string specifying the model used for character recognition. Can be
      one of `cnn` and `2nn`, corresponding to a CNN model and a densely
      connected 2-layer model (respectively).
    cache_dir: A string specifying the directory used to cache the downloaded
      EMNIST data.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """
  emnist_task = 'digit_recognition'
  emnist_train, _ = tff.simulation.datasets.emnist.load_data(
      only_digits=False, cache_dir=cache_dir)
  _, emnist_test = emnist_dataset.get_centralized_datasets(
      only_digits=False, emnist_task=emnist_task, cache_dir=cache_dir)

  train_preprocess_fn = emnist_dataset.create_preprocess_fn(
      num_epochs=task_spec.client_epochs_per_round,
      batch_size=task_spec.client_batch_size,
      emnist_task=emnist_task)

  input_spec = train_preprocess_fn.type_signature.result.element

  if model == 'cnn':
    model_builder = functools.partial(
        emnist_models.create_conv_dropout_model, only_digits=False)
  elif model == '2nn':
    model_builder = functools.partial(
        emnist_models.create_two_hidden_layer_model, only_digits=False)
  else:
    raise ValueError(
        'Cannot handle model flag [{!s}], must be one of {!s}.'.format(
            model, EMNIST_MODELS))

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  iterative_process = task_spec.iterative_process_builder(tff_model_fn)

  @tff.tf_computation(tf.string)
  def build_train_dataset_from_client_id(client_id):
    client_dataset = emnist_train.dataset_computation(client_id)
    return train_preprocess_fn(client_dataset)

  training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
      build_train_dataset_from_client_id, iterative_process)
  client_ids_fn = training_utils.build_sample_fn(
      emnist_train.client_ids,
      size=task_spec.clients_per_round,
      replace=False,
      random_seed=task_spec.client_datasets_random_seed)
  # We convert the output to a list (instead of an np.ndarray) so that it can
  # be used as input to the iterative process.
  client_sampling_fn = lambda x: list(client_ids_fn(x))

  training_process.get_model_weights = iterative_process.get_model_weights

  test_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=emnist_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  validation_fn = lambda model_weights, round_num: test_fn(model_weights)

  return training_specs.RunnerSpec(
      iterative_process=training_process,
      client_datasets_fn=client_sampling_fn,
      validation_fn=validation_fn,
      test_fn=test_fn)
Example #8
def configure_training(
        task_spec: training_specs.TaskSpec,
        vocab_tokens_size: int = 10000,
        vocab_tags_size: int = 500,
        max_elements_per_user: int = 1000,
        num_validation_examples: int = 10000) -> training_specs.RunnerSpec:
    """Configures training for the Stack Overflow tag prediction task.

  This tag prediction is performed via multi-class one-versus-rest logistic
  regression. This method will load and pre-process datasets and construct a
  model used for the task. It then uses `iterative_process_builder` to create an
  iterative process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    vocab_tokens_size: Integer dictating the number of most frequent words to
      use in the vocabulary.
    vocab_tags_size: Integer dictating the number of most frequent tags to use
      in the label creation.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset.
    num_validation_examples: The number of test examples to use for validation.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """

    stackoverflow_train, _, _ = tff.simulation.datasets.stackoverflow.load_data()

    _, stackoverflow_validation, stackoverflow_test = stackoverflow_tag_prediction.get_centralized_datasets(
        train_batch_size=task_spec.client_batch_size,
        word_vocab_size=vocab_tokens_size,
        tag_vocab_size=vocab_tags_size,
        num_validation_examples=num_validation_examples)

    word_vocab = stackoverflow_tag_prediction.create_word_vocab(
        vocab_tokens_size)
    tag_vocab = stackoverflow_tag_prediction.create_tag_vocab(vocab_tags_size)

    train_preprocess_fn = stackoverflow_tag_prediction.create_preprocess_fn(
        word_vocab=word_vocab,
        tag_vocab=tag_vocab,
        client_batch_size=task_spec.client_batch_size,
        client_epochs_per_round=task_spec.client_epochs_per_round,
        max_elements_per_client=max_elements_per_user)
    input_spec = train_preprocess_fn.type_signature.result.element

    model_builder = functools.partial(
        stackoverflow_lr_models.create_logistic_model,
        vocab_tokens_size=vocab_tokens_size,
        vocab_tags_size=vocab_tags_size)

    loss_builder = functools.partial(tf.keras.losses.BinaryCrossentropy,
                                     from_logits=False,
                                     reduction=tf.keras.losses.Reduction.SUM)
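
    # NOTE: `metrics_builder` is used in `tff_model_fn` below but is not
    # defined in this snippet. A plausible placeholder for the one-vs-rest tag
    # prediction task (an assumption, not necessarily the original definition):
    metrics_builder = lambda: [
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(top_k=5, name='recall_at_5'),
    ]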

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    iterative_process = task_spec.iterative_process_builder(tff_model_fn)

    @tff.tf_computation(tf.string)
    def build_train_dataset_from_client_id(client_id):
        client_dataset = stackoverflow_train.dataset_computation(client_id)
        return train_preprocess_fn(client_dataset)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        build_train_dataset_from_client_id, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        stackoverflow_train.client_ids,
        size=task_spec.clients_per_round,
        replace=False,
        random_seed=task_spec.client_datasets_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))

    training_process.get_model_weights = iterative_process.get_model_weights

    centralized_validation_fn = training_utils.build_centralized_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=stackoverflow_validation,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    def validation_fn(server_state, round_num):
        del round_num
        return centralized_validation_fn(
            iterative_process.get_model_weights(server_state))

    centralized_test_fn = training_utils.build_centralized_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    def test_fn(server_state):
        return centralized_test_fn(
            iterative_process.get_model_weights(server_state))

    return training_specs.RunnerSpec(iterative_process=training_process,
                                     client_datasets_fn=client_sampling_fn,
                                     validation_fn=validation_fn,
                                     test_fn=test_fn)
Example #9
def configure_training(task_spec: training_specs.TaskSpec,
                       eval_spec: Optional[training_specs.EvalSpec] = None,
                       model: str = 'cnn') -> training_specs.RunnerSpec:
  """Configures training for the EMNIST character recognition task.

  This method will load and pre-process datasets and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    eval_spec: An `EvalSpec` class for configuring federated evaluation. If set
      to None, centralized evaluation is used for validation and testing
      instead.
    model: A string specifying the model used for character recognition. Can be
      one of `cnn` and `2nn`, corresponding to a CNN model and a densely
      connected 2-layer model (respectively).

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """
  emnist_task = 'digit_recognition'

  emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
      only_digits=False)

  train_preprocess_fn = emnist_dataset.create_preprocess_fn(
      num_epochs=task_spec.client_epochs_per_round,
      batch_size=task_spec.client_batch_size,
      emnist_task=emnist_task)

  input_spec = train_preprocess_fn.type_signature.result.element

  if model == 'cnn':
    model_builder = functools.partial(
        emnist_models.create_conv_dropout_model, only_digits=False)
  elif model == '2nn':
    model_builder = functools.partial(
        emnist_models.create_two_hidden_layer_model, only_digits=False)
  else:
    raise ValueError(
        'Cannot handle model flag [{!s}], must be one of {!s}.'.format(
            model, EMNIST_MODELS))

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  iterative_process = task_spec.iterative_process_builder(tff_model_fn)

  clients_per_train_round = min(task_spec.clients_per_round,
                                TOTAL_NUM_TRAIN_CLIENTS)

  if hasattr(emnist_train, 'dataset_computation'):

    @tff.tf_computation(tf.string)
    def build_train_dataset_from_client_id(client_id):
      client_dataset = emnist_train.dataset_computation(client_id)
      return train_preprocess_fn(client_dataset)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        build_train_dataset_from_client_id, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        emnist_train.client_ids,
        size=clients_per_train_round,
        replace=False,
        random_seed=task_spec.sampling_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))

  else:
    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_preprocess_fn, iterative_process)
    client_sampling_fn = training_utils.build_client_datasets_fn(
        dataset=emnist_train,
        clients_per_round=clients_per_train_round,
        random_seed=task_spec.sampling_random_seed)

  training_process.get_model_weights = iterative_process.get_model_weights

  if eval_spec:

    if eval_spec.clients_per_validation_round is None:
      clients_per_validation_round = TOTAL_NUM_TEST_CLIENTS
    else:
      clients_per_validation_round = min(eval_spec.clients_per_validation_round,
                                         TOTAL_NUM_TEST_CLIENTS)

    if eval_spec.clients_per_test_round is None:
      clients_per_test_round = TOTAL_NUM_TEST_CLIENTS
    else:
      clients_per_test_round = min(eval_spec.clients_per_test_round,
                                   TOTAL_NUM_TEST_CLIENTS)

    test_preprocess_fn = emnist_dataset.create_preprocess_fn(
        num_epochs=1,
        batch_size=eval_spec.client_batch_size,
        shuffle_buffer_size=1,
        emnist_task=emnist_task)
    emnist_test = emnist_test.preprocess(test_preprocess_fn)

    def eval_metrics_builder():
      return [
          tf.keras.metrics.SparseCategoricalCrossentropy(),
          tf.keras.metrics.SparseCategoricalAccuracy()
      ]

    federated_eval_fn = training_utils.build_federated_evaluate_fn(
        model_builder=model_builder, metrics_builder=eval_metrics_builder)

    validation_client_sampling_fn = training_utils.build_client_datasets_fn(
        emnist_test,
        clients_per_validation_round,
        random_seed=eval_spec.sampling_random_seed)
    test_client_sampling_fn = training_utils.build_client_datasets_fn(
        emnist_test,
        clients_per_test_round,
        random_seed=eval_spec.sampling_random_seed)

    def validation_fn(model_weights, round_num):
      validation_clients = validation_client_sampling_fn(round_num)
      return federated_eval_fn(model_weights, validation_clients)

    def test_fn(model_weights):
      # We fix the round number to get deterministic behavior.
      test_round_num = 0
      test_clients = test_client_sampling_fn(test_round_num)
      return federated_eval_fn(model_weights, test_clients)

  else:
    _, central_emnist_test = emnist_dataset.get_centralized_datasets(
        only_digits=False, emnist_task=emnist_task)

    test_fn = training_utils.build_centralized_evaluate_fn(
        eval_dataset=central_emnist_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    validation_fn = lambda model_weights, round_num: test_fn(model_weights)

  return training_specs.RunnerSpec(
      iterative_process=training_process,
      client_datasets_fn=client_sampling_fn,
      validation_fn=validation_fn,
      test_fn=test_fn)
Example #10
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    max_elements_per_user: int,
    total_rounds: int = 3000,
    vocab_size: int = 10000,
    num_oov_buckets: int = 1,
    sequence_length: int = 20,
    num_validation_examples: int = 10000,
    dim_embed: int = 96,
    dim_model: int = 512,
    dim_hidden: int = 2048,
    num_heads: int = 8,
    num_layers: int = 1,
    max_position_encoding: int = 1000,
    dropout: float = 0.1,
    client_datasets_random_seed: Optional[int] = None,
    experiment_name: str = 'federated_stackoverflow',
    root_output_dir: str = '/tmp/fedopt_guide',
    max_val_test_batches: Optional[int] = None,
    **kwargs) -> None:
  """Configures training for Stack Overflow next-word prediction.

  This method will load and pre-process dataset and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research/fedopt_guide/training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  The iterative process must also have a callable attribute `get_model_weights`
  that takes as input the state of the iterative process, and returns a
  `tff.learning.ModelWeights` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, a
      `client_weight_fn` and returns a `tff.templates.IterativeProcess`. The
      `model_fn` must return a `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset. This has to be a positive value or -1 (which means that
      all elements are taken for training).
    total_rounds: The number of federated training rounds.
    vocab_size: Integer dictating the number of most frequent words to use in
      the vocabulary.
    num_oov_buckets: The number of out-of-vocabulary buckets to use.
    sequence_length: The maximum number of words to take for each sequence.
    num_validation_examples: The number of test examples to use for validation.
    dim_embed: An integer for the dimension of the token embeddings.
    dim_model: An integer for the dimension of features of MultiHeadAttention
      layers.
    dim_hidden: An integer for the dimension of hidden layers of the FFN.
    num_heads:  An integer for the number of attention heads.
    num_layers: An integer for the number of Transformer blocks.
    max_position_encoding: Maximum number of positions for position embeddings.
    dropout: Dropout rate.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_val_test_batches: If set to a positive integer, val and test datasets
      are capped to at most that many batches. If set to None or a nonpositive
      integer, the full datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details on
      supported arguments, see
      `federated_research/fedopt_guide/training_utils.py`.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """

  train_clientdata, _, _ = tff.simulation.datasets.stackoverflow.load_data()

  _, validation_dataset, test_dataset = stackoverflow_word_prediction.get_centralized_datasets(
      vocab_size=vocab_size,
      max_sequence_length=sequence_length,
      num_validation_examples=num_validation_examples,
      num_oov_buckets=num_oov_buckets)

  if max_val_test_batches and max_val_test_batches >= 1:
    validation_dataset = validation_dataset.take(max_val_test_batches)
    test_dataset = test_dataset.take(max_val_test_batches)

  model_builder = functools.partial(
      transformer_models.create_transformer_lm,
      vocab_size=vocab_size,
      num_oov_buckets=num_oov_buckets,
      d_embed=dim_embed,
      d_model=dim_model,
      d_hidden=dim_hidden,
      num_heads=num_heads,
      num_layers=num_layers,
      max_position_encoding=max_position_encoding,
      dropout=dropout,
      name='stackoverflow-transformer')

  loss_builder = functools.partial(
      tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

  special_tokens = stackoverflow_word_prediction.get_special_tokens(
      vocab_size, num_oov_buckets)
  pad_token = special_tokens.pad
  oov_tokens = special_tokens.oov
  eos_token = special_tokens.eos

  def metrics_builder():
    return [
        keras_metrics.MaskedCategoricalAccuracy(
            name='accuracy_with_oov', masked_tokens=[pad_token]),
        keras_metrics.MaskedCategoricalAccuracy(
            name='accuracy_no_oov', masked_tokens=[pad_token] + oov_tokens),
        # Notice BOS never appears in ground truth.
        keras_metrics.MaskedCategoricalAccuracy(
            name='accuracy_no_oov_or_eos',
            masked_tokens=[pad_token, eos_token] + oov_tokens),
        keras_metrics.NumBatchesCounter(),
        keras_metrics.NumTokensCounter(masked_tokens=[pad_token])
    ]

  train_dataset_preprocess_comp = stackoverflow_word_prediction.create_preprocess_fn(
      vocab=stackoverflow_word_prediction.create_vocab(vocab_size),
      num_oov_buckets=num_oov_buckets,
      client_batch_size=client_batch_size,
      client_epochs_per_round=client_epochs_per_round,
      max_sequence_length=sequence_length,
      max_elements_per_client=max_elements_per_user)

  input_spec = train_dataset_preprocess_comp.type_signature.result.element

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  def client_weight_fn(local_outputs):
    # `num_tokens` is an int64 tensor of shape [1]; to use it as a client
    # weight we need a float32 scalar.
    return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

  iterative_process = iterative_process_builder(
      tff_model_fn, client_weight_fn=client_weight_fn)

  if hasattr(train_clientdata, 'dataset_computation'):

    @tff.tf_computation(tf.string)
    def train_dataset_computation(client_id):
      client_train_data = train_clientdata.dataset_computation(client_id)
      return train_dataset_preprocess_comp(client_train_data)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_dataset_computation, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        train_clientdata.client_ids,
        size=clients_per_round,
        replace=False,
        random_seed=client_datasets_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))
  else:
    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_dataset_preprocess_comp, iterative_process)
    client_sampling_fn = training_utils.build_client_datasets_fn(
        dataset=train_clientdata,
        clients_per_round=clients_per_round,
        random_seed=client_datasets_random_seed)

  training_process.get_model_weights = iterative_process.get_model_weights

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      eval_dataset=validation_dataset,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  validation_fn = lambda model_weights, round_num: evaluate_fn(model_weights)

  test_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      # Use both val and test for symmetry with other experiments, which
      # evaluate on the entire test set.
      eval_dataset=validation_dataset.concatenate(test_dataset),
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  training_loop.run(
      iterative_process=training_process,
      train_client_datasets_fn=client_sampling_fn,
      evaluation_fn=validation_fn,
      test_fn=test_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
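
Both `run_federated` examples above require an `iterative_process_builder` whose returned process exposes `get_model_weights`. The following is a minimal sketch of such a builder, assuming a TFF release in which `tff.learning.build_federated_averaging_process` still accepts a `client_weight_fn` argument; the optimizer choices are illustrative only.

def example_iterative_process_builder(model_fn, client_weight_fn=None):
  # Federated averaging attaches `get_model_weights` to the returned process,
  # which is the attribute the training loops above rely on.
  return tff.learning.build_federated_averaging_process(
      model_fn=model_fn,
      client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1),
      server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
      client_weight_fn=client_weight_fn)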