def test_preprocess_applied(self, mock_load_data, mock_load_word_counts,
                                mock_load_tag_counts):
        if tf.config.list_logical_devices('GPU'):
            self.skipTest('skip GPU test')
        # Mock out the actual data loading from disk. Assert that the preprocessing
        # function is applied to the client data, and that only the ClientData
        # objects we desired are used.
        #
        # The correctness of the preprocessing function is tested in other tests.
        sample_ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)

        mock_train = mock.create_autospec(tff.simulation.datasets.ClientData)
        mock_train.create_tf_dataset_from_all_clients = mock.Mock(
            return_value=sample_ds)

        mock_validation = mock.create_autospec(
            tff.simulation.datasets.ClientData)

        mock_test = mock.create_autospec(tff.simulation.datasets.ClientData)
        mock_test.create_tf_dataset_from_all_clients = mock.Mock(
            return_value=sample_ds)

        mock_load_data.return_value = (mock_train, mock_validation, mock_test)
        mock_load_word_counts.return_value = collections.OrderedDict(a=1)
        mock_load_tag_counts.return_value = collections.OrderedDict(a=1)

        _, _, _ = stackoverflow_tag_prediction.get_centralized_datasets(
            word_vocab_size=1000,
            tag_vocab_size=500,
            train_batch_size=10,
            validation_batch_size=50,
            test_batch_size=100,
            num_validation_examples=10000)

        # Assert the validation ClientData isn't used, and that the train and
        # test datasets are amalgamated into single datasets over all clients.
        mock_load_data.assert_called_once()
        self.assertEmpty(mock_validation.mock_calls)
        self.assertEqual(
            mock_train.mock_calls,
            mock.call.create_tf_dataset_from_all_clients().call_list())
        self.assertEqual(
            mock_test.mock_calls,
            mock.call.create_tf_dataset_from_all_clients().call_list())

        # Assert the word counts were loaded once to apply to each dataset.
        mock_load_word_counts.assert_called_once()
        mock_load_tag_counts.assert_called_once()
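

# The TEST_DATA fixture referenced by the test above is not shown in this
# excerpt. A minimal sketch of what it might look like, assuming the standard
# element structure of the TFF Stack Overflow dataset (the literal values here
# are placeholders, not the original fixture):
_EXAMPLE_TEST_DATA = collections.OrderedDict(
    creation_date=['2010-01-01 00:00:00 UTC'],
    score=[tf.constant(0, dtype=tf.int64)],
    tags=['tag1|tag2'],
    title=['an example title'],
    tokens=['this is an example sentence'],
    type=['question'],
)
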
def run_centralized(optimizer: tf.keras.optimizers.Optimizer,
                    experiment_name: str,
                    root_output_dir: str,
                    num_epochs: int,
                    batch_size: int,
                    decay_epochs: Optional[int] = None,
                    lr_decay: Optional[float] = None,
                    hparams_dict: Optional[Mapping[str, Any]] = None,
                    vocab_tokens_size: Optional[int] = 10000,
                    vocab_tags_size: Optional[int] = 500,
                    num_validation_examples: Optional[int] = 10000,
                    max_batches: Optional[int] = None):
    """Trains an RNN on the Stack Overflow next word prediction task.

  Args:
    optimizer: A `tf.keras.optimizers.Optimizer` used to perform training.
    experiment_name: The name of the experiment. Part of the output directory.
    root_output_dir: The top-level output directory for experiment runs. The
      `experiment_name` argument will be appended, and the directory will
      contain tensorboard logs, metrics written as CSVs, and a CSV of
      hyperparameter choices (if `hparams_dict` is used).
    num_epochs: The number of training epochs.
    batch_size: The batch size used for the training dataset; validation and
      test use the defaults of `get_centralized_datasets`.
    decay_epochs: The number of epochs of training before decaying the learning
      rate. If None, no decay occurs.
    lr_decay: The amount to decay the learning rate by after `decay_epochs`
      training epochs have occurred.
    hparams_dict: A mapping with string keys representing the hyperparameters
      and their values. If not None, this is written to CSV.
    vocab_tokens_size: Integer dictating the number of most frequent words to
      use in the vocabulary.
    vocab_tags_size: Integer dictating the number of most frequent tags to use
      in the label creation.
    num_validation_examples: The number of test examples to use for validation.
    max_batches: If set to a positive integer, datasets are capped to at most
      that many batches. If set to None or a nonpositive integer, the full
      datasets are used.
  """

    train_dataset, validation_dataset, test_dataset = stackoverflow_tag_prediction.get_centralized_datasets(
        train_batch_size=batch_size,
        word_vocab_size=vocab_tokens_size,
        tag_vocab_size=vocab_tags_size,
        num_validation_examples=num_validation_examples)

    if max_batches and max_batches >= 1:
        train_dataset = train_dataset.take(max_batches)
        validation_dataset = validation_dataset.take(max_batches)
        test_dataset = test_dataset.take(max_batches)

    model = stackoverflow_lr_models.create_logistic_model(
        vocab_tokens_size=vocab_tokens_size, vocab_tags_size=vocab_tags_size)

    model.compile(loss=tf.keras.losses.BinaryCrossentropy(
        from_logits=False, reduction=tf.keras.losses.Reduction.SUM),
                  optimizer=optimizer,
                  metrics=[
                      tf.keras.metrics.Precision(),
                      tf.keras.metrics.Recall(top_k=5)
                  ])

    centralized_training_loop.run(keras_model=model,
                                  train_dataset=train_dataset,
                                  validation_dataset=validation_dataset,
                                  test_dataset=test_dataset,
                                  experiment_name=experiment_name,
                                  root_output_dir=root_output_dir,
                                  num_epochs=num_epochs,
                                  hparams_dict=hparams_dict,
                                  decay_epochs=decay_epochs,
                                  lr_decay=lr_decay)
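

# A minimal sketch of how run_centralized might be invoked; the optimizer,
# paths, and sizes below are illustrative assumptions rather than the original
# experiment flags:
def _example_run_centralized():
    run_centralized(
        optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
        experiment_name='centralized_stackoverflow_tag_lr',
        root_output_dir='/tmp/centralized_opt',
        num_epochs=10,
        batch_size=100)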
Example #3
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    client_datasets_random_seed: Optional[int] = None,
    vocab_tokens_size: Optional[int] = 10000,
    vocab_tags_size: Optional[int] = 500,
    max_elements_per_user: Optional[int] = 1000,
    num_validation_examples: Optional[int] = 10000,
    total_rounds: Optional[int] = 1500,
    experiment_name: Optional[str] = 'federated_so_lr',
    root_output_dir: Optional[str] = '/tmp/fed_opt',
    max_eval_batches: Optional[int] = None,
    **kwargs):
  """Runs an iterative process on the Stack Overflow logistic regression task.

  This method will load and pre-process datasets and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  The iterative process must also have a callable attribute `get_model_weights`
  that takes as input the state of the iterative process, and returns a
  `tff.learning.ModelWeights` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    vocab_tokens_size: Integer dictating the number of most frequent words to
      use in the vocabulary.
    vocab_tags_size: Integer dictating the number of most frequent tags to use
      in the label creation.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset.
    num_validation_examples: The number of test examples to use for validation.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to None or a nonpositive
      integer, the full evaluation datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """

  stackoverflow_train, _ = stackoverflow_tag_prediction.get_federated_datasets(
      word_vocab_size=vocab_tokens_size,
      tag_vocab_size=vocab_tags_size,
      train_client_batch_size=client_batch_size,
      train_client_epochs_per_round=client_epochs_per_round,
      max_elements_per_train_client=max_elements_per_user)

  _, stackoverflow_validation, stackoverflow_test = stackoverflow_tag_prediction.get_centralized_datasets(
      train_batch_size=client_batch_size,
      word_vocab_size=vocab_tokens_size,
      tag_vocab_size=vocab_tags_size,
      num_validation_examples=num_validation_examples)

  if max_eval_batches and max_eval_batches >= 1:
    stackoverflow_validation = stackoverflow_validation.take(max_eval_batches)
    stackoverflow_test = stackoverflow_test.take(max_eval_batches)

  input_spec = stackoverflow_train.create_tf_dataset_for_client(
      stackoverflow_train.client_ids[0]).element_spec

  model_builder = functools.partial(
      stackoverflow_lr_models.create_logistic_model,
      vocab_tokens_size=vocab_tokens_size,
      vocab_tags_size=vocab_tags_size)

  loss_builder = functools.partial(
      tf.keras.losses.BinaryCrossentropy,
      from_logits=False,
      reduction=tf.keras.losses.Reduction.SUM)

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  training_process = iterative_process_builder(tff_model_fn)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      dataset=stackoverflow_train,
      clients_per_round=clients_per_round,
      random_seed=client_datasets_random_seed)

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      eval_dataset=stackoverflow_validation,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  test_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      # Use both val and test for symmetry with other experiments, which
      # evaluate on the entire test set.
      eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  model_builder().summary(print_fn=logging.info)

  training_loop.run(
      iterative_process=training_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      test_fn=test_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
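

# `metrics_builder` is referenced above but not defined in this excerpt. A
# plausible stand-in, consistent with the metrics compiled into the
# centralized Keras model earlier (an assumption, not the original helper):
def example_metrics_builder():
  return [
      tf.keras.metrics.Precision(name='precision'),
      tf.keras.metrics.Recall(top_k=5, name='recall_at_5'),
  ]


# A minimal sketch of an `iterative_process_builder` that satisfies the
# `initialize`/`next`/`get_model_weights` requirements described in the
# docstring above. Plain FedAvg is used purely as an illustration; it is not
# necessarily the process used in the original experiments:
def example_iterative_process_builder(model_fn):
  return tff.learning.build_federated_averaging_process(
      model_fn=model_fn,
      client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1),
      server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))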
Example #4
def configure_training(
        task_spec: training_specs.TaskSpec,
        vocab_tokens_size: int = 10000,
        vocab_tags_size: int = 500,
        max_elements_per_user: int = 1000,
        num_validation_examples: int = 10000) -> training_specs.RunnerSpec:
    """Configures training for the Stack Overflow tag prediction task.

  This tag prediction is performed via multi-class one-versus-rest logistic
  regression. This method will load and pre-process datasets and construct a
  model used for the task. It then uses `iterative_process_builder` to create an
  iterative process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    vocab_tokens_size: Integer dictating the number of most frequent words to
      use in the vocabulary.
    vocab_tags_size: Integer dictating the number of most frequent tags to use
      in the label creation.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset.
    num_validation_examples: The number of test examples to use for validation.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """

    stackoverflow_train, _, _ = tff.simulation.datasets.stackoverflow.load_data(
    )

    _, stackoverflow_validation, stackoverflow_test = stackoverflow_tag_prediction.get_centralized_datasets(
        train_batch_size=task_spec.client_batch_size,
        word_vocab_size=vocab_tokens_size,
        tag_vocab_size=vocab_tags_size,
        num_validation_examples=num_validation_examples)

    word_vocab = stackoverflow_tag_prediction.create_word_vocab(
        vocab_tokens_size)
    tag_vocab = stackoverflow_tag_prediction.create_tag_vocab(vocab_tags_size)

    train_preprocess_fn = stackoverflow_tag_prediction.create_preprocess_fn(
        word_vocab=word_vocab,
        tag_vocab=tag_vocab,
        client_batch_size=task_spec.client_batch_size,
        client_epochs_per_round=task_spec.client_epochs_per_round,
        max_elements_per_client=max_elements_per_user)
    input_spec = train_preprocess_fn.type_signature.result.element

    model_builder = functools.partial(
        stackoverflow_lr_models.create_logistic_model,
        vocab_tokens_size=vocab_tokens_size,
        vocab_tags_size=vocab_tags_size)

    loss_builder = functools.partial(tf.keras.losses.BinaryCrossentropy,
                                     from_logits=False,
                                     reduction=tf.keras.losses.Reduction.SUM)

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    iterative_process = task_spec.iterative_process_builder(tff_model_fn)

    @tff.tf_computation(tf.string)
    def build_train_dataset_from_client_id(client_id):
        client_dataset = stackoverflow_train.dataset_computation(client_id)
        return train_preprocess_fn(client_dataset)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        build_train_dataset_from_client_id, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        stackoverflow_train.client_ids,
        size=task_spec.clients_per_round,
        replace=False,
        random_seed=task_spec.client_datasets_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))

    training_process.get_model_weights = iterative_process.get_model_weights

    centralized_validation_fn = training_utils.build_centralized_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=stackoverflow_validation,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    def validation_fn(server_state, round_num):
        del round_num
        return centralized_validation_fn(
            iterative_process.get_model_weights(server_state))

    centralized_test_fn = training_utils.build_centralized_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    def test_fn(server_state):
        return centralized_test_fn(
            iterative_process.get_model_weights(server_state))

    return training_specs.RunnerSpec(iterative_process=training_process,
                                     client_datasets_fn=client_sampling_fn,
                                     validation_fn=validation_fn,
                                     test_fn=test_fn)
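

# A rough sketch of wiring configure_training into the training loop. The
# TaskSpec fields and the flag values below are assumptions inferred from how
# the spec is consumed above; they are not taken from the original runner:
def _example_configure_and_run():
    task_spec = training_specs.TaskSpec(
        iterative_process_builder=example_iterative_process_builder,
        client_epochs_per_round=1,
        client_batch_size=100,
        clients_per_round=10,
        client_datasets_random_seed=42)
    runner_spec = configure_training(task_spec)
    training_loop.run(
        iterative_process=runner_spec.iterative_process,
        client_datasets_fn=runner_spec.client_datasets_fn,
        validation_fn=runner_spec.validation_fn,
        test_fn=runner_spec.test_fn,
        total_rounds=100,
        experiment_name='federated_so_tag_lr',
        root_output_dir='/tmp/fed_opt')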