def configure_training(task_spec: training_specs.TaskSpec,
                       model: str = 'cnn') -> training_specs.RunnerSpec:
  """Configures training for the EMNIST character recognition task.

  This method will load and pre-process datasets and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    model: A string specifying the model used for character recognition. Can be
      one of `cnn` and `2nn`, corresponding to a CNN model and a densely
      connected 2-layer model (respectively).

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """
  emnist_task = 'digit_recognition'
  emnist_train, _ = tff.simulation.datasets.emnist.load_data(only_digits=False)
  _, emnist_test = emnist_dataset.get_centralized_datasets(
      only_digits=False, emnist_task=emnist_task)

  train_preprocess_fn = emnist_dataset.create_preprocess_fn(
      num_epochs=task_spec.client_epochs_per_round,
      batch_size=task_spec.client_batch_size,
      emnist_task=emnist_task)
  input_spec = train_preprocess_fn.type_signature.result.element

  if model == 'cnn':
    model_builder = functools.partial(
        emnist_models.create_conv_dropout_model, only_digits=False)
  elif model == '2nn':
    model_builder = functools.partial(
        emnist_models.create_two_hidden_layer_model, only_digits=False)
  else:
    raise ValueError(
        'Cannot handle model flag [{!s}], must be one of {!s}.'.format(
            model, EMNIST_MODELS))

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  iterative_process = task_spec.iterative_process_builder(tff_model_fn)

  @tff.tf_computation(tf.string)
  def build_train_dataset_from_client_id(client_id):
    client_dataset = emnist_train.dataset_computation(client_id)
    return train_preprocess_fn(client_dataset)

  training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
      build_train_dataset_from_client_id, iterative_process)

  client_ids_fn = training_utils.build_sample_fn(
      emnist_train.client_ids,
      size=task_spec.clients_per_round,
      replace=False,
      random_seed=task_spec.client_datasets_random_seed)
  # We convert the output to a list (instead of an np.ndarray) so that it can
  # be used as input to the iterative process.
  client_sampling_fn = lambda x: list(client_ids_fn(x))

  training_process.get_model_weights = iterative_process.get_model_weights

  centralized_eval_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=emnist_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  def test_fn(state):
    return centralized_eval_fn(iterative_process.get_model_weights(state))

  def validation_fn(state, round_num):
    del round_num
    return test_fn(state)

  return training_specs.RunnerSpec(
      iterative_process=training_process,
      client_datasets_fn=client_sampling_fn,
      validation_fn=validation_fn,
      test_fn=test_fn)
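# Hedged usage sketch (not part of the original module). The `TaskSpec`
# constructor arguments below are assumptions inferred from the attributes the
# function reads (`iterative_process_builder`, `client_epochs_per_round`,
# `client_batch_size`, `clients_per_round`, `client_datasets_random_seed`):
#
#   task_spec = training_specs.TaskSpec(
#       iterative_process_builder=my_builder,  # hypothetical builder
#       client_epochs_per_round=1,
#       client_batch_size=20,
#       clients_per_round=10,
#       client_datasets_random_seed=42)
#   runner_spec = configure_training(task_spec, model='cnn')
#   state = runner_spec.iterative_process.initialize()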
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Expected no command-line arguments, '
                         'got: {}'.format(argv))

  tff.backends.native.set_local_execution_context(max_fanout=10)

  model_builder = functools.partial(
      stackoverflow_models.create_recurrent_model,
      vocab_size=FLAGS.vocab_size,
      embedding_size=FLAGS.embedding_size,
      latent_size=FLAGS.latent_size,
      num_layers=FLAGS.num_layers,
      shared_embedding=FLAGS.shared_embedding)

  loss_builder = functools.partial(
      tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

  special_tokens = stackoverflow_word_prediction.get_special_tokens(
      FLAGS.vocab_size)
  pad_token = special_tokens.pad
  oov_tokens = special_tokens.oov
  eos_token = special_tokens.eos

  def metrics_builder():
    return [
        keras_metrics.MaskedCategoricalAccuracy(
            name='accuracy_with_oov', masked_tokens=[pad_token]),
        keras_metrics.MaskedCategoricalAccuracy(
            name='accuracy_no_oov', masked_tokens=[pad_token] + oov_tokens),
        # Notice BOS never appears in ground truth.
        keras_metrics.MaskedCategoricalAccuracy(
            name='accuracy_no_oov_or_eos',
            masked_tokens=[pad_token, eos_token] + oov_tokens),
        keras_metrics.NumBatchesCounter(),
        keras_metrics.NumTokensCounter(masked_tokens=[pad_token]),
    ]

  train_dataset, _ = stackoverflow_word_prediction.get_federated_datasets(
      vocab_size=FLAGS.vocab_size,
      train_client_batch_size=FLAGS.client_batch_size,
      train_client_epochs_per_round=FLAGS.client_epochs_per_round,
      max_sequence_length=FLAGS.sequence_length,
      max_elements_per_train_client=FLAGS.max_elements_per_user)

  _, validation_dataset, test_dataset = stackoverflow_word_prediction.get_centralized_datasets(
      vocab_size=FLAGS.vocab_size,
      max_sequence_length=FLAGS.sequence_length,
      num_validation_examples=FLAGS.num_validation_examples)

  if FLAGS.uniform_weighting:

    def client_weight_fn(local_outputs):
      del local_outputs
      return 1.0
  else:

    def client_weight_fn(local_outputs):
      return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

  def model_fn():
    return tff.learning.from_keras_model(
        model_builder(),
        loss_builder(),
        input_spec=validation_dataset.element_spec,
        metrics=metrics_builder())

  if FLAGS.noise_multiplier is not None:
    if not FLAGS.uniform_weighting:
      raise ValueError(
          'Differential privacy is only implemented for uniform weighting.')

    dp_query = tff.utils.build_dp_query(
        clip=FLAGS.clip,
        noise_multiplier=FLAGS.noise_multiplier,
        expected_total_weight=FLAGS.clients_per_round,
        adaptive_clip_learning_rate=FLAGS.adaptive_clip_learning_rate,
        target_unclipped_quantile=FLAGS.target_unclipped_quantile,
        clipped_count_budget_allocation=FLAGS.clipped_count_budget_allocation,
        expected_clients_per_round=FLAGS.clients_per_round)

    weights_type = tff.learning.framework.weights_type_from_model(model_fn)
    aggregation_process = tff.utils.build_dp_aggregate_process(
        weights_type.trainable, dp_query)
  else:
    aggregation_process = None

  server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')
  client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')

  iterative_process = tff.learning.build_federated_averaging_process(
      model_fn=model_fn,
      server_optimizer_fn=server_optimizer_fn,
      client_weight_fn=client_weight_fn,
      client_optimizer_fn=client_optimizer_fn,
      aggregation_process=aggregation_process)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      train_dataset, FLAGS.clients_per_round)

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      eval_dataset=validation_dataset,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  test_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      # Use both val and test for symmetry with other experiments, which
      # evaluate on the entire test set.
      eval_dataset=validation_dataset.concatenate(test_dataset),
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  hparam_dict = utils_impl.lookup_flag_values(utils_impl.get_hparam_flags())
  training_loop_dict = utils_impl.lookup_flag_values(training_loop_flags)

  training_loop.run(
      iterative_process=iterative_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      test_fn=test_fn,
      hparam_dict=hparam_dict,
      **training_loop_dict)
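# Hedged usage note (not part of the original module): this `main` is driven
# entirely by absl flags referenced above (`FLAGS.vocab_size`,
# `FLAGS.clients_per_round`, `FLAGS.noise_multiplier`, ...). A hypothetical
# invocation, with a made-up script name and illustrative flag values:
#
#   python federated_trainer.py \
#     --vocab_size=10000 --clients_per_round=100 \
#     --client_batch_size=16 --client_epochs_per_round=1 \
#     --uniform_weighting=true --noise_multiplier=0.5 --clip=0.05
#
# Note that `--noise_multiplier` requires `--uniform_weighting=true`, since the
# code above raises a ValueError for DP with non-uniform client weighting.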
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    client_datasets_random_seed: Optional[int] = None,
    crop_size: Optional[int] = 24,
    total_rounds: Optional[int] = 1500,
    experiment_name: Optional[str] = 'federated_cifar100',
    root_output_dir: Optional[str] = '/tmp/fed_opt',
    **kwargs):
  """Runs an iterative process on the CIFAR-100 classification task.

  This method will load and pre-process the dataset and construct a model used
  for the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

  *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
  *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
      represents the server state, `{B*}` represents the client datasets, and
      `T` represents a python `Mapping` object.

  The iterative process must also have a callable attribute `get_model_weights`
  that takes as input the state of the iterative process, and returns a
  `tff.learning.ModelWeights` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    crop_size: An optional integer representing the resulting size of input
      images after preprocessing.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be
      appended to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """
  crop_shape = (crop_size, crop_size, 3)

  cifar_train, _ = cifar100_dataset.get_federated_datasets(
      train_client_epochs_per_round=client_epochs_per_round,
      train_client_batch_size=client_batch_size,
      crop_shape=crop_shape)

  _, cifar_test = cifar100_dataset.get_centralized_datasets(
      train_batch_size=client_batch_size, crop_shape=crop_shape)

  input_spec = cifar_train.create_tf_dataset_for_client(
      cifar_train.client_ids[0]).element_spec

  model_builder = functools.partial(
      resnet_models.create_resnet18,
      input_shape=crop_shape,
      num_classes=NUM_CLASSES)

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  training_process = iterative_process_builder(tff_model_fn)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      dataset=cifar_train,
      clients_per_round=clients_per_round,
      random_seed=client_datasets_random_seed)

  test_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=cifar_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)
  validation_fn = lambda model_weights, round_num: test_fn(model_weights)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  training_loop.run(
      iterative_process=training_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=validation_fn,
      test_fn=test_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    client_datasets_random_seed: Optional[int] = None,
    sequence_length: Optional[int] = 80,
    total_rounds: Optional[int] = 1500,
    experiment_name: Optional[str] = 'federated_shakespeare',
    root_output_dir: Optional[str] = '/tmp/fed_opt',
    max_eval_batches: Optional[int] = None,
    **kwargs):
  """Runs an iterative process on a Shakespeare next-character prediction task.

  This method will load and pre-process the dataset and construct a model used
  for the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

  *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
  *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
      represents the server state, `{B*}` represents the client datasets, and
      `T` represents a python `Mapping` object.

  The iterative process must also have a callable attribute `get_model_weights`
  that takes as input the state of the iterative process, and returns a
  `tff.learning.ModelWeights` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn` and
      a `client_weight_fn`, and returns a `tff.templates.IterativeProcess`. The
      `model_fn` must return a `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    sequence_length: An int specifying the length of the character sequences
      used for prediction.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be
      appended to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, the evaluation dataset is
      capped to at most that many batches. If set to `None` or a nonpositive
      integer, the full evaluation dataset is used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """
  shakespeare_train, _ = shakespeare_dataset.get_federated_datasets(
      train_client_batch_size=client_batch_size,
      train_client_epochs_per_round=client_epochs_per_round,
      sequence_length=sequence_length)

  _, shakespeare_test = shakespeare_dataset.get_centralized_datasets(
      sequence_length=sequence_length)
  if max_eval_batches and max_eval_batches >= 1:
    shakespeare_test = shakespeare_test.take(max_eval_batches)

  model_builder = functools.partial(
      create_shakespeare_model, sequence_length=sequence_length)

  loss_builder = functools.partial(
      tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

  # NOTE: The original snippet never defines `metrics_builder`, which would
  # raise a NameError below. A minimal stand-in (an assumption, not taken from
  # the original), reporting per-character prediction accuracy:
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  input_spec = shakespeare_train.element_type_structure

  def client_weight_fn(local_outputs):
    # Num_tokens is a tensor with type int64[1], to use as a weight need
    # a float32 scalar.
    return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  training_process = iterative_process_builder(
      tff_model_fn, client_weight_fn=client_weight_fn)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      dataset=shakespeare_train,
      clients_per_round=clients_per_round,
      random_seed=client_datasets_random_seed)

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=shakespeare_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  training_loop.run(
      iterative_process=training_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      test_fn=evaluate_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
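# Hedged sketch: unlike the CIFAR-100 builder, the Shakespeare task above
# passes a `client_weight_fn`, so its builder must accept one. An illustrative
# builder (names and learning rates assumed, not from the original) that
# forwards the weighting hook to FedAvg:
def example_weighted_process_builder(model_fn, client_weight_fn=None):
  return tff.learning.build_federated_averaging_process(
      model_fn=model_fn,
      client_weight_fn=client_weight_fn,
      client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
      server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))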
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Expected no command-line arguments, '
                         'got: {}'.format(argv))

  emnist_train, _ = emnist_dataset.get_federated_datasets(
      train_client_batch_size=FLAGS.client_batch_size,
      train_client_epochs_per_round=FLAGS.client_epochs_per_round,
      only_digits=False)
  _, emnist_test = emnist_dataset.get_centralized_datasets()

  if FLAGS.model == 'cnn':
    model_builder = functools.partial(
        emnist_models.create_conv_dropout_model, only_digits=False)
  elif FLAGS.model == '2nn':
    model_builder = functools.partial(
        emnist_models.create_two_hidden_layer_model, only_digits=False)
  else:
    raise ValueError('Cannot handle model flag [{!s}].'.format(FLAGS.model))

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  if FLAGS.uniform_weighting:

    def client_weight_fn(local_outputs):
      del local_outputs
      return 1.0
  else:
    client_weight_fn = None  # Defaults to the number of examples per client.

  def model_fn():
    return tff.learning.from_keras_model(
        model_builder(),
        loss_builder(),
        input_spec=emnist_test.element_spec,
        metrics=metrics_builder())

  if FLAGS.noise_multiplier is not None:
    if not FLAGS.uniform_weighting:
      raise ValueError(
          'Differential privacy is only implemented for uniform weighting.')

    dp_query = tff.utils.build_dp_query(
        clip=FLAGS.clip,
        noise_multiplier=FLAGS.noise_multiplier,
        expected_total_weight=FLAGS.clients_per_round,
        adaptive_clip_learning_rate=FLAGS.adaptive_clip_learning_rate,
        target_unclipped_quantile=FLAGS.target_unclipped_quantile,
        clipped_count_budget_allocation=FLAGS.clipped_count_budget_allocation,
        expected_clients_per_round=FLAGS.clients_per_round)

    weights_type = tff.learning.framework.weights_type_from_model(model_fn)
    aggregation_process = tff.utils.build_dp_aggregate_process(
        weights_type.trainable, dp_query)
  else:
    aggregation_process = None

  server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')
  client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')

  iterative_process = tff.learning.build_federated_averaging_process(
      model_fn=model_fn,
      server_optimizer_fn=server_optimizer_fn,
      client_weight_fn=client_weight_fn,
      client_optimizer_fn=client_optimizer_fn,
      aggregation_process=aggregation_process)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      emnist_train, FLAGS.clients_per_round)

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=emnist_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  hparam_dict = utils_impl.lookup_flag_values(utils_impl.get_hparam_flags())
  training_loop_dict = utils_impl.lookup_flag_values(training_loop_flags)

  training_loop.run(
      iterative_process=iterative_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      hparam_dict=hparam_dict,
      **training_loop_dict)
def configure_training(
    task_spec: training_specs.TaskSpec,
    crop_size: int = 24,
    distort_train_images: bool = True) -> training_specs.RunnerSpec:
  """Configures training for the CIFAR-100 classification task.

  This method will load and pre-process datasets and construct a model used for
  the task. It then uses `iterative_process_builder` to create an iterative
  process compatible with `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    crop_size: An optional integer representing the resulting size of input
      images after preprocessing.
    distort_train_images: A boolean indicating whether to distort training
      images during preprocessing via random crops, as opposed to simply
      resizing the image.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """
  crop_shape = (crop_size, crop_size, 3)

  cifar_train, _ = tff.simulation.datasets.cifar100.load_data()
  _, cifar_test = cifar100_dataset.get_centralized_datasets(
      train_batch_size=task_spec.client_batch_size, crop_shape=crop_shape)

  train_preprocess_fn = cifar100_dataset.create_preprocess_fn(
      num_epochs=task_spec.client_epochs_per_round,
      batch_size=task_spec.client_batch_size,
      crop_shape=crop_shape,
      distort_image=distort_train_images)
  input_spec = train_preprocess_fn.type_signature.result.element

  model_builder = functools.partial(
      resnet_models.create_resnet18,
      input_shape=crop_shape,
      num_classes=NUM_CLASSES)

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  iterative_process = task_spec.iterative_process_builder(tff_model_fn)

  if hasattr(cifar_train, 'dataset_computation'):

    @tff.tf_computation(tf.string)
    def build_train_dataset_from_client_id(client_id):
      client_dataset = cifar_train.dataset_computation(client_id)
      return train_preprocess_fn(client_dataset)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        build_train_dataset_from_client_id, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        cifar_train.client_ids,
        size=task_spec.clients_per_round,
        replace=False,
        random_seed=task_spec.sampling_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))
  else:
    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_preprocess_fn, iterative_process)
    client_sampling_fn = training_utils.build_client_datasets_fn(
        dataset=cifar_train,
        clients_per_round=task_spec.clients_per_round,
        random_seed=task_spec.sampling_random_seed)

  training_process.get_model_weights = iterative_process.get_model_weights

  test_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=cifar_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)
  validation_fn = lambda model_weights, round_num: test_fn(model_weights)

  return training_specs.RunnerSpec(
      iterative_process=training_process,
      client_datasets_fn=client_sampling_fn,
      validation_fn=validation_fn,
      test_fn=test_fn)
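# Hedged usage note (illustrative, not from the original): in the
# `dataset_computation` branch above, composing the dataset computation with
# the iterative process means `training_process.next` consumes raw client ids
# (strings) rather than materialized client datasets, which keeps dataset
# construction inside the TFF computation. A driver loop might look like:
#
#   state = training_process.initialize()
#   for round_num in range(num_rounds):
#     participants = client_sampling_fn(round_num)  # a list of client id strings
#     state, metrics = training_process.next(state, participants)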
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    max_batches_per_client: Optional[int] = -1,
    client_datasets_random_seed: Optional[int] = None,
    vocab_tokens_size: Optional[int] = 10000,
    vocab_tags_size: Optional[int] = 500,
    max_elements_per_user: Optional[int] = 1000,
    num_validation_examples: Optional[int] = 10000,
    total_rounds: Optional[int] = 1500,
    experiment_name: Optional[str] = 'federated_so_lr',
    root_output_dir: Optional[str] = '/tmp/fed_opt',
    max_eval_batches: Optional[int] = None,
    **kwargs):
  """Runs an iterative process on the Stack Overflow logistic regression task.

  This method will load and pre-process the dataset and construct a model used
  for the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

  *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
  *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
      represents the server state, `{B*}` represents the client datasets, and
      `T` represents a python `Mapping` object.

  Moreover, the server state must have an attribute `model` of type
  `tff.learning.ModelWeights`.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset is
      used.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    vocab_tokens_size: Integer dictating the number of most frequent words to
      use in the vocabulary.
    vocab_tags_size: Integer dictating the number of most frequent tags to use
      in the label creation.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset.
    num_validation_examples: The number of test examples to use for validation.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be
      appended to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to `None` or a nonpositive
      integer, the full evaluation datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """
  stackoverflow_train, _, _ = stackoverflow_lr_dataset.get_stackoverflow_datasets(
      vocab_tokens_size=vocab_tokens_size,
      vocab_tags_size=vocab_tags_size,
      client_batch_size=client_batch_size,
      client_epochs_per_round=client_epochs_per_round,
      max_training_elements_per_user=max_elements_per_user,
      max_batches_per_user=max_batches_per_client,
      num_validation_examples=num_validation_examples)

  _, stackoverflow_validation, stackoverflow_test = stackoverflow_lr_dataset.get_centralized_datasets(
      train_batch_size=client_batch_size,
      vocab_tokens_size=vocab_tokens_size,
      vocab_tags_size=vocab_tags_size,
      num_validation_examples=num_validation_examples,
      max_validation_batches=max_eval_batches,
      max_test_batches=max_eval_batches)

  input_spec = stackoverflow_train.create_tf_dataset_for_client(
      stackoverflow_train.client_ids[0]).element_spec

  model_builder = functools.partial(
      stackoverflow_lr_models.create_logistic_model,
      vocab_tokens_size=vocab_tokens_size,
      vocab_tags_size=vocab_tags_size)

  loss_builder = functools.partial(
      tf.keras.losses.BinaryCrossentropy,
      from_logits=False,
      reduction=tf.keras.losses.Reduction.SUM)

  # NOTE: The original snippet never defines `metrics_builder`, which would
  # raise a NameError below. A plausible stand-in for this multi-label tag
  # prediction task (an assumption, not taken from the original):
  metrics_builder = lambda: [
      tf.keras.metrics.Precision(name='precision'),
      tf.keras.metrics.Recall(top_k=5, name='recall_at_5'),
  ]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  training_process = iterative_process_builder(tff_model_fn)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      dataset=stackoverflow_train,
      clients_per_round=clients_per_round,
      random_seed=client_datasets_random_seed)

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      eval_dataset=stackoverflow_validation,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  test_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      # Use both val and test for symmetry with other experiments, which
      # evaluate on the entire test set.
      eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  training_loop.run(
      iterative_process=training_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      test_fn=test_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    client_datasets_random_seed: Optional[int] = None,
    total_rounds: Optional[int] = 1500,
    experiment_name: Optional[str] = 'federated_emnist_ae',
    root_output_dir: Optional[str] = '/tmp/fed_opt',
    max_eval_batches: Optional[int] = None,
    **kwargs):
  """Runs an iterative process on the EMNIST autoencoder task.

  This method will load and pre-process the dataset and construct a model used
  for the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

  *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
  *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
      represents the server state, `{B*}` represents the client datasets, and
      `T` represents a python `Mapping` object.

  The iterative process must also have a callable attribute `get_model_weights`
  that takes as input the state of the iterative process, and returns a
  `tff.learning.ModelWeights` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be
      appended to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, the evaluation dataset is
      capped to at most that many batches. If set to `None` or a nonpositive
      integer, the full evaluation dataset is used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
  """
  emnist_train, _ = emnist_dataset.get_federated_datasets(
      train_client_batch_size=client_batch_size,
      train_client_epochs_per_round=client_epochs_per_round,
      only_digits=False,
      emnist_task='autoencoder')

  _, emnist_test = emnist_dataset.get_centralized_datasets(
      train_batch_size=client_batch_size,
      only_digits=False,
      emnist_task='autoencoder')
  if max_eval_batches and max_eval_batches >= 1:
    emnist_test = emnist_test.take(max_eval_batches)

  input_spec = emnist_train.create_tf_dataset_for_client(
      emnist_train.client_ids[0]).element_spec

  model_builder = emnist_ae_models.create_autoencoder_model

  loss_builder = functools.partial(
      tf.keras.losses.MeanSquaredError,
      reduction=tf.keras.losses.Reduction.SUM)
  metrics_builder = lambda: [tf.keras.metrics.MeanSquaredError()]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  training_process = iterative_process_builder(tff_model_fn)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      dataset=emnist_train,
      clients_per_round=clients_per_round,
      random_seed=client_datasets_random_seed)

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=emnist_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  training_loop.run(
      iterative_process=training_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      test_fn=evaluate_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    max_elements_per_user: int,
    total_rounds: int = 3000,
    vocab_size: int = 10000,
    num_oov_buckets: int = 1,
    sequence_length: int = 20,
    num_validation_examples: int = 10000,
    dim_embed: int = 96,
    dim_model: int = 512,
    dim_hidden: int = 2048,
    num_heads: int = 8,
    num_layers: int = 1,
    max_position_encoding: int = 1000,
    dropout: float = 0.1,
    client_datasets_random_seed: Optional[int] = None,
    experiment_name: str = 'federated_stackoverflow',
    root_output_dir: str = '/tmp/fedopt_guide',
    max_val_test_batches: Optional[int] = None,
    **kwargs) -> None:
  """Runs an iterative process on Stack Overflow next-word prediction.

  This method will load and pre-process the dataset and construct a model used
  for the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `federated_research/fedopt_guide/training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

  *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
  *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
      represents the server state, `{B*}` represents the client datasets, and
      `T` represents a python `Mapping` object.

  The iterative process must also have a callable attribute `get_model_weights`
  that takes as input the state of the iterative process, and returns a
  `tff.learning.ModelWeights` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn` and
      a `client_weight_fn`, and returns a `tff.templates.IterativeProcess`. The
      `model_fn` must return a `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset. This must be a positive value or -1 (which means that
      all elements are taken for training).
    total_rounds: The number of federated training rounds.
    vocab_size: Integer dictating the number of most frequent words to use in
      the vocabulary.
    num_oov_buckets: The number of out-of-vocabulary buckets to use.
    sequence_length: The maximum number of words to take for each sequence.
    num_validation_examples: The number of test examples to use for validation.
    dim_embed: An integer for the dimension of the token embeddings.
    dim_model: An integer for the dimension of features of MultiHeadAttention
      layers.
    dim_hidden: An integer for the dimension of hidden layers of the FFN.
    num_heads: An integer for the number of attention heads.
    num_layers: An integer for the number of Transformer blocks.
    max_position_encoding: Maximum number of positions for position embeddings.
    dropout: Dropout rate.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    experiment_name: The name of the experiment being run. This will be
      appended to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_val_test_batches: If set to a positive integer, validation and test
      datasets are capped to at most that many batches. If set to `None` or a
      nonpositive integer, the full datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/fedopt_guide/training_utils.py`.
  """
  train_clientdata, _, _ = tff.simulation.datasets.stackoverflow.load_data()

  _, validation_dataset, test_dataset = stackoverflow_word_prediction.get_centralized_datasets(
      vocab_size=vocab_size,
      max_sequence_length=sequence_length,
      num_validation_examples=num_validation_examples,
      num_oov_buckets=num_oov_buckets)

  if max_val_test_batches and max_val_test_batches >= 1:
    validation_dataset = validation_dataset.take(max_val_test_batches)
    test_dataset = test_dataset.take(max_val_test_batches)

  model_builder = functools.partial(
      transformer_models.create_transformer_lm,
      vocab_size=vocab_size,
      num_oov_buckets=num_oov_buckets,
      d_embed=dim_embed,
      d_model=dim_model,
      d_hidden=dim_hidden,
      num_heads=num_heads,
      num_layers=num_layers,
      max_position_encoding=max_position_encoding,
      dropout=dropout,
      name='stackoverflow-transformer')

  loss_builder = functools.partial(
      tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

  special_tokens = stackoverflow_word_prediction.get_special_tokens(
      vocab_size, num_oov_buckets)
  pad_token = special_tokens.pad
  oov_tokens = special_tokens.oov
  eos_token = special_tokens.eos

  def metrics_builder():
    return [
        keras_metrics.MaskedCategoricalAccuracy(
            name='accuracy_with_oov', masked_tokens=[pad_token]),
        keras_metrics.MaskedCategoricalAccuracy(
            name='accuracy_no_oov', masked_tokens=[pad_token] + oov_tokens),
        # Notice BOS never appears in ground truth.
        keras_metrics.MaskedCategoricalAccuracy(
            name='accuracy_no_oov_or_eos',
            masked_tokens=[pad_token, eos_token] + oov_tokens),
        keras_metrics.NumBatchesCounter(),
        keras_metrics.NumTokensCounter(masked_tokens=[pad_token])
    ]

  train_dataset_preprocess_comp = stackoverflow_word_prediction.create_preprocess_fn(
      vocab=stackoverflow_word_prediction.create_vocab(vocab_size),
      num_oov_buckets=num_oov_buckets,
      client_batch_size=client_batch_size,
      client_epochs_per_round=client_epochs_per_round,
      max_sequence_length=sequence_length,
      max_elements_per_client=max_elements_per_user)

  input_spec = train_dataset_preprocess_comp.type_signature.result.element

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  def client_weight_fn(local_outputs):
    # Num_tokens is a tensor with type int64[1], to use as a weight need
    # a float32 scalar.
    return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

  iterative_process = iterative_process_builder(
      tff_model_fn, client_weight_fn=client_weight_fn)

  if hasattr(train_clientdata, 'dataset_computation'):

    @tff.tf_computation(tf.string)
    def train_dataset_computation(client_id):
      client_train_data = train_clientdata.dataset_computation(client_id)
      return train_dataset_preprocess_comp(client_train_data)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_dataset_computation, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        train_clientdata.client_ids,
        size=clients_per_round,
        replace=False,
        random_seed=client_datasets_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))
  else:
    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_dataset_preprocess_comp, iterative_process)
    client_sampling_fn = training_utils.build_client_datasets_fn(
        dataset=train_clientdata,
        clients_per_round=clients_per_round,
        random_seed=client_datasets_random_seed)

  training_process.get_model_weights = iterative_process.get_model_weights

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      eval_dataset=validation_dataset,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)
  validation_fn = lambda model_weights, round_num: evaluate_fn(model_weights)

  test_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      # Use both val and test for symmetry with other experiments, which
      # evaluate on the entire test set.
      eval_dataset=validation_dataset.concatenate(test_dataset),
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  training_loop.run(
      iterative_process=training_process,
      train_client_datasets_fn=client_sampling_fn,
      evaluation_fn=validation_fn,
      test_fn=test_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
def configure_training(
    task_spec: training_specs.TaskSpec,
    vocab_tokens_size: int = 10000,
    vocab_tags_size: int = 500,
    max_elements_per_user: int = 1000,
    num_validation_examples: int = 10000) -> training_specs.RunnerSpec:
  """Configures training for the Stack Overflow tag prediction task.

  This tag prediction is performed via multi-class one-versus-rest logistic
  regression. This method will load and pre-process datasets and construct a
  model used for the task. It then uses `iterative_process_builder` to create
  an iterative process compatible with
  `federated_research.utils.training_loop`.

  Args:
    task_spec: A `TaskSpec` class for creating federated training tasks.
    vocab_tokens_size: Integer dictating the number of most frequent words to
      use in the vocabulary.
    vocab_tags_size: Integer dictating the number of most frequent tags to use
      in the label creation.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset.
    num_validation_examples: The number of test examples to use for validation.

  Returns:
    A `RunnerSpec` containing attributes used for running the newly created
    federated task.
  """
  stackoverflow_train, _, _ = tff.simulation.datasets.stackoverflow.load_data()

  _, stackoverflow_validation, stackoverflow_test = stackoverflow_tag_prediction.get_centralized_datasets(
      train_batch_size=task_spec.client_batch_size,
      word_vocab_size=vocab_tokens_size,
      tag_vocab_size=vocab_tags_size,
      num_validation_examples=num_validation_examples)

  word_vocab = stackoverflow_tag_prediction.create_word_vocab(vocab_tokens_size)
  tag_vocab = stackoverflow_tag_prediction.create_tag_vocab(vocab_tags_size)

  train_preprocess_fn = stackoverflow_tag_prediction.create_preprocess_fn(
      word_vocab=word_vocab,
      tag_vocab=tag_vocab,
      client_batch_size=task_spec.client_batch_size,
      client_epochs_per_round=task_spec.client_epochs_per_round,
      max_elements_per_client=max_elements_per_user)
  input_spec = train_preprocess_fn.type_signature.result.element

  model_builder = functools.partial(
      stackoverflow_lr_models.create_logistic_model,
      vocab_tokens_size=vocab_tokens_size,
      vocab_tags_size=vocab_tags_size)

  loss_builder = functools.partial(
      tf.keras.losses.BinaryCrossentropy,
      from_logits=False,
      reduction=tf.keras.losses.Reduction.SUM)

  # NOTE: The original snippet never defines `metrics_builder`, which would
  # raise a NameError below. A plausible stand-in for this multi-label tag
  # prediction task (an assumption, not taken from the original):
  metrics_builder = lambda: [
      tf.keras.metrics.Precision(name='precision'),
      tf.keras.metrics.Recall(top_k=5, name='recall_at_5'),
  ]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  iterative_process = task_spec.iterative_process_builder(tff_model_fn)

  if hasattr(stackoverflow_train, 'dataset_computation'):

    @tff.tf_computation(tf.string)
    def build_train_dataset_from_client_id(client_id):
      client_dataset = stackoverflow_train.dataset_computation(client_id)
      return train_preprocess_fn(client_dataset)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        build_train_dataset_from_client_id, iterative_process)
    client_ids_fn = training_utils.build_sample_fn(
        stackoverflow_train.client_ids,
        size=task_spec.clients_per_round,
        replace=False,
        random_seed=task_spec.sampling_random_seed)
    # We convert the output to a list (instead of an np.ndarray) so that it can
    # be used as input to the iterative process.
    client_sampling_fn = lambda x: list(client_ids_fn(x))
  else:
    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_preprocess_fn, iterative_process)
    client_sampling_fn = training_utils.build_client_datasets_fn(
        dataset=stackoverflow_train,
        clients_per_round=task_spec.clients_per_round,
        random_seed=task_spec.sampling_random_seed)

  training_process.get_model_weights = iterative_process.get_model_weights

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      eval_dataset=stackoverflow_validation,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)
  validation_fn = lambda model_weights, round_num: evaluate_fn(model_weights)

  test_fn = training_utils.build_centralized_evaluate_fn(
      model_builder=model_builder,
      # Use both val and test for symmetry with other experiments, which
      # evaluate on the entire test set.
      eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  return training_specs.RunnerSpec(
      iterative_process=training_process,
      client_datasets_fn=client_sampling_fn,
      validation_fn=validation_fn,
      test_fn=test_fn)
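# Hedged driver sketch (not part of the original module): wiring the returned
# `RunnerSpec` into `federated_research.utils.training_loop`, mirroring the
# `training_loop.run` keyword arguments used by the `run_federated` functions
# above:
#
#   spec = configure_training(task_spec)
#   training_loop.run(
#       iterative_process=spec.iterative_process,
#       client_datasets_fn=spec.client_datasets_fn,
#       validation_fn=spec.validation_fn,
#       test_fn=spec.test_fn)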