def test_take_with_repeat(self):
  emnist_train, _ = emnist_dataset.get_emnist_datasets(
      client_batch_size=10,
      client_epochs_per_round=-1,
      max_batches_per_client=10,
      only_digits=True)
  self.assertEqual(len(emnist_train.client_ids), 3383)
  for i in range(10):
    client_ds = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[i])
    self.assertEqual(_compute_length_of_dataset(client_ds), 10)
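# `_compute_length_of_dataset` is referenced above but not defined in this
# excerpt. A minimal sketch of such a helper, assuming it simply counts the
# number of batches by folding over the dataset:
import tensorflow as tf  # Normally imported once at the top of the module.


def _compute_length_of_dataset(ds):
  # Fold over the dataset, adding 1 per element (here, per batch). In eager
  # mode the resulting scalar compares equal to a python int, so
  # `assertEqual(_compute_length_of_dataset(client_ds), 10)` works.
  return ds.reduce(0, lambda x, _: x + 1)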
def test_emnist_dataset_structure(self):
  emnist_train, emnist_test = emnist_dataset.get_emnist_datasets(
      client_batch_size=10, client_epochs_per_round=1, only_digits=True)
  self.assertEqual(len(emnist_train.client_ids), 3383)
  sample_train_ds = emnist_train.create_tf_dataset_for_client(
      emnist_train.client_ids[0])
  train_batch = next(iter(sample_train_ds))
  train_batch_shape = train_batch[0].shape
  test_batch = next(iter(emnist_test))
  test_batch_shape = test_batch[0].shape
  self.assertEqual(train_batch_shape.as_list(), [10, 28, 28, 1])
  self.assertEqual(test_batch_shape.as_list(), [TEST_BATCH_SIZE, 28, 28, 1])
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    max_batches_per_client: Optional[int] = -1,
    client_datasets_random_seed: Optional[int] = None,
    model: Optional[str] = 'cnn',
    total_rounds: Optional[int] = 1500,
    experiment_name: Optional[str] = 'federated_emnist_cr',
    root_output_dir: Optional[str] = '/tmp/fed_opt',
    max_eval_batches: Optional[int] = None,
    **kwargs):
  """Runs an iterative process on the EMNIST character recognition task.

  This method loads and pre-processes the dataset and constructs a model
  for the task. It then uses `iterative_process_builder` to create an
  iterative process that it applies to the task, using
  `federated_research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    * `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    * `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
      represents the server state, `{B*}` represents the client datasets,
      and `T` represents a python `Mapping` object.

  Moreover, the server state must have an attribute `model` of type
  `tff.learning.ModelWeights`.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`
      and returns a `tff.templates.IterativeProcess`. The `model_fn` must
      return a `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on
      clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset
      is used.
    client_datasets_random_seed: An optional int used to seed which clients
      are sampled at each round. If `None`, no seed is used.
    model: A string specifying the model used for character recognition. Can
      be one of `cnn` and `2nn`, corresponding to a CNN model and a densely
      connected 2-layer model (respectively).
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be
      appended to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to `None` or a nonpositive
      integer, the full evaluation datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `federated_research/utils/training_utils.py`.
""" emnist_train, _ = emnist_dataset.get_emnist_datasets( client_batch_size, client_epochs_per_round, max_batches_per_client=max_batches_per_client, only_digits=False) _, emnist_test = emnist_dataset.get_centralized_datasets( train_batch_size=client_batch_size, max_test_batches=max_eval_batches, only_digits=False) input_spec = emnist_train.create_tf_dataset_for_client( emnist_train.client_ids[0]).element_spec if model == 'cnn': model_builder = functools.partial( emnist_models.create_conv_dropout_model, only_digits=False) elif model == '2nn': model_builder = functools.partial( emnist_models.create_two_hidden_layer_model, only_digits=False) else: raise ValueError( 'Cannot handle model flag [{!s}], must be one of {!s}.'.format( model, EMNIST_MODELS)) loss_builder = tf.keras.losses.SparseCategoricalCrossentropy metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()] def tff_model_fn() -> tff.learning.Model: return tff.learning.from_keras_model( keras_model=model_builder(), input_spec=input_spec, loss=loss_builder(), metrics=metrics_builder()) training_process = iterative_process_builder(tff_model_fn) client_datasets_fn = training_utils.build_client_datasets_fn( dataset=emnist_train, clients_per_round=clients_per_round, random_seed=client_datasets_random_seed) evaluate_fn = training_utils.build_centralized_evaluate_fn( eval_dataset=emnist_test, model_builder=model_builder, loss_builder=loss_builder, metrics_builder=metrics_builder) logging.info('Training model:') logging.info(model_builder().summary()) training_loop.run( iterative_process=training_process, client_datasets_fn=client_datasets_fn, validation_fn=evaluate_fn, test_fn=evaluate_fn, total_rounds=total_rounds, experiment_name=experiment_name, root_output_dir=root_output_dir, **kwargs)
def test_raises_no_repeat_and_no_take(self):
  with self.assertRaisesRegex(
      ValueError, 'Argument client_epochs_per_round is set to -1'):
    emnist_dataset.get_emnist_datasets(
        client_batch_size=10,
        client_epochs_per_round=-1,
        max_batches_per_client=-1)
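# For context, a minimal sketch of the check this test exercises, assuming
# `get_emnist_datasets` guards against building an unbounded client dataset.
# The helper name and exact wording are hypothetical; only the error-message
# prefix is taken from the test above.
def _check_repeat_and_take(client_epochs_per_round, max_batches_per_client):
  # Repeating forever (-1 epochs) with no cap on the number of batches would
  # never terminate when iterated, so reject that combination up front.
  if client_epochs_per_round == -1 and max_batches_per_client == -1:
    raise ValueError(
        'Argument client_epochs_per_round is set to -1. If this is intended,'
        ' then max_batches_per_client must be set to a positive integer.')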
def run_experiment():
  """Data preprocessing and experiment execution."""
  emnist_train, emnist_test = emnist_dataset.get_emnist_datasets(
      FLAGS.client_batch_size,
      FLAGS.client_epochs_per_round,
      only_digits=FLAGS.only_digits)

  example_dataset = emnist_train.create_tf_dataset_for_client(
      emnist_train.client_ids[0])
  input_spec = example_dataset.element_spec

  client_datasets_fn = training_utils.build_client_datasets_fn(
      emnist_train, FLAGS.clients_per_round)

  evaluate_fn = training_utils.build_centralized_evaluate_fn(
      eval_dataset=emnist_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  client_optimizer_fn = functools.partial(
      utils_impl.create_optimizer_from_flags, 'client')
  server_optimizer_fn = functools.partial(
      utils_impl.create_optimizer_from_flags, 'server')

  def tff_model_fn():
    keras_model = model_builder()
    return tff.learning.from_keras_model(
        keras_model,
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  if FLAGS.use_compression:
    # We create a `MeasuredProcess` for the broadcast process and a
    # `MeasuredProcess` for the aggregate process by providing
    # `_broadcast_encoder_fn` and `_mean_encoder_fn` to the corresponding
    # utilities. These functions are called once for each of the model
    # weights created by `tff_model_fn`, and return instances of the
    # appropriate encoders.
    encoded_broadcast_process = (
        tff.learning.framework.build_encoded_broadcast_process_from_model(
            tff_model_fn, _broadcast_encoder_fn))
    encoded_mean_process = (
        tff.learning.framework.build_encoded_mean_process_from_model(
            tff_model_fn, _mean_encoder_fn))
  else:
    encoded_broadcast_process = None
    encoded_mean_process = None

  iterative_process = tff.learning.build_federated_averaging_process(
      model_fn=tff_model_fn,
      client_optimizer_fn=client_optimizer_fn,
      server_optimizer_fn=server_optimizer_fn,
      aggregation_process=encoded_mean_process,
      broadcast_process=encoded_broadcast_process)

  hparam_dict = utils_impl.lookup_flag_values(utils_impl.get_hparam_flags())
  training_loop_dict = utils_impl.lookup_flag_values(training_loop_flags)

  training_loop.run(
      iterative_process=iterative_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      hparam_dict=hparam_dict,
      **training_loop_dict)
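# `_broadcast_encoder_fn` and `_mean_encoder_fn` are referenced above but not
# shown in this excerpt. A minimal sketch of what they might look like,
# assuming the 8-bit uniform-quantization scheme from the TFF compression
# tutorial and a (hypothetical) 10000-element threshold for deciding which
# variables are large enough to be worth compressing:
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te


def _broadcast_encoder_fn(value):
  """Returns an encoder for server-to-client broadcast of `value`."""
  spec = tf.TensorSpec(value.shape, value.dtype)
  if value.shape.num_elements() > 10000:
    # Quantize large variables to 8 bits; leave small ones untouched.
    return te.encoders.as_simple_encoder(
        te.encoders.uniform_quantization(bits=8), spec)
  return te.encoders.as_simple_encoder(te.encoders.identity(), spec)


def _mean_encoder_fn(value):
  """Returns an encoder for client-to-server aggregation of `value`."""
  spec = tf.TensorSpec(value.shape, value.dtype)
  if value.shape.num_elements() > 10000:
    return te.encoders.as_gather_encoder(
        te.encoders.uniform_quantization(bits=8), spec)
  return te.encoders.as_gather_encoder(te.encoders.identity(), spec)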
def main(argv):
  if len(argv) > 1:
    raise app.UsageError(
        'Expected no command-line arguments, got: {}'.format(argv))

  emnist_train, emnist_test = emnist_dataset.get_emnist_datasets(
      FLAGS.client_batch_size, FLAGS.client_epochs_per_round,
      only_digits=False)

  if FLAGS.model == 'cnn':
    model_builder = functools.partial(
        emnist_models.create_conv_dropout_model, only_digits=False)
  elif FLAGS.model == '2nn':
    model_builder = functools.partial(
        emnist_models.create_two_hidden_layer_model, only_digits=False)
  else:
    raise ValueError('Cannot handle model flag [{!s}].'.format(FLAGS.model))

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  if FLAGS.uniform_weighting:

    def client_weight_fn(local_outputs):
      del local_outputs
      return 1.0
  else:
    client_weight_fn = None  # Defaults to the number of examples per client.

  def model_fn():
    return tff.learning.from_keras_model(
        model_builder(),
        loss=loss_builder(),
        input_spec=emnist_test.element_spec,
        metrics=metrics_builder())

  if FLAGS.noise_multiplier is not None:
    if not FLAGS.uniform_weighting:
      raise ValueError(
          'Differential privacy is only implemented for uniform weighting.')

    dp_query = tff.utils.build_dp_query(
        clip=FLAGS.clip,
        noise_multiplier=FLAGS.noise_multiplier,
        expected_total_weight=FLAGS.clients_per_round,
        adaptive_clip_learning_rate=FLAGS.adaptive_clip_learning_rate,
        target_unclipped_quantile=FLAGS.target_unclipped_quantile,
        clipped_count_budget_allocation=FLAGS.clipped_count_budget_allocation,
        expected_clients_per_round=FLAGS.clients_per_round,
        per_vector_clipping=FLAGS.per_vector_clipping,
        model=model_fn())

    weights_type = tff.learning.framework.weights_type_from_model(model_fn)
    aggregation_process = tff.utils.build_dp_aggregate_process(
        weights_type.trainable, dp_query)
  else:
    aggregation_process = None

  server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags(
      'server')
  client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags(
      'client')

  iterative_process = tff.learning.build_federated_averaging_process(
      model_fn=model_fn,
      server_optimizer_fn=server_optimizer_fn,
      client_weight_fn=client_weight_fn,
      client_optimizer_fn=client_optimizer_fn,
      aggregation_process=aggregation_process)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      emnist_train, FLAGS.clients_per_round)

  evaluate_fn = training_utils.build_evaluate_fn(
      eval_dataset=emnist_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  # `Model.summary()` prints and returns `None`, so route its output through
  # `logging.info` rather than logging the return value.
  model_builder().summary(print_fn=logging.info)

  hparam_dict = utils_impl.lookup_flag_values(utils_impl.get_hparam_flags())
  training_loop_dict = utils_impl.lookup_flag_values(training_loop_flags)

  training_loop.run(
      iterative_process=iterative_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      hparam_dict=hparam_dict,
      **training_loop_dict)
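# Assuming this script is run as a standard absl app (it already raises
# `app.UsageError` above), the module would close with the usual entry point:
if __name__ == '__main__':
  app.run(main)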