Example #1
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    stackoverflow_train, stackoverflow_validation, stackoverflow_test = stackoverflow_lr_dataset.get_stackoverflow_datasets(
        vocab_tokens_size=FLAGS.vocab_tokens_size,
        vocab_tags_size=FLAGS.vocab_tags_size,
        client_batch_size=FLAGS.client_batch_size,
        client_epochs_per_round=FLAGS.client_epochs_per_round,
        max_training_elements_per_user=FLAGS.max_elements_per_user,
        num_validation_examples=FLAGS.num_validation_examples)

    input_spec = stackoverflow_train.create_tf_dataset_for_client(
        stackoverflow_train.client_ids[0]).element_spec

    model_builder = functools.partial(
        stackoverflow_lr_models.create_logistic_model,
        vocab_tokens_size=FLAGS.vocab_tokens_size,
        vocab_tags_size=FLAGS.vocab_tags_size)

    loss_builder = functools.partial(tf.keras.losses.BinaryCrossentropy,
                                     from_logits=False,
                                     reduction=tf.keras.losses.Reduction.SUM)

    training_process = iterative_process_builder.from_flags(
        input_spec=input_spec,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=stackoverflow_train,
        train_clients_per_round=FLAGS.clients_per_round,
        random_seed=FLAGS.client_datasets_random_seed)

    assign_weights_fn = fed_avg_schedule.ServerState.assign_weights_to_keras_model

    evaluate_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=stackoverflow_validation,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    test_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(training_process,
                      client_datasets_fn,
                      evaluate_fn,
                      test_fn=test_fn)
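
Example #1 calls a module-level `metrics_builder` that is not shown. A plausible sketch for the multi-label tag-prediction task, assuming precision/recall-style metrics (the exact metric choice is an assumption, not the original definition):

def metrics_builder():
    # Hypothetical metrics for multi-label tag prediction over sigmoid outputs.
    return [
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(top_k=5, name='recall_at_5'),
    ]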
Example #2
    def test_raises_no_model_attribute_in_state(self):
        class BadIterativeProcess(adapters.IterativeProcessPythonAdapter):
            """Converts iterative process results from anonymous tuples."""
            def __init__(self):
                pass

            def initialize(self):
                return {}

            def next(self, state, data):
                return {}

        iterative_process = BadIterativeProcess()
        federated_data = [[_batch_fn()]]

        def client_datasets_fn(round_num):
            del round_num
            return federated_data

        def eval_fn(model):
            del model
            return {}

        with self.assertRaisesRegex(
                TypeError, 'The server state must have a model attribute'):
            training_loop.run(iterative_process, client_datasets_fn, eval_fn)
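
For contrast, a minimal sketch of a server state that would pass this check. It is illustrative only: in practice the state is produced by the iterative process itself, with `model` holding `tff.learning.ModelWeights`.

import collections

# Any state object exposing a `model` attribute satisfies the contract
# that `training_loop.run` checks for here.
ServerState = collections.namedtuple('ServerState', ['model'])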
Example #3
    def test_fn_writes_metrics(self):
        FLAGS.total_rounds = 1
        FLAGS.rounds_per_eval = 10
        FLAGS.experiment_name = 'test_metrics'
        iterative_process = _build_federated_averaging_process()
        batch = _batch_fn()
        federated_data = [[batch]]

        def client_datasets_fn(round_num):
            del round_num
            return federated_data

        def evaluate(model):
            keras_model = tff.simulation.models.mnist.create_keras_model(
                compile_model=True)
            model.assign_weights_to(keras_model)
            return {'loss': keras_model.evaluate(batch.x, batch.y)}

        temp_filepath = self.get_temp_dir()
        FLAGS.root_output_dir = temp_filepath
        training_loop.run(iterative_process,
                          client_datasets_fn,
                          evaluate,
                          test_fn=evaluate)

        results_dir = os.path.join(FLAGS.root_output_dir, 'results',
                                   FLAGS.experiment_name)

        scalar_manager = metrics_manager.ScalarMetricsManager(results_dir)
        metrics = scalar_manager.get_metrics()
        self.assertEqual(2, len(metrics.index))
        self.assertIn('eval/loss', metrics.columns)
        self.assertIn('test/loss', metrics.columns)
        self.assertNotIn('train_eval/loss', metrics.columns)
Example #4
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    tf.compat.v1.enable_v2_behavior()

    stackoverflow_train, stackoverflow_validation, stackoverflow_test = dataset.get_stackoverflow_datasets(
        vocab_tokens_size=FLAGS.vocab_tokens_size,
        vocab_tags_size=FLAGS.vocab_tags_size,
        client_batch_size=FLAGS.client_batch_size,
        client_epochs_per_round=FLAGS.client_epochs_per_round,
        max_training_elements_per_user=FLAGS.max_elements_per_user,
        num_validation_examples=FLAGS.num_validation_examples)

    sample_client_dataset = stackoverflow_train.create_tf_dataset_for_client(
        stackoverflow_train.client_ids[0])
    # TODO(b/144382142): Sample batches cannot be eager tensors, since they are
    # passed (implicitly) to tff.learning.build_federated_averaging_process.
    sample_batch = tf.nest.map_structure(lambda x: x.numpy(),
                                         next(iter(sample_client_dataset)))

    model_builder = functools.partial(
        models.create_logistic_model,
        vocab_tokens_size=FLAGS.vocab_tokens_size,
        vocab_tags_size=FLAGS.vocab_tags_size)

    loss_builder = functools.partial(tf.keras.losses.BinaryCrossentropy,
                                     from_logits=False,
                                     reduction=tf.keras.losses.Reduction.SUM)

    training_process = iterative_process_builder.from_flags(
        dummy_batch=sample_batch,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        stackoverflow_train, FLAGS.clients_per_round)

    evaluate_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=stackoverflow_validation,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    test_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(training_process,
                      client_datasets_fn,
                      evaluate_fn,
                      test_fn=test_fn)
Example #5
def run_experiment():
    """Data preprocessing and experiment execution."""
    emnist_train, emnist_test = dataset.get_emnist_datasets(
        FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round,
        only_digits=FLAGS.only_digits)

    example_dataset = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[0])
    sample_batch = tf.nest.map_structure(lambda x: x.numpy(),
                                         next(iter(example_dataset)))

    client_datasets_fn = training_utils.build_client_datasets_fn(
        emnist_train, FLAGS.clients_per_round)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=emnist_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    client_optimizer_fn = functools.partial(
        utils_impl.create_optimizer_from_flags, 'client')
    server_optimizer_fn = functools.partial(
        utils_impl.create_optimizer_from_flags, 'server')

    def tff_model_fn():
        keras_model = model_builder()
        return tff.learning.from_keras_model(keras_model,
                                             dummy_batch=sample_batch,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    if FLAGS.use_compression:
        # We create a `StatefulBroadcastFn` and a `StatefulAggregateFn` by
        # providing `_broadcast_encoder_fn` and `_mean_encoder_fn` to the
        # corresponding utilities. These functions are called once for each of
        # the model weights created by `tff_model_fn` and return instances of
        # the appropriate encoders.
        encoded_broadcast_fn = (
            tff.learning.framework.build_encoded_broadcast_from_model(
                tff_model_fn, _broadcast_encoder_fn))
        encoded_mean_fn = tff.learning.framework.build_encoded_mean_from_model(
            tff_model_fn, _mean_encoder_fn)
    else:
        encoded_broadcast_fn = None
        encoded_mean_fn = None

    iterative_process = tff.learning.build_federated_averaging_process(
        model_fn=tff_model_fn,
        client_optimizer_fn=client_optimizer_fn,
        server_optimizer_fn=server_optimizer_fn,
        stateful_delta_aggregate_fn=encoded_mean_fn,
        stateful_model_broadcast_fn=encoded_broadcast_fn)
    iterative_process = compression_process_adapter.CompressionProcessAdapter(
        iterative_process)

    training_loop.run(iterative_process=iterative_process,
                      client_datasets_fn=client_datasets_fn,
                      evaluate_fn=evaluate_fn)
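
Examples #5 and #6 pass `_broadcast_encoder_fn` and `_mean_encoder_fn` without defining them. A minimal sketch, assuming the `tensor_encoding` utilities shipped with `tensorflow_model_optimization`; the choice of an identity broadcast encoder and 8-bit quantized aggregation is illustrative:

import tensorflow as tf
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te

def _broadcast_encoder_fn(value):
    # Broadcast server-to-client values uncompressed.
    spec = tf.TensorSpec(value.shape, value.dtype)
    return te.encoders.as_simple_encoder(te.encoders.identity(), spec)

def _mean_encoder_fn(value):
    # Quantize client-to-server updates to 8 bits before they are averaged.
    spec = tf.TensorSpec(value.shape, value.dtype)
    return te.encoders.as_gather_encoder(
        te.encoders.uniform_quantization(bits=8), spec)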
Example #6
def run_experiment():
    """Data preprocessing and experiment execution."""
    emnist_train, emnist_test = emnist_dataset.get_emnist_datasets(
        FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round,
        only_digits=FLAGS.only_digits)

    example_dataset = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[0])
    input_spec = example_dataset.element_spec

    client_datasets_fn = training_utils.build_client_datasets_fn(
        emnist_train, FLAGS.clients_per_round)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=emnist_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    client_optimizer_fn = functools.partial(
        utils_impl.create_optimizer_from_flags, 'client')
    server_optimizer_fn = functools.partial(
        utils_impl.create_optimizer_from_flags, 'server')

    def tff_model_fn():
        keras_model = model_builder()
        return tff.learning.from_keras_model(keras_model,
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    if FLAGS.use_compression:
        # We create a `MeasuredProcess` for the broadcast and a
        # `MeasuredProcess` for the aggregation by providing
        # `_broadcast_encoder_fn` and `_mean_encoder_fn` to the corresponding
        # utilities. These functions are called once for each of the model
        # weights created by `tff_model_fn` and return instances of the
        # appropriate encoders.
        encoded_broadcast_process = (
            tff.learning.framework.build_encoded_broadcast_process_from_model(
                tff_model_fn, _broadcast_encoder_fn))
        encoded_mean_process = (
            tff.learning.framework.build_encoded_mean_process_from_model(
                tff_model_fn, _mean_encoder_fn))
    else:
        encoded_broadcast_process = None
        encoded_mean_process = None

    iterative_process = tff.learning.build_federated_averaging_process(
        model_fn=tff_model_fn,
        client_optimizer_fn=client_optimizer_fn,
        server_optimizer_fn=server_optimizer_fn,
        aggregation_process=encoded_mean_process,
        broadcast_process=encoded_broadcast_process)

    training_loop.run(iterative_process=iterative_process,
                      client_datasets_fn=client_datasets_fn,
                      validation_fn=evaluate_fn)
Example #7
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Expected no command-line arguments, '
                             'got: {}'.format(argv))

    tf.compat.v1.enable_v2_behavior()
    # TODO(b/139129100): Remove this once the local executor is the default.
    tff.framework.set_default_executor(
        tff.framework.local_executor_factory(max_fanout=25))

    emnist_train, emnist_test = dataset.get_emnist_datasets(
        FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round,
        only_digits=False)

    sample_client_dataset = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[0])
    # TODO(b/144382142): Sample batches cannot be eager tensors, since they are
    # passed (implicitly) to tff.learning.build_federated_averaging_process.
    sample_batch = tf.nest.map_structure(lambda x: x.numpy(),
                                         next(iter(sample_client_dataset)))

    if FLAGS.model == 'cnn':
        model_builder = functools.partial(models.create_conv_dropout_model,
                                          only_digits=False)
    elif FLAGS.model == '2nn':
        model_builder = functools.partial(models.create_two_hidden_layer_model,
                                          only_digits=False)
    else:
        raise ValueError('Cannot handle model flag [{!s}].'.format(
            FLAGS.model))

    loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    training_process = iterative_process_builder.from_flags(
        dummy_batch=sample_batch,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        emnist_train, FLAGS.clients_per_round)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=emnist_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(
        iterative_process=training_process,
        client_datasets_fn=client_datasets_fn,
        evaluate_fn=evaluate_fn,
    )
Example #8
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Expected no command-line arguments, '
                             'got: {}'.format(argv))

    tf.compat.v1.enable_v2_behavior()

    emnist_train, emnist_test = dataset.get_emnist_datasets(
        FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round,
        only_digits=False)

    input_spec = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[0]).element_spec

    if FLAGS.model == 'cnn':
        model_builder = functools.partial(models.create_conv_dropout_model,
                                          only_digits=False)
    elif FLAGS.model == '2nn':
        model_builder = functools.partial(models.create_two_hidden_layer_model,
                                          only_digits=False)
    else:
        raise ValueError('Cannot handle model flag [{!s}].'.format(
            FLAGS.model))

    loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    training_process = iterative_process_builder.from_flags(
        input_spec=input_spec,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=emnist_train,
        train_clients_per_round=FLAGS.clients_per_round,
        random_seed=FLAGS.client_datasets_random_seed)

    assign_weights_fn = fed_avg_schedule.ServerState.assign_weights_to_keras_model

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=emnist_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(
        iterative_process=training_process,
        client_datasets_fn=client_datasets_fn,
        evaluate_fn=evaluate_fn,
    )
Example #9
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    tf.compat.v1.enable_v2_behavior()
    tff.framework.set_default_executor(
        tff.framework.local_executor_factory(max_fanout=25))

    train_clientdata, test_dataset = dataset.construct_character_level_datasets(
        FLAGS.client_batch_size, FLAGS.client_epochs_per_round,
        FLAGS.sequence_length)
    test_dataset = test_dataset.cache()

    loss_fn_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

    # Need to iterate until we find a client with data.
    for client_id in train_clientdata.client_ids:
        try:
            sample_batch = next(
                iter(train_clientdata.create_tf_dataset_for_client(client_id)))
            break
        except StopIteration:
            pass  # Client had no batches.
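    # Caveat: if every client dataset is empty, `sample_batch` is never bound
    # and the line below raises a NameError.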
    sample_batch = tf.nest.map_structure(lambda t: t.numpy(), sample_batch)

    def client_weight_fn(local_outputs):
        # `num_tokens` is an int64 tensor of shape [1]; to use it as a client
        # weight we need a float32 scalar.
        return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

    training_process = iterative_process_builder.from_flags(
        dummy_batch=sample_batch,
        model_builder=model_builder,
        loss_builder=loss_fn_builder,
        metrics_builder=metrics_builder,
        client_weight_fn=client_weight_fn)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(
        iterative_process=training_process,
        client_datasets_fn=training_utils.build_client_datasets_fn(
            train_clientdata, FLAGS.clients_per_round),
        evaluate_fn=training_utils.build_evaluate_fn(
            eval_dataset=test_dataset,
            model_builder=model_builder,
            loss_builder=loss_fn_builder,
            metrics_builder=metrics_builder),
    )
Example #10
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Expected no command-line arguments, '
                         'got: {}'.format(argv))

  tf.compat.v1.enable_v2_behavior()

  cifar_train, cifar_test = dataset.get_federated_cifar100(
      client_epochs_per_round=FLAGS.client_epochs_per_round,
      train_batch_size=FLAGS.client_batch_size,
      crop_shape=CROP_SHAPE)

  input_spec = cifar_train.create_tf_dataset_for_client(
      cifar_train.client_ids[0]).element_spec

  model_builder = functools.partial(
      resnet_models.create_resnet18,
      input_shape=CROP_SHAPE,
      num_classes=NUM_CLASSES)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  training_process = iterative_process_builder.from_flags(
      input_spec=input_spec,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      train_dataset=cifar_train,
      train_clients_per_round=FLAGS.clients_per_round,
      random_seed=FLAGS.client_datasets_random_seed)

  assign_weights_fn = fed_avg_schedule.ServerState.assign_weights_to_keras_model

  evaluate_fn = training_utils.build_evaluate_fn(
      eval_dataset=cifar_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder,
      assign_weights_to_keras_model=assign_weights_fn)

  training_loop.run(
      iterative_process=training_process,
      client_datasets_fn=client_datasets_fn,
      evaluate_fn=evaluate_fn)
Example #11
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    tf.compat.v1.enable_v2_behavior()

    train_clientdata, test_dataset = dataset.construct_character_level_datasets(
        FLAGS.client_batch_size, FLAGS.client_epochs_per_round,
        FLAGS.sequence_length)
    test_dataset = test_dataset.cache()

    loss_fn_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

    input_spec = train_clientdata.create_tf_dataset_for_client(
        train_clientdata.client_ids[0]).element_spec

    def client_weight_fn(local_outputs):
        # `num_tokens` is an int64 tensor of shape [1]; to use it as a client
        # weight we need a float32 scalar.
        return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

    training_process = iterative_process_builder.from_flags(
        input_spec=input_spec,
        model_builder=model_builder,
        loss_builder=loss_fn_builder,
        metrics_builder=metrics_builder,
        client_weight_fn=client_weight_fn)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=train_clientdata,
        train_clients_per_round=FLAGS.clients_per_round,
        random_seed=FLAGS.client_datasets_random_seed)

    assign_weights_fn = fed_avg_schedule.ServerState.assign_weights_to_keras_model

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=test_dataset,
        model_builder=model_builder,
        loss_builder=loss_fn_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(iterative_process=training_process,
                      client_datasets_fn=client_datasets_fn,
                      evaluate_fn=evaluate_fn)
Example #12
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Expected no command-line arguments, '
                         'got: {}'.format(argv))

  tf.compat.v1.enable_v2_behavior()
  # TODO(b/139129100): Remove this once the local executor is the default.
  tff.framework.set_default_executor(
      tff.framework.local_executor_factory(max_fanout=25))

  cifar_train, cifar_test = dataset.get_federated_cifar100(
      client_epochs_per_round=FLAGS.client_epochs_per_round,
      train_batch_size=FLAGS.client_batch_size,
      crop_shape=CROP_SHAPE)

  sample_client_dataset = cifar_train.create_tf_dataset_for_client(
      cifar_train.client_ids[0])

  sample_batch = tf.nest.map_structure(lambda x: x.numpy(),
                                       next(iter(sample_client_dataset)))

  model_builder = functools.partial(
      resnet_models.create_resnet18,
      input_shape=CROP_SHAPE,
      num_classes=NUM_CLASSES)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  training_process = iterative_process_builder.from_flags(
      dummy_batch=sample_batch,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  training_loop.run(
      iterative_process=training_process,
      client_datasets_fn=training_utils.build_client_datasets_fn(
          cifar_train, FLAGS.clients_per_round),
      evaluate_fn=training_utils.build_evaluate_fn(
          eval_dataset=cifar_test,
          model_builder=model_builder,
          loss_builder=loss_builder,
          metrics_builder=metrics_builder),
  )
Example #13
    def test_raises_non_callable_client_dataset(self):
        FLAGS.total_rounds = 10
        FLAGS.experiment_name = 'non_callable_client_dataset'
        iterative_process = _build_federated_averaging_process()
        federated_data = [[_batch_fn()]]

        client_dataset = federated_data

        def evaluate_fn(model):
            del model
            return {}

        temp_filepath = self.get_temp_dir()
        FLAGS.root_output_dir = temp_filepath
        with self.assertRaises(TypeError):
            training_loop.run(iterative_process, client_dataset, evaluate_fn)
Example #14
    def test_fedavg_training_decreases_loss(self):
        FLAGS.total_rounds = 1
        FLAGS.experiment_name = 'fedavg_decreases_loss'
        batch = _batch_fn()
        federated_data = [[batch]]
        iterative_process = _build_federated_averaging_process()

        def client_datasets_fn(round_num):
            del round_num
            return federated_data

        def evaluate(model):
            keras_model = tff.simulation.models.mnist.create_keras_model(
                compile_model=True)
            model.assign_weights_to(keras_model)
            return {'loss': keras_model.evaluate(batch.x, batch.y)}

        initial_state = iterative_process.initialize()

        temp_filepath = self.get_temp_dir()
        FLAGS.root_output_dir = temp_filepath
        final_state = training_loop.run(iterative_process, client_datasets_fn,
                                        evaluate)
        self.assertLess(
            evaluate(final_state.model)['loss'],
            evaluate(initial_state.model)['loss'])
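
The test examples (such as #2, #3, #13, and #14 above) rely on module-level helpers `_batch_fn` and `_build_federated_averaging_process` that are not shown. A minimal sketch, assuming an MNIST-style model and a single synthetic batch; the shapes and optimizer settings are illustrative:

import collections

import numpy as np
import tensorflow as tf
import tensorflow_federated as tff

_Batch = collections.namedtuple('Batch', ['x', 'y'])

def _batch_fn():
    # One synthetic 784-pixel example with an integer label.
    return _Batch(x=np.ones([1, 784], dtype=np.float32),
                  y=np.ones([1, 1], dtype=np.int64))

def _build_federated_averaging_process():
    def model_fn():
        keras_model = tff.simulation.models.mnist.create_keras_model(
            compile_model=False)
        return tff.learning.from_keras_model(
            keras_model,
            input_spec=(tf.TensorSpec([None, 784], tf.float32),
                        tf.TensorSpec([None, 1], tf.int64)),
            loss=tf.keras.losses.SparseCategoricalCrossentropy())
    return tff.learning.build_federated_averaging_process(
        model_fn,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1))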
Example #15
  def test_raises_non_callable_evaluate_fn(self):
    FLAGS.total_rounds = 10
    FLAGS.experiment_name = 'non_callable_evaluate'
    iterative_process = _build_federated_averaging_process()
    federated_data = [[_batch_fn()]]

    def client_datasets_fn(round_num):
      del round_num
      return federated_data

    metrics_dict = {}

    temp_filepath = self.get_temp_dir()
    FLAGS.root_output_dir = temp_filepath
    with self.assertRaises(TypeError):
      training_loop.run(iterative_process, client_datasets_fn, metrics_dict)
Example #16
def main(_):

    tf.enable_v2_behavior()
    # TODO(b/139129100): Remove this once the local executor is the default.
    tff.framework.set_default_executor(
        tff.framework.local_executor_factory(max_fanout=25))

    emnist_train, emnist_test = dataset.get_emnist_datasets(
        FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round,
        only_digits=False)

    sample_client_dataset = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[0])
    # TODO(b/144382142): Sample batches cannot be eager tensors, since they are
    # passed (implicitly) to tff.learning.build_federated_averaging_process.
    sample_batch = tf.nest.map_structure(lambda x: x.numpy(),
                                         next(iter(sample_client_dataset)))

    model_builder = models.create_autoencoder_model

    loss_builder = tf.keras.losses.MeanSquaredError
    metrics_builder = lambda: [tf.keras.metrics.MeanSquaredError()]

    training_process = iterative_process_builder.from_flags(
        dummy_batch=sample_batch,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        emnist_train, FLAGS.clients_per_round)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=emnist_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(
        iterative_process=training_process,
        client_datasets_fn=client_datasets_fn,
        evaluate_fn=evaluate_fn,
    )
Example #17
    def test_raises_non_str_output_dir(self):
        FLAGS.total_rounds = 10
        FLAGS.root_output_dir = 1
        FLAGS.experiment_name = 'non_str_output_dir'
        iterative_process = _build_federated_averaging_process()
        federated_data = [[_batch_fn()]]

        def client_datasets_fn(round_num):
            del round_num
            return federated_data

        def eval_fn(model):
            del model
            return {}

        with self.assertRaises(TypeError):
            training_loop.run(iterative_process, client_datasets_fn, eval_fn)
Example #18
def main(_):

    emnist_train, emnist_test = emnist_ae_dataset.get_emnist_datasets(
        FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round,
        only_digits=False)

    input_spec = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[0]).element_spec

    model_builder = emnist_ae_models.create_autoencoder_model

    loss_builder = functools.partial(tf.keras.losses.MeanSquaredError,
                                     reduction=tf.keras.losses.Reduction.SUM)

    metrics_builder = lambda: [tf.keras.metrics.MeanSquaredError()]

    training_process = iterative_process_builder.from_flags(
        input_spec=input_spec,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=emnist_train,
        train_clients_per_round=FLAGS.clients_per_round,
        random_seed=FLAGS.client_datasets_random_seed)

    assign_weights_fn = fed_avg_schedule.ServerState.assign_weights_to_keras_model

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=emnist_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(
        iterative_process=training_process,
        client_datasets_fn=client_datasets_fn,
        evaluate_fn=evaluate_fn,
    )
Example #19
    def test_raises_non_iterative_process(self):
        FLAGS.total_rounds = 10
        FLAGS.experiment_name = 'non_iterative_process'
        bad_iterative_process = _build_federated_averaging_process().next
        federated_data = [[_batch_fn()]]

        def client_datasets_fn(round_num):
            del round_num
            return federated_data

        def evaluate_fn(model):
            del model
            return {}

        temp_filepath = self.get_temp_dir()
        FLAGS.root_output_dir = temp_filepath
        with self.assertRaises(TypeError):
            training_loop.run([bad_iterative_process], client_datasets_fn,
                              evaluate_fn)
Example #20
    def test_checkpoint_manager_saves_state(self):
        FLAGS.total_rounds = 1
        FLAGS.experiment_name = 'checkpoint_manager_saves_state'
        iterative_process = _build_federated_averaging_process()
        federated_data = [[_batch_fn()]]

        def client_datasets_fn(round_num):
            del round_num
            return federated_data

        def evaluate_fn(model):
            del model
            return {}

        temp_filepath = self.get_temp_dir()
        FLAGS.root_output_dir = temp_filepath
        final_state = training_loop.run(iterative_process, client_datasets_fn,
                                        evaluate_fn)

        ckpt_manager = checkpoint_manager.FileCheckpointManager(
            os.path.join(
                temp_filepath,
                'checkpoints',
                FLAGS.experiment_name,
            ))
        restored_state, restored_round = ckpt_manager.load_latest_checkpoint(
            final_state)

        self.assertEqual(restored_round, 0)

        keras_model = tff.simulation.models.mnist.create_keras_model(
            compile_model=True)
        restored_state.model.assign_weights_to(keras_model)
        restored_loss = keras_model.test_on_batch(federated_data[0][0].x,
                                                  federated_data[0][0].y)
        final_state.model.assign_weights_to(keras_model)
        final_loss = keras_model.test_on_batch(federated_data[0][0].x,
                                               federated_data[0][0].y)
        self.assertEqual(final_loss, restored_loss)
Example #21
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Expected no command-line arguments, '
                             'got: {}'.format(argv))
    tff.framework.set_default_executor(
        tff.framework.local_executor_factory(max_fanout=10))

    model_builder = functools.partial(
        stackoverflow_models.create_recurrent_model,
        vocab_size=FLAGS.vocab_size,
        embedding_size=FLAGS.embedding_size,
        latent_size=FLAGS.latent_size,
        num_layers=FLAGS.num_layers,
        shared_embedding=FLAGS.shared_embedding)

    loss_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

    pad_token, oov_token, _, eos_token = stackoverflow_dataset.get_special_tokens(
        FLAGS.vocab_size)

    def metrics_builder():
        return [
            keras_metrics.MaskedCategoricalAccuracy(name='accuracy_with_oov',
                                                    masked_tokens=[pad_token]),
            keras_metrics.MaskedCategoricalAccuracy(
                name='accuracy_no_oov', masked_tokens=[pad_token, oov_token]),
            # Note that BOS never appears in the ground truth.
            keras_metrics.MaskedCategoricalAccuracy(
                name='accuracy_no_oov_or_eos',
                masked_tokens=[pad_token, oov_token, eos_token]),
            keras_metrics.NumBatchesCounter(),
            keras_metrics.NumTokensCounter(masked_tokens=[pad_token]),
        ]

    datasets = stackoverflow_dataset.construct_word_level_datasets(
        FLAGS.vocab_size, FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round, FLAGS.sequence_length,
        FLAGS.max_elements_per_user, FLAGS.num_validation_examples)
    train_dataset, validation_dataset, test_dataset = datasets

    if FLAGS.uniform_weighting:

        def client_weight_fn(local_outputs):
            del local_outputs
            return 1.0
    else:

        def client_weight_fn(local_outputs):
            return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

    def model_fn():
        return tff.learning.from_keras_model(
            model_builder(),
            loss_builder(),
            input_spec=validation_dataset.element_spec,
            metrics=metrics_builder())

    if FLAGS.noise_multiplier is not None:
        if not FLAGS.uniform_weighting:
            raise ValueError(
                'Differential privacy is only implemented for uniform weighting.'
            )

        dp_query = tff.utils.build_dp_query(
            clip=FLAGS.clip,
            noise_multiplier=FLAGS.noise_multiplier,
            expected_total_weight=FLAGS.clients_per_round,
            adaptive_clip_learning_rate=FLAGS.adaptive_clip_learning_rate,
            target_unclipped_quantile=FLAGS.target_unclipped_quantile,
            clipped_count_budget_allocation=(
                FLAGS.clipped_count_budget_allocation),
            expected_num_clients=FLAGS.clients_per_round,
            per_vector_clipping=FLAGS.per_vector_clipping,
            model=model_fn())

        dp_aggregate_fn, _ = tff.utils.build_dp_aggregate(dp_query)
    else:
        dp_aggregate_fn = None

    server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags(
        'server')
    client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags(
        'client')
    training_process = (
        tff.learning.federated_averaging.build_federated_averaging_process(
            model_fn=model_fn,
            server_optimizer_fn=server_optimizer_fn,
            client_weight_fn=client_weight_fn,
            client_optimizer_fn=client_optimizer_fn,
            stateful_delta_aggregate_fn=dp_aggregate_fn))

    adaptive_clipping = (FLAGS.adaptive_clip_learning_rate > 0)
    training_process = dp_utils.DPFedAvgProcessAdapter(
        training_process, FLAGS.per_vector_clipping, adaptive_clipping)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset, FLAGS.clients_per_round)

    evaluate_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=validation_dataset,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=dp_utils.assign_weights_to_keras_model)

    test_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=validation_dataset.concatenate(test_dataset),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=dp_utils.assign_weights_to_keras_model)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(training_process,
                      client_datasets_fn,
                      evaluate_fn,
                      test_fn=test_fn)
Example #22
def run_federated(
        iterative_process_builder: Callable[...,
                                            tff.templates.IterativeProcess],
        assign_weights_fn: Callable[[Any, tf.keras.Model], None],
        client_epochs_per_round: int,
        client_batch_size: int,
        clients_per_round: int,
        max_batches_per_client: Optional[int] = -1,
        client_datasets_random_seed: Optional[int] = None,
        vocab_size: Optional[int] = 10000,
        num_oov_buckets: Optional[int] = 1,
        sequence_length: Optional[int] = 20,
        max_elements_per_user: Optional[int] = 1000,
        num_validation_examples: Optional[int] = 10000,
        embedding_size: Optional[int] = 96,
        latent_size: Optional[int] = 670,
        num_layers: Optional[int] = 1,
        shared_embedding: Optional[bool] = False,
        total_rounds: Optional[int] = 1500,
        experiment_name: Optional[str] = 'federated_so_nwp',
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        max_eval_batches: Optional[int] = None,
        **kwargs):
    """Runs an iterative process on the Stack Overflow next word prediction task.

  This method will load and pre-process the dataset and construct a model for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `tensorflow_federated.python.research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, a
      `client_weight_fn` and a `dataset_preprocess_comp`, and returns a
      `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    assign_weights_fn: A function that accepts the server state `S` and a
      `tf.keras.Model`, and updates the weights in the Keras model. This is used
      to do evaluation using Keras.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset is
      used.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    vocab_size: Integer dictating the number of most frequent words to use in
      the vocabulary.
    num_oov_buckets: The number of out-of-vocabulary buckets to use.
    sequence_length: The maximum number of words to take for each sequence.
    max_elements_per_user: The maximum number of elements processed for each
      client's dataset.
    num_validation_examples: The number of test examples to use for validation.
    embedding_size: The dimension of the word embedding layer.
    latent_size: The dimension of the latent units in the recurrent layers.
    num_layers: The number of stacked recurrent layers to use.
    shared_embedding: Boolean indicating whether to tie input and output
      embeddings.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to None or a nonpositive
      integer, the full evaluation datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `tensorflow_federated/python/research/utils/training_utils.py`.
  """

    model_builder = functools.partial(
        stackoverflow_models.create_recurrent_model,
        vocab_size=vocab_size,
        num_oov_buckets=num_oov_buckets,
        embedding_size=embedding_size,
        latent_size=latent_size,
        num_layers=num_layers,
        shared_embedding=shared_embedding)

    loss_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

    special_tokens = stackoverflow_dataset.get_special_tokens(
        vocab_size, num_oov_buckets)
    pad_token = special_tokens.pad
    oov_tokens = special_tokens.oov
    eos_token = special_tokens.eos

    def metrics_builder():
        return [
            keras_metrics.MaskedCategoricalAccuracy(name='accuracy_with_oov',
                                                    masked_tokens=[pad_token]),
            keras_metrics.MaskedCategoricalAccuracy(name='accuracy_no_oov',
                                                    masked_tokens=[pad_token] +
                                                    oov_tokens),
            # Note that BOS never appears in the ground truth.
            keras_metrics.MaskedCategoricalAccuracy(
                name='accuracy_no_oov_or_eos',
                masked_tokens=[pad_token, eos_token] + oov_tokens),
            keras_metrics.NumBatchesCounter(),
            keras_metrics.NumTokensCounter(masked_tokens=[pad_token])
        ]

    train_clientdata, _, _ = tff.simulation.datasets.stackoverflow.load_data()

    # TODO(b/161914546): consider moving evaluation to use
    # `tff.learning.build_federated_evaluation` to get metrics over client
    # distributions, as well as the example weight means from this centralized
    # evaluation.
    _, validation_dataset, test_dataset = stackoverflow_dataset.get_centralized_datasets(
        vocab_size=vocab_size,
        max_seq_len=sequence_length,
        train_batch_size=client_batch_size,
        max_validation_batches=max_eval_batches,
        max_test_batches=max_eval_batches,
        num_validation_examples=num_validation_examples,
        num_oov_buckets=num_oov_buckets)

    train_dataset_preprocess_comp = stackoverflow_dataset.create_train_dataset_preprocess_fn(
        vocab=stackoverflow_dataset.create_vocab(vocab_size),
        num_oov_buckets=num_oov_buckets,
        client_batch_size=client_batch_size,
        client_epochs_per_round=client_epochs_per_round,
        max_seq_len=sequence_length,
        max_training_elements_per_user=max_elements_per_user,
        max_batches_per_user=max_batches_per_client)

    input_spec = train_dataset_preprocess_comp.type_signature.result.element

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    def client_weight_fn(local_outputs):
        # `num_tokens` is an int64 tensor of shape [1]; to use it as a client
        # weight we need a float32 scalar.
        return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

    training_process = iterative_process_builder(
        tff_model_fn, client_weight_fn=client_weight_fn)

    training_process = tff.simulation.compose_dataset_computation_with_iterative_process(
        train_dataset_preprocess_comp, training_process)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=train_clientdata,
        train_clients_per_round=clients_per_round,
        random_seed=client_datasets_random_seed)

    evaluate_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=validation_dataset,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    test_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=validation_dataset.concatenate(test_dataset),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(iterative_process=training_process,
                      client_datasets_fn=client_datasets_fn,
                      validation_fn=evaluate_fn,
                      test_fn=test_fn,
                      total_rounds=total_rounds,
                      experiment_name=experiment_name,
                      root_output_dir=root_output_dir,
                      **kwargs)
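
A hedged usage sketch for `run_federated`, pairing it with plain federated averaging. The optimizer settings and the inline `assign_weights_fn` are illustrative assumptions, not the original experiment configuration:

def fed_avg_builder(model_fn, client_weight_fn=None):
    # Any builder returning a `tff.templates.IterativeProcess` whose server
    # state carries the model weights would work here.
    return tff.learning.build_federated_averaging_process(
        model_fn=model_fn,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.3),
        server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
        client_weight_fn=client_weight_fn)

run_federated(
    iterative_process_builder=fed_avg_builder,
    assign_weights_fn=lambda state, keras_model: state.model.assign_weights_to(
        keras_model),
    client_epochs_per_round=1,
    client_batch_size=16,
    clients_per_round=10,
    total_rounds=100)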
Example #23
def run_federated(
    iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
    client_epochs_per_round: int,
    client_batch_size: int,
    clients_per_round: int,
    max_batches_per_client: Optional[int] = -1,
    client_datasets_random_seed: Optional[int] = None,
    emnist_model: Optional[str] = 'cnn',
    total_rounds: Optional[int] = 1500,
    experiment_name: Optional[str] = 'federated_emnist_cr',
    root_output_dir: Optional[str] = '/tmp/fed_opt',
    max_eval_batches: Optional[int] = None,
    **kwargs):
  """Runs an iterative process on the EMNIST character recognition task.

  This method will load and pre-process the dataset and construct a model for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `tensorflow_federated.python.research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  Moreover, the server state must have an attribute `model` of type
  `tff.learning.ModelWeights`.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset is
      used.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    emnist_model: A string specifying the model used for character recognition.
      Can be one of `cnn` and `2nn`, corresponding to a CNN model and a densely
      connected 2-layer model (respectively).
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    max_eval_batches: If set to a positive integer, evaluation datasets are
      capped to at most that many batches. If set to None or a nonpositive
      integer, the full evaluation datasets are used.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `tensorflow_federated/python/research/utils/training_utils.py`.
  """

  emnist_train, _ = emnist_dataset.get_emnist_datasets(
      client_batch_size,
      client_epochs_per_round,
      max_batches_per_client=max_batches_per_client,
      only_digits=False)

  _, emnist_test = emnist_dataset.get_centralized_datasets(
      train_batch_size=client_batch_size,
      max_test_batches=max_eval_batches,
      only_digits=False)

  input_spec = emnist_train.create_tf_dataset_for_client(
      emnist_train.client_ids[0]).element_spec

  if emnist_model == 'cnn':
    model_builder = functools.partial(
        emnist_models.create_conv_dropout_model, only_digits=False)
  elif emnist_model == '2nn':
    model_builder = functools.partial(
        emnist_models.create_two_hidden_layer_model, only_digits=False)
  else:
    raise ValueError(
        'Cannot handle model flag [{!s}], must be one of {!s}.'.format(
            emnist_model, EMNIST_MODELS))

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  def tff_model_fn() -> tff.learning.Model:
    return tff.learning.from_keras_model(
        keras_model=model_builder(),
        input_spec=input_spec,
        loss=loss_builder(),
        metrics=metrics_builder())

  training_process = iterative_process_builder(tff_model_fn)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      train_dataset=emnist_train,
      train_clients_per_round=clients_per_round,
      random_seed=client_datasets_random_seed)

  evaluate_fn = training_utils.build_evaluate_fn(
      eval_dataset=emnist_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder)

  logging.info('Training model:')
  logging.info(model_builder().summary())

  training_loop.run(
      iterative_process=training_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
      test_fn=evaluate_fn,
      total_rounds=total_rounds,
      experiment_name=experiment_name,
      root_output_dir=root_output_dir,
      **kwargs)
Example #24
def run_federated(
        iterative_process_builder: Callable[...,
                                            tff.templates.IterativeProcess],
        assign_weights_fn: Callable[[Any, tf.keras.Model], None],
        client_epochs_per_round: int,
        client_batch_size: int,
        clients_per_round: int,
        max_batches_per_client: Optional[int] = -1,
        client_datasets_random_seed: Optional[int] = None,
        crop_size: Optional[int] = 24,
        total_rounds: Optional[int] = 1500,
        experiment_name: Optional[str] = 'federated_cifar100',
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        **kwargs):
    """Runs an iterative process on the CIFAR-100 classification task.

  This method will load and pre-process the dataset and construct a model for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `tensorflow_federated.python.research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    assign_weights_fn: A function that accepts the server state `S` and a
      `tf.keras.Model`, and updates the weights in the Keras model. This is used
      to do evaluation using Keras.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset is
      used.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    crop_size: An optional integer representing the resulting size of input
      images after preprocessing.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `tensorflow_federated/python/research/utils/training_utils.py`.
  """

    crop_shape = (crop_size, crop_size, 3)

    cifar_train, cifar_test = cifar100_dataset.get_federated_cifar100(
        client_epochs_per_round=client_epochs_per_round,
        train_batch_size=client_batch_size,
        crop_shape=crop_shape,
        max_batches_per_client=max_batches_per_client)

    input_spec = cifar_train.create_tf_dataset_for_client(
        cifar_train.client_ids[0]).element_spec

    model_builder = functools.partial(resnet_models.create_resnet18,
                                      input_shape=crop_shape,
                                      num_classes=NUM_CLASSES)

    loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    training_process = iterative_process_builder(tff_model_fn)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=cifar_train,
        train_clients_per_round=clients_per_round,
        random_seed=client_datasets_random_seed)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=cifar_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    logging.info(model_builder().summary())

    training_loop.run(iterative_process=training_process,
                      client_datasets_fn=client_datasets_fn,
                      validation_fn=evaluate_fn,
                      test_fn=evaluate_fn,
                      total_rounds=total_rounds,
                      experiment_name=experiment_name,
                      root_output_dir=root_output_dir,
                      **kwargs)
Example #25
def run_federated(
        iterative_process_builder: Callable[...,
                                            tff.templates.IterativeProcess],
        assign_weights_fn: Callable[[Any, tf.keras.Model], None],
        client_epochs_per_round: int,
        client_batch_size: int,
        clients_per_round: int,
        max_batches_per_client: Optional[int] = -1,
        client_datasets_random_seed: Optional[int] = None,
        sequence_length: Optional[int] = 80,
        total_rounds: Optional[int] = 1500,
        experiment_name: Optional[str] = 'federated_shakespeare',
        root_output_dir: Optional[str] = '/tmp/fed_opt',
        **kwargs):
    """Runs an iterative process on a Shakespeare next character prediction task.

  This method will load and pre-process the dataset and construct a model for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `tensorflow_federated.python.research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      a `client_weight_fn`, and returns a `tff.templates.IterativeProcess`. The
      `model_fn` must return a `tff.learning.Model`.
    assign_weights_fn: A function that accepts the server state `S` and a
      `tf.keras.Model`, and updates the weights in the Keras model. This is
      used to perform evaluation with Keras.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    max_batches_per_client: An optional int specifying the number of batches
      taken by each client at each round. If `-1`, the entire client dataset is
      used.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    sequence_length: An int specifying the length of the character sequences
      used for prediction.
    total_rounds: The number of federated training rounds.
    experiment_name: The name of the experiment being run. This will be appended
      to the `root_output_dir` for purposes of writing outputs.
    root_output_dir: The name of the root output directory for writing
      experiment outputs.
    **kwargs: Additional arguments configuring the training loop. For details
      on supported arguments, see
      `tensorflow_federated/python/research/utils/training_utils.py`.
  """

    train_clientdata = shakespeare_dataset.construct_character_level_datasets(
        client_batch_size=client_batch_size,
        client_epochs_per_round=client_epochs_per_round,
        sequence_length=sequence_length,
        max_batches_per_client=max_batches_per_client)
    _, test_dataset = shakespeare_dataset.construct_centralized_datasets()
    test_dataset = test_dataset.cache()

    model_builder = functools.partial(create_shakespeare_model,
                                      sequence_length=sequence_length)

    loss_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

    input_spec = train_clientdata.create_tf_dataset_for_client(
        train_clientdata.client_ids[0]).element_spec

    def client_weight_fn(local_outputs):
        # `num_tokens` is an int64[1] tensor; to use it as a weight we need a
        # float32 scalar.
        return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    training_process = iterative_process_builder(
        tff_model_fn, client_weight_fn=client_weight_fn)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=train_clientdata,
        train_clients_per_round=clients_per_round,
        random_seed=client_datasets_random_seed)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=test_dataset,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    model_builder().summary(print_fn=logging.info)

    training_loop.run(iterative_process=training_process,
                      client_datasets_fn=client_datasets_fn,
                      validation_fn=evaluate_fn,
                      test_fn=evaluate_fn,
                      total_rounds=total_rounds,
                      experiment_name=experiment_name,
                      root_output_dir=root_output_dir,
                      **kwargs)
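Unlike the CIFAR example, this builder is also handed a `client_weight_fn` (the
snippet additionally assumes a module-level `metrics_builder`, defined as in
the surrounding examples). A hedged sketch of a builder accepting that keyword;
`example_weighted_builder` and its optimizer settings are assumptions, not the
original code:

def example_weighted_builder(tff_model_fn, client_weight_fn=None):
    # Each client's model delta is weighted by `client_weight_fn` (here, its
    # token count) when the server averages the updates.
    return tff.learning.build_federated_averaging_process(
        model_fn=tff_model_fn,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1),
        server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
        client_weight_fn=client_weight_fn)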
Example #26
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    train_clientdata = shakespeare_dataset.construct_character_level_datasets(
        FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round,
        sequence_length=FLAGS.sequence_length,
        max_batches_per_client=FLAGS.max_batches_per_client,
        shuffle_buffer_size=0)
    eval_train_dataset, eval_test_dataset = (
        shakespeare_dataset.construct_centralized_datasets())

    loss_fn_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

    input_spec = train_clientdata.create_tf_dataset_for_client(
        train_clientdata.client_ids[0]).element_spec

    if FLAGS.client_weight == 'uniform':

        def client_weight_fn(local_outputs):
            del local_outputs
            return 1.0

    elif FLAGS.client_weight == 'num_tokens':

        def client_weight_fn(local_outputs):
            # `num_tokens` is an int64[1] tensor; to use it as a weight we
            # need a float32 scalar.
            return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

    else:
        raise ValueError(
            'Unsupported client_weight flag [{!s}]. Currently only '
            '`uniform` and `num_tokens` are supported.'.format(
                FLAGS.client_weight))

    training_process = decay_iterative_process_builder.from_flags(
        input_spec=input_spec,
        model_builder=model_builder,
        loss_builder=loss_fn_builder,
        metrics_builder=metrics_builder,
        client_weight_fn=client_weight_fn)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_clientdata,
        FLAGS.clients_per_round,
        random_seed=FLAGS.client_datasets_random_seed)

    assign_weights_fn = adaptive_fed_avg.ServerState.assign_weights_to_keras_model

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=eval_test_dataset,
        model_builder=model_builder,
        loss_builder=loss_fn_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    train_evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=eval_train_dataset,
        model_builder=model_builder,
        loss_builder=loss_fn_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    model_builder().summary(print_fn=logging.info)

    training_loop.run(
        iterative_process=training_process,
        client_datasets_fn=client_datasets_fn,
        validation_fn=evaluate_fn,
        train_eval_fn=train_evaluate_fn,
    )
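Example #27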
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Expected no command-line arguments, '
                             'got: {}'.format(argv))

    model_builder = functools.partial(
        stackoverflow_models.create_recurrent_model,
        vocab_size=FLAGS.vocab_size,
        embedding_size=FLAGS.embedding_size,
        latent_size=FLAGS.latent_size,
        num_layers=FLAGS.num_layers,
        shared_embedding=FLAGS.shared_embedding)

    loss_builder = functools.partial(
        tf.keras.losses.SparseCategoricalCrossentropy, from_logits=True)

    special_tokens = stackoverflow_dataset.get_special_tokens(FLAGS.vocab_size)

    pad_token = special_tokens.pad
    oov_tokens = special_tokens.oov
    eos_token = special_tokens.eos

    def metrics_builder():
        return [
            keras_metrics.MaskedCategoricalAccuracy(name='accuracy_with_oov',
                                                    masked_tokens=[pad_token]),
            keras_metrics.MaskedCategoricalAccuracy(name='accuracy_no_oov',
                                                    masked_tokens=[pad_token] +
                                                    oov_tokens),
            # Note that the BOS token never appears in the ground truth, so it
            # needs no masking here.
            keras_metrics.MaskedCategoricalAccuracy(
                name='accuracy_no_oov_or_eos',
                masked_tokens=[pad_token, eos_token] + oov_tokens),
            keras_metrics.NumBatchesCounter(),
            keras_metrics.NumTokensCounter(masked_tokens=[pad_token])
        ]

    train_set, validation_set, test_set = stackoverflow_dataset.construct_word_level_datasets(
        FLAGS.vocab_size,
        FLAGS.client_batch_size,
        FLAGS.client_epochs_per_round,
        FLAGS.sequence_length,
        FLAGS.max_elements_per_user,
        FLAGS.num_validation_examples,
        max_batches_per_user=FLAGS.max_batches_per_client)

    input_spec = validation_set.element_spec

    if FLAGS.client_weight == 'uniform':

        def client_weight_fn(local_outputs):
            del local_outputs
            return 1.0

    elif FLAGS.client_weight == 'num_tokens':

        def client_weight_fn(local_outputs):
            # `num_tokens` is an int64[1] tensor; to use it as a weight we
            # need a float32 scalar.
            return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)

    else:
        raise ValueError(
            'Unsupported client_weight flag [{!s}]. Currently only '
            '`uniform` and `num_tokens` are supported.'.format(
                FLAGS.client_weight))

    training_process = decay_iterative_process_builder.from_flags(
        input_spec=input_spec,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        client_weight_fn=client_weight_fn)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_set,
        FLAGS.clients_per_round,
        random_seed=FLAGS.client_datasets_random_seed)

    assign_weights_fn = adaptive_fed_avg.ServerState.assign_weights_to_keras_model

    evaluate_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=validation_set,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    test_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=validation_set.concatenate(test_set),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    model_builder().summary(print_fn=logging.info)

    training_loop.run(training_process,
                      client_datasets_fn,
                      evaluate_fn,
                      test_fn=test_fn)
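The masked metrics above exclude padding (and, per variant, OOV and EOS tokens)
from the accuracy computation. Below is a rough sketch of the idea; the
function `masked_accuracy` is a hypothetical illustration of the behaviour, not
the `keras_metrics` implementation itself:

def masked_accuracy(y_true, y_pred_ids, masked_tokens):
    # Positions whose ground-truth token is masked are dropped from both the
    # numerator and the denominator of the accuracy.
    keep = tf.ones_like(y_true, dtype=tf.bool)
    for token in masked_tokens:
        keep = tf.logical_and(keep, tf.not_equal(y_true, token))
    correct = tf.logical_and(keep, tf.equal(y_true, y_pred_ids))
    num_kept = tf.reduce_sum(tf.cast(keep, tf.float32))
    return tf.reduce_sum(tf.cast(correct, tf.float32)) / tf.maximum(num_kept, 1.0)
Example #28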
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    stackoverflow_train, stackoverflow_validation, stackoverflow_test = stackoverflow_lr_dataset.get_stackoverflow_datasets(
        vocab_tokens_size=FLAGS.vocab_tokens_size,
        vocab_tags_size=FLAGS.vocab_tags_size,
        client_batch_size=FLAGS.client_batch_size,
        client_epochs_per_round=FLAGS.client_epochs_per_round,
        max_training_elements_per_user=FLAGS.max_elements_per_user,
        max_batches_per_user=FLAGS.max_batches_per_client,
        num_validation_examples=FLAGS.num_validation_examples)

    input_spec = stackoverflow_train.create_tf_dataset_for_client(
        stackoverflow_train.client_ids[0]).element_spec

    model_builder = functools.partial(
        stackoverflow_lr_models.create_logistic_model,
        vocab_tokens_size=FLAGS.vocab_tokens_size,
        vocab_tags_size=FLAGS.vocab_tags_size)

    loss_builder = functools.partial(tf.keras.losses.BinaryCrossentropy,
                                     from_logits=False,
                                     reduction=tf.keras.losses.Reduction.SUM)

    if FLAGS.client_weight == 'uniform':

        def client_weight_fn(local_outputs):
            del local_outputs
            return 1.0

    elif FLAGS.client_weight == 'num_samples':
        client_weight_fn = None
    else:
        raise ValueError(
            'Unsupported client_weight flag [{!s}]. Currently only '
            '`uniform` and `num_samples` are supported.'.format(
                FLAGS.client_weight))

    training_process = decay_iterative_process_builder.from_flags(
        input_spec=input_spec,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        client_weight_fn=client_weight_fn)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        stackoverflow_train,
        FLAGS.clients_per_round,
        random_seed=FLAGS.client_datasets_random_seed)

    assign_weights_fn = adaptive_fed_avg.ServerState.assign_weights_to_keras_model

    evaluate_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        eval_dataset=stackoverflow_validation,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    test_fn = training_utils.build_evaluate_fn(
        model_builder=model_builder,
        # Use both val and test for symmetry with other experiments, which
        # evaluate on the entire test set.
        eval_dataset=stackoverflow_validation.concatenate(stackoverflow_test),
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    model_builder().summary(print_fn=logging.info)

    training_loop.run(training_process,
                      client_datasets_fn,
                      evaluate_fn,
                      test_fn=test_fn)
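The StackOverflow tag-prediction task above is multi-label: the loss choice
(`BinaryCrossentropy` with `from_logits=False` and a `SUM` reduction) implies
multi-hot tag targets scored against per-tag sigmoid probabilities. A tiny
self-contained illustration with made-up values:

y_true = tf.constant([[1., 0., 1., 0.]])      # multi-hot tag vector
y_pred = tf.constant([[0.9, 0.1, 0.8, 0.2]])  # sigmoid outputs
loss_fn = tf.keras.losses.BinaryCrossentropy(
    from_logits=False, reduction=tf.keras.losses.Reduction.SUM)
print(loss_fn(y_true, y_pred).numpy())  # per-example losses summed, not averaged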
Example #29
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Expected no command-line arguments, '
                         'got: {}'.format(argv))

  emnist_train, emnist_test = emnist_dataset.get_emnist_datasets(
      FLAGS.client_batch_size, FLAGS.client_epochs_per_round, only_digits=False)

  if FLAGS.model == 'cnn':
    model_builder = functools.partial(
        emnist_models.create_conv_dropout_model, only_digits=False)
  elif FLAGS.model == '2nn':
    model_builder = functools.partial(
        emnist_models.create_two_hidden_layer_model, only_digits=False)
  else:
    raise ValueError('Cannot handle model flag [{!s}].'.format(FLAGS.model))

  loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
  metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

  if FLAGS.uniform_weighting:

    def client_weight_fn(local_outputs):
      del local_outputs
      return 1.0

  else:
    client_weight_fn = None  # Defaults to weighting by examples per client.

  def model_fn():
    return tff.learning.from_keras_model(
        model_builder(),
        loss_builder(),
        input_spec=emnist_test.element_spec,
        metrics=metrics_builder())

  if FLAGS.noise_multiplier is not None:
    if not FLAGS.uniform_weighting:
      raise ValueError(
          'Differential privacy is only implemented for uniform weighting.')

    dp_query = tff.utils.build_dp_query(
        clip=FLAGS.clip,
        noise_multiplier=FLAGS.noise_multiplier,
        expected_total_weight=FLAGS.clients_per_round,
        adaptive_clip_learning_rate=FLAGS.adaptive_clip_learning_rate,
        target_unclipped_quantile=FLAGS.target_unclipped_quantile,
        clipped_count_budget_allocation=FLAGS.clipped_count_budget_allocation,
        expected_num_clients=FLAGS.clients_per_round,
        per_vector_clipping=FLAGS.per_vector_clipping,
        model=model_fn())

    weights_type = tff.learning.framework.weights_type_from_model(model_fn)
    aggregation_process = tff.utils.build_dp_aggregate_process(
        weights_type.trainable, dp_query)
  else:
    aggregation_process = None

  server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')
  client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')
  iterative_process = tff.learning.build_federated_averaging_process(
      model_fn=model_fn,
      server_optimizer_fn=server_optimizer_fn,
      client_weight_fn=client_weight_fn,
      client_optimizer_fn=client_optimizer_fn,
      aggregation_process=aggregation_process)

  client_datasets_fn = training_utils.build_client_datasets_fn(
      emnist_train, FLAGS.clients_per_round)

  evaluate_fn = training_utils.build_evaluate_fn(
      eval_dataset=emnist_test,
      model_builder=model_builder,
      loss_builder=loss_builder,
      metrics_builder=metrics_builder,
      assign_weights_to_keras_model=dp_utils.assign_weights_to_keras_model)

  logging.info('Training model:')
  model_builder().summary(print_fn=logging.info)

  training_loop.run(
      iterative_process=iterative_process,
      client_datasets_fn=client_datasets_fn,
      validation_fn=evaluate_fn,
  )
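When `--noise_multiplier` is set, this snippet replaces the default aggregator
with a differentially private one built from `tff.utils.build_dp_query`;
uniform weighting is required so the clipped, noised aggregate has a fixed
per-client sensitivity. A sketch of how it might be launched; the script name
and flag values are assumptions, while the flags themselves appear above:

# Hypothetical invocation:
#   python run_federated_emnist.py --model=cnn --clients_per_round=100 \
#       --uniform_weighting=True --noise_multiplier=1.0 --clip=0.05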
Example #30
def run_federated(
        iterative_process_builder: Callable[...,
                                            tff.templates.IterativeProcess],
        assign_weights_fn: Callable[[Any, tf.keras.Model], None],
        client_epochs_per_round: int,
        client_batch_size: int,
        clients_per_round: int,
        client_datasets_random_seed: Optional[int] = None,
        emnist_model: Optional[str] = 'cnn'):
    """Runs an iterative process on the EMNIST character recognition task.

  This method will load and pre-process the dataset and construct a model for
  the task. It then uses `iterative_process_builder` to create an iterative
  process that it applies to the task, using
  `tensorflow_federated.python.research.utils.training_loop`.

  We assume that the iterative process has the following functional type
  signatures:

    *   `initialize`: `( -> S@SERVER)` where `S` represents the server state.
    *   `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
        represents the server state, `{B*}` represents the client datasets,
        and `T` represents a python `Mapping` object.

  Args:
    iterative_process_builder: A function that accepts a no-arg `model_fn`, and
      returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
      `tff.learning.Model`.
    assign_weights_fn: A function that accepts the server state `S` and a
      `tf.keras.Model`, and updates the weights in the Keras model. This is
      used to perform evaluation with Keras.
    client_epochs_per_round: An integer representing the number of epochs of
      training performed per client in each training round.
    client_batch_size: An integer representing the batch size used on clients.
    clients_per_round: An integer representing the number of clients
      participating in each round.
    client_datasets_random_seed: An optional int used to seed which clients are
      sampled at each round. If `None`, no seed is used.
    emnist_model: A string specifying the model used for character recognition.
      Must be one of `cnn` or `2nn`, corresponding to a CNN model or a densely
      connected two-layer model, respectively.
  """

    emnist_train, emnist_test = emnist_dataset.get_emnist_datasets(
        client_batch_size, client_epochs_per_round, only_digits=False)

    input_spec = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[0]).element_spec

    if emnist_model == 'cnn':
        model_builder = functools.partial(
            emnist_models.create_conv_dropout_model, only_digits=False)
    elif emnist_model == '2nn':
        model_builder = functools.partial(
            emnist_models.create_two_hidden_layer_model, only_digits=False)
    else:
        raise ValueError(
            'Cannot handle model flag [{!s}], must be one of {!s}.'.format(
                emnist_model, EMNIST_MODELS))

    loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
    metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]

    def tff_model_fn() -> tff.learning.Model:
        return tff.learning.from_keras_model(keras_model=model_builder(),
                                             input_spec=input_spec,
                                             loss=loss_builder(),
                                             metrics=metrics_builder())

    training_process = iterative_process_builder(tff_model_fn)

    client_datasets_fn = training_utils.build_client_datasets_fn(
        train_dataset=emnist_train,
        train_clients_per_round=clients_per_round,
        random_seed=client_datasets_random_seed)

    evaluate_fn = training_utils.build_evaluate_fn(
        eval_dataset=emnist_test,
        model_builder=model_builder,
        loss_builder=loss_builder,
        metrics_builder=metrics_builder,
        assign_weights_to_keras_model=assign_weights_fn)

    logging.info('Training model:')
    model_builder().summary(print_fn=logging.info)

    training_loop.run(iterative_process=training_process,
                      client_datasets_fn=client_datasets_fn,
                      validation_fn=evaluate_fn,
                      test_fn=evaluate_fn)
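A hedged usage sketch tying the pieces together, reusing the
`example_iterative_process_builder` sketched after Example #24. The argument
values are illustrative, not from the original experiments, and the
`state.model.assign_weights_to` call assumes the `tff.learning` server state of
this API era:

run_federated(
    iterative_process_builder=example_iterative_process_builder,
    # The assign function must match the server state produced by the builder;
    # for `tff.learning`-built processes, `state.model` carries the weights.
    assign_weights_fn=lambda state, keras_model: state.model.assign_weights_to(
        keras_model),
    client_epochs_per_round=1,
    client_batch_size=20,
    clients_per_round=10,
    client_datasets_random_seed=1,
    emnist_model='cnn')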