Example #1
def iterator(coefficient_fn: COEFFICIENT_FN, model_fn: MODEL_FN,
             client_state_fn: CLIENT_STATE_FN,
             server_optimizer_fn: OPTIMIZER_FN,
             client_optimizer_fn: OPTIMIZER_FN):
    model = model_fn()
    client_state = client_state_fn()

    init_tf = tff.tf_computation(
        lambda: __initialize_server(model_fn, server_optimizer_fn))

    server_state_type = init_tf.type_signature.result
    client_state_type = tff.framework.type_from_tensors(client_state)

    update_server_tf = tff.tf_computation(
        lambda state, weights_delta: __update_server(
            state, weights_delta, model_fn, server_optimizer_fn,
            tf.function(server.update)),
        (server_state_type, server_state_type.model.trainable))

    state_to_message_tf = tff.tf_computation(
        lambda state: __state_to_message(state, tf.function(server.to_message)
                                         ), server_state_type)

    dataset_type = tff.SequenceType(model.input_spec)
    server_message_type = state_to_message_tf.type_signature.result

    update_client_tf = tff.tf_computation(
        lambda dataset, state, message: __update_client(
            dataset, state, message, coefficient_fn, model_fn,
            client_optimizer_fn, tf.function(client.update)),
        (dataset_type, client_state_type, server_message_type))

    federated_server_state_type = tff.type_at_server(server_state_type)
    federated_dataset_type = tff.type_at_clients(dataset_type)
    federated_client_state_type = tff.type_at_clients(client_state_type)

    def init_tff():
        return tff.federated_value(init_tf(), tff.SERVER)

    def next_tff(server_state, datasets, client_states):
        message = tff.federated_map(state_to_message_tf, server_state)
        broadcast = tff.federated_broadcast(message)

        outputs = tff.federated_map(update_client_tf,
                                    (datasets, client_states, broadcast))
        weights_delta = tff.federated_mean(outputs.weights_delta,
                                           weight=outputs.client_weight)

        metrics = model.federated_output_computation(outputs.metrics)

        next_state = tff.federated_map(update_server_tf,
                                       (server_state, weights_delta))

        return next_state, metrics, outputs.client_state

    return tff.templates.IterativeProcess(
        initialize_fn=tff.federated_computation(init_tff),
        next_fn=tff.federated_computation(
            next_tff, (federated_server_state_type, federated_dataset_type,
                       federated_client_state_type)))
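A minimal sketch of how the `tff.templates.IterativeProcess` returned above might be driven in simulation; `client_datasets`, `NUM_ROUNDS`, and the `*_fn` factories are assumed to be defined elsewhere, and all names below are illustrative only.

process = iterator(coefficient_fn, model_fn, client_state_fn,
                   server_optimizer_fn, client_optimizer_fn)

server_state = process.initialize()
# One local state per participating client (hypothetical setup).
client_states = [client_state_fn() for _ in client_datasets]

for round_num in range(NUM_ROUNDS):
    # next() returns the updated server state, aggregated metrics, and the
    # updated per-client states, matching next_tff above.
    server_state, metrics, client_states = process.next(
        server_state, client_datasets, client_states)
    print('round {:2d}, metrics={}'.format(round_num, metrics))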
Example #2
def local_acc(model, all_batches):
    acc = tff.sequence_sum(
        tff.sequence_map(
            tff.federated_computation(lambda b: batch_pre(model, b),
                                      BATCH_TYPE), all_batches))

    # NOTE: acc_l is computed the same way as acc but is never used below.
    acc_l = tff.sequence_map(
        tff.federated_computation(lambda b: batch_pre(model, b), BATCH_TYPE),
        all_batches)
    return acc
Example #3
def evaluator(coefficient_fn: COEFFICIENT_FN, model_fn: MODEL_FN,
              client_state_fn: CLIENT_STATE_FN):
    model = model_fn()
    client_state = client_state_fn()

    dataset_type = tff.SequenceType(model.input_spec)
    client_state_type = tff.framework.type_from_tensors(client_state)
    weights_type = tff.framework.type_from_tensors(
        tff.learning.ModelWeights.from_model(model))

    evaluate_client_tf = tff.tf_computation(
        lambda dataset, state, weights: __evaluate_client(
            dataset, state, weights, coefficient_fn, model_fn,
            tf.function(client.evaluate)),
        (dataset_type, client_state_type, weights_type))

    federated_weights_type = tff.type_at_server(weights_type)
    federated_dataset_type = tff.type_at_clients(dataset_type)
    federated_client_state_type = tff.type_at_clients(client_state_type)

    def evaluate(weights, datasets, client_states):
        broadcast = tff.federated_broadcast(weights)
        outputs = tff.federated_map(evaluate_client_tf,
                                    (datasets, client_states, broadcast))

        confusion_matrix = tff.federated_sum(outputs.confusion_matrix)
        aggregated_metrics = model.federated_output_computation(
            outputs.metrics)
        collected_metrics = tff.federated_collect(outputs.metrics)

        return confusion_matrix, aggregated_metrics, collected_metrics

    return tff.federated_computation(
        evaluate, (federated_weights_type, federated_dataset_type,
                   federated_client_state_type))
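A brief sketch of how the federated computation returned by this evaluator might be invoked in simulation, assuming trained model weights and prepared per-client datasets and states; all names below are illustrative.

evaluate = evaluator(coefficient_fn, model_fn, client_state_fn)
# model_weights is a `tff.learning.ModelWeights` structure, client_datasets a
# list of tf.data.Dataset objects, and client_states a list of per-client
# states (all assumed to exist elsewhere).
confusion_matrix, aggregated_metrics, collected_metrics = evaluate(
    model_weights, client_datasets, client_states)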
Example #4
def validator(
  model_fn: MODEL_FN,
  client_state_fn: CLIENT_STATE_FN
):
  model = model_fn()
  client_state = client_state_fn()

  dataset_type = tff.SequenceType(model.input_spec)
  client_state_type = tff.framework.type_from_tensors(client_state)

  validate_client_tf = tff.tf_computation(
    lambda dataset, state: __validate_client(
      dataset,
      state,
      model_fn,
      tf.function(client.validate)
    ),
    (dataset_type, client_state_type)
  )

  federated_dataset_type = tff.type_at_clients(dataset_type)
  federated_client_state_type = tff.type_at_clients(client_state_type)    

  def validate(datasets, client_states):
    outputs = tff.federated_map(validate_client_tf, (datasets, client_states))
    metrics = model.federated_output_computation(outputs.metrics)

    return metrics

  return tff.federated_computation(
    validate,
    (federated_dataset_type, federated_client_state_type)
  )
Example #5
def validator(coefficient_fn: COEFFICIENT_FN, model_fn: MODEL_FN,
              client_state_fn: CLIENT_STATE_FN):
    model = model_fn()
    client_state = client_state_fn()

    dataset_type = tff.SequenceType(model.input_spec)
    client_state_type = tff.framework.type_from_tensors(client_state)
    weights_type = tff.learning.framework.weights_type_from_model(model)

    validate_client_tf = tff.tf_computation(
        lambda dataset, state, weights: __validate_client(
            dataset, state, weights, coefficient_fn, model_fn,
            tf.function(client.validate)),
        (dataset_type, client_state_type, weights_type))

    federated_weights_type = tff.type_at_server(weights_type)
    federated_dataset_type = tff.type_at_clients(dataset_type)
    federated_client_state_type = tff.type_at_clients(client_state_type)

    def validate(weights, datasets, client_states):
        broadcast = tff.federated_broadcast(weights)
        outputs = tff.federated_map(validate_client_tf,
                                    (datasets, client_states, broadcast))
        metrics = model.federated_output_computation(outputs.metrics)

        return metrics

    return tff.federated_computation(
        validate, (federated_weights_type, federated_dataset_type,
                   federated_client_state_type))
Example #6
def federated_output_computation_from_metrics(
        metrics: List[tf.keras.metrics.Metric]) -> tff.federated_computation:
    """Produces a federated computation for aggregating Keras metrics.

  This can be used to evaluate both Keras and non-Keras models using Keras
  metrics. Aggregates metrics across clients by summing their internal
  variables, producing new metrics with summed internal variables, and calling
  metric.result() on each. See `federated_aggregate_keras_metric` for details.

  Args:
    metrics: A List of `tf.keras.metrics.Metric` to aggregate.

  Returns:
    A `tff.federated_computation` aggregating metrics across clients by summing
    their internal variables, producing new metrics with summed internal
    variables, and calling metric.result() on each.
  """
    # Get a sample of metric variables to use to determine their type.
    sample_metric_variables = read_metric_variables(metrics)

    metric_variable_type_dict = tf.nest.map_structure(
        tf.TensorSpec.from_tensor, sample_metric_variables)
    federated_local_outputs_type = tff.type_at_clients(
        metric_variable_type_dict)

    def federated_output(local_outputs):
        return federated_aggregate_keras_metric(metrics, local_outputs)

    federated_output_computation = tff.federated_computation(
        federated_output, federated_local_outputs_type)
    return federated_output_computation
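A minimal usage sketch for the helper above; `read_metric_variables` and `federated_aggregate_keras_metric` are assumed to be defined elsewhere in the same module, and `client_metric_variables` is an illustrative name for the per-client metric variables gathered during local evaluation.

metrics = [tf.keras.metrics.SparseCategoricalAccuracy(),
           tf.keras.metrics.Mean(name='loss')]
aggregate_metrics = federated_output_computation_from_metrics(metrics)

# In simulation, client_metric_variables would be a list (one entry per
# client) of metric-variable structures matching the metrics above.
aggregated = aggregate_metrics(client_metric_variables)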
Example #7
def local_eval(model, all_batches):
    return tff.sequence_sum(
        tff.sequence_map(
            tff.federated_computation(
                lambda b: batch_loss(model, b), BATCH_TYPE),
            all_batches))
Example #8
def iterator(
  model_fn: MODEL_FN,
  client_state_fn: CLIENT_STATE_FN,
  client_optimizer_fn: OPTIMIZER_FN
):
  model = model_fn()
  client_state = client_state_fn()

  init_tf = tff.tf_computation(
    lambda: ()
  )
  
  server_state_type = init_tf.type_signature.result
  client_state_type = tff.framework.type_from_tensors(client_state)
  dataset_type = tff.SequenceType(model.input_spec)
  
  update_client_tf = tff.tf_computation(
    lambda dataset, state: __update_client(
      dataset,
      state,
      model_fn,
      client_optimizer_fn,
      tf.function(client.update)
    ),
    (dataset_type, client_state_type)
  )
  
  federated_server_state_type = tff.type_at_server(server_state_type)
  federated_dataset_type = tff.type_at_clients(dataset_type)
  federated_client_state_type = tff.type_at_clients(client_state_type)

  def init_tff():
    return tff.federated_value(init_tf(), tff.SERVER)
  
  def next_tff(server_state, datasets, client_states):
    outputs = tff.federated_map(update_client_tf, (datasets, client_states))
    metrics = model.federated_output_computation(outputs.metrics)

    return server_state, metrics, outputs.client_state

  return tff.templates.IterativeProcess(
    initialize_fn=tff.federated_computation(init_tff),
    next_fn=tff.federated_computation(
      next_tff,
      (federated_server_state_type, federated_dataset_type, federated_client_state_type)
    )
  )
Example #9
def local_eval(model, all_batches):
    # Replace `tff.sequence_sum` with `tff.sequence_average()` once implemented.
    return tff.sequence_sum(
        tff.sequence_map(
            tff.federated_computation(lambda b: batch_loss(model, b),
                                      BATCH_TYPE),
            all_batches))
Example #10
def local_eval(model, all_batches):
    # Use `tff.sequence_average` here, once available, to compute the average
    # loss directly.
    return tff.sequence_sum(
        # Map the loss computation over every batch in all_batches; unlike
        # tff.sequence_reduce, tff.sequence_map can be evaluated in parallel
        # rather than sequentially.
        tff.sequence_map(
            tff.federated_computation(lambda b: batch_loss(model, b),
                                      BATCH_TYPE), all_batches))
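In the tutorial these snippets come from, a local evaluation like this is typically lifted to a federated one by broadcasting the model and averaging the per-client results. A sketch follows, assuming `MODEL_TYPE`, `BATCH_TYPE`, and `batch_loss` are defined as in the surrounding code and that `local_eval` is itself wrapped as a `tff.federated_computation`.

SERVER_MODEL_TYPE = tff.FederatedType(MODEL_TYPE, tff.SERVER)
CLIENT_DATA_TYPE = tff.FederatedType(tff.SequenceType(BATCH_TYPE), tff.CLIENTS)


@tff.federated_computation(SERVER_MODEL_TYPE, CLIENT_DATA_TYPE)
def federated_eval(model, data):
    # Broadcast the server model to the clients, evaluate locally on each
    # client's batches, and average the resulting losses.
    return tff.federated_mean(
        tff.federated_map(local_eval, [tff.federated_broadcast(model), data]))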
Example #11
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    channel = grpc.insecure_channel('{}:{}'.format(FLAGS.host, FLAGS.port))
    context = tff.backends.native.create_remote_python_execution_context(
        channels=[channel])
    tff.framework.set_default_context(context)

    print(tff.federated_computation(lambda: 'Hello World')())
Example #12
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    channel = grpc.insecure_channel('{}:{}'.format(FLAGS.host, FLAGS.port))
    remote_executor = tff.framework.RemoteExecutor(channel)
    caching_executor = tff.framework.CachingExecutor(remote_executor)
    lambda_executor = tff.framework.LambdaExecutor(caching_executor)
    tff.framework.set_default_executor(lambda_executor)
    print(tff.federated_computation(lambda: 'Hello World')())
Example #13
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    channel = grpc.insecure_channel('{}:{}'.format(FLAGS.host, FLAGS.port))
    ex = tff.framework.RemoteExecutor(channel)
    ex = tff.framework.CachingExecutor(ex)
    ex = tff.framework.ReferenceResolvingExecutor(ex)
    factory = tff.framework.create_executor_factory(lambda _: ex)
    context = tff.framework.ExecutionContext(factory)
    tff.framework.set_default_context(context)
    print(tff.federated_computation(lambda: 'Hello World')())
Example #14
def build_federated_averaging_process_attacked(
    model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
    stateful_delta_aggregate_fn=build_stateless_mean(),
    client_update_tf=ClientExplicitBoosting(boost_factor=1.0)):
    """Builds the TFF computations for optimization using federated averaging with potentially malicious clients.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`.
    client_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer`, used during local client training.
    server_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer`, used to apply updates to the global model.
    stateful_delta_aggregate_fn: A `tff.Computation` that aggregates model
      deltas placed at `tff.CLIENTS` into an aggregated model delta placed at
      `tff.SERVER`.
    client_update_tf: A `tf.function` that computes the `ClientOutput`.

  Returns:
    A `tff.templates.IterativeProcess`.
  """

    dummy_model_for_metadata = model_fn()

    server_init_tf = build_server_init_fn(
        model_fn, server_optimizer_fn,
        stateful_delta_aggregate_fn.initialize())
    server_state_type = server_init_tf.type_signature.result
    server_update_fn = build_server_update_fn(model_fn, server_optimizer_fn,
                                              server_state_type,
                                              server_state_type.model)
    tf_dataset_type = tff.SequenceType(dummy_model_for_metadata.input_spec)

    client_update_fn = build_client_update_fn(model_fn, client_optimizer_fn,
                                              client_update_tf,
                                              tf_dataset_type,
                                              server_state_type.model)

    federated_server_state_type = tff.FederatedType(server_state_type,
                                                    tff.SERVER)

    federated_dataset_type = tff.FederatedType(tf_dataset_type, tff.CLIENTS)

    run_one_round_tff = build_run_one_round_fn_attacked(
        server_update_fn, client_update_fn, stateful_delta_aggregate_fn,
        dummy_model_for_metadata, federated_server_state_type,
        federated_dataset_type)

    return tff.templates.IterativeProcess(
        initialize_fn=tff.federated_computation(
            lambda: tff.federated_value(server_init_tf(), tff.SERVER)),
        next_fn=run_one_round_tff)
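A brief construction sketch for the attacked process, assuming a `model_fn` is available as in the other examples; the boost factor is only an example value, and the exact `next` signature depends on `build_run_one_round_fn_attacked`, which is defined elsewhere in the module.

attacked_process = build_federated_averaging_process_attacked(
    model_fn,
    client_update_tf=ClientExplicitBoosting(boost_factor=10.0))

# The initial server state is obtained as usual; driving `next` requires the
# argument structure defined by build_run_one_round_fn_attacked.
state = attacked_process.initialize()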
Example #15
def build_federated_averaging_process(
    model_fn,
    client_optimizer_fn,
    server_optimizer_fn=lambda: flars_optimizer.FLARSOptimizer(
        learning_rate=1.0)):
    """Builds the TFF computations for optimization using federated averaging.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`.
    client_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer` for the local client training.
    server_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer` for applying updates on the server.

  Returns:
    A `tff.templates.IterativeProcess`.
  """
    with tf.Graph().as_default():
        dummy_model_for_metadata = model_fn()
    type_signature_grads_norm = tuple(
        weight.dtype for weight in tf.nest.flatten(
            dummy_model_for_metadata.trainable_variables))

    server_init_tf = build_server_init_fn(model_fn, server_optimizer_fn)

    server_state_type = server_init_tf.type_signature.result
    server_update_fn = build_server_update_fn(model_fn, server_optimizer_fn,
                                              server_state_type,
                                              server_state_type.model,
                                              type_signature_grads_norm)

    tf_dataset_type = tff.SequenceType(dummy_model_for_metadata.input_spec)
    client_update_fn = build_client_update_fn(model_fn, client_optimizer_fn,
                                              tf_dataset_type,
                                              server_state_type.model)

    federated_server_state_type = tff.FederatedType(server_state_type,
                                                    tff.SERVER)
    federated_dataset_type = tff.FederatedType(tf_dataset_type, tff.CLIENTS)
    run_one_round_tff = build_run_one_round_fn(server_update_fn,
                                               client_update_fn,
                                               dummy_model_for_metadata,
                                               federated_server_state_type,
                                               federated_dataset_type)

    return tff.templates.IterativeProcess(
        initialize_fn=tff.federated_computation(
            lambda: tff.federated_eval(server_init_tf, tff.SERVER)),
        next_fn=run_one_round_tff)
Example #16
def build_federated_averaging_process(
    model_fn,
    server_optimizer_fn=lambda: flars_optimizer.FLARSOptimizer(
        learning_rate=1.0)):
    """Builds the TFF computations for optimization using federated averaging.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.TrainableModel`.
    server_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer`.

  Returns:
    A `tff.utils.IterativeProcess`.
  """
    dummy_model_for_metadata = model_fn()

    type_signature_grads_norm = tff.NamedTupleType([
        weight.dtype for weight in tf.nest.flatten(
            _get_weights(dummy_model_for_metadata).trainable)
    ])

    server_init_tf = build_server_init_fn(model_fn, server_optimizer_fn)
    server_state_type = server_init_tf.type_signature.result
    server_update_fn = build_server_update_fn(model_fn, server_optimizer_fn,
                                              server_state_type,
                                              server_state_type.model,
                                              type_signature_grads_norm)

    tf_dataset_type = tff.SequenceType(dummy_model_for_metadata.input_spec)
    client_update_fn = build_client_update_fn(model_fn, tf_dataset_type,
                                              server_state_type.model)

    federated_server_state_type = tff.FederatedType(server_state_type,
                                                    tff.SERVER)
    federated_dataset_type = tff.FederatedType(tf_dataset_type, tff.CLIENTS)
    run_one_round_tff = build_run_one_round_fn(server_update_fn,
                                               client_update_fn,
                                               dummy_model_for_metadata,
                                               federated_server_state_type,
                                               federated_dataset_type)

    return tff.utils.IterativeProcess(initialize_fn=tff.federated_computation(
        lambda: tff.federated_value(server_init_tf(), tff.SERVER)),
                                      next_fn=run_one_round_tff)
Example #17
def evaluator(
  model_fn: MODEL_FN,
  client_state_fn: CLIENT_STATE_FN
):
  model = model_fn()
  client_state = client_state_fn()

  dataset_type = tff.SequenceType(model.input_spec)
  client_state_type = tff.framework.type_from_tensors(client_state)

  evaluate_client_tf = tff.tf_computation(
    lambda dataset, state: __evaluate_client(
      dataset,
      state,
      model_fn,
      tf.function(client.evaluate)
    ),
    (dataset_type, client_state_type)
  )

  federated_dataset_type = tff.type_at_clients(dataset_type)
  federated_client_state_type = tff.type_at_clients(client_state_type)    

  def evaluate(datasets, client_states):
    outputs = tff.federated_map(evaluate_client_tf, (datasets, client_states))
    
    confusion_matrix = tff.federated_sum(outputs.confusion_matrix)
    aggregated_metrics = model.federated_output_computation(outputs.metrics)
    collected_metrics = tff.federated_collect(outputs.metrics)

    return confusion_matrix, aggregated_metrics, collected_metrics

  return tff.federated_computation(
    evaluate,
    (federated_dataset_type, federated_client_state_type)
  )
Example #18
NUM_CLIENTS = 10

# NOTE: If the statement below fails, it means that you are
# using an older version of TFF without the high-performance
# executor stack. Call `tff.framework.set_default_executor()`
# instead to use the default reference runtime.
# if six.PY3:
#     tff.framework.set_default_executor(
#         tff.framework.create_local_executor(NUM_CLIENTS))

if six.PY3:
    tff.framework.set_default_executor()

tff.federated_computation(lambda: 'Hello, World!')()

#@test {"output": "ignore"}
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()

emnist_train.output_types, emnist_train.output_shapes

example_dataset = emnist_train.create_tf_dataset_for_client(
    emnist_train.client_ids[0])

example_element = iter(example_dataset).next()

example_element['label'].numpy()

NUM_EPOCHS = 10
BATCH_SIZE = 20
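The constants above are typically fed into a tutorial-style `preprocess` function before training; a minimal sketch, assuming `collections` is imported and the EMNIST element structure shown earlier ('pixels' and 'label' keys).

SHUFFLE_BUFFER = 500  # illustrative value


def preprocess(dataset):
    # Flatten each 28x28 image, pair it with its label, then repeat, shuffle
    # and batch for local training.
    def element_fn(element):
        return collections.OrderedDict(
            x=tf.reshape(element['pixels'], [-1]),
            y=tf.reshape(element['label'], [1]))

    return dataset.repeat(NUM_EPOCHS).map(element_fn).shuffle(
        SHUFFLE_BUFFER).batch(BATCH_SIZE)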
Example #19
def recreate_tutorial(cuda_visible_devices='1'):
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices

    tff.federated_computation(lambda: 'Hello, World!')()
    emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()

    print(f'Number of clients: {len(emnist_train.client_ids)}')

    print(f'Element type structure: {emnist_train.element_type_structure}')

    example_dataset = emnist_train.create_tf_dataset_for_client(
        emnist_train.client_ids[0])

    example_element = next(iter(example_dataset))

    print('Example element label:')
    print(example_element['label'].numpy())

    plt.imshow(example_element['pixels'].numpy(), cmap='gray', aspect='equal')
    plt.grid(False)
    plt.savefig('tmp.png')

    preprocessed_example_dataset = preprocess(example_dataset)

    sample_batch = tf.nest.map_structure(
        lambda x: x.numpy(), next(iter(preprocessed_example_dataset)))

    print('sample batch:')
    print(sample_batch)

    sample_clients = emnist_train.client_ids[0:NUM_CLIENTS]

    federated_train_data = make_federated_data(emnist_train, sample_clients)

    print('Number of client datasets: {l}'.format(l=len(federated_train_data)))
    print('First dataset: {d}'.format(d=federated_train_data[0]))

    def model_fn():
        # We _must_ create a new model here, and _not_ capture it from an external
        # scope. TFF will call this within different graph contexts.
        keras_model = create_keras_model()
        return tff.learning.from_keras_model(
            keras_model,
            input_spec=preprocessed_example_dataset.element_spec,
            loss=tf.keras.losses.SparseCategoricalCrossentropy(),
            metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

    iterative_process = tff.learning.build_federated_averaging_process(
        model_fn,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
        server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))

    print('Iterative process type signature:')
    print(str(iterative_process.initialize.type_signature))

    state = iterative_process.initialize()

    state, metrics = iterative_process.next(state, federated_train_data)
    print('round  1, metrics={}'.format(metrics))

    for round_num in range(2, NUM_ROUNDS):
        state, metrics = iterative_process.next(state, federated_train_data)
        print('round {:2d}, metrics={}'.format(round_num, metrics))

    logdir = "/tmp/logs/scalars/training/"
    summary_writer = tf.summary.create_file_writer(logdir)
    state = iterative_process.initialize()

    with summary_writer.as_default():
        for round_num in range(1, NUM_ROUNDS):
            state, metrics = iterative_process.next(state,
                                                    federated_train_data)
            for name, value in metrics._asdict().items():
                tf.summary.scalar(name, value, step=round_num)
def test():
    ''' Check that everything is in place.'''
    test = tff.federated_computation(lambda: 'Hello, World!')()
    assert test == b'Hello, World!'
    print(test)
Example #21
import warnings

import numpy as np
import six
from six.moves import range
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_federated as tff

warnings.simplefilter('ignore')

tf.compat.v1.enable_v2_behavior()

np.random.seed(0)

if six.PY3:
    tff.framework.set_default_executor(tff.framework.create_local_executor())

tff.federated_computation(lambda: 'Hello')()

emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()

dataset = emnist_train.create_tf_dataset_for_client(emnist_train.client_ids[0])

element = iter(dataset).next()
element['label'].numpy()

NUM_CLIENTS = 10
NUM_EPOCHS = 10
BATCH_SIZE = 20
SHUFFLE_BUFFER = 500


def preprocess(dataset):
Example #22
def build_triehh_process(possible_prefix_extensions: List[str],
                         num_sub_rounds: int,
                         max_num_heavy_hitters: int,
                         max_user_contribution: int,
                         default_terminator: str = '$'):
  """Builds the TFF computations for heavy hitters discovery with TrieHH.

  TrieHH works by interactively keeping track of popular prefixes. In each
  round, the server broadcasts the popular prefixes it has
  discovered so far and the list of `possible_prefix_extensions` to a small
  fraction of selected clients. The select clients sample
  `max_user_contributions` words from their local datasets, and use them to vote
  on character extensions to the broadcasted popular prefixes. Client votes are
  accumulated across `num_sub_rounds` rounds, and then the top
  `max_num_heavy_hitters` extensions are used to extend the already discovered
  prefixes, and the extended prefixes are used in the next round. When an
  already discovered prefix is extended by `default_terminator` it is added to
  the list of discovered heavy hitters.

  Args:
    possible_prefix_extensions: A list containing all the possible extensions to
      learned prefixes. Each extension must be a single-character string.
    num_sub_rounds: The total number of sub rounds to be executed before
      decoding aggregated votes. Must be positive.
    max_num_heavy_hitters: The maximum number of discoverable heavy hitters.
      Must be positive.
    max_user_contribution: The maximum number of examples a user can contribute.
      Must be positive.
    default_terminator: The end of sequence symbol.

  Returns:
    A `tff.utils.IterativeProcess`.
  """

  @tff.tf_computation
  def server_init_tf():
    return ServerState(
        discovered_heavy_hitters=tf.constant([], dtype=tf.string),
        discovered_prefixes=tf.constant([''], dtype=tf.string),
        possible_prefix_extensions=tf.constant(
            possible_prefix_extensions, dtype=tf.string),
        round_num=tf.constant(0, dtype=tf.int32),
        accumulated_votes=tf.zeros(
            dtype=tf.int32,
            shape=[max_num_heavy_hitters,
                   len(possible_prefix_extensions)]))

  # We cannot use server_init_tf.type_signature.result because the
  # discovered_* fields need to have [None] shapes, since they will grow over
  # time.
  server_state_type = (
      tff.to_type(
          ServerState(
              discovered_heavy_hitters=tff.TensorType(
                  dtype=tf.string, shape=[None]),
              discovered_prefixes=tff.TensorType(dtype=tf.string, shape=[None]),
              possible_prefix_extensions=tff.TensorType(
                  dtype=tf.string, shape=[len(possible_prefix_extensions)]),
              round_num=tff.TensorType(dtype=tf.int32, shape=[]),
              accumulated_votes=tff.TensorType(
                  dtype=tf.int32, shape=[None,
                                         len(possible_prefix_extensions)]),
          )))

  sub_round_votes_type = tff.TensorType(
      dtype=tf.int32,
      shape=[max_num_heavy_hitters,
             len(possible_prefix_extensions)])

  @tff.tf_computation(server_state_type, sub_round_votes_type)
  @tf.function
  def server_update_fn(server_state, sub_round_votes):
    server_state = server_update(
        server_state,
        sub_round_votes,
        num_sub_rounds=tf.constant(num_sub_rounds),
        max_num_heavy_hitters=tf.constant(max_num_heavy_hitters),
        default_terminator=tf.constant(default_terminator, dtype=tf.string))
    return server_state

  tf_dataset_type = tff.SequenceType(tf.string)
  discovered_prefixes_type = tff.TensorType(dtype=tf.string, shape=[None])
  round_num_type = tff.TensorType(dtype=tf.int32, shape=[])

  @tff.tf_computation(tf_dataset_type, discovered_prefixes_type, round_num_type)
  @tf.function
  def client_update_fn(tf_dataset, discovered_prefixes, round_num):
    result = client_update(tf_dataset, discovered_prefixes,
                           tf.constant(possible_prefix_extensions), round_num,
                           num_sub_rounds, max_num_heavy_hitters,
                           max_user_contribution)
    return result

  federated_server_state_type = tff.FederatedType(server_state_type, tff.SERVER)
  federated_dataset_type = tff.FederatedType(
      tf_dataset_type, tff.CLIENTS, all_equal=False)

  @tff.federated_computation(federated_server_state_type,
                             federated_dataset_type)
  def run_one_round(server_state, federated_dataset):
    """Orchestration logic for one round of TrieHH computation.

    Args:
      server_state: A `ServerState`.
      federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.

    Returns:
      An updated `ServerState`
    """
    discovered_prefixes = tff.federated_broadcast(
        server_state.discovered_prefixes)
    round_num = tff.federated_broadcast(server_state.round_num)

    client_outputs = tff.federated_map(
        client_update_fn,
        tff.federated_zip([federated_dataset, discovered_prefixes, round_num]))

    accumulated_votes = tff.federated_sum(client_outputs.client_votes)

    server_state = tff.federated_map(server_update_fn,
                                     (server_state, accumulated_votes))

    server_output = tff.federated_value([], tff.SERVER)

    return server_state, server_output

  return tff.utils.IterativeProcess(
      initialize_fn=tff.federated_computation(
          lambda: tff.federated_value(server_init_tf(), tff.SERVER)),
      next_fn=run_one_round)
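A minimal sketch of constructing and driving the TrieHH process in simulation; `client_datasets` is assumed to be a list of `tf.data.Dataset` objects yielding `tf.string` examples, and the parameter values are only illustrative.

triehh_process = build_triehh_process(
    possible_prefix_extensions=list('abcdefghijklmnopqrstuvwxyz'),
    num_sub_rounds=4,
    max_num_heavy_hitters=10,
    max_user_contribution=100)

server_state = triehh_process.initialize()
for _ in range(num_total_rounds):  # num_total_rounds assumed to be defined
    # Each call accumulates client votes; prefixes are only extended every
    # num_sub_rounds calls, as described in the docstring.
    server_state, _ = triehh_process.next(server_state, client_datasets)

# server_state.discovered_heavy_hitters now holds the heavy hitters found so far.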
Example #23
def build_federated_averaging_process(
    model_fn,
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1)):
    """Builds the TFF computations for optimization using federated averaging.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.TrainableModel`.
    server_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer` for server update.
    client_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer` for client update.

  Returns:
    A `tff.utils.IterativeProcess`.
  """

    # TODO(b/144510813): try to remove dependency on the dummy model.
    dummy_model = model_fn()

    @tff.tf_computation
    def server_init_tf():
        model = model_fn()
        server_optimizer = server_optimizer_fn()
        _initialize_optimizer_vars(model, server_optimizer)
        return ServerState(model=model.weights,
                           optimizer_state=server_optimizer.variables())

    server_state_type = server_init_tf.type_signature.result
    model_weights_type = server_state_type.model

    @tff.tf_computation(server_state_type, model_weights_type.trainable)
    def server_update_fn(server_state, model_delta):
        model = model_fn()
        server_optimizer = server_optimizer_fn()
        _initialize_optimizer_vars(model, server_optimizer)
        return server_update(model, server_optimizer, server_state,
                             model_delta)

    tf_dataset_type = tff.SequenceType(dummy_model.input_spec)

    @tff.tf_computation(tf_dataset_type, model_weights_type)
    def client_update_fn(tf_dataset, initial_model_weights):
        model = model_fn()
        client_optimizer = client_optimizer_fn()
        return client_update(model, tf_dataset, initial_model_weights,
                             client_optimizer)

    federated_server_state_type = tff.FederatedType(server_state_type,
                                                    tff.SERVER)
    federated_dataset_type = tff.FederatedType(tf_dataset_type, tff.CLIENTS)

    @tff.federated_computation(federated_server_state_type,
                               federated_dataset_type)
    def run_one_round(server_state, federated_dataset):
        """Orchestration logic for one round of computation.

    Args:
      server_state: A `ServerState`.
      federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.

    Returns:
      A tuple of updated `ServerState` and the result of
      `tff.learning.Model.federated_output_computation`.
    """
        client_model = tff.federated_broadcast(server_state.model)

        client_outputs = tff.federated_map(client_update_fn,
                                           (federated_dataset, client_model))

        weight_denom = client_outputs.client_weight
        round_model_delta = tff.federated_mean(client_outputs.weights_delta,
                                               weight=weight_denom)

        server_state = tff.federated_map(server_update_fn,
                                         (server_state, round_model_delta))

        aggregated_outputs = dummy_model.federated_output_computation(
            client_outputs.model_output)
        aggregated_outputs = tff.federated_zip(aggregated_outputs)

        return server_state, aggregated_outputs

    return tff.utils.IterativeProcess(initialize_fn=tff.federated_computation(
        lambda: tff.federated_value(server_init_tf(), tff.SERVER)),
                                      next_fn=run_one_round)
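The `ServerState` container referenced in this example is defined elsewhere in its module; below is a minimal attrs-style sketch of what such a container typically looks like in this pattern (an assumption, not the module's actual definition).

import attr


@attr.s(eq=False, frozen=True)
class ServerState(object):
    """Structure for state on the server (illustrative sketch only).

    Attributes:
      model: a model weights structure (e.g. `tff.learning.ModelWeights`).
      optimizer_state: the list of server optimizer variables.
    """
    model = attr.ib()
    optimizer_state = attr.ib()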
Example #24
import pandas as pd
import six
import tensorflow as tf
import tensorflow_federated as tff
from sklearn import preprocessing

if six.PY3:
    print('Python3')
    tff.framework.set_default_executor(tff.framework.create_local_executor())


def print_line():
    print('------------------------------------------------------------')


print_line()
print('TensorFlow Version: ', tf.__version__)
print_line()
print('TensorFlow Federated Version: ', tff.__version__)
print_line()
print(
    tff.federated_computation(lambda: 'TensorFlow Federated is working -_-')())

# Load data from file using Pandas
IoT_features = pd.read_csv('/home/user/csv_files/features_2016_10Min.csv',
                           sep=',')
print_line()

############################################################
# Normalization
############################################################
print('Normalization ...')
# Isolate labels from features
IoT_features_labels = IoT_features.pop('device_co')
# Normalize
ary = IoT_features.values
min_max_scaler = preprocessing.MinMaxScaler()
Example #25
def build_triehh_process(
        possible_prefix_extensions: List[str],
        num_sub_rounds: int,
        max_num_prefixes: int,
        threshold: int,
        max_user_contribution: int,
        default_terminator: str = triehh_tf.DEFAULT_TERMINATOR):
    """Builds the TFF computations for heavy hitters discovery with TrieHH.

  TrieHH works by interactively keeping track of popular prefixes. In each
  round, the server broadcasts the popular prefixes it has
  discovered so far and the list of `possible_prefix_extensions` to a small
  fraction of selected clients. The selected clients sample
  `max_user_contribution` words from their local datasets, and use them to vote
  on character extensions to the broadcasted popular prefixes. Client votes are
  accumulated across `num_sub_rounds` rounds, and then the top
  `max_num_prefixes` extensions that get at least `threshold` votes are used to
  extend the already discovered prefixes, and the extended prefixes are used in
  the next round. When an
  already discovered prefix is extended by `default_terminator` it is added to
  the list of discovered heavy hitters.

  Args:
    possible_prefix_extensions: A list containing all the possible extensions to
      learned prefixes. Each extension must be a single-character string. This
      list should not contain the default_terminator.
    num_sub_rounds: The total number of sub rounds to be executed before
      decoding aggregated votes. Must be positive.
    max_num_prefixes: The maximum number of prefixes we can keep in the trie.
      Must be positive.
    threshold: The threshold for heavy hitters and discovered prefixes. Only
      those that get at least `threshold` votes are discovered. Must be positive.
    max_user_contribution: The maximum number of examples a user can contribute.
      Must be positive.
    default_terminator: The end of sequence symbol.

  Returns:
    A `tff.templates.IterativeProcess`.

  Raises:
    ValueError: If possible_prefix_extensions contains default_terminator.
  """
    if default_terminator in possible_prefix_extensions:
        raise ValueError(
            'default_terminator should not appear in possible_prefix_extensions'
        )

    # Append `default_terminator` to `possible_prefix_extensions` to make sure it
    # is the last item in the list.
    possible_prefix_extensions.append(default_terminator)

    @tff.tf_computation
    def server_init_tf():
        return ServerState(
            discovered_heavy_hitters=tf.constant([], dtype=tf.string),
            heavy_hitters_frequencies=tf.constant([], dtype=tf.float64),
            discovered_prefixes=tf.constant([''], dtype=tf.string),
            round_num=tf.constant(0, dtype=tf.int32),
            accumulated_votes=tf.zeros(
                dtype=tf.int32,
                shape=[max_num_prefixes,
                       len(possible_prefix_extensions)]),
            accumulated_weights=tf.constant(0, dtype=tf.int32))

    # We cannot use server_init_tf.type_signature.result because the
    # discovered_* fields need to have [None] shapes, since they will grow over
    # time.
    server_state_type = (tff.to_type(
        ServerState(
            discovered_heavy_hitters=tff.TensorType(dtype=tf.string,
                                                    shape=[None]),
            heavy_hitters_frequencies=tff.TensorType(dtype=tf.float64,
                                                     shape=[None]),
            discovered_prefixes=tff.TensorType(dtype=tf.string, shape=[None]),
            round_num=tff.TensorType(dtype=tf.int32, shape=[]),
            accumulated_votes=tff.TensorType(
                dtype=tf.int32, shape=[None,
                                       len(possible_prefix_extensions)]),
            accumulated_weights=tff.TensorType(dtype=tf.int32, shape=[]),
        )))

    sub_round_votes_type = tff.TensorType(
        dtype=tf.int32,
        shape=[max_num_prefixes,
               len(possible_prefix_extensions)])
    sub_round_weight_type = tff.TensorType(dtype=tf.int32, shape=[])

    @tff.tf_computation(server_state_type, sub_round_votes_type,
                        sub_round_weight_type)
    def server_update_fn(server_state, sub_round_votes, sub_round_weight):
        return server_update(server_state,
                             tf.constant(possible_prefix_extensions),
                             sub_round_votes,
                             sub_round_weight,
                             num_sub_rounds=tf.constant(num_sub_rounds),
                             max_num_prefixes=tf.constant(max_num_prefixes),
                             threshold=tf.constant(threshold))

    tf_dataset_type = tff.SequenceType(tf.string)
    discovered_prefixes_type = tff.TensorType(dtype=tf.string, shape=[None])
    round_num_type = tff.TensorType(dtype=tf.int32, shape=[])

    @tff.tf_computation(tf_dataset_type, discovered_prefixes_type,
                        round_num_type)
    def client_update_fn(tf_dataset, discovered_prefixes, round_num):
        return client_update(tf_dataset, discovered_prefixes,
                             tf.constant(possible_prefix_extensions),
                             round_num, num_sub_rounds, max_num_prefixes,
                             max_user_contribution)

    federated_server_state_type = tff.FederatedType(server_state_type,
                                                    tff.SERVER)
    federated_dataset_type = tff.FederatedType(tf_dataset_type,
                                               tff.CLIENTS,
                                               all_equal=False)

    @tff.federated_computation(federated_server_state_type,
                               federated_dataset_type)
    def run_one_round(server_state, federated_dataset):
        """Orchestration logic for one round of TrieHH computation.

    Args:
      server_state: A `ServerState`.
      federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.

    Returns:
      An updated `ServerState`
    """
        discovered_prefixes = tff.federated_broadcast(
            server_state.discovered_prefixes)
        round_num = tff.federated_broadcast(server_state.round_num)

        client_outputs = tff.federated_map(
            client_update_fn,
            (federated_dataset, discovered_prefixes, round_num))

        accumulated_votes = tff.federated_sum(client_outputs.client_votes)

        accumulated_weights = tff.federated_sum(client_outputs.client_weight)

        server_state = tff.federated_map(
            server_update_fn,
            (server_state, accumulated_votes, accumulated_weights))

        server_output = tff.federated_value([], tff.SERVER)

        return server_state, server_output

    return tff.templates.IterativeProcess(
        initialize_fn=tff.federated_computation(
            lambda: tff.federated_eval(server_init_tf, tff.SERVER)),
        next_fn=run_one_round)
Example #26
# tutorial: https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification
import os
import warnings
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# check that environment is correctly set up
warnings.simplefilter('ignore')
tf.compat.v1.enable_v2_behavior()
np.random.seed(0)
tff.framework.set_default_executor(tff.framework.create_local_executor())

assert tff.federated_computation(lambda: 'Hello, World!')() == b'Hello, World!'

# variables are tff.simulation.ClientData objects
(emnist_train, emnist_test) = tff.simulation.datasets.emnist.load_data()

assert len(emnist_train.client_ids) == 3383
assert emnist_train.element_type_structure == collections.OrderedDict([
    ('label', tf.TensorSpec(shape=(), dtype=tf.int32, name=None)),
    ('pixels', tf.TensorSpec(shape=(28, 28), dtype=tf.float32, name=None))
])

# creates a new tf.data.Dataset containing the client[0] training examples
example_dataset = emnist_train.create_tf_dataset_for_client(
    emnist_train.client_ids[0])

# test for specific datum