Code example #1
    def run_one_round(server_state, federated_dataset):
        """Orchestration logic for one round of TrieHH computation.

        Args:
          server_state: A `ServerState`.
          federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.

        Returns:
          An updated `ServerState`.
        """
        discovered_prefixes = tff.federated_broadcast(
            server_state.discovered_prefixes)
        round_num = tff.federated_broadcast(server_state.round_num)

        client_outputs = tff.federated_map(
            client_update_fn,
            (federated_dataset, discovered_prefixes, round_num))

        accumulated_votes = tff.federated_sum(client_outputs.client_votes)

        accumulated_weights = tff.federated_sum(client_outputs.client_weight)

        server_state = tff.federated_map(
            server_update_fn,
            (server_state, accumulated_votes, accumulated_weights))

        server_output = tff.federated_value([], tff.SERVER)

        return server_state, server_output
Code example #2
File: tff_gans.py  Project: uu0316/federated
    def run_one_round(server_state, server_gen_inputs, client_gen_inputs,
                      client_real_data):
        """The `tff.Computation` to be returned."""
        # TODO(b/131429028): The federated_zip should be automatic.
        from_server = tff.federated_zip(
            gan_training_tf_fns.FromServer(
                generator_weights=server_state.generator_weights,
                discriminator_weights=server_state.discriminator_weights))
        client_input = tff.federated_broadcast(from_server)
        client_outputs = tff.federated_map(
            client_computation,
            (client_gen_inputs, client_real_data, client_input))

        if gan.dp_averaging_fn is None:
            # Not using differential privacy.
            new_dp_averaging_state = server_state.dp_averaging_state
            averaged_discriminator_weights_delta = tff.federated_mean(
                client_outputs.discriminator_weights_delta,
                weight=client_outputs.update_weight)
        else:
            # Using differential privacy. Note that the weight argument is set to None
            # here. This is because the DP aggregation code explicitly does not do
            # weighted aggregation. (If weighted aggregation is desired, differential
            # privacy needs to be turned off.)
            new_dp_averaging_state, averaged_discriminator_weights_delta = (
                gan.dp_averaging_fn(server_state.dp_averaging_state,
                                    client_outputs.discriminator_weights_delta,
                                    weight=None))

        # TODO(b/131085687): Perhaps reconsider the choice to also use
        # ClientOutput to hold the aggregated client output.
        aggregated_client_output = gan_training_tf_fns.ClientOutput(
            discriminator_weights_delta=averaged_discriminator_weights_delta,
            # We don't actually need the aggregated update_weight, but
            # this keeps the types of the non-aggregated and aggregated
            # client_output the same, which is convenient. And I can
            # imagine wanting this.
            update_weight=tff.federated_sum(client_outputs.update_weight),
            counters=tff.federated_sum(client_outputs.counters))

        # TODO(b/131839522): This federated_zip shouldn't be needed.
        aggregated_client_output = tff.federated_zip(aggregated_client_output)

        server_state = tff.federated_map(
            server_computation,
            (server_state, server_gen_inputs, aggregated_client_output,
             new_dp_averaging_state))
        return server_state
Code example #3
  def foo(x):

    @tff.tf_computation(element_type)
    def local_sum(nums):
      return tf.math.reduce_sum(nums)

    return tff.federated_sum(tff.federated_map(local_sum, x))
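
For context, here is a self-contained sketch of how a helper like example #3 is typically wrapped in a `tff.federated_computation`. The concrete `element_type` (a rank-1 `tf.int32` tensor) and the `tff.type_at_clients` spelling are assumptions and may differ between TFF releases:

    import tensorflow as tf
    import tensorflow_federated as tff

    # Assumed element type: each client holds a rank-1 int32 tensor.
    element_type = tff.TensorType(tf.int32, [None])

    @tff.federated_computation(tff.type_at_clients(element_type))
    def foo(x):

      @tff.tf_computation(element_type)
      def local_sum(nums):
        return tf.math.reduce_sum(nums)

      return tff.federated_sum(tff.federated_map(local_sum, x))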
Code example #4
    def run_one_round(server_state, federated_dataset, client_states):
        """Orchestration logic for one round of computation.

        Args:
          server_state: A `stateful_fedavg_tf.ServerState`.
          federated_dataset: A federated `tf.data.Dataset` with placement
            `tff.CLIENTS`.
          client_states: A federated `stateful_fedavg_tf.ClientState`.

        Returns:
          A tuple of updated `ServerState` and `tf.Tensor` of average loss.
        """
        server_message = tff.federated_map(server_message_fn, server_state)
        server_message_at_client = tff.federated_broadcast(server_message)

        client_outputs = tff.federated_map(
            client_update_fn,
            (federated_dataset, client_states, server_message_at_client))

        weight_denom = client_outputs.client_weight
        round_model_delta = tff.federated_mean(client_outputs.weights_delta,
                                               weight=weight_denom)
        total_iters_count = tff.federated_sum(
            client_outputs.client_state.iters_count)
        server_state = tff.federated_map(
            server_update_fn,
            (server_state, round_model_delta, total_iters_count))
        round_loss_metric = tff.federated_mean(client_outputs.model_output,
                                               weight=weight_denom)

        return server_state, round_loss_metric, client_outputs.client_state
Code example #5
File: computations.py  Project: tensorflow/federated
def evaluation(
        server_state: int,
        client_data: tf.data.Dataset) -> collections.OrderedDict[str, Any]:
    """Computes the sum of all the integers on the clients.

    Computes the sum of all the integers on the clients and returns the following
    metrics:

    * `sum_client_data.METRICS_TOTAL_SUM`: The sum of all the client_data on the
      clients.

    Args:
      server_state: The server state.
      client_data: The data on the clients.

    Returns:
      The evaluation metrics.
    """
    del server_state  # Unused.
    client_sums = tff.federated_map(_sum_dataset, client_data)
    total_sum = tff.federated_sum(client_sums)
    metrics = collections.OrderedDict([
        (METRICS_TOTAL_SUM, total_sum),
    ])
    return metrics
Code example #6
File: computations.py  Project: tensorflow/federated
def train(
    server_state: int, client_data: tf.data.Dataset
) -> Tuple[int, collections.OrderedDict[str, Any]]:
    """Computes the sum of all the integers on the clients.

    Computes the sum of all the integers on the clients, updates the server state,
    and returns the updated server state and the following metrics:

    * `sum_client_data.METRICS_TOTAL_SUM`: The sum of all the client_data on the
      clients.

    Args:
      server_state: The server state.
      client_data: The data on the clients.

    Returns:
      A tuple of the updated server state and the train metrics.
    """
    client_sums = tff.federated_map(_sum_dataset, client_data)
    total_sum = tff.federated_sum(client_sums)
    updated_state = tff.federated_map(_sum_integers, (server_state, total_sum))
    metrics = collections.OrderedDict([
        (METRICS_TOTAL_SUM, total_sum),
    ])
    return updated_state, metrics
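
Examples #5 and #6 rely on two helpers that are not shown. A plausible, hypothetical reconstruction is below; the exact types in the original project may differ:

    import tensorflow as tf
    import tensorflow_federated as tff

    # Hypothetical: reduce a client's integer dataset to a single sum.
    @tff.tf_computation(tff.SequenceType(tf.int32))
    def _sum_dataset(dataset):
      return dataset.reduce(tf.constant(0, tf.int32), tf.add)

    # Hypothetical: add the aggregated client sum to the server state.
    @tff.tf_computation(tf.int32, tf.int32)
    def _sum_integers(a, b):
      return a + b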
Code example #7
  def next_fn(state, deltas, weights):

    @tff.tf_computation(model_update_type)
    def clip_by_global_norm(update):
      clipped_update, global_norm = tf.clip_by_global_norm(
          tf.nest.flatten(update), tf.constant(clip_norm))
      was_clipped = tf.cond(
          tf.greater(global_norm, tf.constant(clip_norm)),
          lambda: tf.constant(1),
          lambda: tf.constant(0),
      )
      clipped_update = tf.nest.pack_sequence_as(update, clipped_update)
      return clipped_update, global_norm, was_clipped

    clipped_deltas, client_norms, client_was_clipped = tff.federated_map(
        clip_by_global_norm, deltas)

    return collections.OrderedDict(
        state=state,
        result=tff.federated_mean(clipped_deltas, weight=weights),
        measurements=tff.federated_zip(
            NormClippedAggregationMetrics(
                max_global_norm=tff.utils.federated_max(client_norms),
                num_clipped=tff.federated_sum(client_was_clipped),
            )))
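
The metrics container and the model update type referenced in example #7 are defined elsewhere in that project. A hypothetical reconstruction, with concrete shapes chosen only for illustration:

    import collections
    import tensorflow as tf
    import tensorflow_federated as tff

    # Hypothetical metrics container matching the fields used above.
    NormClippedAggregationMetrics = collections.namedtuple(
        'NormClippedAggregationMetrics', ['max_global_norm', 'num_clipped'])

    # Hypothetical model update type: a flat structure of float tensors.
    model_update_type = tff.StructType([
        tff.TensorType(tf.float32, [10]),
        tff.TensorType(tf.float32, [5]),
    ])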
Code example #8
    def run_one_round(server_state, server_gen_inputs, client_gen_inputs,
                      client_real_data):
        """The `tff.Computation` to be returned."""
        from_server = gan_training_tf_fns.FromServer(
            generator_weights=server_state.generator_weights,
            discriminator_weights=server_state.discriminator_weights)
        client_input = tff.federated_broadcast(from_server)
        client_outputs = tff.federated_map(
            client_computation,
            (client_gen_inputs, client_real_data, client_input))

        if gan.dp_averaging_fn is None:
            # Not using differential privacy.
            new_dp_averaging_state = server_state.dp_averaging_state
            averaged_discriminator_weights_delta = tff.federated_mean(
                client_outputs.discriminator_weights_delta,
                weight=client_outputs.update_weight)
        else:
            # Using differential privacy. Note that the weight argument is set to
            # a constant 1.0 here; the underlying AggregationProcess ignores the
            # parameter and performs no weighting.
            ignored_weight = tff.federated_value(1.0, tff.CLIENTS)
            aggregation_output = gan.dp_averaging_fn.next(
                server_state.dp_averaging_state,
                client_outputs.discriminator_weights_delta,
                weight=ignored_weight)
            new_dp_averaging_state = aggregation_output.state
            averaged_discriminator_weights_delta = aggregation_output.result

        # TODO(b/131085687): Perhaps reconsider the choice to also use
        # ClientOutput to hold the aggregated client output.
        aggregated_client_output = gan_training_tf_fns.ClientOutput(
            discriminator_weights_delta=averaged_discriminator_weights_delta,
            # We don't actually need the aggregated update_weight, but
            # this keeps the types of the non-aggregated and aggregated
            # client_output the same, which is convenient. And I can
            # imagine wanting this.
            update_weight=tff.federated_sum(client_outputs.update_weight),
            counters=tff.federated_sum(client_outputs.counters))

        server_computation = build_server_computation(
            gan, server_state.type_signature.member, client_output_type)
        server_state = tff.federated_map(
            server_computation,
            (server_state, server_gen_inputs, aggregated_client_output,
             new_dp_averaging_state))
        return server_state
Code example #9
    def run_one_round(server_state, server_gen_inputs, client_gen_inputs,
                      client_real_data):
        """The `tff.Computation` to be returned."""
        from_server = gan_training_tf_fns.FromServer(
            generator_weights=server_state.generator_weights,
            discriminator_weights=server_state.discriminator_weights)
        client_input = tff.federated_broadcast(from_server)
        client_outputs = tff.federated_map(
            client_computation,
            (client_gen_inputs, client_real_data, client_input))

        # Note that weight goes unused here if the aggregation involves
        # differential privacy; the underlying AggregationProcess doesn't take the
        # parameter, as it weights the clients uniformly.
        if gan.aggregation_process.is_weighted:
            aggregation_output = gan.aggregation_process.next(
                server_state.aggregation_state,
                client_outputs.discriminator_weights_delta,
                client_outputs.update_weight)
        else:
            aggregation_output = gan.aggregation_process.next(
                server_state.aggregation_state,
                client_outputs.discriminator_weights_delta)

        new_aggregation_state = aggregation_output.state
        averaged_discriminator_weights_delta = aggregation_output.result

        # TODO(b/131085687): Perhaps reconsider the choice to also use
        # ClientOutput to hold the aggregated client output.
        aggregated_client_output = gan_training_tf_fns.ClientOutput(
            discriminator_weights_delta=averaged_discriminator_weights_delta,
            # We don't actually need the aggregated update_weight, but
            # this keeps the types of the non-aggregated and aggregated
            # client_output the same, which is convenient. And I can
            # imagine wanting this.
            update_weight=tff.federated_sum(client_outputs.update_weight),
            counters=tff.federated_sum(client_outputs.counters))

        server_computation = build_server_computation(
            gan, server_state.type_signature.member, client_output_type,
            gan.aggregation_process.state_type.member)
        server_state = tff.federated_map(
            server_computation,
            (server_state, server_gen_inputs, aggregated_client_output,
             new_aggregation_state))
        return server_state
Code example #10
  def evaluate(datasets, client_states):
    outputs = tff.federated_map(evaluate_client_tf, (datasets, client_states))
    
    confusion_matrix = tff.federated_sum(outputs.confusion_matrix)
    aggregated_metrics = model.federated_output_computation(outputs.metrics)
    collected_metrics = tff.federated_collect(outputs.metrics)

    return confusion_matrix, aggregated_metrics, collected_metrics
Code example #11
 def next_fn(state, value):
   one_at_clients = tff.federated_value(1, tff.CLIENTS)
   dp_sum = self._dp_sum_process.next(state, value)
   summed_one = tff.federated_sum(one_at_clients)
   return tff.templates.MeasuredProcessOutput(
       state=dp_sum.state,
       result=tff.federated_map(div, (dp_sum.result, summed_one)),
       measurements=dp_sum.measurements)
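
The `div` helper used in example #11 is not shown. A minimal sketch of what it could look like, assuming a scalar float sum and an integer client count (the name comes from the snippet, the types are assumptions):

    import tensorflow as tf
    import tensorflow_federated as tff

    # Hypothetical: divide the DP-protected sum by the number of clients
    # that contributed, producing an unweighted mean.
    @tff.tf_computation(tf.float32, tf.int32)
    def div(total, count):
      return total / tf.cast(count, tf.float32)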
Code example #12
 def next_fn(state, value, weight):
     weighted_values = tff.federated_map(_mul, (value, weight))
     summed_value = tff.federated_sum(weighted_values)
     normalized_value = tff.federated_map(_div, (summed_value, state))
     measurements = tff.federated_value((), tff.SERVER)
     return tff.templates.MeasuredProcessOutput(
         state=state,
         result=normalized_value,
         measurements=measurements)
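
The `_mul` and `_div` helpers in example #12 are likewise defined elsewhere. One plausible spelling, assuming scalar float values and weights (in the snippet, `state` plays the role of the total weight):

    import tensorflow_federated as tff
    import tensorflow as tf

    @tff.tf_computation(tf.float32, tf.float32)
    def _mul(value, weight):
      # Scale the client value by its weight before summation.
      return value * weight

    @tff.tf_computation(tf.float32, tf.float32)
    def _div(summed_value, total_weight):
      # Normalize the weighted sum by the total weight held in the state.
      return summed_value / total_weight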
Code example #13
    def federated_mean_masked(value, weight):
        """Computes federated weighted mean masking out zeros elementwize.

    Masking out zero elements essentially changes the denominator of the
    mean by not counting zero values.

    Args:
      value: The federated value which mean is to be computed.
      weight: The federated weight to be used in a weighted mean.

    Returns:
      A federated weighted mean of the value with omitted zeros elementwise.
    """
        weighted_value = tff.federated_map(_multiply_by_weight,
                                           (value, weight))
        weighted_numerator = tff.federated_sum(weighted_value)
        weighted_mask_denominator = tff.federated_sum(
            tff.federated_map(_create_weighted_mask, (value, weight)))
        return tff.federated_map(
            _divide_no_nan, (weighted_numerator, weighted_mask_denominator))
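
Example #13 assumes three local helpers. A hedged sketch of how the masked denominator can be built, assuming `value` is a float tensor of a fixed shape (the shape and names are illustrative only):

    import tensorflow as tf
    import tensorflow_federated as tff

    # Assumed element type for `value`; any nested structure of floats works.
    value_type = tff.TensorType(tf.float32, [3])

    @tff.tf_computation(value_type, tf.float32)
    def _multiply_by_weight(value, weight):
      return tf.nest.map_structure(lambda v: v * weight, value)

    @tff.tf_computation(value_type, tf.float32)
    def _create_weighted_mask(value, weight):
      # Contribute weight only where this client's value is nonzero.
      return tf.nest.map_structure(
          lambda v: tf.cast(tf.not_equal(v, 0.0), tf.float32) * weight, value)

    @tff.tf_computation(value_type, value_type)
    def _divide_no_nan(numerator, denominator):
      # Positions with a zero denominator (no nonzero contributions) yield 0.
      return tf.nest.map_structure(tf.math.divide_no_nan, numerator, denominator)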
Code example #14
def aggregate_metrics_across_clients(metrics):
    global metrics_name
    output = collections.OrderedDict()

    for metric in metrics_name:
        if metric == 'num_examples':
            output[metric] = tff.federated_sum(getattr(metrics, metric))
            output['per_client/' + metric] = tff.federated_collect(
                getattr(metrics, metric))
        else:
            output[metric] = tff.federated_mean(getattr(metrics, metric),
                                                metrics.num_examples)
            output['per_client/' + metric] = tff.federated_collect(
                getattr(metrics, metric))
    return output
Code example #15
        def run_one_inner_loop_weights_computation(prob):
            """Orchestration logic for one round of computation.
            Args:
              prob: Probability of each client to communicate update.
            Returns:
            A tuple of updated `Probabilities` and `tf.float32` of rescaling factor.
            """

            prob_message = create_prob_message_on_clients(prob)
            prob_aggreg = tff.federated_sum(prob_message)
            rescaling_factor_master = compute_rescaling_on_master(prob_aggreg)
            rescaling_factor_clients = tff.federated_broadcast(
                rescaling_factor_master)
            prob = rescale_prob_on_clients(prob, rescaling_factor_clients)

            return prob, rescaling_factor_master
Code example #16
        def run_gradient_computation_round(server_state, federated_dataset):
            """Orchestration logic for one round of gradient computation.
            Args:
              server_state: A `ServerState`.
              federated_dataset: A federated `tf.data.Dataset` with placement
                `tff.CLIENTS`.
            Returns:
            A tuple of updated `tf.Tensor` of clients initial probability and `ClientOutput`.
            """
            server_message = tff.federated_map(server_message_fn, server_state)
            server_message_at_client = tff.federated_broadcast(server_message)

            client_outputs = tff.federated_map(
                client_update_fn,
                (federated_dataset, server_message_at_client))

            update_norm_sum_weighted = tff.federated_sum(
                client_outputs.update_norm_weighted)
            norm_sum_clients_weighted = tff.federated_broadcast(
                update_norm_sum_weighted)

            prob_init = scale_on_clients(client_outputs.update_norm_weighted,
                                         norm_sum_clients_weighted)
            return prob_init, client_outputs
Code example #17
 def foo(x):
     return tff.federated_sum(x)
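
A minimal usage sketch for example #17, assuming `int32` client values; the printed type signature is shown approximately in the comment:

    import tensorflow as tf
    import tensorflow_federated as tff

    @tff.federated_computation(tff.type_at_clients(tf.int32))
    def foo(x):
        return tff.federated_sum(x)

    # str(foo.type_signature) is roughly '({int32}@CLIENTS -> int32@SERVER)'.
    print(foo.type_signature)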
Code example #18
def aggregate_mnist_metrics_across_clients(metrics):
  return {
      'num_examples': tff.federated_sum(metrics.num_examples),
      'loss': tff.federated_mean(metrics.loss, metrics.num_examples),
      'accuracy': tff.federated_mean(metrics.accuracy, metrics.num_examples)
  }
Code example #19
 def map_add_one_and_sum(federated_arg):
     return tff.federated_sum(tff.federated_map(add_one, federated_arg))
Code example #20
    def foo(x):
        @tff.tf_computation(element_type)
        def local_sum(nums):
            return nums.reduce(0, lambda x, y: x + y)

        return tff.federated_sum(tff.federated_map(local_sum, x))
Code example #21
    def run_one_round(server_state, server_gen_inputs, client_gen_inputs,
                      client_real_data):
        """The `tff.Computation` to be returned."""

        from_server = gan_training_tf_fns.FromServer(
            generator_weights=server_state.generator_weights,
            discriminator_weights=server_state.discriminator_weights,
            state_gen_optimizer_weights=server_state.
            state_gen_optimizer_weights,
            state_disc_optimizer_weights=server_state.
            state_disc_optimizer_weights,
            counters=server_state.counters)
        client_input = tff.federated_broadcast(from_server)
        client_outputs = tff.federated_map(
            client_computation,
            (client_gen_inputs, client_real_data, client_input))

        if gan.dp_averaging_fn is None:
            # Not using differential privacy.
            new_dp_averaging_state = server_state.dp_averaging_state
            averaged_discriminator_weights_delta = tff.federated_mean(
                client_outputs.discriminator_weights_delta,
                weight=client_outputs.update_weight_G)
            averaged_generator_weights_delta = tff.federated_mean(
                client_outputs.generator_weights_delta,
                weight=client_outputs.update_weight_G)
            averaged_gen_opt_delta = tff.federated_mean(
                client_outputs.state_gen_opt_delta,
                weight=client_outputs.update_weight_G)
            averaged_disc_opt_delta = tff.federated_mean(
                client_outputs.state_disc_opt_delta,
                weight=client_outputs.update_weight_G)
        else:
            # Using differential privacy. Note that the weight argument is set to None
            # here. This is because the DP aggregation code explicitly does not do
            # weighted aggregation. (If weighted aggregation is desired, differential
            # privacy needs to be turned off.)
            new_dp_averaging_state, averaged_discriminator_weights_delta = (
                gan.dp_averaging_fn(server_state.dp_averaging_state,
                                    client_outputs.discriminator_weights_delta,
                                    weight=None))
            # Only the discriminator delta goes through DP averaging above; the other
            # deltas referenced below are averaged without weights here (an assumption).
            averaged_generator_weights_delta = tff.federated_mean(
                client_outputs.generator_weights_delta)
            averaged_gen_opt_delta = tff.federated_mean(
                client_outputs.state_gen_opt_delta)
            averaged_disc_opt_delta = tff.federated_mean(
                client_outputs.state_disc_opt_delta)

        aggregated_client_output = gan_training_tf_fns.ClientOutput(
            discriminator_weights_delta=averaged_discriminator_weights_delta,
            generator_weights_delta=averaged_generator_weights_delta,
            state_gen_opt_delta=averaged_gen_opt_delta,
            state_disc_opt_delta=averaged_disc_opt_delta,
            # We don't actually need the aggregated update_weight, but
            # this keeps the types of the non-aggregated and aggregated
            # client_output the same, which is convenient. And I can
            # imagine wanting this.
            update_weight=tff.federated_sum(client_outputs.update_weight_D),
            update_weight_D=tff.federated_sum(client_outputs.update_weight_D),
            update_weight_G=tff.federated_sum(client_outputs.update_weight_G),
            counters=tff.federated_sum(client_outputs.counters))

        server_state = tff.federated_map(
            server_computation,
            (server_state, server_gen_inputs, aggregated_client_output,
             new_dp_averaging_state))
        return server_state
Code example #22
 def aggregator(federated_values, weight=None):
   del weight
   return tff.federated_sum(federated_values)
Code example #23
def aggregate_mnist_metrics_across_clients(metrics):
    return collections.OrderedDict(
        num_examples=tff.federated_sum(metrics.num_examples),
        loss=tff.federated_mean(metrics.loss, metrics.num_examples),
        accuracy=tff.federated_mean(metrics.accuracy, metrics.num_examples))
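
To turn an aggregation like examples #18 and #23 into an executable federated computation, the per-client metrics structure needs a federated type. A hedged sketch, assuming `float32` metrics and that the type-constructor spellings match the TFF release in use:

    import collections
    import tensorflow as tf
    import tensorflow_federated as tff

    # Assumed structure of the metrics each client produces locally.
    metrics_type = tff.StructWithPythonType(
        [('num_examples', tf.float32),
         ('loss', tf.float32),
         ('accuracy', tf.float32)],
        collections.OrderedDict)

    @tff.federated_computation(tff.type_at_clients(metrics_type))
    def federated_aggregate_metrics(metrics):
      return aggregate_mnist_metrics_across_clients(metrics)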
Code example #24
 def sum_arg(x):
   return tff.federated_sum(tff.federated_broadcast(x))
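
A usage sketch for example #24: broadcasting a server-placed value and then summing it effectively multiplies the value by the number of participating clients. Assuming an `int32` server value:

    import tensorflow as tf
    import tensorflow_federated as tff

    @tff.federated_computation(tff.type_at_server(tf.int32))
    def sum_arg(x):
        return tff.federated_sum(tff.federated_broadcast(x))

    # With N participating clients, sum_arg(v) evaluates to N * v at the server.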