Example #1
0
def aggregate_metrics_across_clients(metrics):
    """Aggregate client-side metrics into federated summaries.

    For every name in the module-level ``metrics_name`` list, the
    corresponding attribute of ``metrics`` is reduced across clients:
    ``num_examples`` with a federated sum, every other metric with a
    federated mean weighted by ``metrics.num_examples``.  In addition,
    the raw per-client values of every metric are collected under a
    ``'per_client/' + name`` key.

    Returns:
      An ``OrderedDict`` mapping metric names (and their ``per_client/``
      variants) to federated values.
    """
    global metrics_name
    output = collections.OrderedDict()

    for name in metrics_name:
        value = getattr(metrics, name)
        if name == 'num_examples':
            output[name] = tff.federated_sum(value)
        else:
            output[name] = tff.federated_mean(value, metrics.num_examples)
        # The unaggregated per-client values are kept for every metric,
        # regardless of which reduction was applied above.
        output['per_client/' + name] = tff.federated_collect(value)
    return output
Example #2
0
  def evaluate(datasets, client_states):
    """Run per-client evaluation and aggregate the resulting metrics.

    Maps the captured ``evaluate_client_tf`` computation over the paired
    ``(datasets, client_states)`` and reduces the outputs three ways.

    Returns:
      A 3-tuple of (federated-summed confusion matrix, the model's
      aggregated metrics, the raw collected per-client metrics).
    """
    outputs = tff.federated_map(evaluate_client_tf, (datasets, client_states))

    # Element-wise sum of the confusion matrices across all clients.
    summed_confusion = tff.federated_sum(outputs.confusion_matrix)
    # Model-defined aggregation of the scalar metrics.
    metrics_aggregate = model.federated_output_computation(outputs.metrics)
    # Also keep the unaggregated per-client metric values.
    metrics_per_client = tff.federated_collect(outputs.metrics)

    return summed_confusion, metrics_aggregate, metrics_per_client
Example #3
0
 def comp(client_data):
     """Collect the raw client values into a single federated sequence."""
     collected = tff.federated_collect(client_data)
     return collected