Code example #1
from tensorflow_federated.python.research.optimization.shared import optimizer_utils

# Flag prefixes used throughout this test module (example values).
TEST_SERVER_FLAG_PREFIX = 'test_server'
TEST_CLIENT_FLAG_PREFIX = 'test_client'


def setUpModule():
    # Create flags here to ensure duplicate flags are not created.
    optimizer_utils.define_optimizer_flags(TEST_SERVER_FLAG_PREFIX)
    optimizer_utils.define_optimizer_flags(TEST_CLIENT_FLAG_PREFIX)
    optimizer_utils.define_lr_schedule_flags(TEST_SERVER_FLAG_PREFIX)
    optimizer_utils.define_lr_schedule_flags(TEST_CLIENT_FLAG_PREFIX)
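For reference, a prefixed flag helper of this kind registers a family of hyperparameter flags under a common prefix so that, for example, server and client optimizers can be configured independently. The sketch below is only a hypothetical illustration of that pattern using absl flags directly, not the actual optimizer_utils implementation; the flag names ('<prefix>_optimizer', '<prefix>_learning_rate') and defaults are assumptions.

from absl import flags


def define_prefixed_optimizer_flags(prefix):
    # Register optimizer hyperparameters under a common prefix, e.g.
    # 'test_server_optimizer' and 'test_server_learning_rate'.
    flags.DEFINE_string('{}_optimizer'.format(prefix), 'sgd',
                        'Optimizer for the {} updates.'.format(prefix))
    flags.DEFINE_float('{}_learning_rate'.format(prefix), 0.1,
                       'Learning rate for the {} optimizer.'.format(prefix))


define_prefixed_optimizer_flags('test_server')
define_prefixed_optimizer_flags('test_client')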
Code example #2
from absl import flags

from tensorflow_federated.python.research.optimization.cifar100 import federated_cifar100
from tensorflow_federated.python.research.optimization.emnist import federated_emnist
from tensorflow_federated.python.research.optimization.emnist_ae import federated_emnist_ae
from tensorflow_federated.python.research.optimization.shakespeare import federated_shakespeare
from tensorflow_federated.python.research.optimization.shared import optimizer_utils
from tensorflow_federated.python.research.optimization.stackoverflow import federated_stackoverflow
from tensorflow_federated.python.research.optimization.stackoverflow_lr import federated_stackoverflow_lr
from tensorflow_federated.python.research.utils import utils_impl

_SUPPORTED_TASKS = [
    'cifar100', 'emnist_cr', 'emnist_ae', 'shakespeare', 'stackoverflow_nwp',
    'stackoverflow_lr'
]

with utils_impl.record_hparam_flags() as optimizer_flags:
    optimizer_utils.define_optimizer_flags('client')
    optimizer_utils.define_optimizer_flags('server')

with utils_impl.record_hparam_flags() as callback_flags:
    flags.DEFINE_float(
        'client_decay_factor', 0.1, 'Amount to decay the client learning rate '
        'upon reaching a plateau.')
    flags.DEFINE_float(
        'server_decay_factor', 0.9, 'Amount to decay the server learning rate '
        'upon reaching a plateau.')
    flags.DEFINE_float(
        'min_delta', 1e-4,
        'Minimum delta for improvement in the learning rate '
        'callbacks.')
    flags.DEFINE_integer(
        'window_size', 100,
        'Number of rounds to consider when deciding whether the metric '
        'has plateaued.')
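After the flags are recorded, the captured lists are typically merged into a single dictionary of hyperparameters for logging. A minimal sketch, assuming utils_impl provides a lookup_flag_values helper that maps a list of flag names to their current values (an assumption about this module's API):

def collect_hparams():
    # Merge the values of every flag captured by the record_hparam_flags()
    # blocks above into one dictionary keyed by flag name.
    hparams = dict(utils_impl.lookup_flag_values(optimizer_flags))
    hparams.update(utils_impl.lookup_flag_values(callback_flags))
    return hparams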
Code example #3
from absl import flags
from absl import logging
import pandas as pd
import tensorflow as tf
import tensorflow_federated as tff

from tensorflow_federated.python.research.optimization.shakespeare import dataset
from tensorflow_federated.python.research.optimization.shakespeare import models
from tensorflow_federated.python.research.optimization.shared import keras_callbacks
from tensorflow_federated.python.research.optimization.shared import keras_metrics
from tensorflow_federated.python.research.optimization.shared import optimizer_utils
from tensorflow_federated.python.research.utils import utils_impl

FLAGS = flags.FLAGS

with utils_impl.record_new_flags() as hparam_flags:
    optimizer_utils.define_optimizer_flags('centralized')
    flags.DEFINE_string(
        'experiment_name', None,
        'Name of the experiment. Part of the name of the output '
        'directory.')
    flags.DEFINE_integer('num_epochs', 60, 'Number of epochs to train.')
    flags.DEFINE_integer('batch_size', 10,
                         'Size of batches for training and eval.')
    flags.DEFINE_boolean('shuffle_train_data', True,
                         'Whether to shuffle the training data.')

flags.DEFINE_string(
    'root_output_dir', '/tmp/tff/optimization/shakespeare/centralized',
    'The top-level output directory for experiment runs. --experiment_name '
    'will be appended, and the directory will contain tensorboard logs, '
    'metrics CSVs and other output.')
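At run time the script reads these flags to build the optimizer and the output directory. A minimal sketch of a main() that does this, assuming optimizer_utils.create_optimizer_fn_from_flags exists in the shared module as in the other examples (the exact signature of the returned callable is an assumption):

import os

from absl import app


def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Expected no command-line arguments, got: {}'.format(argv))
    if not FLAGS.experiment_name:
        raise app.UsageError('--experiment_name must be set.')

    # Build a Keras optimizer from the 'centralized_*' flags defined above.
    optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('centralized')
    optimizer = optimizer_fn()

    # All tensorboard logs and metrics CSVs go under
    # root_output_dir/experiment_name.
    output_dir = os.path.join(FLAGS.root_output_dir, FLAGS.experiment_name)
    tf.io.gfile.makedirs(output_dir)
    logging.info('Writing output to %s with optimizer %s', output_dir,
                 type(optimizer).__name__)


if __name__ == '__main__':
    app.run(main)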