    'can be a convolutional model (cnn) or a two hidden-layer '
    'densely connected network (2nn).')
flags.DEFINE_integer('client_batch_size', 20, 'Batch size used on the client.')
flags.DEFINE_integer('clients_per_round', 10,
                     'How many clients to sample per round.')
flags.DEFINE_integer(
    'client_epochs_per_round', 1,
    'Number of client (inner optimizer) epochs per federated round.')
flags.DEFINE_boolean(
    'uniform_weighting', False,
    'Whether to weigh clients uniformly. If false, clients '
    'are weighted by the number of samples.')

# Optimizer configuration (this defines one or more flags per optimizer).
utils_impl.define_optimizer_flags('server')
utils_impl.define_optimizer_flags('client')

# Differential privacy flags
flags.DEFINE_float('clip', 0.05, 'Initial clip.')
flags.DEFINE_float('noise_multiplier', None,
                   'Noise multiplier. If None, no DP is used.')
flags.DEFINE_float('adaptive_clip_learning_rate', 0,
                   'Adaptive clip learning rate.')
flags.DEFINE_float('target_unclipped_quantile', 0.5,
                   'Target unclipped quantile.')
flags.DEFINE_float(
    'clipped_count_budget_allocation', 0.1,
    'Fraction of privacy budget to allocate for clipped counts.')
flags.DEFINE_boolean('per_vector_clipping', False, 'Use per-vector clipping.')
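# A minimal sketch (not part of the source) of a main() that consumes the
# flags above. `utils_impl.get_optimizer_from_flags` returning a Keras
# optimizer is confirmed by the tests later in this section; the import path
# and `run_federated_training` are assumptions standing in for the real
# training loop.
from absl import app
from absl import flags

from tensorflow_federated.python.research.utils import utils_impl  # Path assumed.

FLAGS = flags.FLAGS


def run_federated_training(server_optimizer, client_optimizer, **kwargs):
  """Hypothetical stand-in for the federated training loop."""
  del server_optimizer, client_optimizer, kwargs  # Sketch only.


def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Expected no command-line arguments.')
  # Keras optimizer instances built from the `server_*` / `client_*` flags.
  server_optimizer = utils_impl.get_optimizer_from_flags('server')
  client_optimizer = utils_impl.get_optimizer_from_flags('client')
  run_federated_training(
      server_optimizer,
      client_optimizer,
      clients_per_round=FLAGS.clients_per_round,
      noise_multiplier=FLAGS.noise_multiplier)


if __name__ == '__main__':
  app.run(main)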
    'in filenames.')
flags.DEFINE_integer('random_seed', 0, 'Random seed for the experiment.')

# Training hyperparameters
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
flags.DEFINE_integer('rounds_per_eval', 1, 'How often to evaluate.')
flags.DEFINE_integer('train_clients_per_round', 2,
                     'How many clients to sample per round.')
flags.DEFINE_integer('client_epochs_per_round', 1,
                     'Number of client epochs per round.')
flags.DEFINE_integer('batch_size', 20, 'Batch size used on the client.')
flags.DEFINE_integer('num_pseudo_clients', 1, 'Number of pseudo-clients.')

# Optimizer configuration (this defines one or more flags per optimizer).
utils_impl.define_optimizer_flags('server', defaults=dict(learning_rate=1.0))
utils_impl.define_optimizer_flags('client', defaults=dict(learning_rate=0.2))

# Differential privacy hyperparameters
flags.DEFINE_float('clip', 0.05, 'Initial clip.')
flags.DEFINE_float('noise_multiplier', 1.0, 'Noise multiplier.')
flags.DEFINE_float('adaptive_clip_learning_rate', 0,
                   'Adaptive clip learning rate.')
flags.DEFINE_float('target_unclipped_quantile', 0.5,
                   'Target unclipped quantile.')
flags.DEFINE_float(
    'clipped_count_budget_allocation', 0.1,
    'Fraction of privacy budget to allocate for clipped counts.')
flags.DEFINE_boolean('use_per_vector', False, 'Use per-vector clipping.')
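# Sketch (not from the source) of the adaptive-clipping update that
# `adaptive_clip_learning_rate` and `target_unclipped_quantile` parameterize.
# It follows the geometric update of Andrew et al., "Differentially Private
# Learning with Adaptive Clipping"; the production implementation lives in
# tensorflow_privacy, where the unclipped fraction is itself estimated with
# noise (using `clipped_count_budget_allocation`).
import math


def update_clip(clip, unclipped_fraction, target_unclipped_quantile,
                adaptive_clip_learning_rate):
  """Moves `clip` toward the target quantile of client update norms."""
  return clip * math.exp(
      -adaptive_clip_learning_rate *
      (unclipped_fraction - target_unclipped_quantile))


# Example: if only 30% of client updates fell under the current clip while we
# target 50%, the clip grows: update_clip(0.05, 0.3, 0.5, 0.2) ~= 0.052.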
    'sentences to use per user.')
flags.DEFINE_integer('batch_size', 8, 'Batch size used.')
flags.DEFINE_integer('sequence_length', 20, 'Max sequence length to use.')
flags.DEFINE_integer('epochs', 3, 'Number of epochs to train for.')
flags.DEFINE_integer('shuffle_buffer_size', 1000,
                     'Buffer size for data shuffling.')
flags.DEFINE_integer('num_validation_examples', 10000,
                     'Number of examples to take for the validation set.')
flags.DEFINE_integer('num_test_examples', 10000,
                     'Number of examples to take for the test set.')
flags.DEFINE_integer('tensorboard_update_frequency', 100 * 1000,
                     'Number of steps between TensorBoard logging calls.')
flags.DEFINE_string('root_output_dir', '/tmp/centralized_stackoverflow/',
                    'Root directory for writing experiment output.')
flags.DEFINE_integer('steps_per_epoch', None, 'Steps per epoch.')

utils_impl.define_optimizer_flags('centralized')

# Modeling flags
flags.DEFINE_integer(
    'vocab_size', 10000,
    'Size of the vocab to use; the `vocab_size` most common words are used '
    'as the vocabulary.')
flags.DEFINE_integer('embedding_size', 96,
                     'Dimension of the word embedding to use.')
flags.DEFINE_integer('latent_size', 512,
                     'Dimension of the latent state of the recurrent cell.')
flags.DEFINE_integer('num_layers', 1,
                     'Number of stacked recurrent layers to use.')
flags.DEFINE_boolean(
    'lstm', True,
    'If True, use an LSTM recurrent cell. If False, a GRU is used.')
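# A minimal sketch (not from the source) of a recurrent language model wired
# up from the modeling flags above. Layer arrangement is an assumption, but
# it follows the flag docstrings: embedding -> stacked LSTM/GRU -> logits
# over the vocabulary.
import tensorflow as tf
from absl import flags

FLAGS = flags.FLAGS


def build_model_from_flags():
  cell = tf.keras.layers.LSTM if FLAGS.lstm else tf.keras.layers.GRU
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Embedding(
      input_dim=FLAGS.vocab_size, output_dim=FLAGS.embedding_size))
  for _ in range(FLAGS.num_layers):
    # return_sequences=True so every position predicts the next word.
    model.add(cell(FLAGS.latent_size, return_sequences=True))
  model.add(tf.keras.layers.Dense(FLAGS.vocab_size))
  return model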
def test_define_optimizer_unused_default(self):
  with self.assertRaisesRegex(ValueError, 'not consumed'):
    # Use a different prefix to avoid declaring duplicate flags:
    utils_impl.define_optimizer_flags('client', defaults=dict(lr=1.25))
def test_get_optimizer_from_flags(self):
  utils_impl.define_optimizer_flags(
      'server', defaults=dict(learning_rate=1.25))
  self.assertEqual(FLAGS.server_learning_rate, 1.25)
  optimizer = utils_impl.get_optimizer_from_flags('server')
  self.assertEqual(optimizer.get_config()['learning_rate'], 1.25)
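# A hedged companion sketch (not from the source): overriding the generated
# flag value should win over the default passed to define_optimizer_flags.
# The flag name `server_learning_rate` is taken from the test above; the
# exact override semantics of utils_impl are an assumption.
def test_flag_override_changes_optimizer(self):
  FLAGS.server_learning_rate = 3.0
  optimizer = utils_impl.get_optimizer_from_flags('server')
  self.assertEqual(optimizer.get_config()['learning_rate'], 3.0)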
def setUpModule():
  # Create flags here to ensure duplicate flags are not created.
  utils_impl.define_optimizer_flags(TEST_SERVER_FLAG_PREFIX)
  utils_impl.define_optimizer_flags(TEST_CLIENT_FLAG_PREFIX)
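# Hypothetical values for the prefix constants used above; the real test
# module defines its own. Because absl flags live in a process-global
# registry, defining the same prefix twice (e.g. once per test) would raise
# absl.flags.DuplicateFlagError, which is why the flags are created exactly
# once in setUpModule().
TEST_SERVER_FLAG_PREFIX = 'test_server'
TEST_CLIENT_FLAG_PREFIX = 'test_client'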