def test_run_simple_model(self):
  vocab_size = 6
  mask_model = models.create_recurrent_model(vocab_size, sequence_length=5)
  mask_model.compile(
      optimizer='sgd',
      loss='sparse_categorical_crossentropy',
      metrics=[keras_metrics.MaskedCategoricalAccuracy()])
  no_mask_model = models.create_recurrent_model(
      vocab_size, sequence_length=5, mask_zero=False)
  no_mask_model.compile(
      optimizer='sgd',
      loss='sparse_categorical_crossentropy',
      metrics=[keras_metrics.MaskedCategoricalAccuracy()])

  # Give both models identical constant weights so any difference in the
  # evaluated metrics is attributable to masking alone.
  constant_test_weights = tf.nest.map_structure(tf.ones_like,
                                                mask_model.weights)
  mask_model.set_weights(constant_test_weights)
  no_mask_model.set_weights(constant_test_weights)

  # `tf.data.Dataset.from_tensor_slices` aggressively coalesces the input
  # into a single tensor, but we want a tuple of two tensors per example, so
  # we apply a transformation to split.
  def split_to_tuple(t):
    return (t[0, :], t[1, :])

  data = tf.data.Dataset.from_tensor_slices([
      ([0, 1, 2, 3, 4], [1, 2, 3, 4, 0]),
      ([2, 3, 4, 0, 1], [3, 4, 0, 1, 2]),
  ]).map(split_to_tuple).batch(2)

  mask_metrics = mask_model.evaluate(data)
  no_mask_metrics = no_mask_model.evaluate(data)
  self.assertNotAllClose(mask_metrics, no_mask_metrics, atol=1e-3)
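# For intuition, a minimal sketch of mask-aware accuracy in the style of
# `keras_metrics.MaskedCategoricalAccuracy` (an illustrative assumption, not
# the repo's actual implementation). Positions whose label equals the mask
# token get zero sample weight, mirroring how `mask_zero=True` in the
# embedding layer propagates a Keras mask into loss and metric weighting,
# which is why the two identically-weighted models above diverge.
class SketchMaskedCategoricalAccuracy(
    tf.keras.metrics.SparseCategoricalAccuracy):
  """Sparse categorical accuracy that ignores a designated mask token."""

  def __init__(self, masked_token=0, name='accuracy', dtype=None):
    super().__init__(name=name, dtype=dtype)
    self._masked_token = masked_token

  def update_state(self, y_true, y_pred, sample_weight=None):
    if sample_weight is None:
      sample_weight = tf.ones_like(y_true, dtype=tf.float32)
    # Zero the weight wherever the label is the mask token.
    keep = tf.cast(tf.not_equal(y_true, self._masked_token), tf.float32)
    return super().update_state(
        y_true, y_pred, tf.cast(sample_weight, tf.float32) * keep)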
def test_run_simple_model(self):
  vocab_size = 6
  model = models.create_recurrent_model(vocab_size, sequence_length=5)
  model.compile(
      optimizer='sgd',
      loss='sparse_categorical_crossentropy',
      metrics=[keras_metrics.FlattenedCategoricalAccuracy(vocab_size)])
  metrics = model.test_on_batch(
      x=tf.constant([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]], dtype=tf.int64),
      y=tf.constant([[2, 3, 4, 5, 0], [2, 3, 4, 5, 0]], dtype=tf.int64))
  self.assertAllClose(
      metrics,
      [
          8.886,  # loss
          0.2,  # accuracy
      ],
      atol=1e-3)

  # `tf.data.Dataset.from_tensor_slices` aggressively coalesces the input
  # into a single tensor, but we want a tuple of two tensors per example, so
  # we apply a transformation to split.
  def split_to_tuple(t):
    return (t[0, :], t[1, :])

  data = tf.data.Dataset.from_tensor_slices([
      ([0, 1, 2, 3, 4], [1, 2, 3, 4, 0]),
      ([2, 3, 4, 0, 1], [3, 4, 0, 1, 2]),
  ]).map(split_to_tuple).batch(2)

  metrics = model.evaluate(data)
  self.assertAllClose(metrics, [5.085, 0.125], atol=1e-3)
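# A quick illustration (standalone snippet, not part of the test above) of the
# coalescing behavior that `split_to_tuple` works around:
# `tf.data.Dataset.from_tensor_slices` stacks each (features, labels) pair
# into a single tensor, so each dataset element arrives as one (2, 5) matrix
# rather than a tuple until `split_to_tuple` is mapped over the dataset.
coalesced = tf.data.Dataset.from_tensor_slices([
    ([0, 1, 2, 3, 4], [1, 2, 3, 4, 0]),
])
element = next(iter(coalesced))
assert isinstance(element, tf.Tensor)  # One tensor, not an (x, y) tuple.
assert element.shape == (2, 5)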
def main(argv):
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  tf.compat.v1.enable_v2_behavior()

  experiment_output_dir = FLAGS.root_output_dir
  tensorboard_dir = os.path.join(experiment_output_dir, 'logdir',
                                 FLAGS.experiment_name)
  results_dir = os.path.join(experiment_output_dir, 'results',
                             FLAGS.experiment_name)
  for path in [experiment_output_dir, tensorboard_dir, results_dir]:
    try:
      tf.io.gfile.makedirs(path)
    except tf.errors.OpError:
      pass  # Directory already exists.

  hparam_dict = collections.OrderedDict([
      (name, FLAGS[name].value) for name in hparam_flags
  ])
  hparam_dict['results_file'] = results_dir
  hparams_file = os.path.join(results_dir, 'hparams.csv')
  logging.info('Saving hyperparameters to: [%s]', hparams_file)
  utils_impl.atomic_write_to_csv(pd.Series(hparam_dict), hparams_file)

  train_client_data, test_client_data = (
      tff.simulation.datasets.shakespeare.load_data())

  def preprocess(ds):
    return dataset.convert_snippets_to_character_sequence_examples(
        ds, FLAGS.batch_size, epochs=1).cache()

  train_dataset = train_client_data.create_tf_dataset_from_all_clients()
  if FLAGS.shuffle_train_data:
    train_dataset = train_dataset.shuffle(buffer_size=10000)
  train_dataset = preprocess(train_dataset)
  eval_dataset = preprocess(
      test_client_data.create_tf_dataset_from_all_clients())

  optimizer = optimizer_utils.create_optimizer_fn_from_flags('centralized')()

  # Vocabulary with one OOV ID and zero for the mask.
  vocab_size = len(dataset.CHAR_VOCAB) + 2
  model = models.create_recurrent_model(
      vocab_size=vocab_size, batch_size=FLAGS.batch_size)
  model.compile(
      optimizer=optimizer,
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=[
          keras_metrics.FlattenedCategoricalAccuracy(
              vocab_size=vocab_size, mask_zero=True)
      ])

  logging.info('Training model:')
  # `model.summary()` prints and returns `None`, so route its output through
  # `logging.info` rather than logging the return value.
  model.summary(print_fn=logging.info)

  csv_logger_callback = keras_callbacks.AtomicCSVLogger(results_dir)
  tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tensorboard_dir)

  # Reduce the learning rate by 10x every 20 epochs.
  def decay_lr(epoch, lr):
    if (epoch + 1) % 20 == 0:
      return lr * 0.1
    else:
      return lr

  lr_callback = tf.keras.callbacks.LearningRateScheduler(decay_lr, verbose=1)

  history = model.fit(
      train_dataset,
      validation_data=eval_dataset,
      epochs=FLAGS.num_epochs,
      callbacks=[lr_callback, tensorboard_callback, csv_logger_callback])

  logging.info('Final metrics:')
  for name in ['loss', 'accuracy']:
    metric = history.history['val_{}'.format(name)][-1]
    logging.info('\t%s: %.4f', name, metric)
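# A conventional absl entry point for the script above (the `app.run` guard is
# standard absl boilerplate rather than code taken from this file, and the
# script name in the sample command is an assumption; the flag names match the
# FLAGS referenced in `main`):
#
#   python run_centralized_shakespeare.py \
#       --root_output_dir=/tmp/shakespeare \
#       --experiment_name=centralized_baseline \
#       --batch_size=4 \
#       --num_epochs=60 \
#       --shuffle_train_data=true
if __name__ == '__main__':
  app.run(main)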
def model_builder():
  """Constructs a `tf.keras.Model` to train."""
  return models.create_recurrent_model(
      vocab_size=VOCAB_SIZE, batch_size=FLAGS.client_batch_size)
def model_builder():
  """Constructs a `tf.keras.Model` to train."""
  return models.create_recurrent_model(
      vocab_size=VOCAB_SIZE, sequence_length=FLAGS.sequence_length)
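# All of the snippets above depend on `models.create_recurrent_model`. For
# reference, a minimal sketch of the shape such a builder could take, inferred
# only from how it is called (a `vocab_size` plus optional `sequence_length`,
# `batch_size`, and `mask_zero` arguments); the layer sizes here are
# placeholders, not the repo's actual architecture:
def sketch_create_recurrent_model(vocab_size,
                                  sequence_length=None,
                                  batch_size=None,
                                  mask_zero=True):
  """Sketch: embedding -> recurrent stack -> per-position vocabulary logits."""
  return tf.keras.Sequential([
      tf.keras.layers.Embedding(
          input_dim=vocab_size,
          output_dim=8,
          mask_zero=mask_zero,
          batch_input_shape=[batch_size, sequence_length]),
      tf.keras.layers.LSTM(32, return_sequences=True),
      tf.keras.layers.Dense(vocab_size),  # Logits over the vocabulary.
  ])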