def run_centralized(optimizer: tf.keras.optimizers.Optimizer,
                    experiment_name: str,
                    root_output_dir: str,
                    num_epochs: int,
                    batch_size: int,
                    decay_epochs: Optional[int] = None,
                    lr_decay: Optional[float] = None,
                    hparams_dict: Optional[Mapping[str, Any]] = None,
                    sequence_length: Optional[int] = 80,
                    max_batches: Optional[int] = None):
    """Trains a two-layer RNN on Shakespeare next-character-prediction.

    Args:
      optimizer: A `tf.keras.optimizers.Optimizer` used to perform training.
      experiment_name: The name of the experiment. Part of the output directory.
      root_output_dir: The top-level output directory for experiment runs. The
        `experiment_name` argument will be appended, and the directory will
        contain tensorboard logs, metrics written as CSVs, and a CSV of
        hyperparameter choices (if `hparams_dict` is used).
      num_epochs: The number of training epochs.
      batch_size: The batch size, used for train, validation, and test.
      decay_epochs: The number of epochs of training before decaying the
        learning rate. If None, no decay occurs.
      lr_decay: The amount to decay the learning rate by after `decay_epochs`
        training epochs have occurred.
      hparams_dict: A mapping with string keys representing the hyperparameters
        and their values. If not None, this is written to CSV.
      sequence_length: The sequence length used for Shakespeare preprocessing.
      max_batches: If set to a positive integer, datasets are capped to at most
        that many batches. If set to None or a nonpositive integer, the full
        datasets are used.
    """

    train_dataset, eval_dataset = shakespeare_dataset.get_centralized_datasets(
        train_batch_size=batch_size,
        max_train_batches=max_batches,
        max_test_batches=max_batches,
        sequence_length=sequence_length)

    pad_token, _, _, _ = shakespeare_dataset.get_special_tokens()
    model = shakespeare_models.create_recurrent_model(
        vocab_size=VOCAB_SIZE, sequence_length=sequence_length)
    model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[
            keras_metrics.MaskedCategoricalAccuracy(masked_tokens=[pad_token])
        ])

    centralized_training_loop.run(keras_model=model,
                                  train_dataset=train_dataset,
                                  validation_dataset=eval_dataset,
                                  experiment_name=experiment_name,
                                  root_output_dir=root_output_dir,
                                  num_epochs=num_epochs,
                                  hparams_dict=hparams_dict,
                                  decay_epochs=decay_epochs,
                                  lr_decay=lr_decay)
Example #2
def metrics_builder():
    """Returns a `list` of `tf.keras.metric.Metric` objects."""
    pad_token, _, _, _ = shakespeare_dataset.get_special_tokens()

    return [
        keras_metrics.NumBatchesCounter(),
        keras_metrics.NumExamplesCounter(),
        keras_metrics.NumTokensCounter(masked_tokens=[pad_token]),
        keras_metrics.MaskedCategoricalAccuracy(masked_tokens=[pad_token]),
    ]
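A builder like this is the typical companion to a federated `model_fn`, where metrics must be constructed fresh on every call. The sketch below shows that pattern under stated assumptions: it assumes a TFF version exposing `tff.learning.from_keras_model`, and `example_dataset` stands in for any preprocessed dataset whose `element_spec` supplies the input spec.

def model_fn():
    # The Keras model and metrics must be built inside the function so each
    # federated computation gets fresh, uncompiled objects.
    keras_model = shakespeare_models.create_recurrent_model(
        vocab_size=VOCAB_SIZE, sequence_length=80)
    return tff.learning.from_keras_model(
        keras_model=keras_model,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        input_spec=example_dataset.element_spec,
        metrics=metrics_builder())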
 def test_to_ids(self):
     pad, _, bos, eos = shakespeare_dataset.get_special_tokens()
     to_tokens = shakespeare_dataset._build_tokenize_fn(split_length=5)
     tokens = self.evaluate(to_tokens({'snippets': tf.constant('abc')}))
     self.assertAllEqual(tokens, [bos, 64, 42, 21, eos])
     to_tokens = shakespeare_dataset._build_tokenize_fn(split_length=12)
     tokens = self.evaluate(
         to_tokens({'snippets': tf.constant('star wars')}))
     self.assertAllEqual(tokens,
                         [bos, 25, 5, 64, 46, 14, 26, 64, 46, 25, eos, pad])
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    train_client_data, test_client_data = (
        tff.simulation.datasets.shakespeare.load_data())

    def preprocess(ds):
        return shakespeare_dataset.convert_snippets_to_character_sequence_examples(
            dataset=ds,
            batch_size=FLAGS.batch_size,
            epochs=1,
            shuffle_buffer_size=0,
            sequence_length=FLAGS.shakespeare_sequence_length)

    train_dataset = train_client_data.create_tf_dataset_from_all_clients()
    if FLAGS.shuffle_train_data:
        train_dataset = train_dataset.shuffle(buffer_size=10000)
    train_dataset = preprocess(train_dataset)

    eval_dataset = preprocess(
        test_client_data.create_tf_dataset_from_all_clients())

    optimizer = optimizer_utils.create_optimizer_fn_from_flags('centralized')()

    pad_token, _, _, _ = shakespeare_dataset.get_special_tokens()
    model = shakespeare_models.create_recurrent_model(
        vocab_size=VOCAB_SIZE,
        sequence_length=FLAGS.shakespeare_sequence_length)
    model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[
            keras_metrics.MaskedCategoricalAccuracy(masked_tokens=[pad_token])
        ])

    hparams_dict = collections.OrderedDict([(name, FLAGS[name].value)
                                            for name in hparam_flags])

    centralized_training_loop.run(keras_model=model,
                                  train_dataset=train_dataset,
                                  validation_dataset=eval_dataset,
                                  experiment_name=FLAGS.experiment_name,
                                  root_output_dir=FLAGS.root_output_dir,
                                  num_epochs=FLAGS.num_epochs,
                                  hparams_dict=hparams_dict,
                                  decay_epochs=FLAGS.decay_epochs,
                                  lr_decay=FLAGS.lr_decay)
 def test_convert_snippets_to_character_sequence_examples(self):
     pad, _, bos, eos = shakespeare_dataset.get_special_tokens()
     ds = shakespeare_dataset.convert_snippets_to_character_sequence_examples(
         tf.data.Dataset.from_tensor_slices({
             'snippets': ['a snippet', 'different snippet'],
         }),
         batch_size=2,
         epochs=2,
         shuffle_buffer_size=1,
         sequence_length=10)
     expected_outputs = [
         # First batch.
         ([[bos, 64, 14, 25, 45, 66, 4, 4, 65, 5],
           [bos, 1, 66, 43, 43, 65, 46, 65, 45, 5]],
          [[64, 14, 25, 45, 66, 4, 4, 65, 5, eos],
           [1, 66, 43, 43, 65, 46, 65, 45, 5, 14]]),
         # Second batch.
         ([[25, 45, 66, 4, 4, 65, 5, eos, pad, pad],
           [bos, 64, 14, 25, 45, 66, 4, 4, 65, 5]],
          [[45, 66, 4, 4, 65, 5, eos, pad, pad, pad],
           [64, 14, 25, 45, 66, 4, 4, 65, 5, eos]]),
         # Third batch.
         ([[bos, 1, 66, 43, 43, 65, 46, 65, 45, 5],
           [25, 45, 66, 4, 4, 65, 5, eos, pad, pad]],
          [[1, 66, 43, 43, 65, 46, 65, 45, 5, 14],
           [45, 66, 4, 4, 65, 5, eos, pad, pad, pad]]),
     ]
     for batch_num, actual in enumerate(ds):
         self.assertGreater(
             len(expected_outputs),
             0,
             msg='Actual output contains more than expected.\nActual: {!s}'.
             format(actual))
         expected = expected_outputs.pop(0)
         self.assertAllEqual(
             actual,
             expected,
             msg='Batch {:d} not equal. Actual: {!s}\nExpected: {!s}'.
             format(batch_num, actual, expected))
     self.assertLen(
         expected_outputs,
         0,
         msg='Not all expected output seen.\nLeft over expectations: {!s}'.
         format(expected_outputs))
Example #6
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    experiment_output_dir = FLAGS.root_output_dir
    tensorboard_dir = os.path.join(experiment_output_dir, 'logdir',
                                   FLAGS.experiment_name)
    results_dir = os.path.join(experiment_output_dir, 'results',
                               FLAGS.experiment_name)

    for path in [experiment_output_dir, tensorboard_dir, results_dir]:
        try:
            tf.io.gfile.makedirs(path)
        except tf.errors.OpError:
            pass  # Directory already exists.

    hparam_dict = collections.OrderedDict([(name, FLAGS[name].value)
                                           for name in hparam_flags])
    hparam_dict['results_file'] = results_dir
    hparams_file = os.path.join(results_dir, 'hparams.csv')
    logging.info('Saving hyper parameters to: [%s]', hparams_file)
    utils_impl.atomic_write_to_csv(pd.Series(hparam_dict), hparams_file)

    train_client_data, test_client_data = (
        tff.simulation.datasets.shakespeare.load_data())

    def preprocess(ds):
        return shakespeare_dataset.convert_snippets_to_character_sequence_examples(
            ds, FLAGS.batch_size, epochs=1).cache()

    train_dataset = train_client_data.create_tf_dataset_from_all_clients()
    if FLAGS.shuffle_train_data:
        train_dataset = train_dataset.shuffle(buffer_size=10000)
    train_dataset = preprocess(train_dataset)

    eval_dataset = preprocess(
        test_client_data.create_tf_dataset_from_all_clients())

    optimizer = optimizer_utils.create_optimizer_fn_from_flags('centralized')()

    pad_token, _, _, _ = shakespeare_dataset.get_special_tokens()

    # Vocabulary of CHAR_VOCAB plus four special ids: zero for the pad/mask,
    # one OOV id, and the BOS and EOS ids.
    vocab_size = len(shakespeare_dataset.CHAR_VOCAB) + 4
    model = shakespeare_models.create_recurrent_model(
        vocab_size=vocab_size, batch_size=FLAGS.batch_size)
    model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[
            keras_metrics.MaskedCategoricalAccuracy(masked_tokens=[pad_token])
        ])

    logging.info('Training model:')
    model.summary(print_fn=logging.info)

    csv_logger_callback = keras_callbacks.AtomicCSVLogger(results_dir)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=tensorboard_dir)

    # Reduce the learning rate every 20 epochs.
    def decay_lr(epoch, lr):
        if (epoch + 1) % 20 == 0:
            return lr * 0.1
        else:
            return lr

    lr_callback = tf.keras.callbacks.LearningRateScheduler(decay_lr, verbose=1)

    history = model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=FLAGS.num_epochs,
        callbacks=[lr_callback, tensorboard_callback, csv_logger_callback])

    logging.info('Final metrics:')
    for name in ['loss', 'accuracy']:
        metric = history.history['val_{}'.format(name)][-1]
        logging.info('\t%s: %.4f', name, metric)
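As a small worked check of the `decay_lr` schedule above: `tf.keras.callbacks.LearningRateScheduler` calls the function at the start of every 0-indexed epoch, so the rate is cut by 10x at epoch indices 19, 39, 59, and so on. The snippet below restates the same rule standalone; the 0.1 starting rate is an arbitrary assumption.

lr = 0.1
for epoch in range(60):
    if (epoch + 1) % 20 == 0:  # same rule as decay_lr above
        lr *= 0.1
print(lr)  # three decays over 60 epochs: 0.1 * 0.1**3 ~= 1e-4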
Example #7
 def test_last_id_not_oov(self):
     _, oov, bos, eos = shakespeare_dataset.get_special_tokens()
     to_tokens = shakespeare_dataset._build_tokenize_fn(split_length=5)
     tokens = to_tokens({'snippets': tf.constant('a\r~')})
     self.assertAllEqual(tokens, [bos, 64, 86, oov, eos])
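The expected values in these tests suggest the special-token convention used throughout: characters map to ids 1..len(CHAR_VOCAB), pad is 0, and the OOV, BOS, and EOS tokens take the next three ids. A small hedged illustration, assuming that convention holds for the installed shakespeare_dataset:

pad, oov, bos, eos = shakespeare_dataset.get_special_tokens()
num_chars = len(shakespeare_dataset.CHAR_VOCAB)
print(pad, oov, bos, eos)  # expected under this convention: 0, num_chars+1, num_chars+2, num_chars+3
print(num_chars + 4)       # total number of ids, i.e. the embedding size used as VOCAB_SIZE above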