Example No. 1
    def test_run_simple_model(self):
        vocab_size = 6
        mask_model = shakespeare_models.create_recurrent_model(
            vocab_size, sequence_length=5)
        mask_model.compile(optimizer='sgd',
                           loss='sparse_categorical_crossentropy',
                           metrics=[keras_metrics.MaskedCategoricalAccuracy()])

        no_mask_model = shakespeare_models.create_recurrent_model(
            vocab_size, sequence_length=5, mask_zero=False)
        no_mask_model.compile(
            optimizer='sgd',
            loss='sparse_categorical_crossentropy',
            metrics=[keras_metrics.MaskedCategoricalAccuracy()])

        constant_test_weights = tf.nest.map_structure(tf.ones_like,
                                                      mask_model.weights)
        mask_model.set_weights(constant_test_weights)
        no_mask_model.set_weights(constant_test_weights)

        # `tf.data.Dataset.from_tensor_slices` aggressively coalesces the input into
        # a single tensor, but we want a tuple of two tensors per example, so we
        # apply a transformation to split.
        def split_to_tuple(t):
            return (t[0, :], t[1, :])

        data = tf.data.Dataset.from_tensor_slices([
            ([0, 1, 2, 3, 4], [1, 2, 3, 4, 0]),
            ([2, 3, 4, 0, 1], [3, 4, 0, 1, 2]),
        ]).map(split_to_tuple).batch(2)
        mask_metrics = mask_model.evaluate(data)
        no_mask_metrics = no_mask_model.evaluate(data)

        self.assertNotAllClose(mask_metrics, no_mask_metrics, atol=1e-3)
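This test passes because `keras_metrics.MaskedCategoricalAccuracy` excludes masked positions from the accuracy average, so the masked and unmasked models disagree on identical data. As a rough illustration of that kind of metric, here is a minimal sketch (the class name, default masked token, and subclassing approach are assumptions, not the actual `keras_metrics` implementation):

import tensorflow as tf

class SimpleMaskedAccuracy(tf.keras.metrics.SparseCategoricalAccuracy):
    """Illustrative sketch: accuracy that ignores a set of masked token ids."""

    def __init__(self, masked_tokens=(0,), name='masked_accuracy'):
        super().__init__(name=name)
        self._masked_tokens = list(masked_tokens)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Zero-weight every position whose label is a masked token so it
        # contributes nothing to the accuracy average.
        keep = tf.ones_like(y_true, dtype=tf.float32)
        for token in self._masked_tokens:
            keep *= tf.cast(tf.not_equal(y_true, token), tf.float32)
        return super().update_state(y_true, y_pred, sample_weight=keep)

Reusing `SparseCategoricalAccuracy` with a per-position `sample_weight` keeps the sketch short; the real metric may compute the mask differently.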
Example No. 2
    def test_model_initialization_uses_random_seed(self):
        model_1_with_seed_0 = shakespeare_models.create_recurrent_model(
            vocab_size=6, sequence_length=5, seed=0)
        model_2_with_seed_0 = shakespeare_models.create_recurrent_model(
            vocab_size=6, sequence_length=5, seed=0)
        model_1_with_seed_1 = shakespeare_models.create_recurrent_model(
            vocab_size=6, sequence_length=5, seed=1)
        model_2_with_seed_1 = shakespeare_models.create_recurrent_model(
            vocab_size=6, sequence_length=5, seed=1)
        self.assertAllClose(model_1_with_seed_0.weights,
                            model_2_with_seed_0.weights)
        self.assertAllClose(model_1_with_seed_1.weights,
                            model_2_with_seed_1.weights)
        self.assertNotAllClose(model_1_with_seed_0.weights,
                               model_1_with_seed_1.weights)
Example No. 3
def run_centralized(optimizer: tf.keras.optimizers.Optimizer,
                    experiment_name: str,
                    root_output_dir: str,
                    num_epochs: int,
                    batch_size: int,
                    decay_epochs: Optional[int] = None,
                    lr_decay: Optional[float] = None,
                    hparams_dict: Optional[Mapping[str, Any]] = None,
                    sequence_length: Optional[int] = 80,
                    max_batches: Optional[int] = None):
    """Trains a two-layer RNN on Shakespeare next-character-prediction.

    Args:
      optimizer: A `tf.keras.optimizers.Optimizer` used to perform training.
      experiment_name: The name of the experiment. Part of the output directory.
      root_output_dir: The top-level output directory for experiment runs. The
        `experiment_name` argument will be appended, and the directory will
        contain tensorboard logs, metrics written as CSVs, and a CSV of
        hyperparameter choices (if `hparams_dict` is used).
      num_epochs: The number of training epochs.
      batch_size: The batch size, used for train, validation, and test.
      decay_epochs: The number of epochs of training before decaying the
        learning rate. If None, no decay occurs.
      lr_decay: The amount to decay the learning rate by after `decay_epochs`
        training epochs have occurred.
      hparams_dict: A mapping with string keys representing the hyperparameters
        and their values. If not None, this is written to CSV.
      sequence_length: The sequence length used for Shakespeare preprocessing.
      max_batches: If set to a positive integer, datasets are capped to at most
        that many batches. If set to None or a nonpositive integer, the full
        datasets are used.
    """

    shakespeare_train, shakespeare_test = shakespeare_dataset.get_centralized_datasets(
        train_batch_size=batch_size, sequence_length=sequence_length)

    if max_batches and max_batches >= 1:
        shakespeare_train = shakespeare_train.take(max_batches)
        shakespeare_test = shakespeare_test.take(max_batches)

    pad_token, _, _, _ = shakespeare_dataset.get_special_tokens()
    model = shakespeare_models.create_recurrent_model(
        vocab_size=VOCAB_SIZE, sequence_length=sequence_length)
    model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[
            keras_metrics.MaskedCategoricalAccuracy(masked_tokens=[pad_token])
        ])

    centralized_training_loop.run(keras_model=model,
                                  train_dataset=shakespeare_train,
                                  validation_dataset=shakespeare_test,
                                  experiment_name=experiment_name,
                                  root_output_dir=root_output_dir,
                                  num_epochs=num_epochs,
                                  hparams_dict=hparams_dict,
                                  decay_epochs=decay_epochs,
                                  lr_decay=lr_decay)
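A call site might look like this; the optimizer, paths, and hyperparameter values below are purely illustrative:

run_centralized(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
    experiment_name='shakespeare_sgd_baseline',
    root_output_dir='/tmp/centralized',
    num_epochs=10,
    batch_size=32,
    decay_epochs=5,
    lr_decay=0.5)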
Example No. 4
def create_shakespeare_model(sequence_length):
    """Constructs a `tf.keras.Model` to train."""
    return shakespeare_models.create_recurrent_model(
        vocab_size=VOCAB_SIZE, sequence_length=sequence_length)
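A factory like this is typically consumed by federated training code that must build a fresh Keras model on each invocation. A hedged sketch of such a consumer (the `tff.learning.models.from_keras_model` path varies across TensorFlow Federated releases, and the `input_spec` shapes and dtypes here are assumptions):

import tensorflow as tf
import tensorflow_federated as tff

def model_fn():
    # Build a new Keras model each call; TFF requires fresh variables.
    keras_model = create_shakespeare_model(sequence_length=80)
    return tff.learning.models.from_keras_model(
        keras_model,
        input_spec=(tf.TensorSpec([None, 80], tf.int64),
                    tf.TensorSpec([None, 80], tf.int64)),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))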