  def test_network_creation(self, use_position_id, pack_multiple_sequences,
                             use_float16):
    """Validate that the Keras object can be created."""
    if use_float16:
      tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
    seq_length = 16
    vocab_size = 100
    max_position_embeddings = 32
    type_vocab_size = 2
    hidden_size = 32
    embedding_cfg = dict(
        vocab_size=vocab_size,
        type_vocab_size=type_vocab_size,
        hidden_size=hidden_size,
        max_seq_length=max_position_embeddings,
        initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
        dropout_rate=0.1,
        use_position_id=use_position_id,
        pack_multiple_sequences=pack_multiple_sequences,
    )
    test_object = packed_sequence_embedding.PackedSequenceEmbedding(
        **embedding_cfg)

    input_word_ids = tf.keras.Input(shape=(seq_length,), dtype=tf.int32)
    input_mask = tf.keras.Input(shape=(seq_length,), dtype=tf.int32)
    input_type_ids = tf.keras.Input(shape=(seq_length,), dtype=tf.int32)
    network_inputs = {
        'input_word_ids': input_word_ids,
        'input_mask': input_mask,
        'input_type_ids': input_type_ids,
    }
    if use_position_id:
      network_inputs['position_ids'] = tf.keras.Input(
          shape=(seq_length,), dtype=tf.int32)

    embedding, mask = test_object(network_inputs)

    # Create a model based off of this network:
    model = tf.keras.Model(network_inputs, [embedding, mask])

    # Invoke the model. We can't validate the output data here (the model is too
    # complex) but this will catch structural runtime errors.
    batch_size = 3
    word_id_data = np.random.randint(vocab_size, size=(batch_size, seq_length))
    mask_data = np.random.randint(2, size=(batch_size, seq_length))
    type_id_data = np.random.randint(
        type_vocab_size, size=(batch_size, seq_length))
    feed_input = {
        'input_word_ids': word_id_data,
        'input_mask': mask_data,
        'input_type_ids': type_id_data,
    }
    if use_position_id:
      feed_input['position_ids'] = np.random.randint(
          seq_length, size=(batch_size, seq_length))
    embeddings, attention_mask = model.predict(feed_input)
    expected_embeddings_shape = [batch_size, seq_length, hidden_size]
    expected_attention_mask_shape = [batch_size, seq_length, seq_length]
    self.assertAllEqual(expected_embeddings_shape, embeddings.shape)
    self.assertAllEqual(expected_attention_mask_shape, attention_mask.shape)
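
  # Hedged addition (not in the original excerpt): set_policy changes a
  # process-global setting, so a tearDown along these lines is one common way
  # to keep the mixed_float16 policy from leaking into later tests. Whether
  # the full test file already does this is an assumption.
  def tearDown(self):
    super().tearDown()
    tf.keras.mixed_precision.experimental.set_policy('float32')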

  def test_serialize_deserialize(self):
    tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
    # Create a network object that sets all of its config options.
    embedding_cfg = dict(
        vocab_size=100,
        type_vocab_size=2,
        hidden_size=64,
        max_seq_length=32,
        initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
        dropout_rate=0.1,
        use_position_id=True,
        pack_multiple_sequences=False,
    )
    network = packed_sequence_embedding.PackedSequenceEmbedding(
        **embedding_cfg)

    expected_config = dict(embedding_cfg)
    expected_config['initializer'] = tf.keras.initializers.serialize(
        tf.keras.initializers.get(expected_config['initializer']))
    self.assertEqual(network.get_config(), expected_config)

    # Create another network object from the first object's config.
    new_network = packed_sequence_embedding.PackedSequenceEmbedding.from_config(
        network.get_config())

    # Validate that the config can be forced to JSON.
    _ = new_network.to_json()

    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(network.get_config(), new_network.get_config())
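
  # Minimal, hypothetical companion to the tests above (not part of the
  # original excerpt): it calls the PackedSequenceEmbedding network eagerly
  # with the same input-dict keys used in test_network_creation. The helper
  # name and the concrete sizes are illustrative assumptions only.
  def _example_direct_call(self):
    embedding_cfg = dict(
        vocab_size=100,
        type_vocab_size=2,
        hidden_size=32,
        max_seq_length=32,
        initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
        dropout_rate=0.1,
        use_position_id=False,
        pack_multiple_sequences=False,
    )
    network = packed_sequence_embedding.PackedSequenceEmbedding(
        **embedding_cfg)
    batch_size, seq_length = 2, 16
    inputs = {
        'input_word_ids': tf.random.uniform(
            (batch_size, seq_length), maxval=embedding_cfg['vocab_size'],
            dtype=tf.int32),
        'input_mask': tf.ones((batch_size, seq_length), dtype=tf.int32),
        'input_type_ids': tf.zeros((batch_size, seq_length), dtype=tf.int32),
    }
    embeddings, attention_mask = network(inputs)
    # The shapes mirror the assertions in test_network_creation.
    self.assertEqual([batch_size, seq_length, embedding_cfg['hidden_size']],
                     embeddings.shape.as_list())
    self.assertEqual([batch_size, seq_length, seq_length],
                     attention_mask.shape.as_list())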
Example #3
  def _get_network(self, vocab_size):
    sequence_length = 512
    hidden_size = 50
    embedding_cfg = {
        'vocab_size': vocab_size,
        'type_vocab_size': 1,
        'hidden_size': hidden_size,
        'embedding_width': hidden_size,
        'max_seq_length': sequence_length,
        'initializer': tf.keras.initializers.TruncatedNormal(stddev=0.02),
        'dropout_rate': 0.1,
    }
    embedding_inst = packed_sequence_embedding.PackedSequenceEmbedding(
        **embedding_cfg)
    hidden_cfg = {
        'num_attention_heads': 2,
        'intermediate_size': 3072,
        'intermediate_activation': activations.gelu,
        'dropout_rate': 0.1,
        'attention_dropout_rate': 0.1,
        'kernel_initializer':
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
    }
    return encoder_scaffold.EncoderScaffold(
        num_hidden_instances=2,
        pooled_output_dim=hidden_size,
        embedding_cfg=embedding_cfg,
        embedding_cls=embedding_inst,
        hidden_cfg=hidden_cfg,
        dict_outputs=True)
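
  # Hypothetical companion to the helper above (not in the original snippet):
  # it wires Keras Input placeholders through the scaffold. The assumption
  # that EncoderScaffold takes [input_word_ids, input_mask, input_type_ids]
  # and, with dict_outputs=True, returns a dict of outputs is based on its
  # usage elsewhere in the modeling library and is not confirmed by this
  # excerpt.
  def _example_use_network(self):
    vocab_size = 100
    sequence_length = 512
    network = self._get_network(vocab_size)
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    outputs = network([word_ids, mask, type_ids])
    # Wrapping the symbolic outputs in a Model makes the scaffold usable with
    # numpy feed data, mirroring the predict-based checks above.
    return tf.keras.Model([word_ids, mask, type_ids], outputs)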