Code example #1
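The test methods below are excerpts from a larger test class, so their imports are not shown. A minimal set of imports they rely on is sketched here; the exact module path for albert_transformer_encoder is an assumption based on the TF Model Garden layout and may differ in your checkout.

# Assumed imports for the excerpted test methods (module path is a guess).
import numpy as np
import tensorflow as tf

from official.nlp.modeling.networks import albert_transformer_encoder
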
  def test_network_invocation(self):
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    # Create a small TransformerEncoder for testing.
    test_network = albert_transformer_encoder.AlbertTransformerEncoder(
        vocab_size=vocab_size,
        embedding_width=8,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types)
    self.assertTrue(
        test_network._position_embedding_layer._use_dynamic_slicing)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    data, pooled = test_network([word_ids, mask, type_ids])

    # Create a model based off of this network:
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])

    # Invoke the model. We can't validate the output data here (the model is too
    # complex) but this will catch structural runtime errors.
    batch_size = 3
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    _ = model.predict([word_id_data, mask_data, type_id_data])

    # Creates a TransformerEncoder with max_sequence_length != sequence_length
    max_sequence_length = 128
    test_network = albert_transformer_encoder.AlbertTransformerEncoder(
        vocab_size=vocab_size,
        embedding_width=8,
        hidden_size=hidden_size,
        max_sequence_length=max_sequence_length,
        num_attention_heads=2,
        num_layers=3,
        type_vocab_size=num_types)
    self.assertTrue(
        test_network._position_embedding_layer._use_dynamic_slicing)
    data, pooled = test_network([word_ids, mask, type_ids])
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    _ = model.predict([word_id_data, mask_data, type_id_data])
Code example #2
    def test_serialize_deserialize(self):
        tf.keras.mixed_precision.experimental.set_policy("mixed_float16")
        # Create a network object that sets all of its config options.
        kwargs = dict(vocab_size=100,
                      embedding_width=8,
                      hidden_size=32,
                      num_layers=3,
                      num_attention_heads=2,
                      max_sequence_length=21,
                      type_vocab_size=12,
                      intermediate_size=1223,
                      activation="relu",
                      dropout_rate=0.05,
                      attention_dropout_rate=0.22,
                      initializer="glorot_uniform")
        network = albert_transformer_encoder.AlbertTransformerEncoder(**kwargs)

        expected_config = dict(kwargs)
        expected_config["activation"] = tf.keras.activations.serialize(
            tf.keras.activations.get(expected_config["activation"]))
        expected_config["initializer"] = tf.keras.initializers.serialize(
            tf.keras.initializers.get(expected_config["initializer"]))
        self.assertEqual(network.get_config(), expected_config)

        # Create another network object from the first object's config.
        new_network = (
            albert_transformer_encoder.AlbertTransformerEncoder.from_config(
                network.get_config()))

        # Validate that the config can be forced to JSON.
        _ = new_network.to_json()

        # If the serialization was successful, the new config should match the old.
        self.assertAllEqual(network.get_config(), new_network.get_config())
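
As a follow-up to the to_json() call above, a config serialized this way can in principle be restored with tf.keras.models.model_from_json, provided the custom class is supplied via custom_objects. This is only a sketch under that assumption; it is not part of the original test.

# Sketch: restore the network from its serialized JSON config. Assumes the
# custom class is registered through custom_objects; not part of the test above.
restored = tf.keras.models.model_from_json(
    new_network.to_json(),
    custom_objects={
        "AlbertTransformerEncoder":
            albert_transformer_encoder.AlbertTransformerEncoder
    })
assert restored.get_config() == new_network.get_config()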
Code example #3
  def test_network_creation(self, expected_dtype):
    hidden_size = 32
    sequence_length = 21

    kwargs = dict(
        vocab_size=100,
        hidden_size=hidden_size,
        num_attention_heads=2,
        num_layers=3)
    if expected_dtype == tf.float16:
      tf.keras.mixed_precision.experimental.set_policy("mixed_float16")

    # Create a small TransformerEncoder for testing.
    test_network = albert_transformer_encoder.AlbertTransformerEncoder(**kwargs)

    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    data, pooled = test_network([word_ids, mask, type_ids])

    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())

    # Under the mixed_float16 policy, the data output is float32 (it comes from
    # a layer norm) and the pooled output should be float16.
    self.assertEqual(tf.float32, data.dtype)
    self.assertEqual(expected_dtype, pooled.dtype)

    # ALBERT has additional embedding projection ('embedding_projection') weights
    # and it shares transformer weights across layers.
    self.assertNotEmpty(
        [x for x in test_network.weights if "embedding_projection/" in x.name])
    self.assertNotEmpty(
        [x for x in test_network.weights if "transformer/" in x.name])
    self.assertEmpty(
        [x for x in test_network.weights if "transformer/layer" in x.name])
Code example #4
    def test_network_creation(self, expected_dtype, float_dtype=None):
        hidden_size = 32
        sequence_length = 21

        kwargs = dict(vocab_size=100,
                      hidden_size=hidden_size,
                      sequence_length=sequence_length,
                      num_attention_heads=2,
                      num_layers=3)
        if float_dtype is not None:
            kwargs["float_dtype"] = float_dtype

        # Create a small TransformerEncoder for testing.
        test_network = albert_transformer_encoder.AlbertTransformerEncoder(
            **kwargs)

        # Create the inputs (note that the first dimension is implicit).
        word_ids = tf.keras.Input(shape=(sequence_length, ), dtype=tf.int32)
        mask = tf.keras.Input(shape=(sequence_length, ), dtype=tf.int32)
        type_ids = tf.keras.Input(shape=(sequence_length, ), dtype=tf.int32)
        data, pooled = test_network([word_ids, mask, type_ids])

        expected_data_shape = [None, sequence_length, hidden_size]
        expected_pooled_shape = [None, hidden_size]
        self.assertAllEqual(expected_data_shape, data.shape.as_list())
        self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())

        self.assertEqual(expected_dtype, data.dtype)
        self.assertEqual(expected_dtype, pooled.dtype)

        # ALBERT has additional embedding projection ('embedding_projection')
        # weights and it shares transformer weights across layers.
        self.assertNotEmpty([
            x for x in test_network.weights
            if "embedding_projection/" in x.name
        ])
        self.assertNotEmpty(
            [x for x in test_network.weights if "transformer/" in x.name])
        self.assertEmpty(
            [x for x in test_network.weights if "transformer/layer" in x.name])
Code example #5
    def test_network_invocation(self):
        hidden_size = 32
        sequence_length = 21
        vocab_size = 57
        num_types = 7
        # Create a small TransformerEncoder for testing.
        test_network = albert_transformer_encoder.AlbertTransformerEncoder(
            vocab_size=vocab_size,
            embedding_width=8,
            hidden_size=hidden_size,
            num_attention_heads=2,
            num_layers=3,
            type_vocab_size=num_types)
        # Create the inputs (note that the first dimension is implicit).
        word_ids = tf.keras.Input(shape=(sequence_length, ), dtype=tf.int32)
        mask = tf.keras.Input(shape=(sequence_length, ), dtype=tf.int32)
        type_ids = tf.keras.Input(shape=(sequence_length, ), dtype=tf.int32)
        data, pooled = test_network([word_ids, mask, type_ids])

        # Create a model based off of this network:
        model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])

        # Invoke the model. We can't validate the output data here (the model is too
        # complex) but this will catch structural runtime errors.
        batch_size = 3
        word_id_data = np.random.randint(vocab_size,
                                         size=(batch_size, sequence_length))
        mask_data = np.random.randint(2, size=(batch_size, sequence_length))
        type_id_data = np.random.randint(num_types,
                                         size=(batch_size, sequence_length))
        list_outputs = model.predict([word_id_data, mask_data, type_id_data])

        # Creates a TransformerEncoder with max_sequence_length != sequence_length
        max_sequence_length = 128
        test_network = albert_transformer_encoder.AlbertTransformerEncoder(
            vocab_size=vocab_size,
            embedding_width=8,
            hidden_size=hidden_size,
            max_sequence_length=max_sequence_length,
            num_attention_heads=2,
            num_layers=3,
            type_vocab_size=num_types)
        model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
        _ = model.predict([word_id_data, mask_data, type_id_data])

        # Tests dictionary outputs.
        test_network_dict = albert_transformer_encoder.AlbertTransformerEncoder(
            vocab_size=vocab_size,
            embedding_width=8,
            hidden_size=hidden_size,
            max_sequence_length=max_sequence_length,
            num_attention_heads=2,
            num_layers=3,
            type_vocab_size=num_types,
            dict_outputs=True)
        _ = test_network_dict([word_ids, mask, type_ids])
        test_network_dict.set_weights(test_network.get_weights())
        list_outputs = test_network([word_id_data, mask_data, type_id_data])
        dict_outputs = test_network_dict(
            dict(input_word_ids=word_id_data,
                 input_mask=mask_data,
                 input_type_ids=type_id_data))
        self.assertAllEqual(list_outputs[0], dict_outputs["sequence_output"])
        self.assertAllEqual(list_outputs[1], dict_outputs["pooled_output"])
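
These methods would normally be defined inside a tf.test.TestCase subclass (with any parameterized decorators elided from the excerpts) and run via tf.test.main(). A minimal harness is sketched below; the class name is an assumption, not necessarily the name used in the original test file.

# Minimal, assumed harness for running the excerpted methods.
class AlbertTransformerEncoderTest(tf.test.TestCase):
    pass  # the excerpted test methods would live here


if __name__ == "__main__":
    tf.test.main()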