def instantiate_from_cfg(config: BertPretrainerConfig,
                         encoder_network: Optional[tf.keras.Model] = None):
  """Instantiates a BertPretrainer from the config."""
  encoder_cfg = config.encoder
  if encoder_network is None:
    encoder_network = networks.TransformerEncoder(
        vocab_size=encoder_cfg.vocab_size,
        hidden_size=encoder_cfg.hidden_size,
        num_layers=encoder_cfg.num_layers,
        num_attention_heads=encoder_cfg.num_attention_heads,
        intermediate_size=encoder_cfg.intermediate_size,
        activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
        dropout_rate=encoder_cfg.dropout_rate,
        attention_dropout_rate=encoder_cfg.attention_dropout_rate,
        max_sequence_length=encoder_cfg.max_position_embeddings,
        type_vocab_size=encoder_cfg.type_vocab_size,
        initializer=tf.keras.initializers.TruncatedNormal(
            stddev=encoder_cfg.initializer_range))
  if config.cls_heads:
    classification_heads = [
        layers.ClassificationHead(**cfg.as_dict()) for cfg in config.cls_heads
    ]
  else:
    classification_heads = []
  return bert_pretrainer.BertPretrainerV2(
      config.num_masked_tokens,
      mlm_initializer=tf.keras.initializers.TruncatedNormal(
          stddev=encoder_cfg.initializer_range),
      encoder_network=encoder_network,
      classification_heads=classification_heads)
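# A minimal usage sketch, not part of the original source. It assumes
# `BertPretrainerConfig` is a dataclass-style config whose `encoder`,
# `cls_heads`, and `num_masked_tokens` fields (the ones `instantiate_from_cfg`
# reads above) carry usable defaults.
config = BertPretrainerConfig()
pretrainer = instantiate_from_cfg(config)

# Passing a prebuilt encoder skips encoder construction from `config.encoder`;
# the hyperparameters here are illustrative only.
custom_encoder = networks.TransformerEncoder(
    vocab_size=config.encoder.vocab_size, num_layers=2)
pretrainer = instantiate_from_cfg(config, encoder_network=custom_encoder)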
def test_bert_pretrainerv2(self):
  """Validate that the Keras object can be created."""
  # Build a transformer network to use within the BERT trainer.
  vocab_size = 100
  sequence_length = 512
  test_network = networks.TransformerEncoder(
      vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length)

  # Create a BERT trainer with the created network.
  bert_trainer_model = bert_pretrainer.BertPretrainerV2(
      encoder_network=test_network)
  num_token_predictions = 20

  # Create a set of 2-dimensional inputs (the first dimension is implicit).
  word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
  mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
  type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
  lm_mask = tf.keras.Input(shape=(num_token_predictions,), dtype=tf.int32)

  # Invoke the trainer model on the inputs. This causes the layer to be built.
  outputs = bert_trainer_model([word_ids, mask, type_ids, lm_mask])

  # Validate that the outputs are of the expected shape.
  expected_lm_shape = [None, num_token_predictions, vocab_size]
  self.assertAllEqual(expected_lm_shape,
                      outputs['lm_output'].shape.as_list())
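# Illustrative follow-up, not in the original test: the built trainer can also
# be invoked eagerly on concrete tensors rather than symbolic Keras inputs.
# Shapes mirror the test above; the random ids are placeholders, not real data.
def _example_eager_call(bert_trainer_model, vocab_size=100,
                        sequence_length=512, num_token_predictions=20):
  batch_size = 2
  word_ids = tf.random.uniform(
      (batch_size, sequence_length), maxval=vocab_size, dtype=tf.int32)
  mask = tf.ones((batch_size, sequence_length), dtype=tf.int32)
  type_ids = tf.zeros((batch_size, sequence_length), dtype=tf.int32)
  lm_positions = tf.random.uniform(
      (batch_size, num_token_predictions), maxval=sequence_length,
      dtype=tf.int32)
  outputs = bert_trainer_model([word_ids, mask, type_ids, lm_positions])
  # 'lm_output' holds per-position vocabulary logits with shape
  # (batch_size, num_token_predictions, vocab_size).
  return outputs['lm_output']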
def test_multiple_cls_outputs(self):
  """Validate that multiple classification heads yield named outputs."""
  # Build a transformer network to use within the BERT trainer.
  vocab_size = 100
  sequence_length = 512
  hidden_size = 48
  num_layers = 2
  test_network = networks.BertEncoder(
      vocab_size=vocab_size,
      num_layers=num_layers,
      hidden_size=hidden_size,
      max_sequence_length=sequence_length,
      dict_outputs=True)

  # Create a BERT trainer with two named classification outputs.
  bert_trainer_model = bert_pretrainer.BertPretrainerV2(
      encoder_network=test_network,
      classification_heads=[
          layers.MultiClsHeads(inner_dim=5, cls_list=[('foo', 2), ('bar', 3)])
      ])
  num_token_predictions = 20

  # Create a set of 2-dimensional inputs (the first dimension is implicit).
  inputs = dict(
      input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
      input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
      input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
      masked_lm_positions=tf.keras.Input(
          shape=(num_token_predictions,), dtype=tf.int32))

  # Invoke the trainer model on the inputs. This causes the layer to be built.
  outputs = bert_trainer_model(inputs)
  self.assertEqual(outputs['foo'].shape.as_list(), [None, 2])
  self.assertEqual(outputs['bar'].shape.as_list(), [None, 3])
# Exercise all combinations of the three boolean options. The decorator is a
# reconstruction; it assumes the test module imports `itertools` and absl's
# `parameterized`, since the test cannot run without parameter values.
@parameterized.parameters(
    itertools.product((False, True), (False, True), (False, True)))
def test_bert_pretrainerv2(self, dict_outputs, return_all_encoder_outputs,
                           use_customized_masked_lm):
  """Validate that the Keras object can be created."""
  # Build a transformer network to use within the BERT trainer.
  vocab_size = 100
  sequence_length = 512
  hidden_size = 48
  num_layers = 2
  test_network = networks.BertEncoder(
      vocab_size=vocab_size,
      num_layers=num_layers,
      hidden_size=hidden_size,
      max_sequence_length=sequence_length,
      return_all_encoder_outputs=return_all_encoder_outputs,
      dict_outputs=dict_outputs)

  # Create a BERT trainer with the created network.
  if use_customized_masked_lm:
    customized_masked_lm = layers.MaskedLM(
        embedding_table=test_network.get_embedding_table())
  else:
    customized_masked_lm = None
  bert_trainer_model = bert_pretrainer.BertPretrainerV2(
      encoder_network=test_network,
      customized_masked_lm=customized_masked_lm)
  num_token_predictions = 20

  # Create a set of 2-dimensional inputs (the first dimension is implicit).
  inputs = dict(
      input_word_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
      input_mask=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
      input_type_ids=tf.keras.Input(shape=(sequence_length,), dtype=tf.int32),
      masked_lm_positions=tf.keras.Input(
          shape=(num_token_predictions,), dtype=tf.int32))

  # Invoke the trainer model on the inputs. This causes the layer to be built.
  outputs = bert_trainer_model(inputs)
  has_encoder_outputs = dict_outputs or return_all_encoder_outputs
  if has_encoder_outputs:
    self.assertSameElements(
        outputs.keys(),
        ['sequence_output', 'pooled_output', 'mlm_logits', 'encoder_outputs'])
    self.assertLen(outputs['encoder_outputs'], num_layers)
  else:
    self.assertSameElements(
        outputs.keys(), ['sequence_output', 'pooled_output', 'mlm_logits'])

  # Validate that the outputs are of the expected shape.
  expected_lm_shape = [None, num_token_predictions, vocab_size]
  self.assertAllEqual(expected_lm_shape,
                      outputs['mlm_logits'].shape.as_list())
  expected_sequence_output_shape = [None, sequence_length, hidden_size]
  self.assertAllEqual(expected_sequence_output_shape,
                      outputs['sequence_output'].shape.as_list())
  expected_pooled_output_shape = [None, hidden_size]
  self.assertAllEqual(expected_pooled_output_shape,
                      outputs['pooled_output'].shape.as_list())
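# Sketch of downstream use, an assumption rather than code from this file:
# the 'mlm_logits' output typically feeds a weighted masked-LM loss. `labels`
# holds the true token ids at the masked positions and `label_weights` zeroes
# out padded prediction slots; both argument names are hypothetical.
def _example_mlm_loss(mlm_logits, labels, label_weights):
  # mlm_logits: (batch, num_token_predictions, vocab_size) float logits.
  per_position_loss = tf.keras.losses.sparse_categorical_crossentropy(
      labels, mlm_logits, from_logits=True)
  # Average over real (unpadded) prediction positions only.
  numerator = tf.reduce_sum(per_position_loss * label_weights)
  denominator = tf.reduce_sum(label_weights) + 1e-5
  return numerator / denominator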
def instantiate_pretrainer_from_cfg(
    config: BertPretrainerConfig,
    encoder_network: Optional[tf.keras.Model] = None
) -> bert_pretrainer.BertPretrainerV2:
  """Instantiates a BertPretrainer from the config."""
  encoder_cfg = config.encoder
  if encoder_network is None:
    encoder_network = encoders.instantiate_encoder_from_cfg(encoder_cfg)
  return bert_pretrainer.BertPretrainerV2(
      mlm_activation=tf_utils.get_activation(encoder_cfg.hidden_activation),
      mlm_initializer=tf.keras.initializers.TruncatedNormal(
          stddev=encoder_cfg.initializer_range),
      encoder_network=encoder_network,
      classification_heads=instantiate_classification_heads_from_cfgs(
          config.cls_heads))
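# Hedged example of driving the factory with classification heads. The config
# class names (`TransformerEncoderConfig`, `ClsHeadConfig`) and their fields
# are assumptions about the surrounding config module, shown for illustration.
config = BertPretrainerConfig(
    encoder=encoders.TransformerEncoderConfig(vocab_size=30522),
    cls_heads=[ClsHeadConfig(
        inner_dim=768, num_classes=2, name='next_sentence')])
pretrainer = instantiate_pretrainer_from_cfg(config)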
def test_v2_serialize_deserialize(self):
  """Validate that the BERT trainer can be serialized and deserialized."""
  # Build a transformer network to use within the BERT trainer.
  test_network = networks.BertEncoderV2(vocab_size=100, num_layers=2)

  # Create a BERT trainer with the created network. (Note that all the args
  # are different, so we can catch any serialization mismatches.)
  bert_trainer_model = bert_pretrainer.BertPretrainerV2(
      encoder_network=test_network)

  # Create another BERT trainer via serialization and deserialization.
  config = bert_trainer_model.get_config()
  new_bert_trainer_model = bert_pretrainer.BertPretrainerV2.from_config(config)

  # Validate that the config can be forced to JSON.
  _ = new_bert_trainer_model.to_json()

  # If the serialization was successful, the new config should match the old.
  self.assertAllEqual(bert_trainer_model.get_config(),
                      new_bert_trainer_model.get_config())
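# Sketch, not from the original tests: get_config()/from_config() round-trips
# only the architecture. Weights can be copied between the two models with a
# `tf.train.Checkpoint`; the directory path is illustrative.
def _example_checkpoint_roundtrip(bert_trainer_model, new_bert_trainer_model):
  save_path = tf.train.Checkpoint(model=bert_trainer_model).save(
      '/tmp/bert_pretrainer_ckpt')
  status = tf.train.Checkpoint(model=new_bert_trainer_model).restore(save_path)
  # Matching succeeds once the restored model's variables have been created,
  # e.g. after the model has been called on inputs.
  status.assert_existing_objects_matched()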