Example #1
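This example reuses a single pre-trained BERT encoder and attaches a masked-language-model (MLM) head to it: the encoder's final-layer outputs and its embedding table are handed to get_masked_lm_output, which computes the MLM loss and prediction probabilities.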
    def create_model(self, input_ids, input_mask, segment_ids,
                     masked_lm_positions, masked_lm_ids, masked_lm_weights,
                     is_training, bert_config):
        """Create the masked language model."""
        # A single BERT model is reused here; nothing else is added.
        # `modeling` is the modeling.py module from Google's BERT repository.
        model = modeling.BertModel(config=bert_config,
                                   is_training=is_training,
                                   input_ids=input_ids,
                                   input_mask=input_mask,
                                   token_type_ids=segment_ids,
                                   use_one_hot_embeddings=False)

        # Note the two BERT outputs that are passed on:
        # model.get_sequence_output() is the output of BERT's last encoder
        #   layer, shape [batch_size, seq_len, hidden_size].
        # model.get_embedding_table() is BERT's word embedding table, reused
        #   as the output projection of the LM head.
        masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs, probs = self.get_masked_lm_output(
            bert_config, model.get_sequence_output(),
            model.get_embedding_table(), masked_lm_positions, masked_lm_ids,
            masked_lm_weights)

        return masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs, probs
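The helper self.get_masked_lm_output is not shown in this example. In Google's BERT reference implementation (run_pretraining.py) the corresponding function looks roughly like the sketch below; the author's version is a method rather than a module-level function, the import path for modeling is an assumption, and the fourth return value probs is not part of the reference code and is assumed here to be a plain softmax over the same logits. Treat this as an illustration under those assumptions, not the author's exact code.

import tensorflow as tf    # TensorFlow 1.x API, as used by the BERT reference code
from bert import modeling  # assumed import path; point it at BERT's modeling.py


def gather_indexes(sequence_tensor, positions):
    """Gathers the hidden vectors at the masked positions over a minibatch."""
    sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
    batch_size, seq_length, width = sequence_shape

    flat_offsets = tf.reshape(
        tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
    flat_positions = tf.reshape(positions + flat_offsets, [-1])
    flat_sequence_tensor = tf.reshape(sequence_tensor,
                                      [batch_size * seq_length, width])
    return tf.gather(flat_sequence_tensor, flat_positions)


def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
                         label_ids, label_weights):
    """Loss, per-example loss, log-probs and probs for the masked LM head."""
    # Keep only the hidden states at the masked positions.
    input_tensor = gather_indexes(input_tensor, positions)

    with tf.variable_scope("cls/predictions"):
        # Small transform (dense + layer norm) before projecting to the vocabulary.
        with tf.variable_scope("transform"):
            input_tensor = tf.layers.dense(
                input_tensor,
                units=bert_config.hidden_size,
                activation=modeling.get_activation(bert_config.hidden_act),
                kernel_initializer=modeling.create_initializer(
                    bert_config.initializer_range))
            input_tensor = modeling.layer_norm(input_tensor)

        # The output projection reuses the input embedding table (weight tying),
        # which is why get_embedding_table() is passed in as output_weights.
        output_bias = tf.get_variable(
            "output_bias", shape=[bert_config.vocab_size],
            initializer=tf.zeros_initializer())
        logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=-1)
        probs = tf.nn.softmax(logits, axis=-1)  # assumed: the extra `probs` return value

        label_ids = tf.reshape(label_ids, [-1])
        label_weights = tf.reshape(label_weights, [-1])
        one_hot_labels = tf.one_hot(
            label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

        # Weighted cross-entropy over the masked positions only; padding
        # positions carry weight 0 and do not contribute to the loss.
        per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
        numerator = tf.reduce_sum(label_weights * per_example_loss)
        denominator = tf.reduce_sum(label_weights) + 1e-5
        loss = numerator / denominator

    return loss, per_example_loss, log_probs, probs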