Code example #1 (score: 0)
File: bert_ner_test.py — Project: youngjt/fennlp
 def call(self, inputs, is_training=True):
     """Run the BERT encoder and return per-token class probabilities.

     Args:
         inputs: batch of encoded inputs, forwarded unchanged to ``self.bert``.
         is_training: whether the BERT encoder runs in training mode.

     Returns:
         Softmax-normalised scores reshaped to
         ``(self.batch_size, self.maxlen, num_classes)``.
     """
     # Encode the batch and take the token-level representation.
     # NOTE(review): assumed (batch, sequence, 768) per the original note —
     # confirm against the BERT config.
     encoder = self.bert(inputs, is_training)
     hidden = encoder.get_sequence_output()
     # Project to label logits, then pin the (batch, maxlen, classes) layout.
     logits = tf.reshape(self.dense(hidden), [self.batch_size, self.maxlen, -1])
     # Normalise over the class axis.
     return tf.math.softmax(logits, axis=-1)
Code example #2 (score: 0)
 def call(self, inputs, is_training=True):
     """Forward pass for BERT + CRF sequence labelling.

     Args:
         inputs: a single tensor stacking ``[input_ids, token_type_ids,
             input_mask, labels]`` along axis 0.
         is_training: whether the BERT encoder runs in training mode.

     Returns:
         Tuple ``(loss, predict)`` — the mean negative log-likelihood under
         the CRF, and the Viterbi-decoded label sequence.
     """
     # Unpack the four stacked components; each is squeezed back to its own
     # batch tensor and normalised to int64.
     input_ids, token_type_ids, input_mask, labels = (
         tf.cast(tf.squeeze(part, axis=0), tf.int64)
         for part in tf.split(inputs, 4, 0)
     )
     # Encode with BERT and project token representations to tag logits.
     # NOTE(review): assumed (batch, sequence, 768) per the original note.
     encoder = self.bert([input_ids, token_type_ids, input_mask], is_training)
     hidden = encoder.get_sequence_output()
     logits = tf.reshape(self.dense(hidden), [self.batch_size, self.maxlen, -1])
     # Real token count per sentence; shared by CRF scoring and decoding
     # (computed once instead of twice — same value either way).
     seq_lens = tf.reduce_sum(input_mask, 1)
     # CRF loss: mean negative log-likelihood over the batch.
     log_likelihood, transition = self.crf(logits, labels, sequence_lengths=seq_lens)
     loss = tf.math.reduce_mean(-log_likelihood)
     # Viterbi decode; the path score is not used by callers.
     predict, _viterbi_score = self.crf.crf_decode(logits, transition,
                                                   sequence_length=seq_lens)
     return loss, predict