Code example #1 (score: 0)
File: bert_ner_crf_test.py — Project: zyq11223/NLPGNN

# Instantiate the NER model from the shared hyper-parameter object.
model = BERT_NER(param)

# NOTE(review): the leading 4 presumably matches four model inputs
# (e.g. token ids, segment ids, attention mask, labels for the CRF head)
# — confirm against BERT_NER.build.
model.build(input_shape=(4, param.batch_size, param.maxlen))

model.summary()

# Write the validation data to TFRecord; check_exist=True makes the writer
# skip the write if the records already exist (i.e. only writes on first run).
writer = TFWriter(param.maxlen, vocab_file, modes=["valid"], check_exist=True)

ner_load = TFLoader(param.maxlen, param.batch_size)

# Metrics — predict_sparse=True indicates the predictions are already label
# ids rather than per-class scores (presumably CRF decode output; verify).
f1score = Metric.SparseF1Score("macro", predict_sparse=True)
precsionscore = Metric.SparsePrecisionScore("macro", predict_sparse=True)
recallscore = Metric.SparseRecallScore("macro", predict_sparse=True)
accuarcyscore = Metric.SparseAccuracy(predict_sparse=True)

# Restore the latest trained weights from ./save for evaluation.
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint('./save'))
# For test model
# print(dir(checkpoint))
Batch = 0
# Per-batch metric accumulators; averaged after the loop (continuation truncated).
f1s = []
precisions = []
recalls = []
accuracys = []
for X, token_type_id, input_mask, Y in ner_load.load_valid():
    predict = model.predict([X, token_type_id, input_mask,
Code example #2 (score: 0)
# Instantiate the NER model from the shared hyper-parameter object.
model = BERT_NER(param)

# NOTE(review): the leading 3 presumably matches the three model inputs
# used below (X, token_type_id, input_mask) — confirm against BERT_NER.build.
model.build(input_shape=(3, param.batch_size, param.maxlen))

model.summary()

# Write the validation data to TFRecord.
# NOTE(review): the original comment claimed check_exist=True ("only write on
# first call"), but the call passes check_exist=False — presumably forcing a
# rewrite every run; confirm which behavior is intended.
writer = TFWriter(param.maxlen, vocab_file,
                    modes=["valid"], check_exist=False)

ner_load = TFLoader(param.maxlen, param.batch_size, epoch=3)

# Metrics — no predict_sparse flag here, so predictions are presumably dense
# per-class scores argmax'd by the metric (see commented argmax hint below).
f1score = Metric.SparseF1Score(average="macro")
precsionscore = Metric.SparsePrecisionScore(average="macro")
recallscore = Metric.SparseRecallScore(average="macro")
accuarcyscore = Metric.SparseAccuracy()

# Restore the latest trained weights from ./save for evaluation.
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint('./save'))
# For test model
Batch = 0
# Per-batch metric accumulators; averaged after the loop (continuation truncated).
f1s = []
precisions = []
recalls = []
accuracys = []
# Iterate validation batches: inputs X (token ids), segment ids, attention
# mask, and gold labels Y.
for X, token_type_id, input_mask, Y in ner_load.load_valid():
    predict = model.predict([X, token_type_id, input_mask])  # [batch_size, max_length,label_size]
    # predict = tf.argmax(output, -1)
Code example #3 (score: 0)

# Instantiate the NER model from the shared hyper-parameter object.
model = BERT_NER(param)

# NOTE(review): the leading 3 presumably matches the three model inputs
# used below (X, token_type_id, input_mask) — confirm against BERT_NER.build.
model.build(input_shape=(3, param.batch_size, param.maxlen))

model.summary()

# Write the validation data to TFRecord; check_exist=True makes the writer
# skip the write if the records already exist (i.e. only writes on first run).
writer = TFWriter(param.maxlen, vocab_file, modes=["valid"], check_exist=True)

ner_load = TFLoader(param.maxlen, param.batch_size)

# Metrics (macro-averaged over label classes).
f1score = Metric.SparseF1Score("macro")
precsionscore = Metric.SparsePrecisionScore("macro")
recallscore = Metric.SparseRecallScore("macro")
accuarcyscore = Metric.SparseAccuracy()

# Restore the latest trained weights from ./save for evaluation.
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint('./save'))
# For test model
# print(dir(checkpoint))
Batch = 0
# Unlike the other variants, this one collects raw predictions, gold labels,
# and masks for a single corpus-level evaluation (continuation truncated).
predicts = []
true_label = []
masks = []
# Iterate validation batches: inputs X (token ids), segment ids, attention
# mask, and gold labels Y.
for X, token_type_id, input_mask, Y in ner_load.load_valid():
    predict = model.predict([X, token_type_id, input_mask
                             ])  # [batch_size, max_length,label_size]