# ---- Example 1: evaluate a BERT NER model from a checkpoint ----
    def predict(self, inputs, is_training=False):
        """Run a forward pass (inference mode by default) and return the raw model output."""
        return self(inputs, is_training=is_training)


# Instantiate the BERT-based NER model from the hyper-parameter object.
model = BERT_NER(param)

# Leading dimension 3 presumably stacks token ids / masks / segment ids
# -- TODO confirm against BERT_NER's expected input layout.
model.build(input_shape=(3, param.batch_size, param.maxlen))

model.summary()

# Serialize the "valid" split to TFRecords; check_exist=True would write only
# on the first call (here False, so records are rewritten on every run).
writer = TFWriter(param.maxlen, vocab_file,
                    modes=["valid"], check_exist=False)

# Loader that yields (inputs, labels) batches for 3 epochs.
ner_load = TFLoader(param.maxlen, param.batch_size, epoch=3)

# Metrics
# NOTE(review): "precsionscore" / "accuarcyscore" are misspelled, but the
# names are kept as-is for compatibility with code that references them.
f1score = Metric.SparseF1Score(average="macro")
precsionscore = Metric.SparsePrecisionScore(average="macro")
recallscore = Metric.SparseRecallScore(average="macro")
accuarcyscore = Metric.SparseAccuracy()

# Restore the model weights from the latest checkpoint in ./save
# (restore is a no-op if no checkpoint exists there).
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint('./save'))
# For test model
Batch = 0        # batch counter for the evaluation loop that follows
f1s = []         # per-batch F1 scores
precisions = []  # per-batch precision scores
recalls = []     # per-batch recall scores
# ---- Example 2: evaluate a BERT classifier from a checkpoint ----

# Instantiate the BERT-based model from the hyper-parameter object.
model = BERT_NER(param)

# Leading dimension 3 presumably stacks token ids / masks / segment ids
# -- TODO confirm against BERT_NER's expected input layout.
model.build(input_shape=(3, param.batch_size, param.maxlen))

model.summary()

# Serialize the "valid" split for the classification task; with
# check_exist=True the TFRecords are written only on the first call.
writer = TFWriter(param.maxlen,
                  vocab_file,
                  modes=["valid"],
                  task='cls',
                  check_exist=True)

# Loader for the classification task (default epoch count).
load = TFLoader(param.maxlen, param.batch_size, task='cls')

# Metrics
# NOTE(review): "precsionscore" / "accuarcyscore" are misspelled, but the
# names are kept as-is for compatibility with code that references them.
f1score = Metric.SparseF1Score(average="macro")
precsionscore = Metric.SparsePrecisionScore(average="macro")
recallscore = Metric.SparseRecallScore(average="macro")
accuarcyscore = Metric.SparseAccuracy()

# Restore the model weights from the latest checkpoint in ./save
# (restore is a no-op if no checkpoint exists there).
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint('./save'))
# For train model

Batch = 0        # batch counter for the loop that follows
f1s = []         # per-batch F1 scores
precisions = []  # per-batch precision scores
# ---- Example 3: train a TextCNN classifier ----
# Hyper-parameters for the TextCNN classifier.
maxlen = 128                              # maximum sequence length (tokens)
batch_size = 64
embedding_dims = 100                      # word-embedding dimensionality
vocab_file = "Input/vocab.txt"
word2vec = "./corpus/word2vec.vector"     # pretrained vectors (unused below)
class_num = 2                             # binary classification
vocab_size = 30522  # line in vocab.txt

# Serialize the "train" split for the classification task; check_exist=True
# would write only on the first call (here False, records rewritten each run).
writer = TFWriter(maxlen,
                  vocab_file,
                  modes=["train"],
                  task='cls',
                  check_exist=False)

# Loader that yields classification batches for 3 epochs.
load = TFLoader(maxlen, batch_size, task='cls', epoch=3)

# Optionally initialize the embedding layer from pretrained word2vec vectors
# (disabled here; see the commented-out init_weights argument below).
# init_weights = writer.get_init_weight(word2vec,
#                                       vocab_size,
#                                       embedding_dims)

# Build the TextCNN with randomly-initialized, trainable embeddings.
model = TextCNN.TextCNN(
    maxlen,
    vocab_size,
    embedding_dims,
    class_num,
    # init_weights,
    weights_trainable=True)

# model = TextCNN.TextCNN(maxlen, vocab_size, embedding_dims, class_num)
# ---- Example 4: fine-tune a BERT classifier from pretrained weights ----
# Loss for integer-labelled classification; from_logits=False means the model
# is expected to output probabilities (e.g. after a softmax) -- TODO confirm.
mask_sparse_categotical_loss = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=False)
# Initialize the model parameters from a pretrained BERT checkpoint,
# including the pooler layer.
bert_init_weights_from_checkpoint(model,
                                  model_path,
                                  param.num_hidden_layers,
                                  pooler=True)

# Serialize the "train" split for the classification task; with
# check_exist=True the TFRecords are written only on the first call.
writer = TFWriter(param.maxlen,
                  vocab_file,
                  modes=["train"],
                  task='cls',
                  check_exist=True)

# Loader that yields classification batches for 5 epochs.
load = TFLoader(param.maxlen, param.batch_size, task='cls', epoch=5)

# Train the model, logging to TensorBoard under ./tensorboard.
summary_writer = tf.summary.create_file_writer("./tensorboard")

# Metrics
# NOTE(review): "precsionscore" / "accuarcyscore" are misspelled, but the
# names are kept as-is for compatibility with code that references them.
f1score = Metric.SparseF1Score(average="macro")
precsionscore = Metric.SparsePrecisionScore(average="macro")
recallscore = Metric.SparseRecallScore(average="macro")
accuarcyscore = Metric.SparseAccuracy()

# Checkpoint object used below to save the model during training.
checkpoint = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(checkpoint,
                                     directory="./save",