Example #1
    def predict(self, inputs, is_training=False):
        loss, predict = self(inputs, is_training)
        return predict


model = BERT_NER(param)

model.build(input_shape=(4, param.batch_size, param.maxlen))
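# The leading 4 in input_shape presumably corresponds to the four stacked inputs the
# model consumes during training ([X, token_type_id, input_mask, Y], cf. Example #5).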

model.summary()

# Build the optimizers

optimizer_bert = optim.AdamWarmup(
    learning_rate=2e-5,  # important parameter
    decay_steps=10000,  # important parameter
    warmup_steps=1000,
)
optimizer_crf = optim.AdamWarmup(
    learning_rate=1e-3,
    decay_steps=10000,  # important parameter
    warmup_steps=1000,
)
#
# Initialize the parameters
bert_init_weights_from_checkpoint(model,
                                  model_path,
                                  param.num_hidden_layers,
                                  pooler=False)

# Write the data; setting check_exist=True ensures it is written only on the first call
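# A minimal sketch of the step the comment above refers to, following the TFWriter /
# TFLoader pattern of Example #5; the exact arguments (vocab_file, epoch) are assumptions.
writer = TFWriter(param.maxlen, vocab_file,
                  modes=["train"], check_exist=False)
ner_load = TFLoader(param.maxlen, param.batch_size, epoch=5)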
Example #2
model = TextCNN.TextCNN(
    maxlen,
    vocab_size,
    embedding_dims,
    class_num,
    # init_weights,
    weights_trainable=True)

# model = TextCNN.TextCNN(maxlen, vocab_size, embedding_dims, class_num)

# Build the optimizer
# lr = tf.keras.optimizers.schedules.PolynomialDecay(0.01, decay_steps=18000,
#                                                    end_learning_rate=0.0001,
#                                                    cycle=False)

optimizer = optim.AdamWarmup(learning_rate=0.01, decay_steps=15000)

# Build the loss function
mask_sparse_categotical_loss = Losess.MaskSparseCategoricalCrossentropy(
    from_logits=False)

f1score = Metric.SparseF1Score(average="macro")
precsionscore = Metric.SparsePrecisionScore(average="macro")
recallscore = Metric.SparseRecallScore(average="macro")
accuarcyscore = Metric.SparseAccuracy()

# Save the model
checkpoint = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(checkpoint,
                                     directory="./save",
                                     checkpoint_name="model.ckpt",
                                     max_to_keep=3)
Example #3
    def call(self, inputs, training=True):
        logits = self.bilstm(inputs, training)  # [B,T,2H]
        logits, _ = self.att(logits)
        logits = self.dense(logits)
        return logits

    def predict(self, inputs, training=False):
        out = self(inputs, training)
        return out



model = BilstmAttention(maxlen, vocab_size, embedding_dims, hidden_dim)

# Build the optimizer
optimizer = optim.AdamWarmup(0.01, decay_steps=15000)
# Build the loss function
mask_sparse_categotical_loss = Losess.MaskSparseCategoricalCrossentropy(from_logits=False)

f1score = Metric.SparseF1Score(average="macro")
precsionscore = Metric.SparsePrecisionScore(average="macro")
recallscore = Metric.SparseRecallScore(average="macro")
accuarcyscore = Metric.SparseAccuracy()

# Save the model
checkpoint = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(checkpoint, directory="./save",
                                     checkpoint_name="model.ckpt",
                                     max_to_keep=3)
Batch = 0
for X, token_type_id, input_mask, Y in load.load_train():
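    # A minimal sketch of a typical loop body, modelled on the BERT loop in Example #5;
    # whether BilstmAttention consumes only X or the full (X, token_type_id, input_mask)
    # tuple is an assumption.
    with tf.GradientTape() as tape:
        predict = model(X, training=True)
        loss = mask_sparse_categotical_loss(Y, predict)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
    if Batch % 100 == 0:
        print("Batch:{}\tloss:{:.4f}".format(Batch, loss.numpy()))
        manager.save(checkpoint_number=Batch)
    Batch += 1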
Example #4
    def predict(self, inputs, is_training=False):
        output = self(inputs, is_training=is_training)
        return output


model = ALBERT_NER(param)

model.build(input_shape=(3, param.batch_size, param.maxlen))

model.summary()

# If batch_size is scaled up by a factor of k, the learning rate should be scaled up by a factor of k as well
# optimizer_bert = optim.AdamWarmup(learning_rate=3e-6,  # important parameter
#                                   decay_steps=0,  # important parameter
#                                   warmup_steps=0)  # 0.1
optimizer_bert = optim.AdamWarmup(learning_rate=1e-6, decay_steps=40000)  # 0.1
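# Illustrative numbers for the scaling rule above (not taken from this snippet): if
# learning_rate=1e-6 was tuned for batch_size=32, doubling the batch size to 64 would
# suggest learning_rate=2e-6.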

sparse_categotical_loss = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=False)

# Initialize the parameters
albert_init_weights_from_checkpoint(model,
                                    model_path,
                                    param.num_hidden_layers,
                                    pooler=True)

# Write the data; setting check_exist=True ensures it is written only on the first call
writer = TFWriter(param.maxlen,
                  vocab_file,
                  modes=["train"],
                  check_exist=False)
Example #5
def main(_):
    model = BERT_NER(param)

    model.build(input_shape=(4, param.batch_size, param.maxlen))

    model.summary()

# Build the optimizers

    optimizer_bert = optim.AdamWarmup(learning_rate=2e-5,  # important parameter
                                      decay_steps=10000,  # important parameter
                                      warmup_steps=1000)
    optimizer_crf = optim.AdamWarmup(learning_rate=1e-3,
                                     decay_steps=10000,  # important parameter
                                     warmup_steps=1000,
                                     )
    #
    # Initialize the parameters
    bert_init_weights_from_checkpoint(model,
                                      model_path,
                                      param.num_hidden_layers,
                                      pooler=False)

    # Write the data; setting check_exist=True ensures it is written only on the first call
    writer = TFWriter(param.maxlen, vocab_file, data_dir=FLAGS.data_dir,
                      modes=["train"], check_exist=False)

    ner_load = TFLoader(param.maxlen, param.batch_size, data_dir=FLAGS.data_dir, epoch=5)

    # Train the model
    # Use TensorBoard for logging
    summary_writer = tf.summary.create_file_writer("./tensorboard")

    # Metrics
    f1score = Metric.SparseF1Score(average="macro", predict_sparse=True)
    precsionscore = Metric.SparsePrecisionScore(average="macro", predict_sparse=True)
    recallscore = Metric.SparseRecallScore(average="macro", predict_sparse=True)
    accuarcyscore = Metric.SparseAccuracy(predict_sparse=True)

    # 保存模型
    checkpoint = tf.train.Checkpoint(model=model)
    manager = tf.train.CheckpointManager(checkpoint, directory="./save",
                                         checkpoint_name="model.ckpt",
                                         max_to_keep=3)
    # Training loop
    Batch = 0
    for X, token_type_id, input_mask, Y in ner_load.load_train():
        with tf.GradientTape(persistent=True) as tape:
            loss, predict = model([X, token_type_id, input_mask, Y])

            f1 = f1score(Y, predict)
            precision = precsionscore(Y, predict)
            recall = recallscore(Y, predict)
            accuracy = accuarcyscore(Y, predict)
            if Batch % 101 == 0:
                print("Batch:{}\tloss:{:.4f}".format(Batch, loss.numpy()))
                print("Batch:{}\tacc:{:.4f}".format(Batch, accuracy))
                print("Batch:{}\tprecision{:.4f}".format(Batch, precision))
                print("Batch:{}\trecall:{:.4f}".format(Batch, recall))
                print("Batch:{}\tf1score:{:.4f}".format(Batch, f1))

                print("Sentence", writer.convert_id_to_vocab(tf.reshape(X, [-1]).numpy()))
                print("predict", writer.convert_id_to_label(tf.reshape(predict, [-1]).numpy()))
                print("label", writer.convert_id_to_label(tf.reshape(Y, [-1]).numpy()))
                manager.save(checkpoint_number=Batch)

            with summary_writer.as_default():
                tf.summary.scalar("loss", loss, step=Batch)
                tf.summary.scalar("acc", accuracy, step=Batch)
                tf.summary.scalar("f1", f1, step=Batch)
                tf.summary.scalar("precision", precision, step=Batch)
                tf.summary.scalar("recall", recall, step=Batch)

        grads_bert = tape.gradient(loss, model.bert.variables + model.dense.variables)
        grads_crf = tape.gradient(loss, model.crf.variables)
        optimizer_bert.apply_gradients(grads_and_vars=zip(grads_bert, model.bert.variables + model.dense.variables))
        optimizer_crf.apply_gradients(grads_and_vars=zip(grads_crf, model.crf.variables))
        Batch += 1
    
    model.save("model_save")
Example #6
drop_rate = 0.5
epoch = 200
early_stopping = 100
penalty = 0.01

loader = graphloader.GCNLoader(dataset="citeseer",
                               loop=True,
                               features_norm=True)

features, adj, y_train, y_val, y_test, train_mask, val_mask, test_mask = loader.load(
)
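# The masks are boolean node masks over the single citeseer graph: they select which
# nodes contribute to the training / validation / test losses respectively.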

model = GIN.GINLayer(hidden_dim, num_class, drop_rate)

# optimizer = tf.keras.optimizers.Adam(0.01)
optimizer = optim.AdamWarmup(0.01, 500, weight_decay_rate=0)
crossentropy = Losess.MaskCategoricalCrossentropy()
accscore = Metric.MaskAccuracy()
stop_monitor = EarlyStopping(monitor="loss", patience=early_stopping)

# ---------------------------------------------------------
# Training loop
for p in range(epoch):
    t = time.time()
    with tf.GradientTape() as tape:
        predict = model(features, adj, training=True)
        loss = crossentropy(y_train, predict, train_mask)
        loss += penalty * tf.nn.l2_loss(model.variables[0])

    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.trainable_variables))
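    # A minimal sketch of the usual rest of the epoch: evaluate on the validation split
    # and feed the result to the early-stopping monitor. The stop_monitor call below is
    # a hypothetical usage, not taken from this example.
    val_predict = model(features, adj, training=False)
    val_loss = crossentropy(y_val, val_predict, val_mask)
    print("epoch:{}\ttrain_loss:{:.4f}\tval_loss:{:.4f}\ttime:{:.2f}s".format(
        p, loss.numpy(), val_loss.numpy(), time.time() - t))
    if stop_monitor(val_loss.numpy()):  # hypothetical API: assume True means "stop"
        break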
Example #7
    def predict(self, inputs, is_training=False):
        output = self(inputs, is_training=is_training)
        return output


model = ALBERT_NER(param)

model.build(input_shape=(3, param.batch_size, param.maxlen))

model.summary()

# If batch_size is scaled up by a factor of k, the learning rate should be scaled up by a factor of k as well
# optimizer_bert = optim.AdamWarmup(learning_rate=2e-5,  # important parameter
#                                   decay_steps=10000,  # important parameter
#                                   warmup_steps=1000)  # 0.1
optimizer_bert = optim.AdamWarmup(learning_rate=1e-5)  # 0.1

sparse_categotical_loss = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=False)

# Initialize the parameters
albert_init_weights_from_checkpoint(model,
                                    model_path,
                                    param.num_hidden_layers,
                                    pooler=False)

# Write the data; setting check_exist=True ensures it is written only on the first call
writer = TFWriter(param.maxlen,
                  vocab_file,
                  modes=["train"],
                  check_exist=False)