Example 1
from Utils import *
import time
from tqdm import tqdm
from conlleval import evaluate

tf.config.set_soft_device_placement(True)
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

recordFileName = 'test_record01'
create_record_dirs(recordFileName)
epochNum = get_epochNum(recordFileName)  # get the epoch count recorded so far

# configure model parameters and checkpoints
configers = conf()
myModel = Model_NER(configers)
ckpt_dir_inner = os.path.join(recordFileName, 'checkpoints')
ckpt_dir_theta_0 = os.path.join(recordFileName, 'theta_0')
ckpt_path_theta_0 = os.path.join(ckpt_dir_theta_0, 'ckpt_theta_0')
ckpt_dir_theta_t = os.path.join(recordFileName, 'theta_t')
ckpt_path_theta_t = os.path.join(ckpt_dir_theta_t, 'ckpt_theta_t')
checkpoint = tf.train.Checkpoint(optimizer=myModel.optimizer, model=myModel)
ckpt_manager = tf.train.CheckpointManager(checkpoint, directory=ckpt_dir_inner, max_to_keep=5)

if epochNum == 0:
    myModel.save_weights(ckpt_path_theta_0)
else:
    myModel.load_weights(ckpt_path_theta_t)

# configure TensorBoard
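# Hypothetical sketch, not part of the original example: the training loop is omitted above.
# The theta_0/theta_t checkpoints suggest a Reptile-style outer update, roughly as below
# (assuming Model_NER is a tf.keras.Model; the inner-loop task training that produces theta_t
# is elided, and the outer step size `epsilon` is an assumed value).
theta_0_weights = myModel.get_weights()          # meta-initialization
# ... run the inner-loop gradient steps on one task here; the weights become theta_t ...
theta_t_weights = myModel.get_weights()          # task-adapted weights
epsilon = 0.1                                    # assumed outer (meta) learning rate
myModel.set_weights([w0 + epsilon * (wt - w0)
                     for w0, wt in zip(theta_0_weights, theta_t_weights)])
myModel.save_weights(ckpt_path_theta_0)          # persist the updated meta-initialization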
Example 2
train_batches = get_batches_v3(train_data_path_list, batch_size=200, batch_num=train_batch_num, taskname=task_samples)
# get the validation data: the validation set has two parts, a validation-train set and a validation-test set
vali_train_data_paths = []
vali_test_data_path = ['data/CLUE_BIOES_dev']
vali_test_batches = get_batches_v3(train_data_path_list=vali_test_data_path, batch_size=200, batch_num=1,
                                    taskname=task_samples)
print('batches is ready!\n'
      '-----------------------------------------------------------\n')

task = 'CLUE_ALL'
recordFileName = 'record_bilstm_' + str(train_batch_num * 3) + 'b-' + task
create_record_dirs(recordFileName)
epochNum = get_epochNum(recordFileName)  # get the epoch count recorded so far

# configure model parameters and checkpoints
configers = conf(choose_mod='BiLSTM')
myModel_instance = Model_NER(configers)
ckpt_dir = os.path.join(recordFileName, 'checkpoints')
checkpoint = tf.train.Checkpoint(optimizer=myModel_instance.optimizer, model=myModel_instance)
ckpt_manager = tf.train.CheckpointManager(checkpoint, directory=ckpt_dir, max_to_keep=5)

if epochNum != 0:
    # load the latest checkpoint
    latest_ckpt = tf.train.latest_checkpoint(ckpt_dir)
    ckp = tf.train.Checkpoint(model=myModel_instance)
    ckp.restore(latest_ckpt)

# configure TensorBoard
# log_dir_train = recordFileName + '/tensorboard/' + datetime.datetime.now().strftime("%Y%m%d-%H%M") + '-train'
log_dir_train = recordFileName + '/tensorboard/' + '-train'
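# Hypothetical sketch, not part of the original example: how log_dir_train would typically be
# consumed with the TF2 summary API (the metric name and value below are placeholders).
train_summary_writer = tf.summary.create_file_writer(log_dir_train)
with train_summary_writer.as_default():
    tf.summary.scalar('train_loss', 0.0, step=0)  # the real loop would log the loss once per epoch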
tf.config.set_soft_device_placement(True)
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

mod = 'BiLSTM'
inner_iters = 8
e = 0.1
e_final = 0.001
batch_size = 200
recordFileName = '_'.join(['record_3L_reptile+' + mod, str(inner_iters) + 'i', str(e) + '-' + str(e_final) + 'e', str(batch_size) + 'bs'])
create_record_dirs(recordFileName)
epochNum = get_epochNum(recordFileName)  # get the epoch count recorded so far

# configure model parameters and checkpoints
configers = conf(choose_mod=mod)
myModel = Model_NER(configers)
ckpt_dir_inner = os.path.join(recordFileName, 'checkpoints')
ckpt_dir_theta_0 = os.path.join(recordFileName, 'theta_0')
ckpt_path_theta_0 = os.path.join(ckpt_dir_theta_0, 'ckpt_theta_0')
# ckpt_dir_theta_t = os.path.join(recordFileName, 'theta_t')
# ckpt_path_theta_t = os.path.join(ckpt_dir_theta_t, 'ckpt_theta_t')
checkpoint = tf.train.Checkpoint(optimizer=myModel.optimizer, model=myModel)
ckpt_manager = tf.train.CheckpointManager(checkpoint, directory=ckpt_dir_inner, max_to_keep=5)

if epochNum == 0:
    myModel.save_weights(ckpt_path_theta_0)
else:
    myModel.load_weights(ckpt_path_theta_0)

# configure TensorBoard
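# Hypothetical sketch, not part of the original example: the record name above encodes an outer
# learning rate annealed from e=0.1 down to e_final=0.001. A simple linear schedule over the
# meta-training epochs could look like this (total_meta_epochs is an assumed name and value).
total_meta_epochs = 100
e_current = e + (e_final - e) * min(epochNum, total_meta_epochs) / total_meta_epochs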
from Utils import *
import time
from tqdm import tqdm
from conlleval import evaluate

tf.config.set_soft_device_placement(True)
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

recordFileName = 'record_reptile+idcnn_20i_0.1e'
create_record_dirs(recordFileName)
epochNum = get_epochNum(recordFileName)  # get the epoch count recorded so far

# configure model parameters and checkpoints
configers = conf(choose_mod='IDCNN')
myModel = Model_NER(configers)
ckpt_dir_inner = os.path.join(recordFileName, 'checkpoints')
ckpt_dir_theta_0 = os.path.join(recordFileName, 'theta_0')
ckpt_path_theta_0 = os.path.join(ckpt_dir_theta_0, 'ckpt_theta_0')
ckpt_dir_theta_t = os.path.join(recordFileName, 'theta_t')
ckpt_path_theta_t = os.path.join(ckpt_dir_theta_t, 'ckpt_theta_t')
checkpoint = tf.train.Checkpoint(optimizer=myModel.optimizer, model=myModel)
ckpt_manager = tf.train.CheckpointManager(checkpoint, directory=ckpt_dir_inner, max_to_keep=5)

if epochNum == 0:
    myModel.save_weights(ckpt_path_theta_0)
else:
    myModel.load_weights(ckpt_path_theta_t)

# configure TensorBoard
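# Hypothetical sketch, not part of the original example: how the CheckpointManager configured
# above is typically driven from the omitted training loop.
checkpoint.restore(ckpt_manager.latest_checkpoint)   # no-op on a fresh run (latest_checkpoint is None)
# ... one meta-epoch of inner-loop training and evaluation would go here ...
ckpt_manager.save()                                  # keeps at most the 5 most recent checkpoints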