Code example #1
# NOTE: the head of this constructor call was truncated in the source snippet;
# the leading positional argument below is an assumed reconstruction.
ner_model = BiRecurrentConvCRF4NestedNER(config.label_size,  # assumed; earlier args lost
                                         layers=config.layers,
                                         word_dropout=config.word_dropout,
                                         char_dropout=config.char_dropout,
                                         lstm_dropout=config.lstm_dropout)
if config.if_gpu:
    ner_model = ner_model.cuda()

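# Optimize only the parameters that still require gradients
# (any frozen weights are skipped by this filter).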
parameters = filter(lambda p: p.requires_grad, ner_model.parameters())
optimizer, lr_scheduler = create_opt(parameters,
                                     config.opt,
                                     lr=config.lr,
                                     l2=config.l2,
                                     lr_patience=config.lr_patience)

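# pack_target is a project helper: it presumably converts each gold-label
# batch into the target representation the model's CRF layer expects.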
train_sequence_label_batches = [
    pack_target(ner_model, train_label_batch, train_mask_batch)
    for train_label_batch, train_mask_batch in zip(train_label_batches,
                                                   train_mask_batches)
]

logger.info("{} batches expected for training".format(
    len(train_token_iv_batches)))
logger.info("")
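# Best-so-far trackers: best_per presumably holds the best validation score
# seen so far (higher is better), best_loss the lowest loss.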
best_model = None
best_per = float('-inf')
best_loss = float('inf')
train_all_batches = list(
    zip(train_token_iv_batches, train_token_ooev_batches, train_char_batches,
        train_sequence_label_batches, train_mask_batches))

train_start_time = time.time()
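This snippet relies on a project-local helper, create_opt (also called in the second example below), whose definition is not shown. All that can be read off the call sites is that it takes the trainable parameters, an optimizer name, a learning rate, an L2 coefficient, and a patience value, and returns an (optimizer, lr_scheduler) pair. Below is a minimal sketch of such a helper, assuming standard torch.optim classes and a ReduceLROnPlateau scheduler; the repository's actual implementation may differ.

import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

def create_opt(parameters, opt_name, lr=1e-3, l2=0.0, lr_patience=5):
    # `filter` objects are single-use iterators, so materialize them first.
    parameters = list(parameters)
    if opt_name == 'adam':
        optimizer = optim.Adam(parameters, lr=lr, weight_decay=l2)
    elif opt_name == 'sgd':
        optimizer = optim.SGD(parameters, lr=lr, weight_decay=l2)
    else:
        raise ValueError('unsupported optimizer: {}'.format(opt_name))
    # Shrink the learning rate once the monitored validation metric plateaus.
    lr_scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.5,
                                     patience=lr_patience)
    return optimizer, lr_scheduler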
Code example #2
config.label_size = label_dict.size()

config.if_gpu = config.if_gpu and torch.cuda.is_available()

logger.info(config)  # log the training settings

ner_model = BiRecurrentConvCRF4NestedNER(config.bert_model, config.label_size,
                                         hidden_size=config.hidden_size, layers=config.layers,
                                         lstm_dropout=config.lstm_dropout)
if config.if_gpu:
    ner_model = ner_model.cuda()

parameters = filter(lambda p: p.requires_grad, ner_model.parameters())
optimizer, lr_scheduler = create_opt(parameters,
                                     config.opt,
                                     lr=config.lr,
                                     l2=config.l2,
                                     lr_patience=config.lr_patience)

train_sequence_label_batches = [
    pack_target(ner_model, train_label_batch, train_mask_batch)
    for train_label_batch, train_mask_batch in zip(train_label_batches,
                                                   train_mask_batches)
]

logger.info("{} batches expected for training".format(len(train_input_ids_batches)))
logger.info("")
best_model = None
best_per = float('-inf')
best_loss = float('inf')
train_all_batches = list(zip(train_input_ids_batches,
                             train_input_mask_batches,
                             train_first_sub_tokens_batches,
                             train_sequence_label_batches,
                             train_mask_batches))

train_start_time = time.time()
num_batches = len(train_all_batches)
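Neither snippet includes the loop that actually consumes train_all_batches. The sketch below, continuing from the variables defined above, shows one plausible epoch loop for example #2's batch layout; the forward-call signature of ner_model and the config.epoch field are assumptions, not taken from the original code.

import random

for epoch in range(config.epoch):  # config.epoch is an assumed field
    random.shuffle(train_all_batches)
    ner_model.train()
    epoch_loss = 0.0
    for (input_ids, input_mask, first_sub_tokens,
         label_batch, mask_batch) in train_all_batches:
        optimizer.zero_grad()
        # Assumed API: the model's forward pass returns the scalar training loss.
        loss = ner_model.forward(input_ids, input_mask, first_sub_tokens,
                                 label_batch, mask_batch)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    logger.info("epoch {}: mean loss {:.4f} ({:.1f}s elapsed)".format(
        epoch, epoch_loss / num_batches, time.time() - train_start_time))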