# Example #1
    logger.addHandler(fh)

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)

    return logger


# ---- Pretraining setup (module-level side effects) ----
# NOTE(review): the default CUDA device is hard-coded to GPU 1 here, while the
# model below is moved to Config.args.device — confirm the two agree, otherwise
# tensors allocated via the default device will land on a different GPU.
torch.cuda.set_device(1)

# Build the headline dataset used for polarity pretraining and compute how
# many batches one epoch contains at the configured batch size.
dataset_pretrain = Data.HeadlineforPretraining()
dataset_pretrain.build()
batch_num = dataset_pretrain.get_batch_num(Config.args.pretrain_batch_size)

# Instantiate the BERT-based polarity pretraining model on the configured device.
model = Model.BertPolarityPretrain()
model = model.to(Config.args.device)

# AdamW with decoupled weight decay of 0.01 applied to *all* parameters.
# NOTE(review): standard BERT recipes usually exclude biases and LayerNorm
# weights from weight decay — confirm this blanket decay is intentional.
optimizer = AdamW(model.parameters(),
                  lr=Config.args.pretrain_rate,
                  weight_decay=0.01)
# Linear warmup over the first `pretrain_warm_up` fraction of total steps,
# then linear decay to zero over the remainder.
training_steps = Config.args.pretrain_epoch_num * batch_num
warmup_steps = int(training_steps * Config.args.pretrain_warm_up)
scheduler = get_linear_schedule_with_warmup(optimizer,
                                            num_warmup_steps=warmup_steps,
                                            num_training_steps=training_steps)

# Logger writing to the pretraining log file (and, per get_logger above,
# presumably also to the console via a StreamHandler).
logger = get_logger(Config.args.pretrain_log_path)
for epoch in range(Config.args.pretrain_epoch_num):
    batch_generator = Data.generate_batches(
        dataset=dataset_pretrain, batch_size=Config.args.pretrain_batch_size)