Ejemplo n.º 1
0
def main(args):
    """Entry point: build the tokenizer and datasets, then train/evaluate as configured."""
    init_logger()
    # Tokenizer matching the configured pretrained BERT checkpoint.
    tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)

    # Load (or rebuild from cache) the features for each split.
    datasets = {
        split: load_and_cache_examples(args, tokenizer, mode=split)
        for split in ("train", "dev")
    }

    trainer = Trainer(args, datasets["train"], datasets["dev"])

    if args.do_train:
        trainer.train()

    if args.do_eval:
        trainer.load_model()
        trainer.evaluate()
Ejemplo n.º 2
0
                  batch_size=config.batch_size,
                  lr=config.lr,
                  num_layers=config.num_layers)

# Best validation loss observed so far; drives checkpointing.
best_valid_loss = float('inf')

# Padded target positions must not contribute to the loss.
target_pad_idx = trainer.trg.vocab.stoi[trainer.trg.pad_token]

criterion = nn.CrossEntropyLoss(ignore_index=target_pad_idx)
if config.mode == 'train':
    for epoch in range(trainer.number_of_epochs):
        t0 = time.time()

        train_loss = trainer.train(criterion)
        valid_loss = trainer.evaluate(trainer.valid_iterator, criterion)

        t1 = time.time()
        mins, secs = trainer.epoch_time(t0, t1)

        # Save a checkpoint whenever validation improves.
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(trainer.model.state_dict(), 'convseq.pt')

        print('Epoch: {} | Time: {}m {}s'.format(epoch, mins, secs))
        # PPL (perplexity) is exp of the cross-entropy loss.
        print('Train Loss: {:7.3f} | Train PPL: {:7.3f}'.format(
            train_loss, math.exp(train_loss)))
        print('Val. Loss: {:7.3f} |  Val. PPL: {:7.3f}'.format(
            valid_loss, math.exp(valid_loss)))
Ejemplo n.º 3
0
                                download=True)
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=128)

# Build the model: ResNet-18 with its classification head replaced for 10 classes.
net = models.resnet18()
net.fc = nn.Linear(512, 10)
trainer = Trainer(net)

# Loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(trainer.get_parameters(), lr=1e-1)

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Visdom dashboards: one window for the two loss curves, one for accuracy.
viz = Visdom(port=17000)
viz.line([[0., 0.]], [0],
         win='train&eval loss',
         opts={'title': 'train&eval loss',
               'legend': ['train', 'eval']})
viz.line([0.], [0], win='accuracy', opts={'title': 'accuracy'})

for epoch in range(1000):
    print('*' * 5 + str(epoch) + '*' * 5)
    train_loss = trainer.train(train_loader, optimizer, criterion, device)
    eval_loss, acc = trainer.evaluate(test_loader, criterion, device)
    print('epoch: {} train_loss: {:.6f} eval_loss: {:.6f} acc: {:.2%}'.format(
        epoch, train_loss, eval_loss, acc))
    # Append this epoch's points to the live plots.
    viz.line([[train_loss, eval_loss]], [epoch],
             win='train&eval loss',
             update='append')
    viz.line([acc], [epoch], win='accuracy', update='append')
    print('*' * 10)
Ejemplo n.º 4
0
         })
for epoch in range(args.epochs):
    # Train for one epoch, then measure on the validation split.
    train_loss = trainer.train(train_loader, optimizer, criterion, DEVICE)
    val_loss, acc = trainer.evaluate(val_loader, criterion, DEVICE)

    # Extend the Visdom loss/accuracy curves with this epoch's values.
    viz.line([[train_loss, val_loss]], [epoch],
             win='train&eval loss',
             update='append')
    viz.line([acc], [epoch], win='accuracy', update='append')
    print("Validation loss:{}\taccuracy:{}".format(val_loss, acc))

    # Early stopping: checkpoint on improvement, otherwise accumulate patience.
    improved = val_loss < min_loss
    if improved:
        torch.save(model.state_dict(), 'latest.pth')
        print("Model saved at epoch{}".format(epoch))
        min_loss = val_loss
    patience = 0 if improved else patience + 1
    if patience > args.patience:
        break