Example #1
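Driver code that builds the train and test datasets, shares the training vocabulary with the test split, and dispatches load/train/save/test/analysis runs from CLI flags.
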
    # Load the datasets; the test split reuses the training vocabulary

    train_dataset = MyDataset(args.train_file)
    word2vec = train_dataset.symbol2id
    test_dataset = MyDataset(args.test_file,
                             vocab=(train_dataset.symbol2id,
                                    train_dataset.id2symbol))

    test_loader = DataLoader(test_dataset,
                             batch_size=hyperparams['batch_size'])
    num_tokens = len(train_dataset.id2symbol)
    print('num tokens', num_tokens)
    print('size', test_dataset.seq.size())

    model = BERT(num_tokens).to(device)

    if args.load:
        # print("Model's state_dict:")
        # for param_tensor in torch.load('./model.pt', map_location=torch.device(device)):
        #     print(param_tensor, "\t", model.state_dict()[param_tensor].size())
        model.load_state_dict(
            torch.load('./model.pt', map_location=torch.device(device)))
    if args.train:
        train(model, train_dataset, experiment, hyperparams)
    if args.save:
        torch.save(model.state_dict(), './model.pt')
    if args.test:
        test(model, test_loader, experiment, hyperparams)
    if args.analysis:
        embedding_analysis(model, experiment, train_dataset, test_dataset)
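
The snippet above assumes a MyDataset class exposing a symbol2id/id2symbol vocabulary pair, an optional vocab argument for sharing that vocabulary across splits, and a seq tensor. Below is a minimal sketch of that interface, reconstructed purely from the call sites above and assuming whitespace-tokenized text files; the real class may differ:

import torch
from torch.utils.data import Dataset


class MyDataset(Dataset):
    """Hypothetical reconstruction of the dataset interface used above."""

    def __init__(self, path, vocab=None):
        with open(path) as f:
            tokens = f.read().split()
        if vocab is None:
            # Build the vocabulary from the training file
            symbols = sorted(set(tokens))
            self.symbol2id = {s: i for i, s in enumerate(symbols)}
            self.id2symbol = {i: s for s, i in self.symbol2id.items()}
        else:
            # Reuse the training vocabulary for the test split
            self.symbol2id, self.id2symbol = vocab
        # Assumption: unknown test-set tokens map to id 0
        self.seq = torch.tensor([self.symbol2id.get(t, 0) for t in tokens],
                                dtype=torch.long)

    def __len__(self):
        return len(self.seq)

    def __getitem__(self, idx):
        return self.seq[idx]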
Example #2
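A complete BERT fine-tuning loop: per-epoch training with Adam, checkpointing after every epoch, and optional precision/recall/F1 evaluation on the validation set (falling back to the training set when none is given).
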
import os

import numpy as np
import sklearn.metrics
import torch
import torch.optim as optim


def run_training_bert(args, dataset, train_loader, val_loader, vocab_size):
    checkpoint_path = os.path.join(args.checkpoint_path, args.checkpoint)
    device = torch.device(
        "cuda:" + args.device if torch.cuda.is_available() else "cpu")

    model = BERT().to(device)

    # The model computes its own loss, so no separate criterion is needed.
    # Set up the Adam optimizer with weight decay for regularization.
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-5)
    model.train()  # turn on training mode
    print("Starting Training Loop...")
    for epoch in range(args.epochs):
        losses = []
        for i, batch in enumerate(train_loader):
            # Unpack the batch and move the tensors to the target device
            text, context, label = batch.text, batch.context, batch.label
            label = label.type(torch.LongTensor).to(device)
            text = text.type(torch.LongTensor).to(device)

            loss, _ = model(text, label)  # the model returns (loss, logits)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            losses.append(loss.item())
        epoch_loss = sum(losses) / len(losses)
        print('Epoch: {}, Training Loss: {:.4f}'.format(epoch, epoch_loss))
        # Save a checkpoint after every epoch
        torch.save(
            {
                'epoch': epoch + 1,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'vocab_size': vocab_size,
                'args': vars(args)
            }, checkpoint_path)
        if args.eval:
            model.eval()
            eval_loader = val_loader if val_loader is not None else train_loader
            with torch.no_grad():
                preds = []
                labels = []
                eval_losses = []
                for batch in eval_loader:
                    text, context, label = batch.text, batch.context, batch.label
                    label = label.type(torch.LongTensor).to(device)
                    text = text.type(torch.LongTensor).to(device)
                    loss, output = model(text, label)
                    preds.extend(torch.argmax(output, 1).tolist())
                    labels.extend(label.tolist())
                    eval_losses.append(loss.item())
            y_true = np.array(labels).astype('int32')
            y_pred = np.array(preds)
            print("{} Precision: {}, Recall: {}, F1: {}, Loss: {}".format(
                "Train" if val_loader is None else "Valid",
                sklearn.metrics.precision_score(y_true, y_pred),
                sklearn.metrics.recall_score(y_true, y_pred),
                sklearn.metrics.f1_score(y_true, y_pred),
                np.average(eval_losses)))
            model.train()  # switch back to training mode for the next epoch
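
For reference, the attributes the function reads off args (checkpoint_path, checkpoint, device, lr, epochs, eval) imply a command-line interface roughly like the following. This is a hypothetical reconstruction with made-up defaults, not the original script's parser:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', default='checkpoints')  # directory for checkpoints (assumed default)
parser.add_argument('--checkpoint', default='bert.pt')           # checkpoint file name (assumed default)
parser.add_argument('--device', default='0')    # CUDA device index, kept as a string ("cuda:" + args.device)
parser.add_argument('--lr', type=float, default=2e-5)
parser.add_argument('--epochs', type=int, default=3)
parser.add_argument('--eval', action='store_true')  # evaluate after each checkpoint
args = parser.parse_args()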
Example #3
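Fine-tuning a DNLI-pretrained BERT on MultiNLI, evaluating on the matched and mismatched dev sets each epoch and checkpointing whenever the combined accuracy improves.
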
    match = BERTMNLI(MATCH_DATA_DIR, bert_type=BERT_TYPE)
    mismatch = BERTMNLI(MISMATCH_DATA_DIR, bert_type=BERT_TYPE)

    # Warm-start from a BERT model previously fine-tuned on DNLI
    checkpoint = torch.load('storage/bert-base-dnli.pt')
    model = BERT(bert_type=BERT_TYPE)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)

    optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    criterion = nn.CrossEntropyLoss()

    best_acc = 0

    for epoch in range(1, NUM_EPOCHS + 1):
        # `mnli` (the training split) is presumably built earlier, alongside `match` and `mismatch`
        train_loss = train(mnli, model, criterion, optimizer, device)
        match_acc = eval(match, model, device)
        mismatch_acc = eval(mismatch, model, device)
        print(f'Epoch {epoch}, Train Loss: {train_loss}, '
              f'Match Acc: {match_acc}, Mismatch Acc: {mismatch_acc}')
        if match_acc + mismatch_acc > best_acc:
            best_acc = match_acc + mismatch_acc
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                }, "storage/finetune-base-mnli-DNLI.pt")