Esempio n. 1
0
        # --- training step (fragment: the enclosing epoch/batch loops start
        # before this excerpt) ---
        optimizer.zero_grad()

        # Forward pass: the network scores candidate endings for this batch.
        ending_sim = net(data)
        loss = ce_loss(ending_sim, data['labels'])
        loss.backward()
        optimizer.step()
        running_loss_train += loss.item()

        if i % PRINT_EVERY == 0:
            # NOTE(review): at i == 0 this divides by PRINT_EVERY after a
            # single step, under-reporting the loss — confirm intended.
            # NOTE(review): the backslash continuation inside the f-string
            # embeds the next line's leading spaces in the printed message.
            print(f'Epoch: {epoch+1}, Step: {i}/{n_iteration},\
                Runningloss: {running_loss_train/PRINT_EVERY}')
            running_loss_train = 0.0
    # Validation pass: gradients disabled to save memory/compute.
    # NOTE(review): net.eval() is not called before validating — confirm the
    # model has no dropout/batch-norm layers, otherwise results differ.
    with torch.no_grad():
        for i, val_data in enumerate(val_dataloader):
            val_ending_sim = net(val_data)

            # Predicted class = index of the highest logit along dim 1.
            _, predicted = torch.max(val_ending_sim, 1)
            metric_acc.update_batch(predicted, val_data['labels'])
        val_accuracy = metric_acc.get_metrics_summary()
        metric_acc.reset()
        # Checkpoint only when validation accuracy improves on the best so far.
        if val_accuracy > val_accuracy_prev:
            torch.save(net.state_dict(), 'checkpoints/sentiment_finetuned.pth')
            print('checkpoint saved')
            val_accuracy_prev = val_accuracy

        print(
            f'============Epoch: {epoch+1}, ValAccuracy: {val_accuracy}================='
        )

print('end')
Esempio n. 2
0
# Training/validation loop (fragment: the excerpt is truncated inside the
# validation loop at the bottom).
for epoch in range(NUM_EPOCHS):
    running_loss_train = 0.0
    # NOTE(review): running_loss_val is initialised but never updated in the
    # visible lines — its accumulation is presumably past this excerpt.
    running_loss_val = 0.0
    for i, train_batch in enumerate(train_dataloader):
        optimizer.zero_grad()
        # Forward pass: logits over the candidate classes for this batch.
        logits = net(train_batch)
        train_loss = ce_loss(logits, train_batch['labels'])
        # output, train_loss = utils.run_step(train_batch, net, tokenizer, ce_loss, device)

        train_loss.backward()
        optimizer.step()

        running_loss_train += train_loss.item()

        # Accumulate training accuracy over the current reporting window.
        _, predicted = torch.max(logits, 1)
        metric_acc.update_batch(predicted, train_batch['labels'])

        if i % PRINT_EVERY == 0:
            train_accuracy = metric_acc.get_metrics_summary()
            metric_acc.reset()

            # NOTE(review): the backslash continuation inside the f-string
            # embeds the next line's leading spaces in the printed message;
            # at i == 0 the running-loss average covers a single step.
            print(
                f'Epoch: {epoch+1}, Step: {i}/{n_iteration}, Accuracy: {train_accuracy}, \
                    Runningloss: {running_loss_train/PRINT_EVERY}')
            running_loss_train = 0

    # Validation pass: gradients disabled.
    # NOTE(review): net.eval() is not called here — confirm the model has no
    # dropout/batch-norm layers.
    with torch.no_grad():
        for i, val_batch in enumerate(val_dataloader):
            logits = net(val_batch)
            # NOTE(review): val_loss is computed but not used in the visible
            # lines — the aggregation is presumably past this excerpt.
            val_loss = ce_loss(logits, val_batch['labels'])
            # logits, val_loss = utils.run_step(val_batch, net, tokenizer, ce_loss, device)
Esempio n. 3
0
    # Fallback branch of a device-selection conditional whose header is
    # outside this excerpt (presumably `if torch.cuda.is_available(): ... else:`).
    print('running on cpu')

# Evaluate the saved CombinedNet checkpoint on the held-out test set and
# print the resulting accuracy.
stories_test = utils.read_data('data/nlp2_test.csv')

# NOTE: pickle.load executes arbitrary code if the file is untrusted; this is
# acceptable only because the pickle is produced locally by this project.
# The `with` block guarantees the file handle is closed even on error.
with open("data/dictionary_commonsense_test.pickle", 'rb') as embed_file_test:
    embedding_test = pickle.load(embed_file_test)

# No shuffling for evaluation: deterministic order, metrics are unaffected.
test_dataloader = DataLoader(CombinedData(stories_test, embedding_test,
                                          device),
                             batch_size=BATCH_SIZE,
                             shuffle=False)

metric_acc = Accuracy()

model = CombinedNet(device, pretrained=(False, False, False))
# map_location makes the checkpoint loadable even when it was saved on a
# different device (e.g. a GPU-saved checkpoint evaluated on CPU).
model.load_state_dict(
    torch.load('checkpoints/combined_model.pth', map_location=device))
model.to(device)
# Switch dropout/batch-norm layers (if any) to inference behaviour.
model.eval()

with torch.no_grad():
    metric_acc.reset()

    for test_batch in test_dataloader:
        logits = model(test_batch)

        # Predicted class = index of the highest logit along dim 1.
        _, predicted = torch.max(logits, 1)
        metric_acc.update_batch(predicted, test_batch['labels'])

    test_accuracy = metric_acc.get_metrics_summary()
    metric_acc.reset()

    print(f'======== TestAccuracy: {test_accuracy} ======')