def create_and_check_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Build a ``BertForSequenceClassification`` model and check its output shape.

    Runs the model in eval mode on the supplied inputs, collects the
    (loss, logits) pair, asserts that the logits have shape
    ``(batch_size, num_labels)``, and delegates the loss-shape check to
    ``check_loss_output``.
    """
    config.num_labels = self.num_labels
    model = BertForSequenceClassification(config)
    model.eval()
    # Legacy positional forward: (input_ids, token_type_ids, attention_mask, labels).
    loss, logits = model(input_ids, token_type_ids, input_mask, sequence_labels)
    result = {"loss": loss, "logits": logits}
    expected_shape = [self.batch_size, self.num_labels]
    self.parent.assertListEqual(list(result["logits"].size()), expected_shape)
    self.check_loss_output(result)
# ---- Beispiel #2 (Example #2) ----
# 0
        # NOTE(review): this span is the tail of a training function whose
        # header — and the `for batch in train_iterator:` loop opening where
        # `loss`, `logits`, `optimizer`, and the running counters are
        # produced — lies above the visible chunk; TODO confirm against the
        # full file.
        # Predicted class = argmax over the softmaxed logits along dim 1.
        # (softmax is monotonic, so argmax over raw logits would match.)
        pred = torch.argmax(F.softmax(logits, dim=1), dim=1)
        correct = pred.eq(batch.label)  # element-wise match against gold labels
        total_correct += correct.sum().item()
        total_len += len(batch.label)
        total_loss += loss.item()
        loss.backward()  # accumulate gradients for this batch
        optimizer.step()  # NOTE(review): no optimizer.zero_grad() visible here — presumably called earlier in the loop; verify
    print(
        f"  Train Accuracy: {total_correct / total_len:.3f}, Loss: {total_loss / total_len:.4f}"
    )
    # Reset running statistics before the evaluation pass.
    total_loss = 0
    total_len = 0
    total_correct = 0

    # Evaluation pass over the test set.
    # NOTE(review): eval mode is set but there is no torch.no_grad() guard,
    # so autograd state is still tracked — confirm this is intentional.
    model.eval()
    for batch in test_iterator:
        # Keyword-style forward; returns (loss, logits) when labels are given.
        outputs = model(batch.text, labels=batch.label)
        loss, logits = outputs

        pred = torch.argmax(F.softmax(logits, dim=1), dim=1)
        correct = pred.eq(batch.label)
        total_correct += correct.sum().item()
        total_len += len(batch.label)

    print(f"  Test Accuracy: {total_correct / total_len:.3f}")
    # Reset counters again, presumably for the next epoch — TODO confirm.
    total_len = 0
    total_correct = 0

    itr += 1  # epoch/iteration counter defined above the visible chunk