Example No. 1
from torch.utils.data import DataLoader

def test_dataloader(self):
    # Return the test DataLoader; PadSequence pads each batch to equal length.
    return DataLoader(StringMatchingDataset(self.test_dataset),
                      shuffle=True,
                      num_workers=NUM_WORKERS,
                      batch_size=BATCH_SIZE,
                      collate_fn=PadSequence())
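
Here PadSequence is used as the DataLoader's collate_fn, so it must be a
callable that turns a list of dataset items into one padded batch. A minimal
sketch of such a class, assuming each item is a (sequence_tensor, label) pair
(the actual StringMatchingDataset items may differ), could look like this:

import torch
from torch.nn.utils.rnn import pad_sequence

class PadSequence:
    # Hypothetical collate_fn: pads a batch of variable-length sequences.
    def __call__(self, batch):
        # batch is a list of (sequence_tensor, label) pairs from the Dataset.
        sequences, labels = zip(*batch)
        lengths = torch.tensor([len(s) for s in sequences])
        # Pad every sequence to the length of the longest one in the batch.
        padded = pad_sequence(sequences, batch_first=True, padding_value=0)
        return padded, lengths, torch.tensor(labels)
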
Example No. 2
import torch
import torch.nn as nn
import torch.optim as optim

# Map test tokens and POS tags to vocabulary ids.
test_word_ids = [[word_vocab.get(word) for word in sentence]
                 for sentence in words]
test_pos_ids = [[pos_vocab.get(pos) for pos in sentence] for sentence in pos]

# Build the tagger from pretrained embeddings loaded as a float tensor.
embedding = torch.from_numpy(embedding).float()
model = LSTMTagger(embedding, embed_dim, 100, 2, len(pos_vocab)).to(device)
# Optimise only the parameters that require gradients
# (frozen parameters, e.g. the pretrained embeddings, are skipped).
optimizer = optim.Adam(
    [param for param in model.parameters() if param.requires_grad],
    lr=0.001)
criterion = nn.NLLLoss()  # expects log-probabilities as input
for epoch in range(num_epochs):
    batch = Batch(word_ids, pos_ids, batch_size=batch_size)
    total_step = len(batch)
    for i, (inputs, labels) in enumerate(batch, start=1):
        # Pad the word-id sequences of this batch to a fixed length of 100.
        pad_words_obj = PadSequence(inputs, [len(inputs), 100])
        padded_inputs = torch.Tensor(pad_words_obj.embedding).long().to(device)
        padded_inputs_lens = torch.Tensor(
            pad_words_obj.lengths).long().to(device)

        # Forward pass; the model consumes padded ids plus true lengths.
        outputs = model(padded_inputs, padded_inputs_lens)

        # Pad the label sequences to the model output's seq_len;
        # outputs has shape (batch, num_classes, seq_len).
        pad_pos_obj = PadSequence(labels, [len(inputs), outputs.size(2)])
        padded_labels = torch.Tensor(pad_pos_obj.embedding).long().to(device)

        # NLLLoss accepts input (batch, num_classes, seq_len) with
        # target (batch, seq_len).
        loss = criterion(outputs, padded_labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
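
Note that here PadSequence is not a collate_fn: it is built from a batch and a
target shape, and exposes .embedding (the padded id matrix) and .lengths. A
minimal sketch consistent with that usage, assuming plain Python lists of
integer ids (the real helper may differ), could be:

import numpy as np

class PadSequence:
    # Hypothetical padding helper matching the usage above.
    def __init__(self, sequences, shape):
        batch_size, max_len = shape
        # True length of each sequence, capped at max_len.
        self.lengths = [min(len(seq), max_len) for seq in sequences]
        # Zero-pad (or truncate) every sequence to exactly max_len ids.
        self.embedding = np.zeros((batch_size, max_len), dtype=np.int64)
        for row, seq in enumerate(sequences):
            n = self.lengths[row]
            self.embedding[row, :n] = seq[:n]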