Example #1
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Model hyperparameters
src_vocab_size = len(german.vocab)
trg_vocab_size = len(english.vocab)
embedding_size = 512
src_pad_idx = english.vocab.stoi["<pad>"]  # padding index, used to mask padded source positions
print(src_pad_idx)
print(english.vocab.itos[src_pad_idx])

model = Transformer(device, embedding_size, src_vocab_size, trg_vocab_size,
                    src_pad_idx).to(device)

load_model = True
save_model = True
learning_rate = 3e-4

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
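
# save_checkpoint / load_checkpoint are defined elsewhere in this project. A
# minimal sketch, assuming the checkpoint is a dict holding the model and
# optimizer state dicts (a common PyTorch pattern); the real helpers may differ.
def save_checkpoint(state, filename="my_checkpoint.pth.tar"):
    torch.save(state, filename)


def load_checkpoint(checkpoint, model, optimizer):
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])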

if load_model:
    load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)

# sentence = "ein pferd geht unter einer brücke neben einem boot."
#
# translated_sentence = translate_sentence(
#     model, sentence, german, english, device, max_length=50
# )
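
# translate_sentence is likewise defined elsewhere. A rough greedy-decoding
# sketch, assuming the model's forward pass is model(src, trg) with logits of
# shape (trg_len, batch_size, trg_vocab_size), and that the torchtext Fields
# german/english were built with init_token/eos_token; the project's real
# helper may differ.
def translate_sentence(model, sentence, german, english, device, max_length=50):
    # Wrap the (already tokenized) source sentence with <sos>/<eos> markers.
    tokens = [token.lower() for token in sentence]
    tokens = [german.init_token] + tokens + [german.eos_token]

    # Convert tokens to vocabulary indices and build a (src_len, 1) tensor.
    src_indices = [german.vocab.stoi[token] for token in tokens]
    src_tensor = torch.LongTensor(src_indices).unsqueeze(1).to(device)

    # Start the target with <sos> and repeatedly append the most likely token.
    outputs = [english.vocab.stoi["<sos>"]]
    for _ in range(max_length):
        trg_tensor = torch.LongTensor(outputs).unsqueeze(1).to(device)
        with torch.no_grad():
            output = model(src_tensor, trg_tensor)
        best_guess = output.argmax(2)[-1, :].item()
        outputs.append(best_guess)
        if best_guess == english.vocab.stoi["<eos>"]:
            break

    # Map indices back to tokens, dropping the leading <sos>.
    return [english.vocab.itos[idx] for idx in outputs][1:]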
sentence1 = [
    'ein', 'pferd', 'geht', 'unter', 'einer', 'brücke', 'neben', 'einem',
    'boot', '.'
]
translated_sentence = translate_sentence(model, sentence1, german, english,
                                         device, max_length=50)
print(translated_sentence)
Example #2
    print("[", end="")
    for index in v:
      print(xv[index] + ", ", end="")

    print("]")
  """


import time

import torch
import torch.optim as optim

# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = "cpu"
embedding_size = 6
src_pad_idx = 2

ptrNet = Transformer(device, embedding_size,
                     src_pad_idx=src_pad_idx).to(device)

# ptrNet = PointerNetwork(config.HIDDEN_SIZE)
optimizer = optim.Adam(ptrNet.parameters(), lr=0.01)
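
# EPOCHS is not defined in this excerpt; a stand-in value so the loop below runs.
EPOCHS = 10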

program_starts = time.time()
for epoch in range(EPOCHS):
    # Evaluate before and after each training pass to track per-epoch progress.
    evaluateWordSort(ptrNet, epoch + 1)
    train(ptrNet, optimizer, epoch + 1)
    evaluateWordSort(ptrNet, epoch + 1)

now = time.time()
print("It has been {0} seconds since the loop started".format(now -
                                                              program_starts))