# --- tail of a training loop; the enclosing `for epoch ...` header is not
# visible in this fragment, so these statements are shown at their logical
# (loop-body) level without the outer indentation ---
loss = criterion(output, y.view(-1).long())
loss.backward()
optimizer.step()
if epoch % 10 == 0:
    print("Epoch: {}/{}.............".format(epoch, n_epochs), end=" ")
    print("Loss: {:.4f}".format(loss.item()))


def predict(model, question):
    """
    Returns the answer to the question.

    The question string is right-padded to the dataset's longest question
    length, mapped to integer ids, one-hot encoded, and moved to the GPU
    before the forward pass; the network output is decoded back to text.
    NOTE(review): `.cuda()` assumes a GPU is available — confirm.
    """
    padded = question.ljust(dataset.longer_question_length)
    encoded = dataset.one_hot_encode(dataset.text2int(padded))
    batch = torch.from_numpy(np.array([encoded])).float().cuda()
    output, hidden = model(batch)
    decoded = dataset.int2text(dataset.one_hot_decode(output.cpu()))
    return decoded


# Script-level demo: switch to eval mode and query the trained model.
model.eval()
with torch.no_grad():
    prediction = predict(model, "how are yo?")
print("".join(prediction))
# --- fragment of a train/validate epoch; the enclosing loop headers are not
# visible, so statements are shown at their logical level. The first group
# presumably sits inside a per-batch training loop (`loss = loss / j` would
# then follow it at epoch level) — confirm against the surrounding file.
# NOTE(review): no `optimizer.zero_grad()` is visible in this fragment;
# verify it happens before `batch_loss.backward()` in the enclosing loop.

# Training-batch tail: forward pass, loss, backprop, optimizer step.
output = net(x_train)
del x_train  # free the input batch early to reduce peak memory
batch_loss = criterion(output, targets.contiguous().view(-1))
del targets
batch_loss.backward()
loss += float(batch_loss.item())
optimizer.step()
del batch_loss
j += 1

loss = loss / j  # mean training loss over the j batches

# Validation pass: fresh hidden state, eval mode, no gradient tracking.
net.init_hidden()
net.eval()
val_loss = 0.
j = 0
# torch.no_grad() stops autograd from recording the validation forward
# passes — same numeric results, less memory and time.
with torch.no_grad():
    for val_x, val_y in get_batches(val_data, sequence_len, batch_size):
        val_x = torch.from_numpy(val_x).type(torch.LongTensor).to(net.device)
        # Transpose then flatten so targets line up with the flattened output.
        val_y = torch.from_numpy(val_y.T).type(
            torch.LongTensor).contiguous().view(-1).to(net.device)
        val_output = net(val_x)
        # val_y is already flat here; the original re-applied
        # .contiguous().view(-1), a no-op that has been dropped.
        batch_val_loss = float(criterion(val_output, val_y).item())
        val_loss += batch_val_loss
        j += 1