Example #1
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import SubsetRandomSampler

batch_size = 3

# Shuffle the dataset indices before handing them to the sampler.
dataset_indices = list(range(len(dataset)))
np.random.shuffle(dataset_indices)

train_sampler = SubsetRandomSampler(dataset_indices)
train_loader = torch.utils.data.DataLoader(dataset,
                                           batch_size=batch_size,
                                           sampler=train_sampler)

model = RNNModel(dataset.unique_characters_length,
                 dataset.unique_characters_length)
model.cuda()

# Define loss and optimizer functions.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Training the network.
n_epochs = 1000
for epoch in range(1, n_epochs + 1):
    for batch_index, (x, y) in enumerate(train_loader):
        optimizer.zero_grad()

        # Move the batch to the GPU to match the model.
        x, y = x.cuda(), y.cuda()
        output, hidden = model(x)  # logits (24, 24), hidden state (1, 1, 32)
        loss = criterion(output, y.view(-1).long())
        loss.backward()
        optimizer.step()

    # Report progress once every 10 epochs (outside the batch loop, so it
    # prints once per epoch rather than once per batch).
    if epoch % 10 == 0:
        print("Epoch: {}/{}.............".format(epoch, n_epochs), end=" ")
        print("Loss: {:.4f}".format(loss.item()))
Example #2
import torch
from torch import optim

# The snippet starts mid-call: the constructor name and its leading
# arguments are not shown. "RNNModel" and the "..." placeholder below are
# assumptions; the keyword arguments are AWD-LSTM-style regularization knobs.
net = RNNModel(...,
               nhid=hs,
               nlayers=3,
               dropout=0.25,
               dropouth=0.1,
               dropouti=0.2,
               dropoute=0.02,
               wdrop=0,
               tie_weights=False,
               device="cuda:0")
# Move the model to its configured device.
net.to(net.device)

# optimizer = optim.Adam(net.parameters(), lr=30, weight_decay=0.0001)
optimizer = torch.optim.SGD(net.parameters(),
                            lr=1e3,
                            momentum=0.90,
                            weight_decay=1.2e-6,
                            nesterov=False)
# Hold out the last 10% of the encoded data for validation.
val_idx = int(len(encoded) * (1 - 0.1))
data, val_data = encoded[:val_idx], encoded[val_idx:]

# Track validation losses and generated samples across training.
val_losses = list()
samples = list()
print()
print("start")

# Frequency-band cutoffs for an adaptive softmax over the vocabulary:
# the head covers the most frequent tokens, the tails the rarer ones.
cutoffs = [round(vocab_size / 15), 3 * round(vocab_size / 15)]
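These cutoffs split the vocabulary into frequency bands for an adaptive softmax. A minimal, self-contained sketch of how such cutoffs plug into PyTorch's nn.AdaptiveLogSoftmaxWithLoss follows; the vocab_size, hidden size, and batch here are illustrative stand-ins, since the rest of the training loop is not shown.

import torch
import torch.nn as nn

vocab_size = 120                          # illustrative vocabulary size
nhid = 64                                 # illustrative RNN hidden size
cutoffs = [round(vocab_size / 15), 3 * round(vocab_size / 15)]  # [8, 24]

# The head handles the 8 most frequent tokens (plus one cluster logit per
# tail); rarer tokens live in smaller, cheaper tail projections.
adaptive_softmax = nn.AdaptiveLogSoftmaxWithLoss(
    in_features=nhid, n_classes=vocab_size, cutoffs=cutoffs)

hidden = torch.randn(32, nhid)            # fake batch of RNN outputs
targets = torch.randint(0, vocab_size, (32,))
output, loss = adaptive_softmax(hidden, targets)
print(loss.item())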