def run_tests():
    """Run the project's provided unit tests for the RNN model and the
    forward/backward training step.

    Relies on module-level names (`tests`, `RNN`, `forward_back_prop`,
    `train_on_gpu`) defined elsewhere in this file/project.
    """
    checks = [
        (tests.test_rnn, (RNN, train_on_gpu)),
        (tests.test_forward_back_prop, (RNN, forward_back_prop, train_on_gpu)),
    ]
    for check, args in checks:
        check(*args)
# Exemplo n.º 2
# (NOTE: the lines above/below were mangled in extraction — the `def` header
# of the following training-step function was lost here.)
    # NOTE(review): the enclosing `def forward_back_prop(...)` header is not
    # visible in this fragment — this is the tail of a single training step.
    # Detach the hidden state from its history (via .data) so that backprop
    # does not propagate through the entire training run.
    hidden = tuple([each.data for each in hidden])
    # clear accumulated gradients before the backward pass
    rnn.zero_grad()
    # obtain the rnn's output (and the new hidden state) for this batch
    output, hidden = rnn(inp, hidden)
    # loss calculation and backprop
    loss = criterion(output, target)
    loss.backward()
    # gradient clipping (max norm 5) to mitigate exploding gradients,
    # then apply the parameter update
    nn.utils.clip_grad_norm_(rnn.parameters(), 5)
    optimizer.step()
    # mu_loss = average loss over the batch
    # return the loss over a batch and the hidden state produced by our model
    return loss.item(), hidden

tests.test_forward_back_prop(RNN, forward_back_prop, train_on_gpu)

#################################################
## Neural Network Training
#################################################

def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
    """Train `rnn` for `n_epochs` epochs.

    NOTE(review): this function is truncated in this view — the epoch loop's
    body continues past the visible lines, so only the visible setup is
    documented here.

    :param rnn: model to train; must expose .train() and .init_hidden()
    :param batch_size: batch size, used to size the initial hidden state
    :param optimizer: optimizer updating rnn's parameters
    :param criterion: loss function
    :param n_epochs: number of full passes over the training data
    :param show_every_n_batches: progress-report interval, in batches
    """
    # Loss accumulators (per batch / per epoch / overall).
    batch_losses, epoch_losses, train_losses = [], [], []
    
    # Switch to training mode (torch convention — presumably enables
    # dropout etc.; confirm rnn is a torch.nn.Module).
    rnn.train()

    print("Training for %d epoch(s)..." % n_epochs)
    for epoch_i in range(1, n_epochs + 1):
        
        # initialize hidden state afresh at the start of each epoch
        hidden = rnn.init_hidden(batch_size)
 def test_forward_and_backpropagation(self):
     """Delegate to the project's provided forward/backward-prop unit test."""
     rnn_cls = lost_episode.RNN
     step_fn = lost_episode.forward_and_backpropagation
     problem_unittests.test_forward_back_prop(
         RNN=rnn_cls,
         forward_back_prop=step_fn,
         train_on_gpu=self.train_on_gpu)