Example no. 1
        # (Inside the hyperparameter-search loops over lr, crop_len, batchsize
        #  and hidden_units; the outer loops are not shown in this excerpt.)
        optimizer = torch.optim.RMSprop(net.parameters(), lr=lr)
        # Define loss function
        loss_fn = nn.CrossEntropyLoss()

        # Start training
        for epoch in range(args.num_epochs):
            if (epoch + 1) % 100 == 0:
                print('##################################')
                print('## EPOCH %d' % (epoch + 1))
                print('##################################')
            # Iterate batches
            for batch_sample in dataloader:
                # Extract batch
                batch_onehot = batch_sample['encoded_onehot'].to(device)
                # Update network
                batch_loss = train_batch(net, batch_onehot, loss_fn, optimizer)
                if (epoch + 1) % 100 == 0:
                    print('\t Training loss (single batch):', batch_loss)

        # Keep the loss of the last trained batch for this configuration
        loss_temp.append(batch_loss)
        # If the average loss is still high, signal the outer loop (not shown) to stop
        if np.mean(loss_temp) > 4:
            flag = False

# Compare with the previous results
loss_avg = np.mean(np.array(loss_temp))
if loss_avg <= loss_opt:
    loss_opt = loss_avg
    lr_opt = lr
    crop_len_opt = crop_len
    batchsize_opt = batchsize
    hidden_units_opt = hidden_units
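
Both examples call a train_batch helper (train_batch in Example no. 1, network.train_batch in Example no. 2) that is not included in the excerpts. The sketch below is only an assumption of what it could look like for this next-character model: it mirrors the validation code of Example no. 2 (input = all time steps but the last, target = the last one-hot step, loss evaluated on the last output only). Everything in the body, including the target handling, is assumed rather than taken from the original code.

    def train_batch(net, batch_onehot, loss_fn, optimizer):
        # Split the one-hot crop: all steps but the last are the input,
        # the last step is the character to predict (assumed layout).
        net_input = batch_onehot[:, :-1, :]
        labels = batch_onehot[:, -1, :].argmax(dim=1)  # class indices for CrossEntropyLoss
        # Forward pass; the network is assumed to return (output, state),
        # as in the validation code of Example no. 2.
        net_out, _ = net(net_input)
        # Evaluate the loss only on the last output
        loss = loss_fn(net_out[:, -1, :], labels)
        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        return loss.item()

The sketch converts the one-hot target to class indices; Example no. 2's validation passes the one-hot row to loss_fn directly, which recent versions of nn.CrossEntropyLoss also accept as a probability target. Example no. 2's network.train_batch additionally returns the network output and the targets, but the update logic would be the same.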
Example no. 2
    # Start training
    for epoch in range(args.num_epochs):
        start = time.time()
        if epoch % verbose == 0:
            print('\n##################################')
            print('## EPOCH %d' % (epoch + 1))
            print('##################################')

        b_losses = []
        # Iterate batches
        for batch_sample in dataloader:
            # Extract batch
            batch_onehot = batch_sample['encoded_onehot'].to(device)
            # Train only on batches whose size differs from the validation batch
            # (presumably to keep the held-out validation batch out of training)
            if batch_onehot.shape[0] != validation_batch.shape[0]:
                # Update network
                batch_loss, out, y_true = network.train_batch(
                    net, batch_onehot, loss_fn, optimizer)
                b_losses.append(batch_loss)

            with torch.no_grad():
                y_validation = validation_batch[:, -1, :]
                # Remove the labels from the input tensor
                val_input = validation_batch[:, :-1, :]
                validation_pred, _ = net(val_input)
                # Evaluate the validation loss only for the last output
                loss_val = loss_fn(validation_pred[:, -1, :], y_validation)
                val_loss.append(loss_val)

        # Average training loss over the batches of this epoch
        epoch_train_loss = torch.mean(torch.tensor(b_losses))
        train_loss.append(epoch_train_loss)
        print('Average loss: {}'.format(epoch_train_loss), end='\t')
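
Example no. 2's excerpt stops before the per-epoch summary: start is recorded at the top of each epoch but never used in the lines shown, and the validation losses accumulated in val_loss are not reported. A minimal sketch of how the epoch body might continue, assuming the same print style as above; it is not part of the original code:

        # Assumed continuation of the epoch loop: report the latest validation
        # loss and the wall-clock time taken by this epoch.
        print('Validation loss: {}'.format(val_loss[-1]), end='\t')
        print('Epoch time: {:.2f} s'.format(time.time() - start))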