Example #1
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), 0.25)  # clip gradients to a max norm of 0.25
                optimizer.step()

                # track
                tracker['NLL'].append(loss.item())

                # print statistics
                if itr % print_every == 0 or itr + 1 == len(dataloader):
                    print("%s Batch %04d/%04d, NLL-Loss %.4f, " %
                          (split.upper(), itr, len(dataloader),
                           tracker['NLL'][-1]))

        samples = len(datasets[split])
        print("%s Epoch %02d/%02d, NLL %.4f, PPL %.4f" %
              (split.upper(), ep, epoch, totals['NLL'] / samples,
               math.exp(totals['NLL'] / totals['words'])))

    # save checkpoint
    checkpoint_path = os.path.join(save_path, "E%02d.pkl" % ep)
    torch.save(model.state_dict(), checkpoint_path)
    print("Model saved at %s\n" % checkpoint_path)
end_time = time.time()
print('Total cost time',
      time.strftime("%H hr %M min %S sec", time.gmtime(end_time - start_time)))

print('# of parameters:', sum(param.numel() for param in model.parameters()))

# save learning results
sio.savemat("results.mat", tracker)
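For reference, a minimal sketch of reading these artifacts back in; MyModel is a placeholder for the actual model class, and save_path / epoch are assumed to be the same variables used above, since neither is defined inside this fragment.

import os
import torch
import scipy.io as sio

# Rebuild the model with the same architecture, then restore the weights
# saved above with torch.save(model.state_dict(), checkpoint_path).
# MyModel is a placeholder for whatever model class produced the state dict.
model = MyModel()
checkpoint_path = os.path.join(save_path, "E%02d.pkl" % epoch)  # same naming pattern as above
model.load_state_dict(torch.load(checkpoint_path, map_location="cpu"))
model.eval()

# Reload the per-batch NLL values written with sio.savemat("results.mat", tracker).
results = sio.loadmat("results.mat")
nll_history = results["NLL"].squeeze()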
Example #2
    # Create the directory to save models to, if non-existent
    if not path.exists('./models'):
        os.mkdir('./models', 0o755)

    # Find an unused file path to save the model to
    while not fileSpaceFound:
        modelNumber += 1
        modelPath = './models/model' + str(modelNumber) + '.pth'
        if not path.exists(modelPath):
            print('model parameters will be saved to: ', modelPath)
            fileSpaceFound = True

    # Initializing model context dict to be saved with model
    modelContext = {
        'epoch': 0,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'model_parameters': modelParameters,
        'optimizer_parameters': optimizerParameters,
        'LSCoefficients': {}
    }
    # Training the Least Squares model so we can evaluate its performance on the same dataset
    LSCoefficients = LSTraining(LSTrainData)
    # Saving the LS Coefficients so we do not need to train it again
    modelContext['LSCoefficients'] = LSCoefficients
    # Letting the model know when the last epoch happens so we can record the MSEs of the individual samples
    for ep in range(1, epochs + 1):
        train(ep)
        tloss = evaluate()

        # Run through all epochs, find the best model and save it for testing
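A minimal sketch of resuming from such a checkpoint, assuming the surrounding code eventually writes the dict with torch.save(modelContext, modelPath) (the actual save call is not shown in this fragment):

import torch

# Load the saved context dict and restore model, optimizer and LS state.
checkpoint = torch.load(modelPath)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
LSCoefficients = checkpoint['LSCoefficients']
startEpoch = checkpoint['epoch'] + 1  # continue from the next epoch
for ep in range(startEpoch, epochs + 1):
    train(ep)
    tloss = evaluate()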
Example #3
    # Iterate over mini-batches; the final slice may be shorter than batch_size
    for i in range(0, train_x.size(0), batch_size):
        if i + batch_size > train_x.size(0):
            x, y = train_x[i:], train_y[i:]
        else:
            x, y = train_x[i:(i + batch_size)], train_y[i:(i + batch_size)]
        optimizer.zero_grad()
        output = model(x)
        output = torch.squeeze(output)
        loss = F.mse_loss(output, y)
        loss.backward()
        #     if args.clip > 0:
        #         torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
        batch_idx += 1
        total_loss += loss.item()

        if batch_idx % 100 == 0:
            cur_loss = total_loss / 100
            processed = min(i + batch_size, train_x.size(0))
            print(
                'Train Epoch: {:2d} [{:6d}/{:6d} ({:.0f}%)]\tLearning rate: {:.4f}\tLoss: {:.6f}'
                .format(epoch, processed, train_x.size(0),
                        100. * processed / train_x.size(0), lr, cur_loss))
            total_loss = 0


for ep in range(1, epochs + 1):
    train(ep)
    torch.save(model.state_dict(), model_para_path)
    evaluate()
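The evaluate() called above is not part of this fragment; below is a minimal sketch of a matching counterpart, where valid_x and valid_y are assumed held-out tensors rather than names taken from the original project.

import torch
import torch.nn.functional as F

def evaluate():
    # Evaluation counterpart to train(): full-batch MSE on held-out data,
    # computed with gradients disabled. valid_x / valid_y are assumptions;
    # the real validation tensors are not shown in this fragment.
    model.eval()
    with torch.no_grad():
        output = torch.squeeze(model(valid_x))
        loss = F.mse_loss(output, valid_y)
    model.train()
    print('Validation loss: {:.6f}'.format(loss.item()))
    return loss.item()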