def training(net, loader, optimizer):
    # train_data() is called here only for the normalisation statistics,
    # which are needed to report the loss on the original (denormalised) scale;
    # the batches themselves come from the loader passed in as an argument
    _, _, _, _, _, y_mean, y_var = train_data()
    for epoch in range(epochs):
        epoch_loss = 0.0
        loss_denormalized = 0.0
        for i, data in enumerate(loader):
            step = epoch * len(loader) + i
            inputs, labels = data
            prediction = net(inputs)

            # undo the target normalisation to monitor the loss in original units
            original_prediction = prediction * y_var + y_mean
            original_label = labels * y_var + y_mean
            # if epoch == epochs - 1:
            #     print(original_prediction, original_label)
            visual_loss = criterion(original_prediction, original_label)
            # writer.add_scalar('Train/doesitwork', visual_loss, step)
            loss_denormalized += visual_loss.item()

            batch_loss = criterion(prediction, labels)
            # accumulate the per-sample loss sum; .item() avoids keeping the
            # autograd graph alive across iterations
            epoch_loss += batch_loss.item() * labels.size(0)

            # backpropagation
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
            # writer.add_scalar('Train/weights', net.predict.weight[-1], epoch)
            # writer.add_scalar('/Step/Loss', batch_loss.item(), step)
            # net.log_weights(step)

        # writer.add_scalar('Epoch/TrainFc/deno', loss_denormalized, epoch)
        # writer.add_scalar('Epoch/TrainFc/Loss', epoch_loss, epoch)
        # report the average per-sample loss over the epoch
        print('Epoch: %d | Loss: %.4f' % (epoch + 1, epoch_loss / len(loader.dataset)))
    return 'Finished Training'
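# NOTE: training() relies on module-level names defined elsewhere in the
# original script (epochs, learning_rate, criterion). A minimal sketch of the
# assumed setup -- the concrete values and the choice of MSE loss are
# assumptions, not taken from the original:
import torch
import torch.nn as nn
import torch.optim as optim

epochs = 100              # hypothetical epoch count
learning_rate = 1e-3      # hypothetical Adam step size
criterion = nn.MSELoss()  # plausible loss for a regression net like FC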
train_loader, x_features, y_features, x_mean, x_var, y_mean, y_var = train_data()
model = FC(x_features, y_features)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# if the weights were not saved before, run training and save the parameters
try:
    model.load_state_dict(torch.load('modelWeights/paramsFC.ckpt'))
except FileNotFoundError:
    training(model, train_loader, optimizer)
    torch.save(model.state_dict(), 'modelWeights/paramsFC.ckpt')
# training(model, train_loader, optimizer)
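# Once the weights are loaded (or freshly trained), predictions come out on the
# normalised scale. A minimal inference sketch, mirroring the y-denormalisation
# used inside training(); the (x - x_mean) / x_var input convention and the
# x_new placeholder are assumptions, not taken from the original:
model.eval()
with torch.no_grad():
    x_new = torch.randn(1, x_features)   # hypothetical raw input row
    x_norm = (x_new - x_mean) / x_var    # normalise with the training statistics
    y_norm = model(x_norm)               # prediction on the normalised scale
    y_pred = y_norm * y_var + y_mean     # back to the original units
print(y_pred)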