def train_daily():
    """Train the daily model with checkpointing, LR decay and early stopping.

    Every 10th epoch (skipping epoch 0) the model is evaluated on the full
    train and validation sets:
      * a new minimum validation loss saves a checkpoint (model + optimizer
        state + best loss) to ``path``;
      * otherwise the best checkpoint is reloaded and a stall counter is
        incremented — after 7 consecutive stalls the learning rate is
        multiplied by 0.2, and after 24 stalls training stops early.

    Returns:
        tuple[list[float], list[float]]: (train_loss_info, val_loss_info),
        the losses recorded at each evaluation point, in order.
    """
    trainloader, train_x, train_y, validation_x, validation_y, test_x, test_y, \
        train_x_mean, train_x_std, train_y_mean, train_y_std = dataset_generate_daily()

    # Model, optimizer, loss function and checkpoint location.
    model = Net(feature_num)
    learning_r = 0.0002
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_r)
    loss_fun = torch.nn.MSELoss()
    path = 'checkpoint_0801.tar'

    # To resume training from a pre-trained model, uncomment the following lines.
    # checkpoint = torch.load(path)
    # model.load_state_dict(checkpoint['net'])
    # optimizer.load_state_dict(checkpoint['optimizer'])
    # validation_loss = checkpoint['best_validation_loss']
    # learning_r = 6.400000000000004e-8

    model.train()
    val_loss_info = []    # validation loss at every evaluation point
    train_loss_info = []  # training loss at every evaluation point
    not_improve = 0       # consecutive evaluations without improvement

    for epoch in range(1500):
        for batch_x, batch_y in trainloader:
            output = model(batch_x.float())
            loss = loss_fun(output, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if epoch % 10 == 0 and epoch > 0:
            # Full-set evaluation. no_grad() avoids building an autograd graph
            # over the entire train/validation tensors — same numbers, far less
            # memory than the original ungated forward passes.
            with torch.no_grad():
                train_loss = loss_fun(model(train_x), train_y).item()
                vali_loss = loss_fun(model(validation_x), validation_y).item()

            if len(val_loss_info) == 0 or vali_loss < min(val_loss_info):
                # New best validation loss: persist model + optimizer state.
                not_improve = 0
                state = {'net': model.state_dict(),
                         'optimizer': optimizer.state_dict(),
                         'best_validation_loss': vali_loss}
                torch.save(state, path)
                print("Saved the model.")
            else:
                # No improvement: roll back to the best checkpoint before the
                # next epoch, then count the stall.
                checkpoint = torch.load(path)
                model.load_state_dict(checkpoint['net'])
                optimizer.load_state_dict(checkpoint['optimizer'])
                not_improve += 1
                print("not_improve : {}".format(not_improve))
                if (not_improve + 1) % 8 == 0:
                    # Validation loss hasn't decreased for 7 evaluations:
                    # decay the learning rate.
                    learning_r *= 0.2
                    adjust_learning_rate(optimizer, learning_r)
                if (not_improve + 1) % 25 == 0:
                    # 24 stalled evaluations: early stopping (note the final
                    # print/append below is intentionally skipped, as before).
                    print("Training End......")
                    break
            print("epoch:{}, train_loss:{}, vali_loss: {}".format(epoch, train_loss, vali_loss))
            train_loss_info.append(train_loss)
            val_loss_info.append(vali_loss)
    return train_loss_info, val_loss_info
def test_daily(path, test_x, test_y, train_mean, train_std):
    """Evaluate the best saved model on the test set and plot the results.

    Args:
        path: checkpoint file produced by training (expects a 'net' entry).
        test_x: normalized test inputs (torch tensor).
        test_y: normalized test targets (torch tensor).
        train_mean: normalization mean used to map predictions back to the
            original scale (forwarded to ``loss_cal``).
        train_std: normalization std, forwarded to ``loss_cal``.

    Side effects: prints the normalized-scale and original-scale test losses
    and shows two matplotlib figures (series overlay and real-vs-predicted
    scatter with the ideal y = x line).
    """
    # Rebuild the network and load the best checkpoint for evaluation.
    model = Net(feature_num)
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['net'])
    model.eval()

    # Inference only: no_grad() keeps the forward pass from tracking
    # gradients (the original built an unnecessary autograd graph here).
    with torch.no_grad():
        predict = model(test_x.float())
    real = test_y.clone()

    loss_fun = torch.nn.MSELoss()
    # Two losses: one on the normalized scale, one mapped back to the
    # original scale via loss_cal.
    loss = loss_fun(predict, real)
    loss_original_scale, pre, rea = loss_cal(predict, real, train_mean, train_std)
    pre = np.array(pre.data)
    rea = np.array(rea.data)
    print("Test loss: " + str(loss))
    print("Test loss in original scale: " + str(loss_original_scale))

    # Real vs. predicted series over the test period.
    plt.figure(2)
    plt.plot(list(rea), label="real")
    plt.plot(list(pre), label="pred")
    plt.legend(loc='best')
    plt.show()

    # Real vs. predicted scatter; the red diagonal is perfect prediction.
    plt.figure(3)
    min_val = min(rea)
    max_val = max(rea)
    plt.scatter(rea, pre)
    plt.plot([min_val, max_val], [min_val, max_val], color='red')
    plt.show()