def main():
    """Train or evaluate a time-series forecasting model for a single device.

    All settings come from the module-level ``config`` namespace.  In
    ``'train'`` mode the function runs the full train/validation loop
    (optionally fine-tuning from a pre-trained Device-12 checkpoint when
    ``config.transfer_learning`` is set) and checkpoints the model whenever
    the validation loss improves, with early stopping after 10 stale epochs.
    In ``'test'`` mode it loads the best checkpoint and delegates metric
    computation and plotting to ``test()``.

    NOTE(review): depends on module-level names (``config``, ``device``,
    ``load_data``, ``get_time_series_data_``, model classes, metric helpers)
    defined elsewhere in the project; it cannot run in isolation.
    """
    # Print the config args (debug output).
    print(config.transfer_learning)
    print(config.mode)
    print(config.input_size)

    # Fix Seed for Reproducibility #
    random.seed(config.seed)
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(config.seed)

    # Samples, Weights, and Plots Path #
    paths = [config.weights_path, config.plots_path, config.numpy_path]
    for path in paths:
        make_dirs(path)

    # Prepare Data #
    data = load_data(config.combined_path, config.which_data, config.preprocess, config.resample)
    # id = config.which_data.split('_')[0]
    # NOTE(review): the device id is hard-coded instead of being parsed from
    # the data filename (commented line above); the name also shadows the
    # builtin id().
    id = 12  # BOON added
    print("Data of {} is successfully Loaded!".format(config.which_data))
    print(type(data))
    print(data.shape)

    # Plot Time-series Data #
    if config.plot:
        plot_full(config.plots_path, data, id, config.feature)
        plot_split(config.plots_path, data, id, config.valid_start, config.test_start, config.feature)

    # Min-Max Scaler #
    # Scale all columns in place; the fitted scaler is reused in 'test' mode
    # (via test()) to map predictions back to the original value range.
    scaler = MinMaxScaler()
    data.iloc[:, :] = scaler.fit_transform(data)
    print(type(data))

    # Split the Dataset #
    train_X, train_Y, val_X, val_Y, test_X, test_Y, test_shifted = \
        get_time_series_data_(data, config.valid_start, config.test_start, config.feature, config.label, config.window)
    print(train_X.shape)
    print(train_Y.shape)

    # Get Data Loader #
    train_loader, val_loader, test_loader = \
        get_data_loader(train_X, train_Y, val_X, val_Y, test_X, test_Y, config.batch_size)

    # Constants #
    best_val_loss = 100  # sentinel; assumes the scaled validation loss starts below 100
    best_val_improv = 0  # epochs since the last validation improvement

    # Lists #
    train_losses, val_losses = list(), list()
    val_maes, val_mses, val_rmses, val_mapes, val_mpes, val_r2s = list(), list(), list(), list(), list(), list()

    # Prepare Network #
    if config.network == 'dnn':
        model = DNN(config.window, config.hidden_size, config.output_size).to(device)
    elif config.network == 'cnn':
        model = CNN(config.window, config.hidden_size, config.output_size).to(device)
    elif config.network == 'rnn':
        model = RNN(config.input_size, config.hidden_size, config.num_layers, config.output_size).to(device)
    elif config.network == 'lstm':
        model = LSTM(config.input_size, config.hidden_size, config.num_layers, config.output_size, config.bidirectional).to(device)
    elif config.network == 'gru':
        model = GRU(config.input_size, config.hidden_size, config.num_layers, config.output_size).to(device)
    elif config.network == 'recursive':
        model = RecursiveLSTM(config.input_size, config.hidden_size, config.num_layers, config.output_size).to(device)
    elif config.network == 'attentional':
        model = AttentionalLSTM(config.input_size, config.key, config.query, config.value, config.hidden_size, config.num_layers, config.output_size, config.bidirectional).to(device)
    else:
        raise NotImplementedError

    if config.mode == 'train':

        # If fine-tuning #
        print('config.TL = {}'.format(config.transfer_learning))
        if config.transfer_learning:
            print('config.TL = {}'.format(config.transfer_learning))
            print('TL: True')
            # Warm-start from the pre-trained Device-12 checkpoint, then keep
            # every parameter trainable (full fine-tuning, no frozen layers).
            model.load_state_dict(torch.load(os.path.join(config.weights_path, 'BEST_{}_Device_ID_12.pkl'.format(config.network))))
            for param in model.parameters():
                param.requires_grad = True

        # Loss Function #
        criterion = torch.nn.MSELoss()

        # Optimizer #
        optimizer = torch.optim.Adam(model.parameters(), lr=config.lr, betas=(0.5, 0.999))
        optimizer_scheduler = get_lr_scheduler(config.lr_scheduler, optimizer, config)

        # Train and Validation #
        print("Training {} started with total epoch of {} using Driver ID of {}.".format(config.network, config.num_epochs, id))

        for epoch in range(config.num_epochs):

            # Train #
            for i, (data, label) in enumerate(train_loader):

                # Data Preparation #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred = model(data)

                # Calculate Loss #
                train_loss = criterion(pred, label)

                # Back Propagation and Update #
                optimizer.zero_grad()
                train_loss.backward()
                optimizer.step()

                # Add items to Lists #
                train_losses.append(train_loss.item())

            print("Epoch [{}/{}]".format(epoch+1, config.num_epochs))
            print("Train")
            # NOTE(review): train_losses is never cleared between epochs, so
            # this prints a running average over every batch so far, not the
            # loss of the current epoch alone.
            print("Loss : {:.4f}".format(np.average(train_losses)))

            optimizer_scheduler.step()

            # Validation #
            with torch.no_grad():
                for i, (data, label) in enumerate(val_loader):

                    # Data Preparation #
                    data = data.to(device, dtype=torch.float32)
                    label = label.to(device, dtype=torch.float32)

                    # Forward Data #
                    pred_val = model(data)

                    # Calculate Loss #
                    val_loss = criterion(pred_val, label)
                    val_mae = mean_absolute_error(label.cpu(), pred_val.cpu())
                    val_mse = mean_squared_error(label.cpu(), pred_val.cpu(), squared=True)
                    val_rmse = mean_squared_error(label.cpu(), pred_val.cpu(), squared=False)
                    val_mpe = mean_percentage_error(label.cpu(), pred_val.cpu())
                    val_mape = mean_absolute_percentage_error(label.cpu(), pred_val.cpu())
                    val_r2 = r2_score(label.cpu(), pred_val.cpu())

                    # Add item to Lists #
                    val_losses.append(val_loss.item())
                    val_maes.append(val_mae.item())
                    val_mses.append(val_mse.item())
                    val_rmses.append(val_rmse.item())
                    val_mpes.append(val_mpe.item())
                    val_mapes.append(val_mape.item())
                    val_r2s.append(val_r2.item())

            # Print Statistics #
            # NOTE(review): like train_losses, the validation lists accumulate
            # across epochs, so these are cumulative running averages.
            print("Validation")
            print("Loss : {:.4f}".format(np.average(val_losses)))
            print(" MAE : {:.4f}".format(np.average(val_maes)))
            print(" MSE : {:.4f}".format(np.average(val_mses)))
            print("RMSE : {:.4f}".format(np.average(val_rmses)))
            print(" MPE : {:.4f}".format(np.average(val_mpes)))
            print("MAPE : {:.4f}".format(np.average(val_mapes)))
            print(" R^2 : {:.4f}".format(np.average(val_r2s)))

            # Save the model only if validation loss decreased #
            curr_val_loss = np.average(val_losses)
            if curr_val_loss < best_val_loss:
                best_val_loss = min(curr_val_loss, best_val_loss)

                # if config.transfer_learning:
                #     torch.save(model.state_dict(), os.path.join(config.weights_path, 'BEST_{}_Device_ID_{}_transfer.pkl'.format(config.network, id)))
                # else:
                #     torch.save(model.state_dict(), os.path.join(config.weights_path, 'BEST_{}_Device_ID_{}.pkl'.format(config.network, id)))

                if config.transfer_learning:
                    torch.save(model.state_dict(), os.path.join(config.weights_path, 'BEST_{}_Device_ID_{}_transfer_BOON_reshaped.pkl'.format(config.network, id)))
                else:
                    torch.save(model.state_dict(), os.path.join(config.weights_path, 'BEST_{}_Device_ID_{}_BOON_reshaped.pkl'.format(config.network, id)))

                print("Best model is saved!\n")
                best_val_improv = 0

            elif curr_val_loss >= best_val_loss:
                best_val_improv += 1
                print("Best Validation has not improved for {} epochs.\n".format(best_val_improv))
                # Early stopping after 10 epochs without improvement.
                if best_val_improv == 10:
                    break

    elif config.mode == 'test':

        # Prepare Network #
        if config.transfer_learning:
            model.load_state_dict(torch.load(os.path.join(config.weights_path, 'BEST_{}_Device_ID_{}_transfer_BOON_reshaped.pkl'.format(config.network, id))))
        else:
            model.load_state_dict(torch.load(os.path.join(config.weights_path, 'BEST_{}_Device_ID_{}_BOON_reshaped.pkl'.format(config.network, id))))

        print("{} for Device ID {} is successfully loaded!".format((config.network).upper(), id))

        with torch.no_grad():

            pred_test, labels = list(), list()

            for i, (data, label) in enumerate(test_loader):

                # Data Preparation #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred = model(data)

                # Add items to Lists #
                # += on a Python list iterates the batch dimension, appending
                # one per-sample tensor at a time.
                pred_test += pred
                labels += label

        # Derive Metric and Plot #
        if config.transfer_learning:
            pred, actual = test(config.plots_path, id, config.network, scaler, pred_test, labels, test_shifted, transfer_learning=True)
        else:
            pred, actual = test(config.plots_path, id, config.network, scaler, pred_test, labels, test_shifted)
def main(args):
    """Train or evaluate a single-feature time-series forecasting model.

    Supports single-step and multi-step prediction (``args.multi_step``).
    In ``'train'`` mode it runs the train/validation loop and checkpoints
    the model whenever the validation loss improves; in ``'test'`` mode it
    loads the best checkpoint, reports metrics in the original value range,
    plots the predictions, and saves them as ``.npy`` files.

    NOTE(review): depends on module-level names (``device``, ``load_data``,
    ``data_loader``, model classes, metric helpers) defined elsewhere in the
    project; it cannot run in isolation.
    """
    # Fix Seed #
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Weights and Plots Path #
    paths = [args.weights_path, args.plots_path, args.numpy_path]
    for path in paths:
        make_dirs(path)

    # Prepare Data #
    data = load_data(args.which_data)[[args.feature]]
    data = data.copy()

    # Plot Time-Series Data #
    if args.plot_full:
        plot_full(args.plots_path, data, args.feature)

    # Scale the single feature; the fitted scaler is reused in 'test' mode to
    # map predictions back to the original value range.
    scaler = MinMaxScaler()
    data[args.feature] = scaler.fit_transform(data)

    # Split the Dataset #
    copied_data = data.copy().values
    if args.multi_step:
        X, y = split_sequence_multi_step(copied_data, args.seq_length, args.output_size)
        step = 'MultiStep'  # tag used in checkpoint / output filenames
    else:
        X, y = split_sequence_uni_step(copied_data, args.seq_length)
        step = 'SingleStep'

    train_loader, val_loader, test_loader = data_loader(
        X, y, args.train_split, args.test_split, args.batch_size)

    # Lists #
    # NOTE(review): these lists are never cleared between epochs, so every
    # value printed below is a running average over all epochs so far.
    train_losses, val_losses = list(), list()
    val_maes, val_mses, val_rmses, val_mapes, val_mpes, val_r2s = list(), list(), list(), list(), list(), list()
    test_maes, test_mses, test_rmses, test_mapes, test_mpes, test_r2s = list(), list(), list(), list(), list(), list()
    pred_tests, labels = list(), list()

    # Constants #
    best_val_loss = 100  # sentinel; assumes the scaled validation loss starts below 100
    best_val_improv = 0  # epochs since the last validation improvement

    # Prepare Network #
    if args.model == 'dnn':
        model = DNN(args.seq_length, args.hidden_size, args.output_size).to(device)
    elif args.model == 'cnn':
        # NOTE(review): CNN takes batch_size as its second argument here,
        # unlike the other constructors — confirm against the CNN definition.
        model = CNN(args.seq_length, args.batch_size, args.output_size).to(device)
    elif args.model == 'rnn':
        model = RNN(args.input_size, args.hidden_size, args.num_layers, args.output_size).to(device)
    elif args.model == 'lstm':
        model = LSTM(args.input_size, args.hidden_size, args.num_layers, args.output_size, args.bidirectional).to(device)
    elif args.model == 'gru':
        model = GRU(args.input_size, args.hidden_size, args.num_layers, args.output_size).to(device)
    elif args.model == 'attentional':
        model = AttentionalLSTM(args.input_size, args.qkv, args.hidden_size, args.num_layers, args.output_size, args.bidirectional).to(device)
    else:
        raise NotImplementedError

    # Loss Function #
    criterion = torch.nn.MSELoss()

    # Optimizer #
    optim = torch.optim.Adam(model.parameters(), lr=args.lr, betas=(0.5, 0.999))
    optim_scheduler = get_lr_scheduler(args.lr_scheduler, optim)

    # Train and Validation #
    if args.mode == 'train':

        # Train #
        print("Training {} using {} started with total epoch of {}.".format(
            model.__class__.__name__, step, args.num_epochs))

        for epoch in range(args.num_epochs):
            for i, (data, label) in enumerate(train_loader):

                # Prepare Data #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred = model(data)

                # Calculate Loss #
                train_loss = criterion(pred, label)

                # Initialize Optimizer, Back Propagation and Update #
                optim.zero_grad()
                train_loss.backward()
                optim.step()

                # Add item to Lists #
                train_losses.append(train_loss.item())

            # Print Statistics #
            if (epoch + 1) % args.print_every == 0:
                print("Epoch [{}/{}]".format(epoch + 1, args.num_epochs))
                print("Train Loss {:.4f}".format(np.average(train_losses)))

            # Learning Rate Scheduler #
            optim_scheduler.step()

            # Validation #
            with torch.no_grad():
                for i, (data, label) in enumerate(val_loader):

                    # Prepare Data #
                    data = data.to(device, dtype=torch.float32)
                    label = label.to(device, dtype=torch.float32)

                    # Forward Data #
                    pred_val = model(data)

                    # Calculate Loss #
                    val_loss = criterion(pred_val, label)

                    # For multi-step, metrics are computed on the series
                    # averaged over the prediction horizon (axis=1).
                    if args.multi_step:
                        pred_val = np.mean(pred_val.detach().cpu().numpy(), axis=1)
                        label = np.mean(label.detach().cpu().numpy(), axis=1)
                    else:
                        pred_val, label = pred_val.cpu(), label.cpu()

                    # Calculate Metrics #
                    val_mae = mean_absolute_error(label, pred_val)
                    val_mse = mean_squared_error(label, pred_val, squared=True)
                    val_rmse = mean_squared_error(label, pred_val, squared=False)
                    val_mpe = mean_percentage_error(label, pred_val)
                    val_mape = mean_absolute_percentage_error(label, pred_val)
                    val_r2 = r2_score(label, pred_val)

                    # Add item to Lists #
                    val_losses.append(val_loss.item())
                    val_maes.append(val_mae.item())
                    val_mses.append(val_mse.item())
                    val_rmses.append(val_rmse.item())
                    val_mpes.append(val_mpe.item())
                    val_mapes.append(val_mape.item())
                    val_r2s.append(val_r2.item())

            if (epoch + 1) % args.print_every == 0:

                # Print Statistics #
                print("Val Loss {:.4f}".format(np.average(val_losses)))
                print(" MAE : {:.4f}".format(np.average(val_maes)))
                print(" MSE : {:.4f}".format(np.average(val_mses)))
                print("RMSE : {:.4f}".format(np.average(val_rmses)))
                print(" MPE : {:.4f}".format(np.average(val_mpes)))
                print("MAPE : {:.4f}".format(np.average(val_mapes)))
                print(" R^2 : {:.4f}".format(np.average(val_r2s)))

            # Save the model only if validation loss decreased #
            curr_val_loss = np.average(val_losses)
            if curr_val_loss < best_val_loss:
                best_val_loss = min(curr_val_loss, best_val_loss)
                torch.save(
                    model.state_dict(),
                    os.path.join(
                        args.weights_path,
                        'BEST_{}_using_{}.pkl'.format(
                            model.__class__.__name__, step)))
                print("Best model is saved!\n")
                best_val_improv = 0
            elif curr_val_loss >= best_val_loss:
                best_val_improv += 1
                print("Best Validation has not improved for {} epochs.\n".
                      format(best_val_improv))

    elif args.mode == 'test':

        # Load the Model Weight #
        model.load_state_dict(
            torch.load(
                os.path.join(
                    args.weights_path,
                    'BEST_{}_using_{}.pkl'.format(model.__class__.__name__,
                                                  step))))

        # Test #
        with torch.no_grad():
            for i, (data, label) in enumerate(test_loader):

                # Prepare Data #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred_test = model(data)

                # Convert to Original Value Range #
                pred_test, label = pred_test.detach().cpu().numpy(
                ), label.detach().cpu().numpy()
                # NOTE(review): for multi-step, inverse_transform receives an
                # (n, output_size) array although the scaler was fitted on a
                # single column — relies on broadcasting; confirm with the
                # installed scikit-learn version.
                pred_test = scaler.inverse_transform(pred_test)
                label = scaler.inverse_transform(label)

                if args.multi_step:
                    # Collapse the horizon to its mean before metrics/plots.
                    pred_test = np.mean(pred_test, axis=1)
                    label = np.mean(label, axis=1)

                pred_tests += pred_test.tolist()
                labels += label.tolist()

                # Calculate Loss #
                test_mae = mean_absolute_error(label, pred_test)
                test_mse = mean_squared_error(label, pred_test, squared=True)
                test_rmse = mean_squared_error(label, pred_test, squared=False)
                test_mpe = mean_percentage_error(label, pred_test)
                test_mape = mean_absolute_percentage_error(label, pred_test)
                test_r2 = r2_score(label, pred_test)

                # Add item to Lists #
                test_maes.append(test_mae.item())
                test_mses.append(test_mse.item())
                test_rmses.append(test_rmse.item())
                test_mpes.append(test_mpe.item())
                test_mapes.append(test_mape.item())
                test_r2s.append(test_r2.item())

            # Print Statistics (averaged over test batches) #
            print("Test {} using {}".format(model.__class__.__name__, step))
            print(" MAE : {:.4f}".format(np.average(test_maes)))
            print(" MSE : {:.4f}".format(np.average(test_mses)))
            print("RMSE : {:.4f}".format(np.average(test_rmses)))
            print(" MPE : {:.4f}".format(np.average(test_mpes)))
            print("MAPE : {:.4f}".format(np.average(test_mapes)))
            print(" R^2 : {:.4f}".format(np.average(test_r2s)))

            # Plot Figure #
            plot_pred_test(pred_tests[:args.time_plot],
                           labels[:args.time_plot], args.plots_path,
                           args.feature, model, step)

            # Save Numpy files #
            np.save(
                os.path.join(
                    args.numpy_path,
                    '{}_using_{}_TestSet.npy'.format(model.__class__.__name__,
                                                     step)),
                np.asarray(pred_tests))
            np.save(
                os.path.join(args.numpy_path,
                             'TestSet_using_{}.npy'.format(step)),
                np.asarray(labels))

    else:
        raise NotImplementedError
def main(args):
    """Train or evaluate a forecasting network for one device's time series.

    In ``'train'`` mode this runs the train/validation loop (optionally
    fine-tuning from a pre-trained Device-12 checkpoint when
    ``args.transfer_learning`` is set), checkpoints the model whenever the
    validation loss improves, and early-stops after 10 stale epochs.  In
    ``'test'`` mode it loads the best checkpoint matching the training run,
    reports test metrics in the original value range, and plots predictions.

    Fixes over the previous revision:
      * test mode now loads the ``*_transfer.pkl`` checkpoint that training
        actually saves when ``args.transfer_learning`` is set (both branches
        previously loaded the non-transfer file);
      * ``test_plot`` now receives ``transfer_learning=True`` in the transfer
        branch (both branches previously passed ``False``).

    NOTE(review): depends on module-level names (``device``, ``load_data``,
    ``get_data_loader``, model classes, metric helpers) defined elsewhere in
    the project; it cannot run in isolation.
    """
    # Fix Seed for Reproducibility #
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Samples, Weights, and Plots Path #
    for path in (args.weights_path, args.plots_path, args.numpy_path):
        make_dirs(path)

    # Prepare Data #
    data = load_data(args.combined_path, args.which_data, args.preprocess,
                     args.resample)[[args.feature]]
    # The device id is encoded as the leading token of the data filename.
    device_id = args.which_data.split('_')[0]
    print("Data of {} is successfully Loaded!".format(args.which_data))

    # Plot Time-series Data #
    if args.plot:
        plot_full(args.plots_path, data, device_id, args.feature)
        plot_split(args.plots_path, data, device_id, args.valid_start,
                   args.test_start, args.feature)

    # Min-Max Scaler #
    # The fitted scaler is reused in 'test' mode to undo the scaling.
    scaler = MinMaxScaler()
    data[args.feature] = scaler.fit_transform(data)

    # Split the Dataset #
    copied_data = data.copy()
    if args.multi_step:
        X, y = split_sequence_multi_step(copied_data, args.window,
                                         args.output_size)
    else:
        X, y = split_sequence_uni_step(copied_data, args.window)

    # Get Data Loader #
    train_loader, val_loader, test_loader = get_data_loader(
        X, y, args.train_split, args.test_split, args.batch_size)

    # Constants #
    best_val_loss = 100   # sentinel; any plausible scaled loss beats this
    best_val_improv = 0   # epochs since the last validation improvement

    # Lists #
    # NOTE(review): never cleared between epochs, so the values printed each
    # epoch are running averages over all epochs so far (kept as-is).
    train_losses, val_losses = list(), list()
    val_maes, val_mses, val_rmses, val_r2s = list(), list(), list(), list()
    test_maes, test_mses, test_rmses, test_r2s = list(), list(), list(), list()

    # Prepare Network #
    if args.network == 'dnn':
        model = DNN(args.window, args.hidden_size, args.output_size).to(device)
    elif args.network == 'cnn':
        model = CNN(args.window, args.hidden_size, args.output_size).to(device)
    elif args.network == 'rnn':
        model = RNN(args.input_size, args.hidden_size, args.num_layers,
                    args.output_size).to(device)
    elif args.network == 'lstm':
        model = LSTM(args.input_size, args.hidden_size, args.num_layers,
                     args.output_size, args.bidirectional).to(device)
    elif args.network == 'gru':
        model = GRU(args.input_size, args.hidden_size, args.num_layers,
                    args.output_size).to(device)
    elif args.network == 'recursive':
        model = RecursiveLSTM(args.input_size, args.hidden_size,
                              args.num_layers, args.output_size).to(device)
    elif args.network == 'attentional':
        model = AttentionalLSTM(args.input_size, args.qkv, args.hidden_size,
                                args.num_layers, args.output_size,
                                args.bidirectional).to(device)
    else:
        raise NotImplementedError

    if args.mode == 'train':

        # If fine-tuning, warm-start from the pre-trained Device-12 weights
        # and keep every parameter trainable (full fine-tuning). #
        if args.transfer_learning:
            model.load_state_dict(
                torch.load(
                    os.path.join(
                        args.weights_path,
                        'BEST_{}_Device_ID_12.pkl'.format(
                            model.__class__.__name__))))
            for param in model.parameters():
                param.requires_grad = True

        # Loss Function #
        criterion = torch.nn.MSELoss()

        # Optimizer #
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                     betas=(0.5, 0.999))
        optimizer_scheduler = get_lr_scheduler(optimizer, args)

        # Train and Validation #
        print(
            "Training {} started with total epoch of {} using Driver ID of {}."
            .format(model.__class__.__name__, args.num_epochs, device_id))

        for epoch in range(args.num_epochs):

            # Train #
            for data, label in train_loader:

                # Data Preparation #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred = model(data)

                # Calculate Loss #
                train_loss = criterion(pred, label)

                # Back Propagation and Update #
                optimizer.zero_grad()
                train_loss.backward()
                optimizer.step()

                # Add items to Lists #
                train_losses.append(train_loss.item())

            print("Epoch [{}/{}]".format(epoch + 1, args.num_epochs))
            print("Train")
            print("Loss : {:.4f}".format(np.average(train_losses)))

            optimizer_scheduler.step()

            # Validation #
            with torch.no_grad():
                for data, label in val_loader:

                    # Data Preparation #
                    data = data.to(device, dtype=torch.float32)
                    label = label.to(device, dtype=torch.float32)

                    # Forward Data #
                    pred_val = model(data)

                    # Calculate Loss and Metrics #
                    val_loss = criterion(pred_val, label)
                    val_mae = mean_absolute_error(label.cpu(), pred_val.cpu())
                    val_mse = mean_squared_error(label.cpu(), pred_val.cpu(),
                                                 squared=True)
                    val_rmse = mean_squared_error(label.cpu(), pred_val.cpu(),
                                                  squared=False)
                    val_r2 = r2_score(label.cpu(), pred_val.cpu())

                    # Add item to Lists #
                    val_losses.append(val_loss.item())
                    val_maes.append(val_mae.item())
                    val_mses.append(val_mse.item())
                    val_rmses.append(val_rmse.item())
                    val_r2s.append(val_r2.item())

            # Print Statistics #
            print("Validation")
            print("Loss : {:.4f}".format(np.average(val_losses)))
            print(" MAE : {:.4f}".format(np.average(val_maes)))
            print(" MSE : {:.4f}".format(np.average(val_mses)))
            print("RMSE : {:.4f}".format(np.average(val_rmses)))
            print(" R^2 : {:.4f}".format(np.average(val_r2s)))

            # Save the model only if validation loss decreased #
            curr_val_loss = np.average(val_losses)
            if curr_val_loss < best_val_loss:
                # Inside this branch curr_val_loss is already the minimum.
                best_val_loss = curr_val_loss
                if args.transfer_learning:
                    torch.save(
                        model.state_dict(),
                        os.path.join(
                            args.weights_path,
                            'BEST_{}_Device_ID_{}_transfer.pkl'.format(
                                model.__class__.__name__, device_id)))
                else:
                    torch.save(
                        model.state_dict(),
                        os.path.join(
                            args.weights_path,
                            'BEST_{}_Device_ID_{}.pkl'.format(
                                model.__class__.__name__, device_id)))
                print("Best model is saved!\n")
                best_val_improv = 0
            elif curr_val_loss >= best_val_loss:
                best_val_improv += 1
                print("Best Validation has not improved for {} epochs.\n".
                      format(best_val_improv))
                # Early stopping after 10 epochs without improvement.
                if best_val_improv == 10:
                    break

    elif args.mode == 'test':

        # Prepare Network: load the checkpoint that matches the training run.
        # BUGFIX: the transfer branch previously loaded the non-transfer file
        # and silently evaluated the wrong weights.
        if args.transfer_learning:
            weight_name = 'BEST_{}_Device_ID_{}_transfer.pkl'.format(
                model.__class__.__name__, device_id)
        else:
            weight_name = 'BEST_{}_Device_ID_{}.pkl'.format(
                model.__class__.__name__, device_id)
        model.load_state_dict(
            torch.load(os.path.join(args.weights_path, weight_name)))
        print("{} for Device ID {} is successfully loaded!".format(
            model.__class__.__name__, device_id))

        with torch.no_grad():
            for data, label in test_loader:

                # Data Preparation #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred_test = model(data)

                # Convert to Original Value Range #
                pred_test = pred_test.data.cpu().numpy()
                label = label.data.cpu().numpy()
                if not args.multi_step:
                    # Single-step labels come out 1-D; make them a column so
                    # inverse_transform accepts them.
                    label = label.reshape(-1, 1)
                pred_test = scaler.inverse_transform(pred_test)
                label = scaler.inverse_transform(label)

                # Calculate Metrics #
                test_mae = mean_absolute_error(label, pred_test)
                test_mse = mean_squared_error(label, pred_test, squared=True)
                test_rmse = mean_squared_error(label, pred_test,
                                               squared=False)
                test_r2 = r2_score(label, pred_test)

                # Add item to Lists #
                test_maes.append(test_mae.item())
                test_mses.append(test_mse.item())
                test_rmses.append(test_rmse.item())
                test_r2s.append(test_r2.item())

        # Print Statistics (averaged over test batches) #
        print("Test {}".format(model.__class__.__name__))
        print(" MAE : {:.4f}".format(np.average(test_maes)))
        print(" MSE : {:.4f}".format(np.average(test_mses)))
        print("RMSE : {:.4f}".format(np.average(test_rmses)))
        print(" R^2 : {:.4f}".format(np.average(test_r2s)))

        # Derive Metric and Plot (plots the final test batch, as before).
        # BUGFIX: the transfer branch now passes transfer_learning=True.
        if args.transfer_learning:
            test_plot(pred_test, label, args.plots_path, args.feature,
                      device_id, model, transfer_learning=True)
        else:
            test_plot(pred_test, label, args.plots_path, args.feature,
                      device_id, model, transfer_learning=False)
def main(config):
    """Train or evaluate a single-feature forecasting model (config-driven).

    ``'train'`` mode runs the train/validation loop and checkpoints the model
    whenever the validation loss improves; ``'test'`` mode loads the best
    checkpoint, reports metrics in the original value range, and plots the
    predictions.

    NOTE(review): depends on module-level names (``device``, ``load_data``,
    ``data_loader``, model classes, metric helpers) defined elsewhere in the
    project; it cannot run in isolation.
    """
    # Fix Seed #
    random.seed(config.seed)
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)

    # Weights and Plots Path #
    paths = [config.weights_path, config.plots_path]
    for path in paths:
        make_dirs(path)

    # Prepare Data #
    data = load_data(config.which_data)[[config.feature]]
    data = data.copy()

    # Plot Time-Series Data #
    if config.plot_full:
        plot_full(config.plots_path, data, config.feature)

    # Scale the single feature; the fitted scaler is reused in 'test' mode to
    # map predictions back to the original value range.
    scaler = MinMaxScaler()
    data[config.feature] = scaler.fit_transform(data)

    train_loader, val_loader, test_loader = \
        data_loader(data, config.seq_length, config.train_split, config.test_split, config.batch_size)

    # Lists #
    # NOTE(review): never cleared between epochs, so every value printed
    # below is a running average over all epochs so far.
    train_losses, val_losses = list(), list()
    val_maes, val_mses, val_rmses, val_mapes, val_mpes, val_r2s = list(), list(), list(), list(), list(), list()
    test_maes, test_mses, test_rmses, test_mapes, test_mpes, test_r2s = list(), list(), list(), list(), list(), list()

    # Constants #
    best_val_loss = 100  # sentinel; assumes the scaled validation loss starts below 100
    best_val_improv = 0  # epochs since the last improvement (tracked; no early stop here)

    # Prepare Network #
    if config.network == 'dnn':
        model = DNN(config.seq_length, config.hidden_size, config.output_size).to(device)
    elif config.network == 'cnn':
        # NOTE(review): CNN takes batch_size as its second argument here,
        # unlike the other constructors — confirm against the CNN definition.
        model = CNN(config.seq_length, config.batch_size).to(device)
    elif config.network == 'rnn':
        model = RNN(config.input_size, config.hidden_size, config.num_layers, config.output_size).to(device)
    elif config.network == 'lstm':
        model = LSTM(config.input_size, config.hidden_size, config.num_layers, config.output_size, config.bidirectional).to(device)
    elif config.network == 'gru':
        model = GRU(config.input_size, config.hidden_size, config.num_layers, config.output_size).to(device)
    elif config.network == 'recursive':
        model = RecursiveLSTM(config.input_size, config.hidden_size, config.num_layers, config.output_size).to(device)
    elif config.network == 'attention':
        model = AttentionLSTM(config.input_size, config.key, config.query, config.value, config.hidden_size, config.num_layers, config.output_size, config.bidirectional).to(device)
    else:
        raise NotImplementedError

    # Loss Function #
    criterion = torch.nn.MSELoss()

    # Optimizer #
    optim = torch.optim.Adam(model.parameters(), lr=config.lr, betas=(0.5, 0.999))
    optim_scheduler = get_lr_scheduler(config.lr_scheduler, optim)

    # Train and Validation #
    if config.mode == 'train':

        # Train #
        print("Training {} started with total epoch of {}.".format(model.__class__.__name__, config.num_epochs))

        for epoch in range(config.num_epochs):
            for i, (data, label) in enumerate(train_loader):

                # Prepare Data #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred = model(data)

                # Calculate Loss #
                train_loss = criterion(pred, label)

                # Initialize Optimizer, Back Propagation and Update #
                optim.zero_grad()
                train_loss.backward()
                optim.step()

                # Add item to Lists #
                train_losses.append(train_loss.item())

            # Print Statistics #
            if (epoch+1) % config.print_every == 0:
                print("Epoch [{}/{}]".format(epoch+1, config.num_epochs))
                print("Train Loss {:.4f}".format(np.average(train_losses)))

            # Learning Rate Scheduler #
            optim_scheduler.step()

            # Validation #
            with torch.no_grad():
                for i, (data, label) in enumerate(val_loader):

                    # Prepare Data #
                    data = data.to(device, dtype=torch.float32)
                    label = label.to(device, dtype=torch.float32)

                    # Forward Data #
                    pred_val = model(data)

                    # Calculate Loss #
                    val_loss = criterion(pred_val, label)
                    val_mae = mean_absolute_error(label.cpu(), pred_val.cpu())
                    val_mse = mean_squared_error(label.cpu(), pred_val.cpu(), squared=True)
                    val_rmse = mean_squared_error(label.cpu(), pred_val.cpu(), squared=False)
                    val_mpe = mean_percentage_error(label.cpu(), pred_val.cpu())
                    val_mape = mean_absolute_percentage_error(label.cpu(), pred_val.cpu())
                    val_r2 = r2_score(label.cpu(), pred_val.cpu())

                    # Add item to Lists #
                    val_losses.append(val_loss.item())
                    val_maes.append(val_mae.item())
                    val_mses.append(val_mse.item())
                    val_rmses.append(val_rmse.item())
                    val_mpes.append(val_mpe.item())
                    val_mapes.append(val_mape.item())
                    val_r2s.append(val_r2.item())

            if (epoch + 1) % config.print_every == 0:

                # Print Statistics #
                print("Val Loss {:.4f}".format(np.average(val_losses)))
                print("Val MAE : {:.4f}".format(np.average(val_maes)))
                print("Val MSE : {:.4f}".format(np.average(val_mses)))
                print("Val RMSE : {:.4f}".format(np.average(val_rmses)))
                print("Val MPE : {:.4f}".format(np.average(val_mpes)))
                print("Val MAPE : {:.4f}".format(np.average(val_mapes)))
                print("Val R^2 : {:.4f}".format(np.average(val_r2s)))

            # Save the model Only if validation loss decreased #
            curr_val_loss = np.average(val_losses)
            if curr_val_loss < best_val_loss:
                best_val_loss = min(curr_val_loss, best_val_loss)
                torch.save(model.state_dict(), os.path.join(config.weights_path, 'BEST_{}.pkl'.format(model.__class__.__name__)))
                print("Best model is saved!\n")
                best_val_improv = 0
            elif curr_val_loss >= best_val_loss:
                best_val_improv += 1
                print("Best Validation has not improved for {} epochs.\n".format(best_val_improv))

    elif config.mode == 'test':

        # Load the Model Weight #
        model.load_state_dict(torch.load(os.path.join(config.weights_path, 'BEST_{}.pkl'.format(model.__class__.__name__))))

        # Test #
        with torch.no_grad():
            for i, (data, label) in enumerate(test_loader):

                # Prepare Data #
                data = data.to(device, dtype=torch.float32)
                label = label.to(device, dtype=torch.float32)

                # Forward Data #
                pred_test = model(data)

                # Convert to Original Value Range #
                pred_test = pred_test.data.cpu().numpy()
                # NOTE(review): reshape(-1, 1) assumes a single-step label;
                # verify this holds when output_size > 1.
                label = label.data.cpu().numpy().reshape(-1, 1)
                pred_test = scaler.inverse_transform(pred_test)
                label = scaler.inverse_transform(label)

                # Calculate Loss #
                test_mae = mean_absolute_error(label, pred_test)
                test_mse = mean_squared_error(label, pred_test, squared=True)
                test_rmse = mean_squared_error(label, pred_test, squared=False)
                test_mpe = mean_percentage_error(label, pred_test)
                test_mape = mean_absolute_percentage_error(label, pred_test)
                test_r2 = r2_score(label, pred_test)

                # Add item to Lists #
                test_maes.append(test_mae.item())
                test_mses.append(test_mse.item())
                test_rmses.append(test_rmse.item())
                test_mpes.append(test_mpe.item())
                test_mapes.append(test_mape.item())
                test_r2s.append(test_r2.item())

            # Print Statistics (averaged over test batches) #
            print("Test {}".format(model.__class__.__name__))
            print("Test MAE : {:.4f}".format(np.average(test_maes)))
            print("Test MSE : {:.4f}".format(np.average(test_mses)))
            print("Test RMSE : {:.4f}".format(np.average(test_rmses)))
            print("Test MPE : {:.4f}".format(np.average(test_mpes)))
            print("Test MAPE : {:.4f}".format(np.average(test_mapes)))
            print("Test R^2 : {:.4f}".format(np.average(test_r2s)))

            # Plot Figure #
            # NOTE(review): pred_test/label hold only the final test batch at
            # this point, so the plot covers the last batch only.
            plot_pred_test(pred_test, label, config.plots_path, config.feature, model)