import numpy as np
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
# The remaining helpers used throughout these scripts (load_data, data_to_seq,
# standardize_data, preprocess_data, train, assemble, load_NN_configs,
# load_GP_configs, gen_gp_loss, RMSE) are the keras-gp (kgp) utilities;
# import them from your kgp installation.


def main():
    # Load data
    X, y = load_data('drives', use_targets=False)
    print(X.shape, y.shape)
    X_seq, y_seq = data_to_seq(X, y, t_lag=32,
                               t_future_shift=1,
                               t_future_steps=1,
                               t_sw_step=1)
    print(X_seq.shape, y_seq.shape)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        #'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Re-format targets: one (N, 1) array per output dimension
    for set_name in data:
        y = data[set_name][1]
        y = y.reshape((-1, 1, np.prod(y.shape[1:])))
        data[set_name][1] = [y[:, :, i] for i in range(y.shape[2])]
    print(data["train"][0].shape, np.array(data["train"][1]).shape)

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = list(data['train'][0].shape[1:])
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)
    batch_size = 128
    epochs = 5
    print(input_shape)

    nn_params = {
        'H_dim': 16,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {},
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks (no validation set here, so monitor the training MSE)
    callbacks = [EarlyStopping(monitor='mse', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='lstm', checkpoint_monitor='mse',
                    epochs=epochs, batch_size=batch_size, verbose=2)

    # Finetune the model
    model.finetune(*data['train'],
                   batch_size=batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
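# ---------------------------------------------------------------------------
# Every script in this section reports an `RMSE` metric from kgp. A minimal
# sketch of what it computes, assuming inputs are arrays or lists of equally
# shaped per-output arrays (`rmse_sketch` is a hypothetical stand-in, not
# kgp's implementation):
def rmse_sketch(y_true, y_pred):
    """Root-mean-squared error; flattens (lists of) arrays before comparing."""
    y_true = np.ravel(np.asarray(y_true, dtype=float))
    y_pred = np.ravel(np.asarray(y_pred, dtype=float))
    return np.sqrt(np.mean((y_true - y_pred) ** 2))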
def main():
    # Load data
    X_train, y_train = load_data('actuator', stop=45.)
    X_valid, y_valid = load_data('actuator', start=45., stop=55.)
    X_test, y_test = load_data('actuator', start=55.)
    data = {
        'train': (X_train, y_train),
        'valid': (X_valid, y_valid),
        'test': (X_test, y_test),
    }
    data = preprocess_data(data, standardize=True, multiple_outputs=True,
                           t_lag=10, t_future_shift=1, t_future_steps=1,
                           t_sw_step=1)

    # Model & training parameters
    # (this script uses the older Keras 1 `nb_epoch` naming)
    nb_train_samples = data['train'][0].shape[0]
    input_shape = data['train'][0].shape[1:]
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)
    batch_size = 128
    nb_epoch = 5

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 H_dim=16, H_activation='tanh', dropout=0.1)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 cov='SEiso', hyp_lik=-2.0,
                                 hyp_cov=[[-0.7], [0.0]])

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='lstm', checkpoint_monitor='val_mse',
                    nb_epoch=nb_epoch, batch_size=batch_size, verbose=2)

    # Finetune the model
    model.finetune(*data['train'],
                   batch_size=batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
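# ---------------------------------------------------------------------------
# `preprocess_data` is not shown in this file. Based on the explicit steps the
# other scripts perform by hand (standardize on training statistics, window
# with data_to_seq, split targets into one array per output), a hypothetical
# equivalent could be sketched as follows (`preprocess_data_sketch` is an
# assumption, not kgp's actual implementation):
def preprocess_data_sketch(data, standardize=True, multiple_outputs=True,
                           t_lag=10, t_future_shift=1, t_future_steps=1,
                           t_sw_step=1):
    """Standardize, window, and (optionally) reformat targets per output."""
    if standardize:
        X_tr, X_te, X_va = standardize_data(
            data['train'][0], data['test'][0], data['valid'][0])
        data = {'train': (X_tr, data['train'][1]),
                'test': (X_te, data['test'][1]),
                'valid': (X_va, data['valid'][1])}
    out = {}
    for name, (X, y) in data.items():
        X_seq, y_seq = data_to_seq(X, y, t_lag=t_lag,
                                   t_future_shift=t_future_shift,
                                   t_future_steps=t_future_steps,
                                   t_sw_step=t_sw_step)
        if multiple_outputs:
            # One target array per output, as the per-output GP heads expect.
            y_seq = y_seq.reshape((-1, 1, np.prod(y_seq.shape[1:])))
            y_seq = [y_seq[:, :, i] for i in range(y_seq.shape[2])]
        out[name] = [X_seq, y_seq]
    return out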
def build_train_GPLSTM(self):
    '''Define the GP-LSTM architecture and run training.

    Returns
    -------
    history : dict
        Training information.
    y_test : numpy array
        Test targets.
    y_pred : numpy array
        Predicted outputs.
    var : numpy array
        Predicted variances.
    rmse_predict : float
        Test RMSE.
    model : Model
        Optimized deep learning model after training.
    data : dict
        Train, test and validation sets.
    '''
    data = self.data

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = data['train'][0].shape[1:]
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)
    nn_params = {
        'H_dim': self.hdim,
        'H_activation': 'tanh',
        'dropout': 0.0,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': np.log(0.3),
        'hyp_cov': [[4.0], [0.1]],
        'opt': {'cg_maxit': 2000,
                'cg_tol': 1e-4,
                #'deg': 3,
                'pred_var': -100,
                },
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=self.batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-5), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=2000)]

    # Train the model (`pred_mode` and `test` are assumed to be
    # module-level settings)
    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='checkpL3_predmode_' + str(pred_mode)
                               + '_test_' + str(test),
                    checkpoint_monitor='val_mse',
                    epochs=self.epochs,
                    batch_size=self.batch_size,
                    verbose=1)

    # Finetune the model
    model.finetune(*data['train'],
                   batch_size=self.batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    X_train, y_train = data['train']
    y_pred, var = model.predict(X_test, return_var=True,
                                X_tr=X_train, Y_tr=y_train,
                                batch_size=self.batch_size)
    var = np.array(var)
    rmse_predict = RMSE(y_test, y_pred)
    print('Test predict RMSE:', rmse_predict)
    print('mean variance:', var.mean())

    return history, y_test, y_pred, var, rmse_predict, model, data
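# ---------------------------------------------------------------------------
# Usage sketch: build_train_GPLSTM returns a predictive mean and variance, so
# approximate Gaussian predictive intervals follow directly. The helper below
# is hypothetical, added only to illustrate how the returned `y_pred` and
# `var` can be consumed:
def predictive_interval_sketch(y_pred, var, n_std=2.0):
    """Approximate predictive interval (mean +/- n_std standard deviations)."""
    mean = np.squeeze(np.asarray(y_pred))
    std = np.sqrt(np.squeeze(np.asarray(var)))
    return mean - n_std * std, mean + n_std * std


# Example (given the values returned above):
# lower, upper = predictive_interval_sketch(y_pred, var)  # ~95% interval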
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X, y, t_lag=32,
                               t_future_shift=1,
                               t_future_steps=1,
                               t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Model & training parameters
    input_shape = list(data['train'][0].shape[1:])
    output_shape = list(data['train'][1].shape[1:])
    batch_size = 16
    epochs = 100

    nn_params = {
        'H_dim': 32,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }

    # Retrieve model config
    configs = load_NN_configs(filename='lstm.yaml',
                              input_shape=input_shape,
                              output_shape=output_shape,
                              params=nn_params)

    # Construct & compile the model
    model = assemble('LSTM', configs['1H'])
    model.compile(optimizer=Adam(1e-1), loss='mse')

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_loss', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks,
                    checkpoint='lstm_actuator', checkpoint_monitor='val_loss',
                    epochs=epochs, batch_size=batch_size, verbose=2)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)
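# ---------------------------------------------------------------------------
# `data_to_seq` is kgp's windowing helper. Conceptually, it slides a window of
# length t_lag over the series and pairs each window with the target
# t_future_shift steps past the window's end. A minimal sketch of that
# behaviour under the assumption t_future_steps=1 (`data_to_seq_sketch` is a
# simplified stand-in, not the real helper):
def data_to_seq_sketch(X, y, t_lag=32, t_future_shift=1, t_sw_step=1):
    """Turn (T, d) arrays into (N, t_lag, d) windows with shifted targets."""
    X_seq, y_seq = [], []
    for t in range(0, len(X) - t_lag - t_future_shift + 1, t_sw_step):
        X_seq.append(X[t:t + t_lag])                     # inputs t .. t+t_lag-1
        y_seq.append(y[t + t_lag + t_future_shift - 1])  # target shifted ahead
    return np.asarray(X_seq), np.asarray(y_seq)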
def main():
    # Load data (17 input features, target in the last column)
    dataset = np.loadtxt("traindata.txt", delimiter=",")
    X_train = dataset[:, 0:17]
    y_train = dataset[:, 17]
    dataset = np.loadtxt("testdata.txt", delimiter=",")
    X_test = dataset[:, 0:17]
    y_test = dataset[:, 17]
    dataset = np.loadtxt("validdata.txt", delimiter=",")
    X_valid = dataset[:, 0:17]
    y_valid = dataset[:, 17]

    X_train, X_test, X_valid = standardize_data(X_train, X_test, X_valid)
    X_train = X_train.reshape(-1, 17)
    X_test = X_test.reshape(-1, 17)
    X_valid = X_valid.reshape(-1, 17)
    y_valid = y_valid.reshape(-1, 1)
    y_test = y_test.reshape(-1, 1)
    y_train = y_train.reshape(-1, 1)

    X_train, y_train = data_to_seq(X_train, y_train, t_lag=32,
                                   t_future_shift=1, t_future_steps=1,
                                   t_sw_step=1)
    X_valid, y_valid = data_to_seq(X_valid, y_valid, t_lag=32,
                                   t_future_shift=1, t_future_steps=1,
                                   t_sw_step=1)
    X_test, y_test = data_to_seq(X_test, y_test, t_lag=32,
                                 t_future_shift=1, t_future_steps=1,
                                 t_sw_step=1)

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Model & training parameters
    input_shape = list(data['train'][0].shape[1:])
    output_shape = list(data['train'][1].shape[1:])
    batch_size = 128
    epochs = 250

    nn_params = {
        'H_dim': 512,
        'H_activation': 'tanh',
        'dropout': 0.5,
    }

    # Retrieve model config
    configs = load_NN_configs(filename='lstm.yaml',
                              input_shape=input_shape,
                              output_shape=output_shape,
                              params=nn_params)

    # Construct & compile the model
    model = assemble('LSTM', configs['1H'])
    model.compile(optimizer=Adam(1e-4), loss='mse')

    # Callbacks
    #callbacks = [EarlyStopping(monitor='val_nlml', patience=10)]
    callbacks = []

    # Train the model
    history = train(model, data, callbacks=callbacks,
                    checkpoint='lstm', checkpoint_monitor='val_loss',
                    epochs=epochs, batch_size=batch_size, verbose=1)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)
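# ---------------------------------------------------------------------------
# `standardize_data` is kgp's helper; the essential behaviour is z-scoring all
# splits with statistics computed on the training split only, so no test
# information leaks into preprocessing. A minimal sketch under that assumption
# (`standardize_data_sketch` is hypothetical):
def standardize_data_sketch(X_train, X_test, X_valid, eps=1e-8):
    """Z-score every split using the training split's mean and std."""
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0) + eps  # avoid division by zero
    return ((X_train - mean) / std,
            (X_test - mean) / std,
            (X_valid - mean) / std)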
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X, y, t_lag=32,
                               t_future_shift=1,
                               t_future_steps=1,
                               t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Re-format targets: one (N, 1) array per output dimension
    for set_name in data:
        y = data[set_name][1]
        y = y.reshape((-1, 1, np.prod(y.shape[1:])))
        data[set_name][1] = [y[:, :, i] for i in range(y.shape[2])]

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = data['train'][0].shape[1:]
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)
    batch_size = 128
    epochs = 5

    nn_params = {
        'H_dim': 16,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {'cg_maxit': 500, 'cg_tol': 1e-4},
        'grid_kwargs': {'eq': 1, 'k': 1e2},
        'update_grid': False,  # when using a manual grid, turn off grid updates
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Specify a manual grid for MSGP (100 equidistant points per input
    # dimension). Note: each np.ndarray in xg must be a column vector.
    gp_configs['MSGP']['config']['grid_kwargs']['xg'] = (
        gp_input_shape[0] * [np.linspace(-1.0, 1.0, 100)[:, None]])

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['MSGP']])
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='lstm', checkpoint_monitor='val_mse',
                    epochs=epochs, batch_size=batch_size, verbose=2)

    # Finetune the model
    model.finetune(*data['train'],
                   batch_size=batch_size,
                   gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
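# ---------------------------------------------------------------------------
# Sanity-check sketch for the manual MSGP grid above: `xg` must be a list with
# one equidistant column vector per GP input dimension, consistent with the
# 'eq': 1 setting in grid_kwargs. `make_msgp_grid_sketch` is a hypothetical
# helper that reproduces the xg assignment in main():
def make_msgp_grid_sketch(n_dims=1, n_points=100, lo=-1.0, hi=1.0):
    """Build a per-dimension grid; each entry is an (n_points, 1) column."""
    return n_dims * [np.linspace(lo, hi, n_points)[:, None]]


# Example:
# xg = make_msgp_grid_sketch(n_dims=gp_input_shape[0])
# assert all(col.shape == (100, 1) for col in xg)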