def main(): # Load data dataset = np.loadtxt("traindata.txt", delimiter=",") X_train = dataset[:, 0:17] y_train = dataset[:, 17] dataset = np.loadtxt("testdata.txt", delimiter=",") X_test = dataset[:, 0:17] y_test = dataset[:, 17] dataset = np.loadtxt("validdata.txt", delimiter=",") X_valid = dataset[:, 0:17] y_valid = dataset[:, 17] X_train, X_test, X_valid = standardize_data(X_train, X_test, X_valid) data = { 'train': (X_train, y_train), 'valid': (X_valid, y_valid), 'test': (X_test, y_test), } # Model & training parameters input_shape = data['train'][0].shape[1:] output_shape = data['train'][1].shape[1:] batch_size = 128 epochs = 500 # Construct & compile the model model = assemble_mlp(input_shape, output_shape) model.compile(optimizer=Adam(1e-4), loss='mse') # model.load_weights('checkpoints/mlp_kin40k.h5', by_name=True) # Callbacks # callbacks = [ # EarlyStopping(monitor='val_loss', patience=10), # ] callbacks = [] # Train the model history = train(model, data, callbacks=callbacks, checkpoint='mlp_kin40k', checkpoint_monitor='val_loss', epochs=epochs, batch_size=batch_size, verbose=1) # Test the model X_test, y_test = data['test'] y_preds = model.predict(X_test) rmse_predict = RMSE(y_test, y_preds) print('Test RMSE:', rmse_predict)
def run(self):
    prt = 0.5
    print("--> Process for date: %s" % self.dn)
    _xparams = self.data[1]
    _yparam = self.data[2]
    mI = self.mI
    reg = self.reg
    clf = self.clf
    self._forecast_time = self.dn + dt.timedelta(hours=(mI * 3))
    self._pred_point_time = self.dn  # time at which the forecast is issued
    self.y_obs = -1
    self.y_pred = -1
    self.pr = -1
    self.sigma = 0.
    self.prt = prt
    if self.data_windowing():
        X_test, y_test = self.X_test, self.y_test
        try:
            pr = clf.predict_proba(X_test[:, :-4])[0, 0]
            self.pr = pr
            if pr > prt:
                self.data_windowing(self.trw * self.alt_win, True)

                # Callbacks
                callbacks = [EarlyStopping(monitor='mse', patience=10)]

                # Train the model
                history = train(self.reg, self.DD, callbacks=callbacks,
                                gp_n_iter=5, checkpoint='lstm',
                                checkpoint_monitor='mse', epochs=self.epochs,
                                batch_size=self.batch_size, verbose=0)

                # Finetune the model
                self.reg.finetune(*self.DD['train'],
                                  batch_size=self.batch_size,
                                  gp_n_iter=100, verbose=0)

                X_test, y_test = self.DD['test']
                y_preds = self.reg.predict(X_test, return_var=True)
                yr = np.array(y_preds[0]).reshape((1, 1))
                s = np.array(y_preds[1]).reshape((1, 1))
                self.y_pred = self.reY(np.array(yr))[0, 0]
                self.sigma = self.reY(np.array(s))[0, 0]
        except Exception:
            print(self.dn)
            traceback.print_exc()
    print(self.y_obs, self.y_pred, self.sigma)
    # store_prediction_to_file(self.fname, self.dn, self.y_obs, self.y_pred,
    #                          self.pr, self.prt, self.model)
    store_deepgp_prediction_to_file(self.fname, self.dn, self.y_obs,
                                    self.y_pred, self.sigma, self.pr,
                                    self.prt, self.model)
    return
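# The storage helper above is project-specific. A hedged sketch of what a
# store_deepgp_prediction_to_file-style writer might do (a hypothetical
# stand-in, not the project's actual implementation):
import csv

def store_deepgp_prediction_to_file(fname, dn, y_obs, y_pred, sigma, pr,
                                    prt, model):
    # Append one CSV row per forecast so runs can be aggregated later.
    with open(fname, 'a', newline='') as f:
        csv.writer(f).writerow([dn, y_obs, y_pred, sigma, pr, prt, model])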
def main(): # Load data dataset = np.loadtxt("kin40ktraindata.txt", delimiter=",") X_train = dataset[:, 0:8] y_train = dataset[:, 8] dataset = np.loadtxt("kin40ktestdata.txt", delimiter=",") X_test = dataset[:, 0:8] y_test = dataset[:, 8] X_valid, y_valid = X_test, y_test X_train, X_test, X_valid = standardize_data(X_train, X_test, X_valid) data = { 'train': (X_train, y_train), 'valid': (X_valid, y_valid), 'test': (X_test, y_test), } # Model & training parameters input_shape = data['train'][0].shape[1:] output_shape = data['train'][1].shape[1:] batch_size = 128 epochs = 100 # Construct & compile the model model = assemble_mlp(input_shape, output_shape, batch_size, nb_train_samples=len(X_train)) loss = [gen_gp_loss(gp) for gp in model.output_layers] model.compile(optimizer=Adam(1e-4), loss=loss) # Load saved weights (if exist) #if os.path.isfile('checkpoints/msgp_mlp_kin40k.h5'): # model.load_weights('checkpoints/msgp_mlp_kin40k.h5', by_name=True) # Train the model history = train(model, data, callbacks=[], gp_n_iter=5, checkpoint='msgp_mlp_kin40k', checkpoint_monitor='val_loss', epochs=epochs, batch_size=batch_size, verbose=1) # Test the model X_test, y_test = data['test'] y_preds = model.predict(X_test) rmse_predict = RMSE(y_test, y_preds) print('Test RMSE:', rmse_predict)
def main():
    # Load data
    (X_train, y_train), (X_test, y_test) = load_data(delay=2, shuffle=True)
    X_valid, y_valid = X_test, y_test
    X_train, X_test, X_valid = standardize_data(X_train, X_test, X_valid)

    data = {
        'train': (X_train, y_train),
        'valid': (X_valid, y_valid),
        'test': (X_test, y_test),
    }

    # Model & training parameters
    input_shape = data['train'][0].shape[1:]
    output_shape = data['train'][1].shape[1:]
    batch_size = 128
    epochs = 100

    # Construct & compile the model
    model = assemble_mlp(input_shape, output_shape)
    model.compile(optimizer=Adam(1e-4), loss='mse')

    # Load saved weights (if exist)
    if os.path.isfile('checkpoints/mlp.h5'):
        model.load_weights('checkpoints/mlp.h5', by_name=True)

    # Callbacks
    # callbacks = [
    #     EarlyStopping(monitor='val_loss', patience=10),
    # ]
    callbacks = []

    # Train the model
    history = train(model, data, callbacks=callbacks, checkpoint='mlp',
                    checkpoint_monitor='val_loss', epochs=epochs,
                    batch_size=batch_size, verbose=1)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)
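# RMSE is used throughout these scripts; they appear to follow the kgp
# examples, where it is imported from kgp.metrics. A minimal stand-in for
# reference:
def RMSE(y_true, y_pred):
    y_true = np.asarray(y_true).ravel()
    y_pred = np.asarray(y_pred).ravel()
    return np.sqrt(np.mean((y_true - y_pred) ** 2))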
def run_model(self):
    pr_th = 0.5
    self._sum = pd.DataFrame()
    self._sum["date"] = [self.dn]
    self._sum["name"] = [self.name]
    self._sum["kp"] = [-1.]
    self._sum["kp_pred"] = [-1.]
    self._sum["pr"] = [-1.]
    self._sum["pr_th"] = [pr_th]
    self._sum["std"] = [-1.]
    self._sum["window"] = [self.window]
    self._sum["alt_window"] = [self.alt_window]
    success = False
    if self.is_run:
        self._sum["kp"] = np.round(self.y_obs, 2)
        pr = self.clf.predict_proba(self.X_test)
        self._sum["pr"] = np.round(pr, 2)
        if pr[0] > pr_th:
            self.data_windowing(self.alt_window)
            self.database = {
                'train': [self.X_train, self.y_train],
                'test': [self.X_test, self.y_test],
            }
            self.ini()
            self.opt_regressor()
            callbacks = [EarlyStopping(monitor='mse', patience=10)]
            history = train(self.model, self.database, callbacks=callbacks,
                            gp_n_iter=5, checkpoint='lstm',
                            checkpoint_monitor='mse', epochs=self.epochs,
                            batch_size=self.batch_size, verbose=0)
            y_pred, y_std = self.model.predict(self.X_test, return_var=True)
            y_pred, y_std = (np.reshape(y_pred, (1, 1)),
                             np.reshape(y_std, (1, 1)))
            self._sum["kp_pred"] = np.round(self.source.tx_y(y_pred), 3)
            self._sum["std"] = np.round(self.source.tx_y(y_std), 3)
            success = True
    self.save_results()
    return success
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X, y, t_lag=32, t_future_shift=1,
                               t_future_steps=1, t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Re-format targets: one (N, 1) array per output
    for set_name in data:
        y = data[set_name][1]
        y = y.reshape((-1, 1, np.prod(y.shape[1:])))
        data[set_name][1] = [y[:, :, i] for i in range(y.shape[2])]

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = list(data['train'][0].shape[1:])
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)
    batch_size = 128
    epochs = 100

    nn_params = {
        'H_dim': 16,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {},
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_gp_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='gp_lstm_actuator',
                    checkpoint_monitor='val_mse',
                    epochs=epochs, batch_size=batch_size, verbose=2)

    # Finetune the model
    model.finetune(*data['train'], batch_size=batch_size, gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
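# For reference, a minimal sketch of the lag-windowing that a
# data_to_seq-style helper performs. The real helper lives in kgp's dataset
# utilities; this illustrative version only covers the t_lag behaviour used
# above, with t_future_shift = t_future_steps = t_sw_step = 1:
def to_sequences(X, y, t_lag):
    # Each sample is a window of t_lag consecutive inputs; the target is
    # the observation that follows the window.
    Xs = np.stack([X[i:i + t_lag] for i in range(len(X) - t_lag)])
    ys = y[t_lag:]
    return Xs, ys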
    # Construct & compile the model (the call head was truncated in the
    # source; reconstructed here to match the msgp_mlp variant above)
    model = assemble_mlp(input_shape, output_shape, batch_size,
                         nb_train_samples=len(X_train))
    opt = Adam(lr=1e-4)
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=opt, loss=loss)

    # Load saved weights (if exist)
    # if os.path.isfile('checkpoints/msgp_mlp.h5'):
    #     model.load_weights('checkpoints/msgp_mlp.h5', by_name=True)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=50)]

    # Train the model
    history = train(model, data, gp_n_iter=5, epochs=epochs,
                    batch_size=batch_size, callbacks=callbacks,
                    tensorboard=True, checkpoint='msgp_mlp',
                    checkpoint_monitor='val_mse', verbose=1)
    model.summary()

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)

    # Compare against the naive "repeat the last input" baseline
    rmse_delay = RMSE(X_test[:, -1], y_preds)
    rmse_rel = rmse_predict / rmse_delay
    print('Relative RMSE:', rmse_rel)
def main(): # Load data dataset = np.loadtxt("traindata.txt", delimiter=",") X_train = dataset[:, 0:17] y_train = dataset[:, 17] dataset = np.loadtxt("testdata.txt", delimiter=",") X_test = dataset[:, 0:17] y_test = dataset[:, 17] dataset = np.loadtxt("validdata.txt", delimiter=",") X_valid = dataset[:, 0:17] y_valid = dataset[:, 17] X_train_root = X_train X_valid_root = X_valid X_train, X_test, X_valid = standardize_data(copy.deepcopy(X_train_root), X_test, copy.deepcopy(X_valid_root)) print("MAX_Test ", np.amax(y_test)) print("MAX_Train", np.amax(y_train)) print("MAX_Valid", np.amax(y_valid)) print("MIN_Test ", np.amin(y_test)) print("MIN_Train", np.amin(y_train)) print("MIN_Valid", np.amin(y_valid)) UPPERBOUND = 1 print(y_train) y_train = y_train / UPPERBOUND y_valid = y_valid / UPPERBOUND print(y_train) data = { 'train': (X_train, y_train), 'valid': (X_valid, y_valid), 'test': (X_test, y_test), } # Model & training parameters input_shape = data['train'][0].shape[1:] output_shape = data['train'][1].shape[1:] batch_size = 128 epochs = 250 # Construct & compile the model model = assemble_mlp(input_shape, output_shape) model.compile(optimizer=Adam(1e-4), loss='mse') # model.load_weights('checkpoints/mlp_kin40k.h5', by_name=True) # Callbacks # callbacks = [ # EarlyStopping(monitor='val_loss', patience=10), # ] callbacks = [] # Train the model history = train(model, data, callbacks=callbacks, checkpoint_monitor='val_loss', epochs=epochs, batch_size=batch_size, verbose=1) # Test the model X_test, y_test = data['test'] y_preds = model.predict(X_test) rmse_predict = RMSE(y_test, y_preds * UPPERBOUND) print('Test RMSE:', rmse_predict) dataset = np.loadtxt("testdata.txt.en_de", delimiter=",") X_test = dataset[:, 0:17] y_test = dataset[:, 17] X_train, X_test, X_valid = standardize_data(X_train_root, X_test, X_valid_root) y_preds = model.predict(X_test) rmse_predict = RMSE(y_test, y_preds * UPPERBOUND) print('Test Adaptation RMSE:', rmse_predict)
def main(shift, sample_size, batch_size, epochs):
    '''
    Create a GP-LSTM model and train it on the random walk.

    Parameters
    ----------
    shift : int
        Number of steps to be predicted into the future.
    sample_size : int
        Number of steps to be sampled for the random walk.
    batch_size : int
        Batch size for training.
    epochs : int
        Number of epochs for training.

    Returns
    -------
    history : dict
        Training history information.
    X_test : DataFrame
        Test inputs.
    X_train : DataFrame
        Training inputs.
    y_train : DataFrame
        Training targets.
    batch_size : int
        Training parameter.
    y_test : DataFrame
        Test targets.
    model : Model
        Optimized model after training.
    RW_initial : numpy array
        Initial random walk used for training.
    '''
    data, RW_initial = Generate_data(shift, sample_size)

    # Model & training parameters (lr is assumed to be a module-level
    # learning-rate constant)
    model = GPLSTM(shift, lr, sample_size, batch_size, data)
    callbacks = []

    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='lstm_1', checkpoint_monitor='val_mse',
                    epochs=epochs, batch_size=batch_size, verbose=1)

    # Finetune the model
    model.finetune(*data['train'], batch_size=batch_size, gp_n_iter=100,
                   verbose=1)

    # Test the model
    X_test, y_test = data['test']
    X_train, y_train = data['train']

    return (history, X_test, X_train, y_train, batch_size, y_test, model,
            RW_initial)
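# Generate_data is project-specific. A hedged sketch of a random-walk
# generator with the same contract (a kgp-style data dict plus the raw
# walk); an illustrative assumption, not the original code:
def Generate_data(shift, sample_size, t_lag=32):
    walk = np.cumsum(np.random.randn(sample_size))  # 1-D random walk
    # Lagged windows as inputs; the value `shift` steps ahead as target.
    X = np.stack([walk[i:i + t_lag]
                  for i in range(len(walk) - t_lag - shift)])
    y = walk[t_lag + shift:]
    X = X[:, :, None]  # (N, t_lag, 1), as an LSTM expects
    n_tr, n_va = int(0.6 * len(X)), int(0.8 * len(X))
    data = {
        'train': [X[:n_tr], y[:n_tr]],
        'valid': [X[n_tr:n_va], y[n_tr:n_va]],
        'test': [X[n_va:], y[n_va:]],
    }
    return data, walk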
def build_train_GPLSTM(self):
    '''
    Define the GP-LSTM architecture and train it.

    Returns
    -------
    history : dict
        Training information.
    y_test : numpy array
        Test targets.
    y_pred : numpy array
        Predicted outputs.
    var : numpy array
        Predicted variances.
    rmse_predict : float
        Test RMSE.
    model : Model
        Optimized deep learning model after training.
    data : dict
        Train, test, and validation sets.
    '''
    data = self.data

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = data['train'][0].shape[1:]
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)

    nn_params = {
        'H_dim': self.hdim,
        'H_activation': 'tanh',
        'dropout': 0.0,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': np.log(0.3),
        'hyp_cov': [[4.0], [0.1]],
        'opt': {
            'cg_maxit': 2000,
            'cg_tol': 1e-4,
            # 'deg': 3,
            'pred_var': -100,
        },
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=self.batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['GP']])
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-5), loss=loss)

    # Callbacks (pred_mode and test are assumed to be module-level settings)
    callbacks = [EarlyStopping(monitor='val_mse', patience=2000)]

    # Train the model
    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='checkpL3_predmode_' + str(pred_mode) +
                               '_test_' + str(test),
                    checkpoint_monitor='val_mse', epochs=self.epochs,
                    batch_size=self.batch_size, verbose=1)

    # Finetune the model
    model.finetune(*data['train'], batch_size=self.batch_size,
                   gp_n_iter=100, verbose=0)

    # Test the model
    X_test, y_test = data['test']
    X_train, y_train = data['train']
    y_pred, var = model.predict(X_test, return_var=True,
                                X_tr=X_train, Y_tr=y_train,
                                batch_size=self.batch_size)
    var = np.array(var)
    rmse_predict = RMSE(y_test, y_pred)
    print('Test predict RMSE:', rmse_predict)
    print('Mean variance:', var.mean())

    return history, y_test, y_pred, var, rmse_predict, model, data
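# In the GPML convention that kgp's GP backend follows, SEiso
# hyperparameters are stored on a log scale:
# hyp_cov = [[log(lengthscale)], [log(signal_std)]], hyp_lik = log(noise_std).
# The settings above therefore correspond to (a worked check, assuming that
# convention):
lengthscale = np.exp(4.0)        # ~54.6
signal_std = np.exp(0.1)         # ~1.11
noise_std = np.exp(np.log(0.3))  # 0.3 by construction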
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X, y, t_lag=32, t_future_shift=1,
                               t_future_steps=1, t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Model & training parameters
    input_shape = list(data['train'][0].shape[1:])
    output_shape = list(data['train'][1].shape[1:])
    batch_size = 16
    epochs = 100

    nn_params = {
        'H_dim': 32,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }

    # Retrieve model config
    configs = load_NN_configs(filename='lstm.yaml',
                              input_shape=input_shape,
                              output_shape=output_shape,
                              params=nn_params)

    # Construct & compile the model
    model = assemble('LSTM', configs['1H'])
    model.compile(optimizer=Adam(1e-1), loss='mse')

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_loss', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks,
                    checkpoint='lstm_actuator', checkpoint_monitor='val_loss',
                    epochs=epochs, batch_size=batch_size, verbose=2)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)
def main():
    # Load data
    dataset = np.loadtxt("traindata.txt", delimiter=",")
    X_train = dataset[:, 0:17]
    y_train = dataset[:, 17]
    dataset = np.loadtxt("testdata.txt", delimiter=",")
    X_test = dataset[:, 0:17]
    y_test = dataset[:, 17]
    dataset = np.loadtxt("validdata.txt", delimiter=",")
    X_valid = dataset[:, 0:17]
    y_valid = dataset[:, 17]

    X_train, X_test, X_valid = standardize_data(X_train, X_test, X_valid)

    X_train = X_train.reshape(-1, 17)
    X_test = X_test.reshape(-1, 17)
    X_valid = X_valid.reshape(-1, 17)
    y_valid = y_valid.reshape(-1, 1)
    y_test = y_test.reshape(-1, 1)
    y_train = y_train.reshape(-1, 1)

    X_train, y_train = data_to_seq(X_train, y_train, t_lag=32,
                                   t_future_shift=1, t_future_steps=1,
                                   t_sw_step=1)
    X_valid, y_valid = data_to_seq(X_valid, y_valid, t_lag=32,
                                   t_future_shift=1, t_future_steps=1,
                                   t_sw_step=1)
    X_test, y_test = data_to_seq(X_test, y_test, t_lag=32,
                                 t_future_shift=1, t_future_steps=1,
                                 t_sw_step=1)

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Model & training parameters
    input_shape = list(data['train'][0].shape[1:])
    output_shape = list(data['train'][1].shape[1:])
    batch_size = 128
    epochs = 250

    nn_params = {
        'H_dim': 512,
        'H_activation': 'tanh',
        'dropout': 0.5,
    }

    # Retrieve model config
    configs = load_NN_configs(filename='lstm.yaml',
                              input_shape=input_shape,
                              output_shape=output_shape,
                              params=nn_params)

    # Construct & compile the model
    model = assemble('LSTM', configs['1H'])
    model.compile(optimizer=Adam(1e-4), loss='mse')

    # Callbacks
    # callbacks = [EarlyStopping(monitor='val_nlml', patience=10)]
    callbacks = []

    # Train the model
    history = train(model, data, callbacks=callbacks, checkpoint='lstm',
                    checkpoint_monitor='val_loss', epochs=epochs,
                    batch_size=batch_size, verbose=1)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)
    data = {
        'train': [x_train, y_train],
        'valid': [x_test, y_test],
        'test': [x_test, y_test],
    }

    # Model & training parameters
    input_shape = data['train'][0].shape[1:]
    nb_train_samples = data['train'][0].shape[0]
    output_shape = data['train'][1].shape[1:]
    batch_size = 2 ** 10
    epochs = 500

    # Construct & compile the model
    model = assemble_mlp(input_shape, batch_size,
                         nb_train_samples=nb_train_samples)
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-4), loss=loss)

    # Train the model
    history = train(model, data, callbacks=[], gp_n_iter=5,
                    epochs=epochs, batch_size=batch_size, verbose=1)

    # Test the model
    x_test, y_test = data['test']
    y_preds, y_var = model.predict(x_test, return_var=True)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test RMSE:', rmse_predict)

    # Plot the full dataset (x, y from earlier in the script) against the
    # predictive mean on the test inputs.
    plt.plot(x, y)
    plt.plot(x_test, y_preds[0])
    plt.show()
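    # Since predict(..., return_var=True) also yields per-point predictive
    # variances, the mean can be drawn with a +/- 2 sigma band. A hedged
    # plotting sketch (assumes 1-D x_test; matplotlib only):
    mean = np.asarray(y_preds).ravel()
    std = np.sqrt(np.asarray(y_var)).ravel()
    xs = np.asarray(x_test).ravel()
    order = np.argsort(xs)
    plt.fill_between(xs[order],
                     (mean - 2 * std)[order],
                     (mean + 2 * std)[order],
                     alpha=0.3, label='+/- 2 sigma')
    plt.plot(xs[order], mean[order], label='predictive mean')
    plt.legend()
    plt.show()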
def main():
    # Load data
    X, y = load_data('actuator', use_targets=False)
    X_seq, y_seq = data_to_seq(X, y, t_lag=32, t_future_shift=1,
                               t_future_steps=1, t_sw_step=1)

    # Split
    train_end = int((45. / 100.) * len(X_seq))
    test_end = int((90. / 100.) * len(X_seq))
    X_train, y_train = X_seq[:train_end], y_seq[:train_end]
    X_test, y_test = X_seq[train_end:test_end], y_seq[train_end:test_end]
    X_valid, y_valid = X_seq[test_end:], y_seq[test_end:]

    data = {
        'train': [X_train, y_train],
        'valid': [X_valid, y_valid],
        'test': [X_test, y_test],
    }

    # Re-format targets: one (N, 1) array per output
    for set_name in data:
        y = data[set_name][1]
        y = y.reshape((-1, 1, np.prod(y.shape[1:])))
        data[set_name][1] = [y[:, :, i] for i in range(y.shape[2])]

    # Model & training parameters
    nb_train_samples = data['train'][0].shape[0]
    input_shape = data['train'][0].shape[1:]
    nb_outputs = len(data['train'][1])
    gp_input_shape = (1,)
    batch_size = 128
    epochs = 5

    nn_params = {
        'H_dim': 16,
        'H_activation': 'tanh',
        'dropout': 0.1,
    }
    gp_params = {
        'cov': 'SEiso',
        'hyp_lik': -2.0,
        'hyp_cov': [[-0.7], [0.0]],
        'opt': {'cg_maxit': 500, 'cg_tol': 1e-4},
        'grid_kwargs': {'eq': 1, 'k': 1e2},
        'update_grid': False,  # when using a manual grid, turn off grid updates
    }

    # Retrieve model config
    nn_configs = load_NN_configs(filename='lstm.yaml',
                                 input_shape=input_shape,
                                 output_shape=gp_input_shape,
                                 params=nn_params)
    gp_configs = load_GP_configs(filename='gp.yaml',
                                 nb_outputs=nb_outputs,
                                 batch_size=batch_size,
                                 nb_train_samples=nb_train_samples,
                                 params=gp_params)

    # Specify manual grid for MSGP (100 equidistant points per input
    # dimension). Note: each np.ndarray in xg must be a column vector.
    gp_configs['MSGP']['config']['grid_kwargs']['xg'] = (
        gp_input_shape[0] * [np.linspace(-1.0, 1.0, 100)[:, None]])

    # Construct & compile the model
    model = assemble('GP-LSTM', [nn_configs['1H'], gp_configs['MSGP']])
    loss = [gen_gp_loss(gp) for gp in model.output_layers]
    model.compile(optimizer=Adam(1e-2), loss=loss)

    # Callbacks
    callbacks = [EarlyStopping(monitor='val_mse', patience=10)]

    # Train the model
    history = train(model, data, callbacks=callbacks, gp_n_iter=5,
                    checkpoint='lstm', checkpoint_monitor='val_mse',
                    epochs=epochs, batch_size=batch_size, verbose=2)

    # Finetune the model
    model.finetune(*data['train'], batch_size=batch_size, gp_n_iter=100,
                   verbose=0)

    # Test the model
    X_test, y_test = data['test']
    y_preds = model.predict(X_test)
    rmse_predict = RMSE(y_test, y_preds)
    print('Test predict RMSE:', rmse_predict)
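# For reference, the manual grid format assumed above: xg is a list with one
# column vector per GP input dimension, so a 1-D GP input with 100
# equidistant points in [-1, 1] looks like this (a worked sanity check):
xg = [np.linspace(-1.0, 1.0, 100)[:, None]]
assert len(xg) == 1 and xg[0].shape == (100, 1)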