import os
import functools

import pandas as pd

import bayesian as lib

verbose = 0
bSize = 850
fntrain = "/home/philgun/Documents/PhD/Modelica/receiver-data/training_data_cascade_constant_size_AR_clean.csv"
fntest = "/home/philgun/Documents/PhD/Modelica/receiver-data/validation_data_cascade_constant_size_AR_const.csv"
wd = "./multi_aperture_6"

# Scaling method: "MinMax" or "StandardScaler"
scaler = "MinMax"

if not os.path.exists(wd):
    os.makedirs(wd)

# Sanity check: report how many training rows were read
print(len(pd.read_csv(fntrain)))

# Generate the training/test arrays and scalers (13 inputs, 1 output)
arr = lib.preprocessing(wd, fntrain, fntest, 13, 1, scaling_method=scaler)

# Print raw X and y for train and test - to check whether the data have been parsed correctly
print("X train raw :\n", arr[-3])
print("y train raw :\n", arr[-2])
print("X test raw :\n", arr[-1])
print("y test raw :\n", arr[5])

#******************************* Test build model
model = lib.generate_model(arr, dropout=0.1, nPercent=0.25, nShrink=0.9)
model.summary()

#******************************* Partially initialise the eval_net func
objfunc = functools.partial(lib.eval_net, wd, verbose, scaler, arr, bSize)
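# The "MinMax"/"StandardScaler" switch above typically maps to scikit-learn's two
# standard feature scalers. The sketch below is illustrative only: whether
# lib.preprocessing actually wraps sklearn this way is an assumption, not
# confirmed by this script. arr[-3] and arr[-1] are the raw X train / X test
# printed above.
from sklearn.preprocessing import MinMaxScaler, StandardScaler

def make_scaler(name):
    # "MinMax" rescales each feature to [0, 1];
    # "StandardScaler" centres each feature to zero mean and unit variance.
    return MinMaxScaler() if name == "MinMax" else StandardScaler()

# Fit on the raw training inputs only, then reuse those statistics on the
# test set so no test information leaks into the scaling.
demo_scaler = make_scaler(scaler)
X_train_scaled = demo_scaler.fit_transform(arr[-3])
X_test_scaled = demo_scaler.transform(arr[-1])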
import time
import functools

from matplotlib import pyplot as plt

import bayesian as lib

if __name__ == "__main__":
    verbose = 0
    bSize = 850
    fntrain = "/home/philgun/Documents/PhD/Modelica/receiver-data/training_data_constant_AR_H_drop_T_out.csv"
    fntest = "/home/philgun/Documents/PhD/Modelica/receiver-data/validation_data_constant_AR_H_drop_T_out.csv"
    wd = "./single_aperture_constant_T_out_2"

    # Generate the training/test arrays and scalers (7 inputs, 1 output)
    arr = lib.preprocessing(wd, fntrain, fntest, 7, 1)

    # Print raw y train, X test and y test - to check whether the data have been parsed correctly
    print(arr[-2])
    print(arr[-1])
    print(arr[5])

    #******************************* Test build model
    model = lib.generate_model(arr, dropout=0.1, nPercent=0.25, nShrink=0.9)
    model.summary()

    #******************************* Partially initialise the eval_net func
    objfunc = functools.partial(
        lib.eval_net,
        wd,
        verbose,
        arr,
        bSize,  # assumed closing argument, following the sibling script; the original call was truncated here
    )
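# functools.partial pre-binds the leading positional arguments (working dir,
# verbosity, data, batch size), so the returned objfunc only needs the remaining
# hyperparameters that the optimiser supplies. A self-contained illustration of
# the same mechanism, using a toy function rather than lib.eval_net:
import functools

def eval_toy(wd, verbose, arr, bSize, dropout, n_layers):
    return f"{wd}: dropout={dropout}, layers={n_layers}, batch={bSize}"

toy_objfunc = functools.partial(eval_toy, "./wd", 0, [1, 2, 3], 64)
print(toy_objfunc(0.1, 4))  # only the free hyperparameters remain to be passed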
import os
import functools

import utils
import bayesian as lib

# Note: this script fragment starts mid-file; wd, fntrain, fntest, verbose and
# the split_df helper are defined earlier in the original script.

#************************************* Create dir w.r.t. mode
if not os.path.exists(wd):
    os.makedirs(wd)

#************************************* Prep data
data = utils.Data("./data/LC_model_temp_control.mat")
df = data.get_data()[3:].drop(columns="time")
split_df(df, fraction=0.3)

scaler = "MinMax"
inputsize = df.shape[1] - 1
outputsize = 1
bSize = 64

#*************************************** Generate training/test data set and also scalers etc.
arr = lib.preprocessing(wd, fntrain, fntest, inputsize, outputsize, scaler)

# Print raw X and y for train and test - to check whether the data have been parsed correctly
print("X train raw :\n", arr[-3])
print("y train raw :\n", arr[-2])
print("X test raw :\n", arr[-1])
print("y test raw :\n", arr[5])

#************************************** Function test
model = lib.generate_model(arr, dropout=0.1, nPercent=0.25, nShrink=0.9)
model.summary()

#************************************** Partially initialise the objective function
# Argument list assumed to match the first script; the original call was truncated here.
objfunc = functools.partial(lib.eval_net, wd, verbose, scaler, arr, bSize)
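# Downstream, objfunc is presumably handed to a Bayesian optimiser that searches
# over the remaining hyperparameters. Which optimiser bayesian.py actually uses
# is not shown here; below is a hypothetical sketch with scikit-optimize, with
# assumed search-space bounds and assumed free arguments (dropout, nShrink), and
# assuming eval_net returns a scalar loss to minimise.
from skopt import gp_minimize
from skopt.space import Real

search_space = [
    Real(0.0, 0.5, name="dropout"),   # assumed bounds
    Real(0.5, 1.0, name="nShrink"),   # assumed bounds
]

# gp_minimize passes each sampled point as a single list, so unpack it into
# objfunc's remaining positional arguments.
result = gp_minimize(lambda x: objfunc(*x), search_space, n_calls=30)
print(result.x, result.fun)  # best hyperparameters and the loss they achieved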