Example #1
from sklearn.model_selection import train_test_split  # needed for the split below; read_h5_data is assumed to be a project-local helper

# 'labels' is assumed to start from the GRI species list, as in Example #2 below
with open('GRI_species_order', 'r') as f:
    labels = [a.rstrip() for a in f.readlines()]

# append other fields: heatRelease, T, PVs
# labels.append('heatRelease')
labels.append('T')
labels.append('PVs')

# # tabulate psi, mu, alpha
# labels.append('psi')
# labels.append('mu')
# labels.append('alpha')

# DO NOT CHANGE THIS ORDER!!
input_features = ['f', 'zeta', 'pv']

# read in the data
X, y, df, in_scaler, out_scaler = read_h5_data('./data/tables_of_fgm.h5',
                                               input_features=input_features,
                                               labels=labels,
                                               i_scaler='no',
                                               o_scaler='cbrt_std')
# read_h5_data('./data/tables_of_fgm.h5', key='of_tables', in_labels=input_features, labels=labels, scaler=scaler)

# split into train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.01)

# %%
print('set up ANN')

# ANN parameters
dim_input = X_train.shape[1]
dim_label = y_train.shape[1]
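# A minimal sketch of the ANN these sizes could feed, assuming a plain Keras Sequential MLP.
# Hidden widths, activations and optimizer are illustrative guesses, not the original setup.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Input

model = Sequential([
    Input(shape=(dim_input,)),
    Dense(64, activation='relu'),
    Dense(64, activation='relu'),
    Dense(dim_label, activation='linear'),
])
model.compile(optimizer='adam', loss='mse')
model.summary()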
Example #2
input_features = ["f", "zeta", "pv"]

with open("GRI_species_order", "r") as f:
    labels = [a.rstrip() for a in f.readlines()]

# append other fields: heatRelease, T, PVs
# labels.append('heatRelease')
labels.append("T")
labels.append("PVs")

#%%

x, y, df, in_scaler, out_scaler = read_h5_data(
    "./data/tables_of_fgm.h5",
    input_features=input_features,
    labels=labels,
    i_scaler="no",
    o_scaler="cbrt_std",
)
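# The 'cbrt_std' option is not defined in this excerpt; a plausible reading, stated purely as
# an assumption, is a cube-root transform followed by per-column standardization of the labels.
import numpy as np
from sklearn.preprocessing import StandardScaler

cbrt_vals = np.cbrt(df[labels].values)
std = StandardScaler().fit(cbrt_vals)
y_assumed = std.transform(cbrt_vals)                # what y would hold under this assumption
y_physical = std.inverse_transform(y_assumed) ** 3  # mapping back to physical units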

# %%
zetaLevel = sorted(set(df.zeta))  # sorted so zetaLevel[0] is a deterministic (lowest) zeta level
df_sample = df[df.zeta == zetaLevel[0]].sample(n=5_000)

sp = "T"
px.scatter_3d(data_frame=df_sample,
              x="f",
              y="pv",
              z=sp,
              color=sp,
              width=800,
Example #3
# 'labels' and 'input_features' are assumed to be set up as in the other examples
with open("GRI_species_order", "r") as f:
    labels = [a.rstrip() for a in f.readlines()]

input_features = ["f", "zeta", "pv"]  # DO NOT CHANGE THIS ORDER!!

# labels.append('heatRelease')
labels.append("T")
labels.append("PVs")
labels.remove("N2")  # N2 is not used as a regression target

# # tabulate psi, mu, alpha
# labels.append('psi')
# labels.append('mu')
# labels.append('alpha')

# read in the data
X, y, df, in_scaler, out_scaler = read_h5_data(
    # "./data/tables_of_fgm.h5",
    # "./data/df_filtered_3.parquet",
    "./data/df_interpolation.parquet",
    input_features=input_features,
    labels=labels,
    i_scaler="no",
    o_scaler="cbrt_std",
)

# split into train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.01)

# %%
print("set up ANN")

# ANN parameters
dim_input = X_train.shape[1]
dim_label = y_train.shape[1]
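# As in Example #1, the excerpt ends before training; a placeholder network and training call
# are sketched below. Layer width, epochs and batch size are illustrative, not original values.
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Input

model = Sequential([
    Input(shape=(dim_input,)),
    Dense(64, activation="relu"),
    Dense(dim_label, activation="linear"),
])
model.compile(optimizer="adam", loss="mse")
history = model.fit(
    X_train, y_train,
    validation_data=(X_test, y_test),
    epochs=50,
    batch_size=1024,
    verbose=2,
)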
Example #4
# the species file is assumed to be the GRI list used in the other examples
with open('GRI_species_order', 'r') as f:
    # print(species)
    labels = f.read().splitlines()

labels.append('T')
labels.append('PVs')

print('The labels are:')
print(labels)

# DO NOT CHANGE THIS ORDER!!
input_features = ['f', 'zeta', 'pv']

# read in the data
X, y, df, in_scaler, out_scaler = read_h5_data(path_to_data,
                                               input_features=input_features,
                                               labels=labels,
                                               i_scaler='std2',
                                               o_scaler=o_scaler)

# split into train and test data
test_size = data_points / len(X)
print('Test size is %f of entire data set\n' % test_size)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)

# load the model
model = load_model(path_to_model)

# #############################
# inference part
t_start = time.time()
predict_val = model.predict(X_test)
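# The excerpt cuts off after the predict call; a hedged continuation that closes the timing
# block and maps predictions back to physical units, assuming out_scaler exposes a
# scikit-learn style inverse_transform (not shown in this excerpt).
t_end = time.time()
print('Inference on %d samples took %.3f s' % (len(X_test), t_end - t_start))

y_pred_physical = out_scaler.inverse_transform(predict_val)
y_test_physical = out_scaler.inverse_transform(y_test)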