Example #1
        # place the test targets and the ANN predictions next to the inputs
        test_data = pd.concat([X_test_df, y_test_df], axis=1)
        pred_data = pd.concat([X_test_df, predict_df], axis=1)

        # store both tables in one HDF5 file for later comparison
        test_data.to_hdf('sim_check.H5', key='test')
        pred_data.to_hdf('sim_check.H5', key='pred')

        # Save the trained model (the commented TF1-style Saver is kept for reference)
        # sess = K.get_session()
        # saver = tf.train.Saver(tf.global_variables())
        # saver.save(sess, './exported/my_model')
        model.save(
            join(blocks_name,
                 'FPV_ANN_tabulated_%s_%i.h5' % (scaler, n_neuron)))

        # write the OpenFOAM ANNProperties file
        writeANNProperties(in_scaler, out_scaler, scaler, o_scaler)

        # save the training time measured around model.fit
        train_time = t_end - t_start
        print('Training took %i sec.' % train_time)
        np.savetxt(join(blocks_name, 'training_time_%i.csv' % (n_neuron)),
                   [train_time])

        # save the loss history
        losses_df = pd.DataFrame(
            np.array([
                history.history['accuracy'], history.history['val_accuracy'],
                history.history['loss'], history.history['val_loss']
            ]).T,
            columns=['accuracy', 'val_accuracy', 'loss', 'val_loss'])
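
The sim_check.H5 file written above can be read back to check the predictions against the held-out targets; the losses_df table is presumably written out in the lines cut off after this excerpt. A minimal read-back sketch, assuming only the file name and keys from the snippet:

import pandas as pd

# reload the two tables saved under the 'test' and 'pred' keys
test_data = pd.read_hdf('sim_check.H5', key='test')
pred_data = pd.read_hdf('sim_check.H5', key='pred')
print(test_data.head())
print(pred_data.head())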
Example #2
from sklearn.model_selection import train_test_split

# read_h5_data and writeANNProperties are project-local helpers
input_features = ['f', 'zeta', 'pv']

# the scaler type (MinMax or Standard) is chosen below, before writing ANNProperties

# read in the data
X, y, df, in_scaler, out_scaler = read_h5_data(
    './data/tables_of_fgm_psi_n2fix.h5',
    input_features=input_features,
    labels=labels,
    i_scaler='no',
    o_scaler='cbrt_std')

# write the OpenFOAM ANNProperties file
scaler = 'Standard'
# only write the file if the output scaler wraps a fitted StandardScaler
if hasattr(out_scaler.std, 'mean_'):
    writeANNProperties(in_scaler, out_scaler, scaler, labels)
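
The hasattr(out_scaler.std, 'mean_') guard implies the output scaler exposes a fitted sklearn StandardScaler as its .std attribute. A purely hypothetical sketch of what such a 'cbrt_std' scaler could look like, only to illustrate the attribute being tested (the project's actual class is not shown in the excerpt):

import numpy as np
from sklearn.preprocessing import StandardScaler

class CbrtStdScaler:
    # hypothetical stand-in for the 'cbrt_std' option: cube-root transform
    # followed by standardisation; .std gains mean_ once fitted
    def fit_transform(self, y):
        self.std = StandardScaler()
        return self.std.fit_transform(np.cbrt(y))

    def inverse_transform(self, y_scaled):
        return self.std.inverse_transform(y_scaled) ** 3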

# split into train and test data (hold out 1% of the samples)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.01)

# %%
print('set up ANN')
# ANN parameters
dim_input = X_train.shape[1]
dim_label = y_train.shape[1]
n_neuron = 100
branches = 3
scale = 3
batch_norm = False

# This returns a tensor
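
The example is cut off here; in the Keras functional API this comment conventionally precedes an Input(...) call. A minimal sketch of how the build might continue with the parameters above, assuming a plain fully connected network (how branches and scale shape the original architecture is not shown in the excerpt):

from tensorflow.keras.layers import Input, Dense, BatchNormalization
from tensorflow.keras.models import Model

inputs = Input(shape=(dim_input,))  # This returns a tensor
x = Dense(n_neuron, activation='relu')(inputs)
if batch_norm:
    x = BatchNormalization()(x)
x = Dense(n_neuron, activation='relu')(x)
outputs = Dense(dim_label, activation='linear')(x)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])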