Example #1
# #     dnn.save(folder,f_name) # fails due to the custom kernel_initializer
#     dnn.model.save_weights(os.path.join(folder,f_name+'.h5'))

# split train/test for the emulator (GP)
num_samp=X.shape[0]  # total number of samples in the training data
n_train = 1000
prng=np.random.RandomState(2020)  # fixed seed for a reproducible selection
sel4train = prng.choice(num_samp,size=n_train,replace=False)
tr_idx=np.random.choice(sel4train,size=np.floor(.75*n_train).astype('int'),replace=False)  # 75% of the selection for training
te_idx=np.setdiff1d(sel4train,tr_idx)
x_train,x_test=X[tr_idx],X[te_idx]
y_train,y_test=Y[tr_idx],Y[te_idx]

# define GP
latent_dim=y_train.shape[1]  # number of output dimensions (one latent GP per output)
kernel=gpf.kernels.SquaredExponential(lengthscales=np.random.rand(x_train.shape[1])) + gpf.kernels.Linear()  # ARD squared-exponential plus linear kernel
gp=GP(x_train.shape[1], y_train.shape[1], latent_dim=latent_dim,
      kernel=kernel)
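# Optional sanity check: list the trainable hyperparameters of the composite kernel
# with GPflow's summary utility.
gpf.utilities.print_summary(kernel)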
f_name='gp_'+algs[alg_no]+str(ensbl_sz)
try:
    gp.model=tf.saved_model.load(os.path.join(folder,f_name))
    gp.evaluate=lambda x:gp.model.predict(x)[0] # cannot take gradient!
    print(f_name+' has been loaded!')
except Exception as err:
    print(err)
    print('Train GP model...\n')
#     gp.induce_num=np.min((np.ceil(.1*x_train.shape[1]).astype('int'),ensbl_sz))
    epochs=200
#     batch_size=128
#     optimizer=tf.keras.optimizers.Adam(learning_rate=0.001)
    kwargs={'maxiter':epochs}
#     kwargs={'epochs':epochs,'batch_size':batch_size,'optimizer':optimizer}
    gp.train(x_train,y_train,x_test=x_test,y_test=y_test,**kwargs)
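
# A minimal held-out check (illustrative sketch): score the emulator's predictive mean
# on the test split. This assumes gp.evaluate(x) returns predictive means of shape
# (n_test, latent_dim); it is set explicitly in the load branch above, and the GP class
# is assumed to provide an equivalent predictor after training.
y_pred = np.asarray(gp.evaluate(x_test))
rmse = np.sqrt(np.mean((y_pred-y_test)**2))
print('GP emulator test RMSE: {:.4f}'.format(rmse))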
Example #2
# split train/test
num_samp=X.shape[0]
n_train=1000  # emulator training-set size (assumed value, mirroring Example #1)
prng=np.random.RandomState(2020)
sel4train = prng.choice(num_samp,size=n_train,replace=False)
tr_idx=np.random.choice(sel4train,size=np.floor(.75*n_train).astype('int'),replace=False)
te_idx=np.setdiff1d(sel4train,tr_idx)
x_train,x_test=X[tr_idx],X[te_idx]
y_train,y_test=Y[tr_idx],Y[te_idx]

# define GP
latent_dim=y_train.shape[1]
kernel=gpf.kernels.SquaredExponential() + gpf.kernels.Linear()
# kernel=gpf.kernels.SquaredExponential(lengthscales=np.random.rand(x_train.shape[1])) + gpf.kernels.Linear()
# kernel=gpf.kernels.Matern32()
# kernel=gpf.kernels.Matern52(lengthscales=np.random.rand(x_train.shape[1]))
gp=GP(x_train.shape[1], y_train.shape[1], latent_dim=latent_dim,
      kernel=kernel, shared_kernel=True)
loglik = lambda y: -0.5*tf.math.reduce_sum((y-adif.misfit.obs)**2/adif.misfit.noise_variance,axis=1)
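# Illustrative sketch: in emulative inference the GP stands in for the expensive forward
# map inside the (unnormalized Gaussian) log-likelihood above. Assuming gp.evaluate(x)
# returns predicted observables for parameter sample(s) x (it is attached to gp when the
# model is loaded or trained below), the emulated log-likelihood is the composition.
loglik_emu = lambda x: loglik(gp.evaluate(x))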
savepath=folder+'/GP/saved_model'
if not os.path.exists(savepath): os.makedirs(savepath)
import time
ctime=time.strftime("%Y-%m-%d-%H-%M-%S")
# note: the timestamp makes f_name unique per run, so the load below will normally fall through to training a new model
f_name='gp_'+algs[alg_no]+str(ensbl_sz)+'-'+ctime
try:
    gp.model=tf.saved_model.load(os.path.join(savepath,f_name))
    gp.evaluate=lambda x:gp.model.predict(x)[0] # cannot take gradient!
    print(f_name+' has been loaded!')
except Exception as err:
    print(err)
    print('Train GP model...\n')
#     gp.induce_num=np.min((np.ceil(.1*x_train.shape[1]).astype('int'),ensbl_sz))
    epochs=100
    kwargs={'maxiter':epochs}
    gp.train(x_train,y_train,x_test=x_test,y_test=y_test,**kwargs)  # same training call as in Example #1
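    # Illustrative sketch: export the freshly trained model so the tf.saved_model.load
    # branch above can succeed on a later run. Wrapping predict_f in a tf.function with
    # a fixed input signature is the standard GPflow export pattern; the exact routine
    # used by the GP class may differ.
    gp.model.predict = tf.function(gp.model.predict_f,
        input_signature=[tf.TensorSpec(shape=[None,x_train.shape[1]],dtype=tf.float64)])
    tf.saved_model.save(gp.model, os.path.join(savepath,f_name))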
Example #3
num_algs=len(algs)
alg_no=1
ensbl_sz = 100

# load data
folder = './train_NN'

# load data for GP and DNN
loaded=np.load(file=os.path.join(folder,algs[alg_no]+'_ensbl'+str(ensbl_sz)+'_training_XY.npz'))
X=loaded['X']
Y=loaded['Y']

# define GP
latent_dim=Y.shape[1]
kernel=gpf.kernels.SquaredExponential(lengthscales=np.random.rand(X.shape[1])) + gpf.kernels.Linear()
gp=GP(X.shape[1], Y.shape[1], latent_dim=latent_dim, kernel=kernel)

# define DNN
depth=3  # number of hidden layers
activations={'hidden':'softplus','output':'linear'}  # softplus hidden layers, linear output
droprate=0.  # dropout rate (0 = no dropout)
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001)
dnn=DNN(X.shape[1], Y.shape[1], depth=depth, droprate=droprate,
        activations=activations, optimizer=optimizer)
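
# Illustrative sketch: assuming dnn.model is the underlying, already compiled tf.keras
# model (as suggested by the dnn.model.save_weights call in Example #1), it can be fit
# directly on the loaded ensemble data.
dnn.model.fit(X, Y, validation_split=0.25, epochs=200, batch_size=64, verbose=0)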


# split train/test
num_samp=X.shape[0]
folder = './analysis'
try:
    with open(os.path.join(folder,'compare_gp_dnn_cnn.pckl'),'rb') as f: