Example #1
    f.read(u_f, "parameter")
    f.close()
except:
    pass
u = u_f.vector()
# u=elliptic.prior.sample()
loglik = lambda y: -0.5 * elliptic.misfit.prec * tf.math.reduce_sum(
    (y - elliptic.misfit.obs)**2, axis=1)
loglik_cnn = lambda x: loglik(cnn.model(x))
loglik_dnn = lambda x: loglik(dnn.model(x))
# calculate gradient
dll_xact = elliptic.get_geom(u, [0, 1])[1]
# emulate gradient
u_img = fun2img(vec2fun(u, elliptic.pde.V))
dll_cnn = cnn.gradient(u_img[None, :, :, None], loglik_cnn)
dll_dnn = dnn.gradient(u.get_local()[None, :], loglik_dnn)

# plot
import matplotlib.pyplot as plt
import matplotlib as mp
plt.rcParams['image.cmap'] = 'jet'
fig, axes = plt.subplots(nrows=1,
                         ncols=3,
                         sharex=True,
                         sharey=True,
                         figsize=(15, 5))
sub_figs = [None] * 3
# plot
plt.axes(axes.flat[0])
u_f.vector().set_local(dll_xact)
sub_figs[0] = df.plot(u_f)
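The cnn.gradient and dnn.gradient calls above return the gradient of the emulated log-likelihood with respect to the network input. A minimal sketch of how such a helper could be written with tf.GradientTape (assuming the emulator is a tf.keras model; the function name and signature are illustrative, not the package's actual implementation):

import tensorflow as tf

def emulator_gradient(model, x, loglik):
    # Illustrative sketch: gradient of loglik(model(x)) with respect to the input x.
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(x)              # x is a plain tensor, so it must be watched explicitly
        ll = loglik(model(x))      # emulated log-likelihood, one value per sample
    return tape.gradient(ll, x).numpy().squeeze()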
Example #2
x=np.linspace(lin.true_input[dim[0]]-2.,lin.true_input[dim[0]]+2.)
y=np.linspace(lin.true_input[dim[1]]-2.,lin.true_input[dim[1]]+2.)
X,Y=np.meshgrid(x,y)
# Input=np.zeros((X.size,lin.input_dim))
Input=np.tile(lin.true_input,(X.size,1))
Input[:,dim[0]],Input[:,dim[1]]=X.flatten(),Y.flatten()
Z_d=logLik_d(Input).numpy().reshape(X.shape)
Z_g=logLik_g(Input).numpy().reshape(X.shape)
if grad:
    x=np.linspace(lin.true_input[dim[0]]-2.,lin.true_input[dim[0]]+2.,10)
    y=np.linspace(lin.true_input[dim[1]]-2.,lin.true_input[dim[1]]+2.,10)
    X_,Y_=np.meshgrid(x,y)
#     Input=np.zeros((X_.size,lin.input_dim))
    Input=np.tile(lin.true_input,(X_.size,1))
    Input[:,dim[0]],Input[:,dim[1]]=X_.flatten(),Y_.flatten()
    G=dnn.gradient(Input, logLik_d)
    U_d,V_d=G[:,dim[0]].reshape(X_.shape),G[:,dim[1]].reshape(X_.shape)
    G=gp.gradient(Input, logLik_g)
    U_g,V_g=G[:,dim[0]].reshape(X_.shape),G[:,dim[1]].reshape(X_.shape)
plt.axes(axes.flat[1])
sub_figs[1]=axes.flat[1].contourf(X,Y,Z_d,levels)
axes.flat[1].set_xlabel('$u_{}$'.format(dim[0]+1))
axes.flat[1].set_ylabel('$u_{}$'.format(dim[1]+1),rotation=0)
if grad: axes.flat[1].quiver(X_,Y_,U_d,V_d)
plt.title('Emulated (DNN)')
plt.axes(axes.flat[2])
sub_figs[2]=axes.flat[2].contourf(X,Y,Z_g,levels)
axes.flat[2].set_xlabel('$u_{}$'.format(dim[0]+1))
axes.flat[2].set_ylabel('$u_{}$'.format(dim[1]+1),rotation=0)
if grad: axes.flat[2].quiver(X_,Y_,U_g,V_g)
plt.title('Emulated (GP)')
from util.common_colorbar import common_colorbar
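logLik_d and logLik_g above are the DNN- and GP-emulated log-likelihoods; their definitions are not part of this snippet. A plausible sketch, mirroring the loglik lambdas of Example #1 (the misfit attributes and the GP prediction call are assumptions, not the package's actual API):

# Gaussian log-likelihood of the emulated forward map (attribute/method names assumed)
loglik = lambda y: -0.5 * lin.misfit.prec * tf.math.reduce_sum((y - lin.misfit.obs)**2, axis=1)
logLik_d = lambda x: loglik(dnn.model(x))   # DNN emulator
logLik_g = lambda x: loglik(gp.predict(x))  # GP emulator; substitute whatever prediction call the GP wrapper exposes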
Example #3
loaded=np.load(file=os.path.join('./train_NN',algs[alg_no]+'_ensbl'+str(ensbl_sz)+'_training_XY'+'.npz'))
prng=np.random.RandomState(2020)
sel4eval = prng.choice(num_samp,size=n_dif,replace=False)
X=loaded['X'][sel4eval]; Y=loaded['Y'][sel4eval]
sel4print = prng.choice(n_dif,size=10,replace=False)
prog=np.ceil(n_dif*(.1+np.arange(0,1,.1)))
for n in range(n_dif):
    u=X[n]
    # calculate gradient
    t_start=timeit.default_timer()
    ll_xact,dll_xact = lin.get_geom(u,[0,1])[:2]
    t_used[0] += timeit.default_timer()-t_start
    # emulate gradient
    t_start=timeit.default_timer()
    ll_emul = logLik(u[None,:]).numpy()[0]
    dll_emul = dnn.gradient(u[None,:], logLik)
    t_used[1] += timeit.default_timer()-t_start
    # test difference
    dif_fun = np.abs(ll_xact - ll_emul)
    dif_grad = dll_xact - dll_emul
    dif[n] = np.array([dif_fun, np.linalg.norm(dif_grad)/np.linalg.norm(dll_xact)])
    
#     # check the gradient extracted from emulation
#     v=lin.sample()
#     h=1e-4
#     dll_emul_fd_v=(logLik(u[None,:]+h*v[None,:])-logLik(u[None,:]))/h
#     reldif = abs(dll_emul_fd_v - dll_emul.flatten().dot(v))/np.linalg.norm(v)
#     print('Relative difference between finite difference and extracted results: {}'.format(reldif))
    
    if n+1 in prog:
        print('{0:.0f}% of the evaluation has been completed.'.format(float(n+1)/n_dif*100))
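A short follow-up that could summarize the loop above, averaging the recorded differences and comparing wall-clock times (a sketch; it assumes dif and t_used are filled exactly as in the loop):

# summarize emulation accuracy and speed (illustrative)
print('Mean difference in log-likelihood: {:.4e}'.format(dif[:, 0].mean()))
print('Mean relative difference in gradient: {:.4e}'.format(dif[:, 1].mean()))
print('Time used for exact calculation vs DNN emulation: {:.2f}s vs {:.2f}s'.format(*t_used))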
Example #4
loaded=np.load(file=os.path.join('./train_NN',algs[alg_no]+'_ensbl'+str(ensbl_sz)+'_training_XY'+'.npz'))
prng=np.random.RandomState(2020)
sel4eval = prng.choice(num_samp,size=n_dif,replace=False)
X=loaded['X'][sel4eval]; Y=loaded['Y'][sel4eval]
sel4print = prng.choice(n_dif,size=10,replace=False)
prog=np.ceil(n_dif*(.1+np.arange(0,1,.1)))
for n in range(n_dif):
    u=X[n]
    # calculate gradient
    t_start=timeit.default_timer()
    ll_xact,dll_xact = eit.get_geom(u,[0,1],force_posperm=True)[:2]
    t_used[0] += timeit.default_timer()-t_start
    # emulate gradient
    t_start=timeit.default_timer()
    ll_emul = logLik(u[None,:]).numpy()[0]
    dll_emul = dnn.gradient(u[None,:], logLik)
    t_used[1] += timeit.default_timer()-t_start
    # test difference
    dif_fun = np.abs(ll_xact - ll_emul)
    dif_grad = dll_xact - dll_emul
    dif[n] = np.array([dif_fun, np.linalg.norm(dif_grad)/np.linalg.norm(dll_xact)])
    
#     # check the gradient extracted from emulation
#     v=eit.sample()
#     h=1e-4
#     dll_emul_fd_v=(logLik(u[None,:]+h*v[None,:])-logLik(u[None,:]))/h
#     reldif = abs(dll_emul_fd_v - dll_emul.flatten().dot(v))/np.linalg.norm(v)
#     print('Relative difference between finite difference and extracted results: {}'.format(reldif))
    
    if n+1 in prog:
        print('{0:.0f}% of the evaluation has been completed.'.format(float(n+1)/n_dif*100))
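The commented-out block sketches a finite-difference check of the extracted gradient. A runnable version of the same idea as a standalone helper (a sketch; eit.sample() is assumed to draw a random direction, as in the comment):

def fd_check(u, dll_emul, h=1e-4):
    # finite-difference directional derivative of logLik vs the extracted gradient (illustrative)
    v = eit.sample()
    fd = (logLik(u[None, :] + h * v[None, :]) - logLik(u[None, :])) / h
    dd = dll_emul.flatten().dot(v)
    return np.abs(fd.numpy().squeeze() - dd) / np.linalg.norm(v)

# e.g. print('FD check: {}'.format(fd_check(u, dll_emul)))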
Example #5
            
            # emulation by GP
            t_start=timeit.default_timer()
            ll_emul = logLik_g(u).numpy()
            dll_emul = gp.gradient(u, logLik_g)
            t_used[1] += timeit.default_timer()-t_start
            # record difference
            dif_fun = np.abs(ll_xact - ll_emul)
            dif_grad = dll_xact - dll_emul
            fun_errors[0,s,t,:]=dif_fun
            grad_errors[0,s,t,:]=[np.linalg.norm(dif_grad[n])/np.linalg.norm(dll_xact[n]) for n in range(test_size)]
            
            # emulation by DNN
            t_start=timeit.default_timer()
            ll_emul = logLik_d(u).numpy()
            dll_emul = dnn.gradient(u, logLik_d)
            t_used[2] += timeit.default_timer()-t_start
            # record difference
            dif_fun = np.abs(ll_xact - ll_emul)
            dif_grad = dll_xact - dll_emul
            fun_errors[1,s,t,:]=dif_fun
            grad_errors[1,s,t,:]=[np.linalg.norm(dif_grad[n])/np.linalg.norm(dll_xact[n]) for n in range(test_size)]
                
            print('Time used for calculation: {} vs GP-emulation: {} vs DNN-emulation: {}'.format(*t_used.tolist()))
            test_times[0,s,t]=t_used[1]; test_times[1,s,t]=t_used[2]; test_times[2,s,t]=t_used[0]
    
    # save results
    with open(os.path.join(folder,'compare_gp_dnn.pckl'),'wb') as f:
        pickle.dump([fun_errors,grad_errors,train_times,test_times],f)

# make some plots
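One way the saved results could be turned into plots (a sketch; the array layout method x seed x training-size x test-point and the training-size grid are assumptions read off the indexing above):

import pickle
import matplotlib.pyplot as plt
with open(os.path.join(folder, 'compare_gp_dnn.pckl'), 'rb') as f:
    fun_errors, grad_errors, train_times, test_times = pickle.load(f)
train_sizes = np.arange(fun_errors.shape[2])  # placeholder x-axis; replace with the actual training sizes
for i, name in enumerate(('GP', 'DNN')):
    plt.semilogy(train_sizes, np.median(fun_errors[i], axis=(0, 2)), label=name)
plt.xlabel('training size')
plt.ylabel('median error in log-likelihood')
plt.legend()
plt.show()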
Example #6
import timeit
t_used = np.zeros(2)
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 6), facecolor='white')
plt.ion()
# plt.show(block=True)
u_f = df.Function(elliptic.pde.V)
for n in range(20):
    u = elliptic.prior.sample()
    # calculate gradient
    t_start = timeit.default_timer()
    dll_xact = elliptic.get_geom(u, [0, 1])[1]
    t_used[0] += timeit.default_timer() - t_start
    # emulate gradient
    t_start = timeit.default_timer()
    dll_emul = dnn.gradient(u.get_local()[None, :], logLik)
    t_used[1] += timeit.default_timer() - t_start
    # test difference
    dif = dll_xact.get_local() - dll_emul
    print(
        'Difference between the calculated and emulated gradients: min ({}), med ({}), max ({})'
        .format(dif.min(), np.median(dif), dif.max()))

    # check the gradient extracted from emulation
    v = elliptic.prior.sample()
    h = 1e-3
    dll_emul_fd_v = (
        logLik(u.get_local()[None, :] + h * v.get_local()[None, :]) -
        logLik(u.get_local()[None, :])) / h
    reldif = abs(dll_emul_fd_v -
                 dll_emul.flatten().dot(v.get_local())) / v.norm('l2')
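    # (illustrative continuation, not part of the original snippet) report the
    # finite-difference check and visualize the calculated vs emulated gradients
    print('Relative difference between finite difference and extracted results: {}'.format(reldif))
    plt.clf()
    ax = plt.subplot(121)
    u_f.vector().set_local(dll_xact.get_local())
    df.plot(u_f)
    ax.set_title('Calculated gradient')
    ax = plt.subplot(122)
    u_f.vector().set_local(dll_emul.flatten())
    df.plot(u_f)
    ax.set_title('Emulated gradient')
    plt.draw()
    plt.pause(1. / 10)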
Example #7
for n in range(n_dif):
    u = X[n]
    # calculate gradient
    t_start = timeit.default_timer()
    u_f1.vector().set_local(u)
    u_v = u_f1.vector()  # u already in dof order
    if eldeg > 1:
        u_f.interpolate(u_f1)
        u_v = u_f.vector()
#     u_f = img2fun(u, adif.prior.V); u_v = u_f.vector() # for u in vertex order
    ll_xact, dll_xact = adif.get_geom(u_v, [0, 1])[:2]
    t_used[0] += timeit.default_timer() - t_start
    # emulate gradient
    t_start = timeit.default_timer()
    ll_emul = logLik(u[None, :]).numpy()[0]
    dll_emul = dnn.gradient(u[None, :], logLik)
    t_used[1] += timeit.default_timer() - t_start
    # test difference
    dif_fun = np.abs(ll_xact - ll_emul)
    if eldeg > 1:
        u_f.vector().set_local(dll_xact)
        dll_xact = u_f.compute_vertex_values(
            adif.mesh)[d2v]  # convert to dof order
    else:
        dll_xact = dll_xact.get_local()
    dif_grad = dll_xact - dll_emul
    dif[n] = np.array(
        [dif_fun, np.linalg.norm(dif_grad) / np.linalg.norm(dll_xact)])

    #     # check the gradient extracted from emulation
    #     v=adif.prior.sample()
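Example #7 switches between vertex and dof orderings through d2v. For vertex-based (degree-1) elements these index maps come directly from dolfin; a minimal sketch (adif.prior.V is assumed to be the P1 space used above):

import dolfin as df
d2v = df.dof_to_vertex_map(adif.prior.V)  # entry i: the mesh vertex behind dof i
v2d = df.vertex_to_dof_map(adif.prior.V)  # entry i: the dof sitting at vertex i
# reorder vertex-ordered values (e.g. from compute_vertex_values) into dof order:
# values_dof = values_vertex[d2v]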
Example #8
    f.read_checkpoint(u_f, 'm', 0)
    f.close()
except:
    pass
u = u_f.vector()
# u=adif.prior.sample()
loglik = lambda y: -0.5 * tf.math.reduce_sum(
    (y - adif.misfit.obs)**2 / adif.misfit.noise_variance, axis=1)
loglik_cnn = lambda x: loglik(cnn.model(x))
loglik_dnn = lambda x: loglik(dnn.model(x))
# calculate gradient
dll_xact = adif.get_geom(u, [0, 1])[1]
# emulate gradient
dll_cnn = cnn.gradient(adif.vec2img(u)[None, :, :, None], loglik_cnn)
dll_dnn = dnn.gradient(
    u.get_local()[None, :]
    if eldeg == 1 else vinP1(u, adif.prior.V).get_local()[None, :], loglik_dnn)

# plot
import matplotlib.pyplot as plt
import matplotlib as mp
plt.rcParams['image.cmap'] = 'jet'
fig, axes = plt.subplots(nrows=1,
                         ncols=3,
                         sharex=True,
                         sharey=True,
                         figsize=(15, 5))
sub_figs = [None] * 3
# plot
plt.axes(axes.flat[0])
u_f.vector().set_local(dll_xact)
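The snippet is truncated before the three panels are drawn; a plausible completion, mirroring Example #1 (a sketch; it assumes eldeg == 1 so the DNN gradient lives on the same space as u_f, and that the CNN gradient has an image shape):

sub_figs[0] = df.plot(u_f)
axes.flat[0].set_title('Calculated gradient')
plt.axes(axes.flat[1])
sub_figs[1] = plt.imshow(np.squeeze(dll_cnn))  # CNN gradient on the image grid
axes.flat[1].set_title('Emulated gradient (CNN)')
plt.axes(axes.flat[2])
u_f.vector().set_local(dll_dnn.flatten())
sub_figs[2] = df.plot(u_f)
axes.flat[2].set_title('Emulated gradient (DNN)')
plt.show()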