Example #1
                    "r")
    f.read(u_f, "parameter")
    f.close()
except:
    pass
u = u_f.vector()
# u=elliptic.prior.sample()
# Gaussian log-likelihood of model output y given the observation precision
loglik = lambda y: -0.5 * elliptic.misfit.prec * tf.math.reduce_sum(
    (y - elliptic.misfit.obs)**2, axis=1)
# emulated log-likelihoods: compose the CNN / DNN forward maps with loglik
loglik_cnn = lambda x: loglik(cnn.model(x))
loglik_dnn = lambda x: loglik(dnn.model(x))
# calculate gradient
dll_xact = elliptic.get_geom(u, [0, 1])[1]
# emulate gradient
u_img = fun2img(vec2fun(u, elliptic.pde.V))
dll_cnn = cnn.gradient(u_img[None, :, :, None], loglik_cnn)
dll_dnn = dnn.gradient(u.get_local()[None, :], loglik_dnn)
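# Note (not in the source): cnn.gradient / dnn.gradient presumably
# differentiate the emulated log-likelihood w.r.t. the input via
# reverse-mode autodiff. A minimal sketch of such a method using
# tf.GradientTape; the helper name emulator_gradient is hypothetical.
def emulator_gradient(x, loglik, dtype=tf.float32):
    x = tf.convert_to_tensor(x, dtype=dtype)
    with tf.GradientTape() as tape:
        tape.watch(x)   # x is a plain tensor, so it must be watched explicitly
        ll = loglik(x)  # emulated log-likelihood, shape (batch,)
    return tape.gradient(ll, x).numpy().squeeze()
# e.g. emulator_gradient(u_img[None, :, :, None], loglik_cnn) should match dll_cnn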

# plot
import matplotlib.pyplot as plt
import matplotlib as mp
plt.rcParams['image.cmap'] = 'jet'
fig, axes = plt.subplots(nrows=1,
                         ncols=3,
                         sharex=True,
                         sharey=True,
                         figsize=(15, 5))
sub_figs = [None] * 3
# plot
plt.axes(axes.flat[0])
u_f.vector().set_local(dll_xact)
Example #2
try:
    # (hypothetical reconstruction: the opening call is truncated in the
    #  original snippet; the MAP estimate is read from an XDMF checkpoint)
    f = df.XDMFFile(os.path.join(os.getcwd(), 'properties/MAP.xdmf'))
    f.read_checkpoint(u_f, 'm', 0)
    f.close()
except Exception:
    pass
u = u_f.vector()
# u=adif.prior.sample()
# Gaussian log-likelihood with observation noise variance adif.misfit.noise_variance
loglik = lambda y: -0.5 * tf.math.reduce_sum(
    (y - adif.misfit.obs)**2 / adif.misfit.noise_variance, axis=1)
loglik_cnn = lambda x: loglik(cnn.model(x))
# reshape the CNN-RNN (spatiotemporal) output to match the observation vector
loglik_cnnrnn = lambda x: loglik(
    tf.reshape(cnnrnn.model(x), (-1, len(adif.misfit.obs))))
# calculate gradient
dll_xact = adif.get_geom(u, [0, 1])[1]
# emulate gradient
dll_cnn = cnn.gradient(adif.vec2img(u)[None, :, :, None], loglik_cnn)
dll_cnnrnn = cnnrnn.gradient(adif.vec2img(u)[None, :, :, None], loglik_cnnrnn)

# plot
import matplotlib.pyplot as plt
import matplotlib as mp
plt.rcParams['image.cmap'] = 'jet'
fig, axes = plt.subplots(nrows=1,
                         ncols=3,
                         sharex=True,
                         sharey=True,
                         figsize=(15, 5))
sub_figs = [None] * 3
# plot
plt.axes(axes.flat[0])
u_f.vector().set_local(dll_xact)
Example #3
if eldeg > 1:  # (if-line reconstructed from context; truncated in the original)
    V_P1 = df.FunctionSpace(adif.mesh, 'Lagrange', 1)
    d2v = df.dof_to_vertex_map(V_P1)
    u_f1 = df.Function(V_P1)
else:
    u_f1 = u_f
for n in range(n_dif):
    u = X[n]
    # calculate gradient
    t_start = timeit.default_timer()
    ll_xact, dll_xact = adif.get_geom(
        adif.img2vec(u, adif.prior.V if eldeg > 1 else None), [0, 1])[:2]
    t_used[0] += timeit.default_timer() - t_start
    # emulate gradient
    t_start = timeit.default_timer()
    ll_emul = logLik(u[None, :, :, None]).numpy()[0]
    dll_emul = adif.img2vec(cnn.gradient(u[None, :, :, None], logLik))
    t_used[1] += timeit.default_timer() - t_start
    # test difference
    dif_fun = np.abs(ll_xact - ll_emul)
    if eldeg > 1:
        u_f.vector().set_local(dll_xact)
        dll_xact = u_f.compute_vertex_values(
            adif.mesh)[d2v]  # convert to dof order
    else:
        dll_xact = dll_xact.get_local()
    dif_grad = dll_xact - dll_emul
    dif[n] = np.array(
        [dif_fun, np.linalg.norm(dif_grad) / np.linalg.norm(dll_xact)])

    #     # check the gradient extracted from emulation
    #     v=adif.prior.sample()
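# Note (not in the source): once the loop has filled dif (one row per test
# sample: absolute log-likelihood error, relative gradient error), a hedged
# sketch of summarizing it is
print('median |loglik error| = {:.3e}, median relative gradient error = {:.3e}'
      .format(*np.median(dif, axis=0)))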
Example #4
                t_start = timeit.default_timer()
                ll_emul = logLik_g(u.get_local()[None, :]).numpy()
                dll_emul = gp.gradient(u.get_local()[None, :], logLik_g)
                t_used[1] += timeit.default_timer() - t_start
                # record difference
                dif_fun = np.abs(ll_xact - ll_emul)
                dif_grad = dll_xact.get_local() - dll_emul
                fun_errors[0, s, t, n] = dif_fun
                grad_errors[0, s, t,
                            n] = np.linalg.norm(dif_grad) / dll_xact.norm('l2')

                # emulation by CNN
                t_start = timeit.default_timer()
                u_img = fun2img(vec2fun(u, elliptic.pde.V))
                ll_emul = logLik_c(u_img[None, :, :, None]).numpy()
                dll_emul = cnn.gradient(u_img[None, :, :, None],
                                        logLik_c)  #* grad_scalfctr
                t_used[2] += timeit.default_timer() - t_start
                # record difference
                dif_fun = np.abs(ll_xact - ll_emul)
                dif_grad = dll_xact - img2fun(dll_emul,
                                              elliptic.pde.V).vector()
                fun_errors[1, s, t, n] = dif_fun
                grad_errors[1, s, t,
                            n] = dif_grad.norm('l2') / dll_xact.norm('l2')

            print(
                'Time used for calculation: {} vs GP-emulation: {} vs CNN-emulation: {}'
                .format(*t_used.tolist()))
            pred_times[0, s, t] = t_used[1]
            pred_times[1, s, t] = t_used[2]
            pred_times[2, s, t] = t_used[0]
Example #5
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 6), facecolor='white')
plt.ion()
# plt.show(block=True)
u_f = df.Function(elliptic.pde.V)
for n in range(20):
    u = elliptic.prior.sample()

    # calculate gradient
    t_start = timeit.default_timer()
    dll_xact = elliptic.get_geom(u, [0, 1])[1]
    t_used[0] += timeit.default_timer() - t_start
    # emulate gradient
    t_start = timeit.default_timer()
    u_img = fun2img(vec2fun(u, elliptic.pde.V))
    dll_emul = cnn.gradient(u_img[None, :, :, None], logLik)
    t_used[1] += timeit.default_timer() - t_start
    # test difference
    dif = dll_xact - img2fun(dll_emul, elliptic.pde.V).vector()
    print(
        'Difference between the calculated and emulated gradients: min ({}), med ({}), max ({})'
        .format(dif.min(), np.median(dif.get_local()), dif.max()))

    # check the gradient extracted from emulation with finite difference
    v = elliptic.prior.sample()
    v_img = fun2img(vec2fun(v, elliptic.pde.V))
    h = 1e-4
    dll_emul_fd_v = (
        logLik(u_img[None, :, :, None] + h * v_img[None, :, :, None]) -
        logLik(u_img[None, :, :, None])) / h
    reldif = abs(dll_emul_fd_v - dll_emul.flatten().dot(v_img.flatten())
                 ) / np.linalg.norm(v_img)  # completed per the analogous check in Example #6
    print('Relative difference between finite difference and extracted results: {}'.format(reldif))
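# Note (not in the source): the check above can be wrapped into a reusable
# helper; a hedged sketch, with the hypothetical name fd_check. x and v are
# batched model inputs of identical shape, e.g. u_img[None, :, :, None].
def fd_check(loglik, grad, x, v, h=1e-4):
    # forward finite difference of the emulated log-likelihood along v
    fd = (loglik(x + h * v).numpy() - loglik(x).numpy()) / h
    # directional derivative from the extracted gradient
    dd = grad(x, loglik).flatten().dot(v.flatten())
    return (np.abs(fd - dd) / np.linalg.norm(v)).item()
# e.g. fd_check(logLik, cnn.gradient, u_img[None, :, :, None], v_img[None, :, :, None])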
Example #6
sel4eval = prng.choice(num_samp, size=n_dif, replace=False)
X = loaded['X'][sel4eval]
Y = loaded['Y'][sel4eval]
sel4print = prng.choice(n_dif, size=10, replace=False)
prog = np.ceil(n_dif * (.1 + np.arange(0, 1, .1)))
for n in range(n_dif):
    u = X[n]
    # calculate gradient
    t_start = timeit.default_timer()
    ll_xact, dll_xact = eit.get_geom(eit.img2vec(u), [0, 1],
                                     force_posperm=True)[:2]
    t_used[0] += timeit.default_timer() - t_start
    # emulate gradient
    t_start = timeit.default_timer()
    ll_emul = logLik(u[None, :, :, None]).numpy()[0]
    dll_emul = eit.img2vec(cnn.gradient(u[None, :, :, None], logLik))
    t_used[1] += timeit.default_timer() - t_start
    # test difference
    dif_fun = np.abs(ll_xact - ll_emul)
    dif_grad = dll_xact - dll_emul
    dif[n] = np.array(
        [dif_fun, np.linalg.norm(dif_grad) / np.linalg.norm(dll_xact)])

    #     # check the gradient extracted from emulation
    #     v=eit.sample()
    #     h=1e-4
    #     dll_emul_fd_v=(logLik(u[None,:]+h*v[None,:])-logLik(u[None,:]))/h
    #     reldif = abs(dll_emul_fd_v - dll_emul.flatten().dot(v))/np.linalg.norm(v)
    #     print('Relative difference between finite difference and extracted results: {}'.format(reldif))

    if n + 1 in prog:
        pass  # (body truncated in the original snippet; prog holds the 10%, 20%, ...
              #  checkpoints set above, so this block presumably reports progress)