Example #1
            if '_'+algs[i]+'_' in f_i:
                try:
                    f=df.HDF5File(bip.pde.mpi_comm,os.path.join(folder,f_i),"r")
                    samp_mean.zero(); samp_std.zero(); num_read=0
                    for s in range(num_samp):
                        if s+1 in prog:
                            print('{0:.0f}% has been completed.'.format((s+1)/num_samp*100))
                        f.read(samp_f,'sample_{0}'.format(s))
                        u=samp_f.vector()
                        if '_whitened_latent' in f_i: u=bip.prior.v2u(u)
                        if 'DREAM' in algs[i]:
                            if 'c' in AE:
                                u_latin=fun2img(vec2fun(u, elliptic_latent.pde.V))
                                width=tuple(np.mod(i,2) for i in u_latin.shape)
                                u_latin=chop(u_latin,width)[None,:,:,None] if autoencoder.activations['latent'] is None else u_latin.flatten()[None,:]
                                u=img2fun(pad(np.squeeze(autoencoder.decode(u_latin)),width),elliptic.pde.V).vector()
                            else:
                                u_latin=u.get_local()[None,:]
                                u=elliptic.prior.gen_vector(autoencoder.decode(u_latin).flatten())
#                         else:
#                             u=u_
                        if '_whitened_emulated' in f_i: u=elliptic.prior.v2u(u)
                        samp_mean.axpy(wts[s],u)
                        samp_std.axpy(wts[s],u*u)
#                         num_read+=1
                    f.close()
                    print(f_i+' has been read!')
                    f_read=f_i
                    found=True
                except Exception:
                    pass  # skip files that cannot be read
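The loop above accumulates weighted first and second moments of the samples (samp_mean holds the sum of w_s*u_s, samp_std the sum of w_s*u_s**2), so a later step presumably converts the second moment into a standard deviation. A minimal NumPy sketch of that follow-up step, with illustrative array names standing in for the dolfin vectors:

import numpy as np

num_samp, dof = 100, 256
wts = np.full(num_samp, 1.0 / num_samp)     # importance weights summing to one
samples = np.random.randn(num_samp, dof)    # stand-in for the vectors read from HDF5

first_moment = wts @ samples                # sum_s w_s * u_s   (samp_mean above)
second_moment = wts @ samples**2            # sum_s w_s * u_s^2 (samp_std above)
samp_mean = first_moment
samp_std = np.sqrt(np.maximum(second_moment - first_moment**2, 0.0))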
Example #2
# plot
import matplotlib.pyplot as plt
import matplotlib as mp
plt.rcParams['image.cmap'] = 'jet'
fig, axes = plt.subplots(nrows=1,
                         ncols=3,
                         sharex=True,
                         sharey=True,
                         figsize=(15, 5))
sub_figs = [None] * 3
# plot
plt.axes(axes.flat[0])
u_f.vector().set_local(dll_xact)
sub_figs[0] = df.plot(u_f)
plt.title('Calculated Gradient')
plt.axes(axes.flat[1])
u_f = img2fun(dll_cnn, elliptic.pde.V)
sub_figs[1] = df.plot(u_f)
plt.title('Emulated Gradient (CNN)')
plt.axes(axes.flat[2])
u_f = vec2fun(dll_dnn, elliptic.pde.V)
sub_figs[2] = df.plot(u_f)
plt.title('Emulated Gradient (DNN)')
# add common colorbar
from util.common_colorbar import common_colorbar
fig = common_colorbar(fig, axes, sub_figs)

# save plots
# fig.tight_layout(h_pad=1)
plt.savefig(os.path.join(folder, 'extrctgrad.png'), bbox_inches='tight')
# plt.show()
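util.common_colorbar is a project utility; a rough matplotlib-only stand-in (an assumption of what it does, not the project's implementation), given that each sub_figs[i] is the mappable returned by df.plot:

import matplotlib.pyplot as plt

def common_colorbar_sketch(fig, axes, sub_figs):
    # make room on the right and attach one shared colorbar,
    # using the color scale of the last mappable
    fig.subplots_adjust(right=0.9)
    cax = fig.add_axes([0.92, 0.15, 0.02, 0.7])   # [left, bottom, width, height]
    fig.colorbar(sub_figs[-1], cax=cax)
    return fig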
Example #3
def geom(unknown_lat,
         V_lat,
         V,
         autoencoder,
         geom_ord=[0],
         whitened=False,
         **kwargs):
    loglik = None
    gradlik = None
    metact = None
    rtmetact = None
    eigs = None

    # un-whiten if necessary
    if whitened == 'latent':
        bip_lat = kwargs.get('bip_lat')
        unknown_lat = bip_lat.prior.v2u(unknown_lat)

#     u_latin={'AutoEncoder':unknown_lat.get_local()[None,:],'ConvAutoEncoder':chop(fun2img(vec2fun(unknown_lat, V_lat)))[None,:,:,None]}[type(autoencoder).__name__]
    if 'Conv' in type(autoencoder).__name__:
        u_latin = fun2img(vec2fun(unknown_lat, V_lat))
        width = tuple(np.mod(i, 2) for i in u_latin.shape)
        u_latin = (chop(u_latin, width)[None, :, :, None]
                   if autoencoder.activations['latent'] is None
                   else u_latin.flatten()[None, :])
        unknown = img2fun(pad(np.squeeze(autoencoder.decode(u_latin)), width),
                          V).vector()
    else:
        u_latin = unknown_lat.get_local()[None, :]
        unknown = df.Function(V).vector()
        unknown.set_local(autoencoder.decode(u_latin).flatten())

    emul_geom = kwargs.pop('emul_geom', None)
    full_geom = kwargs.pop('full_geom', None)
    bip_lat = kwargs.pop('bip_lat', None)
    bip = kwargs.pop('bip', None)
    try:
        if len(kwargs) == 0:
            loglik, gradlik, metact_, rtmetact_ = emul_geom(
                unknown, geom_ord, whitened == 'emulated')
        else:
            loglik, gradlik, metact_, eigs_ = emul_geom(
                unknown, geom_ord, whitened == 'emulated', **kwargs)
    except:
        try:
            if len(kwargs) == 0:
                loglik, gradlik, metact_, rtmetact_ = full_geom(
                    unknown, geom_ord, whitened == 'original')
            else:
                loglik, gradlik, metact_, eigs_ = full_geom(
                    unknown, geom_ord, whitened == 'original', **kwargs)
        except:
            raise RuntimeError('No geometry in the original space available!')

    if any(s >= 1 for s in geom_ord):
        if whitened == 'latent':
            gradlik = bip.prior.C_act(gradlik, .5, op='C', transp=True)
#         jac_=autoencoder.jacobian(u_latin,'decode')
        if 'Conv' in type(autoencoder).__name__:
            #             if autoencoder.activations['latent'] is None:
            # #                 jac__=np.zeros(jac_.shape[:2]+tuple(i+1 for i in jac_.shape[2:]))
            # #                 jac__[:,:,:-1,:-1]=jac_; jac_=jac__
            #                 jac_=pad(jac_,(0,)*2+width)
            #                 jac_=jac_.reshape(jac_.shape[:2]+(-1,))
            #             d2v = df.dof_to_vertex_map(V_lat)
            #             jac_=jac_[:,:,d2v]
            #         jac=MultiVector(unknown,V_lat.dim())
            # #         [jac[i].set_local(img2fun(pad(jac_[:,:,i]), V).vector() if 'Conv' in type(autoencoder).__name__ else jac_[:,i]) for i in range(V_lat.dim())] # not working: too many indices?
            #         if 'Conv' in type(autoencoder).__name__:
            #             [jac[i].set_local(img2fun(pad(jac_[:,:,i],width), V).vector()) for i in range(V_lat.dim())] # for loop is too slow
            #         else:
            #             [jac[i].set_local(jac_[:,i]) for i in range(V_lat.dim()) for i in range(V_lat.dim())] # for loop is too slow
            #         gradlik_=jac.dot(gradlik)
            jac_ = autoencoder.jacobian(u_latin, 'decode')
            jac_ = pad(jac_, width * 2 if autoencoder.activations['latent'] is None else width + (0,))
            jac_ = jac_.reshape((np.prod(jac_.shape[:2]), np.prod(jac_.shape[2:])))
            jac_ = jac_[np.ix_(df.dof_to_vertex_map(V),
                               df.dof_to_vertex_map(V_lat))]
            #         try:
            #         import timeit
            #         t_start=timeit.default_timer()
            #         jac=create_PETScMatrix(jac_.shape,V.mesh().mpi_comm(),range(jac_.shape[0]),range(jac_.shape[1]),jac_)
            #         gradlik_=df.as_backend_type(gradlik).vec()
            #         gradlik1=df.Vector(unknown_lat)
            #         jac.multTranspose(gradlik_,df.as_backend_type(gradlik1).vec())
            #         print('time consumed:{}'.format(timeit.default_timer()-t_start))
            #         except:
            #         t_start=timeit.default_timer()
            gradlik_ = jac_.T.dot(gradlik.get_local())
        else:
            # vector-Jacobian product without forming the decoder Jacobian explicitly
            gradlik_ = autoencoder.jacvec(u_latin, gradlik.get_local()[None, :])
        gradlik = df.Vector(unknown_lat)
        gradlik.set_local(gradlik_)
#         print('time consumed:{}'.format(timeit.default_timer()-t_start))

    if any(s >= 1.5 for s in geom_ord):

        def _get_metact_misfit(u_actedon):
            if type(u_actedon) is df.Vector:
                u_actedon = u_actedon.get_local()
            tmp = df.Vector(unknown)
            tmp.zero()
            jac.reduce(tmp, u_actedon)
            v = df.Vector(unknown_lat)
            v.set_local(jac.dot(metact_(tmp)))
            return v

        def _get_rtmetact_misfit(u_actedon):
            if type(u_actedon) is not df.Vector:
                u_ = df.Vector(unknown)
                u_.set_local(u_actedon)
                u_actedon = u_
            v = df.Vector(unknown_lat)
            v.set_local(jac.dot(rtmetact_(u_actedon)))
            return v

        metact = _get_metact_misfit
        rtmetact = _get_rtmetact_misfit

    if any(s > 1 for s in geom_ord) and len(kwargs) != 0:
        if bip_lat is None:
            raise ValueError('No latent inverse problem defined!')
        # compute eigen-decomposition using randomized algorithms
        if whitened == 'latent':
            # generalized eigen-decomposition (_C^(1/2) F _C^(1/2), M), i.e. _C^(1/2) F _C^(1/2) = M V D V', V' M V = I
            def invM(a):
                a = bip_lat.prior.gen_vector(a)
                invMa = bip_lat.prior.gen_vector()
                bip_lat.prior.Msolver.solve(invMa, a)
                return invMa

            eigs = geigen_RA(metact,
                             lambda u: bip_lat.prior.M * u,
                             invM,
                             dim=bip_lat.pde.V.dim(),
                             **kwargs)
        else:
            # generalized eigen-decomposition (F, _C^(-1)), i.e. F = _C^(-1) U D U^(-1), U' _C^(-1) U = I, V = _C^(-1/2) U
            eigs = geigen_RA(metact,
                             lambda u: bip_lat.prior.C_act(u, -1, op='K'),
                             lambda u: bip_lat.prior.C_act(u, op='K'),
                             dim=bip_lat.pde.V.dim(),
                             **kwargs)
        if any(s > 1.5 for s in geom_ord):
            # adjust the gradient
            # update low-rank approximate Gaussian posterior
            bip_lat.post_Ga = Gaussian_apx_posterior(bip_lat.prior, eigs=eigs)
#             Hu = bip_lat.prior.gen_vector()
#             bip_lat.post_Ga.Hlr.mult(unknown, Hu)
#             gradlik.axpy(1.0,Hu)

    if len(kwargs) == 0:
        return loglik, gradlik, metact, rtmetact
    else:
        return loglik, gradlik, metact, eigs
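The gradient step in geom pulls the full-space gradient back through the decoder, either by forming the Jacobian explicitly or via autoencoder.jacvec. A minimal TensorFlow sketch of the same vector-Jacobian product, assuming decode is a Keras model; names and shapes are illustrative:

import numpy as np
import tensorflow as tf

def decoder_vjp(decode, z, grad_full):
    # return J_decode(z)^T grad_full by reverse-mode autodiff
    z = tf.convert_to_tensor(z, dtype=tf.float32)          # (1, latent_dim)
    g = tf.convert_to_tensor(grad_full, dtype=tf.float32)  # (1, full_dim)
    with tf.GradientTape() as tape:
        tape.watch(z)
        u = decode(z)                                      # (1, full_dim)
    return tape.gradient(u, z, output_gradients=g).numpy()

decode = tf.keras.Sequential([tf.keras.layers.Dense(64, activation='relu', input_shape=(16,)),
                              tf.keras.layers.Dense(256)])
gradlik_latent = decoder_vjp(decode, np.random.randn(1, 16), np.random.randn(1, 256))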
Example #4
    #     u_decoded=cae.model.predict(chop(fun2img(u_f))[None,:,:,None])

    # compute the log-volumes
    #     logvol_enc=cae.logvol(chop(fun2img(u_f))[None,:,:,None],'encode')
    #     print('Log-volume of encoder: {}'.format(logvol_enc))
    #     logvol_dec=cae.logvol(u_encoded,'decode')
    #     print('Log-volume of decoder: {}'.format(logvol_dec))

    # plot
    plt.subplot(131)
    u_f.vector().set_local(u)
    df.plot(u_f)
    plt.title('Original Sample')
    plt.subplot(132)
    if activations['latent'] is None:
        u_f_lat = img2fun(pad(np.squeeze(u_encoded)), elliptic_latent.pde.V)
    else:
        u_f_lat.vector().set_local(u_encoded.flatten())
#         u_f_lat=img2fun(u_encoded.reshape((nx+1,ny+1)),elliptic_latent.pde.V)
    df.plot(u_f_lat)
    plt.title('Latent Sample')
    plt.subplot(133)
    u_decoded = np.squeeze(u_decoded)
    #     u_decoded*=X_max; u_decoded+=X_min
    u_f = img2fun(pad(u_decoded), elliptic.pde.V)
    df.plot(u_f)
    plt.title('Reconstructed Sample')
    plt.draw()
    plt.pause(1.0 / 10.0)

# read data and construct plot functions
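Example #4 visualizes an encode-decode round trip; a quick NumPy check of the reconstruction quality, assuming u and u_decoded are arrays on the same grid (an illustrative helper, not part of the project):

import numpy as np

def rel_l2_error(u, u_decoded):
    # relative L2 error of the autoencoder round trip
    u = np.asarray(u).ravel()
    u_decoded = np.asarray(u_decoded).ravel()
    return np.linalg.norm(u - u_decoded) / np.linalg.norm(u)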
Example #5
                dif_fun = np.abs(ll_xact - ll_emul)
                dif_grad = dll_xact.get_local() - dll_emul
                fun_errors[0, s, t, n] = dif_fun
                grad_errors[0, s, t, n] = np.linalg.norm(dif_grad) / dll_xact.norm('l2')

                # emulation by CNN
                t_start = timeit.default_timer()
                u_img = fun2img(vec2fun(u, elliptic.pde.V))
                ll_emul = logLik_c(u_img[None, :, :, None]).numpy()
                dll_emul = cnn.gradient(u_img[None, :, :, None],
                                        logLik_c)  #* grad_scalfctr
                t_used[2] += timeit.default_timer() - t_start
                # record difference
                dif_fun = np.abs(ll_xact - ll_emul)
                dif_grad = dll_xact - img2fun(dll_emul, elliptic.pde.V).vector()
                fun_errors[1, s, t, n] = dif_fun
                grad_errors[1, s, t, n] = dif_grad.norm('l2') / dll_xact.norm('l2')

            print(
                'Time used for calculation: {} vs GP-emulation: {} vs CNN-emulation: {}'
                .format(*t_used.tolist()))
            pred_times[0, s, t] = t_used[1]
            pred_times[1, s, t] = t_used[2]
            pred_times[2, s, t] = t_used[0]

    # save results
    with open(os.path.join(folder, 'compare_gp_cnn.pckl'), 'wb') as f:
        pickle.dump([fun_errors, grad_errors, train_times, pred_times], f)
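The pickled results can be reloaded later for plotting; a small sketch mirroring the dump call above (same file name, with folder assumed to be defined as in the example):

import os, pickle

with open(os.path.join(folder, 'compare_gp_cnn.pckl'), 'rb') as f:
    fun_errors, grad_errors, train_times, pred_times = pickle.load(f)
print(fun_errors.shape, grad_errors.shape, pred_times.shape)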
Example #6
# plt.show(block=True)
u_f = df.Function(elliptic.pde.V)
for n in range(20):
    u = elliptic.prior.sample()

    # calculate gradient
    t_start = timeit.default_timer()
    dll_xact = elliptic.get_geom(u, [0, 1])[1]
    t_used[0] += timeit.default_timer() - t_start
    # emulate gradient
    t_start = timeit.default_timer()
    u_img = fun2img(vec2fun(u, elliptic.pde.V))
    dll_emul = cnn.gradient(u_img[None, :, :, None], logLik)
    t_used[1] += timeit.default_timer() - t_start
    # test difference
    dif = dll_xact - img2fun(dll_emul, elliptic.pde.V).vector()
    print(
        'Difference between the calculated and emulated gradients: min ({}), med ({}), max ({})'
        .format(dif.min(), np.median(dif.get_local()), dif.max()))

    # check the gradient extracted from emulation with finite difference
    v = elliptic.prior.sample()
    v_img = fun2img(vec2fun(v, elliptic.pde.V))
    h = 1e-4
    dll_emul_fd_v = (
        logLik(u_img[None, :, :, None] + h * v_img[None, :, :, None]) -
        logLik(u_img[None, :, :, None])) / h
    reldif = abs(dll_emul_fd_v -
                 dll_emul.flatten().dot(v_img.flatten())) / v.norm('l2')
    print(
        'Relative difference between finite difference and extracted results: {}'
        .format(reldif))
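The block above checks the extracted gradient against a one-sided finite difference along a random direction v. A generic sketch of that check, assuming f maps a batched image to a scalar (illustrative names, not project code):

import numpy as np

def fd_directional_check(f, grad_img, u_img, v_img, h=1e-4):
    # one-sided finite difference of f along v_img
    fd = (np.asarray(f(u_img[None, :, :, None] + h * v_img[None, :, :, None]))
          - np.asarray(f(u_img[None, :, :, None]))) / h
    # directional derivative predicted by the extracted gradient
    dd = grad_img.flatten().dot(v_img.flatten())
    return abs(float(np.squeeze(fd)) - dd) / np.linalg.norm(v_img.flatten())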
Example #7
def geom(unknown, bip, emulator, geom_ord=[0], whitened=False, **kwargs):
    loglik = None
    gradlik = None
    metact = None
    rtmetact = None
    eigs = None

    # un-whiten if necessary
    if whitened:
        unknown = bip.prior.v2u(unknown)

    u_input = {
        'DNN': unknown.get_local()[None, :],
        'CNN': fun2img(vec2fun(unknown, bip.pde.V))[None, :, :, None]
    }[type(emulator).__name__]

    ll_f = lambda x: -0.5 * bip.misfit.prec * tf.math.reduce_sum(
        (emulator.model(x) - bip.misfit.obs)**2, axis=1)

    if any(s >= 0 for s in geom_ord):
        loglik = ll_f(u_input).numpy()

    if any(s >= 1 for s in geom_ord):
        gradlik_ = emulator.gradient(u_input, ll_f)
        #         gradlik = {'DNN':bip.prior.gen_vector(gradlik_), 'CNN':img2fun(gradlik_, bip.pde.V).vector()}[type(emulator).__name__] # not working
        if type(emulator).__name__ == 'DNN':
            gradlik = bip.prior.gen_vector(gradlik_)
        elif type(emulator).__name__ == 'CNN':
            gradlik = img2fun(gradlik_, bip.pde.V).vector()
        if whitened:
            gradlik = bip.prior.C_act(gradlik, .5, op='C', transp=True)

    if any(s >= 1.5 for s in geom_ord):
        jac_ = emulator.jacobian(u_input)
        n_obs = len(bip.misfit.idx)
        jac = MultiVector(unknown, n_obs)
        [
            jac[i].set_local({
                'DNN': jac_[i],
                'CNN': img2fun(jac_[i], bip.pde.V).vector()
            }[type(emulator).__name__]) for i in range(n_obs)
        ]

        def _get_metact_misfit(u_actedon):  # GNH
            if type(u_actedon) is not df.Vector:
                u_actedon = bip.prior.gen_vector(u_actedon)
            v = bip.prior.gen_vector()
            jac.reduce(v, bip.misfit.prec * jac.dot(u_actedon))
            return bip.prior.M * v

        def _get_rtmetact_misfit(u_actedon):
            if type(u_actedon) is df.Vector:
                u_actedon = u_actedon.get_local()
            v = bip.prior.gen_vector()
            jac.reduce(v, np.sqrt(bip.misfit.prec) * u_actedon)
            return bip.prior.rtM * v

        metact = _get_metact_misfit
        rtmetact = _get_rtmetact_misfit
        if whitened:
            metact = lambda u: bip.prior.C_act(_get_metact_misfit(
                bip.prior.C_act(u, .5, op='C')),
                                               .5,
                                               op='C',
                                               transp=True)  # ppGNH
            rtmetact = lambda u: bip.prior.C_act(
                _get_rtmetact_misfit(u), .5, op='C', transp=True)

    if any(s > 1 for s in geom_ord) and len(kwargs) != 0:
        if whitened:
            # generalized eigen-decomposition (_C^(1/2) F _C^(1/2), M), i.e. _C^(1/2) F _C^(1/2) = M V D V', V' M V = I
            def invM(a):
                a = bip.prior.gen_vector(a)
                invMa = bip.prior.gen_vector()
                bip.prior.Msolver.solve(invMa, a)
                return invMa

            eigs = geigen_RA(metact,
                             lambda u: bip.prior.M * u,
                             invM,
                             dim=bip.pde.V.dim(),
                             **kwargs)
        else:
            # generalized eigen-decomposition (F, _C^(-1)), i.e. F = _C^(-1) U D U^(-1), U' _C^(-1) U = I, V = _C^(-1/2) U
            eigs = geigen_RA(metact,
                             lambda u: bip.prior.C_act(u, -1, op='K'),
                             lambda u: bip.prior.C_act(u, op='K'),
                             dim=bip.pde.V.dim(),
                             **kwargs)
        if any(s > 1.5 for s in geom_ord):
            # adjust the gradient
            # update low-rank approximate Gaussian posterior
            bip.post_Ga = Gaussian_apx_posterior(bip.prior, eigs=eigs)
            Hu = bip.prior.gen_vector()
            bip.post_Ga.Hlr.mult(unknown, Hu)
            gradlik.axpy(1.0, Hu)

    if len(kwargs) == 0:
        return loglik, gradlik, metact, rtmetact
    else:
        return loglik, gradlik, metact, eigs
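The metric action _get_metact_misfit is the Gauss-Newton Hessian J^T * prec * J applied through the emulator Jacobian (followed by the prior mass matrix M in the code above). A dense NumPy sketch of the core operation, with illustrative shapes and without the mass-matrix factor:

import numpy as np

def gauss_newton_action(jac, prec, u):
    # jac: (n_obs, dim) emulator Jacobian; prec: scalar noise precision;
    # u: (dim,) direction; returns J^T * prec * (J u)
    return jac.T @ (prec * (jac @ u))

jac = np.random.randn(25, 1681)   # e.g. 25 observations on a 41 x 41 mesh
u = np.random.randn(1681)
Fu = gauss_newton_action(jac, 1.0, u)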
Example #8
File: cnn.py  Project: zhouqp631/DREAM-BUQ
        dll_emul = cnn.gradient(u_img[None, :, :, None], loglik)
        t_used[1] += timeit.default_timer() - t_start
        # test difference
        dif = dll_xact - img2fun(dll_emul, elliptic.pde.V).vector()
        print(
            'Difference between the calculated and emulated gradients: min ({}), med ({}), max ({})'
            .format(dif.min(), np.median(dif.get_local()), dif.max()))

        #         # check the gradient extracted from emulation
        #         v=elliptic.prior.sample()
        #         v_img=fun2img(vec2fun(v,elliptic.pde.V))
        #         h=1e-4
        #         dll_emul_fd_v=(loglik(u_img[None,:,:,None]+h*v_img[None,:,:,None])-loglik(u_img[None,:,:,None]))/h
        #         reldif = abs(dll_emul_fd_v - dll_emul.flatten().dot(v_img.flatten()))/v.norm('l2')
        #         print('Relative difference between finite difference and extracted results: {}'.format(reldif))

        # plot
        plt.subplot(121)
        u_f.vector().set_local(dll_xact)
        df.plot(u_f)
        plt.title('Calculated')
        plt.subplot(122)
        u_f = img2fun(dll_emul, elliptic.pde.V)
        df.plot(u_f)
        plt.title('Emulated')
        plt.draw()
        plt.pause(1.0 / 30.0)

    print(
        'Time used to calculate vs emulate gradients: {} vs {}'.format(*t_used))
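fun2img and img2fun, used throughout these examples, move between a FEniCS function on a structured unit-square mesh and a 2D array. A rough sketch of the idea, assuming scalar P1 elements on a UnitSquareMesh so that dofs correspond one-to-one to vertices (not the project's actual implementation):

import numpy as np
import dolfin as df

def fun2img_sketch(u_f, nx, ny):
    # vertex values of a P1 function on UnitSquareMesh(nx, ny) as an (nx+1, ny+1) image
    return u_f.compute_vertex_values().reshape(nx + 1, ny + 1)

def img2fun_sketch(img, V):
    # put pixel (vertex) values back onto the P1 dofs via the dof-to-vertex map
    u_f = df.Function(V)
    u_f.vector().set_local(img.flatten()[df.dof_to_vertex_map(V)])
    return u_f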
Example #9
plt.ion()
# plt.show(block=True)
u_f = df.Function(elliptic.pde.V)
for n in range(10):
    u = elliptic.prior.sample()
    # calculate gradient
    t_start = timeit.default_timer()
    dll_xact = elliptic.get_geom(u, [0, 1])[1]
    t_used[0] += timeit.default_timer() - t_start
    # emulate gradient
    t_start = timeit.default_timer()
    u_img = fun2img(vec2fun(u, elliptic.pde.V))
    dll_emul = cnn.gradient(u_img[None, :, :, None], loglik)
    t_used[1] += timeit.default_timer() - t_start
    # test difference
    dif = dll_xact - img2fun(dll_emul, elliptic.pde.V).vector()
    print(
        'Difference between the calculated and emulated gradients: min ({}), med ({}), max ({})'
        .format(dif.min(), np.median(dif.get_local()), dif.max()))

    #     # check the gradient extracted from emulation
    #     v=elliptic.prior.sample()
    #     v_img=fun2img(vec2fun(v,elliptic.pde.V))
    #     h=1e-4
    #     dll_emul_fd_v=(loglik(u_img[None,:,:,None]+h*v_img[None,:,:,None])-loglik(u_img[None,:,:,None]))/h
    #     reldif = abs(dll_emul_fd_v - dll_emul.flatten().dot(v_img.flatten()))/v.norm('l2')
    #     print('Relative difference between finite difference and extracted results: {}'.format(reldif))

    # plot
    plt.subplot(121)
    u_f.vector().set_local(dll_xact)