Example #1
    eigs = kwargs.pop('eigs', None)
    if eigs is not None:
        return _GA_posterior_lr(prior, eigs, **kwargs)
    metact = kwargs.pop('metact', None)
    rtmetact = kwargs.pop('rtmetact', None)
    if metact is not None and rtmetact is not None:
        return _GA_posterior_exct(prior, metact, rtmetact, **kwargs)
    else:
        df.error('Definition not specified!')
        
if __name__ == '__main__':
#     np.random.seed(2020)
    from Elliptic import Elliptic
    # define the inverse problem
    SNR=50
    elliptic=Elliptic(nx=40,ny=40,SNR=SNR)
    # get MAP
    unknown=df.Function(elliptic.pde.V)
    MAP_file=os.path.join(os.getcwd(),'result/MAP_SNR'+str(SNR)+'.h5')
    if os.path.isfile(MAP_file):
        f=df.HDF5File(elliptic.pde.mpi_comm,MAP_file,"r")
        f.read(unknown,'parameter')
        f.close()
    else:
        unknown=elliptic.get_MAP(SAVE=True)
    
    import time
    start = time.time()
    # get eigen-decomposition of posterior Hessian at MAP
    _, _, _, eigs = elliptic.get_geom(unknown.vector(), geom_ord=[1.5],
                                      whitened=False, k=100)  # threshold=1e-2
    # define approximate Gaussian posterior
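    # A hedged sketch (not this project's API) of the low-rank Gaussian
    # approximation such a posterior typically is: with prior-covariance
    # action C(.) and generalized eigenpairs (d_i, u_i) of the data-misfit
    # Hessian, the Laplace posterior covariance acts as
    #   post_cov(v) = C(v) - U diag(d / (1 + d)) U^T v.
    d, U = eigs  # assumed layout: (eigenvalues, eigenvectors as columns)
    post_cov = lambda C_action, v: C_action(v) - U.dot((d / (1. + d)) * U.T.dot(v))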
Example #2

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=3)
    parser.add_argument('dim', nargs='?', type=int, default=100)
    parser.add_argument('num_samp', nargs='?', type=int, default=10000)
    parser.add_argument('num_burnin', nargs='?', type=int, default=1000)
    parser.add_argument('step_sizes',
                        nargs='?',
                        type=float,
                        default=[1, 10, 1, 5, 100, 100,
                                 100])  # 1,20,2,100,100,100,100
    parser.add_argument('step_nums',
                        nargs='?',
                        type=int,
                        default=[1, 1, 4, 1, 4, 1, 4])
    parser.add_argument('algs',
                        nargs='?',
                        type=str,
                        default=('pCN', 'infMALA', 'infHMC', 'infmMALA',
                                 'infmHMC', 'splitinfmMALA', 'splitinfmHMC'))
    parser.add_argument('trct_opt', nargs='?', type=int, default=3)
    parser.add_argument('trct_idx', nargs='?', type=int, default=[])
    args = parser.parse_args()

    # define the PDE problem
    nx = 40
    ny = 40
    elliptic = Elliptic(nx=nx, ny=ny)
    sigma = 1.25
    s = 0.0625
    kl_opt = 'kf'  # kernel-function choice for the Karhunen-Loève expansion of the coefficient
    theta = np.zeros(args.dim)  # initialization
    #     f_kereigen=os.path.join(os.getcwd(),'kernel_eigens.pckl')
    #     if os.path.isfile(f_kereigen) and os.access(f_kereigen, os.R_OK):
    #         f=open(f_kereigen,'rb')
    #         ker,eigen=pickle.load(f)
    #         f.close()
    #     else:
    #         ker=elliptic.kernel(sigma=sigma,s=s)
    #         eigen=ker.get_eigen(args.dim)
    #         f=open(f_kereigen,'wb')
    #         pickle.dump([ker,eigen],f)
    #         f.close()
    #     coeff=elliptic.coefficient(theta=theta,kl_opt=kl_opt,sigma=sigma,s=s,degree=2,ker=ker,eigen=eigen) # cannot pickle SwigPyObject??
    coeff = elliptic.coefficient(theta=theta,
                                 kl_opt=kl_opt,
                                 sigma=sigma,
                                 s=s,
                                 degree=2)

    # obtain observations
    SNR = 100  # 50
    f_obs = os.path.join(os.getcwd(), 'obs_' + str(SNR) + '.pckl')
    if os.path.isfile(f_obs):
        f = open(f_obs, 'rb')
        obs, idx, loc, sd_noise = pickle.load(f)
        f.close()
    else:
        obs, idx, loc, sd_noise = elliptic.get_obs(SNR=SNR)
        f = open(f_obs, 'wb')
        pickle.dump([obs, idx, loc, sd_noise], f)
        f.close()

    # define data misfit class
    print('Defining data-misfit...')
    misfit = elliptic.data_misfit(obs, 1. / sd_noise**2, idx, loc)

    # run MCMC to generate samples
    print("Preparing %s sampler with step size %g for %d step(s)..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO],
           args.step_nums[args.algNO]))

    if 'split' in args.algs[args.algNO]:
        args.trct_opt = 2
        args.trct_idx = range(25)
        print(
            'and truncating on ' + {
                0: 'value, gradient, and metric (no sense)',
                1: 'gradient and metric',
                2: 'metric',
                3: 'none'
            }[args.trct_opt] + '...')

    geomfun = lambda theta, geom_opt: geom(
        theta, coeff, elliptic, misfit, geom_opt, args.trct_opt, args.trct_idx)
    inf_MC = geoinfMC(theta, np.eye(args.dim), geomfun,
                      args.step_sizes[args.algNO], args.step_nums[args.algNO],
                      args.algs[args.algNO], args.trct_idx)
    mc_fun = inf_MC.sample
    mc_args = (args.num_samp, args.num_burnin)
    mc_fun(*mc_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(inf_MC.savepath, inf_MC.filename + '.pckl')
    filename = os.path.join(inf_MC.savepath, 'Elliptic_' + inf_MC.filename +
                            '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    soln_count = elliptic.soln_count.copy()
    pickle.dump([nx, ny, theta, sigma, s, SNR, sd_noise, soln_count, args], f)
    f.close()
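    # A hedged sketch of reading the results back: the script appends a second
    # record to the sampler's pickle file ('ab'), so the file holds multiple
    # pickled objects that must be loaded sequentially until EOF.
    records = []
    with open(filename, 'rb') as f:
        while True:
            try:
                records.append(pickle.load(f))
            except EOFError:
                break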
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=0)
    parser.add_argument('ensemble_size', nargs='?', type=int, default=100)
    parser.add_argument('max_iter', nargs='?', type=int, default=50)
    parser.add_argument('step_sizes', nargs='?', type=float,
                        default=[1., .1])  # SNR10: [1,.01];SNR100: [1,.01]
    parser.add_argument('algs', nargs='?', type=str, default=('EKI', 'EKS'))
    args = parser.parse_args()

    ## define the inverse elliptic problem ##
    # parameters for PDE model
    nx = 40
    ny = 40
    # parameters for prior model
    sigma = 1.25
    s = 0.0625
    # parameters for misfit model
    SNR = 50  # 100
    # define the inverse problem
    elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR, sigma=sigma, s=s)

    # initialization
    unknown = MultiVector(elliptic.prior.gen_vector(), args.ensemble_size)
    for j in range(args.ensemble_size):
        unknown[j].set_local(elliptic.prior.sample(whiten=False))

    # define parameters needed
    def G(u, IP=elliptic):
        u_f = df.Function(IP.prior.V)
        u_f.vector().zero()
        u_f.vector().axpy(1., u)
        IP.pde.set_forms(unknown=u_f)
        return IP.misfit._extr_soloc(IP.pde.soln_fwd()[0])

    y = elliptic.misfit.obs
    data = {
        'obs': y,
        'size': y.size,
        'cov': 1. / elliptic.misfit.prec * np.eye(y.size)
    }

    # EnK parameters
    nz_lvl = 1
    err_thld = 1e-1

    # run EnK to generate ensembles
    print("Preparing %s with step size %g ..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO]))
    ek = EnK(unknown,
             G,
             data,
             elliptic.prior,
             stp_sz=args.step_sizes[args.algNO],
             nz_lvl=nz_lvl,
             err_thld=err_thld,
             alg=args.algs[args.algNO],
             adpt=True)
    ek_fun = ek.run
    ek_args = (args.max_iter, True)
    savepath, filename = ek_fun(*ek_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(savepath, filename + '.pckl')
    filename = os.path.join(savepath, 'Elliptic_' + filename +
                            '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    #     soln_count=[elliptic.soln_count,elliptic.pde.soln_count]
    soln_count = elliptic.pde.soln_count
    pickle.dump([nx, ny, sigma, s, SNR, soln_count, args], f)
    f.close()
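    # A hedged numpy sketch of one plain EKI step -- the textbook update an
    # ensemble Kalman inversion performs, not EnK's actual implementation;
    # U is a (J x n) array of ensemble members rather than a MultiVector:
    #   u_j <- u_j + C_ug (C_gg + Gamma)^{-1} (y + eta_j - G(u_j))
    def eki_step(U, G_fun, y, Gamma):
        Gv = np.array([G_fun(u) for u in U])    # forward images, (J x m)
        du, dg = U - U.mean(0), Gv - Gv.mean(0)
        C_ug = du.T.dot(dg) / len(U)            # parameter-data covariance
        C_gg = dg.T.dot(dg) / len(U)            # data-data covariance
        K = C_ug.dot(np.linalg.inv(C_gg + Gamma))
        eta = np.random.multivariate_normal(np.zeros(len(y)), Gamma, len(U))
        return U + (y + eta - Gv).dot(K.T)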
Example #4
import os
import numpy as np
import tensorflow as tf

from Elliptic import Elliptic
from util.dolfin_gadget import vec2fun, fun2img, img2fun
from nn.cnn import CNN
from nn.dnn import DNN
from tensorflow.keras.models import load_model

# tf.compat.v1.disable_eager_execution() # needed to train with custom loss # comment to plot
# set random seed
np.random.seed(2020)
tf.random.set_seed(2020)

# define the inverse problem
nx = 40
ny = 40
SNR = 50
elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR)
# algorithms
algs = ['EKI', 'EKS']
num_algs = len(algs)
alg_no = 1
# load data
ensbl_sz = 500
folder = './analysis_f_SNR' + str(SNR)
if not os.path.exists(folder): os.makedirs(folder)

## define the emulator (CNN) ##
loaded = np.load(file=os.path.join(
    folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) + '_training_XimgY.npz'))
X = loaded['X']
Y = loaded['Y']
# pre-processing: scale X to 0-1
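# A hedged sketch of the scaling step named above, assuming the per-image
# min-max normalization used in a related script in this collection:
X -= np.nanmin(X, axis=(1, 2), keepdims=True)
X /= np.nanmax(X, axis=(1, 2), keepdims=True)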
Example #5

from dolfin import *
import numpy as np
import matplotlib.pyplot as plt
from Elliptic import Elliptic

if not has_petsc4py():
    print("DOLFIN has not been configured with petsc4py. Exiting.")
    exit()

if not has_slepc():
    print("DOLFIN has not been configured with SLEPc. Exiting.")
    exit()

# np.random.seed(2016)

# settings
dim = 100
# choice of coefficient definition
# kl_opt='fb'
kl_opt = 'kf'

# define elliptic model
elliptic = Elliptic(nx=40, ny=40)
theta = np.random.randn(dim)
# get coefficient
coeff = elliptic.coefficient(theta, kl_opt=kl_opt)

# plot
parameters["plotting_backend"] = "matplotlib"
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = plt.axes(xlim=(0, 1), ylim=(0, 1))
coeff_fun, _, _ = coeff.get_coeff()
heat = plot(coeff_fun)


# animation function.  This is called sequentially
def update(i, ax, fig):
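    # hedged completion -- the source truncates here; a typical FuncAnimation
    # callback would redraw the coefficient field for a fresh K-L draw:
    coeff_i = elliptic.coefficient(np.random.randn(dim), kl_opt=kl_opt)
    coeff_fun_i, _, _ = coeff_i.get_coeff()
    ax.clear()
    plot(coeff_fun_i)
    return ax,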
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=0)
    parser.add_argument('emuNO', nargs='?', type=int, default=1)
    parser.add_argument('aeNO', nargs='?', type=int, default=0)
    parser.add_argument('num_samp', nargs='?', type=int, default=5000)
    parser.add_argument('num_burnin', nargs='?', type=int, default=1000)
    parser.add_argument('step_sizes',
                        nargs='?',
                        type=float,
                        default=[
                            .1, 1., .6, None, None
                        ])  # AE [.1,1.,.6] # CAE [.1,.6,.3] # VAE [.3]
    parser.add_argument('step_nums',
                        nargs='?',
                        type=int,
                        default=[1, 1, 5, 1, 5])
    parser.add_argument('algs',
                        nargs='?',
                        type=str,
                        default=[
                            'DREAM' + a for a in ('pCN', 'infMALA', 'infHMC',
                                                  'infmMALA', 'infmHMC')
                        ])
    parser.add_argument('emus', nargs='?', type=str, default=['dnn', 'cnn'])
    parser.add_argument('aes',
                        nargs='?',
                        type=str,
                        default=['ae', 'cae', 'vae'])
    args = parser.parse_args()

    ##------ define the inverse elliptic problem ------##
    # parameters for PDE model
    nx = 40
    ny = 40
    # parameters for prior model
    sigma = 1.25
    s = 0.0625
    # parameters for misfit model
    SNR = 50  # 100
    # define the inverse problem
    elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR, sigma=sigma, s=s)
    # define the latent (coarser) inverse problem
    nx = 10
    ny = 10
    obs, nzsd, loc = [
        getattr(elliptic.misfit, i) for i in ('obs', 'nzsd', 'loc')
    ]
    elliptic_latent = Elliptic(nx=nx,
                               ny=ny,
                               SNR=SNR,
                               obs=obs,
                               nzsd=nzsd,
                               loc=loc)

    ##------ define networks ------##
    # training data algorithms
    algs = ['EKI', 'EKS']
    num_algs = len(algs)
    alg_no = 1
    # load data
    ensbl_sz = 500
    folder = './analysis_f_SNR' + str(SNR)
    if not os.path.exists(folder): os.makedirs(folder)

    ##---- EMULATOR ----##
    # prepare for training data
    if args.emus[args.emuNO] == 'dnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XY.npz'))
        X = loaded['X']
        Y = loaded['Y']
    elif args.emus[args.emuNO] == 'cnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        Y = loaded['Y']
        X = X[:, :, :, None]
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train,y_train=X[:n_tr],Y[:n_tr]
    #     x_test,y_test=X[n_tr:],Y[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    y_train, y_test = Y[tr_idx], Y[te_idx]
    # define emulator
    if args.emus[args.emuNO] == 'dnn':
        depth = 3
        activations = {'hidden': 'softplus', 'output': 'linear'}
        droprate = .4
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = DNN(x_train.shape[1],
                       y_train.shape[1],
                       depth=depth,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    elif args.emus[args.emuNO] == 'cnn':
        num_filters = [16, 8, 8]
        activations = {
            'conv': 'softplus',
            'latent': 'softmax',
            'output': 'linear'
        }
        latent_dim = 256
        droprate = .5
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = CNN(x_train.shape[1:],
                       y_train.shape[1],
                       num_filters=num_filters,
                       latent_dim=latent_dim,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    f_name = args.emus[args.emuNO] + '_' + algs[alg_no] + str(ensbl_sz)
    # load emulator
    try:
        emulator.model = load_model(os.path.join(folder, f_name + '.h5'),
                                    custom_objects={'loss': None})
        print(f_name + ' has been loaded!')
    except:
        try:
            emulator.model.load_weights(os.path.join(folder, f_name + '.h5'))
            print(f_name + ' has been loaded!')
        except:
            print('\nNo emulator found. Training {}...\n'.format(
                args.emus[args.emuNO]))
            epochs = 200
            patience = 0
            emulator.train(x_train,
                           y_train,
                           x_test=x_test,
                           y_test=y_test,
                           epochs=epochs,
                           batch_size=64,
                           verbose=1,
                           patience=patience)
            # save emulator
            try:
                emulator.model.save(os.path.join(folder, f_name + '.h5'))
            except:
                emulator.model.save_weights(
                    os.path.join(folder, f_name + '.h5'))
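
    # A hedged sketch (assuming emulator.model is a standard Keras model) of
    # what emulator-based geometry needs from the network: predictions and
    # input-gradients of a Gaussian misfit, obtained via tf.GradientTape.
    x_probe = tf.convert_to_tensor(x_test[:1])
    with tf.GradientTape() as tape:
        tape.watch(x_probe)
        nll = 0.5 * tf.reduce_sum((emulator.model(x_probe) - y_test[:1])**2)
    dnll_dx = tape.gradient(nll, x_probe)  # misfit gradient w.r.t. the input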

    ##---- AUTOENCODER ----##
    # prepare for training data
    if 'c' in args.aes[args.aeNO]:
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        X = X[:, :-1, :-1, None]
    else:
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_X.npz'))
        X = loaded['X']
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train=X[:n_tr]
    #     x_test=X[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    # define autoencoder
    if args.aes[args.aeNO] == 'ae':
        half_depth = 3
        latent_dim = elliptic_latent.pde.V.dim()
        droprate = 0.
        #         activation='linear'
        activation = tf.keras.layers.LeakyReLU(alpha=2.00)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
        lambda_ = 0.
        autoencoder = AutoEncoder(x_train.shape[1],
                                  half_depth=half_depth,
                                  latent_dim=latent_dim,
                                  droprate=droprate,
                                  activation=activation,
                                  optimizer=optimizer)
    elif args.aes[args.aeNO] == 'cae':
        num_filters = [16, 8]
        latent_dim = elliptic_latent.prior.dim
        #         activations={'conv':tf.keras.layers.LeakyReLU(alpha=0.1),'latent':None} # [16,1]
        activations = {'conv': 'elu', 'latent': 'linear'}
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        autoencoder = ConvAutoEncoder(x_train.shape[1:],
                                      num_filters=num_filters,
                                      latent_dim=latent_dim,
                                      activations=activations,
                                      optimizer=optimizer)
    elif args.aes[args.aeNO] == 'vae':
        half_depth = 5
        latent_dim = elliptic_latent.pde.V.dim()
        repatr_out = False
        beta = 1.
        activation = 'elu'
        #         activation=tf.keras.layers.LeakyReLU(alpha=0.01)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
        autoencoder = VAE(x_train.shape[1],
                          half_depth=half_depth,
                          latent_dim=latent_dim,
                          repatr_out=repatr_out,
                          activation=activation,
                          optimizer=optimizer,
                          beta=beta)
    f_name = [
        args.aes[args.aeNO] + '_' + i + '_' + algs[alg_no] + str(ensbl_sz)
        for i in ('fullmodel', 'encoder', 'decoder')
    ]
    # load autoencoder
    try:
        autoencoder.model = load_model(os.path.join(folder, f_name[0] + '.h5'),
                                       custom_objects={'loss': None})
        print(f_name[0] + ' has been loaded!')
        autoencoder.encoder = load_model(os.path.join(folder,
                                                      f_name[1] + '.h5'),
                                         custom_objects={'loss': None})
        print(f_name[1] + ' has been loaded!')
        autoencoder.decoder = load_model(os.path.join(folder,
                                                      f_name[2] + '.h5'),
                                         custom_objects={'loss': None})
        print(f_name[2] + ' has been loaded!')
    except:
        print('\nNo autoencoder found. Training {}...\n'.format(
            args.aes[args.aeNO]))
        epochs = 200
        patience = 0
        noise = 0.2
        kwargs = {'patience': patience}
        if args.aes[args.aeNO] == 'ae' and noise: kwargs['noise'] = noise
        autoencoder.train(x_train,
                          x_test=x_test,
                          epochs=epochs,
                          batch_size=64,
                          verbose=1,
                          **kwargs)
        # save autoencoder
        autoencoder.model.save(os.path.join(folder, f_name[0] + '.h5'))
        autoencoder.encoder.save(os.path.join(folder, f_name[1] + '.h5'))
        autoencoder.decoder.save(os.path.join(folder, f_name[2] + '.h5'))
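
    # A hedged round-trip check of the autoencoder's role in DREAM: project a
    # full-space sample to the latent space and back (encode/decode are
    # assumed method names on this project's autoencoder wrappers).
    x_latent = autoencoder.encode(x_test[:1])
    x_recon = autoencoder.decode(x_latent)
    print('reconstruction error: %.4f' % np.linalg.norm(x_recon - x_test[:1]))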

    ##------ define MCMC ------##
    # initialization
#     unknown=elliptic_latent.prior.sample(whiten=False)
    unknown = elliptic_latent.prior.gen_vector()

    # run MCMC to generate samples
    print("Preparing %s sampler with step size %g for %d step(s)..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO],
           args.step_nums[args.algNO]))

    emul_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom_emul.geom(
        q, elliptic, emulator, geom_ord, whitened, **kwargs)
    latent_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom(
        q,
        elliptic_latent.pde.V,
        elliptic.pde.V,
        autoencoder,
        geom_ord,
        whitened,
        emul_geom=emul_geom,
        bip_lat=elliptic_latent,
        bip=elliptic,
        **kwargs)
    dream = DREAM(
        unknown,
        elliptic_latent,
        latent_geom,
        args.step_sizes[args.algNO],
        args.step_nums[args.algNO],
        args.algs[args.algNO],
        whitened=False,
        log_wts=False
    )  #,AE=autoencoder)#,k=5,bip_lat=elliptic_latent) # uncomment for manifold algorithms
    mc_fun = dream.sample
    mc_args = (args.num_samp, args.num_burnin)
    mc_fun(*mc_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(dream.savepath, dream.filename + '.pckl')
    filename = os.path.join(dream.savepath, 'Elliptic_' + dream.filename +
                            '_' + args.emus[args.emuNO] + '_' +
                            args.aes[args.aeNO] + '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    #     soln_count=[elliptic.soln_count,elliptic.pde.soln_count]
    soln_count = elliptic.pde.soln_count
    pickle.dump([nx, ny, sigma, s, SNR, soln_count, args], f)
    f.close()
Example #7
import sys, os
import numpy as np
import tensorflow as tf
sys.path.append('../')
from Elliptic import Elliptic
sys.path.append('../gp')
from multiGP import multiGP as GP

# tf.compat.v1.disable_eager_execution() # needed to train with custom loss # comment to plot
# set random seed
np.random.seed(2020)
tf.random.set_seed(2020)

# define the inverse problem
nx = 40
ny = 40
SNR = 50
elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR)
# algorithms
algs = ['EKI', 'EKS']
num_algs = len(algs)
alg_no = 1
whiten = False

# define the emulator (GP)
# load data
ensbl_sz = 500
n_train = 1000
folder = './train_NN'
ifwhiten = '_whitened' if whiten else ''
loaded = np.load(file=os.path.join(
    folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) + '_training_XY' +
    ifwhiten + '.npz'))
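X = loaded['X']
Y = loaded['Y']
# hedged next step: the snippet is truncated here; by analogy with the other
# training scripts a GP emulator would now be fitted on a training subset
# (the multiGP constructor signature below is an assumption, not its API):
# gp = GP(X[:n_train], Y[:n_train])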
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=0)
    parser.add_argument('num_samp', nargs='?', type=int, default=5000)
    parser.add_argument('num_burnin', nargs='?', type=int, default=1000)
    parser.add_argument(
        'step_sizes', nargs='?', type=float, default=[
            .03, .15, .10, .5, .3
        ])  # SNR10: [.5,2,1.3,6.,4.];SNR100: [.01,.04,0.04,.52,.25] # HMC L4
    parser.add_argument('step_nums',
                        nargs='?',
                        type=int,
                        default=[1, 1, 5, 1, 5])
    parser.add_argument('algs',
                        nargs='?',
                        type=str,
                        default=('pCN', 'infMALA', 'infHMC', 'DRinfmMALA',
                                 'DRinfmHMC'))
    args = parser.parse_args()

    ## define the inverse elliptic problem ##
    # parameters for PDE model
    nx = 40
    ny = 40
    # parameters for prior model
    sigma = 1.25
    s = 0.0625
    # parameters for misfit model
    SNR = 50  # 100
    # define the inverse problem
    elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR, sigma=sigma, s=s)

    # initialization
    #     unknown=elliptic.prior.sample(whiten=False)
    unknown = elliptic.prior.gen_vector()
    #     unknown=df.Function(elliptic.pde.V)
    #     MAP_file=os.path.join(os.getcwd(),'result/MAP.h5')
    #     if os.path.isfile(MAP_file):
    #         f=df.HDF5File(elliptic.pde.mpi_comm,MAP_file,"r")
    #         f.read(unknown,'parameter')
    #         f.close()
    #     else:
    #         unknown=elliptic.get_MAP(SAVE=True)

    # run MCMC to generate samples
    print("Preparing %s sampler with step size %g for %d step(s)..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO],
           args.step_nums[args.algNO]))

    inf_GMC = geoinfMC(unknown,
                       elliptic,
                       args.step_sizes[args.algNO],
                       args.step_nums[args.algNO],
                       args.algs[args.algNO],
                       k=5)
    mc_fun = inf_GMC.sample
    mc_args = (args.num_samp, args.num_burnin)
    mc_fun(*mc_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(inf_GMC.savepath, inf_GMC.filename + '.pckl')
    filename = os.path.join(inf_GMC.savepath, 'Elliptic_' + inf_GMC.filename +
                            '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    #     soln_count=[elliptic.soln_count,elliptic.pde.soln_count]
    soln_count = elliptic.pde.soln_count
    pickle.dump([nx, ny, sigma, s, SNR, soln_count, args], f)
    f.close()
Example #9

import numpy as np
from Elliptic import Elliptic
# import multiprocessing

# parameters["num_threads"] = 2

np.random.seed(2016)
# settings
theta_dim = 100
# choice of coefficient definition
# kl_opt='fb'
kl_opt = 'kf'

# generate data
# theta=.1*np.ones(theta_dim)#np.random.randn(theta_dim)
# theta=np.random.randn(theta_dim)
theta = np.zeros(theta_dim)
elliptic = Elliptic(nx=30, ny=30)
coeff = elliptic.coefficient(theta=theta, kl_opt=kl_opt, degree=2)

# obtain observations
obs, idx, loc, sd_noise = elliptic.get_obs()

# define data misfit class
print('\nDefining data-misfit...')
misfit = elliptic.data_misfit(obs, 1. / sd_noise**2, idx, loc)

# preparing density plot
dim = [1, 2]
print('\nPreparing posterior density plot in dimensions (%d, %d)...' %
      tuple(dim))
# log density function
# def logdensity(x,dim=dim):
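# A hedged sketch of the log-density hinted at above: vary only the chosen
# K-L coefficients, keep the rest at theta, and combine a standard-normal
# prior with the data misfit (misfit.cost is a hypothetical method name):
# def logdensity(x, dim=dim):
#     t = theta.copy()
#     t[np.array(dim) - 1] = x
#     c = elliptic.coefficient(theta=t, kl_opt=kl_opt, degree=2)
#     return -0.5 * t.dot(t) - misfit.cost(c)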
Example #10
import os
import numpy as np
import tensorflow as tf

from Elliptic import Elliptic
from util.dolfin_gadget import vec2fun, fun2img, img2fun
from util.multivector import *
from nn.cnn import CNN
from tensorflow.keras.models import load_model

# tf.compat.v1.disable_eager_execution() # needed to train with custom loss # comment to plot
# set random seed
np.random.seed(2020)
tf.random.set_seed(2020)

# define the inverse problem
nx = 40
ny = 40
SNR = 50
elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR)
# algorithms
algs = ['EKI', 'EKS']
num_algs = len(algs)
alg_no = 1

# define the emulator (CNN)
# load data
ensbl_sz = 500
folder = './train_NN'
loaded = np.load(file=os.path.join(
    folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) + '_training_XimgY.npz'))
X = loaded['X']
Y = loaded['Y']
# pre-processing: scale X to 0-1
# X-=np.nanmin(X,axis=(1,2),keepdims=True) # try axis=(1,2,3)
Example #11
"""
Plot the true and estimated transmissivity field of the Elliptic inverse problem (DILI; Cui et al., 2016)
Shiwei Lan @ U of Warwick, 2016
"""

import os,pickle
from dolfin import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mp

from Elliptic import Elliptic

np.random.seed(2016)
# define the PDE problem
elliptic=Elliptic(nx=40,ny=40)
# true transmissivity field
true_coeff=elliptic.true_coeff()
# coefficient
dim=100
theta=np.zeros(dim)
sigma=1.25;s=0.0625;kl_opt='kf'
coeff=elliptic.coefficient(theta=theta,kl_opt=kl_opt,sigma=sigma,s=s,degree=2)

# algorithms
algs=('pCN','infMALA','infHMC','infmMALA','infmHMC','splitinfmMALA','splitinfmHMC')
alg_names=('pCN',r'$\infty$-MALA',r'$\infty$-HMC',r'$\infty$-mMALA',r'$\infty$-mHMC',r'split$\infty$-mMALA',r'split$\infty$-mHMC')
num_algs=len(algs)
# preparation for estimates
folder = './analysis'
fnames=[f for f in os.listdir(folder) if f.endswith('.pckl')]
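# hedged sketch of the next step (the pickle record layout is an assumption):
# read each algorithm's stored samples back and average them into an estimate
# for f_i in fnames:
#     with open(os.path.join(folder, f_i), 'rb') as f:
#         samp = pickle.load(f)
#     # est = samp.mean(axis=0) would give a posterior-mean estimate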
"""
Plot observations and some solutions of Elliptic PDE model (DILI; Cui et~al, 2016)
Shiwei Lan @ U of Warwick, 2016
"""

from dolfin import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mp

from Elliptic import Elliptic

np.random.seed(2016)

# define the PDE problem
elliptic = Elliptic(nx=40, ny=40)
# obtain observations using true coefficient function
obs, idx, loc, _ = elliptic.get_obs()

# plot
fig, axes = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=(14, 5))

# plot observations
plt.axes(axes[0])
parameters["plotting_backend"] = "matplotlib"
plot(elliptic.mesh)
plt.plot(loc[:, 0], loc[:, 1], 'bo', markersize=10)
plt.axis('tight')
plt.xlabel('x', fontsize=12)
plt.ylabel('y', fontsize=12)
plt.title('Observations on selected locations', fontsize=12)
Example #13
import numpy as np

from Elliptic import Elliptic

# parameters["num_threads"] = 2

np.random.seed(2016)
# settings
dim = 9
# choice of coefficient definition
kl_opt = 'fb'
# kl_opt='kf'

# generate observations
# theta=.1*np.ones(dim)
theta = .1 * np.random.randn(dim)
elliptic = Elliptic(nx=30, ny=30)
# K-L expansion with specific choice
coeff = elliptic.coefficient(theta=theta, kl_opt=kl_opt, degree=2)

# solve forward equation
# u_fwd,p_fwd,l_fwd=elliptic.soln_fwd(theta)

# obtain observations
obs, idx, loc, sd_noise = elliptic.get_obs(coeff)

# parameters["plotting_backend"]="matplotlib"
# plt.figure(0)
# fig=plot(elliptic.states_fwd.split(True)[0])
# plt.colorbar(fig)

# define data misfit class
Example #14

from dolfin import *
import numpy as np
import matplotlib.pyplot as plt
from Elliptic import Elliptic

if not has_linear_algebra_backend("PETSc"):
    print("DOLFIN has not been configured with PETSc. Exiting.")
    exit()

if not has_petsc4py():
    print("DOLFIN has not been configured with petsc4py. Exiting.")
    exit()

if not has_slepc():
    print("DOLFIN has not been configured with SLEPc. Exiting.")
    exit()

np.random.seed(2016)

# define elliptic model
elliptic = Elliptic(nx=40, ny=40)

# create and assemble the kernel, solve the associated eigen-problem
ker = elliptic.kernel()
# Compute all eigenvalues of A x = \lambda x
n = 100
eigen = ker.get_eigen(n)

# Plot eigenfunctions
eigs_plot = np.array([1, 2, 10, n])
u = Function(elliptic.V)
parameters["plotting_backend"] = "matplotlib"
fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, figsize=(10, 6))

for j, ax in enumerate(axes.flat):
    # Extract largest (first) eigenpair
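    # hedged completion -- the loop body is truncated in the source; with a
    # dolfin SLEPcEigenSolver-style object a typical body would read:
    # r, c, rx, cx = eigen.get_eigenpair(eigs_plot[j] - 1)  # assumed API
    # u.vector()[:] = rx
    # plt.axes(ax); plot(u, title='eigenfunction %d' % eigs_plot[j])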
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=0)
    parser.add_argument('emuNO', nargs='?', type=int, default=1)
    parser.add_argument('num_samp', nargs='?', type=int, default=5000)
    parser.add_argument('num_burnin', nargs='?', type=int, default=1000)
    parser.add_argument('step_sizes',
                        nargs='?',
                        type=float,
                        default=[.05, .15, .1, None, None])
    parser.add_argument('step_nums',
                        nargs='?',
                        type=int,
                        default=[1, 1, 5, 1, 5])
    parser.add_argument('algs',
                        nargs='?',
                        type=str,
                        default=[
                            'e' + a for a in ('pCN', 'infMALA', 'infHMC',
                                              'DRinfmMALA', 'DRinfmHMC')
                        ])
    parser.add_argument('emus', nargs='?', type=str, default=['dnn', 'cnn'])
    args = parser.parse_args()

    ##------ define the inverse elliptic problem ------##
    # parameters for PDE model
    nx = 40
    ny = 40
    # parameters for prior model
    sigma = 1.25
    s = 0.0625
    # parameters for misfit model
    SNR = 50  # 100
    # define the inverse problem
    elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR, sigma=sigma, s=s)

    ##------ define networks ------##
    # training data algorithms
    algs = ['EKI', 'EKS']
    num_algs = len(algs)
    alg_no = 1
    # load data
    ensbl_sz = 500
    folder = './analysis_f_SNR' + str(SNR)

    ##---- EMULATOR ----##
    # prepare for training data
    if args.emus[args.emuNO] == 'dnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XY.npz'))
        X = loaded['X']
        Y = loaded['Y']
    elif args.emus[args.emuNO] == 'cnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        Y = loaded['Y']
        X = X[:, :, :, None]
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train,y_train=X[:n_tr],Y[:n_tr]
    #     x_test,y_test=X[n_tr:],Y[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    y_train, y_test = Y[tr_idx], Y[te_idx]
    # define emulator
    if args.emus[args.emuNO] == 'dnn':
        depth = 3
        activations = {'hidden': 'softplus', 'output': 'linear'}
        droprate = .4
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = DNN(x_train.shape[1],
                       y_train.shape[1],
                       depth=depth,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    elif args.emus[args.emuNO] == 'cnn':
        num_filters = [16, 8, 8]
        activations = {
            'conv': 'softplus',
            'latent': 'softmax',
            'output': 'linear'
        }
        latent_dim = 256
        droprate = .5
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = CNN(x_train.shape[1:],
                       y_train.shape[1],
                       num_filters=num_filters,
                       latent_dim=latent_dim,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    f_name = args.emus[args.emuNO] + '_' + algs[alg_no] + str(ensbl_sz)
    # load emulator
    try:
        emulator.model = load_model(os.path.join(folder, f_name + '.h5'),
                                    custom_objects={'loss': None})
        print(f_name + ' has been loaded!')
    except:
        try:
            emulator.model.load_weights(os.path.join(folder, f_name + '.h5'))
            print(f_name + ' has been loaded!')
        except:
            print('\nNo emulator found. Training {}...\n'.format(
                args.emus[args.emuNO]))
            epochs = 200
            patience = 0
            emulator.train(x_train,
                           y_train,
                           x_test=x_test,
                           y_test=y_test,
                           epochs=epochs,
                           batch_size=64,
                           verbose=1,
                           patience=patience)
            # save emulator
            try:
                emulator.model.save(os.path.join(folder, f_name + '.h5'))
            except:
                emulator.model.save_weights(
                    os.path.join(folder, f_name + '.h5'))

    # initialization
#     unknown=elliptic.prior.sample(whiten=False)
    unknown = elliptic.prior.gen_vector()

    # run MCMC to generate samples
    print("Preparing %s sampler with step size %g for %d step(s)..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO],
           args.step_nums[args.algNO]))

    emul_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom(
        q, elliptic, emulator, geom_ord, whitened, **kwargs)
    e_infGMC = einfGMC(
        unknown, elliptic, emul_geom, args.step_sizes[args.algNO],
        args.step_nums[args.algNO],
        args.algs[args.algNO])  #,k=5) # uncomment for manifold algorithms
    mc_fun = e_infGMC.sample
    mc_args = (args.num_samp, args.num_burnin)
    mc_fun(*mc_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(e_infGMC.savepath, e_infGMC.filename + '.pckl')
    filename = os.path.join(e_infGMC.savepath,
                            'Elliptic_' + e_infGMC.filename + '_' +
                            args.emus[args.emuNO] + '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    #     soln_count=[elliptic.soln_count,elliptic.pde.soln_count]
    soln_count = elliptic.pde.soln_count
    pickle.dump([nx, ny, sigma, s, SNR, soln_count, args], f)
    f.close()
Example #16
import sys, os
import numpy as np
import tensorflow as tf
sys.path.append("../")
from Elliptic import Elliptic
# from util.dolfin_gadget import vec2fun,fun2img,img2fun
from nn.vae import VAE
from tensorflow.keras.models import load_model

# set random seed
np.random.seed(2020)
tf.random.set_seed(2020)

# define the inverse problem
nx = 40
ny = 40
SNR = 50
elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR)
# define the latent (coarser) inverse problem
nx = 20
ny = 20
obs, nzsd, loc = [getattr(elliptic.misfit, i) for i in ('obs', 'nzsd', 'loc')]
elliptic_latent = Elliptic(nx=nx, ny=ny, SNR=SNR, obs=obs, nzsd=nzsd, loc=loc)
# algorithms
algs = ['EKI', 'EKS']
num_algs = len(algs)
alg_no = 1

# define the autoencoder (AE)
# load data
ensbl_sz = 500
folder = './train_NN'
loaded = np.load(file=os.path.join(
    folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
    '_training_X.npz'))  # file name assumed by analogy with related scripts
Example #17
import numpy as np
from scipy.stats import norm
import timeit

from Elliptic import Elliptic
from nn.ae import AutoEncoder  # module path assumed, by analogy with nn.cnn / nn.vae

# np.random.seed(2020)

## define the inverse elliptic problem ##
# parameters for PDE model
nx = 40
ny = 40
# parameters for prior model
sigma = 1.25
s = 0.0625
# parameters for misfit model
SNR = 50  # 100
# define the inverse problem
elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR, sigma=sigma, s=s)

# define AutoEncoder
loaded = np.load(file='../nn/training.npz')
X = loaded['X']
num_samp = X.shape[0]
tr_idx = np.random.choice(num_samp,
                          size=np.floor(.75 * num_samp).astype('int'),
                          replace=False)
te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
x_train, x_test = X[tr_idx], X[te_idx]
# define Auto-Encoder
latent_dim = 441
half_depth = 3
ae = AutoEncoder(x_train, x_test, latent_dim, half_depth)
try:
Example #18
import numpy as np

from Elliptic import Elliptic

# parameters["num_threads"] = 2

np.random.seed(2016)
# settings
dim = 25
# choice of coefficient definition
# kl_opt='fb'
kl_opt = 'kf'

# generate observations
# theta=.1*np.ones(dim)
theta = .1 * np.random.randn(dim)
elliptic = Elliptic(nx=30, ny=30)
# K-L expansion with specific choice
coeff = elliptic.coefficient(theta=theta, kl_opt=kl_opt, degree=2)

# solve forward equation
# u_fwd,p_fwd,l_fwd=elliptic.soln_fwd(theta)

# obtain observations
obs, idx, loc, sd_noise = elliptic.get_obs(coeff)

# define data misfit class
print('\nDefining data-misfit...')
misfit = elliptic.data_misfit(obs, 1. / sd_noise**2, idx, loc)

# obtain the geometric quantities
print('\n\nObtaining full geometric quantities with Adjoint method...')
Example #19
                    ensbl_f) if img_out else ensbl_f.vector().get_local()
            if s + 1 in prog:
                print('{0:.0f}% ensembles have been retrieved.'.format(
                    float(s + 1) / num_ensbls * 100))
    f.close()
    return out


if __name__ == '__main__':
    from Elliptic import Elliptic
    # define the inverse problem
    np.random.seed(2020)
    nx = 40
    ny = 40
    SNR = 50
    elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR)
    # algorithms
    algs = ['EKI', 'EKS']
    num_algs = len(algs)
    # preparation for estimates
    folder = './analysis_f_SNR' + str(SNR)
    hdf5_files = [f for f in os.listdir(folder) if f.endswith('.h5')]
    pckl_files = [f for f in os.listdir(folder) if f.endswith('.pckl')]
    ensbl_sz = 500
    max_iter = 10
    img_out = ('img' in TRAIN)

    PLOT = False
    SAVE = True
    # prepare data
    for a in range(num_algs):