Example #1
def prepare_model(cache=None):
    model = CNN()
    model.conv = Conv_numba(3, 32, 7, 7)
    model.param_groups = (model.conv.params, model.fcn1.params,
                          model.fcn2.params)
    if cache is not None:
        for model_group, cache_group in zip(model.param_groups, cache):
            for k in model_group:
                model_group[k]["param"] = cache_group[k]["param"].copy()

    return model
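The cache argument lets a freshly constructed model reuse previously trained weights: the new model's parameter groups are filled with copies of the cached ones. A minimal usage sketch (only prepare_model is from the snippet above; the training step is illustrative, and the dict layout follows the copy loop above):

model = prepare_model()
# ... train model with whatever training loop is used elsewhere ...
cache = model.param_groups              # tuple of {name: {"param": ...}} dicts
restored = prepare_model(cache=cache)   # fresh model carrying the cached weights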
Example #2
        f_b = conv(x)
        params['b']['param'] = tmp
        return f_b

    db_num = eval_numerical_gradient_array(fb, b, dout)

    print('Testing conv')
    print('dx error: ', rel_error(dx_num, dx))
    print('dw error: ', rel_error(dw_num, dw))
    print('db error: ', rel_error(db_num, db))

# TODO: write script to check the backpropagation on the whole CNN is correct
if check_cnn_back:
    model = CNN(image_size=(3, 8, 8),
                channels=3,
                conv_kernel=5,
                pool_kernel=2,
                hidden_units=10,
                n_classes=10)
    X = np.random.randn(2, 3, 8, 8)
    y = (np.random.rand(2) * 10).astype(np.int32)
    model.oracle(X, y)
    loss_fn = SoftmaxCE()

    for group in model.param_groups:
        for name, param in group.items():
            xx = param["param"]

            def fx(v):
                tmp = param['param']
                param['param'] = v
                f_x, _ = loss_fn(model.score(X), y)
                param['param'] = tmp
                return f_x
Example #3
        # Maybe print training loss
        if verbose and t % print_level == 0:
            train_acc = accuracy(score, y_batch)
            print('(Iteration %d / %d , epoch %d) loss: %f, train_accu: %f' %
                  (t + 1, n_iterations, epoch, loss_hist[-1], train_acc))

        # At the end of every epoch, adjust the learning rate.
        epoch_end = (t + 1) % iterations_per_epoch == 0
        if epoch_end:
            epoch += 1
            opt_params['lr'] *= lr_decay


if __name__ == '__main__':
    model = CNN()
    data = get_cifar10_data()
    num_train = 100
    data = {
        'X_train': data['X_train'][:num_train],
        'y_train': data['y_train'][:num_train],
        'X_val': data['X_val'],
        'y_val': data['y_val'],
    }
    X_train, y_train, X_val, y_val = data['X_train'], data['y_train'], data[
        'X_val'], data['y_val']

    train(model,
          X_train,
          y_train,
          X_val,
Example #4
x_test, y_test = X[n_tr:], Y[n_tr:]

# define CNN
num_filters = [16, 8]
activations = {
    'conv': 'relu',
    'latent': tf.keras.layers.PReLU(),
    'output': 'linear'
}
latent_dim = 128
droprate = .5
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
cnn = CNN(x_train.shape[1:],
          y_train.shape[1],
          num_filters=num_filters,
          latent_dim=latent_dim,
          droprate=droprate,
          activations=activations,
          optimizer=optimizer)
f_name = 'cnn_' + algs[alg_no] + str(ensbl_sz)
try:
    cnn.model = load_model(os.path.join(folder, f_name + '.h5'),
                           custom_objects={'loss': None})
    #     cnn.model.load_weights(os.path.join(folder,f_name+'.h5'))
    print(f_name + ' has been loaded!')
except Exception as err:
    print(err)
    print('Train CNN...\n')
    epochs = 200
    patience = 0
    import timeit
Example #5
# TODO: write script to check the backpropagation on the whole CNN is correct
if check_cnn_back:
    inputs = 2
    input_dim = (3, 16, 16)
    hidden_units = 10
    num_classes = 10
    num_filters = 3
    filter_size = 3
    pool_size = 2

    np.random.seed(231)
    X = np.random.randn(inputs, *input_dim)
    y = np.random.randint(num_classes, size=inputs)

    model = CNN(input_dim, num_filters, filter_size, pool_size, hidden_units,
                num_classes)
    loss, score = model.oracle(X, y)
    print('loss:', loss)
    a = ['w1', 'w2', 'w3']
    b = ['b1', 'b2', 'b3']

    for param_name in sorted(a):
        f = lambda _: model.oracle(X, y)[0]
        param_grad_num = eval_numerical_gradient(
            f,
            model.param_groups['w'][param_name]['param'],
            verbose=False,
            h=0.00001)
        e = rel_error(param_grad_num,
                      model.param_groups['w'][param_name]['grad'])
        print('%s relative error: %e' %
              (param_name, e))
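The helpers rel_error and eval_numerical_gradient used in these checks are not included in the excerpts. As a rough sketch of what they typically compute (a guarded relative error and a central-difference gradient estimate; the actual implementations in the source may differ):

import numpy as np

def rel_error(x, y):
    # largest elementwise relative error, guarded against division by zero
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))

def eval_numerical_gradient(f, x, verbose=False, h=1e-5):
    # central-difference estimate of df/dx for a scalar-valued f, perturbing
    # one entry of x at a time and restoring it afterwards
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        old = x[ix]
        x[ix] = old + h
        fxph = f(x)                      # f(x + h)
        x[ix] = old - h
        fxmh = f(x)                      # f(x - h)
        x[ix] = old                      # restore the entry
        grad[ix] = (fxph - fxmh) / (2 * h)
        if verbose:
            print(ix, grad[ix])
        it.iternext()
    return grad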
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=0)
    parser.add_argument('emuNO', nargs='?', type=int, default=1)
    parser.add_argument('aeNO', nargs='?', type=int, default=0)
    parser.add_argument('num_samp', nargs='?', type=int, default=5000)
    parser.add_argument('num_burnin', nargs='?', type=int, default=1000)
    parser.add_argument('step_sizes',
                        nargs='?',
                        type=float,
                        default=[
                            .1, 1., .6, None, None
                        ])  # AE [.1,1.,.6] # CAE [.1,.6,.3] # VAE [.3]
    parser.add_argument('step_nums',
                        nargs='?',
                        type=int,
                        default=[1, 1, 5, 1, 5])
    parser.add_argument('algs',
                        nargs='?',
                        type=str,
                        default=[
                            'DREAM' + a for a in ('pCN', 'infMALA', 'infHMC',
                                                  'infmMALA', 'infmHMC')
                        ])
    parser.add_argument('emus', nargs='?', type=str, default=['dnn', 'cnn'])
    parser.add_argument('aes',
                        nargs='?',
                        type=str,
                        default=['ae', 'cae', 'vae'])
    args = parser.parse_args()

    ##------ define the inverse elliptic problem ------##
    # parameters for PDE model
    nx = 40
    ny = 40
    # parameters for prior model
    sigma = 1.25
    s = 0.0625
    # parameters for misfit model
    SNR = 50  # 100
    # define the inverse problem
    elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR, sigma=sigma, s=s)
    # define the latent (coarser) inverse problem
    nx = 10
    ny = 10
    obs, nzsd, loc = [
        getattr(elliptic.misfit, i) for i in ('obs', 'nzsd', 'loc')
    ]
    elliptic_latent = Elliptic(nx=nx,
                               ny=ny,
                               SNR=SNR,
                               obs=obs,
                               nzsd=nzsd,
                               loc=loc)

    ##------ define networks ------##
    # training data algorithms
    algs = ['EKI', 'EKS']
    num_algs = len(algs)
    alg_no = 1
    # load data
    ensbl_sz = 500
    folder = './analysis_f_SNR' + str(SNR)
    if not os.path.exists(folder): os.makedirs(folder)

    ##---- EMULATOR ----##
    # prepare for training data
    if args.emus[args.emuNO] == 'dnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XY.npz'))
        X = loaded['X']
        Y = loaded['Y']
    elif args.emus[args.emuNO] == 'cnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        Y = loaded['Y']
        X = X[:, :, :, None]
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train,y_train=X[:n_tr],Y[:n_tr]
    #     x_test,y_test=X[n_tr:],Y[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    y_train, y_test = Y[tr_idx], Y[te_idx]
    # define emulator
    if args.emus[args.emuNO] == 'dnn':
        depth = 3
        activations = {'hidden': 'softplus', 'output': 'linear'}
        droprate = .4
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = DNN(x_train.shape[1],
                       y_train.shape[1],
                       depth=depth,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    elif args.emus[args.emuNO] == 'cnn':
        num_filters = [16, 8, 8]
        activations = {
            'conv': 'softplus',
            'latent': 'softmax',
            'output': 'linear'
        }
        latent_dim = 256
        droprate = .5
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = CNN(x_train.shape[1:],
                       y_train.shape[1],
                       num_filters=num_filters,
                       latent_dim=latent_dim,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    f_name = args.emus[args.emuNO] + '_' + algs[alg_no] + str(ensbl_sz)
    # load emulator
    try:
        emulator.model = load_model(os.path.join(folder, f_name + '.h5'),
                                    custom_objects={'loss': None})
        print(f_name + ' has been loaded!')
    except:
        try:
            emulator.model.load_weights(os.path.join(folder, f_name + '.h5'))
            print(f_name + ' has been loaded!')
        except:
            print('\nNo emulator found. Training {}...\n'.format(
                args.emus[args.emuNO]))
            epochs = 200
            patience = 0
            emulator.train(x_train,
                           y_train,
                           x_test=x_test,
                           y_test=y_test,
                           epochs=epochs,
                           batch_size=64,
                           verbose=1,
                           patience=patience)
            # save emulator
            try:
                emulator.model.save(os.path.join(folder, f_name + '.h5'))
            except:
                emulator.model.save_weights(
                    os.path.join(folder, f_name + '.h5'))

    ##---- AUTOENCODER ----##
    # prepare for training data
    if 'c' in args.aes[args.aeNO]:
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        X = X[:, :-1, :-1, None]
    else:
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_X.npz'))
        X = loaded['X']
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train=X[:n_tr]
    #     x_test=X[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    # define autoencoder
    if args.aes[args.aeNO] == 'ae':
        half_depth = 3
        latent_dim = elliptic_latent.pde.V.dim()
        droprate = 0.
        #         activation='linear'
        activation = tf.keras.layers.LeakyReLU(alpha=2.00)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
        lambda_ = 0.
        autoencoder = AutoEncoder(x_train.shape[1],
                                  half_depth=half_depth,
                                  latent_dim=latent_dim,
                                  droprate=droprate,
                                  activation=activation,
                                  optimizer=optimizer)
    elif args.aes[args.aeNO] == 'cae':
        num_filters = [16, 8]
        latent_dim = elliptic_latent.prior.dim
        #         activations={'conv':tf.keras.layers.LeakyReLU(alpha=0.1),'latent':None} # [16,1]
        activations = {'conv': 'elu', 'latent': 'linear'}
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        autoencoder = ConvAutoEncoder(x_train.shape[1:],
                                      num_filters=num_filters,
                                      latent_dim=latent_dim,
                                      activations=activations,
                                      optimizer=optimizer)
    elif args.aes[args.aeNO] == 'vae':
        half_depth = 5
        latent_dim = elliptic_latent.pde.V.dim()
        repatr_out = False
        beta = 1.
        activation = 'elu'
        #         activation=tf.keras.layers.LeakyReLU(alpha=0.01)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
        autoencoder = VAE(x_train.shape[1],
                          half_depth=half_depth,
                          latent_dim=latent_dim,
                          repatr_out=repatr_out,
                          activation=activation,
                          optimizer=optimizer,
                          beta=beta)
    f_name = [
        args.aes[args.aeNO] + '_' + i + '_' + algs[alg_no] + str(ensbl_sz)
        for i in ('fullmodel', 'encoder', 'decoder')
    ]
    # load autoencoder
    try:
        autoencoder.model = load_model(os.path.join(folder, f_name[0] + '.h5'),
                                       custom_objects={'loss': None})
        print(f_name[0] + ' has been loaded!')
        autoencoder.encoder = load_model(os.path.join(folder,
                                                      f_name[1] + '.h5'),
                                         custom_objects={'loss': None})
        print(f_name[1] + ' has been loaded!')
        autoencoder.decoder = load_model(os.path.join(folder,
                                                      f_name[2] + '.h5'),
                                         custom_objects={'loss': None})
        print(f_name[2] + ' has been loaded!')
    except:
        print('\nNo autoencoder found. Training {}...\n'.format(
            args.aes[args.aeNO]))
        epochs = 200
        patience = 0
        noise = 0.2
        kwargs = {'patience': patience}
        if args.aes[args.aeNO] == 'ae' and noise: kwargs['noise'] = noise
        autoencoder.train(x_train,
                          x_test=x_test,
                          epochs=epochs,
                          batch_size=64,
                          verbose=1,
                          **kwargs)
        # save autoencoder
        autoencoder.model.save(os.path.join(folder, f_name[0] + '.h5'))
        autoencoder.encoder.save(os.path.join(folder, f_name[1] + '.h5'))
        autoencoder.decoder.save(os.path.join(folder, f_name[2] + '.h5'))

    ##------ define MCMC ------##
    # initialization
#     unknown=elliptic_latent.prior.sample(whiten=False)
    unknown = elliptic_latent.prior.gen_vector()

    # run MCMC to generate samples
    print("Preparing %s sampler with step size %g for %d step(s)..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO],
           args.step_nums[args.algNO]))

    emul_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom_emul.geom(
        q, elliptic, emulator, geom_ord, whitened, **kwargs)
    latent_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom(
        q,
        elliptic_latent.pde.V,
        elliptic.pde.V,
        autoencoder,
        geom_ord,
        whitened,
        emul_geom=emul_geom,
        bip_lat=elliptic_latent,
        bip=elliptic,
        **kwargs)
    dream = DREAM(
        unknown,
        elliptic_latent,
        latent_geom,
        args.step_sizes[args.algNO],
        args.step_nums[args.algNO],
        args.algs[args.algNO],
        whitened=False,
        log_wts=False
    )  #,AE=autoencoder)#,k=5,bip_lat=elliptic_latent) # uncomment for manifold algorithms
    mc_fun = dream.sample
    mc_args = (args.num_samp, args.num_burnin)
    mc_fun(*mc_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(dream.savepath, dream.filename + '.pckl')
    filename = os.path.join(dream.savepath, 'Elliptic_' + dream.filename +
                            '_' + args.emus[args.emuNO] + '_' +
                            args.aes[args.aeNO] + '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    #     soln_count=[elliptic.soln_count,elliptic.pde.soln_count]
    soln_count = elliptic.pde.soln_count
    pickle.dump([nx, ny, sigma, s, SNR, soln_count, args], f)
    f.close()
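Because the result file is reopened in append mode, the final .pckl holds the DREAM sampler's own records followed by the list dumped above. A small sketch for reading everything back (filename as constructed in the code above; what the sampler's own records contain depends on the DREAM implementation):

import pickle

records = []
with open(filename, 'rb') as f:
    while True:
        try:
            records.append(pickle.load(f))   # read successive pickle records
        except EOFError:
            break
nx, ny, sigma, s, SNR, soln_count, args = records[-1]   # the appended setup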
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=2)
    parser.add_argument('emuNO', nargs='?', type=int, default=0)
    parser.add_argument('num_samp', nargs='?', type=int, default=10000)
    parser.add_argument('num_burnin', nargs='?', type=int, default=10000)
    parser.add_argument('step_sizes',
                        nargs='?',
                        type=float,
                        default=[.01, .02, .02, None, None])
    parser.add_argument('step_nums',
                        nargs='?',
                        type=int,
                        default=[1, 1, 5, 1, 5])
    parser.add_argument('algs',
                        nargs='?',
                        type=str,
                        default=[
                            'e' + a for a in ('pCN', 'infMALA', 'infHMC',
                                              'DRinfmMALA', 'DRinfmHMC')
                        ])
    parser.add_argument('emus', nargs='?', type=str, default=['dnn', 'cnn'])
    args = parser.parse_args()

    ##------ define the BBD inverse problem ------##
    # set up
    try:
        with open('./result/BBD.pickle', 'rb') as f:
            [nz_var, pr_cov, A, true_input, y] = pickle.load(f)
        print('Data loaded!\n')
        kwargs = {'true_input': true_input, 'A': A, 'y': y}
    except:
        print('No data found. Generate new data...\n')
        d = 4
        m = 100
        nz_var = 1
        pr_cov = 1
        true_input = np.random.randint(d, size=d)
        A = np.random.rand(m, d)
        kwargs = {'true_input': true_input, 'A': A}
    bbd = BBD(nz_var=nz_var, pr_cov=pr_cov, **kwargs)
    y = bbd.y
    bbd.prior = {
        'mean': np.zeros(bbd.input_dim),
        'cov': np.diag(bbd.pr_cov) if np.ndim(bbd.pr_cov) == 1 else bbd.pr_cov,
        'sample': bbd.sample
    }

    ##------ define networks ------##
    # training data algorithms
    algs = ['EKI', 'EKS']
    num_algs = len(algs)
    alg_no = 1
    # load data
    ensbl_sz = 100
    folder = './train_NN'

    ##---- EMULATOR ----##
    # prepare for training data
    if args.emus[args.emuNO] == 'dnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XY.npz'))
        X = loaded['X']
        Y = loaded['Y']
    elif args.emus[args.emuNO] == 'cnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        Y = loaded['Y']
        X = X[:, :, :, None]
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train,y_train=X[:n_tr],Y[:n_tr]
    #     x_test,y_test=X[n_tr:],Y[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    y_train, y_test = Y[tr_idx], Y[te_idx]
    # define emulator
    if args.emus[args.emuNO] == 'dnn':
        depth = 5
        activations = {'hidden': 'softplus', 'output': 'linear'}
        droprate = 0.
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = DNN(x_train.shape[1],
                       y_train.shape[1],
                       depth=depth,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    elif args.emus[args.emuNO] == 'cnn':
        num_filters = [16, 8, 8]
        activations = {
            'conv': 'softplus',
            'latent': 'softmax',
            'output': 'linear'
        }
        latent_dim = 256
        droprate = .5
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = CNN(x_train.shape[1:],
                       y_train.shape[1],
                       num_filters=num_filters,
                       latent_dim=latent_dim,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    f_name = args.emus[args.emuNO] + '_' + algs[alg_no] + str(ensbl_sz)
    # load emulator
    try:
        emulator.model = load_model(os.path.join(folder, f_name + '.h5'),
                                    custom_objects={'loss': None})
        print(f_name + ' has been loaded!')
    except:
        try:
            emulator.model.load_weights(os.path.join(folder, f_name + '.h5'))
            print(f_name + ' has been loaded!')
        except:
            print('\nNo emulator found. Training {}...\n'.format(
                args.emus[args.emuNO]))
            epochs = 1000
            patience = 10
            emulator.train(x_train,
                           y_train,
                           x_test=x_test,
                           y_test=y_test,
                           epochs=epochs,
                           batch_size=64,
                           verbose=1,
                           patience=patience)
            # save emulator
            try:
                emulator.model.save(os.path.join(folder, f_name + '.h5'))
            except:
                emulator.model.save_weights(
                    os.path.join(folder, f_name + '.h5'))

    # initialization
    u0 = bbd.sample()
    emul_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom(
        q, bbd, emulator, geom_ord, whitened, **kwargs)

    # run MCMC to generate samples
    print("Preparing %s sampler with step size %g for %d step(s)..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO],
           args.step_nums[args.algNO]))

    e_infGMC = einfGMC(
        u0, bbd, emul_geom, args.step_sizes[args.algNO],
        args.step_nums[args.algNO],
        args.algs[args.algNO])  #,k=5) # uncomment for manifold algorithms
    mc_fun = e_infGMC.sample
    mc_args = (args.num_samp, args.num_burnin)
    mc_fun(*mc_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(e_infGMC.savepath, e_infGMC.filename + '.pckl')
    filename = os.path.join(e_infGMC.savepath,
                            'BBD_' + e_infGMC.filename + '_' +
                            args.emus[args.emuNO] + '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    pickle.dump([nz_var, pr_cov, A, true_input, y, args], f)
    f.close()
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=2)
    parser.add_argument('emuNO', nargs='?', type=int, default=1)
    parser.add_argument('num_samp', nargs='?', type=int, default=5000)
    parser.add_argument('num_burnin', nargs='?', type=int, default=1000)
    parser.add_argument('step_sizes',
                        nargs='?',
                        type=float,
                        default=[2e-3, 8e-3, 8e-3, None,
                                 None])  # DNN: 1e-2, 1e-2, 1e-2; CNN:
    parser.add_argument('step_nums',
                        nargs='?',
                        type=int,
                        default=[1, 1, 5, 1, 5])
    parser.add_argument('algs',
                        nargs='?',
                        type=str,
                        default=[
                            'e' + a for a in ('pCN', 'infMALA', 'infHMC',
                                              'DRinfmMALA', 'DRinfmHMC')
                        ])
    parser.add_argument('emus', nargs='?', type=str, default=['dnn', 'cnn'])
    args = parser.parse_args()

    ##------ define the inverse problem ------##
    ## define the Advection-Diffusion inverse problem ##
    #     mesh = df.Mesh('ad_10k.xml')
    meshsz = (61, 61)
    eldeg = 1
    gamma = 2.
    delta = 10.
    rel_noise = .5
    nref = 1
    adif = advdiff(mesh=meshsz,
                   eldeg=eldeg,
                   gamma=gamma,
                   delta=delta,
                   rel_noise=rel_noise,
                   nref=nref,
                   seed=seed)
    adif.prior.V = adif.prior.Vh
    adif.misfit.obs = np.array([dat.get_local()
                                for dat in adif.misfit.d.data]).flatten()

    ##------ define networks ------##
    # training data algorithms
    algs = ['EKI', 'EKS']
    num_algs = len(algs)
    alg_no = 1
    # load data
    ensbl_sz = 500
    folder = './train_NN_eldeg' + str(eldeg)

    ##---- EMULATOR ----##
    # prepare for training data
    if args.emus[args.emuNO] == 'dnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XY.npz'))
        X = loaded['X']
        Y = loaded['Y']
    elif args.emus[args.emuNO] == 'cnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        Y = loaded['Y']
        X = X[:, :, :, None]
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train,y_train=X[:n_tr],Y[:n_tr]
    #     x_test,y_test=X[n_tr:],Y[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    y_train, y_test = Y[tr_idx], Y[te_idx]
    # define emulator
    if args.emus[args.emuNO] == 'dnn':
        depth = 5
        activations = {
            'hidden': tf.keras.layers.LeakyReLU(alpha=.01),
            'output': 'linear'
        }
        droprate = 0.25
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
        emulator = DNN(x_train.shape[1],
                       y_train.shape[1],
                       depth=depth,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    elif args.emus[args.emuNO] == 'cnn':
        num_filters = [16, 8, 4]
        activations = {
            'conv': tf.keras.layers.LeakyReLU(alpha=0.2),
            'latent': tf.keras.layers.PReLU(),
            'output': 'linear'
        }
        latent_dim = 1024
        droprate = .5
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
        emulator = CNN(x_train.shape[1:],
                       y_train.shape[1],
                       num_filters=num_filters,
                       latent_dim=latent_dim,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    f_name = args.emus[args.emuNO] + '_' + algs[alg_no] + str(ensbl_sz)
    # load emulator
    try:
        emulator.model = load_model(os.path.join(folder, f_name + '.h5'),
                                    custom_objects={'loss': None})
        print(f_name + ' has been loaded!')
    except:
        try:
            emulator.model.load_weights(os.path.join(folder, f_name + '.h5'))
            print(f_name + ' has been loaded!')
        except:
            print('\nNo emulator found. Training {}...\n'.format(
                args.emus[args.emuNO]))
            epochs = 200
            patience = 0
            emulator.train(x_train,
                           y_train,
                           x_test=x_test,
                           y_test=y_test,
                           epochs=epochs,
                           batch_size=64,
                           verbose=1,
                           patience=patience)
            # save emulator
            try:
                emulator.model.save(os.path.join(folder, f_name + '.h5'))
            except:
                emulator.model.save_weights(
                    os.path.join(folder, f_name + '.h5'))

    # initialization
    u0 = adif.prior.sample(whiten=False)
    emul_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom(
        q, adif, emulator, geom_ord, whitened, **kwargs)

    # run MCMC to generate samples
    print("Preparing %s sampler with step size %g for %d step(s)..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO],
           args.step_nums[args.algNO]))

    e_infGMC = einfGMC(
        u0, adif, emul_geom, args.step_sizes[args.algNO],
        args.step_nums[args.algNO],
        args.algs[args.algNO])  #,k=5) # uncomment for manifold algorithms
    mc_fun = e_infGMC.sample
    mc_args = (args.num_samp, args.num_burnin)
    mc_fun(*mc_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(e_infGMC.savepath, e_infGMC.filename + '.pckl')
    filename = os.path.join(e_infGMC.savepath,
                            'AdvDiff_' + e_infGMC.filename + '_' +
                            args.emus[args.emuNO] + '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    soln_count = adif.pde.soln_count
    pickle.dump([meshsz, rel_noise, nref, soln_count, args], f)
    f.close()
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=0)
    parser.add_argument('emuNO', nargs='?', type=int, default=1)
    parser.add_argument('num_samp', nargs='?', type=int, default=5000)
    parser.add_argument('num_burnin', nargs='?', type=int, default=1000)
    parser.add_argument('step_sizes',
                        nargs='?',
                        type=float,
                        default=[.05, .15, .1, None, None])
    parser.add_argument('step_nums',
                        nargs='?',
                        type=int,
                        default=[1, 1, 5, 1, 5])
    parser.add_argument('algs',
                        nargs='?',
                        type=str,
                        default=[
                            'e' + a for a in ('pCN', 'infMALA', 'infHMC',
                                              'DRinfmMALA', 'DRinfmHMC')
                        ])
    parser.add_argument('emus', nargs='?', type=str, default=['dnn', 'cnn'])
    args = parser.parse_args()

    ##------ define the inverse elliptic problem ------##
    # parameters for PDE model
    nx = 40
    ny = 40
    # parameters for prior model
    sigma = 1.25
    s = 0.0625
    # parameters for misfit model
    SNR = 50  # 100
    # define the inverse problem
    elliptic = Elliptic(nx=nx, ny=ny, SNR=SNR, sigma=sigma, s=s)

    ##------ define networks ------##
    # training data algorithms
    algs = ['EKI', 'EKS']
    num_algs = len(algs)
    alg_no = 1
    # load data
    ensbl_sz = 500
    folder = './analysis_f_SNR' + str(SNR)

    ##---- EMULATOR ----##
    # prepare for training data
    if args.emus[args.emuNO] == 'dnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XY.npz'))
        X = loaded['X']
        Y = loaded['Y']
    elif args.emus[args.emuNO] == 'cnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        Y = loaded['Y']
        X = X[:, :, :, None]
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train,y_train=X[:n_tr],Y[:n_tr]
    #     x_test,y_test=X[n_tr:],Y[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    y_train, y_test = Y[tr_idx], Y[te_idx]
    # define emulator
    if args.emus[args.emuNO] == 'dnn':
        depth = 3
        activations = {'hidden': 'softplus', 'output': 'linear'}
        droprate = .4
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = DNN(x_train.shape[1],
                       y_train.shape[1],
                       depth=depth,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    elif args.emus[args.emuNO] == 'cnn':
        num_filters = [16, 8, 8]
        activations = {
            'conv': 'softplus',
            'latent': 'softmax',
            'output': 'linear'
        }
        latent_dim = 256
        droprate = .5
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = CNN(x_train.shape[1:],
                       y_train.shape[1],
                       num_filters=num_filters,
                       latent_dim=latent_dim,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    f_name = args.emus[args.emuNO] + '_' + algs[alg_no] + str(ensbl_sz)
    # load emulator
    try:
        emulator.model = load_model(os.path.join(folder, f_name + '.h5'),
                                    custom_objects={'loss': None})
        print(f_name + ' has been loaded!')
    except:
        try:
            emulator.model.load_weights(os.path.join(folder, f_name + '.h5'))
            print(f_name + ' has been loaded!')
        except:
            print('\nNo emulator found. Training {}...\n'.format(
                args.emus[args.emuNO]))
            epochs = 200
            patience = 0
            emulator.train(x_train,
                           y_train,
                           x_test=x_test,
                           y_test=y_test,
                           epochs=epochs,
                           batch_size=64,
                           verbose=1,
                           patience=patience)
            # save emulator
            try:
                emulator.model.save(os.path.join(folder, f_name + '.h5'))
            except:
                emulator.model.save_weights(
                    os.path.join(folder, f_name + '.h5'))

    # initialization
#     unknown=elliptic.prior.sample(whiten=False)
    unknown = elliptic.prior.gen_vector()

    # run MCMC to generate samples
    print("Preparing %s sampler with step size %g for %d step(s)..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO],
           args.step_nums[args.algNO]))

    emul_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom(
        q, elliptic, emulator, geom_ord, whitened, **kwargs)
    e_infGMC = einfGMC(
        unknown, elliptic, emul_geom, args.step_sizes[args.algNO],
        args.step_nums[args.algNO],
        args.algs[args.algNO])  #,k=5) # uncomment for manifold algorithms
    mc_fun = e_infGMC.sample
    mc_args = (args.num_samp, args.num_burnin)
    mc_fun(*mc_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(e_infGMC.savepath, e_infGMC.filename + '.pckl')
    filename = os.path.join(e_infGMC.savepath,
                            'Elliptic_' + e_infGMC.filename + '_' +
                            args.emus[args.emuNO] + '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    #     soln_count=[elliptic.soln_count,elliptic.pde.soln_count]
    soln_count = elliptic.pde.soln_count
    pickle.dump([nx, ny, sigma, s, SNR, soln_count, args], f)
    f.close()
Example #10
# activations={'conv':'relu','latent':tf.math.sin,'output':tf.keras.layers.PReLU()}
latent_dim = 1024  # best for non-whiten 256
droprate = 0.5  # best for non-whiten .5
sin_init = lambda n: tf.random_uniform_initializer(minval=-tf.math.sqrt(6 / n),
                                                   maxval=tf.math.sqrt(6 / n))
kernel_initializers = {
    'conv': 'he_uniform',
    'latent': sin_init,
    'output': 'glorot_uniform'
}
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
# optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.001)
cnn = CNN(x_train.shape[1:],
          y_train.shape[1],
          num_filters=num_filters,
          latent_dim=latent_dim,
          droprate=droprate,
          activations=activations,
          optimizer=optimizer)
loglik = lambda y: -0.5 * tf.math.reduce_sum(
    (y - adif.misfit.obs)**2 / adif.misfit.noise_variance, axis=1)
# custom_loss = lambda y_true, y_pred: [tf.square(loglik(y_true)-loglik(y_pred)), (y_true-y_pred)/adif.misfit.noise_variance]
savepath = folder + '/CNN/saved_model'
if not os.path.exists(savepath): os.makedirs(savepath)
import time
ctime = time.strftime("%Y-%m-%d-%H-%M-%S")
f_name = 'cnn_' + algs[alg_no] + str(ensbl_sz) + '-' + ctime
try:
    #     cnn.model=load_model(os.path.join(savepath,f_name+'.h5'))
    #     cnn.model=load_model(os.path.join(savepath,f_name+'.h5'),custom_objects={'loss':None})
    cnn.model.load_weights(os.path.join(savepath, f_name + '.h5'))
Example #11
loaded = np.load(file=os.path.join(
    folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) + '_training_XimgY.npz'))
Xc = loaded['X']
Yc = loaded['Y']
Xc = Xc[:, :, :, None]

# define CNN
num_filters = [16, 8, 8]
activations = {'conv': 'softplus', 'latent': 'softmax', 'output': 'linear'}
latent_dim = 256
droprate = .5
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
cnn = CNN(Xc.shape[1:],
          Yc.shape[1],
          num_filters=num_filters,
          latent_dim=latent_dim,
          droprate=droprate,
          activations=activations,
          optimizer=optimizer)

# split train/test
num_samp = Xg.shape[0]
folder = './analysis_f_SNR' + str(SNR)
try:
    with open(os.path.join(folder, 'compare_gp_cnn.pckl'), 'rb') as f:
        fun_errors, grad_errors, train_times, pred_times = pickle.load(f)
    print('Comparison results loaded!')
except:
    print('Obtaining comparison results...\n')
    for s in range(n_seed):
        np.random.seed(seeds[s])
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('algNO', nargs='?', type=int, default=0)
    parser.add_argument('emuNO', nargs='?', type=int, default=0)
    parser.add_argument('aeNO', nargs='?', type=int, default=0)
    parser.add_argument('num_samp', nargs='?', type=int, default=10000)
    parser.add_argument('num_burnin', nargs='?', type=int, default=10000)
    parser.add_argument('step_sizes',
                        nargs='?',
                        type=float,
                        default=[.01, .005, .005, None,
                                 None])  # AE [.01,.005,.01]
    parser.add_argument('step_nums',
                        nargs='?',
                        type=int,
                        default=[1, 1, 5, 1, 5])
    parser.add_argument('algs',
                        nargs='?',
                        type=str,
                        default=[
                            'DREAM' + a for a in ('pCN', 'infMALA', 'infHMC',
                                                  'infmMALA', 'infmHMC')
                        ])
    parser.add_argument('emus', nargs='?', type=str, default=['dnn', 'cnn'])
    parser.add_argument('aes',
                        nargs='?',
                        type=str,
                        default=['ae', 'cae', 'vae'])
    args = parser.parse_args()

    ##------ define the linear-Gaussian inverse problem ------##
    # set up
    d = 3
    m = 100
    try:
        with open('./result/lin.pickle', 'rb') as f:
            [nz_var, pr_cov, A, true_input, y] = pickle.load(f)
        print('Data loaded!\n')
        kwargs = {'true_input': true_input, 'A': A, 'y': y}
    except:
        print('No data found. Generate new data...\n')
        nz_var = .1
        pr_cov = 1.
        true_input = np.arange(-np.floor(d / 2), np.ceil(d / 2))
        A = np.random.rand(m, d)
        kwargs = {'true_input': true_input, 'A': A}
    lin = LiN(d, m, nz_var=nz_var, pr_cov=pr_cov, **kwargs)
    y = lin.y
    lin.prior = {
        'mean': np.zeros(lin.input_dim),
        'cov': np.diag(lin.pr_cov) if np.ndim(lin.pr_cov) == 1 else lin.pr_cov,
        'sample': lin.sample
    }
    # set up latent
    latent_dim = 2

    class LiN_lat:
        def __init__(self, input_dim):
            self.input_dim = input_dim

        def sample(self, num_samp=1):
            samp = np.random.randn(num_samp, self.input_dim)
            return np.squeeze(samp)

    lin_latent = LiN_lat(latent_dim)
    lin_latent.prior = {
        'mean': np.zeros(lin_latent.input_dim),
        'cov': np.eye(lin_latent.input_dim),
        'sample': lin_latent.sample
    }
    #     lin_latent=LiN(latent_dim,lin.output_dim,nz_var=nz_var,pr_cov=pr_cov)
    #     lin_latent.prior={'mean':np.zeros(lin_latent.input_dim),'cov':np.diag(lin_latent.pr_cov) if np.ndim(lin_latent.pr_cov)==1 else lin_latent.pr_cov,'sample':lin_latent.sample}

    ##------ define networks ------##
    # training data algorithms
    algs = ['EKI', 'EKS']
    num_algs = len(algs)
    alg_no = 1
    # load data
    ensbl_sz = 100
    folder = './train_NN'
    #     if not os.path.exists(folder): os.makedirs(folder)

    ##---- EMULATOR ----##
    # prepare for training data
    if args.emus[args.emuNO] == 'dnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XY.npz'))
        X = loaded['X']
        Y = loaded['Y']
    elif args.emus[args.emuNO] == 'cnn':
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        Y = loaded['Y']
        X = X[:, :, :, None]
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train,y_train=X[:n_tr],Y[:n_tr]
    #     x_test,y_test=X[n_tr:],Y[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    y_train, y_test = Y[tr_idx], Y[te_idx]
    # define emulator
    if args.emus[args.emuNO] == 'dnn':
        depth = 3
        activations = {'hidden': 'softplus', 'output': 'linear'}
        droprate = 0.
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = DNN(x_train.shape[1],
                       y_train.shape[1],
                       depth=depth,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    elif args.emus[args.emuNO] == 'cnn':
        num_filters = [16, 8, 8]
        activations = {
            'conv': 'softplus',
            'latent': 'softmax',
            'output': 'linear'
        }
        latent_dim = 256
        droprate = .5
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        emulator = CNN(x_train.shape[1:],
                       y_train.shape[1],
                       num_filters=num_filters,
                       latent_dim=latent_dim,
                       droprate=droprate,
                       activations=activations,
                       optimizer=optimizer)
    f_name = args.emus[args.emuNO] + '_' + algs[alg_no] + str(ensbl_sz)
    # load emulator
    try:
        emulator.model = load_model(os.path.join(folder, f_name + '.h5'),
                                    custom_objects={'loss': None})
        print(f_name + ' has been loaded!')
    except:
        try:
            emulator.model.load_weights(os.path.join(folder, f_name + '.h5'))
            print(f_name + ' has been loaded!')
        except:
            print('\nNo emulator found. Training {}...\n'.format(
                args.emus[args.emuNO]))
            epochs = 1000
            patience = 10
            emulator.train(x_train,
                           y_train,
                           x_test=x_test,
                           y_test=y_test,
                           epochs=epochs,
                           batch_size=64,
                           verbose=1,
                           patience=patience)
            # save emulator
            try:
                emulator.model.save(os.path.join(folder, f_name + '.h5'))
            except:
                emulator.model.save_weights(
                    os.path.join(folder, f_name + '.h5'))

    ##---- AUTOENCODER ----##
    # prepare for training data
    if 'c' in args.aes[args.aeNO]:
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_XimgY.npz'))
        X = loaded['X']
        X = X[:, :-1, :-1, None]
    else:
        loaded = np.load(file=os.path.join(
            folder, algs[alg_no] + '_ensbl' + str(ensbl_sz) +
            '_training_X.npz'))
        X = loaded['X']
    num_samp = X.shape[0]
    #     n_tr=np.int(num_samp*.75)
    #     x_train=X[:n_tr]
    #     x_test=X[n_tr:]
    tr_idx = np.random.choice(num_samp,
                              size=np.floor(.75 * num_samp).astype('int'),
                              replace=False)
    te_idx = np.setdiff1d(np.arange(num_samp), tr_idx)
    x_train, x_test = X[tr_idx], X[te_idx]
    # define autoencoder
    if args.aes[args.aeNO] == 'ae':
        half_depth = 2
        latent_dim = 2
        droprate = 0.
        #         activation='linear'
        activation = tf.keras.layers.LeakyReLU(alpha=2.)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
        lambda_ = 0.
        autoencoder = AutoEncoder(x_train.shape[1],
                                  half_depth=half_depth,
                                  latent_dim=latent_dim,
                                  droprate=droprate,
                                  activation=activation,
                                  optimizer=optimizer)
    elif args.aes[args.aeNO] == 'cae':
        num_filters = [16, 8]
        latent_dim = 2
        #         activations={'conv':tf.keras.layers.LeakyReLU(alpha=0.1),'latent':None} # [16,1]
        activations = {'conv': 'elu', 'latent': 'linear'}
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
        autoencoder = ConvAutoEncoder(x_train.shape[1:],
                                      num_filters=num_filters,
                                      latent_dim=latent_dim,
                                      activations=activations,
                                      optimizer=optimizer)
    elif args.aes[args.aeNO] == 'vae':
        half_depth = 5
        latent_dim = 2
        repatr_out = False
        beta = 1.
        activation = 'elu'
        #         activation=tf.keras.layers.LeakyReLU(alpha=0.01)
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, amsgrad=True)
        autoencoder = VAE(x_train.shape[1],
                          half_depth=half_depth,
                          latent_dim=latent_dim,
                          repatr_out=repatr_out,
                          activation=activation,
                          optimizer=optimizer,
                          beta=beta)
    f_name = [
        args.aes[args.aeNO] + '_' + i + '_' + algs[alg_no] + str(ensbl_sz)
        for i in ('fullmodel', 'encoder', 'decoder')
    ]
    # load autoencoder
    try:
        autoencoder.model = load_model(os.path.join(folder, f_name[0] + '.h5'),
                                       custom_objects={'loss': None})
        print(f_name[0] + ' has been loaded!')
        autoencoder.encoder = load_model(os.path.join(folder,
                                                      f_name[1] + '.h5'),
                                         custom_objects={'loss': None})
        print(f_name[1] + ' has been loaded!')
        autoencoder.decoder = load_model(os.path.join(folder,
                                                      f_name[2] + '.h5'),
                                         custom_objects={'loss': None})
        print(f_name[2] + ' has been loaded!')
    except:
        print('\nNo autoencoder found. Training {}...\n'.format(
            args.aes[args.aeNO]))
        epochs = 1000
        patience = 10
        noise = 0.
        kwargs = {'patience': patience}
        if args.aes[args.aeNO] == 'ae' and noise: kwargs['noise'] = noise
        autoencoder.train(x_train,
                          x_test=x_test,
                          epochs=epochs,
                          batch_size=64,
                          verbose=1,
                          **kwargs)
        # save autoencoder
        autoencoder.model.save(os.path.join(folder, f_name[0] + '.h5'))
        autoencoder.encoder.save(os.path.join(folder, f_name[1] + '.h5'))
        autoencoder.decoder.save(os.path.join(folder, f_name[2] + '.h5'))

    ##------ define MCMC ------##
    # initialization
    u0 = lin_latent.prior['sample']()
    emul_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom_emul.geom(
        q, lin, emulator, geom_ord, whitened, **kwargs)
    latent_geom = lambda q, geom_ord=[0], whitened=False, **kwargs: geom(
        q, autoencoder, geom_ord, whitened, emul_geom=emul_geom, **kwargs)

    # run MCMC to generate samples
    print("Preparing %s sampler with step size %g for %d step(s)..." %
          (args.algs[args.algNO], args.step_sizes[args.algNO],
           args.step_nums[args.algNO]))

    dream = DREAM(
        u0,
        lin_latent,
        latent_geom,
        args.step_sizes[args.algNO],
        args.step_nums[args.algNO],
        args.algs[args.algNO],
        whitened=False,
        vol_wts='adjust',
        AE=autoencoder
    )  #,k=5,bip_lat=lin_latent) # uncomment for manifold algorithms
    mc_fun = dream.sample
    mc_args = (args.num_samp, args.num_burnin)
    mc_fun(*mc_args)

    # append PDE information including the count of solving
    filename_ = os.path.join(dream.savepath, dream.filename + '.pckl')
    filename = os.path.join(dream.savepath, 'lin_' + dream.filename + '_' +
                            args.emus[args.emuNO] + '_' + args.aes[args.aeNO] +
                            '.pckl')  # change filename
    os.rename(filename_, filename)
    f = open(filename, 'ab')
    pickle.dump([nz_var, pr_cov, A, true_input, y, args], f)
    f.close()
Example #13
# define CNN
num_filters = [16, 32]
activations = {
    'conv': 'relu',
    'latent': tf.keras.layers.PReLU(),
    'output': 'linear'
}
latent_dim = 128
droprate = .25
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
cnn = CNN(x_train,
          y_train,
          num_filters=num_filters,
          x_test=x_test,
          y_test=y_test,
          latent_dim=latent_dim,
          activations=activations,
          droprate=droprate,
          optimizer=optimizer)
try:
    cnn.model = load_model(os.path.join(folder, 'cnn_' + algs[alg_no] + '.h5'))
    print('cnn_' + algs[alg_no] + '.h5' + ' has been loaded!')
except Exception as err:
    print(err)
    print('Train CNN...\n')
    epochs = 100
    import timeit
    t_start = timeit.default_timer()
    cnn.train(epochs, batch_size=64, verbose=1)
    t_used = timeit.default_timer() - t_start