コード例 #1
0
    def __init__(self, V, chol, randobs):
        '''
        Set up the full-order solver, a synthetic "true" observation set,
        the deep-learning error model, and the affine reduced-order model.

        Parameters:
            V - FEniCS FunctionSpace
            chol - Covariance matrix to define Gaussian field over V
                   (NOTE(review): only used by the commented-out random
                   draw below; the active code loads the field from disk)
            randobs - forwarded to Fin / load_bn_model / AffineROMFin;
                      presumably selects random-point observations — TODO confirm
        '''
        self._V = V
        self._solver = Fin(self._V, randobs)
        # Scratch Function reused for predicted conductivity fields.
        self._pred_k = dl.Function(self._V)

        # Setup synthetic observations
        self.k_true = dl.Function(self._V)

        # Random Gaussian field as true function
        #  norm = np.random.randn(len(chol))
        #  nodal_vals = np.exp(0.5 * chol.T @ norm)

        # The random draw above is disabled; a fixed field is loaded instead
        # so runs are reproducible against the same "truth".
        nodal_vals = np.load('res_x.npy')
        self.k_true.vector().set_local(nodal_vals)

        # Forward-solve with the true field and record its QoIs as the data
        # that inference will be run against.
        w, y, A, B, C = self._solver.forward(self.k_true)
        self.obs_data = self._solver.qoi_operator(w)

        # Setup DL error model
        #  self._err_model = load_parametric_model_avg('elu', Adam,
        #0.0003, 5, 58, 200, 2000, V.dim())
        self._err_model = load_bn_model(randobs)

        # Initialize reduced order model; phi is the reduced basis read from disk.
        self.phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")
        self._solver_r = AffineROMFin(self._V, self._err_model, self.phi,
                                      randobs)
        self._solver_r.set_data(self.obs_data)
コード例 #2
0
def gen_avg_rom_dataset(dataset_size, resolution=40):
    """
    Draw random log-Gaussian conductivity fields and record, per sample,
    the difference between the full-model QoIs and the averaged-ROM QoIs.

    Arguments:
        dataset_size - number of samples to draw
        resolution   - finite element mesh resolution

    Returns:
        (z_s, qoi_errors) - nodal parameter samples and the per-sample
                            5-vector of QoI discrepancies
    """
    V = get_space(resolution)
    chol = make_cov_chol(V)
    field = Function(V)
    solver = Fin(V)
    phi = np.loadtxt('data/basis_nine_param.txt', delimiter=",")
    qoi_errors = np.zeros((dataset_size, 5))

    # TODO: Needs to be fixed for higher order functions
    z_s = np.zeros((dataset_size, V.dim()))

    for idx in range(dataset_size):
        # Log-Gaussian sample of nodal conductivity values.
        draw = np.random.randn(len(chol))
        nodal_vals = np.exp(0.5 * chol.T @ draw)
        field.vector().set_local(nodal_vals)
        z_s[idx, :] = nodal_vals

        A_r, B_r, C_r, x_r, y_r = solver.averaged_forward(field, phi)
        x, y, A, B, C = solver.forward(field)
        qoi = solver.qoi_operator(x)
        qoi_r = solver.reduced_qoi_operator(x_r)
        qoi_errors[idx, :] = qoi - qoi_r

    # Heuristic: large runs are saved as training data, small ones as eval data.
    if dataset_size > 1000:
        np.savetxt('data/z_avg_tr.txt', z_s, delimiter=",")
        np.savetxt('data/errors_avg_tr.txt', qoi_errors, delimiter=",")

    if dataset_size < 400:
        np.savetxt('data/z_avg_eval.txt', z_s, delimiter=",")
        np.savetxt('data/errors_avg_eval.txt', qoi_errors, delimiter=",")
    return (z_s, qoi_errors)
コード例 #3
0
    def __init__(self, resolution=40, out_type="total_avg"):
        """
        INPUTS:
            resolution - finite element mesh resolution for the forward solver
            out_type   - which QoI this ModPiece exposes; one of
                         "total_avg", "subfin_avg", "rand_pt", "rand_pts"

        Raises:
            ValueError - if out_type is not one of the recognized options
        """
        V = get_space(resolution)
        dofs = len(V.dofmap().dofs())
        self.solver = Fin(V)
        # Reduced basis, truncated to its first ten modes.
        self.phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
        self.phi = self.phi[:, 0:10]
        self.model = load_parametric_model('relu', Adam, 0.004, 6, 50, 150,
                                           600)

        self.out_type = out_type

        # Map the requested output type to the ModPiece output dimension.
        if out_type == "total_avg":
            out_dim = 1
        elif out_type == "subfin_avg":
            out_dim = 5
        elif out_type == "rand_pt":
            out_dim = 1
        elif out_type == "rand_pts":
            out_dim = 5
        else:
            # Fail fast with a clear message instead of the NameError on
            # out_dim that an unrecognized out_type previously caused.
            raise ValueError("Unknown out_type: {}".format(out_type))

        mm.PyModPiece.__init__(self, [5], [out_dim])
コード例 #4
0
def generate(dataset_size, resolution=40):
    '''
    Create a tensorflow dataset where the features are thermal conductivity
    parameters and the labels are the differences in the quantity of interest
    between the high fidelity model and the reduced order model (the ROM error).

    Arguments:
        dataset_size - number of feature-label pairs
        resolution   - finite element mesh resolution for the high fidelity model

    Returns:
        dataset      - Tensorflow dataset created from tensor slices
    '''
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())

    # TODO: Improve this by using mass matrix covariance. Bayesian prior may work well too
    z_s = np.random.uniform(0.1, 1, (dataset_size, dofs))
    phi = np.loadtxt('data/basis.txt', delimiter=",")
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))

    field = Function(V)
    for row in range(dataset_size):
        field.vector().set_local(z_s[row, :])
        w, y, A, B, C = solver.forward(field)
        # Project the stiffness matrix onto the basis to build the ROM.
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[row][0] = y - y_r

    return tf.data.Dataset.from_tensor_slices((z_s, errors))
コード例 #5
0
def gen_affine_avg_rom_dataset(dataset_size, resolution=40, genrand=False):
    """
    Build a training/eval dataset of QoI discrepancies between the full
    model and the affine averaged ROM, also recording the full-model QoIs.

    Arguments:
        dataset_size - number of samples to draw
        resolution   - finite element mesh resolution
        genrand      - forwarded to Fin / AffineROMFin; presumably toggles
                       random-point observations — TODO confirm

    Returns:
        (z_s, qoi_errors) - nodal samples and per-sample QoI error vectors
                            (note: the qois array is saved to disk but not returned)
    """
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())
    z = Function(V)
    solver = Fin(V, genrand)
    phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")

    # Gaussian-field Cholesky factor used for log-normal conductivity draws.
    chol = make_cov_chol(V, length=1.6)
    #  prior_cov = np.load('../bayesian_inference/prior_covariance_0.07_0.07.npy')
    #  L = np.linalg.cholesky(prior_cov)

    #  err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())
    err_model = res_bn_fc_model(ELU(), Adam, 3e-5, 3, 100, 1446, solver.n_obs)

    solver_r = AffineROMFin(V, err_model, phi, genrand)
    qoi_errors = np.zeros((dataset_size, solver_r.n_obs))
    qois = np.zeros((dataset_size, solver_r.n_obs))

    # TODO: Needs to be fixed for higher order functions
    z_s = np.zeros((dataset_size, V.dim()))

    for i in tqdm(range(dataset_size)):
        #  draw = np.random.randn(dofs)
        #  nodal_vals = np.exp(np.dot(L, draw))

        # Log-Gaussian nodal conductivity sample.
        norm = np.random.randn(V.dim())
        nodal_vals = np.exp(0.5 * chol.T @ norm)

        z.vector().set_local(nodal_vals)
        z_s[i, :] = nodal_vals

        # Full solve vs reduced solve on the same parameter field.
        x, y, A, B, C = solver.forward(z)
        w_r = solver_r.forward_reduced(z)

        qoi = solver.qoi_operator(x)
        qoi_r = solver_r.qoi_reduced(w_r)

        qoi_errors[i, :] = qoi - qoi_r
        qois[i, :] = qoi

    # Heuristic: large runs are saved as training data, small ones as eval data;
    # sizes in [600, 1000] are not persisted at all.
    if (dataset_size > 1000):
        np.save('../data/z_aff_avg_tr_avg_obs_3', z_s)
        np.save('../data/errors_aff_avg_tr_avg_obs_3', qoi_errors)
        np.save('../data/qois_avg_tr_avg_obs_3', qois)

    if (dataset_size < 600):
        np.save('../data/z_aff_avg_eval_avg_obs_3', z_s)
        np.save('../data/errors_aff_avg_eval_avg_obs_3', qoi_errors)
        np.save('../data/qois_avg_eval_avg_obs_3', qois)
    return (z_s, qoi_errors)
コード例 #6
0
def generate_and_save_dataset(dataset_size, resolution=40):
    """
    Sample uniform conductivity vectors, compute the scalar ROM error for
    each, and write both parameters and errors to disk as training data.

    Arguments:
        dataset_size - number of samples to draw
        resolution   - finite element mesh resolution
    """
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())
    z_s = np.random.uniform(0.1, 1, (dataset_size, dofs))
    phi = np.loadtxt('data/basis.txt', delimiter=",")
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))

    field = Function(V)
    for row in range(dataset_size):
        field.vector().set_local(z_s[row, :])
        w, y, A, B, C = solver.forward(field)
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[row][0] = y - y_r

    np.savetxt('../data/z_s_train.txt', z_s, delimiter=",")
    np.savetxt('../data/errors_train.txt', errors, delimiter=",")
コード例 #7
0
    def __init__(self, resolution=40, out_type="total_avg"):
        """
        INPUTS:
            resolution - finite element mesh resolution for the forward solver
            out_type   - which QoI this ModPiece exposes; one of
                         "total_avg", "subfin_avg", "rand_pt", "rand_pts"

        Raises:
            ValueError - if out_type is not one of the recognized options
        """
        V = get_space(resolution)
        dofs = len(V.dofmap().dofs())
        self.solver = Fin(V)
        self.out_type = out_type

        # Map the requested output type to the ModPiece output dimension.
        if out_type == "total_avg":
            out_dim = 1
        elif out_type == "subfin_avg":
            out_dim = 5
        elif out_type == "rand_pt":
            out_dim = 1
        elif out_type == "rand_pts":
            out_dim = 5
        else:
            # Fail fast with a clear message instead of the NameError on
            # out_dim that an unrecognized out_type previously caused.
            raise ValueError("Unknown out_type: {}".format(out_type))

        mm.PyModPiece.__init__(self, [5], [out_dim])
コード例 #8
0
def gen_five_param_subfin_avg(dataset_size, resolution=40):
    """
    Sample random 5-parameter conductivities and return the per-subfin
    average-temperature QoI error between the full and reduced models.

    Arguments:
        dataset_size - number of samples to draw
        resolution   - finite element mesh resolution

    Returns:
        (z_s, errors) - (dataset_size, 5) samples and QoI error vectors
    """
    V = get_space(resolution)
    z_s = np.random.uniform(0.1, 1, (dataset_size, 5))
    phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    # Truncate the reduced basis to its first ten modes.
    phi = phi[:, 0:10]
    solver = Fin(V)
    errors = np.zeros((dataset_size, 5))
    avgs = np.zeros((dataset_size, 5))
    avgs_r = np.zeros((dataset_size, 5))

    for idx in range(dataset_size):
        w, y, A, B, C = solver.forward_five_param(z_s[idx, :])
        avgs[idx] = solver.qoi_operator(w)
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        avgs_r[idx] = solver.reduced_qoi_operator(x_r)
        errors[idx] = avgs[idx] - avgs_r[idx]

    return (z_s, errors)
コード例 #9
0
def generate_five_param_np(dataset_size, resolution=40):
    """
    Sample random five-parameter conductivity vectors and return the scalar
    ROM error in the quantity of interest for each sample.

    Arguments:
        dataset_size - number of samples to draw
        resolution   - finite element mesh resolution for the high fidelity model

    Returns:
        (z_s, errors) - (dataset_size, 5) parameter samples and the
                        (dataset_size, 1) full-minus-reduced QoI differences
    """
    V = get_space(resolution)
    z_s = np.random.uniform(0.1, 1, (dataset_size, 5))
    phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    # Truncate the reduced basis to its first ten modes.
    phi = phi[:, 0:10]
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))

    # NOTE: the original version also accumulated y and y_r into separate
    # arrays (y_s, y_r_s) that were never read or returned; that dead code
    # has been removed.
    for i in range(dataset_size):
        w, y, A, B, C = solver.forward_five_param(z_s[i, :])
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[i][0] = y - y_r

    return (z_s, errors)
コード例 #10
0
def generate_DL_only_dataset(dataset_size, resolution=40):
    '''
    Create dataset where the features are thermal conductivity parameters
    and the labels are the quantities of interest of the HFM

    Arguments: 
        dataset_size - number of feature-label pairs
        resolution   - finite element mesh resolution for the high fidelity model

    Returns:
        (z, qois)    - pairs of conductivity and qois
    '''

    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())

    # Cholesky factor of a precomputed Bayesian prior covariance; samples
    # below are Gaussian draws from that prior.
    prior_cov = np.load('bayesian_inference/prior_covariance.npy')
    L = np.linalg.cholesky(prior_cov)

    # TODO: Improve this by using mass matrix covariance. Bayesian prior may work well too
    z_s = np.zeros((dataset_size, dofs))
    solver = Fin(V, True)
    # NOTE(review): 40 hard-codes the QoI width returned by qoi_operator
    # in random-observation mode — confirm against Fin.
    qois = np.zeros((dataset_size, 40))
    k = Function(V)

    for i in range(dataset_size):
        # Gaussian prior draw of nodal conductivity values (not exponentiated
        # here, unlike the log-Gaussian samplers elsewhere in this file).
        draw = np.random.randn(dofs)
        prior_draw = np.dot(L, draw)
        k.vector().set_local(prior_draw)
        w, _, _, _, _ = solver.forward(k)
        qois[i, :] = solver.qoi_operator(w)
        z_s[i, :] = prior_draw

    # Heuristic: large runs are saved as training data, small ones as eval data.
    if (dataset_size > 1000):
        np.savetxt('data/z_dlo_tr.txt', z_s, delimiter=",")
        np.savetxt('data/qois_dlo_tr.txt', qois, delimiter=",")

    if (dataset_size < 400):
        np.savetxt('data/z_dlo_eval.txt', z_s, delimiter=",")
        np.savetxt('data/qois_dlo_eval.txt', qois, delimiter=",")

    return (z_s, qois)
コード例 #11
0
def gen_affine_avg_rom_dataset(dataset_size, resolution=40, genrand=False):
    """
    Draw log-Gaussian conductivity fields and record the per-sample QoI
    discrepancy between the full model and the affine averaged ROM.

    Arguments:
        dataset_size - number of samples to draw
        resolution   - finite element mesh resolution
        genrand      - forwarded to Fin / AffineROMFin

    Returns:
        (z_s, qoi_errors) - nodal samples and QoI error vectors
    """
    V = get_space(resolution)
    chol = make_cov_chol(V, length=1.6)
    field = Function(V)
    solver = Fin(V, genrand)
    phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")

    #  err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())
    err_model = res_bn_fc_model(ELU(), Adam, 3e-5, 3, 200, 1446, 40)

    solver_r = AffineROMFin(V, err_model, phi, genrand)
    qoi_errors = np.zeros((dataset_size, solver_r.n_obs))

    # TODO: Needs to be fixed for higher order functions
    z_s = np.zeros((dataset_size, V.dim()))

    for idx in tqdm(range(dataset_size)):
        # Log-Gaussian nodal conductivity sample.
        draw = np.random.randn(len(chol))
        nodal_vals = np.exp(0.5 * chol.T @ draw)
        field.vector().set_local(nodal_vals)
        z_s[idx, :] = nodal_vals

        # Full solve vs reduced solve on the same parameter field.
        x, y, A, B, C = solver.forward(field)
        w_r = solver_r.forward_reduced(field)

        qoi = solver.qoi_operator(x)
        qoi_r = solver_r.qoi_reduced(w_r)
        qoi_errors[idx, :] = qoi - qoi_r

    # Heuristic: large runs are saved as training data, small ones as eval data.
    if dataset_size > 1000:
        np.savetxt('../data/z_aff_avg_tr.txt', z_s, delimiter=",")
        np.savetxt('../data/errors_aff_avg_tr.txt', qoi_errors, delimiter=",")

    if dataset_size < 400:
        np.savetxt('../data/z_aff_avg_eval.txt', z_s, delimiter=",")
        np.savetxt('../data/errors_aff_avg_eval.txt',
                   qoi_errors,
                   delimiter=",")
    return (z_s, qoi_errors)
コード例 #12
0
def generate_five_param(dataset_size, resolution=40):
    """
    Build a TensorFlow dataset pairing random five-parameter conductivity
    samples with the scalar full-vs-reduced QoI error.

    Arguments:
        dataset_size - number of feature-label pairs
        resolution   - finite element mesh resolution

    Returns:
        dataset - tf.data.Dataset over (parameters, errors)
    """
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())

    # TODO: Improve this by using mass matrix covariance. Bayesian prior may work well too
    z_s = np.random.uniform(0.1, 1, (dataset_size, 5))
    phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    # Truncate the reduced basis to its first twenty modes.
    phi = phi[:, 0:20]
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))

    for row in range(dataset_size):
        w, y, A, B, C = solver.forward_five_param(z_s[row, :])
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[row][0] = y - y_r

    #  np.savetxt('data/z_s_eval.txt', z_s, delimiter=",")
    #  np.savetxt('data/errors_eval.txt', errors, delimiter=",")
    return tf.data.Dataset.from_tensor_slices((z_s, errors))
コード例 #13
0
import numpy as np
import matplotlib.pyplot as plt
import dolfin as dl
dl.set_log_level(40)

# ROMML imports
from fom.forward_solve import Fin
from fom.thermal_fin import get_space
from rom.averaged_affine_ROM import AffineROMFin
from deep_learning.dl_model import load_parametric_model_avg
from gaussian_field import make_cov_chol

# Module-level setup: function space, Gaussian-field Cholesky factor,
# and the full-order solver shared by the classes/functions below.
resolution = 40
V = get_space(resolution)
chol = make_cov_chol(V, length=1.2)  # correlation length 1.2
solver = Fin(V)


class SolverWrapper:
    """Bundles a Fin solver with observation data for optimization routines."""

    def __init__(self, solver, data):
        self.solver = solver
        self.data = data
        # Reusable Function holding the current parameter iterate.
        self.z = dl.Function(V)

    def cost_function(self, z_v):
        """Least-squares misfit between predicted and observed QoIs for
        the nodal parameter vector z_v."""
        self.z.vector().set_local(z_v)
        w, y, A, B, C = self.solver.forward(self.z)
        y = self.solver.qoi_operator(w)
        misfit = y - self.data
        # Regularization term disabled: + dl.assemble(self.solver.reg)
        return 0.5 * np.linalg.norm(misfit)**2
コード例 #14
0
# Module-level setup: spaces, DL error model, ROM, and a synthetic "true"
# conductivity field loaded from disk.
# NOTE(review): `randobs` is not defined in this snippet — it must come
# from earlier in the original file.
resolution = 40
V = get_space(resolution)
chol = make_cov_chol(V, length=1.6)

# Setup DL error model
#  err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())
err_model = load_bn_model(randobs)
surrogate_model = load_surrogate_model(randobs)

# Initialize reduced order model
phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=",")
solver_r = AffineROMFin(V, err_model, phi, randobs)

# Setup synthetic observations
solver = Fin(V, randobs)
z_true = dl.Function(V)

# Prior Cholesky factor (L is unused below; kept for the commented draw).
prior_covariance = np.load('prior_covariance_0.07_0.07.npy')
L = np.linalg.cholesky(prior_covariance)
#  draw = np.random.randn(V.dim())
#  nodal_vals = np.dot(L, draw)

# Load random Gaussian field from a fixed file for reproducibility
nodal_vals = np.load('res_x.npy')
#  nodal_vals = np.exp(nodal_vals)/np.sum(np.exp(nodal_vals)) + 1.0

# For exp parametrization
#  nodal_vals = np.log(nodal_vals)

コード例 #15
0
# ROMML imports
from fom.forward_solve import Fin, get_space
from muq_mod_five_param import ROM_forward, DL_ROM_forward, FOM_forward

# Build the three forward models (pure ROM, DL-corrected ROM, full-order)
# and synthetic observations from a fixed "true" parameter vector.
resolution = 40
r_fwd = ROM_forward(resolution, out_type="subfin_avg")
d_fwd = DL_ROM_forward(resolution, out_type="subfin_avg")
f_fwd = FOM_forward(resolution, out_type="subfin_avg")

#z_true = np.random.uniform(0.1,1, (1,5))
# Fixed draw so the MCMC experiments below are reproducible.
z_true = np.array(
    [[0.41126864, 0.61789679, 0.75873243, 0.96527541, 0.22348076]])

V = get_space(resolution)
full_solver = Fin(V)
# Full-order solve of the true parameters; its QoIs serve as the data.
w, y, A, B, C = full_solver.forward_five_param(z_true[0, :])
qoi = full_solver.qoi_operator(w)
obsData = qoi


def MCMC_sample(fwd):
    # Run an MCMC chain against the given forward model `fwd`.
    # NOTE(review): this function is truncated in this snippet — the
    # sampler construction and return are not visible here.

    # Define prior: isotropic Gaussian over the 5 log-parameters.
    logPriorMu = 0.5 * np.ones(5)
    logPriorCov = 0.5 * np.eye(5)

    logPrior = mm.Gaussian(logPriorMu, logPriorCov).AsDensity()

    # Likelihood: i.i.d. Gaussian observation noise on each QoI entry.
    noiseVar = 1e-4
    noiseCov = noiseVar * np.eye(obsData.size)
コード例 #16
0
        #  self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2 + dl.assemble(self.solver.reg)
        self.cost = 0.5 * np.linalg.norm(y_r - self.data)**2
        return self.cost

    def gradient(self, z_v):
        """Gradient of the reduced-model cost w.r.t. the nodal parameter
        vector z_v; also caches the cost as a side effect."""
        self.z.vector().set_local(z_v)
        # Keep the full solver's conductivity in sync with the iterate.
        self.solver._k.assign(self.z)
        self.grad, self.cost = self.solver_r.grad_reduced(self.z)
        # Regularization gradient disabled:
        #  self.grad = self.grad + dl.assemble(self.solver.grad_reg)
        return self.grad


# Module-level setup: synthetic observations from a random log-Gaussian
# conductivity field, plus the DL error model and ROM basis.
resolution = 40
V = get_space(resolution)
chol = make_cov_chol(V, length=1.2)
solver = Fin(V, True)

# Generate synthetic observations from a random "true" field.
z_true = dl.Function(V)
norm = np.random.randn(len(chol))
nodal_vals = np.exp(0.5 * chol.T @ norm)  # log-Gaussian sample
z_true.vector().set_local(nodal_vals)
w, y, A, B, C = solver.forward(z_true)
data = solver.qoi_operator(w)

# Setup DL error model
#  err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())
err_model = load_bn_model()

# Initialize reduced order model
phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")