Example #1
    def __init__(self, resolution=40, out_type="total_avg"):
        """ 
        INPUTS:
     
        """
        V = get_space(resolution)
        dofs = len(V.dofmap().dofs())
        self.solver = Fin(V)
        self.phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
        self.phi = self.phi[:, 0:10]
        self.model = load_parametric_model('relu', Adam, 0.004, 6, 50, 150,
                                           600)

        self.out_type = out_type

        if out_type == "total_avg":
            out_dim = 1
        elif out_type == "subfin_avg":
            out_dim = 5
        elif out_type == "rand_pt":
            out_dim = 1
        elif out_type == "rand_pts":
            out_dim = 5

        mm.PyModPiece.__init__(self, [5], [out_dim])
Example #2
    def __init__(self, V, chol, randobs):
        '''
        Parameters:
            V - FEniCS FunctionSpace
            chol - Cholesky factor of the covariance defining the Gaussian field over V
            randobs - observation-type flag passed to the forward solvers
        '''
        self._V = V
        self._solver = Fin(self._V, randobs)
        self._pred_k = dl.Function(self._V)

        # Setup synthetic observations
        self.k_true = dl.Function(self._V)

        # Random Gaussian field as true function
        #  norm = np.random.randn(len(chol))
        #  nodal_vals = np.exp(0.5 * chol.T @ norm)

        nodal_vals = np.load('res_x.npy')
        self.k_true.vector().set_local(nodal_vals)

        w, y, A, B, C = self._solver.forward(self.k_true)
        self.obs_data = self._solver.qoi_operator(w)

        # Setup DL error model
        #  self._err_model = load_parametric_model_avg('elu', Adam,
        #0.0003, 5, 58, 200, 2000, V.dim())
        self._err_model = load_bn_model(randobs)

        # Initialize reduced order model
        self.phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")
        self._solver_r = AffineROMFin(self._V, self._err_model, self.phi,
                                      randobs)
        self._solver_r.set_data(self.obs_data)
Example #3
class FOM_forward(mm.PyModPiece):
    """
    Solves the thermal fin steady state problem with
    a full order model
    """
    def __init__(self, resolution=40, out_type="total_avg"):
        """ 
        INPUTS:
     
        """
        V = get_space(resolution)
        dofs = len(V.dofmap().dofs())
        self.solver = Fin(V)
        self.out_type = out_type

        if out_type == "total_avg":
            out_dim = 1
        elif out_type == "subfin_avg":
            out_dim = 5
        elif out_type == "rand_pt":
            out_dim = 1
        elif out_type == "rand_pts":
            out_dim = 5
        mm.PyModPiece.__init__(self, [5], [out_dim])

    def EvaluateImpl(self, inputs):
        """
        Performs the forward solve and returns observations.
        
        """
        z = inputs[0]

        x, y, A, B, C = self.solver.forward_five_param(z)
        output = self.solver.qoi_operator(x)
        self.outputs = [output]
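
A hypothetical usage sketch (not from the original source): since FOM_forward is a MUQ ModPiece, it can be driven through MUQ's Evaluate interface, assuming the thermal fin modules and the MUQ Python bindings (imported as mm, as elsewhere in these examples) are available.

import numpy as np

# Illustrative only: construct the full-order forward map and evaluate it once.
fom = FOM_forward(resolution=40, out_type="subfin_avg")
z = np.random.uniform(0.1, 1, 5)   # five thermal conductivity parameters
qoi = fom.Evaluate([z])[0]         # MUQ dispatches to EvaluateImpl
print(qoi)                         # subfin-averaged QoI, expected length 5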
Example #4
def generate(dataset_size, resolution=40):
    '''
    Create a TensorFlow dataset where the features are thermal conductivity parameters
    and the labels are the differences in the quantity of interest between the
    high-fidelity model and the reduced-order model (i.e., the ROM error).

    Arguments:
        dataset_size - number of feature-label pairs
        resolution   - finite element mesh resolution for the high-fidelity model

    Returns:
        dataset      - TensorFlow dataset created from tensor slices
    '''

    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())

    # TODO: Improve this by using mass matrix covariance. Bayesian prior may work well too
    z_s = np.random.uniform(0.1, 1, (dataset_size, dofs))
    phi = np.loadtxt('data/basis.txt', delimiter=",")
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))

    m = Function(V)
    for i in range(dataset_size):
        m.vector().set_local(z_s[i, :])
        w, y, A, B, C = solver.forward(m)
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[i][0] = y - y_r

    dataset = tf.data.Dataset.from_tensor_slices((z_s, errors))

    return dataset
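
A short usage sketch (illustrative only, assuming 'data/basis.txt' and the FEniCS setup above are in place, and TensorFlow 2 eager execution) showing how the returned dataset might be shuffled and batched for training:

dataset = generate(dataset_size=256, resolution=40)
dataset = dataset.shuffle(256).batch(32)
for z_batch, err_batch in dataset.take(1):
    print(z_batch.shape, err_batch.shape)   # (32, dofs) features, (32, 1) ROM errors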
Example #5
class DL_ROM_forward(mm.PyModPiece):
    """
    Solves the thermal fin steady state problem with 
    projection based ROM with a given basis and augments
    QoI prediction with deep learning prediciton.
    """
    def __init__(self, resolution=40, out_type="total_avg"):
        """ 
        INPUTS:
     
        """
        V = get_space(resolution)
        dofs = len(V.dofmap().dofs())
        self.solver = Fin(V)
        self.phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
        self.phi = self.phi[:, 0:10]
        self.model = load_parametric_model('relu', Adam, 0.004, 6, 50, 150,
                                           600)

        self.out_type = out_type

        if out_type == "total_avg":
            out_dim = 1
        elif out_type == "subfin_avg":
            out_dim = 5
        elif out_type == "rand_pt":
            out_dim = 1
        elif out_type == "rand_pts":
            out_dim = 5

        mm.PyModPiece.__init__(self, [5], [out_dim])

    def EvaluateImpl(self, inputs):
        """
        Performs the forward solve and returns observations.
        
        """
        z = inputs[0]
        A_r, B_r, C_r, x_r, y_r = self.solver.r_fwd_no_full_5_param(
            z, self.phi)
        e_NN = self.model.predict(z.reshape((1, 5)))

        if self.out_type == "total_avg":
            output = np.array([y_r + e_NN[0, 0]])
        else:
            # The QoI operator determines whether we look at subfin averages
            # or random points on the boundary or domain
            output = self.solver.reduced_qoi_operator(x_r) + e_NN[0]

        self.outputs = [output]
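
A hypothetical usage sketch of this corrected forward map, assuming the saved basis and Keras model referenced in __init__ exist and the MUQ bindings are imported as mm:

import numpy as np

dl_fwd = DL_ROM_forward(resolution=40, out_type="total_avg")
z = np.random.uniform(0.1, 1, 5)   # five-parameter conductivity sample
qoi = dl_fwd.Evaluate([z])[0]      # reduced-order QoI plus the NN error estimate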
Example #6
def gen_affine_avg_rom_dataset(dataset_size, resolution=40, genrand=False):
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())
    z = Function(V)
    solver = Fin(V, genrand)
    phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")

    chol = make_cov_chol(V, length=1.6)
    #  prior_cov = np.load('../bayesian_inference/prior_covariance_0.07_0.07.npy')
    #  L = np.linalg.cholesky(prior_cov)

    #  err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())
    err_model = res_bn_fc_model(ELU(), Adam, 3e-5, 3, 100, 1446, solver.n_obs)

    solver_r = AffineROMFin(V, err_model, phi, genrand)
    qoi_errors = np.zeros((dataset_size, solver_r.n_obs))
    qois = np.zeros((dataset_size, solver_r.n_obs))

    # TODO: Needs to be fixed for higher order functions
    z_s = np.zeros((dataset_size, V.dim()))

    for i in tqdm(range(dataset_size)):
        #  draw = np.random.randn(dofs)
        #  nodal_vals = np.exp(np.dot(L, draw))

        norm = np.random.randn(V.dim())
        nodal_vals = np.exp(0.5 * chol.T @ norm)

        z.vector().set_local(nodal_vals)
        z_s[i, :] = nodal_vals

        x, y, A, B, C = solver.forward(z)
        w_r = solver_r.forward_reduced(z)

        qoi = solver.qoi_operator(x)
        qoi_r = solver_r.qoi_reduced(w_r)

        qoi_errors[i, :] = qoi - qoi_r
        qois[i, :] = qoi

    if (dataset_size > 1000):
        np.save('../data/z_aff_avg_tr_avg_obs_3', z_s)
        np.save('../data/errors_aff_avg_tr_avg_obs_3', qoi_errors)
        np.save('../data/qois_avg_tr_avg_obs_3', qois)

    if (dataset_size < 600):
        np.save('../data/z_aff_avg_eval_avg_obs_3', z_s)
        np.save('../data/errors_aff_avg_eval_avg_obs_3', qoi_errors)
        np.save('../data/qois_avg_eval_avg_obs_3', qois)
    return (z_s, qoi_errors)
Example #7
def generate_and_save_dataset(dataset_size, resolution=40):
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())
    z_s = np.random.uniform(0.1, 1, (dataset_size, dofs))
    phi = np.loadtxt('data/basis.txt', delimiter=",")
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))

    m = Function(V)
    for i in range(dataset_size):
        m.vector().set_local(z_s[i, :])
        w, y, A, B, C = solver.forward(m)
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[i][0] = y - y_r

    np.savetxt('../data/z_s_train.txt', z_s, delimiter=",")
    np.savetxt('../data/errors_train.txt', errors, delimiter=",")
Example #8
def generate_five_param_np(dataset_size, resolution=40):
    V = get_space(resolution)
    z_s = np.random.uniform(0.1, 1, (dataset_size, 5))
    phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    phi = phi[:, 0:10]
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))
    y_s = np.zeros((dataset_size, 1))
    y_r_s = np.zeros((dataset_size, 1))

    for i in range(dataset_size):
        w, y, A, B, C = solver.forward_five_param(z_s[i, :])
        y_s[i][0] = y
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        y_r_s[i][0] = y_r
        errors[i][0] = y - y_r

    return (z_s, errors)
Example #9
    def __init__(self, resolution=40, out_type="total_avg"):
        """ 
        INPUTS:
     
        """
        V = get_space(resolution)
        dofs = len(V.dofmap().dofs())
        self.solver = Fin(V)
        self.out_type = out_type

        if out_type == "total_avg":
            out_dim = 1
        elif out_type == "subfin_avg":
            out_dim = 5
        elif out_type == "rand_pt":
            out_dim = 1
        elif out_type == "rand_pts":
            out_dim = 5
        mm.PyModPiece.__init__(self, [5], [out_dim])
Example #10
def generate_DL_only_dataset(dataset_size, resolution=40):
    '''
    Create a dataset where the features are thermal conductivity parameters
    and the labels are the quantities of interest of the high-fidelity model.

    Arguments:
        dataset_size - number of feature-label pairs
        resolution   - finite element mesh resolution for the high-fidelity model

    Returns:
        (z, qois)    - pairs of conductivities and QoIs
    '''

    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())

    prior_cov = np.load('bayesian_inference/prior_covariance.npy')
    L = np.linalg.cholesky(prior_cov)

    # TODO: Improve this by using mass matrix covariance. Bayesian prior may work well too
    z_s = np.zeros((dataset_size, dofs))
    solver = Fin(V, True)
    qois = np.zeros((dataset_size, 40))
    k = Function(V)

    for i in range(dataset_size):
        draw = np.random.randn(dofs)
        prior_draw = np.dot(L, draw)
        k.vector().set_local(prior_draw)
        w, _, _, _, _ = solver.forward(k)
        qois[i, :] = solver.qoi_operator(w)
        z_s[i, :] = prior_draw

    if (dataset_size > 1000):
        np.savetxt('data/z_dlo_tr.txt', z_s, delimiter=",")
        np.savetxt('data/qois_dlo_tr.txt', qois, delimiter=",")

    if (dataset_size < 400):
        np.savetxt('data/z_dlo_eval.txt', z_s, delimiter=",")
        np.savetxt('data/qois_dlo_eval.txt', qois, delimiter=",")

    return (z_s, qois)
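
A minimal usage sketch (assuming 'bayesian_inference/prior_covariance.npy' is present); the shapes in the comment follow the arrays allocated above:

z_eval, qois_eval = generate_DL_only_dataset(dataset_size=200, resolution=40)
print(z_eval.shape, qois_eval.shape)   # (200, dofs) and (200, 40)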
Example #11
def gen_affine_avg_rom_dataset(dataset_size, resolution=40, genrand=False):
    V = get_space(resolution)
    chol = make_cov_chol(V, length=1.6)
    z = Function(V)
    solver = Fin(V, genrand)
    phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")

    #  err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())
    err_model = res_bn_fc_model(ELU(), Adam, 3e-5, 3, 200, 1446, 40)

    solver_r = AffineROMFin(V, err_model, phi, genrand)
    qoi_errors = np.zeros((dataset_size, solver_r.n_obs))

    # TODO: Needs to be fixed for higher order functions
    z_s = np.zeros((dataset_size, V.dim()))

    for i in tqdm(range(dataset_size)):
        norm = np.random.randn(len(chol))
        nodal_vals = np.exp(0.5 * chol.T @ norm)
        z.vector().set_local(nodal_vals)
        z_s[i, :] = nodal_vals

        x, y, A, B, C = solver.forward(z)
        w_r = solver_r.forward_reduced(z)

        qoi = solver.qoi_operator(x)
        qoi_r = solver_r.qoi_reduced(w_r)

        qoi_errors[i, :] = qoi - qoi_r

    if (dataset_size > 1000):
        np.savetxt('../data/z_aff_avg_tr.txt', z_s, delimiter=",")
        np.savetxt('../data/errors_aff_avg_tr.txt', qoi_errors, delimiter=",")

    if (dataset_size < 400):
        np.savetxt('../data/z_aff_avg_eval.txt', z_s, delimiter=",")
        np.savetxt('../data/errors_aff_avg_eval.txt',
                   qoi_errors,
                   delimiter=",")
    return (z_s, qoi_errors)
Example #12
def gen_avg_rom_dataset(dataset_size, resolution=40):
    V = get_space(resolution)
    chol = make_cov_chol(V)
    z = Function(V)
    solver = Fin(V)
    phi = np.loadtxt('data/basis_nine_param.txt', delimiter=",")
    qoi_errors = np.zeros((dataset_size, 5))

    # TODO: Needs to be fixed for higher order functions
    z_s = np.zeros((dataset_size, V.dim()))

    for i in range(dataset_size):
        norm = np.random.randn(len(chol))
        nodal_vals = np.exp(0.5 * chol.T @ norm)
        z.vector().set_local(nodal_vals)
        z_s[i, :] = nodal_vals
        A_r, B_r, C_r, x_r, y_r = solver.averaged_forward(z, phi)
        x, y, A, B, C = solver.forward(z)
        qoi = solver.qoi_operator(x)
        qoi_r = solver.reduced_qoi_operator(x_r)
        qoi_errors[i, :] = qoi - qoi_r

    if (dataset_size > 1000):
        np.savetxt('data/z_avg_tr.txt', z_s, delimiter=",")
        np.savetxt('data/errors_avg_tr.txt', qoi_errors, delimiter=",")

    if (dataset_size < 400):
        np.savetxt('data/z_avg_eval.txt', z_s, delimiter=",")
        np.savetxt('data/errors_avg_eval.txt', qoi_errors, delimiter=",")
    return (z_s, qoi_errors)
Example #13
def generate_five_param(dataset_size, resolution=40):
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())

    # TODO: Improve this by using mass matrix covariance. Bayesian prior may work well too
    z_s = np.random.uniform(0.1, 1, (dataset_size, 5))
    phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    phi = phi[:, 0:20]
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))

    for i in range(dataset_size):
        w, y, A, B, C = solver.forward_five_param(z_s[i, :])
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[i][0] = y - y_r

    #  np.savetxt('data/z_s_eval.txt', z_s, delimiter=",")
    #  np.savetxt('data/errors_eval.txt', errors, delimiter=",")
    dataset = tf.data.Dataset.from_tensor_slices((z_s, errors))

    return dataset
Example #14
def gen_five_param_subfin_avg(dataset_size, resolution=40):
    V = get_space(resolution)
    z_s = np.random.uniform(0.1, 1, (dataset_size, 5))
    phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    phi = phi[:, 0:10]
    solver = Fin(V)
    errors = np.zeros((dataset_size, 5))
    avgs = np.zeros((dataset_size, 5))
    avgs_r = np.zeros((dataset_size, 5))

    for i in range(dataset_size):
        w, y, A, B, C = solver.forward_five_param(z_s[i, :])
        avgs[i] = solver.qoi_operator(w)
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        avgs_r[i] = solver.reduced_qoi_operator(x_r)
        errors[i] = avgs[i] - avgs_r[i]

    return (z_s, errors)
Example #15

import numpy as np
from deep_learning.dl_model import load_bn_model, load_dataset_avg_rom
from tensorflow.keras.optimizers import Adam
from fom.forward_solve import Fin
from fom.thermal_fin import get_space
from rom.averaged_affine_ROM import AffineROMFin
from dolfin import *

#  model = load_parametric_model('relu', Adam, 0.004, 6, 50, 150, 600)
model = load_bn_model()
z_train, errors_train, z_val, errors_val = load_dataset_avg_rom()
#  z_val = np.loadtxt('data/z_avg_v.txt', delimiter=',')
#  errors_val =  np.loadtxt('data/err_avg_v.txt', delimiter=',')
V = get_space(40)
phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")
#  phi = phi[:,0:10]
solver = Fin(V)
solver_r = AffineROMFin(V, model, phi)
avgs_f = np.zeros((len(z_val), 9))
avgs_r = np.zeros((len(z_val), 9))
avgs_d = np.zeros((len(z_val), 9))
err_pred = np.zeros((len(z_val), 9))
z = Function(V)

for i in range(len(z_val)):
    z.vector().set_local(z_val[i, :])
    z_nodal = z_val[i, :].reshape((1, 1446))
    w, y, A, B, C = solver.forward(z)
    w_r = solver_r.forward_reduced(z)

    avgs_f[i] = solver.qoi_operator(w)
    avgs_r[i] = solver_r.qoi_reduced(w_r)
Example #16

import numpy as np
import matplotlib.pyplot as plt
import dolfin as dl
dl.set_log_level(40)

# ROMML imports
from fom.forward_solve import Fin
from fom.thermal_fin import get_space
from rom.averaged_affine_ROM import AffineROMFin
from deep_learning.dl_model import load_parametric_model_avg
from gaussian_field import make_cov_chol

resolution = 40
V = get_space(resolution)
chol = make_cov_chol(V, length=1.2)
solver = Fin(V)


class SolverWrapper:
    def __init__(self, solver, data):
        self.solver = solver
        self.data = data
        self.z = dl.Function(V)

    def cost_function(self, z_v):
        self.z.vector().set_local(z_v)
        w, y, A, B, C = self.solver.forward(self.z)
        y = self.solver.qoi_operator(w)
        cost = 0.5 * np.linalg.norm(
            y - self.data)**2  #+ dl.assemble(self.solver.reg)
        return cost
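
A hypothetical sketch of how this wrapper might be used: draw synthetic observations from a random log-normal conductivity field and evaluate the data-misfit cost at a trial parameter vector (it reuses V, chol, solver and SolverWrapper defined in this snippet).

# Synthetic observations from a random log-normal conductivity field
norm = np.random.randn(len(chol))
nodal_vals = np.exp(0.5 * chol.T @ norm)
k_true = dl.Function(V)
k_true.vector().set_local(nodal_vals)
w, y, A, B, C = solver.forward(k_true)
data = solver.qoi_operator(w)

# Evaluate the cost at a trial conductivity field (all ones)
wrapper = SolverWrapper(solver, data)
z_trial = np.ones(V.dim())
print(wrapper.cost_function(z_trial))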
Example #17
# Create a fin geometry
geometry = Rectangle(Point(2.5, 0.0), Point(3.5, 4.0)) \
        + Rectangle(Point(0.0, 0.75), Point(2.5, 1.0)) \
        + Rectangle(Point(0.0, 1.75), Point(2.5, 2.0)) \
        + Rectangle(Point(0.0, 2.75), Point(2.5, 3.0)) \
        + Rectangle(Point(0.0, 3.75), Point(2.5, 4.0)) \
        + Rectangle(Point(3.5, 0.75), Point(6.0, 1.0)) \
        + Rectangle(Point(3.5, 1.75), Point(6.0, 2.0)) \
        + Rectangle(Point(3.5, 2.75), Point(6.0, 3.0)) \
        + Rectangle(Point(3.5, 3.75), Point(6.0, 4.0))

mesh = generate_mesh(geometry, 40)

V = FunctionSpace(mesh, 'CG', 1)
dofs = len(V.dofmap().dofs())
solver = Fin(V)

##########################################################
# Basis initialization with dummy solves and POD
##########################################################
samples = 10
Y = np.zeros((samples, dofs))
for i in range(0,samples):

    if i == 0:
        m = interpolate(Expression("0.1 + exp(-(pow(x[0] - 0.5, 2) + pow(x[1], 2)) / 0.01)", degree=2),V)
    elif i == 1:
        m = interpolate(Expression("2*x[0] + 0.1", degree=2), V)
    elif i == 2:
        m = interpolate(Expression("1 + sin(x[0])* sin(x[0])", degree=2), V)
    elif i == 3:
Example #18
import sys
sys.path.append('../')
import dolfin as dl
import numpy as np
import matplotlib.pyplot as plt

from fom.forward_solve import Fin
from fom.thermal_fin import get_space
from bayesian_inference.gaussian_field import make_cov_chol

# Setup solver
V = get_space(40)
solver = Fin(V)
phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")

# Obtain synthetic data
chol = make_cov_chol(V, length=0.8)
k_true = dl.Function(V)
norm = np.random.randn(len(chol))
nodal_vals = np.exp(0.5 * chol.T @ norm)
k_true.vector().set_local(nodal_vals)

w, y, A, B, C = solver.forward(k_true)
data = solver.qoi_operator(w)

# Comparing reduced and full
w, y, A, B, C = solver.forward(k_true)
A_r, B_r, C_r, w_r, y_r = solver.averaged_forward(k_true, phi)
k_true_averaged = solver.nine_param_to_function(solver.subfin_avg_op(k_true))

p = dl.plot(k_true_averaged)
Example #19
resolution = 40
V = get_space(resolution)
chol = make_cov_chol(V, length=1.6)

# Setup DL error model
#  err_model = load_parametric_model_avg('elu', Adam, 0.0003, 5, 58, 200, 2000, V.dim())
err_model = load_bn_model(randobs)
surrogate_model = load_surrogate_model(randobs)

# Initialize reduced order model
phi = np.loadtxt('../data/basis_nine_param.txt',delimiter=",")
solver_r = AffineROMFin(V, err_model, phi, randobs)

# Setup synthetic observations
solver = Fin(V, randobs)
z_true = dl.Function(V)

prior_covariance = np.load('prior_covariance_0.07_0.07.npy')
L = np.linalg.cholesky(prior_covariance)
#  draw = np.random.randn(V.dim())
#  nodal_vals = np.dot(L, draw)

#Load random Gaussian field
nodal_vals = np.load('res_x.npy')
#  nodal_vals = np.exp(nodal_vals)/np.sum(np.exp(nodal_vals)) + 1.0

# For exp parametrization
#  nodal_vals = np.log(nodal_vals)

Example #20

import matplotlib.pyplot as plt
import numpy as np
from dolfin import Function

from tensorflow.keras.optimizers import Adam, RMSprop, Adadelta

from deep_learning.dl_model import load_parametric_model_avg
from fom.forward_solve import Fin, get_space

z_val = np.loadtxt('../data/z_avg_eval.txt', delimiter=',')
errors_val = np.loadtxt('../data/errors_avg_eval.txt', delimiter=',')
V = get_space(40)
model = load_parametric_model_avg('elu', Adam, 0.129, 3, 58, 64, 466, V.dim())
phi = np.loadtxt('../data/basis_five_param.txt', delimiter=",")
#  phi = phi[:,0:10]
solver = Fin(V)
avgs_f = np.zeros((len(z_val), 5))
avgs_r = np.zeros((len(z_val), 5))
avgs_d = np.zeros((len(z_val), 5))
avgs_c = np.zeros((len(z_val), 5))
err_pred = np.zeros((len(z_val), 5))
z = Function(V)
err_pred = model.predict(z_val)
print("Average validation error in pred: {}".format(
    np.average(np.divide(np.abs(errors_val - err_pred), np.abs(errors_val)))))

for i in range(len(z_val)):
    nodal_vals = z_val[i, :]
    z.vector().set_local(nodal_vals)
    _, _, _, x_r, y_r = solver.averaged_forward(z, phi)
    x, y, A, B, C = solver.forward(z)
Example #21
class SqError:
    '''
    Wrapper class interfacing Theano operators and ROMML
    to compute forward solves and parameter gradients
    '''
    def __init__(self, V, chol, randobs):
        '''
        Parameters:
            V - FEniCS FunctionSpace
            chol - Cholesky factor of the covariance defining the Gaussian field over V
            randobs - observation-type flag passed to the forward solvers
        '''
        self._V = V
        self._solver = Fin(self._V, randobs)
        self._pred_k = dl.Function(self._V)

        # Setup synthetic observations
        self.k_true = dl.Function(self._V)

        # Random Gaussian field as true function
        #  norm = np.random.randn(len(chol))
        #  nodal_vals = np.exp(0.5 * chol.T @ norm)

        nodal_vals = np.load('res_x.npy')
        self.k_true.vector().set_local(nodal_vals)

        w, y, A, B, C = self._solver.forward(self.k_true)
        self.obs_data = self._solver.qoi_operator(w)

        # Setup DL error model
        #  self._err_model = load_parametric_model_avg('elu', Adam,
        #0.0003, 5, 58, 200, 2000, V.dim())
        self._err_model = load_bn_model(randobs)

        # Initialize reduced order model
        self.phi = np.loadtxt('../data/basis_nine_param.txt', delimiter=",")
        self._solver_r = AffineROMFin(self._V, self._err_model, self.phi,
                                      randobs)
        self._solver_r.set_data(self.obs_data)

    def err_grad_FOM(self, pred_k):
        '''
        For a given parameter, computes the high fidelity forward solution
        and the gradient with respect to the cost function
        '''
        self._pred_k.vector().set_local(pred_k)
        w, y, a, b, c = self._solver.forward(self._pred_k)
        qoi = self._solver.qoi_operator(w)
        err = np.square(qoi - self.obs_data).sum() / 2.0
        grad = self._solver.gradient(self._pred_k, self.obs_data)
        return err, grad

    def err_grad_ROM(self, pred_k):
        '''
        For a given parameter, computes the reduced-order forward solution
        and the gradient with respect to the cost function
        '''
        self._pred_k.vector().set_local(pred_k)
        w_r = self._solver_r.forward_reduced(self._pred_k)
        qoi_r = self._solver_r.qoi_reduced(w_r)
        err_r = np.square(qoi_r - self.obs_data).sum() / 2.0
        grad_r = self._solver_r.grad_reduced(self._pred_k)
        return err_r, grad_r

    def err_grad_ROMML(self, pred_k):
        '''
        For a given parameter, computes the reduced-order + ML forward solution
        and the gradient with respect to the cost function
        '''
        self._pred_k.vector().set_local(pred_k)
        #  w_r = self._solver_r.forward_reduced(self._pred_k)
        #  qoi_r = self._solver_r.qoi_reduced(w_r)
        #  err_NN = self._err_model.predict([[pred_k]])[0]
        #  qoi_t = qoi_r + err_NN
        #  err_t = np.square(qoi_t - self.obs_data).sum()/2.0
        grad_t, err_t = self._solver_r.grad_romml(self._pred_k)
        return err_t, grad_t
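
A hypothetical usage sketch comparing the three misfit/gradient routines at a single parameter guess; it assumes get_space and make_cov_chol as imported in the other snippets and the 'res_x.npy' observation file used in __init__.

import numpy as np
from fom.thermal_fin import get_space
from gaussian_field import make_cov_chol   # import path as used in the other snippets

V = get_space(40)
chol = make_cov_chol(V, length=1.6)
sq_err = SqError(V, chol, randobs=True)

k_guess = np.ones(V.dim())
cost_fom, grad_fom = sq_err.err_grad_FOM(k_guess)        # full-order misfit and gradient
cost_rom, grad_rom = sq_err.err_grad_ROM(k_guess)        # reduced-order misfit and gradient
cost_romml, grad_romml = sq_err.err_grad_ROMML(k_guess)  # ROM + ML-corrected misfit and gradient
print(cost_fom, cost_rom, cost_romml)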
Example #22

import numpy as np

# ROMML imports
from fom.forward_solve import Fin, get_space
from muq_mod_five_param import ROM_forward, DL_ROM_forward, FOM_forward

resolution = 40
r_fwd = ROM_forward(resolution, out_type="subfin_avg")
d_fwd = DL_ROM_forward(resolution, out_type="subfin_avg")
f_fwd = FOM_forward(resolution, out_type="subfin_avg")

#z_true = np.random.uniform(0.1,1, (1,5))
z_true = np.array(
    [[0.41126864, 0.61789679, 0.75873243, 0.96527541, 0.22348076]])

V = get_space(resolution)
full_solver = Fin(V)
w, y, A, B, C = full_solver.forward_five_param(z_true[0, :])
qoi = full_solver.qoi_operator(w)
obsData = qoi


def MCMC_sample(fwd):
    # Define prior
    logPriorMu = 0.5 * np.ones(5)
    logPriorCov = 0.5 * np.eye(5)

    logPrior = mm.Gaussian(logPriorMu, logPriorCov).AsDensity()

    # Likelihood
    noiseVar = 1e-4
    noiseCov = noiseVar * np.eye(obsData.size)
Example #23
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.python.framework import ops

tf.keras.backend.set_floatx('float64')
tf.compat.v1.disable_eager_execution()

from fom.forward_solve import Fin
from fom.thermal_fin import get_space
import dolfin as dl
dl.set_log_level(40)

# Create FunctionSpace
V = get_space(40)

# Create FEniCS forward solver with surface obs as QoI
solver = Fin(V, True)
U_func = dl.Function(V)


def _py_func_with_gradient(func,
                           inp,
                           Tout,
                           stateful=True,
                           name=None,
                           grad_func=None):
    """
    PyFunc defined as given by Tensorflow
    :param func: Custom Function
    :param inp: Function Inputs
    :param Tout: Output type of the custom function
    :param stateful: Calculate Gradients when stateful is True