def __init__(self, batch_size, resolution):
    self.resolution = resolution
    self.V = get_space(resolution)
    self.dofs = len(self.V.dofmap().dofs())
    self.phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    self.batch_size = batch_size
    self.solver = Fin(self.V)
def __init__(self, resolution=40, out_type="total_avg"):
    """
    INPUTS:
        resolution - finite element mesh resolution for the high fidelity model
        out_type   - quantity of interest type: "total_avg", "subfin_avg",
                     "rand_pt", or "rand_pts"
    """
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())
    self.solver = Fin(V)
    self.phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    self.phi = self.phi[:, 0:10]
    self.model = load_parametric_model('relu', Adam, 0.004, 6, 50, 150, 600)
    self.out_type = out_type

    if out_type == "total_avg":
        out_dim = 1
    elif out_type == "subfin_avg":
        out_dim = 5
    elif out_type == "rand_pt":
        out_dim = 1
    elif out_type == "rand_pts":
        out_dim = 5

    mm.PyModPiece.__init__(self, [5], [out_dim])
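# Illustrative sketch (assumption, not from the original source): assuming
# load_parametric_model returns a Keras model trained to map a 5-dimensional
# thermal conductivity parameter vector to the ROM error, a corrected QoI
# prediction might look like:
#     y_corrected = y_r + self.model.predict(z.reshape((1, 5)))[0]
# where y_r is the reduced order QoI and z the parameter sample. The exact
# output dimension depends on out_type.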
def generate(dataset_size, resolution=40):
    '''
    Create a tensorflow dataset where the features are thermal conductivity
    parameters and the labels are the differences in the quantity of interest
    between the high fidelity model and the reduced order model (this is the
    ROM error).

    Arguments:
        dataset_size - number of feature-label pairs
        resolution   - finite element mesh resolution for the high fidelity model

    Returns:
        dataset      - Tensorflow dataset created from tensor slices
    '''
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())

    # TODO: Improve this by using mass matrix covariance. Bayesian prior may work well too
    z_s = np.random.uniform(0.1, 1, (dataset_size, dofs))
    phi = np.loadtxt('data/basis.txt', delimiter=",")
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))
    m = Function(V)

    for i in range(dataset_size):
        m.vector().set_local(z_s[i, :])
        w, y, A, B, C = solver.forward(m)
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[i][0] = y - y_r

    dataset = tf.data.Dataset.from_tensor_slices((z_s, errors))
    return dataset
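# Illustrative usage sketch (not part of the original source): consume the
# dataset returned by generate(). Assumes TensorFlow 2 eager execution; the
# function name, dataset size, shuffle buffer, and batch size are arbitrary
# choices for demonstration.
def example_generate_usage():
    dataset = generate(dataset_size=64, resolution=40)
    for z_batch, err_batch in dataset.shuffle(64).batch(16):
        print(z_batch.shape, err_batch.shape)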
def generate_and_save_dataset(dataset_size, resolution=40):
    '''
    Same sampling loop as generate(), but writes the sampled parameters and
    ROM errors to 'data/z_s_train.txt' and 'data/errors_train.txt' instead of
    returning a dataset.
    '''
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())
    z_s = np.random.uniform(0.1, 1, (dataset_size, dofs))
    phi = np.loadtxt('data/basis.txt', delimiter=",")
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))
    m = Function(V)

    for i in range(dataset_size):
        m.vector().set_local(z_s[i, :])
        w, y, A, B, C = solver.forward(m)
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[i][0] = y - y_r

    np.savetxt('data/z_s_train.txt', z_s, delimiter=",")
    np.savetxt('data/errors_train.txt', errors, delimiter=",")
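# Illustrative sketch (not part of the original source): reload the arrays
# written by generate_and_save_dataset() and rebuild a dataset from them,
# mirroring the tf.data.Dataset.from_tensor_slices call used in generate().
# The helper name is hypothetical; the reshape assumes the errors were saved
# as a single column.
def load_saved_dataset():
    z_s = np.loadtxt('data/z_s_train.txt', delimiter=",")
    errors = np.loadtxt('data/errors_train.txt', delimiter=",").reshape((-1, 1))
    return tf.data.Dataset.from_tensor_slices((z_s, errors))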
def __init__(self, resolution=40, out_type="total_avg"):
    """
    INPUTS:
        resolution - finite element mesh resolution for the high fidelity model
        out_type   - quantity of interest type: "total_avg", "subfin_avg",
                     "rand_pt", or "rand_pts"
    """
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())
    self.solver = Fin(V)
    self.out_type = out_type

    if out_type == "total_avg":
        out_dim = 1
    elif out_type == "subfin_avg":
        out_dim = 5
    elif out_type == "rand_pt":
        out_dim = 1
    elif out_type == "rand_pts":
        out_dim = 5

    mm.PyModPiece.__init__(self, [5], [out_dim])
def gen_five_param_subfin_avg(dataset_size, resolution=40):
    V = get_space(resolution)
    z_s = np.random.uniform(0.1, 1, (dataset_size, 5))
    phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    phi = phi[:, 0:10]
    solver = Fin(V)
    errors = np.zeros((dataset_size, 5))
    avgs = np.zeros((dataset_size, 5))
    avgs_r = np.zeros((dataset_size, 5))

    for i in range(dataset_size):
        w, y, A, B, C = solver.forward_five_param(z_s[i, :])
        avgs[i] = solver.qoi_operator(w)
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        avgs_r[i] = solver.reduced_qoi_operator(x_r)
        errors[i] = avgs[i] - avgs_r[i]

    return (z_s, errors)
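# Illustrative sketch (not part of the original source): summarize the
# per-subfin ROM error returned by gen_five_param_subfin_avg(). Pure NumPy;
# the function name and sample size are arbitrary choices for demonstration.
def example_subfin_error_summary():
    z_s, errors = gen_five_param_subfin_avg(dataset_size=50)
    print("Mean absolute error per subfin:", np.mean(np.abs(errors), axis=0))
    print("Max absolute error:", np.max(np.abs(errors)))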
def generate_five_param_np(dataset_size, resolution=40):
    V = get_space(resolution)
    z_s = np.random.uniform(0.1, 1, (dataset_size, 5))
    phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    phi = phi[:, 0:10]
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))
    y_s = np.zeros((dataset_size, 1))
    y_r_s = np.zeros((dataset_size, 1))

    for i in range(dataset_size):
        w, y, A, B, C = solver.forward_five_param(z_s[i, :])
        y_s[i][0] = y
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        y_r_s[i][0] = y_r
        errors[i][0] = y - y_r

    return (z_s, errors)
def generate_five_param(dataset_size, resolution=40):
    V = get_space(resolution)
    dofs = len(V.dofmap().dofs())

    # TODO: Improve this by using mass matrix covariance. Bayesian prior may work well too
    z_s = np.random.uniform(0.1, 1, (dataset_size, 5))
    phi = np.loadtxt('data/basis_five_param.txt', delimiter=",")
    phi = phi[:, 0:20]
    solver = Fin(V)
    errors = np.zeros((dataset_size, 1))

    for i in range(dataset_size):
        w, y, A, B, C = solver.forward_five_param(z_s[i, :])
        psi = np.dot(A, phi)
        A_r, B_r, C_r, x_r, y_r = solver.reduced_forward(A, B, C, psi, phi)
        errors[i][0] = y - y_r

    # np.savetxt('data/z_s_eval.txt', z_s, delimiter=",")
    # np.savetxt('data/errors_eval.txt', errors, delimiter=",")
    dataset = tf.data.Dataset.from_tensor_slices((z_s, errors))
    return dataset
# MUQ Includes
import sys
sys.path.insert(0, '/home/fenics/Installations/MUQ_INSTALL/lib')
import pymuqModeling as mm  # Needed for Gaussian distribution
import pymuqApproximation as ma  # Needed for Gaussian processes
import pymuqSamplingAlgorithms as ms  # Needed for MCMC

import numpy as np  # Used throughout this script

# Project-local helpers (Fin, get_space, ROM_forward, DL_ROM_forward,
# FOM_forward) are assumed to be imported elsewhere in this module.

resolution = 40
r_fwd = ROM_forward(resolution, out_type="subfin_avg")
d_fwd = DL_ROM_forward(resolution, out_type="subfin_avg")
f_fwd = FOM_forward(resolution, out_type="subfin_avg")

# z_true = np.random.uniform(0.1, 1, (1, 5))
z_true = np.array([[0.41126864, 0.61789679, 0.75873243, 0.96527541, 0.22348076]])

V = get_space(resolution)
full_solver = Fin(V)
w, y, A, B, C = full_solver.forward_five_param(z_true[0, :])
qoi = full_solver.qoi_operator(w)
obsData = qoi

def MCMC_sample(fwd):
    # Define prior
    logPriorMu = 0.5 * np.ones(5)
    logPriorCov = 0.5 * np.eye(5)
    logPrior = mm.Gaussian(logPriorMu, logPriorCov).AsDensity()

    # Likelihood
    noiseVar = 1e-4
    noiseCov = noiseVar * np.eye(obsData.size)
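    # Sketch of a possible continuation (assumption, not in the original
    # script): the likelihood density could follow the same Gaussian
    # construction used for the prior above, centered at the observed QoI.
    # likelihood = mm.Gaussian(obsData, noiseCov).AsDensity()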