def reduced_error(mu, N):
    """Return the pointwise error between the truth and reduced solutions at mu.

    Both solutions are loaded from disk on a freshly read mesh; the result is
    the block-function difference truth - reduced.
    """
    (mesh, _, _, restrictions) = read_mesh()
    W = generate_block_function_space(mesh, restrictions)
    # Load the high-fidelity snapshot for this parameter value
    truth = BlockFunction(W)
    read_solution(mu, "truth_solve", truth)
    # Load the corresponding reduced-order solution
    reduced = BlockFunction(W)
    read_solution(mu, "reduced_solve", reduced)
    return truth - reduced
def reconstruct_solution(reduced_solution, N):
    """Expand a reduced-order solution back onto the full block function space.

    For every component, the stored basis functions (truncated at N) are
    combined with the reduced coefficients, and the result is assigned into
    the corresponding sub-function of the reconstruction.
    """
    (mesh, _, _, restrictions) = read_mesh()
    W = generate_block_function_space(mesh, restrictions)
    reconstructed = BlockFunction(W)
    basis = read_basis_functions(W, N)
    for component in components:
        # basis * coefficients yields a full-order function for this component
        combination = basis[component] * reduced_solution[component]
        assign(reconstructed.sub(component), combination.sub(component))
    # Propagate the per-component assignments into the block vector
    reconstructed.apply("from subfunctions")
    return reconstructed
def assert_functions_manipulations(functions, block_V):
    """Sanity-check conversions between plain Functions and BlockFunctions.

    Verifies (a) that assigning a list of Functions into a BlockFunction
    preserves the data, and (b) that block_assign produces an exact copy,
    both sub-function by sub-function and on the monolithic block vector.
    """
    total = len(functions)
    assert total in (1, 2)
    # a) Convert from a list of Functions to a BlockFunction
    block_function_a = BlockFunction(block_V)
    for idx, fun in enumerate(functions):
        assign(block_function_a.sub(idx), fun)
    # Block vector should have received the data stored in the list of Functions
    expected = functions[0] if total == 1 else (functions[0], functions[1])
    assert_block_functions_equal(expected, block_function_a, block_V)
    # b) Test block_assign
    block_function_b = BlockFunction(block_V)
    block_assign(block_function_b, block_function_a)
    # Each sub function should now contain the same data as the original block function
    for idx in range(total):
        assert array_equal(block_function_b.sub(idx).vector().get_local(),
                           block_function_a.sub(idx).vector().get_local())
    # The two block vectors should store the same data
    assert array_equal(block_function_b.block_vector().get_local(),
                       block_function_a.block_vector().get_local())
def perform_POD(N):
    """Run a per-component POD on the stored truth snapshots and save the basis.

    Reads every snapshot of the training set, compresses each component to
    exactly N modes, and writes the basis, the eigenvalues, and the component
    names (for the ParaView plugin) under the "basis" directory.
    """
    # export mesh - instead of generating mesh everytime
    (mesh, _, _, restrictions) = read_mesh()
    W = generate_block_function_space(mesh, restrictions)
    # One POD object per component, each with its own inner product
    X = get_inner_products(W, "POD")
    POD = {component: ProperOrthogonalDecomposition(W, X[component])
           for component in components}
    # Reusable storage for the snapshot being read
    snapshot = BlockFunction(W)
    training_set = get_set("training_set")
    # Feed every training snapshot to each component's POD object
    for mu in training_set:
        print("Appending solution for mu =", mu, "to snapshots matrix")
        read_solution(mu, "truth_solve", snapshot)
        for component in components:
            POD[component].store_snapshot(snapshot, component=component)
    # Compress component by component; tol=0. forces exactly N modes
    component_bases = dict()
    for component in components:
        _, _, component_bases[component], retained = POD[component].apply(N, tol=0.)
        assert retained == N
        print("Eigenvalues for component", component)
        POD[component].print_eigenvalues(N)
        POD[component].save_eigenvalues_file("basis", "eigenvalues_" + component)
    # Collect all components and save to file
    basis_functions = BasisFunctionsMatrix(W)
    basis_functions.init(components)
    for component in components:
        basis_functions.enrich(component_bases[component], component=component)
    basis_functions.save("basis", "basis")
    # Also save components to file, for the sake of the ParaView plugin
    with open(os.path.join("basis", "components"), "w") as components_file:
        for component in components:
            components_file.write(component + "\n")
def apply_bc_and_block_bc_vector_non_linear(rhs, block_rhs, block_bcs, block_V):
    """Apply boundary conditions to both a plain rhs and its block counterpart.

    Returns a pair (function(s), block_function) holding the BC values:
    a single (Function, BlockFunction) pair for one block, or
    ((Function, Function), BlockFunction) for two blocks.
    Returns (None, None) when no boundary conditions are given.

    Fix: the original used list comprehensions purely for their side effects
    (`[bc.apply(...) for bc in ...]`), which allocates throwaway lists and is
    an anti-idiom; plain for-loops are used instead. The duplicated
    block-level application is also factored out. Behavior is unchanged.
    """
    if block_bcs is None:
        return (None, None)
    N = len(block_bcs)
    assert N in (1, 2)
    if N == 1:
        # NOTE(review): in the 1-block case `rhs` is used directly, while the
        # 2-block case indexes rhs[0]/rhs[1] — presumably rhs is a bare vector
        # vs. a sequence of vectors; confirm against callers.
        function = Function(block_V[0])
        for bc in block_bcs[0]:
            bc.apply(rhs, function.vector())
        functions = function
    else:
        function1 = Function(block_V[0])
        for bc1 in block_bcs[0]:
            bc1.apply(rhs[0], function1.vector())
        function2 = Function(block_V[1])
        for bc2 in block_bcs[1]:
            bc2.apply(rhs[1], function2.vector())
        functions = (function1, function2)
    # Apply the same BCs at the block (monolithic) level
    block_function = BlockFunction(block_V)
    block_bcs.apply(block_rhs, block_function.block_vector())
    return (functions, block_function)
def initialization(mesh, subdomains, boundaries):
    """Build function spaces, material parameters and initial states for the
    coupled mechanics / flow / transport problem.

    Three copies of every parameter field are created (suffix ``_0`` for the
    reference/initial state, ``_1`` for time level n-1, no suffix for time
    level n), distinguished per subdomain (markers 500 and 501).

    Fix: in the "# n properties" mechanics section the original assigned
    ``K2`` twice and never reassigned ``K1`` (a typo; harmless only because
    K1 happened to retain the identical value from the previous section).
    The first assignment is corrected to ``K1``.
    """
    # Discrete spaces: DG0 tensor/scalar spaces for cellwise parameters
    TM = TensorFunctionSpace(mesh, 'DG', 0)
    PM = FunctionSpace(mesh, 'DG', 0)
    # Mechanics (CG2 vector), flow (BDM1 velocity + DG0 pressure)
    UCG = VectorElement("CG", mesh.ufl_cell(), 2)
    BDM = FiniteElement("BDM", mesh.ufl_cell(), 1)
    PDG = FiniteElement("DG", mesh.ufl_cell(), 0)
    UCG_F = FunctionSpace(mesh, UCG)
    BDM_F = FunctionSpace(mesh, BDM)
    PDG_F = FunctionSpace(mesh, PDG)
    W = BlockFunctionSpace([BDM_F, PDG_F], restrict=[None, None])
    U = BlockFunctionSpace([UCG_F])
    I = Identity(mesh.topology().dim())
    # Transport: enriched CG1+DG0 ("mini"-like) element
    C_cg = FiniteElement("CG", mesh.ufl_cell(), 1)
    C_dg = FiniteElement("DG", mesh.ufl_cell(), 0)
    mini = C_cg + C_dg
    C = FunctionSpace(mesh, mini)
    C = BlockFunctionSpace([C]) #TODO
    # Solution holders: _h hydraulic, _m mechanics, _c concentration,
    # at time levels 0, 1, 2 and current
    solution0_h = BlockFunction(W)
    solution0_m = BlockFunction(U)
    solution0_c = BlockFunction(C)
    solution1_h = BlockFunction(W)
    solution1_m = BlockFunction(U)
    solution1_c = BlockFunction(C)
    solution2_h = BlockFunction(W)
    solution2_m = BlockFunction(U)
    solution2_c = BlockFunction(C)
    solution_h = BlockFunction(W)
    solution_m = BlockFunction(U)
    solution_c = BlockFunction(C)
    ## mechanics
    # 0 properties
    alpha1 = 0.74
    K1 = 8.4 * 1000.e6
    nu1 = 0.18
    alpha2 = 0.74
    K2 = 8.4 * 1000.e6
    nu2 = 0.18
    alpha_values = [alpha1, alpha2]
    K_values = [K1, K2]
    nu_values = [nu1, nu2]
    alpha_0 = Function(PM)
    K_0 = Function(PM)
    nu_0 = Function(PM)
    alpha_0 = init_scalar_parameter(alpha_0, alpha_values[0], 500, subdomains)
    K_0 = init_scalar_parameter(K_0, K_values[0], 500, subdomains)
    nu_0 = init_scalar_parameter(nu_0, nu_values[0], 500, subdomains)
    alpha_0 = init_scalar_parameter(alpha_0, alpha_values[1], 501, subdomains)
    K_0 = init_scalar_parameter(K_0, K_values[1], 501, subdomains)
    nu_0 = init_scalar_parameter(nu_0, nu_values[1], 501, subdomains)
    K_mult_min = 1.0
    K_mult_max = 1.0
    mu_l_0, lmbda_l_0, Ks_0, K_0 = \
        bulk_modulus_update(mesh, solution0_c[0], K_mult_min, K_mult_max,
                            K_0, nu_0, alpha_0, K_0)
    # n-1 properties
    alpha1 = 0.74
    K1 = 8.4 * 1000.e6
    nu1 = 0.18
    alpha2 = 0.74
    K2 = 8.4 * 1000.e6
    nu2 = 0.18
    alpha_values = [alpha1, alpha2]
    K_values = [K1, K2]
    nu_values = [nu1, nu2]
    alpha_1 = Function(PM)
    K_1 = Function(PM)
    nu_1 = Function(PM)
    alpha_1 = init_scalar_parameter(alpha_1, alpha_values[0], 500, subdomains)
    K_1 = init_scalar_parameter(K_1, K_values[0], 500, subdomains)
    nu_1 = init_scalar_parameter(nu_1, nu_values[0], 500, subdomains)
    alpha_1 = init_scalar_parameter(alpha_1, alpha_values[1], 501, subdomains)
    K_1 = init_scalar_parameter(K_1, K_values[1], 501, subdomains)
    nu_1 = init_scalar_parameter(nu_1, nu_values[1], 501, subdomains)
    K_mult_min = 1.0
    K_mult_max = 1.0
    # NOTE(review): solution0_c[0] is used for all three time levels here —
    # presumably intentional at initialization time; confirm.
    mu_l_1, lmbda_l_1, Ks_1, K_1 = \
        bulk_modulus_update(mesh, solution0_c[0], K_mult_min, K_mult_max,
                            K_1, nu_1, alpha_1, K_0)
    # n properties
    alpha1 = 0.74
    K1 = 8.4 * 1000.e6  # fixed: original read "K2 = ..." (typo)
    nu1 = 0.18
    alpha2 = 0.74
    K2 = 8.4 * 1000.e6
    nu2 = 0.18
    alpha_values = [alpha1, alpha2]
    K_values = [K1, K2]
    nu_values = [nu1, nu2]
    alpha = Function(PM)
    K = Function(PM)
    nu = Function(PM)
    alpha = init_scalar_parameter(alpha, alpha_values[0], 500, subdomains)
    K = init_scalar_parameter(K, K_values[0], 500, subdomains)
    nu = init_scalar_parameter(nu, nu_values[0], 500, subdomains)
    alpha = init_scalar_parameter(alpha, alpha_values[1], 501, subdomains)
    K = init_scalar_parameter(K, K_values[1], 501, subdomains)
    nu = init_scalar_parameter(nu, nu_values[1], 501, subdomains)
    K_mult_min = 1.0
    K_mult_max = 1.0
    mu_l, lmbda_l, Ks, K = \
        bulk_modulus_update(mesh, solution0_c[0], K_mult_min, K_mult_max,
                            K, nu, alpha, K_0)
    ## flow
    # 0 properties
    cf1 = 1e-10
    phi1 = 0.2
    rho1 = 1000.0
    mu1 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k1 = np.array([kx, 0., 0., ky])
    cf2 = 1e-10
    phi2 = 0.2
    rho2 = 1000.0
    mu2 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k2 = np.array([kx, 0., 0., ky])
    cf_values = [cf1, cf2]
    phi_values = [phi1, phi2]
    rho_values = [rho1, rho2]
    mu_values = [mu1, mu2]
    k_values = [k1, k2]
    cf_0 = Function(PM)
    phi_0 = Function(PM)
    rho_0 = Function(PM)
    mu_0 = Function(PM)
    k_0 = Function(TM)
    cf_0 = init_scalar_parameter(cf_0, cf_values[0], 500, subdomains)
    phi_0 = init_scalar_parameter(phi_0, phi_values[0], 500, subdomains)
    rho_0 = init_scalar_parameter(rho_0, rho_values[0], 500, subdomains)
    mu_0 = init_scalar_parameter(mu_0, mu_values[0], 500, subdomains)
    k_0 = init_tensor_parameter(k_0, k_values[0], 500, subdomains, mesh.topology().dim())
    cf_0 = init_scalar_parameter(cf_0, cf_values[1], 501, subdomains)
    phi_0 = init_scalar_parameter(phi_0, phi_values[1], 501, subdomains)
    rho_0 = init_scalar_parameter(rho_0, rho_values[1], 501, subdomains)
    mu_0 = init_scalar_parameter(mu_0, mu_values[1], 501, subdomains)
    k_0 = init_tensor_parameter(k_0, k_values[1], 501, subdomains, mesh.topology().dim())
    #filename = "perm4.csv"
    #k_0 = init_from_file_parameter(k_0,0.,0.,filename)
    # n-1 properties
    cf1 = 1e-10
    phi1 = 0.2
    rho1 = 1000.0
    mu1 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k1 = np.array([kx, 0., 0., ky])
    cf2 = 1e-10
    phi2 = 0.2
    rho2 = 1000.0
    mu2 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k2 = np.array([kx, 0., 0., ky])
    cf_values = [cf1, cf2]
    phi_values = [phi1, phi2]
    rho_values = [rho1, rho2]
    mu_values = [mu1, mu2]
    k_values = [k1, k2]
    cf_1 = Function(PM)
    phi_1 = Function(PM)
    rho_1 = Function(PM)
    mu_1 = Function(PM)
    k_1 = Function(TM)
    cf_1 = init_scalar_parameter(cf_1, cf_values[0], 500, subdomains)
    phi_1 = init_scalar_parameter(phi_1, phi_values[0], 500, subdomains)
    rho_1 = init_scalar_parameter(rho_1, rho_values[0], 500, subdomains)
    mu_1 = init_scalar_parameter(mu_1, mu_values[0], 500, subdomains)
    k_1 = init_tensor_parameter(k_1, k_values[0], 500, subdomains, mesh.topology().dim())
    cf_1 = init_scalar_parameter(cf_1, cf_values[1], 501, subdomains)
    phi_1 = init_scalar_parameter(phi_1, phi_values[1], 501, subdomains)
    rho_1 = init_scalar_parameter(rho_1, rho_values[1], 501, subdomains)
    mu_1 = init_scalar_parameter(mu_1, mu_values[1], 501, subdomains)
    k_1 = init_tensor_parameter(k_1, k_values[1], 501, subdomains, mesh.topology().dim())
    #filename = "perm4.csv"
    #k_1 = init_from_file_parameter(k_1,0.,0.,filename)
    # n properties
    cf1 = 1e-10
    phi1 = 0.2
    rho1 = 1000.0
    mu1 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k1 = np.array([kx, 0., 0., ky])
    cf2 = 1e-10
    phi2 = 0.2
    rho2 = 1000.0
    mu2 = 1.
    kx = 8.802589710965712e-10
    ky = 8.802589710965712e-11
    k2 = np.array([kx, 0., 0., ky])
    cf_values = [cf1, cf2]
    phi_values = [phi1, phi2]
    rho_values = [rho1, rho2]
    mu_values = [mu1, mu2]
    k_values = [k1, k2]
    cf = Function(PM)
    phi = Function(PM)
    rho = Function(PM)
    mu = Function(PM)
    k = Function(TM)
    cf = init_scalar_parameter(cf, cf_values[0], 500, subdomains)
    phi = init_scalar_parameter(phi, phi_values[0], 500, subdomains)
    rho = init_scalar_parameter(rho, rho_values[0], 500, subdomains)
    mu = init_scalar_parameter(mu, mu_values[0], 500, subdomains)
    k = init_tensor_parameter(k, k_values[0], 500, subdomains, mesh.topology().dim())
    cf = init_scalar_parameter(cf, cf_values[1], 501, subdomains)
    phi = init_scalar_parameter(phi, phi_values[1], 501, subdomains)
    rho = init_scalar_parameter(rho, rho_values[1], 501, subdomains)
    mu = init_scalar_parameter(mu, mu_values[1], 501, subdomains)
    k = init_tensor_parameter(k, k_values[1], 501, subdomains, mesh.topology().dim())
    #filename = "perm4.csv"
    #k = init_from_file_parameter(k,0.,0.,filename)
    ### transport
    # 0
    dx1 = 1e-12
    dy1 = 1e-12
    d1 = np.array([dx1, 0., 0., dy1])
    dx2 = 1e-12
    dy2 = 1e-12
    d2 = np.array([dx2, 0., 0., dy2])
    d_values = [d1, d2]
    d_0 = Function(TM)
    d_0 = init_tensor_parameter(d_0, d_values[0], 500, subdomains, mesh.topology().dim())
    d_0 = init_tensor_parameter(d_0, d_values[1], 501, subdomains, mesh.topology().dim())
    # n-1
    dx1 = 1e-12
    dy1 = 1e-12
    d1 = np.array([dx1, 0., 0., dy1])
    dx2 = 1e-12
    dy2 = 1e-12
    d2 = np.array([dx2, 0., 0., dy2])
    d_values = [d1, d2]
    d_1 = Function(TM)
    d_1 = init_tensor_parameter(d_1, d_values[0], 500, subdomains, mesh.topology().dim())
    d_1 = init_tensor_parameter(d_1, d_values[1], 501, subdomains, mesh.topology().dim())
    # n
    dx1 = 1e-12
    dy1 = 1e-12
    d1 = np.array([dx1, 0., 0., dy1])
    dx2 = 1e-12
    dy2 = 1e-12
    d2 = np.array([dx2, 0., 0., dy2])
    d_values = [d1, d2]
    d = Function(TM)
    d = init_tensor_parameter(d, d_values[0], 500, subdomains, mesh.topology().dim())
    d = init_tensor_parameter(d, d_values[1], 501, subdomains, mesh.topology().dim())
    ####initialization
    # initial
    u_0 = Constant((0.0, 0.0))
    u_0_project = project(u_0, U[0])
    assign(solution0_m.sub(0), u_0_project)
    p_0 = Constant(1.e6)
    p_0_project = project(p_0, W[1])
    assign(solution0_h.sub(1), p_0_project)
    # v_0 = Constant((0.0, 0.0))
    # v_0_project = project(v_0, W[0])
    # assign(solution0_h.sub(0), v_0_project)
    c0 = c_sat_cal(1.e6, 20.)
    c0_project = project(c0, C[0])
    assign(solution0_c.sub(0), c0_project)
    # n - 1
    u_0 = Constant((0.0, 0.0))
    u_0_project = project(u_0, U[0])
    assign(solution1_m.sub(0), u_0_project)
    p_0 = Constant(1.e6)
    p_0_project = project(p_0, W[1])
    assign(solution1_h.sub(1), p_0_project)
    # v_0 = Constant((0.0, 0.0))
    # v_0_project = project(v_0, W[0])
    # assign(solution1_h.sub(0), v_0_project)
    c0 = c_sat_cal(1.e6, 20.)
    c0_project = project(c0, C[0])
    assign(solution1_c.sub(0), c0_project)
    # n - 2
    u_0 = Constant((0.0, 0.0))
    u_0_project = project(u_0, U[0])
    assign(solution2_m.sub(0), u_0_project)
    p_0 = Constant(1.e6)
    p_0_project = project(p_0, W[1])
    assign(solution2_h.sub(1), p_0_project)
    # v_0 = Constant((0.0, 0.0))
    # v_0_project = project(v_0, W[0])
    # assign(solution2_h.sub(0), v_0_project)
    c0 = c_sat_cal(1.e6, 20.)
    c0_project = project(c0, C[0])
    assign(solution2_c.sub(0), c0_project)
    # n
    u_0 = Constant((0.0, 0.0))
    u_0_project = project(u_0, U[0])
    assign(solution_m.sub(0), u_0_project)
    p_0 = Constant(1.e6)
    p_0_project = project(p_0, W[1])
    assign(solution_h.sub(1), p_0_project)
    # v_0 = Constant((0.0, 0.0))
    # v_0_project = project(v_0, W[0])
    # assign(solution_h.sub(0), v_0_project)
    c0 = c_sat_cal(1.e6, 20.)
    c0_project = project(c0, C[0])
    assign(solution_c.sub(0), c0_project)
    ###iterative parameters
    phi_it = Function(PM)
    assign(phi_it, phi_0)
    print('c_sat', c_sat_cal(1.0e8, 20.))
    c_sat = c_sat_cal(1.0e8, 20.)
    c_sat = project(c_sat, PM)
    c_inject = Constant(0.0)
    c_inject = project(c_inject, PM)
    mu_c1_1 = 1.e-4
    mu_c2_1 = 5.e-0
    mu_c1_2 = 1.e-4
    mu_c2_2 = 5.e-0
    mu_c1_values = [mu_c1_1, mu_c1_2]
    mu_c2_values = [mu_c2_1, mu_c2_2]
    mu_c1 = Function(PM)
    mu_c2 = Function(PM)
    mu_c1 = init_scalar_parameter(mu_c1, mu_c1_values[0], 500, subdomains)
    mu_c2 = init_scalar_parameter(mu_c2, mu_c2_values[0], 500, subdomains)
    mu_c1 = init_scalar_parameter(mu_c1, mu_c1_values[1], 501, subdomains)
    mu_c2 = init_scalar_parameter(mu_c2, mu_c2_values[1], 501, subdomains)
    coeff_for_perm_1 = 22.2
    coeff_for_perm_2 = 22.2
    coeff_for_perm_values = [coeff_for_perm_1, coeff_for_perm_2]
    coeff_for_perm = Function(PM)
    coeff_for_perm = init_scalar_parameter(coeff_for_perm, coeff_for_perm_values[0], 500, subdomains)
    coeff_for_perm = init_scalar_parameter(coeff_for_perm, coeff_for_perm_values[1], 501, subdomains)
    solutionIt_h = BlockFunction(W)
    return solution0_m, solution0_h, solution0_c \
        , solution1_m, solution1_h, solution1_c \
        , solution2_m, solution2_h, solution2_c \
        , solution_m, solution_h, solution_c \
        , alpha_0, K_0, mu_l_0, lmbda_l_0, Ks_0 \
        , alpha_1, K_1, mu_l_1, lmbda_l_1, Ks_1 \
        , alpha, K, mu_l, lmbda_l, Ks \
        , cf_0, phi_0, rho_0, mu_0, k_0 \
        , cf_1, phi_1, rho_1, mu_1, k_1 \
        , cf, phi, rho, mu, k \
        , d_0, d_1, d, I \
        , phi_it, solutionIt_h, mu_c1, mu_c2 \
        , nu_0, nu_1, nu, coeff_for_perm \
        , c_sat, c_inject
def transport_linear(integrator_type, mesh, subdomains, boundaries, t_start, dt, T, solution0,
                     alpha_0, K_0, mu_l_0, lmbda_l_0, Ks_0,
                     alpha_1, K_1, mu_l_1, lmbda_l_1, Ks_1,
                     alpha, K, mu_l, lmbda_l, Ks,
                     cf_0, phi_0, rho_0, mu_0, k_0,
                     cf_1, phi_1, rho_1, mu_1, k_1,
                     cf, phi, rho, mu, k,
                     d_0, d_1, d_t, vel_c, p_con, A_0, Temp, c_extrapolate):
    """Solve the (linearized) advection-diffusion transport problem in time.

    Assembles an interior-penalty DG-in-space weak form for the concentration,
    wraps it in a time-stepping problem, and integrates from t_start to T with
    step dt. Returns (final solution, T).
    """
    # Create mesh and define function space
    parameters["ghost_mode"] = "shared_facet" # required by dS
    dx = Measure('dx', domain=mesh, subdomain_data=subdomains)
    ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
    dS = Measure('dS', domain=mesh, subdomain_data=boundaries)
    # Enriched CG1+DG0 element for the concentration
    C_cg = FiniteElement("CG", mesh.ufl_cell(), 1)
    C_dg = FiniteElement("DG", mesh.ufl_cell(), 0)
    mini = C_cg + C_dg
    C = FunctionSpace(mesh, mini)
    C = BlockFunctionSpace([C])
    TM = TensorFunctionSpace(mesh, 'DG', 0)
    PM = FunctionSpace(mesh, 'DG', 0)
    # Facet/cell geometric quantities used by the DG penalty terms
    n = FacetNormal(mesh)
    vc = CellVolume(mesh)
    fc = FacetArea(mesh)
    h = vc / fc
    h_avg = (vc('+') + vc('-')) / (2 * avg(fc))
    penalty1 = Constant(1.0)
    # Tortuosity from porosity (exponent -0.5)
    tau = Function(PM)
    tau = tau_cal(tau, phi, -0.5)
    tuning_para = 0.25
    # Upwind flux: positive part of vel.n
    vel_norm = (dot(vel_c, n) + abs(dot(vel_c, n))) / 2.0
    cell_size = CellDiameter(mesh)
    vnorm = sqrt(dot(vel_c, vel_c))
    I = Identity(mesh.topology().dim())
    # Effective diffusivity plus a velocity-scaled stabilization term
    d_eff = Function(TM)
    d_eff = diff_coeff_cal_rev(d_eff, d_0, tau, phi) + tuning_para * cell_size * vnorm * I
    monitor_dt = dt
    # Define variational problem
    dc, = BlockTrialFunction(C)
    dc_dot, = BlockTrialFunction(C)
    psic, = BlockTestFunction(C)
    block_c = BlockFunction(C)
    c, = block_split(block_c)
    block_c_dot = BlockFunction(C)
    c_dot, = block_split(block_c_dot)
    theta = -1.0
    # Mass term (time derivative)
    a_time = phi * rho * inner(c_dot, psic) * dx
    # Weighted-average interior-penalty diffusion terms
    a_dif = dot(rho*d_eff*grad(c),grad(psic))*dx \
        - dot(avg_w(rho*d_eff*grad(c),weight_e(rho*d_eff,n)), jump(psic, n))*dS \
        + theta*dot(avg_w(rho*d_eff*grad(psic),weight_e(rho*d_eff,n)), jump(c, n))*dS \
        + penalty1/h_avg*k_e(rho*d_eff,n)*dot(jump(c, n), jump(psic, n))*dS
    # Upwinded advection; outflow contribution on boundary marker 3
    a_adv = -dot(rho*vel_c*c,grad(psic))*dx \
        + dot(jump(psic), rho('+')*vel_norm('+')*c('+') - rho('-')*vel_norm('-')*c('-') )*dS \
        + dot(psic, rho*vel_norm*c)*ds(3)
    # Reaction source and inlet (marker 1) Dirichlet-flux data
    R_c = R_c_cal(c_extrapolate, p_con, Temp)
    c_D1 = Constant(0.5)
    rhs_c = R_c * A_s_cal(phi, phi_0, A_0) * psic * dx - dot( rho * phi * vel_c, n) * c_D1 * psic * ds(1)
    r_u = [a_dif + a_adv]
    j_u = block_derivative(r_u, [c], [dc])
    r_u_dot = [a_time]
    j_u_dot = block_derivative(r_u_dot, [c_dot], [dc_dot])
    r = [r_u_dot[0] + r_u[0] - rhs_c]
    # this part is not applied.
    exact_solution_expression1 = Expression("1.0", t=0, element=C[0].ufl_element())
    def bc(t):
        p5 = DirichletBC(C.sub(0), exact_solution_expression1, boundaries, 1, method="geometric")
        return BlockDirichletBC([p5])
    # Define problem wrapper
    class ProblemWrapper(object):
        def set_time(self, t):
            pass
        # Residual and jacobian functions
        def residual_eval(self, t, solution, solution_dot):
            return r
        def jacobian_eval(self, t, solution, solution_dot, solution_dot_coefficient):
            return [[ Constant(solution_dot_coefficient) * j_u_dot[0, 0] + j_u[0, 0] ]]
        # Define boundary condition (none applied — bc(t) above is unused)
        def bc_eval(self, t):
            pass
        # Define initial condition
        def ic_eval(self):
            return solution0
        # Define custom monitor to plot the solution
        def monitor(self, t, solution, solution_dot):
            pass
    problem_wrapper = ProblemWrapper()
    (solution, solution_dot) = (block_c, block_c_dot)
    solver = TimeStepping(problem_wrapper, solution, solution_dot)
    solver.set_parameters({
        "initial_time": t_start,
        "time_step_size": dt,
        "monitor": {
            "time_step_size": monitor_dt,
        },
        "final_time": T,
        "exact_final_time": "stepover",
        "integrator_type": integrator_type,
        "problem_type": "linear",
        "linear_solver": "mumps",
        "report": True
    })
    export_solution = solver.solve()
    return export_solution, T
def m_linear(integrator_type, mesh, subdomains, boundaries, t_start, dt, T, solution0,
             alpha_0, K_0, mu_l_0, lmbda_l_0, Ks_0,
             alpha_1, K_1, mu_l_1, lmbda_l_1, Ks_1,
             alpha, K, mu_l, lmbda_l, Ks,
             cf_0, phi_0, rho_0, mu_0, k_0,
             cf_1, phi_1, rho_1, mu_1, k_1,
             cf, phi, rho, mu, k,
             pressure_freeze):
    """Solve the quasi-static linear elasticity (mechanics) subproblem.

    Assembles the linear-elastic stiffness with a Biot coupling term built
    from the frozen pressure, applies roller boundary conditions, and solves
    the resulting linear system directly. Returns (displacement solution, T).
    """
    # Create mesh and define function space
    parameters["ghost_mode"] = "shared_facet" # required by dS
    dx = Measure('dx', domain=mesh, subdomain_data=subdomains)
    ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
    dS = Measure('dS', domain=mesh, subdomain_data=boundaries)
    # CG2 vector space for displacement, wrapped as a 1-block space
    C = VectorFunctionSpace(mesh, "CG", 2)
    C = BlockFunctionSpace([C])
    TM = TensorFunctionSpace(mesh, 'DG', 0)
    PM = FunctionSpace(mesh, 'DG', 0)
    n = FacetNormal(mesh)
    vc = CellVolume(mesh)
    fc = FacetArea(mesh)
    h = vc/fc
    h_avg = (vc('+') + vc('-'))/(2*avg(fc))
    monitor_dt = dt
    # Boundary traction magnitudes (f_stress_x is currently unused)
    f_stress_x = Constant(-1.e3)
    f_stress_y = Constant(-20.0e6)
    f = Constant((0.0, 0.0)) #sink/source for displacement
    I = Identity(mesh.topology().dim())
    # Define variational problem
    psiu, = BlockTestFunction(C)
    block_u = BlockTrialFunction(C)
    u, = block_split(block_u)
    w = BlockFunction(C)
    theta = -1.0
    # Biot coupling with the frozen pressure field
    a_time = inner(-alpha*pressure_freeze*I,sym(grad(psiu)))*dx #quasi static
    # Linear-elastic stiffness
    a = inner(2*mu_l*strain(u)+lmbda_l*div(u)*I, sym(grad(psiu)))*dx
    # Body force plus traction applied on boundary marker 2
    rhs_a = inner(f,psiu)*dx \
        + dot(f_stress_y*n,psiu)*ds(2)
    r_u = [a]
    #DirichletBC
    bcd1 = DirichletBC(C.sub(0).sub(0), 0.0, boundaries, 1) # No normal displacement for solid on left side
    bcd3 = DirichletBC(C.sub(0).sub(0), 0.0, boundaries, 3) # No normal displacement for solid on right side
    bcd4 = DirichletBC(C.sub(0).sub(1), 0.0, boundaries, 4) # No normal displacement for solid on bottom side
    bcs = BlockDirichletBC([bcd1,bcd3,bcd4])
    # Assemble, apply BCs, and solve directly with MUMPS
    AA = block_assemble([r_u])
    FF = block_assemble([rhs_a - a_time])
    bcs.apply(AA)
    bcs.apply(FF)
    block_solve(AA, w.block_vector(), FF, "mumps")
    export_solution = w
    return export_solution, T
def truth_solve(mu_unkown):
    """High-fidelity solve of a hyperelastic contact (obstacle) problem.

    The parameter mu_unkown = (x_0, y_0) positions a parabolic obstacle; the
    displacement/multiplier pair is found with an augmented-Lagrangian
    formulation and a PETSc SNES nonlinear solver. Returns the solution
    BlockFunction at convergence.
    """
    print("Performing truth solve at mu =", mu_unkown)
    (mesh, subdomains, boundaries, restrictions) = read_mesh()
    # (mesh, subdomains, boundaries, restrictions) = create_mesh()
    dx = Measure('dx', subdomain_data=subdomains)
    ds = Measure('ds', subdomain_data=boundaries)
    W = generate_block_function_space(mesh, restrictions)
    # Test and trial functions
    block_v = BlockTestFunction(W)
    v, q = block_split(block_v)
    block_du = BlockTrialFunction(W)
    du, dp = block_split(block_du)
    block_u = BlockFunction(W)
    u, p = block_split(block_u)
    # gap
    # V2 = FunctionSpace(mesh, "CG", 1)
    # gap = Function(V2, name="Gap")
    # obstacle: paraboloid of curvature 1/R, depth d, centered at (x_0, y_0)
    R = 0.25
    d = 0.15
    x_0 = mu_unkown[0]
    y_0 = mu_unkown[1]
    obstacle = Expression("-d+(pow(x[0]-x_0,2)+pow(x[1]-y_0, 2))/2/R", d=d, R=R , x_0 = x_0, y_0 = y_0, degree=0)
    # Constitutive parameters (Lame coefficients from E, nu)
    E = Constant(10.0)
    nu = Constant(0.3)
    mu, lmbda = Constant(E/(2*(1 + nu))), Constant(E*nu/((1 + nu)*(1 - 2*nu)))
    B = Constant((0.0, 0.0, 0.0)) # Body force per unit volume
    T = Constant((0.0, 0.0, 0.0)) # Traction force on the boundary
    # Kinematics
    # -------------------------------------------------------------------------
    mesh_dim = mesh.topology().dim() # Spatial dimension
    I = Identity(mesh_dim) # Identity tensor
    F = I + grad(u) # Deformation gradient
    C = F.T*F # Right Cauchy-Green tensor
    J = det(F) # 3rd invariant of the deformation tensor
    # Strain function (neo-Hookean first Piola-Kirchhoff stress)
    def P(u): # P = dW/dF:
        return mu*(F - inv(F.T)) + lmbda*ln(J)*inv(F.T)
    def eps(v):
        return sym(grad(v))
    def sigma(v):
        return lmbda*tr(eps(v))*Identity(3) + 2.0*mu*eps(v)
    # Definition of The Mackauley bracket <x>+
    def ppos(x):
        return (x+abs(x))/2.
    # Define the augmented lagrangian (uses `pen` bound below, at call time)
    def aug_l(x):
        return x + pen*(obstacle-u[2])
    pen = Constant(1e4)
    # Boundary conditions
    # bottom_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 2)
    # left_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 3)
    # right_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 4)
    # front_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 5)
    # back_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 6)
    # # sym_x_bc = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 2)
    # # sym_y_bc = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 3)
    # # bc = BlockDirichletBC([bottom_bc, sym_x_bc, sym_y_bc])
    # bc = BlockDirichletBC([bottom_bc, left_bc, right_bc, front_bc, back_bc])
    # Clamp the bottom; block in-plane displacement on the side walls
    bottom_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 2)
    left_bc_x = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 3)
    left_bc_y = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 3)
    right_bc_x = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 4)
    right_bc_y = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 4)
    front_bc_x = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 5)
    front_bc_y = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 5)
    back_bc_x = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 6)
    back_bc_y = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 6)
    # sym_x_bc = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 2)
    # sym_y_bc = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 3)
    # bc = BlockDirichletBC([bottom_bc, sym_x_bc, sym_y_bc])
    bc = BlockDirichletBC([bottom_bc, left_bc_x, left_bc_y,
                           right_bc_x, right_bc_y, front_bc_x, front_bc_y,
                           back_bc_x, back_bc_y])
    # Variational forms
    # F = inner(sigma(u), eps(v))*dx + pen*dot(v[2], ppos(u[2]-obstacle))*ds(1)
    # F = [inner(sigma(u), eps(v))*dx - aug_l(l)*v[2]*ds(1) + ppos(aug_l(l))*v[2]*ds(1),
    #      (obstacle-u[2])*v*ds(1) - (1/pen)*ppos(aug_l(l))*v*ds(1)]
    # F_a = inner(sigma(u), eps(v))*dx
    # F_b = - aug_l(p)*v[2]*ds(1) + ppos(aug_l(p))*v[2]*ds(1)
    # F_c = (obstacle-u[2])*q*ds(1)
    # F_d = - (1/pen)*ppos(aug_l(p))*q*ds(1)
    #
    # block_F = [[F_a, F_b],
    #            [F_c, F_d]]
    # Residual: hyperelastic equilibrium + augmented-Lagrangian contact on ds(1)
    F_a = inner(P(u), grad(v))*dx - dot(B, v)*dx - dot(T, v)*ds \
        - aug_l(p)*v[2]*ds(1) + ppos(aug_l(p))*v[2]*ds(1)
    F_b = (obstacle-u[2])*q*ds(1) - (1/pen)*ppos(aug_l(p))*q*ds(1)
    block_F = [F_a, F_b]
    J = block_derivative(block_F, block_u, block_du)
    # Setup solver
    problem = BlockNonlinearProblem(block_F, block_u, bc, J)
    solver = BlockPETScSNESSolver(problem)
    solver.parameters.update({
        "linear_solver": "mumps",
        "absolute_tolerance": 1E-4,
        "relative_tolerance": 1E-4,
        "maximum_iterations": 50,
        "report": True,
        "error_on_nonconvergence": True
    })
    # solver.parameters.update({
    #     "linear_solver": "cg",
    #     "absolute_tolerance": 1E-4,
    #     "relative_tolerance": 1E-4,
    #     "maximum_iterations": 50,
    #     "report": True,
    #     "error_on_nonconvergence": True
    # })
    # Perform a fake loop over time. Note how up will store the solution at the last time.
    # Q. for?
    # A. You can remove it, since your problem is stationary. The template was targeting
    #    a final application which was transient, but in which the ROM should have only
    #    described the final solution (when reaching the steady state).
    # for _ in range(2):
    #     solver.solve()
    a1 = solver.solve()
    print(a1)
    # save all the solution here as a function of time
    # Return the solution at the last time
    # Q. block_u or block
    # A. I think block_u, it will split split among the components elsewhere
    return block_u
# NOTE(review): this chunk appears to be the interior of a larger assembly
# routine — `mesh`, `subdomains`, `boundaries` and `W` are defined outside
# this excerpt (presumably function parameters or earlier statements; confirm
# against the full file).
PM = FunctionSpace(mesh, "DG", 0)
TM = TensorFunctionSpace(mesh, "DG", 0)
I = Identity(mesh.topology().dim())
# Integration measures carrying subdomain/boundary markers
dx = Measure("dx", domain=mesh, subdomain_data=subdomains)
ds = Measure("ds", domain=mesh, subdomain_data=boundaries)
dS = Measure("dS", domain=mesh, subdomain_data=boundaries)
# Test and trial functions
vq = BlockTestFunction(W)
(v, q) = block_split(vq)
up = BlockTrialFunction(W)
(u, p) = block_split(up)
w = BlockFunction(W)
w0 = BlockFunction(W)
(u0, p0) = block_split(w0)
# Facet/cell geometric quantities (used by DG penalty terms below this chunk)
n = FacetNormal(mesh)
vc = CellVolume(mesh)
fc = FacetArea(mesh)
h = vc / fc
h_avg = (vc("+") + vc("-")) / (2 * avg(fc))
# Penalty and symmetry parameters for the interior-penalty formulation
penalty1 = 1.0
penalty2 = 10.0
theta = 1.0
# Constitutive parameters
def train_data_driven(N):
    """Train one neural network per solution component on L2-projected snapshots.

    For each component, the truth snapshots are projected onto the first N
    basis functions, normalized, and used to train a small network with Adam,
    a decaying learning rate, and early stopping on the validation loss. The
    best model and the output-normalization bounds are saved under "networks".

    Fixes vs. original:
    - removed a leftover debugging ``quit()`` that aborted the function right
      after exporting the snapshots, making the entire training loop dead code;
    - file handles for the snapshot dumps are now closed via ``with``;
    - ``np.Inf`` -> ``np.inf`` (``np.Inf`` was removed in NumPy 2.0);
    - ``epochs_no_improvement`` is initialized before the epoch loop;
    - misspelled locals (``n_snpashots``, ``n_trainining``) corrected.
    """
    (mesh, _, _, restrictions) = read_mesh()
    W = generate_block_function_space(mesh, restrictions)
    # L2 projection object (one solver per component)
    basis_functions = read_basis_functions(W, N)
    X = get_inner_products(W, "L2 projection")
    l2_projection = {
        c: L2ProjectionSolver(X[c], basis_functions[c], N)
        for c in components
    }
    # Solution storage
    solution = BlockFunction(W)
    # Training set
    training_set = get_set("training_set")
    mu_len = len(training_set[0])
    # Read in snapshots and export the displacement dofs; dofs are interleaved
    # as (x, y, z) every 3 entries — TODO confirm this matches the element.
    snapshots_matrix = SnapshotsMatrix(W)
    for i, mu in enumerate(training_set):
        print("Appending solution for mu =", mu, "to snapshots matrix")
        read_solution(mu, "truth_solve", solution)
        snapshots_matrix.enrich(solution)
        for offset, prefix in enumerate(("dis_x", "dis_y", "dis_z")):
            filename = os.path.join(prefix, prefix + "_" + str(i))
            with open(filename, 'wb') as write_file:
                pickle.dump(snapshots_matrix[-1][0].vector()[offset::3], write_file)
    # (a debugging quit() used to abort here)
    # Data driven training component by component
    normalize_inputs = NormalizeInputs(mu_range)
    for c in components:
        projected_snapshots = [
            l2_projection[c].solve(mu, c, snapshots_matrix[i])
            for i, mu in enumerate(training_set)
        ]
        inputs = torch.unsqueeze(torch.FloatTensor(training_set._list), dim=mu_len)
        inputs = normalize_inputs(inputs)
        outputs = torch.stack([
            torch.from_numpy(projected_snapshot)
            for projected_snapshot in projected_snapshots
        ])
        # Persist min/max output bounds so inference can denormalize
        with open(
                os.path.join("networks",
                             "output_normalization_" + c + "_" + str(N)),
                "w") as file_:
            file_.write(str(torch.min(outputs).detach().numpy()) + "\n")
            file_.write(str(torch.max(outputs).detach().numpy()) + "\n")
        normalize_outputs = NormalizeOutputs(
            os.path.join("networks", "output_normalization_" + c + "_" + str(N)))
        outputs = normalize_outputs(outputs)
        network = Network(mu_len, c, N)
        network.apply(init_weights)
        criterion = nn.MSELoss()
        learning_rate = 0.3
        optimizer = optim.Adam(network.parameters(), lr=learning_rate, eps=1.e-08)
        torch_dataset = TensorDataset(inputs.float(), outputs.float())
        # 4/6 of the snapshots for training, the rest for validation
        n_snapshots = len(training_set)
        n_training = 4 * int(n_snapshots / 6)
        n_validation = n_snapshots - n_training
        batch_size_training = int(round(np.sqrt(n_snapshots)))
        batch_size_validation = int(round(np.sqrt(n_snapshots)))
        epochs = 10000
        n_epochs_stop = epochs
        training_dataset, validation_dataset = random_split(
            torch_dataset, [n_training, n_validation])
        training_loader = DataLoader(dataset=training_dataset,
                                     batch_size=batch_size_training)
        validation_loader = DataLoader(dataset=validation_dataset,
                                       batch_size=batch_size_validation)
        training_losses = [None] * epochs
        validation_losses = [None] * epochs
        min_validation_loss = np.inf
        epochs_no_improvement = 0
        for epoch in range(epochs):
            # 1/sqrt decay of the learning rate
            for param_group in optimizer.param_groups:
                param_group["lr"] = learning_rate / (1 + np.sqrt(epoch))
            total_training_loss = 0.0
            for batch_x, batch_y in training_loader:  # for each training step
                network.train()
                optimizer.zero_grad()
                batch_x_normalized = batch_x.squeeze(1)
                prediction = network(batch_x_normalized)
                loss = criterion(prediction, batch_y)
                loss.backward()
                optimizer.step()
                total_training_loss += loss.item()
            training_losses[epoch] = total_training_loss / len(training_loader)
            print("[%d] Training loss: %.10f" % (epoch + 1, training_losses[epoch]))
            # Validation pass (no gradients)
            network.eval()
            total_validation_loss = 0.0
            with torch.no_grad():
                for validation_x, validation_y in validation_loader:
                    validation_x_normalized = validation_x.squeeze(1)
                    network_y = network(validation_x_normalized)
                    loss = criterion(network_y, validation_y)
                    total_validation_loss += loss.item()
            validation_losses[epoch] = total_validation_loss / len(validation_loader)
            print("[%d] Validation loss: %.10f" % (epoch + 1, validation_losses[epoch]))
            # Keep the best model; stop after n_epochs_stop epochs without improvement
            if validation_losses[epoch] <= min_validation_loss:
                epochs_no_improvement = 0
                min_validation_loss = validation_losses[epoch]
                torch.save(
                    network.state_dict(),
                    os.path.join("networks", "network_" + c + "_" + str(N)))
            else:
                epochs_no_improvement += 1
                if epochs_no_improvement == n_epochs_stop:
                    print("Early stopping!")
                    break
def function(self):
    """Return fresh unknown container(s) for this problem.

    When ``self.split`` is set, a pair of independent Functions on the two
    spaces (W, Q) is returned; otherwise a single monolithic BlockFunction
    on the mixed space.
    """
    if not self.split:
        return BlockFunction(self.mixedSpace)
    return (Function(self.W), Function(self.Q))
def h_linear(integrator_type, mesh, subdomains, boundaries, t_start, dt, T, solution0,
             alpha_0, K_0, mu_l_0, lmbda_l_0, Ks_0,
             alpha_1, K_1, mu_l_1, lmbda_l_1, Ks_1,
             alpha, K, mu_l, lmbda_l, Ks,
             cf_0, phi_0, rho_0, mu_0, k_0,
             cf_1, phi_1, rho_1, mu_1, k_1,
             cf, phi, rho, mu, k,
             sigma_v_freeze, dphi_c_dt):
    """Solve the linear hydraulic (mixed Darcy flow) subproblem in time.

    Mixed BDM1/DG0 formulation for velocity/pressure with Biot storage and
    frozen mechanical stress source terms, integrated from t_start to T with
    step dt. Returns (final solution, T).
    """
    # Create mesh and define function space
    parameters["ghost_mode"] = "shared_facet" # required by dS
    dx = Measure('dx', domain=mesh, subdomain_data=subdomains)
    ds = Measure('ds', domain=mesh, subdomain_data=boundaries)
    dS = Measure('dS', domain=mesh, subdomain_data=boundaries)
    # Mixed BDM1 (velocity) / DG0 (pressure) block space
    BDM = FiniteElement("BDM", mesh.ufl_cell(), 1)
    PDG = FiniteElement("DG", mesh.ufl_cell(), 0)
    BDM_F = FunctionSpace(mesh, BDM)
    PDG_F = FunctionSpace(mesh, PDG)
    W = BlockFunctionSpace([BDM_F, PDG_F], restrict=[None, None])
    TM = TensorFunctionSpace(mesh, 'DG', 0)
    PM = FunctionSpace(mesh, 'DG', 0)
    n = FacetNormal(mesh)
    vc = CellVolume(mesh)
    fc = FacetArea(mesh)
    h = vc / fc
    h_avg = (vc('+') + vc('-')) / (2 * avg(fc))
    I = Identity(mesh.topology().dim())
    monitor_dt = dt
    # Boundary pressures (p_inlet is currently unused)
    p_outlet = 0.1e6
    p_inlet = 1000.0
    # Biot storage coefficient 1/M
    M_inv = phi_0 * cf + (alpha - phi_0) / Ks
    # Define variational problem
    trial = BlockTrialFunction(W)
    dv, dp = block_split(trial)
    trial_dot = BlockTrialFunction(W)
    dv_dot, dp_dot = block_split(trial_dot)
    test = BlockTestFunction(W)
    psiv, psip = block_split(test)
    block_w = BlockFunction(W)
    v, p = block_split(block_w)
    block_w_dot = BlockFunction(W)
    v_dot, p_dot = block_split(block_w_dot)
    # Velocity has no time derivative (zero mass term keeps the block structure)
    a_time = Constant(0.0) * inner(v_dot, psiv) * dx #quasi static
    # k is a function of phi
    #k = perm_update_rutqvist_newton(p,p0,phi0,phi,coeff)
    # Darcy law: mu/k * v - grad(p) (integrated by parts)
    lhs_a = inner(dot(v, mu * inv(k)), psiv) * dx - p * div( psiv ) * dx #+ 6.0*inner(psiv,n)*ds(2) # - inner(gravity*(rho-rho0), psiv)*dx
    # Storage term on the pressure time derivative
    b_time = (M_inv + pow(alpha, 2.) / K) * p_dot * psip * dx
    # Mass conservation
    lhs_b = div(v) * psip * dx #div(rho*v)*psip*dx #TODO rho
    # Outlet pressure on marker 3; mechanics coupling sources on the pressure eq.
    rhs_v = -p_outlet * inner(psiv, n) * ds(3)
    rhs_p = -alpha / K * sigma_v_freeze * psip * dx - dphi_c_dt * psip * dx
    r_u = [lhs_a, lhs_b]
    j_u = block_derivative(r_u, block_w, trial)
    r_u_dot = [a_time, b_time]
    j_u_dot = block_derivative(r_u_dot, block_w_dot, trial_dot)
    r = [r_u_dot[0] + r_u[0] - rhs_v,
         r_u_dot[1] + r_u[1] - rhs_p]
    # Velocity boundary conditions: inflow on 1, no-flow on 2 and 4
    def bc(t):
        #bc_v = [DirichletBC(W.sub(0), (.0, .0), boundaries, 4)]
        v1 = DirichletBC(W.sub(0), (1.e-4 * 2.0, 0.0), boundaries, 1)
        v2 = DirichletBC(W.sub(0), (0.0, 0.0), boundaries, 2)
        v4 = DirichletBC(W.sub(0), (0.0, 0.0), boundaries, 4)
        bc_v = [v1, v2, v4]
        return BlockDirichletBC([bc_v, None])
    # Define problem wrapper
    class ProblemWrapper(object):
        def set_time(self, t):
            pass
            #g.t = t
        # Residual and jacobian functions
        def residual_eval(self, t, solution, solution_dot):
            #print(as_backend_type(assemble(p_time - p_time_error)).vec().norm())
            #print("gravity effect", as_backend_type(assemble(inner(gravity*(rho-rho0), psiv)*dx)).vec().norm())
            return r
        def jacobian_eval(self, t, solution, solution_dot, solution_dot_coefficient):
            return [[Constant(solution_dot_coefficient)*j_u_dot[0, 0] + j_u[0, 0],
                     Constant(solution_dot_coefficient)*j_u_dot[0, 1] + j_u[0, 1]],
                    [Constant(solution_dot_coefficient)*j_u_dot[1, 0] + j_u[1, 0],
                     Constant(solution_dot_coefficient)*j_u_dot[1, 1] + j_u[1, 1]]]
        # Define boundary condition
        def bc_eval(self, t):
            return bc(t)
        # Define initial condition
        def ic_eval(self):
            return solution0
        # Define custom monitor to plot the solution
        def monitor(self, t, solution, solution_dot):
            pass
    # Solve the time dependent problem
    problem_wrapper = ProblemWrapper()
    (solution, solution_dot) = (block_w, block_w_dot)
    solver = TimeStepping(problem_wrapper, solution, solution_dot)
    solver.set_parameters({
        "initial_time": t_start,
        "time_step_size": dt,
        "monitor": {
            "time_step_size": monitor_dt,
        },
        "final_time": T,
        "exact_final_time": "stepover",
        "integrator_type": integrator_type,
        "problem_type": "linear",
        "linear_solver": "mumps",
        "report": True
    })
    export_solution = solver.solve()
    return export_solution, T