def test_subdomains(compile_args): cell = ufl.triangle element = ufl.FiniteElement("Lagrange", cell, 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a0 = ufl.inner(u, v) * ufl.dx + ufl.inner(u, v) * ufl.dx(2) a1 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx a2 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx(1) a3 = ufl.inner(u, v) * ufl.ds(210) + ufl.inner(u, v) * ufl.ds(0) forms = [a0, a1, a2, a3] compiled_forms, module, code = ffcx.codegeneration.jit.compile_forms( forms, parameters={'scalar_type': 'double'}, cffi_extra_compile_args=compile_args) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) form0 = compiled_forms[0] ids = form0.integral_ids(module.lib.cell) assert ids[0] == -1 and ids[1] == 2 form1 = compiled_forms[1] ids = form1.integral_ids(module.lib.cell) assert ids[0] == -1 and ids[1] == 2 form2 = compiled_forms[2] ids = form2.integral_ids(module.lib.cell) assert ids[0] == 1 and ids[1] == 2 form3 = compiled_forms[3] assert form3.num_integrals(module.lib.cell) == 0 ids = form3.integral_ids(module.lib.exterior_facet) assert ids[0] == 0 and ids[1] == 210
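The subdomain ids checked above can also be read straight off the UFL form, before FFCx is involved. A minimal sketch, assuming only the legacy ufl.FiniteElement API already used in these tests; the default measure carries the id "everywhere", which the compiled forms above report as -1.

import ufl

cell = ufl.triangle
element = ufl.FiniteElement("Lagrange", cell, 1)
u, v = ufl.TrialFunction(element), ufl.TestFunction(element)
a = ufl.inner(u, v) * ufl.dx + ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.ds(210)

for integral in a.integrals():
    # e.g. "cell everywhere", "cell 2", "exterior_facet 210"
    print(integral.integral_type(), integral.subdomain_id())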
def test_additivity(mode): mesh = create_unit_square(MPI.COMM_WORLD, 12, 12, ghost_mode=mode) V = FunctionSpace(mesh, ("Lagrange", 1)) f1 = Function(V) f2 = Function(V) f3 = Function(V) f1.x.array[:] = 1.0 f2.x.array[:] = 2.0 f3.x.array[:] = 3.0 j1 = ufl.inner(f1, f1) * ufl.dx(mesh) j2 = ufl.inner(f2, f2) * ufl.ds(mesh) j3 = ufl.inner(ufl.avg(f3), ufl.avg(f3)) * ufl.dS(mesh) # Assemble each scalar form separately J1 = mesh.comm.allreduce(assemble_scalar(form(j1)), op=MPI.SUM) J2 = mesh.comm.allreduce(assemble_scalar(form(j2)), op=MPI.SUM) J3 = mesh.comm.allreduce(assemble_scalar(form(j3)), op=MPI.SUM) # Sum forms and assemble the result J12 = mesh.comm.allreduce(assemble_scalar(form(j1 + j2)), op=MPI.SUM) J13 = mesh.comm.allreduce(assemble_scalar(form(j1 + j3)), op=MPI.SUM) J23 = mesh.comm.allreduce(assemble_scalar(form(j2 + j3)), op=MPI.SUM) J123 = mesh.comm.allreduce(assemble_scalar(form(j1 + j2 + j3)), op=MPI.SUM) # Compare assembled values assert (J1 + J2) == pytest.approx(J12) assert (J1 + J3) == pytest.approx(J13) assert (J2 + J3) == pytest.approx(J23) assert (J1 + J2 + J3) == pytest.approx(J123)
def test_facet_integral(cell_type): """Test that the integral of a function over a facet is correct""" for count in range(5): mesh = unit_cell(cell_type) tdim = mesh.topology.dim V = FunctionSpace(mesh, ("Lagrange", 2)) v = Function(V) map_f = mesh.topology.index_map(tdim - 1) num_facets = map_f.size_local + map_f.num_ghosts indices = np.arange(0, num_facets) values = np.arange(0, num_facets, dtype=np.intc) marker = MeshTags(mesh, tdim - 1, indices, values) # Functions that will have the same integral over each facet if cell_type == CellType.triangle: root = 3 ** 0.25 # 4th root of 3 v.interpolate(lambda x: (x[0] - 1 / root) ** 2 + (x[1] - root / 3) ** 2) elif cell_type == CellType.quadrilateral: v.interpolate(lambda x: x[0] * (1 - x[0]) + x[1] * (1 - x[1])) elif cell_type == CellType.tetrahedron: s = 2 ** 0.5 * 3 ** (1 / 3) # side length v.interpolate(lambda x: (x[0] - s / 2) ** 2 + (x[1] - s / 2 / np.sqrt(3)) ** 2 + (x[2] - s * np.sqrt(2 / 3) / 4) ** 2) elif cell_type == CellType.hexahedron: v.interpolate(lambda x: x[0] * (1 - x[0]) + x[1] * (1 - x[1]) + x[2] * (1 - x[2])) # assert that the integral of these functions over each face are equal out = [] for j in range(num_facets): a = v * ds(subdomain_data=marker, subdomain_id=j) result = fem.assemble_scalar(a) out.append(result) assert np.isclose(result, out[0])
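For reference, a self-contained sketch of the same pattern on a built-in mesh, assuming the newer dolfinx API used elsewhere in this file (create_unit_square, meshtags, fem.form): tag the facets of one edge and integrate a function over just that part of the boundary.

from mpi4py import MPI
import numpy as np
import ufl
from dolfinx import fem, mesh

msh = mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)
fdim = msh.topology.dim - 1
# Tag the facets on the left edge with id 1 (the id value is illustrative)
facets = mesh.locate_entities_boundary(msh, fdim, lambda x: np.isclose(x[0], 0.0))
marker = mesh.meshtags(msh, fdim, facets, np.full(len(facets), 1, dtype=np.int32))

V = fem.FunctionSpace(msh, ("Lagrange", 1))
v = fem.Function(V)
v.interpolate(lambda x: 1.0 + 0.0 * x[0])   # constant 1, so the integral is the edge length

ds_left = ufl.ds(domain=msh, subdomain_data=marker, subdomain_id=1)
value = msh.comm.allreduce(fem.assemble_scalar(fem.form(v * ds_left)), op=MPI.SUM)
# value is approximately 1.0, the length of the left edge of the unit square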
def test_additivity(mode): mesh = dolfinx.UnitSquareMesh(MPI.COMM_WORLD, 12, 12, ghost_mode=mode) V = dolfinx.FunctionSpace(mesh, ("CG", 1)) f1 = dolfinx.Function(V) f2 = dolfinx.Function(V) f3 = dolfinx.Function(V) with f1.vector.localForm() as f1_local: f1_local.set(1.0) with f2.vector.localForm() as f2_local: f2_local.set(2.0) with f3.vector.localForm() as f3_local: f3_local.set(3.0) j1 = ufl.inner(f1, f1) * ufl.dx(mesh) j2 = ufl.inner(f2, f2) * ufl.ds(mesh) j3 = ufl.inner(ufl.avg(f3), ufl.avg(f3)) * ufl.dS(mesh) # Assemble each scalar form separately J1 = mesh.mpi_comm().allreduce(dolfinx.fem.assemble_scalar(j1), op=MPI.SUM) J2 = mesh.mpi_comm().allreduce(dolfinx.fem.assemble_scalar(j2), op=MPI.SUM) J3 = mesh.mpi_comm().allreduce(dolfinx.fem.assemble_scalar(j3), op=MPI.SUM) # Sum forms and assemble the result J12 = mesh.mpi_comm().allreduce(dolfinx.fem.assemble_scalar(j1 + j2), op=MPI.SUM) J13 = mesh.mpi_comm().allreduce(dolfinx.fem.assemble_scalar(j1 + j3), op=MPI.SUM) J23 = mesh.mpi_comm().allreduce(dolfinx.fem.assemble_scalar(j2 + j3), op=MPI.SUM) J123 = mesh.mpi_comm().allreduce(dolfinx.fem.assemble_scalar(j1 + j2 + j3), op=MPI.SUM) # Compare assembled values assert (J1 + J2) == pytest.approx(J12) assert (J1 + J3) == pytest.approx(J13) assert (J2 + J3) == pytest.approx(J23) assert (J1 + J2 + J3) == pytest.approx(J123)
def test_facet_normals(cell_type): """Test that FacetNormal is outward facing""" for count in range(5): mesh = unit_cell(cell_type) tdim = mesh.topology.dim V = VectorFunctionSpace(mesh, ("Lagrange", 1)) normal = FacetNormal(mesh) v = Function(V) map_f = mesh.topology.index_map(tdim - 1) num_facets = map_f.size_local + map_f.num_ghosts indices = np.arange(0, num_facets) values = np.arange(0, num_facets, dtype=np.intc) marker = MeshTags(mesh, tdim - 1, indices, values) # For each facet, check that the inner product of the normal and # the vector that has a positive normal component on only that facet # is positive for i in range(num_facets): if cell_type == CellType.interval: co = mesh.geometry.x[i] v.interpolate(lambda x: x[0] - co[0]) if cell_type == CellType.triangle: co = mesh.geometry.x[i] # Vector function that is zero at `co` and points away from `co` # so that there is no normal component on two edges and the integral # over the other edge is 1 v.interpolate(lambda x: ((x[0] - co[0]) / 2, (x[1] - co[1]) / 2)) elif cell_type == CellType.tetrahedron: co = mesh.geometry.x[i] # Vector function that is zero at `co` and points away from `co` # so that there is no normal component on three faces and the integral # over the other edge is 1 v.interpolate(lambda x: ((x[0] - co[0]) / 3, (x[1] - co[1]) / 3, (x[2] - co[2]) / 3)) elif cell_type == CellType.quadrilateral: # function that is 0 on one edge and points away from that edge # so that there is no normal component on three edges v.interpolate(lambda x: tuple(x[j] - i % 2 if j == i // 2 else 0 * x[j] for j in range(2))) elif cell_type == CellType.hexahedron: # function that is 0 on one face and points away from that face # so that there is no normal component on five faces v.interpolate(lambda x: tuple(x[j] - i % 2 if j == i // 3 else 0 * x[j] for j in range(3))) # assert that the integrals these functions dotted with the normal over a face # is 1 on one face and 0 on the others ones = 0 for j in range(num_facets): a = inner(v, normal) * ds(subdomain_data=marker, subdomain_id=j) result = fem.assemble_scalar(a) if np.isclose(result, 1): ones += 1 else: assert np.isclose(result, 0) assert ones == 1
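The outward-facing convention tested above can also be checked with the divergence theorem: for w(x) = x on the unit square, the boundary flux of w·n equals the volume integral of div(w), i.e. 2. A minimal sketch assuming the newer dolfinx API (create_unit_square, fem.form, fem.assemble_scalar).

from mpi4py import MPI
import ufl
from dolfinx import fem, mesh

msh = mesh.create_unit_square(MPI.COMM_WORLD, 8, 8)
x = ufl.SpatialCoordinate(msh)
n = ufl.FacetNormal(msh)
flux = msh.comm.allreduce(
    fem.assemble_scalar(fem.form(ufl.inner(x, n) * ufl.ds(domain=msh))), op=MPI.SUM)
# flux is approximately 2.0 only if n is the outward unit normal on every exterior facet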
def robin_bcs(self, v, v_old): w, w_old = as_ufl(0), as_ufl(0) for r in self.bc_dict['robin']: if r['type'] == 'dashpot': if r['dir'] == 'xyz': for i in range(len(r['id'])): ds_ = ds( subdomain_data=self.io.mt_b1, subdomain_id=r['id'][i], metadata={'quadrature_degree': self.quad_degree}) w += self.vf.deltaP_ext_robin_dashpot( v, r['visc'], ds_) w_old += self.vf.deltaP_ext_robin_dashpot( v_old, r['visc'], ds_) elif r['dir'] == 'normal': # reference normal for i in range(len(r['id'])): ds_ = ds( subdomain_data=self.io.mt_b1, subdomain_id=r['id'][i], metadata={'quadrature_degree': self.quad_degree}) w += self.vf.deltaP_ext_robin_dashpot_normal( v, r['visc'], ds_) w_old += self.vf.deltaP_ext_robin_dashpot_normal( v_old, r['visc'], ds_) else: raise NameError("Unknown dir option for Robin BC!") else: raise NameError("Unknown type option for Robin BC!") return w, w_old
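Structurally, each dashpot/Robin surface term above contributes a velocity-proportional boundary integral to the weak form. A pure-UFL sketch of that shape, using the legacy element syntax seen elsewhere in this file; the surface id and viscosity value are placeholders, and the project's own deltaP_ext_robin_dashpot terms may differ in detail.

from ufl import TestFunction, TrialFunction, VectorElement, ds, inner, tetrahedron

velem = VectorElement("CG", tetrahedron, 1)
dv = TrialFunction(velem)
w = TestFunction(velem)
visc = 1.0e3                              # illustrative dashpot viscosity
robin_term = visc * inner(dv, w) * ds(3)  # surface id 3 is a placeholder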
def test_subdomains(compile_args): cell = ufl.triangle element = ufl.FiniteElement("Lagrange", cell, 1) u, v = ufl.TrialFunction(element), ufl.TestFunction(element) a0 = ufl.inner(u, v) * ufl.dx + ufl.inner(u, v) * ufl.dx(2) a1 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx a2 = ufl.inner(u, v) * ufl.dx(2) + ufl.inner(u, v) * ufl.dx(1) a3 = ufl.inner(u, v) * ufl.ds(210) + ufl.inner(u, v) * ufl.ds(0) forms = [a0, a1, a2, a3] compiled_forms, module = ffcx.codegeneration.jit.compile_forms( forms, parameters={'scalar_type': 'double'}, cffi_extra_compile_args=compile_args) for f, compiled_f in zip(forms, compiled_forms): assert compiled_f.rank == len(f.arguments()) ffi = cffi.FFI() form0 = compiled_forms[0][0] ids = np.zeros(form0.num_cell_integrals, dtype=np.int32) form0.get_cell_integral_ids(ffi.cast('int *', ids.ctypes.data)) assert ids[0] == -1 and ids[1] == 2 form1 = compiled_forms[1][0] ids = np.zeros(form1.num_cell_integrals, dtype=np.int32) form1.get_cell_integral_ids(ffi.cast('int *', ids.ctypes.data)) assert ids[0] == -1 and ids[1] == 2 form2 = compiled_forms[2][0] ids = np.zeros(form2.num_cell_integrals, dtype=np.int32) form2.get_cell_integral_ids(ffi.cast('int *', ids.ctypes.data)) assert ids[0] == 1 and ids[1] == 2 form3 = compiled_forms[3][0] ids = np.zeros(form3.num_cell_integrals, dtype=np.int32) form3.get_cell_integral_ids(ffi.cast('int *', ids.ctypes.data)) assert len(ids) == 0 ids = np.zeros(form3.num_exterior_facet_integrals, dtype=np.int32) form3.get_exterior_facet_integral_ids(ffi.cast('int *', ids.ctypes.data)) assert ids[0] == 0 and ids[1] == 210
def error_indicators(self): """ Generate and return linear form defining error indicators """ # Extract these to increase readability R_T = self._R_T R_dT = self._R_dT z = self._Ez_h z_h = self._z_h # Define linear form for computing error indicators v = self.module.TestFunction(self._DG0) eta_T = (v * inner(R_T, z - z_h) * dx(self.domain) + avg(v)*(inner(R_dT('+'), (z - z_h)('+')) + inner(R_dT('-'), (z - z_h)('-'))) * dS(self.domain) + v * inner(R_dT, z - z_h) * ds(self.domain)) return eta_T
def facet_residual(self): """ Generate and return (bilinear, linear) forms defining linear variational problem for the strong facet residual(s) """ # Define trial and test functions for the facet residuals on # discontinuous version of primal trial space R_e = self.module.TrialFunction(self._dV) v = self.module.TestFunction(self._dV) # Extract original test function in the weak residual v_h = self.weak_residual.arguments()[0] # Define forms defining linear variational problem for facet # residual v_e = self._b_e*v a_R_dT = ((inner(v_e('+'), R_e('+')) + inner(v_e('-'), R_e('-')))*dS(self.domain) + inner(v_e, R_e)*ds(self.domain)) L_R_dT = (replace(self.weak_residual, {v_h: v_e}) - inner(v_e, self._R_T)*dx(self.domain)) return (a_R_dT, L_R_dT)
def Mesh(arg, **kwargs): """ Overload Firedrake's ``Mesh`` constructor to endow the output mesh with useful quantities. The following quantities are computed by default: * cell size; * facet area. The argument and keyword arguments are passed to Firedrake's ``Mesh`` constructor, modified so that the argument could also be a mesh. """ try: mesh = firedrake.Mesh(arg, **kwargs) except TypeError: mesh = firedrake.Mesh(arg.coordinates, **kwargs) P0 = firedrake.FunctionSpace(mesh, "DG", 0) P1 = firedrake.FunctionSpace(mesh, "CG", 1) dim = mesh.topological_dimension() # Facet area boundary_markers = sorted(mesh.exterior_facets.unique_markers) one = firedrake.Function(P1).assign(1.0) bnd_len = OrderedDict( {i: firedrake.assemble(one * ufl.ds(int(i))) for i in boundary_markers} ) if dim == 2: mesh.boundary_len = bnd_len else: mesh.boundary_area = bnd_len # Cell size if dim == 2 and mesh.coordinates.ufl_element().cell() == ufl.triangle: mesh.delta_x = firedrake.interpolate(ufl.CellDiameter(mesh), P0) return mesh
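A hedged usage sketch of the wrapper above with one of Firedrake's built-in meshes; the boundary markers (1-4 for UnitSquareMesh) come from Firedrake, and boundary_len and delta_x are the attributes attached by the wrapper.

import firedrake

mesh = Mesh(firedrake.UnitSquareMesh(4, 4))
print(mesh.boundary_len)            # ordered dict: marker -> length of that part of the boundary
print(mesh.delta_x.dat.data[:3])    # per-cell diameters (2D triangular mesh)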
def set_variational_forms_and_jacobians(self): # add constant Neumann terms for large scale problem (trigger pressures) self.neumann_funcs = [] w_neumann = as_ufl(0) for n in range(len(self.pbsmall.surface_p_ids)): self.neumann_funcs.append(Function(self.pblarge.Vd_scalar)) for i in range(len(self.pbsmall.surface_p_ids[n])): ds_ = ds( subdomain_data=self.pblarge.io.mt_b1, subdomain_id=self.pbsmall.surface_p_ids[n][i], metadata={'quadrature_degree': self.pblarge.quad_degree}) # we apply the pressure onto a fixed configuration of the G&R trigger point, determined by the displacement field u_set # in the last G&R cycle, we assure that growth falls below a tolerance and hence the current and the set configuration coincide w_neumann += self.pblarge.vf.deltaW_ext_neumann_true( self.pblarge.ki.J(self.pblarge.u_set), self.pblarge.ki.F(self.pblarge.u_set), self.neumann_funcs[-1], ds_) self.pblarge.weakform_u -= w_neumann
def test_facet_integral(cell_type): """Test that the integral of a function over a facet is correct""" for count in range(5): mesh = unit_cell(cell_type) V = FunctionSpace(mesh, ("Lagrange", 2)) num_facets = mesh.num_entities(mesh.topology.dim - 1) v = Function(V) facet_function = MeshFunction("size_t", mesh, mesh.topology.dim - 1, 1) facet_function.values[:] = range(num_facets) # Functions that will have the same integral over each facet if cell_type == CellType.triangle: root = 3**0.25 # 4th root of 3 v.interpolate(lambda x: (x[0] - 1 / root)**2 + (x[1] - root / 3)**2) elif cell_type == CellType.quadrilateral: v.interpolate(lambda x: x[0] * (1 - x[0]) + x[1] * (1 - x[1])) elif cell_type == CellType.tetrahedron: s = 2**0.5 * 3**(1 / 3) # side length v.interpolate(lambda x: (x[0] - s / 2)**2 + (x[1] - s / 2 / np.sqrt(3))**2 + (x[2] - s * np.sqrt(2 / 3) / 4)**2) elif cell_type == CellType.hexahedron: v.interpolate(lambda x: x[0] * (1 - x[0]) + x[1] * (1 - x[1]) + x[2] * (1 - x[2])) # assert that the integral of these functions over each face are equal out = [] for j in range(num_facets): a = v * ds(subdomain_data=facet_function, subdomain_id=j) result = fem.assemble_scalar(a) out.append(result) assert np.isclose(result, out[0])
def ref_elasticity(tetra: bool = True, r_lvl: int = 0, out_hdf5: h5py.File = None, xdmf: bool = False, boomeramg: bool = False, kspview: bool = False, degree: int = 1): if tetra: N = 3 if degree == 1 else 2 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N) else: N = 3 mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N, CellType.hexahedron) for i in range(r_lvl): # set_log_level(LogLevel.INFO) N *= 2 if tetra: mesh = refine(mesh, redistribute=True) else: mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N, CellType.hexahedron) # set_log_level(LogLevel.ERROR) N = degree * N fdim = mesh.topology.dim - 1 V = VectorFunctionSpace(mesh, ("Lagrange", int(degree))) # Generate Dirichlet BC on lower boundary (Fixed) u_bc = Function(V) with u_bc.vector.localForm() as u_local: u_local.set(0.0) def boundaries(x): return np.isclose(x[0], np.finfo(float).eps) facets = locate_entities_boundary(mesh, fdim, boundaries) topological_dofs = locate_dofs_topological(V, fdim, facets) bc = dirichletbc(u_bc, topological_dofs) bcs = [bc] # Create traction meshtag def traction_boundary(x): return np.isclose(x[0], 1) t_facets = locate_entities_boundary(mesh, fdim, traction_boundary) facet_values = np.ones(len(t_facets), dtype=np.int32) arg_sort = np.argsort(t_facets) mt = meshtags(mesh, fdim, t_facets[arg_sort], facet_values[arg_sort]) # Elasticity parameters E = PETSc.ScalarType(1.0e4) nu = 0.1 mu = Constant(mesh, E / (2.0 * (1.0 + nu))) lmbda = Constant(mesh, E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))) g = Constant(mesh, PETSc.ScalarType((0, 0, -1e2))) x = SpatialCoordinate(mesh) f = Constant(mesh, PETSc.ScalarType(1e4)) * \ as_vector((0, -(x[2] - 0.5)**2, (x[1] - 0.5)**2)) # Stress computation def sigma(v): return (2.0 * mu * sym(grad(v)) + lmbda * tr(sym(grad(v))) * Identity(len(v))) # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(sigma(u), grad(v)) * dx rhs = inner(g, v) * ds(domain=mesh, subdomain_data=mt, subdomain_id=1) + inner(f, v) * dx num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs if MPI.COMM_WORLD.rank == 0: print("Problem size {0:d} ".format(num_dofs)) # Generate reference matrices and unconstrained solution bilinear_form = form(a) A_org = assemble_matrix(bilinear_form, bcs) A_org.assemble() null_space_org = rigid_motions_nullspace(V) A_org.setNearNullSpace(null_space_org) linear_form = form(rhs) L_org = assemble_vector(linear_form) apply_lifting(L_org, [bilinear_form], [bcs]) L_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) set_bc(L_org, bcs) opts = PETSc.Options() if boomeramg: opts["ksp_type"] = "cg" opts["ksp_rtol"] = 1.0e-5 opts["pc_type"] = "hypre" opts['pc_hypre_type'] = 'boomeramg' opts["pc_hypre_boomeramg_max_iter"] = 1 opts["pc_hypre_boomeramg_cycle_type"] = "v" # opts["pc_hypre_boomeramg_print_statistics"] = 1 else: opts["ksp_rtol"] = 1.0e-8 opts["pc_type"] = "gamg" opts["pc_gamg_type"] = "agg" opts["pc_gamg_coarse_eq_limit"] = 1000 opts["pc_gamg_sym_graph"] = True opts["mg_levels_ksp_type"] = "chebyshev" opts["mg_levels_pc_type"] = "jacobi" opts["mg_levels_esteig_ksp_type"] = "cg" opts["matptap_via"] = "scalable" opts["pc_gamg_square_graph"] = 2 opts["pc_gamg_threshold"] = 0.02 # opts["help"] = None # List all available options # opts["ksp_view"] = None # List progress of solver # Create solver, set operator and options solver = PETSc.KSP().create(MPI.COMM_WORLD) solver.setFromOptions() solver.setOperators(A_org) # Solve linear problem u_ = Function(V) start = perf_counter() with Timer("Ref solve"): solver.solve(L_org, 
u_.vector) end = perf_counter() u_.x.scatter_forward() if kspview: solver.view() it = solver.getIterationNumber() if out_hdf5 is not None: d_set = out_hdf5.get("its") d_set[r_lvl] = it d_set = out_hdf5.get("num_dofs") d_set[r_lvl] = num_dofs d_set = out_hdf5.get("solve_time") d_set[r_lvl, MPI.COMM_WORLD.rank] = end - start if MPI.COMM_WORLD.rank == 0: print("Refinement level {0:d}, Iterations {1:d}".format(r_lvl, it)) # List memory usage mem = sum(MPI.COMM_WORLD.allgather( resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) if MPI.COMM_WORLD.rank == 0: print("{1:d}: Max usage after trad. solve {0:d} (kb)" .format(mem, r_lvl)) if xdmf: # Name formatting of functions u_.name = "u_unconstrained" fname = "results/ref_elasticity_{0:d}.xdmf".format(r_lvl) with XDMFFile(MPI.COMM_WORLD, fname, "w") as out_xdmf: out_xdmf.write_mesh(mesh) out_xdmf.write_function(u_, 0.0, "Xdmf/Domain/Grid[@Name='{0:s}'][1]".format(mesh.name))
def run_dg_test(mesh, V, degree): """ Manufactured Poisson problem, solving u = x[component]**n, where n is the degree of the Lagrange function space. """ u, v = TrialFunction(V), TestFunction(V) # Exact solution x = SpatialCoordinate(mesh) u_exact = x[1]**degree # Coefficient k = Function(V) k.vector.set(2.0) k.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) # Source term f = -div(k * grad(u_exact)) # Mesh normals and element size n = FacetNormal(mesh) h = CellDiameter(mesh) h_avg = (h("+") + h("-")) / 2.0 # Penalty parameter alpha = 32 dx_ = dx(metadata={"quadrature_degree": -1}) ds_ = ds(metadata={"quadrature_degree": -1}) dS_ = dS(metadata={"quadrature_degree": -1}) with common.Timer("Compile forms"): a = inner(k * grad(u), grad(v)) * dx_ \ - k("+") * inner(avg(grad(u)), jump(v, n)) * dS_ \ - k("+") * inner(jump(u, n), avg(grad(v))) * dS_ \ + k("+") * (alpha / h_avg) * inner(jump(u, n), jump(v, n)) * dS_ \ - inner(k * grad(u), v * n) * ds_ \ - inner(u * n, k * grad(v)) * ds_ \ + (alpha / h) * inner(k * u, v) * ds_ L = inner(f, v) * dx_ - inner(k * u_exact * n, grad(v)) * ds_ \ + (alpha / h) * inner(k * u_exact, v) * ds_ for integral in a.integrals(): integral.metadata( )["quadrature_degree"] = ufl.algorithms.estimate_total_polynomial_degree( a) for integral in L.integrals(): integral.metadata( )["quadrature_degree"] = ufl.algorithms.estimate_total_polynomial_degree( L) with common.Timer("Assemble vector"): b = assemble_vector(L) b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) with common.Timer("Assemble matrix"): A = assemble_matrix(a, []) A.assemble() with common.Timer("Solve"): # Create LU linear solver solver = PETSc.KSP().create(MPI.COMM_WORLD) solver.setType(PETSc.KSP.Type.PREONLY) solver.getPC().setType(PETSc.PC.Type.LU) solver.setOperators(A) # Solve uh = Function(V) solver.solve(b, uh.vector) uh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) with common.Timer("Error functional compile"): # Calculate error M = (u_exact - uh)**2 * dx M = fem.Form(M) with common.Timer("Error assembly"): error = mesh.mpi_comm().allreduce(assemble_scalar(M), op=MPI.SUM) common.list_timings(MPI.COMM_WORLD, [common.TimingType.wall]) assert np.absolute(error) < 1.0e-14
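Both DG tests in this file create measures with quadrature_degree -1 and then overwrite each integral's metadata afterwards. If the degree is known up front, it can simply be fixed when the measures are built; the value 8 below is illustrative.

import ufl

dx_ = ufl.dx(metadata={"quadrature_degree": 8})
ds_ = ufl.ds(metadata={"quadrature_degree": 8})
dS_ = ufl.dS(metadata={"quadrature_degree": 8})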
def truth_solve(mu_unkown): print("Performing truth solve at mu =", mu_unkown) (mesh, subdomains, boundaries, restrictions) = read_mesh() # (mesh, subdomains, boundaries, restrictions) = create_mesh() dx = Measure('dx', subdomain_data=subdomains) ds = Measure('ds', subdomain_data=boundaries) W = generate_block_function_space(mesh, restrictions) # Test and trial functions block_v = BlockTestFunction(W) v, q = block_split(block_v) block_du = BlockTrialFunction(W) du, dp = block_split(block_du) block_u = BlockFunction(W) u, p = block_split(block_u) # gap # V2 = FunctionSpace(mesh, "CG", 1) # gap = Function(V2, name="Gap") # obstacle R = 0.25 d = 0.15 x_0 = mu_unkown[0] y_0 = mu_unkown[1] obstacle = Expression("-d+(pow(x[0]-x_0,2)+pow(x[1]-y_0, 2))/2/R", d=d, R=R , x_0 = x_0, y_0 = y_0, degree=0) # Constitutive parameters E = Constant(10.0) nu = Constant(0.3) mu, lmbda = Constant(E/(2*(1 + nu))), Constant(E*nu/((1 + nu)*(1 - 2*nu))) B = Constant((0.0, 0.0, 0.0)) # Body force per unit volume T = Constant((0.0, 0.0, 0.0)) # Traction force on the boundary # Kinematics # ----------------------------------------------------------------------------- mesh_dim = mesh.topology().dim() # Spatial dimension I = Identity(mesh_dim) # Identity tensor F = I + grad(u) # Deformation gradient C = F.T*F # Right Cauchy-Green tensor J = det(F) # 3rd invariant of the deformation tensor # Strain function def P(u): # P = dW/dF: return mu*(F - inv(F.T)) + lmbda*ln(J)*inv(F.T) def eps(v): return sym(grad(v)) def sigma(v): return lmbda*tr(eps(v))*Identity(3) + 2.0*mu*eps(v) # Definition of The Mackauley bracket <x>+ def ppos(x): return (x+abs(x))/2. # Define the augmented lagrangian def aug_l(x): return x + pen*(obstacle-u[2]) pen = Constant(1e4) # Boundary conditions # bottom_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 2) # left_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 3) # right_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 4) # front_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 5) # back_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 6) # # sym_x_bc = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 2) # # sym_y_bc = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 3) # # bc = BlockDirichletBC([bottom_bc, sym_x_bc, sym_y_bc]) # bc = BlockDirichletBC([bottom_bc, left_bc, right_bc, front_bc, back_bc]) bottom_bc = DirichletBC(W.sub(0), Constant((0., 0., 0.)), boundaries, 2) left_bc_x = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 3) left_bc_y = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 3) right_bc_x = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 4) right_bc_y = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 4) front_bc_x = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 5) front_bc_y = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 5) back_bc_x = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 6) back_bc_y = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 6) # sym_x_bc = DirichletBC(W.sub(0).sub(0), Constant(0.), boundaries, 2) # sym_y_bc = DirichletBC(W.sub(0).sub(1), Constant(0.), boundaries, 3) # bc = BlockDirichletBC([bottom_bc, sym_x_bc, sym_y_bc]) bc = BlockDirichletBC([bottom_bc, left_bc_x, left_bc_y, \ right_bc_x, right_bc_y, front_bc_x, front_bc_y, \ back_bc_x, back_bc_y]) # Variational forms # F = inner(sigma(u), eps(v))*dx + pen*dot(v[2], ppos(u[2]-obstacle))*ds(1) # F = [inner(sigma(u), eps(v))*dx - aug_l(l)*v[2]*ds(1) + 
ppos(aug_l(l))*v[2]*ds(1), # (obstacle-u[2])*v*ds(1) - (1/pen)*ppos(aug_l(l))*v*ds(1)] # F_a = inner(sigma(u), eps(v))*dx # F_b = - aug_l(p)*v[2]*ds(1) + ppos(aug_l(p))*v[2]*ds(1) # F_c = (obstacle-u[2])*q*ds(1) # F_d = - (1/pen)*ppos(aug_l(p))*q*ds(1) # # block_F = [[F_a, F_b], # [F_c, F_d]] F_a = inner(P(u), grad(v))*dx - dot(B, v)*dx - dot(T, v)*ds \ - aug_l(p)*v[2]*ds(1) + ppos(aug_l(p))*v[2]*ds(1) F_b = (obstacle-u[2])*q*ds(1) - (1/pen)*ppos(aug_l(p))*q*ds(1) block_F = [F_a, F_b] J = block_derivative(block_F, block_u, block_du) # Setup solver problem = BlockNonlinearProblem(block_F, block_u, bc, J) solver = BlockPETScSNESSolver(problem) solver.parameters.update({ "linear_solver": "mumps", "absolute_tolerance": 1E-4, "relative_tolerance": 1E-4, "maximum_iterations": 50, "report": True, "error_on_nonconvergence": True }) # solver.parameters.update({ # "linear_solver": "cg", # "absolute_tolerance": 1E-4, # "relative_tolerance": 1E-4, # "maximum_iterations": 50, # "report": True, # "error_on_nonconvergence": True # }) # Perform a fake loop over time. Note how up will store the solution at the last time. # Q. for? # A. You can remove it, since your problem is stationary. The template was targeting # a final application which was transient, but in which the ROM should have only # described the final solution (when reaching the steady state). # for _ in range(2): # solver.solve() a1 = solver.solve() print(a1) # save all the solution here as a function of time # Return the solution at the last time # Q. block_u or block # A. I think block_u, it will split split among the components elsewhere return block_u
def facet_normal_approximation(V, mt: _cpp.mesh.MeshTags_int32, mt_id: int, tangent=False, jit_params: dict = {}, form_compiler_params: dict = {}): """ Approximate the facet normal by projecting it into the function space for a set of facets Parameters ---------- V The function space to project into mt The `dolfinx.mesh.MeshTagsMetaClass` containing facet markers mt_id The id for the facets in `mt` we want to represent the normal at tangent To approximate the tangent to the facet set this flag to `True` jit_params Parameters used in CFFI JIT compilation of C code generated by FFCx. See `DOLFINx-documentation <https://github.com/FEniCS/dolfinx/blob/main/python/dolfinx/jit.py#L22-L37>` for all available parameters. Takes priority over all other parameter values. form_compiler_params Parameters used in FFCx compilation of this form. Run `ffcx - -help` at the commandline to see all available options. Takes priority over all other parameter values, except for `scalar_type` which is determined by DOLFINx. """ timer = _common.Timer("~MPC: Facet normal projection") comm = V.mesh.comm n = ufl.FacetNormal(V.mesh) nh = _fem.Function(V) u, v = ufl.TrialFunction(V), ufl.TestFunction(V) ds = ufl.ds(domain=V.mesh, subdomain_data=mt, subdomain_id=mt_id) if tangent: if V.mesh.geometry.dim == 1: raise ValueError("Tangent not defined for 1D problem") elif V.mesh.geometry.dim == 2: a = ufl.inner(u, v) * ds L = ufl.inner(ufl.as_vector([-n[1], n[0]]), v) * ds else: def tangential_proj(u, n): """ See for instance: https://link.springer.com/content/pdf/10.1023/A:1022235512626.pdf """ return (ufl.Identity(u.ufl_shape[0]) - ufl.outer(n, n)) * u c = _fem.Constant(V.mesh, [1, 1, 1]) a = ufl.inner(u, v) * ds L = ufl.inner(tangential_proj(c, n), v) * ds else: a = (ufl.inner(u, v) * ds) L = ufl.inner(n, v) * ds # Find all dofs that are not boundary dofs imap = V.dofmap.index_map all_blocks = np.arange(imap.size_local, dtype=np.int32) top_blocks = _fem.locate_dofs_topological(V, V.mesh.topology.dim - 1, mt.find(mt_id)) deac_blocks = all_blocks[np.isin(all_blocks, top_blocks, invert=True)] # Note there should be a better way to do this # Create sparsity pattern only for constraint + bc bilinear_form = _fem.form(a, jit_params=jit_params, form_compiler_params=form_compiler_params) pattern = _fem.create_sparsity_pattern(bilinear_form) pattern.insert_diagonal(deac_blocks) pattern.assemble() u_0 = _fem.Function(V) u_0.vector.set(0) bc_deac = _fem.dirichletbc(u_0, deac_blocks) A = _cpp.la.petsc.create_matrix(comm, pattern) A.zeroEntries() # Assemble the matrix with all entries form_coeffs = _cpp.fem.pack_coefficients(bilinear_form) form_consts = _cpp.fem.pack_constants(bilinear_form) _cpp.fem.petsc.assemble_matrix(A, bilinear_form, form_consts, form_coeffs, [bc_deac]) if bilinear_form.function_spaces[0] is bilinear_form.function_spaces[1]: A.assemblyBegin(PETSc.Mat.AssemblyType.FLUSH) A.assemblyEnd(PETSc.Mat.AssemblyType.FLUSH) _cpp.fem.petsc.insert_diagonal(A, bilinear_form.function_spaces[0], [bc_deac], 1.0) A.assemble() linear_form = _fem.form(L, jit_params=jit_params, form_compiler_params=form_compiler_params) b = _fem.petsc.assemble_vector(linear_form) _fem.petsc.apply_lifting(b, [bilinear_form], [[bc_deac]]) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) _fem.petsc.set_bc(b, [bc_deac]) # Solve Linear problem solver = PETSc.KSP().create(MPI.COMM_WORLD) solver.setType("cg") solver.rtol = 1e-8 solver.setOperators(A) solver.solve(b, nh.vector) 
nh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) timer.stop() return nh
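A short usage sketch for the helper above; msh is assumed to be a dolfinx mesh and mt a facet MeshTags object containing facets with id 1 (both placeholders, see the meshtags examples elsewhere in this file).

V = _fem.VectorFunctionSpace(msh, ("Lagrange", 1))
nh = facet_normal_approximation(V, mt, 1)                 # approximate outward normal
th = facet_normal_approximation(V, mt, 1, tangent=True)   # approximate tangent instead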
def test_div_grad_then_integrate_over_cells_and_boundary(): # Define 2D geometry n = 10 mesh = RectangleMesh(Point(0.0, 0.0), Point(2.0, 3.0), 2 * n, 3 * n) x, y = SpatialCoordinate(mesh) xs = 0.1 + 0.8 * x / 2 # scaled to be within [0.1,0.9] # ys = 0.1 + 0.8 * y / 3 # scaled to be within [0.1,0.9] n = FacetNormal(mesh) # Define list of expressions to test, and configure accuracies # these expressions are known to pass with. The reason some # functions are less accurately integrated is likely that the # default choice of quadrature rule is not perfect F_list = [] def reg(exprs, acc=10): for expr in exprs: F_list.append((expr, acc)) # FIXME: 0*dx and 1*dx fails in the ufl-ffc-jit framework somewhere # reg([Constant(0.0, cell=cell)]) # reg([Constant(1.0, cell=cell)]) monomial_list = [x**q for q in range(2, 6)] reg(monomial_list) reg([2.3 * p + 4.5 * q for p in monomial_list for q in monomial_list]) reg([xs**xs]) reg( [xs**(xs**2)], 8 ) # Note: Accuracies here are from 1D case, not checked against 2D results. reg([xs**(xs**3)], 6) reg([xs**(xs**4)], 2) # Special functions: reg([atan(xs)], 8) reg([sin(x), cos(x), exp(x)], 5) reg([ln(xs), pow(x, 2.7), pow(2.7, x)], 3) reg([asin(xs), acos(xs)], 1) reg([tan(xs)], 7) # To handle tensor algebra, make an x dependent input tensor # xx and square all expressions def reg2(exprs, acc=10): for expr in exprs: F_list.append((inner(expr, expr), acc)) xx = as_matrix([[2 * x**2, 3 * x**3], [11 * x**5, 7 * x**4]]) xxs = as_matrix([[2 * xs**2, 3 * xs**3], [11 * xs**5, 7 * xs**4]]) x3v = as_vector([3 * x**2, 5 * x**3, 7 * x**4]) cc = as_matrix([[2, 3], [4, 5]]) reg2( [xx] ) # TODO: Make unit test for UFL from this, results in listtensor with free indices reg2([x3v]) reg2([cross(3 * x3v, as_vector([-x3v[1], x3v[0], x3v[2]]))]) reg2([xx.T]) reg2([tr(xx)]) reg2([det(xx)]) reg2([dot(xx, 0.1 * xx)]) reg2([outer(xx, xx.T)]) reg2([dev(xx)]) reg2([sym(xx)]) reg2([skew(xx)]) reg2([elem_mult(7 * xx, cc)]) reg2([elem_div(7 * xx, xx + cc)]) reg2([elem_pow(1e-3 * xxs, 1e-3 * cc)]) reg2([elem_pow(1e-3 * cc, 1e-3 * xx)]) reg2([elem_op(lambda z: sin(z) + 2, 0.03 * xx)], 2) # pretty inaccurate... # FIXME: Add tests for all UFL operators: # These cause discontinuities and may be harder to test in the # above fashion: # 'inv', 'cofac', # 'eq', 'ne', 'le', 'ge', 'lt', 'gt', 'And', 'Or', 'Not', # 'conditional', 'sign', # 'jump', 'avg', # 'LiftingFunction', 'LiftingOperator', # FIXME: Test other derivatives: (but algorithms for operator # derivatives are the same!): # 'variable', 'diff', # 'Dx', 'grad', 'div', 'curl', 'rot', 'Dn', 'exterior_derivative', # Run through all operators defined above and compare integrals debug = 0 if debug: F_list = F_list[1:] for F, acc in F_list: if debug: print('\n', "F:", str(F)) # Integrate over domain and its boundary int_dx = assemble(div(grad(F)) * dx(mesh)) # noqa int_ds = assemble(dot(grad(F), n) * ds(mesh)) # noqa if debug: print(int_dx, int_ds) # Compare results. Using custom relative delta instead of # decimal digits here because some numbers are >> 1. delta = min(abs(int_dx), abs(int_ds)) * 10**-acc assert int_dx - int_ds <= delta
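For one concrete integrand, the identity being exercised is simply the divergence theorem: the cell integral of div(grad F) equals the boundary integral of grad(F)·n. A sketch reusing the names defined in the test above (mesh, x, y, n and the legacy DOLFIN assemble):

F = sin(x) * cos(y)
int_dx = assemble(div(grad(F)) * dx(mesh))
int_ds = assemble(dot(grad(F), n) * ds(mesh))
# The two values agree up to quadrature error (roughly five digits for this integrand).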
# Copyright (C) 2008 Anders Logg # # This file is part of UFL. # # UFL is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # UFL is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with UFL. If not, see <http://www.gnu.org/licenses/>. # # This example illustrates how to define a form over a # given subdomain of a mesh, in this case a functional. from ufl import (Coefficient, FiniteElement, TestFunction, TrialFunction, ds, dx, tetrahedron) element = FiniteElement("CG", tetrahedron, 1) v = TestFunction(element) u = TrialFunction(element) f = Coefficient(element) M = f * dx(2) + f * ds(5)
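The same functional can be spelled with ufl.Measure objects instead of calling dx/ds directly; the subdomain ids 2 and 5 are the ones used in M above.

from ufl import Coefficient, FiniteElement, Measure, tetrahedron

element = FiniteElement("CG", tetrahedron, 1)
f = Coefficient(element)
dx = Measure("dx")
ds = Measure("ds")
M_alt = f * dx(2) + f * ds(5)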
def set_variational_forms_and_jacobians(self): self.cq, self.cq_old, self.dcq, self.dforce = [], [], [], [] self.coupfuncs, self.coupfuncs_old = [], [] if self.coupling_type == 'monolithic_lagrange': # Lagrange multiplier stiffness matrix (currently treated with FD!) self.K_lm = PETSc.Mat().createAIJ(size=(self.num_coupling_surf, self.num_coupling_surf), bsize=None, nnz=None, csr=None, comm=self.comm) self.K_lm.setUp() # Lagrange multipliers self.lm, self.lm_old = self.K_lm.createVecLeft( ), self.K_lm.createVecLeft() # 3D fluxes self.constr, self.constr_old = [], [] self.power_coupling, self.power_coupling_old = as_ufl(0), as_ufl(0) # coupling variational forms and Jacobian contributions for n in range(self.num_coupling_surf): self.pr0D = expression.template() self.coupfuncs.append(Function( self.pbs.Vd_scalar)), self.coupfuncs_old.append( Function(self.pbs.Vd_scalar)) self.coupfuncs[-1].interpolate( self.pr0D.evaluate), self.coupfuncs_old[-1].interpolate( self.pr0D.evaluate) cq_, cq_old_ = as_ufl(0), as_ufl(0) for i in range(len(self.surface_vq_ids[n])): ds_vq = ds( subdomain_data=self.pbs.io.mt_b1, subdomain_id=self.surface_vq_ids[n][i], metadata={'quadrature_degree': self.pbs.quad_degree}) if self.coupling_params['coupling_quantity'] == 'flux': assert (self.coupling_type == 'monolithic_direct') cq_ += self.pbs.vf.flux(self.pbs.v, ds_vq) elif self.coupling_params['coupling_quantity'] == 'pressure': assert (self.coupling_type == 'monolithic_lagrange') cq_ += self.pbs.vf.flux(self.pbs.v, ds_vq) else: raise NameError( "Unknown coupling quantity! Choose flux or pressure!") self.cq.append(cq_), self.cq_old.append(cq_old_) self.dcq.append(derivative(self.cq[-1], self.pbs.v, self.pbs.dv)) df_ = as_ufl(0) for i in range(len(self.surface_p_ids[n])): ds_p = ds(subdomain_data=self.pbs.io.mt_b1, subdomain_id=self.surface_p_ids[n][i], metadata={'quadrature_degree': self.pbs.quad_degree}) df_ += self.pbs.timefac * self.pbs.vf.surface(ds_p) # add to fluid rhs contributions self.power_coupling += self.pbs.vf.deltaP_ext_neumann_normal( self.coupfuncs[-1], ds_p) self.power_coupling_old += self.pbs.vf.deltaP_ext_neumann_normal( self.coupfuncs_old[-1], ds_p) self.dforce.append(df_) # minus sign, since contribution to external power! self.pbs.weakform_u += -self.pbs.timefac * self.power_coupling - ( 1. - self.pbs.timefac) * self.power_coupling_old # add to fluid Jacobian self.pbs.jac_uu += -self.pbs.timefac * derivative( self.power_coupling, self.pbs.v, self.pbs.dv) if self.coupling_type == 'monolithic_lagrange': # old Lagrange multipliers - initialize with initial pressures self.pbf.cardvasc0D.initialize_lm( self.lm, self.pbf.time_params['initial_conditions']) self.pbf.cardvasc0D.initialize_lm( self.lm_old, self.pbf.time_params['initial_conditions'])
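The surface quantities accumulated above are flux-type functionals over tagged boundaries. A generic sketch of that shape in plain UFL; the project's actual vf.flux lives in its variational-form class and may include additional factors.

import ufl

def surface_flux(v, n, ds_measure):
    """Volumetric flux of a velocity field v through the surface carried by ds_measure."""
    return ufl.dot(v, n) * ds_measure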
def bench_elasticity_edge(tetra: bool = True, r_lvl: int = 0, out_hdf5=None, xdmf: bool = False, boomeramg: bool = False, kspview: bool = False, degree: int = 1, info: bool = False): N = 3 for i in range(r_lvl): N *= 2 ct = CellType.tetrahedron if tetra else CellType.hexahedron mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N, ct) # Get number of unknowns on each edge V = VectorFunctionSpace(mesh, ("Lagrange", int(degree))) # Generate Dirichlet BC (Fixed) u_bc = Function(V) with u_bc.vector.localForm() as u_local: u_local.set(0.0) def boundaries(x): return np.isclose(x[0], np.finfo(float).eps) fdim = mesh.topology.dim - 1 facets = locate_entities_boundary(mesh, fdim, boundaries) topological_dofs = locate_dofs_topological(V, fdim, facets) bc = dirichletbc(u_bc, topological_dofs) bcs = [bc] def PeriodicBoundary(x): return np.logical_and(np.isclose(x[0], 1), np.isclose(x[2], 0)) def periodic_relation(x): out_x = np.zeros(x.shape) out_x[0] = x[0] out_x[1] = x[1] out_x[2] = x[2] + 1 return out_x with Timer("~Elasticity: Initialize MPC"): edim = mesh.topology.dim - 2 edges = locate_entities_boundary(mesh, edim, PeriodicBoundary) arg_sort = np.argsort(edges) periodic_mt = meshtags(mesh, edim, edges[arg_sort], np.full(len(edges), 2, dtype=np.int32)) mpc = MultiPointConstraint(V) mpc.create_periodic_constraint_topological(V, periodic_mt, 2, periodic_relation, bcs, scale=0.5) mpc.finalize() # Create traction meshtag def traction_boundary(x): return np.isclose(x[0], 1) t_facets = locate_entities_boundary(mesh, fdim, traction_boundary) facet_values = np.ones(len(t_facets), dtype=np.int32) arg_sort = np.argsort(t_facets) mt = meshtags(mesh, fdim, t_facets[arg_sort], facet_values) # Elasticity parameters E = PETSc.ScalarType(1.0e4) nu = 0.1 mu = Constant(mesh, E / (2.0 * (1.0 + nu))) lmbda = Constant(mesh, E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))) g = Constant(mesh, PETSc.ScalarType((0, 0, -1e2))) x = SpatialCoordinate(mesh) f = Constant(mesh, PETSc.ScalarType(1e3)) * as_vector((0, -(x[2] - 0.5)**2, (x[1] - 0.5)**2)) # Stress computation def epsilon(v): return sym(grad(v)) def sigma(v): return (2.0 * mu * epsilon(v) + lmbda * tr(epsilon(v)) * Identity(len(v))) # Define variational problem u = TrialFunction(V) v = TestFunction(V) a = inner(sigma(u), grad(v)) * dx rhs = inner(g, v) * ds(domain=mesh, subdomain_data=mt, subdomain_id=1) + inner(f, v) * dx # Setup MPC system if info: log_info(f"Run {r_lvl}: Assembling matrix and vector") bilinear_form = form(a) linear_form = form(rhs) with Timer("~Elasticity: Assemble LHS and RHS"): A = assemble_matrix(bilinear_form, mpc, bcs=bcs) b = assemble_vector(linear_form, mpc) # Create nullspace for elasticity problem and assign to matrix null_space = rigid_motions_nullspace(mpc.function_space) A.setNearNullSpace(null_space) # Apply boundary conditions apply_lifting(b, [bilinear_form], [bcs], mpc) b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE) set_bc(b, bcs) opts = PETSc.Options() if boomeramg: opts["ksp_type"] = "cg" opts["ksp_rtol"] = 1.0e-5 opts["pc_type"] = "hypre" opts['pc_hypre_type'] = 'boomeramg' opts["pc_hypre_boomeramg_max_iter"] = 1 opts["pc_hypre_boomeramg_cycle_type"] = "v" # opts["pc_hypre_boomeramg_print_statistics"] = 1 else: opts["ksp_rtol"] = 1.0e-8 opts["pc_type"] = "gamg" opts["pc_gamg_type"] = "agg" opts["pc_gamg_coarse_eq_limit"] = 1000 opts["pc_gamg_sym_graph"] = True opts["mg_levels_ksp_type"] = "chebyshev" opts["mg_levels_pc_type"] = "jacobi" opts["mg_levels_esteig_ksp_type"] = "cg" opts["matptap_via"] = 
"scalable" opts["pc_gamg_square_graph"] = 2 opts["pc_gamg_threshold"] = 0.02 # opts["help"] = None # List all available options # opts["ksp_view"] = None # List progress of solver # Setup PETSc solver solver = PETSc.KSP().create(MPI.COMM_WORLD) solver.setFromOptions() if info: log_info(f"Run {r_lvl}: Solving") with Timer("~Elasticity: Solve problem") as timer: solver.setOperators(A) uh = b.copy() uh.set(0) solver.solve(b, uh) uh.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) mpc.backsubstitution(uh) solver_time = timer.elapsed() if kspview: solver.view() mem = sum(MPI.COMM_WORLD.allgather(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)) it = solver.getIterationNumber() num_dofs = V.dofmap.index_map.size_global * V.dofmap.index_map_bs if out_hdf5 is not None: d_set = out_hdf5.get("its") d_set[r_lvl] = it d_set = out_hdf5.get("num_dofs") d_set[r_lvl] = num_dofs d_set = out_hdf5.get("num_slaves") d_set[r_lvl, MPI.COMM_WORLD.rank] = mpc.num_local_slaves d_set = out_hdf5.get("solve_time") d_set[r_lvl, MPI.COMM_WORLD.rank] = solver_time[0] if info: log_info(f"Lvl: {r_lvl}, Its: {it}, max Mem: {mem}, dim(V): {num_dofs}") if xdmf: # Write solution to file u_h = Function(mpc.function_space) u_h.vector.setArray(uh.array) u_h.name = "u_mpc" fname = f"results/bench_elasticity_edge_{r_lvl}.xdmf" with XDMFFile(MPI.COMM_WORLD, fname, "w") as outfile: outfile.write_mesh(mesh) outfile.write_function(u_h)
def test_assemble_functional_ds(mode): mesh = create_unit_square(MPI.COMM_WORLD, 12, 12, ghost_mode=mode) M = form(1.0 * ds(domain=mesh)) value = assemble_scalar(M) value = mesh.comm.allreduce(value, op=MPI.SUM) assert value == pytest.approx(4.0, 1e-12)
def neumann_bcs(self, V, V_real): w, w_old = as_ufl(0), as_ufl(0) for n in self.bc_dict['neumann']: if n['dir'] == 'xyz': func, func_old = Function(V), Function(V) if 'curve' in n.keys(): load = expression.template_vector() load.val_x, load.val_y, load.val_z = self.ti.timecurves( n['curve'][0])(self.ti.t_init), self.ti.timecurves( n['curve'][1])(self.ti.t_init), self.ti.timecurves( n['curve'][2])(self.ti.t_init) func.interpolate(load.evaluate), func_old.interpolate( load.evaluate) self.ti.funcs_to_update_vec.append({ func: [ self.ti.timecurves(n['curve'][0]), self.ti.timecurves(n['curve'][1]), self.ti.timecurves(n['curve'][2]) ] }) self.ti.funcs_to_update_vec_old.append({ func_old: [ self.ti.timecurves(n['curve'][0]), self.ti.timecurves(n['curve'][1]), self.ti.timecurves(n['curve'][2]) ] }) else: func.vector.set( n['val'] ) # currently only one value for all directions - use constant load function otherwise! for i in range(len(n['id'])): ds_ = ds(subdomain_data=self.io.mt_b1, subdomain_id=n['id'][i], metadata={'quadrature_degree': self.quad_degree}) w += self.vf.deltaP_ext_neumann(func, ds_) w_old += self.vf.deltaP_ext_neumann(func_old, ds_) elif n['dir'] == 'normal': # reference normal func, func_old = Function(V_real), Function(V_real) if 'curve' in n.keys(): load = expression.template() load.val = self.ti.timecurves(n['curve'])(self.ti.t_init) func.interpolate(load.evaluate), func_old.interpolate( load.evaluate) self.ti.funcs_to_update.append( {func: self.ti.timecurves(n['curve'])}) self.ti.funcs_to_update_old.append( {func_old: self.ti.timecurves(n['curve'])}) else: func.vector.set(n['val']) for i in range(len(n['id'])): ds_ = ds(subdomain_data=self.io.mt_b1, subdomain_id=n['id'][i], metadata={'quadrature_degree': self.quad_degree}) w += self.vf.deltaP_ext_neumann_normal(func, ds_) w_old += self.vf.deltaP_ext_neumann_normal(func_old, ds_) else: raise NameError("Unknown dir option for Neumann BC!") return w, w_old
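The core of each branch above is the standard Neumann (traction) contribution to the linear form, inner(t, v) * ds over the tagged surface. A self-contained sketch assuming the newer dolfinx API; the surface id and traction values are illustrative.

from mpi4py import MPI
from petsc4py import PETSc
import numpy as np
import ufl
from dolfinx import fem, mesh

msh = mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)
fdim = msh.topology.dim - 1
facets = mesh.locate_entities_boundary(msh, fdim, lambda x: np.isclose(x[0], 1.0))
mt = mesh.meshtags(msh, fdim, facets, np.full(len(facets), 2, dtype=np.int32))

V = fem.VectorFunctionSpace(msh, ("Lagrange", 1))
v = ufl.TestFunction(V)
t = fem.Constant(msh, PETSc.ScalarType((0.0, -1.0)))   # traction vector
ds_ = ufl.ds(domain=msh, subdomain_data=mt, subdomain_id=2)
L_neumann = ufl.inner(t, v) * ds_   # add this to the right-hand-side form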
u5 = Function(V) with u5.vector.localForm() as bc_local: bc_local.set(5.0) u0 = Function(V) with u0.vector.localForm() as bc_local: bc_local.set(0.0) # Define Dirichlet boundary conditions at top and bottom boundaries bcs = [ DirichletBC(V, u5, np.where(mf_line.values == tag_info['TOP'])[0]), DirichletBC(V, u0, np.where(mf_line.values == tag_info['BOTTOM'])[0]) ] dx = dx(subdomain_data=mf_triangle) ds = ds(subdomain_data=mf_line) # Define variational form F = (inner(a0 * grad(u), grad(v)) * dx(tag_info['DOMAIN']) + inner(a1 * grad(u), grad(v)) * dx(tag_info['OBSTACLE']) - g_L * v * ds(tag_info['LEFT']) - g_R * v * ds(tag_info['RIGHT']) - f * v * dx(tag_info['DOMAIN']) - f * v * dx(tag_info['OBSTACLE'])) # Separate left and right hand sides of equation a, L = lhs(F), rhs(F) # Solve problem u = Function(V) solve(a == L, u, bcs) with XDMFFile(dolfin.MPI.comm_world, "output.xdmf") as xdmf_outfile:
def set_variational_forms_and_jacobians(self): self.cq, self.cq_old, self.dcq, self.dforce = [], [], [], [] self.coupfuncs, self.coupfuncs_old = [], [] # Lagrange multiplier stiffness matrix (most likely to be zero!) self.K_lm = PETSc.Mat().createAIJ(size=(self.num_coupling_surf, self.num_coupling_surf), bsize=None, nnz=None, csr=None, comm=self.comm) self.K_lm.setUp() # Lagrange multipliers self.lm, self.lm_old = self.K_lm.createVecLeft( ), self.K_lm.createVecLeft() # 3D constraint variable (volume or flux) self.constr, self.constr_old = [], [] self.work_coupling, self.work_coupling_old, self.work_coupling_prestr = as_ufl( 0), as_ufl(0), as_ufl(0) # coupling variational forms and Jacobian contributions for n in range(self.num_coupling_surf): self.pr0D = expression.template() self.coupfuncs.append(Function( self.pbs.Vd_scalar)), self.coupfuncs_old.append( Function(self.pbs.Vd_scalar)) self.coupfuncs[-1].interpolate( self.pr0D.evaluate), self.coupfuncs_old[-1].interpolate( self.pr0D.evaluate) cq_, cq_old_ = as_ufl(0), as_ufl(0) for i in range(len(self.surface_c_ids[n])): ds_vq = ds( subdomain_data=self.pbs.io.mt_b1, subdomain_id=self.surface_c_ids[n][i], metadata={'quadrature_degree': self.pbs.quad_degree}) # currently, only volume or flux constraints are supported if self.coupling_params['constraint_quantity'] == 'volume': cq_ += self.pbs.vf.volume(self.pbs.u, self.pbs.ki.J(self.pbs.u), self.pbs.ki.F(self.pbs.u), ds_vq) cq_old_ += self.pbs.vf.volume( self.pbs.u_old, self.pbs.ki.J(self.pbs.u_old), self.pbs.ki.F(self.pbs.u_old), ds_vq) elif self.coupling_params['constraint_quantity'] == 'flux': cq_ += self.pbs.vf.flux(self.pbs.vel, self.pbs.ki.J(self.pbs.u), self.pbs.ki.F(self.pbs.u), ds_vq) cq_old_ += self.pbs.vf.flux(self.pbs.v_old, self.pbs.ki.J(self.pbs.u_old), self.pbs.ki.F(self.pbs.u_old), ds_vq) else: raise NameError( "Unknown constraint quantity! Choose either volume or flux!" ) self.cq.append(cq_), self.cq_old.append(cq_old_) self.dcq.append(derivative(self.cq[-1], self.pbs.u, self.pbs.du)) df_ = as_ufl(0) for i in range(len(self.surface_p_ids[n])): ds_p = ds(subdomain_data=self.pbs.io.mt_b1, subdomain_id=self.surface_p_ids[n][i], metadata={'quadrature_degree': self.pbs.quad_degree}) df_ += self.pbs.timefac * self.pbs.vf.surface( self.pbs.ki.J(self.pbs.u), self.pbs.ki.F(self.pbs.u), ds_p) # add to solid rhs contributions self.work_coupling += self.pbs.vf.deltaW_ext_neumann_true( self.pbs.ki.J(self.pbs.u), self.pbs.ki.F(self.pbs.u), self.coupfuncs[-1], ds_p) self.work_coupling_old += self.pbs.vf.deltaW_ext_neumann_true( self.pbs.ki.J(self.pbs.u_old), self.pbs.ki.F(self.pbs.u_old), self.coupfuncs_old[-1], ds_p) # for prestressing, true loads should act on the reference, not the current configuration if self.pbs.prestress_initial: self.work_coupling_prestr += self.pbs.vf.deltaW_ext_neumann_refnormal( self.coupfuncs_old[-1], ds_p) self.dforce.append(df_) # minus sign, since contribution to external work! self.pbs.weakform_u += -self.pbs.timefac * self.work_coupling - ( 1. - self.pbs.timefac) * self.work_coupling_old # add to solid Jacobian self.pbs.jac_uu += -self.pbs.timefac * derivative( self.work_coupling, self.pbs.u, self.pbs.du)
def test_manufactured_poisson_dg(degree, filename, datadir): """ Manufactured Poisson problem, solving u = x[component]**n, where n is the degree of the Lagrange function space. """ with XDMFFile(MPI.COMM_WORLD, os.path.join(datadir, filename), "r", encoding=XDMFFile.Encoding.ASCII) as xdmf: mesh = xdmf.read_mesh(name="Grid") V = FunctionSpace(mesh, ("DG", degree)) u, v = TrialFunction(V), TestFunction(V) # Exact solution x = SpatialCoordinate(mesh) u_exact = x[1]**degree # Coefficient k = Function(V) k.vector.set(2.0) k.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) # Source term f = -div(k * grad(u_exact)) # Mesh normals and element size n = FacetNormal(mesh) h = CellDiameter(mesh) h_avg = (h("+") + h("-")) / 2.0 # Penalty parameter alpha = 32 dx_ = dx(metadata={"quadrature_degree": -1}) ds_ = ds(metadata={"quadrature_degree": -1}) dS_ = dS(metadata={"quadrature_degree": -1}) a = inner(k * grad(u), grad(v)) * dx_ \ - k("+") * inner(avg(grad(u)), jump(v, n)) * dS_ \ - k("+") * inner(jump(u, n), avg(grad(v))) * dS_ \ + k("+") * (alpha / h_avg) * inner(jump(u, n), jump(v, n)) * dS_ \ - inner(k * grad(u), v * n) * ds_ \ - inner(u * n, k * grad(v)) * ds_ \ + (alpha / h) * inner(k * u, v) * ds_ L = inner(f, v) * dx_ - inner(k * u_exact * n, grad(v)) * ds_ \ + (alpha / h) * inner(k * u_exact, v) * ds_ for integral in a.integrals(): integral.metadata( )["quadrature_degree"] = ufl.algorithms.estimate_total_polynomial_degree( a) for integral in L.integrals(): integral.metadata( )["quadrature_degree"] = ufl.algorithms.estimate_total_polynomial_degree( L) b = assemble_vector(L) b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE) A = assemble_matrix(a, []) A.assemble() # Create LU linear solver solver = PETSc.KSP().create(MPI.COMM_WORLD) solver.setType(PETSc.KSP.Type.PREONLY) solver.getPC().setType(PETSc.PC.Type.LU) solver.setOperators(A) # Solve uh = Function(V) solver.solve(b, uh.vector) uh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD) error = mesh.mpi_comm().allreduce(assemble_scalar((u_exact - uh)**2 * dx), op=MPI.SUM) assert np.absolute(error) < 1.0e-14
def clement_interpolant(source, target_space=None, boundary_tag=None): r""" Compute the Clement interpolant of a :math:`\mathbb P0` source field, i.e. take the volume average over neighbouring cells at each vertex. :arg source: the :math:`\mathbb P0` source field :kwarg target_space: the :math:`\mathbb P1` space to interpolate into :boundary_tag: optional boundary tag to compute the Clement interpolant over. """ V = source.function_space() assert V.ufl_element().family() == "Discontinuous Lagrange" assert V.ufl_element().degree() == 0 rank = len(V.ufl_element().value_shape()) mesh = V.mesh() dim = mesh.topological_dimension() P1 = firedrake.FunctionSpace(mesh, "CG", 1) dX = ufl.dx if boundary_tag is None else ufl.ds(boundary_tag) if target_space is None: if rank == 0: target_space = P1 elif rank == 1: target_space = firedrake.VectorFunctionSpace(mesh, "CG", 1) elif rank == 2: target_space = firedrake.TensorFunctionSpace(mesh, "CG", 1) else: raise ValueError(f"Rank-{rank} tensors are not supported.") else: assert target_space.ufl_element().family() == "Lagrange" assert target_space.ufl_element().degree() == 1 target = firedrake.Function(target_space) # Compute the patch volume at each vertex if boundary_tag is None: P0 = firedrake.FunctionSpace(mesh, "DG", 0) dx = ufl.dx(domain=mesh) volume = firedrake.assemble(firedrake.TestFunction(P0) * dx) else: volume = get_facet_areas(mesh) patch_volume = firedrake.Function(P1) kernel = "for (int i=0; i < p.dofs; i++) p[i] += v[0];" keys = { "v": (volume, op2.READ), "p": (patch_volume, op2.INC), } firedrake.par_loop(kernel, dX, keys) # Volume average keys = { "s": (source, op2.READ), "v": (volume, op2.READ), "t": (target, op2.INC), } if rank == 0: firedrake.par_loop( """ for (int i=0; i < t.dofs; i++) { t[i] += s[0]*v[0]; } """, dX, keys, ) elif rank == 1: firedrake.par_loop( """ int d = %d; for (int i=0; i < t.dofs; i++) { for (int j=0; j < d; j++) { t[i*d + j] += s[j]*v[0]; } } """ % dim, dX, keys, ) elif rank == 2: firedrake.par_loop( """ int d = %d; int Nd = d*d; for (int i=0; i < t.dofs; i++) { for (int j=0; j < d; j++) { for (int k=0; k < d; k++) { t[i*Nd + j*d + k] += s[j*d + k]*v[0]; } } } """ % dim, dX, keys, ) else: raise ValueError(f"Rank-{rank} tensors are not supported.") target.interpolate(target / patch_volume) if boundary_tag is not None: target.dat.data_with_halos[:] = np.nan_to_num(target.dat.data_with_halos) return target
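A hedged usage sketch (Firedrake): average a piecewise-constant field into P1, either over the whole mesh or over a tagged part of the boundary; the boundary tag 1 is illustrative and clement_interpolant is the function defined above.

import firedrake

mesh = firedrake.UnitSquareMesh(4, 4)
P0 = firedrake.FunctionSpace(mesh, "DG", 0)
source = firedrake.Function(P0).assign(1.0)
p1_field = clement_interpolant(source)
p1_bnd_field = clement_interpolant(source, boundary_tag=1)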
u5 = Function(V) with u5.vector().localForm() as bc_local: bc_local.set(5.0) u0 = Function(V) with u0.vector().localForm() as bc_local: bc_local.set(0.0) # Define Dirichlet boundary conditions at top and bottom boundaries bcs = [ DirichletBC(V, u5, boundaries.where_equal(boundary['TOP'][0])), DirichletBC(V, u0, boundaries.where_equal(boundary['BOTTOM'][0])) ] dx = dx(subdomain_data=domains) ds = ds(subdomain_data=boundaries) # Define variational form F = (inner(a0 * grad(u), grad(v)) * dx(boundary['DOMAIN'][0]) + inner(a1 * grad(u), grad(v)) * dx(boundary['OBSTACLE'][0]) - g_L * v * ds(boundary['LEFT'][0]) - g_R * v * ds(boundary['RIGHT'][0]) - f * v * dx(boundary['DOMAIN'][0]) - f * v * dx(boundary['OBSTACLE'][0])) # Separate left and right hand sides of equation a, L = lhs(F), rhs(F) # Solve problem u = Function(V) solve(a == L, u, bcs) bb_tree = cpp.geometry.BoundingBoxTree(mesh, 2)
# Copyright (C) 2008 Anders Logg and Kristian B. Oelgaard # # This file is part of UFL. # # UFL is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # UFL is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with UFL. If not, see <http://www.gnu.org/licenses/>. # # This simple example illustrates how forms can be defined on different sub domains. # It is supported for all three integral types. from ufl import (FiniteElement, TestFunction, TrialFunction, ds, dS, dx, tetrahedron) element = FiniteElement("CG", tetrahedron, 1) v = TestFunction(element) u = TrialFunction(element) a = v * u * dx(0) + 10.0 * v * u * dx(1) + v * u * ds(0) + 2.0 * v * u * ds(1)\ + v('+') * u('+') * dS(0) + 4.3 * v('+') * u('+') * dS(1)
def test_assemble_functional_ds(mode): mesh = UnitSquareMesh(MPI.COMM_WORLD, 12, 12, ghost_mode=mode) M = 1.0 * ds(domain=mesh) value = dolfinx.fem.assemble_scalar(M) value = mesh.mpi_comm().allreduce(value, op=MPI.SUM) assert value == pytest.approx(4.0, 1e-12)