def test_read_mesh_data(tempdir, tdim, n):
    filename = os.path.join(tempdir, "mesh.xdmf")
    mesh = mesh_factory(tdim, n)
    encoding = XDMFFile.Encoding.HDF5
    with XDMFFile(mesh.mpi_comm(), filename, "w", encoding) as file:
        file.write_mesh(mesh)

    with XDMFFile(MPI.COMM_WORLD, filename, "r") as file:
        cell_shape, cell_degree = file.read_cell_type()
        cells = file.read_topology_data()
        x = file.read_geometry_data()

    assert cell_shape == mesh.topology.cell_type
    assert cell_degree == 1
    assert mesh.topology.index_map(tdim).size_global == mesh.mpi_comm().allreduce(
        cells.shape[0], op=MPI.SUM)
    assert mesh.geometry.index_map().size_global == mesh.mpi_comm().allreduce(
        x.shape[0], op=MPI.SUM)
def test_xdmf_input_tri(datadir):
    with XDMFFile(MPI.COMM_WORLD, os.path.join(datadir, "mesh.xdmf"), "r",
                  encoding=XDMFFile.Encoding.ASCII) as xdmf:
        mesh = xdmf.read_mesh(name="Grid")
    # Integrating 1 over the mesh gives its surface area, which should
    # match that of the unit sphere, 4*pi
    surface = assemble_scalar(1 * dx(mesh))
    assert mesh.mpi_comm().allreduce(surface, op=MPI.SUM) == pytest.approx(
        4 * np.pi, rel=1e-4)
def play_with_meshtags():
    mesh = UnitCubeMesh(MPI.COMM_WORLD, 5, 5, 5)
    tdim = mesh.topology.dim
    mesh.topology.create_connectivity(0, tdim)
    indices = np.arange(mesh.topology.index_map(tdim).size_local)
    values = np.ones_like(indices, dtype=np.int32) * MPI.COMM_WORLD.rank
    cell_owner = MeshTags(mesh, tdim, indices, values)
    with XDMFFile(MPI.COMM_WORLD, "cell_owner.xdmf", "w") as file:
        file.write_mesh(mesh)
        file.write_meshtags(cell_owner)
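# A hedged sketch (not from the original source) of reading the tags
# back in, assuming this dolfinx version provides XDMFFile.read_mesh and
# XDMFFile.read_meshtags and that the default names "mesh" and
# "mesh_tags" were used when writing.
def read_meshtags_back():
    with XDMFFile(MPI.COMM_WORLD, "cell_owner.xdmf", "r") as file:
        mesh_in = file.read_mesh(name="mesh")
        cell_owner_in = file.read_meshtags(mesh_in, name="mesh_tags")
    return mesh_in, cell_owner_in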
def test_read_mesh_data(tempdir, tdim, n):
    filename = os.path.join(tempdir, "mesh.xdmf")
    mesh = mesh_factory(tdim, n)
    encoding = XDMFFile.Encoding.HDF5
    ghost_mode = cpp.mesh.GhostMode.none
    with XDMFFile(mesh.mpi_comm(), filename, encoding) as file:
        file.write(mesh)

    with XDMFFile(MPI.comm_world, filename) as file:
        cell_type, points, cells, indices = file.read_mesh_data(MPI.comm_world)
    mesh2 = Mesh(MPI.comm_world, cell_type, points, cells, indices, ghost_mode)

    assert mesh.topology.cell_type == mesh2.topology.cell_type
    assert mesh.num_entities_global(0) == mesh2.num_entities_global(0)
    dim = mesh.topology.dim
    assert mesh.num_entities_global(dim) == mesh2.num_entities_global(dim)
def test_save_3D_facet_function(tempdir, encoding, data_type, cell_type):
    dtype_str, dtype = data_type
    mesh = UnitCubeMesh(MPI.comm_world, 4, 4, 4, cell_type)
    tdim = mesh.topology.dim
    mf = MeshFunction(dtype_str, mesh, tdim - 1, 0)
    mf.name = "facets"

    map = mesh.topology.index_map(tdim - 1)
    global_indices = map.global_indices(True)
    mf.values[:] = global_indices[:]

    filename = os.path.join(tempdir, "mf_facet_3D_%s.xdmf" % dtype_str)
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
        xdmf.write(mf)

    with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
        read_function = getattr(xdmf, "read_mf_" + dtype_str)
        mf_in = read_function(mesh, "facets")

    diff = mf_in.values - mf.values
    assert np.all(diff == 0)
def test_append_and_load_mesh_functions(tempdir, encoding, data_type):
    dtype_str, dtype = data_type
    meshes = [
        UnitSquareMesh(MPI.comm_world, 12, 12),
        UnitCubeMesh(MPI.comm_world, 2, 2, 2),
        UnitSquareMesh(MPI.comm_world, 12, 12, CellType.quadrilateral),
        UnitCubeMesh(MPI.comm_world, 2, 2, 2, CellType.hexahedron)
    ]

    for mesh in meshes:
        dim = mesh.topology.dim

        vf = MeshFunction(dtype_str, mesh, 0, 0)
        vf.name = "vertices"
        ff = MeshFunction(dtype_str, mesh, dim - 1, 0)
        ff.name = "facets"
        cf = MeshFunction(dtype_str, mesh, dim, 0)
        cf.name = "cells"

        vf.values[:] = mesh.topology.global_indices(0)[:]
        ff.values[:] = mesh.topology.global_indices(dim - 1)[:]
        cf.values[:] = mesh.topology.global_indices(dim)[:]

        filename = os.path.join(
            tempdir, "appended_mf_{0:d}_{1:s}.xdmf".format(dim, str(mesh.cell_type)))
        with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
            xdmf.write(mesh)
            xdmf.write(vf)
            xdmf.write(ff)
            xdmf.write(cf)

        with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
            read_function = getattr(xdmf, "read_mf_" + dtype_str)
            vf_in = read_function(mesh, "vertices")
            ff_in = read_function(mesh, "facets")
            cf_in = read_function(mesh, "cells")

        diff_vf = vf_in.values - vf.values
        diff_ff = ff_in.values - ff.values
        diff_cf = cf_in.values - cf.values
        assert np.all(diff_vf == 0)
        assert np.all(diff_ff == 0)
        assert np.all(diff_cf == 0)
def xtest_submesh(tempdir, d, n, codim, ghost_mode, encoding):
    mesh = mesh_factory(d, n, ghost_mode)
    edim = d - codim
    entities = locate_entities(mesh, edim, lambda x: x[0] >= 0.5)
    submesh = create_submesh(mesh, edim, entities)[0]

    # Check that writing the submesh doesn't cause a segmentation fault
    filename = os.path.join(tempdir, "submesh.xdmf")
    with XDMFFile(mesh.comm, filename, "w", encoding=encoding) as xdmf:
        xdmf.write_mesh(submesh)
def test_save_2d_vector(tempdir, encoding, cell_type):
    filename = os.path.join(tempdir, "u_2dv.xdmf")
    mesh = create_unit_square(MPI.COMM_WORLD, 12, 13, cell_type)
    V = VectorFunctionSpace(mesh, ("Lagrange", 2))
    u = Function(V)
    u.vector.set(1.0 + (
        1j if np.issubdtype(PETSc.ScalarType, np.complexfloating) else 0))
    with XDMFFile(mesh.comm, filename, "w", encoding=encoding) as file:
        file.write_mesh(mesh)
        file.write_function(u)
def test_manufactured_vector2(family, degree, filename, datadir):
    """Projection into H(div/curl) spaces"""
    # Skip the slowest tests
    if "tetra" in filename and degree > 2:
        return

    with XDMFFile(MPI.comm_world, os.path.join(datadir, filename)) as xdmf:
        mesh = xdmf.read_mesh(GhostMode.none)

    # FIXME: these tests are currently failing on unordered meshes
    if "tetra" in filename:
        if family == "N1curl":
            Ordering.order_simplex(mesh)

    V = FunctionSpace(mesh, (family, degree + 1))
    u, v = ufl.TrialFunction(V), ufl.TestFunction(V)
    a = inner(u, v) * dx

    xp = np.array([0.33, 0.33, 0.0])
    tree = geometry.BoundingBoxTree(mesh, mesh.geometry.dim)
    cells = geometry.compute_first_entity_collision(tree, mesh, xp)

    # Source term
    x = SpatialCoordinate(mesh)
    u_ref = x[0]**degree
    L = inner(u_ref, v[0]) * dx

    b = assemble_vector(L)
    b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
    A = assemble_matrix(a)
    A.assemble()

    # Create LU linear solver (Note: need to use a solver that
    # re-orders to handle pivots, e.g. not the PETSc built-in LU solver)
    solver = PETSc.KSP().create(MPI.comm_world)
    solver.setType("preonly")
    solver.getPC().setType('lu')
    solver.setOperators(A)

    # Solve
    uh = Function(V)
    solver.solve(b, uh.vector)
    uh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                          mode=PETSc.ScatterMode.FORWARD)

    up = uh.eval(xp, cells[0])
    print("test0:", up)
    print("test1:", xp[0]**degree)

    u_exact = np.zeros(mesh.geometry.dim)
    u_exact[0] = xp[0]**degree
    assert np.allclose(up, u_exact)
def test_save_3D_edge_function(tempdir, encoding, data_type, cell_type):
    dtype_str, dtype = data_type
    mesh = UnitCubeMesh(MPI.comm_world, 4, 4, 4, cell_type)
    mf = MeshFunction(dtype_str, mesh, 1, 0)
    mf.name = "edges"
    mf.values[:] = np.arange(mesh.num_entities(1), dtype=dtype)

    filename = os.path.join(tempdir, "mf_edge_3D_%s.xdmf" % dtype_str)
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as file:
        file.write(mf)
def test_save_3d_vector_series(tempdir, encoding, cell_type):
    filename = os.path.join(tempdir, "u_3D.xdmf")
    mesh = UnitCubeMesh(MPI.comm_world, 2, 2, 2, cell_type)
    u = Function(VectorFunctionSpace(mesh, ("Lagrange", 2)))
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as file:
        u.vector.set(1.0 + (1j if has_petsc_complex else 0))
        file.write(u, 0.1)

        u.vector.set(2.0 + (2j if has_petsc_complex else 0))
        file.write(u, 0.2)

        u.vector.set(3.0 + (3j if has_petsc_complex else 0))
        file.write(u, 0.3)
def test_save_1d_scalar(tempdir, encoding, use_pathlib):
    filename2 = (Path(tempdir).joinpath("u1_.xdmf")
                 if use_pathlib else os.path.join(tempdir, "u1_.xdmf"))
    mesh = create_unit_interval(MPI.COMM_WORLD, 32)
    V = FunctionSpace(mesh, ("Lagrange", 2))
    u = Function(V)
    u.vector.set(1.0 + (
        1j if np.issubdtype(PETSc.ScalarType, np.complexfloating) else 0))
    with XDMFFile(mesh.comm, filename2, "w", encoding=encoding) as file:
        file.write_mesh(mesh)
        file.write_function(u)
def get_mesh(cell_type, datadir):
    # In parallel, use larger meshes read from file
    if cell_type == CellType.triangle:
        filename = "UnitSquareMesh_triangle.xdmf"
    elif cell_type == CellType.quadrilateral:
        filename = "UnitSquareMesh_quad.xdmf"
    elif cell_type == CellType.tetrahedron:
        filename = "UnitCubeMesh_tetra.xdmf"
    elif cell_type == CellType.hexahedron:
        filename = "UnitCubeMesh_hexahedron.xdmf"
    with XDMFFile(MPI.COMM_WORLD, os.path.join(datadir, filename), "r",
                  encoding=XDMFFile.Encoding.ASCII) as xdmf:
        return xdmf.read_mesh(name="Grid")
def test_read_write_p2_mesh(tempdir, encoding):
    cell = ufl.Cell("triangle", geometric_dimension=2)
    element = ufl.VectorElement("Lagrange", cell, 2)
    domain = ufl.Mesh(element)
    cmap = fem.create_coordinate_map(domain)
    mesh = cpp.generation.UnitDiscMesh.create(MPI.COMM_WORLD, 3, cmap,
                                              cpp.mesh.GhostMode.none)

    filename = os.path.join(tempdir, "tri6_mesh.xdmf")
    with XDMFFile(mesh.mpi_comm(), filename, "w", encoding=encoding) as xdmf:
        xdmf.write_mesh(mesh)

    with XDMFFile(mesh.mpi_comm(), filename, "r", encoding=encoding) as xdmf:
        mesh2 = xdmf.read_mesh()

    assert mesh.topology.index_map(0).size_global == \
        mesh2.topology.index_map(0).size_global
    dim = mesh.topology.dim
    assert mesh.topology.index_map(dim).size_global == \
        mesh2.topology.index_map(dim).size_global
def write_output(self, pb=None, writemesh=False, N=1, t=0):
    if writemesh:
        if self.write_results_every > 0:
            self.resultsfiles = {}
            for res in self.results_to_write:
                outfile = XDMFFile(
                    self.comm,
                    self.output_path + '/results_' + pb.simname + '_' + res + '.xdmf',
                    'w')
                outfile.write_mesh(self.mesh)
                self.resultsfiles[res] = outfile
        return

    else:
        # Write results every write_results_every steps
        if self.write_results_every > 0 and N % self.write_results_every == 0:
            # Save solution in XDMF format
            for res in self.results_to_write:
                if res == 'velocity':
                    self.resultsfiles[res].write_function(pb.v, t)
                elif res == 'acceleration':
                    # The acceleration passed in is a form, not a function,
                    # so it has to be projected first
                    a_proj = project(pb.acc, pb.V_v, pb.dx_, nm="Acceleration")
                    self.resultsfiles[res].write_function(a_proj, t)
                elif res == 'pressure':
                    self.resultsfiles[res].write_function(pb.p, t)
                elif res == 'cauchystress':
                    stressfuncs = []
                    for n in range(pb.num_domains):
                        stressfuncs.append(pb.ma[n].sigma(pb.v, pb.p))
                    cauchystress = project(stressfuncs, pb.Vd_tensor, pb.dx_,
                                           nm="CauchyStress")
                    self.resultsfiles[res].write_function(cauchystress, t)
                elif res == 'reynolds':
                    reynolds = project(re, pb.Vd_scalar, pb.dx_, nm="Reynolds")
                    self.resultsfiles[res].write_function(reynolds, t)
                else:
                    raise NameError("Unknown output to write for fluid mechanics!")
def test_xdmf_timeseries_write_to_closed_hdf5_using_with(tempdir, cell_type):
    mesh = UnitCubeMesh(MPI.comm_world, 2, 2, 2, cell_type)
    V = FunctionSpace(mesh, ("CG", 1))
    u = Function(V)

    filename = os.path.join(tempdir, "time_series_closed_append.xdmf")
    with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
        xdmf.write(u, float(0.0))
        xdmf.write(u, float(1.0))

    xdmf.close()

    with xdmf:
        xdmf.write(u, float(2.0))
def test_manufactured_vector1(family, degree, filename, datadir):
    """Projection into H(div/curl) spaces"""
    with XDMFFile(MPI.COMM_WORLD, os.path.join(datadir, filename), "r",
                  encoding=XDMFFile.Encoding.ASCII) as xdmf:
        mesh = xdmf.read_mesh(name="Grid")

    V = FunctionSpace(mesh, (family, degree))
    W = VectorFunctionSpace(mesh, ("CG", degree))
    u, v = ufl.TrialFunction(V), ufl.TestFunction(V)
    a = inner(u, v) * dx

    # Source term
    x = SpatialCoordinate(mesh)
    u_ref = x[0]**degree
    L = inner(u_ref, v[0]) * dx

    b = assemble_vector(L)
    b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
    A = assemble_matrix(a)
    A.assemble()

    # Create LU linear solver (Note: need to use a solver that
    # re-orders to handle pivots, e.g. not the PETSc built-in LU solver)
    solver = PETSc.KSP().create(MPI.COMM_WORLD)
    solver.setType("preonly")
    solver.getPC().setType('lu')
    solver.setOperators(A)

    # Solve
    uh = Function(V)
    solver.solve(b, uh.vector)
    uh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                          mode=PETSc.ScatterMode.FORWARD)

    u_exact = Function(W)
    u_exact.interpolate(lambda x: np.array(
        [x[0]**degree if i == 0 else 0 * x[0] for i in range(mesh.topology.dim)]))

    M = inner(uh - u_exact, uh - u_exact) * dx
    M = fem.Form(M)
    error = mesh.mpi_comm().allreduce(assemble_scalar(M), op=MPI.SUM)
    assert np.absolute(error) < 1.0e-14
def test_manufactured_vector1(family, degree, filename, datadir):
    """Projection into H(div/curl) spaces"""
    with XDMFFile(MPI.COMM_WORLD, os.path.join(datadir, filename), "r",
                  encoding=XDMFFile.Encoding.ASCII) as xdmf:
        mesh = xdmf.read_mesh(name="Grid")

    V = FunctionSpace(mesh, (family, degree))
    u, v = ufl.TrialFunction(V), ufl.TestFunction(V)
    a = inner(u, v) * dx

    # Source term
    x = SpatialCoordinate(mesh)
    u_ref = x[0]**degree
    L = inner(u_ref, v[0]) * dx

    b = assemble_vector(L)
    b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
    A = assemble_matrix(a)
    A.assemble()

    # Create LU linear solver (Note: need to use a solver that
    # re-orders to handle pivots, e.g. not the PETSc built-in LU solver)
    solver = PETSc.KSP().create(MPI.COMM_WORLD)
    solver.setType("preonly")
    solver.getPC().setType('lu')
    solver.setOperators(A)

    # Solve
    uh = Function(V)
    solver.solve(b, uh.vector)
    uh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                          mode=PETSc.ScatterMode.FORWARD)

    xp = np.array([0.33, 0.33, 0.0])
    tree = geometry.BoundingBoxTree(mesh, mesh.geometry.dim)
    cells = geometry.compute_first_entity_collision(tree, mesh, xp)

    up = uh.eval(xp, cells[0])
    print("test0:", up)
    print("test1:", xp[0]**degree)

    u_exact = np.zeros(mesh.geometry.dim)
    u_exact[0] = xp[0]**degree
    assert np.allclose(up, u_exact)
def io_callback(iv1, w1, t):
    # Project damage into a visualizable space by solving a mass-matrix
    # system with a CG/Jacobi solver
    b = assemble_vector(proj_rhs)
    b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)

    proj_ksp = PETSc.KSP()
    proj_ksp.create(MPI.COMM_WORLD)
    proj_ksp.setType("cg")
    proj_ksp.getPC().setType("jacobi")
    proj_ksp.setOperators(mass_DMG)
    proj_ksp.setFromOptions()
    proj_ksp.solve(b, dmg0.vector)

    with XDMFFile(MPI.COMM_WORLD, f"{filename}.xdmf", "a") as ofile:
        ofile.write_function(w1["displ"], t)
        ofile.write_function(w1["temp"], t)
        ofile.write_function(w1["phi"], t)
        ofile.write_function(w1["co2"], t)
        ofile.write_function(dmg0, t)
def get_mesh(cell_type, datadir):
    if MPI.COMM_WORLD.size == 1:
        # If running in serial, use a small built-in mesh
        if cell_type in [CellType.triangle, CellType.quadrilateral]:
            return UnitSquareMesh(MPI.COMM_WORLD, 2, 1, cell_type)
        else:
            return UnitCubeMesh(MPI.COMM_WORLD, 2, 1, 1, cell_type)
    else:
        # In parallel, use larger meshes read from file
        if cell_type == CellType.triangle:
            filename = "UnitSquareMesh_triangle.xdmf"
        elif cell_type == CellType.quadrilateral:
            filename = "UnitSquareMesh_quad.xdmf"
        elif cell_type == CellType.tetrahedron:
            filename = "UnitCubeMesh_tetra.xdmf"
        elif cell_type == CellType.hexahedron:
            filename = "UnitCubeMesh_hexahedron.xdmf"
        with XDMFFile(MPI.COMM_WORLD, os.path.join(datadir, filename), "r",
                      encoding=XDMFFile.Encoding.ASCII) as xdmf:
            return xdmf.read_mesh(name="Grid")
# Compute solution
solver.setMonitor(lambda ksp, its, rnorm: print(
    "Iteration: {}, rel. residual: {}".format(its, rnorm)))
solver.solve(b, u.vector)
solver.view()
u.x.scatter_forward()

# Compute von Mises stress via interpolation
sigma_deviatoric = sigma(u) - (1 / 3) * tr(sigma(u)) * Identity(len(u))
sigma_von_mises = sqrt((3 / 2) * inner(sigma_deviatoric, sigma_deviatoric))

W = FunctionSpace(mesh, ("Discontinuous Lagrange", 0))
sigma_von_mises_expression = Expression(sigma_von_mises,
                                        W.element.interpolation_points)
sigma_von_mises_h = Function(W)
sigma_von_mises_h.interpolate(sigma_von_mises_expression)

# Save the displacement solution in XDMF format
with XDMFFile(MPI.COMM_WORLD, "displacements.xdmf", "w") as file:
    file.write_mesh(mesh)
    file.write_function(u)

# Save the von Mises stress in XDMF format
with XDMFFile(MPI.COMM_WORLD, "von_mises_stress.xdmf", "w") as file:
    file.write_mesh(mesh)
    file.write_function(sigma_von_mises_h)

unorm = u.x.norm()
if mesh.comm.rank == 0:
    print("Solution vector norm:", unorm)
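# For reference (added note, standard continuum-mechanics identity): the
# quantity computed above is
#   sigma_vm = sqrt((3/2) * s : s),  with  s = sigma - (1/3) tr(sigma) I
# the deviatoric part of the Cauchy stress. Interpolating it into the
# degree-0 discontinuous space W yields one constant value per cell,
# which is well suited for visualization of a stress-like quantity.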
solver.convergence_criterion = "residual"
solver.max_it = 200
solver.report = True

ksp = solver.krylov_solver
opts = PETSc.Options()
opts.getAll()
opts.view()
option_prefix = ksp.getOptionsPrefix()
opts[f"{option_prefix}ksp_type"] = "preonly"
opts[f"{option_prefix}pc_type"] = "lu"
opts[f"{option_prefix}pc_factor_mat_solver_type"] = "mumps"
ksp.setFromOptions()

wfil = XDMFFile(comm, "disp_Visco_0_05_realEta_Fixed.xdmf", "w")
wfil.write_mesh(mesh)

with open("sfiles.txt", "w") as fil:
    fil.write("0.0, 0.0\n")

for i, t in enumerate(timeVals):
    right_disp.value = stretchVals[i]
    print(f"Load step: {i + 1}")
    set_output_file("viscoTrialrunW.log")
    num_its, converged = solver.solve(u)
    print(f"Newton iteration: {num_its}")
    dl_interp(CC, C)
    k_terms(dt, C, Cn, Cvn, C_quart, C_half, C_thr_quart, CCv, k_cache)
    CCv.vector.copy(result=Cv_iter.vector)
model.mesh.generate(3)

# Sort mesh nodes according to their index in gmsh (starts at 1)
x = extract_gmsh_geometry(model, model_name="Sphere")

# Extract cells from gmsh (only interested in tetrahedra)
element_types, element_tags, node_tags = model.mesh.getElements(dim=3)
assert len(element_types) == 1
name, dim, order, num_nodes, local_coords, num_first_order_nodes = \
    model.mesh.getElementProperties(element_types[0])
cells = node_tags[0].reshape(-1, num_nodes) - 1

mesh = create_mesh(MPI.COMM_SELF, cells, x,
                   ufl_mesh_from_gmsh(element_types[0], x.shape[1]))

with XDMFFile(MPI.COMM_SELF,
              "mesh_rank_{}.xdmf".format(MPI.COMM_WORLD.rank), "w") as file:
    file.write_mesh(mesh)

# Create a distributed (parallel) mesh with affine geometry.
# Generate mesh on rank 0, then build a distributed mesh ::

if MPI.COMM_WORLD.rank == 0:
    # Generate a mesh
    model.add("Sphere minus box")
    model.setCurrent("Sphere minus box")

    sphere_dim_tags = model.occ.addSphere(0, 0, 0, 1)
    box_dim_tags = model.occ.addBox(0, 0, 0, 1, 1, 1)
    model_dim_tags = model.occ.cut([(3, sphere_dim_tags)], [(3, box_dim_tags)])
    model.occ.synchronize()
solver = NewtonSolver(MPI.COMM_WORLD)
solver.convergence_criterion = "incremental"
solver.rtol = 1e-6

# Setting ``convergence_criterion`` to ``"incremental"`` specifies that
# the Newton solver should compute the norm of the solution increment to
# check for convergence (the other possibility is to use ``"residual"``,
# or to provide a user-defined check). The tolerance for convergence is
# specified by ``rtol``.
#
# To run the solver and save the output to an XDMF file for later
# visualization, the solver is advanced in time from :math:`t_{n}` to
# :math:`t_{n+1}` until a terminal time :math:`T` is reached::

# Output file
file = XDMFFile(MPI.COMM_WORLD, "output.xdmf", "w")
file.write_mesh(mesh)

# Step in time
t = 0.0

# Check if we are running on a CI server and reduce the run time
if "CI" in os.environ.keys() or "GITHUB_ACTIONS" in os.environ.keys():
    T = 3 * dt
else:
    T = 50 * dt

u.vector.copy(result=u0.vector)
u0.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                      mode=PETSc.ScatterMode.FORWARD)
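# A minimal sketch of the time loop the comment above describes, using
# the names ``u``, ``u0``, ``solver``, ``file``, ``t``, ``T`` and ``dt``
# from this fragment. It assumes ``solver.solve(u)`` returns
# ``(num_iterations, converged)`` (the pattern used elsewhere in this
# section) and that ``u.sub(0)`` is the field to visualize; both are
# assumptions, not part of the original fragment.
while t < T:
    t += dt
    num_its, converged = solver.solve(u)
    u.vector.copy(result=u0.vector)
    file.write_function(u.sub(0), t)

file.close()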
# Create the mesh of the specimen with given dimensions
gmsh_model, tdim = mesh_bar_gmshapi(geom_type, Lx, Ly, lc, tdim)

# Get mesh and meshtags
mesh, mts = gmsh_model_to_mesh(gmsh_model, cell_data=False, facet_data=True,
                               gdim=2)

outdir = "output"
if comm.rank == 0:
    Path(outdir).mkdir(parents=True, exist_ok=True)

prefix = os.path.join(outdir, "elasticity")
with XDMFFile(comm, f"{prefix}.xdmf", "w",
              encoding=XDMFFile.Encoding.HDF5) as file:
    file.write_mesh(mesh)

# Function spaces
element_u = ufl.VectorElement("Lagrange", mesh.ufl_cell(), degree=1, dim=tdim)
V_u = dolfinx.fem.FunctionSpace(mesh, element_u)

# Define the state
u = dolfinx.fem.Function(V_u, name="Displacement")
u_ = dolfinx.fem.Function(V_u, name="Boundary Displacement")
ux_ = dolfinx.fem.Function(V_u.sub(0).collapse(), name="Boundary Displacement")
zero_u = dolfinx.fem.Function(V_u, name="Boundary Displacement")
state = {"u": u}

# Measures
def test_manufactured_poisson(degree, filename, datadir):
    """Manufactured Poisson problem, solving u = x[1]**p, where p is the
    degree of the Lagrange function space."""
    with XDMFFile(MPI.comm_world, os.path.join(datadir, filename)) as xdmf:
        mesh = xdmf.read_mesh(GhostMode.none)

    V = FunctionSpace(mesh, ("Lagrange", degree))
    u, v = TrialFunction(V), TestFunction(V)

    # Set quadrature degree for the bilinear form integrand (ignores
    # the effect of a non-affine map)
    a = inner(grad(u), grad(v)) * dx(metadata={"quadrature_degree": -1})
    a.integrals()[0].metadata()["quadrature_degree"] = \
        ufl.algorithms.estimate_total_polynomial_degree(a)

    # Source term
    x = SpatialCoordinate(mesh)
    u_exact = x[1]**degree
    f = -div(grad(u_exact))

    # Set quadrature degree for the linear form integrand (ignores the
    # effect of a non-affine map)
    L = inner(f, v) * dx(metadata={"quadrature_degree": -1})
    L.integrals()[0].metadata()["quadrature_degree"] = \
        ufl.algorithms.estimate_total_polynomial_degree(L)

    t0 = time.time()
    L = fem.Form(L)
    t1 = time.time()
    print("Linear form compile time:", t1 - t0)

    u_bc = Function(V)
    u_bc.interpolate(lambda x: x[1]**degree)

    # Create Dirichlet boundary condition
    mesh.create_connectivity_all()
    facetdim = mesh.topology.dim - 1
    bndry_facets = np.where(
        np.array(mesh.topology.on_boundary(facetdim)) == 1)[0]
    bdofs = locate_dofs_topological(V, facetdim, bndry_facets)
    assert len(bdofs) < V.dim()
    bc = DirichletBC(u_bc, bdofs)

    t0 = time.time()
    b = assemble_vector(L)
    apply_lifting(b, [a], [[bc]])
    b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
    set_bc(b, [bc])
    t1 = time.time()
    print("Vector assembly time:", t1 - t0)

    t0 = time.time()
    a = fem.Form(a)
    t1 = time.time()
    print("Bilinear form compile time:", t1 - t0)

    t0 = time.time()
    A = assemble_matrix(a, [bc])
    A.assemble()
    t1 = time.time()
    print("Matrix assembly time:", t1 - t0)

    # Create LU linear solver
    solver = PETSc.KSP().create(MPI.comm_world)
    solver.setType(PETSc.KSP.Type.PREONLY)
    solver.getPC().setType(PETSc.PC.Type.LU)
    solver.setOperators(A)

    # Solve
    t0 = time.time()
    uh = Function(V)
    solver.solve(b, uh.vector)
    uh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                          mode=PETSc.ScatterMode.FORWARD)
    t1 = time.time()
    print("Linear solver time:", t1 - t0)

    M = (u_exact - uh)**2 * dx
    t0 = time.time()
    M = fem.Form(M)
    t1 = time.time()
    print("Error functional compile time:", t1 - t0)

    t0 = time.time()
    error = assemble_scalar(M)
    error = MPI.sum(mesh.mpi_comm(), error)
    t1 = time.time()
    print("Error assembly time:", t1 - t0)
    assert np.absolute(error) < 1.0e-14
x = PETSc.Vec().createNest([u.vector, p.vector])
ksp.solve(b, x)

# Norms of the solution vectors are computed::
norm_u_0 = u.vector.norm()
norm_p_0 = p.vector.norm()
if MPI.COMM_WORLD.rank == 0:
    print("(A) Norm of velocity coefficient vector (nested, iterative): {}".format(norm_u_0))
    print("(A) Norm of pressure coefficient vector (nested, iterative): {}".format(norm_p_0))

# The solution fields can be saved to file in XDMF format for
# visualization, e.g. with ParaView. Before writing to file, ghost
# values are updated.

with XDMFFile(MPI.COMM_WORLD, "velocity.xdmf", "w") as ufile_xdmf:
    u.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                         mode=PETSc.ScatterMode.FORWARD)
    ufile_xdmf.write_mesh(mesh)
    ufile_xdmf.write_function(u)

with XDMFFile(MPI.COMM_WORLD, "pressure.xdmf", "w") as pfile_xdmf:
    p.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                         mode=PETSc.ScatterMode.FORWARD)
    pfile_xdmf.write_mesh(mesh)
    pfile_xdmf.write_function(p)

# Monolithic block iterative solver
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Next, we solve the same problem, but now with monolithic (non-nested)
# matrices and iterative solvers.
from mpi4py import MPI
import numpy as np

from dolfinx import *
from dolfinx.io import XDMFFile

comm = MPI.COMM_WORLD
mesh = UnitCubeMesh(comm, 5, 5, 5)
tdim = mesh.topology.dim
print(tdim)
mesh.topology.create_connectivity(0, tdim)

# Tag each locally owned cell with the rank of the owning process
indices = np.arange(mesh.topology.index_map(tdim).size_local)
values = np.ones_like(indices, dtype=np.int32) * MPI.COMM_WORLD.rank
cell_owner = MeshTags(mesh, tdim, indices, values)

rank = comm.Get_rank()
size = comm.Get_size()

with XDMFFile(comm, "3_vis/cell_owner_" + str(size) + ".xdmf", "w") as file:
    file.write_mesh(mesh)
    file.write_meshtags(cell_owner)

# Tag each locally owned vertex with the rank of the owning process
vertex_indices = np.arange(mesh.topology.index_map(0).size_local)
vertex_values = np.ones_like(vertex_indices, dtype=np.int32) * MPI.COMM_WORLD.rank
vertex_owner = MeshTags(mesh, 0, vertex_indices, vertex_values)
print(vertex_indices, vertex_values)

with XDMFFile(comm, "3_vis/vertex_owner_" + str(size) + ".xdmf", "w") as file:
    file.write_mesh(mesh)
    file.write_meshtags(vertex_owner)
def write(filename, mesh, u):
    from dolfinx.io import XDMFFile
    with XDMFFile(MPI.COMM_WORLD, filename, "w",
                  encoding=XDMFFile.Encoding.HDF5) as xdmffile:
        xdmffile.write_mesh(mesh)
        xdmffile.write_function(u)
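# A hedged usage sketch for the helper above (not from the original
# source), assuming a dolfinx version with create_unit_square in
# dolfinx.mesh and FunctionSpace/Function in dolfinx.fem. The mesh size,
# output filename and interpolated expression are arbitrary choices for
# illustration.
from mpi4py import MPI
from dolfinx.fem import Function, FunctionSpace
from dolfinx.mesh import create_unit_square

mesh = create_unit_square(MPI.COMM_WORLD, 8, 8)
V = FunctionSpace(mesh, ("Lagrange", 1))
u = Function(V)
u.interpolate(lambda x: x[0] * x[1])  # u(x, y) = x * y
write("u_example.xdmf", mesh, u)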
def test_manufactured_poisson_dg(degree, filename, datadir):
    """Manufactured Poisson problem, solving u = x[component]**n, where n
    is the degree of the Lagrange function space."""
    with XDMFFile(MPI.COMM_WORLD, os.path.join(datadir, filename), "r",
                  encoding=XDMFFile.Encoding.ASCII) as xdmf:
        mesh = xdmf.read_mesh(name="Grid")

    V = FunctionSpace(mesh, ("DG", degree))
    u, v = TrialFunction(V), TestFunction(V)

    # Exact solution
    x = SpatialCoordinate(mesh)
    u_exact = x[1]**degree

    # Coefficient
    k = Function(V)
    k.vector.set(2.0)
    k.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                         mode=PETSc.ScatterMode.FORWARD)

    # Source term
    f = -div(k * grad(u_exact))

    # Mesh normals and element size
    n = FacetNormal(mesh)
    h = CellDiameter(mesh)
    h_avg = (h("+") + h("-")) / 2.0

    # Penalty parameter
    alpha = 32

    dx_ = dx(metadata={"quadrature_degree": -1})
    ds_ = ds(metadata={"quadrature_degree": -1})
    dS_ = dS(metadata={"quadrature_degree": -1})

    a = inner(k * grad(u), grad(v)) * dx_ \
        - k("+") * inner(avg(grad(u)), jump(v, n)) * dS_ \
        - k("+") * inner(jump(u, n), avg(grad(v))) * dS_ \
        + k("+") * (alpha / h_avg) * inner(jump(u, n), jump(v, n)) * dS_ \
        - inner(k * grad(u), v * n) * ds_ \
        - inner(u * n, k * grad(v)) * ds_ \
        + (alpha / h) * inner(k * u, v) * ds_
    L = inner(f, v) * dx_ - inner(k * u_exact * n, grad(v)) * ds_ \
        + (alpha / h) * inner(k * u_exact, v) * ds_

    for integral in a.integrals():
        integral.metadata()["quadrature_degree"] = \
            ufl.algorithms.estimate_total_polynomial_degree(a)
    for integral in L.integrals():
        integral.metadata()["quadrature_degree"] = \
            ufl.algorithms.estimate_total_polynomial_degree(L)

    b = assemble_vector(L)
    b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
    A = assemble_matrix(a, [])
    A.assemble()

    # Create LU linear solver
    solver = PETSc.KSP().create(MPI.COMM_WORLD)
    solver.setType(PETSc.KSP.Type.PREONLY)
    solver.getPC().setType(PETSc.PC.Type.LU)
    solver.setOperators(A)

    # Solve
    uh = Function(V)
    solver.solve(b, uh.vector)
    uh.vector.ghostUpdate(addv=PETSc.InsertMode.INSERT,
                          mode=PETSc.ScatterMode.FORWARD)

    error = mesh.mpi_comm().allreduce(assemble_scalar((u_exact - uh)**2 * dx),
                                      op=MPI.SUM)
    assert np.absolute(error) < 1.0e-14