def test_save_and_read_function_timeseries(tempdir):
    """Write a time series of interpolated functions to HDF5 and verify
    that each stored vector reads back bit-identically."""
    filename = os.path.join(tempdir, "function.h5")
    mesh = UnitSquareMesh(MPI.comm_world, 10, 10)
    Q = FunctionSpace(mesh, ("CG", 3))
    F0 = Function(Q)
    F1 = Function(Q)

    t = 0.0

    # Expression depends on the enclosing variable t, so re-interpolating
    # after rebinding t yields a different field each step.
    def E(x):
        return t * x[0]

    F0.interpolate(E)

    # Write one dataset per time step to the HDF5 file.
    hdf5_file = HDF5File(mesh.mpi_comm(), filename, "w")
    for t in range(10):
        F0.interpolate(E)
        hdf5_file.write(F0, "/function", t)
    hdf5_file.close()

    # Read each stored vector back and compare against a freshly
    # interpolated reference.
    hdf5_file = HDF5File(mesh.mpi_comm(), filename, "r")
    for t in range(10):
        F1.interpolate(E)
        vec_name = "/function/vector_{}".format(t)
        F0 = hdf5_file.read_function(Q, vec_name)
        # timestamp = hdf5_file.attributes(vec_name)["timestamp"]
        # assert timestamp == t
        F0.vector.axpy(-1.0, F1.vector)
        assert F0.vector.norm() < 1.0e-12
    hdf5_file.close()
def test_save_and_checkpoint_timeseries(tempdir, encoding, cell_type):
    """Checkpoint a sequence of functions to XDMF and confirm each one
    (including the last via index -1) reads back exactly."""
    mesh = UnitSquareMesh(MPI.comm_world, 16, 16, cell_type)
    filename = os.path.join(tempdir, "u2_checkpoint.xdmf")
    FE = FiniteElement("CG", mesh.ufl_cell(), 2)
    V = FunctionSpace(mesh, FE)

    times = [0.5, 0.2, 0.1]
    u_out = [None] * len(times)
    u_in = [None] * len(times)

    p = 0.0

    # Closure over p: each checkpoint stores a differently scaled field.
    def expr_eval(x):
        return x[0] * p

    # Write one checkpoint per pseudo-time value.
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
        for i, p in enumerate(times):
            u_out[i] = Function(V)
            u_out[i].interpolate(expr_eval)
            xdmf.write_checkpoint(u_out[i], "u_out", p)

    # Read every checkpoint back by its integer index.
    with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
        for i, p in enumerate(times):
            u_in[i] = xdmf.read_checkpoint(V, "u_out", i)

    for read_back, written in zip(u_in, u_out):
        read_back.vector.axpy(-1.0, written.vector)
        assert read_back.vector.norm() < 1.0e-12

    # test reading last
    with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
        u_in_last = xdmf.read_checkpoint(V, "u_out", -1)

    u_out[-1].vector.axpy(-1.0, u_in_last.vector)
    assert u_out[-1].vector.norm() < 1.0e-12
def test_save_and_read_function(tempdir):
    """Round-trip a single function through an HDF5 file."""
    filename = os.path.join(tempdir, "function.h5")
    mesh = UnitSquareMesh(MPI.comm_world, 10, 10)
    Q = FunctionSpace(mesh, ("CG", 3))
    F0 = Function(Q)
    F1 = Function(Q)

    def E(x):
        return x[0]

    F0.interpolate(E)

    # Write the function out...
    hdf5_file = HDF5File(mesh.mpi_comm(), filename, "w")
    hdf5_file.write(F0, "/function")
    hdf5_file.close()

    # ...read it back, and check the difference vector vanishes.
    hdf5_file = HDF5File(mesh.mpi_comm(), filename, "r")
    F1 = hdf5_file.read_function(Q, "/function")
    F0.vector.axpy(-1.0, F1.vector)
    assert F0.vector.norm() < 1.0e-12
    hdf5_file.close()
def test_ghost_2d(mode):
    """Check entity counts on a ghosted 2D unit-square mesh.

    In parallel, summing (owned + ghost) cell counts over all ranks
    double-counts shared cells, so the total must exceed the true
    global cell count; the global index maps must still be exact.
    """
    N = 8
    num_cells = N * N * 2  # an N x N UnitSquareMesh has 2*N*N triangles
    mesh = UnitSquareMesh(MPI.COMM_WORLD, N, N, ghost_mode=mode)
    if mesh.mpi_comm().size > 1:
        # Renamed from 'map' to avoid shadowing the builtin.
        cell_map = mesh.topology.index_map(2)
        num_cells_local = cell_map.size_local + cell_map.num_ghosts
        assert mesh.mpi_comm().allreduce(num_cells_local, op=MPI.SUM) > num_cells
    assert mesh.topology.index_map(0).size_global == 81  # (N+1)^2 vertices
    assert mesh.topology.index_map(2).size_global == num_cells
def test_save_points_2D(tempdir, encoding, cell_type):
    """Write mesh points to XDMF, both bare and with attached values."""
    mesh = UnitSquareMesh(MPI.comm_world, 16, 16, cell_type)
    points = mesh.geometry.points
    # One scalar per point: its distance from the origin.
    vals = np.linalg.norm(points, axis=1)

    points_file = os.path.join(tempdir, "points_2D.xdmf")
    with XDMFFile(mesh.mpi_comm(), points_file, encoding=encoding) as xdmf:
        xdmf.write(points)

    values_file = os.path.join(tempdir, "points_values_2D.xdmf")
    with XDMFFile(mesh.mpi_comm(), values_file, encoding=encoding) as xdmf:
        xdmf.write(points, vals)
def test_UnitQuadMesh():
    """Check entity counts of a 5x7 quadrilateral unit-square mesh."""
    mesh = UnitSquareMesh(MPI.comm_world, 5, 7, CellType.quadrilateral)
    # (5+1)*(7+1) = 48 vertices, 5*7 = 35 quadrilateral cells.
    assert mesh.num_entities_global(0) == 48
    assert mesh.num_entities_global(2) == 35
    assert mesh.geometry.dim == 2
    local_vertices = mesh.topology.index_map(0).size_local
    assert MPI.sum(mesh.mpi_comm(), local_vertices) == 48
def test_krylov_solver_lu():
    """Solve a mass-matrix system with a direct (LU) PETSc solver and
    check the solution norm against the known exact value."""
    mesh = UnitSquareMesh(MPI.COMM_WORLD, 12, 12)
    V = FunctionSpace(mesh, ("Lagrange", 1))
    u, v = TrialFunction(V), TestFunction(V)
    a = inner(u, v) * dx
    L = inner(1.0, v) * dx

    A = assemble_matrix(a)
    A.assemble()
    b = assemble_vector(L)
    b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)

    # Exact solution is u == 1, hence ||x|| = sqrt(13*13) = 13 for the
    # 13x13 vertex grid.
    norm = 13.0

    solver = PETSc.KSP().create(mesh.mpi_comm())
    solver.setOptionsPrefix("test_lu_")
    opts = PETSc.Options("test_lu_")
    opts["ksp_type"] = "preonly"
    opts["pc_type"] = "lu"
    solver.setFromOptions()

    x = A.createVecRight()
    solver.setOperators(A)
    solver.solve(b, x)

    # *Tight* tolerance for LU solves
    assert x.norm(PETSc.NormType.N2) == pytest.approx(norm, abs=1.0e-12)
def test_multiple_datasets(tempdir, encoding, cell_type):
    """Store two named mesh functions in one XDMF file and read each
    back by name."""
    mesh = UnitSquareMesh(MPI.comm_world, 4, 4, cell_type)
    cf0 = MeshFunction('size_t', mesh, 2, 11)
    cf0.name = 'cf0'
    cf1 = MeshFunction('size_t', mesh, 2, 22)
    cf1.name = 'cf1'
    filename = os.path.join(tempdir, "multiple_mf.xdmf")

    # Mesh plus both cell functions go into a single file.
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
        xdmf.write(mesh)
        xdmf.write(cf0)
        xdmf.write(cf1)

    # Each dataset is retrieved by its name attribute.
    with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
        mesh = xdmf.read_mesh(cpp.mesh.GhostMode.none)
        cf0 = xdmf.read_mf_size_t(mesh, "cf0")
        cf1 = xdmf.read_mf_size_t(mesh, "cf1")
    assert cf0.values[0] == 11
    assert cf1.values[0] == 22
def test_UnitSquareMeshDistributed():
    """Create mesh of unit square and verify its global entity counts."""
    mesh = UnitSquareMesh(MPI.comm_world, 5, 7)
    # (5+1)*(7+1) = 48 vertices; 2*5*7 = 70 triangles.
    assert mesh.num_entities_global(0) == 48
    assert mesh.num_entities_global(2) == 70
    assert mesh.geometry.dim == 2
    owned_vertices = mesh.topology.index_map(0).size_local
    assert MPI.sum(mesh.mpi_comm(), owned_vertices) == 48
def test_mesh_function_assign_2D_cells():
    """Exercise MeshFunction <-> MeshValueCollection conversion on cells,
    including a sparse collection with some entities deliberately unset."""
    mesh = UnitSquareMesh(MPI.comm_world, 3, 3)
    ncells = mesh.num_cells()
    f = MeshFunction("int", mesh, mesh.topology.dim, 0)
    # Give every local cell a distinct value (ncells, ncells-1, ...).
    for c in range(ncells):
        f.values[c] = ncells - c
    g = MeshValueCollection("int", mesh, 2)
    g.assign(f)
    assert ncells == len(f.values)
    assert ncells == g.size()
    # Converting back to a MeshFunction must reproduce the same values.
    f2 = MeshFunction("int", mesh, g, 0)
    for c in range(mesh.num_cells()):
        value = ncells - c
        assert value == g.get_value(c, 0)
        assert f2.values[c] == g.get_value(c, 0)
    # Build a second collection keyed on *global* cell indices, skipping
    # three specific global cells so the collection is incomplete.
    h = MeshValueCollection("int", mesh, 2)
    global_indices = mesh.topology.index_map(2).global_indices(True)
    ncells_global = mesh.num_entities_global(2)
    for c in range(mesh.num_cells()):
        if global_indices[c] in [5, 8, 10]:
            continue
        value = ncells_global - global_indices[c]
        h.set_value(c, int(value))
    f3 = MeshFunction("int", mesh, h, 0)
    values = f3.values
    # Unset entries exceed ncells_global (sentinel fill); zero them so
    # the global sum below only counts explicitly set values.
    values[values > ncells_global] = 0.
    # sum over global indices {0..17}\{5,8,10} of (18 - idx) == 140.
    assert MPI.sum(mesh.mpi_comm(), values.sum() * 1.0) == 140.
def test_save_2D_cell_function(tempdir, encoding, data_type, cell_type):
    """Round-trip a cell MeshFunction of the given dtype through XDMF."""
    dtype_str, dtype = data_type
    filename = os.path.join(tempdir, "mf_2D_%s.xdmf" % dtype_str)
    mesh = UnitSquareMesh(MPI.comm_world, 32, 32, cell_type)

    mf = MeshFunction(dtype_str, mesh, mesh.topology.dim, 0)
    mf.name = "cells"
    # Distinct value per cell so any permutation/corruption is detected.
    mf.values[:] = np.arange(mesh.num_entities(2), dtype=dtype)

    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as outfile:
        outfile.write(mf)

    with XDMFFile(mesh.mpi_comm(), filename) as infile:
        reader = getattr(infile, "read_mf_" + dtype_str)
        mf_in = reader(mesh, "cells")

    assert np.array_equal(mf_in.values, mf.values)
def test_UnitQuadMesh():
    """Check entity counts of a 5x7 quadrilateral unit-square mesh."""
    mesh = UnitSquareMesh(MPI.COMM_WORLD, 5, 7, CellType.quadrilateral)
    # (5+1)*(7+1) = 48 vertices, 5*7 = 35 quad cells.
    assert mesh.topology.index_map(0).size_global == 48
    assert mesh.topology.index_map(2).size_global == 35
    assert mesh.geometry.dim == 2
    owned_vertices = mesh.topology.index_map(0).size_local
    assert mesh.mpi_comm().allreduce(owned_vertices, MPI.SUM) == 48
def test_save_2d_tensor(tempdir, encoding, cell_type):
    """Write a constant tensor-valued function to XDMF without error."""
    filename = os.path.join(tempdir, "tensor.xdmf")
    mesh = UnitSquareMesh(MPI.comm_world, 16, 16, cell_type)
    u = Function(TensorFunctionSpace(mesh, ("Lagrange", 2)))
    # Use a complex value when PETSc is built with complex scalars.
    u.vector.set(1.0 + (1j if has_petsc_complex else 0))
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as outfile:
        outfile.write(u)
def test_UnitSquareMeshDistributed():
    """Create mesh of unit square and verify its global entity counts."""
    mesh = UnitSquareMesh(MPI.COMM_WORLD, 5, 7)
    # (5+1)*(7+1) = 48 vertices; 2*5*7 = 70 triangles.
    assert mesh.topology.index_map(0).size_global == 48
    assert mesh.topology.index_map(2).size_global == 70
    assert mesh.geometry.dim == 2
    owned_vertices = mesh.topology.index_map(0).size_local
    assert mesh.mpi_comm().allreduce(owned_vertices, MPI.SUM) == 48
def test_save_2D_vertex_function(tempdir, encoding, data_type, cell_type):
    """Round-trip a vertex MeshFunction of the given dtype through XDMF."""
    dtype_str, dtype = data_type
    mesh = UnitSquareMesh(MPI.comm_world, 32, 32, cell_type)

    mf = MeshFunction(dtype_str, mesh, 0, 0)
    mf.name = "vertices"
    # Tag each vertex with its global index so values are process-independent.
    global_indices = mesh.topology.index_map(0).global_indices(False)
    mf.values[:] = global_indices[:]

    filename = os.path.join(tempdir, "mf_vertex_2D_%s.xdmf" % dtype_str)
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as outfile:
        outfile.write(mf)

    with XDMFFile(mesh.mpi_comm(), filename) as infile:
        reader = getattr(infile, "read_mf_" + dtype_str)
        mf_in = reader(mesh, "vertices")

    assert np.array_equal(mf_in.values, mf.values)
def test_save_2d_vector(tempdir, encoding, cell_type):
    """Write a constant vector-valued function (mesh + function) to XDMF."""
    filename = os.path.join(tempdir, "u_2dv.xdmf")
    mesh = UnitSquareMesh(MPI.COMM_WORLD, 12, 13, cell_type)
    V = VectorFunctionSpace(mesh, ("Lagrange", 2))
    u = Function(V)
    # Complex constant when PETSc uses complex scalars, real otherwise.
    u.vector.set(1.0 + (1j if has_petsc_complex else 0))
    with XDMFFile(mesh.mpi_comm(), filename, "w", encoding=encoding) as outfile:
        outfile.write_mesh(mesh)
        outfile.write_function(u)
def test_save_2d_scalar(tempdir, encoding, cell_type):
    """Write a constant scalar function (mesh + function) to XDMF."""
    filename = os.path.join(tempdir, "u2.xdmf")
    mesh = UnitSquareMesh(MPI.COMM_WORLD, 12, 12, cell_type)
    V = FunctionSpace(mesh, ("Lagrange", 2))
    u = Function(V)
    u.vector.set(1.0)
    with XDMFFile(mesh.mpi_comm(), filename, "w", encoding=encoding) as outfile:
        outfile.write_mesh(mesh)
        outfile.write_function(u)
def test_save_2d_scalar(tempdir, encoding, cell_type):
    """Write a constant scalar function to XDMF (legacy write API)."""
    filename = os.path.join(tempdir, "u2.xdmf")
    mesh = UnitSquareMesh(MPI.comm_world, 16, 16, cell_type)
    # FIXME: This randomly hangs in parallel
    V = FunctionSpace(mesh, ("Lagrange", 2))
    u = Function(V)
    # Complex constant when PETSc uses complex scalars, real otherwise.
    u.vector.set(1.0 + (1j if has_petsc_complex else 0))
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as outfile:
        outfile.write(u)
def test_save_2D_facet_function(tempdir, encoding, data_type, cell_type):
    """Round-trip a facet MeshFunction of the given dtype through XDMF."""
    dtype_str, dtype = data_type
    mesh = UnitSquareMesh(MPI.comm_world, 32, 32, cell_type)
    tdim = mesh.topology.dim

    mf = MeshFunction(dtype_str, mesh, tdim - 1, 0)
    mf.name = "facets"
    # Tag each facet with its global index for a process-independent check.
    global_indices = mesh.topology.global_indices(tdim - 1)
    mf.values[:] = global_indices[:]

    filename = os.path.join(tempdir, "mf_facet_2D_%s.xdmf" % dtype_str)
    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as xdmf:
        xdmf.write(mf)

    with XDMFFile(mesh.mpi_comm(), filename) as xdmf:
        reader = getattr(xdmf, "read_mf_" + dtype_str)
        mf_in = reader(mesh, "facets")

    assert np.array_equal(mf_in.values, mf.values)
def test_save_and_load_2d_mesh(tempdir, encoding, cell_type):
    """Write a 2D mesh to XDMF and verify the reloaded mesh has the same
    global vertex and cell counts (legacy API)."""
    filename = os.path.join(tempdir, "mesh_2D.xdmf")
    mesh = UnitSquareMesh(MPI.comm_world, 32, 32, cell_type)

    with XDMFFile(mesh.mpi_comm(), filename, encoding=encoding) as outfile:
        outfile.write(mesh)

    with XDMFFile(MPI.comm_world, filename) as infile:
        mesh2 = infile.read_mesh(cpp.mesh.GhostMode.none)

    assert mesh.num_entities_global(0) == mesh2.num_entities_global(0)
    dim = mesh.topology.dim
    assert mesh.num_entities_global(dim) == mesh2.num_entities_global(dim)
def test_save_2d_tensor(tempdir):
    """Write a tensor-valued function to a VTK (.pvd) file at two
    time steps without error."""
    mesh = UnitSquareMesh(MPI.COMM_WORLD, 16, 16)
    u = Function(TensorFunctionSpace(mesh, ("Lagrange", 2)))
    with u.vector.localForm() as local_vec:
        local_vec.set(1.0)
    filename = os.path.join(tempdir, "u.pvd")
    with VTKFile(mesh.mpi_comm(), filename, "w") as vtk:
        # First time step with value 1, second with value 2.
        vtk.write_function(u, 0.)
        with u.vector.localForm() as local_vec:
            local_vec.set(2.0)
        vtk.write_function(u, 1.)
def test_save_and_read_meshfunction_2D(tempdir):
    """Round-trip mesh functions of every entity dimension (0..2)
    through a single HDF5 file."""
    filename = os.path.join(tempdir, "meshfn-2d.h5")

    # Write to file
    mesh = UnitSquareMesh(MPI.comm_world, 20, 20)
    meshfunctions = []
    with HDF5File(mesh.mpi_comm(), filename, "w") as mf_file:
        for dim in range(3):
            mf = MeshFunction('double', mesh, dim, 0.0)
            # NB choose a value to set which will be the same on every
            # process for each entity: the x-coordinate of its midpoint.
            midpoints = cpp.mesh.midpoints(mesh, dim,
                                           range(mesh.num_entities(dim)))
            mf.values[:] = midpoints[:, 0]
            meshfunctions.append(mf)
            mf_file.write(mf, "/meshfunction/meshfun%d" % dim)

    # Read back from file and compare with what was saved.
    with HDF5File(mesh.mpi_comm(), filename, "r") as mf_file:
        for dim in range(3):
            mf2 = mf_file.read_mf_double(mesh,
                                         "/meshfunction/meshfun%d" % dim)
            assert numpy.all(meshfunctions[dim].values == mf2.values)
def test_save_and_load_2d_mesh(tempdir, encoding, cell_type):
    """Write a named 2D mesh to XDMF and verify name and global entity
    counts survive the round trip (new API)."""
    filename = os.path.join(tempdir, "mesh.xdmf")
    mesh = UnitSquareMesh(MPI.COMM_WORLD, 12, 12, cell_type)
    mesh.name = "square"

    with XDMFFile(mesh.mpi_comm(), filename, "w", encoding=encoding) as outfile:
        outfile.write_mesh(mesh)

    with XDMFFile(MPI.COMM_WORLD, filename, "r", encoding=encoding) as infile:
        mesh2 = infile.read_mesh(name="square")

    assert mesh2.name == mesh.name
    topology, topology2 = mesh.topology, mesh2.topology
    assert topology.index_map(0).size_global == topology2.index_map(0).size_global
    tdim = topology.dim
    assert topology.index_map(tdim).size_global == topology2.index_map(tdim).size_global
def test_GetCells():
    """Get cells of mesh and check the global cell count."""
    mesh = UnitSquareMesh(MPI.comm_world, 5, 5)
    # 5x5 squares, two triangles each.
    local_count = len(mesh.cells())
    assert MPI.sum(mesh.mpi_comm(), local_count) == 50
def test_cffi_assembly():
    """Assemble a Poisson system using hand-written tabulation kernels
    compiled with CFFI, and compare matrix/vector norms against
    reference values.

    NOTE(review): this block was recovered from a minified rendering;
    the in-string line breaks of the embedded C source have been
    reconstructed and may not match the original byte-for-byte.
    """
    mesh = UnitSquareMesh(MPI.COMM_WORLD, 13, 13)
    V = FunctionSpace(mesh, ("Lagrange", 1))
    # Compile the CFFI extension on rank 0 only; other ranks wait at the
    # barrier below before importing the shared module.
    if mesh.mpi_comm().rank == 0:
        from cffi import FFI
        ffibuilder = FFI()
        ffibuilder.set_source("_cffi_kernelA", r"""
#include <math.h>
#include <stdalign.h>
void tabulate_tensor_poissonA(double* restrict A, const double* w,
                              const double* c,
                              const double* restrict coordinate_dofs,
                              const int* entity_local_index,
                              const int* cell_orientation)
{
// Precomputed values of basis functions and precomputations
// FE* dimensions: [entities][points][dofs]
// PI* dimensions: [entities][dofs][dofs] or [entities][dofs]
// PM* dimensions: [entities][dofs][dofs]
alignas(32) static const double FE3_C0_D01_Q1[1][1][2] = { { { -1.0, 1.0 } } };
// Unstructured piecewise computations
const double J_c0 = coordinate_dofs[0] * FE3_C0_D01_Q1[0][0][0] + coordinate_dofs[2] * FE3_C0_D01_Q1[0][0][1];
const double J_c3 = coordinate_dofs[1] * FE3_C0_D01_Q1[0][0][0] + coordinate_dofs[5] * FE3_C0_D01_Q1[0][0][1];
const double J_c1 = coordinate_dofs[0] * FE3_C0_D01_Q1[0][0][0] + coordinate_dofs[4] * FE3_C0_D01_Q1[0][0][1];
const double J_c2 = coordinate_dofs[1] * FE3_C0_D01_Q1[0][0][0] + coordinate_dofs[3] * FE3_C0_D01_Q1[0][0][1];
alignas(32) double sp[20];
sp[0] = J_c0 * J_c3;
sp[1] = J_c1 * J_c2;
sp[2] = sp[0] + -1 * sp[1];
sp[3] = J_c0 / sp[2];
sp[4] = -1 * J_c1 / sp[2];
sp[5] = sp[3] * sp[3];
sp[6] = sp[3] * sp[4];
sp[7] = sp[4] * sp[4];
sp[8] = J_c3 / sp[2];
sp[9] = -1 * J_c2 / sp[2];
sp[10] = sp[9] * sp[9];
sp[11] = sp[8] * sp[9];
sp[12] = sp[8] * sp[8];
sp[13] = sp[5] + sp[10];
sp[14] = sp[6] + sp[11];
sp[15] = sp[12] + sp[7];
sp[16] = fabs(sp[2]);
sp[17] = sp[13] * sp[16];
sp[18] = sp[14] * sp[16];
sp[19] = sp[15] * sp[16];
// UFLACS block mode: preintegrated
A[0] = 0.5 * sp[19] + 0.5 * sp[18] + 0.5 * sp[18] + 0.5 * sp[17];
A[1] = -0.5 * sp[19] + -0.5 * sp[18];
A[2] = -0.5 * sp[18] + -0.5 * sp[17];
A[3] = -0.5 * sp[19] + -0.5 * sp[18];
A[4] = 0.5 * sp[19];
A[5] = 0.5 * sp[18];
A[6] = -0.5 * sp[18] + -0.5 * sp[17];
A[7] = 0.5 * sp[18];
A[8] = 0.5 * sp[17];
}

void tabulate_tensor_poissonL(double* restrict A, const double* w,
                              const double* c,
                              const double* restrict coordinate_dofs,
                              const int* entity_local_index,
                              const int* cell_orientation)
{
// Precomputed values of basis functions and precomputations
// FE* dimensions: [entities][points][dofs]
// PI* dimensions: [entities][dofs][dofs] or [entities][dofs]
// PM* dimensions: [entities][dofs][dofs]
alignas(32) static const double FE4_C0_D01_Q1[1][1][2] = { { { -1.0, 1.0 } } };
// Unstructured piecewise computations
const double J_c0 = coordinate_dofs[0] * FE4_C0_D01_Q1[0][0][0] + coordinate_dofs[2] * FE4_C0_D01_Q1[0][0][1];
const double J_c3 = coordinate_dofs[1] * FE4_C0_D01_Q1[0][0][0] + coordinate_dofs[5] * FE4_C0_D01_Q1[0][0][1];
const double J_c1 = coordinate_dofs[0] * FE4_C0_D01_Q1[0][0][0] + coordinate_dofs[4] * FE4_C0_D01_Q1[0][0][1];
const double J_c2 = coordinate_dofs[1] * FE4_C0_D01_Q1[0][0][0] + coordinate_dofs[3] * FE4_C0_D01_Q1[0][0][1];
alignas(32) double sp[4];
sp[0] = J_c0 * J_c3;
sp[1] = J_c1 * J_c2;
sp[2] = sp[0] + -1 * sp[1];
sp[3] = fabs(sp[2]);
// UFLACS block mode: preintegrated
A[0] = 0.1666666666666667 * sp[3];
A[1] = 0.1666666666666667 * sp[3];
A[2] = 0.1666666666666667 * sp[3];
}
""")
        ffibuilder.cdef("""
void tabulate_tensor_poissonA(double* restrict A, const double* w,
                              const double* c,
                              const double* restrict coordinate_dofs,
                              const int* entity_local_index,
                              const int* cell_orientation);
void tabulate_tensor_poissonL(double* restrict A, const double* w,
                              const double* c,
                              const double* restrict coordinate_dofs,
                              const int* entity_local_index,
                              const int* cell_orientation);
""")
        ffibuilder.compile(verbose=True)

    # All ranks wait until the extension module exists before importing it.
    mesh.mpi_comm().Barrier()
    from _cffi_kernelA import ffi, lib

    # Build the bilinear form from the compiled kernel's address.
    ptrA = ffi.cast("intptr_t", ffi.addressof(lib, "tabulate_tensor_poissonA"))
    integrals = {IntegralType.cell: ([(-1, ptrA)], None)}
    a = cpp.fem.Form([V._cpp_object, V._cpp_object], integrals, [], [], False)

    # Build the linear form the same way.
    ptrL = ffi.cast("intptr_t", ffi.addressof(lib, "tabulate_tensor_poissonL"))
    integrals = {IntegralType.cell: ([(-1, ptrL)], None)}
    L = cpp.fem.Form([V._cpp_object], integrals, [], [], False)

    A = dolfinx.fem.assemble_matrix(a)
    A.assemble()
    b = dolfinx.fem.assemble_vector(L)
    b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)

    Anorm = A.norm(PETSc.NormType.FROBENIUS)
    bnorm = b.norm(PETSc.NormType.N2)
    # Reference norms for this mesh/kernel combination.
    assert (np.isclose(Anorm, 56.124860801609124))
    assert (np.isclose(bnorm, 0.0739710713711999))

    list_timings(MPI.COMM_WORLD, [TimingType.wall])
def solution(values, x): values[:, 0] = A * np.cos(k0 * x[:, 0]) * np.cos(k0 * x[:, 1]) # Function space for exact solution - need it to be higher than deg V_exact = FunctionSpace(mesh, ("Lagrange", deg + 3)) u_exact = Function(V_exact) u_exact.interpolate(lambda x: A * np.cos(k0 * x[0]) * np.cos(k0 * x[1])) # best approximation from V # u_BA = project(u_exact, V) # H1 errors diff = u - u_exact H1_diff = mesh.mpi_comm().allreduce(assemble_scalar( inner(grad(diff), grad(diff)) * dx), op=MPI.SUM) H1_exact = mesh.mpi_comm().allreduce(assemble_scalar( inner(grad(u_exact), grad(u_exact)) * dx), op=MPI.SUM) print("Relative H1 error of FEM solution:", abs(np.sqrt(H1_diff) / np.sqrt(H1_exact))) # L2 errors L2_diff = mesh.mpi_comm().allreduce(assemble_scalar(inner(diff, diff) * dx), op=MPI.SUM) L2_exact = mesh.mpi_comm().allreduce(assemble_scalar( inner(u_exact, u_exact) * dx), op=MPI.SUM) print("Relative L2 error of FEM solution:", abs(np.sqrt(L2_diff) / np.sqrt(L2_exact)))
# "Exact" solution expression def solution(values, x): values[:, 0] = A * np.cos(k0 * x[:, 0]) * np.cos(k0 * x[:, 1]) # Function space for exact solution - need it to be higher than deg V_exact = FunctionSpace(mesh, ("Lagrange", deg + 3)) u_exact = Function(V_exact) u_exact.interpolate(lambda x: A * np.cos(k0 * x[0]) * np.cos(k0 * x[1])) # best approximation from V # u_BA = project(u_exact, V) # H1 errors diff = u - u_exact # diff_BA = u_BA - u_exact H1_diff = MPI.sum(mesh.mpi_comm(), assemble_scalar(inner(grad(diff), grad(diff)) * dx)) # H1_BA = MPI.sum(mesh.mpi_comm(), assemble_scalar(inner(grad(diff_BA), grad(diff_BA)) * dx)) H1_exact = MPI.sum(mesh.mpi_comm(), assemble_scalar(inner(grad(u_exact), grad(u_exact)) * dx)) # print("Relative H1 error of best approximation:", abs(np.sqrt(H1_BA) / np.sqrt(H1_exact))) print("Relative H1 error of FEM solution:", abs(np.sqrt(H1_diff) / np.sqrt(H1_exact))) # L2 errors L2_diff = MPI.sum(mesh.mpi_comm(), assemble_scalar(inner(diff, diff) * dx)) # L2_BA = MPI.sum(mesh.mpi_comm(), assemble_scalar(inner(diff_BA, diff_BA) * dx)) L2_exact = MPI.sum(mesh.mpi_comm(), assemble_scalar(inner(u_exact, u_exact) * dx)) # print("Relative L2 error of best approximation:", abs(np.sqrt(L2_BA) / np.sqrt(L2_exact))) print("Relative L2 error of FEM solution:", abs(np.sqrt(L2_diff) / np.sqrt(L2_exact)))