def test_exterior_stokes(ctx_factory, ambient_dim, visualize=False):
    """Check convergence of the exterior Stokes solver in *ambient_dim*.

    Runs :func:`run_exterior_stokes` over a sequence of resolutions and
    asserts that the estimated order of convergence is close to the
    target order.
    """
    if visualize:
        logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    target_order = 3
    qbx_order = 3

    # NOTE: removed a stray debug ``print(ambient_dim)`` that was here
    if ambient_dim == 2:
        resolutions = [20, 35, 50]
    elif ambient_dim == 3:
        resolutions = [0, 1, 2]
    else:
        raise ValueError(f"unsupported dimension: {ambient_dim}")

    for resolution in resolutions:
        h_max, err = run_exterior_stokes(ctx_factory,
                ambient_dim=ambient_dim,
                target_order=target_order,
                qbx_order=qbx_order,
                resolution=resolution,
                visualize=visualize)

        eoc.add_data_point(h_max, err)

    print(eoc)

    # This convergence data is not as clean as it could be. See
    # https://github.com/inducer/pytential/pull/32
    # for some discussion.
    assert eoc.order_estimate() > target_order - 0.5
def test_integration_order(integrator, method_order):
    """Verify that *integrator* converges at (at least) *method_order*.

    Integrates y' = -exp(-t), with exact solution y(t) = exp(-t), up to
    t = 4 at a sequence of shrinking step sizes and fits the observed
    order of the relative end-time error.
    """
    from pytools.convergence import EOCRecorder

    def exact_soln(t):
        return np.exp(-t)

    def rhs(t, state):
        return -np.exp(-t)

    integrator_eoc = EOCRecorder()

    dt = 1.0
    for refinement in (1, 2, 4, 8):
        # step size shrinks cumulatively across refinement levels
        dt /= refinement

        t = 0
        state = exact_soln(t)
        while t < 4:
            state = integrator(state, t, dt, rhs)
            t += dt

        rel_error = np.abs(state - exact_soln(t)) / exact_soln(t)
        integrator_eoc.add_data_point(dt, rel_error)

    logger.info(f"Time Integrator EOC:\n = {integrator_eoc}")
    assert integrator_eoc.order_estimate() >= method_order - .01
def test_leapgen_integration_order(method, method_order):
    """Verify that the leap-generated *method* converges at *method_order*.

    Integrates y' = -exp(-t) (exact solution exp(-t)) to t = 4 via
    ``advance_state`` and fits the observed order of the relative error.
    """
    from pytools.convergence import EOCRecorder

    def exact_soln(t):
        return np.exp(-t)

    def rhs(t, y):
        return -np.exp(-t)

    integrator_eoc = EOCRecorder()

    dt = 1.0
    for level in (1, 2, 4, 8):
        # step size shrinks cumulatively across refinement levels
        dt /= level

        t = 0
        t_final = 4
        step = 0
        state = exact_soln(t)

        (step, t, state) = \
            advance_state(rhs=rhs, timestepper=method,
                          dt=dt, state=state, t=t, t_final=t_final,
                          component_id="y")

        rel_error = np.abs(state - exact_soln(t)) / exact_soln(t)
        integrator_eoc.add_data_point(dt, rel_error)

    logger.info(f"Time Integrator EOC:\n = {integrator_eoc}")
    assert integrator_eoc.order_estimate() >= method_order - .1
def test_pde_check_kernels(ctx_factory, knl_info, order=5):
    """Check that the potential of random point sources satisfies the
    kernel's PDE on a calculus patch, converging at the expected order."""
    from pytools.convergence import EOCRecorder
    from sumpy.point_calculus import CalculusPatch

    dim = knl_info.kernel.dim
    toy_ctx = t.ToyContext(ctx_factory(), knl_info.kernel,
            extra_source_kwargs=knl_info.extra_kwargs)

    # 50 unit-strength sources placed randomly in [-0.5, 0.5)^dim
    point_sources = t.PointSources(
            toy_ctx, np.random.rand(dim, 50) - 0.5, np.ones(50))

    eoc_rec = EOCRecorder()
    for patch_size in (0.1, 0.05, 0.025):
        # patch centered away from the sources, where the field is smooth
        patch = CalculusPatch(np.array([1, 0, 0])[:dim],
                h=patch_size, order=order)

        potential = point_sources.eval(patch.points)
        residual = knl_info.pde_func(patch, potential)
        eoc_rec.add_data_point(patch_size, la.norm(residual))

    print(eoc_rec)

    # differentiating nderivs times costs that many orders of accuracy
    assert eoc_rec.order_estimate() > order - knl_info.nderivs + 1 - 0.1
def test_reversed_chained_connection(actx_factory, ndim, mesh_name):
    # Check convergence of L2ProjectionInverseDiscretizationConnection:
    # interpolate through a chain of refinements, project back, and
    # compare against the original function.
    actx = actx_factory()

    def run(nelements, order):
        # Build a chain of refined connections, then its L2-projection
        # inverse; return (h, relative inf-norm round-trip error).
        discr = create_discretization(actx, ndim,
                nelements=nelements,
                order=order,
                mesh_name=mesh_name)

        threshold = 1.0
        connections = []
        conn = create_refined_connection(actx,
                discr, threshold=threshold)
        connections.append(conn)
        if ndim == 2:
            # NOTE: additional refinement makes the 3D meshes explode in size
            conn = create_refined_connection(actx,
                    conn.to_discr, threshold=threshold)
            connections.append(conn)
            conn = create_refined_connection(actx,
                    conn.to_discr, threshold=threshold)
            connections.append(conn)

        from meshmode.discretization.connection import \
                ChainedDiscretizationConnection
        chained = ChainedDiscretizationConnection(connections)
        from meshmode.discretization.connection import \
                L2ProjectionInverseDiscretizationConnection
        reverse = L2ProjectionInverseDiscretizationConnection(chained)

        # create test vector
        from_nodes = thaw(chained.from_discr.nodes(), actx)
        to_nodes = thaw(chained.to_discr.nodes(), actx)

        from_x = 0
        to_x = 0
        for d in range(ndim):
            # smooth, non-polynomial test function: sum of cos(x_d)^(d+1)
            from_x = from_x + actx.np.cos(from_nodes[d])**(d + 1)
            to_x = to_x + actx.np.cos(to_nodes[d])**(d + 1)

        from_interp = reverse(to_x)

        return (1.0 / nelements,
                flat_norm(from_interp - from_x, np.inf)
                / flat_norm(from_x, np.inf))

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    order = 4
    mesh_sizes = [16, 32, 48, 64, 96, 128]

    for n in mesh_sizes:
        h, error = run(n, order)
        eoc.add_data_point(h, error)

    print(eoc)

    assert eoc.order_estimate() > (order + 1 - 0.5)
def test_pde_check_kernels(ctx_factory, knl_info, order=5):
    # Evaluate the potential of random point sources on a calculus patch
    # and verify that it satisfies the kernel's PDE to the expected order.
    dim = knl_info.kernel.dim
    tctx = t.ToyContext(ctx_factory(), knl_info.kernel,
            extra_source_kwargs=knl_info.extra_kwargs)

    # 50 unit-strength sources placed randomly in [-0.5, 0.5)^dim
    pt_src = t.PointSources(
            tctx,
            np.random.rand(dim, 50) - 0.5,
            np.ones(50))

    from pytools.convergence import EOCRecorder
    from sumpy.point_calculus import CalculusPatch
    eoc_rec = EOCRecorder()

    for h in [0.1, 0.05, 0.025]:
        # patch centered away from the sources so the field is smooth there
        cp = CalculusPatch(np.array([1, 0, 0])[:dim], h=h, order=order)
        pot = pt_src.eval(cp.points)

        # residual of the PDE applied to the numerically evaluated potential
        pde = knl_info.pde_func(cp, pot)
        err = la.norm(pde)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)

    # differentiating nderivs times costs that many orders of accuracy
    assert eoc_rec.order_estimate() > order - knl_info.nderivs + 1 - 0.1
def test_dielectric(ctx_factory, qbx_order, op_class, mode, visualize=False):
    """Convergence check for the dielectric interface test problem; the
    estimated order must come out close to *qbx_order*."""
    import logging
    logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    from sympy.core.cache import clear_cache

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)

    eoc = EOCRecorder()
    for nelements in (30, 50, 70):
        # prevent sympy cache 'splosion
        clear_cache()

        errs = run_dielectric_test(cl_ctx, queue,
                nelements=nelements, qbx_order=qbx_order,
                op_class=op_class, mode=mode,
                visualize=visualize)

        # record the worst of the reported errors at h ~ 1/nelements
        eoc.add_data_point(1 / nelements, la.norm(list(errs), np.inf))

    print(eoc)

    assert eoc.order_estimate() > qbx_order - 0.5
def test_dielectric(ctx_getter, qbx_order, op_class, mode, visualize=False):
    # Convergence check for the dielectric interface test problem, run
    # over a sequence of element counts.
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    import logging
    logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nelements in [30, 50, 70]:
        # prevent sympy cache 'splosion
        from sympy.core.cache import clear_cache
        clear_cache()

        errs = run_dielectric_test(
                cl_ctx, queue,
                nelements=nelements,
                qbx_order=qbx_order,
                op_class=op_class,
                mode=mode,
                visualize=visualize)

        # record the worst of the reported errors at h ~ 1/nelements
        eoc_rec.add_data_point(1/nelements, la.norm(list(errs), np.inf))

    print(eoc_rec)

    assert eoc_rec.order_estimate() > qbx_order - 0.5
def test_mean_curvature(ctx_factory, discr_name, resolutions,
        discr_and_ref_mean_curvature_getter, visualize=False):
    """Compare symbolically computed mean curvature against a reference,
    checking convergence at (roughly) the discretization order."""
    from pytools.convergence import EOCRecorder

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    actx = PyOpenCLArrayContext(queue)

    eoc = EOCRecorder()
    for r in resolutions:
        discr, ref_mean_curvature = \
            discr_and_ref_mean_curvature_getter(actx, r)

        # evaluate the symbolic mean-curvature expression on the geometry
        mean_curvature = bind(discr,
            sym.mean_curvature(discr.ambient_dim))(actx)

        from meshmode.dof_array import flat_norm
        eoc.add_data_point(
                1.0 / r,
                flat_norm(mean_curvature - ref_mean_curvature, np.inf))

    print(eoc)

    # allow a bit more than one order of slack below the element order
    order = min(g.order for g in discr.groups)
    assert eoc.order_estimate() > order - 1.1
def test_integral_equation(ctx_getter, case, visualize=False):
    """Solve the integral-equation test *case* over its resolutions and
    check convergence of the target error and, when requested, of the
    tangential-derivative error."""
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    if USE_SYMENGINE and case.fmm_backend is None:
        pytest.skip("https://gitlab.tiker.net/inducer/sumpy/issues/25")

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    from pytools.convergence import EOCRecorder
    print("qbx_order: %d, %s" % (case.qbx_order, case))

    eoc_rec_target = EOCRecorder()
    eoc_rec_td = EOCRecorder()

    have_error_data = False
    for resolution in case.resolutions:
        result = run_int_eq_test(cl_ctx, queue, case, resolution,
                visualize=visualize)

        if result.rel_err_2 is not None:
            have_error_data = True
            eoc_rec_target.add_data_point(result.h_max, result.rel_err_2)

        if result.rel_td_err_inf is not None:
            eoc_rec_td.add_data_point(result.h_max, result.rel_td_err_inf)

    if case.bc_type == "dirichlet":
        tgt_order = case.qbx_order
    elif case.bc_type == "neumann":
        tgt_order = case.qbx_order-1
    else:
        # was `assert False`: raise explicitly so the failure survives
        # ``python -O`` and carries a useful message
        raise ValueError(f"unknown bc_type: {case.bc_type!r}")

    if have_error_data:
        print("TARGET ERROR:")
        print(eoc_rec_target)
        assert eoc_rec_target.order_estimate() > tgt_order - 1.3

        if case.check_tangential_deriv:
            print("TANGENTIAL DERIVATIVE ERROR:")
            print(eoc_rec_td)
            assert eoc_rec_td.order_estimate() > tgt_order - 2.3
def test_isentropic_vortex(actx_factory, order):
    """Advance the 2D isentropic vortex case in time with non-zero velocities
    using an RK4 timestepping scheme. Check the advanced field values against
    the exact/analytic expressions. This tests all parts of the Euler module
    working together, with results converging at the expected rates vs. the
    order.
    """
    actx = actx_factory()

    dim = 2

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nel_1d in [16, 32, 64]:
        from meshmode.mesh.generation import (
            generate_regular_rect_mesh,
        )

        mesh = generate_regular_rect_mesh(a=(-5.0, ) * dim,
                                          b=(5.0, ) * dim,
                                          nelements_per_axis=(nel_1d, ) * dim)

        # flow/stepping parameters for this refinement level
        exittol = 1.0
        t_final = 0.001
        cfl = 1.0
        vel = np.zeros(shape=(dim, ))
        orig = np.zeros(shape=(dim, ))
        # unit advection velocity in every direction
        vel[:dim] = 1.0
        dt = .0001
        initializer = Vortex2D(center=orig, velocity=vel)
        casename = "Vortex"
        boundaries = {BTAG_ALL: PrescribedBoundary(initializer)}
        eos = IdealSingleGas()
        t = 0
        flowparams = {
            "dim": dim,
            "dt": dt,
            "order": order,
            "time": t,
            "boundaries": boundaries,
            "initializer": initializer,
            "eos": eos,
            "casename": casename,
            "mesh": mesh,
            "tfinal": t_final,
            "exittol": exittol,
            "cfl": cfl,
            "constantcfl": False,
            "nstatus": 0
        }
        # maximum error reported by the flow stepper for this resolution
        maxerr = _euler_flow_stepper(actx, flowparams)
        eoc_rec.add_data_point(1.0 / nel_1d, maxerr)

    logger.info(f"Error for (dim,order) = ({dim},{order}):\n"
                f"{eoc_rec}")

    # accept either expected-order convergence or near-machine-zero error
    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < 1e-11)
def test_wave_accuracy(actx_factory, problem, order, visualize=False):
    """Checks accuracy of the wave operator for a given problem setup.
    """
    actx = actx_factory()

    p = problem

    # symbolic manufactured solution: state (u, v), source f, expected rhs
    sym_u, sym_v, sym_f, sym_rhs = sym_wave(p.dim, p.sym_phi)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    # coarser resolutions in 3D to keep runtime bounded
    for n in [8, 10, 12] if p.dim == 3 else [8, 12, 16]:
        mesh = p.mesh_factory(n)

        from grudge.eager import EagerDGDiscretization
        discr = EagerDGDiscretization(actx, mesh, order=order)

        nodes = thaw(actx, discr.nodes())

        def sym_eval(expr, t):
            # evaluate a symbolic expression at the mesh nodes at time t
            return sym.EvaluationMapper({"c": p.c, "x": nodes, "t": t})(expr)

        # arbitrary non-special check time
        t_check = 1.23456789

        u = sym_eval(sym_u, t_check)
        v = sym_eval(sym_v, t_check)

        fields = flat_obj_array(u, v)

        rhs = wave_operator(discr, c=p.c, w=fields)
        rhs[0] = rhs[0] + sym_eval(sym_f, t_check)

        expected_rhs = sym_eval(sym_rhs, t_check)

        rel_linf_err = actx.to_numpy(
            discr.norm(rhs - expected_rhs, np.inf)
            / discr.norm(expected_rhs, np.inf))
        eoc_rec.add_data_point(1. / n, rel_linf_err)

        if visualize:
            from grudge.shortcuts import make_visualizer
            vis = make_visualizer(discr, discr.order)
            vis.write_vtk_file(
                "wave_accuracy_{order}_{n}.vtu".format(order=order, n=n), [
                    ("u", fields[0]),
                    ("v", fields[1:]),
                    ("rhs_u_actual", rhs[0]),
                    ("rhs_v_actual", rhs[1:]),
                    ("rhs_u_expected", expected_rhs[0]),
                    ("rhs_v_expected", expected_rhs[1:]),
                ])

    print("Approximation error:")
    print(eoc_rec)
    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < 1e-11)
def test_direct(ctx_factory):
    """Evaluate the Laplace single-layer potential on the unit circle via
    QBX and check convergence against the reference solution built from
    the eigenvalue 1/(2 k) of the cos(k t) density mode."""
    logging.basicConfig(level=logging.INFO)

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    from sumpy.kernel import LaplaceKernel
    from sumpy.qbx import LayerPotential
    from sumpy.expansion.local import LineTaylorLocalExpansion
    from pytools.convergence import EOCRecorder

    order = 12
    mode_nr = 25

    lknl = LaplaceKernel(2)
    lpot = LayerPotential(ctx, [LineTaylorLocalExpansion(lknl, order)])

    eocrec = EOCRecorder()
    for n in (200, 300, 400):
        angles = np.linspace(0, 2 * np.pi, n, endpoint=False)
        circle_pts = np.exp(1j * angles)
        circle_pts = np.array([circle_pts.real, circle_pts.imag])

        density = np.cos(mode_nr * angles)
        eigval = 1 / (2 * mode_nr)
        result_ref = eigval * density

        h = 2 * np.pi / n
        # expansion centers a few mesh widths inside the circle
        radius = 7 * h
        centers = circle_pts * (1 - radius)
        expansion_radii = np.ones(n) * radius

        strengths = (density * h, )
        evt, (result_qbx, ) = lpot(queue,
                circle_pts, circle_pts, centers, strengths,
                expansion_radii=expansion_radii)

        eocrec.add_data_point(h, np.max(np.abs(result_ref - result_qbx)))

    print(eocrec)

    slack = 1.5
    assert eocrec.order_estimate() > order - slack
def test_lump_rhs(actx_factory, dim, order):
    """Test the inviscid rhs using the non-trivial mass lump case.

    The case is tested against the analytic expressions of the RHS.
    Checks several different orders and refinement levels to check error
    behavior.
    """
    actx = actx_factory()

    tolerance = 1e-10
    maxxerr = 0.0

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nel_1d in [4, 8, 12]:
        from meshmode.mesh.generation import (
            generate_regular_rect_mesh,
        )

        mesh = generate_regular_rect_mesh(
            a=(-5, ) * dim, b=(5, ) * dim, nelements_per_axis=(nel_1d, ) * dim,
        )

        logger.info(f"Number of elements: {mesh.nelements}")

        discr = EagerDGDiscretization(actx, mesh, order=order)
        nodes = thaw(actx, discr.nodes())

        # Init soln with Lump and expected RHS = 0
        center = np.zeros(shape=(dim, ))
        velocity = np.zeros(shape=(dim, ))
        lump = Lump(dim=dim, center=center, velocity=velocity)
        lump_soln = lump(nodes)
        boundaries = {
            BTAG_ALL: PrescribedInviscidBoundary(fluid_solution_func=lump)
        }
        inviscid_rhs = euler_operator(discr, eos=IdealSingleGas(),
                                      boundaries=boundaries, cv=lump_soln,
                                      time=0.0)
        expected_rhs = lump.exact_rhs(discr, cv=lump_soln, time=0)

        # pointwise max error between computed and exact RHS
        err_max = discr.norm((inviscid_rhs - expected_rhs).join(), np.inf)
        if err_max > maxxerr:
            maxxerr = err_max

        eoc_rec.add_data_point(1.0 / nel_1d, err_max)

    logger.info(f"Max error: {maxxerr}")

    logger.info(f"Error for (dim,order) = ({dim},{order}):\n"
                f"{eoc_rec}")

    # accept either expected-order convergence or near-zero error
    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < tolerance)
def test_integral_equation(
        ctx_getter, curve_name, curve_f, qbx_order, bc_type, loc_sign, k,
        target_order=7, source_order=None):
    """Run the integral-equation test for one curve/BC combination over a
    sequence of element counts and check convergence of the target and
    tangential-derivative errors."""
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    from pytools.convergence import EOCRecorder
    print(("curve_name: %s, qbx_order: %d, bc_type: %s, loc_sign: %s, "
            "helmholtz_k: %s" % (curve_name, qbx_order, bc_type, loc_sign, k)))

    eoc_rec_target = EOCRecorder()
    eoc_rec_td = EOCRecorder()

    for nelements in [30, 40, 50]:
        result = run_int_eq_test(
                cl_ctx, queue, curve_f, nelements, qbx_order,
                bc_type, loc_sign, k, target_order=target_order,
                source_order=source_order)

        eoc_rec_target.add_data_point(1/nelements, result.rel_err_2)
        eoc_rec_td.add_data_point(1/nelements, result.rel_td_err_inf)

    if bc_type == "dirichlet":
        tgt_order = qbx_order
    elif bc_type == "neumann":
        tgt_order = qbx_order-1
    else:
        # was `assert False`: raise explicitly so the failure survives
        # ``python -O`` and carries a useful message
        raise ValueError(f"unknown bc_type: {bc_type!r}")

    print("TARGET ERROR:")
    print(eoc_rec_target)
    assert eoc_rec_target.order_estimate() > tgt_order - 1.3

    print("TANGENTIAL DERIVATIVE ERROR:")
    print(eoc_rec_td)
    assert eoc_rec_td.order_estimate() > tgt_order - 2.3
def test_exterior_stokes_2d(ctx_factory, qbx_order=3):
    """Convergence check for the 2D exterior Stokes solver."""
    from pytools.convergence import EOCRecorder

    eoc = EOCRecorder()
    for nelements in (20, 50):
        h_max, l2_err = run_exterior_stokes_2d(ctx_factory, nelements)
        eoc.add_data_point(h_max, l2_err)

    print(eoc)
    assert eoc.order_estimate() >= qbx_order - 1
def conv_test(descr, use_quad):
    # Convergence helper: runs the advection operator at two mesh
    # resolutions and returns (order estimate, array of recorded errors).
    # Relies on actx, dims, order, gaussian_mode, ... from the enclosing
    # scope.
    logger.info("-" * 75)
    logger.info(descr)
    logger.info("-" * 75)
    eoc_rec = EOCRecorder()

    if use_quad:
        qtag = dof_desc.DISCR_TAG_QUAD
    else:
        qtag = None

    ns = [20, 25]
    for n in ns:
        mesh = mgen.generate_regular_rect_mesh(a=(-0.5, ) * dims,
                                               b=(0.5, ) * dims,
                                               nelements_per_axis=(n, ) * dims,
                                               order=order)

        if use_quad:
            # overkill quadrature to isolate aliasing effects
            discr_tag_to_group_factory = {
                qtag: QuadratureSimplexGroupFactory(order=4 * order)
            }
        else:
            discr_tag_to_group_factory = {}

        dcoll = DiscretizationCollection(
            actx, mesh, order=order,
            discr_tag_to_group_factory=discr_tag_to_group_factory)

        nodes = thaw(dcoll.nodes(), actx)

        def zero_inflow(dtag, t=0):
            # zero boundary data on the tagged (possibly quadrature) discr
            dd = dof_desc.DOFDesc(dtag, qtag)
            return dcoll.discr_from_dd(dd).zeros(actx)

        # rotational velocity field (-y, x)
        adv_op = VariableCoefficientAdvectionOperator(
            dcoll,
            flat_obj_array(-1 * nodes[1], nodes[0]),
            inflow_u=lambda t: zero_inflow(BTAG_ALL, t=t),
            flux_type="upwind",
            quad_tag=qtag)

        total_error = op.norm(dcoll, adv_op.operator(0, gaussian_mode(nodes)),
                              2)
        eoc_rec.add_data_point(1.0 / n, actx.to_numpy(total_error))

    logger.info(
        "\n%s",
        eoc_rec.pretty_print(abscissa_label="h", error_label="L2 Error"))

    return eoc_rec.order_estimate(), np.array(
        [x[1] for x in eoc_rec.history])
def test_velocity_gradient_eoc(actx_factory, dim):
    """Test that the velocity gradient converges at the proper rate."""
    from mirgecom.fluid import velocity_gradient
    actx = actx_factory()

    order = 3

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    nel_1d_0 = 4
    for hn1 in [1, 2, 3, 4]:

        nel_1d = hn1 * nel_1d_0
        h = 1/nel_1d

        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(
            a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
        )

        discr = EagerDGDiscretization(actx, mesh, order=order)
        nodes = thaw(actx, discr.nodes())
        zeros = discr.zeros(actx)
        energy = zeros + 2.5

        # non-constant mass, velocity components v_i = cos(x_i)
        mass = nodes[dim-1]*nodes[dim-1]
        velocity = make_obj_array([actx.np.cos(nodes[i]) for i in range(dim)])
        mom = mass*velocity

        q = join_conserved(dim, mass=mass, energy=energy, momentum=mom)
        cv = split_conserved(dim, q)

        grad_q = obj_array_vectorize(discr.grad, q)
        grad_cv = split_conserved(dim, grad_q)

        grad_v = velocity_gradient(discr, cv, grad_cv)

        def exact_grad_row(xdata, gdim, dim):
            # analytic gradient of cos: only the diagonal entry -sin(x)
            exact_grad_row = make_obj_array([zeros for _ in range(dim)])
            exact_grad_row[gdim] = -actx.np.sin(xdata)
            return exact_grad_row

        comp_err = make_obj_array([
            discr.norm(grad_v[i] - exact_grad_row(nodes[i], i, dim), np.inf)
            for i in range(dim)])
        err_max = comp_err.max()
        eoc.add_data_point(h, err_max)

    logger.info(eoc)
    # accept either expected-order convergence or near-zero error
    assert (
        eoc.order_estimate() >= order - 0.5
        or eoc.max_error() < 1e-9
    )
def test_direct(ctx_getter):
    # This evaluates a single layer potential on a circle.
    # The density cos(k t) with the precomputed eigenvalue 1/(2 k)
    # provides the reference solution.
    logging.basicConfig(level=logging.INFO)

    ctx = ctx_getter()
    queue = cl.CommandQueue(ctx)

    from sumpy.kernel import LaplaceKernel
    lknl = LaplaceKernel(2)

    order = 12

    from sumpy.qbx import LayerPotential
    from sumpy.expansion.local import LineTaylorLocalExpansion
    lpot = LayerPotential(ctx, [LineTaylorLocalExpansion(lknl, order)])

    mode_nr = 25

    from pytools.convergence import EOCRecorder
    eocrec = EOCRecorder()

    for n in [200, 300, 400]:
        t = np.linspace(0, 2 * np.pi, n, endpoint=False)
        unit_circle = np.exp(1j * t)
        unit_circle = np.array([unit_circle.real, unit_circle.imag])

        sigma = np.cos(mode_nr * t)
        eigval = 1/(2*mode_nr)
        result_ref = eigval * sigma

        h = 2 * np.pi / n
        targets = unit_circle
        sources = unit_circle
        # QBX expansion centers a few mesh widths inside the circle
        radius = 7 * h
        centers = unit_circle * (1 - radius)
        expansion_radii = np.ones(n) * radius

        # trapezoidal-style weighting of the density by h
        strengths = (sigma * h,)
        evt, (result_qbx,) = lpot(queue, targets, sources, centers, strengths,
                expansion_radii=expansion_radii)

        eocrec.add_data_point(h, np.max(np.abs(result_ref - result_qbx)))

    print(eocrec)

    slack = 1.5
    assert eocrec.order_estimate() > order - slack
def test_vortex_rhs(actx_factory, order):
    """Tests the inviscid rhs using the non-trivial 2D isentropic vortex
    case configured to yield rhs = 0. Checks several different orders
    and refinement levels to check error behavior.
    """
    actx = actx_factory()

    dim = 2

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    from meshmode.mesh.generation import generate_regular_rect_mesh
    for nel_1d in [16, 32, 64]:
        mesh = generate_regular_rect_mesh(
            a=(-5, ) * dim, b=(5, ) * dim, n=(nel_1d, ) * dim,
        )

        logger.info(f"Number of {dim}d elements: {mesh.nelements}")

        discr = EagerDGDiscretization(actx, mesh, order=order)
        nodes = thaw(actx, discr.nodes())

        # Init soln with Vortex and expected RHS = 0
        vortex = Vortex2D(center=[0, 0], velocity=[0, 0])
        vortex_soln = vortex(0, nodes)
        boundaries = {BTAG_ALL: PrescribedBoundary(vortex)}

        inviscid_rhs = inviscid_operator(discr, eos=IdealSingleGas(),
                                         boundaries=boundaries, q=vortex_soln,
                                         t=0.0)

        # since the expected RHS is zero, the RHS itself is the error
        err_max = discr.norm(inviscid_rhs, np.inf)

        eoc_rec.add_data_point(1.0 / nel_1d, err_max)

    message = (f"Error for (dim,order) = ({dim},{order}):\n"
               f"{eoc_rec}")
    logger.info(message)

    # accept either expected-order convergence or near-machine-zero error
    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < 1e-11)
def test_pde_check(dim, order=4):
    """Sanity-check CalculusPatch derivatives: numerically differentiate
    sin(10 x_i) along each axis and verify convergence near order - 2."""
    from sumpy.point_calculus import CalculusPatch
    from pytools.convergence import EOCRecorder

    for axis in range(dim):
        eoc = EOCRecorder()
        for patch_size in (0.1, 0.01, 0.001):
            patch = CalculusPatch(np.array([3, 0, 0])[:dim],
                    h=patch_size, order=order)
            deriv_num = patch.diff(axis, np.sin(10*patch.points[axis]))
            deriv_exact = 10*np.cos(10*patch.points[axis])

            eoc.add_data_point(patch_size, la.norm(deriv_num-deriv_exact))

        print(eoc)
        assert eoc.order_estimate() > order-2-0.1
def test_pde_check(dim, order=4):
    # Sanity check for CalculusPatch derivatives: differentiate
    # sin(10 x_i) along each axis and compare with the analytic result.
    from sumpy.point_calculus import CalculusPatch
    from pytools.convergence import EOCRecorder

    for iaxis in range(dim):
        eoc_rec = EOCRecorder()
        for h in [0.1, 0.01, 0.001]:
            cp = CalculusPatch(np.array([3, 0, 0])[:dim], h=h, order=order)
            df_num = cp.diff(iaxis, np.sin(10 * cp.points[iaxis]))
            df_true = 10 * np.cos(10 * cp.points[iaxis])

            err = la.norm(df_num - df_true)
            eoc_rec.add_data_point(h, err)

        print(eoc_rec)
        # differentiation costs about two orders relative to patch order
        assert eoc_rec.order_estimate() > order - 2 - 0.1
def test_boundary_interpolation(ctx_getter):
    # Check that restricting a volume function to the boundary
    # discretization converges at the volume discretization's order.
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.mesh.io import generate_gmsh, FileSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from meshmode.discretization.connection import make_boundary_restriction

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 4

    for h in [1e-1, 3e-2, 1e-2]:
        print("BEGIN GEN")
        mesh = generate_gmsh(
                FileSource("blob-2d.step"), 2, order=order,
                force_ambient_dim=2,
                other_options=[
                    "-string", "Mesh.CharacteristicLengthMax = %s;" % h]
                )
        print("END GEN")

        vol_discr = Discretization(cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(order))
        print("h=%s -> %d elements" % (
                h, sum(mgrp.nelements for mgrp in mesh.groups)))

        # smooth test function evaluated on the volume nodes
        x = vol_discr.nodes()[0].with_queue(queue)
        f = 0.1*cl.clmath.sin(30*x)

        bdry_mesh, bdry_discr, bdry_connection = make_boundary_restriction(
                queue, vol_discr,
                InterpolatoryQuadratureSimplexGroupFactory(order))

        # same function evaluated directly on the boundary nodes
        bdry_x = bdry_discr.nodes()[0].with_queue(queue)
        bdry_f = 0.1*cl.clmath.sin(30*bdry_x)

        # interpolated volume data, to be compared against the direct one
        bdry_f_2 = bdry_connection(queue, f)

        err = la.norm((bdry_f-bdry_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert eoc_rec.order_estimate() >= order-0.5
def conv_test(descr, use_quad):
    # Convergence helper: runs the bound operator at two mesh
    # resolutions and returns (order estimate, array of recorded
    # errors). Relies on actx, dims, order, op, gaussian_mode, ... from
    # the enclosing scope.
    logger.info("-" * 75)
    logger.info(descr)
    logger.info("-" * 75)
    eoc_rec = EOCRecorder()

    ns = [20, 25]
    for n in ns:
        mesh = mgen.generate_regular_rect_mesh(a=(-0.5, ) * dims,
                                               b=(0.5, ) * dims,
                                               nelements_per_axis=(n, ) * dims,
                                               order=order)

        if use_quad:
            # overkill quadrature group for the "product" tag
            discr_tag_to_group_factory = {
                "product": QuadratureSimplexGroupFactory(order=4 * order)
            }
        else:
            discr_tag_to_group_factory = {"product": None}

        discr = DiscretizationCollection(
            actx, mesh, order=order,
            discr_tag_to_group_factory=discr_tag_to_group_factory)

        bound_op = bind(discr, op.sym_operator())
        fields = bind(discr, gaussian_mode())(actx, t=0)
        norm = bind(discr, sym.norm(2, sym.var("u")))

        esc = bound_op(u=fields)
        total_error = norm(u=esc)
        eoc_rec.add_data_point(1.0 / n, total_error)

    logger.info(
        "\n%s",
        eoc_rec.pretty_print(abscissa_label="h", error_label="L2 Error"))

    return eoc_rec.order_estimate(), np.array(
        [x[1] for x in eoc_rec.history])
def conv_test(descr, use_quad):
    # Convergence helper (PyOpenCL-queue variant): runs the bound
    # operator at two mesh resolutions and returns (order estimate,
    # array of recorded errors). Relies on cl_ctx, queue, dims, order,
    # op, gaussian_mode from the enclosing scope.
    print("-" * 75)
    print(descr)
    print("-" * 75)
    eoc_rec = EOCRecorder()

    ns = [20, 25]
    for n in ns:
        mesh = generate_regular_rect_mesh(a=(-0.5, ) * dims,
                                          b=(0.5, ) * dims,
                                          n=(n, ) * dims,
                                          order=order)

        if use_quad:
            # overkill quadrature group for the "product" tag
            quad_tag_to_group_factory = {
                "product": QuadratureSimplexGroupFactory(order=4 * order)
            }
        else:
            quad_tag_to_group_factory = {"product": None}

        discr = DGDiscretizationWithBoundaries(
            cl_ctx, mesh, order=order,
            quad_tag_to_group_factory=quad_tag_to_group_factory)

        bound_op = bind(discr, op.sym_operator())
        fields = bind(discr, gaussian_mode())(queue, t=0)
        norm = bind(discr, sym.norm(2, sym.var("u")))

        esc = bound_op(queue, u=fields)
        total_error = norm(queue, u=esc)
        eoc_rec.add_data_point(1.0 / n, total_error)

    print(
        eoc_rec.pretty_print(abscissa_label="h", error_label="LInf Error"))

    return eoc_rec.order_estimate(), np.array(
        [x[1] for x in eoc_rec.history])
def test_basis_grad(dim, shape_cls, order, basis_getter):
    """Do a simplistic FD-style check on the gradients of the basis.

    For each basis function, compare its analytic gradient against a
    central finite-difference approximation at random nodes, and require
    the error to be either tiny or converging at order >= 1.5.
    """
    shape = shape_cls(dim)
    rng = np.random.Generator(np.random.PCG64(17))
    basis = basis_getter(mp.space_for_shape(shape, order), shape)

    from pytools.convergence import EOCRecorder
    from pytools import wandering_element

    for bf, gradbf in zip(basis.functions, basis.gradients):
        eoc_rec = EOCRecorder()
        for h in [1e-2, 1e-3]:
            r = mp.random_nodes_for_shape(shape, nnodes=1000, rng=rng)

            gradbf_v = np.array(gradbf(r))
            # central finite differences along each coordinate direction
            gradbf_v_num = np.array([
                (bf(r + h * unit) - bf(r - h * unit)) / (2 * h)
                for unit_tuple in wandering_element(shape.dim)
                for unit in (np.array(unit_tuple).reshape(-1, 1), )
                ])

            ref_norm = la.norm((gradbf_v).reshape(-1), np.inf)
            err = la.norm((gradbf_v_num - gradbf_v).reshape(-1), np.inf)
            # only normalize when the reference is safely nonzero
            if ref_norm > 1e-13:
                err = err / ref_norm

            # was "error: %.5" — an incomplete %-format spec that breaks
            # log formatting; "%.5e" was the evident intent
            logger.info("error: %.5e", err)
            eoc_rec.add_data_point(h, err)

        tol = 1e-8
        if eoc_rec.max_error() >= tol:
            print(eoc_rec)
            assert (eoc_rec.max_error() < tol
                    or eoc_rec.order_estimate() >= 1.5)
def test_boundary_interpolation(ctx_getter, group_factory, boundary_tag,
        mesh_name, dim, mesh_pars, per_face_groups):
    # Check that restriction of volume data to a boundary converges at
    # the discretization order; on the 2D blob case, also verify the
    # connection's explicit resampling matrix against its application.
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (make_face_restriction,
                                                    check_connection)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 4

    def f(x):
        # smooth test function evaluated on node coordinates
        return 0.1 * cl.clmath.sin(30 * x)

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "blob":
            assert dim == 2

            h = mesh_par

            from meshmode.mesh.io import generate_gmsh, FileSource
            print("BEGIN GEN")
            mesh = generate_gmsh(FileSource("blob-2d.step"),
                                 2,
                                 order=order,
                                 force_ambient_dim=2,
                                 other_options=[
                                     "-string",
                                     "Mesh.CharacteristicLengthMax = %s;" % h
                                 ])
            print("END GEN")
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=4, n=mesh_par)

            h = 1 / mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        vol_discr = Discretization(cl_ctx, mesh, group_factory(order))
        print("h=%s -> %d elements" %
              (h, sum(mgrp.nelements for mgrp in mesh.groups)))

        x = vol_discr.nodes()[0].with_queue(queue)
        vol_f = f(x)

        bdry_connection = make_face_restriction(
            vol_discr,
            group_factory(order),
            boundary_tag,
            per_face_groups=per_face_groups)
        check_connection(bdry_connection)
        bdry_discr = bdry_connection.to_discr

        # compare directly-evaluated boundary data with interpolated data
        bdry_x = bdry_discr.nodes()[0].with_queue(queue)
        bdry_f = f(bdry_x)
        bdry_f_2 = bdry_connection(queue, vol_f)

        if mesh_name == "blob" and dim == 2:
            # the dense resample matrix must agree with applying the
            # connection directly
            mat = bdry_connection.full_resample_matrix(queue).get(queue)
            bdry_f_2_by_mat = mat.dot(vol_f.get())

            mat_error = la.norm(bdry_f_2.get(queue=queue) - bdry_f_2_by_mat)
            assert mat_error < 1e-14, mat_error

        err = la.norm((bdry_f - bdry_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < 1e-14)
def test_p2e2p(ctx_factory, base_knl, expn_class, order, with_source_derivative):
    # Round-trip accuracy test: form an expansion from point sources
    # (P2E), evaluate it at targets (E2P), and compare against direct
    # point-to-point evaluation (P2P) for both the potential and its
    # x-derivative, checking convergence in the patch size h.
    #logging.basicConfig(level=logging.INFO)

    from sympy.core.cache import clear_cache
    clear_cache()

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    np.random.seed(17)

    res = 100
    nsources = 100

    extra_kwargs = {}
    if isinstance(base_knl, HelmholtzKernel):
        if base_knl.allow_evanescent:
            extra_kwargs["k"] = 0.2 * (0.707 + 0.707j)
        else:
            extra_kwargs["k"] = 0.2
    if isinstance(base_knl, StokesletKernel):
        extra_kwargs["mu"] = 0.2

    if with_source_derivative:
        knl = DirectionalSourceDerivative(base_knl, "dir_vec")
    else:
        knl = base_knl

    out_kernels = [
        knl,
        AxisTargetDerivative(0, knl),
    ]
    expn = expn_class(knl, order=order)

    from sumpy import P2EFromSingleBox, E2PFromSingleBox, P2P
    p2e = P2EFromSingleBox(ctx, expn, kernels=[knl])
    e2p = E2PFromSingleBox(ctx, expn, kernels=out_kernels)
    p2p = P2P(ctx, out_kernels, exclude_self=False)

    from pytools.convergence import EOCRecorder
    eoc_rec_pot = EOCRecorder()
    eoc_rec_grad_x = EOCRecorder()

    from sumpy.expansion.local import LocalExpansionBase
    if issubclass(expn_class, LocalExpansionBase):
        h_values = [1 / 5, 1 / 7, 1 / 20]
    else:
        h_values = [1 / 2, 1 / 3, 1 / 5]

    # random sources clustered around a fixed off-origin center
    center = np.array([2, 1, 0][:knl.dim], np.float64)
    sources = (0.7
            * (-0.5 + np.random.rand(knl.dim, nsources).astype(np.float64))
            + center[:, np.newaxis])

    strengths = np.ones(nsources, dtype=np.float64) * (1 / nsources)

    source_boxes = np.array([0], dtype=np.int32)
    box_source_starts = np.array([0], dtype=np.int32)
    box_source_counts_nonchild = np.array([nsources], dtype=np.int32)

    extra_source_kwargs = extra_kwargs.copy()
    if isinstance(knl, DirectionalSourceDerivative):
        # NOTE(review): the third positional argument of np.linspace is
        # *endpoint*, not dtype — confirm this was the intent
        alpha = np.linspace(0, 2 * np.pi, nsources, np.float64)
        dir_vec = np.vstack([np.cos(alpha), np.sin(alpha)])
        extra_source_kwargs["dir_vec"] = dir_vec

    from sumpy.visualization import FieldPlotter

    for h in h_values:
        if issubclass(expn_class, LocalExpansionBase):
            # local expansion: center away from sources, targets in a
            # patch of extent h around that center
            loc_center = np.array([5.5, 0.0, 0.0][:knl.dim]) + center
            centers = np.array(loc_center,
                    dtype=np.float64).reshape(knl.dim, 1)
            fp = FieldPlotter(loc_center, extent=h, npoints=res)
        else:
            # multipole expansion: targets move away as h shrinks
            eval_center = np.array([1 / h, 0.0, 0.0][:knl.dim]) + center
            fp = FieldPlotter(eval_center, extent=0.1, npoints=res)
            centers = (np.array([0.0, 0.0, 0.0][:knl.dim],
                    dtype=np.float64).reshape(knl.dim, 1)
                    + center[:, np.newaxis])

        targets = fp.points

        rscale = 0.5  # pick something non-1

        # {{{ apply p2e

        evt, (mpoles, ) = p2e(
                queue,
                source_boxes=source_boxes,
                box_source_starts=box_source_starts,
                box_source_counts_nonchild=box_source_counts_nonchild,
                centers=centers,
                sources=sources,
                strengths=(strengths, ),
                nboxes=1,
                tgt_base_ibox=0,
                rscale=rscale,
                #flags="print_hl_cl",
                out_host=True, **extra_source_kwargs)

        # }}}

        # {{{ apply e2p

        ntargets = targets.shape[-1]
        box_target_starts = np.array([0], dtype=np.int32)
        box_target_counts_nonchild = np.array([ntargets], dtype=np.int32)

        evt, (
            pot,
            grad_x,
        ) = e2p(
                queue,
                src_expansions=mpoles,
                src_base_ibox=0,
                target_boxes=source_boxes,
                box_target_starts=box_target_starts,
                box_target_counts_nonchild=box_target_counts_nonchild,
                centers=centers,
                targets=targets,
                rscale=rscale,
                #flags="print_hl_cl",
                out_host=True, **extra_kwargs)

        # }}}

        # {{{ compute (direct) reference solution

        evt, (
            pot_direct,
            grad_x_direct,
        ) = p2p(queue, targets, sources, (strengths, ),
                out_host=True, **extra_source_kwargs)

        err_pot = la.norm((pot - pot_direct) / res**2)
        err_grad_x = la.norm((grad_x - grad_x_direct) / res**2)

        if 1:
            # normalize to relative errors
            err_pot = err_pot / la.norm((pot_direct) / res**2)
            err_grad_x = err_grad_x / la.norm((grad_x_direct) / res**2)

        if 0:
            # debugging aid: side-by-side plots of expansion vs direct
            import matplotlib.pyplot as pt
            from matplotlib.colors import Normalize

            pt.subplot(131)
            im = fp.show_scalar_in_matplotlib(pot.real)
            im.set_norm(Normalize(vmin=-0.1, vmax=0.1))

            pt.subplot(132)
            im = fp.show_scalar_in_matplotlib(pot_direct.real)
            im.set_norm(Normalize(vmin=-0.1, vmax=0.1))
            pt.colorbar()

            pt.subplot(133)
            im = fp.show_scalar_in_matplotlib(
                    np.log10(1e-15 + np.abs(pot - pot_direct)))
            im.set_norm(Normalize(vmin=-6, vmax=1))
            pt.colorbar()
            pt.show()

        # }}}

        eoc_rec_pot.add_data_point(h, err_pot)
        eoc_rec_grad_x.add_data_point(h, err_grad_x)

    print(expn_class, knl, order)
    print("POTENTIAL:")
    print(eoc_rec_pot)
    print("X TARGET DERIVATIVE:")
    print(eoc_rec_grad_x)

    # expected orders plus empirically chosen slack per configuration
    tgt_order = order + 1
    if issubclass(expn_class, LocalExpansionBase):
        tgt_order_grad = tgt_order - 1
        slack = 0.7
        grad_slack = 0.5
    else:
        tgt_order_grad = tgt_order + 1

        slack = 0.5
        grad_slack = 1

        if order <= 2:
            slack += 1
            grad_slack += 1

    if isinstance(knl, DirectionalSourceDerivative):
        slack += 1
        grad_slack += 2

    if isinstance(base_knl, DirectionalSourceDerivative):
        slack += 1
        grad_slack += 2

    if isinstance(base_knl, HelmholtzKernel):
        if base_knl.allow_evanescent:
            slack += 0.5
            grad_slack += 0.5

    if issubclass(expn_class, VolumeTaylorMultipoleExpansionBase):
        slack += 0.3
        grad_slack += 0.3

    assert eoc_rec_pot.order_estimate() > tgt_order - slack
    assert eoc_rec_grad_x.order_estimate() > tgt_order_grad - grad_slack
def test_opposite_face_interpolation(ctx_getter, group_factory, mesh_name,
        dim, mesh_pars):
    """Check that interpolation across interior faces (to the opposite face)
    converges at the expected order as the mesh is refined.

    For each mesh parameter, a smooth function is evaluated on the interior-face
    restriction, mapped through the opposite-face connection, and the resulting
    discrepancy is fed into an EOC (estimated order of convergence) recorder.
    """
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_face_restriction, make_opposite_face_connection,
            check_connection)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 5

    def f(x):
        # smooth, oscillatory test function used to probe interpolation accuracy
        return 0.1 * cl.clmath.sin(30 * x)

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "blob":
            assert dim == 2
            h = mesh_par

            from meshmode.mesh.io import generate_gmsh, FileSource
            print("BEGIN GEN")
            mesh = generate_gmsh(FileSource("blob-2d.step"), 2, order=order,
                    force_ambient_dim=2,
                    other_options=[
                        "-string",
                        "Mesh.CharacteristicLengthMax = %s;" % h
                        ])
            print("END GEN")
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=4, n=mesh_par)
            # mesh_par is the number of elements per axis here
            h = 1 / mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        vol_discr = Discretization(cl_ctx, mesh, group_factory(order))
        print("h=%s -> %d elements" % (
            h, sum(mgrp.nelements for mgrp in mesh.groups)))

        bdry_connection = make_face_restriction(vol_discr,
                group_factory(order), FRESTR_INTERIOR_FACES)
        bdry_discr = bdry_connection.to_discr

        opp_face = make_opposite_face_connection(bdry_connection)
        check_connection(opp_face)

        bdry_x = bdry_discr.nodes()[0].with_queue(queue)
        bdry_f = f(bdry_x)
        bdry_f_2 = opp_face(queue, bdry_f)

        err = la.norm((bdry_f - bdry_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    # either converge at (roughly) the discretization order, or be exact
    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < 1e-13)
def test_sanity_balls(ctx_getter, src_file, dim, mesh_order, visualize=False):
    """Sanity-check integration on ball meshes: the computed volume and surface
    area of a unit ball must converge to the analytic values at (at least) the
    mesh order, and boundary normals must point outward.
    """
    pytest.importorskip("pytential")

    logging.basicConfig(level=logging.INFO)

    ctx = ctx_getter()
    queue = cl.CommandQueue(ctx)

    from pytools.convergence import EOCRecorder
    vol_eoc_rec = EOCRecorder()
    surf_eoc_rec = EOCRecorder()

    # overkill
    quad_order = mesh_order

    from pytential import bind, sym

    for h in [0.2, 0.14, 0.1]:
        from meshmode.mesh.io import generate_gmsh, FileSource
        mesh = generate_gmsh(FileSource(src_file), dim, order=mesh_order,
                other_options=[
                    "-string",
                    "Mesh.CharacteristicLengthMax = %g;" % h
                    ],
                force_ambient_dim=dim)

        logger.info("%d elements" % mesh.nelements)

        # {{{ discretizations and connections

        from meshmode.discretization import Discretization
        vol_discr = Discretization(
                ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(quad_order))

        from meshmode.discretization.connection import make_face_restriction
        bdry_connection = make_face_restriction(
                vol_discr,
                InterpolatoryQuadratureSimplexGroupFactory(quad_order),
                BTAG_ALL)
        bdry_discr = bdry_connection.to_discr

        # }}}

        # {{{ visualizers

        from meshmode.discretization.visualization import make_visualizer
        vol_vis = make_visualizer(queue, vol_discr, 20)
        bdry_vis = make_visualizer(queue, bdry_discr, 20)

        # }}}

        from math import gamma
        # surface area of the unit (dim-1)-sphere and volume of the unit ball
        true_surf = 2 * np.pi**(dim / 2) / gamma(dim / 2)
        true_vol = true_surf / dim

        vol_x = vol_discr.nodes().with_queue(queue)

        vol_one = vol_x[0].copy()
        vol_one.fill(1)
        from pytential import norm, integral  # noqa

        comp_vol = integral(vol_discr, queue, vol_one)
        rel_vol_err = abs(true_vol - comp_vol) / true_vol
        vol_eoc_rec.add_data_point(h, rel_vol_err)
        print("VOL", true_vol, comp_vol)

        bdry_x = bdry_discr.nodes().with_queue(queue)

        bdry_one_exact = bdry_x[0].copy()
        bdry_one_exact.fill(1)

        # restricting the constant-one volume function to the boundary
        # must reproduce the constant-one boundary function essentially exactly
        bdry_one = bdry_connection(queue, vol_one).with_queue(queue)
        intp_err = norm(bdry_discr, queue, bdry_one - bdry_one_exact)
        assert intp_err < 1e-14

        comp_surf = integral(bdry_discr, queue, bdry_one)
        rel_surf_err = abs(true_surf - comp_surf) / true_surf
        surf_eoc_rec.add_data_point(h, rel_surf_err)
        print("SURF", true_surf, comp_surf)

        if visualize:
            vol_vis.write_vtk_file("volume-h=%g.vtu" % h, [
                ("f", vol_one),
                ("area_el", bind(vol_discr, sym.area_element())(queue)),
                ])
            bdry_vis.write_vtk_file("boundary-h=%g.vtu" % h, [("f", bdry_one)])

        # {{{ check normals point outward

        # normal . position > 0 everywhere on the (star-shaped) boundary
        normal_outward_check = bind(
                bdry_discr,
                sym.normal(mesh.ambient_dim) | sym.nodes(mesh.ambient_dim),
                )(queue).as_scalar() > 0

        assert normal_outward_check.get().all(), normal_outward_check.get()

        # }}}

    print("---------------------------------")
    print("VOLUME")
    print("---------------------------------")
    print(vol_eoc_rec)
    assert vol_eoc_rec.order_estimate() >= mesh_order

    print("---------------------------------")
    print("SURFACE")
    print("---------------------------------")
    print(surf_eoc_rec)
    assert surf_eoc_rec.order_estimate() >= mesh_order
def test_sphere_eigenvalues(ctx_getter, mode_m, mode_n, qbx_order,
        fmm_backend):
    """Check QBX layer potentials on the unit sphere against the known
    eigenvalues of S, D, S', and D' for spherical harmonics Y_m^n.
    """
    logging.basicConfig(level=logging.INFO)

    special = pytest.importorskip("scipy.special")

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    target_order = 8

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from pytential.qbx import QBXLayerPotentialSource
    from pytools.convergence import EOCRecorder

    s_eoc_rec = EOCRecorder()
    d_eoc_rec = EOCRecorder()
    sp_eoc_rec = EOCRecorder()
    dp_eoc_rec = EOCRecorder()

    def rel_err(comp, ref):
        # NOTE: density_discr is rebound each loop iteration; the closure
        # deliberately picks up the current one (late binding).
        return (
                norm(density_discr, queue, comp - ref)
                / norm(density_discr, queue, ref))

    for nrefinements in [0, 1]:
        from meshmode.mesh.generation import generate_icosphere
        mesh = generate_icosphere(1, target_order)

        from meshmode.mesh.refinement import Refiner
        refiner = Refiner(mesh)
        for i in range(nrefinements):
            # uniform refinement: flag every element
            flags = np.ones(mesh.nelements, dtype=bool)
            refiner.refine(flags)
            mesh = refiner.get_current_mesh()

        pre_density_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))
        qbx, _ = QBXLayerPotentialSource(
                pre_density_discr, 4*target_order,
                qbx_order, fmm_order=6,
                fmm_backend=fmm_backend,
                ).with_refinement()
        density_discr = qbx.density_discr
        nodes = density_discr.nodes().with_queue(queue)
        r = cl.clmath.sqrt(nodes[0]**2 + nodes[1]**2 + nodes[2]**2)
        phi = cl.clmath.acos(nodes[2]/r)
        theta = cl.clmath.atan2(nodes[0], nodes[1])

        ymn = cl.array.to_device(queue,
                special.sph_harm(mode_m, mode_n, theta.get(), phi.get()))

        from sumpy.kernel import LaplaceKernel
        lap_knl = LaplaceKernel(3)

        # {{{ single layer

        s_sigma_op = bind(qbx, sym.S(lap_knl, sym.var("sigma")))
        s_sigma = s_sigma_op(queue=queue, sigma=ymn)
        s_eigval = 1/(2*mode_n + 1)
        s_eoc_rec.add_data_point(qbx.h_max, rel_err(s_sigma, s_eigval*ymn))

        # }}}

        # {{{ double layer

        d_sigma_op = bind(qbx, sym.D(lap_knl, sym.var("sigma")))
        d_sigma = d_sigma_op(queue=queue, sigma=ymn)
        d_eigval = -1/(2*(2*mode_n + 1))
        d_eoc_rec.add_data_point(qbx.h_max, rel_err(d_sigma, d_eigval*ymn))

        # }}}

        # {{{ S'

        sp_sigma_op = bind(qbx, sym.Sp(lap_knl, sym.var("sigma")))
        sp_sigma = sp_sigma_op(queue=queue, sigma=ymn)
        sp_eigval = -1/(2*(2*mode_n + 1))
        sp_eoc_rec.add_data_point(qbx.h_max, rel_err(sp_sigma, sp_eigval*ymn))

        # }}}

        # {{{ D'

        dp_sigma_op = bind(qbx, sym.Dp(lap_knl, sym.var("sigma")))
        dp_sigma = dp_sigma_op(queue=queue, sigma=ymn)
        dp_eigval = -(mode_n*(mode_n+1))/(2*mode_n + 1)
        dp_eoc_rec.add_data_point(qbx.h_max, rel_err(dp_sigma, dp_eigval*ymn))

        # }}}

    print("Errors for S:")
    print(s_eoc_rec)
    required_order = qbx_order + 1
    assert s_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D:")
    print(d_eoc_rec)
    required_order = qbx_order
    assert d_eoc_rec.order_estimate() > required_order - 0.5

    print("Errors for S':")
    print(sp_eoc_rec)
    required_order = qbx_order
    assert sp_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D':")
    print(dp_eoc_rec)
    required_order = qbx_order
    assert dp_eoc_rec.order_estimate() > required_order - 1.5
def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars,
        per_face_groups):
    """Check the face-to-all-faces embedding: restricting a smooth function
    to boundary and interior faces and embedding both into the all-faces
    discretization must reproduce its all-faces restriction, with error
    converging at the discretization order.
    """
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_face_restriction, make_face_to_all_faces_embedding,
            check_connection)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 4

    def f(x):
        # smooth, oscillatory test function
        return 0.1 * cl.clmath.sin(30 * x)

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "blob":
            assert dim == 2
            h = mesh_par

            from meshmode.mesh.io import generate_gmsh, FileSource
            print("BEGIN GEN")
            mesh = generate_gmsh(FileSource("blob-2d.step"), 2, order=order,
                    force_ambient_dim=2,
                    other_options=[
                        "-string",
                        "Mesh.CharacteristicLengthMax = %s;" % h
                        ])
            print("END GEN")
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=4, n=mesh_par)
            h = 1 / mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        vol_discr = Discretization(cl_ctx, mesh,
                PolynomialWarpAndBlendGroupFactory(order))
        print("h=%s -> %d elements" % (
            h, sum(mgrp.nelements for mgrp in mesh.groups)))

        all_face_bdry_connection = make_face_restriction(
                vol_discr, PolynomialWarpAndBlendGroupFactory(order),
                FRESTR_ALL_FACES, per_face_groups=per_face_groups)
        all_face_bdry_discr = all_face_bdry_connection.to_discr

        # sanity-check batch structure: each batch covers every element,
        # and the face index agrees with the grouping mode
        for ito_grp, ceg in enumerate(all_face_bdry_connection.groups):
            for ibatch, batch in enumerate(ceg.batches):
                assert np.array_equal(batch.from_element_indices.get(queue),
                        np.arange(vol_discr.mesh.nelements))

                if per_face_groups:
                    assert ito_grp == batch.to_element_face
                else:
                    assert ibatch == batch.to_element_face

        all_face_x = all_face_bdry_discr.nodes()[0].with_queue(queue)
        all_face_f = f(all_face_x)

        # accumulate the embedded boundary + interior-face restrictions;
        # together they should cover all faces exactly once
        all_face_f_2 = all_face_bdry_discr.zeros(queue)

        for boundary_tag in [
                BTAG_ALL,
                FRESTR_INTERIOR_FACES,
                ]:
            bdry_connection = make_face_restriction(
                    vol_discr, PolynomialWarpAndBlendGroupFactory(order),
                    boundary_tag, per_face_groups=per_face_groups)
            bdry_discr = bdry_connection.to_discr

            bdry_x = bdry_discr.nodes()[0].with_queue(queue)
            bdry_f = f(bdry_x)

            all_face_embedding = make_face_to_all_faces_embedding(
                    bdry_connection, all_face_bdry_discr)

            check_connection(all_face_embedding)

            all_face_f_2 += all_face_embedding(queue, bdry_f)

        err = la.norm((all_face_f - all_face_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < 1e-14)
def test_3d_jump_relations(ctx_factory, relation, visualize=False):
    """Verify layer-potential jump relations on a 3D torus.

    Depending on *relation*, checks that one of the identities

    * ``nxcurls``: n x curl S (one-sided vs. average + tangential jump)
    * ``sp``: S' one-sided vs. average minus half the density
    * ``div_s``: div S n sigma + D sigma (average limits)

    evaluates to (numerically) zero, converging at qbx_order.
    """
    # logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)

    if relation == "div_s":
        target_order = 3
    else:
        target_order = 4

    qbx_order = target_order

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nel_factor in [6, 10, 14]:
        from meshmode.mesh.generation import generate_torus
        mesh = generate_torus(
                5, 2, order=target_order,
                n_outer=2*nel_factor, n_inner=nel_factor)

        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
                InterpolatoryQuadratureSimplexGroupFactory
        pre_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(3))

        from pytential.qbx import QBXLayerPotentialSource
        qbx, _ = QBXLayerPotentialSource(
                pre_discr, fine_order=4*target_order,
                qbx_order=qbx_order, fmm_order=qbx_order + 5,
                fmm_backend="fmmlib"
                ).with_refinement()

        from sumpy.kernel import LaplaceKernel
        knl = LaplaceKernel(3)

        def nxcurlS(qbx_forced_limit):
            # NOTE: density_sym is assigned below, after this def; the
            # closure late-binds it, so nxcurlS must only be called once
            # density_sym exists (the "nxcurls" branch guarantees that).
            return sym.n_cross(sym.curl(sym.S(
                knl,
                sym.cse(sym.tangential_to_xyz(density_sym), "jxyz"),
                qbx_forced_limit=qbx_forced_limit)))

        x, y, z = qbx.density_discr.nodes().with_queue(queue)
        m = cl.clmath

        if relation == "nxcurls":
            density_sym = sym.make_sym_vector("density", 2)

            jump_identity_sym = (
                    nxcurlS(+1)
                    - (nxcurlS("avg") + 0.5*sym.tangential_to_xyz(density_sym)))

            # The tangential coordinate system is element-local, so we can't just
            # conjure up some globally smooth functions, interpret their values
            # in the tangential coordinate system, and be done. Instead, generate
            # an XYZ function and project it.
            density = bind(
                    qbx,
                    sym.xyz_to_tangential(sym.make_sym_vector("jxyz", 3)))(
                            queue,
                            jxyz=sym.make_obj_array([
                                m.cos(0.5*x) * m.cos(0.5*y) * m.cos(0.5*z),
                                m.sin(0.5*x) * m.cos(0.5*y) * m.sin(0.5*z),
                                m.sin(0.5*x) * m.cos(0.5*y) * m.cos(0.5*z),
                                ]))

        elif relation == "sp":
            density = m.cos(2*x) * m.cos(2*y) * m.cos(z)
            density_sym = sym.var("density")

            jump_identity_sym = (
                    sym.Sp(knl, density_sym, qbx_forced_limit=+1)
                    - (sym.Sp(knl, density_sym, qbx_forced_limit="avg")
                        - 0.5*density_sym))

        elif relation == "div_s":
            density = m.cos(2*x) * m.cos(2*y) * m.cos(z)
            density_sym = sym.var("density")

            jump_identity_sym = (
                    sym.div(sym.S(knl, sym.normal(3).as_vector()*density_sym,
                        qbx_forced_limit="avg"))
                    + sym.D(knl, density_sym, qbx_forced_limit="avg"))

        else:
            raise ValueError("unexpected value of 'relation': %s" % relation)

        bound_jump_identity = bind(qbx, jump_identity_sym)
        jump_identity = bound_jump_identity(queue, density=density)

        # relative inf-norm residual of the jump identity
        err = (
                norm(qbx, queue, jump_identity, np.inf)
                / norm(qbx, queue, density, np.inf))
        print("ERROR", qbx.h_max, err)

        eoc_rec.add_data_point(qbx.h_max, err)

        # {{{ visualization

        if visualize and relation == "nxcurls":
            nxcurlS_ext = bind(qbx, nxcurlS(+1))(queue, density=density)
            nxcurlS_avg = bind(qbx, nxcurlS("avg"))(queue, density=density)
            jtxyz = bind(qbx, sym.tangential_to_xyz(density_sym))(
                    queue, density=density)

            from meshmode.discretization.visualization import make_visualizer
            bdry_vis = make_visualizer(queue, qbx.density_discr, target_order+3)

            bdry_normals = bind(qbx, sym.normal(3))(queue)\
                    .as_vector(dtype=object)

            bdry_vis.write_vtk_file("source-%s.vtu" % nel_factor, [
                ("jt", jtxyz),
                ("nxcurlS_ext", nxcurlS_ext),
                ("nxcurlS_avg", nxcurlS_avg),
                ("bdry_normals", bdry_normals),
                ])

        if visualize and relation == "sp":
            sp_ext = bind(qbx, sym.Sp(knl, density_sym, qbx_forced_limit=+1))(
                    queue, density=density)
            sp_avg = bind(qbx, sym.Sp(knl, density_sym, qbx_forced_limit="avg"))(
                    queue, density=density)

            from meshmode.discretization.visualization import make_visualizer
            bdry_vis = make_visualizer(queue, qbx.density_discr, target_order+3)

            bdry_normals = bind(qbx, sym.normal(3))(queue)\
                    .as_vector(dtype=object)

            bdry_vis.write_vtk_file("source-%s.vtu" % nel_factor, [
                ("density", density),
                ("sp_ext", sp_ext),
                ("sp_avg", sp_avg),
                ("bdry_normals", bdry_normals),
                ])

        # }}}

    print(eoc_rec)

    assert eoc_rec.order_estimate() >= qbx_order - 1.5
def test_reversed_chained_connection(ctx_factory, ndim, mesh_name,
        visualize=False):
    """Check that the L2-projection inverse of a chained refinement
    connection converges: a smooth function interpolated back from the fine
    discretization must approach its coarse-grid values at order + 1.
    """
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    def measure_error(nelements, order):
        # Build a coarse discretization and a chain of refinement
        # connections on top of it.
        coarse_discr = create_discretization(queue, ndim,
                                             nelements=nelements,
                                             order=order,
                                             mesh_name=mesh_name)

        threshold = 1.0
        chain_links = [create_refined_connection(queue, coarse_discr,
                                                 threshold=threshold)]
        if ndim == 2:
            # NOTE: additional refinement makes the 3D meshes explode in size
            for _ in range(2):
                chain_links.append(
                        create_refined_connection(queue,
                                                  chain_links[-1].to_discr,
                                                  threshold=threshold))

        from meshmode.discretization.connection import \
                ChainedDiscretizationConnection
        from meshmode.discretization.connection import \
                L2ProjectionInverseDiscretizationConnection
        forward = ChainedDiscretizationConnection(chain_links)
        backward = L2ProjectionInverseDiscretizationConnection(forward)

        # Evaluate the same smooth test function on both ends of the chain.
        coarse_nodes = forward.from_discr.nodes().with_queue(queue)
        fine_nodes = forward.to_discr.nodes().with_queue(queue)
        coarse_vals = sum(
                cl.clmath.cos(coarse_nodes[axis]) ** (axis + 1)
                for axis in range(ndim))
        fine_vals = sum(
                cl.clmath.cos(fine_nodes[axis]) ** (axis + 1)
                for axis in range(ndim))

        # Project the fine-grid values back to the coarse grid and compare.
        projected = backward(queue, fine_vals).get(queue)
        reference = coarse_vals.get(queue)
        rel_err = la.norm(projected - reference) / la.norm(reference)
        return 1.0 / nelements, rel_err

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    order = 4
    for nelements in [16, 32, 48, 64, 96, 128]:
        eoc.add_data_point(*measure_error(nelements, order))

    print(eoc)
    assert eoc.order_estimate() > (order + 1 - 0.5)
def test_p2e2p(ctx_getter, base_knl, expn_class, order, with_source_derivative):
    """Convergence test for the particle-to-expansion-to-particle pipeline:
    form an expansion from point sources (p2e), evaluate it at targets (e2p),
    and compare against direct point-to-point evaluation (p2p). Both the
    potential and its x-derivative must converge at (roughly) the expected
    order, with empirically tuned slack per kernel/expansion combination.
    """
    #logging.basicConfig(level=logging.INFO)
    from sympy.core.cache import clear_cache
    clear_cache()

    ctx = ctx_getter()
    queue = cl.CommandQueue(ctx)

    np.random.seed(17)

    res = 100
    nsources = 100

    extra_kwargs = {}
    if isinstance(base_knl, HelmholtzKernel):
        if base_knl.allow_evanescent:
            extra_kwargs["k"] = 0.2 * (0.707 + 0.707j)
        else:
            extra_kwargs["k"] = 0.2

    if with_source_derivative:
        knl = DirectionalSourceDerivative(base_knl, "dir_vec")
    else:
        knl = base_knl

    out_kernels = [
            knl,
            AxisTargetDerivative(0, knl),
            ]
    expn = expn_class(knl, order=order)

    from sumpy import P2EFromSingleBox, E2PFromSingleBox, P2P
    p2e = P2EFromSingleBox(ctx, expn, out_kernels)
    e2p = E2PFromSingleBox(ctx, expn, out_kernels)
    p2p = P2P(ctx, out_kernels, exclude_self=False)

    from pytools.convergence import EOCRecorder
    eoc_rec_pot = EOCRecorder()
    eoc_rec_grad_x = EOCRecorder()

    from sumpy.expansion.local import LocalExpansionBase
    if issubclass(expn_class, LocalExpansionBase):
        h_values = [1/5, 1/7, 1/20]
    else:
        h_values = [1/2, 1/3, 1/5]

    center = np.array([2, 1], np.float64)
    sources = (0.7*(-0.5+np.random.rand(knl.dim, nsources).astype(np.float64))
            + center[:, np.newaxis])

    strengths = np.ones(nsources, dtype=np.float64) * (1/nsources)

    # single source box (index 0) containing all sources
    source_boxes = np.array([0], dtype=np.int32)
    box_source_starts = np.array([0], dtype=np.int32)
    box_source_counts_nonchild = np.array([nsources], dtype=np.int32)

    extra_source_kwargs = extra_kwargs.copy()
    if with_source_derivative:
        alpha = np.linspace(0, 2*np.pi, nsources, np.float64)
        dir_vec = np.vstack([np.cos(alpha), np.sin(alpha)])
        extra_source_kwargs["dir_vec"] = dir_vec

    from sumpy.visualization import FieldPlotter

    for h in h_values:
        if issubclass(expn_class, LocalExpansionBase):
            # local expansion: evaluate inside a shrinking patch far from
            # the sources; the patch extent h drives convergence
            loc_center = np.array([5.5, 0.0]) + center
            centers = np.array(loc_center, dtype=np.float64).reshape(knl.dim, 1)
            fp = FieldPlotter(loc_center, extent=h, npoints=res)
        else:
            # multipole expansion: evaluate on a fixed-size patch moving
            # farther away (distance ~ 1/h) as h decreases
            eval_center = np.array([1/h, 0.0]) + center
            fp = FieldPlotter(eval_center, extent=0.1, npoints=res)
            centers = (
                    np.array([0.0, 0.0], dtype=np.float64).reshape(knl.dim, 1)
                    + center[:, np.newaxis])

        targets = fp.points

        rscale = 0.5  # pick something non-1

        # {{{ apply p2e

        evt, (mpoles,) = p2e(queue,
                source_boxes=source_boxes,
                box_source_starts=box_source_starts,
                box_source_counts_nonchild=box_source_counts_nonchild,
                centers=centers,
                sources=sources,
                strengths=strengths,
                nboxes=1,
                tgt_base_ibox=0,
                rscale=rscale,

                #flags="print_hl_cl",
                out_host=True, **extra_source_kwargs)

        # }}}

        # {{{ apply e2p

        ntargets = targets.shape[-1]

        box_target_starts = np.array([0], dtype=np.int32)
        box_target_counts_nonchild = np.array([ntargets], dtype=np.int32)

        evt, (pot, grad_x, ) = e2p(
                queue,
                src_expansions=mpoles,
                src_base_ibox=0,
                target_boxes=source_boxes,
                box_target_starts=box_target_starts,
                box_target_counts_nonchild=box_target_counts_nonchild,
                centers=centers,
                targets=targets,
                rscale=rscale,

                #flags="print_hl_cl",
                out_host=True, **extra_kwargs)

        # }}}

        # {{{ compute (direct) reference solution

        evt, (pot_direct, grad_x_direct, ) = p2p(
                queue,
                targets, sources, (strengths,),
                out_host=True, **extra_source_kwargs)

        # }}}

        err_pot = la.norm((pot - pot_direct)/res**2)
        err_grad_x = la.norm((grad_x - grad_x_direct)/res**2)

        if 1:
            # make errors relative to the reference solution's magnitude
            err_pot = err_pot / la.norm((pot_direct)/res**2)
            err_grad_x = err_grad_x / la.norm((grad_x_direct)/res**2)

        if 0:
            import matplotlib.pyplot as pt
            from matplotlib.colors import Normalize

            pt.subplot(131)
            im = fp.show_scalar_in_matplotlib(pot.real)
            im.set_norm(Normalize(vmin=-0.1, vmax=0.1))

            pt.subplot(132)
            im = fp.show_scalar_in_matplotlib(pot_direct.real)
            im.set_norm(Normalize(vmin=-0.1, vmax=0.1))
            pt.colorbar()

            pt.subplot(133)
            im = fp.show_scalar_in_matplotlib(
                    np.log10(1e-15+np.abs(pot-pot_direct)))
            im.set_norm(Normalize(vmin=-6, vmax=1))

            pt.colorbar()
            pt.show()

        eoc_rec_pot.add_data_point(h, err_pot)
        eoc_rec_grad_x.add_data_point(h, err_grad_x)

    print(expn_class, knl, order)
    print("POTENTIAL:")
    print(eoc_rec_pot)
    print("X TARGET DERIVATIVE:")
    print(eoc_rec_grad_x)

    tgt_order = order + 1
    if issubclass(expn_class, LocalExpansionBase):
        tgt_order_grad = tgt_order - 1
        slack = 0.7
        grad_slack = 0.5
    else:
        tgt_order_grad = tgt_order + 1
        slack = 0.5
        grad_slack = 1

    # empirically determined slack adjustments for harder cases
    if order <= 2:
        slack += 1
        grad_slack += 1

    if isinstance(knl, DirectionalSourceDerivative):
        slack += 1
        grad_slack += 2

    if isinstance(base_knl, HelmholtzKernel):
        if base_knl.allow_evanescent:
            slack += 0.5
            grad_slack += 0.5

    if issubclass(expn_class, VolumeTaylorMultipoleExpansionBase):
        slack += 0.3
        grad_slack += 0.3

    assert eoc_rec_pot.order_estimate() > tgt_order - slack
    assert eoc_rec_grad_x.order_estimate() > tgt_order_grad - grad_slack
def test_refinement_connection(
        ctx_getter, refiner_cls, group_factory, mesh_name, dim, mesh_pars,
        mesh_order, refine_flags, visualize=False):
    """Check that interpolating a smooth function through a refinement
    connection (coarse -> refined discretization) converges at the
    discretization order as the base mesh is refined.
    """
    from random import seed
    seed(13)

    # Discretization order
    order = 5

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_refinement_connection, check_connection)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "circle":
            assert dim == 1
            h = 1 / mesh_par
            mesh = make_curve_mesh(
                    partial(ellipse, 1), np.linspace(0, 1, mesh_par + 1),
                    order=mesh_order)
        elif mesh_name == "blob":
            if mesh_order == 5:
                pytest.xfail("https://gitlab.tiker.net/inducer/meshmode/issues/2")
            assert dim == 2
            mesh = get_blob_mesh(mesh_par, mesh_order)
            h = float(mesh_par)
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=mesh_order, n=mesh_par)
            h = 1/mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        from meshmode.mesh.processing import find_bounding_box
        mesh_bbox_low, mesh_bbox_high = find_bounding_box(mesh)
        mesh_ext = mesh_bbox_high-mesh_bbox_low

        def f(x):
            # smooth separable test function scaled to the mesh bounding box;
            # uses mesh_ext from the current loop iteration (called in-loop)
            result = 1
            if mesh_name == "blob":
                factor = 15
            else:
                factor = 9

            for iaxis in range(len(x)):
                result = result * cl.clmath.sin(factor * (x[iaxis]/mesh_ext[iaxis]))

            return result

        discr = Discretization(cl_ctx, mesh, group_factory(order))

        refiner = refiner_cls(mesh)
        flags = refine_flags(mesh)
        refiner.refine(flags)

        connection = make_refinement_connection(
                refiner, discr, group_factory(order))
        check_connection(connection)

        fine_discr = connection.to_discr

        x = discr.nodes().with_queue(queue)
        x_fine = fine_discr.nodes().with_queue(queue)
        f_coarse = f(x)
        f_interp = connection(queue, f_coarse).with_queue(queue)
        f_true = f(x_fine).with_queue(queue)

        if visualize == "dots":
            import matplotlib.pyplot as plt
            x = x.get(queue)
            err = np.array(np.log10(
                1e-16 + np.abs((f_interp - f_true).get(queue))), dtype=float)
            import matplotlib.cm as cm
            cmap = cm.ScalarMappable(cmap=cm.jet)
            cmap.set_array(err)
            plt.scatter(x[0], x[1], c=cmap.to_rgba(err), s=20, cmap=cmap)
            plt.colorbar(cmap)
            plt.show()

        elif visualize == "vtk":
            from meshmode.discretization.visualization import make_visualizer
            fine_vis = make_visualizer(queue, fine_discr, mesh_order)

            fine_vis.write_vtk_file(
                    "refine-fine-%s-%dd-%s.vtu" % (mesh_name, dim, mesh_par), [
                        ("f_interp", f_interp),
                        ("f_true", f_true),
                        ])

        import numpy.linalg as la
        err = la.norm((f_interp - f_true).get(queue), np.inf)
        eoc_rec.add_data_point(h, err)

    order_slack = 0.5
    if mesh_name == "blob" and order > 1:
        order_slack = 1

    print(eoc_rec)
    assert (
            eoc_rec.order_estimate() >= order-order_slack
            or eoc_rec.max_error() < 1e-14)
def test_sanity_balls(ctx_getter, src_file, dim, mesh_order, visualize=False):
    """Sanity-check integration on ball meshes (older variant): computed
    volume and surface area of a unit ball must converge to the analytic
    values at (at least) the mesh order, and boundary normals point outward.

    NOTE(review): this duplicates the name of the other test_sanity_balls in
    this chunk; if both live in the same module, the later definition shadows
    the earlier one — presumably they come from different files; verify. This
    variant uses the older make_boundary_restriction / sym.Nodes() API.
    """
    pytest.importorskip("pytential")

    logging.basicConfig(level=logging.INFO)

    ctx = ctx_getter()
    queue = cl.CommandQueue(ctx)

    from pytools.convergence import EOCRecorder
    vol_eoc_rec = EOCRecorder()
    surf_eoc_rec = EOCRecorder()

    # overkill
    quad_order = mesh_order

    from pytential import bind, sym

    for h in [0.2, 0.14, 0.1]:
        from meshmode.mesh.io import generate_gmsh, FileSource
        mesh = generate_gmsh(
                FileSource(src_file), dim, order=mesh_order,
                other_options=["-string",
                    "Mesh.CharacteristicLengthMax = %g;" % h],
                force_ambient_dim=dim)

        logger.info("%d elements" % mesh.nelements)

        # {{{ discretizations and connections

        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
                InterpolatoryQuadratureSimplexGroupFactory
        vol_discr = Discretization(ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(quad_order))

        from meshmode.discretization.connection import make_boundary_restriction
        bdry_mesh, bdry_discr, bdry_connection = make_boundary_restriction(
                queue, vol_discr,
                InterpolatoryQuadratureSimplexGroupFactory(quad_order))

        # }}}

        # {{{ visualizers

        from meshmode.discretization.visualization import make_visualizer
        vol_vis = make_visualizer(queue, vol_discr, 20)
        bdry_vis = make_visualizer(queue, bdry_discr, 20)

        # }}}

        from math import gamma
        # surface area of the unit (dim-1)-sphere and volume of the unit ball
        true_surf = 2*np.pi**(dim/2)/gamma(dim/2)
        true_vol = true_surf/dim

        vol_x = vol_discr.nodes().with_queue(queue)

        vol_one = vol_x[0].copy()
        vol_one.fill(1)
        from pytential import norm, integral  # noqa

        comp_vol = integral(vol_discr, queue, vol_one)
        rel_vol_err = abs(true_vol - comp_vol) / true_vol
        vol_eoc_rec.add_data_point(h, rel_vol_err)
        print("VOL", true_vol, comp_vol)

        bdry_x = bdry_discr.nodes().with_queue(queue)

        bdry_one_exact = bdry_x[0].copy()
        bdry_one_exact.fill(1)

        # restriction of the constant-one function must be essentially exact
        bdry_one = bdry_connection(queue, vol_one).with_queue(queue)
        intp_err = norm(bdry_discr, queue, bdry_one-bdry_one_exact)
        assert intp_err < 1e-14

        comp_surf = integral(bdry_discr, queue, bdry_one)
        rel_surf_err = abs(true_surf - comp_surf) / true_surf
        surf_eoc_rec.add_data_point(h, rel_surf_err)
        print("SURF", true_surf, comp_surf)

        if visualize:
            vol_vis.write_vtk_file("volume-h=%g.vtu" % h, [
                ("f", vol_one),
                ("area_el", bind(vol_discr, sym.area_element())(queue)),
                ])
            bdry_vis.write_vtk_file("boundary-h=%g.vtu" % h, [("f", bdry_one)])

        # {{{ check normals point outward

        # normal . position > 0 everywhere on the (star-shaped) boundary
        normal_outward_check = bind(bdry_discr,
                sym.normal()
                |
                sym.Nodes(),
                )(queue).as_scalar() > 0

        assert normal_outward_check.get().all(), normal_outward_check.get()

        # }}}

    print("---------------------------------")
    print("VOLUME")
    print("---------------------------------")
    print(vol_eoc_rec)
    assert vol_eoc_rec.order_estimate() >= mesh_order

    print("---------------------------------")
    print("SURFACE")
    print("---------------------------------")
    print(surf_eoc_rec)
    assert surf_eoc_rec.order_estimate() >= mesh_order
def test_ellipse_eigenvalues(ctx_getter, ellipse_aspect, mode_nr, qbx_order):
    """Check QBX layer potentials on an ellipse against the analytically
    known eigenvalues of S, D (and S' for the circle) from Rodin & Steinbach
    [1], converging at roughly qbx_order.
    """
    logging.basicConfig(level=logging.INFO)

    print("ellipse_aspect: %s, mode_nr: %d, qbx_order: %d" % (
            ellipse_aspect, mode_nr, qbx_order))

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    target_order = 7

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from pytential.qbx import QBXLayerPotentialSource
    from pytools.convergence import EOCRecorder

    s_eoc_rec = EOCRecorder()
    d_eoc_rec = EOCRecorder()
    sp_eoc_rec = EOCRecorder()

    if ellipse_aspect != 1:
        nelements_values = [60, 100, 150, 200]
    else:
        nelements_values = [30, 70]

    # See
    #
    # [1] G. J. Rodin and O. Steinbach, "Boundary Element Preconditioners
    # for Problems Defined on Slender Domains", SIAM Journal on Scientific
    # Computing, Vol. 24, No. 4, pg. 1450, 2003.
    # http://dx.doi.org/10.1137/S1064827500372067

    for nelements in nelements_values:
        mesh = make_curve_mesh(partial(ellipse, ellipse_aspect),
                np.linspace(0, 1, nelements+1),
                target_order)

        fmm_order = qbx_order
        if fmm_order > 3:
            # FIXME: for now
            fmm_order = False

        density_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))
        qbx = QBXLayerPotentialSource(density_discr, 4*target_order,
                qbx_order, fmm_order=fmm_order)

        nodes = density_discr.nodes().with_queue(queue)

        if 0:
            # plot geometry, centers, normals
            centers = qbx.centers(density_discr, 1)

            nodes_h = nodes.get()
            centers_h = [centers[0].get(), centers[1].get()]
            pt.plot(nodes_h[0], nodes_h[1], "x-")
            pt.plot(centers_h[0], centers_h[1], "o")
            normal = bind(qbx, sym.normal())(queue).as_vector(np.object)
            pt.quiver(nodes_h[0], nodes_h[1],
                    normal[0].get(), normal[1].get())
            pt.gca().set_aspect("equal")
            pt.show()

        angle = cl.clmath.atan2(nodes[1]*ellipse_aspect, nodes[0])

        ellipse_fraction = ((1-ellipse_aspect)/(1+ellipse_aspect))**mode_nr

        # (2.6) in [1]
        J = cl.clmath.sqrt(  # noqa
                cl.clmath.sin(angle)**2
                + (1/ellipse_aspect)**2 * cl.clmath.cos(angle)**2)

        # {{{ single layer

        sigma = cl.clmath.cos(mode_nr*angle)/J

        s_sigma_op = bind(qbx, sym.S(0, sym.var("sigma")))
        s_sigma = s_sigma_op(queue=queue, sigma=sigma)

        # SIGN BINGO! :)
        s_eigval = 1/(2*mode_nr) * (1 + (-1)**mode_nr * ellipse_fraction)

        # (2.12) in [1]
        s_sigma_ref = s_eigval*J*sigma

        if 0:
            #pt.plot(s_sigma.get(), label="result")
            #pt.plot(s_sigma_ref.get(), label="ref")
            pt.plot((s_sigma_ref-s_sigma).get(), label="err")
            pt.legend()
            pt.show()

        s_err = (
                norm(density_discr, queue, s_sigma - s_sigma_ref)
                / norm(density_discr, queue, s_sigma_ref))
        s_eoc_rec.add_data_point(1/nelements, s_err)

        # }}}

        # {{{ double layer

        sigma = cl.clmath.cos(mode_nr*angle)

        d_sigma_op = bind(qbx, sym.D(0, sym.var("sigma")))
        d_sigma = d_sigma_op(queue=queue, sigma=sigma)

        # SIGN BINGO! :)
        d_eigval = -(-1)**mode_nr * 1/2*ellipse_fraction

        d_sigma_ref = d_eigval*sigma

        if 0:
            pt.plot(d_sigma.get(), label="result")
            pt.plot(d_sigma_ref.get(), label="ref")
            pt.legend()
            pt.show()

        if ellipse_aspect == 1:
            # D's eigenvalue is 0 on the circle; normalize by sigma instead
            d_ref_norm = norm(density_discr, queue, sigma)
        else:
            d_ref_norm = norm(density_discr, queue, d_sigma_ref)

        d_err = (
                norm(density_discr, queue, d_sigma - d_sigma_ref)
                / d_ref_norm)
        d_eoc_rec.add_data_point(1/nelements, d_err)

        # }}}

        if ellipse_aspect == 1:
            # {{{ S'

            sigma = cl.clmath.cos(mode_nr*angle)

            sp_sigma_op = bind(qbx, sym.Sp(0, sym.var("sigma")))
            sp_sigma = sp_sigma_op(queue=queue, sigma=sigma)
            sp_eigval = 0

            sp_sigma_ref = sp_eigval*sigma

            sp_err = (
                    norm(density_discr, queue, sp_sigma - sp_sigma_ref)
                    / norm(density_discr, queue, sigma))
            sp_eoc_rec.add_data_point(1/nelements, sp_err)

            # }}}

    print("Errors for S:")
    print(s_eoc_rec)
    required_order = qbx_order + 1
    assert s_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D:")
    print(d_eoc_rec)
    required_order = qbx_order
    assert d_eoc_rec.order_estimate() > required_order - 1.5

    if ellipse_aspect == 1:
        print("Errors for S':")
        print(sp_eoc_rec)
        required_order = qbx_order
        assert sp_eoc_rec.order_estimate() > required_order - 1.5
def test_identities(ctx_getter, zero_op_name, curve_name, curve_f, qbx_order, k):
    """Check that a layer-potential identity (selected by *zero_op_name*:
    Green's identity, its gradient form, or a zero Calderon combination)
    evaluates to zero at the expected convergence order on a sequence of
    refined curve meshes.

    :arg k: Helmholtz parameter; ``k == 0`` selects the Laplace kernel.
    """
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    target_order = 7

    u_sym = sym.var("u")
    grad_u_sym = sym.VectorVariable("grad_u")
    dn_u_sym = sym.var("dn_u")

    # A literal 0 makes the symbolic layer potentials use the Laplace kernel;
    # otherwise "k" is left as a symbolic parameter bound at evaluation time.
    if k == 0:
        k_sym = 0
    else:
        k_sym = "k"

    # NOTE(review): d1/d2 are assumed to be module-level derivative helpers
    # defined elsewhere in this file -- confirm against the file header.
    zero_op_table = {
            "green":
            sym.S(k_sym, dn_u_sym) - sym.D(k_sym, u_sym) - 0.5*u_sym,

            "green_grad":
            d1.nabla * d1(sym.S(k_sym, dn_u_sym))
            - d2.nabla * d2(sym.D(k_sym, u_sym)) - 0.5*grad_u_sym,

            # only for k==0:
            "zero_calderon":
            -sym.Dp(0, sym.S(0, u_sym)) - 0.25*u_sym
            + sym.Sp(0, sym.Sp(0, u_sym))
            }

    # Expected convergence order for each identity.
    order_table = {
            "green": qbx_order,
            "green_grad": qbx_order-1,
            "zero_calderon": qbx_order-1,
            }

    zero_op = zero_op_table[zero_op_name]

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nelements in [30, 50, 70]:
        mesh = make_curve_mesh(curve_f,
                np.linspace(0, 1, nelements+1),
                target_order)

        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
                InterpolatoryQuadratureSimplexGroupFactory
        from pytential.qbx import QBXLayerPotentialSource
        density_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))

        qbx = QBXLayerPotentialSource(density_discr, 4*target_order, qbx_order,
                # Don't use FMM for now
                fmm_order=False)

        # {{{ compute values of a solution to the PDE

        nodes_host = density_discr.nodes().get(queue)
        # FIX: np.object was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``object`` is the documented drop-in replacement.
        normal = bind(density_discr, sym.normal())(queue).as_vector(object)
        normal_host = [normal[0].get(), normal[1].get()]

        if k != 0:
            # plane wave with direction wave_vec
            angle = 0.3
            wave_vec = np.array([np.cos(angle), np.sin(angle)])
            u = np.exp(1j*k*np.tensordot(wave_vec, nodes_host, axes=1))
            grad_u = 1j*k*wave_vec[:, np.newaxis]*u
        else:
            # log-potential of a point source placed away from the curve
            center = np.array([3, 1])
            diff = nodes_host - center[:, np.newaxis]
            dist_squared = np.sum(diff**2, axis=0)
            dist = np.sqrt(dist_squared)
            u = np.log(dist)
            grad_u = diff/dist_squared

        # normal derivative of u on the curve
        dn_u = normal_host[0]*grad_u[0] + normal_host[1]*grad_u[1]

        # }}}

        u_dev = cl.array.to_device(queue, u)
        dn_u_dev = cl.array.to_device(queue, dn_u)
        grad_u_dev = cl.array.to_device(queue, grad_u)

        key = (qbx_order, curve_name, nelements, zero_op_name)

        bound_op = bind(qbx, zero_op)
        error = bound_op(
                queue, u=u_dev, dn_u=dn_u_dev, grad_u=grad_u_dev, k=k)
        if 0:
            pt.plot(error)
            pt.show()

        l2_error_norm = norm(density_discr, queue, error)
        print(key, l2_error_norm)

        eoc_rec.add_data_point(1/nelements, l2_error_norm)

    print(eoc_rec)
    tgt_order = order_table[zero_op_name]
    assert eoc_rec.order_estimate() > tgt_order - 1.3
def test_sphere_eigenvalues(ctx_factory, mode_m, mode_n, qbx_order, fmm_backend):
    """Check the known eigenvalues of the Laplace layer potentials (S, D, S',
    D') on the unit sphere, using the spherical harmonic Y_{mn} as the
    eigendensity, across a sequence of refinements.
    """
    logging.basicConfig(level=logging.INFO)
    special = pytest.importorskip("scipy.special")

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    target_order = 8

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from pytential.qbx import QBXLayerPotentialSource
    from pytools.convergence import EOCRecorder

    # one EOC recorder per layer-potential operator
    s_eoc_rec = EOCRecorder()
    d_eoc_rec = EOCRecorder()
    sp_eoc_rec = EOCRecorder()
    dp_eoc_rec = EOCRecorder()

    def rel_err(comp, ref):
        # relative L2 error over the (loop-current) density_discr
        return (norm(density_discr, comp - ref)
                / norm(density_discr, ref))

    for nrefinements in [0, 1]:
        from meshmode.mesh.generation import generate_icosphere
        mesh = generate_icosphere(1, target_order)

        # uniformly refine the icosphere nrefinements times
        from meshmode.mesh.refinement import Refiner
        refiner = Refiner(mesh)
        for i in range(nrefinements):
            flags = np.ones(mesh.nelements, dtype=bool)
            refiner.refine(flags)
            mesh = refiner.get_current_mesh()

        pre_density_discr = Discretization(
                actx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))
        qbx = QBXLayerPotentialSource(
                pre_density_discr, 4 * target_order, qbx_order,
                fmm_order=6,
                fmm_backend=fmm_backend,
                )
        places = GeometryCollection(qbx)

        from meshmode.dof_array import flatten, unflatten, thaw
        density_discr = places.get_discretization(places.auto_source.geometry)
        nodes = thaw(actx, density_discr.nodes())

        # spherical coordinates of the quadrature nodes
        r = actx.np.sqrt(nodes[0] * nodes[0]
                + nodes[1] * nodes[1]
                + nodes[2] * nodes[2])
        phi = actx.np.arccos(nodes[2] / r)
        theta = actx.np.arctan2(nodes[0], nodes[1])

        # spherical harmonic Y_{mn}, evaluated on the host and brought back
        # as a DOF array to serve as the eigendensity
        ymn = unflatten(
                actx, density_discr,
                actx.from_numpy(
                    special.sph_harm(mode_m, mode_n,
                        actx.to_numpy(flatten(theta)),
                        actx.to_numpy(flatten(phi)))))

        from sumpy.kernel import LaplaceKernel
        lap_knl = LaplaceKernel(3)

        # {{{ single layer

        s_sigma_op = bind(
                places, sym.S(lap_knl, sym.var("sigma"), qbx_forced_limit=+1))
        s_sigma = s_sigma_op(actx, sigma=ymn)
        s_eigval = 1 / (2 * mode_n + 1)

        h_max = bind(places, sym.h_max(qbx.ambient_dim))(actx)
        s_eoc_rec.add_data_point(h_max, rel_err(s_sigma, s_eigval * ymn))

        # }}}

        # {{{ double layer

        d_sigma_op = bind(
                places, sym.D(lap_knl, sym.var("sigma"), qbx_forced_limit="avg"))
        d_sigma = d_sigma_op(actx, sigma=ymn)
        d_eigval = -1 / (2 * (2 * mode_n + 1))
        d_eoc_rec.add_data_point(h_max, rel_err(d_sigma, d_eigval * ymn))

        # }}}

        # {{{ S'

        sp_sigma_op = bind(
                places, sym.Sp(lap_knl, sym.var("sigma"), qbx_forced_limit="avg"))
        sp_sigma = sp_sigma_op(actx, sigma=ymn)
        sp_eigval = -1 / (2 * (2 * mode_n + 1))
        sp_eoc_rec.add_data_point(h_max, rel_err(sp_sigma, sp_eigval * ymn))

        # }}}

        # {{{ D'

        dp_sigma_op = bind(
                places, sym.Dp(lap_knl, sym.var("sigma"), qbx_forced_limit="avg"))
        dp_sigma = dp_sigma_op(actx, sigma=ymn)
        dp_eigval = -(mode_n * (mode_n + 1)) / (2 * mode_n + 1)
        dp_eoc_rec.add_data_point(h_max, rel_err(dp_sigma, dp_eigval * ymn))

        # }}}

    print("Errors for S:")
    print(s_eoc_rec)
    required_order = qbx_order + 1
    assert s_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D:")
    print(d_eoc_rec)
    required_order = qbx_order
    assert d_eoc_rec.order_estimate() > required_order - 0.5

    print("Errors for S':")
    print(sp_eoc_rec)
    required_order = qbx_order
    assert sp_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D':")
    print(dp_eoc_rec)
    required_order = qbx_order
    assert dp_eoc_rec.order_estimate() > required_order - 1.5
def test_uniform_rhs(actx_factory, dim, order):
    """Tests the inviscid rhs using a trivial constant/uniform state which
    should yield rhs = 0 to FP.  The test is performed for 1, 2, and 3
    dimensions, first with zero momentum and then with a non-zero uniform
    velocity component.
    """
    actx = actx_factory()

    tolerance = 1e-9
    maxxerr = 0.0

    from pytools.convergence import EOCRecorder
    eoc_rec0 = EOCRecorder()
    eoc_rec1 = EOCRecorder()

    # for nel_1d in [4, 8, 12]:
    for nel_1d in [4, 8]:
        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                          b=(0.5, ) * dim,
                                          n=(nel_1d, ) * dim)

        logger.info(f"Number of {dim}d elements: {mesh.nelements}")

        discr = EagerDGDiscretization(actx, mesh, order=order)

        # uniform state: rho = 1, rhoE = 2.5, zero momentum
        mass_input = discr.zeros(actx) + 1
        energy_input = discr.zeros(actx) + 2.5
        mom_input = make_obj_array(
            [discr.zeros(actx) for i in range(discr.dim)])
        fields = join_conserved(dim,
                                mass=mass_input,
                                energy=energy_input,
                                momentum=mom_input)

        expected_rhs = make_obj_array(
            [discr.zeros(actx) for i in range(len(fields))])

        boundaries = {BTAG_ALL: DummyBoundary()}
        inviscid_rhs = inviscid_operator(discr, eos=IdealSingleGas(),
                                         boundaries=boundaries,
                                         q=fields, t=0.0)
        rhs_resid = inviscid_rhs - expected_rhs

        resid_split = split_conserved(dim, rhs_resid)
        rho_resid = resid_split.mass
        rhoe_resid = resid_split.energy
        mom_resid = resid_split.momentum

        rhs_split = split_conserved(dim, inviscid_rhs)
        rho_rhs = rhs_split.mass
        rhoe_rhs = rhs_split.energy
        rhov_rhs = rhs_split.momentum

        message = (f"rho_rhs = {rho_rhs}\n"
                   f"rhoe_rhs = {rhoe_rhs}\n"
                   f"rhov_rhs = {rhov_rhs}")
        logger.info(message)

        assert discr.norm(rho_resid, np.inf) < tolerance
        assert discr.norm(rhoe_resid, np.inf) < tolerance
        for i in range(dim):
            assert discr.norm(mom_resid[i], np.inf) < tolerance
            err_max = discr.norm(rhs_resid[i], np.inf)
            eoc_rec0.add_data_point(1.0 / nel_1d, err_max)
            assert (err_max < tolerance)
            if err_max > maxxerr:
                maxxerr = err_max

        # set a non-zero, but uniform velocity component
        for i in range(len(mom_input)):
            mom_input[i] = discr.zeros(actx) + (-1.0)**i

        # BUGFIX: rebuild the conserved-state vector after changing the
        # momentum.  *fields* was assembled above from references captured at
        # join time, so rebinding mom_input[i] does not propagate into it --
        # without this re-join the V != 0 case silently re-tests the V == 0
        # state.  (If join_conserved did track mom_input live, this re-join
        # would merely be a harmless no-op.)
        fields = join_conserved(dim,
                                mass=mass_input,
                                energy=energy_input,
                                momentum=mom_input)

        boundaries = {BTAG_ALL: DummyBoundary()}
        inviscid_rhs = inviscid_operator(discr, eos=IdealSingleGas(),
                                         boundaries=boundaries,
                                         q=fields, t=0.0)
        rhs_resid = inviscid_rhs - expected_rhs

        resid_split = split_conserved(dim, rhs_resid)
        rho_resid = resid_split.mass
        rhoe_resid = resid_split.energy
        mom_resid = resid_split.momentum

        assert discr.norm(rho_resid, np.inf) < tolerance
        assert discr.norm(rhoe_resid, np.inf) < tolerance
        for i in range(dim):
            assert discr.norm(mom_resid[i], np.inf) < tolerance
            err_max = discr.norm(rhs_resid[i], np.inf)
            eoc_rec1.add_data_point(1.0 / nel_1d, err_max)
            assert (err_max < tolerance)
            if err_max > maxxerr:
                maxxerr = err_max

    message = (f"V == 0 Errors:\n{eoc_rec0}"
               f"V != 0 Errors:\n{eoc_rec1}")
    print(message)

    assert (eoc_rec0.order_estimate() >= order - 0.5
            or eoc_rec0.max_error() < 1e-9)
    assert (eoc_rec1.order_estimate() >= order - 0.5
            or eoc_rec1.max_error() < 1e-9)
def test_ellipse_eigenvalues(ctx_factory, ellipse_aspect, mode_nr, qbx_order,
        force_direct, visualize=False):
    """Check the analytically known eigenvalues of the Laplace layer
    potentials (S, D, and -- for the circle only -- S') on an ellipse,
    using trigonometric eigendensities from Rodin & Steinbach [1].
    """
    logging.basicConfig(level=logging.INFO)
    print("ellipse_aspect: %s, mode_nr: %d, qbx_order: %d"
            % (ellipse_aspect, mode_nr, qbx_order))

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    target_order = 8

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from pytential.qbx import QBXLayerPotentialSource
    from pytools.convergence import EOCRecorder

    s_eoc_rec = EOCRecorder()
    d_eoc_rec = EOCRecorder()
    sp_eoc_rec = EOCRecorder()

    # slender ellipses need finer meshes to reach the asymptotic regime
    if ellipse_aspect != 1:
        nelements_values = [60, 100, 150, 200]
    else:
        nelements_values = [30, 70]

    # See
    #
    # [1] G. J. Rodin and O. Steinbach, "Boundary Element Preconditioners
    # for Problems Defined on Slender Domains", SIAM Journal on Scientific
    # Computing, Vol. 24, No. 4, pg. 1450, 2003.
    # https://dx.doi.org/10.1137/S1064827500372067

    for nelements in nelements_values:
        mesh = make_curve_mesh(partial(ellipse, ellipse_aspect),
                np.linspace(0, 1, nelements + 1),
                target_order)

        fmm_order = 12
        if force_direct:
            # fmm_order=False disables the FMM entirely
            fmm_order = False

        pre_density_discr = Discretization(
                actx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))
        qbx = QBXLayerPotentialSource(
                pre_density_discr, 4 * target_order, qbx_order,
                fmm_order=fmm_order,
                _expansions_in_tree_have_extent=True,
                )
        places = GeometryCollection(qbx)

        density_discr = places.get_discretization(places.auto_source.geometry)

        from meshmode.dof_array import thaw, flatten
        nodes = thaw(actx, density_discr.nodes())

        if visualize:
            # plot geometry, centers, normals
            centers = bind(places,
                    sym.expansion_centers(qbx.ambient_dim, +1))(actx)
            normals = bind(places,
                    sym.normal(qbx.ambient_dim))(actx).as_vector(object)

            nodes_h = np.array(
                    [actx.to_numpy(axis) for axis in flatten(nodes)])
            centers_h = np.array(
                    [actx.to_numpy(axis) for axis in flatten(centers)])
            normals_h = np.array(
                    [actx.to_numpy(axis) for axis in flatten(normals)])

            pt.plot(nodes_h[0], nodes_h[1], "x-")
            pt.plot(centers_h[0], centers_h[1], "o")
            pt.quiver(nodes_h[0], nodes_h[1], normals_h[0], normals_h[1])
            pt.gca().set_aspect("equal")
            pt.show()

        # elliptic angle parameter of each node
        angle = actx.np.arctan2(nodes[1] * ellipse_aspect, nodes[0])
        ellipse_fraction = ((1 - ellipse_aspect)
                / (1 + ellipse_aspect))**mode_nr

        # (2.6) in [1]
        J = actx.np.sqrt(  # noqa
                actx.np.sin(angle)**2
                + (1 / ellipse_aspect)**2 * actx.np.cos(angle)**2)

        from sumpy.kernel import LaplaceKernel
        lap_knl = LaplaceKernel(2)

        # {{{ single layer

        sigma_sym = sym.var("sigma")
        s_sigma_op = sym.S(lap_knl, sigma_sym, qbx_forced_limit=+1)

        # eigendensity for S includes the Jacobian factor 1/J
        sigma = actx.np.cos(mode_nr * angle) / J
        s_sigma = bind(places, s_sigma_op)(actx, sigma=sigma)

        # SIGN BINGO! :)
        s_eigval = 1 / (2 * mode_nr) * (1 + (-1)**mode_nr * ellipse_fraction)

        # (2.12) in [1]
        s_sigma_ref = s_eigval * J * sigma

        if 0:
            #pt.plot(s_sigma.get(), label="result")
            #pt.plot(s_sigma_ref.get(), label="ref")
            pt.plot(actx.to_numpy(flatten(s_sigma_ref - s_sigma)), label="err")
            pt.legend()
            pt.show()

        h_max = bind(places, sym.h_max(qbx.ambient_dim))(actx)
        s_err = (norm(density_discr, s_sigma - s_sigma_ref)
                / norm(density_discr, s_sigma_ref))
        s_eoc_rec.add_data_point(h_max, s_err)

        # }}}

        # {{{ double layer

        d_sigma_op = sym.D(lap_knl, sigma_sym, qbx_forced_limit="avg")

        sigma = actx.np.cos(mode_nr * angle)
        d_sigma = bind(places, d_sigma_op)(actx, sigma=sigma)

        # SIGN BINGO! :)
        d_eigval = -(-1)**mode_nr * 1 / 2 * ellipse_fraction

        d_sigma_ref = d_eigval * sigma

        if 0:
            pt.plot(actx.to_numpy(flatten(d_sigma)), label="result")
            pt.plot(actx.to_numpy(flatten(d_sigma_ref)), label="ref")
            pt.legend()
            pt.show()

        if ellipse_aspect == 1:
            # d_eigval == 0 on the circle, so use sigma as the reference norm
            d_ref_norm = norm(density_discr, sigma)
        else:
            d_ref_norm = norm(density_discr, d_sigma_ref)

        d_err = (norm(density_discr, d_sigma - d_sigma_ref)
                / d_ref_norm)
        d_eoc_rec.add_data_point(h_max, d_err)

        # }}}

        if ellipse_aspect == 1:
            # {{{ S'

            sp_sigma_op = sym.Sp(lap_knl, sym.var("sigma"),
                    qbx_forced_limit="avg")

            sigma = actx.np.cos(mode_nr * angle)
            sp_sigma = bind(places, sp_sigma_op)(actx, sigma=sigma)

            # S' of the circle eigendensity is identically zero
            sp_eigval = 0
            sp_sigma_ref = sp_eigval * sigma

            sp_err = (norm(density_discr, sp_sigma - sp_sigma_ref)
                    / norm(density_discr, sigma))
            sp_eoc_rec.add_data_point(h_max, sp_err)

            # }}}

    print("Errors for S:")
    print(s_eoc_rec)
    required_order = qbx_order + 1
    assert s_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D:")
    print(d_eoc_rec)
    required_order = qbx_order
    assert d_eoc_rec.order_estimate() > required_order - 1.5

    if ellipse_aspect == 1:
        print("Errors for S':")
        print(sp_eoc_rec)
        required_order = qbx_order
        assert sp_eoc_rec.order_estimate() > required_order - 1.5
def test_convergence_maxwell(ctx_factory, order):
    """Test whether 3D Maxwell's actually converges"""

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    dims = 3
    ns = [4, 6, 8]
    for n in ns:
        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(
                a=(0.0, ) * dims,
                b=(1.0, ) * dims,
                n=(n, ) * dims)

        discr = DGDiscretizationWithBoundaries(actx, mesh, order=order)

        epsilon = 1
        mu = 1

        # analytic cavity mode (1, 2, 2) serves as both initial condition
        # and reference solution
        from grudge.models.em import get_rectangular_cavity_mode
        sym_mode = get_rectangular_cavity_mode(1, (1, 2, 2))

        analytic_sol = bind(discr, sym_mode)
        fields = analytic_sol(actx, t=0, epsilon=epsilon, mu=mu)

        from grudge.models.em import MaxwellOperator
        op = MaxwellOperator(epsilon, mu, flux_type=0.5, dimensions=dims)
        op.check_bc_coverage(mesh)
        bound_op = bind(discr, op.sym_operator())

        def rhs(t, w):
            return bound_op(t=t, w=w)

        # short fixed-dt run: 5 RK4 steps
        dt = 0.002
        final_t = dt * 5
        nsteps = int(final_t / dt)

        from grudge.shortcuts import set_up_rk4
        dt_stepper = set_up_rk4("w", dt, fields, rhs)

        logger.info("dt %.5e nsteps %5d", dt, nsteps)

        norm = bind(discr, sym.norm(2, sym.var("u")))

        step = 0
        for event in dt_stepper.run(t_end=final_t):
            if isinstance(event, dt_stepper.StateComputed):
                assert event.component_id == "w"
                esc = event.state_component

                step += 1
                logger.debug("[%04d] t = %.5e", step, event.t)

        sol = analytic_sol(actx, mu=mu, epsilon=epsilon, t=step * dt)

        # NOTE(review): only components 0..4 enter the error sum -- confirm
        # whether all field components of the Maxwell state were intended.
        vals = [norm(u=(esc[i] - sol[i])) / norm(u=sol[i]) for i in range(5)]  # noqa E501
        total_error = sum(vals)
        eoc_rec.add_data_point(1.0 / n, total_error)

    logger.info(
        "\n%s",
        eoc_rec.pretty_print(abscissa_label="h", error_label="L2 Error"))
    assert eoc_rec.order_estimate() > order
def test_convergence_advec(ctx_factory, mesh_name, mesh_pars, op_type, flux_type,
        order, visualize=False):
    """Test whether 2D advection actually converges"""

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for mesh_par in mesh_pars:
        # {{{ build mesh; pick dim and a mesh-dependent dt safety factor

        if mesh_name == "segment":
            from meshmode.mesh.generation import generate_box_mesh
            mesh = generate_box_mesh(
                [np.linspace(-1.0, 1.0, mesh_par)],
                order=order)
            dim = 1
            dt_factor = 1.0
        elif mesh_name == "disk":
            pytest.importorskip("meshpy")

            from meshpy.geometry import make_circle, GeometryBuilder
            from meshpy.triangle import MeshInfo, build

            geob = GeometryBuilder()
            geob.add_geometry(*make_circle(1))
            mesh_info = MeshInfo()
            geob.set(mesh_info)

            # mesh_par is the maximum triangle area here
            mesh_info = build(mesh_info, max_volume=mesh_par)

            from meshmode.mesh.io import from_meshpy
            mesh = from_meshpy(mesh_info, order=1)
            dim = 2
            dt_factor = 4
        elif mesh_name.startswith("rect"):
            dim = int(mesh_name[4:])
            from meshmode.mesh.generation import generate_regular_rect_mesh
            mesh = generate_regular_rect_mesh(
                a=(-0.5, ) * dim,
                b=(0.5, ) * dim,
                n=(mesh_par, ) * dim,
                order=4)

            if dim == 2:
                dt_factor = 4
            elif dim == 3:
                dt_factor = 2
            else:
                raise ValueError("dt_factor not known for %dd" % dim)
        else:
            raise ValueError("invalid mesh name: " + mesh_name)

        # }}}

        # constant advection velocity, truncated to dim components
        v = np.array([0.27, 0.31, 0.1])[:dim]
        norm_v = la.norm(v)

        def f(x):
            return sym.sin(10 * x)

        def u_analytic(x):
            # traveling wave along v
            return f(-v.dot(x) / norm_v + sym.var("t", sym.DD_SCALAR) * norm_v)

        from grudge.models.advection import (StrongAdvectionOperator,
                WeakAdvectionOperator)
        discr = DGDiscretizationWithBoundaries(actx, mesh, order=order)
        op_class = {
            "strong": StrongAdvectionOperator,
            "weak": WeakAdvectionOperator,
        }[op_type]
        op = op_class(v,
                inflow_u=u_analytic(sym.nodes(dim, sym.BTAG_ALL)),
                flux_type=flux_type)

        bound_op = bind(discr, op.sym_operator())

        u = bind(discr, u_analytic(sym.nodes(dim)))(actx, t=0)

        def rhs(t, u):
            return bound_op(t=t, u=u)

        if dim == 3:
            final_time = 0.1
        else:
            final_time = 0.2

        # choose dt from the mesh size, then round so final_time is an
        # integer number of steps (the 1e-15 guards against missing the
        # last step due to roundoff)
        h_max = bind(discr, sym.h_max_from_volume(discr.ambient_dim))(actx)
        dt = dt_factor * h_max / order**2
        nsteps = (final_time // dt) + 1
        dt = final_time / nsteps + 1e-15

        from grudge.shortcuts import set_up_rk4
        dt_stepper = set_up_rk4("u", dt, u, rhs)

        last_u = None

        from grudge.shortcuts import make_visualizer
        vis = make_visualizer(discr, vis_order=order)

        step = 0

        for event in dt_stepper.run(t_end=final_time):
            if isinstance(event, dt_stepper.StateComputed):
                step += 1
                logger.debug("[%04d] t = %.5f", step, event.t)

                last_t = event.t
                last_u = event.state_component

                if visualize:
                    vis.write_vtk_file("fld-%s-%04d.vtu" % (mesh_par, step),
                            [("u", event.state_component)])

        # compare final state against the analytic traveling wave
        error_l2 = bind(discr,
            sym.norm(2, sym.var("u") - u_analytic(sym.nodes(dim))))(
                t=last_t, u=last_u)
        logger.info("h_max %.5e error %.5e", h_max, error_l2)
        eoc_rec.add_data_point(h_max, error_l2)

    logger.info(
        "\n%s",
        eoc_rec.pretty_print(abscissa_label="h", error_label="L2 Error"))
    assert eoc_rec.order_estimate() > order
def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars,
        per_face_groups):
    """Check that summing per-boundary embeddings (exterior + interior faces)
    into the all-faces discretization reproduces a smooth function sampled
    directly on all faces, converging at the discretization order.
    """
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_face_restriction, make_face_to_all_faces_embedding,
            check_connection)
    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 4

    def f(x):
        # smooth, oscillatory test field
        return 0.1*cl.clmath.sin(30*x)

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "blob":
            assert dim == 2

            h = mesh_par

            from meshmode.mesh.io import generate_gmsh, FileSource
            print("BEGIN GEN")
            mesh = generate_gmsh(
                    FileSource("blob-2d.step"), 2, order=order,
                    force_ambient_dim=2,
                    other_options=[
                        "-string", "Mesh.CharacteristicLengthMax = %s;" % h]
                    )
            print("END GEN")
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=4, n=mesh_par)

            h = 1/mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        vol_discr = Discretization(cl_ctx, mesh,
                PolynomialWarpAndBlendGroupFactory(order))
        print("h=%s -> %d elements" % (
                h, sum(mgrp.nelements for mgrp in mesh.groups)))

        all_face_bdry_connection = make_face_restriction(
                vol_discr, PolynomialWarpAndBlendGroupFactory(order),
                FRESTR_ALL_FACES, per_face_groups=per_face_groups)
        all_face_bdry_discr = all_face_bdry_connection.to_discr

        # sanity-check the connection's batch structure: every batch covers
        # all elements, and the batch/group index matches the target face
        for ito_grp, ceg in enumerate(all_face_bdry_connection.groups):
            for ibatch, batch in enumerate(ceg.batches):
                assert np.array_equal(
                        batch.from_element_indices.get(queue),
                        np.arange(vol_discr.mesh.nelements))

                if per_face_groups:
                    assert ito_grp == batch.to_element_face
                else:
                    assert ibatch == batch.to_element_face

        all_face_x = all_face_bdry_discr.nodes()[0].with_queue(queue)
        all_face_f = f(all_face_x)

        # accumulate contributions from each face class into this field
        all_face_f_2 = all_face_bdry_discr.zeros(queue)

        for boundary_tag in [
                BTAG_ALL,
                FRESTR_INTERIOR_FACES,
                ]:
            bdry_connection = make_face_restriction(
                    vol_discr, PolynomialWarpAndBlendGroupFactory(order),
                    boundary_tag, per_face_groups=per_face_groups)
            bdry_discr = bdry_connection.to_discr

            bdry_x = bdry_discr.nodes()[0].with_queue(queue)
            bdry_f = f(bdry_x)

            all_face_embedding = make_face_to_all_faces_embedding(
                    bdry_connection, all_face_bdry_discr)

            check_connection(all_face_embedding)

            all_face_f_2 += all_face_embedding(queue, bdry_f)

        err = la.norm((all_face_f-all_face_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert (
            eoc_rec.order_estimate() >= order-0.5
            or eoc_rec.max_error() < 1e-14)
eoc_rec_target.add_data_point(result.h_max, result.rel_err_2) if result.rel_td_err_inf is not None: eoc_rec_td.add_data_point(result.h_max, result.rel_td_err_inf) if case.bc_type == "dirichlet": tgt_order = case.qbx_order elif case.bc_type == "neumann": tgt_order = case.qbx_order - 1 else: assert False if have_error_data: print("TARGET ERROR:") print(eoc_rec_target) assert eoc_rec_target.order_estimate() > tgt_order - 1.3 if case.check_tangential_deriv: print("TANGENTIAL DERIVATIVE ERROR:") print(eoc_rec_td) assert eoc_rec_td.order_estimate() > tgt_order - 2.3 # }}} # You can test individual routines by typing # $ python test_scalar_int_eq.py 'test_routine()' if __name__ == "__main__": import sys if len(sys.argv) > 1:
def test_opposite_face_interpolation(ctx_getter, group_factory,
        mesh_name, dim, mesh_pars):
    """Verify the opposite-face connection: a smooth function restricted to
    the interior faces must be reproduced (up to discretization error) after
    flipping each face value to its neighboring element's matching face.
    Convergence is checked over the mesh sequence given by *mesh_pars*.
    """
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_face_restriction, make_opposite_face_connection,
            check_connection)
    from pytools.convergence import EOCRecorder

    order = 5
    eoc_rec = EOCRecorder()

    def test_field(x):
        # smooth oscillatory function used as the round-trip payload
        return 0.1*cl.clmath.sin(30*x)

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=4, n=mesh_par)
            h = 1/mesh_par
        elif mesh_name == "blob":
            assert dim == 2

            # for the gmsh blob, mesh_par is the target element size
            h = mesh_par

            from meshmode.mesh.io import generate_gmsh, FileSource
            print("BEGIN GEN")
            mesh = generate_gmsh(
                    FileSource("blob-2d.step"), 2, order=order,
                    force_ambient_dim=2,
                    other_options=[
                        "-string", "Mesh.CharacteristicLengthMax = %s;" % h]
                    )
            print("END GEN")
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        vol_discr = Discretization(cl_ctx, mesh, group_factory(order))

        nelements = sum(mgrp.nelements for mgrp in mesh.groups)
        print("h=%s -> %d elements" % (h, nelements))

        interior_restriction = make_face_restriction(
                vol_discr, group_factory(order),
                FRESTR_INTERIOR_FACES)
        face_discr = interior_restriction.to_discr

        flip_connection = make_opposite_face_connection(interior_restriction)
        check_connection(flip_connection)

        face_x = face_discr.nodes()[0].with_queue(queue)
        before_flip = test_field(face_x)
        after_flip = flip_connection(queue, before_flip)

        err = la.norm((before_flip-after_flip).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert (
            eoc_rec.order_estimate() >= order-0.5
            or eoc_rec.max_error() < 1e-13)
def test_boundary_interpolation(ctx_getter, group_factory, boundary_tag,
        mesh_name, dim, mesh_pars, per_face_groups):
    """Check that restricting a smooth volume function to a boundary
    discretization matches sampling that function directly on the boundary
    nodes, converging at the discretization order.  For the 2D blob mesh the
    connection's explicit resampling matrix is cross-checked against the
    matrix-free application.
    """
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_face_restriction, check_connection)
    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 4

    def f(x):
        # smooth, oscillatory test field
        return 0.1*cl.clmath.sin(30*x)

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "blob":
            assert dim == 2

            h = mesh_par

            from meshmode.mesh.io import generate_gmsh, FileSource
            print("BEGIN GEN")
            mesh = generate_gmsh(
                    FileSource("blob-2d.step"), 2, order=order,
                    force_ambient_dim=2,
                    other_options=[
                        "-string", "Mesh.CharacteristicLengthMax = %s;" % h]
                    )
            print("END GEN")
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=4, n=mesh_par)

            h = 1/mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        vol_discr = Discretization(cl_ctx, mesh, group_factory(order))
        print("h=%s -> %d elements" % (
                h, sum(mgrp.nelements for mgrp in mesh.groups)))

        x = vol_discr.nodes()[0].with_queue(queue)
        vol_f = f(x)

        bdry_connection = make_face_restriction(
                vol_discr, group_factory(order),
                boundary_tag, per_face_groups=per_face_groups)
        check_connection(bdry_connection)
        bdry_discr = bdry_connection.to_discr

        # reference: sample f directly on the boundary nodes
        bdry_x = bdry_discr.nodes()[0].with_queue(queue)
        bdry_f = f(bdry_x)
        # candidate: restrict the volume field through the connection
        bdry_f_2 = bdry_connection(queue, vol_f)

        if mesh_name == "blob" and dim == 2:
            # cross-check against the dense resampling matrix
            mat = bdry_connection.full_resample_matrix(queue).get(queue)
            bdry_f_2_by_mat = mat.dot(vol_f.get())

            mat_error = la.norm(bdry_f_2.get(queue=queue) - bdry_f_2_by_mat)
            assert mat_error < 1e-14, mat_error

        err = la.norm((bdry_f-bdry_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert (
            eoc_rec.order_estimate() >= order-0.5
            or eoc_rec.max_error() < 1e-14)
def test_identity_convergence(ctx_getter, case, visualize=False):
    """Verify that the layer-potential identity described by *case*
    evaluates to zero at the expected convergence order over the case's
    resolution sequence, for both Laplace (``case.k == 0``) and Helmholtz
    kernels in 2D and 3D.
    """
    logging.basicConfig(level=logging.INFO)

    case.check()

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    target_order = 8

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for resolution in (
            getattr(case, "resolutions", None)
            or case.geometry.resolutions
            ):
        mesh = case.geometry.get_mesh(resolution, target_order)
        if mesh is None:
            break

        d = mesh.ambient_dim
        k = case.k

        lap_k_sym = LaplaceKernel(d)
        if k == 0:
            k_sym = lap_k_sym
            knl_kwargs = {}
        else:
            k_sym = HelmholtzKernel(d)
            knl_kwargs = {"k": sym.var("k")}

        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
                InterpolatoryQuadratureSimplexGroupFactory
        from pytential.qbx import QBXLayerPotentialSource
        pre_density_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))

        refiner_extra_kwargs = {}
        if case.k != 0:
            # resolve the Helmholtz wavelength during refinement
            refiner_extra_kwargs["kernel_length_scale"] = 5/case.k

        qbx, _ = QBXLayerPotentialSource(
                pre_density_discr, 4*target_order,
                case.qbx_order,
                fmm_order=case.fmm_order,
                fmm_backend=case.fmm_backend,
                _expansions_in_tree_have_extent=True,
                _expansion_stick_out_factor=getattr(
                    case, "_expansion_stick_out_factor", 0),
                ).with_refinement(**refiner_extra_kwargs)

        density_discr = qbx.density_discr

        # {{{ compute values of a solution to the PDE

        nodes_host = density_discr.nodes().get(queue)
        # FIX: np.object was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``object`` is the documented drop-in replacement.
        normal = bind(density_discr, sym.normal(d))(queue).as_vector(object)
        normal_host = [normal[j].get() for j in range(d)]

        if k != 0:
            if d == 2:
                # plane wave
                angle = 0.3
                wave_vec = np.array([np.cos(angle), np.sin(angle)])
                u = np.exp(1j*k*np.tensordot(wave_vec, nodes_host, axes=1))
                grad_u = 1j*k*wave_vec[:, np.newaxis]*u
            elif d == 3:
                # point source away from the geometry
                center = np.array([3, 1, 2])
                diff = nodes_host - center[:, np.newaxis]
                r = la.norm(diff, axis=0)
                u = np.exp(1j*k*r) / r
                grad_u = diff * (1j*k*u/r - u/r**2)
            else:
                raise ValueError("invalid dim")
        else:
            # Laplace: log/point-source potential centered off-surface
            center = np.array([3, 1, 2])[:d]
            diff = nodes_host - center[:, np.newaxis]
            dist_squared = np.sum(diff**2, axis=0)
            dist = np.sqrt(dist_squared)
            if d == 2:
                u = np.log(dist)
                grad_u = diff/dist_squared
            elif d == 3:
                u = 1/dist
                grad_u = -diff/dist**3
            else:
                assert False

        # normal derivative of u on the surface
        dn_u = 0
        for i in range(d):
            dn_u = dn_u + normal_host[i]*grad_u[i]

        # }}}

        u_dev = cl.array.to_device(queue, u)
        dn_u_dev = cl.array.to_device(queue, dn_u)
        grad_u_dev = cl.array.to_device(queue, grad_u)

        key = (case.qbx_order, case.geometry.mesh_name, resolution,
                case.expr.zero_op_name)

        bound_op = bind(qbx, case.expr.get_zero_op(k_sym, **knl_kwargs))
        error = bound_op(
                queue, u=u_dev, dn_u=dn_u_dev, grad_u=grad_u_dev, k=case.k)
        if 0:
            pt.plot(error)
            pt.show()

        linf_error_norm = norm(density_discr, queue, error, p=np.inf)
        print("--->", key, linf_error_norm)

        eoc_rec.add_data_point(qbx.h_max, linf_error_norm)

        if visualize:
            from meshmode.discretization.visualization import make_visualizer
            bdry_vis = make_visualizer(queue, density_discr, target_order)

            bdry_normals = bind(density_discr, sym.normal(mesh.ambient_dim))(queue)\
                    .as_vector(dtype=object)

            bdry_vis.write_vtk_file("source-%s.vtu" % resolution, [
                ("u", u_dev),
                ("bdry_normals", bdry_normals),
                ("error", error),
                ])

    print(eoc_rec)
    tgt_order = case.qbx_order - case.expr.order_drop
    assert eoc_rec.order_estimate() > tgt_order - 1.6