def test_target_specific_qbx(actx_factory, op, helmholtz_k, qbx_order):
    """Check that target-specific QBX evaluation matches conventional
    (expansion-based) QBX to near machine precision on a sphere.

    Builds two geometries that share one discretization — one with and one
    without target-specific acceleration — and compares the resulting
    potentials for the requested layer-potential operator.
    """
    logging.basicConfig(level=logging.INFO)
    actx = actx_factory()

    target_order = 4
    fmm_tol = 1e-3

    from meshmode.mesh.generation import generate_sphere
    mesh = generate_sphere(1, target_order)

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from pytential.qbx import QBXLayerPotentialSource
    pre_density_discr = Discretization(
            actx, mesh,
            InterpolatoryQuadratureSimplexGroupFactory(target_order))

    from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder
    qbx = QBXLayerPotentialSource(
            pre_density_discr, 4 * target_order,
            qbx_order=qbx_order,
            fmm_level_to_order=SimpleExpansionOrderFinder(fmm_tol),
            fmm_backend="fmmlib",
            _expansions_in_tree_have_extent=True,
            _expansion_stick_out_factor=0.9,
            _use_target_specific_qbx=False,
            )

    kernel_length_scale = 5 / abs(helmholtz_k) if helmholtz_k else None

    # Same source, two evaluation strategies.
    places = {
            "qbx": qbx,
            "qbx_target_specific": qbx.copy(_use_target_specific_qbx=True)
            }

    from pytential.qbx.refinement import refine_geometry_collection
    places = GeometryCollection(places, auto_where="qbx")
    places = refine_geometry_collection(
            places, kernel_length_scale=kernel_length_scale)

    density_discr = places.get_discretization("qbx")
    nodes = thaw(density_discr.nodes(), actx)
    u_dev = actx.np.sin(nodes[0])

    if helmholtz_k == 0:
        kernel = LaplaceKernel(3)
        kernel_kwargs = {}
    else:
        kernel = HelmholtzKernel(3, allow_evanescent=True)
        kernel_kwargs = {"k": sym.var("k")}

    u_sym = sym.var("u")

    # Map the operator name onto the corresponding symbolic constructor.
    operators = {"S": sym.S, "D": sym.D, "Sp": sym.Sp}
    if op not in operators:
        raise ValueError("unknown operator: '%s'" % op)
    op = operators[op]

    expr = op(kernel, u_sym, qbx_forced_limit=-1, **kernel_kwargs)

    # Reference result: conventional QBX.
    bound_ref = bind(places, expr)
    pot_ref = actx.to_numpy(
            flatten(bound_ref(actx, u=u_dev, k=helmholtz_k), actx))

    # Same operator, evaluated with target-specific QBX.
    bound_ts = bind(places, expr, auto_where="qbx_target_specific")
    pot_tsqbx = actx.to_numpy(
            flatten(bound_ts(actx, u=u_dev, k=helmholtz_k), actx))

    assert np.allclose(pot_tsqbx, pot_ref, atol=1e-13, rtol=1e-13)
def run_source_refinement_test(ctx_factory, mesh, order, helmholtz_k=None,
        visualize=False):
    """Refine *mesh* as a QBX source geometry and verify the refinement
    criteria hold element-by-element: undisturbed expansion disks,
    sufficient stage-2 quadrature resolution, and (if *helmholtz_k* is
    given) a bounded wavenumber-to-element-size ratio.
    """
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    # {{{ initial geometry

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import (
            InterpolatoryQuadratureSimplexGroupFactory)
    discr = Discretization(actx, mesh,
            InterpolatoryQuadratureSimplexGroupFactory(order))

    lpot_source = QBXLayerPotentialSource(discr,
            qbx_order=order,  # not used in refinement
            fine_order=order)
    places = GeometryCollection(lpot_source)

    # }}}

    # {{{ refined geometry

    kernel_length_scale = 5 / helmholtz_k if helmholtz_k else None
    expansion_disturbance_tolerance = 0.025

    from pytential.qbx.refinement import refine_geometry_collection
    places = refine_geometry_collection(places,
            kernel_length_scale=kernel_length_scale,
            expansion_disturbance_tolerance=expansion_disturbance_tolerance,
            visualize=visualize)

    # }}}

    # Pull all geometry quantities to the host as numpy arrays.
    dd = places.auto_source
    stage1_density_discr = places.get_discretization(dd.geometry)

    from meshmode.dof_array import thaw
    stage1_density_nodes = dof_array_to_numpy(actx,
            thaw(actx, stage1_density_discr.nodes()))

    quad_stage2_density_discr = places.get_discretization(
            dd.geometry, sym.QBX_SOURCE_QUAD_STAGE2)
    quad_stage2_density_nodes = dof_array_to_numpy(actx,
            thaw(actx, quad_stage2_density_discr.nodes()))

    int_centers = dof_array_to_numpy(actx,
            bind(places,
                sym.expansion_centers(lpot_source.ambient_dim, -1))(actx))
    ext_centers = dof_array_to_numpy(actx,
            bind(places,
                sym.expansion_centers(lpot_source.ambient_dim, +1))(actx))
    expansion_radii = dof_array_to_numpy(actx,
            bind(places, sym.expansion_radii(lpot_source.ambient_dim))(actx))

    dd = dd.copy(granularity=sym.GRANULARITY_ELEMENT)
    source_danger_zone_radii = dof_array_to_numpy(actx,
            bind(places, sym._source_danger_zone_radii(
                lpot_source.ambient_dim, dofdesc=dd.to_stage2()))(actx))
    quad_res = dof_array_to_numpy(actx,
            bind(places, sym._quad_resolution(
                lpot_source.ambient_dim, dofdesc=dd))(actx))

    # {{{ check if satisfying criteria

    def check_disk_undisturbed_by_sources(centers_panel, sources_panel):
        if centers_panel.element_nr == sources_panel.element_nr:
            # Same panel
            return

        centers_int = int_centers[:, centers_panel.discr_slice]
        centers_ext = ext_centers[:, centers_panel.discr_slice]
        all_centers = np.append(centers_int, centers_ext, axis=-1)

        nodes = stage1_density_nodes[:, sources_panel.discr_slice]

        # =distance(centers of panel 1, panel 2)
        diff = (all_centers[..., np.newaxis] - nodes[:, np.newaxis, ...]).T
        dist = la.norm(diff, axis=-1).min()

        # Criterion: a center cannot be closer to another panel than to
        # its originating panel.
        rad = expansion_radii[centers_panel.discr_slice]
        assert (dist >= rad * (1-expansion_disturbance_tolerance)).all(), \
                (dist, rad, centers_panel.element_nr, sources_panel.element_nr)

    def check_sufficient_quadrature_resolution(centers_panel, sources_panel):
        dz_radius = source_danger_zone_radii[sources_panel.element_nr]

        centers_int = int_centers[:, centers_panel.discr_slice]
        centers_ext = ext_centers[:, centers_panel.discr_slice]
        all_centers = np.append(centers_int, centers_ext, axis=-1)

        nodes = quad_stage2_density_nodes[:, sources_panel.discr_slice]

        # =distance(centers of panel 1, panel 2)
        diff = (all_centers[..., np.newaxis] - nodes[:, np.newaxis, ...]).T
        dist = la.norm(diff, axis=-1).min()

        # Criterion: the quadrature contribution from each panel is as
        # accurate as from the center's own source panel.
        assert dist >= dz_radius, \
                (dist, dz_radius,
                 centers_panel.element_nr, sources_panel.element_nr)

    def check_quad_res_to_helmholtz_k_ratio(panel):
        # Check wavenumber to panel size ratio.
        assert quad_res[panel.element_nr] * helmholtz_k <= 5

    for panel_1 in iter_elements(stage1_density_discr):
        for panel_2 in iter_elements(stage1_density_discr):
            check_disk_undisturbed_by_sources(panel_1, panel_2)
        for panel_2 in iter_elements(quad_stage2_density_discr):
            check_sufficient_quadrature_resolution(panel_1, panel_2)
        if helmholtz_k is not None:
            check_quad_res_to_helmholtz_k_ratio(panel_1)

    # }}}
def test_compare_cl_and_py_cost_model(ctx_factory):
    """Check that the OpenCL (:class:`QBXCostModel`) and pure-Python
    (:class:`_PythonQBXCostModel`) cost models produce identical results
    for every QBX evaluation stage, and log per-stage timings.

    The original body repeated the same time-and-compare stanza five
    times; it is factored into a single local helper here.
    """
    nelements = 3600
    target_order = 16
    fmm_order = 5
    qbx_order = fmm_order

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    actx = PyOpenCLArrayContext(queue)

    # {{{ Construct geometry

    from meshmode.mesh.generation import make_curve_mesh, starfish
    mesh = make_curve_mesh(starfish, np.linspace(0, 1, nelements), target_order)

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
        InterpolatoryQuadratureSimplexGroupFactory
    pre_density_discr = Discretization(
        actx, mesh,
        InterpolatoryQuadratureSimplexGroupFactory(target_order)
    )

    qbx = QBXLayerPotentialSource(
        pre_density_discr, 4 * target_order,
        qbx_order,
        fmm_order=fmm_order
    )
    places = GeometryCollection(qbx)

    from pytential.qbx.refinement import refine_geometry_collection
    places = refine_geometry_collection(places)

    target_discrs_and_qbx_sides = tuple([(qbx.density_discr, 0)])
    geo_data_dev = qbx.qbx_fmm_geometry_data(
        places, places.auto_source.geometry, target_discrs_and_qbx_sides
    )

    from pytential.qbx.utils import ToHostTransferredGeoDataWrapper
    geo_data = ToHostTransferredGeoDataWrapper(queue, geo_data_dev)

    # }}}

    # {{{ Construct cost models

    cl_cost_model = QBXCostModel()
    python_cost_model = _PythonQBXCostModel()

    tree = geo_data.tree()
    xlat_cost = make_pde_aware_translation_cost_model(
        tree.targets.shape[0], tree.nlevels
    )

    # Fixed calibration/order parameters so both models see identical input.
    constant_one_params = QBXCostModel.get_unit_calibration_params()
    constant_one_params["p_qbx"] = 5
    for ilevel in range(tree.nlevels):
        constant_one_params["p_fmm_lev%d" % ilevel] = 10

    cl_cost_factors = cl_cost_model.qbx_cost_factors_for_kernels_from_model(
        queue, tree.nlevels, xlat_cost, constant_one_params
    )
    python_cost_factors = python_cost_model.qbx_cost_factors_for_kernels_from_model(
        None, tree.nlevels, xlat_cost, constant_one_params
    )

    # }}}

    # Direct-source counts needed by the p2qbxl and tsqbx stages.
    cl_ndirect_sources_per_target_box = (
        cl_cost_model.get_ndirect_sources_per_target_box(
            queue, geo_data_dev.traversal()
        )
    )
    python_ndirect_sources_per_target_box = (
        python_cost_model.get_ndirect_sources_per_target_box(
            queue, geo_data.traversal()
        )
    )

    def _compare_stage(label, cl_func, python_func, cl_args, python_args):
        # Time the OpenCL implementation; queue.finish() brackets the
        # timed region so previously enqueued work is not mis-attributed
        # and the kernel has actually completed before we stop the clock.
        queue.finish()
        start_time = time.time()
        cl_result = cl_func(*cl_args)
        queue.finish()
        logger.info("OpenCL time for {}: {}".format(
            label, str(time.time() - start_time)
        ))

        # Time the pure-Python implementation (host-side, no finish needed).
        start_time = time.time()
        python_result = python_func(*python_args)
        logger.info("Python time for {}: {}".format(
            label, str(time.time() - start_time)
        ))

        # Both models must agree exactly.
        assert np.array_equal(cl_result.get(), python_result)

    _compare_stage(
        "process_form_qbxl",
        cl_cost_model.process_form_qbxl,
        python_cost_model.process_form_qbxl,
        (queue, geo_data_dev, cl_cost_factors["p2qbxl_cost"],
            cl_ndirect_sources_per_target_box),
        (queue, geo_data, python_cost_factors["p2qbxl_cost"],
            python_ndirect_sources_per_target_box))

    _compare_stage(
        "process_m2qbxl",
        cl_cost_model.process_m2qbxl,
        python_cost_model.process_m2qbxl,
        (queue, geo_data_dev, cl_cost_factors["m2qbxl_cost"]),
        (queue, geo_data, python_cost_factors["m2qbxl_cost"]))

    _compare_stage(
        "process_l2qbxl",
        cl_cost_model.process_l2qbxl,
        python_cost_model.process_l2qbxl,
        (queue, geo_data_dev, cl_cost_factors["l2qbxl_cost"]),
        (queue, geo_data, python_cost_factors["l2qbxl_cost"]))

    _compare_stage(
        "process_eval_qbxl",
        cl_cost_model.process_eval_qbxl,
        python_cost_model.process_eval_qbxl,
        (queue, geo_data_dev, cl_cost_factors["qbxl2p_cost"]),
        (queue, geo_data, python_cost_factors["qbxl2p_cost"]))

    _compare_stage(
        "eval_target_specific_qbxl",
        cl_cost_model.process_eval_target_specific_qbxl,
        python_cost_model.process_eval_target_specific_qbxl,
        (queue, geo_data_dev, cl_cost_factors["p2p_tsqbx_cost"],
            cl_ndirect_sources_per_target_box),
        (queue, geo_data, python_cost_factors["p2p_tsqbx_cost"],
            python_ndirect_sources_per_target_box))
extend_factor=vis_extend_factor) from pytential.target import PointsTarget plot_targets = PointsTarget(fplot.points) places.update({ "qbx_target_tol": qbx.copy(target_association_tolerance=0.15), "plot_targets": plot_targets }) places = GeometryCollection(places, auto_where=case.name) if case.use_refinement: from pytential.qbx.refinement import refine_geometry_collection places = refine_geometry_collection(places, **refiner_extra_kwargs) dd = sym.as_dofdesc(case.name).to_stage1() density_discr = places.get_discretization(dd.geometry) logger.info("nelements: %d", density_discr.mesh.nelements) logger.info("ndofs: %d", density_discr.ndofs) if case.use_refinement: logger.info("%d elements before refinement", qbx.density_discr.mesh.nelements) discr = places.get_discretization(dd.geometry, sym.QBX_SOURCE_STAGE1) logger.info("%d stage-1 elements after refinement", discr.mesh.nelements)
qbx = QBXLayerPotentialSource( pre_density_discr, 4*target_order, case.qbx_order, fmm_order=case.fmm_order, fmm_backend=case.fmm_backend, target_association_tolerance=1.0e-1, _expansions_in_tree_have_extent=True, _expansion_stick_out_factor=getattr( case, "_expansion_stick_out_factor", 0), ) places = GeometryCollection(qbx) from pytential.qbx.refinement import refine_geometry_collection kernel_length_scale = 5 / case.k if case.k else None places = refine_geometry_collection(places, kernel_length_scale=kernel_length_scale) # {{{ compute values of a solution to the PDE density_discr = places.get_discretization(places.auto_source.geometry) from meshmode.dof_array import thaw, flatten, unflatten nodes_host = [actx.to_numpy(axis) for axis in flatten(thaw(actx, density_discr.nodes()))] normal = bind(places, sym.normal(d))(actx).as_vector(object) normal_host = [actx.to_numpy(axis)for axis in flatten(normal)] if k != 0: if d == 2: angle = 0.3 wave_vec = np.array([np.cos(angle), np.sin(angle)])
def test_build_matrix(ctx_factory, k, curve_fn, op_type, visualize=False):
    """Checks that the matrix built with `symbolic.execution.build_matrix`
    gives the same (to tolerance) answer as a direct evaluation.
    """
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    case = extra.CurveTestCase(
            name="curve",
            knl_class_or_helmholtz_k=k,
            curve_fn=curve_fn,
            op_type=op_type,
            target_order=7,
            qbx_order=4,
            resolutions=[30])
    logger.info("\n%s", case)

    # {{{ geometry

    qbx = case.get_layer_potential(actx, case.resolutions[-1],
            case.target_order)

    from pytential.qbx.refinement import refine_geometry_collection
    places = GeometryCollection(qbx, auto_where=case.name)
    places = refine_geometry_collection(places,
            kernel_length_scale=(5 / k if k else None))

    dd = places.auto_source.to_stage1()
    density_discr = places.get_discretization(dd.geometry)

    logger.info("nelements: %d", density_discr.mesh.nelements)
    logger.info("ndofs: %d", density_discr.ndofs)

    # }}}

    # {{{ symbolic

    sym_u, sym_op = case.get_operator(places.ambient_dim)
    bound_op = bind(places, sym_op)

    # }}}

    # {{{ dense matrix

    from pytential.symbolic.execution import build_matrix
    mat = actx.to_numpy(build_matrix(
            actx, places, sym_op, sym_u,
            context=case.knl_concrete_kwargs))

    if visualize:
        try:
            import matplotlib.pyplot as pt
        except ImportError:
            visualize = False

    if visualize:
        # Cross-check against a matrix assembled column-by-column via matvec.
        from sumpy.tools import build_matrix as build_matrix_via_matvec
        mat2 = bound_op.scipy_op(actx, "u", dtype=mat.dtype,
                **case.knl_concrete_kwargs)
        mat2 = build_matrix_via_matvec(mat2)

        logger.info(
                "real %.5e imag %.5e",
                la.norm((mat - mat2).real, "fro") / la.norm(mat2.real, "fro"),
                la.norm((mat - mat2).imag, "fro") / la.norm(mat2.imag, "fro"))

        pt.subplot(121)
        pt.imshow(np.log10(np.abs(1.0e-20 + (mat - mat2).real)))
        pt.colorbar()
        pt.subplot(122)
        pt.imshow(np.log10(np.abs(1.0e-20 + (mat - mat2).imag)))
        pt.colorbar()
        pt.show()
        pt.clf()

        pt.subplot(121)
        pt.imshow(mat.real)
        pt.colorbar()
        pt.subplot(122)
        pt.imshow(mat.imag)
        pt.colorbar()
        pt.show()
        pt.clf()

    # }}}

    # {{{ check

    from pytential.utils import unflatten_from_numpy, flatten_to_numpy

    np.random.seed(12)
    for it in range(5):
        # Random density; an object array of components if the unknown
        # is vector-valued.
        if isinstance(sym_u, np.ndarray):
            u = make_obj_array([
                np.random.randn(density_discr.ndofs)
                for _ in range(len(sym_u))
                ])
        else:
            u = np.random.randn(density_discr.ndofs)
        u_dev = unflatten_from_numpy(actx, density_discr, u)

        res_matvec = np.hstack(flatten_to_numpy(
            actx, bound_op(actx, u=u_dev, **case.knl_concrete_kwargs)))
        res_mat = mat.dot(np.hstack(u))

        abs_err = la.norm(res_mat - res_matvec, np.inf)
        rel_err = abs_err / la.norm(res_matvec, np.inf)
        logger.info("AbsErr {:.5e} RelErr {:.5e}".format(abs_err, rel_err))

        assert rel_err < 1.0e-13, 'iteration: {}'.format(it)

    # }}}
def test_build_matrix_conditioning(actx_factory, side, op_type, visualize=False):
    """Checks that :math:`I + K`, where :math:`K` is compact gives a
    well-conditioned operator when it should. For example, the exterior
    Laplace problem has a nullspace, so we check that and remove it.
    """
    actx = actx_factory()

    # prevent cache explosion
    from sympy.core.cache import clear_cache
    clear_cache()

    case = extra.CurveTestCase(
            name="ellipse",
            curve_fn=lambda t: ellipse(3.0, t),
            target_order=16,
            source_ovsmp=1,
            qbx_order=4,
            resolutions=[64],
            op_type=op_type,
            side=side,
            )
    logger.info("\n%s", case)

    # {{{ geometry

    qbx = case.get_layer_potential(actx, case.resolutions[-1],
            case.target_order)

    from pytential.qbx.refinement import refine_geometry_collection
    places = GeometryCollection(qbx, auto_where=case.name)
    places = refine_geometry_collection(places,
            refine_discr_stage=sym.QBX_SOURCE_QUAD_STAGE2)

    dd = places.auto_source.to_stage1()
    density_discr = places.get_discretization(dd.geometry)

    logger.info("nelements: %d", density_discr.mesh.nelements)
    logger.info("ndofs: %d", density_discr.ndofs)

    # }}}

    # {{{ check matrix

    from pytential.symbolic.execution import build_matrix
    sym_u, sym_op = case.get_operator(
            places.ambient_dim, qbx_forced_limit="avg")

    mat = actx.to_numpy(build_matrix(
            actx, places, sym_op, sym_u,
            context=case.knl_concrete_kwargs))

    kappa = la.cond(mat)
    _, sigma, _ = la.svd(mat)

    logger.info("cond: %.5e sigma_max %.5e", kappa, sigma[0])

    if side == +1 and op_type == "double":
        # NOTE: exterior Laplace has a nullspace
        assert kappa > 1.0e+9
        assert sigma[-1] < 1.0e-9

        # remove the nullspace and check that it worked
        # NOTE: this adds the "mean" to remove the nullspace for the operator
        # See `pytential.symbolic.pde.scalar` for the equivalent formulation
        w = actx.to_numpy(flatten(
            bind(places, sym.sqrt_jac_q_weight(places.ambient_dim)**2)(actx),
            actx))
        w = np.tile(w.reshape(-1, 1), w.size).T

        kappa = la.cond(mat + w)
        assert kappa < 1.0e+2
    else:
        assert kappa < 1.0e+1
        assert sigma[-1] > 1.0e-2

    # }}}

    # {{{ plot

    if not visualize:
        return

    side = "int" if side == -1 else "ext"

    import matplotlib.pyplot as plt
    plt.imshow(mat)
    plt.colorbar()
    plt.title(fr"$\kappa(A) = {kappa:.5e}$")
    plt.savefig(f"test_cond_{op_type}_{side}_mat")
    plt.clf()

    plt.plot(sigma)
    plt.ylabel(r"$\sigma$")
    plt.grid()
    plt.savefig(f"test_cond_{op_type}_{side}_svd")
    plt.clf()

    # }}}
def run_source_refinement_test(actx_factory, mesh, order, helmholtz_k=None,
        surface_name="surface", visualize=False):
    """Refine *mesh* as a QBX source geometry and verify the refinement
    criteria element-by-element: undisturbed expansion disks, sufficient
    stage-2 quadrature resolution, and (if *helmholtz_k* is given) a
    bounded wavenumber-to-element-size ratio. Optionally writes VTK
    visualizations of the quadrature resolution at each stage.
    """
    actx = actx_factory()

    # {{{ initial geometry

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureGroupFactory
    discr = Discretization(actx, mesh,
            InterpolatoryQuadratureGroupFactory(order))

    lpot_source = QBXLayerPotentialSource(discr,
            qbx_order=order,  # not used in refinement
            fine_order=order)
    places = GeometryCollection(lpot_source)

    logger.info("nelements: %d", discr.mesh.nelements)
    logger.info("ndofs: %d", discr.ndofs)

    # }}}

    # {{{ refined geometry

    def _visualize_quad_resolution(_places, dd, suffix):
        if dd.discr_stage is None:
            vis_discr = lpot_source.density_discr
        else:
            vis_discr = _places.get_discretization(dd.geometry, dd.discr_stage)

        stretch = bind(_places,
                sym._simplex_mapping_max_stretch_factor(
                    _places.ambient_dim, with_elementwise_max=False),
                auto_where=dd)(actx)

        from meshmode.discretization.visualization import make_visualizer
        vis = make_visualizer(actx, vis_discr, order, force_equidistant=True)
        vis.write_vtk_file(
                f"global-qbx-source-refinement-{surface_name}-{order}-{suffix}.vtu",
                [("stretch", stretch)],
                overwrite=True, use_high_order=True)

    kernel_length_scale = 5 / helmholtz_k if helmholtz_k else None
    expansion_disturbance_tolerance = 0.025

    from pytential.qbx.refinement import refine_geometry_collection
    places = refine_geometry_collection(places,
            kernel_length_scale=kernel_length_scale,
            expansion_disturbance_tolerance=expansion_disturbance_tolerance,
            visualize=False)

    if visualize:
        dd = places.auto_source
        _visualize_quad_resolution(places, dd.copy(discr_stage=None), "original")
        _visualize_quad_resolution(places, dd.to_stage1(), "stage1")
        _visualize_quad_resolution(places, dd.to_stage2(), "stage2")

    # }}}

    # Pull all geometry quantities to the host as (ambient_dim, ndofs)
    # numpy arrays.
    dd = places.auto_source
    ambient_dim = places.ambient_dim

    stage1_density_discr = places.get_discretization(dd.geometry)
    stage1_density_nodes = actx.to_numpy(
            flatten(stage1_density_discr.nodes(), actx)
            ).reshape(ambient_dim, -1)

    quad_stage2_density_discr = places.get_discretization(
            dd.geometry, sym.QBX_SOURCE_QUAD_STAGE2)
    quad_stage2_density_nodes = actx.to_numpy(
            flatten(quad_stage2_density_discr.nodes(), actx)
            ).reshape(ambient_dim, -1)

    int_centers = actx.to_numpy(
            flatten(bind(places,
                sym.expansion_centers(ambient_dim, -1))(actx), actx)
            ).reshape(ambient_dim, -1)
    ext_centers = actx.to_numpy(
            flatten(bind(places,
                sym.expansion_centers(ambient_dim, +1))(actx), actx)
            ).reshape(ambient_dim, -1)
    expansion_radii = actx.to_numpy(
            flatten(bind(places, sym.expansion_radii(ambient_dim))(actx), actx))

    dd = dd.copy(granularity=sym.GRANULARITY_ELEMENT)
    source_danger_zone_radii = actx.to_numpy(
            flatten(bind(places, sym._source_danger_zone_radii(
                ambient_dim, dofdesc=dd.to_stage2()))(actx), actx))
    quad_res = actx.to_numpy(
            flatten(bind(places, sym._quad_resolution(
                ambient_dim, dofdesc=dd))(actx), actx))

    # {{{ check if satisfying criteria

    def check_disk_undisturbed_by_sources(centers_element, sources_element):
        if centers_element.index == sources_element.index:
            # Same element
            return

        centers_int = int_centers[:, centers_element.discr_slice]
        centers_ext = ext_centers[:, centers_element.discr_slice]
        all_centers = np.append(centers_int, centers_ext, axis=-1)

        nodes = stage1_density_nodes[:, sources_element.discr_slice]

        # =distance(centers of element 1, element 2)
        diff = (all_centers[..., np.newaxis] - nodes[:, np.newaxis, ...]).T
        dist = la.norm(diff, axis=-1).min()

        # Criterion: a center cannot be closer to another element than to
        # its originating element.
        rad = expansion_radii[centers_element.discr_slice]
        assert np.all(dist >= rad * (1 - expansion_disturbance_tolerance)), \
                (dist, rad, centers_element.index, sources_element.index)

    def check_sufficient_quadrature_resolution(centers_element, sources_element):
        dz_radius = source_danger_zone_radii[sources_element.index]

        centers_int = int_centers[:, centers_element.discr_slice]
        centers_ext = ext_centers[:, centers_element.discr_slice]
        all_centers = np.append(centers_int, centers_ext, axis=-1)

        nodes = quad_stage2_density_nodes[:, sources_element.discr_slice]

        # =distance(centers of element 1, element 2)
        diff = (all_centers[..., np.newaxis] - nodes[:, np.newaxis, ...]).T
        dist = la.norm(diff, axis=-1).min()

        # Criterion: the quadrature contribution from each element is as
        # accurate as from the center's own source element.
        assert dist >= dz_radius, \
                (dist, dz_radius, centers_element.index, sources_element.index)

    def check_quad_res_to_helmholtz_k_ratio(element):
        # Check wavenumber to element size ratio.
        assert quad_res[element.index] * helmholtz_k <= 5

    for element_1 in iter_elements(stage1_density_discr):
        for element_2 in iter_elements(stage1_density_discr):
            check_disk_undisturbed_by_sources(element_1, element_2)
        for element_2 in iter_elements(quad_stage2_density_discr):
            check_sufficient_quadrature_resolution(element_1, element_2)
        if helmholtz_k is not None:
            check_quad_res_to_helmholtz_k_ratio(element_1)

    # }}}