def test_interpolation(actx_factory, name, source_discr_stage, target_granularity):
    actx = actx_factory()

    nelements = 32
    target_order = 7
    qbx_order = 4

    where = sym.as_dofdesc("test_interpolation")
    from_dd = sym.DOFDescriptor(
            geometry=where.geometry,
            discr_stage=source_discr_stage,
            granularity=sym.GRANULARITY_NODE)
    to_dd = sym.DOFDescriptor(
            geometry=where.geometry,
            discr_stage=sym.QBX_SOURCE_QUAD_STAGE2,
            granularity=target_granularity)

    mesh = mgen.make_curve_mesh(mgen.starfish,
            np.linspace(0.0, 1.0, nelements + 1),
            target_order)
    discr = Discretization(actx, mesh,
            InterpolatoryQuadratureSimplexGroupFactory(target_order))

    from pytential.qbx import QBXLayerPotentialSource
    qbx = QBXLayerPotentialSource(discr,
            fine_order=4 * target_order,
            qbx_order=qbx_order,
            fmm_order=False)

    from pytential import GeometryCollection
    places = GeometryCollection(qbx, auto_where=where)

    sigma_sym = sym.var("sigma")
    op_sym = sym.sin(sym.interp(from_dd, to_dd, sigma_sym))
    bound_op = bind(places, op_sym, auto_where=where)

    def discr_and_nodes(stage):
        density_discr = places.get_discretization(where.geometry, stage)
        return density_discr, actx.to_numpy(
                flatten(density_discr.nodes(), actx)
                ).reshape(density_discr.ambient_dim, -1)

    _, target_nodes = discr_and_nodes(sym.QBX_SOURCE_QUAD_STAGE2)
    source_discr, source_nodes = discr_and_nodes(source_discr_stage)

    sigma_target = np.sin(la.norm(target_nodes, axis=0))
    sigma_dev = unflatten(
            thaw(source_discr.nodes()[0], actx),
            actx.from_numpy(la.norm(source_nodes, axis=0)),
            actx)
    sigma_target_interp = actx.to_numpy(
            flatten(bound_op(actx, sigma=sigma_dev), actx)
            )

    if name in ("default", "default_explicit", "stage2", "quad"):
        error = la.norm(sigma_target_interp - sigma_target) / la.norm(sigma_target)
        assert error < 1.0e-10
    elif name in ("stage2_center",):
        assert len(sigma_target_interp) == 2 * len(sigma_target)
    else:
        raise ValueError(f"unknown test case name: {name}")
def map_num_reference_derivative(self, expr):
    from pytential import bind, sym

    rec_operand = self.rec(expr.operand)
    assert isinstance(rec_operand, np.ndarray)
    if self.is_kind_matrix(rec_operand):
        raise NotImplementedError("derivatives")

    actx = self.array_context
    dofdesc = expr.dofdesc
    op = sym.NumReferenceDerivative(
            ref_axes=expr.ref_axes,
            operand=sym.var("u"),
            dofdesc=dofdesc)

    discr = self.places.get_discretization(dofdesc.geometry, dofdesc.discr_stage)
    template_ary = thaw(discr.nodes()[0], actx)
    rec_operand = unflatten(template_ary, actx.from_numpy(rec_operand), actx)

    return actx.to_numpy(
            flatten(
                bind(self.places, op)(self.array_context, u=rec_operand),
                actx))
def map_interpolation(self, expr):
    from pytential import sym

    if expr.to_dd.discr_stage != sym.QBX_SOURCE_QUAD_STAGE2:
        raise RuntimeError(
                "can only interpolate to QBX_SOURCE_QUAD_STAGE2")

    operand = self.rec(expr.operand)
    actx = self.array_context

    if isinstance(operand, (int, float, complex, np.number)):
        return operand
    elif isinstance(operand, np.ndarray) and operand.ndim == 1:
        conn = self.places.get_connection(expr.from_dd, expr.to_dd)
        discr = self.places.get_discretization(
                expr.from_dd.geometry, expr.from_dd.discr_stage)
        template_ary = thaw(discr.nodes()[0], actx)

        from pytools.obj_array import make_obj_array
        return make_obj_array([
            actx.to_numpy(
                flatten(
                    conn(unflatten(template_ary, actx.from_numpy(o), actx)),
                    actx))
            for o in operand
            ])
    elif isinstance(operand, np.ndarray) and operand.ndim == 2:
        cache = self.places._get_cache(MatrixBuilderDirectResamplerCacheKey)
        key = (expr.from_dd.geometry,
                expr.from_dd.discr_stage,
                expr.to_dd.discr_stage)

        try:
            mat = cache[key]
        except KeyError:
            from meshmode.discretization.connection import \
                    flatten_chained_connection
            from meshmode.discretization.connection.direct import \
                    make_direct_full_resample_matrix

            conn = self.places.get_connection(expr.from_dd, expr.to_dd)
            conn = flatten_chained_connection(actx, conn)
            mat = actx.to_numpy(
                    make_direct_full_resample_matrix(actx, conn))

            # FIXME: the resample matrix is slow to compute and very big
            # to store, so caching it may not be the best idea
            cache[key] = mat

        return mat.dot(operand)
    else:
        raise RuntimeError("unknown operand type: {}".format(
            type(operand)))
def exec_compute_potential_insn_direct(self, actx: PyOpenCLArrayContext,
        insn, bound_expr, evaluate):
    kernel_args = {}

    for arg_name, arg_expr in insn.kernel_arguments.items():
        kernel_args[arg_name] = flatten(evaluate(arg_expr), actx,
                leaf_class=DOFArray)

    from pytential import bind, sym
    waa = bind(bound_expr.places,
            sym.weights_and_area_elements(
                self.ambient_dim, dofdesc=insn.source))(actx)
    strengths = [waa * evaluate(density) for density in insn.densities]
    flat_strengths = [flatten(strength, actx) for strength in strengths]

    results = []
    p2p = None

    for o in insn.outputs:
        target_discr = bound_expr.places.get_discretization(
                o.target_name.geometry, o.target_name.discr_stage)

        if p2p is None:
            p2p = self.get_p2p(actx,
                    source_kernels=insn.source_kernels,
                    target_kernels=insn.target_kernels)

        evt, output_for_each_kernel = p2p(actx.queue,
                targets=flatten(target_discr.nodes(), actx, leaf_class=DOFArray),
                sources=flatten(self.density_discr.nodes(), actx,
                    leaf_class=DOFArray),
                strength=flat_strengths, **kernel_args)

        from meshmode.discretization import Discretization
        result = output_for_each_kernel[o.target_kernel_index]
        if isinstance(target_discr, Discretization):
            template_ary = thaw(target_discr.nodes()[0], actx)
            result = unflatten(template_ary, result, actx, strict=False)

        results.append((o.name, result))

    timing_data = {}
    return results, timing_data
def exec_compute_potential_insn(self, actx, insn, bound_expr, evaluate,
        return_timing_data):
    if return_timing_data:
        from warnings import warn
        warn(
                "Timing data collection not supported.",
                category=UnableToCollectTimingData)

    p2p = None

    kernel_args = evaluate_kernel_arguments(
            actx, evaluate, insn.kernel_arguments, flat=False)
    strengths = [evaluate(density) for density in insn.densities]

    # FIXME: Do this all at once
    results = []
    for o in insn.outputs:
        target_discr = bound_expr.places.get_discretization(
                o.target_name.geometry, o.target_name.discr_stage)

        # no on-disk kernel caching
        if p2p is None:
            p2p = self.get_p2p(actx,
                    source_kernels=insn.source_kernels,
                    target_kernels=insn.target_kernels)

        evt, output_for_each_kernel = p2p(actx.queue,
                targets=flatten(target_discr.nodes(), actx, leaf_class=DOFArray),
                sources=self._nodes,
                strength=strengths, **kernel_args)

        from meshmode.discretization import Discretization
        result = output_for_each_kernel[o.target_kernel_index]
        if isinstance(target_discr, Discretization):
            template_ary = thaw(target_discr.nodes()[0], actx)
            result = unflatten(template_ary, result, actx, strict=False)

        results.append((o.name, result))

    timing_data = {}
    return results, timing_data
def finish(self):
    # Wait for the nonblocking receive request to complete before
    # accessing the data
    self.recv_req.Wait()

    # Nonblocking receive is complete, we can now access the data and apply
    # the boundary-swap connection
    actx = self.array_context
    remote_bdry_data_flat = from_numpy(self.remote_data_host_numpy, actx)
    remote_bdry_data = unflatten(self.local_bdry_data,
            remote_bdry_data_flat, actx)
    bdry_conn = self.dcoll.distributed_boundary_swap_connection(
            dof_desc.as_dofdesc(dof_desc.DTAG_BOUNDARY(self.remote_btag)))
    swapped_remote_bdry_data = bdry_conn(remote_bdry_data)

    # Complete the nonblocking send request associated with communicating
    # `self.local_bdry_data_np`
    self.send_req.Wait()

    return TracePair(self.remote_btag,
            interior=self.local_bdry_data,
            exterior=swapped_remote_bdry_data)
def unflatten(self, ary):
    # Convert a flat version of *ary* into a structured version.
    components = []
    for discr, (start, end) in zip(self.discrs, self.starts_and_ends):
        component = ary[start:end]

        from meshmode.discretization import Discretization
        if isinstance(discr, Discretization):
            from arraycontext import unflatten
            template_ary = thaw(discr.nodes()[0], self.array_context)
            component = unflatten(
                    template_ary, component, self.array_context,
                    strict=False)

        components.append(component)

    if self._operator_uses_obj_array:
        from pytools.obj_array import make_obj_array
        return make_obj_array(components)
    else:
        return components[0]
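# The flat-to-structured conversion above relies on the flatten/unflatten pair
# from arraycontext that recurs throughout these snippets. The following is a
# minimal, self-contained sketch of that round trip; the geometry parameters
# are arbitrary and chosen purely for illustration, and the helper name
# _example_flatten_unflatten_roundtrip is not part of any library API.
def _example_flatten_unflatten_roundtrip():
    import numpy as np
    import pyopencl as cl

    from arraycontext import flatten, unflatten, thaw
    from meshmode.array_context import PyOpenCLArrayContext
    from meshmode.mesh.generation import make_curve_mesh, starfish
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory

    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)
    actx = PyOpenCLArrayContext(queue, force_device_scalars=True)

    # a small curve discretization, purely for illustration
    mesh = make_curve_mesh(starfish, np.linspace(0.0, 1.0, 17), 4)
    discr = Discretization(actx, mesh,
            InterpolatoryQuadratureSimplexGroupFactory(4))

    # any DOFArray on the discretization can serve as the unflatten template
    template_ary = thaw(discr.nodes()[0], actx)

    flat = flatten(template_ary, actx)                # one flat device array
    structured = unflatten(template_ary, flat, actx)  # back to a DOFArray
    return structured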
def exec_compute_potential_insn_direct(self, actx, insn, bound_expr, evaluate,
        return_timing_data):
    from pytential import bind, sym
    from meshmode.discretization import Discretization

    if return_timing_data:
        from pytential.source import UnableToCollectTimingData
        from warnings import warn
        warn(
                "Timing data collection not supported.",
                category=UnableToCollectTimingData)

    # {{{ evaluate and flatten inputs

    @memoize_in(bound_expr.places, (QBXLayerPotentialSource, "flat_nodes"))
    def _flat_nodes(dofdesc):
        discr = bound_expr.places.get_discretization(
                dofdesc.geometry, dofdesc.discr_stage)

        return freeze(flatten(discr.nodes(), actx, leaf_class=DOFArray), actx)

    @memoize_in(bound_expr.places,
            (QBXLayerPotentialSource, "flat_expansion_radii"))
    def _flat_expansion_radii(dofdesc):
        radii = bind(
                bound_expr.places,
                sym.expansion_radii(self.ambient_dim, dofdesc=dofdesc),
                )(actx)

        return freeze(flatten(radii, actx), actx)

    @memoize_in(bound_expr.places, (QBXLayerPotentialSource, "flat_centers"))
    def _flat_centers(dofdesc, qbx_forced_limit):
        centers = bind(bound_expr.places,
                sym.expansion_centers(
                    self.ambient_dim, qbx_forced_limit, dofdesc=dofdesc),
                )(actx)

        return freeze(flatten(centers, actx, leaf_class=DOFArray), actx)

    from pytential.source import evaluate_kernel_arguments
    flat_kernel_args = evaluate_kernel_arguments(
            actx, evaluate, insn.kernel_arguments, flat=True)

    flat_strengths = get_flat_strengths_from_densities(
            actx, bound_expr.places, evaluate, insn.densities,
            dofdesc=insn.source)

    flat_source_nodes = _flat_nodes(insn.source)

    # }}}

    # {{{ partition interactions in target kernels

    from collections import defaultdict
    self_outputs = defaultdict(list)
    other_outputs = defaultdict(list)

    for i, o in enumerate(insn.outputs):
        # For purposes of figuring out whether this is a self-interaction,
        # disregard discr_stage.
        source_dd = insn.source.copy(discr_stage=o.target_name.discr_stage)

        target_discr = bound_expr.places.get_discretization(
                o.target_name.geometry, o.target_name.discr_stage)
        density_discr = bound_expr.places.get_discretization(
                source_dd.geometry, source_dd.discr_stage)

        if target_discr is density_discr:
            # NOTE: QBXPreprocessor is supposed to have taken care of this
            assert o.qbx_forced_limit is not None
            assert abs(o.qbx_forced_limit) > 0

            self_outputs[(o.target_name, o.qbx_forced_limit)].append((i, o))
        else:
            qbx_forced_limit = o.qbx_forced_limit
            if qbx_forced_limit is None:
                qbx_forced_limit = 0

            other_outputs[(o.target_name, qbx_forced_limit)].append((i, o))

    queue = actx.queue
    results = [None] * len(insn.outputs)

    # }}}

    # {{{ self interactions

    # FIXME: Do this all at once
    lpot_applier = self.get_lpot_applier(
            insn.target_kernels, insn.source_kernels)

    for (target_name, qbx_forced_limit), outputs in self_outputs.items():
        target_discr = bound_expr.places.get_discretization(
                target_name.geometry, target_name.discr_stage)
        flat_target_nodes = _flat_nodes(target_name)

        evt, output_for_each_kernel = lpot_applier(queue,
                targets=flat_target_nodes,
                sources=flat_source_nodes,
                centers=_flat_centers(target_name, qbx_forced_limit),
                strengths=flat_strengths,
                expansion_radii=_flat_expansion_radii(target_name),
                **flat_kernel_args)

        for i, o in outputs:
            result = output_for_each_kernel[o.target_kernel_index]
            if isinstance(target_discr, Discretization):
                template_ary = thaw(target_discr.nodes()[0], actx)
                result = unflatten(template_ary, result, actx, strict=False)

            results[i] = (o.name, result)

    # }}}

    # {{{ off-surface interactions

    if other_outputs:
        p2p = self.get_p2p(actx, insn.target_kernels, insn.source_kernels)
        lpot_applier_on_tgt_subset = self.get_lpot_applier_on_tgt_subset(
                insn.target_kernels, insn.source_kernels)

        for (target_name, qbx_forced_limit), outputs in other_outputs.items():
            target_discr = bound_expr.places.get_discretization(
                    target_name.geometry, target_name.discr_stage)
            flat_target_nodes = _flat_nodes(target_name)

            # FIXME: (Somewhat wastefully) compute P2P for all targets
            evt, output_for_each_kernel = p2p(queue,
                    targets=flat_target_nodes,
                    sources=flat_source_nodes,
                    strength=flat_strengths, **flat_kernel_args)

            target_discrs_and_qbx_sides = ((target_discr, qbx_forced_limit),)
            geo_data = self.qbx_fmm_geometry_data(
                    bound_expr.places,
                    insn.source.geometry,
                    target_discrs_and_qbx_sides=target_discrs_and_qbx_sides)

            # center-related info is independent of targets

            # First ncenters targets are the centers
            tgt_to_qbx_center = actx.np.copy(actx.thaw(
                geo_data.user_target_to_center()[geo_data.ncenters:]
                ))

            qbx_tgt_numberer = self.get_qbx_target_numberer(
                    tgt_to_qbx_center.dtype)
            qbx_tgt_count = actx.empty((), np.int32)
            qbx_tgt_numbers = actx.empty_like(tgt_to_qbx_center)

            qbx_tgt_numberer(
                    tgt_to_qbx_center, qbx_tgt_numbers, qbx_tgt_count,
                    queue=queue)

            qbx_tgt_count = int(actx.to_numpy(qbx_tgt_count).item())

            if (abs(qbx_forced_limit) == 1
                    and qbx_tgt_count < target_discr.ndofs):
                raise RuntimeError(
                        "Did not find a matching QBX center for some targets")

            qbx_tgt_numbers = qbx_tgt_numbers[:qbx_tgt_count]
            qbx_center_numbers = tgt_to_qbx_center[qbx_tgt_numbers]
            qbx_center_numbers.finish()

            tgt_subset_kwargs = flat_kernel_args.copy()
            for i, res_i in enumerate(output_for_each_kernel):
                tgt_subset_kwargs[f"result_{i}"] = res_i

            if qbx_tgt_count:
                lpot_applier_on_tgt_subset(
                        queue,
                        targets=flat_target_nodes,
                        sources=flat_source_nodes,
                        centers=geo_data.flat_centers(),
                        expansion_radii=geo_data.flat_expansion_radii(),
                        strengths=flat_strengths,
                        qbx_tgt_numbers=qbx_tgt_numbers,
                        qbx_center_numbers=qbx_center_numbers,
                        **tgt_subset_kwargs)

            for i, o in outputs:
                result = output_for_each_kernel[o.target_kernel_index]
                if isinstance(target_discr, Discretization):
                    template_ary = thaw(target_discr.nodes()[0], actx)
                    result = unflatten(template_ary, result, actx, strict=False)

                results[i] = (o.name, result)

    # }}}

    timing_data = {}
    return results, timing_data
def exec_compute_potential_insn_fmm(self, actx: PyOpenCLArrayContext,
        insn, bound_expr, evaluate, fmm_driver):
    """
    :arg fmm_driver: A function that accepts five arguments:
        *wrangler*, *strength*, *geo_data*, *kernel*, *kernel_arguments*
    :returns: a tuple ``(assignments, extra_outputs)``, where *assignments*
        is a list of tuples containing pairs ``(name, value)`` representing
        assignments to be performed in the evaluation context.
        *extra_outputs* is data that *fmm_driver* may return
        (such as timing data), passed through unmodified.
    """
    target_name_and_side_to_number, target_discrs_and_qbx_sides = (
            self.get_target_discrs_and_qbx_sides(insn, bound_expr))

    geo_data = self.qbx_fmm_geometry_data(
            bound_expr.places,
            insn.source.geometry,
            target_discrs_and_qbx_sides)

    # FIXME Exert more positive control over geo_data attribute lifetimes using
    # geo_data.<method>.clear_cache(geo_data).

    # FIXME Synthesize "bad centers" around corners and edges that have
    # inadequate QBX coverage.

    # FIXME don't compute *all* output kernels on all targets--respect that
    # some target discretizations may only be asking for derivatives (e.g.)

    flat_strengths = get_flat_strengths_from_densities(
            actx, bound_expr.places, evaluate, insn.densities,
            dofdesc=insn.source)

    base_kernel = single_valued(knl.get_base_kernel()
            for knl in insn.source_kernels)

    output_and_expansion_dtype = (
            self.get_fmm_output_and_expansion_dtype(insn.source_kernels,
                flat_strengths[0]))
    kernel_extra_kwargs, source_extra_kwargs = (
            self.get_fmm_expansion_wrangler_extra_kwargs(
                actx, insn.target_kernels + insn.source_kernels,
                geo_data.tree().user_source_ids, insn.kernel_arguments,
                evaluate))

    tree_indep = self._tree_indep_data_for_wrangler(
            target_kernels=insn.target_kernels,
            source_kernels=insn.source_kernels)

    wrangler = tree_indep.wrangler_cls(
            tree_indep, geo_data, output_and_expansion_dtype,
            self.qbx_order,
            self.fmm_level_to_order,
            source_extra_kwargs=source_extra_kwargs,
            kernel_extra_kwargs=kernel_extra_kwargs)

    from pytential.qbx.geometry import target_state
    if actx.to_numpy(actx.np.any(
            actx.thaw(geo_data.user_target_to_center())
            == target_state.FAILED)):
        raise RuntimeError("geometry has failed targets")

    # {{{ geometry data inspection hook

    if self.geometry_data_inspector is not None:
        perform_fmm = self.geometry_data_inspector(insn, bound_expr, geo_data)
        if not perform_fmm:
            return [(o.name, 0) for o in insn.outputs]

    # }}}

    # Execute global QBX.
    all_potentials_on_every_target, extra_outputs = (
            fmm_driver(
                wrangler, flat_strengths, geo_data,
                base_kernel, kernel_extra_kwargs))

    results = []

    for o in insn.outputs:
        target_side_number = target_name_and_side_to_number[
                o.target_name, o.qbx_forced_limit]
        target_discr, _ = target_discrs_and_qbx_sides[target_side_number]
        target_slice = slice(*geo_data.target_info().target_discr_starts[
                target_side_number:target_side_number+2])

        result = \
                all_potentials_on_every_target[o.target_kernel_index][target_slice]

        from meshmode.discretization import Discretization
        if isinstance(target_discr, Discretization):
            template_ary = thaw(target_discr.nodes()[0], actx)
            result = unflatten(template_ary, result, actx, strict=False)

        results.append((o.name, result))

    return results, extra_outputs
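# A hypothetical *fmm_driver* callable matching the interface described in the
# docstring of exec_compute_potential_insn_fmm above: it takes the wrangler,
# the flattened strengths, the geometry data, the base kernel, and the extra
# kernel arguments, runs the FMM, and returns the potentials together with
# extra outputs (here, timing data). This is an illustrative sketch, not part
# of pytential; it reuses boxtree's drive_fmm with the same call signature
# that appears further down in these snippets.
def example_fmm_driver(wrangler, flat_strengths, geo_data,
        base_kernel, kernel_extra_kwargs):
    from boxtree.fmm import drive_fmm

    timing_data = {}
    potentials = drive_fmm(wrangler, flat_strengths, timing_data=timing_data)
    return potentials, timing_data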
def main(curve_fn=starfish, visualize=True):
    import logging
    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info

    import pyopencl as cl
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue, force_device_scalars=True)

    from meshmode.mesh.generation import make_curve_mesh
    mesh = make_curve_mesh(
            curve_fn,
            np.linspace(0, 1, nelements+1),
            target_order)

    from pytential.qbx import QBXLayerPotentialSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory

    pre_density_discr = Discretization(
            actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))

    qbx = QBXLayerPotentialSource(
            pre_density_discr, 4*target_order, qbx_order,
            fmm_order=qbx_order+3,
            target_association_tolerance=0.005,
            #fmm_backend="fmmlib",
            )

    from pytential.target import PointsTarget
    fplot = FieldPlotter(np.zeros(2), extent=5, npoints=1000)
    targets_dev = actx.from_numpy(fplot.points)

    from pytential import GeometryCollection
    places = GeometryCollection({
        "qbx": qbx,
        "targets": PointsTarget(targets_dev),
        }, auto_where="qbx")

    density_discr = places.get_discretization("qbx")

    nodes = thaw(density_discr.nodes(), actx)
    angle = actx.np.arctan2(nodes[1], nodes[0])

    if k:
        kernel = HelmholtzKernel(2)
        kernel_kwargs = {"k": sym.var("k")}
    else:
        kernel = LaplaceKernel(2)
        kernel_kwargs = {}

    def op(**kwargs):
        kwargs.update(kernel_kwargs)

        #op = sym.d_dx(sym.S(kernel, sym.var("sigma"), **kwargs))
        return sym.D(kernel, sym.var("sigma"), **kwargs)
        #op = sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None, **kwargs)

    if 0:
        from random import randrange
        sigma = actx.zeros(density_discr.ndofs, angle.entry_dtype)
        for _ in range(5):
            sigma[randrange(len(sigma))] = 1

        from arraycontext import unflatten
        sigma = unflatten(angle, sigma, actx)
    else:
        sigma = actx.np.cos(mode_nr*angle)

    if isinstance(kernel, HelmholtzKernel):
        for i, elem in np.ndenumerate(sigma):
            sigma[i] = elem.astype(np.complex128)

    bound_bdry_op = bind(places, op())

    if visualize:
        fld_in_vol = actx.to_numpy(
                bind(places, op(
                    source="qbx",
                    target="targets",
                    qbx_forced_limit=None))(actx, sigma=sigma, k=k))

        if enable_mayavi:
            fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
        else:
            fplot.write_vtk_file("layerpot-potential.vts", [
                ("potential", fld_in_vol)
                ])

    if 0:
        apply_op = bound_bdry_op.scipy_op(actx, "sigma", np.float64, k=k)
        from sumpy.tools import build_matrix
        mat = build_matrix(apply_op)

        import matplotlib.pyplot as pt
        pt.imshow(mat)
        pt.colorbar()
        pt.show()

    if enable_mayavi:
        # {{{ plot boundary field

        from arraycontext import flatten
        fld_on_bdry = actx.to_numpy(
                flatten(bound_bdry_op(actx, sigma=sigma, k=k), actx))

        nodes_host = actx.to_numpy(
                flatten(density_discr.nodes(), actx)
                ).reshape(density_discr.ambient_dim, -1)

        mlab.points3d(nodes_host[0], nodes_host[1],
                fld_on_bdry.real, scale_factor=0.03)

        mlab.colorbar()
        mlab.show()
def test_sphere_eigenvalues(actx_factory, mode_m, mode_n, qbx_order,
        fmm_backend):
    special = pytest.importorskip("scipy.special")

    logging.basicConfig(level=logging.INFO)
    actx = actx_factory()

    target_order = 8

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from pytential.qbx import QBXLayerPotentialSource
    from pytools.convergence import EOCRecorder

    s_eoc_rec = EOCRecorder()
    d_eoc_rec = EOCRecorder()
    sp_eoc_rec = EOCRecorder()
    dp_eoc_rec = EOCRecorder()

    def rel_err(comp, ref):
        return actx.to_numpy(
                norm(density_discr, comp - ref)
                / norm(density_discr, ref))

    for nrefinements in [0, 1]:
        from meshmode.mesh.generation import generate_sphere
        mesh = generate_sphere(1, target_order,
                uniform_refinement_rounds=nrefinements)

        pre_density_discr = Discretization(
                actx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))
        qbx = QBXLayerPotentialSource(
                pre_density_discr, 4 * target_order, qbx_order,
                fmm_order=6,
                fmm_backend=fmm_backend,
                )
        places = GeometryCollection(qbx)

        density_discr = places.get_discretization(places.auto_source.geometry)
        nodes = thaw(density_discr.nodes(), actx)
        r = actx.np.sqrt(nodes[0] * nodes[0] + nodes[1] * nodes[1]
                + nodes[2] * nodes[2])
        phi = actx.np.arccos(nodes[2] / r)
        theta = actx.np.arctan2(nodes[0], nodes[1])

        ymn = unflatten(theta,
                actx.from_numpy(
                    special.sph_harm(
                        mode_m, mode_n,
                        actx.to_numpy(flatten(theta, actx)),
                        actx.to_numpy(flatten(phi, actx)))),
                actx, strict=False)

        from sumpy.kernel import LaplaceKernel
        lap_knl = LaplaceKernel(3)

        # {{{ single layer

        s_sigma_op = bind(
                places, sym.S(lap_knl, sym.var("sigma"), qbx_forced_limit=+1))
        s_sigma = s_sigma_op(actx, sigma=ymn)
        s_eigval = 1 / (2 * mode_n + 1)

        h_max = actx.to_numpy(
                bind(places, sym.h_max(qbx.ambient_dim))(actx))
        s_eoc_rec.add_data_point(h_max, rel_err(s_sigma, s_eigval * ymn))

        # }}}

        # {{{ double layer

        d_sigma_op = bind(
                places, sym.D(lap_knl, sym.var("sigma"), qbx_forced_limit="avg"))
        d_sigma = d_sigma_op(actx, sigma=ymn)
        d_eigval = -1 / (2 * (2 * mode_n + 1))
        d_eoc_rec.add_data_point(h_max, rel_err(d_sigma, d_eigval * ymn))

        # }}}

        # {{{ S'

        sp_sigma_op = bind(
                places, sym.Sp(lap_knl, sym.var("sigma"), qbx_forced_limit="avg"))
        sp_sigma = sp_sigma_op(actx, sigma=ymn)
        sp_eigval = -1 / (2 * (2 * mode_n + 1))
        sp_eoc_rec.add_data_point(h_max, rel_err(sp_sigma, sp_eigval * ymn))

        # }}}

        # {{{ D'

        dp_sigma_op = bind(
                places, sym.Dp(lap_knl, sym.var("sigma"), qbx_forced_limit="avg"))
        dp_sigma = dp_sigma_op(actx, sigma=ymn)
        dp_eigval = -(mode_n * (mode_n + 1)) / (2 * mode_n + 1)
        dp_eoc_rec.add_data_point(h_max, rel_err(dp_sigma, dp_eigval * ymn))

        # }}}

    print("Errors for S:")
    print(s_eoc_rec)
    required_order = qbx_order + 1
    assert s_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D:")
    print(d_eoc_rec)
    required_order = qbx_order
    assert d_eoc_rec.order_estimate() > required_order - 0.5

    print("Errors for S':")
    print(sp_eoc_rec)
    required_order = qbx_order
    assert sp_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D':")
    print(dp_eoc_rec)
    required_order = qbx_order
    assert dp_eoc_rec.order_estimate() > required_order - 1.5
def exec_compute_potential_insn_fmm(self, actx: PyOpenCLArrayContext,
        insn, bound_expr, evaluate):
    # {{{ gather unique target discretizations used

    target_name_to_index = {}
    targets = []

    for o in insn.outputs:
        assert o.qbx_forced_limit not in (-1, 1)

        if o.target_name in target_name_to_index:
            continue

        target_name_to_index[o.target_name] = len(targets)
        targets.append(bound_expr.places.get_geometry(o.target_name.geometry))

    targets = tuple(targets)

    # }}}

    # {{{ get wrangler

    geo_data = self.fmm_geometry_data(targets)

    from pytential import bind, sym
    waa = bind(bound_expr.places,
            sym.weights_and_area_elements(
                self.ambient_dim, dofdesc=insn.source))(actx)
    strengths = [waa * evaluate(density) for density in insn.densities]
    flat_strengths = [flatten(strength, actx) for strength in strengths]

    fmm_kernel = self.get_fmm_kernel(insn.target_kernels)
    output_and_expansion_dtype = (
            self.get_fmm_output_and_expansion_dtype(insn.target_kernels,
                strengths[0]))
    kernel_extra_kwargs, source_extra_kwargs = (
            self.get_fmm_expansion_wrangler_extra_kwargs(
                actx, insn.target_kernels + insn.source_kernels,
                geo_data.tree().user_source_ids, insn.kernel_arguments,
                evaluate))

    tree_indep = self._tree_indep_data_for_wrangler(
            fmm_kernel,
            target_kernels=insn.target_kernels,
            source_kernels=insn.source_kernels)

    from sumpy.fmm import SumpyExpansionWrangler
    wrangler = SumpyExpansionWrangler(
            tree_indep, geo_data.traversal(),
            output_and_expansion_dtype,
            self.fmm_level_to_order,
            source_extra_kwargs=source_extra_kwargs,
            kernel_extra_kwargs=kernel_extra_kwargs)

    # }}}

    from boxtree.fmm import drive_fmm
    all_potentials_on_every_tgt = drive_fmm(
            wrangler, flat_strengths, timing_data=None)

    # {{{ postprocess fmm

    results = []

    for o in insn.outputs:
        target_index = target_name_to_index[o.target_name]
        target_slice = slice(*geo_data.target_info()
                .target_discr_starts[target_index:target_index + 2])
        target_discr = targets[target_index]

        result = all_potentials_on_every_tgt[
                o.target_kernel_index][target_slice]

        from meshmode.discretization import Discretization
        if isinstance(target_discr, Discretization):
            template_ary = thaw(target_discr.nodes()[0], actx)
            result = unflatten(template_ary, result, actx, strict=False)

        results.append((o.name, result))

    # }}}

    timing_data = {}
    return results, timing_data
def plot_proxy_geometry(
        actx, places, indices, pxy=None, nbrindices=None,
        with_qbx_centers=False, suffix=None):
    dofdesc = places.auto_source
    discr = places.get_discretization(dofdesc.geometry, dofdesc.discr_stage)
    ambient_dim = places.ambient_dim

    if suffix is None:
        suffix = f"{ambient_dim}d"
    suffix = suffix.replace(".", "_")

    import matplotlib.pyplot as pt
    pt.figure(figsize=(10, 8), dpi=300)
    pt.plot(np.diff(indices.ranges))
    pt.savefig(f"test_proxy_geometry_{suffix}_ranges")
    pt.clf()

    if ambient_dim == 2:
        sources = actx.to_numpy(
                flatten(discr.nodes(), actx)
                ).reshape(ambient_dim, -1)

        if pxy is not None:
            proxies = np.stack(pxy.points)
            pxycenters = np.stack(pxy.centers)
            pxyranges = pxy.indices.ranges

        if with_qbx_centers:
            ci = actx.to_numpy(flatten(
                bind(places, sym.expansion_centers(ambient_dim, -1))(actx),
                actx)).reshape(ambient_dim, -1)
            ce = actx.to_numpy(flatten(
                bind(places, sym.expansion_centers(ambient_dim, +1))(actx),
                actx)).reshape(ambient_dim, -1)
            r = actx.to_numpy(flatten(
                bind(places, sym.expansion_radii(ambient_dim))(actx),
                actx))

        fig = pt.figure(figsize=(10, 8), dpi=300)

        if indices.indices.shape[0] != discr.ndofs:
            pt.plot(sources[0], sources[1], "ko", ms=2.0, alpha=0.5)

        for i in range(indices.nblocks):
            isrc = indices.block_indices(i)
            pt.plot(sources[0, isrc], sources[1, isrc], "o", ms=2.0)

            if with_qbx_centers:
                ax = pt.gca()
                for j in isrc:
                    c = pt.Circle(ci[:, j], r[j], color="k", alpha=0.1)
                    ax.add_artist(c)
                    c = pt.Circle(ce[:, j], r[j], color="k", alpha=0.1)
                    ax.add_artist(c)

            if pxy is not None:
                ipxy = np.s_[pxyranges[i]:pxyranges[i + 1]]
                pt.plot(proxies[0, ipxy], proxies[1, ipxy], "o", ms=2.0)

            if nbrindices is not None:
                inbr = nbrindices.block_indices(i)
                pt.plot(sources[0, inbr], sources[1, inbr], "o", ms=2.0)

        pt.xlim([-2, 2])
        pt.ylim([-2, 2])
        pt.gca().set_aspect("equal")
        pt.savefig(f"test_proxy_geometry_{suffix}")
        pt.close(fig)
    elif ambient_dim == 3:
        from meshmode.discretization.visualization import make_visualizer

        marker = -42.0 * np.ones(discr.ndofs)
        for i in range(indices.nblocks):
            isrc = indices.block_indices(i)
            marker[isrc] = 10.0 * (i + 1.0)

        template_ary = thaw(discr.nodes()[0], actx)
        marker_dev = unflatten(template_ary, actx.from_numpy(marker), actx)

        vis = make_visualizer(actx, discr)
        vis.write_vtk_file(f"test_proxy_geometry_{suffix}.vtu", [
            ("marker", marker_dev)
            ], overwrite=False)

        if nbrindices:
            for i in range(indices.nblocks):
                isrc = indices.block_indices(i)
                inbr = nbrindices.block_indices(i)

                marker.fill(0.0)
                marker[indices.indices] = 0.0
                marker[isrc] = -42.0
                marker[inbr] = +42.0
                marker_dev = unflatten(template_ary, actx.from_numpy(marker),
                        actx)

                vis.write_vtk_file(
                        f"test_proxy_geometry_{suffix}_neighbor_{i:04d}.vtu",
                        [("marker", marker_dev)], overwrite=False)

        if pxy:
            # NOTE: this does not plot the actual proxy points, just sphere
            # with the same center and radius as the proxy balls
            from meshmode.mesh.processing import (
                    affine_map, merge_disjoint_meshes)
            from meshmode.discretization import Discretization
            from meshmode.discretization.poly_element import \
                    InterpolatoryQuadratureSimplexGroupFactory
            from meshmode.mesh.generation import generate_sphere

            ref_mesh = generate_sphere(1, 4, uniform_refinement_rounds=1)
            pxycenters = np.stack(pxy.centers)

            for i in range(indices.nblocks):
                mesh = affine_map(ref_mesh,
                        A=pxy.radii[i],
                        b=pxycenters[:, i].reshape(-1))

                mesh = merge_disjoint_meshes([mesh, discr.mesh])
                discr = Discretization(actx, mesh,
                        InterpolatoryQuadratureSimplexGroupFactory(4))

                vis = make_visualizer(actx, discr)
                filename = f"test_proxy_geometry_{suffix}_block_{i:04d}.vtu"
                vis.write_vtk_file(filename, [], overwrite=False)
    else:
        raise ValueError
def test_build_matrix(actx_factory, k, curve_fn, op_type, visualize=False):
    """Checks that the matrix built with `symbolic.execution.build_matrix`
    gives the same (to tolerance) answer as a direct evaluation.
    """
    actx = actx_factory()

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    case = extra.CurveTestCase(
            name="curve",
            knl_class_or_helmholtz_k=k,
            curve_fn=curve_fn,
            op_type=op_type,
            target_order=7,
            qbx_order=4,
            resolutions=[30])
    logger.info("\n%s", case)

    # {{{ geometry

    qbx = case.get_layer_potential(actx, case.resolutions[-1], case.target_order)

    from pytential.qbx.refinement import refine_geometry_collection
    places = GeometryCollection(qbx, auto_where=case.name)
    places = refine_geometry_collection(places,
            kernel_length_scale=(5 / k if k else None))

    dd = places.auto_source.to_stage1()
    density_discr = places.get_discretization(dd.geometry)

    logger.info("nelements: %d", density_discr.mesh.nelements)
    logger.info("ndofs: %d", density_discr.ndofs)

    # }}}

    # {{{ symbolic

    sym_u, sym_op = case.get_operator(places.ambient_dim)
    bound_op = bind(places, sym_op)

    # }}}

    # {{{ dense matrix

    from pytential.symbolic.execution import build_matrix
    mat = actx.to_numpy(
            build_matrix(actx, places, sym_op, sym_u,
                context=case.knl_concrete_kwargs))

    if visualize:
        try:
            import matplotlib.pyplot as pt
        except ImportError:
            visualize = False

    if visualize:
        from sumpy.tools import build_matrix as build_matrix_via_matvec
        mat2 = bound_op.scipy_op(actx, "u", dtype=mat.dtype,
                **case.knl_concrete_kwargs)
        mat2 = build_matrix_via_matvec(mat2)

        logger.info("real %.5e imag %.5e",
                la.norm((mat - mat2).real, "fro") / la.norm(mat2.real, "fro"),
                la.norm((mat - mat2).imag, "fro") / la.norm(mat2.imag, "fro"))

        pt.subplot(121)
        pt.imshow(np.log10(np.abs(1.0e-20 + (mat - mat2).real)))
        pt.colorbar()
        pt.subplot(122)
        pt.imshow(np.log10(np.abs(1.0e-20 + (mat - mat2).imag)))
        pt.colorbar()
        pt.show()
        pt.clf()

    if visualize:
        pt.subplot(121)
        pt.imshow(mat.real)
        pt.colorbar()
        pt.subplot(122)
        pt.imshow(mat.imag)
        pt.colorbar()
        pt.show()
        pt.clf()

    # }}}

    # {{{ check

    np.random.seed(12)

    template_ary = thaw(density_discr.nodes()[0], actx)
    for i in range(5):
        if isinstance(sym_u, np.ndarray):
            u = np.random.randn(len(sym_u), density_discr.ndofs)
            u_dev = make_obj_array([
                unflatten(template_ary, actx.from_numpy(ui), actx, strict=False)
                for ui in u
                ])
        else:
            u = np.random.randn(density_discr.ndofs)
            u_dev = unflatten(template_ary, actx.from_numpy(u), actx,
                    strict=False)

        res_matvec = actx.to_numpy(
                flatten(
                    bound_op(actx, u=u_dev, **case.knl_concrete_kwargs),
                    actx))
        res_mat = mat @ u.ravel()

        abs_err = la.norm(res_mat - res_matvec, np.inf)
        rel_err = abs_err / la.norm(res_matvec, np.inf)

        logger.info(f"AbsErr {abs_err:.5e} RelErr {rel_err:.5e}")
        assert rel_err < 1.0e-13, f"iteration: {i}"

    # }}}
def main(mesh_name="ellipsoid"): import logging logger = logging.getLogger(__name__) logging.basicConfig(level=logging.WARNING) # INFO for more progress info import pyopencl as cl cl_ctx = cl.create_some_context() queue = cl.CommandQueue(cl_ctx) actx = PyOpenCLArrayContext(queue, force_device_scalars=True) if mesh_name == "ellipsoid": cad_file_name = "geometries/ellipsoid.step" h = 0.6 elif mesh_name == "two-cylinders": cad_file_name = "geometries/two-cylinders-smooth.step" h = 0.4 else: raise ValueError("unknown mesh name: %s" % mesh_name) from meshmode.mesh.io import generate_gmsh, FileSource mesh = generate_gmsh( FileSource(cad_file_name), 2, order=2, other_options=["-string", "Mesh.CharacteristicLengthMax = %g;" % h], target_unit="MM") from meshmode.mesh.processing import perform_flips # Flip elements--gmsh generates inside-out geometry. mesh = perform_flips(mesh, np.ones(mesh.nelements)) from meshmode.mesh.processing import find_bounding_box bbox_min, bbox_max = find_bounding_box(mesh) bbox_center = 0.5 * (bbox_min + bbox_max) bbox_size = max(bbox_max - bbox_min) / 2 logger.info("%d elements" % mesh.nelements) from pytential.qbx import QBXLayerPotentialSource from meshmode.discretization import Discretization from meshmode.discretization.poly_element import \ InterpolatoryQuadratureSimplexGroupFactory density_discr = Discretization( actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order)) qbx = QBXLayerPotentialSource(density_discr, 4 * target_order, qbx_order, fmm_order=qbx_order + 3, target_association_tolerance=0.15) from pytential.target import PointsTarget fplot = FieldPlotter(bbox_center, extent=3.5 * bbox_size, npoints=150) from pytential import GeometryCollection places = GeometryCollection( { "qbx": qbx, "targets": PointsTarget(actx.from_numpy(fplot.points)) }, auto_where="qbx") density_discr = places.get_discretization("qbx") nodes = thaw(density_discr.nodes(), actx) angle = actx.np.arctan2(nodes[1], nodes[0]) if k: kernel = HelmholtzKernel(3) else: kernel = LaplaceKernel(3) #op = sym.d_dx(sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None)) op = sym.D(kernel, sym.var("sigma"), qbx_forced_limit=None) #op = sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None) if 0: from random import randrange sigma = actx.zeros(density_discr.ndofs, angle.entry_dtype) for _ in range(5): sigma[randrange(len(sigma))] = 1 from arraycontext import unflatten sigma = unflatten(angle, sigma, actx) else: sigma = actx.np.cos(mode_nr * angle) if isinstance(kernel, HelmholtzKernel): for i, elem in np.ndenumerate(sigma): sigma[i] = elem.astype(np.complex128) fld_in_vol = actx.to_numpy( bind(places, op, auto_where=("qbx", "targets"))(actx, sigma=sigma, k=k)) #fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5) fplot.write_vtk_file("layerpot-3d-potential.vts", [("potential", fld_in_vol)]) bdry_normals = bind(places, sym.normal( density_discr.ambient_dim))(actx).as_vector(dtype=object) from meshmode.discretization.visualization import make_visualizer bdry_vis = make_visualizer(actx, density_discr, target_order) bdry_vis.write_vtk_file("layerpot-3d-density.vtu", [ ("sigma", sigma), ("bdry_normals", bdry_normals), ])