def separate_by_real_and_imag(names_and_fields, real_only):
    """
    :arg names_and_fields: input data array must be already flattened into a
        single :mod:`numpy` array using :func:`resample_to_numpy`.
    """
    for name, field in names_and_fields:
        if isinstance(field, np.ndarray) and field.dtype.char == "O":
            # Object array: one sub-array per component.
            assert len(field.shape) == 1
            from pytools.obj_array import (
                    obj_array_real_copy, obj_array_imag_copy,
                    obj_array_vectorize)

            if field[0].dtype.kind != "c":
                yield (name, field)
            elif real_only:
                yield (name,
                        obj_array_vectorize(obj_array_real_copy, field))
            else:
                yield (f"{name}_r",
                        obj_array_vectorize(obj_array_real_copy, field))
                yield (f"{name}_i",
                        obj_array_vectorize(obj_array_imag_copy, field))
        else:
            # Plain array: split directly via .real/.imag.
            if field.dtype.kind != "c":
                yield (name, field)
            elif real_only:
                yield (name, field.real.copy())
            else:
                yield (f"{name}_r", field.real.copy())
                yield (f"{name}_i", field.imag.copy())
def get_next_step(self, available_names, done_insns):
    """Pick the next runnable instruction and the variables that may be freed.

    An instruction is runnable once it is not in *done_insns* and all of its
    dependencies are in *available_names*. Among runnable instructions the
    one with the highest priority is chosen (via ``argmax2``).

    :returns: a tuple ``(insn, discardable_vars)`` where *discardable_vars*
        are available names that no not-yet-run instruction (nor the final
        result) still depends on.
    :raises NoInstructionAvailable: if no instruction can currently be run.
    """
    from pytools import argmax2

    # NOTE: previously this imported ``all`` from pytools, shadowing the
    # builtin with a deprecated compatibility re-export. The builtin is
    # used directly now; behavior is unchanged.
    available_insns = [
            (insn, insn.priority) for insn in self.instructions
            if insn not in done_insns
            and all(dep.name in available_names
                for dep in insn.get_dependencies())]

    if not available_insns:
        raise self.NoInstructionAvailable

    from pytools import flatten
    # Anything not needed by a remaining instruction is a candidate for
    # discarding.
    discardable_vars = set(available_names) - set(flatten(
        [dep.name for dep in insn.get_dependencies()]
        for insn in self.instructions
        if insn not in done_insns))

    # {{{ make sure results do not get discarded

    dm = mappers.DependencyMapper(composite_leaves=False)

    def remove_result_variable(result_expr):
        # The extra dependency mapper run is necessary
        # because, for instance, subscripts can make it
        # into the result expression, which then does
        # not consist of just variables.
        for var in dm(result_expr):
            assert isinstance(var, Variable)
            discardable_vars.discard(var.name)

    obj_array_vectorize(remove_result_variable, self.result)

    # }}}

    return argmax2(available_insns), discardable_vars
def _apply_inverse_mass_operator(dcoll: DiscretizationCollection, dd_out, dd_in, vec):
    """Apply the (approximate) inverse mass operator to *vec*, group by group.

    :arg dcoll: the :class:`DiscretizationCollection` supplying element
        groups and geometry.
    :arg dd_out: output domain tag; must equal *dd_in*.
    :arg dd_in: input domain tag.
    :arg vec: a DOFArray, or an object array of DOFArrays (handled
        recursively).
    :returns: a DOFArray with one entry per element group.
    :raises ValueError: if *dd_out* differs from *dd_in*.
    """
    # Vectorize over object arrays of DOFArrays.
    if isinstance(vec, np.ndarray):
        return obj_array_vectorize(
                lambda vi: _apply_inverse_mass_operator(dcoll, dd_out, dd_in, vi),
                vec)

    from grudge.geometry import area_element

    if dd_out != dd_in:
        raise ValueError("Cannot compute inverse of a mass matrix mapping "
                         "between different element groups; inverse is not "
                         "guaranteed to be well-defined")

    actx = vec.array_context
    discr = dcoll.discr_from_dd(dd_in)
    # Reciprocal Jacobian determinants of the input discretization.
    inv_area_elements = 1. / area_element(actx, dcoll, dd=dd_in)
    group_data = []
    for grp, jac_inv, vec_i in zip(discr.groups, inv_area_elements, vec):
        ref_mass_inverse = reference_inverse_mass_matrix(actx,
                                                         element_group=grp)
        group_data.append(
            # Based on https://arxiv.org/pdf/1608.03836.pdf
            # true_Minv ~ ref_Minv * ref_M * (1/jac_det) * ref_Minv
            actx.einsum("ei,ij,ej->ei",
                        jac_inv,
                        ref_mass_inverse,
                        vec_i,
                        tagged=(FirstAxisIsElementsTag(), )))

    return DOFArray(actx, data=tuple(group_data))
def _apply_mass_operator(dcoll: DiscretizationCollection, dd_out, dd_in, vec):
    """Apply the mass operator to *vec*, mapping from *dd_in* to *dd_out*.

    :arg dcoll: the :class:`DiscretizationCollection` supplying element
        groups and geometry.
    :arg dd_out: output domain tag.
    :arg dd_in: input domain tag.
    :arg vec: a DOFArray, or an object array of DOFArrays (handled
        recursively).
    :returns: a DOFArray with one entry per element group.
    """
    # Vectorize over object arrays of DOFArrays.
    if isinstance(vec, np.ndarray):
        return obj_array_vectorize(
                lambda vi: _apply_mass_operator(dcoll, dd_out, dd_in, vi), vec)

    from grudge.geometry import area_element

    in_discr = dcoll.discr_from_dd(dd_in)
    out_discr = dcoll.discr_from_dd(dd_out)

    actx = vec.array_context
    # Jacobian determinants ("area elements") of the input discretization.
    area_elements = area_element(actx, dcoll, dd=dd_in)
    return DOFArray(
        actx,
        data=tuple(
            # Per element group:
            # result[e, i] = sum_j mass_mat[i, j] * jac[e, j] * vec[e, j]
            actx.einsum("ij,ej,ej->ei",
                        reference_mass_matrix(actx,
                                              out_element_group=out_grp,
                                              in_element_group=in_grp),
                        ae_i,
                        vec_i,
                        arg_names=("mass_mat", "jac", "vec"),
                        tagged=(FirstAxisIsElementsTag(), ))
            for in_grp, out_grp, ae_i, vec_i in zip(
                in_discr.groups, out_discr.groups, area_elements, vec)))
def inverse_mass(self, vec):
    """Apply the inverse mass matrix (divided by the volume Jacobian) to *vec*.

    :arg vec: a DOFArray, or an object array of them (handled recursively).
    :returns: the result of the element-wise linear operation, divided by
        the volume Jacobian.
    """
    # Vectorize over generic object arrays (but not DOFArrays, whose
    # dtype is also "O").
    if (isinstance(vec, np.ndarray)
            and vec.dtype.char == "O"
            and not isinstance(vec, DOFArray)):
        return obj_array_vectorize(lambda el: self.inverse_mass(el), vec)

    # Element-wise dense matrix application, memoized per instance.
    # NOTE(review): the loopy program is named "diff" — presumably a
    # copy/paste from a differentiation kernel; the name is cosmetic.
    @memoize_in(self, "elwise_linear_knl")
    def knl():
        return make_loopy_program(
            """{[iel,idof,j]:
                0<=iel<nelements and
                0<=idof<ndiscr_nodes_out and
                0<=j<ndiscr_nodes_in}""",
            "result[iel,idof] = sum(j, mat[idof, j] * vec[iel, j])",
            name="diff")

    discr = self.volume_discr
    result = discr.empty_like(vec)

    for grp in discr.groups:
        matrix = self.get_inverse_mass_matrix(grp, vec.entry_dtype)

        vec.array_context.call_loopy(
            knl(),
            mat=matrix,
            result=result[grp.index],
            vec=vec[grp.index])

    return result / self.vol_jacobian()
def unflatten(actx: ArrayContext, discr, ary: Union[Any, np.ndarray],
        ndofs_per_element_per_group: Optional[Iterable[int]] = None) -> DOFArray:
    r"""Convert a 'flat' array returned by :func:`flatten` back to a
    :class:`DOFArray`.

    :arg ndofs_per_element_per_group: Optional. If given, an iterable of
        numbers representing the number of degrees of freedom per element,
        overriding the numbers provided by the element groups in *discr*.
        May be used (for example) to handle :class:`DOFArray`\ s that have
        only one DOF per element, representing some per-element quantity.

    Vectorizes over object arrays.
    """
    if isinstance(ary, np.ndarray):
        # Recurse into each entry of the object array.
        return obj_array_vectorize(
                lambda subary: unflatten(
                    actx, discr, subary, ndofs_per_element_per_group),
                ary)

    if ndofs_per_element_per_group is None:
        # Default to the DOF counts declared by the element groups.
        ndofs_per_element_per_group = [
                grp.nunit_dofs for grp in discr.groups]

    nel_ndof_per_element_per_group = [
            (grp.nelements, ndofs_per_element)
            for grp, ndofs_per_element
            in zip(discr.groups, ndofs_per_element_per_group)]

    return _unflatten(actx, nel_ndof_per_element_per_group, ary)
def sym_grad(dim, expr):
    """Return the symbolic gradient of *expr* in *dim* dimensions.

    Handles :class:`ConservedVars` (by recursing on the joined array and
    re-wrapping), object arrays (component-wise, stacked), and scalars.
    """
    if isinstance(expr, ConservedVars):
        # Recurse on the flat representation, then re-wrap.
        return make_conserved(dim, q=sym_grad(dim, expr.join()))

    if isinstance(expr, np.ndarray):
        def grad_component(component):
            return sym.grad(dim, component)
        return np.stack(obj_array_vectorize(grad_component, expr))

    return sym.grad(dim, expr)
def flatten(ary: Union[DOFArray, np.ndarray]) -> Any:
    r"""Convert a :class:`DOFArray` into a "flat" array of degrees of freedom,
    where the resulting type of the array is given by the
    :attr:`DOFArray.array_context`.

    Array elements are laid out contiguously, with the element group
    index varying slowest, element index next, and intra-element DOF
    index fastest.

    Vectorizes over object arrays of :class:`DOFArray`\ s.
    """
    if isinstance(ary, np.ndarray):
        # Flatten each component of the object array separately.
        return obj_array_vectorize(flatten, ary)

    # Flat size of each group: nelements * ndofs_per_element.
    group_sizes = [grp_ary.shape[0] * grp_ary.shape[1] for grp_ary in ary]
    # Starting offset of each group in the flat result (cumulative).
    group_starts = np.cumsum([0] + group_sizes)

    actx = ary.array_context

    # Copy kernel, memoized on the array context.
    @memoize_in(actx, (flatten, "flatten_prg"))
    def prg():
        return make_loopy_program(
            "{[iel,idof]: 0<=iel<nelements and 0<=idof<ndofs_per_element}",
            """result[grp_start + iel*ndofs_per_element + idof] \
                = grp_ary[iel, idof]""",
            name="flatten")

    result = actx.empty(group_starts[-1], dtype=ary.entry_dtype)

    for grp_start, grp_ary in zip(group_starts, ary):
        actx.call_loopy(prg(), grp_ary=grp_ary, result=result,
                grp_start=grp_start)

    return result
def get_fmm_expansion_wrangler_extra_kwargs(
        self, actx, out_kernels, tree_user_source_ids, arguments, evaluator):
    """Gather extra keyword arguments for the FMM expansion wrangler.

    :returns: a tuple ``(kernel_extra_kwargs, source_extra_kwargs)`` of
        dicts mapping argument names to evaluated (and, for source
        arguments, reordered) values.
    """
    # This contains things like the Helmholtz parameter k or
    # the normal directions for double layers.

    queue = actx.queue

    def reorder_sources(source_array):
        # Index device arrays by tree_user_source_ids (presumably mapping
        # into the tree's source order — confirm against the tree build);
        # non-device values pass through unchanged.
        if isinstance(source_array, cl.array.Array):
            return (source_array.with_queue(queue)
                    [tree_user_source_ids]
                    .with_queue(None))
        else:
            return source_array

    kernel_extra_kwargs = {}
    source_extra_kwargs = {}

    from sumpy.tools import gather_arguments, gather_source_arguments
    from pytools.obj_array import obj_array_vectorize
    from pytential.utils import flatten_if_needed

    # Kernel-level and source-level arguments are gathered the same way,
    # just with different gather functions and target dicts.
    for func, var_dict in [
            (gather_arguments, kernel_extra_kwargs),
            (gather_source_arguments, source_extra_kwargs),
            ]:
        for arg in func(out_kernels):
            var_dict[arg.name] = obj_array_vectorize(
                    reorder_sources,
                    flatten_if_needed(actx, evaluator(arguments[arg.name])))

    return kernel_extra_kwargs, source_extra_kwargs
def my_checkpoint(step, t, dt, state):
    """Write a restart pickle when due, then delegate to sim_checkpoint.

    Relies on module-level configuration (mesh, EOS, viz settings, MPI
    comm, etc.) captured from the enclosing scope.
    """
    # Never re-write the snapshot this run was restarted from.
    write_restart = (check_step(step, nrestart)
                     if step != restart_step else False)
    if write_restart is True:
        with open(snapshot_pattern.format(step=step, rank=rank), "wb") as f:
            pickle.dump({"local_mesh": local_mesh,
                         "state": obj_array_vectorize(actx.to_numpy,
                                                      flatten(state)),
                         "t": t,
                         "step": step,
                         "global_nelements": global_nelements,
                         "num_parts": nparts,
                         }, f)

    # Exact solution: discontinuity location x0 is fixed here, not a
    # function of time.
    exact_soln = Discontinuity(dim=dim, x0=.05, sigma=0.00001,
                               rhol=rho2, rhor=rho1,
                               pl=pressure2, pr=pressure1,
                               ul=vel_inflow[0], ur=0.,
                               uc=mach*c_bkrnd)

    return sim_checkpoint(discr=discr, visualizer=visualizer, eos=eos,
                          q=state, vizname=casename,
                          step=step, t=t, dt=dt, nstatus=nstatus,
                          nviz=nviz, exittol=exittol,
                          constant_cfl=constant_cfl, comm=comm,
                          vis_timer=vis_timer, overwrite=True,
                          exact_soln=exact_soln,
                          sigma=sigma_sc, kappa=kappa_sc)
def __call__(self, expr):
    """Generate code for *expr* and return the resulting :class:`Code`.

    First groups the ``IntG`` operators occurring in *expr* by their
    features (via ``op_group_features``), then delegates code generation
    to the superclass, and finally assigns the top-level result
    expressions to fresh variables.
    """
    # {{{ collect operators by operand

    from pytential.symbolic.mappers import OperatorCollector
    operators = [
            op for op in OperatorCollector()(expr)
            if isinstance(op, IntG)]

    self.group_to_operators = {}
    for op in operators:
        features = self.op_group_features(op)
        self.group_to_operators.setdefault(features, set()).add(op)

    # }}}

    # Traverse the expression, generate code.
    result = super().__call__(expr)

    # Put the toplevel expressions into variables as well.
    from pytools.obj_array import obj_array_vectorize
    result = obj_array_vectorize(self.assign_to_new_var, result)

    return Code(self.code, result)
def interp(self, src, tgt, vec):
    """Interpolate *vec* from discretization *src* to *tgt*, mapping over
    generic object arrays entry by entry."""
    is_generic_obj_array = (
            isinstance(vec, np.ndarray)
            and vec.dtype.char == "O"
            and not isinstance(vec, DOFArray))

    if is_generic_obj_array:
        def interp_component(component):
            return self.interp(src, tgt, component)
        return obj_array_vectorize(interp_component, vec)

    return self.get_connection(src, tgt)(vec)
def local_grad(
        dcoll: DiscretizationCollection, vec, *, nested=False) -> np.ndarray:
    r"""Return the element-local gradient of a function :math:`f` represented
    by *vec*:

    .. math::

        \nabla|_E f = \left(
            \partial_x|_E f, \partial_y|_E f, \partial_z|_E f \right)

    :arg vec: a :class:`~meshmode.dof_array.DOFArray` or object array of
        :class:`~meshmode.dof_array.DOFArray`\ s.
    :arg nested: return nested object arrays instead of a single
        multidimensional array if *vec* is non-scalar.
    :returns: an object array (possibly nested) of
        :class:`~meshmode.dof_array.DOFArray`\ s.
    """
    if isinstance(vec, np.ndarray):
        # Take the gradient of each component separately.
        component_grads = obj_array_vectorize(
                lambda component: local_grad(dcoll, component, nested=nested),
                vec)
        if nested:
            return component_grads
        return np.stack(component_grads, axis=0)

    return make_obj_array([
        _compute_local_gradient(dcoll, vec, axis)
        for axis in range(dcoll.dim)])
def local_interior_trace_pair(dcoll: DiscretizationCollection, vec) -> TracePair:
    r"""Return a :class:`TracePair` for the interior faces of *dcoll*.

    This does not include interior faces on different MPI ranks.

    :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` of them.

    For certain applications, it may be useful to distinguish between
    rank-local and cross-rank trace pairs. For example, avoiding
    unnecessary communication of derived quantities (i.e. temperature)
    on partition boundaries by computing them directly. Having the
    ability for user applications to distinguish between rank-local and
    cross-rank contributions can also help enable overlapping
    communication with computation.

    :returns: a :class:`TracePair` object.
    """
    interior = project(dcoll, "vol", "int_faces", vec)

    def get_opposite_face(el):
        # Plain numbers are constant across a face; nothing to swap.
        if isinstance(el, Number):
            return el
        return dcoll.opposite_face_connection()(el)

    exterior = obj_array_vectorize(get_opposite_face, interior)

    return TracePair("int_faces", interior=interior, exterior=exterior)
def my_checkpoint(step, t, dt, state):
    """Write a restart pickle when due, then delegate to sim_checkpoint.

    Relies on module-level configuration (mesh, EOS, viz settings, MPI
    comm, etc.) captured from the enclosing scope.
    """
    # Never re-write the snapshot this run was restarted from.
    write_restart = (check_step(step, nrestart)
                     if step != restart_step else False)
    if write_restart is True:
        with open(snapshot_pattern.format(step=step, rank=rank), "wb") as f:
            pickle.dump({"local_mesh": local_mesh,
                         "state": obj_array_vectorize(actx.to_numpy,
                                                      flatten(state)),
                         "t": t,
                         "step": step,
                         "global_nelements": global_nelements,
                         "num_parts": nparts,
                         }, f)

    cv = split_conserved(dim, state)
    # Cells flagged by the smoothness indicator (presumably used for
    # artificial-viscosity visualization — confirm against sim_checkpoint).
    tagged_cells = smoothness_indicator(discr, cv.mass, s0=s0_sc,
                                        kappa=kappa_sc)
    viz_fields = [("sponge_sigma", gen_sponge()),
                  ("tagged cells", tagged_cells)]

    return sim_checkpoint(discr=discr, visualizer=visualizer, eos=eos,
                          q=state, vizname=casename,
                          step=step, t=t, dt=dt, nstatus=nstatus,
                          nviz=nviz, exittol=exittol,
                          constant_cfl=constant_cfl, comm=comm,
                          vis_timer=vis_timer, overwrite=True,
                          s0=s0_sc, kappa=kappa_sc,
                          viz_fields=viz_fields)
def inverse_mass(self, vec):
    """Apply the inverse mass operator to *vec*, mapping over generic
    object arrays entry by entry."""
    is_generic_obj_array = (
            isinstance(vec, np.ndarray)
            and vec.dtype.char == "O"
            and not isinstance(vec, DOFArray))

    if is_generic_obj_array:
        def apply_one(component):
            return self.inverse_mass(component)
        return obj_array_vectorize(apply_one, vec)

    return self._bound_inverse_mass()(u=vec)
def project(self, src, tgt, vec):
    """Project *vec* from discretization *src* onto *tgt*, mapping over
    generic object arrays entry by entry."""
    is_generic_obj_array = (
            isinstance(vec, np.ndarray)
            and vec.dtype.char == "O"
            and not isinstance(vec, DOFArray))

    if is_generic_obj_array:
        def project_component(component):
            return self.project(src, tgt, component)
        return obj_array_vectorize(project_component, vec)

    return self.connection_from_dds(src, tgt)(vec)
def u_incoming_func(x):
    """Evaluate ``1/|x - source|`` at the points *x*."""
    from pytools.obj_array import obj_array_vectorize

    # Pull the coordinate arrays to host and stack them into a single
    # (ambient_dim, npoints) numpy array.
    coords = obj_array_vectorize(actx.to_numpy, flatten(x))
    coords = np.array(list(coords))

    return 1.0/la.norm(coords - source[:, None], axis=0)
def interior_trace_pair(discrwb, vec):
    """Return a :class:`grudge.sym.TracePair` for the interior faces of
    *discrwb*.
    """
    interior = discrwb.project("vol", "int_faces", vec)

    def to_opposite(el):
        # Swap each face value with the one on the neighboring element.
        return discrwb.opposite_face_connection()(el)

    exterior = obj_array_vectorize(to_opposite, interior)

    return TracePair("int_faces", interior=interior, exterior=exterior)
def unflatten_from_numpy(actx, discr, ary):
    """Transfer numpy data in *ary* onto *actx*; when *discr* is given,
    also unflatten it into a DOFArray."""
    from meshmode.dof_array import unflatten
    from pytools.obj_array import obj_array_vectorize

    on_device = obj_array_vectorize(actx.from_numpy, ary)
    if discr is None:
        return on_device
    return unflatten(actx, discr, on_device)
def vector_to_device(queue, vec):
    """Copy each entry of the object array *vec* onto the device of *queue*."""
    from pyopencl.array import to_device
    from pytools.obj_array import obj_array_vectorize

    return obj_array_vectorize(
            lambda component: to_device(queue, component), vec)
def dof_array_to_numpy(actx, ary):
    """Converts DOFArrays (or object arrays of DOFArrays) to NumPy arrays.
    Object arrays get turned into multidimensional arrays.
    """
    from meshmode.dof_array import flatten
    from pytools.obj_array import obj_array_vectorize

    result = obj_array_vectorize(actx.to_numpy, flatten(ary))
    if result.dtype.char == "O":
        # Convert the object array of 1D arrays into one 2D array.
        result = np.array(list(result))
    return result
def my_checkpoint(step, t, dt, state):
    """Write a restart pickle when due, then delegate to sim_checkpoint.

    Relies on module-level configuration (mesh, EOS, flame parameters,
    MPI comm, etc.) captured from the enclosing scope.
    """
    # Never re-write the snapshot this run was restarted from.
    write_restart = (check_step(step, nrestart)
                     if step != restart_step else False)
    if write_restart is True:
        with open(snapshot_pattern.format(step=step, rank=rank), "wb") as f:
            pickle.dump(
                {
                    "local_mesh": local_mesh,
                    "state": obj_array_vectorize(actx.to_numpy,
                                                 flatten(state)),
                    "t": t,
                    "step": step,
                    "global_nelements": global_nelements,
                    "num_parts": nparts,
                }, f)

    def loc_fn(t):
        # Discontinuity location advected at flame_speed.
        return flame_start_loc + flame_speed * t

    exact_soln = PlanarDiscontinuity(dim=dim, disc_location=loc_fn,
                                     sigma=0.0000001,
                                     nspecies=nspecies,
                                     temperature_left=temp_ignition,
                                     temperature_right=temp_unburned,
                                     pressure_left=pres_burned,
                                     pressure_right=pres_unburned,
                                     velocity_left=vel_burned,
                                     velocity_right=vel_unburned,
                                     species_mass_left=y_burned,
                                     species_mass_right=y_unburned)

    cv = split_conserved(dim, state)
    reaction_rates = eos.get_production_rates(cv)
    viz_fields = [("reaction_rates", reaction_rates)]

    return sim_checkpoint(discr=discr, visualizer=visualizer, eos=eos,
                          q=state, vizname=casename,
                          step=step, t=t, dt=dt, nstatus=nstatus,
                          nviz=nviz, exittol=exittol,
                          constant_cfl=constant_cfl, comm=comm,
                          vis_timer=vis_timer, overwrite=True,
                          exact_soln=exact_soln,
                          viz_fields=viz_fields)
def execute(self, exec_mapper, pre_assign_check=None):
    """Execute the instruction stream, make all scheduling
    decisions dynamically.

    :arg exec_mapper: supplies the execution context, array context and
        bound expression used to run each instruction.
    :arg pre_assign_check: optional callable invoked as
        ``pre_assign_check(target, value)`` before each assignment.
    :returns: the evaluated ``self.result``, mapped through *exec_mapper*.
    :raises RuntimeError: if some instructions can never become runnable.
    """
    context = exec_mapper.context

    done_insns = set()

    while True:
        discardable_vars = []
        insn = None

        try:
            # Scheduling happens one instruction at a time, based on
            # which variables are currently available.
            insn, discardable_vars = self.get_next_step(
                    frozenset(context.keys()),
                    frozenset(done_insns))
        except self.NoInstructionAvailable:
            # no available instructions: we're done
            break
        else:
            # Free variables nothing else needs before running the
            # instruction.
            for name in discardable_vars:
                del context[name]

            done_insns.add(insn)
            assignments = (
                    self.get_exec_function(insn, exec_mapper)(
                        exec_mapper.array_context,
                        insn,
                        exec_mapper.bound_expr,
                        exec_mapper))

        # Record the instruction's outputs in the context.
        assignees = insn.get_assignees()
        for target, value in assignments:
            if pre_assign_check is not None:
                pre_assign_check(target, value)

            assert target in assignees
            context[target] = value

    if len(done_insns) < len(self.instructions):
        # Some instructions never became runnable: report which
        # dependencies were missing for each of them.
        print("Unreachable instructions:")
        for insn in set(self.instructions) - done_insns:
            print(" ", str(insn).replace("\n", "\n "))
            from pymbolic import var
            print(
                " missing: ",
                ", ".join(
                    str(s) for s in
                    set(insn.get_dependencies())
                    - {var(v) for v in context.keys()}))

        raise RuntimeError(
            "not all instructions are reachable"
            "--did you forget to pass a value for a placeholder?")

    from pytools.obj_array import obj_array_vectorize
    return obj_array_vectorize(exec_mapper, self.result)
def interior_trace_pair(discrwb, vec):
    """Return a :class:`TracePair` for the interior faces of *discrwb*.

    :arg vec: a :class:`~meshmode.dof_array.DOFArray`, or a generic object
        array of them (handled component-wise).
    :returns: a :class:`TracePair` holding the interior ("self") and
        exterior ("opposite-face") traces.
    """
    i = discrwb.project("vol", "int_faces", vec)

    if (isinstance(vec, np.ndarray)
            and vec.dtype.char == "O"
            and not isinstance(vec, DOFArray)):
        e = obj_array_vectorize(
                lambda el: discrwb.opposite_face_connection()(el), i)
    else:
        # Bug fix: this branch previously left *e* unassigned, so calling
        # this function with a plain DOFArray raised UnboundLocalError.
        # Apply the opposite-face connection directly (as the sibling
        # trace-pair helpers do for non-object-array inputs).
        e = discrwb.opposite_face_connection()(i)

    return TracePair("int_faces", i, e)
def componentwise(f, expr):
    """Apply function *f* componentwise to object arrays and
    :class:`MultiVector` instances.

    *expr* is also allowed to be a scalar.

    :returns: the result of mapping *f* over the components of *expr*
        (or ``f(expr)`` itself for a scalar).
    """
    if isinstance(expr, MultiVector):
        # MultiVector provides its own component-map facility.
        return expr.map(f)

    from pytools.obj_array import obj_array_vectorize
    # obj_array_vectorize handles both object arrays and scalars.
    return obj_array_vectorize(f, expr)
def test_velocity_gradient_eoc(actx_factory, dim):
    """Test that the velocity gradient converges at the proper rate."""
    from mirgecom.fluid import velocity_gradient
    actx = actx_factory()

    order = 3

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    # Refine the mesh by integer factors of the base resolution.
    nel_1d_0 = 4
    for hn1 in [1, 2, 3, 4]:

        nel_1d = hn1 * nel_1d_0
        # Characteristic mesh spacing for the EOC fit.
        h = 1/nel_1d

        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(
            a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
        )

        discr = EagerDGDiscretization(actx, mesh, order=order)
        nodes = thaw(actx, discr.nodes())
        zeros = discr.zeros(actx)
        energy = zeros + 2.5

        # Manufactured state: velocity components are cos(x_i), so each
        # gradient row has a single -sin entry on the diagonal.
        mass = nodes[dim-1]*nodes[dim-1]
        velocity = make_obj_array([actx.np.cos(nodes[i]) for i in range(dim)])
        mom = mass*velocity

        q = join_conserved(dim, mass=mass, energy=energy, momentum=mom)
        cv = split_conserved(dim, q)

        grad_q = obj_array_vectorize(discr.grad, q)
        grad_cv = split_conserved(dim, grad_q)

        grad_v = velocity_gradient(discr, cv, grad_cv)

        def exact_grad_row(xdata, gdim, dim):
            # Row of the exact gradient: zero except -sin in column gdim.
            exact_grad_row = make_obj_array([zeros for _ in range(dim)])
            exact_grad_row[gdim] = -actx.np.sin(xdata)
            return exact_grad_row

        comp_err = make_obj_array([
            discr.norm(grad_v[i] - exact_grad_row(nodes[i], i, dim), np.inf)
            for i in range(dim)])
        err_max = comp_err.max()
        eoc.add_data_point(h, err_max)

    logger.info(eoc)
    # Either the observed convergence order is close to the expected one,
    # or the error is already at round-off levels.
    assert (
        eoc.order_estimate() >= order - 0.5
        or eoc.max_error() < 1e-9
    )
def reorder_potentials(self, potentials):
    """Reorder each potential in the object array *potentials* by the
    tree's sorted target ids."""
    import numpy as np
    from pytools.obj_array import obj_array_vectorize

    assert (isinstance(potentials, np.ndarray)
            and potentials.dtype.char == "O")

    def reorder_one(pot):
        return pot.with_queue(self.queue)[self.tree.sorted_target_ids]

    return obj_array_vectorize(reorder_one, potentials)
def _comparison(self, operator_func, other):
    """Apply *operator_func* entrywise, broadcasting scalar operands."""
    from numbers import Number

    if isinstance(other, DOFArray):
        # Pair up matching entries of both arrays.
        return obj_array_vectorize_n_args(operator_func, self, other)

    if isinstance(other, Number):
        def compare_entry(self_entry):
            return operator_func(self_entry, other)
        return obj_array_vectorize(compare_entry, self)

    # fall back to "best effort" (i.e. likely failure)
    return operator_func(self, other)
def __call__(self, expr):
    """Bind this operator to each component of *expr*, leaving zero
    components untouched."""
    from pytools.obj_array import obj_array_vectorize
    from grudge.tools import is_zero

    def bind_one(subexpr):
        if is_zero(subexpr):
            # Binding an operator to an identically-zero operand is
            # unnecessary; keep the zero as-is.
            return subexpr

        from grudge.symbolic.primitives import OperatorBinding
        return OperatorBinding(self, subexpr)

    return obj_array_vectorize(bind_one, expr)