def _apply_face_mass_operator(dcoll: DiscretizationCollection, dd, vec):
    if not isinstance(vec, DOFArray):
        return map_array_container(
            partial(_apply_face_mass_operator, dcoll, dd), vec)

    from grudge.geometry import area_element

    volm_discr = dcoll.discr_from_dd(dof_desc.DD_VOLUME)
    face_discr = dcoll.discr_from_dd(dd)
    dtype = vec.entry_dtype
    actx = vec.array_context

    assert len(face_discr.groups) == len(volm_discr.groups)
    surf_area_elements = area_element(
        actx, dcoll, dd=dd,
        _use_geoderiv_connection=actx.supports_nonscalar_broadcasting)

    return DOFArray(
        actx,
        data=tuple(
            actx.einsum("ifj,fej,fej->ei",
                        reference_face_mass_matrix(
                            actx,
                            face_element_group=afgrp,
                            vol_element_group=vgrp,
                            dtype=dtype),
                        surf_ae_i.reshape(
                            vgrp.mesh_el_group.nfaces,
                            vgrp.nelements, -1),
                        vec_i.reshape(
                            vgrp.mesh_el_group.nfaces,
                            vgrp.nelements,
                            afgrp.nunit_dofs),
                        arg_names=("ref_face_mass_mat", "jac_surf", "vec"),
                        tagged=(FirstAxisIsElementsTag(),))
            for vgrp, afgrp, vec_i, surf_ae_i in zip(volm_discr.groups,
                                                     face_discr.groups,
                                                     vec,
                                                     surf_area_elements)))
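
# A minimal NumPy-only sketch (not part of grudge) of the "ifj,fej,fej->ei"
# contraction above: for each element e, every face f contributes
# ref_mat[:, f, :] @ (jac_surf[f, e, :] * vec[f, e, :]) to the volume DOFs.
# All shapes below are made-up stand-ins.
import numpy as np

nfaces, nelements, nvol_dofs, nface_dofs = 3, 4, 6, 2
rng = np.random.default_rng(0)
ref_mat = rng.random((nvol_dofs, nfaces, nface_dofs))   # "ifj"
jac_surf = rng.random((nfaces, nelements, nface_dofs))  # "fej"
vec = rng.random((nfaces, nelements, nface_dofs))       # "fej"

result = np.einsum("ifj,fej,fej->ei", ref_mat, jac_surf, vec)

# Equivalent loop form, accumulating per-face contributions per element:
expected = np.zeros((nelements, nvol_dofs))
for e in range(nelements):
    for f in range(nfaces):
        expected[e] += ref_mat[:, f, :] @ (jac_surf[f, e] * vec[f, e])
assert np.allclose(result, expected)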
def _apply_mass_operator(dcoll: DiscretizationCollection, dd_out, dd_in, vec):
    if not isinstance(vec, DOFArray):
        return map_array_container(
            partial(_apply_mass_operator, dcoll, dd_out, dd_in), vec)

    from grudge.geometry import area_element

    in_discr = dcoll.discr_from_dd(dd_in)
    out_discr = dcoll.discr_from_dd(dd_out)

    actx = vec.array_context
    area_elements = area_element(
        actx, dcoll, dd=dd_in,
        _use_geoderiv_connection=actx.supports_nonscalar_broadcasting)

    return DOFArray(
        actx,
        data=tuple(
            actx.einsum("ij,ej,ej->ei",
                        reference_mass_matrix(
                            actx,
                            out_element_group=out_grp,
                            in_element_group=in_grp),
                        ae_i,
                        vec_i,
                        arg_names=("mass_mat", "jac", "vec"),
                        tagged=(FirstAxisIsElementsTag(),))
            for in_grp, out_grp, ae_i, vec_i in zip(
                in_discr.groups, out_discr.groups, area_elements, vec)))
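
# A minimal NumPy-only sketch (not part of grudge) of the "ij,ej,ej->ei"
# contraction above: per element e it applies the reference mass matrix to
# the Jacobian-weighted DOFs, i.e. mass_mat @ (jac[e] * vec[e]).
import numpy as np

nelements, ndofs = 5, 4
rng = np.random.default_rng(1)
mass_mat = rng.random((ndofs, ndofs))  # "ij"
jac = rng.random((nelements, ndofs))   # "ej"
vec = rng.random((nelements, ndofs))   # "ej"

result = np.einsum("ij,ej,ej->ei", mass_mat, jac, vec)
expected = (jac * vec) @ mass_mat.T
assert np.allclose(result, expected)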
def _apply_mass_operator(dcoll: DiscretizationCollection, dd_out, dd_in, vec):
    if isinstance(vec, np.ndarray):
        return obj_array_vectorize(
            lambda vi: _apply_mass_operator(dcoll, dd_out, dd_in, vi), vec)

    from grudge.geometry import area_element

    in_discr = dcoll.discr_from_dd(dd_in)
    out_discr = dcoll.discr_from_dd(dd_out)

    actx = vec.array_context
    area_elements = area_element(actx, dcoll, dd=dd_in)

    return DOFArray(
        actx,
        data=tuple(
            actx.einsum("ij,ej,ej->ei",
                        reference_mass_matrix(
                            actx,
                            out_element_group=out_grp,
                            in_element_group=in_grp),
                        ae_i,
                        vec_i,
                        arg_names=("mass_mat", "jac", "vec"),
                        tagged=(FirstAxisIsElementsTag(),))
            for in_grp, out_grp, ae_i, vec_i in zip(
                in_discr.groups, out_discr.groups, area_elements, vec)))
def _apply_stiffness_transpose_operator(dcoll: DiscretizationCollection,
                                        dd_out, dd_in, vec, xyz_axis):
    from grudge.geometry import \
        inverse_surface_metric_derivative, area_element

    in_discr = dcoll.discr_from_dd(dd_in)
    out_discr = dcoll.discr_from_dd(dd_out)

    actx = vec.array_context
    area_elements = area_element(actx, dcoll, dd=dd_in)
    inverse_jac_t = actx.np.stack(
        [inverse_surface_metric_derivative(actx, dcoll,
                                           rst_axis, xyz_axis, dd=dd_in)
         for rst_axis in range(dcoll.dim)])

    return DOFArray(
        actx,
        data=tuple(
            actx.einsum("dij,ej,ej,dej->ei",
                        reference_stiffness_transpose_matrix(
                            actx,
                            out_element_group=out_grp,
                            in_element_group=in_grp),
                        ae_i,
                        vec_i,
                        inv_jac_t_i,
                        arg_names=("ref_stiffT_mat", "jac", "vec", "inv_jac_t"),
                        tagged=(FirstAxisIsElementsTag(),))
            for out_grp, in_grp, vec_i, ae_i, inv_jac_t_i in zip(
                out_discr.groups, in_discr.groups,
                vec, area_elements, inverse_jac_t)))
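
# A minimal NumPy-only sketch (not part of grudge) of the
# "dij,ej,ej,dej->ei" contraction above: a sum over reference axes d of the
# stiffness-transpose matrix applied to metric-weighted DOFs.
import numpy as np

ndim, nelements, ndofs = 2, 3, 4
rng = np.random.default_rng(2)
stiff_t = rng.random((ndim, ndofs, ndofs))        # "dij"
jac = rng.random((nelements, ndofs))              # "ej"
vec = rng.random((nelements, ndofs))              # "ej"
inv_jac_t = rng.random((ndim, nelements, ndofs))  # "dej"

result = np.einsum("dij,ej,ej,dej->ei", stiff_t, jac, vec, inv_jac_t)

expected = np.zeros((nelements, ndofs))
for d in range(ndim):
    for e in range(nelements):
        expected[e] += stiff_t[d] @ (jac[e] * vec[e] * inv_jac_t[d, e])
assert np.allclose(result, expected)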
def _apply_face_mass_operator(dcoll: DiscretizationCollection, dd, vec):
    if isinstance(vec, np.ndarray):
        return obj_array_vectorize(
            lambda vi: _apply_face_mass_operator(dcoll, dd, vi), vec)

    from grudge.geometry import area_element

    volm_discr = dcoll.discr_from_dd(dof_desc.DD_VOLUME)
    face_discr = dcoll.discr_from_dd(dd)
    dtype = vec.entry_dtype
    actx = vec.array_context

    @memoize_in(actx, (_apply_face_mass_operator, "face_mass_knl"))
    def prg():
        t_unit = make_loopy_program(
            [
                "{[iel]: 0 <= iel < nelements}",
                "{[f]: 0 <= f < nfaces}",
                "{[idof]: 0 <= idof < nvol_nodes}",
                "{[jdof]: 0 <= jdof < nface_nodes}"
            ],
            """
            result[iel, idof] = sum(f, sum(jdof, mat[idof, f, jdof]
                                                 * jac_surf[f, iel, jdof]
                                                 * vec[f, iel, jdof]))
            """,
            name="face_mass")
        import loopy as lp
        from meshmode.transform_metadata import (ConcurrentElementInameTag,
                                                 ConcurrentDOFInameTag)
        return lp.tag_inames(t_unit, {
            "iel": ConcurrentElementInameTag(),
            "idof": ConcurrentDOFInameTag()})

    assert len(face_discr.groups) == len(volm_discr.groups)
    surf_area_elements = area_element(actx, dcoll, dd=dd)

    return DOFArray(
        actx,
        data=tuple(
            actx.call_loopy(
                prg(),
                mat=reference_face_mass_matrix(
                    actx,
                    face_element_group=afgrp,
                    vol_element_group=vgrp,
                    dtype=dtype),
                jac_surf=surf_ae_i.reshape(
                    vgrp.mesh_el_group.nfaces,
                    vgrp.nelements,
                    afgrp.nunit_dofs),
                vec=vec_i.reshape(
                    vgrp.mesh_el_group.nfaces,
                    vgrp.nelements,
                    afgrp.nunit_dofs))["result"]
            for vgrp, afgrp, vec_i, surf_ae_i in zip(volm_discr.groups,
                                                     face_discr.groups,
                                                     vec,
                                                     surf_area_elements)))
def main():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    dim = 2
    nel_1d = 16

    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(a=(-0.5,)*dim,
                                      b=(0.5,)*dim,
                                      nelements_per_axis=(nel_1d,)*dim)

    order = 3

    if dim == 2:
        # no deep meaning here, just a fudge factor
        dt = 0.75 / (nel_1d * order**2)
    elif dim == 3:
        # no deep meaning here, just a fudge factor
        dt = 0.45 / (nel_1d * order**2)
    else:
        raise ValueError("don't have a stable time step guesstimate")

    print("%d elements" % mesh.nelements)

    dcoll = DiscretizationCollection(actx, mesh, order=order)

    fields = flat_obj_array(bump(actx, dcoll),
                            [dcoll.zeros(actx) for i in range(dcoll.dim)])

    vis = make_visualizer(dcoll)

    def rhs(t, w):
        return wave_operator(dcoll, c=1, w=w)

    t = 0
    t_final = 3
    istep = 0
    while t < t_final:
        fields = rk4_step(fields, t, dt, rhs)

        if istep % 10 == 0:
            print(f"step: {istep} t: {t} L2: {op.norm(dcoll, fields[0], 2)} "
                  f"sol max: {op.nodal_max(dcoll, 'vol', fields[0])}")
            vis.write_vtk_file("fld-wave-eager-%04d.vtu" % istep, [
                ("u", fields[0]),
                ("v", fields[1:]),
            ])

        t += dt
        istep += 1
def _apply_inverse_mass_operator(dcoll: DiscretizationCollection,
                                 dd_out, dd_in, vec):
    if isinstance(vec, np.ndarray):
        return obj_array_vectorize(
            lambda vi: _apply_inverse_mass_operator(dcoll, dd_out, dd_in, vi),
            vec)

    from grudge.geometry import area_element

    if dd_out != dd_in:
        raise ValueError("Cannot compute inverse of a mass matrix mapping "
                         "between different element groups; inverse is not "
                         "guaranteed to be well-defined")

    actx = vec.array_context
    discr = dcoll.discr_from_dd(dd_in)
    inv_area_elements = 1./area_element(actx, dcoll, dd=dd_in)
    group_data = []
    for grp, jac_inv, vec_i in zip(discr.groups, inv_area_elements, vec):
        ref_mass_inverse = reference_inverse_mass_matrix(actx,
                                                         element_group=grp)
        group_data.append(
            # Based on https://arxiv.org/pdf/1608.03836.pdf
            # true_Minv ~ ref_Minv * ref_M * (1/jac_det) * ref_Minv
            actx.einsum("ei,ij,ej->ei",
                        jac_inv,
                        ref_mass_inverse,
                        vec_i,
                        tagged=(FirstAxisIsElementsTag(),)))

    return DOFArray(actx, data=tuple(group_data))
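
# A minimal NumPy-only sketch (not part of grudge) of the "ei,ij,ej->ei"
# contraction above: apply the reference inverse mass matrix, then scale
# nodally by 1/jac_det (cf. the weight-adjusted approach cited in the
# comment above).
import numpy as np

nelements, ndofs = 4, 3
rng = np.random.default_rng(3)
jac_inv = rng.random((nelements, ndofs))  # "ei"
ref_minv = rng.random((ndofs, ndofs))     # "ij"
vec = rng.random((nelements, ndofs))      # "ej"

result = np.einsum("ei,ij,ej->ei", jac_inv, ref_minv, vec)
expected = jac_inv * (vec @ ref_minv.T)
assert np.allclose(result, expected)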
def h_min_from_volume(dcoll: DiscretizationCollection,
                      dim=None, dd=None) -> float:
    """Returns a (minimum) characteristic length based on the volume of the
    elements. This length may not be representative if the elements have very
    high aspect ratios.

    :arg dim: an integer denoting topological dimension. If *None*, the
        spatial dimension specified by
        :attr:`grudge.DiscretizationCollection.dim` is used.
    :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
        Defaults to the base volume discretization if not provided.
    :returns: a scalar denoting the minimum characteristic length.
    """
    from grudge.reductions import nodal_min, elementwise_sum

    if dd is None:
        dd = DD_VOLUME
    dd = as_dofdesc(dd)

    if dim is None:
        dim = dcoll.dim

    ones = dcoll.discr_from_dd(dd).zeros(dcoll._setup_actx) + 1.0
    return nodal_min(
        dcoll, dd,
        elementwise_sum(dcoll, op.mass(dcoll, dd, ones)))**(1.0/dim)
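
# Worked numeric check (hypothetical numbers, not from any actual mesh) of
# the volume-based length scale used above: in 2D, a smallest element of
# measure 4e-4 gives h_min = (4e-4)**(1/2) = 0.02.
min_cell_measure = 4e-4
dim = 2
h_min = min_cell_measure**(1.0/dim)
assert abs(h_min - 0.02) < 1e-12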
def __init__(self, dcoll: DiscretizationCollection, remote_rank, vol_field,
             tag=None):
    self.tag = self.base_tag
    if tag is not None:
        self.tag += tag

    self.dcoll = dcoll
    self.array_context = vol_field.array_context
    self.remote_btag = BTAG_PARTITION(remote_rank)
    self.bdry_discr = dcoll.discr_from_dd(self.remote_btag)

    from grudge.op import project

    self.local_dof_array = project(dcoll, "vol", self.remote_btag, vol_field)

    local_data = self.array_context.to_numpy(flatten(self.local_dof_array))
    comm = self.dcoll.mpi_communicator

    self.send_req = comm.Isend(local_data, remote_rank, tag=self.tag)
    self.remote_data_host = np.empty_like(local_data)
    self.recv_req = comm.Irecv(self.remote_data_host, remote_rank, self.tag)
def integral(dcoll: DiscretizationCollection, dd, vec) -> float:
    """Numerically integrates a function represented by a
    :class:`~meshmode.dof_array.DOFArray` of degrees of freedom.

    :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
    :arg vec: a :class:`~meshmode.dof_array.DOFArray`
    :returns: a scalar denoting the evaluated integral.
    """
    from grudge.op import _apply_mass_operator

    dd = dof_desc.as_dofdesc(dd)

    ones = dcoll.discr_from_dd(dd).zeros(vec.array_context) + 1.0
    return nodal_sum(
        dcoll, dd, vec * _apply_mass_operator(dcoll, dd, dd, ones))
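
# A hedged usage sketch for `integral` (not taken from the grudge test
# suite; the setup mirrors the example `main` functions in this section):
# integrating the constant 1 over the unit square should recover its
# measure, i.e. a value close to 1.
import pyopencl as cl
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.mesh.generation import generate_regular_rect_mesh
from grudge import DiscretizationCollection
import grudge.op as op

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
actx = PyOpenCLArrayContext(queue)

mesh = generate_regular_rect_mesh(a=(-0.5,)*2, b=(0.5,)*2,
                                  nelements_per_axis=(8,)*2)
dcoll = DiscretizationCollection(actx, mesh, order=3)

ones = dcoll.discr_from_dd("vol").zeros(actx) + 1.0
vol = op.integral(dcoll, "vol", ones)  # expect ~1.0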
def __init__(self, dcoll: DiscretizationCollection,
             array_container: ArrayOrContainerT,
             remote_rank, tag=None):
    actx = get_container_context_recursively(array_container)
    btag = BTAG_PARTITION(remote_rank)

    local_bdry_data = project(dcoll, "vol", btag, array_container)
    comm = dcoll.mpi_communicator

    self.dcoll = dcoll
    self.array_context = actx
    self.remote_btag = btag
    self.bdry_discr = dcoll.discr_from_dd(btag)
    self.local_bdry_data = local_bdry_data
    self.local_bdry_data_np = \
        to_numpy(flatten(self.local_bdry_data, actx), actx)

    self.tag = self.base_tag
    if tag is not None:
        self.tag += tag

    # Here, we initialize both send and receive operations through
    # mpi4py `Request` (MPI_Request) instances for comm.Isend (MPI_Isend)
    # and comm.Irecv (MPI_Irecv) respectively. These initiate non-blocking
    # point-to-point communication requests and require explicit management
    # via the use of wait (MPI_Wait, MPI_Waitall, MPI_Waitany, MPI_Waitsome),
    # test (MPI_Test, MPI_Testall, MPI_Testany, MPI_Testsome), and cancel
    # (MPI_Cancel). The rank-local data `self.local_bdry_data_np` will have its
    # associated memory buffer sent across connected ranks and must not be
    # modified at the Python level during this process. Completion of the
    # requests is handled in :meth:`finish`.
    #
    # For more details on the mpi4py semantics, see:
    # https://mpi4py.readthedocs.io/en/stable/overview.html#nonblocking-communications
    #
    # NOTE: mpi4py currently (2021-11-03) holds a reference to the send
    # memory buffer (i.e. `self.local_bdry_data_np`) until the send
    # request is complete, however it is not clear that this is documented
    # behavior. We hold on to the buffer (via the instance attribute)
    # as well, just in case.
    self.send_req = comm.Isend(self.local_bdry_data_np,
                               remote_rank,
                               tag=self.tag)
    self.remote_data_host_numpy = np.empty_like(self.local_bdry_data_np)
    self.recv_req = comm.Irecv(self.remote_data_host_numpy,
                               remote_rank,
                               tag=self.tag)
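
# A standalone mpi4py sketch (assumes exactly two ranks; run with e.g.
# `mpiexec -n 2 python sketch.py`) of the Isend/Irecv + Wait pattern
# described in the comment above; the grudge class defers the waits to
# its `finish` method instead.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
other = 1 - rank  # peer rank, assuming a two-rank communicator

send_buf = np.full(4, rank, dtype=np.float64)
recv_buf = np.empty_like(send_buf)

send_req = comm.Isend(send_buf, other, tag=17)
recv_req = comm.Irecv(recv_buf, other, tag=17)

# send_buf must stay unmodified until the send request completes
recv_req.Wait()
send_req.Wait()
assert (recv_buf == other).all()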
def elementwise_integral(dcoll: DiscretizationCollection, dd, vec) -> DOFArray:
    """Numerically integrates a function represented by a
    :class:`~meshmode.dof_array.DOFArray` of degrees of freedom in
    each element of a discretization, given by *dd*.

    :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
    :arg vec: a :class:`~meshmode.dof_array.DOFArray`
    :returns: a :class:`~meshmode.dof_array.DOFArray` containing the
        elementwise integral of *vec*.
    """
    from grudge.op import _apply_mass_operator

    dd = dof_desc.as_dofdesc(dd)

    ones = dcoll.discr_from_dd(dd).zeros(vec.array_context) + 1.0
    return elementwise_sum(
        dcoll, dd, vec * _apply_mass_operator(dcoll, dd, dd, ones))
def elementwise_integral(
        dcoll: DiscretizationCollection, *args) -> ArrayOrContainerT:
    """Numerically integrates a function represented by a
    :class:`~meshmode.dof_array.DOFArray` of degrees of freedom in
    each element of a discretization, given by *dd*.

    May be called with ``(vec)`` or ``(dd, vec)``.

    The input *vec* can either be a :class:`~meshmode.dof_array.DOFArray` or
    an :class:`~arraycontext.container.ArrayContainer` with
    :class:`~meshmode.dof_array.DOFArray` entries. If the underlying
    array context (see :class:`arraycontext.ArrayContext`) for *vec*
    supports nonscalar broadcasting, all :class:`~meshmode.dof_array.DOFArray`
    entries will contain a single value for each element. Otherwise, the
    entries will have the same number of degrees of freedom as *vec*, but
    set to the same value.

    :arg dcoll: a :class:`grudge.discretization.DiscretizationCollection`.
    :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to
        one. Defaults to the base volume discretization if not provided.
    :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` of them.
    :returns: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` like *vec* containing
        the elementwise integral of *vec*.
    """
    if len(args) == 1:
        vec, = args
        dd = dof_desc.DOFDesc("vol", dof_desc.DISCR_TAG_BASE)
    elif len(args) == 2:
        dd, vec = args
    else:
        raise TypeError("invalid number of arguments")

    dd = dof_desc.as_dofdesc(dd)

    from grudge.op import _apply_mass_operator

    ones = dcoll.discr_from_dd(dd).zeros(vec.array_context) + 1.0
    return elementwise_sum(
        dcoll, dd, vec * _apply_mass_operator(dcoll, dd, dd, ones))
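
# A hedged usage sketch of the two call forms (setup repeated from the
# `integral` sketch earlier; this assumes the *args variant above is the
# one in use): elementwise integration of the constant 1 yields the
# per-element measures.
import pyopencl as cl
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.mesh.generation import generate_regular_rect_mesh
from grudge import DiscretizationCollection
import grudge.op as op

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
actx = PyOpenCLArrayContext(queue)
mesh = generate_regular_rect_mesh(a=(-0.5,)*2, b=(0.5,)*2,
                                  nelements_per_axis=(8,)*2)
dcoll = DiscretizationCollection(actx, mesh, order=3)

ones = dcoll.discr_from_dd("vol").zeros(actx) + 1.0
cell_vols = op.elementwise_integral(dcoll, "vol", ones)   # explicit dd
cell_vols_default = op.elementwise_integral(dcoll, ones)  # dd defaults to volume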
def _compute_local_gradient(dcoll: DiscretizationCollection, vec, xyz_axis):
    from grudge.geometry import inverse_surface_metric_derivative

    discr = dcoll.discr_from_dd(dof_desc.DD_VOLUME)
    actx = vec.array_context

    inverse_jac_t = actx.np.stack(
        [inverse_surface_metric_derivative(actx, dcoll, rst_axis, xyz_axis)
         for rst_axis in range(dcoll.dim)])
    return DOFArray(
        actx,
        data=tuple(
            actx.einsum("dei,dij,ej->ei",
                        inv_jac_t_i,
                        reference_derivative_matrices(actx, grp),
                        vec_i,
                        arg_names=("inv_jac_t", "ref_diff_mat", "vec"),
                        tagged=(FirstAxisIsElementsTag(),))
            for grp, vec_i, inv_jac_t_i in zip(
                discr.groups, vec, inverse_jac_t)))
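
# A minimal NumPy-only sketch (not part of grudge) of the "dei,dij,ej->ei"
# contraction above: the chain rule d/dx = sum_d (dr_d/dx) d/dr_d, with the
# reference derivative D_d applied to the DOFs and the metric factor applied
# pointwise at each output node.
import numpy as np

ndim, nelements, ndofs = 2, 3, 4
rng = np.random.default_rng(4)
inv_jac_t = rng.random((ndim, nelements, ndofs))  # "dei"
diff_mats = rng.random((ndim, ndofs, ndofs))      # "dij"
vec = rng.random((nelements, ndofs))              # "ej"

result = np.einsum("dei,dij,ej->ei", inv_jac_t, diff_mats, vec)

expected = np.zeros((nelements, ndofs))
for d in range(ndim):
    for e in range(nelements):
        expected[e] += inv_jac_t[d, e] * (diff_mats[d] @ vec[e])
assert np.allclose(result, expected)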
def dt_non_geometric_factors(
        dcoll: DiscretizationCollection, dd=None) -> list:
    r"""Computes the non-geometric scale factors following [Hesthaven_2008]_,
    section 6.4, for each element group in the *dd* discretization:

    .. math::

        c_{ng} = \operatorname{min}\left( \Delta r_i \right),

    where :math:`\Delta r_i` denotes the distance between two distinct
    nodal points on the reference element.

    :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
        Defaults to the base volume discretization if not provided.
    :returns: a :class:`list` of :class:`float` values denoting the minimum
        node distance on the reference element for each group.
    """
    if dd is None:
        dd = DD_VOLUME

    discr = dcoll.discr_from_dd(dd)
    min_delta_rs = []
    for grp in discr.groups:
        nodes = np.asarray(list(zip(*grp.unit_nodes)))
        nnodes = grp.nunit_dofs

        # NOTE: order 0 elements have a single node located at the centroid
        # of the reference element, which is equidistant from each vertex
        if grp.order == 0:
            assert nnodes == 1
            min_delta_rs.append(
                np.linalg.norm(
                    nodes[0] - grp.mesh_el_group.vertex_unit_coordinates()[0]))
        else:
            min_delta_rs.append(
                min(
                    np.linalg.norm(nodes[i] - nodes[j])
                    for i in range(nnodes) for j in range(nnodes) if i != j))

    return min_delta_rs
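
# A NumPy-only sketch (hypothetical nodes, not from meshmode) of the minimum
# node-distance computation above, for three vertex nodes of a reference
# triangle:
import numpy as np

nodes = np.array([[-1.0, -1.0], [1.0, -1.0], [-1.0, 1.0]])
n = len(nodes)
min_delta_r = min(np.linalg.norm(nodes[i] - nodes[j])
                  for i in range(n) for j in range(n) if i != j)
assert abs(min_delta_r - 2.0) < 1e-12  # the two legs have length 2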
def project(
        dcoll: DiscretizationCollection, src, tgt, vec) -> ArrayOrContainerT:
    """Project from one discretization to another, e.g. from the
    volume to the boundary, or from the base to an overintegrated
    quadrature discretization.

    :arg src: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
    :arg tgt: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
    :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` of them.
    :returns: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` like *vec*.
    """
    src = as_dofdesc(src)
    tgt = as_dofdesc(tgt)

    if isinstance(vec, Number) or src == tgt:
        return vec

    if not isinstance(vec, DOFArray):
        return map_array_container(partial(project, dcoll, src, tgt), vec)

    return dcoll.connection_from_dds(src, tgt)(vec)
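
# A hedged usage sketch for `project` (not taken from the grudge examples;
# the setup mirrors the `main` functions in this section): restrict volume
# data to the full domain boundary.
import pyopencl as cl
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.mesh import BTAG_ALL
from meshmode.mesh.generation import generate_regular_rect_mesh
from grudge import DiscretizationCollection
import grudge.op as op

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
actx = PyOpenCLArrayContext(queue)
mesh = generate_regular_rect_mesh(a=(-0.5,)*2, b=(0.5,)*2,
                                  nelements_per_axis=(8,)*2)
dcoll = DiscretizationCollection(actx, mesh, order=3)

u = dcoll.discr_from_dd("vol").zeros(actx) + 1.0
u_bdry = op.project(dcoll, "vol", BTAG_ALL, u)  # DOFArray on the boundary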
def local_d_dx(
        dcoll: DiscretizationCollection, xyz_axis, vec) -> ArrayOrContainerT:
    r"""Return the element-local derivative along axis *xyz_axis* of a
    function :math:`f` represented by *vec*:

    .. math::

        \frac{\partial f}{\partial \lbrace x,y,z\rbrace}\Big|_E

    :arg xyz_axis: an integer indicating the axis along which the derivative
        is taken.
    :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` of them.
    :returns: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` of them.
    """
    if not isinstance(vec, DOFArray):
        return map_array_container(partial(local_d_dx, dcoll, xyz_axis), vec)

    discr = dcoll.discr_from_dd(dof_desc.DD_VOLUME)
    actx = vec.array_context

    from grudge.geometry import inverse_surface_metric_derivative_mat
    inverse_jac_mat = inverse_surface_metric_derivative_mat(
        actx, dcoll,
        _use_geoderiv_connection=actx.supports_nonscalar_broadcasting)

    return _single_axis_derivative_kernel(
        actx, discr, discr,
        _reference_derivative_matrices, inverse_jac_mat, xyz_axis, vec,
        metric_in_matvec=False)
def project(dcoll: DiscretizationCollection, src, tgt, vec):
    """Project from one discretization to another, e.g. from the
    volume to the boundary, or from the base to an overintegrated
    quadrature discretization.

    :arg src: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
    :arg tgt: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
    :arg vec: a :class:`~meshmode.dof_array.DOFArray` or a
        :class:`~arraycontext.ArrayContainer`.
    """
    src = as_dofdesc(src)
    tgt = as_dofdesc(tgt)

    if src == tgt:
        return vec

    if isinstance(vec, np.ndarray):
        return obj_array_vectorize(
            lambda el: project(dcoll, src, tgt, el), vec)

    if isinstance(vec, Number):
        return vec

    return dcoll.connection_from_dds(src, tgt)(vec)
def weak_local_d_dx(
        dcoll: DiscretizationCollection, *args) -> ArrayOrContainerT:
    r"""Return the element-local weak derivative along axis *xyz_axis* of the
    volume function represented by *vec*.

    May be called with ``(xyz_axis, vec)`` or ``(dd_in, xyz_axis, vec)``.

    Specifically, this function computes the volume contribution of the
    weak derivative in the :math:`i`-th component (specified by *xyz_axis*)
    of a function :math:`f`, in each element :math:`E`, with respect to
    polynomial test functions :math:`\phi`:

    .. math::

        \int_E \partial_i\phi\,f\,\mathrm{d}x \sim
        \mathbf{D}_{E,i}^T \mathbf{M}_{E}^T\mathbf{f}|_E,

    where :math:`\mathbf{D}_{E,i}` is the polynomial differentiation matrix
    on :math:`E` for the :math:`i`-th spatial coordinate,
    :math:`\mathbf{M}_E` is the elemental mass matrix (see :func:`mass` for
    more information), and :math:`\mathbf{f}|_E` is a vector of coefficients
    for :math:`f` on :math:`E`.

    :arg dd_in: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to
        one. Defaults to the base volume discretization if not provided.
    :arg xyz_axis: an integer indicating the axis along which the derivative
        is taken.
    :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` of them.
    :returns: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` of them.
    """
    if len(args) == 2:
        xyz_axis, vec = args
        dd_in = dof_desc.DOFDesc("vol", dof_desc.DISCR_TAG_BASE)
    elif len(args) == 3:
        dd_in, xyz_axis, vec = args
    else:
        raise TypeError("invalid number of arguments")

    if not isinstance(vec, DOFArray):
        return map_array_container(
            partial(weak_local_d_dx, dcoll, dd_in, xyz_axis), vec)

    from grudge.geometry import inverse_surface_metric_derivative_mat

    in_discr = dcoll.discr_from_dd(dd_in)
    out_discr = dcoll.discr_from_dd(dof_desc.DD_VOLUME)

    actx = vec.array_context
    inverse_jac_mat = inverse_surface_metric_derivative_mat(
        actx, dcoll, dd=dd_in,
        times_area_element=True,
        _use_geoderiv_connection=actx.supports_nonscalar_broadcasting)

    return _single_axis_derivative_kernel(
        actx, out_discr, in_discr,
        _reference_stiffness_transpose_matrix, inverse_jac_mat, xyz_axis, vec,
        metric_in_matvec=True)
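
# A minimal NumPy-only sketch (stand-in matrices, not actual DG operators)
# of the matvec in the docstring formula above: the weak derivative applies
# D_i^T M^T, i.e. the transpose of the "stiffness" product M D_i.
import numpy as np

ndofs = 4
rng = np.random.default_rng(5)
D = rng.random((ndofs, ndofs))  # stand-in differentiation matrix
M = rng.random((ndofs, ndofs))  # stand-in mass matrix
f = rng.random(ndofs)

weak_df = D.T @ (M.T @ f)
assert np.allclose(weak_df, (M @ D).T @ f)  # same operator, assembled once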
def main():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    dim = 2
    nel_1d = 16

    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(a=(-0.5,)*dim,
                                      b=(0.5,)*dim,
                                      nelements_per_axis=(nel_1d,)*dim)

    order = 3

    if dim == 2:
        # no deep meaning here, just a fudge factor
        dt = 0.75 / (nel_1d * order**2)
    elif dim == 3:
        # no deep meaning here, just a fudge factor
        dt = 0.45 / (nel_1d * order**2)
    else:
        raise ValueError("don't have a stable time step guesstimate")

    print("%d elements" % mesh.nelements)

    from meshmode.discretization.poly_element import \
        QuadratureSimplexGroupFactory, \
        PolynomialWarpAndBlendGroupFactory
    dcoll = DiscretizationCollection(
        actx, mesh,
        discr_tag_to_group_factory={
            DISCR_TAG_BASE: PolynomialWarpAndBlendGroupFactory(order),
            DISCR_TAG_QUAD: QuadratureSimplexGroupFactory(3*order),
        })

    # bounded above by 1
    c = 0.2 + 0.8*bump(actx, dcoll, center=np.zeros(3), width=0.5)

    fields = flat_obj_array(bump(actx, dcoll),
                            [dcoll.zeros(actx) for i in range(dcoll.dim)])

    vis = make_visualizer(dcoll)

    def rhs(t, w):
        return wave_operator(dcoll, c=c, w=w)

    t = 0
    t_final = 3
    istep = 0
    while t < t_final:
        fields = rk4_step(fields, t, dt, rhs)

        if istep % 10 == 0:
            print(istep, t, op.norm(dcoll, fields[0], p=2))
            vis.write_vtk_file("fld-wave-eager-var-velocity-%04d.vtu" % istep, [
                ("c", c),
                ("u", fields[0]),
                ("v", fields[1:]),
            ])

        t += dt
        istep += 1
def dt_geometric_factors(dcoll: DiscretizationCollection, dd=None) -> DOFArray:
    r"""Computes a geometric scaling factor for each cell following
    [Hesthaven_2008]_, section 6.4, defined as the inradius (radius of an
    inscribed circle/sphere).

    Specifically, the inradius for each element is computed using the following
    formula from [Shewchuk_2002]_, Table 1, for simplicial cells
    (triangles/tetrahedra):

    .. math::

        r_D = \frac{d V}{\sum_{i=1}^{N_{faces}} F_i},

    where :math:`d` is the topological dimension, :math:`V` is the cell volume,
    and :math:`F_i` are the areas of each face of the cell.

    :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
        Defaults to the base volume discretization if not provided.
    :returns: a frozen :class:`~meshmode.dof_array.DOFArray` containing the
        geometric factors for each cell and at each nodal location.
    """
    from meshmode.discretization.poly_element import SimplexElementGroupBase

    if dd is None:
        dd = DD_VOLUME

    actx = dcoll._setup_actx
    volm_discr = dcoll.discr_from_dd(dd)

    if any(not isinstance(grp, SimplexElementGroupBase)
           for grp in volm_discr.groups):
        raise NotImplementedError(
            "Geometric factors are only implemented for simplex element groups")

    if volm_discr.dim != volm_discr.ambient_dim:
        from warnings import warn
        warn("The geometric factor for the characteristic length scale in "
             "time step estimation is not necessarily valid for non-volume-"
             "filling discretizations. Continuing anyway.", stacklevel=3)

    cell_vols = abs(
        op.elementwise_integral(dcoll, dd, volm_discr.zeros(actx) + 1.0))

    if dcoll.dim == 1:
        return freeze(cell_vols)

    dd_face = DOFDesc("all_faces", dd.discretization_tag)
    face_discr = dcoll.discr_from_dd(dd_face)

    # To get a single value for the total surface area of a cell, we
    # take the sum over the averaged face areas on each face.
    # NOTE: The face areas are the *same* at each face nodal location.
    # This assumes there are the *same* number of face nodes on each face.
    surface_areas = abs(
        op.elementwise_integral(dcoll, dd_face, face_discr.zeros(actx) + 1.0))
    surface_areas = DOFArray(
        actx,
        data=tuple(
            actx.einsum("fej->e",
                        face_ae_i.reshape(
                            vgrp.mesh_el_group.nfaces,
                            vgrp.nelements,
                            afgrp.nunit_dofs),
                        tagged=(FirstAxisIsElementsTag(),)) / afgrp.nunit_dofs
            for vgrp, afgrp, face_ae_i in zip(
                volm_discr.groups, face_discr.groups, surface_areas)))

    return freeze(
        DOFArray(
            actx,
            data=tuple(
                actx.einsum("e,ei->ei",
                            1/sae_i,
                            cv_i,
                            tagged=(FirstAxisIsElementsTag(),)) * dcoll.dim
                for cv_i, sae_i in zip(cell_vols, surface_areas))))
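
# Worked numeric check (hypothetical cell, not from any mesh) of the
# inradius formula above for a unit right triangle in d = 2: V = 1/2 and
# the "face areas" are the edge lengths 1, 1, sqrt(2).
import numpy as np

d, V, faces = 2, 0.5, [1.0, 1.0, np.sqrt(2.0)]
r = d*V/sum(faces)
# Matches the known inradius (a + b - c)/2 for legs a, b and hypotenuse c:
assert abs(r - (1.0 + 1.0 - np.sqrt(2.0))/2) < 1e-12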
def main(ctx_factory, dim=2, order=3, visualize=False, lazy=False):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)

    if lazy:
        actx = PytatoPyOpenCLArrayContext(queue)
    else:
        actx = PyOpenCLArrayContext(
            queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),
            force_device_scalars=True,
        )

    comm = MPI.COMM_WORLD
    num_parts = comm.Get_size()

    from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
    mesh_dist = MPIMeshDistributor(comm)

    nel_1d = 16

    if mesh_dist.is_mananger_rank():
        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(a=(-0.5,)*dim,
                                          b=(0.5,)*dim,
                                          nelements_per_axis=(nel_1d,)*dim)

        logger.info("%d elements", mesh.nelements)

        part_per_element = get_partition_by_pymetis(mesh, num_parts)

        local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)

        del mesh
    else:
        local_mesh = mesh_dist.receive_mesh_part()

    dcoll = DiscretizationCollection(actx, local_mesh, order=order,
                                     mpi_communicator=comm)

    fields = WaveState(
        u=bump(actx, dcoll),
        v=make_obj_array([dcoll.zeros(actx) for i in range(dcoll.dim)]))

    c = 1
    dt = actx.to_numpy(0.45 * estimate_rk4_timestep(actx, dcoll, c))

    vis = make_visualizer(dcoll)

    def rhs(t, w):
        return wave_operator(dcoll, c=c, w=w)

    compiled_rhs = actx.compile(rhs)

    if comm.rank == 0:
        logger.info("dt = %g", dt)

    import time
    start = time.time()

    t = 0
    t_final = 3
    istep = 0
    while t < t_final:
        if lazy:
            fields = thaw(freeze(fields, actx), actx)

        fields = rk4_step(fields, t, dt, compiled_rhs)

        l2norm = actx.to_numpy(op.norm(dcoll, fields.u, 2))

        if istep % 10 == 0:
            stop = time.time()
            linfnorm = actx.to_numpy(op.norm(dcoll, fields.u, np.inf))
            nodalmax = actx.to_numpy(op.nodal_max(dcoll, "vol", fields.u))
            nodalmin = actx.to_numpy(op.nodal_min(dcoll, "vol", fields.u))
            if comm.rank == 0:
                logger.info(f"step: {istep} t: {t} "
                            f"L2: {l2norm} "
                            f"Linf: {linfnorm} "
                            f"sol max: {nodalmax} "
                            f"sol min: {nodalmin} "
                            f"wall: {stop-start} ")
            if visualize:
                vis.write_parallel_vtk_file(
                    comm,
                    f"fld-wave-eager-mpi-{{rank:03d}}-{istep:04d}.vtu",
                    [
                        ("u", fields.u),
                        ("v", fields.v),
                    ])
            start = stop

        t += dt
        istep += 1

    # NOTE: These are here to ensure the solution is bounded for the
    # time interval specified
    assert l2norm < 1
from meshmode.mesh.generation import generate_box_mesh
from meshmode.array_context import PyOpenCLArrayContext
from arraycontext import thaw
from grudge.dof_desc import DTAG_BOUNDARY, FACE_RESTR_INTERIOR

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
actx = PyOpenCLArrayContext(queue)

nel = 10
coords = np.linspace(0, 2*np.pi, nel)
mesh = generate_box_mesh((coords,),
                         boundary_tag_to_face={"left": ["-x"],
                                               "right": ["+x"]})
dcoll = DiscretizationCollection(actx, mesh, order=1)


def initial_condition(x):
    # 'x' contains ndim arrays.
    # 'x[0]' gets the first coordinate value of all the nodes
    return actx.np.sin(x[0])


def left_boundary_condition(x, t):
    return actx.np.sin(x[0] - 2 * np.pi * t)


def flux(dcoll, u_tpair):
    dd = u_tpair.dd
    velocity = np.array([2 * np.pi])
def main():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    comm = MPI.COMM_WORLD
    num_parts = comm.Get_size()

    from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
    mesh_dist = MPIMeshDistributor(comm)

    dim = 2
    nel_1d = 16

    if mesh_dist.is_mananger_rank():
        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(a=(-0.5,)*dim,
                                          b=(0.5,)*dim,
                                          nelements_per_axis=(nel_1d,)*dim)

        print("%d elements" % mesh.nelements)

        part_per_element = get_partition_by_pymetis(mesh, num_parts)

        local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)

        del mesh
    else:
        local_mesh = mesh_dist.receive_mesh_part()

    order = 3

    dcoll = DiscretizationCollection(actx, local_mesh, order=order,
                                     mpi_communicator=comm)

    if dim == 2:
        # no deep meaning here, just a fudge factor
        dt = 0.75 / (nel_1d * order**2)
    elif dim == 3:
        # no deep meaning here, just a fudge factor
        dt = 0.45 / (nel_1d * order**2)
    else:
        raise ValueError("don't have a stable time step guesstimate")

    fields = flat_obj_array(bump(actx, dcoll),
                            [dcoll.zeros(actx) for i in range(dcoll.dim)])

    vis = make_visualizer(dcoll)

    def rhs(t, w):
        return wave_operator(dcoll, c=1, w=w)

    t = 0
    t_final = 3
    istep = 0
    while t < t_final:
        fields = rk4_step(fields, t, dt, rhs)

        if istep % 10 == 0:
            print(istep, t, op.norm(dcoll, fields[0], p=2))
            vis.write_parallel_vtk_file(
                comm,
                f"fld-wave-eager-mpi-{{rank:03d}}-{istep:04d}.vtu",
                [
                    ("u", fields[0]),
                    ("v", fields[1:]),
                ])

        t += dt
        istep += 1
def main(ctx_factory, dim=2, order=3, visualize=False):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),
        force_device_scalars=True,
    )

    nel_1d = 16

    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(
        a=(-0.5,)*dim, b=(0.5,)*dim,
        nelements_per_axis=(nel_1d,)*dim)

    logger.info("%d elements", mesh.nelements)

    from meshmode.discretization.poly_element import \
        QuadratureSimplexGroupFactory, \
        default_simplex_group_factory
    dcoll = DiscretizationCollection(
        actx, mesh,
        discr_tag_to_group_factory={
            DISCR_TAG_BASE: default_simplex_group_factory(base_dim=dim,
                                                          order=order),
            DISCR_TAG_QUAD: QuadratureSimplexGroupFactory(3*order),
        }
    )

    # bounded above by 1
    c = 0.2 + 0.8*bump(actx, dcoll, center=np.zeros(3), width=0.5)
    dt = 0.5 * estimate_rk4_timestep(actx, dcoll, c=1)

    fields = flat_obj_array(
        bump(actx, dcoll),
        [dcoll.zeros(actx) for i in range(dcoll.dim)]
    )

    vis = make_visualizer(dcoll)

    def rhs(t, w):
        return wave_operator(dcoll, c=c, w=w)

    logger.info("dt = %g", dt)

    t = 0
    t_final = 3
    istep = 0
    while t < t_final:
        fields = rk4_step(fields, t, dt, rhs)

        if istep % 10 == 0:
            logger.info(f"step: {istep} t: {t} "
                        f"L2: {op.norm(dcoll, fields[0], 2)} "
                        f"Linf: {op.norm(dcoll, fields[0], np.inf)} "
                        f"sol max: {op.nodal_max(dcoll, 'vol', fields[0])} "
                        f"sol min: {op.nodal_min(dcoll, 'vol', fields[0])}")
            if visualize:
                vis.write_vtk_file(
                    f"fld-wave-eager-var-velocity-{istep:04d}.vtu",
                    [
                        ("c", c),
                        ("u", fields[0]),
                        ("v", fields[1:]),
                    ]
                )

        t += dt
        istep += 1

    # NOTE: These are here to ensure the solution is bounded for the
    # time interval specified
    assert op.norm(dcoll, fields[0], 2) < 1