Example #1
def _np_like_for(val):
    """Return the numpy-like namespace to use for *val*: the array context's
    ``np`` namespace if *val* carries one, plain :mod:`numpy` otherwise."""
    from arraycontext import get_container_context_recursively
    actx = get_container_context_recursively(val)
    if actx is None:
        return np
    else:
        return actx.np
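
A minimal usage sketch (the array ``vec`` is hypothetical): the helper lets the same code path operate on plain numpy data and on array-context-backed containers.

# Hypothetical usage: `vec` may be a plain numpy array or an actx-backed container.
xp = _np_like_for(vec)
result = xp.sqrt(xp.abs(vec))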
Example #2
File: em.py Project: sll2/grudge
    def absorbing_bc(self, w):
        """Construct part of the flux operator template for 1st order
        absorbing boundary conditions.
        """

        actx = get_container_context_recursively(w)
        absorb_normal = thaw(self.dcoll.normal(dd=self.absorb_tag), actx)

        e, h = self.split_eh(w)

        if self.fixed_material:
            epsilon = self.epsilon
            mu = self.mu

        absorb_Z = (mu / epsilon)**0.5  # noqa: N806
        absorb_Y = 1 / absorb_Z  # noqa: N806

        absorb_e = op.project(self.dcoll, "vol", self.absorb_tag, e)
        absorb_h = op.project(self.dcoll, "vol", self.absorb_tag, h)

        bc = flat_obj_array(
            absorb_e + 1 / 2 *
            (self.space_cross_h(absorb_normal,
                                self.space_cross_e(absorb_normal, absorb_e)) -
             absorb_Z * self.space_cross_h(absorb_normal, absorb_h)),
            absorb_h + 1 / 2 *
            (self.space_cross_e(absorb_normal,
                                self.space_cross_h(absorb_normal, absorb_h)) +
             absorb_Y * self.space_cross_e(absorb_normal, absorb_e)))

        return bc
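
In formula form (a direct transcription of the code above, with n the outward normal on the absorbing boundary, E_b and H_b the projected field traces, and Z, Y the impedance and admittance), the returned boundary state is

\begin{aligned}
\mathrm{bc}_E &= E_b + \tfrac{1}{2}\bigl(n \times (n \times E_b) - Z\, n \times H_b\bigr) \\
\mathrm{bc}_H &= H_b + \tfrac{1}{2}\bigl(n \times (n \times H_b) + Y\, n \times E_b\bigr)
\end{aligned}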
Example #3
def norm(dcoll: DiscretizationCollection, vec, p, dd=None) -> "DeviceScalar":
    r"""Return the vector p-norm of a function represented
    by its vector of degrees of freedom *vec*.

    :arg vec: a :class:`~meshmode.dof_array.DOFArray` or an
        :class:`~arraycontext.container.ArrayContainer` of them.
    :arg p: an integer denoting the order of the integral norm. Currently,
        only values of 2 or `numpy.inf` are supported.
    :arg dd: a :class:`~grudge.dof_desc.DOFDesc`, or a value convertible to one.
        Defaults to the base volume discretization if not provided.
    :returns: a nonnegative scalar denoting the norm.
    """
    if dd is None:
        dd = dof_desc.DD_VOLUME

    from arraycontext import get_container_context_recursively
    actx = get_container_context_recursively(vec)

    dd = dof_desc.as_dofdesc(dd)

    if p == 2:
        from grudge.op import _apply_mass_operator
        return actx.np.sqrt(
            actx.np.abs(
                nodal_sum(
                    dcoll, dd,
                    actx.np.conjugate(vec) *
                    _apply_mass_operator(dcoll, dd, dd, vec))))
    elif p == np.inf:
        return nodal_max(dcoll, dd, actx.np.abs(vec))
    else:
        raise ValueError("unsupported norm order")
Example #4
File: em.py Project: sll2/grudge
    def flux(self, wtpair):
        """Compute the numerical flux for variable coefficients.

        The flux is controlled by ``self.flux_type``, which may be a number
        in [0, 1] for anything between central and upwind, or "lf" for
        Lax-Friedrichs.

        As per Hesthaven and Warburton page 433.
        """

        actx = get_container_context_recursively(wtpair)
        normal = thaw(self.dcoll.normal(wtpair.dd), actx)

        if self.fixed_material:
            e, h = self.split_eh(wtpair)
            epsilon = self.epsilon
            mu = self.mu

        Z_int = (mu / epsilon)**0.5  # noqa: N806
        Y_int = 1 / Z_int  # noqa: N806
        Z_ext = (mu / epsilon)**0.5  # noqa: N806
        Y_ext = 1 / Z_ext  # noqa: N806

        if self.flux_type == "lf":
            # if self.fixed_material:
            #     max_c = (self.epsilon*self.mu)**(-0.5)

            return flat_obj_array(
                # flux e,
                1 / 2 * (
                    -self.space_cross_h(normal, h.ext - h.int)
                    # multiplication by epsilon undoes material divisor below
                    #-max_c*(epsilon*e.int - epsilon*e.ext)
                ),
                # flux h
                1 / 2 * (
                    self.space_cross_e(normal, e.ext - e.int)
                    # multiplication by mu undoes material divisor below
                    #-max_c*(mu*h.int - mu*h.ext)
                ))
        elif isinstance(self.flux_type, (int, float)):
            # see doc/maxima/maxwell.mac
            return flat_obj_array(
                # flux e,
                (-1 / (Z_int + Z_ext) * self.space_cross_h(
                    normal,
                    Z_ext * (h.ext - h.int) -
                    self.flux_type * self.space_cross_e(normal, e.ext - e.int))
                 ),
                # flux h
                (1 / (Y_int + Y_ext) * self.space_cross_e(
                    normal,
                    Y_ext * (e.ext - e.int) +
                    self.flux_type * self.space_cross_h(normal, h.ext - h.int))
                 ),
            )
        else:
            raise ValueError("maxwell: invalid flux_type (%s)" %
                             self.flux_type)
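
In formula form (a direct transcription of the interpolated-flux branch above, with \alpha = ``flux_type``, interior/exterior traces written with superscripts -/+ and n the face normal):

\begin{aligned}
\mathrm{flux}_E &= \frac{-1}{Z^- + Z^+}\, n \times \bigl(Z^+ (H^+ - H^-) - \alpha\, n \times (E^+ - E^-)\bigr) \\
\mathrm{flux}_H &= \frac{1}{Y^- + Y^+}\, n \times \bigl(Y^+ (E^+ - E^-) + \alpha\, n \times (H^+ - H^-)\bigr)
\end{aligned}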
Example #5
    def __init__(self,
                 dcoll: DiscretizationCollection,
                 array_container: ArrayOrContainerT,
                 remote_rank, tag=None):
        actx = get_container_context_recursively(array_container)
        btag = BTAG_PARTITION(remote_rank)

        local_bdry_data = project(dcoll, "vol", btag, array_container)
        comm = dcoll.mpi_communicator

        self.dcoll = dcoll
        self.array_context = actx
        self.remote_btag = btag
        self.bdry_discr = dcoll.discr_from_dd(btag)
        self.local_bdry_data = local_bdry_data
        self.local_bdry_data_np = \
            to_numpy(flatten(self.local_bdry_data, actx), actx)

        self.tag = self.base_tag
        if tag is not None:
            self.tag += tag

        # Here, we initialize both send and receive operations through
        # mpi4py `Request` (MPI_Request) instances for comm.Isend (MPI_Isend)
        # and comm.Irecv (MPI_Irecv) respectively. These initiate non-blocking
        # point-to-point communication requests and require explicit management
        # via the use of wait (MPI_Wait, MPI_Waitall, MPI_Waitany, MPI_Waitsome),
        # test (MPI_Test, MPI_Testall, MPI_Testany, MPI_Testsome), and cancel
        # (MPI_Cancel). The rank-local data `self.local_bdry_data_np` will have its
        # associated memory buffer sent across connected ranks and must not be
        # modified at the Python level during this process. Completion of the
        # requests is handled in :meth:`finish`.
        #
        # For more details on the mpi4py semantics, see:
        # https://mpi4py.readthedocs.io/en/stable/overview.html#nonblocking-communications
        #
        # NOTE: mpi4py currently (2021-11-03) holds a reference to the send
        # memory buffer (i.e. `self.local_bdry_data_np`) until the send
        # request is complete; however, it is not clear that this is documented
        # behavior. We hold on to the buffer (via the instance attribute)
        # as well, just in case.
        self.send_req = comm.Isend(self.local_bdry_data_np,
                                   remote_rank,
                                   tag=self.tag)
        self.remote_data_host_numpy = np.empty_like(self.local_bdry_data_np)
        self.recv_req = comm.Irecv(self.remote_data_host_numpy,
                                   remote_rank,
                                   tag=self.tag)
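
The comment above defers completion of the requests to a ``finish`` method. A minimal sketch of what such a method might look like (hypothetical, assuming ``from_numpy`` and ``unflatten`` from arraycontext are imported at module level): it waits on both requests and rebuilds a device array container from the received host buffer.

    def finish(self):
        # Wait for the incoming boundary data to arrive.
        self.recv_req.Wait()
        actx = self.array_context
        # Rebuild an array container shaped like the local boundary data
        # from the flat numpy receive buffer.
        remote_bdry_data = unflatten(
            self.local_bdry_data,
            from_numpy(self.remote_data_host_numpy, actx), actx)
        # Ensure the send buffer is no longer in use before returning.
        self.send_req.Wait()
        return remote_bdry_data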
Example #6
def _advance_state_stepper_func(rhs,
                                timestepper,
                                state,
                                t_final,
                                dt=0,
                                t=0.0,
                                istep=0,
                                pre_step_callback=None,
                                post_step_callback=None,
                                logmgr=None,
                                eos=None,
                                dim=None):
    """Advance state from some time (t) to some time (t_final).

    Parameters
    ----------
    rhs
        Function that should return the time derivative of the state.
        It takes time and state as arguments and is called as
        ``rhs(t, state)``.
    timestepper
        Function that advances the state from t=time to t=(time+dt) and
        returns the advanced state. It is called as
        ``timestepper(state, t, dt, rhs)``.
    state: numpy.ndarray
        Agglomerated object array containing at least the state variables that
        will be advanced by this stepper
    t_final: float
        Simulated time at which to stop
    t: float
        Time at which to start
    dt: float
        Initial timestep size to use, optional if dt is adaptive
    istep: int
        Step number from which to start
    pre_step_callback
        An optional user-defined function, with signature:
        ``state, dt = pre_step_callback(step, t, dt, state)``,
        to be called before the timestepper is called for that particular step.
    post_step_callback
        An optional user-defined function, with signature:
        ``state, dt = post_step_callback(step, t, dt, state)``,
        to be called after the timestepper is called for that particular step.

    Returns
    -------
    istep: int
        the current step number
    t: float
        the current time
    state: numpy.ndarray
        the current state
    """
    t = np.float64(t)

    if t_final <= t:
        return istep, t, state

    actx = get_container_context_recursively(state)

    compiled_rhs = _compile_rhs(actx, rhs)

    while t < t_final:
        state = _force_evaluation(actx, state)

        if logmgr:
            logmgr.tick_before()

        if pre_step_callback is not None:
            state, dt = pre_step_callback(state=state, step=istep, t=t, dt=dt)

        state = timestepper(state=state, t=t, dt=dt, rhs=compiled_rhs)

        t += dt
        istep += 1

        if post_step_callback is not None:
            state, dt = post_step_callback(state=state, step=istep, t=t, dt=dt)

        if logmgr:
            set_dt(logmgr, dt)
            set_sim_state(logmgr, dim, state, eos)
            logmgr.tick_after()

    return istep, t, state
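
A minimal usage sketch (``my_rhs`` and ``state0`` are hypothetical): a forward-Euler step function that matches the documented ``timestepper(state, t, dt, rhs)`` signature, driven up to a final time.

def euler_step(state, t, dt, rhs):
    # Simple forward-Euler update; any callable with this signature works.
    return state + dt * rhs(t, state)

istep, t, state = _advance_state_stepper_func(
    rhs=my_rhs, timestepper=euler_step, state=state0,
    t=0.0, t_final=1.0, dt=1e-3)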
Example #7
def _advance_state_leap(rhs,
                        timestepper,
                        state,
                        t_final,
                        dt=0,
                        component_id="state",
                        t=0.0,
                        istep=0,
                        pre_step_callback=None,
                        post_step_callback=None,
                        logmgr=None,
                        eos=None,
                        dim=None):
    """Advance state from some time *t* to some time *t_final* using :mod:`leap`.

    Parameters
    ----------
    rhs
        Function that should return the time derivative of the state.
        It takes time and state as arguments and is called as
        ``rhs(t, state)``.
    timestepper
        An instance of :class:`leap.MethodBuilder`.
    state: numpy.ndarray
        Agglomerated object array containing at least the state variables that
        will be advanced by this stepper
    t_final: float
        Simulated time at which to stop
    component_id
        State id (required input for leap method generation)
    t: float
        Time at which to start
    dt: float
        Initial timestep size to use, optional if dt is adaptive
    istep: int
        Step number from which to start
    pre_step_callback
        An optional user-defined function, with signature:
        ``state, dt = pre_step_callback(step, t, dt, state)``,
        to be called before the timestepper is called for that particular step.
    post_step_callback
        An optional user-defined function, with signature:
        ``state, dt = post_step_callback(step, t, dt, state)``,
        to be called after the timestepper is called for that particular step.

    Returns
    -------
    istep: int
        the current step number
    t: float
        the current time
    state: numpy.ndarray
        the current state
    """
    if t_final <= t:
        return istep, t, state

    actx = get_container_context_recursively(state)

    compiled_rhs = _compile_rhs(actx, rhs)
    stepper_cls = generate_singlerate_leap_advancer(timestepper, component_id,
                                                    compiled_rhs, t, dt, state)

    while t < t_final:
        state = _force_evaluation(actx, state)

        if pre_step_callback is not None:
            state, dt = pre_step_callback(state=state, step=istep, t=t, dt=dt)
            stepper_cls.dt = dt

        # Leap interface here is *a bit* different.
        for event in stepper_cls.run(t_end=t + dt):
            if isinstance(event, stepper_cls.StateComputed):
                state = event.state_component
                t += dt

                if post_step_callback is not None:
                    state, dt = post_step_callback(state=state,
                                                   step=istep,
                                                   t=t,
                                                   dt=dt)
                    stepper_cls.dt = dt

                istep += 1

    return istep, t, state
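
A minimal usage sketch (``my_rhs`` and ``state0`` are hypothetical), assuming leap's ``RK4MethodBuilder``; the builder's component id must match the ``component_id`` argument.

from leap.rk import RK4MethodBuilder

stepper = RK4MethodBuilder("state")
istep, t, state = _advance_state_leap(
    rhs=my_rhs, timestepper=stepper, state=state0,
    t=0.0, t_final=1.0, dt=1e-3, component_id="state")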
Example #8
def _np_like_for(val):
    actx = get_container_context_recursively(val)
    if actx is None:
        return np
    else:
        return actx.np