Example #1
import numpy as np

from arraycontext import thaw
import grudge.op as op
from grudge.discretization import DiscretizationCollection


def test_geometric_factors_regular_refinement(actx_factory, name):
    from grudge.dt_utils import dt_geometric_factors

    actx = actx_factory()

    # {{{ cases

    if name == "interval":
        from mesh_data import BoxMeshBuilder
        builder = BoxMeshBuilder(ambient_dim=1)
    elif name == "box2d":
        from mesh_data import BoxMeshBuilder
        builder = BoxMeshBuilder(ambient_dim=2)
    elif name == "box3d":
        from mesh_data import BoxMeshBuilder
        builder = BoxMeshBuilder(ambient_dim=3)
    else:
        raise ValueError("unknown geometry name: %s" % name)

    # }}}

    min_factors = []
    for resolution in builder.resolutions:
        mesh = builder.get_mesh(resolution, builder.mesh_order)
        dcoll = DiscretizationCollection(actx, mesh, order=builder.order)
        min_factors.append(
            actx.to_numpy(
                op.nodal_min(dcoll, "vol",
                             thaw(dt_geometric_factors(dcoll), actx))))

    # Resolution doubles with each refinement, so the minimum geometric
    # factor halves; consecutive entries thus satisfy gf_i / gf_{i+1} = 2.
    min_factors = np.asarray(min_factors)
    ratios = min_factors[:-1] / min_factors[1:]
    assert np.all(np.isclose(ratios, 2))
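
To see the final assertion in isolation, here is a minimal pure-NumPy sketch of the same ratio check (the factor values are illustrative, not taken from an actual run):

import numpy as np

# Hypothetical minimum geometric factors from three successive
# refinements, each half of the previous one.
min_factors = np.asarray([0.4, 0.2, 0.1])

ratios = min_factors[:-1] / min_factors[1:]   # -> [2.0, 2.0]
assert np.all(np.isclose(ratios, 2))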
Example #2
def get_sim_timestep(discr,
                     state,
                     t,
                     dt,
                     cfl,
                     eos,
                     t_final,
                     constant_cfl=False):
    """Return the maximum stable timestep for a typical fluid simulation.

    This routine returns *dt*, the user-defined constant timestep, or
    *max_dt*, the maximum domain-wide stability-limited timestep for a
    fluid simulation. Internally it calls the collective
    :func:`~grudge.op.nodal_min`, which makes the result domain-wide
    regardless of the parallel decomposition.

    Two modes are supported:
        - Constant DT mode: returns the minimum of (t_final-t, dt)
        - Constant CFL mode: returns (cfl * max_dt)

    .. important::
        The current implementation calculates an acoustic-limited timestep
        and CFL for an inviscid fluid. Adding viscous fluxes will require
        modifying this routine.

    Parameters
    ----------
    discr
        The grudge discretization or discretization collection to use.
    state: :class:`~mirgecom.fluid.ConservedVars`
        The fluid state.
    t: float
        Current time
    t_final: float
        Final time
    dt: float
        The current timestep
    cfl: float
        The current CFL number
    eos: :class:`~mirgecom.eos.GasEOS`
        Gas equation-of-state supporting speed_of_sound
    constant_cfl: bool
        True if running constant CFL mode

    Returns
    -------
    float
        The maximum stable DT based on inviscid fluid acoustic wavespeed.
    """
    mydt = dt
    t_remaining = max(0, t_final - t)
    if constant_cfl:
        from mirgecom.inviscid import get_inviscid_timestep
        from grudge.op import nodal_min
        mydt = cfl * nodal_min(
            discr, "vol", get_inviscid_timestep(discr=discr, eos=eos,
                                                cv=state))
    return min(t_remaining, mydt)
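
A usage sketch for the routine above, assuming `discr`, `state`, and `eos` have been set up as in the other examples on this page (the timestep and CFL values are illustrative):

t, t_final = 0.0, 1.0
dt = 1e-3    # user-specified constant timestep
cfl = 0.5    # only consulted in constant-CFL mode

# Constant-DT mode: simply clips dt against the remaining time.
dt_used = get_sim_timestep(discr, state, t, dt, cfl, eos, t_final)

# Constant-CFL mode: queries the domain-wide acoustic limit instead.
# This is a collective call, so every rank must execute it.
dt_used = get_sim_timestep(discr, state, t, dt, cfl, eos, t_final,
                           constant_cfl=True)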
Example #3
import numpy as np

from pytools.obj_array import make_obj_array
from grudge.eager import EagerDGDiscretization
from mirgecom.fluid import make_conserved
from mirgecom.eos import IdealSingleGas
from mirgecom.transport import SimpleTransport
from mirgecom.gas_model import GasModel, make_fluid_state


def test_viscous_timestep(actx_factory, dim, mu, vel):
    """Test the viscous timestep size against the expected formula."""
    actx = actx_factory()
    nel_1d = 4

    from meshmode.mesh.generation import generate_regular_rect_mesh

    mesh = generate_regular_rect_mesh(a=(1.0, ) * dim,
                                      b=(2.0, ) * dim,
                                      nelements_per_axis=(nel_1d, ) * dim)

    order = 1

    discr = EagerDGDiscretization(actx, mesh, order=order)
    zeros = discr.zeros(actx)
    ones = zeros + 1.0

    velocity = make_obj_array([zeros + vel for _ in range(dim)])

    massval = 1
    mass = massval * ones

    # I *think* this energy should yield c=1.0
    energy = zeros + 1.0 / (1.4 * .4)
    mom = mass * velocity
    species_mass = None

    cv = make_conserved(dim,
                        mass=mass,
                        energy=energy,
                        momentum=mom,
                        species_mass=species_mass)

    from grudge.dt_utils import characteristic_lengthscales
    chlen = characteristic_lengthscales(actx, discr)
    from grudge.op import nodal_min
    chlen_min = nodal_min(discr, "vol", chlen)

    mu = mu * chlen_min
    if mu < 0:
        mu = 0
        tv_model = None
    else:
        tv_model = SimpleTransport(viscosity=mu)

    eos = IdealSingleGas()
    gas_model = GasModel(eos=eos, transport=tv_model)
    fluid_state = make_fluid_state(cv, gas_model)

    from mirgecom.viscous import get_viscous_timestep
    dt_field = get_viscous_timestep(discr, fluid_state)

    speed_total = fluid_state.wavespeed
    dt_expected = chlen / (speed_total + (mu / chlen))

    error = (dt_expected - dt_field) / dt_expected
    assert actx.to_numpy(discr.norm(error, np.inf)) == 0
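
The expected value in this test combines the acoustic and viscous limits, dt ~ h / (wavespeed + mu / h). A minimal NumPy sketch of the same arithmetic (the numbers are illustrative only):

chlen = 0.25         # characteristic element length scale h
speed_total = 2.0    # |v| + c, the local wavespeed
mu = 0.1             # dynamic viscosity

# The viscous term mu/h acts like an extra wavespeed and shrinks
# the stable timestep.
dt_expected = chlen / (speed_total + mu / chlen)   # 0.25 / 2.4 ~ 0.104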
Example #4
def get_sim_timestep(discr,
                     state,
                     t,
                     dt,
                     cfl,
                     eos,
                     t_final,
                     constant_cfl=False):
    """Return the maximum stable timestep for a typical fluid simulation.

    This routine returns *dt*, the user-defined constant timestep, or *max_dt*,
    the maximum domain-wide stability-limited timestep for a fluid simulation.

    .. important::
        This routine internally calls the collective
        :func:`~grudge.op.nodal_min`, which makes the result domain-wide
        regardless of the parallel domain decomposition. Thus this routine
        must be called *collectively* (i.e. by all ranks).

    Two modes are supported:
        - Constant DT mode: returns the minimum of (t_final-t, dt)
        - Constant CFL mode: returns (cfl * max_dt)

    Parameters
    ----------
    discr
        The grudge discretization or discretization collection to use.
    state: :class:`~mirgecom.fluid.ConservedVars`
        The fluid state.
    t: float
        Current time
    t_final: float
        Final time
    dt: float
        The current timestep
    cfl: float
        The current CFL number
    eos: :class:`~mirgecom.eos.GasEOS`
        Gas equation-of-state optionally with a non-empty
        :class:`~mirgecom.transport.TransportModel` for viscous transport properties.
    constant_cfl: bool
        True if running constant CFL mode

    Returns
    -------
    float
        The maximum stable DT based on a viscous fluid.
    """
    t_remaining = max(0, t_final - t)
    mydt = dt
    if constant_cfl:
        from mirgecom.viscous import get_viscous_timestep
        from grudge.op import nodal_min
        mydt = cfl * nodal_min(
            discr, "vol", get_viscous_timestep(discr=discr, eos=eos, cv=state))
    return min(t_remaining, mydt)
Example #5
    def estimate_rk4_timestep(self, actx, dcoll, **kwargs):
        r"""Estimate the largest stable timestep for an RK4 method.

        :arg actx: a :class:`arraycontext.ArrayContext`.
        :arg dcoll: a :class:`grudge.discretization.DiscretizationCollection`.
        :arg \**kwargs: Optional keyword arguments for determining the
            max characteristic velocity of the operator. These are passed
            to :meth:`max_characteristic_velocity`.
        """
        from grudge.dt_utils import characteristic_lengthscales
        import grudge.op as op

        wavespeeds = self.max_characteristic_velocity(actx, **kwargs)
        local_timesteps = (characteristic_lengthscales(actx, dcoll) /
                           wavespeeds)

        return op.nodal_min(dcoll, "vol", local_timesteps)
Example #6
    def my_get_timestep(t, dt, state):
        # Richer interface to calculate {dt, cfl}; also returns the
        # node-local timestep/CFL field estimates. Closes over t_final,
        # constant_cfl, current_cfl, discr, and eos from the enclosing
        # driver.
        t_remaining = max(0, t_final - t)
        if constant_cfl:
            from mirgecom.inviscid import get_inviscid_timestep
            ts_field = current_cfl * get_inviscid_timestep(
                discr, eos=eos, cv=state)
            from grudge.op import nodal_min
            dt = nodal_min(discr, "vol", ts_field)
            cfl = current_cfl
        else:
            from mirgecom.inviscid import get_inviscid_cfl
            ts_field = get_inviscid_cfl(discr, eos=eos, dt=dt, cv=state)
            from grudge.op import nodal_max
            cfl = nodal_max(discr, "vol", ts_field)

        return ts_field, cfl, min(t_remaining, dt)
Example #7
def get_sim_timestep(discr, state, t, dt, cfl, t_final, constant_cfl=False):
    """Return the maximum stable timestep for a typical fluid simulation.

    This routine returns *dt*, the user-defined constant timestep, or *max_dt*,
    the maximum domain-wide stability-limited timestep for a fluid simulation.

    .. important::
        This routine internally calls the collective
        :func:`~grudge.op.nodal_min`, which makes the result domain-wide
        regardless of the parallel domain decomposition. Thus this routine
        must be called *collectively* (i.e. by all ranks).

    Two modes are supported:
        - Constant DT mode: returns the minimum of (t_final-t, dt)
        - Constant CFL mode: returns (cfl * max_dt)

    Parameters
    ----------
    discr
        The grudge discretization or discretization collection to use.
    state: :class:`~mirgecom.gas_model.FluidState`
        The full fluid conserved and thermal state
    t: float
        Current time
    t_final: float
        Final time
    dt: float
        The current timestep
    cfl: float
        The current CFL number
    constant_cfl: bool
        True if running constant CFL mode

    Returns
    -------
    float
        The maximum stable DT based on a viscous fluid.
    """
    t_remaining = max(0, t_final - t)
    mydt = dt
    if constant_cfl:
        from mirgecom.viscous import get_viscous_timestep
        from grudge.op import nodal_min
        mydt = state.array_context.to_numpy(cfl * nodal_min(
            discr, "vol", get_viscous_timestep(discr=discr, state=state)))[()]
    return min(t_remaining, mydt)
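
The trailing `[()]` above converts the 0-d NumPy array returned by `to_numpy` into a plain Python scalar. A standalone sketch of that idiom:

import numpy as np

scalar_0d = np.asarray(3.14)   # a 0-d array, as to_numpy can return
value = scalar_0d[()]          # index with an empty tuple -> plain scalar
assert isinstance(value, float)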
Example #8
import grudge.op as op


def estimate_rk4_timestep(actx, dcoll, c):
    from grudge.dt_utils import characteristic_lengthscales

    local_dts = characteristic_lengthscales(actx, dcoll) / c

    return op.nodal_min(dcoll, "vol", local_dts)
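
A usage sketch, assuming an array context `actx` and discretization collection `dcoll` built as in the surrounding examples; the 0.45 safety factor and the `to_numpy` conversion mirror Example #9:

c = 1.0   # constant wave speed
dt = actx.to_numpy(0.45 * estimate_rk4_timestep(actx, dcoll, c))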
Example #9
def main(ctx_factory, dim=2, order=3, visualize=False, lazy=False):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)

    if lazy:
        actx = PytatoPyOpenCLArrayContext(queue)
    else:
        actx = PyOpenCLArrayContext(
            queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),
            force_device_scalars=True,
        )

    comm = MPI.COMM_WORLD
    num_parts = comm.Get_size()

    from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
    mesh_dist = MPIMeshDistributor(comm)

    nel_1d = 16

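    # (sic: "is_mananger_rank" is the method's actual spelling in
    # meshmode's MPIMeshDistributor API)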
    if mesh_dist.is_mananger_rank():
        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                          b=(0.5, ) * dim,
                                          nelements_per_axis=(nel_1d, ) * dim)

        logger.info("%d elements", mesh.nelements)

        part_per_element = get_partition_by_pymetis(mesh, num_parts)

        local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element,
                                               num_parts)

        del mesh

    else:
        local_mesh = mesh_dist.receive_mesh_part()

    dcoll = DiscretizationCollection(actx,
                                     local_mesh,
                                     order=order,
                                     mpi_communicator=comm)

    fields = WaveState(u=bump(actx, dcoll),
                       v=make_obj_array(
                           [dcoll.zeros(actx) for i in range(dcoll.dim)]))

    c = 1
    dt = actx.to_numpy(0.45 * estimate_rk4_timestep(actx, dcoll, c))

    vis = make_visualizer(dcoll)

    def rhs(t, w):
        return wave_operator(dcoll, c=c, w=w)

    compiled_rhs = actx.compile(rhs)

    if comm.rank == 0:
        logger.info("dt = %g", dt)

    import time
    start = time.time()

    t = 0
    t_final = 3
    istep = 0
    while t < t_final:
        if lazy:
            fields = thaw(freeze(fields, actx), actx)

        fields = rk4_step(fields, t, dt, compiled_rhs)

        l2norm = actx.to_numpy(op.norm(dcoll, fields.u, 2))

        if istep % 10 == 0:
            stop = time.time()
            linfnorm = actx.to_numpy(op.norm(dcoll, fields.u, np.inf))
            nodalmax = actx.to_numpy(op.nodal_max(dcoll, "vol", fields.u))
            nodalmin = actx.to_numpy(op.nodal_min(dcoll, "vol", fields.u))
            if comm.rank == 0:
                logger.info(f"step: {istep} t: {t} "
                            f"L2: {l2norm} "
                            f"Linf: {linfnorm} "
                            f"sol max: {nodalmax} "
                            f"sol min: {nodalmin} "
                            f"wall: {stop-start} ")
            if visualize:
                vis.write_parallel_vtk_file(
                    comm, f"fld-wave-eager-mpi-{{rank:03d}}-{istep:04d}.vtu", [
                        ("u", fields.u),
                        ("v", fields.v),
                    ])
            start = stop

        t += dt
        istep += 1

        # NOTE: This assertion ensures the solution stays bounded over
        # the specified time interval.
        assert l2norm < 1
Example #10
    def nodal_min(self, dd, vec):
        return op.nodal_min(self, dd, vec)
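
This thin wrapper simply forwards to the free function, so the two call styles below are equivalent (a sketch, assuming a discretization object `discr` exposing the method above and a volume field `vec`):

vmin = discr.nodal_min("vol", vec)       # convenience method
vmin = op.nodal_min(discr, "vol", vec)   # equivalent free function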
Example #11
def main(use_profiling=False, use_logmgr=False, lazy_eval: bool = False):
    """Drive the example."""
    cl_ctx = cl.create_some_context()

    logmgr = initialize_logmgr(use_logmgr, filename="wave.sqlite", mode="wu")

    if use_profiling:
        if lazy_eval:
            raise RuntimeError("Cannot run lazy with profiling.")
        queue = cl.CommandQueue(
            cl_ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
        actx = PyOpenCLProfilingArrayContext(
            queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))
    else:
        queue = cl.CommandQueue(cl_ctx)
        if lazy_eval:
            actx = PytatoPyOpenCLArrayContext(queue)
        else:
            actx = PyOpenCLArrayContext(
                queue,
                allocator=cl_tools.MemoryPool(
                    cl_tools.ImmediateAllocator(queue)))

    dim = 2
    nel_1d = 16
    from meshmode.mesh.generation import generate_regular_rect_mesh

    mesh = generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                      b=(0.5, ) * dim,
                                      nelements_per_axis=(nel_1d, ) * dim)

    order = 3

    discr = EagerDGDiscretization(actx, mesh, order=order)

    current_cfl = 0.485
    wave_speed = 1.0
    from grudge.dt_utils import characteristic_lengthscales
    nodal_dt = characteristic_lengthscales(actx, discr) / wave_speed
    from grudge.op import nodal_min
    dt = actx.to_numpy(current_cfl * nodal_min(discr, "vol", nodal_dt))[()]

    print("%d elements" % mesh.nelements)

    fields = flat_obj_array(bump(actx, discr),
                            [discr.zeros(actx) for i in range(discr.dim)])

    if logmgr:
        logmgr_add_cl_device_info(logmgr, queue)
        logmgr_add_device_memory_usage(logmgr, queue)

        logmgr.add_watches(["step.max", "t_step.max", "t_log.max"])

        try:
            logmgr.add_watches(
                ["memory_usage_python.max", "memory_usage_gpu.max"])
        except KeyError:
            pass

        if use_profiling:
            logmgr.add_watches(["multiply_time.max"])

        vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
        logmgr.add_quantity(vis_timer)

    vis = make_visualizer(discr)

    def rhs(t, w):
        return wave_operator(discr, c=wave_speed, w=w)

    compiled_rhs = actx.compile(rhs)

    t = 0
    t_final = 1
    istep = 0
    while t < t_final:
        if logmgr:
            logmgr.tick_before()

        fields = thaw(freeze(fields, actx), actx)
        fields = rk4_step(fields, t, dt, compiled_rhs)

        if istep % 10 == 0:
            if use_profiling:
                print(actx.tabulate_profiling_data())
            print(istep, t, actx.to_numpy(discr.norm(fields[0], np.inf)))
            vis.write_vtk_file("fld-wave-%04d.vtu" % istep, [
                ("u", fields[0]),
                ("v", fields[1:]),
            ],
                               overwrite=True)

        t += dt
        istep += 1

        if logmgr:
            set_dt(logmgr, dt)
            logmgr.tick_after()
Example #12
def main(snapshot_pattern="wave-mpi-{step:04d}-{rank:04d}.pkl", restart_step=None,
         use_profiling=False, use_logmgr=False, actx_class=PyOpenCLArrayContext):
    """Drive the example."""
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    num_parts = comm.Get_size()

    logmgr = initialize_logmgr(use_logmgr,
        filename="wave-mpi.sqlite", mode="wu", mpi_comm=comm)
    if use_profiling:
        queue = cl.CommandQueue(cl_ctx,
            properties=cl.command_queue_properties.PROFILING_ENABLE)
        actx = actx_class(queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),
            logmgr=logmgr)
    else:
        queue = cl.CommandQueue(cl_ctx)
        actx = actx_class(queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))

    if restart_step is None:

        from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
        mesh_dist = MPIMeshDistributor(comm)

        dim = 2
        nel_1d = 16

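        # (sic: "is_mananger_rank" is the method's actual spelling in
        # meshmode's MPIMeshDistributor API)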
        if mesh_dist.is_mananger_rank():
            from meshmode.mesh.generation import generate_regular_rect_mesh
            mesh = generate_regular_rect_mesh(
                a=(-0.5,)*dim, b=(0.5,)*dim,
                nelements_per_axis=(nel_1d,)*dim)

            print("%d elements" % mesh.nelements)
            part_per_element = get_partition_by_pymetis(mesh, num_parts)
            local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)

            del mesh

        else:
            local_mesh = mesh_dist.receive_mesh_part()

        fields = None

    else:
        from mirgecom.restart import read_restart_data
        restart_data = read_restart_data(
            actx, snapshot_pattern.format(step=restart_step, rank=rank)
        )
        local_mesh = restart_data["local_mesh"]
        nel_1d = restart_data["nel_1d"]
        assert comm.Get_size() == restart_data["num_parts"]

    order = 3

    discr = EagerDGDiscretization(actx, local_mesh, order=order,
                                  mpi_communicator=comm)

    current_cfl = 0.485
    wave_speed = 1.0
    from grudge.dt_utils import characteristic_lengthscales
    dt = current_cfl * characteristic_lengthscales(actx, discr) / wave_speed

    from grudge.op import nodal_min
    dt = nodal_min(discr, "vol", dt)

    t_final = 1

    if restart_step is None:
        t = 0
        istep = 0

        fields = flat_obj_array(
            bump(actx, discr),
            [discr.zeros(actx) for i in range(discr.dim)]
            )

    else:
        t = restart_data["t"]
        istep = restart_step
        assert istep == restart_step
        restart_fields = restart_data["fields"]
        old_order = restart_data["order"]
        if old_order != order:
            old_discr = EagerDGDiscretization(actx, local_mesh, order=old_order,
                                              mpi_communicator=comm)
            from meshmode.discretization.connection import make_same_mesh_connection
            connection = make_same_mesh_connection(actx, discr.discr_from_dd("vol"),
                                                   old_discr.discr_from_dd("vol"))
            fields = connection(restart_fields)
        else:
            fields = restart_fields

    if logmgr:
        logmgr_add_cl_device_info(logmgr, queue)
        logmgr_add_device_memory_usage(logmgr, queue)

        logmgr.add_watches(["step.max", "t_step.max", "t_log.max"])

        try:
            logmgr.add_watches(["memory_usage_python.max", "memory_usage_gpu.max"])
        except KeyError:
            pass

        if use_profiling:
            logmgr.add_watches(["multiply_time.max"])

        vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
        logmgr.add_quantity(vis_timer)

    vis = make_visualizer(discr)

    def rhs(t, w):
        return wave_operator(discr, c=wave_speed, w=w)

    compiled_rhs = actx.compile(rhs)

    while t < t_final:
        if logmgr:
            logmgr.tick_before()

        # restart must happen at beginning of step
        if istep % 100 == 0 and (
                # Do not overwrite the restart file that we just read.
                istep != restart_step):
            from mirgecom.restart import write_restart_file
            write_restart_file(
                actx, restart_data={
                    "local_mesh": local_mesh,
                    "order": order,
                    "fields": fields,
                    "t": t,
                    "step": istep,
                    "nel_1d": nel_1d,
                    "num_parts": num_parts},
                filename=snapshot_pattern.format(step=istep, rank=rank),
                comm=comm
            )

        if istep % 10 == 0:
            print(istep, t, discr.norm(fields[0]))
            vis.write_parallel_vtk_file(
                comm,
                "fld-wave-mpi-%03d-%04d.vtu" % (rank, istep),
                [
                    ("u", fields[0]),
                    ("v", fields[1:]),
                ], overwrite=True
            )

        fields = thaw(freeze(fields, actx), actx)
        fields = rk4_step(fields, t, dt, compiled_rhs)

        t += dt
        istep += 1

        if logmgr:
            set_dt(logmgr, dt)
            logmgr.tick_after()

    final_soln = discr.norm(fields[0])
    assert np.abs(final_soln - 0.04409852463947439) < 1e-14