Example #1
def main(ctx_factory=cl.create_some_context,
         use_logmgr=True,
         use_leap=False,
         use_profiling=False,
         casename=None,
         rst_filename=None,
         actx_class=PyOpenCLArrayContext):
    """Drive example."""
    cl_ctx = ctx_factory()

    if casename is None:
        casename = "mirgecom"

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nproc = comm.Get_size()

    logmgr = initialize_logmgr(use_logmgr,
                               filename=f"{casename}.sqlite",
                               mode="wu",
                               mpi_comm=comm)

    if use_profiling:
        queue = cl.CommandQueue(
            cl_ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
    else:
        queue = cl.CommandQueue(cl_ctx)

    actx = actx_class(queue,
                      allocator=cl_tools.MemoryPool(
                          cl_tools.ImmediateAllocator(queue)))

    # Some discretization parameters
    dim = 2
    nel_1d = 8
    order = 1

    # {{{ Time stepping control

    # This example runs only a few steps by default (to keep CI short).
    # With the mixture defined below, equilibrium is achieved at ~40ms.
    # To run to equilibrium, set t_final >= 40ms.
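    # (E.g., setting t_final = 4.e-2 below corresponds to ~40 ms; the tiny
    #  default t_final = 1e-8 keeps the CI run short.)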

    # Time stepper selection
    if use_leap:
        from leap.rk import RK4MethodBuilder
        timestepper = RK4MethodBuilder("state")
    else:
        timestepper = rk4_step

    # Time loop control parameters
    current_step = 0
    t_final = 1e-8
    current_cfl = 1.0
    current_dt = 1e-9
    current_t = 0
    constant_cfl = False

    # I/O frequencies
    nstatus = 1
    nviz = 5
    nhealth = 1
    nrestart = 5

    # }}}  Time stepping control

    debug = False

    rst_path = "restart_data/"
    rst_pattern = (rst_path + "{cname}-{step:04d}-{rank:04d}.pkl")
    if rst_filename:  # read the grid from restart data
        rst_filename = f"{rst_filename}-{rank:04d}.pkl"

        from mirgecom.restart import read_restart_data
        restart_data = read_restart_data(actx, rst_filename)
        local_mesh = restart_data["local_mesh"]
        local_nelements = local_mesh.nelements
        global_nelements = restart_data["global_nelements"]
        assert restart_data["num_parts"] == nproc
        rst_time = restart_data["t"]
        rst_step = restart_data["step"]
        rst_order = restart_data["order"]
    else:  # generate the grid from scratch
        from meshmode.mesh.generation import generate_regular_rect_mesh
        box_ll = -0.005
        box_ur = 0.005
        generate_mesh = partial(generate_regular_rect_mesh,
                                a=(box_ll, ) * dim,
                                b=(box_ur, ) * dim,
                                nelements_per_axis=(nel_1d, ) * dim)
        local_mesh, global_nelements = generate_and_distribute_mesh(
            comm, generate_mesh)
        local_nelements = local_mesh.nelements

    discr = EagerDGDiscretization(actx,
                                  local_mesh,
                                  order=order,
                                  mpi_communicator=comm)
    nodes = thaw(actx, discr.nodes())

    vis_timer = None

    if logmgr:
        logmgr_add_device_name(logmgr, queue)
        logmgr_add_device_memory_usage(logmgr, queue)
        logmgr_add_many_discretization_quantities(logmgr, discr, dim,
                                                  extract_vars_for_logging,
                                                  units_for_logging)

        vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
        logmgr.add_quantity(vis_timer)

        logmgr.add_watches([
            ("step.max", "step = {value}, "),
            ("t_sim.max", "sim time: {value:1.6e} s\n"),
            ("min_pressure", "------- P (min, max) (Pa) = ({value:1.9e}, "),
            ("max_pressure", "{value:1.9e})\n"),
            ("min_temperature", "------- T (min, max) (K)  = ({value:7g}, "),
            ("max_temperature", "{value:7g})\n"),
            ("t_step.max", "------- step walltime: {value:6g} s, "),
            ("t_log.max", "log walltime: {value:6g} s")
        ])

    # {{{  Set up initial state using Cantera

    # Use Cantera for initialization
    # -- Pick up a CTI for the thermochemistry config
    # --- Note: Users may add their own CTI file by dropping it into
    # ---       mirgecom/mechanisms alongside the other CTI files.
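    # --- (For example -- assuming the usual name-to-file mapping -- dropping in
    # ---  "my_mech.cti" there would let get_mechanism_cti("my_mech") pick it up.)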
    from mirgecom.mechanisms import get_mechanism_cti
    mech_cti = get_mechanism_cti("uiuc")

    cantera_soln = cantera.Solution(phase_id="gas", source=mech_cti)
    nspecies = cantera_soln.n_species

    # Initial temperature, pressure, and mixture mole fractions are needed to
    # set up the initial state in Cantera.
    init_temperature = 1500.0  # Initial temperature hot enough to burn
    # Parameters for calculating the amounts of fuel, oxidizer, and inert species
    equiv_ratio = 1.0
    ox_di_ratio = 0.21
    stoich_ratio = 3.0
    # Grab the array indices for the specific species, ethylene, oxygen, and nitrogen
    i_fu = cantera_soln.species_index("C2H4")
    i_ox = cantera_soln.species_index("O2")
    i_di = cantera_soln.species_index("N2")
    x = np.zeros(nspecies)
    # Set the species mole fractions according to our desired fuel/air mixture
    x[i_fu] = (ox_di_ratio * equiv_ratio) / (stoich_ratio +
                                             ox_di_ratio * equiv_ratio)
    x[i_ox] = stoich_ratio * x[i_fu] / equiv_ratio
    x[i_di] = (1.0 - ox_di_ratio) * x[i_ox] / ox_di_ratio
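    # Sanity note: with the defaults above (equiv_ratio=1.0, ox_di_ratio=0.21,
    # stoich_ratio=3.0), these formulas give x[C2H4] ~= 0.21/3.21 ~= 0.0654,
    # x[O2] ~= 0.1963, x[N2] ~= 0.7383, and the mole fractions sum to 1.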
    # Uncomment next line to make pylint fail when it can't find cantera.one_atm
    one_atm = cantera.one_atm  # pylint: disable=no-member
    # one_atm = 101325.0

    # Let the user know how Cantera is being initialized
    print(f"Input state (T,P,X) = ({init_temperature}, {one_atm}, {x})")
    # Set Cantera internal gas temperature, pressure, and mole fractions
    cantera_soln.TPX = init_temperature, one_atm, x
    # Pull temperature, total density, mass fractions, and pressure from Cantera
    # We need total density, and mass fractions to initialize the fluid/gas state.
    can_t, can_rho, can_y = cantera_soln.TDY
    can_p = cantera_soln.P
    # *can_t*, *can_p* should not differ (significantly) from user's initial data,
    # but we want to ensure that we use exactly the same starting point as Cantera,
    # so we use Cantera's version of these data.

    # }}}

    # {{{ Create Pyrometheus thermochemistry object & EOS

    # Create a Pyrometheus EOS with the Cantera soln. Pyrometheus uses Cantera and
    # generates a set of methods to calculate chemothermomechanical properties and
    # states for this particular mechanism.
    pyrometheus_mechanism = pyro.get_thermochem_class(cantera_soln)(actx.np)
    eos = PyrometheusMixture(pyrometheus_mechanism,
                             temperature_guess=init_temperature)
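    # The resulting *eos* is what the rest of this driver calls into, e.g.
    # eos.pressure(cv), eos.temperature(cv), eos.get_production_rates(cv), and
    # eos.get_species_source_terms(cv) below.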

    # }}}

    # {{{ MIRGE-Com state initialization

    # Initialize the fluid/gas state with Cantera-consistent data:
    # (density, pressure, temperature, mass_fractions)
    print(f"Cantera state (rho,T,P,Y) = ({can_rho}, {can_t}, {can_p}, {can_y}")
    velocity = np.zeros(shape=(dim, ))
    initializer = MixtureInitializer(dim=dim,
                                     nspecies=nspecies,
                                     pressure=can_p,
                                     temperature=can_t,
                                     massfractions=can_y,
                                     velocity=velocity)

    my_boundary = AdiabaticSlipBoundary()
    boundaries = {BTAG_ALL: my_boundary}

    if rst_filename:
        current_step = rst_step
        current_t = rst_time
        if logmgr:
            from mirgecom.logging_quantities import logmgr_set_time
            logmgr_set_time(logmgr, current_step, current_t)
        if order == rst_order:
            current_state = restart_data["state"]
        else:
            rst_state = restart_data["state"]
            old_discr = EagerDGDiscretization(actx,
                                              local_mesh,
                                              order=rst_order,
                                              mpi_communicator=comm)
            from meshmode.discretization.connection import make_same_mesh_connection
            connection = make_same_mesh_connection(
                actx, discr.discr_from_dd("vol"),
                old_discr.discr_from_dd("vol"))
            current_state = connection(rst_state)
    else:
        # Set the current state from time 0
        current_state = initializer(eos=eos, x_vec=nodes)

    # Inspection at physics debugging time
    if debug:
        print("Initial MIRGE-Com state:")
        print(f"{current_state=}")
        print(f"Initial DV pressure: {eos.pressure(current_state)}")
        print(f"Initial DV temperature: {eos.temperature(current_state)}")

    # }}}

    visualizer = make_visualizer(discr)
    initname = initializer.__class__.__name__
    eosname = eos.__class__.__name__
    init_message = make_init_message(dim=dim,
                                     order=order,
                                     nelements=local_nelements,
                                     global_nelements=global_nelements,
                                     dt=current_dt,
                                     t_final=t_final,
                                     nstatus=nstatus,
                                     nviz=nviz,
                                     cfl=current_cfl,
                                     constant_cfl=constant_cfl,
                                     initname=initname,
                                     eosname=eosname,
                                     casename=casename)

    # Cantera's equilibrate() computes the expected end state at chemical
    # equilibrium, i.e. the expected state after all reactions have completed.
    cantera_soln.equilibrate("UV")
    eq_temperature, eq_density, eq_mass_fractions = cantera_soln.TDY
    eq_pressure = cantera_soln.P

    # Report the expected final state to the user
    if rank == 0:
        logger.info(init_message)
        logger.info(f"Expected equilibrium state:"
                    f" {eq_pressure=}, {eq_temperature=},"
                    f" {eq_density=}, {eq_mass_fractions=}")

    def my_write_status(dt, cfl):
        status_msg = f"------ {dt=}" if constant_cfl else f"----- {cfl=}"
        if rank == 0:
            logger.info(status_msg)

    def my_write_viz(step,
                     t,
                     dt,
                     state,
                     ts_field=None,
                     dv=None,
                     production_rates=None,
                     cfl=None):
        if dv is None:
            dv = eos.dependent_vars(state)
        if production_rates is None:
            production_rates = eos.get_production_rates(state)
        if ts_field is None:
            ts_field, cfl, dt = my_get_timestep(t=t, dt=dt, state=state)
        viz_fields = [("cv", state), ("dv", dv),
                      ("production_rates", production_rates),
                      ("dt" if constant_cfl else "cfl", ts_field)]
        write_visfile(discr,
                      viz_fields,
                      visualizer,
                      vizname=casename,
                      step=step,
                      t=t,
                      overwrite=True,
                      vis_timer=vis_timer)

    def my_write_restart(step, t, state):
        rst_fname = rst_pattern.format(cname=casename, step=step, rank=rank)
        if rst_fname == rst_filename:
            if rank == 0:
                logger.info("Skipping overwrite of restart file.")
        else:
            rst_data = {
                "local_mesh": local_mesh,
                "state": state,
                "t": t,
                "step": step,
                "order": order,
                "global_nelements": global_nelements,
                "num_parts": nproc
            }
            from mirgecom.restart import write_restart_file
            write_restart_file(actx, rst_data, rst_fname, comm)

    def my_health_check(dv):
        health_error = False
        from mirgecom.simutil import check_naninf_local, check_range_local
        if check_naninf_local(discr, "vol", dv.pressure) \
           or check_range_local(discr, "vol", dv.pressure, 1e5, 2.4e5):
            health_error = True
            logger.info(f"{rank=}: Invalid pressure data found.")

        if check_range_local(discr, "vol", dv.temperature, 1.498e3, 1.52e3):
            health_error = True
            logger.info(f"{rank=}: Invalid temperature data found.")

        return health_error

    def my_get_timestep(t, dt, state):
        # Richer interface for computing (dt, cfl); returns the node-local
        # estimate field (ts_field) along with the global dt and cfl.
        t_remaining = max(0, t_final - t)
        if constant_cfl:
            from mirgecom.inviscid import get_inviscid_timestep
            ts_field = current_cfl * get_inviscid_timestep(
                discr, eos=eos, cv=state)
            from grudge.op import nodal_min
            dt = nodal_min(discr, "vol", ts_field)
            cfl = current_cfl
        else:
            from mirgecom.inviscid import get_inviscid_cfl
            ts_field = get_inviscid_cfl(discr, eos=eos, dt=dt, cv=state)
            from grudge.op import nodal_max
            cfl = nodal_max(discr, "vol", ts_field)

        return ts_field, cfl, min(t_remaining, dt)

    def my_pre_step(step, t, dt, state):
        try:
            dv = None

            if logmgr:
                logmgr.tick_before()

            from mirgecom.simutil import check_step
            do_viz = check_step(step=step, interval=nviz)
            do_restart = check_step(step=step, interval=nrestart)
            do_health = check_step(step=step, interval=nhealth)
            do_status = check_step(step=step, interval=nstatus)

            if do_health:
                dv = eos.dependent_vars(state)
                from mirgecom.simutil import allsync
                health_errors = allsync(my_health_check(dv), comm, op=MPI.LOR)
                if health_errors:
                    if rank == 0:
                        logger.info("Fluid solution failed health check.")
                    raise MyRuntimeError("Failed simulation health check.")

            ts_field, cfl, dt = my_get_timestep(t=t, dt=dt, state=state)

            if do_status:
                my_write_status(dt, cfl)

            if do_restart:
                my_write_restart(step=step, t=t, state=state)

            if do_viz:
                production_rates = eos.get_production_rates(state)
                if dv is None:
                    dv = eos.dependent_vars(state)
                my_write_viz(step=step,
                             t=t,
                             dt=dt,
                             state=state,
                             dv=dv,
                             production_rates=production_rates,
                             ts_field=ts_field,
                             cfl=cfl)

        except MyRuntimeError:
            if rank == 0:
                logger.info("Errors detected; attempting graceful exit.")
            my_write_viz(step=step, t=t, dt=dt, state=state)
            my_write_restart(step=step, t=t, state=state)
            raise

        return state, dt

    def my_post_step(step, t, dt, state):
        # Logmgr needs to know about EOS, dt, dim?
        # imo this is a design/scope flaw
        if logmgr:
            set_dt(logmgr, dt)
            set_sim_state(logmgr, dim, state, eos)
            logmgr.tick_after()
        return state, dt

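    # RHS for the timestepper: inviscid Euler operator plus the chemistry
    # (species production) source terms.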
    def my_rhs(t, state):
        return (euler_operator(
            discr, cv=state, time=t, boundaries=boundaries, eos=eos) +
                eos.get_species_source_terms(state))

    current_dt = get_sim_timestep(discr, current_state, current_t, current_dt,
                                  current_cfl, eos, t_final, constant_cfl)

    current_step, current_t, current_state = \
        advance_state(rhs=my_rhs, timestepper=timestepper,
                      pre_step_callback=my_pre_step,
                      post_step_callback=my_post_step, dt=current_dt,
                      state=current_state, t=current_t, t_final=t_final)

    # Dump the final data
    if rank == 0:
        logger.info("Checkpointing final state ...")

    final_dv = eos.dependent_vars(current_state)
    final_dm = eos.get_production_rates(current_state)
    ts_field, cfl, dt = my_get_timestep(t=current_t,
                                        dt=current_dt,
                                        state=current_state)
    my_write_viz(step=current_step,
                 t=current_t,
                 dt=dt,
                 state=current_state,
                 dv=final_dv,
                 production_rates=final_dm,
                 ts_field=ts_field,
                 cfl=cfl)
    my_write_status(dt=dt, cfl=cfl)
    my_write_restart(step=current_step, t=current_t, state=current_state)

    if logmgr:
        logmgr.close()
    elif use_profiling:
        print(actx.tabulate_profiling_data())

    finish_tol = 1e-16
    assert np.abs(current_t - t_final) < finish_tol
Example #2
def get_test_density(actx, density_discr):
    nodes = thaw(actx, density_discr.nodes())
    sigma = actx.np.sin(10 * nodes[0])
    return sigma
Example #3
def test_grad_operator(actx_factory, dim, order, sym_test_func_factory):
    """Test the gradient operator for sanity.

    Check whether we get the right answers for gradients of analytic functions with
    some simple input fields and states:
    - constant
    - multilinear funcs
    - quadratic funcs
    - trig funcs
    - :class:`~mirgecom.fluid.ConservedVars` composed of funcs from above
    """
    actx = actx_factory()

    sym_test_func = sym_test_func_factory(dim)

    tol = 1e-10 if dim < 3 else 1e-9
    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    for nfac in [1, 2, 4]:

        npts_axis = (nfac * 4, ) * dim
        box_ll = (0, ) * dim
        box_ur = (1, ) * dim
        mesh = _get_box_mesh(dim, a=box_ll, b=box_ur, n=npts_axis)

        logger.info(f"Number of {dim}d elements: {mesh.nelements}")

        discr = EagerDGDiscretization(actx, mesh, order=order)
        # compute max element size
        from grudge.dt_utils import h_max_from_volume
        h_max = h_max_from_volume(discr)

        def sym_eval(expr, x_vec):
            # FIXME: Workaround needed until pymbolic supports array containers
            mapper = sym.EvaluationMapper({"x": x_vec})
            from arraycontext import rec_map_array_container
            result = rec_map_array_container(mapper, expr)
            # If expressions don't depend on coords (e.g., order 0), evaluated result
            # will be scalar-valued, so promote to DOFArray(s) before returning
            return result * (0 * x_vec[0] + 1)

        test_func = partial(sym_eval, sym_test_func)
        grad_test_func = partial(sym_eval, sym_grad(dim, sym_test_func))

        nodes = thaw(actx, discr.nodes())
        int_flux = partial(central_flux_interior, actx, discr)
        bnd_flux = partial(central_flux_boundary, actx, discr, test_func)

        test_data = test_func(nodes)
        exact_grad = grad_test_func(nodes)

        from mirgecom.simutil import componentwise_norms
        from arraycontext import flatten

        err_scale = max(
            flatten(componentwise_norms(discr, exact_grad, np.inf), actx))

        if err_scale <= 1e-16:
            err_scale = 1

        print(f"{test_data=}")
        print(f"{exact_grad=}")

        test_data_int_tpair = interior_trace_pair(discr, test_data)
        boundaries = [BTAG_ALL]
        test_data_flux_bnd = _elbnd_flux(discr, int_flux, bnd_flux,
                                         test_data_int_tpair, boundaries)

        from mirgecom.operators import grad_operator
        from grudge.dof_desc import as_dofdesc
        dd_vol = as_dofdesc("vol")
        dd_faces = as_dofdesc("all_faces")
        test_grad = grad_operator(discr, dd_vol, dd_faces, test_data,
                                  test_data_flux_bnd)

        print(f"{test_grad=}")
        grad_err = \
            max(flatten(componentwise_norms(discr, test_grad - exact_grad, np.inf),
                        actx)) / err_scale

        eoc.add_data_point(actx.to_numpy(h_max), actx.to_numpy(grad_err))

    assert (eoc.order_estimate() >= order - 0.5 or eoc.max_error() < tol)
Example #4
def gather_block_interaction_points(actx,
                                    places,
                                    source_dd,
                                    indices,
                                    radius_factor=None,
                                    approx_nproxy=None,
                                    max_nodes_in_box=None):
    """Generate sets of interaction points for each given range of indices
    in the *source* discretization. For each input range of indices,
    the corresponding output range of points consists of:

    - a set of proxy points (or balls) around the range, which
      model farfield interactions. These are constructed using
      :class:`ProxyGenerator`.

    - a set of neighboring points that are inside the proxy balls, but
      do not belong to the given range, which model nearby interactions.
      These are constructed with :func:`gather_block_neighbor_points`.

    :arg places: a :class:`~pytential.GeometryCollection`.
    :arg source_dd: geometry in *places* for which to generate the
        interaction points. This is a
        :class:`~pytential.symbolic.primitives.DOFDescriptor` describing
        the exact discretization.
    :arg indices: a :class:`sumpy.tools.BlockIndexRanges` on the
        discretization described by *source_dd*.

    :return: a tuple ``(nodes, ranges)``, where each value is a
        :class:`pyopencl.array.Array`. For a range :math:`i`, we can
        get the slice using ``nodes[ranges[i]:ranges[i + 1]]``.
    """
    @memoize_in(places, "concat_proxy_and_neighbors")
    def knl():
        loopy_knl = lp.make_kernel(
            [
                "{[irange, idim]: 0 <= irange < nranges and \
                              0 <= idim < dim}",
                "{[ipxy, ingb]: 0 <= ipxy < npxyblock and \
                            0 <= ingb < nngbblock}"
            ],
            """
            for irange
                <> pxystart = pxyranges[irange]
                <> pxyend = pxyranges[irange + 1]
                <> npxyblock = pxyend - pxystart

                <> ngbstart = nbrranges[irange]
                <> ngbend = nbrranges[irange + 1]
                <> nngbblock = ngbend - ngbstart

                <> istart = pxyranges[irange] + nbrranges[irange]
                nodes[idim, istart + ipxy] = \
                    proxies[idim, pxystart + ipxy] \
                    {id_prefix=write_pxy,nosync=write_ngb}
                nodes[idim, istart + npxyblock + ingb] = \
                    sources[idim, nbrindices[ngbstart + ingb]] \
                    {id_prefix=write_ngb,nosync=write_pxy}
                ranges[irange + 1] = ranges[irange] + npxyblock + nngbblock
            end
            """, [
                lp.GlobalArg("sources",
                             None,
                             shape=(lpot_source.ambient_dim, "nsources"),
                             dim_tags="sep,C"),
                lp.GlobalArg("proxies",
                             None,
                             shape=(lpot_source.ambient_dim, "nproxies"),
                             dim_tags="sep,C"),
                lp.GlobalArg("nbrindices", None, shape="nnbrindices"),
                lp.GlobalArg(
                    "nodes",
                    None,
                    shape=(lpot_source.ambient_dim, "nproxies + nnbrindices")),
                lp.ValueArg("nsources", np.int),
                lp.ValueArg("nproxies", np.int),
                lp.ValueArg("nnbrindices", np.int), "..."
            ],
            name="concat_proxy_and_neighbors",
            default_offset=lp.auto,
            silenced_warnings="write_race(write_*)",
            fixed_parameters=dict(dim=lpot_source.ambient_dim),
            lang_version=MOST_RECENT_LANGUAGE_VERSION)

        loopy_knl = lp.tag_inames(loopy_knl, "idim*:unr")
        loopy_knl = lp.split_iname(loopy_knl, "irange", 128, outer_tag="g.0")

        return loopy_knl

    lpot_source = places.get_geometry(source_dd.geometry)
    generator = ProxyGenerator(places,
                               radius_factor=radius_factor,
                               approx_nproxy=approx_nproxy)
    proxies, pxyranges, pxycenters, pxyradii = \
            generator(actx, source_dd, indices)

    discr = places.get_discretization(source_dd.geometry,
                                      source_dd.discr_stage)
    neighbors = gather_block_neighbor_points(actx,
                                             discr,
                                             indices,
                                             pxycenters,
                                             pxyradii,
                                             max_nodes_in_box=max_nodes_in_box)

    from meshmode.dof_array import flatten, thaw
    ranges = actx.zeros(indices.nblocks + 1, dtype=np.int)
    _, (nodes, ranges) = knl()(actx.queue,
                               sources=flatten(thaw(actx, discr.nodes())),
                               proxies=proxies,
                               pxyranges=pxyranges,
                               nbrindices=neighbors.indices,
                               nbrranges=neighbors.ranges,
                               ranges=ranges)

    return actx.freeze(nodes), actx.freeze(ranges)
Example #5
def test_slipwall_identity(actx_factory, dim):
    """Identity test - check for the expected boundary solution.

    Checks that the slipwall implements the expected boundary solution:
    rho_plus = rho_minus
    v_plus = v_minus - 2 * (n_hat . v_minus) * n_hat
    mom_plus = rho_plus * v_plus
    E_plus = E_minus
    """
    actx = actx_factory()

    nel_1d = 4

    from meshmode.mesh.generation import generate_regular_rect_mesh

    mesh = generate_regular_rect_mesh(
        a=(-0.5,) * dim, b=(0.5,) * dim, n=(nel_1d,) * dim
    )

    order = 3
    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())
    eos = IdealSingleGas()
    orig = np.zeros(shape=(dim,))
    nhat = thaw(actx, discr.normal(BTAG_ALL))
    #    normal_mag = actx.np.sqrt(np.dot(normal, normal))
    #    nhat_mult = 1.0 / normal_mag
    #    nhat = normal * make_obj_array([nhat_mult])

    logger.info(f"Number of {dim}d elems: {mesh.nelements}")

    # for velocity going along each direction
    for vdir in range(dim):
        vel = np.zeros(shape=(dim,))
        # for velocity directions +1, and -1
        for parity in [1.0, -1.0]:
            vel[vdir] = parity  # Check incoming normal
            initializer = Lump(center=orig, velocity=vel, rhoamp=0.0)
            wall = AdiabaticSlipBoundary()

            uniform_state = initializer(0, nodes)
            from functools import partial
            bnd_norm = partial(discr.norm, p=np.inf, dd=BTAG_ALL)

            bnd_pair = wall.boundary_pair(discr, uniform_state, t=0.0,
                                          btag=BTAG_ALL, eos=eos)
            bnd_cv_int = split_conserved(dim, bnd_pair.int)
            bnd_cv_ext = split_conserved(dim, bnd_pair.ext)

            # check that mass and energy are preserved
            mass_resid = bnd_cv_int.mass - bnd_cv_ext.mass
            mass_err = bnd_norm(mass_resid)
            assert mass_err == 0.0
            energy_resid = bnd_cv_int.energy - bnd_cv_ext.energy
            energy_err = bnd_norm(energy_resid)
            assert energy_err == 0.0

            # check that exterior momentum term is mom_interior - 2 * mom_normal
            mom_norm_comp = np.dot(bnd_cv_int.momentum, nhat)
            mom_norm = nhat * mom_norm_comp
            expected_mom_ext = bnd_cv_int.momentum - 2.0 * mom_norm
            mom_resid = bnd_cv_ext.momentum - expected_mom_ext
            mom_err = bnd_norm(mom_resid)

            assert mom_err == 0.0
Example #6
    def map_int_g(self, expr):
        lpot_source = self.places.get_geometry(expr.source.geometry)
        source_discr = self.places.get_discretization(expr.source.geometry,
                                                      expr.source.discr_stage)
        target_discr = self.places.get_discretization(expr.target.geometry,
                                                      expr.target.discr_stage)

        if source_discr is not target_discr:
            raise NotImplementedError

        result = 0

        for kernel, density in zip(expr.source_kernels, expr.densities):
            rec_density = self._blk_mapper.rec(density)
            if is_zero(rec_density):
                continue

            if not np.isscalar(rec_density):
                raise NotImplementedError

            actx = self.array_context
            kernel_args = _get_layer_potential_args(self._mat_mapper, expr)

            from sumpy.expansion.local import LineTaylorLocalExpansion
            local_expn = LineTaylorLocalExpansion(
                expr.target_kernel.get_base_kernel(), lpot_source.qbx_order)

            from sumpy.qbx import LayerPotentialMatrixBlockGenerator
            mat_gen = LayerPotentialMatrixBlockGenerator(
                actx.context,
                local_expn,
                source_kernels=(kernel, ),
                target_kernels=(expr.target_kernel, ))

            assert abs(expr.qbx_forced_limit) > 0
            from pytential import bind, sym
            radii = bind(
                self.places,
                sym.expansion_radii(source_discr.ambient_dim,
                                    dofdesc=expr.target))(actx)
            centers = bind(
                self.places,
                sym.expansion_centers(source_discr.ambient_dim,
                                      expr.qbx_forced_limit,
                                      dofdesc=expr.target))(actx)

            from meshmode.dof_array import flatten, thaw
            _, (mat, ) = mat_gen(
                actx.queue,
                targets=flatten(thaw(actx, target_discr.nodes())),
                sources=flatten(thaw(actx, source_discr.nodes())),
                centers=flatten(centers),
                expansion_radii=flatten(radii),
                index_set=self.index_set,
                **kernel_args)

            waa = bind(
                self.places,
                sym.weights_and_area_elements(source_discr.ambient_dim,
                                              dofdesc=expr.source))(actx)
            waa = flatten(waa)

            mat *= waa[self.index_set.linear_col_indices]
            result += rec_density * actx.to_numpy(mat)

        return result
Example #7
def test_target_specific_qbx(ctx_factory, op, helmholtz_k, qbx_order):
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    target_order = 4
    fmm_tol = 1e-3

    from meshmode.mesh.generation import generate_icosphere
    mesh = generate_icosphere(1, target_order)

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
        InterpolatoryQuadratureSimplexGroupFactory
    from pytential.qbx import QBXLayerPotentialSource
    pre_density_discr = Discretization(
        actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))

    from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder
    qbx = QBXLayerPotentialSource(
        pre_density_discr,
        4 * target_order,
        qbx_order=qbx_order,
        fmm_level_to_order=SimpleExpansionOrderFinder(fmm_tol),
        fmm_backend="fmmlib",
        _expansions_in_tree_have_extent=True,
        _expansion_stick_out_factor=0.9,
        _use_target_specific_qbx=False,
    )

    kernel_length_scale = 5 / abs(helmholtz_k) if helmholtz_k else None
    places = {
        "qbx": qbx,
        "qbx_target_specific": qbx.copy(_use_target_specific_qbx=True)
    }

    from pytential.qbx.refinement import refine_geometry_collection
    places = GeometryCollection(places, auto_where="qbx")
    places = refine_geometry_collection(
        places, kernel_length_scale=kernel_length_scale)

    density_discr = places.get_discretization("qbx")
    from meshmode.dof_array import thaw
    nodes = thaw(actx, density_discr.nodes())
    u_dev = actx.np.sin(nodes[0])

    if helmholtz_k == 0:
        kernel = LaplaceKernel(3)
        kernel_kwargs = {}
    else:
        kernel = HelmholtzKernel(3, allow_evanescent=True)
        kernel_kwargs = {"k": sym.var("k")}

    u_sym = sym.var("u")

    if op == "S":
        op = sym.S
    elif op == "D":
        op = sym.D
    elif op == "Sp":
        op = sym.Sp
    else:
        raise ValueError("unknown operator: '%s'" % op)

    expr = op(kernel, u_sym, qbx_forced_limit=-1, **kernel_kwargs)

    from meshmode.dof_array import flatten
    bound_op = bind(places, expr)
    pot_ref = actx.to_numpy(flatten(bound_op(actx, u=u_dev, k=helmholtz_k)))

    bound_op = bind(places, expr, auto_where="qbx_target_specific")
    pot_tsqbx = actx.to_numpy(flatten(bound_op(actx, u=u_dev, k=helmholtz_k)))

    assert np.allclose(pot_tsqbx, pot_ref, atol=1e-13, rtol=1e-13)
Example #8
def test_diffusion_compare_to_nodal_dg(actx_factory, problem, order,
            print_err=False):
    """Compares diffusion operator to Hesthaven/Warburton Nodal-DG code."""
    pytest.importorskip("oct2py")

    actx = actx_factory()

    p = problem

    assert p.dim == 1
    assert p.sym_alpha == 1.

    from meshmode.interop.nodal_dg import download_nodal_dg_if_not_present
    download_nodal_dg_if_not_present()

    sym_diffusion_u = sym_diffusion(p.dim, p.sym_alpha, p.sym_u)

    for n in [4, 8, 16, 32, 64]:
        mesh = p.get_mesh(n)

        from meshmode.interop.nodal_dg import NodalDGContext
        with NodalDGContext("./nodal-dg/Codes1.1") as ndgctx:
            ndgctx.set_mesh(mesh, order=order)

            t = 1.23456789

            from grudge.eager import EagerDGDiscretization
            discr_mirgecom = EagerDGDiscretization(actx, mesh, order=order)
            nodes_mirgecom = thaw(actx, discr_mirgecom.nodes())

            def sym_eval_mirgecom(expr):
                return sym.EvaluationMapper({"x": nodes_mirgecom, "t": t})(expr)

            u_mirgecom = sym_eval_mirgecom(p.sym_u)

            diffusion_u_mirgecom = diffusion_operator(discr_mirgecom,
                quad_tag=DISCR_TAG_BASE, alpha=discr_mirgecom.zeros(actx)+1.,
                boundaries=p.get_boundaries(discr_mirgecom, actx, t), u=u_mirgecom)

            discr_ndg = ndgctx.get_discr(actx)
            nodes_ndg = thaw(actx, discr_ndg.nodes())

            def sym_eval_ndg(expr):
                return sym.EvaluationMapper({"x": nodes_ndg, "t": t})(expr)

            u_ndg = sym_eval_ndg(p.sym_u)

            ndgctx.push_dof_array("u", u_ndg)
            ndgctx.octave.push("t", t)
            ndgctx.octave.eval("[rhs] = HeatCRHS1D(u,t)", verbose=False)
            diffusion_u_ndg = ndgctx.pull_dof_array(actx, "rhs")

            def inf_norm(f):
                return actx.np.linalg.norm(f, np.inf)

            err = (inf_norm(diffusion_u_mirgecom-diffusion_u_ndg)
                        / inf_norm(diffusion_u_ndg))

            if print_err:
                diffusion_u_exact = sym_eval_mirgecom(sym_diffusion_u)
                err_exact = (inf_norm(diffusion_u_mirgecom-diffusion_u_exact)
                            / inf_norm(diffusion_u_exact))
                print(err, err_exact)

            assert err < 1e-9
Example #9
def test_diffusion_obj_array_vectorize(actx_factory):
    """
    Checks that the diffusion operator can be called with either scalars or object
    arrays for `u`.
    """
    actx = actx_factory()

    p = get_decaying_trig(1, 2.)

    assert isinstance(p.sym_alpha, Number)

    sym_u1 = p.sym_u
    sym_u2 = 2*p.sym_u

    sym_diffusion_u1 = sym_diffusion(p.dim, p.sym_alpha, sym_u1)
    sym_diffusion_u2 = sym_diffusion(p.dim, p.sym_alpha, sym_u2)

    n = 128

    mesh = p.get_mesh(n)

    from grudge.eager import EagerDGDiscretization
    discr = EagerDGDiscretization(actx, mesh, order=4)

    nodes = thaw(actx, discr.nodes())

    t = 1.23456789

    def sym_eval(expr):
        return sym.EvaluationMapper({"x": nodes, "t": t})(expr)

    alpha = sym_eval(p.sym_alpha)

    u1 = sym_eval(sym_u1)
    u2 = sym_eval(sym_u2)

    boundaries = p.get_boundaries(discr, actx, t)

    diffusion_u1 = diffusion_operator(discr, quad_tag=DISCR_TAG_BASE, alpha=alpha,
        boundaries=boundaries, u=u1)

    assert isinstance(diffusion_u1, DOFArray)

    expected_diffusion_u1 = sym_eval(sym_diffusion_u1)
    rel_linf_err = (
        discr.norm(diffusion_u1 - expected_diffusion_u1, np.inf)
        / discr.norm(expected_diffusion_u1, np.inf))
    assert rel_linf_err < 1.e-5

    boundaries_vector = [boundaries, boundaries]
    u_vector = make_obj_array([u1, u2])

    diffusion_u_vector = diffusion_operator(
        discr, quad_tag=DISCR_TAG_BASE, alpha=alpha,
        boundaries=boundaries_vector, u=u_vector
    )

    assert isinstance(diffusion_u_vector, np.ndarray)
    assert diffusion_u_vector.shape == (2,)

    expected_diffusion_u_vector = make_obj_array([
        sym_eval(sym_diffusion_u1),
        sym_eval(sym_diffusion_u2)
    ])
    rel_linf_err = (
        discr.norm(diffusion_u_vector - expected_diffusion_u_vector, np.inf)
        / discr.norm(expected_diffusion_u_vector, np.inf))
    assert rel_linf_err < 1.e-5
Example #10
def test_diffusion_accuracy(actx_factory, problem, nsteps, dt, scales, order,
            visualize=False):
    """
    Checks the accuracy of the diffusion operator by solving the heat equation for a
    given problem setup.
    """
    actx = actx_factory()

    p = problem

    sym_diffusion_u = sym_diffusion(p.dim, p.sym_alpha, p.sym_u)

    # In order to support manufactured solutions, we modify the heat equation
    # to add a source term f. If the solution is exact, this term should be 0.
    sym_t = pmbl.var("t")
    sym_f = sym.diff(sym_t)(p.sym_u) - sym_diffusion_u
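    # By construction, u then satisfies u_t = div(alpha*grad(u)) + f exactly, so
    # time integration with this source should reproduce p.sym_u up to
    # discretization error.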

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for n in scales:
        mesh = p.get_mesh(n)

        from grudge.eager import EagerDGDiscretization
        from meshmode.discretization.poly_element import \
                QuadratureSimplexGroupFactory, \
                PolynomialWarpAndBlendGroupFactory
        discr = EagerDGDiscretization(
            actx, mesh,
            discr_tag_to_group_factory={
                DISCR_TAG_BASE: PolynomialWarpAndBlendGroupFactory(order),
                DISCR_TAG_QUAD: QuadratureSimplexGroupFactory(3*order),
            }
        )

        nodes = thaw(actx, discr.nodes())

        def sym_eval(expr, t):
            return sym.EvaluationMapper({"x": nodes, "t": t})(expr)

        alpha = sym_eval(p.sym_alpha, 0.)

        if isinstance(alpha, DOFArray):
            discr_tag = DISCR_TAG_QUAD
        else:
            discr_tag = DISCR_TAG_BASE

        def get_rhs(t, u):
            return (diffusion_operator(discr, quad_tag=discr_tag, alpha=alpha,
                    boundaries=p.get_boundaries(discr, actx, t), u=u)
                + sym_eval(sym_f, t))

        t = 0.

        u = sym_eval(p.sym_u, t)

        from mirgecom.integrators import rk4_step

        for _ in range(nsteps):
            u = rk4_step(u, t, dt, get_rhs)
            t += dt

        expected_u = sym_eval(p.sym_u, t)

        rel_linf_err = (
            discr.norm(u - expected_u, np.inf)
            / discr.norm(expected_u, np.inf))
        eoc_rec.add_data_point(1./n, rel_linf_err)

        if visualize:
            from grudge.shortcuts import make_visualizer
            vis = make_visualizer(discr, discr.order+3)
            vis.write_vtk_file("diffusion_accuracy_{order}_{n}.vtu".format(
                order=order, n=n), [
                    ("u", u),
                    ("expected_u", expected_u),
                    ])

    print("L^inf error:")
    print(eoc_rec)
    # Expected convergence rates from Hesthaven/Warburton book
    expected_order = order+1 if order % 2 == 0 else order
    assert(eoc_rec.order_estimate() >= expected_order - 0.5
                or eoc_rec.max_error() < 1e-11)
Example #11
def test_diffusion_discontinuous_alpha(actx_factory, order, visualize=False):
    """
    Checks the accuracy of the diffusion operator for an alpha field that has a
    jump across an element face.
    """
    actx = actx_factory()

    n = 8

    mesh = get_box_mesh(1, -1, 1, n)

    from grudge.eager import EagerDGDiscretization
    discr = EagerDGDiscretization(actx, mesh, order=order)

    nodes = thaw(actx, discr.nodes())

    # Set up a 1D heat equation interface problem, apply the diffusion operator to
    # the exact steady state solution, and check that it's zero

    lower_mask_np = np.empty((n, order+1), dtype=int)
    lower_mask_np[:, :] = 0
    lower_mask_np[:int(n/2), :] = 1
    lower_mask = DOFArray(actx, (actx.from_numpy(lower_mask_np),))

    upper_mask_np = np.empty((n, order+1), dtype=int)
    upper_mask_np[:, :] = 0
    upper_mask_np[int(n/2):, :] = 1
    upper_mask = DOFArray(actx, (actx.from_numpy(upper_mask_np),))

    alpha_lower = 0.5
    alpha_upper = 1

    alpha = alpha_lower * lower_mask + alpha_upper * upper_mask

    boundaries = {
        DTAG_BOUNDARY("-0"): DirichletDiffusionBoundary(0.),
        DTAG_BOUNDARY("+0"): DirichletDiffusionBoundary(1.),
    }

    flux = -alpha_lower*alpha_upper/(alpha_lower + alpha_upper)

    u_steady = (
              -flux/alpha_lower * (nodes[0] + 1)  * lower_mask  # noqa: E126, E221
        + (1 - flux/alpha_upper * (nodes[0] - 1)) * upper_mask)  # noqa: E131
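    # Derivation sketch: at steady state with u(-1) = 0 and u(1) = 1, the flux
    # -alpha*du/dx is a single constant across the interface at x = 0; matching
    # the two linear pieces gives flux = -alpha_lower*alpha_upper/(alpha_lower +
    # alpha_upper) = -1/3 here, which is what *flux* and *u_steady* encode.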

    def get_rhs(t, u):
        return diffusion_operator(
            discr, quad_tag=DISCR_TAG_BASE, alpha=alpha, boundaries=boundaries, u=u)

    rhs = get_rhs(0, u_steady)

    if visualize:
        from grudge.shortcuts import make_visualizer
        vis = make_visualizer(discr, discr.order+3)
        vis.write_vtk_file("diffusion_discontinuous_alpha_rhs_{order}.vtu"
            .format(order=order), [
                ("alpha", alpha),
                ("u_steady", u_steady),
                ("rhs", rhs),
                ])

    linf_err = discr.norm(rhs, np.inf)
    assert(linf_err < 1e-11)

    # Now check stability

    from numpy.random import rand
    perturb_np = np.empty((n, order+1), dtype=float)
    for i in range(n):
        perturb_np[i, :] = 0.1*(rand()-0.5)
    perturb = DOFArray(actx, (actx.from_numpy(perturb_np),))

    u = u_steady + perturb

    dt = 1e-3 / order**2
    t = 0

    from mirgecom.integrators import rk4_step

    for _ in range(50):
        u = rk4_step(u, t, dt, get_rhs)
        t += dt

    if visualize:
        vis.write_vtk_file("diffusion_disc_alpha_stability_{order}.vtu"
            .format(order=order), [
                ("alpha", alpha),
                ("u", u),
                ("u_steady", u_steady),
                ])

    linf_diff = discr.norm(u - u_steady, np.inf)
    assert linf_diff < 0.1
Example #12
def run_exterior_stokes_2d(
        ctx_factory,
        nelements,
        mesh_order=4,
        target_order=4,
        qbx_order=4,
        fmm_order=False,  # FIXME: FMM is slower than direct eval
        mu=1,
        circle_rad=1.5,
        visualize=False):

    # This program tests an exterior Stokes flow in 2D using the
    # compound representation given in Hsiao & Kress,
    # ``On an integral equation for the two-dimensional exterior Stokes problem,''
    # Applied Numerical Mathematics 1 (1985).

    logging.basicConfig(level=logging.INFO)

    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    ovsmp_target_order = 4 * target_order

    # {{{ geometries

    from meshmode.mesh.generation import (  # noqa
        make_curve_mesh, starfish, ellipse, drop)
    mesh = make_curve_mesh(lambda t: circle_rad * ellipse(1, t),
                           np.linspace(0, 1, nelements + 1), target_order)
    coarse_density_discr = Discretization(
        actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))

    from pytential.qbx import QBXLayerPotentialSource
    target_association_tolerance = 0.05
    qbx = QBXLayerPotentialSource(
        coarse_density_discr,
        fine_order=ovsmp_target_order,
        qbx_order=qbx_order,
        fmm_order=fmm_order,
        target_association_tolerance=target_association_tolerance,
        _expansions_in_tree_have_extent=True,
    )

    def circle_mask(test_points, radius):
        return (test_points[0, :]**2 + test_points[1, :]**2 > radius**2)

    def outside_circle(test_points, radius):
        mask = circle_mask(test_points, radius)
        return np.array([row[mask] for row in test_points])

    from pytential.target import PointsTarget
    nsamp = 30
    eval_points_1d = np.linspace(-3., 3., nsamp)
    eval_points = np.zeros((2, len(eval_points_1d)**2))
    eval_points[0, :] = np.tile(eval_points_1d, len(eval_points_1d))
    eval_points[1, :] = np.repeat(eval_points_1d, len(eval_points_1d))
    eval_points = outside_circle(eval_points, radius=circle_rad)
    point_targets = PointsTarget(eval_points)

    fplot = FieldPlotter(np.zeros(2), extent=6, npoints=100)
    plot_targets = PointsTarget(outside_circle(fplot.points,
                                               radius=circle_rad))

    places = GeometryCollection({
        sym.DEFAULT_SOURCE: qbx,
        sym.DEFAULT_TARGET: qbx.density_discr,
        "point_target": point_targets,
        "plot_target": plot_targets,
    })

    density_discr = places.get_discretization(sym.DEFAULT_SOURCE)

    normal = bind(places, sym.normal(2).as_vector())(actx)
    path_length = bind(places, sym.integral(2, 1, 1))(actx)

    # }}}

    # {{{ describe bvp

    from pytential.symbolic.stokes import StressletWrapper, StokesletWrapper
    dim = 2
    cse = sym.cse

    sigma_sym = sym.make_sym_vector("sigma", dim)
    meanless_sigma_sym = cse(sigma_sym - sym.mean(2, 1, sigma_sym))
    int_sigma = sym.Ones() * sym.integral(2, 1, sigma_sym)

    nvec_sym = sym.make_sym_vector("normal", dim)
    mu_sym = sym.var("mu")

    # -1 for interior Dirichlet
    # +1 for exterior Dirichlet
    loc_sign = 1

    stresslet_obj = StressletWrapper(dim=2)
    stokeslet_obj = StokesletWrapper(dim=2)
    bdry_op_sym = (-loc_sign * 0.5 * sigma_sym - stresslet_obj.apply(
        sigma_sym, nvec_sym, mu_sym, qbx_forced_limit="avg") +
                   stokeslet_obj.apply(
                       meanless_sigma_sym, mu_sym, qbx_forced_limit="avg") -
                   (0.5 / np.pi) * int_sigma)
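    # The operator combines the jump term (-loc_sign/2)*sigma, the stresslet
    # (double layer) of sigma, the stokeslet of the mean-free density (both
    # taken in the "avg" sense), and a rank-one integral of sigma -- the
    # compound representation from the Hsiao & Kress reference above.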

    # }}}

    bound_op = bind(places, bdry_op_sym)

    # {{{ fix rhs and solve

    def fund_soln(x, y, loc, strength):
        # with direction (1, 0) for the point source
        r = actx.np.sqrt((x - loc[0])**2 + (y - loc[1])**2)
        scaling = strength / (4 * np.pi * mu)
        xcomp = (-actx.np.log(r) + (x - loc[0])**2 / r**2) * scaling
        ycomp = ((x - loc[0]) * (y - loc[1]) / r**2) * scaling
        return [xcomp, ycomp]

    def rotlet_soln(x, y, loc):
        r = actx.np.sqrt((x - loc[0])**2 + (y - loc[1])**2)
        xcomp = -(y - loc[1]) / r**2
        ycomp = (x - loc[0]) / r**2
        return [xcomp, ycomp]

    def fund_and_rot_soln(x, y, loc, strength):
        # with direction (1, 0) for the point source
        r = actx.np.sqrt((x - loc[0])**2 + (y - loc[1])**2)
        scaling = strength / (4 * np.pi * mu)
        xcomp = ((-actx.np.log(r) + (x - loc[0])**2 / r**2) * scaling -
                 (y - loc[1]) * strength * 0.125 / r**2 + 3.3)
        ycomp = (((x - loc[0]) * (y - loc[1]) / r**2) * scaling +
                 (x - loc[0]) * strength * 0.125 / r**2 + 1.5)
        return make_obj_array([xcomp, ycomp])

    from meshmode.dof_array import unflatten, flatten, thaw
    nodes = flatten(thaw(actx, density_discr.nodes()))
    fund_soln_loc = np.array([0.5, -0.2])
    strength = 100.
    bc = unflatten(
        actx, density_discr,
        fund_and_rot_soln(nodes[0], nodes[1], fund_soln_loc, strength))

    omega_sym = sym.make_sym_vector("omega", dim)
    u_A_sym_bdry = stokeslet_obj.apply(  # noqa: N806
        omega_sym, mu_sym, qbx_forced_limit=1)

    from pytential.utils import unflatten_from_numpy
    omega = unflatten_from_numpy(
        actx, density_discr,
        make_obj_array([(strength / path_length) * np.ones(len(nodes[0])),
                        np.zeros(len(nodes[0]))]))

    bvp_rhs = bind(places,
                   sym.make_sym_vector("bc", dim) + u_A_sym_bdry)(actx,
                                                                  bc=bc,
                                                                  mu=mu,
                                                                  omega=omega)
    gmres_result = gmres(bound_op.scipy_op(actx,
                                           "sigma",
                                           np.float64,
                                           mu=mu,
                                           normal=normal),
                         bvp_rhs,
                         x0=bvp_rhs,
                         tol=1e-9,
                         progress=True,
                         stall_iterations=0,
                         hard_failure=True)

    # }}}

    # {{{ postprocess/visualize

    sigma = gmres_result.solution
    sigma_int_val_sym = sym.make_sym_vector("sigma_int_val", 2)
    int_val = bind(places, sym.integral(2, 1, sigma_sym))(actx, sigma=sigma)
    int_val = -int_val / (2 * np.pi)
    print("int_val = ", int_val)

    u_A_sym_vol = stokeslet_obj.apply(  # noqa: N806
        omega_sym, mu_sym, qbx_forced_limit=2)
    representation_sym = (
        -stresslet_obj.apply(sigma_sym, nvec_sym, mu_sym, qbx_forced_limit=2) +
        stokeslet_obj.apply(meanless_sigma_sym, mu_sym, qbx_forced_limit=2) -
        u_A_sym_vol + sigma_int_val_sym)

    where = (sym.DEFAULT_SOURCE, "point_target")
    vel = bind(places, representation_sym,
               auto_where=where)(actx,
                                 sigma=sigma,
                                 mu=mu,
                                 normal=normal,
                                 sigma_int_val=int_val,
                                 omega=omega)
    print("@@@@@@@@")

    plot_vel = bind(places,
                    representation_sym,
                    auto_where=(sym.DEFAULT_SOURCE,
                                "plot_target"))(actx,
                                                sigma=sigma,
                                                mu=mu,
                                                normal=normal,
                                                sigma_int_val=int_val,
                                                omega=omega)

    def get_obj_array(obj_array):
        return make_obj_array([ary.get() for ary in obj_array])

    exact_soln = fund_and_rot_soln(actx.from_numpy(eval_points[0]),
                                   actx.from_numpy(eval_points[1]),
                                   fund_soln_loc, strength)

    vel = get_obj_array(vel)
    err = vel - get_obj_array(exact_soln)

    # FIXME: Pointwise relative errors don't make sense!
    rel_err = err / (get_obj_array(exact_soln))

    if 0:
        print("@@@@@@@@")
        print("vel[0], err[0], rel_err[0] ***** vel[1], err[1], rel_err[1]: ")
        for i in range(len(vel[0])):
            print(
                "{:15.8e}, {:15.8e}, {:15.8e} ***** {:15.8e}, {:15.8e}, {:15.8e}"
                .format(vel[0][i], err[0][i], rel_err[0][i], vel[1][i],
                        err[1][i], rel_err[1][i]))

        print("@@@@@@@@")

    l2_err = np.sqrt((6. / (nsamp - 1))**2 * np.sum(err[0] * err[0]) +
                     (6. / (nsamp - 1))**2 * np.sum(err[1] * err[1]))
    l2_rel_err = np.sqrt((6. /
                          (nsamp - 1))**2 * np.sum(rel_err[0] * rel_err[0]) +
                         (6. /
                          (nsamp - 1))**2 * np.sum(rel_err[1] * rel_err[1]))

    print("L2 error estimate: ", l2_err)
    print("L2 rel error estimate: ", l2_rel_err)
    print("max error at sampled points: ", max(abs(err[0])), max(abs(err[1])))
    print("max rel error at sampled points: ", max(abs(rel_err[0])),
          max(abs(rel_err[1])))

    if visualize:
        import matplotlib.pyplot as plt

        full_pot = np.zeros_like(fplot.points) * float("nan")
        mask = circle_mask(fplot.points, radius=circle_rad)

        for i, vel in enumerate(plot_vel):
            full_pot[i, mask] = vel.get()

        plt.quiver(fplot.points[0],
                   fplot.points[1],
                   full_pot[0],
                   full_pot[1],
                   linewidth=0.1)
        plt.savefig("exterior-2d-field.pdf")

    # }}}

    h_max = bind(places, sym.h_max(qbx.ambient_dim))(actx)
    return h_max, l2_err
Example #13
def main(mesh_name="ellipse", visualize=False):
    import logging
    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info

    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    from meshmode.mesh.generation import ellipse, make_curve_mesh
    from functools import partial

    if mesh_name == "ellipse":
        mesh = make_curve_mesh(
                partial(ellipse, 1),
                np.linspace(0, 1, nelements+1),
                mesh_order)
    elif mesh_name == "ellipse_array":
        base_mesh = make_curve_mesh(
                partial(ellipse, 1),
                np.linspace(0, 1, nelements+1),
                mesh_order)

        from meshmode.mesh.processing import affine_map, merge_disjoint_meshes
        nx = 2
        ny = 2
        dx = 2 / nx
        meshes = [
                affine_map(
                    base_mesh,
                    A=np.diag([dx*0.25, dx*0.25]),
                    b=np.array([dx*(ix-nx/2), dx*(iy-ny/2)]))
                for ix in range(nx)
                for iy in range(ny)]

        mesh = merge_disjoint_meshes(meshes, single_group=True)

        if visualize:
            from meshmode.mesh.visualization import draw_curve
            draw_curve(mesh)
            import matplotlib.pyplot as plt
            plt.show()
    else:
        raise ValueError("unknown mesh name: {}".format(mesh_name))

    pre_density_discr = Discretization(
            actx, mesh,
            InterpolatoryQuadratureSimplexGroupFactory(bdry_quad_order))

    from pytential.qbx import (
            QBXLayerPotentialSource, QBXTargetAssociationFailedException)
    qbx = QBXLayerPotentialSource(
            pre_density_discr, fine_order=bdry_ovsmp_quad_order, qbx_order=qbx_order,
            fmm_order=fmm_order
            )

    from sumpy.visualization import FieldPlotter
    fplot = FieldPlotter(np.zeros(2), extent=5, npoints=500)
    targets = actx.from_numpy(fplot.points)

    from pytential import GeometryCollection
    places = GeometryCollection({
        "qbx": qbx,
        "qbx_high_target_assoc_tol": qbx.copy(target_association_tolerance=0.05),
        "targets": PointsTarget(targets)
        }, auto_where="qbx")
    density_discr = places.get_discretization("qbx")

    # {{{ describe bvp

    from sumpy.kernel import LaplaceKernel, HelmholtzKernel
    kernel = HelmholtzKernel(2)

    sigma_sym = sym.var("sigma")
    sqrt_w = sym.sqrt_jac_q_weight(2)
    inv_sqrt_w_sigma = sym.cse(sigma_sym/sqrt_w)

    # Brakhage-Werner parameter
    alpha = 1j

    # -1 for interior Dirichlet
    # +1 for exterior Dirichlet
    loc_sign = +1

    k_sym = sym.var("k")
    bdry_op_sym = (-loc_sign*0.5*sigma_sym
            + sqrt_w*(
                alpha*sym.S(kernel, inv_sqrt_w_sigma, k=k_sym,
                    qbx_forced_limit=+1)
                - sym.D(kernel, inv_sqrt_w_sigma, k=k_sym,
                    qbx_forced_limit="avg")
                ))
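    # This is a Brakhage-Werner-style combined representation: the field is
    # written as (alpha*S - D) applied to sigma/sqrt_w (cf. representation_sym
    # further below); taking its boundary limit (with the sqrt_w weighting)
    # gives the second-kind operator above, solved as bdry_op_sym(sigma)
    # = sqrt_w*bc.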

    # }}}

    bound_op = bind(places, bdry_op_sym)

    # {{{ fix rhs and solve

    from meshmode.dof_array import thaw
    nodes = thaw(actx, density_discr.nodes())
    k_vec = np.array([2, 1])
    k_vec = k * k_vec / la.norm(k_vec, 2)

    def u_incoming_func(x):
        return actx.np.exp(
                1j * (x[0] * k_vec[0] + x[1] * k_vec[1]))

    bc = -u_incoming_func(nodes)

    bvp_rhs = bind(places, sqrt_w*sym.var("bc"))(actx, bc=bc)

    from pytential.solve import gmres
    gmres_result = gmres(
            bound_op.scipy_op(actx, sigma_sym.name, dtype=np.complex128, k=k),
            bvp_rhs, tol=1e-8, progress=True,
            stall_iterations=0,
            hard_failure=True)

    # }}}

    # {{{ postprocess/visualize

    repr_kwargs = dict(
            source="qbx_high_target_assoc_tol",
            target="targets",
            qbx_forced_limit=None)
    representation_sym = (
            alpha*sym.S(kernel, inv_sqrt_w_sigma, k=k_sym, **repr_kwargs)
            - sym.D(kernel, inv_sqrt_w_sigma, k=k_sym, **repr_kwargs))

    u_incoming = u_incoming_func(targets)
    ones_density = density_discr.zeros(actx)
    for elem in ones_density:
        elem.fill(1)

    indicator = actx.to_numpy(
            bind(places, sym.D(LaplaceKernel(2), sigma_sym, **repr_kwargs))(
                actx, sigma=ones_density))

    try:
        fld_in_vol = actx.to_numpy(
                bind(places, representation_sym)(
                    actx, sigma=gmres_result.solution, k=k))
    except QBXTargetAssociationFailedException as e:
        fplot.write_vtk_file("helmholtz-dirichlet-failed-targets.vts", [
            ("failed", e.failed_target_flags.get(queue))
            ])
        raise

    #fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
    fplot.write_vtk_file("helmholtz-dirichlet-potential.vts", [
        ("potential", fld_in_vol),
        ("indicator", indicator),
        ("u_incoming", actx.to_numpy(u_incoming)),
        ])
Example No. 14
def get_density(actx, discr):
    from meshmode.dof_array import thaw
    nodes = thaw(actx, discr.nodes())
    return actx.np.sin(10 * nodes[0])
Example No. 15
def main(ctx_factory=cl.create_some_context,
         snapshot_pattern="y0euler-{step:06d}-{rank:04d}.pkl",
         restart_step=None, use_profiling=False, use_logmgr=False):
    """Drive the Y0 example."""

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nparts = comm.Get_size()

    """logging and profiling"""
    logmgr = initialize_logmgr(use_logmgr, filename="y0euler.sqlite",
        mode="wu", mpi_comm=comm)

    cl_ctx = ctx_factory()
    if use_profiling:
        queue = cl.CommandQueue(cl_ctx,
            properties=cl.command_queue_properties.PROFILING_ENABLE)
        actx = PyOpenCLProfilingArrayContext(queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),
            logmgr=logmgr)
    else:
        queue = cl.CommandQueue(cl_ctx)
        actx = PyOpenCLArrayContext(queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))

    #nviz = 500
    #nrestart = 500
    nviz = 100
    nrestart = 100
    current_dt = 2.5e-8
    t_final = 5.e-1

    dim = 3
    order = 1
    exittol = .09
    #t_final = 0.001
    current_cfl = 1.0
    vel_init = np.zeros(shape=(dim,))
    vel_inflow = np.zeros(shape=(dim,))
    vel_outflow = np.zeros(shape=(dim,))
    orig = np.zeros(shape=(dim,))
    orig[0] = 0.83
    orig[2] = 0.001
    #vel[0] = 340.0
    #vel_inflow[0] = 100.0  # m/s
    current_t = 0
    casename = "y0euler"
    constant_cfl = False
    nstatus = 10000000000
    checkpoint_t = current_t
    current_step = 0

    # working gas: CO2 #
    #   gamma = 1.289
    #   MW=44.009  g/mol
    #   cp = 37.135 J/mol-K,
    #   rho= 1.977 kg/m^3 @298K
    gamma_CO2 = 1.289
    R_CO2 = 8314.59/44.009

    # background
    #   100 Pa
    #   298 K
    #   rho = 1.77619667e-3 kg/m^3
    #   velocity = 0,0,0
    rho_bkrnd = 1.77619667e-3
    pres_bkrnd = 100
    temp_bkrnd = 298
     
    # nozzle inflow #
    #
    # stagnation temperature 298 K
    # stagnation pressure 1.5e5 Pa
    #
    # isentropic expansion based on the area ratio between the inlet (r=13e-3 m)
    # and the throat (r=6.3e-3 m)
    #
    #  MJA, this is calculated offline, add some code to do it for us
    #
    #   Mach number=0.139145
    #   pressure=148142
    #   temperature=297.169
    #   density=2.63872
    #   gamma=1.289

    # calculate the inlet Mach number from the area ratio
    nozzleInletRadius = 13.e-3
    nozzleThroatRadius = 6.3e-3
    nozzleInletArea = math.pi*nozzleInletRadius*nozzleInletRadius
    nozzleThroatArea = math.pi*nozzleThroatRadius*nozzleThroatRadius
    inletAreaRatio = nozzleInletArea/nozzleThroatArea

    def getMachFromAreaRatio(area_ratio, gamma, mach_guess=0.01):
        """Find the subsonic Mach number matching *area_ratio* (A/A_throat)
        by Newton iteration on the isentropic area-Mach relation."""
        error = 1.e-8
        nextError = 1.e8
        g = gamma
        M0 = mach_guess
        while nextError > error:
            R = ((2/(g+1)+((g-1)/(g+1)*M0*M0))**(((g+1)/(2*g-2))))/M0-area_ratio
            dRdM = (2*((2/(g+1)+((g-1)/(g+1)*M0*M0))**(((g+1)/(2*g-2))))/
                   (2*g-2)*(g-1)/(2/(g+1)+((g-1)/(g+1)*M0*M0))-
                   ((2/(g+1)+((g-1)/(g+1)*M0*M0))**(((g+1)/(2*g-2))))*M0**(-2))

            M1 = M0 - R/dRdM
            nextError = abs(R)
            M0 = M1

        return M1


    def getIsentropicPressure(mach, P0, gamma):
        """Static pressure for stagnation pressure *P0* at the given Mach number."""
        pressure = (1. + (gamma - 1.)*0.5*math.pow(mach, 2))
        pressure = P0*math.pow(pressure, (-gamma/(gamma - 1.)))
        return pressure

    def getIsentropicTemperature(mach, T0, gamma):
        """Static temperature for stagnation temperature *T0* at the given Mach number."""
        temperature = (1. + (gamma - 1.)*0.5*math.pow(mach, 2))
        temperature = T0*math.pow(temperature, -1.0)
        return temperature


    inlet_mach = getMachFromAreaRatio(area_ratio=inletAreaRatio, gamma=gamma_CO2, mach_guess=0.01)
    # ramp the stagnation pressure
    start_ramp_pres = 1000
    ramp_interval = 1.e-2
    t_ramp_start = 1e5
    pres_inflow = getIsentropicPressure(mach=inlet_mach, P0=start_ramp_pres, gamma=gamma_CO2)
    temp_inflow = getIsentropicTemperature(mach=inlet_mach, T0=298, gamma=gamma_CO2)
    rho_inflow = pres_inflow/temp_inflow/R_CO2

    print(f'inlet Mach number {inlet_mach}')
    print(f'inlet temperature {temp_inflow}')
    print(f'inlet pressure {pres_inflow}')
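
    # Optional sanity check (illustrative): the converged Mach number should
    # reproduce the inlet/throat area ratio via the isentropic area-Mach relation.
    area_ratio_check = ((2/(gamma_CO2+1)
                         + (gamma_CO2-1)/(gamma_CO2+1)*inlet_mach*inlet_mach)
                        ** ((gamma_CO2+1)/(2*gamma_CO2-2)))/inlet_mach
    assert abs(area_ratio_check - inletAreaRatio) < 1e-6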

    end_ramp_pres = 150000
    pres_inflow_final = getIsentropicPressure(mach=inlet_mach, P0=end_ramp_pres, gamma=gamma_CO2)

    print(f'final inlet pressure {pres_inflow_final}')


    #pres_inflow=148142
    #temp_inflow=297.169
    #rho_inflow=2.63872
    #mach_inflow=infloM = 0.139145
    vel_inflow[0] = inlet_mach*math.sqrt(gamma_CO2*pres_inflow/rho_inflow)

    # starting pressure for the inflow ramp

    #timestepper = rk4_step
    timestepper = euler_step
    eos = IdealSingleGas(gamma=gamma_CO2, gas_const=R_CO2)
    #bulk_init = Discontinuity(dim=dim, x0=-.31, sigma=0.04,
    bulk_init = Discontinuity(dim=dim, x0=-.30, sigma=0.005,
                              rhol=rho_inflow, rhor=rho_bkrnd,
                              pl=pres_inflow, pr=pres_bkrnd,
                              ul=vel_inflow, ur=vel_outflow)
    #inflow_init = Lump(dim=dim, rho0=rho_inflow, p0=pres_inflow,
                       #center=orig, velocity=vel_inflow, rhoamp=0.0)
    #outflow_init = Lump(dim=dim, rho0=rho_bkrnd, p0=pres_bkrnd,
                       #center=orig, velocity=vel_outflow, rhoamp=0.0)

    # pressure ramp function
    def inflow_ramp_pressure(t, startP=start_ramp_pres, finalP=end_ramp_pres,
                             ramp_interval=ramp_interval, t_ramp_start=t_ramp_start):
        if t > t_ramp_start:
            rampPressure = min(finalP, startP + (t - t_ramp_start)/ramp_interval*(finalP - startP))
        else:
            rampPressure = startP
        return rampPressure
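
    # Optional sanity check (illustrative): before t_ramp_start the ramp holds
    # the starting stagnation pressure.
    assert inflow_ramp_pressure(0.0) == start_ramp_pres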


    class IsentropicInflow:

        def __init__(self, *, dim=1, direc=0, T0=298, P0=1e5, mach=0.01, p_fun=None):
            self._P0 = P0
            self._T0 = T0
            self._dim = dim
            self._direc = direc
            self._mach = mach
            # always store the attribute (possibly None) so __call__ can test it
            self._p_fun = p_fun
        def __call__(self, x_vec, *, t=0, eos):
            if self._p_fun is not None:
                P0 = self._p_fun(t)
            else:
                P0 = self._P0
            T0 = self._T0

            gamma = eos.gamma()
            gas_const = eos.gas_const()
            pressure = getIsentropicPressure(mach=self._mach, P0=P0, gamma=gamma)
            temperature = getIsentropicTemperature(mach=self._mach, T0=T0, gamma=gamma)
            rho = pressure/temperature/gas_const

            #print(f'ramp Mach number {self._mach}')
            #print(f'ramp stagnation pressure {P0}')
            #print(f'ramp stagnation temperature {T0}')
            #print(f'ramp pressure {pressure}')
            #print(f'ramp temperature {temperature}')

            velocity = np.zeros(shape=(self._dim,)) 
            velocity[self._direc] = self._mach*math.sqrt(gamma*pressure/rho)
    
            mass = 0.0*x_vec[0] + rho
            mom = velocity*mass
            energy = (pressure/(gamma - 1.0)) + np.dot(mom, mom)/(2.0*mass)
            from mirgecom.euler import join_conserved
            return join_conserved(dim=self._dim, mass=mass, momentum=mom, energy=energy)


    inflow_init = IsentropicInflow(dim=dim, T0=298, P0=start_ramp_pres,
                                   mach=inlet_mach, p_fun=inflow_ramp_pressure)
    outflow_init = Uniform(dim=dim, rho=rho_bkrnd, p=pres_bkrnd,
                           velocity=vel_outflow)

    inflow = PrescribedBoundary(inflow_init)
    outflow = PrescribedBoundary(outflow_init)
    wall = AdiabaticSlipBoundary()
    dummy = DummyBoundary()

    alpha_sc = 0.5
    # s0 is ~p^-4 
    #s0_sc = -11.0
    s0_sc = -5.0
    # kappa is empirical ...
    kappa_sc = 0.5
    print(f"Shock capturing parameters: alpha {alpha_sc}, s0 {s0_sc}, kappa {kappa_sc}")

    # timestep estimate
    #wave_speed = max(mach2*c_bkrnd,c_shkd+velocity2[0])
    #char_len = 0.001
    #area=char_len*char_len/2
    #perimeter = 2*char_len+math.sqrt(2*char_len*char_len)
    #h = 2*area/perimeter

    #dt_est = 1/(wave_speed*order*order/h)
    #print(f"Time step estimate {dt_est}\n")
#
    #dt_est_visc = 1/(wave_speed*order*order/h+alpha_sc*order*order*order*order/h/h)
    #print(f"Viscous timestep estimate {dt_est_visc}\n")

    from grudge import sym
    boundaries = {sym.DTAG_BOUNDARY("Inflow"): inflow,
                  sym.DTAG_BOUNDARY("Outflow"): outflow,
                  sym.DTAG_BOUNDARY("Wall"): wall}

    if restart_step is None:
        local_mesh, global_nelements = create_parallel_grid(comm, get_pseudo_y0_mesh)
        local_nelements = local_mesh.nelements

    else:  # Restart
        with open(snapshot_pattern.format(step=restart_step, rank=rank), "rb") as f:
            restart_data = pickle.load(f)

        local_mesh = restart_data["local_mesh"]
        local_nelements = local_mesh.nelements
        global_nelements = restart_data["global_nelements"]

        assert comm.Get_size() == restart_data["num_parts"]

    if rank == 0:
        logging.info("Making discretization")
    discr = EagerDGDiscretization(
        actx, local_mesh, order=order, mpi_communicator=comm
    )
    nodes = thaw(actx, discr.nodes())

    if restart_step is None:
        if rank == 0:
            logging.info("Initializing soln.")
        # for Discontinuity initial conditions
        current_state = bulk_init(0, nodes, eos=eos)
        # for uniform background initial condition
        #current_state = bulk_init(nodes, eos=eos)
    else:
        current_t = restart_data["t"]
        current_step = restart_step

        current_state = unflatten(
            actx, discr.discr_from_dd("vol"),
            obj_array_vectorize(actx.from_numpy, restart_data["state"]))

    vis_timer = None

    if logmgr:
        logmgr_add_device_name(logmgr, queue)
        logmgr_add_many_discretization_quantities(logmgr, discr, dim,
            extract_vars_for_logging, units_for_logging)
        #logmgr_add_package_versions(logmgr)

        logmgr.add_watches(["step.max", "t_sim.max", "t_step.max", "t_log.max",
                            "min_pressure", "max_pressure",
                            "min_temperature", "max_temperature"])

        try:
            logmgr.add_watches(["memory_usage.max"])
        except KeyError:
            pass

        if use_profiling:
            logmgr.add_watches(["pyopencl_array_time.max"])

        vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
        logmgr.add_quantity(vis_timer)

    visualizer = make_visualizer(
        discr, discr.order + 3 if discr.dim == 2 else discr.order)
    #    initname = initializer.__class__.__name__
    initname = "pseudoY0"
    eosname = eos.__class__.__name__
    init_message = make_init_message(dim=dim, order=order,
                                     nelements=local_nelements,
                                     global_nelements=global_nelements,
                                     dt=current_dt, t_final=t_final,
                                     nstatus=nstatus, nviz=nviz,
                                     cfl=current_cfl,
                                     constant_cfl=constant_cfl,
                                     initname=initname,
                                     eosname=eosname, casename=casename)
    if rank == 0:
        logger.info(init_message)

    get_timestep = partial(inviscid_sim_timestep, discr=discr, t=current_t,
                           dt=current_dt, cfl=current_cfl, eos=eos,
                           t_final=t_final, constant_cfl=constant_cfl)

    def my_rhs(t, state):
        #return inviscid_operator(discr, eos=eos, boundaries=boundaries, q=state, t=t)
        return (inviscid_operator(discr, q=state, t=t, boundaries=boundaries, eos=eos)
                + artificial_viscosity(discr, t=t, r=state, eos=eos,
                                       boundaries=boundaries,
                                       alpha=alpha_sc, s0=s0_sc, kappa=kappa_sc))

    def my_checkpoint(step, t, dt, state):

        write_restart = (check_step(step, nrestart)
                         if step != restart_step else False)
        if write_restart is True:
            with open(snapshot_pattern.format(step=step, rank=rank), "wb") as f:
                pickle.dump({
                    "local_mesh": local_mesh,
                    "state": obj_array_vectorize(actx.to_numpy, flatten(state)),
                    "t": t,
                    "step": step,
                    "global_nelements": global_nelements,
                    "num_parts": nparts,
                    }, f)

        return sim_checkpoint(discr=discr, visualizer=visualizer, eos=eos,
                              q=state, vizname=casename,
                              step=step, t=t, dt=dt, nstatus=nstatus,
                              nviz=nviz, exittol=exittol,
                              constant_cfl=constant_cfl, comm=comm,
                              vis_timer=vis_timer, overwrite=True,
                              s0=s0_sc, kappa=kappa_sc)

    if rank == 0:
        logging.info("Stepping.")

    (current_step, current_t, current_state) = \
        advance_state(rhs=my_rhs, timestepper=timestepper,
                      checkpoint=my_checkpoint,
                      get_timestep=get_timestep, state=current_state,
                      t_final=t_final, t=current_t, istep=current_step,
                      logmgr=logmgr, eos=eos, dim=dim)

    if rank == 0:
        logger.info("Checkpointing final state ...")

    my_checkpoint(current_step, t=current_t,
                  dt=(current_t - checkpoint_t),
                  state=current_state)

    if current_t - t_final < 0:
        raise ValueError("Simulation exited abnormally")

    if logmgr:
        logmgr.close()
    elif use_profiling:
        print(actx.tabulate_profiling_data())
Example No. 16
def main(ctx_factory=cl.create_some_context):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue,
                                allocator=cl_tools.MemoryPool(
                                    cl_tools.ImmediateAllocator(queue)))

    dim = 1
    nel_1d = 24
    order = 1
    # tolerate large errors; case is unstable
    exittol = .2
    t_final = 0.01
    current_cfl = 1.0
    current_dt = .0001
    current_t = 0
    eos = IdealSingleGas()
    initializer = SodShock1D(dim)
    casename = "sod1d"
    boundaries = {BTAG_ALL: PrescribedBoundary(initializer)}
    constant_cfl = False
    nstatus = 10
    nviz = 10
    rank = 0
    checkpoint_t = current_t
    current_step = 0
    timestepper = rk4_step
    box_ll = -5.0
    box_ur = 5.0

    comm = MPI.COMM_WORLD

    rank = comm.Get_rank()

    from meshmode.mesh.generation import generate_regular_rect_mesh
    generate_grid = partial(generate_regular_rect_mesh,
                            a=(box_ll, ) * dim,
                            b=(box_ur, ) * dim,
                            n=(nel_1d, ) * dim)
    local_mesh, global_nelements = create_parallel_grid(comm, generate_grid)

    local_nelements = local_mesh.nelements

    discr = EagerDGDiscretization(actx,
                                  local_mesh,
                                  order=order,
                                  mpi_communicator=comm)
    nodes = thaw(actx, discr.nodes())
    current_state = initializer(0, nodes)

    visualizer = make_visualizer(
        discr, discr.order + 3 if discr.dim == 2 else discr.order)
    initname = initializer.__class__.__name__
    eosname = eos.__class__.__name__
    init_message = make_init_message(dim=dim,
                                     order=order,
                                     nelements=local_nelements,
                                     global_nelements=global_nelements,
                                     dt=current_dt,
                                     t_final=t_final,
                                     nstatus=nstatus,
                                     nviz=nviz,
                                     cfl=current_cfl,
                                     constant_cfl=constant_cfl,
                                     initname=initname,
                                     eosname=eosname,
                                     casename=casename)
    if rank == 0:
        logger.info(init_message)

    get_timestep = partial(inviscid_sim_timestep,
                           discr=discr,
                           t=current_t,
                           dt=current_dt,
                           cfl=current_cfl,
                           eos=eos,
                           t_final=t_final,
                           constant_cfl=constant_cfl)

    def my_rhs(t, state):
        return inviscid_operator(discr,
                                 q=state,
                                 t=t,
                                 boundaries=boundaries,
                                 eos=eos)

    def my_checkpoint(step, t, dt, state):
        return sim_checkpoint(discr,
                              visualizer,
                              eos,
                              q=state,
                              exact_soln=initializer,
                              vizname=casename,
                              step=step,
                              t=t,
                              dt=dt,
                              nstatus=nstatus,
                              nviz=nviz,
                              exittol=exittol,
                              constant_cfl=constant_cfl,
                              comm=comm)

    try:
        (current_step, current_t, current_state) = \
            advance_state(rhs=my_rhs, timestepper=timestepper,
                          checkpoint=my_checkpoint,
                          get_timestep=get_timestep, state=current_state,
                          t=current_t, t_final=t_final)
    except ExactSolutionMismatch as ex:
        current_step = ex.step
        current_t = ex.t
        current_state = ex.state

    #    if current_t != checkpoint_t:
    if rank == 0:
        logger.info("Checkpointing final state ...")
    my_checkpoint(current_step,
                  t=current_t,
                  dt=(current_t - checkpoint_t),
                  state=current_state)

    if current_t - t_final < 0:
        raise ValueError("Simulation exited abnormally")
Example No. 17
    def map_int_g(self, expr):
        lpot_source = self.places.get_geometry(expr.source.geometry)
        source_discr = self.places.get_discretization(expr.source.geometry,
                                                      expr.source.discr_stage)
        target_discr = self.places.get_discretization(expr.target.geometry,
                                                      expr.target.discr_stage)

        result = 0
        for kernel, density in zip(expr.source_kernels, expr.densities):
            rec_density = self.rec(density)
            if is_zero(rec_density):
                continue

            assert isinstance(rec_density, np.ndarray)
            if not self.is_kind_matrix(rec_density):
                raise NotImplementedError("layer potentials on non-variables")

            actx = self.array_context
            kernel_args = _get_layer_potential_args(self, expr)

            from sumpy.expansion.local import LineTaylorLocalExpansion
            local_expn = LineTaylorLocalExpansion(
                expr.target_kernel.get_base_kernel(), lpot_source.qbx_order)

            from sumpy.qbx import LayerPotentialMatrixGenerator
            mat_gen = LayerPotentialMatrixGenerator(
                actx.context,
                expansion=local_expn,
                source_kernels=(kernel, ),
                target_kernels=(expr.target_kernel, ))

            assert abs(expr.qbx_forced_limit) > 0
            from pytential import bind, sym
            radii = bind(
                self.places,
                sym.expansion_radii(source_discr.ambient_dim,
                                    dofdesc=expr.target))(actx)
            centers = bind(
                self.places,
                sym.expansion_centers(source_discr.ambient_dim,
                                      expr.qbx_forced_limit,
                                      dofdesc=expr.target))(actx)

            from meshmode.dof_array import flatten, thaw
            _, (mat, ) = mat_gen(
                actx.queue,
                targets=flatten(thaw(actx, target_discr.nodes())),
                sources=flatten(thaw(actx, source_discr.nodes())),
                centers=flatten(centers),
                expansion_radii=flatten(radii),
                **kernel_args)
            mat = actx.to_numpy(mat)

            waa = bind(
                self.places,
                sym.weights_and_area_elements(source_discr.ambient_dim,
                                              dofdesc=expr.source))(actx)
            mat[:, :] *= actx.to_numpy(flatten(waa))
            mat = mat.dot(rec_density)

            result += mat

        return result
Example No. 18
def main(ctx_factory=cl.create_some_context,
         snapshot_pattern="y0euler-{step:06d}-{rank:04d}.pkl",
         restart_step=None,
         use_profiling=False,
         use_logmgr=False):
    """Drive the Y0 example."""

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nparts = comm.Get_size()
    """logging and profiling"""
    logmgr = initialize_logmgr(use_logmgr,
                               use_profiling,
                               filename="y0euler.sqlite",
                               mode="wu",
                               mpi_comm=comm)

    cl_ctx = ctx_factory()
    if use_profiling:
        queue = cl.CommandQueue(
            cl_ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
        actx = PyOpenCLProfilingArrayContext(
            queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),
            logmgr=logmgr)
    else:
        queue = cl.CommandQueue(cl_ctx)
        actx = PyOpenCLArrayContext(queue,
                                    allocator=cl_tools.MemoryPool(
                                        cl_tools.ImmediateAllocator(queue)))

    #nviz = 500
    #nrestart = 500
    nviz = 50
    nrestart = 10000000
    current_dt = 2.5e-7
    #t_final = 5.e-7
    t_final = 3e-4

    dim = 2
    order = 1
    exittol = 10000000  # never exit based on comparison with the exact solution
    #t_final = 0.001
    current_cfl = 1.0
    vel_init = np.zeros(shape=(dim, ))
    vel_inflow = np.zeros(shape=(dim, ))
    vel_outflow = np.zeros(shape=(dim, ))
    orig = np.zeros(shape=(dim, ))
    #vel[0] = 340.0
    #vel_inflow[0] = 100.0  # m/s
    current_t = 0
    casename = "y0euler"
    constant_cfl = False
    # no internal euler status messages
    nstatus = 1000000000
    checkpoint_t = current_t
    current_step = 0

    # working gas: CO2 #
    #   gamma = 1.289
    #   MW=44.009  g/mol
    #   cp = 37.135 J/mol-K,
    #   rho= 1.977 kg/m^3 @298K
    gamma_CO2 = 1.289
    R_CO2 = 8314.59 / 44.009

    # background
    #   100 Pa
    #   298 K
    #   rho = 1.77619667e-3 kg/m^3
    #   velocity = 0,0,0
    rho_bkrnd = 1.77619667e-3
    pres_bkrnd = 100
    temp_bkrnd = 298
    c_bkrnd = math.sqrt(gamma_CO2 * pres_bkrnd / rho_bkrnd)

    # normal shock relations #
    # lab frame, moving shock
    # state 1 is ahead of (upstream of) the shock, state 2 is behind (downstream of) it

    mach = 2.0
    pressure_ratio = (2. * gamma_CO2 * mach * mach -
                      (gamma_CO2 - 1.)) / (gamma_CO2 + 1.)
    density_ratio = (gamma_CO2 + 1.) * mach * mach / (
        (gamma_CO2 - 1.) * mach * mach + 2.)
    mach2 = math.sqrt(((gamma_CO2 - 1.) * mach * mach + 2.) /
                      (2. * gamma_CO2 * mach * mach - (gamma_CO2 - 1.)))

    rho1 = rho_bkrnd
    pressure1 = pres_bkrnd
    rho2 = rho1 * density_ratio
    pressure2 = pressure1 * pressure_ratio
    velocity1 = 0.
    velocity2 = -mach * c_bkrnd * (1 / density_ratio - 1)
    c_shkd = math.sqrt(gamma_CO2 * pressure2 / rho2)
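
    # Optional sanity check (illustrative): a normal shock always has a
    # subsonic post-shock Mach number.
    assert mach2 < 1.0 < mach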

    vel_inflow[0] = velocity2

    timestepper = rk4_step
    eos = IdealSingleGas(gamma=gamma_CO2, gas_const=R_CO2)
    bulk_init = Discontinuity(dim=dim,
                              x0=.05,
                              sigma=0.01,
                              rhol=rho2,
                              rhor=rho1,
                              pl=pressure2,
                              pr=pressure1,
                              ul=vel_inflow[0],
                              ur=0.)
    inflow_init = Lump(dim=dim,
                       rho0=rho2,
                       p0=pressure2,
                       center=orig,
                       velocity=vel_inflow,
                       rhoamp=0.0)
    outflow_init = Lump(dim=dim,
                        rho0=rho1,
                        p0=pressure1,
                        center=orig,
                        velocity=vel_outflow,
                        rhoamp=0.0)

    inflow = PrescribedBoundary(inflow_init)
    outflow = PrescribedBoundary(outflow_init)
    wall = AdiabaticSlipBoundary()
    dummy = DummyBoundary()

    # shock capturing parameters
    # sonic conditions
    density_ratio = (gamma_CO2 + 1.) * 1.0 / ((gamma_CO2 - 1.) + 2.)

    density_star = rho1 * density_ratio
    shock_thickness = 20 * 0.001  # on the order of 3 elements, should match what is in mesh generator
    # alpha is ~h/p (spacing/order)
    #alpha_sc = shock_thickness*abs(velocity1-velocity2)*density_star
    alpha_sc = 0.05
    # sigma is ~p^-4
    sigma_sc = -11.0
    # kappa is empirical ...
    kappa_sc = 0.5
    print(
        f"Shock capturing parameters: alpha {alpha_sc}, s0 {sigma_sc}, kappa {kappa_sc}"
    )

    # timestep estimate
    wave_speed = max(mach2 * c_bkrnd, c_shkd + velocity2)
    char_len = 0.001
    area = char_len * char_len / 2
    perimeter = 2 * char_len + math.sqrt(2 * char_len * char_len)
    h = 2 * area / perimeter

    dt_est = 1 / (wave_speed * order * order / h)
    print(f"Time step estimate {dt_est}\n")

    dt_est_visc = 1 / (wave_speed * order * order / h +
                       alpha_sc * order * order * order * order / h / h)
    print(f"Viscous timestep estimate {dt_est_visc}\n")

    from grudge import sym
    #    boundaries = {BTAG_ALL: DummyBoundary}
    boundaries = {
        sym.DTAG_BOUNDARY("Inflow"): inflow,
        sym.DTAG_BOUNDARY("Outflow"): outflow,
        sym.DTAG_BOUNDARY("Wall"): wall
    }

    #local_mesh, global_nelements = create_parallel_grid(comm,
    #get_pseudo_y0_mesh)
    #
    #local_nelements = local_mesh.nelements

    if restart_step is None:
        local_mesh, global_nelements = create_parallel_grid(comm, get_mesh)
        local_nelements = local_mesh.nelements

    else:  # Restart
        with open(snapshot_pattern.format(step=restart_step, rank=rank),
                  "rb") as f:
            restart_data = pickle.load(f)

        local_mesh = restart_data["local_mesh"]
        local_nelements = local_mesh.nelements
        global_nelements = restart_data["global_nelements"]

        assert comm.Get_size() == restart_data["num_parts"]

    if rank == 0:
        logging.info("Making discretization")
    discr = EagerDGDiscretization(actx,
                                  local_mesh,
                                  order=order,
                                  mpi_communicator=comm)
    nodes = thaw(actx, discr.nodes())

    if restart_step is None:
        if rank == 0:
            logging.info("Initializing soln.")
        current_state = bulk_init(0, nodes, eos=eos)
    else:
        current_t = restart_data["t"]
        current_step = restart_step

        current_state = unflatten(
            actx, discr.discr_from_dd("vol"),
            obj_array_vectorize(actx.from_numpy, restart_data["state"]))

    vis_timer = None

    if logmgr:
        logmgr_add_device_name(logmgr, queue)
        logmgr_add_discretization_quantities(logmgr, discr, eos, dim)
        #logmgr_add_package_versions(logmgr)

        logmgr.add_watches([
            "step.max", "t_sim.max", "t_step.max", "min_pressure",
            "max_pressure", "min_temperature", "max_temperature"
        ])

        try:
            logmgr.add_watches(["memory_usage.max"])
        except KeyError:
            pass

        if use_profiling:
            logmgr.add_watches(["pyopencl_array_time.max"])

        vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
        logmgr.add_quantity(vis_timer)

    #visualizer = make_visualizer(discr, discr.order + 3
    #if discr.dim == 2 else discr.order)
    visualizer = make_visualizer(discr, discr.order)

    #    initname = initializer.__class__.__name__
    initname = "pseudoY0"
    eosname = eos.__class__.__name__
    init_message = make_init_message(dim=dim,
                                     order=order,
                                     nelements=local_nelements,
                                     global_nelements=global_nelements,
                                     dt=current_dt,
                                     t_final=t_final,
                                     nstatus=nstatus,
                                     nviz=nviz,
                                     cfl=current_cfl,
                                     constant_cfl=constant_cfl,
                                     initname=initname,
                                     eosname=eosname,
                                     casename=casename)
    if rank == 0:
        logger.info(init_message)

    get_timestep = partial(inviscid_sim_timestep,
                           discr=discr,
                           t=current_t,
                           dt=current_dt,
                           cfl=current_cfl,
                           eos=eos,
                           t_final=t_final,
                           constant_cfl=constant_cfl)

    def my_rhs(t, state):
        #return inviscid_operator(discr, eos=eos, boundaries=boundaries, q=state, t=t)
        return (inviscid_operator(
            discr, q=state, t=t, boundaries=boundaries, eos=eos) +
                artificial_viscosity(discr,
                                     t=t,
                                     r=state,
                                     eos=eos,
                                     boundaries=boundaries,
                                     alpha=alpha_sc,
                                     sigma=sigma_sc,
                                     kappa=kappa_sc))

    def my_checkpoint(step, t, dt, state):

        write_restart = (check_step(step, nrestart)
                         if step != restart_step else False)
        if write_restart is True:
            with open(snapshot_pattern.format(step=step, rank=rank),
                      "wb") as f:
                pickle.dump(
                    {
                        "local_mesh": local_mesh,
                        "state": obj_array_vectorize(actx.to_numpy,
                                                     flatten(state)),
                        "t": t,
                        "step": step,
                        "global_nelements": global_nelements,
                        "num_parts": nparts,
                    }, f)

        #x0=f(time)
        exact_soln = Discontinuity(dim=dim,
                                   x0=.05,
                                   sigma=0.00001,
                                   rhol=rho2,
                                   rhor=rho1,
                                   pl=pressure2,
                                   pr=pressure1,
                                   ul=vel_inflow[0],
                                   ur=0.,
                                   uc=mach * c_bkrnd)

        return sim_checkpoint(discr=discr,
                              visualizer=visualizer,
                              eos=eos,
                              q=state,
                              vizname=casename,
                              step=step,
                              t=t,
                              dt=dt,
                              nstatus=nstatus,
                              nviz=nviz,
                              exittol=exittol,
                              constant_cfl=constant_cfl,
                              comm=comm,
                              vis_timer=vis_timer,
                              overwrite=True,
                              exact_soln=exact_soln,
                              sigma=sigma_sc,
                              kappa=kappa_sc)

    if rank == 0:
        logging.info("Stepping.")

    (current_step, current_t, current_state) = \
        advance_state(rhs=my_rhs, timestepper=timestepper,
                      checkpoint=my_checkpoint,
                      get_timestep=get_timestep, state=current_state,
                      t_final=t_final, t=current_t, istep=current_step,
                      logmgr=logmgr, eos=eos, dim=dim)

    if rank == 0:
        logger.info("Checkpointing final state ...")

    my_checkpoint(current_step,
                  t=current_t,
                  dt=(current_t - checkpoint_t),
                  state=current_state)

    if current_t - t_final < 0:
        raise ValueError("Simulation exited abnormally")

    if logmgr:
        logmgr.close()
    elif use_profiling:
        print(actx.tabulate_profiling_data())
Example No. 19
def discr_and_nodes(stage):
    density_discr = places.get_discretization(where.geometry, stage)
    return density_discr, np.array([
            actx.to_numpy(flatten(axis))
            for axis in thaw(actx, density_discr.nodes())])
Example No. 20
def test_chained_to_direct(actx_factory,
                           ndim,
                           chain_type,
                           nelements=128,
                           visualize=False):
    import time
    from meshmode.discretization.connection.chained import \
        flatten_chained_connection

    actx = actx_factory()

    discr = create_discretization(actx, ndim, nelements=nelements)
    connections = []
    if chain_type == 1:
        conn = create_refined_connection(actx, discr)
        connections.append(conn)
        conn = create_refined_connection(actx, conn.to_discr)
        connections.append(conn)
    elif chain_type == 2:
        conn = create_refined_connection(actx, discr)
        connections.append(conn)
        conn = create_refined_connection(actx, conn.to_discr)
        connections.append(conn)
        conn = create_refined_connection(actx, conn.to_discr)
        connections.append(conn)
    elif chain_type == 3 and ndim == 3:
        conn = create_refined_connection(actx, discr, threshold=np.inf)
        connections.append(conn)
        conn = create_face_connection(actx, conn.to_discr)
        connections.append(conn)
    else:
        raise ValueError("unknown test case")

    from meshmode.discretization.connection import \
            ChainedDiscretizationConnection
    chained = ChainedDiscretizationConnection(connections)

    t_start = time.time()
    direct = flatten_chained_connection(actx, chained)
    t_end = time.time()
    if visualize:
        print("[TIME] Flatten: {:.5e}".format(t_end - t_start))

    if chain_type < 3:
        to_element_indices = np.full(direct.to_discr.mesh.nelements,
                                     0,
                                     dtype=int)
        for grp in direct.groups:
            for batch in grp.batches:
                for i in batch.to_element_indices.get(actx.queue):
                    to_element_indices[i] += 1
        assert np.min(to_element_indices) > 0

    def f(x):
        from functools import reduce
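        # note: reduce seeds the product with x[0], so this evaluates
        # 0.1 * x[0] * sin(5*x[1]) * ... * sin(5*x[ndim-1])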
        return 0.1 * reduce(lambda x, y: x * actx.np.sin(5 * y), x)

    x = thaw(actx, connections[0].from_discr.nodes())
    fx = f(x)

    t_start = time.time()
    f1 = actx.to_numpy(flatten(direct(fx)))
    t_end = time.time()
    if visualize:
        print("[TIME] Direct: {:.5e}".format(t_end - t_start))

    t_start = time.time()
    f2 = actx.to_numpy(flatten(chained(fx)))
    t_end = time.time()
    if visualize:
        print("[TIME] Chained: {:.5e}".format(t_end - t_start))

    if visualize and ndim == 2:
        import matplotlib.pyplot as pt

        pt.figure(figsize=(10, 8), dpi=300)
        pt.plot(f1, label="Direct")
        pt.plot(f2, label="Chained")
        pt.ylim([np.min(f2) - 0.1, np.max(f2) + 0.1])
        pt.legend()
        pt.savefig("test_chained_to_direct.png")
        pt.clf()

    assert np.allclose(f1, f2)
Example No. 21
    def __call__(self, actx, source_dd, indices, **kwargs):
        """Generate proxy points for each given range of source points in
        the discretization in *source_dd*.

        :arg actx: a :class:`~meshmode.array_context.ArrayContext`.
        :arg source_dd: a :class:`~pytential.symbolic.primitives.DOFDescriptor`
            for the discretization on which the proxy points are to be
            generated.
        :arg indices: a :class:`sumpy.tools.BlockIndexRanges`.

        :return: a tuple of ``(proxies, pxyranges, pxycenters, pxyradii)``,
            where each element is a :class:`pyopencl.array.Array`. The
            sizes of the arrays are as follows: ``pxycenters`` is of size
            ``(2, nranges)``, ``pxyradii`` is of size ``(nranges,)``,
            ``pxyranges`` is of size ``(nranges + 1,)`` and ``proxies`` is
            of size ``(2, nranges * nproxy)``. The proxy points in a range
            :math:`i` can be obtained by a slice
            ``proxies[pxyranges[i]:pxyranges[i + 1]]`` and are all at a
            distance ``pxyradii[i]`` from the range center ``pxycenters[i]``.
        """
        def _affine_map(v, A, b):
            return np.dot(A, v) + b

        from pytential import bind, sym
        source_dd = sym.as_dofdesc(source_dd)
        discr = self.places.get_discretization(source_dd.geometry,
                                               source_dd.discr_stage)

        radii = bind(self.places,
                     sym.expansion_radii(self.ambient_dim,
                                         dofdesc=source_dd))(actx)
        center_int = bind(
            self.places,
            sym.expansion_centers(self.ambient_dim, -1,
                                  dofdesc=source_dd))(actx)
        center_ext = bind(
            self.places,
            sym.expansion_centers(self.ambient_dim, +1,
                                  dofdesc=source_dd))(actx)

        from meshmode.dof_array import flatten, thaw
        knl = self.get_kernel()
        _, (
            centers_dev,
            radii_dev,
        ) = knl(actx.queue,
                sources=flatten(thaw(actx, discr.nodes())),
                center_int=flatten(center_int),
                center_ext=flatten(center_ext),
                expansion_radii=flatten(radii),
                srcindices=indices.indices,
                srcranges=indices.ranges,
                **kwargs)

        from pytential.utils import flatten_to_numpy
        centers = flatten_to_numpy(actx, centers_dev)
        radii = flatten_to_numpy(actx, radii_dev)
        proxies = np.empty(indices.nblocks, dtype=object)
        for i in range(indices.nblocks):
            proxies[i] = _affine_map(self.ref_points,
                                     A=(radii[i] * np.eye(self.ambient_dim)),
                                     b=centers[:, i].reshape(-1, 1))

        pxyranges = actx.from_numpy(
            np.arange(0,
                      proxies.shape[0] * proxies[0].shape[1] + 1,
                      proxies[0].shape[1],
                      dtype=indices.ranges.dtype))
        proxies = make_obj_array([
            actx.freeze(actx.from_numpy(np.hstack([p[idim] for p in proxies])))
            for idim in range(self.ambient_dim)
        ])
        centers = make_obj_array([
            actx.freeze(centers_dev[idim]) for idim in range(self.ambient_dim)
        ])

        assert pxyranges[-1] == proxies[0].shape[0]
        return proxies, actx.freeze(pxyranges), centers, actx.freeze(radii_dev)
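
    # Usage sketch (hedged; ``generator``, ``source_dd`` and ``indices`` are
    # assumed names): consume the return values per the convention documented
    # in ``__call__`` above.
    #
    #   proxies, pxyranges, pxycenters, pxyradii = generator(actx, source_dd, indices)
    #   ranges = actx.to_numpy(actx.thaw(pxyranges))
    #   # proxy points of block i, one array per ambient axis:
    #   block_i = [actx.to_numpy(actx.thaw(proxies[idim]))[ranges[i]:ranges[i + 1]]
    #              for idim in range(len(proxies))]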
Example No. 22
def main():
    import logging
    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info

    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    target_order = 16
    qbx_order = 3
    nelements = 60
    mode_nr = 0

    k = 0
    if k:
        kernel = HelmholtzKernel(2)
    else:
        kernel = LaplaceKernel(2)

    mesh = make_curve_mesh(
        #lambda t: ellipse(1, t),
        starfish,
        np.linspace(0, 1, nelements + 1),
        target_order)

    from pytential.qbx import QBXLayerPotentialSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory

    pre_density_discr = Discretization(
        actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))

    unaccel_qbx = QBXLayerPotentialSource(
        pre_density_discr,
        fine_order=2 * target_order,
        qbx_order=qbx_order,
        fmm_order=False,
        target_association_tolerance=.05,
    )

    from pytential.target import PointsTarget
    fplot = FieldPlotter(np.zeros(2), extent=5, npoints=600)

    from pytential import GeometryCollection
    places = GeometryCollection({
        "unaccel_qbx": unaccel_qbx,
        "qbx": unaccel_qbx.copy(fmm_order=10),
        "targets": PointsTarget(fplot.points)
    })
    density_discr = places.get_discretization("unaccel_qbx")

    nodes = thaw(actx, density_discr.nodes())
    angle = actx.np.arctan2(nodes[1], nodes[0])

    from pytential import bind, sym
    if k:
        kernel_kwargs = {"k": sym.var("k")}
    else:
        kernel_kwargs = {}

    def get_op():
        kwargs = dict(qbx_forced_limit=None)
        kwargs.update(kernel_kwargs)
        # return sym.d_dx(2, sym.S(kernel, sym.var("sigma"), **kwargs))
        # return sym.D(kernel, sym.var("sigma"), **kwargs)
        return sym.S(kernel, sym.var("sigma"), **kwargs)

    op = get_op()

    sigma = actx.np.cos(mode_nr * angle)

    if isinstance(kernel, HelmholtzKernel):
        for i, elem in np.ndenumerate(sigma):
            sigma[i] = elem.astype(np.complex128)

    fld_in_vol = bind(places, op,
                      auto_where=("unaccel_qbx", "targets"))(actx,
                                                             sigma=sigma,
                                                             k=k).get()

    fmm_fld_in_vol = bind(places, op,
                          auto_where=("qbx", "targets"))(actx,
                                                         sigma=sigma,
                                                         k=k).get()

    err = fmm_fld_in_vol - fld_in_vol

    try:
        import matplotlib
    except ImportError:
        return

    matplotlib.use("Agg")
    im = fplot.show_scalar_in_matplotlib(np.log10(np.abs(err) + 1e-17))

    from matplotlib.colors import Normalize
    im.set_norm(Normalize(vmin=-12, vmax=0))

    import matplotlib.pyplot as pt
    from matplotlib.ticker import NullFormatter
    pt.gca().xaxis.set_major_formatter(NullFormatter())
    pt.gca().yaxis.set_major_formatter(NullFormatter())

    cb = pt.colorbar(shrink=0.9)
    cb.set_label(r"$\log_{10}(\mathrm{Error})$")

    pt.savefig("fmm-error-order-%d.pdf" % qbx_order)
Example No. 23
def test_slipwall_flux(actx_factory, dim, order):
    """Check for zero boundary flux.

    Check for vanishing flux across the slipwall.
    """
    actx = actx_factory()

    wall = AdiabaticSlipBoundary()
    eos = IdealSingleGas()

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    for np1 in [4, 8, 12]:
        from meshmode.mesh.generation import generate_regular_rect_mesh

        mesh = generate_regular_rect_mesh(
            a=(-0.5,) * dim, b=(0.5,) * dim, n=(np1,) * dim
        )

        discr = EagerDGDiscretization(actx, mesh, order=order)
        nodes = thaw(actx, discr.nodes())
        nhat = thaw(actx, discr.normal(BTAG_ALL))
        h = 1.0 / np1

        from functools import partial
        bnd_norm = partial(discr.norm, p=np.inf, dd=BTAG_ALL)

        logger.info(f"Number of {dim}d elems: {mesh.nelements}")
        # for velocities in each direction
        err_max = 0.0
        for vdir in range(dim):
            vel = np.zeros(shape=(dim,))

            # for velocity directions +1, and -1
            for parity in [1.0, -1.0]:
                vel[vdir] = parity
                from mirgecom.initializers import Uniform
                initializer = Uniform(numdim=dim, velocity=vel)
                uniform_state = initializer(0, nodes)
                bnd_pair = wall.boundary_pair(discr, uniform_state, t=0.0,
                                              btag=BTAG_ALL, eos=eos)

                # Check the total velocity component normal
                # to each surface.  It should be zero.  The
                # numerical fluxes cannot be zero.
                avg_state = 0.5*(bnd_pair.int + bnd_pair.ext)
                acv = split_conserved(dim, avg_state)
                err_max = max(err_max, bnd_norm(np.dot(acv.momentum, nhat)))

                from mirgecom.euler import _facial_flux
                bnd_flux = split_conserved(dim, _facial_flux(discr, eos,
                                                             bnd_pair, local=True))
                err_max = max(err_max, bnd_norm(bnd_flux.mass),
                              bnd_norm(bnd_flux.energy))

        eoc.add_data_point(h, err_max)

    message = (f"EOC:\n{eoc}")
    logger.info(message)
    assert (
        eoc.order_estimate() >= order - 0.5
        or eoc.max_error() < 1e-12
    )
Example No. 24
def main(mesh_name="ellipsoid"):
    import logging
    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.WARNING)  # INFO for more progress info

    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)
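    # NOTE: target_order, qbx_order, mode_nr and k are assumed to be
    # module-level parameters defined elsewhere in the full script; they are
    # not set in this excerpt.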

    if mesh_name == "ellipsoid":
        cad_file_name = "geometries/ellipsoid.step"
        h = 0.6
    elif mesh_name == "two-cylinders":
        cad_file_name = "geometries/two-cylinders-smooth.step"
        h = 0.4
    else:
        raise ValueError("unknown mesh name: %s" % mesh_name)

    from meshmode.mesh.io import generate_gmsh, FileSource
    mesh = generate_gmsh(
        FileSource(cad_file_name),
        2,
        order=2,
        other_options=["-string",
                       "Mesh.CharacteristicLengthMax = %g;" % h],
        target_unit="MM")

    from meshmode.mesh.processing import perform_flips
    # Flip elements--gmsh generates inside-out geometry.
    mesh = perform_flips(mesh, np.ones(mesh.nelements))

    from meshmode.mesh.processing import find_bounding_box
    bbox_min, bbox_max = find_bounding_box(mesh)
    bbox_center = 0.5 * (bbox_min + bbox_max)
    bbox_size = max(bbox_max - bbox_min) / 2

    logger.info("%d elements" % mesh.nelements)

    from pytential.qbx import QBXLayerPotentialSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory

    density_discr = Discretization(
        actx, mesh, InterpolatoryQuadratureSimplexGroupFactory(target_order))

    qbx = QBXLayerPotentialSource(density_discr,
                                  4 * target_order,
                                  qbx_order,
                                  fmm_order=qbx_order + 3,
                                  target_association_tolerance=0.15)

    from pytential.target import PointsTarget
    fplot = FieldPlotter(bbox_center, extent=3.5 * bbox_size, npoints=150)

    from pytential import GeometryCollection
    places = GeometryCollection(
        {
            "qbx": qbx,
            "targets": PointsTarget(fplot.points)
        }, auto_where="qbx")
    density_discr = places.get_discretization("qbx")

    nodes = thaw(actx, density_discr.nodes())
    angle = actx.np.arctan2(nodes[1], nodes[0])

    if k:
        kernel = HelmholtzKernel(3)
    else:
        kernel = LaplaceKernel(3)

    #op = sym.d_dx(sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None))
    op = sym.D(kernel, sym.var("sigma"), qbx_forced_limit=None)
    #op = sym.S(kernel, sym.var("sigma"), qbx_forced_limit=None)

    sigma = actx.np.cos(mode_nr * angle)
    if 0:
        from meshmode.dof_array import flatten, unflatten
        sigma = flatten(0 * angle)
        from random import randrange
        for i in range(5):
            sigma[randrange(len(sigma))] = 1
        sigma = unflatten(actx, density_discr, sigma)

    if isinstance(kernel, HelmholtzKernel):
        for i, elem in np.ndenumerate(sigma):
            sigma[i] = elem.astype(np.complex128)

    fld_in_vol = actx.to_numpy(
        bind(places, op, auto_where=("qbx", "targets"))(actx, sigma=sigma,
                                                        k=k))

    #fplot.show_scalar_in_mayavi(fld_in_vol.real, max_val=5)
    fplot.write_vtk_file("layerpot-3d-potential.vts",
                         [("potential", fld_in_vol)])

    bdry_normals = bind(places, sym.normal(
        density_discr.ambient_dim))(actx).as_vector(dtype=object)

    from meshmode.discretization.visualization import make_visualizer
    bdry_vis = make_visualizer(actx, density_discr, target_order)
    bdry_vis.write_vtk_file("layerpot-3d-density.vtu", [
        ("sigma", sigma),
        ("bdry_normals", bdry_normals),
    ])
Example No. 25
    def map_q_weight(self, expr):
        discr = self.places.get_discretization(expr.dofdesc.geometry,
                                                expr.dofdesc.discr_stage)
        return thaw(self.array_context, discr.quad_weights())
Example No. 26
def eval_func_on_discr_nodes(queue, discr, func):
    arr_ctx = PyOpenCLArrayContext(queue)
    nodes = np.vstack([c.get() for c in flatten(thaw(arr_ctx, discr.nodes()))])
    fvals = func(nodes)
    return cl.array.to_device(queue, fvals)
Example No. 27
def central_flux_interior(actx, discr, int_tpair):
    """Compute a central flux for interior faces."""
    normal = thaw(actx, discr.normal(int_tpair.dd))
    flux_weak = gradient_flux_central(int_tpair, normal)
    dd_all_faces = int_tpair.dd.with_dtag("all_faces")
    return discr.project(int_tpair.dd, dd_all_faces, flux_weak)
Example No. 28
def main(ctx_factory=cl.create_some_context, use_logmgr=True,
         use_leap=False, use_profiling=False, casename=None,
         rst_filename=None, actx_class=PyOpenCLArrayContext):
    """Drive example."""
    cl_ctx = ctx_factory()

    if casename is None:
        casename = "mirgecom"

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nparts = comm.Get_size()

    logmgr = initialize_logmgr(use_logmgr,
        filename=f"{casename}.sqlite", mode="wu", mpi_comm=comm)

    if use_profiling:
        queue = cl.CommandQueue(cl_ctx,
            properties=cl.command_queue_properties.PROFILING_ENABLE)
    else:
        queue = cl.CommandQueue(cl_ctx)

    actx = actx_class(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))

    # timestepping control
    if use_leap:
        from leap.rk import RK4MethodBuilder
        timestepper = RK4MethodBuilder("state")
    else:
        timestepper = rk4_step
    t_final = 0.01
    current_cfl = 1.0
    current_dt = .001
    current_t = 0
    current_step = 0
    constant_cfl = False

    # some i/o frequencies
    nstatus = 1
    nhealth = 1
    nrestart = 10
    nviz = 1

    dim = 2

    rst_path = "restart_data/"
    rst_pattern = (
        rst_path + "{cname}-{step:04d}-{rank:04d}.pkl"
    )
    if rst_filename:  # read the grid from restart data
        rst_filename = f"{rst_filename}-{rank:04d}.pkl"

        from mirgecom.restart import read_restart_data
        restart_data = read_restart_data(actx, rst_filename)
        local_mesh = restart_data["local_mesh"]
        local_nelements = local_mesh.nelements
        global_nelements = restart_data["global_nelements"]
        assert restart_data["nparts"] == nparts
    else:  # generate the grid from scratch
        box_ll = -5.0
        box_ur = 5.0
        nel_1d = 16
        from meshmode.mesh.generation import generate_regular_rect_mesh
        generate_mesh = partial(generate_regular_rect_mesh, a=(box_ll,)*dim,
                                b=(box_ur,) * dim, nelements_per_axis=(nel_1d,)*dim)
        local_mesh, global_nelements = generate_and_distribute_mesh(comm,
                                                                    generate_mesh)
        local_nelements = local_mesh.nelements

    order = 3
    discr = EagerDGDiscretization(
        actx, local_mesh, order=order, mpi_communicator=comm
    )
    nodes = thaw(actx, discr.nodes())

    vis_timer = None

    if logmgr:
        logmgr_add_device_name(logmgr, queue)
        logmgr_add_device_memory_usage(logmgr, queue)
        logmgr_add_many_discretization_quantities(logmgr, discr, dim,
                             extract_vars_for_logging, units_for_logging)

        vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
        logmgr.add_quantity(vis_timer)

        logmgr.add_watches([
            ("step.max", "step = {value}, "),
            ("t_sim.max", "sim time: {value:1.6e} s\n"),
            ("min_pressure", "------- P (min, max) (Pa) = ({value:1.9e}, "),
            ("max_pressure",    "{value:1.9e})\n"),
            ("t_step.max", "------- step walltime: {value:6g} s, "),
            ("t_log.max", "log walltime: {value:6g} s")
        ])

    # soln setup, init
    eos = IdealSingleGas()
    vel = np.zeros(shape=(dim,))
    orig = np.zeros(shape=(dim,))
    vel[:dim] = 1.0
    initializer = Lump(dim=dim, center=orig, velocity=vel)
    boundaries = {
        BTAG_ALL: PrescribedInviscidBoundary(fluid_solution_func=initializer)
    }

    if rst_filename:
        current_t = restart_data["t"]
        current_step = restart_data["step"]
        current_state = restart_data["state"]
        if logmgr:
            from mirgecom.logging_quantities import logmgr_set_time
            logmgr_set_time(logmgr, current_step, current_t)
    else:
        # Set the current state from time 0
        current_state = initializer(nodes)

    visualizer = make_visualizer(discr)
    initname = initializer.__class__.__name__
    eosname = eos.__class__.__name__
    init_message = make_init_message(dim=dim, order=order,
                                     nelements=local_nelements,
                                     global_nelements=global_nelements,
                                     dt=current_dt, t_final=t_final, nstatus=nstatus,
                                     nviz=nviz, cfl=current_cfl,
                                     constant_cfl=constant_cfl, initname=initname,
                                     eosname=eosname, casename=casename)
    if rank == 0:
        logger.info(init_message)

    def my_write_viz(step, t, state, dv=None, exact=None, resid=None):
        if dv is None:
            dv = eos.dependent_vars(state)
        if exact is None:
            exact = initializer(x_vec=nodes, eos=eos, time=t)
        if resid is None:
            resid = state - exact
        viz_fields = [("cv", state),
                      ("dv", dv),
                      ("exact", exact),
                      ("residual", resid)]
        from mirgecom.simutil import write_visfile
        write_visfile(discr, viz_fields, visualizer, vizname=casename,
                      step=step, t=t, overwrite=True, vis_timer=vis_timer)

    def my_write_restart(state, step, t):
        rst_fname = rst_pattern.format(cname=casename, step=step, rank=rank)
        if rst_fname != rst_filename:
            rst_data = {
                "local_mesh": local_mesh,
                "state": state,
                "t": t,
                "step": step,
                "order": order,
                "global_nelements": global_nelements,
                "num_parts": nparts
            }
            from mirgecom.restart import write_restart_file
            write_restart_file(actx, rst_data, rst_fname, comm)

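    # Local (per-rank) health check: flag NaN/infinite or out-of-range
    # pressure and excessive deviation from the exact solution.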
    def my_health_check(dv, state, exact):
        health_error = False
        from mirgecom.simutil import check_naninf_local, check_range_local
        if check_naninf_local(discr, "vol", dv.pressure) \
           or check_range_local(discr, "vol", dv.pressure, .9999999999, 1.00000001):
            health_error = True
            logger.info(f"{rank=}: Invalid pressure data found.")

        from mirgecom.simutil import compare_fluid_solutions
        component_errors = compare_fluid_solutions(discr, state, exact)
        exittol = .09
        if max(component_errors) > exittol:
            health_error = True
            if rank == 0:
                logger.info("Solution diverged from exact soln.")

        return health_error

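    # Pre-step callback: run health checks, restart dumps, viz dumps, and
    # status reports at their configured step intervals, then return the
    # (possibly limited) timestep for the upcoming step.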
    def my_pre_step(step, t, dt, state):
        try:
            dv = None
            exact = None

            if logmgr:
                logmgr.tick_before()

            from mirgecom.simutil import check_step
            do_viz = check_step(step=step, interval=nviz)
            do_restart = check_step(step=step, interval=nrestart)
            do_health = check_step(step=step, interval=nhealth)
            do_status = check_step(step=step, interval=nstatus)

            if do_health:
                dv = eos.dependent_vars(state)
                exact = initializer(x_vec=nodes, eos=eos, time=t)
                from mirgecom.simutil import allsync
                health_errors = allsync(
                    my_health_check(dv=dv, state=state, exact=exact),
                    comm, op=MPI.LOR
                )
                if health_errors:
                    if rank == 0:
                        logger.info("Fluid solution failed health check.")
                    raise MyRuntimeError("Failed solution health check.")

            if do_restart:
                my_write_restart(step=step, t=t, state=state)

            if do_viz:
                if dv is None:
                    dv = eos.dependent_vars(state)
                if exact is None:
                    exact = initializer(x_vec=nodes, eos=eos, time=t)
                resid = state - exact
                my_write_viz(step=step, t=t, dv=dv, state=state, exact=exact,
                             resid=resid)

            if do_status:
                if exact is None:
                    exact = initializer(x_vec=nodes, eos=eos, time=t)
                from mirgecom.simutil import compare_fluid_solutions
                component_errors = compare_fluid_solutions(discr, state, exact)
                status_msg = (
                    "------- errors="
                    + ", ".join("%.3g" % en for en in component_errors))
                if rank == 0:
                    logger.info(status_msg)

        except MyRuntimeError:
            if rank == 0:
                logger.info("Errors detected; attempting graceful exit.")
            my_write_viz(step=step, t=t, state=state)
            my_write_restart(step=step, t=t, state=state)
            raise

        dt = get_sim_timestep(discr, state, t, dt, current_cfl, eos, t_final,
                              constant_cfl)
        return state, dt

    def my_post_step(step, t, dt, state):
        # Logmgr needs to know about EOS, dt, dim?
        # imo this is a design/scope flaw
        if logmgr:
            set_dt(logmgr, dt)
            set_sim_state(logmgr, dim, state, eos)
            logmgr.tick_after()
        return state, dt

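    # Right-hand side of the semi-discrete system: the inviscid Euler operator.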
    def my_rhs(t, state):
        return euler_operator(discr, cv=state, time=t,
                              boundaries=boundaries, eos=eos)

    current_dt = get_sim_timestep(discr, current_state, current_t, current_dt,
                                  current_cfl, eos, t_final, constant_cfl)

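    # Advance the solution from current_t to t_final, invoking the callbacks
    # around every step.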
    current_step, current_t, current_state = \
        advance_state(rhs=my_rhs, timestepper=timestepper,
                      pre_step_callback=my_pre_step,
                      post_step_callback=my_post_step, dt=current_dt,
                      state=current_state, t=current_t, t_final=t_final)

    # Dump the final data
    if rank == 0:
        logger.info("Checkpointing final state ...")

    final_dv = eos.dependent_vars(current_state)
    final_exact = initializer(x_vec=nodes, eos=eos, time=current_t)
    final_resid = current_state - final_exact
    my_write_viz(step=current_step, t=current_t, state=current_state, dv=final_dv,
                 exact=final_exact, resid=final_resid)
    my_write_restart(step=current_step, t=current_t, state=current_state)

    if logmgr:
        logmgr.close()
    elif use_profiling:
        print(actx.tabulate_profiling_data())

    finish_tol = 1e-16
    time_err = current_t - t_final
    if np.abs(time_err) > finish_tol:
        raise ValueError(f"Simulation did not finish at expected time {time_err=}.")
Example No. 29
        )
        places = GeometryCollection(qbx)

        from pytential.qbx.refinement import refine_geometry_collection
        kernel_length_scale = 5 / case.k if case.k else None
        places = refine_geometry_collection(
            places, kernel_length_scale=kernel_length_scale)

        # {{{ compute values of a solution to the PDE

        density_discr = places.get_discretization(places.auto_source.geometry)

        from meshmode.dof_array import thaw, flatten, unflatten
        nodes_host = [
            actx.to_numpy(axis)
            for axis in flatten(thaw(actx, density_discr.nodes()))
        ]
        # The np.object alias was deprecated in NumPy 1.20 and later removed;
        # use the builtin object dtype instead.
        normal = bind(places, sym.normal(d))(actx).as_vector(object)
        normal_host = [actx.to_numpy(axis) for axis in flatten(normal)]

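        # Known Helmholtz solutions used as reference data: a plane wave in 2D
        # and a point-source field exp(ikr)/r in 3D.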
        if k != 0:
            if d == 2:
                angle = 0.3
                wave_vec = np.array([np.cos(angle), np.sin(angle)])
                u = np.exp(1j * k * np.tensordot(wave_vec, nodes_host, axes=1))
                grad_u = 1j * k * wave_vec[:, np.newaxis] * u
            elif d == 3:
                center = np.array([3, 1, 2])
                diff = nodes_host - center[:, np.newaxis]
                r = la.norm(diff, axis=0)
                u = np.exp(1j * k * r) / r
Example No. 30
def sim_checkpoint(discr,
                   visualizer,
                   eos,
                   cv,
                   vizname,
                   exact_soln=None,
                   step=0,
                   t=0,
                   dt=0,
                   cfl=1.0,
                   nstatus=-1,
                   nviz=-1,
                   exittol=1e-16,
                   constant_cfl=False,
                   comm=None,
                   viz_fields=None,
                   overwrite=False,
                   vis_timer=None):
    """Check simulation health, status, viz dumps, and restart."""
    do_viz = check_step(step=step, interval=nviz)
    do_status = check_step(step=step, interval=nstatus)
    if not do_viz and not do_status:
        return 0

    dependent_vars = eos.dependent_vars(cv)

    rank = 0
    if comm is not None:
        rank = comm.Get_rank()

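    # If an exact solution is provided, compute per-component error norms and
    # the maximum error used for the exit-tolerance check below.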
    maxerr = 0.0
    if exact_soln is not None:
        actx = cv.mass.array_context
        nodes = thaw(actx, discr.nodes())
        expected_state = exact_soln(x_vec=nodes, t=t, eos=eos)
        exp_resid = cv - expected_state
        err_norms = [discr.norm(v, np.inf) for v in exp_resid.join()]
        maxerr = discr.norm(exp_resid.join(), np.inf)

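    # Assemble the visualization fields and write a parallel VTK file, timing
    # the I/O if a vis_timer was provided.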
    if do_viz:
        io_fields = [("cv", cv), ("dv", dependent_vars)]
        if exact_soln is not None:
            exact_list = [
                ("exact_soln", expected_state),
            ]
            io_fields.extend(exact_list)
        if viz_fields is not None:
            io_fields.extend(viz_fields)

        from mirgecom.io import make_rank_fname, make_par_fname
        rank_fn = make_rank_fname(basename=vizname, rank=rank, step=step, t=t)

        from contextlib import nullcontext

        if vis_timer:
            ctm = vis_timer.start_sub_timer()
        else:
            ctm = nullcontext()

        with ctm:
            visualizer.write_parallel_vtk_file(
                comm,
                rank_fn,
                io_fields,
                overwrite=overwrite,
                par_manifest_filename=make_par_fname(basename=vizname,
                                                     step=step,
                                                     t=t))

    if do_status:
        #        if constant_cfl is False:
        #            current_cfl = get_inviscid_cfl(discr=discr, q=q,
        #                                           eos=eos, dt=dt)
        statusmesg = make_status_message(discr=discr,
                                         t=t,
                                         step=step,
                                         dt=dt,
                                         cfl=cfl,
                                         dependent_vars=dependent_vars)
        if exact_soln is not None:
            statusmesg += ("\n------- errors=" +
                           ", ".join("%.3g" % en for en in err_norms))

        if rank == 0:
            logger.info(statusmesg)

    if maxerr > exittol:
        raise ExactSolutionMismatch(step, t=t, state=cv)
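
A minimal usage sketch (not part of the original example) showing how `sim_checkpoint` might be driven from a simple step loop; `discr`, `visualizer`, `eos`, `initializer`, `my_rhs`, `comm`, and the control parameters are assumed to be set up as in the driver examples above, and all names are illustrative.

# Hypothetical step loop around sim_checkpoint (illustrative names only).
state = initializer(thaw(actx, discr.nodes()))
t, istep = 0.0, 0
while t < t_final:
    # Take one RK4 step with the Euler RHS, then let the checkpoint helper
    # decide whether to write viz output and/or a status report.
    state = rk4_step(state, t, current_dt, my_rhs)
    t += current_dt
    istep += 1
    sim_checkpoint(discr, visualizer, eos, cv=state, vizname=casename,
                   step=istep, t=t, dt=current_dt, nstatus=nstatus,
                   nviz=nviz, comm=comm)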