Code example #1
File: test_wave.py  Project: nchristensen/mirgecom
def test_wave_stability(actx_factory,
                        problem,
                        timestep_scale,
                        order,
                        visualize=False):
    """Checks stability of the wave operator for a given problem setup.
    Adjust *timestep_scale* to bring the timestep close to the stability limit.
    """
    actx = actx_factory()

    p = problem

    sym_u, sym_v, sym_f, sym_rhs = sym_wave(p.dim, p.sym_phi)

    mesh = p.mesh_factory(8)

    from grudge.eager import EagerDGDiscretization
    discr = EagerDGDiscretization(actx, mesh, order=order)

    nodes = thaw(actx, discr.nodes())

    def sym_eval(expr, t):
        return sym.EvaluationMapper({"c": p.c, "x": nodes, "t": t})(expr)

    def get_rhs(t, w):
        result = wave_operator(discr, c=p.c, w=w)
        result[0] += sym_eval(sym_f, t)
        return result

    t = 0.

    u = sym_eval(sym_u, t)
    v = sym_eval(sym_v, t)

    fields = flat_obj_array(u, v)

    from mirgecom.integrators import rk4_step
    dt = timestep_scale / order**2
    for istep in range(10):
        fields = rk4_step(fields, t, dt, get_rhs)
        t += dt

    expected_u = sym_eval(sym_u, 10 * dt)
    expected_v = sym_eval(sym_v, 10 * dt)
    expected_fields = flat_obj_array(expected_u, expected_v)

    if visualize:
        from grudge.shortcuts import make_visualizer
        vis = make_visualizer(discr, discr.order)
        vis.write_vtk_file("wave_stability.vtu", [
            ("u", fields[0]),
            ("v", fields[1:]),
            ("u_expected", expected_fields[0]),
            ("v_expected", expected_fields[1:]),
        ])

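    # Stability check: near the stability limit the error can be sizable, but
    # the solution must not blow up past the magnitude of the expected fields.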
    err = discr.norm(fields - expected_fields, np.inf)
    max_err = discr.norm(expected_fields, np.inf)

    assert err < max_err
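
For reference, every example on this page advances the state with rk4_step(state, t, dt, rhs) from mirgecom.integrators. A minimal sketch of a classic fourth-order Runge-Kutta step matching that call signature (an illustration under that assumption, not necessarily mirgecom's exact implementation):

def rk4_step(state, t, dt, rhs):
    """Classic RK4: four rhs evaluations per step, fourth-order accurate."""
    k1 = rhs(t, state)
    k2 = rhs(t + dt/2, state + dt/2*k1)
    k3 = rhs(t + dt/2, state + dt/2*k2)
    k4 = rhs(t + dt, state + dt*k3)
    return state + dt/6*(k1 + 2*k2 + 2*k3 + k4)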
Code example #2
def main(ctx_factory=cl.create_some_context, use_logmgr=True,
         use_leap=False, use_profiling=False, casename=None,
         rst_filename=None, actx_class=PyOpenCLArrayContext):
    """Run the example."""
    cl_ctx = ctx_factory()

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    num_parts = comm.Get_size()

    logmgr = initialize_logmgr(use_logmgr,
        filename="heat-source.sqlite", mode="wu", mpi_comm=comm)

    if use_profiling:
        queue = cl.CommandQueue(
            cl_ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
    else:
        queue = cl.CommandQueue(cl_ctx)

    actx = actx_class(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))

    from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
    mesh_dist = MPIMeshDistributor(comm)

    dim = 2
    nel_1d = 16

    t = 0
    t_final = 0.0002
    istep = 0

    if mesh_dist.is_mananger_rank():
        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(
            a=(-0.5,)*dim,
            b=(0.5,)*dim,
            nelements_per_axis=(nel_1d,)*dim,
            boundary_tag_to_face={
                "dirichlet": ["+x", "-x"],
                "neumann": ["+y", "-y"]
                }
            )

        print("%d elements" % mesh.nelements)

        part_per_element = get_partition_by_pymetis(mesh, num_parts)

        local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)

        del mesh

    else:
        local_mesh = mesh_dist.receive_mesh_part()

    order = 3

    discr = EagerDGDiscretization(actx, local_mesh, order=order,
                    mpi_communicator=comm)

    if dim == 2:
        # no deep meaning here, just a fudge factor
        dt = 0.0025/(nel_1d*order**2)
    else:
        raise ValueError("don't have a stable time step guesstimate")

    source_width = 0.2

    from arraycontext import thaw
    nodes = thaw(discr.nodes(), actx)

    boundaries = {
        DTAG_BOUNDARY("dirichlet"): DirichletDiffusionBoundary(0.),
        DTAG_BOUNDARY("neumann"): NeumannDiffusionBoundary(0.)
    }

    u = discr.zeros(actx)

    if logmgr:
        logmgr_add_device_name(logmgr, queue)
        logmgr_add_device_memory_usage(logmgr, queue)

        logmgr.add_watches(["step.max", "t_step.max", "t_log.max"])

        try:
            logmgr.add_watches(["memory_usage_python.max", "memory_usage_gpu.max"])
        except KeyError:
            pass

        if use_profiling:
            logmgr.add_watches(["multiply_time.max"])

        vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
        logmgr.add_quantity(vis_timer)

    vis = make_visualizer(discr)

    def rhs(t, u):
        return (
            diffusion_operator(
                discr, quad_tag=DISCR_TAG_BASE,
                alpha=1, boundaries=boundaries, u=u)
            + actx.np.exp(-np.dot(nodes, nodes)/source_width**2))

    compiled_rhs = actx.compile(rhs)

    rank = comm.Get_rank()

    while t < t_final:
        if logmgr:
            logmgr.tick_before()

        if istep % 10 == 0:
            print(istep, t, actx.to_numpy(actx.np.linalg.norm(u[0])))
            vis.write_vtk_file("fld-heat-source-mpi-%03d-%04d.vtu" % (rank, istep),
                    [
                        ("u", u)
                        ], overwrite=True)

        u = rk4_step(u, t, dt, compiled_rhs)
        t += dt
        istep += 1

        if logmgr:
            set_dt(logmgr, dt)
            logmgr.tick_after()
    final_answer = discr.norm(u, np.inf)
    resid = abs(final_answer - 0.00020620711665201585)
    if resid > 1e-15:
        raise ValueError(f"Run did not produce the expected result {resid=}")
Code example #3
def main():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue,
                                allocator=cl_tools.MemoryPool(
                                    cl_tools.ImmediateAllocator(queue)))

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    num_parts = comm.Get_size()

    from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
    mesh_dist = MPIMeshDistributor(comm)

    dim = 2
    nel_1d = 16

    if mesh_dist.is_mananger_rank():
        from meshmode.mesh.generation import generate_regular_rect_mesh
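        # NB: "n=" is the older meshmode keyword for per-axis element counts;
        # newer meshmode uses "nelements_per_axis=" (as in other examples here).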
        mesh = generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                          b=(0.5, ) * dim,
                                          n=(nel_1d, ) * dim,
                                          boundary_tag_to_face={
                                              "dirichlet": ["+x", "-x"],
                                              "neumann": ["+y", "-y"]
                                          })

        print("%d elements" % mesh.nelements)

        part_per_element = get_partition_by_pymetis(mesh, num_parts)

        local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element,
                                               num_parts)

        del mesh

    else:
        local_mesh = mesh_dist.receive_mesh_part()

    order = 3

    discr = EagerDGDiscretization(actx,
                                  local_mesh,
                                  order=order,
                                  mpi_communicator=comm)

    if dim == 2:
        # no deep meaning here, just a fudge factor
        dt = 0.0025 / (nel_1d * order**2)
    else:
        raise ValueError("don't have a stable time step guesstimate")

    source_width = 0.2

    nodes = thaw(actx, discr.nodes())

    u = discr.zeros(actx)

    vis = make_visualizer(discr, order + 3 if dim == 2 else order)

    boundaries = {
        grudge_sym.DTAG_BOUNDARY("dirichlet"): DirichletDiffusionBoundary(0.),
        grudge_sym.DTAG_BOUNDARY("neumann"): NeumannDiffusionBoundary(0.)
    }

    def rhs(t, u):
        return (
            diffusion_operator(discr, alpha=1, boundaries=boundaries, u=u) +
            actx.np.exp(-np.dot(nodes, nodes) / source_width**2))

    rank = comm.Get_rank()

    t = 0
    t_final = 0.01
    istep = 0

    while True:
        if istep % 10 == 0:
            print(istep, t, discr.norm(u))
            vis.write_vtk_file(
                "fld-heat-source-mpi-%03d-%04d.vtu" % (rank, istep),
                [("u", u)])

        if t >= t_final:
            break

        u = rk4_step(u, t, dt, rhs)
        t += dt
        istep += 1
Code example #4
def main():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue,
                                allocator=cl_tools.MemoryPool(
                                    cl_tools.ImmediateAllocator(queue)))

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    num_parts = comm.Get_size()

    from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
    mesh_dist = MPIMeshDistributor(comm)

    dim = 2
    nel_1d = 16

    if mesh_dist.is_mananger_rank():
        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                          b=(0.5, ) * dim,
                                          n=(nel_1d, ) * dim)

        print("%d elements" % mesh.nelements)

        part_per_element = get_partition_by_pymetis(mesh, num_parts)

        local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element,
                                               num_parts)

        del mesh

    else:
        local_mesh = mesh_dist.receive_mesh_part()

    order = 3

    discr = EagerDGDiscretization(actx,
                                  local_mesh,
                                  order=order,
                                  mpi_communicator=comm)

    if dim == 2:
        # no deep meaning here, just a fudge factor
        dt = 0.75 / (nel_1d * order**2)
    elif dim == 3:
        # no deep meaning here, just a fudge factor
        dt = 0.45 / (nel_1d * order**2)
    else:
        raise ValueError("don't have a stable time step guesstimate")

    fields = flat_obj_array(bump(actx, discr),
                            [discr.zeros(actx) for i in range(discr.dim)])

    vis = make_visualizer(discr, order + 3 if dim == 2 else order)

    def rhs(t, w):
        return wave_operator(discr, c=1, w=w)

    rank = comm.Get_rank()

    t = 0
    t_final = 3
    istep = 0
    while t < t_final:
        fields = rk4_step(fields, t, dt, rhs)

        if istep % 10 == 0:
            print(istep, t, discr.norm(fields[0]))
            vis.write_vtk_file(
                "fld-wave-eager-mpi-%03d-%04d.vtu" % (rank, istep), [
                    ("u", fields[0]),
                    ("v", fields[1:]),
                ])

        t += dt
        istep += 1
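
The bump initial condition used by the wave examples is not shown in these snippets. Below is a plausible stand-in consistent with how it is called, bump(actx, discr); the actual helper's center, width, and time dependence may differ:

import numpy as np
from meshmode.dof_array import thaw


def bump(actx, discr, width=0.05):
    """Gaussian pulse centered at the origin (illustrative stand-in)."""
    nodes = thaw(actx, discr.nodes())
    r2 = sum(nodes[i]**2 for i in range(discr.dim))
    return actx.np.exp(-r2/width**2)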
Code example #5
def _euler_flow_stepper(actx, parameters):
    """
    Implements a generic time stepping loop for testing an inviscid flow.
    """
    logging.basicConfig(format="%(message)s", level=logging.INFO)

    mesh = parameters["mesh"]
    t = parameters["time"]
    order = parameters["order"]
    t_final = parameters["tfinal"]
    initializer = parameters["initializer"]
    exittol = parameters["exittol"]
    casename = parameters["casename"]
    boundaries = parameters["boundaries"]
    eos = parameters["eos"]
    cfl = parameters["cfl"]
    dt = parameters["dt"]
    constantcfl = parameters["constantcfl"]
    nstepstatus = parameters["nstatus"]

    if t_final <= t:
        return 0.0

    rank = 0
    dim = mesh.dim
    istep = 0

    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())
    fields = initializer(0, nodes)
    sdt = get_inviscid_timestep(discr, eos=eos, cfl=cfl, q=fields)

    initname = initializer.__class__.__name__
    eosname = eos.__class__.__name__
    message = (f"Num {dim}d order-{order} elements: {mesh.nelements}\n"
               f"Timestep:        {dt}\n"
               f"Final time:      {t_final}\n"
               f"Status freq:     {nstepstatus}\n"
               f"Initialization:  {initname}\n"
               f"EOS:             {eosname}")
    logger.info(message)

    vis = make_visualizer(discr, discr.order + 3 if dim == 2 else discr.order)

    def write_soln(write_status=True):
        cv = split_conserved(dim, fields)
        dv = eos.dependent_vars(cv)
        expected_result = initializer(t, nodes)
        result_resid = fields - expected_result
        maxerr = [
            np.max(np.abs(result_resid[i].get())) for i in range(dim + 2)
        ]
        mindv = [np.min(dvfld.get()) for dvfld in dv]
        maxdv = [np.max(dvfld.get()) for dvfld in dv]

        if write_status:
            statusmsg = (f"Status: Step({istep}) Time({t})\n"
                         f"------   P({mindv[0]},{maxdv[0]})\n"
                         f"------   T({mindv[1]},{maxdv[1]})\n"
                         f"------   dt,cfl = ({dt},{cfl})\n"
                         f"------   Err({maxerr})")
            logger.info(statusmsg)

        io_fields = ["cv", split_conserved(dim, fields)]
        io_fields += eos.split_fields(dim, dv)
        io_fields.append(("exact_soln", expected_result))
        io_fields.append(("residual", result_resid))
        nameform = casename + "-{iorank:04d}-{iostep:06d}.vtu"
        visfilename = nameform.format(iorank=rank, iostep=istep)
        vis.write_vtk_file(visfilename, io_fields)

        return maxerr

    def rhs(t, q):
        return inviscid_operator(discr,
                                 eos=eos,
                                 boundaries=boundaries,
                                 q=q,
                                 t=t)

    while t < t_final:

        if constantcfl:
            dt = sdt
        else:
            cfl = dt / sdt

        if nstepstatus > 0:
            if istep % nstepstatus == 0:
                write_soln()

        fields = rk4_step(fields, t, dt, rhs)
        t += dt
        istep += 1

        sdt = get_inviscid_timestep(discr, eos=eos, cfl=cfl, q=fields)

    if nstepstatus > 0:
        logger.info("Writing final dump.")
        maxerr = max(write_soln(False))
    else:
        expected_result = initializer(t, nodes)
        maxerr = discr.norm(fields - expected_result, np.inf)

    logger.info(f"Max Error: {maxerr}")
    if maxerr > exittol:
        raise ValueError("Solution failed to follow expected result.")

    return maxerr
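
For reference, the parameters dictionary this stepper consumes carries exactly the keys unpacked at the top of the function. A hypothetical invocation (mesh, initializer, boundaries, and eos are placeholders that the real tests build from fixtures):

parameters = {
    "mesh": mesh,                # meshmode mesh
    "time": 0.0,                 # start time
    "order": 3,                  # polynomial order
    "tfinal": 0.1,               # end time
    "initializer": initializer,  # exact/initial solution callable
    "exittol": 1e-9,             # max error tolerated before failing
    "casename": "euler-test",    # prefix for .vtu output files
    "boundaries": boundaries,    # boundary-condition dict
    "eos": eos,                  # equation of state
    "cfl": 1.0,
    "dt": 1e-5,
    "constantcfl": False,        # if True, dt is recomputed from the CFL
    "nstatus": 10,               # status/output frequency in steps
}
maxerr = _euler_flow_stepper(actx, parameters)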
Code example #6
File: test_diffusion.py  Project: MTCam/mirgecom
def test_diffusion_discontinuous_alpha(actx_factory, order, visualize=False):
    """
    Checks the accuracy of the diffusion operator for an alpha field that has a
    jump across an element face.
    """
    actx = actx_factory()

    n = 8

    mesh = get_box_mesh(1, -1, 1, n)

    from grudge.eager import EagerDGDiscretization
    discr = EagerDGDiscretization(actx, mesh, order=order)

    nodes = thaw(actx, discr.nodes())

    # Set up a 1D heat equation interface problem, apply the diffusion operator to
    # the exact steady state solution, and check that it's zero

    lower_mask_np = np.zeros((n, order + 1), dtype=int)
    lower_mask_np[:int(n / 2), :] = 1
    lower_mask = DOFArray(actx, (actx.from_numpy(lower_mask_np), ))

    upper_mask_np = np.zeros((n, order + 1), dtype=int)
    upper_mask_np[int(n / 2):, :] = 1
    upper_mask = DOFArray(actx, (actx.from_numpy(upper_mask_np), ))

    alpha_lower = 0.5
    alpha_upper = 1

    alpha = alpha_lower * lower_mask + alpha_upper * upper_mask

    boundaries = {
        DTAG_BOUNDARY("-0"): DirichletDiffusionBoundary(0.),
        DTAG_BOUNDARY("+0"): DirichletDiffusionBoundary(1.),
    }

    flux = -alpha_lower * alpha_upper / (alpha_lower + alpha_upper)

    u_steady = (
        -flux / alpha_lower * (nodes[0] + 1) * lower_mask  # noqa: E126, E221
        + (1 - flux / alpha_upper * (nodes[0] - 1)) * upper_mask)  # noqa: E131

    def get_rhs(t, u):
        return diffusion_operator(discr,
                                  quad_tag=DISCR_TAG_BASE,
                                  alpha=alpha,
                                  boundaries=boundaries,
                                  u=u)

    rhs = get_rhs(0, u_steady)

    if visualize:
        from grudge.shortcuts import make_visualizer
        vis = make_visualizer(discr, discr.order + 3)
        vis.write_vtk_file(
            "diffusion_discontinuous_alpha_rhs_{order}.vtu".format(
                order=order), [
                    ("alpha", alpha),
                    ("u_steady", u_steady),
                    ("rhs", rhs),
                ])

    linf_err = discr.norm(rhs, np.inf)
    assert linf_err < 1e-11

    # Now check stability

    from numpy.random import rand
    perturb_np = np.empty((n, order + 1), dtype=float)
    for i in range(n):
        perturb_np[i, :] = 0.1 * (rand() - 0.5)
    perturb = DOFArray(actx, (actx.from_numpy(perturb_np), ))

    u = u_steady + perturb

    dt = 1e-3 / order**2
    t = 0

    from mirgecom.integrators import rk4_step

    for _ in range(50):
        u = rk4_step(u, t, dt, get_rhs)
        t += dt

    if visualize:
        vis.write_vtk_file(
            "diffusion_disc_alpha_stability_{order}.vtu".format(order=order), [
                ("alpha", alpha),
                ("u", u),
                ("u_steady", u_steady),
            ])

    linf_diff = discr.norm(u - u_steady, np.inf)
    assert linf_diff < 0.1
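
Why the steady state above has this piecewise-linear form: with u(-1) = 0, u(1) = 1 and a flux F = -alpha*u' that must be constant across the interface at x = 0, continuity of u at the interface gives F = -alpha_lower*alpha_upper/(alpha_lower + alpha_upper). A quick plain-numpy check of that construction:

import numpy as np

alpha_lower, alpha_upper = 0.5, 1.0
flux = -alpha_lower*alpha_upper/(alpha_lower + alpha_upper)

x = np.linspace(-1, 1, 11)
u = np.where(
    x < 0,
    -flux/alpha_lower*(x + 1),       # linear ramp on the lower half
    1 - flux/alpha_upper*(x - 1))    # linear ramp on the upper half

print(u[0], u[-1])  # boundary values 0.0 and 1.0
# Both one-sided limits at the interface x = 0 agree:
print(-flux/alpha_lower, 1 + flux/alpha_upper)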
Code example #7
def main(snapshot_pattern="wave-eager-{step:04d}-{rank:04d}.pkl",
         restart_step=None):
    """Drive the example."""
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue,
                                allocator=cl_tools.MemoryPool(
                                    cl_tools.ImmediateAllocator(queue)))

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    num_parts = comm.Get_size()

    if restart_step is None:

        from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
        mesh_dist = MPIMeshDistributor(comm)

        dim = 2
        nel_1d = 16

        if mesh_dist.is_mananger_rank():
            from meshmode.mesh.generation import generate_regular_rect_mesh
            mesh = generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                              b=(0.5, ) * dim,
                                              nelements_per_axis=(nel_1d, ) *
                                              dim)

            print("%d elements" % mesh.nelements)
            part_per_element = get_partition_by_pymetis(mesh, num_parts)
            local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element,
                                                   num_parts)

            del mesh

        else:
            local_mesh = mesh_dist.receive_mesh_part()

        fields = None

    else:
        from mirgecom.restart import read_restart_data
        restart_data = read_restart_data(
            actx, snapshot_pattern.format(step=restart_step, rank=rank))
        local_mesh = restart_data["local_mesh"]
        nel_1d = restart_data["nel_1d"]
        assert comm.Get_size() == restart_data["num_parts"]

    order = 3

    discr = EagerDGDiscretization(actx,
                                  local_mesh,
                                  order=order,
                                  mpi_communicator=comm)

    if local_mesh.dim == 2:
        # no deep meaning here, just a fudge factor
        dt = 0.7 / (nel_1d * order**2)
    elif local_mesh.dim == 3:
        # no deep meaning here, just a fudge factor
        dt = 0.4 / (nel_1d * order**2)
    else:
        raise ValueError("don't have a stable time step guesstimate")

    t_final = 3

    if restart_step is None:
        t = 0
        istep = 0

        fields = flat_obj_array(bump(actx, discr),
                                [discr.zeros(actx) for i in range(discr.dim)])

    else:
        t = restart_data["t"]
        istep = restart_step
        assert istep == restart_step
        restart_fields = restart_data["fields"]
        old_order = restart_data["order"]
        if old_order != order:
            old_discr = EagerDGDiscretization(actx,
                                              local_mesh,
                                              order=old_order,
                                              mpi_communicator=comm)
            from meshmode.discretization.connection import make_same_mesh_connection
            connection = make_same_mesh_connection(
                actx, discr.discr_from_dd("vol"),
                old_discr.discr_from_dd("vol"))
            fields = connection(restart_fields)
        else:
            fields = restart_fields

    vis = make_visualizer(discr)

    def rhs(t, w):
        return wave_operator(discr, c=1, w=w)

    while t < t_final:
        # restart must happen at beginning of step
        if istep % 100 == 0 and (
                # Do not overwrite the restart file that we just read.
                istep != restart_step):
            from mirgecom.restart import write_restart_file
            write_restart_file(actx,
                               restart_data={
                                   "local_mesh": local_mesh,
                                   "order": order,
                                   "fields": fields,
                                   "t": t,
                                   "step": istep,
                                   "nel_1d": nel_1d,
                                   "num_parts": num_parts
                               },
                               filename=snapshot_pattern.format(step=istep,
                                                                rank=rank),
                               comm=comm)

        if istep % 10 == 0:
            print(istep, t, discr.norm(fields[0]))
            vis.write_parallel_vtk_file(
                comm, "fld-wave-eager-mpi-%03d-%04d.vtu" % (rank, istep), [
                    ("u", fields[0]),
                    ("v", fields[1:]),
                ])

        fields = rk4_step(fields, t, dt, rhs)

        t += dt
        istep += 1
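
Usage note: a fresh run writes a restart snapshot every 100 steps via the snapshot pattern, and passing restart_step resumes from one of those files. A hypothetical session:

# Fresh run: writes wave-eager-0100-0000.pkl etc. every 100 steps.
main()

# Later, resume from step 100 (requires the same MPI size that wrote the
# snapshots, per the num_parts assertion above):
main(restart_step=100)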
Code example #8
File: test_diffusion.py  Project: MTCam/mirgecom
def test_diffusion_accuracy(actx_factory,
                            problem,
                            nsteps,
                            dt,
                            scales,
                            order,
                            visualize=False):
    """
    Checks the accuracy of the diffusion operator by solving the heat equation for a
    given problem setup.
    """
    actx = actx_factory()

    p = problem

    sym_diffusion_u = sym_diffusion(p.dim, p.sym_alpha, p.sym_u)

    # In order to support manufactured solutions, we modify the heat equation
    # to add a source term f. If the manufactured solution actually satisfies
    # the unforced heat equation, this term is 0.
    sym_t = pmbl.var("t")
    sym_f = sym.diff(sym_t)(p.sym_u) - sym_diffusion_u

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for n in scales:
        mesh = p.get_mesh(n)

        from grudge.eager import EagerDGDiscretization
        from meshmode.discretization.poly_element import \
                QuadratureSimplexGroupFactory, \
                PolynomialWarpAndBlendGroupFactory
        discr = EagerDGDiscretization(
            actx,
            mesh,
            discr_tag_to_group_factory={
                DISCR_TAG_BASE: PolynomialWarpAndBlendGroupFactory(order),
                DISCR_TAG_QUAD: QuadratureSimplexGroupFactory(3 * order),
            })

        nodes = thaw(actx, discr.nodes())

        def sym_eval(expr, t):
            return sym.EvaluationMapper({"x": nodes, "t": t})(expr)

        alpha = sym_eval(p.sym_alpha, 0.)

        if isinstance(alpha, DOFArray):
            discr_tag = DISCR_TAG_QUAD
        else:
            discr_tag = DISCR_TAG_BASE

        def get_rhs(t, u):
            return (
                diffusion_operator(discr,
                                   quad_tag=discr_tag,
                                   alpha=alpha,
                                   boundaries=p.get_boundaries(discr, actx, t),
                                   u=u) + sym_eval(sym_f, t))

        t = 0.

        u = sym_eval(p.sym_u, t)

        from mirgecom.integrators import rk4_step

        for _ in range(nsteps):
            u = rk4_step(u, t, dt, get_rhs)
            t += dt

        expected_u = sym_eval(p.sym_u, t)

        rel_linf_err = (discr.norm(u - expected_u, np.inf) /
                        discr.norm(expected_u, np.inf))
        eoc_rec.add_data_point(1. / n, rel_linf_err)

        if visualize:
            from grudge.shortcuts import make_visualizer
            vis = make_visualizer(discr, discr.order + 3)
            vis.write_vtk_file(
                "diffusion_accuracy_{order}_{n}.vtu".format(order=order, n=n),
                [
                    ("u", u),
                    ("expected_u", expected_u),
                ])

    print("L^inf error:")
    print(eoc_rec)
    # Expected convergence rates from Hesthaven/Warburton book
    expected_order = order + 1 if order % 2 == 0 else order
    assert (eoc_rec.order_estimate() >= expected_order - 0.5
            or eoc_rec.max_error() < 1e-11)
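
The order estimate reported by EOCRecorder is essentially the slope of log(error) against log(h). A minimal sketch of that computation (EOCRecorder's actual fit may differ in details):

import numpy as np


def estimate_order(hs, errs):
    """Least-squares slope of log(err) versus log(h)."""
    return np.polyfit(np.log(hs), np.log(errs), 1)[0]


# For a scheme of order p, halving h should divide the error by ~2**p:
hs = np.array([1/8, 1/16, 1/32])
errs = 0.5*hs**4                 # synthetic fourth-order data
print(estimate_order(hs, errs))  # ~4.0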
Code example #9
File: test_euler.py  Project: cmikida2/mirgecom
def _euler_flow_stepper(actx, parameters):
    logging.basicConfig(format="%(message)s", level=logging.INFO)

    mesh = parameters["mesh"]
    t = parameters["time"]
    order = parameters["order"]
    t_final = parameters["tfinal"]
    initializer = parameters["initializer"]
    exittol = parameters["exittol"]
    casename = parameters["casename"]
    boundaries = parameters["boundaries"]
    eos = parameters["eos"]
    cfl = parameters["cfl"]
    dt = parameters["dt"]
    constantcfl = parameters["constantcfl"]
    nstepstatus = parameters["nstatus"]

    if t_final <= t:
        return 0.0

    rank = 0
    dim = mesh.dim
    istep = 0

    discr = EagerDGDiscretization(actx, mesh, order=order)
    nodes = thaw(actx, discr.nodes())

    cv = initializer(nodes)
    sdt = cfl * get_inviscid_timestep(discr, eos=eos, cv=cv)

    initname = initializer.__class__.__name__
    eosname = eos.__class__.__name__
    logger.info(f"Num {dim}d order-{order} elements: {mesh.nelements}\n"
                f"Timestep:        {dt}\n"
                f"Final time:      {t_final}\n"
                f"Status freq:     {nstepstatus}\n"
                f"Initialization:  {initname}\n"
                f"EOS:             {eosname}")

    vis = make_visualizer(discr, order)

    def write_soln(state, write_status=True):
        dv = eos.dependent_vars(cv=state)
        expected_result = initializer(nodes, t=t)
        result_resid = (state - expected_result).join()
        maxerr = [
            np.max(np.abs(result_resid[i].get())) for i in range(dim + 2)
        ]
        mindv = [np.min(dvfld.get()) for dvfld in dv]
        maxdv = [np.max(dvfld.get()) for dvfld in dv]

        if write_status:
            statusmsg = (f"Status: Step({istep}) Time({t})\n"
                         f"------   P({mindv[0]},{maxdv[0]})\n"
                         f"------   T({mindv[1]},{maxdv[1]})\n"
                         f"------   dt,cfl = ({dt},{cfl})\n"
                         f"------   Err({maxerr})")
            logger.info(statusmsg)

        io_fields = ["cv", state]
        io_fields += eos.split_fields(dim, dv)
        io_fields.append(("exact_soln", expected_result))
        io_fields.append(("residual", result_resid))
        nameform = casename + "-{iorank:04d}-{iostep:06d}.vtu"
        visfilename = nameform.format(iorank=rank, iostep=istep)
        vis.write_vtk_file(visfilename, io_fields)

        return maxerr

    def rhs(t, q):
        return euler_operator(discr,
                              eos=eos,
                              boundaries=boundaries,
                              cv=q,
                              time=t)

    filter_order = 8
    eta = .5
    alpha = -1.0 * np.log(np.finfo(float).eps)
    nummodes = 1
    for _ in range(dim):
        nummodes *= int(order + dim + 1)
    nummodes /= math.factorial(int(dim))
    cutoff = int(eta * order)

    from mirgecom.filter import (exponential_mode_response_function as xmrfunc,
                                 filter_modally)
    frfunc = partial(xmrfunc, alpha=alpha, filter_order=filter_order)

    while t < t_final:

        if constantcfl:
            dt = sdt
        else:
            cfl = dt / sdt

        if nstepstatus > 0:
            if istep % nstepstatus == 0:
                write_soln(state=cv)

        cv = rk4_step(cv, t, dt, rhs)
        cv = make_conserved(dim,
                            q=filter_modally(discr, "vol", cutoff, frfunc,
                                             cv.join()))

        t += dt
        istep += 1

        sdt = cfl * get_inviscid_timestep(discr, eos=eos, cv=cv)

    if nstepstatus > 0:
        logger.info("Writing final dump.")
        maxerr = max(write_soln(cv, False))
    else:
        expected_result = initializer(nodes, time=t)
        maxerr = discr.norm((cv - expected_result).join(), np.inf)

    logger.info(f"Max Error: {maxerr}")
    if maxerr > exittol:
        raise ValueError("Solution failed to follow expected result.")

    return maxerr
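
The modal filter applied after each step leaves modes below the cutoff untouched and damps the rest exponentially; the alpha chosen above, -log(machine epsilon), damps the highest mode down to machine epsilon. A sketch of such a response function (mirgecom's exponential_mode_response_function may differ in details such as the exponent):

import numpy as np


def mode_response(mode, cutoff, order, alpha, filter_order):
    """1 below the cutoff, exponential decay above it."""
    if mode < cutoff:
        return 1.0
    frac = (mode - cutoff)/(order - cutoff)
    return np.exp(-alpha*frac**filter_order)


order, eta, filter_order = 8, 0.5, 8
alpha = -np.log(np.finfo(float).eps)   # ~36.04
cutoff = int(eta*order)
# The top mode (mode == order) is damped to exactly machine epsilon:
print(mode_response(order, cutoff, order, alpha, filter_order))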
Code example #10
def main(use_profiling=False, use_logmgr=False, lazy_eval: bool = False):
    """Drive the example."""
    cl_ctx = cl.create_some_context()

    logmgr = initialize_logmgr(use_logmgr, filename="wave.sqlite", mode="wu")

    if use_profiling:
        if lazy_eval:
            raise RuntimeError("Cannot run lazy with profiling.")
        queue = cl.CommandQueue(
            cl_ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
        actx = PyOpenCLProfilingArrayContext(
            queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))
    else:
        queue = cl.CommandQueue(cl_ctx)
        if lazy_eval:
            actx = PytatoPyOpenCLArrayContext(queue)
        else:
            actx = PyOpenCLArrayContext(
                queue,
                allocator=cl_tools.MemoryPool(
                    cl_tools.ImmediateAllocator(queue)))

    dim = 2
    nel_1d = 16
    from meshmode.mesh.generation import generate_regular_rect_mesh

    mesh = generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                      b=(0.5, ) * dim,
                                      nelements_per_axis=(nel_1d, ) * dim)

    order = 3

    discr = EagerDGDiscretization(actx, mesh, order=order)

    current_cfl = 0.485
    wave_speed = 1.0
    from grudge.dt_utils import characteristic_lengthscales
    nodal_dt = characteristic_lengthscales(actx, discr) / wave_speed
    from grudge.op import nodal_min
    dt = actx.to_numpy(current_cfl * nodal_min(discr, "vol", nodal_dt))[()]

    print("%d elements" % mesh.nelements)

    fields = flat_obj_array(bump(actx, discr),
                            [discr.zeros(actx) for i in range(discr.dim)])

    if logmgr:
        logmgr_add_cl_device_info(logmgr, queue)
        logmgr_add_device_memory_usage(logmgr, queue)

        logmgr.add_watches(["step.max", "t_step.max", "t_log.max"])

        try:
            logmgr.add_watches(
                ["memory_usage_python.max", "memory_usage_gpu.max"])
        except KeyError:
            pass

        if use_profiling:
            logmgr.add_watches(["multiply_time.max"])

        vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
        logmgr.add_quantity(vis_timer)

    vis = make_visualizer(discr)

    def rhs(t, w):
        return wave_operator(discr, c=wave_speed, w=w)

    compiled_rhs = actx.compile(rhs)

    t = 0
    t_final = 1
    istep = 0
    while t < t_final:
        if logmgr:
            logmgr.tick_before()

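        # freeze/thaw forces evaluation of any lazily deferred array
        # operations before the step (relevant when lazy_eval is set).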
        fields = thaw(freeze(fields, actx), actx)
        fields = rk4_step(fields, t, dt, compiled_rhs)

        if istep % 10 == 0:
            if use_profiling:
                print(actx.tabulate_profiling_data())
            print(istep, t, actx.to_numpy(discr.norm(fields[0], np.inf)))
            vis.write_vtk_file("fld-wave-%04d.vtu" % istep, [
                ("u", fields[0]),
                ("v", fields[1:]),
            ],
                               overwrite=True)

        t += dt
        istep += 1

        if logmgr:
            set_dt(logmgr, dt)
            logmgr.tick_after()
Code example #11
File: wave-mpi.py  Project: thomasgibson/mirgecom
def main(snapshot_pattern="wave-mpi-{step:04d}-{rank:04d}.pkl", restart_step=None,
         use_profiling=False, use_logmgr=False, actx_class=PyOpenCLArrayContext):
    """Drive the example."""
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)

    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    num_parts = comm.Get_size()

    logmgr = initialize_logmgr(use_logmgr,
        filename="wave-mpi.sqlite", mode="wu", mpi_comm=comm)
    if use_profiling:
        queue = cl.CommandQueue(cl_ctx,
            properties=cl.command_queue_properties.PROFILING_ENABLE)
        actx = actx_class(queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)),
            logmgr=logmgr)
    else:
        queue = cl.CommandQueue(cl_ctx)
        actx = actx_class(queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))

    if restart_step is None:

        from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
        mesh_dist = MPIMeshDistributor(comm)

        dim = 2
        nel_1d = 16

        if mesh_dist.is_mananger_rank():
            from meshmode.mesh.generation import generate_regular_rect_mesh
            mesh = generate_regular_rect_mesh(
                a=(-0.5,)*dim, b=(0.5,)*dim,
                nelements_per_axis=(nel_1d,)*dim)

            print("%d elements" % mesh.nelements)
            part_per_element = get_partition_by_pymetis(mesh, num_parts)
            local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)

            del mesh

        else:
            local_mesh = mesh_dist.receive_mesh_part()

        fields = None

    else:
        from mirgecom.restart import read_restart_data
        restart_data = read_restart_data(
            actx, snapshot_pattern.format(step=restart_step, rank=rank)
        )
        local_mesh = restart_data["local_mesh"]
        nel_1d = restart_data["nel_1d"]
        assert comm.Get_size() == restart_data["num_parts"]

    order = 3

    discr = EagerDGDiscretization(actx, local_mesh, order=order,
                                  mpi_communicator=comm)

    current_cfl = 0.485
    wave_speed = 1.0
    from grudge.dt_utils import characteristic_lengthscales
    dt = current_cfl * characteristic_lengthscales(actx, discr) / wave_speed

    from grudge.op import nodal_min
    dt = nodal_min(discr, "vol", dt)

    t_final = 1

    if restart_step is None:
        t = 0
        istep = 0

        fields = flat_obj_array(
            bump(actx, discr),
            [discr.zeros(actx) for i in range(discr.dim)]
            )

    else:
        t = restart_data["t"]
        istep = restart_step
        assert istep == restart_step
        restart_fields = restart_data["fields"]
        old_order = restart_data["order"]
        if old_order != order:
            old_discr = EagerDGDiscretization(actx, local_mesh, order=old_order,
                                              mpi_communicator=comm)
            from meshmode.discretization.connection import make_same_mesh_connection
            connection = make_same_mesh_connection(actx, discr.discr_from_dd("vol"),
                                                   old_discr.discr_from_dd("vol"))
            fields = connection(restart_fields)
        else:
            fields = restart_fields

    if logmgr:
        logmgr_add_cl_device_info(logmgr, queue)
        logmgr_add_device_memory_usage(logmgr, queue)

        logmgr.add_watches(["step.max", "t_step.max", "t_log.max"])

        try:
            logmgr.add_watches(["memory_usage_python.max", "memory_usage_gpu.max"])
        except KeyError:
            pass

        if use_profiling:
            logmgr.add_watches(["multiply_time.max"])

        vis_timer = IntervalTimer("t_vis", "Time spent visualizing")
        logmgr.add_quantity(vis_timer)

    vis = make_visualizer(discr)

    def rhs(t, w):
        return wave_operator(discr, c=wave_speed, w=w)

    compiled_rhs = actx.compile(rhs)

    while t < t_final:
        if logmgr:
            logmgr.tick_before()

        # restart must happen at beginning of step
        if istep % 100 == 0 and (
                # Do not overwrite the restart file that we just read.
                istep != restart_step):
            from mirgecom.restart import write_restart_file
            write_restart_file(
                actx, restart_data={
                    "local_mesh": local_mesh,
                    "order": order,
                    "fields": fields,
                    "t": t,
                    "step": istep,
                    "nel_1d": nel_1d,
                    "num_parts": num_parts},
                filename=snapshot_pattern.format(step=istep, rank=rank),
                comm=comm
            )

        if istep % 10 == 0:
            print(istep, t, discr.norm(fields[0]))
            vis.write_parallel_vtk_file(
                comm,
                "fld-wave-mpi-%03d-%04d.vtu" % (rank, istep),
                [
                    ("u", fields[0]),
                    ("v", fields[1:]),
                ], overwrite=True
            )

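        # Force evaluation of (possibly lazy) arrays before stepping.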
        fields = thaw(freeze(fields, actx), actx)
        fields = rk4_step(fields, t, dt, compiled_rhs)

        t += dt
        istep += 1

        if logmgr:
            set_dt(logmgr, dt)
            logmgr.tick_after()

    final_soln = discr.norm(fields[0])
    assert np.abs(final_soln - 0.04409852463947439) < 1e-14
Code example #12
def main(use_profiling=False):
    """Drive the example."""
    cl_ctx = cl.create_some_context()
    if use_profiling:
        queue = cl.CommandQueue(
            cl_ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
        actx = PyOpenCLProfilingArrayContext(
            queue,
            allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))
    else:
        queue = cl.CommandQueue(cl_ctx)
        actx = PyOpenCLArrayContext(queue,
                                    allocator=cl_tools.MemoryPool(
                                        cl_tools.ImmediateAllocator(queue)))

    dim = 2
    nel_1d = 16
    from meshmode.mesh.generation import generate_regular_rect_mesh

    mesh = generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                      b=(0.5, ) * dim,
                                      nelements_per_axis=(nel_1d, ) * dim)

    order = 3

    if dim == 2:
        # no deep meaning here, just a fudge factor
        dt = 0.7 / (nel_1d * order**2)
    elif dim == 3:
        # no deep meaning here, just a fudge factor
        dt = 0.4 / (nel_1d * order**2)
    else:
        raise ValueError("don't have a stable time step guesstimate")

    print("%d elements" % mesh.nelements)

    discr = EagerDGDiscretization(actx, mesh, order=order)

    fields = flat_obj_array(bump(actx, discr),
                            [discr.zeros(actx) for i in range(discr.dim)])

    vis = make_visualizer(discr)

    def rhs(t, w):
        return wave_operator(discr, c=1, w=w)

    t = 0
    t_final = 3
    istep = 0
    while t < t_final:
        fields = rk4_step(fields, t, dt, rhs)

        if istep % 10 == 0:
            if use_profiling:
                print(actx.tabulate_profiling_data())
            print(istep, t, discr.norm(fields[0], np.inf))
            vis.write_vtk_file("fld-wave-eager-%04d.vtu" % istep, [
                ("u", fields[0]),
                ("v", fields[1:]),
            ])

        t += dt
        istep += 1
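
The fudge-factor timesteps used throughout these examples follow the usual explicit-DG heuristic dt ~ C*h/(c*order**2), with mesh spacing h ~ 1/nel_1d and unit wave speed here. A quick check of the numbers this yields:

# dt heuristic: dt = C * h / (c * order**2), with h ~ 1/nel_1d and c = 1
nel_1d, order = 16, 3
dt_2d = 0.7/(nel_1d*order**2)   # ~0.00486
dt_3d = 0.4/(nel_1d*order**2)   # ~0.00278
print(dt_2d, dt_3d)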