Example #1
def problem_stats(order=3):
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue))
    )

    with open_output_file("grudge-problem-stats.txt") as outf:
        _, dg_discr_2d = get_wave_op_with_discr(
            actx, dims=2, order=order)
        print("Number of 2D elements:", dg_discr_2d.mesh.nelements, file=outf)
        vol_discr_2d = dg_discr_2d.discr_from_dd("vol")
        dofs_2d = {group.nunit_dofs for group in vol_discr_2d.groups}
        from pytools import one
        print("Number of DOFs per 2D element:", one(dofs_2d), file=outf)

        _, dg_discr_3d = get_wave_op_with_discr(
            actx, dims=3, order=order)
        print("Number of 3D elements:", dg_discr_3d.mesh.nelements, file=outf)
        vol_discr_3d = dg_discr_3d.discr_from_dd("vol")
        dofs_3d = {group.nunit_dofs for group in vol_discr_3d.groups}
        print("Number of DOFs per 3D element:", one(dofs_3d), file=outf)

    logger.info("Wrote '%s'", outf.name)
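Note: Examples #1, #2 and #15 rely on an open_output_file helper (and a PAPER_OUTPUT flag) defined elsewhere in the benchmark script these snippets come from. A minimal sketch sufficient for the usage shown here, assuming plain file output (the real helper may also redirect to stdout), would be:

def open_output_file(filename):
    # Sketch only: the examples use the return value as a context manager and
    # later read outf.name, so a plain text-mode file object is enough here.
    return open(filename, "w")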
Example #2
def statement_counts_table():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue))
    )

    fused_stepper = get_example_stepper(actx, use_fusion=True)
    stepper = get_example_stepper(actx, use_fusion=False)

    with open_output_file("statement-counts.tex") as outf:
        if not PAPER_OUTPUT:
            print("==== Statement Counts ====", file=outf)

        print(table(
            "lr",
            ("Operator", "Grudge Node Count"),
            (
                ("Time integration: baseline",
                 r"\num{%d}"
                     % len(stepper.bound_op.eval_code.instructions)),
                ("Right-hand side: baseline",
                 r"\num{%d}"
                     % len(stepper.grudge_bound_op.eval_code.instructions)),
                ("Inlined operator",
                 r"\num{%d}"
                     % len(fused_stepper.bound_op.eval_code.instructions))
            )),
            file=outf)

    logger.info("Wrote '%s'", outf.name)
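Note: the table helper used above is likewise defined in the surrounding script and not shown on this page. A rough sketch that matches the call shape table(alignment, header, rows) and emits a LaTeX tabular (illustrative only; the real helper also honors PAPER_OUTPUT) could look like:

def table(alignment, header, rows):
    # Illustrative sketch: render a header tuple and a tuple of row tuples
    # as a LaTeX tabular with the given column alignment string.
    lines = [r"\begin{tabular}{%s}" % alignment,
             " & ".join(header) + r" \\",
             r"\hline"]
    lines.extend(" & ".join(str(cell) for cell in row) + r" \\" for row in rows)
    lines.append(r"\end{tabular}")
    return "\n".join(lines)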
Example #3
def test_assignment_memory_model(ctx_factory):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue))
    )

    _, discr = get_wave_op_with_discr(actx, dims=2, order=3)

    # Assignment instruction
    bound_op = bind(
            discr,
            sym.Variable("input0", dof_desc.DD_VOLUME)
            + sym.Variable("input1", dof_desc.DD_VOLUME),
            exec_mapper_factory=ExecutionMapperWithMemOpCounting)

    input0 = discr.zeros(actx)
    input1 = discr.zeros(actx)

    result, profile_data = bound_op(
            profile_data={},
            input0=input0,
            input1=input1)

    assert profile_data["bytes_read"] == \
            dof_array_nbytes(input0) + dof_array_nbytes(input1)
    assert profile_data["bytes_written"] == dof_array_nbytes(result)
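Note: dof_array_nbytes is another helper from the surrounding test module. Assuming its arguments are plain DOFArray instances, as in this test, a minimal sketch would be:

def dof_array_nbytes(ary):
    # Sketch only: total byte count of a DOFArray, summed over its
    # per-element-group device arrays.
    return sum(grp_ary.nbytes for grp_ary in ary)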
Example #4
def main():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    # NOTE: ``order`` is used below but never defined in this snippet; an
    # assumed polynomial order is set here so the example runs as-is.
    order = 3

    from meshmode.mesh.generation import (  # noqa
        generate_icosphere, generate_icosahedron, generate_torus)
    #mesh = generate_icosphere(1, order=order)
    mesh = generate_icosahedron(1, order=order)
    #mesh = generate_torus(3, 1, order=order)

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            PolynomialWarpAndBlendGroupFactory

    discr = Discretization(actx, mesh,
                           PolynomialWarpAndBlendGroupFactory(order))

    from meshmode.discretization.visualization import make_visualizer
    vis = make_visualizer(actx, discr, order)

    vis.write_vtk_file("geometry.vtu", [
        ("f", thaw(discr.nodes()[0], actx)),
    ])

    from meshmode.discretization.visualization import \
            write_nodal_adjacency_vtk_file

    write_nodal_adjacency_vtk_file("adjacency.vtu", mesh)
Example #5
def _visualize_refinement(actx: PyOpenCLArrayContext,
                          discr,
                          niter,
                          stage_nr,
                          stage_name,
                          flags,
                          visualize=False):
    if not visualize:
        return

    if stage_nr not in (1, 2):
        raise ValueError("unexpected stage number")

    flags = actx.to_numpy(flags)
    logger.info("for stage %s: splitting %d/%d stage-%d elements", stage_name,
                np.sum(flags), discr.mesh.nelements, stage_nr)

    from meshmode.discretization.visualization import make_visualizer
    vis = make_visualizer(actx, discr, 3)

    assert len(flags) == discr.mesh.nelements

    flags = flags.astype(bool)
    nodes_flags_template = discr.zeros(actx)
    nodes_flags = []
    for grp in discr.groups:
        meg = grp.mesh_el_group
        nodes_flags_grp = actx.to_numpy(nodes_flags_template[grp.index])
        nodes_flags_grp[flags[meg.element_nr_base:meg.nelements +
                              meg.element_nr_base]] = 1
        nodes_flags.append(actx.from_numpy(nodes_flags_grp))

    nodes_flags = DOFArray(actx, tuple(nodes_flags))

    vis_data = [
        ("refine_flags", nodes_flags),
    ]

    if 0:
        from pytential import sym, bind
        bdry_normals = bind(discr, sym.normal(
            discr.ambient_dim))(actx).as_vector(dtype=object)
        vis_data.append(("bdry_normals", bdry_normals), )

    vis.write_vtk_file(f"refinement-{stage_name}-{niter:03d}.vtu", vis_data)
Example #6
    def __init__(self, actx: PyOpenCLArrayContext, ambient_dim,
                 tree_code_container, debug, _well_sep_is_n_away,
                 _from_sep_smaller_crit):
        self.ambient_dim = ambient_dim
        self.tree_code_container = tree_code_container
        self._well_sep_is_n_away = _well_sep_is_n_away
        self._from_sep_smaller_crit = _from_sep_smaller_crit

        self._setup_actx = actx.clone()
        self.debug = debug
Example #7
def _acf():
    """A tiny undocumented function to pass to tests that take an ``actx_factory``
    argument when running them from the command line.
    """
    import pyopencl as cl
    from arraycontext import PyOpenCLArrayContext

    # cl._csc is pyopencl's shorthand alias for cl.create_some_context
    context = cl._csc()
    queue = cl.CommandQueue(context)
    return PyOpenCLArrayContext(queue)
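Note: as the docstring says, _acf stands in for the actx_factory fixture when a test is run directly rather than through pytest; a hypothetical invocation (the test name below is illustrative, not from this page) looks like:

if __name__ == "__main__":
    # Hand _acf to a test that normally receives an actx_factory from pytest.
    test_some_discretization_feature(_acf)   # illustrative test name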
Example #8
def test_stepper_timing(ctx_factory, use_fusion):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(
            cl_ctx,
            properties=cl.command_queue_properties.PROFILING_ENABLE)
    actx = PyOpenCLArrayContext(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue))
    )

    dims = 3

    sym_operator, discr = get_wave_op_with_discr(
            actx, dims=dims, order=3)

    t_start = 0
    dt = 0.04
    t_end = 0.1

    ic = flat_obj_array(discr.zeros(actx),
            [discr.zeros(actx) for i in range(discr.dim)])

    if not use_fusion:
        bound_op = bind(
                discr, sym_operator,
                exec_mapper_factory=ExecutionMapperWithTiming)

        stepper = RK4TimeStepper(
                discr, "w", bound_op, 1 + discr.dim,
                get_wave_component,
                exec_mapper_factory=ExecutionMapperWithTiming)

    else:
        stepper = FusedRK4TimeStepper(
                discr, "w", sym_operator, 1 + discr.dim,
                get_wave_component,
                exec_mapper_factory=ExecutionMapperWithTiming)

    step = 0

    import time
    t = time.time()
    nsteps = int(np.ceil((t_end + 1e-9) / dt))
    for (_, _, profile_data) in stepper.run(  # noqa: B007
            ic, t_start, dt, t_end, return_profile_data=True):
        step += 1
        tn = time.time()
        logger.info("step %d/%d: %f", step, nsteps, tn - t)
        t = tn

    logger.info("fusion? %s", use_fusion)
    for key, value in profile_data.items():
        if isinstance(value, TimingFutureList):
            print(key, value.elapsed())
Example #9
def main():
    logging.basicConfig(level=logging.INFO)

    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    nel_1d = 16
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(a=(-0.5, -0.5),
                                      b=(0.5, 0.5),
                                      nelements_per_axis=(nel_1d, nel_1d))

    order = 3

    # no deep meaning here, just a fudge factor
    dt = 0.7 / (nel_1d * order**2)

    logger.info("%d elements", mesh.nelements)

    discr = DGDiscretization(actx, mesh, order=order)

    fields = WaveState(
        u=bump(actx, discr),
        v=make_obj_array([discr.zeros(actx) for i in range(discr.dim)]),
    )

    from meshmode.discretization.visualization import make_visualizer
    vis = make_visualizer(actx, discr.volume_discr)

    def rhs(t, q):
        return wave_operator(actx, discr, c=1, q=q)

    t = 0
    t_final = 3
    istep = 0
    while t < t_final:
        fields = rk4_step(fields, t, dt, rhs)

        if istep % 10 == 0:
            # FIXME: Maybe an integral function to go with the
            # DOFArray would be nice?
            assert len(fields.u) == 1
            logger.info("[%05d] t %.5e / %.5e norm %.5e", istep, t, t_final,
                        flat_norm(fields.u, 2))
            vis.write_vtk_file("fld-wave-min-%04d.vtu" % istep, [
                ("q", fields),
            ])

        t += dt
        istep += 1

    assert flat_norm(fields.u, 2) < 100
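Note: the rk4_step helper called in the time loop is defined alongside this example. For reference, a classical fourth-order Runge-Kutta step consistent with the call rk4_step(fields, t, dt, rhs) looks like this (a sketch, not necessarily the example's exact implementation):

def rk4_step(y, t, h, rhs):
    # Standard RK4 stages; y may be any array-like container supporting
    # scalar multiplication and addition (e.g. the WaveState above).
    k1 = rhs(t, y)
    k2 = rhs(t + h/2, y + h/2*k1)
    k3 = rhs(t + h/2, y + h/2*k2)
    k4 = rhs(t + h, y + h*k3)
    return y + h/6*(k1 + 2*k2 + 2*k3 + k4)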
Example #10
def test_to_fd_consistency(ctx_factory, mm_mesh, fspace_degree):
    fspace_degree += mm_mesh.groups[0].order

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    factory = InterpolatoryQuadratureSimplexGroupFactory(fspace_degree)
    discr = Discretization(actx, mm_mesh, factory)
    fdrake_connection = build_connection_to_firedrake(discr)
    fdrake_fspace = fdrake_connection.firedrake_fspace()
    # Check consistency
    check_consistency(fdrake_fspace, discr)
Example #11
def test_stepper_mem_ops(ctx_factory, use_fusion):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue))
    )

    dims = 2

    sym_operator, discr = get_wave_op_with_discr(
            actx, dims=dims, order=3)

    t_start = 0
    dt = 0.04
    t_end = 0.2

    ic = flat_obj_array(discr.zeros(actx),
            [discr.zeros(actx) for i in range(discr.dim)])

    if not use_fusion:
        bound_op = bind(
                discr, sym_operator,
                exec_mapper_factory=ExecutionMapperWithMemOpCounting)

        stepper = RK4TimeStepper(
                discr, "w", bound_op, 1 + discr.dim,
                get_wave_component,
                exec_mapper_factory=ExecutionMapperWithMemOpCounting)

    else:
        stepper = FusedRK4TimeStepper(
                discr, "w", sym_operator, 1 + discr.dim,
                get_wave_component,
                exec_mapper_factory=ExecutionMapperWithMemOpCounting)

    step = 0

    nsteps = int(np.ceil((t_end + 1e-9) / dt))
    for (_, _, profile_data) in stepper.run(  # noqa: B007
            ic, t_start, dt, t_end, return_profile_data=True):
        step += 1
        logger.info("step %d/%d", step, nsteps)

    logger.info("using fusion? %s", use_fusion)
    logger.info("bytes read: %d", profile_data["bytes_read"])
    logger.info("bytes written: %d", profile_data["bytes_written"])
    logger.info("bytes total: %d",
            profile_data["bytes_read"] + profile_data["bytes_written"])
Example #12
def test_stepper_equivalence(ctx_factory, order=4):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue))
    )

    dims = 2

    sym_operator, discr = get_wave_op_with_discr(
            actx, dims=dims, order=order)
    #sym_operator_direct, discr = get_wave_op_with_discr_direct(
    #        actx, dims=dims, order=order)

    if dims == 2:
        dt = 0.04
    elif dims == 3:
        dt = 0.02

    ic = flat_obj_array(discr.zeros(actx),
            [discr.zeros(actx) for i in range(discr.dim)])

    bound_op = bind(discr, sym_operator)

    stepper = RK4TimeStepper(
            discr, "w", bound_op, 1 + discr.dim, get_wave_component)

    fused_stepper = FusedRK4TimeStepper(
            discr, "w", sym_operator, 1 + discr.dim,
            get_wave_component)

    t_start = 0
    t_end = 0.5
    nsteps = int(np.ceil((t_end + 1e-9) / dt))
    print("dt=%g nsteps=%d" % (dt, nsteps))

    step = 0

    norm = bind(discr, sym.norm(2, sym.var("u_ref") - sym.var("u")))

    fused_steps = fused_stepper.run(ic, t_start, dt, t_end)

    for t_ref, (u_ref, _v_ref) in stepper.run(ic, t_start, dt, t_end):
        step += 1
        logger.debug("step %d/%d", step, nsteps)
        t, (u, v) = next(fused_steps)
        assert t == t_ref, step
        assert norm(u=u, u_ref=u_ref) <= 1e-13, step
Example #13
def test_from_fd_consistency(ctx_factory, fdrake_mesh, fspace_degree):
    """
    Check basic consistency with a FiredrakeConnection built from firedrake
    """
    # make discretization from firedrake
    fdrake_fspace = FunctionSpace(fdrake_mesh, "DG", fspace_degree)

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    fdrake_connection = build_connection_from_firedrake(actx, fdrake_fspace)
    discr = fdrake_connection.discr
    # Check consistency
    check_consistency(fdrake_fspace, discr)
Example #14
def test_to_fd_idempotency(ctx_factory, mm_mesh, fspace_degree):
    """
    Make sure mm->fd->mm and (mm->)fd->mm->fd are the identity
    """
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    # make sure degree is higher order than mesh
    fspace_degree += mm_mesh.groups[0].order

    # Make a function space and a function with unique values at each node
    factory = InterpolatoryQuadratureSimplexGroupFactory(fspace_degree)
    discr = Discretization(actx, mm_mesh, factory)
    fdrake_connection = build_connection_to_firedrake(discr)
    fdrake_mesh = fdrake_connection.firedrake_fspace().mesh()
    dtype = fdrake_mesh.coordinates.dat.data.dtype

    mm_unique = discr.zeros(actx, dtype=dtype)
    unique_vals = np.arange(np.size(mm_unique[0]), dtype=dtype)
    mm_unique[0].set(unique_vals.reshape(mm_unique[0].shape))
    mm_unique_copy = DOFArray(actx, (mm_unique[0].copy(), ))

    # Test for idempotency mm->fd->mm
    fdrake_unique = fdrake_connection.from_meshmode(mm_unique)
    fdrake_connection.from_firedrake(fdrake_unique, out=mm_unique_copy)

    np.testing.assert_allclose(actx.to_numpy(mm_unique_copy[0]),
                               actx.to_numpy(mm_unique[0]),
                               atol=CLOSE_ATOL)

    # Test for idempotency (mm->)fd->mm->fd
    fdrake_unique_copy = fdrake_connection.from_meshmode(mm_unique_copy)
    np.testing.assert_allclose(fdrake_unique_copy.dat.data,
                               fdrake_unique.dat.data,
                               atol=CLOSE_ATOL)
Example #15
def scalar_assignment_percent_of_total_mem_ops_table():
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(
        queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue))
    )

    result2d = mem_ops_results(actx, 2)
    result3d = mem_ops_results(actx, 3)

    with open_output_file("scalar-assignments-mem-op-percentage.tex") as outf:
        if not PAPER_OUTPUT:
            print("==== Scalar Assigment % of Total Mem Ops ====", file=outf)

        print(
            table(
                "lr",
                ("Operator",
                 r"\parbox{1in}{\centering \% Memory Ops. "
                 r"Due to Scalar Assignments}"),
                (
                    ("2D: Baseline",
                     "%.1f" % (
                         100 * result2d["nonfused_bytes_total_by_scalar_assignments"]
                         / result2d["nonfused_bytes_total"])),
                    ("2D: Inlined",
                     "%.1f" % (
                         100 * result2d["fused_bytes_total_by_scalar_assignments"]
                         / result2d["fused_bytes_total"])),
                    ("3D: Baseline",
                     "%.1f" % (
                         100 * result3d["nonfused_bytes_total_by_scalar_assignments"]
                         / result3d["nonfused_bytes_total"])),
                    ("3D: Inlined",
                     "%.1f" % (
                         100 * result3d["fused_bytes_total_by_scalar_assignments"]
                         / result3d["fused_bytes_total"])),
                )),
            file=outf)

    logger.info("Wrote '%s'", outf.name)
Example #16
def main():
    # If can't import firedrake, do nothing
    #
    # filename MUST include "firedrake" (i.e. match *firedrake*.py) in order
    # to be run during CI
    try:
        import firedrake  # noqa: F401
    except ImportError:
        return 0

    from meshmode.interop.firedrake import build_connection_from_firedrake
    from firedrake import (UnitSquareMesh, FunctionSpace, SpatialCoordinate,
                           Function, cos)

    # Create a firedrake mesh and interpolate cos(x+y) onto it
    fd_mesh = UnitSquareMesh(10, 10)
    fd_fspace = FunctionSpace(fd_mesh, "DG", 2)
    spatial_coord = SpatialCoordinate(fd_mesh)
    fd_fntn = Function(fd_fspace).interpolate(cos(sum(spatial_coord)))

    # Make connections
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    fd_connection = build_connection_from_firedrake(actx, fd_fspace)
    fd_bdy_connection = \
        build_connection_from_firedrake(actx,
                                        fd_fspace,
                                        restrict_to_boundary="on_boundary")

    # Plot the meshmode meshes that the connections connect to
    import matplotlib.pyplot as plt
    from meshmode.mesh.visualization import draw_2d_mesh
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.set_title("FiredrakeConnection")
    plt.sca(ax1)
    draw_2d_mesh(fd_connection.discr.mesh,
                 draw_vertex_numbers=False,
                 draw_element_numbers=False,
                 set_bounding_box=True)
    ax2.set_title("FiredrakeConnection 'on_boundary'")
    plt.sca(ax2)
    draw_2d_mesh(fd_bdy_connection.discr.mesh,
                 draw_vertex_numbers=False,
                 draw_element_numbers=False,
                 set_bounding_box=True)
    plt.show()

    # Plot fd_fntn using unrestricted FiredrakeConnection
    from meshmode.discretization.visualization import make_visualizer
    discr = fd_connection.discr
    vis = make_visualizer(actx, discr, discr.groups[0].order + 3)
    field = fd_connection.from_firedrake(fd_fntn, actx=actx)

    fig = plt.figure()
    ax1 = fig.add_subplot(1, 2, 1, projection="3d")
    ax1.set_title("cos(x+y) in\nFiredrakeConnection")
    vis.show_scalar_in_matplotlib_3d(field, do_show=False)

    # Now repeat using FiredrakeConnection restricted to "on_boundary"
    bdy_discr = fd_bdy_connection.discr
    bdy_vis = make_visualizer(actx, bdy_discr, bdy_discr.groups[0].order + 3)
    bdy_field = fd_bdy_connection.from_firedrake(fd_fntn, actx=actx)

    ax2 = fig.add_subplot(1, 2, 2, projection="3d")
    plt.sca(ax2)
    ax2.set_title("cos(x+y) in\nFiredrakeConnection 'on_boundary'")
    bdy_vis.show_scalar_in_matplotlib_3d(bdy_field, do_show=False)

    import matplotlib.cm as cm
    fig.colorbar(cm.ScalarMappable())
    plt.show()
Example #17
def test_to_fd_transfer(ctx_factory, fspace_degree, mesh_name, mesh_pars, dim):
    """
    Make sure creating a function which projects onto
    one dimension then transports it is the same
    (up to resampling error) as projecting to one
    dimension on the transported mesh
    """
    # build estimate-of-convergence recorder
    from pytools.convergence import EOCRecorder
    # dimension projecting onto -> EOCRecorder
    eoc_recorders = {d: EOCRecorder() for d in range(dim)}

    # make a computing context
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    # Get each of the refinements of the mesh and record
    # conversion errors
    for mesh_par in mesh_pars:
        if mesh_name in ("blob2d-order1", "blob2d-order4"):
            assert dim == 2
            from meshmode.mesh.io import read_gmsh
            mm_mesh = read_gmsh(f"{mesh_name}-h{mesh_par}.msh",
                                force_ambient_dim=dim)
            h = float(mesh_par)
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mm_mesh = generate_warped_rect_mesh(dim,
                                                order=4,
                                                nelements_side=mesh_par)
            h = 1 / mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # Make discr and connect it to firedrake
        factory = InterpolatoryQuadratureSimplexGroupFactory(fspace_degree)
        discr = Discretization(actx, mm_mesh, factory)

        fdrake_connection = build_connection_to_firedrake(discr)
        fdrake_fspace = fdrake_connection.firedrake_fspace()
        spatial_coord = SpatialCoordinate(fdrake_fspace.mesh())

        # get the group's nodes in a numpy array
        nodes = discr.nodes()
        group_nodes = np.array(
            [actx.to_numpy(dof_arr[0]) for dof_arr in nodes])

        for d in range(dim):
            meshmode_f = discr.zeros(actx)
            meshmode_f[0][:] = group_nodes[d, :, :]

            # connect to firedrake and evaluate expr in firedrake
            fdrake_f = Function(fdrake_fspace).interpolate(spatial_coord[d])

            # transport to firedrake and record error
            mm2fd_f = fdrake_connection.from_meshmode(meshmode_f)

            err = np.max(np.abs(fdrake_f.dat.data - mm2fd_f.dat.data))
            eoc_recorders[d].add_data_point(h, err)

    # assert that order is correct or error is "low enough"
    for d, eoc_rec in eoc_recorders.items():
        print("\nvector *x* -> *x[%s]*\n" % d, eoc_rec)
        assert (eoc_rec.order_estimate() >= fspace_degree
                or eoc_rec.max_error() < 2e-14)
Example #18
def main():
    # If can't import firedrake, do nothing
    #
    # filename MUST include "firedrake" (i.e. match *firedrake*.py) in order
    # to be run during CI
    try:
        import firedrake  # noqa: F401
    except ImportError:
        return 0

    # For this example, imagine we wish to solve the Laplace equation
    # on a meshmode mesh with some given Dirichlet boundary conditions,
    # and decide to use firedrake.
    #
    # To verify this is working, we use a known solution to the Laplace
    # equation to get our boundary conditions

    # {{{ First we make a discretization in meshmode and get our bcs

    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    nel_1d = 16
    from meshmode.mesh.generation import generate_regular_rect_mesh
    mesh = generate_regular_rect_mesh(
            a=(-0.5, -0.5),
            b=(0.5, 0.5),
            nelements_per_axis=(nel_1d, nel_1d))

    order = 3

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
        InterpolatoryQuadratureSimplexGroupFactory
    group_factory = InterpolatoryQuadratureSimplexGroupFactory(order=order)
    discr = Discretization(actx, mesh, group_factory)

    # Get our solution: we will use
    # Real(e^z) = Real(e^{x+iy})
    #           = e^x Real(e^{iy})
    #           = e^x cos(y)
    nodes = discr.nodes()
    for i in range(len(nodes)):
        nodes[i] = thaw(nodes[i], actx)
    # First index is dimension
    candidate_sol = actx.np.exp(nodes[0]) * actx.np.cos(nodes[1])

    # }}}

    # {{{ Now send candidate_sol into firedrake and use it for boundary conditions

    from meshmode.interop.firedrake import build_connection_to_firedrake
    fd_connection = build_connection_to_firedrake(discr, group_nr=0)
    # convert candidate_sol to firedrake
    fd_candidate_sol = fd_connection.from_meshmode(candidate_sol)
    # get the firedrake function space
    fd_fspace = fd_connection.firedrake_fspace()

    # set up dirichlet laplace problem in fd and solve
    from firedrake import (
        FunctionSpace, TrialFunction, TestFunction, Function, inner, grad, dx,
        Constant, project, DirichletBC, solve)

    # because it's easier to write down the variational problem,
    # we're going to project from our "DG" space
    # into a continuous one.
    cfd_fspace = FunctionSpace(fd_fspace.mesh(), "CG", order)
    u = TrialFunction(cfd_fspace)
    v = TestFunction(cfd_fspace)
    sol = Function(cfd_fspace)

    a = inner(grad(u), grad(v)) * dx
    rhs = Constant(0.0) * v * dx
    bc_value = project(fd_candidate_sol, cfd_fspace)
    bc = DirichletBC(cfd_fspace, bc_value, "on_boundary")
    params = {"ksp_monitor": None}
    solve(a == rhs, sol, bcs=[bc], solver_parameters=params)

    # project back into our "DG" space
    sol = project(sol, fd_fspace)

    # }}}

    # {{{ Take the solution from firedrake and compare it to candidate_sol

    true_sol = fd_connection.from_firedrake(sol, actx=actx)
    # pull back into numpy
    true_sol = actx.to_numpy(true_sol[0])
    candidate_sol = actx.to_numpy(candidate_sol[0])
    print("l^2 difference between candidate solution and firedrake solution=",
          np.linalg.norm(true_sol - candidate_sol))
Example #19
            discr = reconstruct_discr_from_nodes(actx, discr0, x)
            vis = make_visualizer(actx, discr, vis_order=target_order)
            # vis = vis.copy_with_same_connectivity(actx, discr)

            filename = f"moving-geometry-{n:09d}.vtu"
            plot_solution(actx, vis, filename, discr, t, x)

        logger.info("[%05d/%05d] t = %.5e/%.5e dt = %.5e",
                n, maxiter, t, tmax, dt)

    # }}}


if __name__ == "__main__":
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    from pytools import ProcessTimer
    for _ in range(1):
        with ProcessTimer() as p:
            run(actx,
                    ambient_dim=3,
                    group_factory_name="warp_and_blend",
                    tmax=1.0,
                    timestep=1.0e-2,
                    visualize=False)

        logger.info("elapsed: %.3fs wall %.2fx cpu",
                p.wall_elapsed, p.process_elapsed / p.wall_elapsed)
Example #20
def test_from_fd_idempotency(ctx_factory, fdrake_mesh, fspace_degree,
                             fspace_type, only_convert_bdy):
    """
    Make sure fd->mm->fd and (fd->)mm->fd->mm are the identity
    """
    # Make a function space and a function with unique values at each node
    if fspace_type == "scalar":
        fdrake_fspace = FunctionSpace(fdrake_mesh, "DG", fspace_degree)
        # Just use the node nr
        fdrake_unique = Function(fdrake_fspace)
        fdrake_unique.dat.data[:] = np.arange(fdrake_unique.dat.data.shape[0])
    elif fspace_type == "vector":
        fdrake_fspace = VectorFunctionSpace(fdrake_mesh, "DG", fspace_degree)
        # use the coordinates
        xx = SpatialCoordinate(fdrake_fspace.mesh())
        fdrake_unique = Function(fdrake_fspace).interpolate(xx)
    elif fspace_type == "tensor":
        fdrake_fspace = TensorFunctionSpace(fdrake_mesh, "DG", fspace_degree)
        # use the coordinates, duplicated into the right tensor shape
        xx = SpatialCoordinate(fdrake_fspace.mesh())
        dim = fdrake_fspace.mesh().geometric_dimension()
        unique_expr = as_tensor([xx for _ in range(dim)])
        fdrake_unique = Function(fdrake_fspace).interpolate(unique_expr)

    # Make connection
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    # If only converting boundary, first go ahead and do one round of
    # fd->mm->fd. This will zero out any degrees of freedom absent in
    # the meshmode mesh (because they are not associated to cells
    #                    with >= 1 node on the boundary)
    #
    # Otherwise, just continue as normal
    if only_convert_bdy:
        fdrake_connection = \
            build_connection_from_firedrake(actx,
                                            fdrake_fspace,
                                            restrict_to_boundary="on_boundary")
        temp = fdrake_connection.from_firedrake(fdrake_unique, actx=actx)
        fdrake_unique = fdrake_connection.from_meshmode(temp)
    else:
        fdrake_connection = build_connection_from_firedrake(
            actx, fdrake_fspace)

    # Test for idempotency fd->mm->fd
    mm_field = fdrake_connection.from_firedrake(fdrake_unique, actx=actx)
    fdrake_unique_copy = Function(fdrake_fspace)
    fdrake_connection.from_meshmode(mm_field, out=fdrake_unique_copy)

    np.testing.assert_allclose(fdrake_unique_copy.dat.data,
                               fdrake_unique.dat.data,
                               atol=CLOSE_ATOL)

    # Test for idempotency (fd->)mm->fd->mm
    mm_field_copy = fdrake_connection.from_firedrake(fdrake_unique_copy,
                                                     actx=actx)
    if fspace_type == "scalar":
        np.testing.assert_allclose(actx.to_numpy(mm_field_copy[0]),
                                   actx.to_numpy(mm_field[0]),
                                   atol=CLOSE_ATOL)
    else:
        for dof_arr_cp, dof_arr in zip(mm_field_copy.flatten(),
                                       mm_field.flatten()):
            np.testing.assert_allclose(actx.to_numpy(dof_arr_cp[0]),
                                       actx.to_numpy(dof_arr[0]),
                                       atol=CLOSE_ATOL)
Example #21
def test_from_boundary_consistency(ctx_factory, fdrake_mesh, fspace_degree):
    """
    Make basic checks that FiredrakeConnection restricted to cells
    near the boundary is not doing
    something obviously wrong,
    i.e. that the firedrake boundary tags partition the converted meshmode mesh,
    that the firedrake boundary tags correspond to the same physical
    regions in the converted meshmode mesh as in the original firedrake mesh,
    and that each boundary tag is associated to the same number of facets
    in the converted meshmode mesh as in the original firedrake mesh.
    """
    fdrake_fspace = FunctionSpace(fdrake_mesh, "DG", fspace_degree)

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    frombdy_conn = \
        build_connection_from_firedrake(actx,
                                        fdrake_fspace,
                                        restrict_to_boundary="on_boundary")

    # Ensure the meshmode mesh has one group and make sure both
    # meshes agree on some basic properties
    discr = frombdy_conn.discr
    assert len(discr.mesh.groups) == 1
    fdrake_mesh_fspace = fdrake_mesh.coordinates.function_space()
    fdrake_mesh_order = fdrake_mesh_fspace.finat_element.degree
    assert discr.mesh.groups[0].dim == fdrake_mesh.topological_dimension()
    assert discr.mesh.groups[0].order == fdrake_mesh_order

    # Get the unit vertex indices (in each cell)
    fdrake_mesh = fdrake_fspace.mesh()
    cfspace = fdrake_mesh.coordinates.function_space()
    entity_dofs = cfspace.finat_element.entity_dofs()[0]
    fdrake_unit_vert_indices = []
    for _, local_node_nrs in sorted(entity_dofs.items()):
        assert len(local_node_nrs) == 1
        fdrake_unit_vert_indices.append(local_node_nrs[0])
    fdrake_unit_vert_indices = np.array(fdrake_unit_vert_indices)

    # only look at cells "near" bdy (with >= 1 vertex on)
    from meshmode.interop.firedrake.connection import _get_cells_to_use
    cells_near_bdy = _get_cells_to_use(fdrake_mesh, "on_boundary")
    # get the firedrake vertices of cells near the boundary,
    # in no particular order
    fdrake_vert_indices = \
        cfspace.cell_node_list[cells_near_bdy,
                               fdrake_unit_vert_indices[:, np.newaxis]]
    fdrake_vert_indices = np.unique(fdrake_vert_indices)
    fdrake_verts = fdrake_mesh.coordinates.dat.data[fdrake_vert_indices, ...]
    if fdrake_mesh.geometric_dimension() == 1:
        fdrake_verts = fdrake_verts[:, np.newaxis]
    # Get meshmode vertices (shaped like (dim, nverts))
    meshmode_verts = discr.mesh.vertices

    # Ensure that the vertices of firedrake elements on
    # the boundary are identical to the resultant meshes' vertices up to
    # reordering
    # Nb: I got help on this from stack overflow:
    # https://stackoverflow.com/questions/38277143/sort-2d-numpy-array-lexicographically  # noqa: E501
    lex_sorted_mm_verts = meshmode_verts[:, np.lexsort(meshmode_verts)]
    lex_sorted_fdrake_verts = fdrake_verts[np.lexsort(fdrake_verts.T)]
    np.testing.assert_allclose(lex_sorted_mm_verts,
                               lex_sorted_fdrake_verts.T,
                               atol=CLOSE_ATOL)

    # Ensure the discretization and the firedrake function space reference element
    # agree on some basic properties
    finat_elt = fdrake_fspace.finat_element
    assert len(discr.groups) == 1
    assert discr.groups[0].order == finat_elt.degree
    assert discr.groups[0].nunit_dofs == finat_elt.space_dimension()
Example #22
def build_tree_with_qbx_metadata(actx: PyOpenCLArrayContext,
        places, tree_builder, particle_list_filter,
        sources_list=(), targets_list=(),
        use_stage2_discr=False):
    """Return a :class:`TreeWithQBXMetadata` built from the given layer
    potential source. This contains particles of three different types:

       * source particles either from
         :class:`~pytential.symbolic.primitives.QBX_SOURCE_STAGE1` or
         :class:`~pytential.symbolic.primitives.QBX_SOURCE_QUAD_STAGE2`.
       * centers from
         :class:`~pytential.symbolic.primitives.QBX_SOURCE_STAGE1`.
       * targets from ``targets_list``.

    :arg places: An instance of
        :class:`~pytential.symbolic.execution.GeometryCollection`.
    :arg targets_list: A list of :class:`pytential.target.TargetBase`

    :arg use_stage2_discr: If *True*, builds a tree with stage 2 sources.
        If *False*, the tree is built with stage 1 sources.
    """

    # The ordering of particles is as follows:
    # - sources go first
    # - then centers
    # - then targets

    from pytential import bind, sym
    stage1_density_discrs = []
    density_discrs = []
    for source_name in sources_list:
        dd = sym.as_dofdesc(source_name)

        discr = places.get_discretization(dd.geometry)
        stage1_density_discrs.append(discr)

        if use_stage2_discr:
            discr = places.get_discretization(
                    dd.geometry, sym.QBX_SOURCE_QUAD_STAGE2)
        density_discrs.append(discr)

    # TODO: update code to work for multiple source discretizations
    if len(sources_list) != 1:
        raise RuntimeError("can only build a tree for a single source")

    def _make_centers(discr):
        return bind(discr, sym.interleaved_expansion_centers(
            discr.ambient_dim))(actx)

    stage1_density_discr = stage1_density_discrs[0]
    density_discr = density_discrs[0]

    from arraycontext import flatten
    sources = flatten(density_discr.nodes(), actx, leaf_class=DOFArray)
    centers = flatten(_make_centers(stage1_density_discr), actx, leaf_class=DOFArray)
    targets = [
            flatten(tgt.nodes(), actx, leaf_class=DOFArray)
            for tgt in targets_list]

    queue = actx.queue
    particles = tuple(
            actx.np.concatenate(dim_coords)
            for dim_coords in zip(sources, centers, *targets))

    # Counts
    nparticles = len(particles[0])
    nelements = density_discr.mesh.nelements
    nsources = len(sources[0])
    ncenters = len(centers[0])
    # Each source gets an interior / exterior center.
    assert 2 * nsources == ncenters or use_stage2_discr
    ntargets = sum(tgt.ndofs for tgt in targets_list)

    # Slices
    qbx_user_source_slice = slice(0, nsources)

    center_slice_start = nsources
    qbx_user_center_slice = slice(center_slice_start, center_slice_start + ncenters)

    element_slice_start = center_slice_start + ncenters
    target_slice_start = element_slice_start
    qbx_user_target_slice = slice(target_slice_start, target_slice_start + ntargets)

    # Build tree with sources and centers. Split boxes
    # only because of sources.
    refine_weights = actx.zeros(nparticles, np.int32)
    refine_weights[:nsources].fill(1)

    refine_weights.finish()

    tree, evt = tree_builder(queue, particles,
            max_leaf_refine_weight=MAX_REFINE_WEIGHT,
            refine_weights=refine_weights)

    # Compute box => particle class relations
    flags = refine_weights
    del refine_weights
    particle_classes = {}

    for class_name, particle_slice, fixup in (
            ("box_to_qbx_source", qbx_user_source_slice, 0),
            ("box_to_qbx_target", qbx_user_target_slice, -target_slice_start),
            ("box_to_qbx_center", qbx_user_center_slice, -center_slice_start)):
        flags.fill(0)
        flags[particle_slice].fill(1)
        flags.finish()

        box_to_class = (
            particle_list_filter
            .filter_target_lists_in_user_order(queue, tree, flags)
            ).with_queue(actx.queue)

        if fixup:
            box_to_class.target_lists += fixup
        particle_classes[class_name + "_starts"] = box_to_class.target_starts
        particle_classes[class_name + "_lists"] = box_to_class.target_lists

    del flags
    del box_to_class

    # Compute element => source relation
    qbx_element_to_source_starts = actx.empty(nelements + 1, tree.particle_id_dtype)
    el_offset = 0
    node_nr_base = 0
    for group in density_discr.groups:
        group_element_starts = np.arange(
                node_nr_base, node_nr_base + group.ndofs, group.nunit_dofs,
                dtype=tree.particle_id_dtype)
        qbx_element_to_source_starts[el_offset:el_offset + group.nelements] = \
                actx.from_numpy(group_element_starts)

        node_nr_base += group.ndofs
        el_offset += group.nelements
    qbx_element_to_source_starts[-1] = nsources

    # Compute element => center relation
    qbx_element_to_center_starts = (
            2 * qbx_element_to_source_starts
            if not use_stage2_discr
            else None)

    # Transfer all tree attributes.
    tree_attrs = {}
    for attr_name in tree.__class__.fields:
        try:
            tree_attrs[attr_name] = getattr(tree, attr_name)
        except AttributeError:
            pass

    tree_attrs.update(particle_classes)

    return TreeWithQBXMetadata(
        qbx_element_to_source_starts=qbx_element_to_source_starts,
        qbx_element_to_center_starts=qbx_element_to_center_starts,
        qbx_user_source_slice=qbx_user_source_slice,
        qbx_user_center_slice=qbx_user_center_slice,
        qbx_user_target_slice=qbx_user_target_slice,
        nqbxelements=nelements,
        nqbxsources=nsources,
        nqbxcenters=ncenters,
        nqbxtargets=ntargets,
        **tree_attrs).with_queue(None)
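Note: to make the slice bookkeeping above concrete, here is a small illustrative layout. With, say, 4 stage-1 sources (hence 8 interleaved centers) and 3 targets, the user-ordered particle array and the resulting slices are (numbers purely illustrative):

# particle order: [sources | centers | targets]
nsources, ncenters, ntargets = 4, 8, 3
qbx_user_source_slice = slice(0, 4)     # slice(0, nsources)
qbx_user_center_slice = slice(4, 12)    # slice(nsources, nsources + ncenters)
qbx_user_target_slice = slice(12, 15)   # slice(nsources + ncenters,
                                        #       nsources + ncenters + ntargets)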
Example #23
def _test_mpi_boundary_swap(dim, order, num_groups):
    from meshmode.distributed import MPIMeshDistributor, MPIBoundaryCommSetupHelper

    from mpi4py import MPI
    mpi_comm = MPI.COMM_WORLD
    i_local_part = mpi_comm.Get_rank()
    num_parts = mpi_comm.Get_size()

    mesh_dist = MPIMeshDistributor(mpi_comm)

    if mesh_dist.is_mananger_rank():
        np.random.seed(42)
        from meshmode.mesh.generation import generate_warped_rect_mesh
        meshes = [generate_warped_rect_mesh(dim, order=order, nelements_side=4)
                        for _ in range(num_groups)]

        if num_groups > 1:
            from meshmode.mesh.processing import merge_disjoint_meshes
            mesh = merge_disjoint_meshes(meshes)
        else:
            mesh = meshes[0]

        part_per_element = np.random.randint(num_parts, size=mesh.nelements)

        local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)
    else:
        local_mesh = mesh_dist.receive_mesh_part()

    group_factory = PolynomialWarpAndBlendGroupFactory(order)

    from arraycontext import PyOpenCLArrayContext
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    from meshmode.discretization import Discretization
    vol_discr = Discretization(actx, local_mesh, group_factory)

    from meshmode.distributed import get_connected_partitions
    connected_parts = get_connected_partitions(local_mesh)

    # Check that the connectivity makes sense before doing any communication
    _test_connected_parts(mpi_comm, connected_parts)

    from meshmode.discretization.connection import make_face_restriction
    from meshmode.mesh import BTAG_PARTITION
    local_bdry_conns = {}
    for i_remote_part in connected_parts:
        local_bdry_conns[i_remote_part] = make_face_restriction(
                actx, vol_discr, group_factory, BTAG_PARTITION(i_remote_part))

    remote_to_local_bdry_conns = {}
    with MPIBoundaryCommSetupHelper(mpi_comm, actx, local_bdry_conns,
            bdry_grp_factory=group_factory) as bdry_setup_helper:
        from meshmode.discretization.connection import check_connection
        while True:
            conns = bdry_setup_helper.complete_some()
            if not conns:
                break
            for i_remote_part, conn in conns.items():
                check_connection(actx, conn)
                remote_to_local_bdry_conns[i_remote_part] = conn

    _test_data_transfer(mpi_comm,
                        actx,
                        local_bdry_conns,
                        remote_to_local_bdry_conns,
                        connected_parts)

    logger.debug("Rank %d exiting", i_local_part)
Example #24
    def exec_compute_potential_insn_fmm(self, actx: PyOpenCLArrayContext,
            insn, bound_expr, evaluate, fmm_driver):
        """
        :arg fmm_driver: A function that accepts five arguments:
            *wrangler*, *strength*, *geo_data*, *kernel*, *kernel_arguments*.
        :returns: a tuple ``(assignments, extra_outputs)``, where *assignments*
            is a list of tuples containing pairs ``(name, value)`` representing
            assignments to be performed in the evaluation context.
            *extra_outputs* is data that *fmm_driver* may return
            (such as timing data), passed through unmodified.
        """
        target_name_and_side_to_number, target_discrs_and_qbx_sides = (
                self.get_target_discrs_and_qbx_sides(insn, bound_expr))

        geo_data = self.qbx_fmm_geometry_data(
                bound_expr.places,
                insn.source.geometry,
                target_discrs_and_qbx_sides)

        # FIXME Exert more positive control over geo_data attribute lifetimes using
        # geo_data.<method>.clear_cache(geo_data).

        # FIXME Synthesize "bad centers" around corners and edges that have
        # inadequate QBX coverage.

        # FIXME don't compute *all* output kernels on all targets--respect that
        # some target discretizations may only be asking for derivatives (e.g.)

        flat_strengths = get_flat_strengths_from_densities(
                actx, bound_expr.places, evaluate, insn.densities,
                dofdesc=insn.source)

        base_kernel = single_valued(knl.get_base_kernel() for
            knl in insn.source_kernels)

        output_and_expansion_dtype = (
                self.get_fmm_output_and_expansion_dtype(insn.source_kernels,
                    flat_strengths[0]))
        kernel_extra_kwargs, source_extra_kwargs = (
                self.get_fmm_expansion_wrangler_extra_kwargs(
                    actx, insn.target_kernels + insn.source_kernels,
                    geo_data.tree().user_source_ids,
                    insn.kernel_arguments, evaluate))

        tree_indep = self._tree_indep_data_for_wrangler(
                target_kernels=insn.target_kernels,
                source_kernels=insn.source_kernels)

        wrangler = tree_indep.wrangler_cls(
                        tree_indep, geo_data, output_and_expansion_dtype,
                        self.qbx_order,
                        self.fmm_level_to_order,
                        source_extra_kwargs=source_extra_kwargs,
                        kernel_extra_kwargs=kernel_extra_kwargs)

        from pytential.qbx.geometry import target_state
        if actx.to_numpy(actx.np.any(
                actx.thaw(geo_data.user_target_to_center())
                == target_state.FAILED)):
            raise RuntimeError("geometry has failed targets")

        # {{{ geometry data inspection hook

        if self.geometry_data_inspector is not None:
            perform_fmm = self.geometry_data_inspector(insn, bound_expr, geo_data)
            if not perform_fmm:
                return [(o.name, 0) for o in insn.outputs]

        # }}}

        # Execute global QBX.
        all_potentials_on_every_target, extra_outputs = (
                fmm_driver(
                    wrangler, flat_strengths, geo_data,
                    base_kernel, kernel_extra_kwargs))

        results = []

        for o in insn.outputs:
            target_side_number = target_name_and_side_to_number[
                    o.target_name, o.qbx_forced_limit]
            target_discr, _ = target_discrs_and_qbx_sides[target_side_number]
            target_slice = slice(*geo_data.target_info().target_discr_starts[
                    target_side_number:target_side_number+2])

            result = \
                all_potentials_on_every_target[o.target_kernel_index][target_slice]

            from meshmode.discretization import Discretization
            if isinstance(target_discr, Discretization):
                template_ary = thaw(target_discr.nodes()[0], actx)
                result = unflatten(template_ary, result, actx, strict=False)

            results.append((o.name, result))

        return results, extra_outputs
Example #25
def test_from_fd_transfer(ctx_factory, fspace_degree, fdrake_mesh_name,
                          fdrake_mesh_pars, dim, only_convert_bdy):
    """
    Make sure creating a function which projects onto
    one dimension then transports it is the same
    (up to resampling error) as projecting to one
    dimension on the transported mesh
    """
    # build estimate-of-convergence recorder
    from pytools.convergence import EOCRecorder
    # (fd -> mm ? True : False, dimension projecting onto)
    eoc_recorders = {(True, d): EOCRecorder() for d in range(dim)}
    if not only_convert_bdy:
        for d in range(dim):
            eoc_recorders[(False, d)] = EOCRecorder()

    # make a computing context
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    def get_fdrake_mesh_and_h_from_par(mesh_par):
        if fdrake_mesh_name == "UnitInterval":
            assert dim == 1
            n = mesh_par
            fdrake_mesh = UnitIntervalMesh(n)
            h = 1 / n
        elif fdrake_mesh_name == "UnitSquare":
            assert dim == 2
            n = mesh_par
            fdrake_mesh = UnitSquareMesh(n, n)
            h = 1 / n
        elif fdrake_mesh_name == "UnitCube":
            assert dim == 3
            n = mesh_par
            fdrake_mesh = UnitCubeMesh(n, n, n)
            h = 1 / n
        elif fdrake_mesh_name in ("blob2d-order1", "blob2d-order4"):
            assert dim == 2
            if fdrake_mesh_name == "blob2d-order1":
                from firedrake import Mesh
                fdrake_mesh = Mesh(f"{fdrake_mesh_name}-h{mesh_par}.msh",
                                   dim=dim)
            else:
                from meshmode.mesh.io import read_gmsh
                from meshmode.interop.firedrake import export_mesh_to_firedrake
                mm_mesh = read_gmsh(f"{fdrake_mesh_name}-h{mesh_par}.msh",
                                    force_ambient_dim=dim)
                fdrake_mesh, _, _ = export_mesh_to_firedrake(mm_mesh)
            h = float(mesh_par)
        elif fdrake_mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            from meshmode.interop.firedrake import export_mesh_to_firedrake
            mm_mesh = generate_warped_rect_mesh(dim,
                                                order=4,
                                                nelements_side=mesh_par)
            fdrake_mesh, _, _ = export_mesh_to_firedrake(mm_mesh)
            h = 1 / mesh_par
        else:
            raise ValueError("fdrake_mesh_name not recognized")

        return (fdrake_mesh, h)

    # Record error for each refinement of each mesh
    for mesh_par in fdrake_mesh_pars:
        fdrake_mesh, h = get_fdrake_mesh_and_h_from_par(mesh_par)
        # make function space and build connection
        fdrake_fspace = FunctionSpace(fdrake_mesh, "DG", fspace_degree)
        if only_convert_bdy:
            fdrake_connection = \
                build_connection_from_firedrake(actx,
                                                fdrake_fspace,
                                                restrict_to_boundary="on_boundary")
        else:
            fdrake_connection = build_connection_from_firedrake(
                actx, fdrake_fspace)
        # get this for making functions in firedrake
        spatial_coord = SpatialCoordinate(fdrake_mesh)

        # get nodes in handier format for making meshmode functions
        discr = fdrake_connection.discr
        # nodes is np array (ambient_dim,) of DOFArray (ngroups,)
        # of arrays (nelements, nunit_dofs), we want a single np array
        # of shape (ambient_dim, nelements, nunit_dofs)
        nodes = discr.nodes()
        group_nodes = np.array(
            [actx.to_numpy(dof_arr[0]) for dof_arr in nodes])

        # Now, for each coordinate d, test transferring the function
        # x -> sin(dth component of x)
        for d in range(dim):
            fdrake_f = Function(fdrake_fspace).interpolate(
                sin(spatial_coord[d]))
            # transport fdrake function and put in numpy
            fd2mm_f = fdrake_connection.from_firedrake(fdrake_f, actx=actx)
            fd2mm_f = actx.to_numpy(fd2mm_f[0])
            meshmode_f = np.sin(group_nodes[d, :, :])

            # record fd -> mm error
            err = np.max(np.abs(fd2mm_f - meshmode_f))
            eoc_recorders[(True, d)].add_data_point(h, err)

            if not only_convert_bdy:
                # now transport mm -> fd
                meshmode_f_dofarr = discr.zeros(actx)
                meshmode_f_dofarr[0][:] = meshmode_f
                mm2fd_f = fdrake_connection.from_meshmode(meshmode_f_dofarr)
                # record mm -> fd error
                err = np.max(np.abs(fdrake_f.dat.data - mm2fd_f.dat.data))
                eoc_recorders[(False, d)].add_data_point(h, err)

    # assert that order is correct or error is "low enough"
    for ((fd2mm, d), eoc_rec) in eoc_recorders.items():
        print(
            "\nfiredrake -> meshmode: %s\nvector *x* -> *sin(x[%s])*\n" %
            (fd2mm, d), eoc_rec)
        assert (eoc_rec.order_estimate() >= fspace_degree
                or eoc_rec.max_error() < 2e-14)