Example #1
def test_dielectric(ctx_factory, qbx_order, op_class, mode, visualize=False):
    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)

    import logging
    logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nelements in [30, 50, 70]:
        # prevent sympy cache 'splosion
        from sympy.core.cache import clear_cache
        clear_cache()

        errs = run_dielectric_test(cl_ctx,
                                   queue,
                                   nelements=nelements,
                                   qbx_order=qbx_order,
                                   op_class=op_class,
                                   mode=mode,
                                   visualize=visualize)

        eoc_rec.add_data_point(1 / nelements, la.norm(list(errs), np.inf))

    print(eoc_rec)
    assert eoc_rec.order_estimate() > qbx_order - 0.5
Example #2
def test_pde_check_kernels(ctx_factory, knl_info, order=5):
    dim = knl_info.kernel.dim
    tctx = t.ToyContext(ctx_factory(), knl_info.kernel,
            extra_source_kwargs=knl_info.extra_kwargs)

    pt_src = t.PointSources(
            tctx,
            np.random.rand(dim, 50) - 0.5,
            np.ones(50))

    from pytools.convergence import EOCRecorder
    from sumpy.point_calculus import CalculusPatch
    eoc_rec = EOCRecorder()

    for h in [0.1, 0.05, 0.025]:
        cp = CalculusPatch(np.array([1, 0, 0])[:dim], h=h, order=order)
        pot = pt_src.eval(cp.points)

        pde = knl_info.pde_func(cp, pot)

        err = la.norm(pde)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert eoc_rec.order_estimate() > order - knl_info.nderivs + 1 - 0.1
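A note on the patch-residual check above: knl_info.pde_func evaluates the PDE residual of the computed potential on the CalculusPatch, whose differentiation operators are exact on low-degree polynomials. Below is a minimal, self-contained sketch of the same idea applied to a known harmonic function; it is not taken from any of the projects listed here and assumes sumpy's CalculusPatch.laplace helper.

import numpy as np
import numpy.linalg as la
from sumpy.point_calculus import CalculusPatch

# Patch of points around (1, 0) with spacing h; order controls the
# polynomial degree used for the patch's differentiation operators.
cp = CalculusPatch(np.array([1.0, 0.0]), h=0.05, order=4)
x, y = cp.points

u = x**2 - y**2              # harmonic: its Laplacian vanishes analytically
residual = cp.laplace(u)     # discrete Laplacian evaluated on the patch

# For a degree-2 polynomial the patch derivatives are exact up to roundoff.
assert la.norm(residual, np.inf) < 1e-10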
Example #3
def test_surface_mass_operator_inverse(actx_factory, name):
    actx = actx_factory()

    # {{{ cases

    if name == "2-1-ellipse":
        from mesh_data import EllipseMeshBuilder
        builder = EllipseMeshBuilder(radius=3.1, aspect_ratio=2.0)
    elif name == "spheroid":
        from mesh_data import SpheroidMeshBuilder
        builder = SpheroidMeshBuilder()
    else:
        raise ValueError("unknown geometry name: %s" % name)

    # }}}

    # {{{ convergence

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    for resolution in builder.resolutions:
        mesh = builder.get_mesh(resolution, builder.mesh_order)
        discr = DiscretizationCollection(actx, mesh, order=builder.order)
        volume_discr = discr.discr_from_dd(dof_desc.DD_VOLUME)

        logger.info("ndofs:     %d", volume_discr.ndofs)
        logger.info("nelements: %d", volume_discr.mesh.nelements)

        # {{{ compute inverse mass

        dd = dof_desc.DD_VOLUME
        sym_f = sym.cos(4.0 * sym.nodes(mesh.ambient_dim, dd)[0])
        sym_op = sym.InverseMassOperator(dd, dd)(sym.MassOperator(dd, dd)(
            sym.var("f")))

        f = bind(discr, sym_f)(actx)
        f_inv = bind(discr, sym_op)(actx, f=f)

        inv_error = bind(
            discr,
            sym.norm(2,
                     sym.var("x") - sym.var("y")) / sym.norm(2, sym.var("y")))(
                         actx, x=f_inv, y=f)

        # }}}

        h_max = bind(
            discr,
            sym.h_max_from_volume(discr.ambient_dim, dim=discr.dim,
                                  dd=dd))(actx)
        eoc.add_data_point(h_max, inv_error)

    # }}}

    logger.info("inverse mass error\n%s", str(eoc))

    # NOTE: both cases give 1.0e-16-ish at the moment, but just to be on the
    # safe side, choose a slightly larger tolerance
    assert eoc.max_error() < 1.0e-14
Example #4
def test_mean_curvature(ctx_factory,
                        discr_name,
                        resolutions,
                        discr_and_ref_mean_curvature_getter,
                        visualize=False):
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    actx = PyOpenCLArrayContext(queue)

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    for r in resolutions:
        discr, ref_mean_curvature = \
                discr_and_ref_mean_curvature_getter(actx, r)
        mean_curvature = bind(discr,
                              sym.mean_curvature(discr.ambient_dim))(actx)

        h = 1.0 / r
        from meshmode.dof_array import flat_norm
        h_error = flat_norm(mean_curvature - ref_mean_curvature, np.inf)
        eoc.add_data_point(h, h_error)
    print(eoc)

    order = min([g.order for g in discr.groups])
    assert eoc.order_estimate() > order - 1.1
Example #5
    def test_leapgen_integration_order(method, method_order):
        """Test that time integrators have correct order."""
        def exact_soln(t):
            return np.exp(-t)

        def rhs(t, y):
            return -np.exp(-t)

        from pytools.convergence import EOCRecorder
        integrator_eoc = EOCRecorder()

        dt = 1.0
        for refine in [1, 2, 4, 8]:
            dt = dt / refine
            t = 0
            state = exact_soln(t)

            t_final = 4
            step = 0

            (step, t, state) = \
                advance_state(rhs=rhs, timestepper=method, dt=dt,
                              state=state, t=t, t_final=t_final,
                              component_id="y")

            error = np.abs(state - exact_soln(t)) / exact_soln(t)
            integrator_eoc.add_data_point(dt, error)

        logger.info(f"Time Integrator EOC:\n = {integrator_eoc}")
        assert integrator_eoc.order_estimate() >= method_order - .1
Example #6
def test_reversed_chained_connection(actx_factory, ndim, mesh_name):
    actx = actx_factory()

    def run(nelements, order):
        discr = create_discretization(actx,
                                      ndim,
                                      nelements=nelements,
                                      order=order,
                                      mesh_name=mesh_name)

        threshold = 1.0
        connections = []
        conn = create_refined_connection(actx, discr, threshold=threshold)
        connections.append(conn)
        if ndim == 2:
            # NOTE: additional refinement makes the 3D meshes explode in size
            conn = create_refined_connection(actx,
                                             conn.to_discr,
                                             threshold=threshold)
            connections.append(conn)
            conn = create_refined_connection(actx,
                                             conn.to_discr,
                                             threshold=threshold)
            connections.append(conn)

        from meshmode.discretization.connection import \
                ChainedDiscretizationConnection
        chained = ChainedDiscretizationConnection(connections)
        from meshmode.discretization.connection import \
                L2ProjectionInverseDiscretizationConnection
        reverse = L2ProjectionInverseDiscretizationConnection(chained)

        # create test vector
        from_nodes = thaw(chained.from_discr.nodes(), actx)
        to_nodes = thaw(chained.to_discr.nodes(), actx)

        from_x = 0
        to_x = 0
        for d in range(ndim):
            from_x = from_x + actx.np.cos(from_nodes[d])**(d + 1)
            to_x = to_x + actx.np.cos(to_nodes[d])**(d + 1)

        from_interp = reverse(to_x)

        return (1.0 / nelements, flat_norm(from_interp - from_x, np.inf) /
                flat_norm(from_x, np.inf))

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    order = 4
    mesh_sizes = [16, 32, 48, 64, 96, 128]

    for n in mesh_sizes:
        h, error = run(n, order)
        eoc.add_data_point(h, error)

    print(eoc)

    assert eoc.order_estimate() > (order + 1 - 0.5)
Example #7
def test_mass_operator_inverse(actx_factory, name):
    actx = actx_factory()

    # {{{ cases

    import mesh_data
    if name == "2-1-ellipse":
        # curve
        builder = mesh_data.EllipseMeshBuilder(radius=3.1, aspect_ratio=2.0)
    elif name == "spheroid":
        # surface
        builder = mesh_data.SpheroidMeshBuilder()
    elif name.startswith("warped_rect"):
        builder = mesh_data.WarpedRectMeshBuilder(dim=int(name[-1]))

    else:
        raise ValueError("unknown geometry name: %s" % name)

    # }}}

    # {{{ inv(m) @ m == id

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    for resolution in builder.resolutions:
        mesh = builder.get_mesh(resolution, builder.mesh_order)
        dcoll = DiscretizationCollection(actx, mesh, order=builder.order)
        volume_discr = dcoll.discr_from_dd(dof_desc.DD_VOLUME)

        logger.info("ndofs:     %d", volume_discr.ndofs)
        logger.info("nelements: %d", volume_discr.mesh.nelements)

        # {{{ compute inverse mass

        def f(x):
            return actx.np.cos(4.0 * x[0])

        dd = dof_desc.DD_VOLUME
        x_volm = thaw(volume_discr.nodes(), actx)
        f_volm = f(x_volm)
        f_inv = op.inverse_mass(dcoll, op.mass(dcoll, dd, f_volm))

        inv_error = actx.to_numpy(
            op.norm(dcoll, f_volm - f_inv, 2) / op.norm(dcoll, f_volm, 2))

        # }}}

        # compute max element size
        from grudge.dt_utils import h_max_from_volume

        h_max = h_max_from_volume(dcoll)

        eoc.add_data_point(h_max, inv_error)

    logger.info("inverse mass error\n%s", str(eoc))

    # NOTE: both cases give 1.0e-16-ish at the moment, but just to be on the
    # safe side, choose a slightly larger tolerance
    assert eoc.max_error() < 1.0e-14
Example #8
def test_integration_order(integrator, method_order):
    """Test that time integrators have correct order."""
    def exact_soln(t):
        return np.exp(-t)

    def rhs(t, state):
        return -np.exp(-t)

    from pytools.convergence import EOCRecorder
    integrator_eoc = EOCRecorder()

    dt = 1.0
    for refine in [1, 2, 4, 8]:
        dt = dt / refine
        t = 0
        state = exact_soln(t)

        while t < 4:
            state = integrator(state, t, dt, rhs)
            t = t + dt

        error = np.abs(state - exact_soln(t)) / exact_soln(t)
        integrator_eoc.add_data_point(dt, error)

    logger.info(f"Time Integrator EOC:\n = {integrator_eoc}")
    assert integrator_eoc.order_estimate() >= method_order - .01
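For reference, the integrator argument in the test above is any callable with the (state, t, dt, rhs) signature used inside the while loop. A hedged, minimal example of such a stepper, a first-order forward-Euler step that is not part of the tested package, which would be exercised with method_order=1:

def euler_step(state, t, dt, rhs):
    # One forward-Euler step: first-order accurate in dt.
    return state + dt * rhs(t, state)

Higher-order schemes (e.g. classical RK4) plug into the same loop unchanged, with method_order set accordingly.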
Example #9
def get_convergence_data(method_class, problem, dts=_default_dts):
    from pytools.convergence import EOCRecorder

    eocrec = EOCRecorder()

    component_id = "y"

    for dt in dts:
        t = problem.t_start
        y = problem.initial()
        final_t = problem.t_end

        interp = method_class(function_map={
            "<func>f": problem,
        })
        interp.set_up(t_start=t, dt_start=dt, context={component_id: y})

        times = []
        values = []
        for event in interp.run(t_end=final_t):
            if isinstance(event, interp.StateComputed):
                assert event.component_id == component_id
                values.append(event.state_component[0])
                times.append(event.t)

        assert abs(times[-1] - final_t) / final_t < 0.1
        times = np.array(times)

        error = abs(values[-1] - problem.exact(final_t)[0])
        eocrec.add_data_point(dt, error)

    return eocrec
Example #10
def test_pde_check_kernels(ctx_factory, knl_info, order=5):
    dim = knl_info.kernel.dim
    tctx = t.ToyContext(ctx_factory(), knl_info.kernel,
            extra_source_kwargs=knl_info.extra_kwargs)

    pt_src = t.PointSources(
            tctx,
            np.random.rand(dim, 50) - 0.5,
            np.ones(50))

    from pytools.convergence import EOCRecorder
    from sumpy.point_calculus import CalculusPatch
    eoc_rec = EOCRecorder()

    for h in [0.1, 0.05, 0.025]:
        cp = CalculusPatch(np.array([1, 0, 0])[:dim], h=h, order=order)
        pot = pt_src.eval(cp.points)

        pde = knl_info.pde_func(cp, pot)

        err = la.norm(pde)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert eoc_rec.order_estimate() > order - knl_info.nderivs + 1 - 0.1
Example #11
def test_exterior_stokes(ctx_factory, ambient_dim, visualize=False):
    if visualize:
        logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    target_order = 3
    qbx_order = 3

    print(ambient_dim)
    if ambient_dim == 2:
        resolutions = [20, 35, 50]
    elif ambient_dim == 3:
        resolutions = [0, 1, 2]
    else:
        raise ValueError(f"unsupported dimension: {ambient_dim}")

    for resolution in resolutions:
        h_max, err = run_exterior_stokes(ctx_factory,
                                         ambient_dim=ambient_dim,
                                         target_order=target_order,
                                         qbx_order=qbx_order,
                                         resolution=resolution,
                                         visualize=visualize)

        eoc.add_data_point(h_max, err)

    print(eoc)

    # This convergence data is not as clean as it could be. See
    # https://github.com/inducer/pytential/pull/32
    # for some discussion.
    assert eoc.order_estimate() > target_order - 0.5
Example #12
def test_dielectric(ctx_getter, qbx_order, op_class, mode, visualize=False):
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    import logging
    logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nelements in [30, 50, 70]:
        # prevent sympy cache 'splosion
        from sympy.core.cache import clear_cache
        clear_cache()

        errs = run_dielectric_test(
                cl_ctx, queue,
                nelements=nelements, qbx_order=qbx_order,
                op_class=op_class, mode=mode,
                visualize=visualize)

        eoc_rec.add_data_point(1/nelements, la.norm(list(errs), np.inf))

    print(eoc_rec)
    assert eoc_rec.order_estimate() > qbx_order - 0.5
Example #13
def test_direct(ctx_factory):
    # This evaluates a single layer potential on a circle.
    logging.basicConfig(level=logging.INFO)

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    from sumpy.kernel import LaplaceKernel
    lknl = LaplaceKernel(2)

    order = 12

    from sumpy.qbx import LayerPotential
    from sumpy.expansion.local import LineTaylorLocalExpansion

    lpot = LayerPotential(ctx, [LineTaylorLocalExpansion(lknl, order)])

    mode_nr = 25

    from pytools.convergence import EOCRecorder

    eocrec = EOCRecorder()

    for n in [200, 300, 400]:
        t = np.linspace(0, 2 * np.pi, n, endpoint=False)
        unit_circle = np.exp(1j * t)
        unit_circle = np.array([unit_circle.real, unit_circle.imag])

        sigma = np.cos(mode_nr * t)
        eigval = 1 / (2 * mode_nr)

        result_ref = eigval * sigma

        h = 2 * np.pi / n

        targets = unit_circle
        sources = unit_circle

        radius = 7 * h
        centers = unit_circle * (1 - radius)

        expansion_radii = np.ones(n) * radius

        strengths = (sigma * h, )
        evt, (result_qbx, ) = lpot(queue,
                                   targets,
                                   sources,
                                   centers,
                                   strengths,
                                   expansion_radii=expansion_radii)

        eocrec.add_data_point(h, np.max(np.abs(result_ref - result_qbx)))

    print(eocrec)

    slack = 1.5
    assert eocrec.order_estimate() > order - slack
Example #14
def test_exterior_stokes_2d(ctx_factory, qbx_order=3):
    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nelements in [20, 50]:
        h_max, l2_err = run_exterior_stokes_2d(ctx_factory, nelements)
        eoc_rec.add_data_point(h_max, l2_err)

    print(eoc_rec)
    assert eoc_rec.order_estimate() >= qbx_order - 1
Example #15
def test_direct(ctx_getter):
    # This evaluates a single layer potential on a circle.
    logging.basicConfig(level=logging.INFO)

    ctx = ctx_getter()
    queue = cl.CommandQueue(ctx)

    from sumpy.kernel import LaplaceKernel
    lknl = LaplaceKernel(2)

    order = 12

    from sumpy.qbx import LayerPotential
    from sumpy.expansion.local import LineTaylorLocalExpansion

    lpot = LayerPotential(ctx, [LineTaylorLocalExpansion(lknl, order)])

    mode_nr = 25

    from pytools.convergence import EOCRecorder

    eocrec = EOCRecorder()

    for n in [200, 300, 400]:
        t = np.linspace(0, 2 * np.pi, n, endpoint=False)
        unit_circle = np.exp(1j * t)
        unit_circle = np.array([unit_circle.real, unit_circle.imag])

        sigma = np.cos(mode_nr * t)
        eigval = 1/(2*mode_nr)

        result_ref = eigval * sigma

        h = 2 * np.pi / n

        targets = unit_circle
        sources = unit_circle

        radius = 7 * h
        centers = unit_circle * (1 - radius)

        expansion_radii = np.ones(n) * radius

        strengths = (sigma * h,)
        evt, (result_qbx,) = lpot(queue, targets, sources, centers, strengths,
                expansion_radii=expansion_radii)

        eocrec.add_data_point(h, np.max(np.abs(result_ref - result_qbx)))

    print(eocrec)

    slack = 1.5
    assert eocrec.order_estimate() > order - slack
Example #16
def test_stresslet_identity(actx_factory, cls, visualize=False):
    if visualize:
        logging.basicConfig(level=logging.INFO)

    source_ovsmp = 4 if cls.func.ambient_dim == 2 else 8
    case = cls(fmm_backend=None,
            target_order=5, qbx_order=3, source_ovsmp=source_ovsmp)
    identity = StressletIdentity(case.ambient_dim)
    logger.info("\n%s", str(case))

    from pytools.convergence import EOCRecorder
    eocs = [EOCRecorder() for _ in range(case.ambient_dim)]

    for resolution in case.resolutions:
        h_max, errors = run_stokes_identity(
                actx_factory, case, identity,
                resolution=resolution,
                visualize=visualize)

        for eoc, e in zip(eocs, errors):
            eoc.add_data_point(h_max, e)

    for eoc in eocs:
        print(eoc.pretty_print(
            abscissa_format="%.8e",
            error_format="%.8e",
            eoc_format="%.2f"))

    for eoc in eocs:
        order = min(case.target_order, case.qbx_order)
        assert eoc.order_estimate() > order - 1.0
Example #17
def test_pde_check(dim, order=4):
    from sumpy.point_calculus import CalculusPatch
    from pytools.convergence import EOCRecorder

    for iaxis in range(dim):
        eoc_rec = EOCRecorder()
        for h in [0.1, 0.01, 0.001]:
            cp = CalculusPatch(np.array([3, 0, 0])[:dim], h=h, order=order)
            df_num = cp.diff(iaxis, np.sin(10*cp.points[iaxis]))
            df_true = 10*np.cos(10*cp.points[iaxis])

            err = la.norm(df_num-df_true)
            eoc_rec.add_data_point(h, err)

        print(eoc_rec)
        assert eoc_rec.order_estimate() > order-2-0.1
Example #18
def test_pde_check(dim, order=4):
    from sumpy.point_calculus import CalculusPatch
    from pytools.convergence import EOCRecorder

    for iaxis in range(dim):
        eoc_rec = EOCRecorder()
        for h in [0.1, 0.01, 0.001]:
            cp = CalculusPatch(np.array([3, 0, 0])[:dim], h=h, order=order)
            df_num = cp.diff(iaxis, np.sin(10 * cp.points[iaxis]))
            df_true = 10 * np.cos(10 * cp.points[iaxis])

            err = la.norm(df_num - df_true)
            eoc_rec.add_data_point(h, err)

        print(eoc_rec)
        assert eoc_rec.order_estimate() > order - 2 - 0.1
Example #19
def test_boundary_interpolation(ctx_getter):
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.mesh.io import generate_gmsh, FileSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from meshmode.discretization.connection import make_boundary_restriction

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 4

    for h in [1e-1, 3e-2, 1e-2]:
        print("BEGIN GEN")
        mesh = generate_gmsh(
                FileSource("blob-2d.step"), 2, order=order,
                force_ambient_dim=2,
                other_options=[
                    "-string", "Mesh.CharacteristicLengthMax = %s;" % h]
                )
        print("END GEN")

        vol_discr = Discretization(cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(order))
        print("h=%s -> %d elements" % (
                h, sum(mgrp.nelements for mgrp in mesh.groups)))

        x = vol_discr.nodes()[0].with_queue(queue)
        f = 0.1*cl.clmath.sin(30*x)

        bdry_mesh, bdry_discr, bdry_connection = make_boundary_restriction(
                queue, vol_discr, InterpolatoryQuadratureSimplexGroupFactory(order))

        bdry_x = bdry_discr.nodes()[0].with_queue(queue)
        bdry_f = 0.1*cl.clmath.sin(30*bdry_x)
        bdry_f_2 = bdry_connection(queue, f)

        err = la.norm((bdry_f-bdry_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert eoc_rec.order_estimate() >= order-0.5
Example #20
def test_strang_splitting(plot_solution=False):
    from leap.rk import LSRK4Method

    method1 = LSRK4Method("y", rhs_func_name="<func>y1")
    method2 = LSRK4Method("y", rhs_func_name="<func>y2")

    code1 = method1.generate()
    code2 = method2.generate()

    from leap.transform import strang_splitting

    code = strang_splitting(code1, code2, "primary")
    print(code)

    from utils import python_method_impl_codegen

    def exact(t):
        return np.exp(3j * t)

    from pytools.convergence import EOCRecorder
    eocrec = EOCRecorder()

    for dt in 2**-np.array(range(4, 7), dtype=np.float64):  # noqa pylint:disable=invalid-unary-operand-type
        interp = python_method_impl_codegen(code,
                                            function_map={
                                                "<func>y1":
                                                lambda t, y: 1j * y,
                                                "<func>y2":
                                                lambda t, y: 2j * y,
                                            })
        interp.set_up(t_start=0, dt_start=dt, context={"y": 1})

        final_t = 4
        times = []
        values = []
        for event in interp.run(t_end=final_t):
            if isinstance(event, interp.StateComputed):
                values.append(event.state_component)
                times.append(event.t)

        assert abs(times[-1] - final_t) / final_t < 0.1

        times = np.array(times)

        # Check that the timestep is preserved.
        assert np.allclose(np.diff(times), dt)

        if plot_solution:
            import matplotlib.pyplot as pt
            pt.plot(times, values, label="comp")
            pt.plot(times, exact(times), label="true")
            pt.show()

        error = abs(values[-1] - exact(final_t))
        eocrec.add_data_point(dt, error)

    print(eocrec.pretty_print())

    orderest = eocrec.estimate_order_of_convergence()[0, 1]
    assert orderest > 2 * 0.9
Example #21
def test_isentropic_vortex(actx_factory, order):
    """Advance the 2D isentropic vortex case in time with non-zero velocities
    using an RK4 timestepping scheme. Check the advanced field values against
    the exact/analytic expressions.

    This tests all parts of the Euler module working together, with results
    converging at the expected rates vs. the order.
    """
    actx = actx_factory()

    dim = 2

    from pytools.convergence import EOCRecorder

    eoc_rec = EOCRecorder()

    for nel_1d in [16, 32, 64]:
        from meshmode.mesh.generation import (
            generate_regular_rect_mesh, )

        mesh = generate_regular_rect_mesh(a=(-5.0, ) * dim,
                                          b=(5.0, ) * dim,
                                          nelements_per_axis=(nel_1d, ) * dim)

        exittol = 1.0
        t_final = 0.001
        cfl = 1.0
        vel = np.zeros(shape=(dim, ))
        orig = np.zeros(shape=(dim, ))
        vel[:dim] = 1.0
        dt = .0001
        initializer = Vortex2D(center=orig, velocity=vel)
        casename = "Vortex"
        boundaries = {BTAG_ALL: PrescribedBoundary(initializer)}
        eos = IdealSingleGas()
        t = 0
        flowparams = {
            "dim": dim,
            "dt": dt,
            "order": order,
            "time": t,
            "boundaries": boundaries,
            "initializer": initializer,
            "eos": eos,
            "casename": casename,
            "mesh": mesh,
            "tfinal": t_final,
            "exittol": exittol,
            "cfl": cfl,
            "constantcfl": False,
            "nstatus": 0
        }
        maxerr = _euler_flow_stepper(actx, flowparams)
        eoc_rec.add_data_point(1.0 / nel_1d, maxerr)

    logger.info(f"Error for (dim,order) = ({dim},{order}):\n" f"{eoc_rec}")

    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < 1e-11)
Example #22
def check_simple_convergence(method,
                             method_impl,
                             expected_order,
                             problem=DefaultProblem(),
                             dts=_default_dts,
                             show_dag=False,
                             plot_solution=False):
    component_id = method.component_id
    code = method.generate()
    print(code)

    if show_dag:
        from dagrt.language import show_dependency_graph
        show_dependency_graph(code)

    from pytools.convergence import EOCRecorder
    eocrec = EOCRecorder()

    for dt in dts:
        t = problem.t_start
        y = problem.initial()
        final_t = problem.t_end

        interp = method_impl(code,
                             function_map={
                                 "<func>" + component_id: problem,
                             })
        interp.set_up(t_start=t, dt_start=dt, context={component_id: y})

        times = []
        values = []
        for event in interp.run(t_end=final_t):
            if isinstance(event, interp.StateComputed):
                assert event.component_id == component_id
                values.append(event.state_component[0])
                times.append(event.t)

        assert abs(times[-1] - final_t) / final_t < 0.1

        times = np.array(times)

        if plot_solution:
            import matplotlib.pyplot as pt
            pt.plot(times, values, label="comp")
            pt.plot(times, problem.exact(times), label="true")
            pt.show()

        error = abs(values[-1] - problem.exact(final_t))
        eocrec.add_data_point(dt, error)

    print("------------------------------------------------------")
    print("%s: expected order %d" %
          (method.__class__.__name__, expected_order))
    print("------------------------------------------------------")
    print(eocrec.pretty_print())

    orderest = eocrec.estimate_order_of_convergence()[0, 1]
    assert orderest > expected_order * 0.9
Example #23
def test_wave_accuracy(actx_factory, problem, order, visualize=False):
    """Checks accuracy of the wave operator for a given problem setup.
    """
    actx = actx_factory()

    p = problem

    sym_u, sym_v, sym_f, sym_rhs = sym_wave(p.dim, p.sym_phi)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for n in [8, 10, 12] if p.dim == 3 else [8, 12, 16]:
        mesh = p.mesh_factory(n)

        from grudge.eager import EagerDGDiscretization
        discr = EagerDGDiscretization(actx, mesh, order=order)

        nodes = thaw(actx, discr.nodes())

        def sym_eval(expr, t):
            return sym.EvaluationMapper({"c": p.c, "x": nodes, "t": t})(expr)

        t_check = 1.23456789

        u = sym_eval(sym_u, t_check)
        v = sym_eval(sym_v, t_check)

        fields = flat_obj_array(u, v)

        rhs = wave_operator(discr, c=p.c, w=fields)
        rhs[0] = rhs[0] + sym_eval(sym_f, t_check)

        expected_rhs = sym_eval(sym_rhs, t_check)

        rel_linf_err = actx.to_numpy(
            discr.norm(rhs - expected_rhs, np.inf) /
            discr.norm(expected_rhs, np.inf))
        eoc_rec.add_data_point(1. / n, rel_linf_err)

        if visualize:
            from grudge.shortcuts import make_visualizer
            vis = make_visualizer(discr, discr.order)
            vis.write_vtk_file(
                "wave_accuracy_{order}_{n}.vtu".format(order=order, n=n), [
                    ("u", fields[0]),
                    ("v", fields[1:]),
                    ("rhs_u_actual", rhs[0]),
                    ("rhs_v_actual", rhs[1:]),
                    ("rhs_u_expected", expected_rhs[0]),
                    ("rhs_v_expected", expected_rhs[1:]),
                ])

    print("Approximation error:")
    print(eoc_rec)
    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < 1e-11)
Example #24
def test_integral_equation(ctx_getter, case, visualize=False):
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    if USE_SYMENGINE and case.fmm_backend is None:
        pytest.skip("https://gitlab.tiker.net/inducer/sumpy/issues/25")

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    from pytools.convergence import EOCRecorder
    print("qbx_order: %d, %s" % (case.qbx_order, case))

    eoc_rec_target = EOCRecorder()
    eoc_rec_td = EOCRecorder()

    have_error_data = False
    for resolution in case.resolutions:
        result = run_int_eq_test(cl_ctx, queue, case, resolution,
                visualize=visualize)

        if result.rel_err_2 is not None:
            have_error_data = True
            eoc_rec_target.add_data_point(result.h_max, result.rel_err_2)

        if result.rel_td_err_inf is not None:
            eoc_rec_td.add_data_point(result.h_max, result.rel_td_err_inf)

    if case.bc_type == "dirichlet":
        tgt_order = case.qbx_order
    elif case.bc_type == "neumann":
        tgt_order = case.qbx_order-1
    else:
        assert False

    if have_error_data:
        print("TARGET ERROR:")
        print(eoc_rec_target)
        assert eoc_rec_target.order_estimate() > tgt_order - 1.3

        if case.check_tangential_deriv:
            print("TANGENTIAL DERIVATIVE ERROR:")
            print(eoc_rec_td)
            assert eoc_rec_td.order_estimate() > tgt_order - 2.3
Example #25
def test_convergence(python_method_impl, problem, method, expected_order):
    pytest.importorskip("scipy")

    code = method.generate()

    from leap.implicit import replace_AssignImplicit
    code = replace_AssignImplicit(code, {"solve": solver_hook})

    from pytools.convergence import EOCRecorder
    eocrec = EOCRecorder()

    for n in range(2, 7):
        dt = 2**(-n)

        y_0 = problem.initial()
        t_start = problem.t_start
        t_end = problem.t_end

        from functools import partial
        interp = python_method_impl(code,
                                    function_map={
                                        "<func>expl_y":
                                        problem.nonstiff,
                                        "<func>impl_y":
                                        problem.stiff,
                                        "<func>solver":
                                        partial(solver, problem.stiff),
                                    })

        interp.set_up(t_start=t_start, dt_start=dt, context={"y": y_0})

        times = []
        values = []

        for event in interp.run(t_end=t_end):
            if isinstance(event, interp.StateComputed):
                values.append(event.state_component)
                times.append(event.t)

        times = np.array(times)
        values = np.array(values)

        assert abs(times[-1] - t_end) < 1e-10

        error = np.linalg.norm(values[-1] - problem.exact(t_end))
        eocrec.add_data_point(dt, error)

    print("------------------------------------------------------")
    print("expected order %d" % expected_order)
    print("------------------------------------------------------")
    print(eocrec.pretty_print())

    orderest = eocrec.estimate_order_of_convergence()[0, 1]
    assert orderest > 0.9 * expected_order
Example #26
def test_lump_rhs(actx_factory, dim, order):
    """Test the inviscid rhs using the non-trivial mass lump case.

    The case is tested against the analytic expressions of the RHS.
    Checks several different orders and refinement levels to check error behavior.
    """
    actx = actx_factory()

    tolerance = 1e-10
    maxxerr = 0.0

    from pytools.convergence import EOCRecorder

    eoc_rec = EOCRecorder()

    for nel_1d in [4, 8, 12]:
        from meshmode.mesh.generation import (
            generate_regular_rect_mesh, )

        mesh = generate_regular_rect_mesh(
            a=(-5, ) * dim,
            b=(5, ) * dim,
            nelements_per_axis=(nel_1d, ) * dim,
        )

        logger.info(f"Number of elements: {mesh.nelements}")

        discr = EagerDGDiscretization(actx, mesh, order=order)
        nodes = thaw(actx, discr.nodes())

        # Init soln with Lump and expected RHS = 0
        center = np.zeros(shape=(dim, ))
        velocity = np.zeros(shape=(dim, ))
        lump = Lump(dim=dim, center=center, velocity=velocity)
        lump_soln = lump(nodes)
        boundaries = {
            BTAG_ALL: PrescribedInviscidBoundary(fluid_solution_func=lump)
        }
        inviscid_rhs = euler_operator(discr,
                                      eos=IdealSingleGas(),
                                      boundaries=boundaries,
                                      cv=lump_soln,
                                      time=0.0)
        expected_rhs = lump.exact_rhs(discr, cv=lump_soln, time=0)

        err_max = discr.norm((inviscid_rhs - expected_rhs).join(), np.inf)
        if err_max > maxxerr:
            maxxerr = err_max

        eoc_rec.add_data_point(1.0 / nel_1d, err_max)
    logger.info(f"Max error: {maxxerr}")

    logger.info(f"Error for (dim,order) = ({dim},{order}):\n" f"{eoc_rec}")

    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < tolerance)
Example #27
    def conv_test(descr, use_quad):
        logger.info("-" * 75)
        logger.info(descr)
        logger.info("-" * 75)
        eoc_rec = EOCRecorder()

        if use_quad:
            qtag = dof_desc.DISCR_TAG_QUAD
        else:
            qtag = None

        ns = [20, 25]
        for n in ns:
            mesh = mgen.generate_regular_rect_mesh(a=(-0.5, ) * dims,
                                                   b=(0.5, ) * dims,
                                                   nelements_per_axis=(n, ) *
                                                   dims,
                                                   order=order)

            if use_quad:
                discr_tag_to_group_factory = {
                    qtag: QuadratureSimplexGroupFactory(order=4 * order)
                }
            else:
                discr_tag_to_group_factory = {}

            dcoll = DiscretizationCollection(
                actx,
                mesh,
                order=order,
                discr_tag_to_group_factory=discr_tag_to_group_factory)

            nodes = thaw(dcoll.nodes(), actx)

            def zero_inflow(dtag, t=0):
                dd = dof_desc.DOFDesc(dtag, qtag)
                return dcoll.discr_from_dd(dd).zeros(actx)

            adv_op = VariableCoefficientAdvectionOperator(
                dcoll,
                flat_obj_array(-1 * nodes[1], nodes[0]),
                inflow_u=lambda t: zero_inflow(BTAG_ALL, t=t),
                flux_type="upwind",
                quad_tag=qtag)

            total_error = op.norm(dcoll,
                                  adv_op.operator(0, gaussian_mode(nodes)), 2)
            eoc_rec.add_data_point(1.0 / n, actx.to_numpy(total_error))

        logger.info(
            "\n%s",
            eoc_rec.pretty_print(abscissa_label="h", error_label="L2 Error"))

        return eoc_rec.order_estimate(), np.array(
            [x[1] for x in eoc_rec.history])
Example #28
def test_velocity_gradient_eoc(actx_factory, dim):
    """Test that the velocity gradient converges at the proper rate."""
    from mirgecom.fluid import velocity_gradient
    actx = actx_factory()

    order = 3

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    nel_1d_0 = 4
    for hn1 in [1, 2, 3, 4]:

        nel_1d = hn1 * nel_1d_0
        h = 1/nel_1d

        from meshmode.mesh.generation import generate_regular_rect_mesh
        mesh = generate_regular_rect_mesh(
            a=(1.0,) * dim, b=(2.0,) * dim, nelements_per_axis=(nel_1d,) * dim
        )

        discr = EagerDGDiscretization(actx, mesh, order=order)
        nodes = thaw(actx, discr.nodes())
        zeros = discr.zeros(actx)
        energy = zeros + 2.5

        mass = nodes[dim-1]*nodes[dim-1]
        velocity = make_obj_array([actx.np.cos(nodes[i]) for i in range(dim)])
        mom = mass*velocity

        q = join_conserved(dim, mass=mass, energy=energy, momentum=mom)
        cv = split_conserved(dim, q)

        grad_q = obj_array_vectorize(discr.grad, q)
        grad_cv = split_conserved(dim, grad_q)

        grad_v = velocity_gradient(discr, cv, grad_cv)

        def exact_grad_row(xdata, gdim, dim):
            exact_grad_row = make_obj_array([zeros for _ in range(dim)])
            exact_grad_row[gdim] = -actx.np.sin(xdata)
            return exact_grad_row

        comp_err = make_obj_array([
            discr.norm(grad_v[i] - exact_grad_row(nodes[i], i, dim), np.inf)
            for i in range(dim)])
        err_max = comp_err.max()
        eoc.add_data_point(h, err_max)

    logger.info(eoc)
    assert (
        eoc.order_estimate() >= order - 0.5
        or eoc.max_error() < 1e-9
    )
Example #29
def test_integral_equation(
        ctx_getter, curve_name, curve_f, qbx_order, bc_type, loc_sign, k,
        target_order=7, source_order=None):
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    from pytools.convergence import EOCRecorder
    print(("curve_name: %s, qbx_order: %d, bc_type: %s, loc_sign: %s, "
            "helmholtz_k: %s"
            % (curve_name, qbx_order, bc_type, loc_sign, k)))

    eoc_rec_target = EOCRecorder()
    eoc_rec_td = EOCRecorder()

    for nelements in [30, 40, 50]:
        result = run_int_eq_test(
                cl_ctx, queue, curve_f, nelements, qbx_order,
                bc_type, loc_sign, k, target_order=target_order,
                source_order=source_order)

        eoc_rec_target.add_data_point(1/nelements, result.rel_err_2)
        eoc_rec_td.add_data_point(1/nelements, result.rel_td_err_inf)

    if bc_type == "dirichlet":
        tgt_order = qbx_order
    elif bc_type == "neumann":
        tgt_order = qbx_order-1
    else:
        assert False

    print("TARGET ERROR:")
    print(eoc_rec_target)
    assert eoc_rec_target.order_estimate() > tgt_order - 1.3

    print("TANGENTIAL DERIVATIVE ERROR:")
    print(eoc_rec_td)
    assert eoc_rec_td.order_estimate() > tgt_order - 2.3
Example #30
def test_exterior_stokes(actx_factory, ambient_dim, visualize=False):
    if visualize:
        logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    eocs = [EOCRecorder() for _ in range(ambient_dim)]

    target_order = 5
    source_ovsmp = 4
    qbx_order = 3

    if ambient_dim == 2:
        resolutions = [20, 35, 50]
    elif ambient_dim == 3:
        resolutions = [0, 1, 2]
    else:
        raise ValueError(f"unsupported dimension: {ambient_dim}")

    for resolution in resolutions:
        h_max, errors = run_exterior_stokes(actx_factory,
                ambient_dim=ambient_dim,
                target_order=target_order,
                qbx_order=qbx_order,
                source_ovsmp=source_ovsmp,
                resolution=resolution,
                visualize=visualize)

        for eoc, e in zip(eocs, errors):
            eoc.add_data_point(h_max, e)

    for eoc in eocs:
        print(eoc.pretty_print(
            abscissa_format="%.8e",
            error_format="%.8e",
            eoc_format="%.2f"))

    for eoc in eocs:
        # This convergence data is not as clean as it could be. See
        # https://github.com/inducer/pytential/pull/32
        # for some discussion.
        order = min(target_order, qbx_order)
        assert eoc.order_estimate() > order - 0.5
Example #31
def test_eoc():
    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    for i in range(1, 8):
        eoc.add_data_point(1.0 / i, 10**(-i))

    p = eoc.pretty_print()
    print(p)
    print()

    p = eoc.pretty_print(abscissa_format="%.5e",
                         error_format="%.5e",
                         eoc_format="%5.2f")
    print(p)
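The example above exercises only the printing paths. As a hedged complement (not from pytools' own test suite), the sketch below shows the estimation path: for data following error ~ C * h**p, order_estimate() recovers p.

from pytools.convergence import EOCRecorder

eoc = EOCRecorder()
p = 3  # convergence order of the synthetic data
for h in [0.1, 0.05, 0.025, 0.0125]:
    eoc.add_data_point(h, 2.0 * h**p)

print(eoc)  # prints the convergence table, as in the examples above
assert abs(eoc.order_estimate() - p) < 0.01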
Example #32
def test_vortex_rhs(actx_factory, order):
    """Tests the inviscid rhs using the non-trivial
    2D isentropic vortex case configured to yield
    rhs = 0. Checks several different orders
    and refinement levels to check error
    behavior.
    """
    actx = actx_factory()

    dim = 2

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    from meshmode.mesh.generation import generate_regular_rect_mesh

    for nel_1d in [16, 32, 64]:

        mesh = generate_regular_rect_mesh(
            a=(-5, ) * dim,
            b=(5, ) * dim,
            n=(nel_1d, ) * dim,
        )

        logger.info(f"Number of {dim}d elements:  {mesh.nelements}")

        discr = EagerDGDiscretization(actx, mesh, order=order)
        nodes = thaw(actx, discr.nodes())

        # Init soln with Vortex and expected RHS = 0
        vortex = Vortex2D(center=[0, 0], velocity=[0, 0])
        vortex_soln = vortex(0, nodes)
        boundaries = {BTAG_ALL: PrescribedBoundary(vortex)}

        inviscid_rhs = inviscid_operator(discr,
                                         eos=IdealSingleGas(),
                                         boundaries=boundaries,
                                         q=vortex_soln,
                                         t=0.0)

        err_max = discr.norm(inviscid_rhs, np.inf)
        eoc_rec.add_data_point(1.0 / nel_1d, err_max)

    message = (f"Error for (dim,order) = ({dim},{order}):\n" f"{eoc_rec}")
    logger.info(message)

    assert (eoc_rec.order_estimate() >= order - 0.5
            or eoc_rec.max_error() < 1e-11)
Example #33
def test_tri_diff_mat(ctx_factory, dim, order=4):
    """Check differentiation matrix along the coordinate axes on a disk

    Uses sines as the function to differentiate.
    """

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue)

    from meshmode.mesh.generation import generate_regular_rect_mesh

    from pytools.convergence import EOCRecorder
    axis_eoc_recs = [EOCRecorder() for axis in range(dim)]

    for n in [10, 20]:
        mesh = generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                          b=(0.5, ) * dim,
                                          n=(n, ) * dim,
                                          order=4)

        discr = DGDiscretizationWithBoundaries(actx, mesh, order=4)
        nabla = sym.nabla(dim)

        for axis in range(dim):
            x = sym.nodes(dim)

            f = bind(discr, sym.sin(3 * x[axis]))(actx)
            df = bind(discr, 3 * sym.cos(3 * x[axis]))(actx)

            sym_op = nabla[axis](sym.var("f"))
            bound_op = bind(discr, sym_op)
            df_num = bound_op(f=f)

            linf_error = flat_norm(df_num - df, np.inf)
            axis_eoc_recs[axis].add_data_point(1 / n, linf_error)

    for axis, eoc_rec in enumerate(axis_eoc_recs):
        logger.info("axis %d\n%s", axis, eoc_rec)
        assert eoc_rec.order_estimate() >= order
Example #34
def test_tri_diff_mat(actx_factory, dim, order=4):
    """Check differentiation matrix along the coordinate axes on a disk

    Uses sines as the function to differentiate.
    """

    actx = actx_factory()

    from pytools.convergence import EOCRecorder
    axis_eoc_recs = [EOCRecorder() for axis in range(dim)]

    def f(x, axis):
        return actx.np.sin(3 * x[axis])

    def df(x, axis):
        return 3 * actx.np.cos(3 * x[axis])

    for n in [4, 8, 16]:
        mesh = mgen.generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                               b=(0.5, ) * dim,
                                               nelements_per_axis=(n, ) * dim,
                                               order=4)

        dcoll = DiscretizationCollection(actx, mesh, order=4)
        volume_discr = dcoll.discr_from_dd(dof_desc.DD_VOLUME)
        x = thaw(volume_discr.nodes(), actx)

        for axis in range(dim):
            df_num = op.local_grad(dcoll, f(x, axis))[axis]
            df_volm = df(x, axis)

            linf_error = flat_norm(df_num - df_volm, ord=np.inf)
            axis_eoc_recs[axis].add_data_point(1 / n,
                                               actx.to_numpy(linf_error))

    for axis, eoc_rec in enumerate(axis_eoc_recs):
        logger.info("axis %d\n%s", axis, eoc_rec)
        assert eoc_rec.order_estimate() > order - 0.25
Example #35
def test_tri_diff_mat(actx_factory, dim, order=4):
    """Check differentiation matrix along the coordinate axes on a disk

    Uses sines as the function to differentiate.
    """

    actx = actx_factory()

    from pytools.convergence import EOCRecorder
    axis_eoc_recs = [EOCRecorder() for axis in range(dim)]

    for n in [4, 8, 16]:
        mesh = mgen.generate_regular_rect_mesh(a=(-0.5, ) * dim,
                                               b=(0.5, ) * dim,
                                               nelements_per_axis=(n, ) * dim,
                                               order=4)

        discr = DiscretizationCollection(actx, mesh, order=4)
        nabla = sym.nabla(dim)

        for axis in range(dim):
            x = sym.nodes(dim)

            f = bind(discr, sym.sin(3 * x[axis]))(actx)
            df = bind(discr, 3 * sym.cos(3 * x[axis]))(actx)

            sym_op = nabla[axis](sym.var("f"))
            bound_op = bind(discr, sym_op)
            df_num = bound_op(f=f)

            linf_error = actx.np.linalg.norm(df_num - df, ord=np.inf)
            axis_eoc_recs[axis].add_data_point(1 / n, linf_error)

    for axis, eoc_rec in enumerate(axis_eoc_recs):
        logger.info("axis %d\n%s", axis, eoc_rec)
        assert eoc_rec.order_estimate() > order - 0.25
Example #36
def test_basis_grad(dim, shape_cls, order, basis_getter):
    """Do a simplistic FD-style check on the gradients of the basis."""

    h = 1.0e-4

    shape = shape_cls(dim)
    rng = np.random.Generator(np.random.PCG64(17))
    basis = basis_getter(mp.space_for_shape(shape, order), shape)

    from pytools.convergence import EOCRecorder
    from pytools import wandering_element
    for i_bf, (bf, gradbf) in enumerate(zip(
            basis.functions,
            basis.gradients,
    )):
        eoc_rec = EOCRecorder()
        for h in [1e-2, 1e-3]:
            r = mp.random_nodes_for_shape(shape, nnodes=1000, rng=rng)

            gradbf_v = np.array(gradbf(r))
            gradbf_v_num = np.array([
                (bf(r + h * unit) - bf(r - h * unit)) / (2 * h)
                for unit_tuple in wandering_element(shape.dim)
                for unit in (np.array(unit_tuple).reshape(-1, 1), )
            ])

            ref_norm = la.norm((gradbf_v).reshape(-1), np.inf)
            err = la.norm((gradbf_v_num - gradbf_v).reshape(-1), np.inf)
            if ref_norm > 1e-13:
                err = err / ref_norm

            logger.info("error: %.5", err)
            eoc_rec.add_data_point(h, err)

        tol = 1e-8
        if eoc_rec.max_error() >= tol:
            print(eoc_rec)
        assert (eoc_rec.max_error() < tol or eoc_rec.order_estimate() >= 1.5)
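The finite-difference check above uses the centered difference (bf(r + h*unit) - bf(r - h*unit)) / (2*h), which approximates the directional derivative with O(h**2) error; that second-order behavior is what the order_estimate() >= 1.5 fallback allows for. A standalone sketch of that rate (independent of modepy, with a hypothetical test function):

import numpy as np
from pytools.convergence import EOCRecorder

eoc = EOCRecorder()
x0 = 0.3
for h in [1e-1, 1e-2, 1e-3]:
    # centered difference of sin at x0; the exact derivative is cos(x0)
    fd = (np.sin(x0 + h) - np.sin(x0 - h)) / (2 * h)
    eoc.add_data_point(h, abs(fd - np.cos(x0)))

assert eoc.order_estimate() > 1.9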
Example #37
    def conv_test(descr, use_quad):
        logger.info("-" * 75)
        logger.info(descr)
        logger.info("-" * 75)
        eoc_rec = EOCRecorder()

        ns = [20, 25]
        for n in ns:
            mesh = mgen.generate_regular_rect_mesh(a=(-0.5, ) * dims,
                                                   b=(0.5, ) * dims,
                                                   nelements_per_axis=(n, ) *
                                                   dims,
                                                   order=order)

            if use_quad:
                discr_tag_to_group_factory = {
                    "product": QuadratureSimplexGroupFactory(order=4 * order)
                }
            else:
                discr_tag_to_group_factory = {"product": None}

            discr = DiscretizationCollection(
                actx,
                mesh,
                order=order,
                discr_tag_to_group_factory=discr_tag_to_group_factory)

            bound_op = bind(discr, op.sym_operator())
            fields = bind(discr, gaussian_mode())(actx, t=0)
            norm = bind(discr, sym.norm(2, sym.var("u")))

            esc = bound_op(u=fields)
            total_error = norm(u=esc)
            eoc_rec.add_data_point(1.0 / n, total_error)

        logger.info(
            "\n%s",
            eoc_rec.pretty_print(abscissa_label="h", error_label="L2 Error"))

        return eoc_rec.order_estimate(), np.array(
            [x[1] for x in eoc_rec.history])
Example #38
def test_sphere_eigenvalues(ctx_getter, mode_m, mode_n, qbx_order,
        fmm_backend):
    logging.basicConfig(level=logging.INFO)

    special = pytest.importorskip("scipy.special")

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    target_order = 8

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from pytential.qbx import QBXLayerPotentialSource
    from pytools.convergence import EOCRecorder

    s_eoc_rec = EOCRecorder()
    d_eoc_rec = EOCRecorder()
    sp_eoc_rec = EOCRecorder()
    dp_eoc_rec = EOCRecorder()

    def rel_err(comp, ref):
        return (
                norm(density_discr, queue, comp - ref)
                / norm(density_discr, queue, ref))

    for nrefinements in [0, 1]:
        from meshmode.mesh.generation import generate_icosphere
        mesh = generate_icosphere(1, target_order)
        from meshmode.mesh.refinement import Refiner

        refiner = Refiner(mesh)
        for i in range(nrefinements):
            flags = np.ones(mesh.nelements, dtype=bool)
            refiner.refine(flags)
            mesh = refiner.get_current_mesh()

        pre_density_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))
        qbx, _ = QBXLayerPotentialSource(
                pre_density_discr, 4*target_order,
                qbx_order, fmm_order=6,
                fmm_backend=fmm_backend,
                ).with_refinement()

        density_discr = qbx.density_discr
        nodes = density_discr.nodes().with_queue(queue)
        r = cl.clmath.sqrt(nodes[0]**2 + nodes[1]**2 + nodes[2]**2)
        phi = cl.clmath.acos(nodes[2]/r)
        theta = cl.clmath.atan2(nodes[0], nodes[1])

        ymn = cl.array.to_device(queue,
                special.sph_harm(mode_m, mode_n, theta.get(), phi.get()))

        from sumpy.kernel import LaplaceKernel
        lap_knl = LaplaceKernel(3)

        # {{{ single layer

        s_sigma_op = bind(qbx, sym.S(lap_knl, sym.var("sigma")))
        s_sigma = s_sigma_op(queue=queue, sigma=ymn)
        s_eigval = 1/(2*mode_n + 1)
        s_eoc_rec.add_data_point(qbx.h_max, rel_err(s_sigma, s_eigval*ymn))

        # }}}

        # {{{ double layer

        d_sigma_op = bind(qbx, sym.D(lap_knl, sym.var("sigma")))
        d_sigma = d_sigma_op(queue=queue, sigma=ymn)
        d_eigval = -1/(2*(2*mode_n + 1))
        d_eoc_rec.add_data_point(qbx.h_max, rel_err(d_sigma, d_eigval*ymn))

        # }}}

        # {{{ S'

        sp_sigma_op = bind(qbx, sym.Sp(lap_knl, sym.var("sigma")))
        sp_sigma = sp_sigma_op(queue=queue, sigma=ymn)
        sp_eigval = -1/(2*(2*mode_n + 1))
        sp_eoc_rec.add_data_point(qbx.h_max, rel_err(sp_sigma, sp_eigval*ymn))

        # }}}

        # {{{ D'

        dp_sigma_op = bind(qbx, sym.Dp(lap_knl, sym.var("sigma")))
        dp_sigma = dp_sigma_op(queue=queue, sigma=ymn)
        dp_eigval = -(mode_n*(mode_n+1))/(2*mode_n + 1)
        dp_eoc_rec.add_data_point(qbx.h_max, rel_err(dp_sigma, dp_eigval*ymn))

        # }}}

    print("Errors for S:")
    print(s_eoc_rec)
    required_order = qbx_order + 1
    assert s_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D:")
    print(d_eoc_rec)
    required_order = qbx_order
    assert d_eoc_rec.order_estimate() > required_order - 0.5

    print("Errors for S':")
    print(sp_eoc_rec)
    required_order = qbx_order
    assert sp_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D':")
    print(dp_eoc_rec)
    required_order = qbx_order
    assert dp_eoc_rec.order_estimate() > required_order - 1.5
Example #39
def test_all_faces_interpolation(ctx_getter, mesh_name, dim, mesh_pars,
        per_face_groups):
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_face_restriction, make_face_to_all_faces_embedding,
            check_connection)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 4

    def f(x):
        return 0.1*cl.clmath.sin(30*x)

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "blob":
            assert dim == 2

            h = mesh_par

            from meshmode.mesh.io import generate_gmsh, FileSource
            print("BEGIN GEN")
            mesh = generate_gmsh(
                    FileSource("blob-2d.step"), 2, order=order,
                    force_ambient_dim=2,
                    other_options=[
                        "-string", "Mesh.CharacteristicLengthMax = %s;" % h]
                    )
            print("END GEN")
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=4, n=mesh_par)

            h = 1/mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        vol_discr = Discretization(cl_ctx, mesh,
                PolynomialWarpAndBlendGroupFactory(order))
        print("h=%s -> %d elements" % (
                h, sum(mgrp.nelements for mgrp in mesh.groups)))

        all_face_bdry_connection = make_face_restriction(
                vol_discr, PolynomialWarpAndBlendGroupFactory(order),
                FRESTR_ALL_FACES, per_face_groups=per_face_groups)
        all_face_bdry_discr = all_face_bdry_connection.to_discr

        for ito_grp, ceg in enumerate(all_face_bdry_connection.groups):
            for ibatch, batch in enumerate(ceg.batches):
                assert np.array_equal(
                        batch.from_element_indices.get(queue),
                        np.arange(vol_discr.mesh.nelements))

                if per_face_groups:
                    assert ito_grp == batch.to_element_face
                else:
                    assert ibatch == batch.to_element_face

        all_face_x = all_face_bdry_discr.nodes()[0].with_queue(queue)
        all_face_f = f(all_face_x)

        all_face_f_2 = all_face_bdry_discr.zeros(queue)

        for boundary_tag in [
                BTAG_ALL,
                FRESTR_INTERIOR_FACES,
                ]:
            bdry_connection = make_face_restriction(
                    vol_discr, PolynomialWarpAndBlendGroupFactory(order),
                    boundary_tag, per_face_groups=per_face_groups)
            bdry_discr = bdry_connection.to_discr

            bdry_x = bdry_discr.nodes()[0].with_queue(queue)
            bdry_f = f(bdry_x)

            all_face_embedding = make_face_to_all_faces_embedding(
                    bdry_connection, all_face_bdry_discr)

            check_connection(all_face_embedding)

            all_face_f_2 += all_face_embedding(queue, bdry_f)

        err = la.norm((all_face_f-all_face_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert (
            eoc_rec.order_estimate() >= order-0.5
            or eoc_rec.max_error() < 1e-14)
Example #40
def test_reversed_chained_connection(ctx_factory, ndim, mesh_name, visualize=False):
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    def run(nelements, order):
        discr = create_discretization(queue, ndim,
            nelements=nelements,
            order=order,
            mesh_name=mesh_name)

        threshold = 1.0
        connections = []
        conn = create_refined_connection(queue,
                discr, threshold=threshold)
        connections.append(conn)
        if ndim == 2:
            # NOTE: additional refinement makes the 3D meshes explode in size
            conn = create_refined_connection(queue,
                    conn.to_discr, threshold=threshold)
            connections.append(conn)
            conn = create_refined_connection(queue,
                    conn.to_discr, threshold=threshold)
            connections.append(conn)

        from meshmode.discretization.connection import \
                ChainedDiscretizationConnection
        chained = ChainedDiscretizationConnection(connections)
        from meshmode.discretization.connection import \
                L2ProjectionInverseDiscretizationConnection
        reverse = L2ProjectionInverseDiscretizationConnection(chained)

        # create test vector
        from_nodes = chained.from_discr.nodes().with_queue(queue)
        to_nodes = chained.to_discr.nodes().with_queue(queue)

        from_x = 0
        to_x = 0
        for d in range(ndim):
            from_x += cl.clmath.cos(from_nodes[d]) ** (d + 1)
            to_x += cl.clmath.cos(to_nodes[d]) ** (d + 1)

        from_interp = reverse(queue, to_x)

        from_interp = from_interp.get(queue)
        from_x = from_x.get(queue)

        return 1.0 / nelements, la.norm(from_interp - from_x) / la.norm(from_x)

    from pytools.convergence import EOCRecorder
    eoc = EOCRecorder()

    order = 4
    mesh_sizes = [16, 32, 48, 64, 96, 128]

    for n in mesh_sizes:
        h, error = run(n, order)
        eoc.add_data_point(h, error)

    print(eoc)

    assert eoc.order_estimate() > (order + 1 - 0.5)
Example #41
def test_opposite_face_interpolation(ctx_getter, group_factory,
        mesh_name, dim, mesh_pars):
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_face_restriction, make_opposite_face_connection,
            check_connection)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 5

    def f(x):
        return 0.1*cl.clmath.sin(30*x)

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "blob":
            assert dim == 2

            h = mesh_par

            from meshmode.mesh.io import generate_gmsh, FileSource
            print("BEGIN GEN")
            mesh = generate_gmsh(
                    FileSource("blob-2d.step"), 2, order=order,
                    force_ambient_dim=2,
                    other_options=[
                        "-string", "Mesh.CharacteristicLengthMax = %s;" % h]
                    )
            print("END GEN")
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=4, n=mesh_par)

            h = 1/mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        vol_discr = Discretization(cl_ctx, mesh,
                group_factory(order))
        print("h=%s -> %d elements" % (
                h, sum(mgrp.nelements for mgrp in mesh.groups)))

        bdry_connection = make_face_restriction(
                vol_discr, group_factory(order),
                FRESTR_INTERIOR_FACES)
        bdry_discr = bdry_connection.to_discr

        opp_face = make_opposite_face_connection(bdry_connection)
        check_connection(opp_face)

        bdry_x = bdry_discr.nodes()[0].with_queue(queue)
        bdry_f = f(bdry_x)

        bdry_f_2 = opp_face(queue, bdry_f)

        err = la.norm((bdry_f-bdry_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert (
            eoc_rec.order_estimate() >= order-0.5
            or eoc_rec.max_error() < 1e-13)
Example #42
def test_3d_jump_relations(ctx_factory, relation, visualize=False):
    # logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_factory()
    queue = cl.CommandQueue(cl_ctx)

    if relation == "div_s":
        target_order = 3
    else:
        target_order = 4

    qbx_order = target_order

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nel_factor in [6, 10, 14]:
        from meshmode.mesh.generation import generate_torus
        mesh = generate_torus(
                5, 2, order=target_order,
                n_outer=2*nel_factor, n_inner=nel_factor)

        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
        pre_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(3))

        from pytential.qbx import QBXLayerPotentialSource
        qbx, _ = QBXLayerPotentialSource(
                pre_discr, fine_order=4*target_order,
                qbx_order=qbx_order,
                fmm_order=qbx_order + 5,
                fmm_backend="fmmlib"
                ).with_refinement()

        from sumpy.kernel import LaplaceKernel
        knl = LaplaceKernel(3)

        def nxcurlS(qbx_forced_limit):

            return sym.n_cross(sym.curl(sym.S(
                knl,
                sym.cse(sym.tangential_to_xyz(density_sym), "jxyz"),
                qbx_forced_limit=qbx_forced_limit)))

        x, y, z = qbx.density_discr.nodes().with_queue(queue)
        m = cl.clmath

        if relation == "nxcurls":
            density_sym = sym.make_sym_vector("density", 2)

            jump_identity_sym = (
                    nxcurlS(+1)
                    - (nxcurlS("avg") + 0.5*sym.tangential_to_xyz(density_sym)))

            # The tangential coordinate system is element-local, so we can't just
            # conjure up some globally smooth functions, interpret their values
            # in the tangential coordinate system, and be done. Instead, generate
            # an XYZ function and project it.
            density = bind(
                    qbx,
                    sym.xyz_to_tangential(sym.make_sym_vector("jxyz", 3)))(
                            queue,
                            jxyz=sym.make_obj_array([
                                m.cos(0.5*x) * m.cos(0.5*y) * m.cos(0.5*z),
                                m.sin(0.5*x) * m.cos(0.5*y) * m.sin(0.5*z),
                                m.sin(0.5*x) * m.cos(0.5*y) * m.cos(0.5*z),
                                ]))

        elif relation == "sp":

            density = m.cos(2*x) * m.cos(2*y) * m.cos(z)
            density_sym = sym.var("density")

            jump_identity_sym = (
                    sym.Sp(knl, density_sym, qbx_forced_limit=+1)
                    - (sym.Sp(knl, density_sym, qbx_forced_limit="avg")
                        - 0.5*density_sym))

        elif relation == "div_s":

            density = m.cos(2*x) * m.cos(2*y) * m.cos(z)
            density_sym = sym.var("density")

            jump_identity_sym = (
                    sym.div(sym.S(knl, sym.normal(3).as_vector()*density_sym,
                        qbx_forced_limit="avg"))
                    + sym.D(knl, density_sym, qbx_forced_limit="avg"))

        else:
            raise ValueError("unexpected value of 'relation': %s" % relation)

        bound_jump_identity = bind(qbx, jump_identity_sym)
        jump_identity = bound_jump_identity(queue, density=density)

        err = (
                norm(qbx, queue, jump_identity, np.inf)
                / norm(qbx, queue, density, np.inf))
        print("ERROR", qbx.h_max, err)

        eoc_rec.add_data_point(qbx.h_max, err)

        # {{{ visualization

        if visualize and relation == "nxcurls":
            nxcurlS_ext = bind(qbx, nxcurlS(+1))(queue, density=density)
            nxcurlS_avg = bind(qbx, nxcurlS("avg"))(queue, density=density)
            jtxyz = bind(qbx, sym.tangential_to_xyz(density_sym))(
                    queue, density=density)

            from meshmode.discretization.visualization import make_visualizer
            bdry_vis = make_visualizer(queue, qbx.density_discr, target_order+3)

            bdry_normals = bind(qbx, sym.normal(3))(queue)\
                    .as_vector(dtype=object)

            bdry_vis.write_vtk_file("source-%s.vtu" % nel_factor, [
                ("jt", jtxyz),
                ("nxcurlS_ext", nxcurlS_ext),
                ("nxcurlS_avg", nxcurlS_avg),
                ("bdry_normals", bdry_normals),
                ])

        if visualize and relation == "sp":
            sp_ext = bind(qbx, sym.Sp(knl, density_sym, qbx_forced_limit=+1))(
                    queue, density=density)
            sp_avg = bind(qbx, sym.Sp(knl, density_sym, qbx_forced_limit="avg"))(
                    queue, density=density)

            from meshmode.discretization.visualization import make_visualizer
            bdry_vis = make_visualizer(queue, qbx.density_discr, target_order+3)

            bdry_normals = bind(qbx, sym.normal(3))(queue)\
                    .as_vector(dtype=object)

            bdry_vis.write_vtk_file("source-%s.vtu" % nel_factor, [
                ("density", density),
                ("sp_ext", sp_ext),
                ("sp_avg", sp_avg),
                ("bdry_normals", bdry_normals),
                ])

        # }}}

    print(eoc_rec)

    assert eoc_rec.order_estimate() >= qbx_order - 1.5
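
For reference, the three jump relations tested above, written out as read
directly off the symbolic expressions in the code (with density $\sigma$,
tangential density $j$ in Cartesian components, and unit normal $n$; the
subscripts $+1$ and $\mathrm{avg}$ mirror the qbx_forced_limit values used),
are
\[
\bigl[n \times \nabla \times S j\bigr]_{+1}
    = \bigl[n \times \nabla \times S j\bigr]_{\mathrm{avg}} + \tfrac{1}{2} j,
\qquad
S'_{+1}\sigma = S'_{\mathrm{avg}}\sigma - \tfrac{1}{2}\sigma,
\qquad
\nabla \cdot S(\sigma n)\big|_{\mathrm{avg}} + D_{\mathrm{avg}}\sigma = 0.
\]
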
Example #43
def test_pec_mfie_extinction(ctx_getter, case, visualize=False):
    """For (say) is_interior=False (the 'exterior' MFIE), this test verifies
    extinction of the combined (incoming + scattered) field on the interior
    of the scatterer.
    """
    logging.basicConfig(level=logging.INFO)

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    np.random.seed(12)

    knl_kwargs = {"k": case.k}

    # {{{ come up with a solution to Maxwell's equations

    j_sym = sym.make_sym_vector("j", 3)
    jt_sym = sym.make_sym_vector("jt", 2)
    rho_sym = sym.var("rho")

    from pytential.symbolic.pde.maxwell import (
            PECChargeCurrentMFIEOperator,
            get_sym_maxwell_point_source,
            get_sym_maxwell_plane_wave)
    mfie = PECChargeCurrentMFIEOperator()

    test_source = case.get_source(queue)

    calc_patch = CalculusPatch(np.array([-3, 0, 0]), h=0.01)
    calc_patch_tgt = PointsTarget(cl.array.to_device(queue, calc_patch.points))

    rng = cl.clrandom.PhiloxGenerator(cl_ctx, seed=12)
    src_j = rng.normal(queue, (3, test_source.nnodes), dtype=np.float64)

    def eval_inc_field_at(tgt):
        if 0:
            # plane wave
            return bind(
                    tgt,
                    get_sym_maxwell_plane_wave(
                        amplitude_vec=np.array([1, 1, 1]),
                        v=np.array([1, 0, 0]),
                        omega=case.k)
                    )(queue)
        else:
            # point source
            return bind(
                    (test_source, tgt),
                    get_sym_maxwell_point_source(mfie.kernel, j_sym, mfie.k)
                    )(queue, j=src_j, k=case.k)

    pde_test_inc = EHField(
            vector_from_device(queue, eval_inc_field_at(calc_patch_tgt)))

    source_maxwell_resids = [
            calc_patch.norm(x, np.inf) / calc_patch.norm(pde_test_inc.e, np.inf)
            for x in frequency_domain_maxwell(
                calc_patch, pde_test_inc.e, pde_test_inc.h, case.k)]
    print("Source Maxwell residuals:", source_maxwell_resids)
    assert max(source_maxwell_resids) < 1e-6

    # }}}

    loc_sign = -1 if case.is_interior else +1

    from pytools.convergence import EOCRecorder

    eoc_rec_repr_maxwell = EOCRecorder()
    eoc_pec_bc = EOCRecorder()
    eoc_rec_e = EOCRecorder()
    eoc_rec_h = EOCRecorder()

    from pytential.qbx import QBXLayerPotentialSource
    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from sumpy.expansion.level_to_order import SimpleExpansionOrderFinder

    for resolution in case.resolutions:
        scat_mesh = case.get_mesh(resolution, case.target_order)
        observation_mesh = case.get_observation_mesh(case.target_order)

        pre_scat_discr = Discretization(
                cl_ctx, scat_mesh,
                InterpolatoryQuadratureSimplexGroupFactory(case.target_order))
        qbx, _ = QBXLayerPotentialSource(
                pre_scat_discr, fine_order=4*case.target_order,
                qbx_order=case.qbx_order,
                fmm_level_to_order=SimpleExpansionOrderFinder(
                    case.fmm_tolerance),
                fmm_backend=case.fmm_backend
                ).with_refinement(_expansion_disturbance_tolerance=0.05)
        h_max = qbx.h_max

        scat_discr = qbx.density_discr
        obs_discr = Discretization(
                cl_ctx, observation_mesh,
                InterpolatoryQuadratureSimplexGroupFactory(case.target_order))

        inc_field_scat = EHField(eval_inc_field_at(scat_discr))
        inc_field_obs = EHField(eval_inc_field_at(obs_discr))

        # {{{ system solve

        inc_xyz_sym = EHField(sym.make_sym_vector("inc_fld", 6))

        bound_j_op = bind(qbx, mfie.j_operator(loc_sign, jt_sym))
        j_rhs = bind(qbx, mfie.j_rhs(inc_xyz_sym.h))(
                queue, inc_fld=inc_field_scat.field, **knl_kwargs)

        gmres_settings = dict(
                tol=case.gmres_tol,
                progress=True,
                hard_failure=True,
                stall_iterations=50, no_progress_factor=1.05)
        from pytential.solve import gmres
        gmres_result = gmres(
                bound_j_op.scipy_op(queue, "jt", np.complex128, **knl_kwargs),
                j_rhs, **gmres_settings)

        jt = gmres_result.solution

        bound_rho_op = bind(qbx, mfie.rho_operator(loc_sign, rho_sym))
        rho_rhs = bind(qbx, mfie.rho_rhs(jt_sym, inc_xyz_sym.e))(
                queue, jt=jt, inc_fld=inc_field_scat.field, **knl_kwargs)

        gmres_result = gmres(
                bound_rho_op.scipy_op(queue, "rho", np.complex128, **knl_kwargs),
                rho_rhs, **gmres_settings)

        rho = gmres_result.solution

        # }}}

        jxyz = bind(qbx, sym.tangential_to_xyz(jt_sym))(queue, jt=jt)

        # {{{ volume eval

        sym_repr = mfie.scattered_volume_field(jt_sym, rho_sym)

        def eval_repr_at(tgt, source=None):
            if source is None:
                source = qbx

            return bind((source, tgt), sym_repr)(queue, jt=jt, rho=rho, **knl_kwargs)

        pde_test_repr = EHField(
                vector_from_device(queue, eval_repr_at(calc_patch_tgt)))

        maxwell_residuals = [
                calc_patch.norm(x, np.inf) / calc_patch.norm(pde_test_repr.e, np.inf)
                for x in frequency_domain_maxwell(
                    calc_patch, pde_test_repr.e, pde_test_repr.h, case.k)]
        print("Maxwell residuals:", maxwell_residuals)

        eoc_rec_repr_maxwell.add_data_point(h_max, max(maxwell_residuals))

        # }}}

        # {{{ check PEC BC on total field

        bc_repr = EHField(mfie.scattered_volume_field(
            jt_sym, rho_sym, qbx_forced_limit=loc_sign))
        pec_bc_e = sym.n_cross(bc_repr.e + inc_xyz_sym.e)
        pec_bc_h = sym.normal(3).as_vector().dot(bc_repr.h + inc_xyz_sym.h)

        eh_bc_values = bind(qbx, sym.join_fields(pec_bc_e, pec_bc_h))(
                    queue, jt=jt, rho=rho, inc_fld=inc_field_scat.field,
                    **knl_kwargs)

        def scat_norm(f):
            return norm(qbx, queue, f, p=np.inf)

        e_bc_residual = scat_norm(eh_bc_values[:3]) / scat_norm(inc_field_scat.e)
        h_bc_residual = scat_norm(eh_bc_values[3]) / scat_norm(inc_field_scat.h)

        print("E/H PEC BC residuals:", h_max, e_bc_residual, h_bc_residual)

        eoc_pec_bc.add_data_point(h_max, max(e_bc_residual, h_bc_residual))

        # }}}

        # {{{ visualization

        if visualize:
            from meshmode.discretization.visualization import make_visualizer
            bdry_vis = make_visualizer(queue, scat_discr, case.target_order+3)

            bdry_normals = bind(scat_discr, sym.normal(3))(queue)\
                    .as_vector(dtype=object)

            bdry_vis.write_vtk_file("source-%s.vtu" % resolution, [
                ("j", jxyz),
                ("rho", rho),
                ("Einc", inc_field_scat.e),
                ("Hinc", inc_field_scat.h),
                ("bdry_normals", bdry_normals),
                ("e_bc_residual", eh_bc_values[:3]),
                ("h_bc_residual", eh_bc_values[3]),
                ])

            fplot = make_field_plotter_from_bbox(
                    find_bounding_box(scat_discr.mesh), h=(0.05, 0.05, 0.3),
                    extend_factor=0.3)

            from pytential.qbx import QBXTargetAssociationFailedException

            qbx_tgt_tol = qbx.copy(target_association_tolerance=0.2)

            fplot_tgt = PointsTarget(cl.array.to_device(queue, fplot.points))
            try:
                fplot_repr = eval_repr_at(fplot_tgt, source=qbx_tgt_tol)
            except QBXTargetAssociationFailedException as e:
                fplot.write_vtk_file(
                        "failed-targets.vts",
                        [
                            ("failed_targets", e.failed_target_flags.get(queue))
                            ])
                raise

            fplot_repr = EHField(vector_from_device(queue, fplot_repr))

            fplot_inc = EHField(
                    vector_from_device(queue, eval_inc_field_at(fplot_tgt)))

            fplot.write_vtk_file(
                    "potential-%s.vts" % resolution,
                    [
                        ("E", fplot_repr.e),
                        ("H", fplot_repr.h),
                        ("Einc", fplot_inc.e),
                        ("Hinc", fplot_inc.h),
                        ]
                    )

        # }}}

        # {{{ error in E, H

        obs_repr = EHField(eval_repr_at(obs_discr))

        def obs_norm(f):
            return norm(obs_discr, queue, f, p=np.inf)

        rel_err_e = (obs_norm(inc_field_obs.e + obs_repr.e)
                / obs_norm(inc_field_obs.e))
        rel_err_h = (obs_norm(inc_field_obs.h + obs_repr.h)
                / obs_norm(inc_field_obs.h))

        # }}}

        print("ERR", h_max, rel_err_h, rel_err_e)

        eoc_rec_h.add_data_point(h_max, rel_err_h)
        eoc_rec_e.add_data_point(h_max, rel_err_e)

    print("--------------------------------------------------------")
    print("is_interior=%s" % case.is_interior)
    print("--------------------------------------------------------")

    good = True
    for which_eoc, eoc_rec, order_tol in [
            ("maxwell", eoc_rec_repr_maxwell, 1.5),
            ("PEC BC", eoc_pec_bc, 1.5),
            ("H", eoc_rec_h, 1.5),
            ("E", eoc_rec_e, 1.5)]:
        print(which_eoc)
        print(eoc_rec.pretty_print())

        if len(eoc_rec.history) > 1:
            if eoc_rec.order_estimate() < case.qbx_order - order_tol:
                good = False

    assert good
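
In formulas, the two per-resolution physical checks above are the PEC boundary
condition on the scatterer surface and extinction of the total field on the
observation mesh (the interior of the scatterer for the exterior problem, per
the docstring), i.e.
\[
n \times (E_{\mathrm{scat}} + E_{\mathrm{inc}}) = 0,
\qquad
n \cdot (H_{\mathrm{scat}} + H_{\mathrm{inc}}) = 0
\quad \text{on the surface},
\]
\[
E_{\mathrm{scat}} + E_{\mathrm{inc}} \approx 0,
\qquad
H_{\mathrm{scat}} + H_{\mathrm{inc}} \approx 0
\quad \text{on the observation mesh},
\]
with both residuals (and the Maxwell residual of the representation) required
to converge at roughly the QBX order.
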
Example #44
def test_sanity_balls(ctx_getter, src_file, dim, mesh_order,
        visualize=False):
    pytest.importorskip("pytential")

    logging.basicConfig(level=logging.INFO)

    ctx = ctx_getter()
    queue = cl.CommandQueue(ctx)

    from pytools.convergence import EOCRecorder
    vol_eoc_rec = EOCRecorder()
    surf_eoc_rec = EOCRecorder()

    # overkill
    quad_order = mesh_order

    from pytential import bind, sym

    for h in [0.2, 0.14, 0.1]:
        from meshmode.mesh.io import generate_gmsh, FileSource
        mesh = generate_gmsh(
                FileSource(src_file), dim, order=mesh_order,
                other_options=["-string", "Mesh.CharacteristicLengthMax = %g;" % h],
                force_ambient_dim=dim)

        logger.info("%d elements" % mesh.nelements)

        # {{{ discretizations and connections

        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
                InterpolatoryQuadratureSimplexGroupFactory
        vol_discr = Discretization(ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(quad_order))

        from meshmode.discretization.connection import make_boundary_restriction
        bdry_mesh, bdry_discr, bdry_connection = make_boundary_restriction(
                queue, vol_discr,
                InterpolatoryQuadratureSimplexGroupFactory(quad_order))

        # }}}

        # {{{ visualizers

        from meshmode.discretization.visualization import make_visualizer
        vol_vis = make_visualizer(queue, vol_discr, 20)
        bdry_vis = make_visualizer(queue, bdry_discr, 20)

        # }}}

        from math import gamma
        true_surf = 2*np.pi**(dim/2)/gamma(dim/2)
        true_vol = true_surf/dim

        vol_x = vol_discr.nodes().with_queue(queue)

        vol_one = vol_x[0].copy()
        vol_one.fill(1)
        from pytential import norm, integral  # noqa

        comp_vol = integral(vol_discr, queue, vol_one)
        rel_vol_err = abs(true_vol - comp_vol) / true_vol
        vol_eoc_rec.add_data_point(h, rel_vol_err)
        print("VOL", true_vol, comp_vol)

        bdry_x = bdry_discr.nodes().with_queue(queue)

        bdry_one_exact = bdry_x[0].copy()
        bdry_one_exact.fill(1)

        bdry_one = bdry_connection(queue, vol_one).with_queue(queue)
        intp_err = norm(bdry_discr, queue, bdry_one-bdry_one_exact)
        assert intp_err < 1e-14

        comp_surf = integral(bdry_discr, queue, bdry_one)
        rel_surf_err = abs(true_surf - comp_surf) / true_surf
        surf_eoc_rec.add_data_point(h, rel_surf_err)
        print("SURF", true_surf, comp_surf)

        if visualize:
            vol_vis.write_vtk_file("volume-h=%g.vtu" % h, [
                ("f", vol_one),
                ("area_el", bind(vol_discr, sym.area_element())(queue)),
                ])
            bdry_vis.write_vtk_file("boundary-h=%g.vtu" % h, [("f", bdry_one)])

        # {{{ check normals point outward

        normal_outward_check = bind(bdry_discr,
                sym.normal() | sym.Nodes(),
                )(queue).as_scalar() > 0

        assert normal_outward_check.get().all(), normal_outward_check.get()

        # }}}

    print("---------------------------------")
    print("VOLUME")
    print("---------------------------------")
    print(vol_eoc_rec)
    assert vol_eoc_rec.order_estimate() >= mesh_order

    print("---------------------------------")
    print("SURFACE")
    print("---------------------------------")
    print(surf_eoc_rec)
    assert surf_eoc_rec.order_estimate() >= mesh_order
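
The exact reference values in the test above come from the closed-form
expressions surface(unit d-ball) = 2*pi**(d/2) / Gamma(d/2) and
volume = surface / d. A quick standalone sanity check of these formulas
against the familiar d = 2, 3 values (illustrative only, not part of the
test):

import numpy as np
from math import gamma

for dim, (surf_expected, vol_expected) in {
        2: (2*np.pi, np.pi),         # circle: circumference 2*pi, disk area pi
        3: (4*np.pi, 4*np.pi/3),     # sphere: surface 4*pi, ball volume 4*pi/3
        }.items():
    true_surf = 2*np.pi**(dim/2)/gamma(dim/2)
    true_vol = true_surf/dim
    assert abs(true_surf - surf_expected) < 1e-12
    assert abs(true_vol - vol_expected) < 1e-12
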
Example #45
def test_identities(ctx_getter, zero_op_name, curve_name, curve_f, qbx_order, k):
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    target_order = 7

    u_sym = sym.var("u")
    grad_u_sym = sym.VectorVariable("grad_u")
    dn_u_sym = sym.var("dn_u")

    if k == 0:
        k_sym = 0
    else:
        k_sym = "k"

    zero_op_table = {
            "green":
            sym.S(k_sym, dn_u_sym) - sym.D(k_sym, u_sym) - 0.5*u_sym,

            "green_grad":
            d1.nabla * d1(sym.S(k_sym, dn_u_sym))
            - d2.nabla * d2(sym.D(k_sym, u_sym))
            - 0.5*grad_u_sym,

            # only for k==0:
            "zero_calderon":
            -sym.Dp(0, sym.S(0, u_sym))
            - 0.25*u_sym + sym.Sp(0, sym.Sp(0, u_sym))
            }
    order_table = {
            "green": qbx_order,
            "green_grad": qbx_order-1,
            "zero_calderon": qbx_order-1,
            }

    zero_op = zero_op_table[zero_op_name]

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for nelements in [30, 50, 70]:
        mesh = make_curve_mesh(curve_f,
                np.linspace(0, 1, nelements+1),
                target_order)

        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
                InterpolatoryQuadratureSimplexGroupFactory
        from pytential.qbx import QBXLayerPotentialSource
        density_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))

        qbx = QBXLayerPotentialSource(density_discr, 4*target_order,
                qbx_order,
                # Don't use FMM for now
                fmm_order=False)

        # {{{ compute values of a solution to the PDE

        nodes_host = density_discr.nodes().get(queue)
        normal = bind(density_discr, sym.normal())(queue).as_vector(np.object)
        normal_host = [normal[0].get(), normal[1].get()]

        if k != 0:
            angle = 0.3
            wave_vec = np.array([np.cos(angle), np.sin(angle)])
            u = np.exp(1j*k*np.tensordot(wave_vec, nodes_host, axes=1))
            grad_u = 1j*k*wave_vec[:, np.newaxis]*u
        else:
            center = np.array([3, 1])
            diff = nodes_host - center[:, np.newaxis]
            dist_squared = np.sum(diff**2, axis=0)
            dist = np.sqrt(dist_squared)
            u = np.log(dist)
            grad_u = diff/dist_squared

        dn_u = normal_host[0]*grad_u[0] + normal_host[1]*grad_u[1]

        # }}}

        u_dev = cl.array.to_device(queue, u)
        dn_u_dev = cl.array.to_device(queue, dn_u)
        grad_u_dev = cl.array.to_device(queue, grad_u)

        key = (qbx_order, curve_name, nelements, zero_op_name)

        bound_op = bind(qbx, zero_op)
        error = bound_op(
                queue, u=u_dev, dn_u=dn_u_dev, grad_u=grad_u_dev, k=k)
        if 0:
            pt.plot(error)
            pt.show()

        l2_error_norm = norm(density_discr, queue, error)
        print(key, l2_error_norm)

        eoc_rec.add_data_point(1/nelements, l2_error_norm)

    print(eoc_rec)
    tgt_order = order_table[zero_op_name]
    assert eoc_rec.order_estimate() > tgt_order - 1.3
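
In the k = 0 branch above, the test data u = log|x - x0| with
grad_u = (x - x0)/|x - x0|**2 is a solution of the 2D Laplace equation away
from x0 = (3, 1), which is why it can serve as test data for the
Green-identity zero operators. A small standalone finite-difference
confirmation of that fact (the evaluation point and step size below are
arbitrary choices for illustration):

import numpy as np

x0 = np.array([3.0, 1.0])

def u(p):
    return np.log(np.linalg.norm(p - x0))

p = np.array([0.2, -0.4])   # any point away from x0 works
h = 1e-3
# centered second differences in x and y approximate the Laplacian
lap = sum((u(p + h*e) - 2*u(p) + u(p - h*e)) / h**2 for e in np.eye(2))
assert abs(lap) < 1e-6
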
Example #46
def test_boundary_interpolation(ctx_getter, group_factory, boundary_tag,
        mesh_name, dim, mesh_pars, per_face_groups):
    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_face_restriction, check_connection)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    order = 4

    def f(x):
        return 0.1*cl.clmath.sin(30*x)

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "blob":
            assert dim == 2

            h = mesh_par

            from meshmode.mesh.io import generate_gmsh, FileSource
            print("BEGIN GEN")
            mesh = generate_gmsh(
                    FileSource("blob-2d.step"), 2, order=order,
                    force_ambient_dim=2,
                    other_options=[
                        "-string", "Mesh.CharacteristicLengthMax = %s;" % h]
                    )
            print("END GEN")
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=4, n=mesh_par)

            h = 1/mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        vol_discr = Discretization(cl_ctx, mesh,
                group_factory(order))
        print("h=%s -> %d elements" % (
                h, sum(mgrp.nelements for mgrp in mesh.groups)))

        x = vol_discr.nodes()[0].with_queue(queue)
        vol_f = f(x)

        bdry_connection = make_face_restriction(
                vol_discr, group_factory(order),
                boundary_tag, per_face_groups=per_face_groups)
        check_connection(bdry_connection)
        bdry_discr = bdry_connection.to_discr

        bdry_x = bdry_discr.nodes()[0].with_queue(queue)
        bdry_f = f(bdry_x)
        bdry_f_2 = bdry_connection(queue, vol_f)

        if mesh_name == "blob" and dim == 2:
            mat = bdry_connection.full_resample_matrix(queue).get(queue)
            bdry_f_2_by_mat = mat.dot(vol_f.get())

            mat_error = la.norm(bdry_f_2.get(queue=queue) - bdry_f_2_by_mat)
            assert mat_error < 1e-14, mat_error

        err = la.norm((bdry_f-bdry_f_2).get(), np.inf)
        eoc_rec.add_data_point(h, err)

    print(eoc_rec)
    assert (
            eoc_rec.order_estimate() >= order-0.5
            or eoc_rec.max_error() < 1e-14)
Example #47
def test_refinement_connection(
        ctx_getter, refiner_cls, group_factory,
        mesh_name, dim, mesh_pars, mesh_order, refine_flags, visualize=False):
    from random import seed
    seed(13)

    # Discretization order
    order = 5

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from meshmode.discretization import Discretization
    from meshmode.discretization.connection import (
            make_refinement_connection, check_connection)

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for mesh_par in mesh_pars:
        # {{{ get mesh

        if mesh_name == "circle":
            assert dim == 1
            h = 1 / mesh_par
            mesh = make_curve_mesh(
                partial(ellipse, 1), np.linspace(0, 1, mesh_par + 1),
                order=mesh_order)
        elif mesh_name == "blob":
            if mesh_order == 5:
                pytest.xfail("https://gitlab.tiker.net/inducer/meshmode/issues/2")
            assert dim == 2
            mesh = get_blob_mesh(mesh_par, mesh_order)
            h = float(mesh_par)
        elif mesh_name == "warp":
            from meshmode.mesh.generation import generate_warped_rect_mesh
            mesh = generate_warped_rect_mesh(dim, order=mesh_order, n=mesh_par)
            h = 1/mesh_par
        else:
            raise ValueError("mesh_name not recognized")

        # }}}

        from meshmode.mesh.processing import find_bounding_box
        mesh_bbox_low, mesh_bbox_high = find_bounding_box(mesh)
        mesh_ext = mesh_bbox_high-mesh_bbox_low

        def f(x):
            result = 1
            if mesh_name == "blob":
                factor = 15
            else:
                factor = 9

            for iaxis in range(len(x)):
                result = result * cl.clmath.sin(factor * (x[iaxis]/mesh_ext[iaxis]))

            return result

        discr = Discretization(cl_ctx, mesh, group_factory(order))

        refiner = refiner_cls(mesh)
        flags = refine_flags(mesh)
        refiner.refine(flags)

        connection = make_refinement_connection(
            refiner, discr, group_factory(order))
        check_connection(connection)

        fine_discr = connection.to_discr

        x = discr.nodes().with_queue(queue)
        x_fine = fine_discr.nodes().with_queue(queue)
        f_coarse = f(x)
        f_interp = connection(queue, f_coarse).with_queue(queue)
        f_true = f(x_fine).with_queue(queue)

        if visualize == "dots":
            import matplotlib.pyplot as plt
            x = x.get(queue)
            err = np.array(np.log10(
                1e-16 + np.abs((f_interp - f_true).get(queue))), dtype=float)
            import matplotlib.cm as cm
            cmap = cm.ScalarMappable(cmap=cm.jet)
            cmap.set_array(err)
            plt.scatter(x[0], x[1], c=cmap.to_rgba(err), s=20, cmap=cmap)
            plt.colorbar(cmap)
            plt.show()

        elif visualize == "vtk":
            from meshmode.discretization.visualization import make_visualizer
            fine_vis = make_visualizer(queue, fine_discr, mesh_order)

            fine_vis.write_vtk_file(
                    "refine-fine-%s-%dd-%s.vtu" % (mesh_name, dim, mesh_par), [
                        ("f_interp", f_interp),
                        ("f_true", f_true),
                        ])

        import numpy.linalg as la
        err = la.norm((f_interp - f_true).get(queue), np.inf)
        eoc_rec.add_data_point(h, err)

    order_slack = 0.5
    if mesh_name == "blob" and order > 1:
        order_slack = 1

    print(eoc_rec)
    assert (
            eoc_rec.order_estimate() >= order-order_slack
            or eoc_rec.max_error() < 1e-14)
Example #48
def test_p2e2p(ctx_getter, base_knl, expn_class, order, with_source_derivative):
    #logging.basicConfig(level=logging.INFO)

    from sympy.core.cache import clear_cache
    clear_cache()

    ctx = ctx_getter()
    queue = cl.CommandQueue(ctx)

    np.random.seed(17)

    res = 100
    nsources = 100

    extra_kwargs = {}
    if isinstance(base_knl, HelmholtzKernel):
        if base_knl.allow_evanescent:
            extra_kwargs["k"] = 0.2 * (0.707 + 0.707j)
        else:
            extra_kwargs["k"] = 0.2

    if with_source_derivative:
        knl = DirectionalSourceDerivative(base_knl, "dir_vec")
    else:
        knl = base_knl

    out_kernels = [
            knl,
            AxisTargetDerivative(0, knl),
            ]
    expn = expn_class(knl, order=order)

    from sumpy import P2EFromSingleBox, E2PFromSingleBox, P2P
    p2e = P2EFromSingleBox(ctx, expn, out_kernels)
    e2p = E2PFromSingleBox(ctx, expn, out_kernels)
    p2p = P2P(ctx, out_kernels, exclude_self=False)

    from pytools.convergence import EOCRecorder
    eoc_rec_pot = EOCRecorder()
    eoc_rec_grad_x = EOCRecorder()

    from sumpy.expansion.local import LocalExpansionBase
    if issubclass(expn_class, LocalExpansionBase):
        h_values = [1/5, 1/7, 1/20]
    else:
        h_values = [1/2, 1/3, 1/5]

    center = np.array([2, 1], np.float64)
    sources = (0.7*(-0.5+np.random.rand(knl.dim, nsources).astype(np.float64))
            + center[:, np.newaxis])

    strengths = np.ones(nsources, dtype=np.float64) * (1/nsources)

    source_boxes = np.array([0], dtype=np.int32)
    box_source_starts = np.array([0], dtype=np.int32)
    box_source_counts_nonchild = np.array([nsources], dtype=np.int32)

    extra_source_kwargs = extra_kwargs.copy()
    if with_source_derivative:
        alpha = np.linspace(0, 2*np.pi, nsources, dtype=np.float64)
        dir_vec = np.vstack([np.cos(alpha), np.sin(alpha)])
        extra_source_kwargs["dir_vec"] = dir_vec

    from sumpy.visualization import FieldPlotter

    for h in h_values:
        if issubclass(expn_class, LocalExpansionBase):
            loc_center = np.array([5.5, 0.0]) + center
            centers = np.array(loc_center, dtype=np.float64).reshape(knl.dim, 1)
            fp = FieldPlotter(loc_center, extent=h, npoints=res)
        else:
            eval_center = np.array([1/h, 0.0]) + center
            fp = FieldPlotter(eval_center, extent=0.1, npoints=res)
            centers = (
                    np.array([0.0, 0.0], dtype=np.float64).reshape(knl.dim, 1)
                    + center[:, np.newaxis])

        targets = fp.points

        rscale = 0.5  # pick something non-1

        # {{{ apply p2e

        evt, (mpoles,) = p2e(queue,
                source_boxes=source_boxes,
                box_source_starts=box_source_starts,
                box_source_counts_nonchild=box_source_counts_nonchild,
                centers=centers,
                sources=sources,
                strengths=strengths,
                nboxes=1,
                tgt_base_ibox=0,
                rscale=rscale,

                #flags="print_hl_cl",
                out_host=True, **extra_source_kwargs)

        # }}}

        # {{{ apply e2p

        ntargets = targets.shape[-1]

        box_target_starts = np.array([0], dtype=np.int32)
        box_target_counts_nonchild = np.array([ntargets], dtype=np.int32)

        evt, (pot, grad_x, ) = e2p(
                queue,
                src_expansions=mpoles,
                src_base_ibox=0,
                target_boxes=source_boxes,
                box_target_starts=box_target_starts,
                box_target_counts_nonchild=box_target_counts_nonchild,
                centers=centers,
                targets=targets,
                rscale=rscale,

                #flags="print_hl_cl",
                out_host=True, **extra_kwargs)

        # }}}

        # {{{ compute (direct) reference solution

        evt, (pot_direct, grad_x_direct, ) = p2p(
                queue,
                targets, sources, (strengths,),
                out_host=True,
                **extra_source_kwargs)

        err_pot = la.norm((pot - pot_direct)/res**2)
        err_grad_x = la.norm((grad_x - grad_x_direct)/res**2)

        if 1:
            err_pot = err_pot / la.norm((pot_direct)/res**2)
            err_grad_x = err_grad_x / la.norm((grad_x_direct)/res**2)

        if 0:
            import matplotlib.pyplot as pt
            from matplotlib.colors import Normalize

            pt.subplot(131)
            im = fp.show_scalar_in_matplotlib(pot.real)
            im.set_norm(Normalize(vmin=-0.1, vmax=0.1))

            pt.subplot(132)
            im = fp.show_scalar_in_matplotlib(pot_direct.real)
            im.set_norm(Normalize(vmin=-0.1, vmax=0.1))
            pt.colorbar()

            pt.subplot(133)
            im = fp.show_scalar_in_matplotlib(np.log10(1e-15+np.abs(pot-pot_direct)))
            im.set_norm(Normalize(vmin=-6, vmax=1))

            pt.colorbar()
            pt.show()

        # }}}

        eoc_rec_pot.add_data_point(h, err_pot)
        eoc_rec_grad_x.add_data_point(h, err_grad_x)

    print(expn_class, knl, order)
    print("POTENTIAL:")
    print(eoc_rec_pot)
    print("X TARGET DERIVATIVE:")
    print(eoc_rec_grad_x)

    tgt_order = order + 1
    if issubclass(expn_class, LocalExpansionBase):
        tgt_order_grad = tgt_order - 1
        slack = 0.7
        grad_slack = 0.5
    else:
        tgt_order_grad = tgt_order + 1

        slack = 0.5
        grad_slack = 1

        if order <= 2:
            slack += 1
            grad_slack += 1

    if isinstance(knl, DirectionalSourceDerivative):
        slack += 1
        grad_slack += 2

    if isinstance(base_knl, HelmholtzKernel):
        if base_knl.allow_evanescent:
            slack += 0.5
            grad_slack += 0.5

        if issubclass(expn_class, VolumeTaylorMultipoleExpansionBase):
            slack += 0.3
            grad_slack += 0.3

    assert eoc_rec_pot.order_estimate() > tgt_order - slack
    assert eoc_rec_grad_x.order_estimate() > tgt_order_grad - grad_slack
Example #49
def test_ellipse_eigenvalues(ctx_getter, ellipse_aspect, mode_nr, qbx_order):
    logging.basicConfig(level=logging.INFO)

    print("ellipse_aspect: %s, mode_nr: %d, qbx_order: %d" % (
            ellipse_aspect, mode_nr, qbx_order))

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    target_order = 7

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            InterpolatoryQuadratureSimplexGroupFactory
    from pytential.qbx import QBXLayerPotentialSource
    from pytools.convergence import EOCRecorder

    s_eoc_rec = EOCRecorder()
    d_eoc_rec = EOCRecorder()
    sp_eoc_rec = EOCRecorder()

    if ellipse_aspect != 1:
        nelements_values = [60, 100, 150, 200]
    else:
        nelements_values = [30, 70]

    # See
    #
    # [1] G. J. Rodin and O. Steinbach, "Boundary Element Preconditioners
    # for Problems Defined on Slender Domains", SIAM Journal on Scientific
    # Computing, Vol. 24, No. 4, pg. 1450, 2003.
    # http://dx.doi.org/10.1137/S1064827500372067

    for nelements in nelements_values:
        mesh = make_curve_mesh(partial(ellipse, ellipse_aspect),
                np.linspace(0, 1, nelements+1),
                target_order)

        fmm_order = qbx_order
        if fmm_order > 3:
            # FIXME: for now
            fmm_order = False

        density_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))
        qbx = QBXLayerPotentialSource(density_discr, 4*target_order,
                qbx_order, fmm_order=fmm_order)

        nodes = density_discr.nodes().with_queue(queue)

        if 0:
            # plot geometry, centers, normals
            centers = qbx.centers(density_discr, 1)
            nodes_h = nodes.get()
            centers_h = [centers[0].get(), centers[1].get()]
            pt.plot(nodes_h[0], nodes_h[1], "x-")
            pt.plot(centers_h[0], centers_h[1], "o")
            normal = bind(qbx, sym.normal())(queue).as_vector(np.object)
            pt.quiver(nodes_h[0], nodes_h[1],
                    normal[0].get(), normal[1].get())
            pt.gca().set_aspect("equal")
            pt.show()

        angle = cl.clmath.atan2(nodes[1]*ellipse_aspect, nodes[0])

        ellipse_fraction = ((1-ellipse_aspect)/(1+ellipse_aspect))**mode_nr

        # (2.6) in [1]
        J = cl.clmath.sqrt(  # noqa
                cl.clmath.sin(angle)**2
                + (1/ellipse_aspect)**2 * cl.clmath.cos(angle)**2)

        # {{{ single layer

        sigma = cl.clmath.cos(mode_nr*angle)/J

        s_sigma_op = bind(qbx, sym.S(0, sym.var("sigma")))
        s_sigma = s_sigma_op(queue=queue, sigma=sigma)

        # SIGN BINGO! :)
        s_eigval = 1/(2*mode_nr) * (1 + (-1)**mode_nr * ellipse_fraction)

        # (2.12) in [1]
        s_sigma_ref = s_eigval*J*sigma

        if 0:
            #pt.plot(s_sigma.get(), label="result")
            #pt.plot(s_sigma_ref.get(), label="ref")
            pt.plot((s_sigma_ref-s_sigma).get(), label="err")
            pt.legend()
            pt.show()

        s_err = (
                norm(density_discr, queue, s_sigma - s_sigma_ref)
                /
                norm(density_discr, queue, s_sigma_ref))
        s_eoc_rec.add_data_point(1/nelements, s_err)

        # }}}

        # {{{ double layer

        sigma = cl.clmath.cos(mode_nr*angle)

        d_sigma_op = bind(qbx, sym.D(0, sym.var("sigma")))
        d_sigma = d_sigma_op(queue=queue, sigma=sigma)

        # SIGN BINGO! :)
        d_eigval = -(-1)**mode_nr * 1/2*ellipse_fraction

        d_sigma_ref = d_eigval*sigma

        if 0:
            pt.plot(d_sigma.get(), label="result")
            pt.plot(d_sigma_ref.get(), label="ref")
            pt.legend()
            pt.show()

        if ellipse_aspect == 1:
            d_ref_norm = norm(density_discr, queue, sigma)
        else:
            d_ref_norm = norm(density_discr, queue, d_sigma_ref)

        d_err = (
                norm(density_discr, queue, d_sigma - d_sigma_ref)
                /
                d_ref_norm)
        d_eoc_rec.add_data_point(1/nelements, d_err)

        # }}}

        if ellipse_aspect == 1:
            # {{{ S'

            sigma = cl.clmath.cos(mode_nr*angle)

            sp_sigma_op = bind(qbx, sym.Sp(0, sym.var("sigma")))
            sp_sigma = sp_sigma_op(queue=queue, sigma=sigma)
            sp_eigval = 0

            sp_sigma_ref = sp_eigval*sigma

            sp_err = (
                    norm(density_discr, queue, sp_sigma - sp_sigma_ref)
                    /
                    norm(density_discr, queue, sigma))
            sp_eoc_rec.add_data_point(1/nelements, sp_err)

            # }}}

    print("Errors for S:")
    print(s_eoc_rec)
    required_order = qbx_order + 1
    assert s_eoc_rec.order_estimate() > required_order - 1.5

    print("Errors for D:")
    print(d_eoc_rec)
    required_order = qbx_order
    assert d_eoc_rec.order_estimate() > required_order - 1.5

    if ellipse_aspect == 1:
        print("Errors for S':")
        print(sp_eoc_rec)
        required_order = qbx_order
        assert sp_eoc_rec.order_estimate() > required_order - 1.5
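
Summarizing the reference solution used above: with ellipse aspect ratio $a$,
mode number $n$, and $q = (1 - a)/(1 + a)$ (the ellipse_fraction in the code),
the test checks the eigenvalue relations from [1] (cf. (2.12) there),
\[
S\!\left(\frac{\cos n\theta}{J}\right) = \lambda_S \cos n\theta,
\quad
\lambda_S = \frac{1}{2n}\bigl(1 + (-1)^n q^n\bigr),
\qquad
D(\cos n\theta) = \lambda_D \cos n\theta,
\quad
\lambda_D = -\frac{(-1)^n}{2} q^n,
\]
with $J$ as in (2.6) of [1]. For the circle ($a = 1$) one has $q = 0$, and the
test additionally checks $S'(\cos n\theta) = 0$, matching the
ellipse_aspect == 1 branch above.
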
def test_identity_convergence(ctx_getter, case, visualize=False):
    logging.basicConfig(level=logging.INFO)

    case.check()

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    # prevent cache 'splosion
    from sympy.core.cache import clear_cache
    clear_cache()

    target_order = 8

    from pytools.convergence import EOCRecorder
    eoc_rec = EOCRecorder()

    for resolution in (
            getattr(case, "resolutions", None)
            or case.geometry.resolutions
            ):
        mesh = case.geometry.get_mesh(resolution, target_order)
        if mesh is None:
            break

        d = mesh.ambient_dim
        k = case.k

        lap_k_sym = LaplaceKernel(d)
        if k == 0:
            k_sym = lap_k_sym
            knl_kwargs = {}
        else:
            k_sym = HelmholtzKernel(d)
            knl_kwargs = {"k": sym.var("k")}

        from meshmode.discretization import Discretization
        from meshmode.discretization.poly_element import \
                InterpolatoryQuadratureSimplexGroupFactory
        from pytential.qbx import QBXLayerPotentialSource
        pre_density_discr = Discretization(
                cl_ctx, mesh,
                InterpolatoryQuadratureSimplexGroupFactory(target_order))

        refiner_extra_kwargs = {}

        if case.k != 0:
            refiner_extra_kwargs["kernel_length_scale"] = 5/case.k

        qbx, _ = QBXLayerPotentialSource(
                pre_density_discr, 4*target_order,
                case.qbx_order,
                fmm_order=case.fmm_order,
                fmm_backend=case.fmm_backend,
                _expansions_in_tree_have_extent=True,
                _expansion_stick_out_factor=getattr(
                    case, "_expansion_stick_out_factor", 0),
                ).with_refinement(**refiner_extra_kwargs)

        density_discr = qbx.density_discr

        # {{{ compute values of a solution to the PDE

        nodes_host = density_discr.nodes().get(queue)
        normal = bind(density_discr, sym.normal(d))(queue).as_vector(np.object)
        normal_host = [normal[j].get() for j in range(d)]

        if k != 0:
            if d == 2:
                angle = 0.3
                wave_vec = np.array([np.cos(angle), np.sin(angle)])
                u = np.exp(1j*k*np.tensordot(wave_vec, nodes_host, axes=1))
                grad_u = 1j*k*wave_vec[:, np.newaxis]*u
            elif d == 3:
                center = np.array([3, 1, 2])
                diff = nodes_host - center[:, np.newaxis]
                r = la.norm(diff, axis=0)
                u = np.exp(1j*k*r) / r
                grad_u = diff * (1j*k*u/r - u/r**2)
            else:
                raise ValueError("invalid dim")
        else:
            center = np.array([3, 1, 2])[:d]
            diff = nodes_host - center[:, np.newaxis]
            dist_squared = np.sum(diff**2, axis=0)
            dist = np.sqrt(dist_squared)
            if d == 2:
                u = np.log(dist)
                grad_u = diff/dist_squared
            elif d == 3:
                u = 1/dist
                grad_u = -diff/dist**3
            else:
                assert False

        dn_u = 0
        for i in range(d):
            dn_u = dn_u + normal_host[i]*grad_u[i]

        # }}}

        u_dev = cl.array.to_device(queue, u)
        dn_u_dev = cl.array.to_device(queue, dn_u)
        grad_u_dev = cl.array.to_device(queue, grad_u)

        key = (case.qbx_order, case.geometry.mesh_name, resolution,
                case.expr.zero_op_name)

        bound_op = bind(qbx, case.expr.get_zero_op(k_sym, **knl_kwargs))
        error = bound_op(
                queue, u=u_dev, dn_u=dn_u_dev, grad_u=grad_u_dev, k=case.k)
        if 0:
            pt.plot(error)
            pt.show()

        linf_error_norm = norm(density_discr, queue, error, p=np.inf)
        print("--->", key, linf_error_norm)

        eoc_rec.add_data_point(qbx.h_max, linf_error_norm)

        if visualize:
            from meshmode.discretization.visualization import make_visualizer
            bdry_vis = make_visualizer(queue, density_discr, target_order)

            bdry_normals = bind(density_discr, sym.normal(mesh.ambient_dim))(queue)\
                    .as_vector(dtype=object)

            bdry_vis.write_vtk_file("source-%s.vtu" % resolution, [
                ("u", u_dev),
                ("bdry_normals", bdry_normals),
                ("error", error),
                ])

    print(eoc_rec)
    tgt_order = case.qbx_order - case.expr.order_drop
    assert eoc_rec.order_estimate() > tgt_order - 1.6