def test_projection():
    """Test whether projection between different orders works"""

    from hedge.mesh.generator import make_disk_mesh
    from hedge.discretization import Projector
    from hedge.discretization.local import TriangleDiscretization
    from math import sin, pi

    from numpy import dot

    a = numpy.array([1, 3])

    def u_analytic(x, el):
        return sin(dot(a, x))

    mesh = make_disk_mesh(r=pi, max_area=0.5)

    discr2 = discr_class(mesh,
                         TriangleDiscretization(2),
                         debug=discr_class.noninteractive_debug_flags())
    discr5 = discr_class(mesh,
                         TriangleDiscretization(5),
                         debug=discr_class.noninteractive_debug_flags())
    p2to5 = Projector(discr2, discr5)
    p5to2 = Projector(discr5, discr2)

    u2 = discr2.interpolate_volume_function(u_analytic)
    u2_i = p5to2(p2to5(u2))
    assert discr2.norm(u2 - u2_i) < 3e-15
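
# Standalone 1-D sketch of the same round-trip idea (an illustration only; it assumes
# nothing beyond numpy, and the node choice and Legendre basis here are not hedge's):
# data that lies in the low-order space survives the trip low -> high -> low to round-off.
import numpy
import numpy.linalg as la
from numpy.polynomial import legendre as leg

lo_order, hi_order = 2, 5
x_lo = numpy.cos(numpy.linspace(0.0, numpy.pi, lo_order + 1))   # nodes of the low-order space
x_hi = numpy.cos(numpy.linspace(0.0, numpy.pi, hi_order + 1))   # nodes of the high-order space

def f(x):
    return 1.0 - 0.5*x + 2.0*x**2        # exactly representable at order 2

u_lo = f(x_lo)
coeffs = leg.legfit(x_lo, u_lo, lo_order)          # interpolate in the low-order space
u_hi = leg.legval(x_hi, coeffs)                    # represent in the high-order space
coeffs_back = leg.legfit(x_hi, u_hi, lo_order)     # least-squares projection back down
u_lo_back = leg.legval(x_lo, coeffs_back)

assert la.norm(u_lo - u_lo_back, numpy.inf) < 1e-13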
def test_tri_face_node_distribution():
    """Test whether the nodes on the faces of the triangle are distributed
    according to the same proportions on each face.

    If this is not the case, then reusing the same face mass matrix
    for each face would be invalid.
    """

    from hedge.discretization.local import TriangleDiscretization

    tri = TriangleDiscretization(8)
    unodes = tri.unit_nodes()
    projected_face_points = []
    for face_i in tri.face_indices():
        start = unodes[face_i[0]]
        end = unodes[face_i[-1]]
        dir = end-start
        dir /= numpy.dot(dir, dir)
        pfp = numpy.array([numpy.dot(dir, unodes[i]-start) for i in face_i])
        projected_face_points.append(pfp)

    first_points = projected_face_points[0]
    for points in projected_face_points[1:]:
        error = la.norm(points-first_points, numpy.Inf)
        assert error < 1e-15
def test_simp_orthogonality():
    """Test orthogonality of simplicial bases using Grundmann-Moeller cubature"""
    from hedge.quadrature import SimplexCubature
    from hedge.discretization.local import TriangleDiscretization, TetrahedronDiscretization

    for order, ebound in [
            (1, 2e-15),
            (2, 5e-15),
            (3, 1e-14),
            #(4, 3e-14),
            #(7, 3e-14),
            #(9, 2e-13),
            ]:
        for ldis in [TriangleDiscretization(order), TetrahedronDiscretization(order)]:
            cub = SimplexCubature(order, ldis.dimensions)
            basis = ldis.basis_functions()

            maxerr = 0
            for i, f in enumerate(basis):
                for j, g in enumerate(basis):
                    if i == j:
                        true_result = 1
                    else:
                        true_result = 0
                    result = cub(lambda x: f(x)*g(x))
                    err = abs(result-true_result)
                    print maxerr, err
                    maxerr = max(maxerr, err)
                    if err > ebound:
                        print "bad", order,i,j, err
                    assert err < ebound
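
# Standalone 1-D analogue of the orthogonality check above (illustration only, plain
# numpy, not hedge's API): an orthonormalized Legendre basis integrated with a Gauss rule
# that is exact for the products yields the identity Gram matrix to round-off.
import numpy
from numpy.polynomial import legendre as leg

order = 3
x, w = leg.leggauss(order + 1)                    # exact for degree <= 2*order + 1
V = leg.legvander(x, order)                       # V[i, n] = P_n(x_i)
norms = numpy.sqrt(2.0/(2.0*numpy.arange(order + 1) + 1.0))
V_ortho = V/norms                                 # orthonormalize the columns
gram = numpy.dot(V_ortho.T*w, V_ortho)            # gram[m, n] = sum_i w_i phi_m(x_i) phi_n(x_i)
assert numpy.max(numpy.abs(gram - numpy.eye(order + 1))) < 1e-14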
def no_test_tri_mass_mat_gauss(self):
    """Check the integral of a Gaussian on a disk using the mass matrix"""

    # This is a bad test, since it's never exact. The Gaussian has infinite support,
    # and this *does* matter numerically.

    from hedge.mesh.generator import make_disk_mesh
    from hedge.discretization.local import TriangleDiscretization
    from math import sqrt, exp, pi

    sigma_squared = 1 / 219.3

    discr = self.discr_class(
        make_disk_mesh(),
        TriangleDiscretization(4),
        debug=self.discr_class.noninteractive_debug_flags())
    f = discr.interpolate_volume_function(
        lambda x, el: exp(-x * x / (2 * sigma_squared)))
    ones = discr.interpolate_volume_function(lambda x, el: 1)

    #discr.visualize_vtk("gaussian.vtk", [("f", f)])
    num_integral_1 = ones * (discr.mass_operator * f)
    num_integral_2 = f * (discr.mass_operator * ones)
    dim = 2
    true_integral = (2 * pi)**(dim / 2) * sqrt(sigma_squared)**dim
    err_1 = abs(num_integral_1 - true_integral)
    err_2 = abs(num_integral_2 - true_integral)
    self.assert_(err_1 < 1e-11)
    self.assert_(err_2 < 1e-11)
def test_tri_diff_mat():
    """Check differentiation matrix along the coordinate axes on a disk

    Uses sines as the function to differentiate.
    """
    from hedge.mesh.generator import make_disk_mesh
    from hedge.discretization.local import TriangleDiscretization
    from math import sin, cos

    from hedge.optemplate import make_nabla
    nabla = make_nabla(2)

    for coord in [0, 1]:
        discr = discr_class(make_disk_mesh(),
                            TriangleDiscretization(4),
                            debug=discr_class.noninteractive_debug_flags())
        f = discr.interpolate_volume_function(lambda x, el: sin(3 * x[coord]))
        df = discr.interpolate_volume_function(
            lambda x, el: 3 * cos(3 * x[coord]))

        df_num = nabla[coord].apply(discr, f)
        #discr.visualize_vtk("diff-err.vtk",
        #[("f", f), ("df", df), ("df_num", df_num), ("error", error)])

        linf_error = la.norm(df_num - df, numpy.Inf)
        print linf_error
        assert linf_error < 4e-5
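
# Standalone 1-D analogue of the differentiation test above (illustration only, plain
# numpy; nodes and basis are not hedge's): build D = Vx V^{-1} from a Legendre Vandermonde
# matrix and its derivative counterpart, then differentiate sin(3x) on [-1, 1].
import numpy
import numpy.linalg as la
from numpy.polynomial import legendre as leg

order = 16
x = numpy.cos(numpy.pi*numpy.arange(order + 1)/order)      # Chebyshev-extreme nodes
V = leg.legvander(x, order)                                 # V[i, n] = P_n(x_i)
Vx = numpy.array([leg.legval(x, leg.legder(numpy.eye(order + 1)[n]))
    for n in range(order + 1)]).T                           # Vx[i, n] = P_n'(x_i)
D = numpy.dot(Vx, la.inv(V))                                # nodal differentiation matrix

f = numpy.sin(3*x)
df = 3*numpy.cos(3*x)
assert la.norm(numpy.dot(D, f) - df, numpy.inf) < 1e-7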
def test_tri_mass_mat_trig():
    """Check the integral of some trig functions on a square using the mass matrix"""

    from hedge.mesh.generator import make_square_mesh
    from hedge.discretization.local import TriangleDiscretization
    from math import pi, cos, sin

    mesh = make_square_mesh(a=-pi, b=pi, max_area=(2 * pi / 10)**2 / 2)
    discr = discr_class(mesh,
                        TriangleDiscretization(8),
                        debug=discr_class.noninteractive_debug_flags())

    f = discr.interpolate_volume_function(
        lambda x, el: cos(x[0])**2 * sin(x[1])**2)
    ones = discr.interpolate_volume_function(lambda x, el: 1)

    from hedge.optemplate import MassOperator
    mass_op = MassOperator()

    num_integral_1 = numpy.dot(ones, mass_op.apply(discr, f))
    num_integral_2 = numpy.dot(f, mass_op.apply(discr, ones))
    true_integral = pi**2
    err_1 = abs(num_integral_1 - true_integral)
    err_2 = abs(num_integral_2 - true_integral)
    #print err_1, err_2
    assert err_1 < 1e-10
    assert err_2 < 1e-10
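
# Quick standalone check of the reference value used above (illustration only, plain
# numpy): over [-pi, pi]^2 the integral of cos(x)^2 * sin(y)^2 factors into pi * pi.
import numpy
from numpy.polynomial import legendre as leg
from math import pi

x, w = leg.leggauss(50)
x = pi*x                                    # map the rule from [-1, 1] to [-pi, pi]
w = pi*w
int_cos2 = numpy.sum(w*numpy.cos(x)**2)     # = pi
int_sin2 = numpy.sum(w*numpy.sin(x)**2)     # = pi
assert abs(int_cos2*int_sin2 - pi**2) < 1e-10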
def test_tri_map():
    """Verify that the mapping and node-building operations maintain triangle vertices"""
    from hedge.discretization.local import TriangleDiscretization

    n = 8
    tri = TriangleDiscretization(n)

    node_dict = dict((ituple, idx) for idx, ituple in enumerate(tri.node_tuples()))
    corner_indices = [node_dict[0,0], node_dict[n,0], node_dict[0,n]]
    unodes = tri.unit_nodes()
    corners = [unodes[i] for i in corner_indices]

    for i in range(10):
        vertices = [numpy.random.randn(2) for vi in range(3)]
        map = tri.geometry.get_map_unit_to_global(vertices)
        global_corners = [map(pt) for pt in corners]
        for gc, v in zip(global_corners, vertices):
            assert la.norm(gc-v) < 1e-12
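
# Minimal standalone sketch of an affine unit-to-global triangle map (illustration only,
# plain numpy; for simplicity it uses the (0,0)-(1,0)-(0,1) reference triangle, which is
# not hedge's unit-simplex convention): x(r) = v0 + A r sends the unit vertices exactly to
# the prescribed global vertices, which is the property asserted above.
import numpy
import numpy.linalg as la

vertices = [numpy.random.randn(2) for vi in range(3)]
A = numpy.column_stack([vertices[1] - vertices[0], vertices[2] - vertices[0]])

def unit_to_global(r):
    return vertices[0] + numpy.dot(A, r)

unit_vertices = [numpy.array([0.0, 0.0]), numpy.array([1.0, 0.0]), numpy.array([0.0, 1.0])]
for uv, gv in zip(unit_vertices, vertices):
    assert la.norm(unit_to_global(uv) - gv) < 1e-13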
def test_tri_map_jacobian_and_mass_matrix():
    """Verify whether tri map jacobians recover known values of triangle area"""
    from hedge.discretization.local import TriangleDiscretization

    for i in range(1,10):
        edata = TriangleDiscretization(i)
        ones = numpy.ones((edata.node_count(),))
        unit_tri_area = 2
        error = la.norm(
            numpy.dot(ones,numpy.dot(edata.mass_matrix(), ones))-unit_tri_area)
        assert error < 1e-14

    for i in range(10):
        vertices = [numpy.random.randn(2) for vi in range(3)]
        map = edata.geometry.get_map_unit_to_global(vertices)
        mat = numpy.zeros((2,2))
        mat[:,0] = (vertices[1] - vertices[0])
        mat[:,1] = (vertices[2] - vertices[0])
        tri_area = abs(la.det(mat)/2)
        tri_area_2 = abs(unit_tri_area*map.jacobian())

        assert abs(tri_area - tri_area_2)/tri_area < 5e-15
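
# Standalone check of the area identity relied on above (illustration only, plain numpy):
# the area of the triangle (v0, v1, v2) equals |det([v1 - v0, v2 - v0])|/2.
import numpy
import numpy.linalg as la

v0 = numpy.array([0.0, 0.0])
v1 = numpy.array([4.0, 0.0])
v2 = numpy.array([0.0, 3.0])
mat = numpy.zeros((2, 2))
mat[:, 0] = v1 - v0
mat[:, 1] = v2 - v0
assert abs(abs(la.det(mat))/2 - 6.0) < 1e-13        # base 4, height 3 -> area 6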
def test_simp_nodes():
    """Verify basic assumptions on simplex interpolation nodes"""
    from hedge.discretization.local import \
            IntervalDiscretization, \
            TriangleDiscretization, \
            TetrahedronDiscretization

    els = [
            IntervalDiscretization(19),
            TriangleDiscretization(8),
            TriangleDiscretization(17),
            TetrahedronDiscretization(13)]

    for el in els:
        eps = 1e-10

        unodes = list(el.unit_nodes())
        assert len(unodes) == el.node_count()
        for ux in unodes:
            for uc in ux:
                assert uc >= -1-eps
            assert sum(ux) <= 1+eps

        try:
            equnodes = list(el.equidistant_unit_nodes())
        except AttributeError:
            assert isinstance(el, IntervalDiscretization)
        else:
            assert len(equnodes) == el.node_count()
            for ux in equnodes:
                for uc in ux:
                    assert uc >= -1-eps
                assert sum(ux) <= 1+eps

        for indices in el.node_tuples():
            for index in indices:
                assert index >= 0
            assert sum(indices) <= el.order
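
# Standalone count check related to the node tuples above (illustration only, plain
# python): the tuples (i, j) with i, j >= 0 and i + j <= n number (n+1)(n+2)/2, the same
# count a degree-n triangle's node_count() reports in the test above.
n = 8
tuples = [(i, j) for i in range(n + 1) for j in range(n + 1 - i)]
assert len(tuples) == (n + 1)*(n + 2)//2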
def test_filter():
    """Exercise mode-based filtering."""

    from hedge.mesh.generator import make_disk_mesh
    from hedge.discretization.local import TriangleDiscretization
    from math import sin

    mesh = make_disk_mesh(r=3.4, max_area=0.5)
    discr = discr_class(mesh,
                        TriangleDiscretization(5),
                        debug=discr_class.noninteractive_debug_flags())

    from hedge.optemplate.operators import FilterOperator
    from hedge.discretization import ExponentialFilterResponseFunction
    half_filter = FilterOperator(lambda mid, ldis: 0.5)
    for eg in discr.element_groups:
        fmat = half_filter.matrix(eg)
        n, m = fmat.shape
        assert la.norm(fmat - 0.5 * numpy.eye(n, m)) < 2e-15

    from numpy import dot

    def test_freq(n):
        a = numpy.array([1, n])

        def u_analytic(x, el):
            return sin(dot(a, x))

        exp_filter = FilterOperator(ExponentialFilterResponseFunction(0.9, 3)) \
                .bind(discr)

        u = discr.interpolate_volume_function(u_analytic)
        filt_u = exp_filter(u)

        int_error = abs(discr.integral(u) - discr.integral(filt_u))
        l2_ratio = discr.norm(filt_u) / discr.norm(u)
        assert int_error < 1e-14
        assert 0.96 < l2_ratio < 0.99999

    test_freq(3)
    test_freq(5)
    test_freq(9)
    test_freq(17)
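
# Standalone 1-D analogue of the filter behaviour asserted above (illustration only,
# plain numpy; the damping profile below is ad hoc, not hedge's
# ExponentialFilterResponseFunction): damping only the higher Legendre modes leaves the
# integral (the mode-0 content) unchanged while shrinking the L2 norm.
import numpy
import numpy.linalg as la
from numpy.polynomial import legendre as leg

order = 9
x, w = leg.leggauss(order + 1)
u = numpy.sin(4*x)                                  # nodal samples
V = leg.legvander(x, order)
modes = la.solve(V, u)                              # nodal -> modal coefficients

sigma = numpy.ones(order + 1)
sigma[1:] = numpy.exp(-2.0*(numpy.arange(1, order + 1)/float(order))**4)
u_filt = numpy.dot(V, sigma*modes)                  # damp, then return to nodal values

assert abs(numpy.dot(w, u) - numpy.dot(w, u_filt)) < 1e-13              # integral preserved
assert numpy.sqrt(numpy.dot(w, u_filt**2)) < numpy.sqrt(numpy.dot(w, u**2))   # L2 norm drops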
def test_face_vertex_order():
    """Verify that face_indices() emits face vertex indices in the right order"""
    from hedge.discretization.local import \
            IntervalDiscretization, \
            TriangleDiscretization, \
            TetrahedronDiscretization

    for el in [
            IntervalDiscretization(5),
            TriangleDiscretization(5),
            TetrahedronDiscretization(5)]:
        vertex_indices = el.vertex_indices()
        for fn, (face_vertices, face_indices) in enumerate(zip(
                el.geometry.face_vertices(vertex_indices),
                el.face_indices())):
            face_vertices_i = 0
            for fi in face_indices:
                if fi == face_vertices[face_vertices_i]:
                    face_vertices_i += 1

            assert face_vertices_i == len(face_vertices)
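
# The loop above is an "ordered subsequence" check; a minimal standalone version of the
# same idiom (illustration only, plain python):
def is_ordered_subsequence(sub, seq):
    it = iter(seq)
    return all(s in it for s in sub)

assert is_ordered_subsequence([1, 4], [1, 2, 3, 4])
assert not is_ordered_subsequence([4, 1], [1, 2, 3, 4])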
def test_simp_mass_and_diff_matrices_by_monomial():
    """Verify simplicial mass and differentiation matrices using monomials"""

    from hedge.discretization.local import \
            IntervalDiscretization, \
            TriangleDiscretization, \
            TetrahedronDiscretization
    from pytools import generate_nonnegative_integer_tuples_summing_to_at_most

    thresh = 1e-13

    from numpy import dot
    for el in [
            IntervalDiscretization(5),
            TriangleDiscretization(3),
            TetrahedronDiscretization(5),
    ]:
        for comb in generate_nonnegative_integer_tuples_summing_to_at_most(
                el.order, el.dimensions):
            ones = numpy.ones((el.node_count(), ))
            unodes = el.unit_nodes()
            f = Monomial(comb)
            f_n = numpy.array([f(x) for x in unodes])
            int_f_n = dot(ones, dot(el.mass_matrix(), f_n))
            int_f = f.simplex_integral()
            err = la.norm(int_f - int_f_n)
            if err > thresh:
                print "bad", el, comb, int_f, int_f_n, err
            assert err < thresh

            dmats = el.differentiation_matrices()
            for i in range(el.dimensions):
                df = f.diff(i)
                df = numpy.array([df(x) for x in unodes]) / 2
                df_n = dot(dmats[i], f_n)
                err = la.norm(df - df_n, numpy.Inf)
                if err > thresh:
                    print "bad-diff", comb, i, err
                assert err < thresh
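
# Standalone 1-D analogue of the mass-matrix half of the test above (illustration only,
# plain numpy; nodes and basis are not hedge's): with M = V^{-T} diag(||P_n||^2) V^{-1}
# built from a Legendre Vandermonde matrix, ones^T M f recovers the exact integral of any
# monomial of degree <= order on [-1, 1].
import numpy
import numpy.linalg as la
from numpy.polynomial import legendre as leg

order = 5
x = numpy.cos(numpy.pi*numpy.arange(order + 1)/order)
V = leg.legvander(x, order)
Vinv = la.inv(V)
norms2 = 2.0/(2.0*numpy.arange(order + 1) + 1.0)        # ||P_n||^2 on [-1, 1]
M = numpy.dot(Vinv.T*norms2, Vinv)
ones = numpy.ones(order + 1)

for k in range(order + 1):
    exact = 0.0 if k % 2 else 2.0/(k + 1)
    assert abs(numpy.dot(ones, numpy.dot(M, x**k)) - exact) < 1e-13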
def test_simp_basis_grad():
    """Do a simplistic FD-style check on the differentiation matrix"""
    from itertools import izip
    from hedge.discretization.local import \
            IntervalDiscretization, \
            TriangleDiscretization, \
            TetrahedronDiscretization
    from random import uniform

    els = [
            (1, IntervalDiscretization(5)),
            (1, TriangleDiscretization(8)),
            (3, TetrahedronDiscretization(7))]

    for err_factor, el in els:
        d = el.dimensions
        for i_bf, (bf, gradbf) in \
                enumerate(izip(el.basis_functions(), el.grad_basis_functions())):
            for i in range(10):
                base = -0.95
                remaining = 1.90
                r = numpy.zeros((d,))
                for i in range(d):
                    rn = uniform(0, remaining)
                    r[i] = base+rn
                    remaining -= rn

                from pytools import wandering_element
                h = 1e-4
                gradbf_v = numpy.array(gradbf(r))
                approx_gradbf_v = numpy.array([
                    (bf(r+h*dir) - bf(r-h*dir))/(2*h)
                    for dir in [numpy.array(dir) for dir in wandering_element(d)]
                    ])
                err = la.norm(approx_gradbf_v-gradbf_v, numpy.Inf)
                #print el.dimensions, el.order, i_bf, err
                assert err < err_factor*h
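
# Minimal standalone version of the central-difference gradient check above (illustration
# only, plain numpy), applied to a simple 2-D function with a known gradient.
import numpy
import numpy.linalg as la
from math import sin, cos

def fd_test_f(p):
    return sin(p[0])*cos(2*p[1])

def fd_test_grad(p):
    return numpy.array([cos(p[0])*cos(2*p[1]), -2*sin(p[0])*sin(2*p[1])])

h = 1e-4
p = numpy.array([0.3, -0.7])
approx_grad = numpy.array([(fd_test_f(p + h*e) - fd_test_f(p - h*e))/(2*h)
    for e in numpy.eye(2)])
assert la.norm(approx_grad - fd_test_grad(p), numpy.inf) < 10*h**2   # central differences are O(h^2)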
def test_mapping_differences_tri():
    """Check that triangle interpolation is independent of mapping to reference
    """
    from hedge.discretization.local import TriangleDiscretization
    from random import random
    from pytools import generate_permutations

    def shift(list):
        return list[1:] + [list[0]]

    class LinearCombinationOfFunctions:
        def __init__(self, coefficients, functions, premap):
            self.coefficients = coefficients
            self.functions = functions
            self.premap = premap

        def __call__(self, x):
            return sum(coeff * f(self.premap(x))
                       for coeff, f in zip(self.coefficients, self.functions))

    def random_barycentric_coordinates(dim):
        remain = 1
        coords = []
        for i in range(dim):
            coords.append(random() * remain)
            remain -= coords[-1]
        coords.append(remain)
        return coords

    tri = TriangleDiscretization(5)

    for trial_number in range(10):
        vertices = [numpy.random.randn(2) for vi in range(3)]
        map = tri.geometry.get_map_unit_to_global(vertices)
        nodes = [map(node) for node in tri.unit_nodes()]
        node_values = numpy.array([random() for node in nodes])

        functions = []
        for pvertices in generate_permutations(vertices):
            pmap = tri.geometry.get_map_unit_to_global(pvertices)
            pnodes = [pmap(node) for node in tri.unit_nodes()]

            # map from pnode# to node#
            nodematch = {}
            for pi, pn in enumerate(pnodes):
                for i, n in enumerate(nodes):
                    if la.norm(n - pn) < 1e-13:
                        nodematch[pi] = i
                        break

            pnode_values = numpy.array(
                [node_values[nodematch[pi]] for pi in range(len(nodes))])

            interp_f = LinearCombinationOfFunctions(
                la.solve(tri.vandermonde(), pnode_values),
                tri.basis_functions(), pmap.inverted())

            # verify interpolation property
            #for n, nv in zip(pnodes, pnode_values):
            #assert abs(interp_f(n) - nv) < 1e-13

            functions.append(interp_f)

        for subtrial_number in range(15):
            pt_in_element = sum(coeff * vertex for coeff, vertex in zip(
                random_barycentric_coordinates(2), vertices))
            f_values = [f(pt_in_element) for f in functions]
            avg = sum(f_values) / len(f_values)
            err = [abs(fv - avg) for fv in f_values]
            assert max(err) < 1e-12
def test_simp_gauss_theorem():
    """Verify Gauss's theorem explicitly on simplicial elements"""

    from hedge.discretization.local import \
            IntervalDiscretization, \
            TriangleDiscretization, \
            TetrahedronDiscretization
    from operator import add
    from math import sin, cos

    from numpy import dot

    def f1_1d(x):
        return sin(3 * x[0])

    def f1_2d(x):
        return sin(3 * x[0]) + cos(3 * x[1])

    def f2_2d(x):
        return sin(2 * x[0]) + cos(x[1])

    def f1_3d(x):
        return sin(3 * x[0]) + cos(3 * x[1]) + sin(1.9 * x[2])

    def f2_3d(x):
        return sin(1.2 * x[0]) + cos(2.1 * x[1]) - cos(1.5 * x[2])

    def f3_3d(x):
        return 5 * sin(-0.2 * x[0]) - 3 * cos(x[1]) + cos(x[2])

    #def f1_3d(x):
    #return 1
    #def f2_3d(x):
    #return 0
    #def f3_3d(x):
    #return 0

    def d(imap, coordinate, field):
        col = imap.matrix[:, coordinate]
        matrices = el.differentiation_matrices()
        return reduce(add, (coeff * dot(dmat, field)
                            for dmat, coeff in zip(matrices, col)))

    array = numpy.array

    intervals = [[array([-0.5]), array([17.])]]

    triangles = [
        [
            array([-7.1687642250744492, 0.63058995062684642]),
            array([9.9744219044921199, 6.6530989283689781]),
            array([12.269380138171147, -17.529689194536481])
        ],
        [
            array([-3.1285787297852634, -16.579403405465403]),
            array([-5.2882160938912515, -6.2209234150214137]),
            array([11.251223490342774, 4.6571427341871727])
        ],
        [
            array([4.7407472917152553, -18.406868078408063]),
            array([1.8224524488556271, 11.551374404003361]),
            array([2.523148394963088, 1.632574414790982])
        ],
        [
            array([-11.523714017493292, -14.2820557378961]),
            array([-0.44311816855771136, 19.572194735728861]),
            array([5.2855990566779445, -9.8743423935894388])
        ],
        [
            array([1.113949150102217, -3.2255502625302639]),
            array([-13.028732972681315, 2.1525752429773379]),
            array([-2.3929000970202705, 6.2884649052982198])
        ],
        [
            array([-8.0878061368549741, -14.604092423350167]),
            array([4.5339922477199996, 8.3770287646932022]),
            array([-5.2180549365480156, -1.9930760171433717])
        ],
        [
            array([-1.9047012017294165, -3.6517882549544485]),
            array([3.1461902282192784, 5.7397849191668229]),
            array([-11.072761256907262, -8.3758508470287509])
        ],
        [
            array([8.6609581113102934, 9.1121629958018566]),
            array([3.8230948675835497, -14.004679313330751]),
            array([10.975623610855521, 1.6267418698764553])
        ],
        [
            array([13.959685276941629, -12.201892555481464]),
            array([-7.8057604576925499, -3.5283871457281757]),
            array([-0.41961743047735317, -3.2615635891671872])
        ],
        [
            array([-9.8469907360335078, 6.0635407355366242]),
            array([7.8727080309703439, 7.634505157189091]),
            array([-2.7723038834027118, 8.5441656500931789])
        ],
    ]
    tets = [
        #[make_random_vector(3, num.Float) for i in range(4)]
        #for j in range(10)
        [
            array([
                -0.087835976182812386, 8.4241880323369127, 2.6557808710807933
            ]),
            array(
                [5.4875560966799677, -7.5530368326122499, 8.4703868377747877]),
            array(
                [-8.4888098806626751, 1.8369058453192324,
                 -6.9041468708803713]),
            array(
                [17.327527449499168, -9.0586108433594319, 5.7459746913914636])
        ],  # noqa
        [
            array(
                [16.993689961344593, -12.116305360441197,
                 -12.711045554409088]),
            array(
                [-2.0324332275643817, -5.0524187595904335,
                 5.9257028535230383]),
            array(
                [6.4221144729287687, -7.2496949199427245,
                 -1.1590391996379827]),
            array(
                [-5.7529432953399171, -6.9587987820990262, 3.7223773892240426])
        ],  # noqa
        [
            array([
                -0.4423263927732628, -1.6306971591009138, -1.2554069824001064
            ]),
            array(
                [-9.1171749892163785, 14.232868970928301, 4.6548620163014505]),
            array(
                [16.554360867165595, -2.1451702825571202,
                 -1.9050837421951314]),
            array(
                [-8.7455417971698139, 19.016251630886945, -15.137691422305545])
        ],  # noqa
        [
            array(
                [-1.9251811954429843, -4.5369007736338665,
                 9.2675942450331963]),
            array([
                -13.586778017089083, -3.6666239130220553, -14.095112617514117
            ]),
            array([
                -15.014799506040006, -3.4363888726140681, -0.85237426213437206
            ]),
            array(
                [6.3854978041452597, 13.293981904633554, -7.8432774486183146])
        ],  # noqa
        [
            array([-6.761839340374304, 14.864784704377955, 1.574274771089831]),
            array(
                [-0.1823468063801317, -21.892423945260102,
                 11.565172070570537]),
            array(
                [-0.14658389181168049, 13.07241603902848, 7.2652184007323042]),
            array(
                [-20.35574011769496, 14.816503793175773, -7.2800214849607254])
        ],  # noqa
        [
            array([23.294362873156878, 13.644282203469114,
                   10.383738204469243]),
            array(
                [-19.792088993452555, 0.4209925297886693,
                 -7.3933945447515388]),
            array(
                [-2.832898385995708, -1.6480401382241885,
                 -6.2689214950820924]),
            array([
                -0.081772347748623617, -3.3803599922239673, -19.614368674546114
            ])
        ],  # noqa
        [
            array([
                0.43913744703796659, -16.473036116412242, -0.8653853759721295
            ]),
            array([
                -7.3270752283484297, -0.97723025169973787, 2.1330514627504464
            ]),
            array(
                [3.8730334021748307, -9.0983850278388143, 3.3578300089831501]),
            array(
                [18.639504439820936, 20.594835769217696, -10.666261239487298])
        ],  # noqa
        [
            array([
                -12.786230591302058, -9.2931510923111169, -2.1598642469378935
            ]),
            array([
                -4.0458439207057459, -9.0298998997705144, -0.11666215074316685
            ]),
            array([7.5023999981398424, 4.8603369473110583,
                   2.1813627427875013]),
            array(
                [2.9579841500551272, -22.563123335973565, 10.335559822513606])
        ],  # noqa
        [
            array(
                [-7.7732699602949893, 15.816977096296963,
                 -6.8826683632918728]),
            array(
                [7.6233333630240256, -9.3309869383569026,
                 0.50189282953625991]),
            array(
                [-11.272342858699034, 1.089016041114454, -6.0359393299451476]),
            array([
                -6.4746449930954348, -0.026130504314747997, -2.2786267101817677
            ])
        ],  # noqa
        [
            array(
                [-18.243993907118757, 5.0646875774948974,
                 -9.2110046334596856]),
            array(
                [-8.1550804560957264, -3.1021806460634913,
                 7.5622831439916105]),
            array([19.460768761970783, 17.494565076685859,
                   16.295621155355697]),
            array(
                [4.6186236213250131, -1.3869183721072562, -0.2159066724152843])
        ],  # noqa
    ]

    for el_geoms, el, f in [
        (intervals, IntervalDiscretization(9), [f1_1d]),
        (triangles, TriangleDiscretization(9), [f1_2d, f2_2d]),
        (tets, TetrahedronDiscretization(1), [f1_3d, f2_3d, f3_3d]),
    ]:
        for vertices in el_geoms:
            ones = numpy.ones((el.node_count(), ))
            face_ones = numpy.ones((len(el.face_indices()[0]), ))

            map = el.geometry.get_map_unit_to_global(vertices)
            imap = map.inverted()

            mapped_points = [map(node) for node in el.unit_nodes()]

            f_n = [numpy.array([fi(x) for x in mapped_points]) for fi in f]
            df_n = [d(imap, i, f_n[i]) for i, fi_n in enumerate(f_n)]

            int_div_f = abs(map.jacobian()) * sum(
                dot(ones, dot(el.mass_matrix(), dfi_n)) for dfi_n in df_n)

            if False:
                boundary_comp = [  # noqa
                    array([
                        fjac * dot(
                            face_ones,
                            dot(el.face_mass_matrix(),
                                numpy.take(fi_n, face_indices))) * n_coord
                        for fi_n, n_coord in zip(f_n, n)
                    ]) for face_indices, n, fjac in zip(
                        el.face_indices(),
                        *el.face_normals_and_jacobians(vertices, map))
                ]

            boundary_sum = sum(
                sum(fjac * dot(
                    face_ones,
                    dot(el.face_mass_matrix(), numpy.take(fi_n, face_indices)))
                    * n_coord for fi_n, n_coord in zip(f_n, n))
                for face_indices, n, fjac in zip(
                    el.face_indices(),
                    *el.geometry.face_normals_and_jacobians(vertices, map)))

            #print el.face_normals_and_jacobians(map)[1]
            #print 'mp', [mapped_points[fi] for fi in el.face_indices()[2]]
            #print num.take(f_n[0], el.face_indices()[2])
            #print 'bc', boundary_comp
            #print 'bs', boundary_sum
            #print 'idiv', int_div_f
            #print abs(boundary_sum-int_div_f)
            assert abs(boundary_sum - int_div_f) < 1e-12
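
# Standalone 1-D instance of the identity verified above (illustration only, plain numpy):
# on an interval, Gauss's theorem reduces to the fundamental theorem of calculus, so the
# integral of d/dx sin(3x) over [-0.5, 17] must equal the boundary term sin(3b) - sin(3a).
import numpy
from numpy.polynomial import legendre as leg
from math import sin

a, b = -0.5, 17.0
x, w = leg.leggauss(60)
xm = 0.5*(b - a)*x + 0.5*(b + a)          # map the rule from [-1, 1] to [a, b]
wm = 0.5*(b - a)*w
int_df = numpy.sum(wm*3*numpy.cos(3*xm))
assert abs(int_df - (sin(3*b) - sin(3*a))) < 1e-10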
def test_elliptic():
    """Test various properties of elliptic operators."""

    from hedge.tools import unit_vector

    def matrix_rep(op):
        h, w = op.shape
        mat = numpy.zeros(op.shape)
        for j in range(w):
            mat[:, j] = op(unit_vector(w, j))
        return mat

    def check_grad_mat():
        import pyublas
        if not pyublas.has_sparse_wrappers():
            return

        grad_mat = op.grad_matrix()

        #print len(discr), grad_mat.nnz, type(grad_mat)
        for i in range(10):
            u = numpy.random.randn(len(discr))

            mat_result = grad_mat * u
            op_result = numpy.hstack(op.grad(u))

            err = la.norm(mat_result - op_result) * la.norm(op_result)
            assert err < 1e-5

    def check_matrix_tgt():
        # Note: this helper is never called below; numpy.zeros takes no flavor= keyword,
        # so this block appears to date from hedge's older, non-numpy array types.
        big = numpy.zeros((20, 20), flavor=numpy.SparseBuildMatrix)
        small = numpy.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        print small
        from hedge._internal import MatrixTarget
        tgt = MatrixTarget(big, 4, 4)
        tgt.begin(small.shape[0], small.shape[1])
        print "YO"
        tgt.add_coefficients(4, 4, small)
        print "DUDE"
        tgt.finalize()
        print big

    import pymbolic
    v_x = pymbolic.var("x")
    truesol = pymbolic.parse("math.sin(x[0]**2*x[1]**2)")
    truesol_c = pymbolic.compile(truesol, variables=["x"])

    def laplace(expression, variables):
        return sum(
            pymbolic.diff(pymbolic.diff(expression, var), var)
            for var in variables)

    rhs = laplace(truesol, [v_x[0], v_x[1]])
    rhs_c = pymbolic.compile(rhs, variables=["x", "el"])

    from hedge.mesh import TAG_ALL, TAG_NONE
    from hedge.mesh.generator import make_disk_mesh
    mesh = make_disk_mesh(r=0.5, max_area=0.1, faces=20)
    mesh = mesh.reordered_by("cuthill")

    from hedge.backends import CPURunContext
    rcon = CPURunContext()

    from hedge.tools import EOCRecorder
    eocrec = EOCRecorder()
    for order in [1, 2, 3, 4, 5]:
        for flux in ["ldg", "ip"]:
            from hedge.discretization.local import TriangleDiscretization
            discr = rcon.make_discretization(
                mesh,
                TriangleDiscretization(order),
                debug=discr_class.noninteractive_debug_flags())

            from hedge.data import GivenFunction
            from hedge.models.poisson import PoissonOperator
            op = PoissonOperator(
                discr.dimensions,
                dirichlet_tag=TAG_ALL,
                dirichlet_bc=GivenFunction(lambda x, el: truesol_c(x)),
                neumann_tag=TAG_NONE)

            bound_op = op.bind(discr)

            if order <= 3:
                mat = matrix_rep(bound_op)
                sym_err = la.norm(mat - mat.T)
                #print sym_err
                assert sym_err < 1e-12
                #check_grad_mat()

            from hedge.iterative import parallel_cg
            truesol_v = discr.interpolate_volume_function(
                lambda x, el: truesol_c(x))
            sol_v = -parallel_cg(rcon,
                                 -bound_op,
                                 bound_op.prepare_rhs(
                                     discr.interpolate_volume_function(rhs_c)),
                                 tol=1e-10,
                                 max_iterations=40000)

            eocrec.add_data_point(order, discr.norm(sol_v - truesol_v))

    #print eocrec.pretty_print()
    assert eocrec.estimate_order_of_convergence()[0, 1] > 8
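
# Standalone sketch of the matrix_rep idea used above (illustration only, plain numpy):
# any linear operator becomes an explicit matrix by applying it to the unit vectors, which
# is what makes the symmetry check against mat.T possible.
import numpy
import numpy.linalg as la

def matrix_rep_dense(apply_op, n):
    mat = numpy.zeros((n, n))
    for j in range(n):
        e_j = numpy.zeros(n)
        e_j[j] = 1.0
        mat[:, j] = apply_op(e_j)       # column j is the image of the j-th unit vector
    return mat

A = numpy.array([[2.0, -1.0, 0.0], [-1.0, 2.0, -1.0], [0.0, -1.0, 2.0]])
mat = matrix_rep_dense(lambda u: numpy.dot(A, u), 3)
assert la.norm(mat - A) < 1e-15
assert la.norm(mat - mat.T) < 1e-15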
def test_tri_nodes_against_known_values():
    """Check triangle nodes against a previous implementation"""
    from hedge.discretization.local import TriangleDiscretization
    triorder = 8
    tri = TriangleDiscretization(triorder)

    def tri_equilateral_nodes_reference(self):
        # This is the old, more explicit, less general way of computing
        # the triangle nodes. Below, we compare its results with those of the
        # new routine.

        alpha_opt = [0.0000, 0.0000, 1.4152, 0.1001, 0.2751, 0.9800, 1.0999,
                1.2832, 1.3648, 1.4773, 1.4959, 1.5743, 1.5770, 1.6223, 1.6258]

        try:
            alpha = alpha_opt[self.order-1]
        except IndexError:
            alpha = 5/3

        from hedge.discretization.local import WarpFactorCalculator
        from math import sin, cos, pi

        warp = WarpFactorCalculator(self.order)

        edge1dir = numpy.array([1,0])
        edge2dir = numpy.array([cos(2*pi/3), sin(2*pi/3)])
        edge3dir = numpy.array([cos(4*pi/3), sin(4*pi/3)])

        for bary in self.equidistant_barycentric_nodes():
            lambda1, lambda2, lambda3 = bary

            # find equidistant (x,y) coordinates in equilateral triangle
            point = self.barycentric_to_equilateral(bary)

            # compute blend factors
            blend1 = 4*lambda1*lambda2 # nonzero on AB
            blend2 = 4*lambda3*lambda2 # nonzero on BC
            blend3 = 4*lambda3*lambda1 # nonzero on AC

            # calculate amount of warp for each node, for each edge
            warp1 = blend1*warp(lambda2 - lambda1)*(1 + (alpha*lambda3)**2)
            warp2 = blend2*warp(lambda3 - lambda2)*(1 + (alpha*lambda1)**2)
            warp3 = blend3*warp(lambda1 - lambda3)*(1 + (alpha*lambda2)**2)

            # return warped point
            yield point + warp1*edge1dir + warp2*edge2dir + warp3*edge3dir

    if False:
        outf = open("trinodes1.dat", "w")
        for ux in tri.equilateral_nodes():
            outf.write("%g\t%g\n" % tuple(ux))
        outf = open("trinodes2.dat", "w")
        for ux in tri_equilateral_nodes_reference(tri):
            outf.write("%g\t%g\n" % tuple(ux))

    for n1, n2 in zip(tri.equilateral_nodes(),
            tri_equilateral_nodes_reference(tri)):
        assert la.norm(n1-n2) < 3e-15

    def node_indices_2(order):
        for n in range(0, order+1):
            for m in range(0, order+1-n):
                yield m, n

    assert set(tri.node_tuples()) == set(node_indices_2(triorder))
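
# Minimal standalone sketch of the barycentric-to-equilateral map used by the reference
# routine above (illustration only, plain numpy; the vertex placement is the common
# Hesthaven/Warburton convention and may differ from hedge's internals): a barycentric
# triple is simply a convex combination of the equilateral triangle's vertices.
import numpy
import numpy.linalg as la
from math import sqrt

equi_vertices = numpy.array([
    [-1.0, -1.0/sqrt(3.0)],
    [1.0, -1.0/sqrt(3.0)],
    [0.0, 2.0/sqrt(3.0)],
    ])

def barycentric_to_equilateral_ref(lambdas):
    return numpy.dot(lambdas, equi_vertices)

# the barycenter (1/3, 1/3, 1/3) lands on the centroid, the origin for this vertex choice
center = barycentric_to_equilateral_ref(numpy.array([1.0, 1.0, 1.0])/3.0)
assert la.norm(center) < 1e-14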
def test_convergence_advec_2d():
    """Test whether 2D advection actually converges"""

    import pyublas  # noqa
    from hedge.mesh.generator import make_disk_mesh, make_regular_rect_mesh
    from hedge.discretization.local import TriangleDiscretization
    from hedge.timestep import RK4TimeStepper
    from hedge.tools import EOCRecorder
    from math import sin, pi
    from hedge.models.advection import StrongAdvectionOperator
    from hedge.data import TimeDependentGivenFunction

    v = numpy.array([0.27, 0])
    norm_a = la.norm(v)

    from numpy import dot

    def f(x):
        return sin(x)

    def u_analytic(x, el, t):
        return f((-dot(v, x) / norm_a + t * norm_a))

    def boundary_tagger(vertices, el, face_nr, all_v):
        if dot(el.face_normals[face_nr], v) < 0:
            return ["inflow"]
        else:
            return ["outflow"]

    for mesh in [
            # non-periodic
            make_disk_mesh(r=pi, boundary_tagger=boundary_tagger,
                           max_area=0.5),
            # periodic
            make_regular_rect_mesh(
                a=(0, 0),
                b=(2 * pi, 1),
                n=(8, 4),
                periodicity=(True, False),
                boundary_tagger=boundary_tagger,
            )
    ]:
        for flux_type in StrongAdvectionOperator.flux_types:
            eoc_rec = EOCRecorder()

            for order in [1, 2, 3, 4, 5, 6]:
                discr = discr_class(
                    mesh,
                    TriangleDiscretization(order),
                    debug=discr_class.noninteractive_debug_flags())
                op = StrongAdvectionOperator(
                    v,
                    inflow_u=TimeDependentGivenFunction(u_analytic),
                    flux_type=flux_type)

                u = discr.interpolate_volume_function(
                    lambda x, el: u_analytic(x, el, 0))

                stepper = RK4TimeStepper()
                dt = op.estimate_timestep(discr, stepper=stepper)
                nsteps = int(1 / dt)
                rhs = op.bind(discr)
                for step in range(nsteps):
                    u = stepper(u, step * dt, dt, rhs)

                u_true = discr.interpolate_volume_function(
                    lambda x, el: u_analytic(x, el, nsteps * dt))
                error = u - u_true
                error_l2 = discr.norm(error)
                eoc_rec.add_data_point(order, error_l2)

            if False:
                print "%s\n%s\n" % (flux_type.upper(), "-" * len(flux_type))
                print eoc_rec.pretty_print(abscissa_label="Poly. Order",
                                           error_label="L2 Error")

            assert eoc_rec.estimate_order_of_convergence()[0, 1] > 4
            assert eoc_rec.estimate_order_of_convergence(2)[-1, 1] > 10
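
# Quick standalone check of the analytic profile used above (illustration only, plain
# numpy): u(x, t) = f(-v.x/|v| + t*|v|) satisfies the advection equation
# u_t + v.grad(u) = 0; the residual is estimated with central differences at one point.
import numpy
import numpy.linalg as la
from math import sin

v = numpy.array([0.27, 0.0])
norm_v = la.norm(v)

def u(x, t):
    return sin(-numpy.dot(v, x)/norm_v + t*norm_v)

x0 = numpy.array([0.4, -1.1])
t0, h = 0.3, 1e-6
u_t = (u(x0, t0 + h) - u(x0, t0 - h))/(2*h)
grad_u = numpy.array([(u(x0 + h*e, t0) - u(x0 - h*e, t0))/(2*h) for e in numpy.eye(2)])
assert abs(u_t + numpy.dot(v, grad_u)) < 1e-8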
def test_interior_fluxes_tri():
    """Check triangle surface integrals computed using interior fluxes
    against their known values.
    """

    from math import pi, sin, cos

    def round_trip_connect(start, end):
        for i in range(start, end):
            yield i, i + 1
        yield end, start

    a = -pi
    b = pi
    points = [(a, 0), (b, 0), (a, -1), (b, -1), (a, 1), (b, 1)]

    import meshpy.triangle as triangle

    mesh_info = triangle.MeshInfo()
    mesh_info.set_points(points)
    mesh_info.set_facets([(0, 1), (1, 3), (3, 2), (2, 0), (0, 4), (4, 5),
                          (1, 5)])

    mesh_info.regions.resize(2)
    mesh_info.regions[0] = [
        0,
        -0.5,  # coordinate
        1,  # lower element tag
        0.1,  # max area
    ]
    mesh_info.regions[1] = [
        0,
        0.5,  # coordinate
        2,  # upper element tag
        0.01,  # max area
    ]

    generated_mesh = triangle.build(mesh_info,
                                    attributes=True,
                                    volume_constraints=True)

    #triangle.write_gnuplot_mesh("mesh.dat", generated_mesh)

    def element_tagger(el):
        if generated_mesh.element_attributes[el.id] == 1:
            return ["upper"]
        else:
            return ["lower"]

    from hedge.mesh import make_conformal_mesh
    mesh = make_conformal_mesh(generated_mesh.points, generated_mesh.elements)

    from hedge.discretization.local import TriangleDiscretization
    from hedge.discretization import ones_on_volume
    discr = discr_class(mesh,
                        TriangleDiscretization(4),
                        debug=discr_class.noninteractive_debug_flags())

    def f_u(x, el):
        if generated_mesh.element_attributes[el.id] == 1:
            return cos(x[0] - x[1])
        else:
            return 0

    def f_l(x, el):
        if generated_mesh.element_attributes[el.id] == 0:
            return sin(x[0] - x[1])
        else:
            return 0

    # u_l = discr.interpolate_volume_function(f_l)
    u_u = discr.interpolate_volume_function(f_u)
    u = u_u + u_u

    #discr.visualize_vtk("dual.vtk", [("u", u)])

    from hedge.flux import make_normal, FluxScalarPlaceholder
    from hedge.optemplate import Field, get_flux_operator
    fluxu = FluxScalarPlaceholder()
    res = discr.compile(
        get_flux_operator(
            (fluxu.int - fluxu.ext) * make_normal(discr.dimensions)[1]) *
        Field("u"))(u=u)

    ones = ones_on_volume(discr)
    err = abs(numpy.dot(res, ones))
    #print err
    assert err < 5e-14
def test_simp_face_normals_and_jacobians():
    """Check computed face normals and face jacobians on simplicial elements
    """
    from hedge.discretization.local import \
            IntervalDiscretization, \
            TriangleDiscretization, \
            TetrahedronDiscretization
    from hedge.mesh.element import Triangle
    from numpy import dot

    for el in [
            IntervalDiscretization(3),
            TetrahedronDiscretization(1),
            TriangleDiscretization(4),
            ]:
        for i in range(50):
            geo = el.geometry

            vertices = [numpy.random.randn(el.dimensions)
                    for vi in range(el.dimensions+1)]
            #array = num.array
            #vertices = [array([-1, -1.0, -1.0]), array([1, -1.0, -1.0]), array([-1.0, 1, -1.0]), array([-1.0, -1.0, 1.0])]
            map = geo.get_map_unit_to_global(vertices)

            unodes = el.unit_nodes()
            nodes = [map(v) for v in unodes]

            all_vertex_indices = range(el.dimensions+1)

            for face_i, fvi, normal, jac in \
                    zip(el.face_indices(),
                            geo.face_vertices(all_vertex_indices),
                            *geo.face_normals_and_jacobians(vertices, map)):
                mapped_corners = [vertices[i] for i in fvi]
                mapped_face_basis = [mc-mapped_corners[0] for mc in mapped_corners[1:]]

                # face vertices must be among all face nodes
                close_nodes = 0
                for fi in face_i:
                    face_node = nodes[fi]
                    for mc in mapped_corners:
                        if la.norm(mc-face_node) < 1e-13:
                            close_nodes += 1

                assert close_nodes == len(mapped_corners)

                opp_node = (set(all_vertex_indices) - set(fvi)).__iter__().next()
                mapped_opposite = vertices[opp_node]

                if el.dimensions == 1:
                    true_jac = 1
                elif el.dimensions == 2:
                    true_jac = la.norm(mapped_corners[1]-mapped_corners[0])/2
                elif el.dimensions == 3:
                    from hedge.tools import orthonormalize
                    mapped_face_projection = numpy.array(
                            orthonormalize(mapped_face_basis))
                    projected_corners = (
                            [ numpy.zeros((2,))]
                            + [dot(mapped_face_projection, v) for v in mapped_face_basis])
                    true_jac = abs(Triangle
                            .get_map_unit_to_global(projected_corners)
                            .jacobian())
                else:
                    assert False, "this test does not support %d dimensions yet" % el.dimensions

                #print abs(true_jac-jac)/true_jac
                #print "aft, bef", la.norm(mapped_end-mapped_start),la.norm(end-start)

                assert abs(true_jac - jac)/true_jac < 1e-13
                assert abs(la.norm(normal) - 1) < 1e-13
                for mfbv in mapped_face_basis:
                    assert abs(dot(normal, mfbv)) < 1e-13

                for mc in mapped_corners:
                    assert dot(mapped_opposite-mc, normal) < 0
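
# Standalone 2-D sketch of the face quantities checked above (illustration only, plain
# numpy): for a triangle edge from p0 to p1, a unit normal is the 90-degree-rotated,
# normalized edge vector, and the face Jacobian (physical edge length over the reference
# edge length 2) is |p1 - p0|/2.
import numpy
import numpy.linalg as la

p0 = numpy.array([0.0, 0.0])
p1 = numpy.array([3.0, 4.0])
edge = p1 - p0
normal = numpy.array([edge[1], -edge[0]])/la.norm(edge)
face_jacobian = la.norm(edge)/2

assert abs(numpy.dot(normal, edge)) < 1e-14     # perpendicular to the edge
assert abs(la.norm(normal) - 1) < 1e-14         # unit length
assert abs(face_jacobian - 2.5) < 1e-14         # edge length 5 over reference length 2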
from __future__ import division
from hedge.discretization.local import TriangleDiscretization

tri = TriangleDiscretization(5)

import Gnuplot

gp = Gnuplot.Gnuplot()

import numpy

xpts = numpy.arange(-1.5, 1.5, 0.1)
ypts = numpy.arange(-1.5, 1.5, 0.1)

gp("set zrange [-3:3]")

for bfi, bf in zip(tri.generate_mode_identifiers(), tri.basis_functions()):
    lines = []
    for x in xpts:
        values = []
        for y in ypts:
            values.append((x, y, bf(numpy.array((x, y)))))
        lines.append(Gnuplot.Data(values, with_="lines"))

    for y in xpts:
        values = []
        for x in ypts:
            values.append((x, y, bf(numpy.array((x, y)))))
        lines.append(Gnuplot.Data(values, with_="lines"))

    tri = numpy.array([(-1, -1, 0), (-1, 1, 0), (1, -1, 0), (-1, -1, 0)])