Example #1
    def __init__(self, order, dimension):
        """
        :arg order: A parameter correlated with the total degree of polynomials
            that are integrated exactly. (See also :attr:`exact_to`.)
        :arg dimension: The number of dimensions for the quadrature rule.
            Any positive integer.
        """
        s = order
        n = dimension
        d = 2*s+1

        self.exact_to = d

        if dimension == 0:
            nodes = np.zeros((dimension, 1))
            weights = np.ones(1)

            Quadrature.__init__(self, nodes, weights)
            return

        from pytools import \
                generate_decreasing_nonnegative_tuples_summing_to, \
                generate_unique_permutations, \
                factorial, \
                wandering_element

        points_to_weights = {}

        for i in range(s+1):
            weight = (-1)**i * 2**(-2*s) \
                    * (d + n-2*i)**d \
                    / factorial(i) \
                    / factorial(d+n-i)

            for t in generate_decreasing_nonnegative_tuples_summing_to(s-i, n+1):
                for beta in generate_unique_permutations(t):
                    denominator = d+n-2*i
                    point = tuple(
                            _simplify_fraction((2*beta_i+1, denominator))
                            for beta_i in beta)

                    points_to_weights[point] = \
                            points_to_weights.get(point, 0) + weight

        from operator import add

        vertices = [-1 * np.ones((n,))] \
                + [np.array(x)
                        for x in wandering_element(n, landscape=-1, wanderer=1)]

        nodes = []
        weights = []

        dim_factor = 2**n
        for p, w in six.iteritems(points_to_weights):
            real_p = reduce(add, (a/b*v for (a, b), v in zip(p, vertices)))
            nodes.append(real_p)
            weights.append(dim_factor*w)

        Quadrature.__init__(self, np.array(nodes).T, np.array(weights))
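A note on the construction above (my own summary, read directly off the code rather than quoted from a reference): the loop over i builds the level-i weight

    w_i = (-1)^i * 2^(-2s) * (d + n - 2i)^d / (i! * (d + n - i)!),    with d = 2s + 1,

which matches the Grundmann-Möller simplex rule, and each node is the combination sum_j (2*beta_j + 1)/(d + n - 2i) * v_j of the n+1 simplex vertices v_j, where beta runs over nonnegative integer tuples of length n+1 summing to s - i. The final dim_factor = 2**n rescales the weights to the bi-unit simplex spanned by the vertices list.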
Example #2
    def __init__(self, order, dimension):
        """
        :arg order: A parameter correlated with the total degree of polynomials
            that are integrated exactly. (See also :attr:`exact_to`.)
        :arg dimension: The number of dimensions for the quadrature rule.
            Any positive integer.
        """
        s = order
        n = dimension
        d = 2*s + 1

        self.exact_to = d

        if dimension == 0:
            nodes = np.zeros((dimension, 1))
            weights = np.ones(1)

            Quadrature.__init__(self, nodes, weights)
            return

        from pytools import \
                generate_decreasing_nonnegative_tuples_summing_to, \
                generate_unique_permutations, \
                factorial, \
                wandering_element

        points_to_weights = {}

        for i in range(s + 1):
            weight = (-1)**i * 2**(-2*s) \
                    * (d + n - 2*i)**d \
                    / factorial(i) \
                    / factorial(d + n - i)

            for t in generate_decreasing_nonnegative_tuples_summing_to(s - i, n + 1):
                for beta in generate_unique_permutations(t):
                    denominator = d + n - 2*i
                    point = tuple(
                            _simplify_fraction((2*beta_i + 1, denominator))
                            for beta_i in beta)

                    points_to_weights[point] = \
                            points_to_weights.get(point, 0) + weight

        from operator import add

        vertices = ([-1 * np.ones((n,))]
                + [np.array(x)
                    for x in wandering_element(n, landscape=-1, wanderer=1)])

        nodes = []
        weights = []

        dim_factor = 2**n
        for p, w in points_to_weights.items():
            real_p = reduce(add, (a/b * v for (a, b), v in zip(p, vertices)))
            nodes.append(real_p)
            weights.append(dim_factor * w)

        Quadrature.__init__(self, np.array(nodes).T, np.array(weights))
Example #3
    def simplex_integral(self):
        """Integral over the unit simplex."""
        from pytools import factorial
        from operator import mul

        return (self.factor * 2**len(self.exponents) *
                reduce(mul, (factorial(alpha) for alpha in self.exponents)) /
                factorial(len(self.exponents) + sum(self.exponents)))
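For context, the return expression encodes a standard identity (not stated in the snippet itself): over the unit simplex T_n = {x >= 0 : x_1 + ... + x_n <= 1},

    integral over T_n of x_1^a_1 * ... * x_n^a_n  =  (a_1! * ... * a_n!) / (n + a_1 + ... + a_n)!,

which is exactly the factorial quotient above. The extra factor 2**len(self.exponents) presumably rescales to the project's bi-unit reference simplex, and self.factor is the monomial's own prefactor; both readings are my assumptions from the surrounding code.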
Example #4
    def simplex_integral(self):
        r"""Integral over the simplex
        :math:`\{\mathbf{x} \in [0, 1]^n: \sum x_i \le 1 \}`."""
        from pytools import factorial
        from operator import mul

        return (self.factor * 2**len(self.exponents) *
                reduce(mul, (factorial(alpha) for alpha in self.exponents)) /
                factorial(len(self.exponents) + sum(self.exponents)))
Example #5
    def simplex_integral(self):
        """Integral over the unit simplex."""
        from pytools import factorial
        from operator import mul

        return (self.factor*2**len(self.exponents)*
            reduce(mul, (factorial(alpha) for alpha in self.exponents))
            /
            factorial(len(self.exponents)+sum(self.exponents)))
Example #6
 def node_count(self):
     """Return the number of interpolation nodes in this element."""
     d = self.dimensions
     o = self.order
     from operator import mul
     from pytools import factorial
     return int(reduce(mul, (o + 1 + i for i in range(d)), 1) / factorial(d))
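The product-over-factorial expression above is the binomial coefficient C(order + d, d), i.e. the dimension of the space of polynomials of total degree at most order in d variables. A quick standalone check of that identity (my own sketch, using math.comb rather than pytools):

from functools import reduce
from math import comb, factorial
from operator import mul

for d in range(1, 5):
    for o in range(6):
        # (o+1)(o+2)...(o+d) / d! equals C(o+d, d); the division is exact
        via_product = reduce(mul, (o + 1 + i for i in range(d)), 1) // factorial(d)
        assert via_product == comb(o + d, d)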
Example #7
 def node_count(self):
     """Return the number of interpolation nodes in this element."""
     d = self.dimensions
     o = self.order
     from operator import mul
     from pytools import factorial
     return int(reduce(mul, (o + 1 + i for i in range(d))) / factorial(d))
Example #8
def test_sanity_single_element(ctx_getter, dim, order, visualize=False):
    pytest.importorskip("pytential")

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from modepy.tools import unit_vertices
    vertices = unit_vertices(dim).T.copy()

    center = np.empty(dim, np.float64)
    center.fill(-0.5)

    import modepy as mp
    from meshmode.mesh import SimplexElementGroup, Mesh, BTAG_ALL
    mg = SimplexElementGroup(
        order=order,
        vertex_indices=np.arange(dim + 1, dtype=np.int32).reshape(1, -1),
        nodes=mp.warp_and_blend_nodes(dim, order).reshape(dim, 1, -1),
        dim=dim)

    mesh = Mesh(vertices, [mg],
                nodal_adjacency=None,
                facial_adjacency_groups=None)

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            PolynomialWarpAndBlendGroupFactory
    vol_discr = Discretization(cl_ctx, mesh,
                               PolynomialWarpAndBlendGroupFactory(order + 3))

    # {{{ volume calculation check

    vol_x = vol_discr.nodes().with_queue(queue)

    vol_one = vol_x[0].copy()
    vol_one.fill(1)
    from pytential import norm, integral  # noqa

    from pytools import factorial
    true_vol = 1 / factorial(dim) * 2**dim

    comp_vol = integral(vol_discr, queue, vol_one)
    rel_vol_err = abs(true_vol - comp_vol) / true_vol

    assert rel_vol_err < 1e-12

    # }}}

    # {{{ boundary discretization

    from meshmode.discretization.connection import make_face_restriction
    bdry_connection = make_face_restriction(
        vol_discr, PolynomialWarpAndBlendGroupFactory(order + 3), BTAG_ALL)
    bdry_discr = bdry_connection.to_discr

    # }}}

    # {{{ visualizers

    from meshmode.discretization.visualization import make_visualizer
    #vol_vis = make_visualizer(queue, vol_discr, 4)
    bdry_vis = make_visualizer(queue, bdry_discr, 4)

    # }}}

    from pytential import bind, sym
    bdry_normals = bind(bdry_discr,
                        sym.normal(dim))(queue).as_vector(dtype=object)

    if visualize:
        bdry_vis.write_vtk_file("boundary.vtu",
                                [("bdry_normals", bdry_normals)])

    from pytential import bind, sym
    normal_outward_check = bind(
        bdry_discr,
        sym.normal(dim)
        | (sym.nodes(dim) + 0.5 * sym.ones_vec(dim)),
    )(queue).as_scalar() > 0

    assert normal_outward_check.get().all(), normal_outward_check.get()
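For reference, the true_vol used in this check is the volume of the bi-unit reference simplex produced by modepy's unit_vertices: scaling the unit simplex (volume 1/dim!) by 2 in every coordinate gives 2**dim / dim!, e.g. 2 for dim = 2 and 4/3 for dim = 3.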
Example #9
def test_sanity_single_element(actx_factory,
                               dim,
                               mesh_order,
                               group_cls,
                               visualize=False):
    pytest.importorskip("pytential")
    actx = actx_factory()

    if group_cls is SimplexElementGroup:
        group_factory = PolynomialWarpAndBlendGroupFactory(mesh_order + 3)
    elif group_cls is TensorProductElementGroup:
        group_factory = LegendreGaussLobattoTensorProductGroupFactory(
            mesh_order + 3)
    else:
        raise TypeError

    import modepy as mp
    shape = group_cls._modepy_shape_cls(dim)
    space = mp.space_for_shape(shape, mesh_order)

    vertices = mp.unit_vertices_for_shape(shape)
    nodes = mp.edge_clustered_nodes_for_space(space, shape).reshape(dim, 1, -1)
    vertex_indices = np.arange(shape.nvertices, dtype=np.int32).reshape(1, -1)

    center = np.empty(dim, np.float64)
    center.fill(-0.5)

    mg = group_cls(mesh_order, vertex_indices, nodes, dim=dim)
    mesh = Mesh(vertices, [mg], is_conforming=True)

    from meshmode.discretization import Discretization
    vol_discr = Discretization(actx, mesh, group_factory)

    # {{{ volume calculation check

    if isinstance(mg, SimplexElementGroup):
        from pytools import factorial
        true_vol = 1 / factorial(dim) * 2**dim
    elif isinstance(mg, TensorProductElementGroup):
        true_vol = 2**dim
    else:
        raise TypeError

    nodes = thaw(vol_discr.nodes(), actx)
    vol_one = 1 + 0 * nodes[0]

    from pytential import norm, integral  # noqa
    comp_vol = integral(vol_discr, vol_one)
    rel_vol_err = abs(true_vol - comp_vol) / true_vol

    assert rel_vol_err < 1e-12

    # }}}

    # {{{ boundary discretization

    from meshmode.discretization.connection import make_face_restriction
    bdry_connection = make_face_restriction(actx, vol_discr, group_factory,
                                            BTAG_ALL)
    bdry_discr = bdry_connection.to_discr

    # }}}

    from pytential import bind, sym
    bdry_normals = bind(bdry_discr, sym.normal(dim).as_vector())(actx)

    if visualize:
        from meshmode.discretization.visualization import make_visualizer
        bdry_vis = make_visualizer(actx, bdry_discr, 4)

        bdry_vis.write_vtk_file("sanity_single_element_boundary.vtu",
                                [("normals", bdry_normals)])

    normal_outward_check = bind(
        bdry_discr,
        sym.normal(dim)
        | (sym.nodes(dim) + 0.5 * sym.ones_vec(dim)),
    )(actx).as_scalar()

    normal_outward_check = flatten_to_numpy(actx, normal_outward_check > 0)
    assert normal_outward_check.all(), normal_outward_check
Example #10
def test_sanity_single_element(ctx_getter, dim, order, visualize=False):
    pytest.importorskip("pytential")

    cl_ctx = ctx_getter()
    queue = cl.CommandQueue(cl_ctx)

    from modepy.tools import UNIT_VERTICES
    vertices = UNIT_VERTICES[dim].T.copy()

    center = np.empty(dim, np.float64)
    center.fill(-0.5)

    import modepy as mp
    from meshmode.mesh import SimplexElementGroup, Mesh
    mg = SimplexElementGroup(
            order=order,
            vertex_indices=np.arange(dim+1, dtype=np.int32).reshape(1, -1),
            nodes=mp.warp_and_blend_nodes(dim, order).reshape(dim, 1, -1),
            dim=dim)

    mesh = Mesh(vertices, [mg])

    from meshmode.discretization import Discretization
    from meshmode.discretization.poly_element import \
            PolynomialWarpAndBlendGroupFactory
    vol_discr = Discretization(cl_ctx, mesh,
            PolynomialWarpAndBlendGroupFactory(order+3))

    # {{{ volume calculation check

    vol_x = vol_discr.nodes().with_queue(queue)

    vol_one = vol_x[0].copy()
    vol_one.fill(1)
    from pytential import norm, integral  # noqa

    from pytools import factorial
    true_vol = 1/factorial(dim) * 2**dim

    comp_vol = integral(vol_discr, queue, vol_one)
    rel_vol_err = abs(true_vol - comp_vol) / true_vol

    assert rel_vol_err < 1e-12

    # }}}

    # {{{ boundary discretization

    from meshmode.discretization.connection import make_boundary_restriction
    bdry_mesh, bdry_discr, bdry_connection = make_boundary_restriction(
            queue, vol_discr, PolynomialWarpAndBlendGroupFactory(order + 3))

    # }}}

    # {{{ visualizers

    from meshmode.discretization.visualization import make_visualizer
    #vol_vis = make_visualizer(queue, vol_discr, 4)
    bdry_vis = make_visualizer(queue, bdry_discr, 4)

    # }}}

    from pytential import bind, sym
    bdry_normals = bind(bdry_discr, sym.normal())(queue).as_vector(dtype=object)

    if visualize:
        bdry_vis.write_vtk_file("boundary.vtu", [
            ("bdry_normals", bdry_normals)
            ])

    from pytential import bind, sym
    normal_outward_check = bind(bdry_discr,
            sym.normal()
            |
            (sym.Nodes() + 0.5*sym.ones_vec(dim)),
            )(queue).as_scalar() > 0

    assert normal_outward_check.get().all(), normal_outward_check.get()
Example #11
    def translate_from(self, src_expansion, src_coeff_exprs, src_rscale,
            dvec, tgt_rscale, sac=None, _fast_version=True):
        if not isinstance(src_expansion, type(self)):
            raise RuntimeError("do not know how to translate %s to "
                    "Taylor multipole expansion"
                               % type(src_expansion).__name__)

        if not self.use_rscale:
            src_rscale = 1
            tgt_rscale = 1

        logger.info("building translation operator: %s(%d) -> %s(%d): start"
                % (type(src_expansion).__name__,
                    src_expansion.order,
                    type(self).__name__,
                    self.order))

        from sumpy.tools import mi_factorial

        src_mi_to_index = {mi: i for i, mi in enumerate(
            src_expansion.get_coefficient_identifiers())}

        tgt_mi_to_index = {mi: i for i, mi in enumerate(
            self.get_full_coefficient_identifiers())}

        # This algorithm uses the observation that M2M coefficients
        # have the following form in 2D
        #
        # $T_{m, n} = \sum_{i\le m, j\le n} C_{i, j}
        #             d_x^i d_y^j \binom{m}{i} \binom{n}{j}$
        # and can be rewritten as follows.
        #
        # Let $Y_{m, n} = \sum_{i\le m} C_{i, n} d_x^i \binom{m}{i}$.
        #
        # Then, $T_{m, n} = \sum_{j\le n} Y_{m, j} d_y^j \binom{n}{j}$.
        #
        # The $Y_{m, n}$ are $p^2$ temporary variables that are
        # reused for different M2M coefficients and cost $p$ each to compute.
        # The total cost of calculating all $Y_{m, n}$ is therefore $p^3$,
        # and similarly for $T_{m, n}$. For compressed Taylor series this can
        # be done more efficiently.

        # Let's take the example u_xy + u_x + u_y = 0.
        # In the diagram below, C depicts a nonzero source coefficient.
        # We divide these into two hyperplanes.
        #
        #  C              C             0
        #  C 0            C 0           0 0
        #  C 0 0       =  C 0 0      +  0 0 0
        #  C 0 0 0        C 0 0 0       0 0 0 0
        #  C C C C C      C 0 0 0 0     0 C C C C
        #
        # The calculations done when naively translating first hyperplane of the
        # source coefficients (C) to target coefficients (T) are shown
        # below in the graph. Each connection represents a O(1) calculation,
        # and the arrows go "up and to the right".
        #
        #  ┌─→C             T
        #  │  ↑
        #  │┌→C→0←─────┐->  T T
        #  ││ ↑ ↑      │
        #  ││ ┌─┘┌────┐│
        #  ││↱C→0↲0←─┐││    T T T
        #  │││└───⬏  │││
        #  └└└C→0 0 0│││    T T T T
        #     └───⬏ ↑│││
        #     └─────┘│││
        #     └──────┘││
        #     └───────┘│
        #     └────────┘
        #
        # By using temporaries (Y), this can be reduced as shown below.
        #
        #  ┌→C           Y             T
        #  │ ↑
        #  │↱C 0     ->  Y→0       ->  T T
        #  ││↑
        #  ││C 0 0       Y→0 0         T T T
        #  ││↑           └───⬏
        #  └└C 0 0 0     Y 0 0 0       T T T T
        #                └───⬏ ↑
        #                └─────┘
        #
        # Note that in the above calculation data is propagated upwards
        # in the first pass and then rightwards in the second pass.
        # Propagation of zeros is not shown, since those terms are not
        # calculated. If the propagation were done rightwards first and
        # upwards second, the number of calculations would be higher,
        # as shown below.
        #
        #    C             ┌→Y           T
        #                  │ ↑
        #    C→0       ->  │↱Y↱Y     ->  T T
        #                  ││↑│↑
        #    C→0 0         ││Y│Y Y       T T T
        #    └───⬏         ││↑│↑ ↑
        #    C→0 0 0       └└Y└Y Y Y     T T T T
        #    └───⬏ ↑
        #    └─────┘
        #
        # For the second hyperplane, data is propagated rightwards first
        # and then upwards, which is the opposite of the order used for
        # the first hyperplane.
        #
        #    0              0            0
        #
        #    0 0       ->   0↱0      ->  0 T
        #                    │↑
        #    0 0 0          0│0 0        0 T T
        #                    │↑ ↑
        #    0 C→C→C        0└Y Y Y      0 T T T
        #      └───⬏
        #
        # In other words, we're better off computing the translation
        # one dimension at a time. If the coefficient-identifying multi-indices
        # in the source expansion have the form (0, m) and (n, 0), where m>=0, n>=1,
        # then we calculate the output from (0, m) with the second
        # dimension as the fastest varying dimension and then calculate
        # the output from (n, 0) with the first dimension as the fastest
        # varying dimension.

        tgt_hyperplanes = \
            self.expansion_terms_wrangler._split_coeffs_into_hyperplanes()
        result = [0] * len(self.get_full_coefficient_identifiers())

        # axis morally iterates over 'hyperplane directions'
        for axis in range(self.dim):
            # {{{ index gymnastics

            # First, let's write source coefficients in target coefficient
            # indices. If target order is lower than source order, then
            # we will discard higher order terms from source coefficients.
            cur_dim_input_coeffs = \
                [0] * len(self.get_full_coefficient_identifiers())
            for d, mis in tgt_hyperplanes:
                # Only consider hyperplanes perpendicular to *axis*.
                if d != axis:
                    continue
                for mi in mis:
                    # When target order is higher than source order, we assume
                    # that the higher order source coefficients were zero.
                    if mi not in src_mi_to_index:
                        continue

                    src_idx = src_mi_to_index[mi]
                    tgt_idx = tgt_mi_to_index[mi]
                    cur_dim_input_coeffs[tgt_idx] = src_coeff_exprs[src_idx] * \
                            sym.UnevaluatedExpr(src_rscale/tgt_rscale)**sum(mi)

            if all(coeff == 0 for coeff in cur_dim_input_coeffs):
                continue

            # }}}

            # {{{ translation

            # As explained above using the unicode art, we use the orthogonal axis
            # as the last dimension to vary to reduce the number of operations.
            dims = list(range(axis)) + \
                   list(range(axis+1, self.dim)) + [axis]

            # d is the axis along which we translate.
            for d in dims:
                # We build the full target multipole and then compress it
                # at the very end.
                cur_dim_output_coeffs = \
                    [0] * len(self.get_full_coefficient_identifiers())
                for i, tgt_mi in enumerate(
                        self.get_full_coefficient_identifiers()):

                    # Calling this input_mis instead of src_mis because we
                    # converted the source coefficients to target coefficient
                    # indices beforehand.
                    for mi_i in range(tgt_mi[d]+1):
                        input_mi = mi_set_axis(tgt_mi, d, mi_i)
                        contrib = cur_dim_input_coeffs[tgt_mi_to_index[input_mi]]
                        for n, k, dist in zip(tgt_mi, input_mi, dvec):
                            assert n >= k
                            contrib /= factorial(n-k)
                            contrib *= \
                                sym.UnevaluatedExpr(dist/tgt_rscale)**(n-k)

                        cur_dim_output_coeffs[i] += contrib
                # cur_dim_output_coeffs is the input in the next iteration
                cur_dim_input_coeffs = cur_dim_output_coeffs

            # }}}

            for i in range(len(cur_dim_output_coeffs)):
                result[i] += cur_dim_output_coeffs[i]

        # {{{ simpler, functionally equivalent code
        if not _fast_version:
            src_mi_to_index = dict((mi, i) for i, mi in enumerate(
                src_expansion.get_coefficient_identifiers()))
            result = [0] * len(self.get_full_coefficient_identifiers())

            for i, mi in enumerate(src_expansion.get_coefficient_identifiers()):
                src_coeff_exprs[i] *= mi_factorial(mi)

            from pytools import generate_nonnegative_integer_tuples_below as gnitb

            for i, tgt_mi in enumerate(
                    self.get_full_coefficient_identifiers()):

                tgt_mi_plus_one = tuple(mi_i + 1 for mi_i in tgt_mi)

                for src_mi in gnitb(tgt_mi_plus_one):
                    try:
                        src_index = src_mi_to_index[src_mi]
                    except KeyError:
                        # Omitted coefficients: not life-threatening
                        continue

                    contrib = src_coeff_exprs[src_index]

                    for idim in range(self.dim):
                        n = tgt_mi[idim]
                        k = src_mi[idim]
                        assert n >= k
                        from sympy import binomial
                        contrib *= (binomial(n, k)
                                * sym.UnevaluatedExpr(dvec[idim]/tgt_rscale)**(n-k))

                    result[i] += (contrib
                        * sym.UnevaluatedExpr(src_rscale/tgt_rscale)**sum(src_mi))

                result[i] /= mi_factorial(tgt_mi)
        # }}}

        logger.info("building translation operator: done")
        return (
            self.expansion_terms_wrangler.get_stored_mpole_coefficients_from_full(
                result, tgt_rscale, sac=sac))
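The separability argument in the comments of translate_from is easy to verify numerically. The sketch below (a standalone toy with made-up coefficients and translation distances, not sumpy code) evaluates the 2D formula from the comment, T_{m,n} = sum_{i<=m, j<=n} C_{i,j} d_x^i d_y^j binom(m,i) binom(n,j), both directly and via the Y_{m,n} temporaries, and checks that the two agree:

import numpy as np
from math import comb

rng = np.random.default_rng(0)
p = 5
C = rng.standard_normal((p, p))   # stand-in source coefficients
dx, dy = 0.3, -0.7                # stand-in translation vector

# direct double sum: p^2 terms for each of the p^2 target entries, O(p^4) total
T_naive = np.zeros((p, p))
for m in range(p):
    for n in range(p):
        T_naive[m, n] = sum(
            C[i, j] * dx**i * dy**j * comb(m, i) * comb(n, j)
            for i in range(m + 1) for j in range(n + 1))

# factored version: sweep x into temporaries Y first, then sweep y, O(p^3) total
Y = np.zeros((p, p))
for m in range(p):
    for n in range(p):
        Y[m, n] = sum(C[i, n] * dx**i * comb(m, i) for i in range(m + 1))

T_fast = np.zeros((p, p))
for m in range(p):
    for n in range(p):
        T_fast[m, n] = sum(Y[m, j] * dy**j * comb(n, j) for j in range(n + 1))

assert np.allclose(T_naive, T_fast)

Each entry of Y and of T_fast costs O(p) work here, which is the p^3-versus-p^4 saving described in the comment.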
Example #12
    def __init__(self, order, dimension):
        s = order
        n = dimension
        d = 2*s+1

        self.exact_to = d

        from pytools import \
                generate_decreasing_nonnegative_tuples_summing_to, \
                generate_unique_permutations, \
                factorial, \
                wandering_element

        points_to_weights = {}

        for i in xrange(s+1):
            weight = (-1)**i * 2**(-2*s) \
                    * (d + n-2*i)**d \
                    / factorial(i) \
                    / factorial(d+n-i)

            for t in generate_decreasing_nonnegative_tuples_summing_to(s-i, n+1):
                for beta in generate_unique_permutations(t):
                    denominator = d+n-2*i
                    point = tuple(
                            _simplify_fraction((2*beta_i+1, denominator))
                            for beta_i in beta)

                    points_to_weights[point] = points_to_weights.get(point, 0) + weight

        from operator import add

        vertices = [-1 * numpy.ones((n,))] \
                + [numpy.array(x) for x in wandering_element(n, landscape=-1, wanderer=1)]

        self.pos_points = []
        self.pos_weights = []
        self.neg_points = []
        self.neg_weights = []

        dim_factor = 2**n
        for p, w in points_to_weights.iteritems():
            real_p = reduce(add, (a/b*v for (a,b),v in zip(p, vertices)))
            if w > 0:
                self.pos_points.append(real_p)
                self.pos_weights.append(dim_factor*w)
            else:
                self.neg_points.append(real_p)
                self.neg_weights.append(dim_factor*w)

        self.points = numpy.array(self.pos_points + self.neg_points)
        self.weights = numpy.array(self.pos_weights + self.neg_weights)

        self.pos_points = numpy.array(self.pos_points)
        self.pos_weights = numpy.array(self.pos_weights)
        self.neg_points = numpy.array(self.neg_points)
        self.neg_weights = numpy.array(self.neg_weights)

        self.points = numpy.array(self.points)
        self.weights = numpy.array(self.weights)

        self.pos_info = zip(self.pos_points, self.pos_weights)
        self.neg_info = zip(self.neg_points, self.neg_weights)
Example #13
def mi_factorial(mi):
    from pytools import factorial
    result = 1
    for mi_i in mi:
        result *= factorial(mi_i)
    return result
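As a quick worked value: mi_factorial((2, 1, 3)) returns 2! * 1! * 3! = 2 * 1 * 6 = 12, the multi-index normalization that appears in the Taylor-expansion examples elsewhere on this page.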
Example #14
File: local.py  Project: inducer/sumpy
    def evaluate(self, coeffs, bvec):
        from pytools import factorial

        return sum(coeffs[self.get_storage_index(i)] / factorial(i) for i in self.get_coefficient_identifiers())
Example #15
File: local.py  Project: inducer/sumpy
 def evaluate(self, coeffs, bvec, rscale):
     # no point in heeding rscale here--just ignore it
     from pytools import factorial
     return sym.Add(*(
             coeffs[self.get_storage_index(i)] / factorial(i)
             for i in self.get_coefficient_identifiers()))
Example #16
def mi_factorial(mi):
    from pytools import factorial
    result = 1
    for mi_i in mi:
        result *= factorial(mi_i)
    return result
Example #17
 def evaluate(self, coeffs, bvec, rscale):
     # no point in heeding rscale here--just ignore it
     from pytools import factorial
     return sym.Add(*(coeffs[self.get_storage_index(i)] / factorial(i)
                      for i in self.get_coefficient_identifiers()))
Example #18
    def evaluate(self, coeffs, bvec, rscale):
        from pytools import factorial

        evaluated_coeffs = (
            self.derivative_wrangler.get_full_kernel_derivatives_from_stored(
                coeffs, rscale))

        bvec = [b * rscale**-1 for b in bvec]
        mi_to_index = dict(
            (mi, i)
            for i, mi in enumerate(self.get_full_coefficient_identifiers()))

        # Sort multi-indices so that last dimension varies fastest
        sorted_target_mis = sorted(self.get_full_coefficient_identifiers())
        dim = self.dim

        # Start with an invalid "seen" multi-index
        seen_mi = [-1] * dim
        # local_sum[d] keeps the partial sum of the terms accumulated for
        # dimension d
        local_sum = [0] * dim
        # local_multiplier[d] keeps the scalar that local_sum[d] has to be
        # multiplied by when it is added to the sum of the preceding dimension.
        local_multiplier = [0] * dim

        # For the multi-indices in 3D, local_sum looks like this:
        #
        # Multi-index | coef | local_sum                              | local_mult
        # (0, 0, 0)   |  c0  | 0, 0,                c0                | 0, 1, 1
        # (0, 0, 1)   |  c1  | 0, 0,                c0+c1*dz          | 0, 1, 1
        # (0, 0, 2)   |  c2  | 0, 0,                c0+c1*dz+c2*dz^2  | 0, 1, 1
        # (0, 1, 0)   |  c3  | 0, c0+c1*dz+c2*dz^2, c3                | 0, 1, dy
        # (0, 1, 1)   |  c4  | 0, c0+c1*dz+c2*dz^2, c3+c4*dz          | 0, 1, dy
        # (0, 1, 2)   |  c5  | 0, c0+c1*dz+c2*dz^2, c3+c4*dz+c5*dz^2  | 0, 1, dy
        # (0, 2, 0)   |  c6  | 0, c0+c1*dz+c2*dz^2, c6                | 0, 1, dy^2
        #             |      |    +dy*(c3+c4*dz+c5*dz^2)              |
        # (0, 2, 1)   |  c7  | 0, c0+c1*dz+c2*dz^2, c6+c7*dz          | 0, 1, dy^2
        #             |      |    +dy*(c3+c4*dz+c5*dz^2)              |
        # (0, 2, 2)   |  c8  | 0, c0+c1*dz+c2*dz^2, c6+c7*dz+c8*dz^2  | 0, 1, dy^2
        #             |      |    +dy*(c3+c4*dz+c5*dz^2)              |
        # (1, 0, 0)   |  c9  | c0+c1*dz+c2*dz^2,         0, c9        | 0, dx, 1
        #             |      |  +dy*(c3+c4*dz+c5*dz^2)                |
        #             |      |  +dy^2*(c6+c7*dz+c8*dz^2)              |

        for mi in sorted_target_mis:

            # {{{ handle the case where a not-last dimension "clicked over"

            # (where d will be that not-last dimension)

            # Iterate in reverse order of dimensions to properly handle a
            # "double click-over".

            for d in reversed(range(dim - 1)):
                if seen_mi[d] != mi[d]:
                    # If the dimension d of mi changed from the previous value
                    # then the sum for dimension d+1 is complete, add it to
                    # dimension d after multiplying and restart.

                    local_sum[d] += local_sum[d + 1] * local_multiplier[d + 1]
                    local_sum[d + 1] = 0
                    local_multiplier[d + 1] = bvec[d]**mi[d] / factorial(mi[d])

            # }}}

            local_sum[dim-1] += evaluated_coeffs[mi_to_index[mi]] * \
                                    bvec[dim-1]**mi[dim-1] / factorial(mi[dim-1])
            seen_mi = mi

        for d in reversed(range(dim - 1)):
            local_sum[d] += local_sum[d + 1] * local_multiplier[d + 1]

        # {{{ simpler, functionally equivalent code

        if 0:
            from sumpy.tools import mi_power, mi_factorial

            return sum(
                coeff * mi_power(bvec, mi, evaluate=False) / mi_factorial(mi)
                for coeff, mi in zip(evaluated_coeffs,
                                     self.get_full_coefficient_identifiers()))

        # }}}

        return local_sum[0]
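The dimension-wise accumulation above can be sanity-checked in isolation. The sketch below (my own standalone recreation with made-up coefficients, not sumpy code) runs the same seen_mi / local_sum / local_multiplier recurrence over a full 2D rectangle of multi-indices and compares it against the naive sum of coeff * bvec**mi / mi!:

from itertools import product
from math import factorial

dim, p = 2, 4
bvec = [0.3, -0.7]
mis = sorted(product(range(p), repeat=dim))  # last dimension varies fastest
coeffs = {mi: 1.0 + 0.1*i for i, mi in enumerate(mis)}

# reference: straightforward Taylor-style sum
naive = sum(
    c * bvec[0]**mi[0] * bvec[1]**mi[1] / (factorial(mi[0]) * factorial(mi[1]))
    for mi, c in coeffs.items())

# same recurrence as in evaluate() above
seen_mi = [-1] * dim
local_sum = [0.0] * dim
local_multiplier = [0.0] * dim

for mi in mis:
    for d in reversed(range(dim - 1)):
        if seen_mi[d] != mi[d]:
            # dimension d clicked over: fold the finished inner sum upward
            local_sum[d] += local_sum[d + 1] * local_multiplier[d + 1]
            local_sum[d + 1] = 0.0
            local_multiplier[d + 1] = bvec[d]**mi[d] / factorial(mi[d])
    local_sum[dim - 1] += (coeffs[mi]
            * bvec[dim - 1]**mi[dim - 1] / factorial(mi[dim - 1]))
    seen_mi = mi

for d in reversed(range(dim - 1)):
    local_sum[d] += local_sum[d + 1] * local_multiplier[d + 1]

assert abs(local_sum[0] - naive) < 1e-12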
Example #19
    def __init__(self, order, dimension):
        s = order
        n = dimension
        d = 2 * s + 1

        self.exact_to = d

        from pytools import \
                generate_decreasing_nonnegative_tuples_summing_to, \
                generate_unique_permutations, \
                factorial, \
                wandering_element

        points_to_weights = {}

        for i in xrange(s + 1):
            weight = (-1)**i * 2**(-2*s) \
                    * (d + n-2*i)**d \
                    / factorial(i) \
                    / factorial(d+n-i)

            for t in generate_decreasing_nonnegative_tuples_summing_to(
                    s - i, n + 1):
                for beta in generate_unique_permutations(t):
                    denominator = d + n - 2 * i
                    point = tuple(
                        _simplify_fraction((2 * beta_i + 1, denominator))
                        for beta_i in beta)

                    points_to_weights[point] = points_to_weights.get(
                        point, 0) + weight

        from operator import add

        vertices = [-1 * numpy.ones((n,))] \
                + [numpy.array(x) for x in wandering_element(n, landscape=-1, wanderer=1)]

        self.pos_points = []
        self.pos_weights = []
        self.neg_points = []
        self.neg_weights = []

        dim_factor = 2**n
        for p, w in points_to_weights.iteritems():
            real_p = reduce(add, (a / b * v for (a, b), v in zip(p, vertices)))
            if w > 0:
                self.pos_points.append(real_p)
                self.pos_weights.append(dim_factor * w)
            else:
                self.neg_points.append(real_p)
                self.neg_weights.append(dim_factor * w)

        self.points = numpy.array(self.pos_points + self.neg_points)
        self.weights = numpy.array(self.pos_weights + self.neg_weights)

        self.pos_points = numpy.array(self.pos_points)
        self.pos_weights = numpy.array(self.pos_weights)
        self.neg_points = numpy.array(self.neg_points)
        self.neg_weights = numpy.array(self.neg_weights)

        self.points = numpy.array(self.points)
        self.weights = numpy.array(self.weights)

        self.pos_info = zip(self.pos_points, self.pos_weights)
        self.neg_info = zip(self.neg_points, self.neg_weights)