Example 1
def no_cancel_equal(b, c, n, DE):
    """
    Poly Risch Differential Equation - No cancellation: deg(b) == deg(D) - 1

    Given a derivation D on k[t] with deg(D) >= 2, n either an integer
    or +oo, and b, c in k[t] with deg(b) == deg(D) - 1, either raise
    NonElementaryIntegralException, in which case the equation Dq + b*q == c has
    no solution of degree at most n in k[t], or return a solution q in k[t] of
    this equation with deg(q) <= n, or return the tuple (h, m, C) such that h
    in k[t], m in ZZ, and C in k[t], and for any solution q in k[t] of
    degree at most n of Dq + b*q == c, y == q - h is a solution in k[t]
    of degree at most m of Dy + b*y == C.
    """
    q = Poly(0, DE.t)
    lc = cancel(-b.as_poly(DE.t).LC() / DE.d.as_poly(DE.t).LC())
    if lc.is_Integer and lc.is_positive:
        M = lc
    else:
        M = -1

    while not c.is_zero:
        m = max(M, c.degree(DE.t) - DE.d.degree(DE.t) + 1)

        if not 0 <= m <= n:  # n < 0 or m < 0 or m > n
            raise NonElementaryIntegralException

        u = cancel(m * DE.d.as_poly(DE.t).LC() + b.as_poly(DE.t).LC())
        if u.is_zero:
            return q, m, c
        if m > 0:
            p = Poly(c.as_poly(DE.t).LC() / u * DE.t**m, DE.t, expand=False)
        else:
            if c.degree(DE.t) != DE.d.degree(DE.t) - 1:
                raise NonElementaryIntegralException
            else:
                p = c.as_poly(DE.t).LC() / b.as_poly(DE.t).LC()

        q = q + p
        n = m - 1
        c = c - derivation(p, DE) - b * p

    return q
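A hedged usage sketch (not part of the module itself): the import paths below are assumptions based on diofant mirroring SymPy's layout, with no_cancel_equal in diofant.integrals.rde and DifferentialExtension in diofant.integrals.risch.

from diofant import Poly, oo, symbols
from diofant.integrals.risch import DifferentialExtension  # assumed location
from diofant.integrals.rde import no_cancel_equal          # assumed location

x, t = symbols('x t')
# Dt = t**2 + 1, i.e. t behaves like tan(x), so deg(D) == 2 as required.
DE = DifferentialExtension(extension={'D': [Poly(1, x), Poly(t**2 + 1, t)]})
b = Poly(1 - t, t)          # deg(b) == deg(D) - 1
c = Poly(t**2 + 2*x*t, t)   # an arbitrary right-hand side, for illustration only
# The call returns either a Poly q, a tuple (h, m, C) as documented above,
# or raises NonElementaryIntegralException.
print(no_cancel_equal(b, c, oo, DE))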
Example 2
def ratsimp(expr):
    """
    Put an expression over a common denominator, cancel and reduce.

    Examples
    ========

    >>> from diofant import ratsimp
    >>> from diofant.abc import x, y
    >>> ratsimp(1/x + 1/y)
    (x + y)/(x*y)
    """

    f, g = cancel(expr).as_numer_denom()
    try:
        Q, r = reduced(f, [g], field=True, expand=False)
    except ComputationFailed:
        return f / g

    return Add(*Q) + cancel(r / g)
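Two further hedged examples of ratsimp: the first call has no polynomial part, while the second shows reduced() splitting one off (exact printed forms may differ slightly between versions).

from diofant import ratsimp
from diofant.abc import x

print(ratsimp(1/(x - 1) + 1/(x + 1)))      # expected: 2*x/(x**2 - 1)
print(ratsimp((x**3 + x + 1)/(x**2 + 1)))  # expected: x + 1/(x**2 + 1)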
Example 3
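# Fragment: this is the _splitter helper that is nested inside heurisch (it
# reappears with its full context in Example 6 below); V and _derivation are
# defined there.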
    def _splitter(p):
        for y in V:
            if not p.has(y):
                continue

            if _derivation(y) is not S.Zero:
                c, q = p.as_poly(y).primitive()

                q = q.as_expr()

                h = gcd(q, _derivation(q), y)
                s = quo(h, gcd(q, q.diff(y), y), y)

                c_split = _splitter(c)

                if s.as_poly(y).degree() == 0:
                    return c_split[0], q * c_split[1]

                q_split = _splitter(cancel(q / s))

                return c_split[0] * q_split[0] * s, c_split[1] * q_split[1]
        else:
            return S.One, p
Example 4
def simplify(expr, ratio=1.7, measure=count_ops, fu=False):
    """
    Simplifies the given expression.

    Simplification is not a well defined term and the exact strategies
    this function tries can change in future versions of Diofant. If
    your algorithm relies on "simplification" (whatever it is), try to
    determine what you need exactly - is it powsimp(), radsimp(),
    together(), logcombine(), or something else?  And use that particular
    function directly, because those are well defined and thus your algorithm
    will be robust.

    Nonetheless, especially for interactive use, or when you don't know
    anything about the structure of the expression, simplify() tries to apply
    intelligent heuristics to make the input expression "simpler".  For
    example:

    >>> from diofant import simplify, cos, sin
    >>> from diofant.abc import x, y
    >>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2)
    >>> a
    (x**2 + x)/(x*sin(y)**2 + x*cos(y)**2)
    >>> simplify(a)
    x + 1

    Note that we could have obtained the same result by using specific
    simplification functions:

    >>> from diofant import trigsimp, cancel
    >>> trigsimp(a)
    (x**2 + x)/x
    >>> cancel(_)
    x + 1

    In some cases, applying :func:`simplify` may actually result in some more
    complicated expression. The default ``ratio=1.7`` prevents more extreme
    cases: if (result length)/(input length) > ratio, then input is returned
    unmodified.  The ``measure`` parameter lets you specify the function used
    to determine how complex an expression is.  The function should take a
    single argument as an expression and return a number such that if
    expression ``a`` is more complex than expression ``b``, then
    ``measure(a) > measure(b)``.  The default measure function is
    :func:`~diofant.core.function.count_ops`, which returns the total number of operations in the
    expression.

    For example, if ``ratio=1``, ``simplify`` output can't be longer
    than input.

    ::

        >>> from diofant import sqrt, simplify, count_ops, oo
        >>> root = 1/(sqrt(2)+3)

    Since ``simplify(root)`` would result in a slightly longer expression,
    root is returned unchanged instead::

       >>> simplify(root, ratio=1) == root
       True

    If ``ratio=oo``, simplify will be applied anyway::

        >>> count_ops(simplify(root, ratio=oo)) > count_ops(root)
        True

    Note that the shortest expression is not necessarily the simplest, so
    setting ``ratio`` to 1 may not be a good idea.
    Heuristically, the default value ``ratio=1.7`` seems like a reasonable
    choice.

    You can easily define your own measure function based on what you feel
    should represent the "size" or "complexity" of the input expression.  Note
    that some choices, such as ``lambda expr: len(str(expr))`` may appear to be
    good metrics, but have other problems (in this case, the measure function
    may slow down simplify too much for very large expressions).  If you don't
    know what a good metric would be, the default, ``count_ops``, is a good
    one.

    For example:

    >>> from diofant import symbols, log
    >>> a, b = symbols('a b', positive=True)
    >>> g = log(a) + log(b) + log(a)*log(1/b)
    >>> h = simplify(g)
    >>> h
    log(a*b**(-log(a) + 1))
    >>> count_ops(g)
    8
    >>> count_ops(h)
    5

    So you can see that ``h`` is simpler than ``g`` using the count_ops metric.
    However, we may not like how ``simplify`` (in this case, using
    ``logcombine``) has created the ``b**(-log(a) + 1)`` term.  A simple way
    to reduce this would be to give more weight to powers as operations in
    ``count_ops``.  We can do this by using the ``visual=True`` option:

    >>> print(count_ops(g, visual=True))
    2*ADD + DIV + 4*LOG + MUL
    >>> print(count_ops(h, visual=True))
    2*LOG + MUL + POW + SUB

    >>> from diofant import Symbol, S
    >>> def my_measure(expr):
    ...     POW = Symbol('POW')
    ...     # Discourage powers by giving POW a weight of 10
    ...     count = count_ops(expr, visual=True).subs(POW, 10)
    ...     # Every other operation gets a weight of 1 (the default)
    ...     count = count.replace(Symbol, type(S.One))
    ...     return count
    >>> my_measure(g)
    8
    >>> my_measure(h)
    14
    >>> 14./8 > 1.7  # 1.7 is the default ratio
    True
    >>> simplify(g, measure=my_measure)
    -log(a)*log(b) + log(a) + log(b)

    Note that because ``simplify()`` internally tries many different
    simplification strategies and then compares them using the measure
    function, we get a completely different result with this custom measure,
    one that is nevertheless still different from the input expression.
    """
    expr = sympify(expr)

    try:
        return expr._eval_simplify(ratio=ratio, measure=measure)
    except AttributeError:
        pass

    original_expr = expr = signsimp(expr)

    from diofant.simplify.hyperexpand import hyperexpand
    from diofant.functions.special.bessel import BesselBase
    from diofant import Sum, Product

    if not isinstance(expr, Basic) or not expr.args:  # XXX: temporary hack
        return expr

    if not isinstance(expr, (Add, Mul, Pow, exp_polar)):
        return expr.func(*[
            simplify(x, ratio=ratio, measure=measure, fu=fu) for x in expr.args
        ])

    # TODO: Apply different strategies, considering expression pattern:
    # is it a purely rational function? Is there any trigonometric function?...
    # See also https://github.com/sympy/sympy/pull/185.

    def shorter(*choices):
        '''Return the choice that has the fewest ops. In case of a tie,
        the expression listed first is selected.'''
        if not has_variety(choices):
            return choices[0]
        return min(choices, key=measure)

    expr = bottom_up(expr, lambda w: w.normal())
    expr = Mul(*powsimp(expr).as_content_primitive())
    _e = cancel(expr)
    expr1 = shorter(_e, _mexpand(_e).cancel())  # issue 6829
    expr2 = shorter(together(expr, deep=True), together(expr1, deep=True))

    if ratio is S.Infinity:
        expr = expr2
    else:
        expr = shorter(expr2, expr1, expr)
    if not isinstance(expr, Basic):  # XXX: temporary hack
        return expr

    expr = factor_terms(expr, sign=False)

    # hyperexpand automatically only works on hypergeometric terms
    expr = hyperexpand(expr)

    expr = piecewise_fold(expr)

    if expr.has(BesselBase):
        expr = besselsimp(expr)

    if expr.has(TrigonometricFunction) and not fu or expr.has(
            HyperbolicFunction):
        expr = trigsimp(expr, deep=True)

    if expr.has(log):
        expr = shorter(expand_log(expr, deep=True), logcombine(expr))

    if expr.has(CombinatorialFunction, gamma):
        expr = combsimp(expr)

    if expr.has(Sum):
        expr = sum_simplify(expr)

    if expr.has(Product):
        expr = product_simplify(expr)

    short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr),
                    expr)
    short = shorter(short, factor_terms(short),
                    expand_power_exp(expand_mul(short)))
    if (short.has(TrigonometricFunction, HyperbolicFunction, exp_polar)
            or any(a.base is S.Exp1 for a in short.atoms(Pow))):
        short = exptrigsimp(short, simplify=False)

    # get rid of hollow 2-arg Mul factorization
    hollow_mul = Transform(
        lambda x: Mul(*x.args),
        lambda x: (x.is_Mul and len(x.args) == 2 and x.args[0].is_Number
                   and x.args[1].is_Add and x.is_commutative))
    expr = short.xreplace(hollow_mul)

    numer, denom = expr.as_numer_denom()
    if denom.is_Add:
        n, d = fraction(radsimp(1 / denom, symbolic=False, max_terms=1))
        if n is not S.One:
            expr = (numer * n).expand() / d

    if expr.could_extract_minus_sign():
        n, d = fraction(expr)
        if d != 0:
            expr = signsimp(-n / (-d))

    if measure(expr) > ratio * measure(original_expr):
        expr = original_expr

    return expr
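An illustration added here (not part of simplify itself): the measure-based selection that the internal ``shorter`` helper performs can be reproduced directly with count_ops, picking the cheapest of a few candidate rewrites.

from diofant import cos, count_ops, sin, together, trigsimp
from diofant.abc import x, y

a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2)
candidates = [a, together(a, deep=True), trigsimp(a, deep=True)]
# min() with count_ops as the key mimics the shorter() helper above.
print(min(candidates, key=count_ops))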
Example 5
def trigsimp_groebner(expr,
                      hints=[],
                      quick=False,
                      order="grlex",
                      polynomial=False):
    """
    Simplify trigonometric expressions using a groebner basis algorithm.

    This routine takes a fraction involving trigonometric or hyperbolic
    expressions, and tries to simplify it. The primary metric is the
    total degree. Some attempts are made to choose the simplest possible
    expression of the minimal degree, but this is non-rigorous, and also
    very slow (see the ``quick=True`` option).

    If ``polynomial`` is set to True, instead of simplifying numerator and
    denominator together, this function just brings numerator and denominator
    into a canonical form. This is much faster, but has potentially worse
    results. However, if the input is a polynomial, then the result is
    guaranteed to be an equivalent polynomial of minimal degree.

    The most important option is hints. Its entries can be any of the
    following:

    - a natural number
    - a function
    - an iterable of the form (func, var1, var2, ...)
    - anything else, interpreted as a generator

    A number is used to indicate that the search space should be increased.
    A function is used to indicate that said function is likely to occur in a
    simplified expression.
    An iterable is used to indicate that func(var1 + var2 + ...) is likely to
    occur in a simplified expression.
    An additional generator also indicates that it is likely to occur.
    (See examples below).

    This routine carries out various computationally intensive algorithms.
    The option ``quick=True`` can be used to suppress one particularly slow
    step (at the expense of potentially more complicated results, but never at
    the expense of increased total degree).

    Examples
    ========

    >>> from diofant.abc import x, y
    >>> from diofant import sin, tan, cos, sinh, cosh, tanh
    >>> from diofant.simplify.trigsimp import trigsimp_groebner

    Suppose you want to simplify ``sin(x)*cos(x)``. Naively, nothing happens:

    >>> ex = sin(x)*cos(x)
    >>> trigsimp_groebner(ex)
    sin(x)*cos(x)

    This is because ``trigsimp_groebner`` only looks for a simplification
    involving just ``sin(x)`` and ``cos(x)``. You can tell it to also try
    ``2*x`` by passing ``hints=[2]``:

    >>> trigsimp_groebner(ex, hints=[2])
    sin(2*x)/2
    >>> trigsimp_groebner(sin(x)**2 - cos(x)**2, hints=[2])
    -cos(2*x)

    Increasing the search space this way can quickly become expensive. A much
    faster way is to give a specific expression that is likely to occur:

    >>> trigsimp_groebner(ex, hints=[sin(2*x)])
    sin(2*x)/2

    Hyperbolic expressions are similarly supported:

    >>> trigsimp_groebner(sinh(2*x)/sinh(x))
    2*cosh(x)

    Note how no hints had to be passed, since the expression already involved
    ``2*x``.

    The tangent function is also supported. You can either pass ``tan`` in the
    hints, to indicate that tan should be tried whenever cosine or sine are,
    or you can pass a specific generator:

    >>> trigsimp_groebner(sin(x)/cos(x), hints=[tan])
    tan(x)
    >>> trigsimp_groebner(sinh(x)/cosh(x), hints=[tanh(x)])
    tanh(x)

    Finally, you can use the iterable form to suggest that angle sum formulae
    should be tried:

    >>> ex = (tan(x) + tan(y))/(1 - tan(x)*tan(y))
    >>> trigsimp_groebner(ex, hints=[(tan, x, y)])
    tan(x + y)
    """

    # TODO
    #  - preprocess by replacing everything by funcs we can handle
    # - optionally use cot instead of tan
    # - more intelligent hinting.
    #     For example, if the ideal is small, and we have sin(x), sin(y),
    #     add sin(x + y) automatically... ?
    # - algebraic numbers ...
    # - expressions of lowest degree are not distinguished properly
    #   e.g. 1 - sin(x)**2
    # - we could try to order the generators intelligently, so as to influence
    #   which monomials appear in the quotient basis

    # THEORY
    # ------
    # Ratsimpmodprime above can be used to "simplify" a rational function
    # modulo a prime ideal. "Simplify" mainly means finding an equivalent
    # expression of lower total degree.
    #
    # We intend to use this to simplify trigonometric functions. To do that,
    # we need to decide (a) which ring to use, and (b) modulo which ideal to
    # simplify. In practice, (a) means settling on a list of "generators"
    # a, b, c, ..., such that the fraction we want to simplify is a rational
    # function in a, b, c, ..., with coefficients in ZZ (integers).
    # (b) means that we have to decide what relations to impose on the
    # generators. There are two practical problems:
    #   (1) The ideal has to be *prime* (a technical term).
    #   (2) The relations have to be polynomials in the generators.
    #
    # We typically have two kinds of generators:
    # - trigonometric expressions, like sin(x), cos(5*x), etc
    # - "everything else", like gamma(x), pi, etc.
    #
    # Since this function is trigsimp, we will concentrate on what to do with
    # trigonometric expressions. We can also simplify hyperbolic expressions,
    # but the extensions should be clear.
    #
    # One crucial point is that all *other* generators really should behave
    # like indeterminates. In particular if (say) "I" is one of them, then
    # in fact I**2 + 1 = 0 and we may and will compute non-sensical
    # expressions. However, we can work with a dummy and add the relation
    # I**2 + 1 = 0 to our ideal, then substitute back in the end.
    #
    # Now regarding trigonometric generators. We split them into groups,
    # according to the argument of the trigonometric functions. We want to
    # organise this in such a way that most trigonometric identities apply in
    # the same group. For example, given sin(x), cos(2*x) and cos(y), we would
    # group as [sin(x), cos(2*x)] and [cos(y)].
    #
    # Our prime ideal will be built in three steps:
    # (1) For each group, compute a "geometrically prime" ideal of relations.
    #     Geometrically prime means that it generates a prime ideal in
    #     CC[gens], not just ZZ[gens].
    # (2) Take the union of all the generators of the ideals for all groups.
    #     By the geometric primality condition, this is still prime.
    # (3) Add further inter-group relations which preserve primality.
    #
    # Step (1) works as follows. We will isolate common factors in the
    # argument, so that all our generators are of the form sin(n*x), cos(n*x)
    # or tan(n*x), with n an integer. Suppose first there are no tan terms.
    # The ideal [sin(x)**2 + cos(x)**2 - 1] is geometrically prime, since
    # X**2 + Y**2 - 1 is irreducible over CC.
    # Now, if we have a generator sin(n*x), then we can, using trig identities,
    # express sin(n*x) as a polynomial in sin(x) and cos(x). We can add this
    # relation to the ideal, preserving geometric primality, since the quotient
    # ring is unchanged.
    # Thus we have treated all sin and cos terms.
    # For tan(n*x), we add a relation tan(n*x)*cos(n*x) - sin(n*x) = 0.
    # (This requires of course that we already have relations for cos(n*x) and
    # sin(n*x).) It is not obvious, but it seems that this preserves geometric
    # primality.
    # XXX A real proof would be nice. HELP!
    #     Sketch that <S**2 + C**2 - 1, C*T - S> is a prime ideal of
    #     CC[S, C, T]:
    #     - it suffices to show that the projective closure in CP**3 is
    #       irreducible
    #     - using the half-angle substitutions, we can express sin(x), tan(x),
    #       cos(x) as rational functions in tan(x/2)
    #     - from this, we get a rational map from CP**1 to our curve
    #     - this is a morphism, hence the curve is prime
    #
    # Step (2) is trivial.
    #
    # Step (3) works by adding selected relations of the form
    # sin(x + y) - sin(x)*cos(y) - sin(y)*cos(x), etc. Geometric primality is
    # preserved by the same argument as before.

    def parse_hints(hints):
        """Split hints into (n, funcs, iterables, gens)."""
        n = 1
        funcs, iterables, gens = [], [], []
        for e in hints:
            if isinstance(e, (int, Integer)):
                n = e
            elif isinstance(e, FunctionClass):
                funcs.append(e)
            elif iterable(e):
                iterables.append((e[0], e[1:]))
                # XXX sin(x+2y)?
                # Note: we go through polys so e.g.
                # sin(-x) -> -sin(x) -> sin(x)
                gens.extend(
                    parallel_poly_from_expr([e[0](x) for x in e[1:]] +
                                            [e[0](Add(*e[1:]))])[1].gens)
            else:
                gens.append(e)
        return n, funcs, iterables, gens
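    # A hedged illustration: with hints=[2, tan, (sin, x, y)] parse_hints
    # returns n == 2, funcs == [tan], iterables == [(sin, (x, y))], and gens
    # containing sin(x), sin(y) and sin(x + y) (collected via
    # parallel_poly_from_expr, so that e.g. sin(-x) is first normalised
    # to -sin(x)).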

    def build_ideal(x, terms):
        """
        Build generators for our ideal. Terms is an iterable with elements of
        the form (fn, coeff), indicating that we have a generator fn(coeff*x).

        If any of the terms is trigonometric, sin(x) and cos(x) are guaranteed
        to appear in terms. Similarly for hyperbolic functions. For tan(n*x),
        sin(n*x) and cos(n*x) are guaranteed.
        """
        gens = []
        I = []
        y = Dummy('y')
        for fn, coeff in terms:
            for c, s, t, rel in ([cos, sin, tan, cos(x)**2 + sin(x)**2 - 1],
                                 [cosh, sinh, tanh,
                                  cosh(x)**2 - sinh(x)**2 - 1]):
                if coeff == 1 and fn in [c, s]:
                    I.append(rel)
                elif fn == t:
                    I.append(t(coeff * x) * c(coeff * x) - s(coeff * x))
                elif fn in [c, s]:
                    cn = fn(coeff * y).expand(trig=True).subs(y, x)
                    I.append(fn(coeff * x) - cn)
        return list(set(I))
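    # A hedged illustration: build_ideal(x, [(sin, 1), (cos, 2)]) contains the
    # Pythagorean relation sin(x)**2 + cos(x)**2 - 1 for the coefficient-1
    # generators, plus cos(2*x) - (2*cos(x)**2 - 1), obtained by expanding
    # cos(2*y) trigonometrically and substituting y -> x.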

    def analyse_gens(gens, hints):
        """
        Analyse the generators ``gens``, using the hints ``hints``.

        The meaning of ``hints`` is described in the main docstring.
        Return a new list of generators, and also the ideal we should
        work with.
        """
        # First parse the hints
        n, funcs, iterables, extragens = parse_hints(hints)
        debug('n=%s' % n, 'funcs:', funcs, 'iterables:', iterables,
              'extragens:', extragens)

        # We just add the extragens to gens and analyse them as before
        gens = list(gens)
        gens.extend(extragens)

        # remove duplicates
        funcs = list(set(funcs))
        iterables = list(set(iterables))
        gens = list(set(gens))

        # all the functions we can do anything with
        allfuncs = {sin, cos, tan, sinh, cosh, tanh}
        # sin(3*x) -> ((3, x), sin)
        trigterms = [(g.args[0].as_coeff_mul(), g.func) for g in gens
                     if g.func in allfuncs]
        # Our list of new generators - start with anything that we cannot
        # work with (i.e. is not a trigonometric term)
        freegens = [g for g in gens if g.func not in allfuncs]
        newgens = []
        trigdict = {}
        for (coeff, var), fn in trigterms:
            trigdict.setdefault(var, []).append((coeff, fn))
        res = []  # the ideal

        for key, val in trigdict.items():
            # We have now assembled a dictionary. Its keys are common
            # arguments in trigonometric expressions, and values are lists of
            # pairs (fn, coeff). x0, (fn, coeff) in trigdict means that we
            # need to deal with fn(coeff*x0). We take the rational gcd of the
            # coeffs, call it ``gcd``. We then use x = x0/gcd as "base symbol",
            # all other arguments are integral multiples thereof.
            # We will build an ideal which works with sin(x), cos(x).
            # If hint tan is provided, also work with tan(x). Moreover, if
            # n > 1, also work with sin(k*x) for k <= n, and similarly for cos
            # (and tan if the hint is provided). Finally, any generators which
            # the ideal does not work with but we need to accommodate (either
            # because it was in expr or because it was provided as a hint)
            # we also build into the ideal.
            # This selection process is expressed in the list ``terms``.
            # build_ideal then generates the actual relations in our ideal,
            # from this list.
            fns = [x[1] for x in val]
            val = [x[0] for x in val]
            gcd = reduce(igcd, val)
            terms = [(fn, v / gcd) for (fn, v) in zip(fns, val)]
            fs = set(funcs + fns)
            for c, s, t in ([cos, sin, tan], [cosh, sinh, tanh]):
                if any(x in fs for x in (c, s, t)):
                    fs.add(c)
                    fs.add(s)
            for fn in fs:
                for k in range(1, n + 1):
                    terms.append((fn, k))
            extra = []
            for fn, v in terms:
                if fn == tan:
                    extra.append((sin, v))
                    extra.append((cos, v))
                if fn in [sin, cos] and tan in fs:
                    extra.append((tan, v))
                if fn == tanh:
                    extra.append((sinh, v))
                    extra.append((cosh, v))
                if fn in [sinh, cosh] and tanh in fs:
                    extra.append((tanh, v))
            terms.extend(extra)
            x = gcd * Mul(*key)
            r = build_ideal(x, terms)
            res.extend(r)
            newgens.extend({fn(v * x) for fn, v in terms})

        # Add generators for compound expressions from iterables
        for fn, args in iterables:
            if fn == tan:
                # Tan expressions are recovered from sin and cos.
                iterables.extend([(sin, args), (cos, args)])
            elif fn == tanh:
                # Tanh expressions are recovered from sinh and cosh.
                iterables.extend([(sinh, args), (cosh, args)])
            else:
                dummys = symbols('d:%i' % len(args), cls=Dummy)
                expr = fn(Add(*dummys)).expand(trig=True).subs(
                    list(zip(dummys, args)))
                res.append(fn(Add(*args)) - expr)

        if myI in gens:
            res.append(myI**2 + 1)
            freegens.remove(myI)
            newgens.append(myI)

        return res, freegens, newgens

    myI = Dummy('I')
    expr = expr.subs(S.ImaginaryUnit, myI)
    subs = [(myI, S.ImaginaryUnit)]

    num, denom = cancel(expr).as_numer_denom()
    try:
        (pnum, pdenom), opt = parallel_poly_from_expr([num, denom])
    except PolificationFailed:
        return expr
    debug('initial gens:', opt.gens)
    ideal, freegens, gens = analyse_gens(opt.gens, hints)
    debug('ideal:', ideal)
    debug('new gens:', gens, " -- len", len(gens))
    debug('free gens:', freegens, " -- len", len(freegens))
    # NOTE we force the domain to be ZZ to stop polys from injecting generators
    #      (which is usually a sign of a bug in the way we build the ideal)
    if not gens:
        return expr
    G = groebner(ideal, order=order, gens=gens, domain=ZZ)
    debug('groebner basis:', list(G), " -- len", len(G))

    # If our fraction is a polynomial in the free generators, simplify all
    # coefficients separately:

    from diofant.simplify.ratsimp import ratsimpmodprime

    if freegens and pdenom.has_only_gens(*set(gens).intersection(pdenom.gens)):
        num = Poly(num, gens=gens + freegens).eject(*gens)
        res = []
        for monom, coeff in num.terms():
            ourgens = set(parallel_poly_from_expr([coeff, denom])[1].gens)
            # We compute the transitive closure of all generators that can
            # be reached from our generators through relations in the ideal.
            changed = True
            while changed:
                changed = False
                for p in ideal:
                    p = Poly(p)
                    if not ourgens.issuperset(p.gens) and \
                       not p.has_only_gens(*set(p.gens).difference(ourgens)):
                        changed = True
                        ourgens.update(p.exclude().gens)
            # NOTE preserve order!
            realgens = [x for x in gens if x in ourgens]
            # The generators of the ideal have now been (implicitly) split
            # into two groups: those involving ourgens and those that don't.
            # Since we took the transitive closure above, these two groups
            # live in subrings generated by a *disjoint* set of variables.
            # Any sensible groebner basis algorithm will preserve this disjoint
            # structure (i.e. the elements of the groebner basis can be split
            # similarly), and the two subsets of the groebner basis then
            # form groebner bases by themselves. (For the smaller generating
            # sets, of course.)
            ourG = [
                g.as_expr() for g in G.polys
                if g.has_only_gens(*ourgens.intersection(g.gens))
            ]
            res.append(
                Mul(*[a**b for a, b in zip(freegens, monom)]) *
                ratsimpmodprime(coeff / denom,
                                ourG,
                                order=order,
                                gens=realgens,
                                quick=quick,
                                domain=ZZ,
                                polynomial=polynomial).subs(subs))
        return Add(*res)
        # NOTE The following is simpler and has less assumptions on the
        #      groebner basis algorithm. If the above turns out to be broken,
        #      use this.
        return Add(*[
            Mul(*[a**b for a, b in zip(freegens, monom)]) *
            ratsimpmodprime(coeff / denom,
                            list(G),
                            order=order,
                            gens=gens,
                            quick=quick,
                            domain=ZZ) for monom, coeff in num.terms()
        ])
    else:
        return ratsimpmodprime(expr,
                               list(G),
                               order=order,
                               gens=freegens + gens,
                               quick=quick,
                               domain=ZZ,
                               polynomial=polynomial).subs(subs)
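Two further hedged examples: the Pythagorean relation is part of the default ideal, so the first identity needs no hints, while the second needs the search space widened to 2*x (exact printed forms may vary).

from diofant import cos, sin
from diofant.abc import x
from diofant.simplify.trigsimp import trigsimp_groebner

print(trigsimp_groebner(sin(x)**2 + cos(x)**2))             # expected: 1
print(trigsimp_groebner(cos(x)**4 - sin(x)**4, hints=[2]))  # expected: cos(2*x)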
Example 6
def heurisch(f,
             x,
             rewrite=False,
             hints=None,
             mappings=None,
             retries=3,
             degree_offset=0,
             unnecessary_permutations=None):
    """
    Compute indefinite integral using heuristic Risch algorithm.

    This is a heuristic approach to indefinite integration in finite
    terms using the extended heuristic (parallel) Risch algorithm, based
    on Manuel Bronstein's "Poor Man's Integrator" [1]_.

    The algorithm supports various classes of functions including
    transcendental elementary or special functions like Airy,
    Bessel, Whittaker and Lambert.

    Note that this algorithm is not a decision procedure. If it isn't
    able to compute the antiderivative for a given function, then this is
    not a proof that such an antiderivative does not exist.  One should use
    the recursive Risch algorithm in such a case.  It is an open question
    whether this algorithm can be made a full decision procedure.

    This is an internal integrator procedure.  You should use the top-level
    'integrate' function in most cases, as this procedure needs some
    preprocessing steps and may otherwise fail.

    Parameters
    ==========

    heurisch(f, x, rewrite=False, hints=None)

    f : Expr
        expression
    x : Symbol
        variable

    rewrite : Boolean, optional
        force rewriting of 'f' in terms of 'tan' and 'tanh'; default is False.
    hints : None or list
        a list of functions that may appear in the antiderivative.  If
        None (default), no suggestions are made at all; if an empty list is
        passed, heurisch tries to figure out suitable terms itself.

    Examples
    ========

    >>> from diofant import tan
    >>> from diofant.integrals.heurisch import heurisch
    >>> from diofant.abc import x, y

    >>> heurisch(y*tan(x), x)
    y*log(tan(x)**2 + 1)/2

    References
    ==========

    .. [1] Manuel Bronstein's "Poor Man's Integrator",
           http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html

    .. [2] K. Geddes, L. Stefanus, On the Risch-Norman Integration
           Method and its Implementation in Maple, Proceedings of
           ISSAC'89, ACM Press, 212-217.

    .. [3] J. H. Davenport, On the Parallel Risch Algorithm (I),
           Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.

    .. [4] J. H. Davenport, On the Parallel Risch Algorithm (III):
           Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.

    .. [5] J. H. Davenport, B. M. Trager, On the Parallel Risch
           Algorithm (II), ACM Transactions on Mathematical
           Software 11 (1985), 356-362.

    See Also
    ========

    diofant.integrals.integrals.Integral.doit
    diofant.integrals.integrals.Integral
    diofant.integrals.heurisch.components
    """
    f = sympify(f)
    if x not in f.free_symbols:
        return f * x

    if not f.is_Add:
        indep, f = f.as_independent(x)
    else:
        indep = S.One

    rewritables = {
        (sin, cos, cot): tan,
        (sinh, cosh, coth): tanh,
    }

    if rewrite:
        for candidates, rule in rewritables.items():
            f = f.rewrite(candidates, rule)
    else:
        for candidates in rewritables.keys():
            if f.has(*candidates):
                break
        else:
            rewrite = True

    terms = components(f, x)

    if hints is not None:
        if not hints:
            a = Wild('a', exclude=[x])
            b = Wild('b', exclude=[x])
            c = Wild('c', exclude=[x])

            for g in set(terms):  # using copy of terms
                if g.is_Function:
                    if g.func is li:
                        M = g.args[0].match(a * x**b)

                        if M is not None:
                            terms.add(
                                x *
                                (li(M[a] * x**M[b]) -
                                 (M[a] * x**M[b])**(-1 / M[b]) * Ei(
                                     (M[b] + 1) * log(M[a] * x**M[b]) / M[b])))
                            # terms.add( x*(li(M[a]*x**M[b]) - (x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
                            # terms.add( x*(li(M[a]*x**M[b]) - x*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) )
                            # terms.add( li(M[a]*x**M[b]) - Ei((M[b]+1)*log(M[a]*x**M[b])/M[b]) )

                elif g.is_Pow:
                    if g.base is S.Exp1:
                        M = g.exp.match(a * x**2)

                        if M is not None:
                            if M[a].is_positive:
                                terms.add(erfi(sqrt(M[a]) * x))
                            else:  # M[a].is_negative or unknown
                                terms.add(erf(sqrt(-M[a]) * x))

                        M = g.exp.match(a * x**2 + b * x + c)

                        if M is not None:
                            if M[a].is_positive:
                                terms.add(
                                    sqrt(pi / 4 * (-M[a])) *
                                    exp(M[c] - M[b]**2 / (4 * M[a])) * erfi(
                                        sqrt(M[a]) * x + M[b] /
                                        (2 * sqrt(M[a]))))
                            elif M[a].is_negative:
                                terms.add(
                                    sqrt(pi / 4 * (-M[a])) *
                                    exp(M[c] - M[b]**2 / (4 * M[a])) * erf(
                                        sqrt(-M[a]) * x - M[b] /
                                        (2 * sqrt(-M[a]))))

                        M = g.exp.match(a * log(x)**2)

                        if M is not None:
                            if M[a].is_positive:
                                terms.add(
                                    erfi(
                                        sqrt(M[a]) * log(x) + 1 /
                                        (2 * sqrt(M[a]))))
                            if M[a].is_negative:
                                terms.add(
                                    erf(
                                        sqrt(-M[a]) * log(x) - 1 /
                                        (2 * sqrt(-M[a]))))

                    elif g.exp.is_Rational and g.exp.q == 2:
                        M = g.base.match(a * x**2 + b)

                        if M is not None and M[b].is_positive:
                            if M[a].is_positive:
                                terms.add(asinh(sqrt(M[a] / M[b]) * x))
                            elif M[a].is_negative:
                                terms.add(asin(sqrt(-M[a] / M[b]) * x))

                        M = g.base.match(a * x**2 - b)

                        if M is not None and M[b].is_positive:
                            if M[a].is_positive:
                                terms.add(acosh(sqrt(M[a] / M[b]) * x))
                            elif M[a].is_negative:
                                terms.add((-M[b] / 2 * sqrt(-M[a]) * atan(
                                    sqrt(-M[a]) * x / sqrt(M[a] * x**2 - M[b]))
                                           ))

        else:
            terms |= set(hints)

    for g in set(terms):  # using copy of terms
        terms |= components(cancel(g.diff(x)), x)

    # TODO: caching is a significant factor for why permutations work at all. Change this.
    V = _symbols('x', len(terms))

    # sort mapping expressions from largest to smallest (last is always x).
    mapping = list(reversed(list(zip(*ordered(
        [(a[0].as_independent(x)[1], a) for a in zip(terms, V)])))[1]))
    rev_mapping = {v: k for k, v in mapping}
    if mappings is None:
        # optimizing the number of permutations of mapping
        assert mapping[-1][0] == x  # if not, find it and correct this comment
        unnecessary_permutations = [mapping.pop(-1)]
        mappings = permutations(mapping)
    else:
        unnecessary_permutations = unnecessary_permutations or []

    def _substitute(expr):
        return expr.subs(mapping)

    for mapping in mappings:
        mapping = list(mapping)
        mapping = mapping + unnecessary_permutations
        diffs = [_substitute(cancel(g.diff(x))) for g in terms]
        denoms = [g.as_numer_denom()[1] for g in diffs]
        if all(h.is_polynomial(*V)
               for h in denoms) and _substitute(f).is_rational_function(*V):
            denom = reduce(lambda p, q: lcm(p, q, *V), denoms)
            break
    else:
        if not rewrite:
            result = heurisch(
                f,
                x,
                rewrite=True,
                hints=hints,
                unnecessary_permutations=unnecessary_permutations)

            if result is not None:
                return indep * result
        return

    numers = [cancel(denom * g) for g in diffs]

    def _derivation(h):
        return Add(*[d * h.diff(v) for d, v in zip(numers, V)])

    def _deflation(p):
        for y in V:
            if not p.has(y):
                continue

            if _derivation(p) is not S.Zero:
                c, q = p.as_poly(y).primitive()
                return _deflation(c) * gcd(q, q.diff(y)).as_expr()
        else:
            return p

    def _splitter(p):
        for y in V:
            if not p.has(y):
                continue

            if _derivation(y) is not S.Zero:
                c, q = p.as_poly(y).primitive()

                q = q.as_expr()

                h = gcd(q, _derivation(q), y)
                s = quo(h, gcd(q, q.diff(y), y), y)

                c_split = _splitter(c)

                if s.as_poly(y).degree() == 0:
                    return c_split[0], q * c_split[1]

                q_split = _splitter(cancel(q / s))

                return c_split[0] * q_split[0] * s, c_split[1] * q_split[1]
        else:
            return S.One, p

    special = {}

    for term in terms:
        if term.is_Function:
            if term.func is tan:
                special[1 + _substitute(term)**2] = False
            elif term.func is tanh:
                special[1 + _substitute(term)] = False
                special[1 - _substitute(term)] = False
            elif term.func is LambertW:
                special[_substitute(term)] = True

    F = _substitute(f)

    P, Q = F.as_numer_denom()

    u_split = _splitter(denom)
    v_split = _splitter(Q)

    polys = set(list(v_split) + [u_split[0]] + list(special.keys()))

    s = u_split[0] * Mul(*[k for k, v in special.items() if v])
    polified = [p.as_poly(*V) for p in [s, P, Q]]

    if None in polified:
        return

    # --- definitions for _integrate ---
    a, b, c = [p.total_degree() for p in polified]

    poly_denom = (s * v_split[0] * _deflation(v_split[1])).as_expr()

    def _exponent(g):
        if g.is_Pow:
            if g.exp.is_Rational and g.exp.q != 1:
                if g.exp.p > 0:
                    return g.exp.p + g.exp.q - 1
                else:
                    return abs(g.exp.p + g.exp.q)
            else:
                return 1
        elif not g.is_Atom and g.args:
            return max([_exponent(h) for h in g.args])
        else:
            return 1

    A, B = _exponent(f), a + max(b, c)

    if A > 1 and B > 1:
        monoms = itermonomials(V, A + B - 1 + degree_offset)
    else:
        monoms = itermonomials(V, A + B + degree_offset)

    poly_coeffs = _symbols('A', len(monoms))

    poly_part = Add(*[
        poly_coeffs[i] * monomial for i, monomial in enumerate(ordered(monoms))
    ])

    reducibles = set()

    for poly in polys:
        if poly.has(*V):
            try:
                factorization = factor(poly, greedy=True)
            except PolynomialError:
                factorization = poly

            if factorization.is_Mul:
                reducibles |= set(factorization.args)
            else:
                reducibles.add(factorization)

    def _integrate(field=None):
        irreducibles = set()

        for poly in reducibles:
            for z in poly.free_symbols:
                if z in V:
                    break  # should this be: `irreducibles |= \
            else:  # set(root_factors(poly, z, filter=field))`
                continue  # and the line below deleted?
                #                          |
                #                          V
            irreducibles |= set(root_factors(poly, z, filter=field))

        log_coeffs, log_part = [], []
        B = _symbols('B', len(irreducibles))

        # Note: the ordering matters here
        for poly, b in reversed(list(ordered(zip(irreducibles, B)))):
            if poly.has(*V):
                poly_coeffs.append(b)
                log_part.append(b * log(poly))

        # TODO: Currently it's better to use symbolic expressions here instead
        # of rational functions, because it's simpler and FracElement doesn't
        # give a big speed improvement yet.  This is because cancellation is slow
        # due to slow polynomial GCD algorithms. If this gets improved then
        # revise this code.
        candidate = poly_part / poly_denom + Add(*log_part)
        h = F - _derivation(candidate) / denom
        raw_numer = h.as_numer_denom()[0]

        # Rewrite raw_numer as a polynomial in K[coeffs][V] where K is a field
        # that we have to determine.  We can't simply use atoms() because log(3),
        # sqrt(y) and similar expressions can appear, leading to non-trivial
        # domains.
        syms = set(poly_coeffs) | set(V)
        non_syms = set()

        def find_non_syms(expr):
            if expr.is_Integer or expr.is_Rational:
                pass  # ignore trivial numbers
            elif expr in syms:
                pass  # ignore variables
            elif not expr.has(*syms):
                non_syms.add(expr)
            elif expr.is_Add or expr.is_Mul or expr.is_Pow:
                list(map(find_non_syms, expr.args))
            else:
                # TODO: Non-polynomial expression. This should have been
                # filtered out at an earlier stage.
                raise PolynomialError

        try:
            find_non_syms(raw_numer)
        except PolynomialError:
            return
        else:
            ground, _ = construct_domain(non_syms, field=True)

        coeff_ring = PolyRing(poly_coeffs, ground)
        ring = PolyRing(V, coeff_ring)

        numer = ring.from_expr(raw_numer)

        solution = solve_lin_sys(numer.coeffs(), coeff_ring)

        if solution is None:
            return
        else:
            solution = [(coeff_ring.symbols[coeff_ring.index(k)], v.as_expr())
                        for k, v in solution.items()]
            return candidate.subs(solution).subs(
                list(zip(poly_coeffs, [S.Zero] * len(poly_coeffs))))

    if not (F.free_symbols - set(V)):
        solution = _integrate('Q')

        if solution is None:
            solution = _integrate()
    else:
        solution = _integrate()

    if solution is not None:
        antideriv = solution.subs(rev_mapping)
        antideriv = cancel(antideriv).expand(force=True)

        if antideriv.is_Add:
            antideriv = antideriv.as_independent(x)[1]

        return indep * antideriv
    else:
        if retries >= 0:
            result = heurisch(
                f,
                x,
                mappings=mappings,
                rewrite=rewrite,
                hints=hints,
                retries=retries - 1,
                unnecessary_permutations=unnecessary_permutations)

            if result is not None:
                return indep * result

        return
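A hedged usage sketch of the hints mechanism described above: with hints=[] heurisch tries to guess extra candidate terms itself, so for a Gaussian integrand the erf pattern is matched (the exact printed form of the result may vary between versions).

from diofant import exp
from diofant.abc import x
from diofant.integrals.heurisch import heurisch

print(heurisch(exp(-x**2), x, hints=[]))  # expected: something like sqrt(pi)*erf(x)/2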
Example 7
def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
    """
    Checks if Df is the logarithmic derivative of a k(t)-radical.

    b in k(t) can be written as the logarithmic derivative of a k(t) radical if
    there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u.
    Either returns (ans, u, n, const) or None, which means that Df cannot be
    written as the logarithmic derivative of a k(t)-radical.  ans is a list of
    tuples such that Mul(*[i**j for i, j in ans]) == u.  This is useful for
    seeing exactly what elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for any
    f in K, Df is the logarithmic derivative of a K-radical if and only if there
    are ri in QQ such that::

            ---               ---       Dt
            \    r  * Dt   +  \    r  *   i
            /     i     i     /     i   ---   =  Df.
            ---               ---        t
         i in L            i in E         i
               K/C(x)            K/C(x)


    Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
    transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
    in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
    monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
    is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
    a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
    hyperexponential monomials of K over C(x)).  If K is an elementary extension
    over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
    transcendence degree of K over C(x).  Furthermore, because Const_D(K) ==
    Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
    deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
    and L_K/C(x) are disjoint.

    The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
    recursively using this same function.  Therefore, it is required to pass
    them as indices to D (or T).  L_args are the arguments of the logarithms
    indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])).  This is
    needed to compute the final answer u such that n*f == Du/u.

    exp(f) will be the same as u up to a multiplicative constant.  This is
    because they will both behave the same as monomials.  For example, both
    exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const
    is returned.  const is such that exp(const)*f == u.  This is calculated by
    subtracting the arguments of one exponential from the other.  Therefore, it
    is necessary to pass the arguments of the exponential terms in E_args.

    To handle the case where we are given Df, not f, use
    is_log_deriv_k_t_radical_in_field().
    """
    H = []
    if Df:
        dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2,
            include=True)
    else:
        dfa, dfd = fa, fd

    # Our assumption here is that each monomial is recursively transcendental
    if len(DE.L_K) + len(DE.E_K) != len(DE.D) - 1:
        if [i for i in DE.cases if i == 'tan'] or \
                {i for i in DE.cases if i == 'primitive'} - set(DE.L_K):
            raise NotImplementedError("Real version of the structure "
                "theorems with hypertangent support is not yet implemented.")

        # TODO: What should really be done in this case?
        raise NotImplementedError("Nonelementary extensions not supported "
            "in the structure theorems.")

    E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.E_K]
    L_part = [DE.D[i].as_expr() for i in DE.L_K]

    lhs = Matrix([E_part + L_part])
    rhs = Matrix([dfa.as_expr()/dfd.as_expr()])

    A, u = constant_system(lhs, rhs, DE)
    if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
        # If the elements of u are not all constant
        # Note: See comment in constant_system

        # Also note: derivation(basic=True) calls cancel()
        return
    else:
        if not all(i.is_Rational for i in u):
            # TODO: But maybe we can tell if they're not rational, like
            # log(2)/log(3). Also, there should be an option to continue
            # anyway, even if the result might potentially be wrong.
            raise NotImplementedError("Cannot work with non-rational "
                "coefficients in this case.")
        else:
            n = reduce(ilcm, [i.as_numer_denom()[1] for i in u])
            u *= n
            terms = [DE.T[i] for i in DE.E_K] + DE.L_args
            ans = list(zip(terms, u))
            result = Mul(*[Pow(i, j) for i, j in ans])

            # exp(f) will be the same as result up to a multiplicative
            # constant.  We now find the log of that constant.
            argterms = DE.E_args + [DE.T[i] for i in DE.L_K]
            const = cancel(fa.as_expr()/fd.as_expr() -
                Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))

            return ans, result, n, const
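A hedged illustration of the defining property itself, independent of the DifferentialExtension machinery: Df is the logarithmic derivative of a k(t)-radical when n*Df == Du/u for some nonzero integer n and nonzero u.

from diofant import diff, log, simplify
from diofant.abc import x

f, u, n = log(x), x**2, 2
print(simplify(n*diff(f, x) - diff(u, x)/u))  # expected: 0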
Example 8
def is_deriv_k(fa, fd, DE):
    """
    Checks if Df/f is the derivative of an element of k(t).

    a in k(t) is the derivative of an element of k(t) if there exists b in k(t)
    such that a = Db.  Either returns (ans, u), such that Df/f == Du, or None,
    which means that Df/f is not the derivative of an element of k(t).  ans is
    a list of tuples such that Add(*[i*j for i, j in ans]) == u.  This is useful
    for seeing exactly which elements of k(t) produce u.

    This function uses the structure theorem approach, which says that for any
    f in K, Df/f is the derivative of a element of K if and only if there are ri
    in QQ such that::

            ---               ---       Dt
            \    r  * Dt   +  \    r  *   i      Df
            /     i     i     /     i   ---   =  --.
            ---               ---        t        f
         i in L            i in E         i
               K/C(x)            K/C(x)


    Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
    transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
    in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
    monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
    is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
    a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
    hyperexponential monomials of K over C(x)).  If K is an elementary extension
    over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
    transcendence degree of K over C(x).  Furthermore, because Const_D(K) ==
    Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
    deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
    and L_K/C(x) are disjoint.

    The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
    recursively using this same function.  Therefore, it is required to pass
    them as indices to D (or T).  E_args are the arguments of the
    hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] ==
    exp(E_args[i])).  This is needed to compute the final answer u such that
    Df/f == Du.

    log(f) will be the same as u up to an additive constant.  This is because
    they will both behave the same as monomials. For example, both log(x) and
    log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant.
    Therefore, the term const is returned.  const is such that
    log(const) + f == u.  This is calculated by dividing the argument of one
    logarithm by the argument of the other.  Therefore, it is necessary to pass
    of the logarithmic terms in L_args.

    To handle the case where we are given Df/f, not f, use is_deriv_k_in_field().
    """
    # Compute Df/f
    dfa, dfd = fd*(fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd**2*fa
    dfa, dfd = dfa.cancel(dfd, include=True)

    # Our assumption here is that each monomial is recursively transcendental
    if len(DE.L_K) + len(DE.E_K) != len(DE.D) - 1:
        if [i for i in DE.cases if i == 'tan'] or \
                {i for i in DE.cases if i == 'primitive'} - set(DE.L_K):
            raise NotImplementedError("Real version of the structure "
                "theorems with hypertangent support is not yet implemented.")

        # TODO: What should really be done in this case?
        raise NotImplementedError("Nonelementary extensions not supported "
            "in the structure theorems.")

    E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.E_K]
    L_part = [DE.D[i].as_expr() for i in DE.L_K]

    lhs = Matrix([E_part + L_part])
    rhs = Matrix([dfa.as_expr()/dfd.as_expr()])

    A, u = constant_system(lhs, rhs, DE)

    if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
        # If the elements of u are not all constant
        # Note: See comment in constant_system

        # Also note: derivation(basic=True) calls cancel()
        return
    else:
        if not all(i.is_Rational for i in u):
            raise NotImplementedError("Cannot work with non-rational "
                "coefficients in this case.")
        else:
            terms = DE.E_args + [DE.T[i] for i in DE.L_K]
            ans = list(zip(terms, u))
            result = Add(*[Mul(i, j) for i, j in ans])
            argterms = [DE.T[i] for i in DE.E_K] + DE.L_args
            l = []
            for i, j in zip(argterms, u):
                # We need to get around things like sqrt(x**2) != x
                # and also sqrt(x**2 + 2*x + 1) != x + 1
                icoeff, iterms = sqf_list(i)
                l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms])))
            const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l))

            return ans, result, const
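A hedged illustration of the defining property: for f = x**2 in K = C(x, log(x)), Df/f == 2/x is the derivative of the element u = 2*log(x) of K.

from diofant import diff, log, simplify
from diofant.abc import x

f, u = x**2, 2*log(x)
print(simplify(diff(f, x)/f - diff(u, x)))  # expected: 0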
Example 9
def constant_system(A, u, DE):
    """
    Generate a system for the constant solutions.

    Given a differential field (K, D) with constant field C = Const(K), a Matrix
    A, and a vector (Matrix) u with coefficients in K, returns the pair
    (B, v), where B is a Matrix with coefficients in C and v is a vector
    (Matrix) such that either v has coefficients in C, in which case the
    solutions in C of Ax == u are exactly all the solutions of Bx == v, or v
    has a non-constant coefficient, in which case Ax == u has no constant
    solution.

    This algorithm is used both in solving parametric problems and in
    determining if an element a of K is a derivative of an element of K or the
    logarithmic derivative of a K-radical using the structure theorem approach.

    Because Poly does not play well with Matrix yet, this algorithm assumes that
    all matrix entries are Basic expressions.
    """
    if not A:
        return A, u
    Au = A.row_join(u)
    Au = Au.rref(simplify=cancel)[0]
    # Warning: This will NOT return correct results if cancel() cannot reduce
    # an identically zero expression to 0.  The danger is that we might
    # incorrectly prove that an integral is nonelementary (such as
    # risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x)).
    # But this is a limitation in computer algebra in general, and implicit
    # in the correctness of the Risch Algorithm is the computability of the
    # constant field (actually, this same correctness problem exists in any
    # algorithm that uses rref()).
    #
    # We therefore limit ourselves to constant fields that are computable
    # via the cancel() function, in order to prevent a speed bottleneck from
    # calling some more complex simplification function (rational function
    # coefficients will fall into this class).  Furthermore, (I believe) this
    # problem will only crop up if the integral explicitly contains an
    # expression in the constant field that is identically zero, but cannot
    # be reduced to such by cancel().  Therefore, a careful user can avoid this
    # problem entirely by being careful with the sorts of expressions that
    # appear in the integrand in the variables other than the integration
    # variable (the structure theorems should be able to completely decide these
    # problems in the integration variable).
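    #
    # (Illustrative note, not from the original source: cancel() treats sin(x)
    # and cos(x) as independent generators, so cancel(sin(x)**2 + cos(x)**2 - 1)
    # is returned unchanged instead of being reduced to 0, which is exactly the
    # situation described above.)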

    Au = Au.applyfunc(cancel)
    A, u = Au[:, :-1], Au[:, -1]

    for j in range(A.cols):
        for i in range(A.rows):
            if A[i, j].has(*DE.T):
                # This assumes that const(F(t0, ..., tn)) == const(K) == F
                Ri = A[i, :]
                # Rm1 is the new row R_{m+1}, where m = A.rows
                Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True) /
                    derivation(A[i, j], DE, basic=True))
                Rm1 = Rm1.applyfunc(cancel)
                um1 = cancel(derivation(u[i], DE, basic=True) /
                    derivation(A[i, j], DE, basic=True))

                for s in range(A.rows):
                    # A[s, :] = A[s, :] - A[s, j]*Rm1
                    Asj = A[s, j]
                    A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj]))
                    # u[s] = u[s] - A[s, j]*um1
                    u.row_op(s, lambda r, jj: cancel(r - Asj*um1))

                A = A.col_join(Rm1)
                u = u.col_join(Matrix([um1]))

    return A, u
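
# Usage sketch, added for illustration (not part of the original example).  It
# assumes DifferentialExtension is importable from diofant.integrals.risch and
# accepts the 'extension' keyword; with D = d/dx on QQ(x), the system A*X == u
# below has the unique constant solution X == (1, 0), and constant_system
# returns an equivalent system whose entries are free of x.
from diofant import Matrix, Poly
from diofant.abc import x
from diofant.integrals.risch import DifferentialExtension

DE = DifferentialExtension(extension={'D': [Poly(1, x)]})  # base case: Dx == 1
A = Matrix([[x, 1], [1, 0]])
u = Matrix([x, 1])
B, v = constant_system(A, u, DE)  # expect B == eye(2), v == Matrix([1, 0])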
Example n. 10
0
               i, j in roots):
        # If f is the logarithmic derivative of a k(t)-radical, then all the
        # roots of the resultant must be rational numbers.
        return

    # [(a, i), ...], where i*log(a) is a term in the log-part of the integral
    # of f
    respolys, residues = list(zip(*roots)) or [[], []]
    # Note: this might be empty, but everything below should work fine in that
    # case (it should be the same as if it were [[1, 1]])
    residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for
        i in residues[j]]

    # TODO: finish writing this and write tests

    p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z))

    p = p.as_poly(DE.t)
    if p is None:
        # f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical
        return

    if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)):
        return

    if case == 'auto':
        case = DE.case

    if case == 'exp':
        wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True)
        with DecrementLevel(DE):
Example n. 11
0
                                        is_log_deriv_k_t_radical_in_field)
    # TODO: finish writing this and write tests

    if case == 'auto':
        case = DE.case

    da = a.degree(DE.t)
    db = b.degree(DE.t)

    # The parametric and regular cases are identical, except for this part
    if parametric:
        dc = max([i.degree(DE.t) for i in cQ])
    else:
        dc = cQ.degree(DE.t)

    alpha = cancel(-b.as_poly(DE.t).LC().as_expr() /
                   a.as_poly(DE.t).LC().as_expr())

    if case == 'base':
        n = max(0, dc - max(db, da - 1))
        if db == da - 1 and alpha.is_Integer:
            n = max(0, alpha, dc - db)
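            # (Worked instance, added for illustration: with da == 2, db == 1,
            # dc == 1 and alpha == 5, the generic bound above gives
            # max(0, 1 - max(1, 1)) == 0, while this branch raises the degree
            # bound to max(0, 5, 1 - 1) == 5.)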

    elif case == 'primitive':
        if db > da:
            n = max(0, dc - db)
        else:
            n = max(0, dc - da + 1)

        etaa, etad = frac_in(DE.d, DE.T[DE.level - 1])

        t1 = DE.t
Example n. 12
0
def apart_list_full_decomposition(P, Q, dummygen):
    """
    Bronstein's full partial fraction decomposition algorithm.

    Given a univariate rational function ``f``, performing only GCD
    operations over the algebraic closure of the initial ground domain
    of definition, compute full partial fraction decomposition with
    fractions having linear denominators.

    Note that no factorization of the initial denominator of ``f`` is
    performed. The final decomposition is formed in terms of a sum of
    :class:`RootSum` instances.

    References
    ==========

    .. [1] [Bronstein93]_
    """
    f, x, U = P / Q, P.gen, []

    u = Function('u')(x)
    a = Dummy('a')

    partial = []

    for d, n in Q.sqf_list_include(all=True):
        b = d.as_expr()
        U += [u.diff(x, n - 1)]

        h = cancel(f * b**n) / u**n

        H, subs = [h], []

        for j in range(1, n):
            H += [H[-1].diff(x) / j]

        for j in range(1, n + 1):
            subs += [(U[j - 1], b.diff(x, j) / j)]

        for j in range(0, n):
            P, Q = cancel(H[j]).as_numer_denom()

            for i in range(0, j + 1):
                P = P.subs(*subs[j - i])

            Q = Q.subs(*subs[0])

            P = Poly(P, x)
            Q = Poly(Q, x)

            G = P.gcd(d)
            D = d.quo(G)

            B, g = Q.half_gcdex(D)
            b = (P * B.quo(g)).rem(D)

            Dw = D.subs(x, next(dummygen))
            numer = Lambda(a, b.as_expr().subs(x, a))
            denom = Lambda(a, (x - a))
            exponent = n - j

            partial.append((Dw, numer, denom, exponent))

    return partial
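
# Usage sketch, added for illustration (not part of the original example).  It
# calls the routine directly on 1/(x**2*(x - 1)); numbered_symbols is assumed
# to be exported by diofant as in SymPy.  Each returned tuple has the form
# (denominator with a dummy root, numerator Lambda, denominator Lambda,
# exponent), ready to be assembled into RootSum terms by the caller.
from diofant import Dummy, Poly, numbered_symbols
from diofant.abc import x

P = Poly(1, x)
Q = Poly(x**2*(x - 1), x)
dummygen = numbered_symbols('w', cls=Dummy)
partial_fractions = apart_list_full_decomposition(P, Q, dummygen)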
Example n. 13
0
def ratsimpmodprime(expr, G, *gens, **args):
    """
    Simplifies a rational expression ``expr`` modulo the prime ideal
    generated by ``G``.  ``G`` should be a Groebner basis of the
    ideal.

    >>> from diofant.simplify.ratsimp import ratsimpmodprime
    >>> from diofant.abc import x, y
    >>> eq = (x + y**5 + y)/(x - y)
    >>> ratsimpmodprime(eq, [x*y**5 - x - y], x, y, order='lex')
    (x**2 + x*y + x + y)/(x**2 - x*y)

    If ``polynomial`` is False, the algorithm computes a rational
    simplification which minimizes the sum of the total degrees of
    the numerator and the denominator.

    If ``polynomial`` is True, this function just brings numerator and
    denominator into a canonical form. This is much faster, but has
    potentially worse results.

    References
    ==========

    .. [1] M. Monagan, R. Pearce, Rational Simplification Modulo a Polynomial
       Ideal,
       http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.163.6984
       (specifically, the second algorithm)
    """
    from diofant import solve

    quick = args.pop('quick', True)
    polynomial = args.pop('polynomial', False)
    debug('ratsimpmodprime', expr)

    # usual preparation of polynomials:

    num, denom = cancel(expr).as_numer_denom()

    try:
        polys, opt = parallel_poly_from_expr([num, denom] + G, *gens, **args)
    except PolificationFailed:
        return expr

    domain = opt.domain

    if domain.has_assoc_Field:
        opt.domain = domain.get_field()
    else:
        raise DomainError("can't compute rational simplification over %s" %
                          domain)

    # compute only once
    leading_monomials = [g.LM(opt.order) for g in polys[2:]]
    tested = set()

    def staircase(n):
        """
        Compute all monomials of degree at most ``n`` that are
        not divisible by any element of ``leading_monomials``.
        """
        if n == 0:
            return [1]
        S = []
        for mi in combinations_with_replacement(range(len(opt.gens)), n):
            m = [0] * len(opt.gens)
            for i in mi:
                m[i] += 1
            if all([monomial_div(m, lmg) is None
                    for lmg in leading_monomials]):
                S.append(m)

        return [Monomial(s).as_expr(*opt.gens) for s in S] + staircase(n - 1)
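    # (Worked instance, added for illustration: with opt.gens == (x, y) and a
    # single leading monomial x*y, staircase(2) keeps the degree-2 monomials
    # not divisible by x*y, namely x**2 and y**2, and then recurses, giving
    # [x**2, y**2, x, y, 1].)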

    def _ratsimpmodprime(a, b, allsol, N=0, D=0):
        """
        Computes a rational simplification of ``a/b`` which minimizes
        the sum of the total degrees of the numerator and the denominator.

        The algorithm proceeds by looking at ``a * d - b * c`` modulo
        the ideal generated by ``G`` for some ``c`` and ``d`` of degree
        less than that of ``a`` and ``b``, respectively.
        The coefficients of ``c`` and ``d`` are indeterminates and thus
        the coefficients of the normalform of ``a * d - b * c`` are
        linear polynomials in these indeterminates.
        If these linear polynomials, considered as a system of
        equations, have a nontrivial solution, then `\frac{a}{b}
        \equiv \frac{c}{d}` modulo the ideal generated by ``G``. So,
        by construction, the degree of ``c`` and ``d`` is less than
        the degree of ``a`` and ``b``, so a simpler representation
        has been found.
        After a simpler representation has been found, the algorithm
        tries to reduce the degree of the numerator and denominator
        and returns the result afterwards.

        As an extension, if quick=False, we look at all possible degrees such
        that the total degree is less than *or equal to* the best current
        solution. We retain a list of all solutions of minimal degree, and try
        to find the best one at the end.
        """
        c, d = a, b
        steps = 0

        maxdeg = a.total_degree() + b.total_degree()
        if quick:
            bound = maxdeg - 1
        else:
            bound = maxdeg
        while N + D <= bound:
            if (N, D) in tested:
                break
            tested.add((N, D))

            M1 = staircase(N)
            M2 = staircase(D)
            debug('%s / %s: %s, %s' % (N, D, M1, M2))

            Cs = symbols("c:%d" % len(M1), cls=Dummy)
            Ds = symbols("d:%d" % len(M2), cls=Dummy)
            ng = Cs + Ds

            c_hat = Poly(sum(Cs[i] * M1[i] for i in range(len(M1))),
                         opt.gens + ng)
            d_hat = Poly(sum(Ds[i] * M2[i] for i in range(len(M2))),
                         opt.gens + ng)

            r = reduced(a * d_hat - b * c_hat,
                        G,
                        opt.gens + ng,
                        order=opt.order,
                        polys=True)[1]

            S = Poly(r, gens=opt.gens).coeffs()
            sol = solve(S, Cs + Ds, particular=True, quick=True)

            if sol and not all([s == 0 for s in sol.values()]):
                c = c_hat.subs(sol)
                d = d_hat.subs(sol)

                # The "free" variables occuring before as parameters
                # might still be in the substituted c, d, so set them
                # to the value chosen before:
                c = c.subs(dict(zip(Cs + Ds, [1] * (len(Cs) + len(Ds)))))
                d = d.subs(dict(zip(Cs + Ds, [1] * (len(Cs) + len(Ds)))))

                c = Poly(c, opt.gens)
                d = Poly(d, opt.gens)
                if d == 0:
                    raise ValueError('Ideal not prime?')

                allsol.append((c_hat, d_hat, S, Cs + Ds))
                if N + D != maxdeg:
                    allsol = [allsol[-1]]

                break

            steps += 1
            N += 1
            D += 1

        if steps > 0:
            c, d, allsol = _ratsimpmodprime(c, d, allsol, N, D - steps)
            c, d, allsol = _ratsimpmodprime(c, d, allsol, N - steps, D)

        return c, d, allsol

    # preprocessing. this improves performance a bit when deg(num)
    # and deg(denom) are large:
    num = reduced(num, G, opt.gens, order=opt.order)[1]
    denom = reduced(denom, G, opt.gens, order=opt.order)[1]

    if polynomial:
        return (num / denom).cancel()

    c, d, allsol = _ratsimpmodprime(Poly(num, opt.gens), Poly(denom, opt.gens),
                                    [])
    if not quick and allsol:
        debug('Looking for best minimal solution. Got: %s' % len(allsol))
        newsol = []
        for c_hat, d_hat, S, ng in allsol:
            sol = solve(S, ng, particular=True, quick=False)
            newsol.append((c_hat.subs(sol), d_hat.subs(sol)))
        c, d = min(newsol, key=lambda x: len(x[0].terms()) + len(x[1].terms()))

    if not domain.has_Field:
        cn, c = c.clear_denoms(convert=True)
        dn, d = d.clear_denoms(convert=True)
    else:
        # nothing to clear when the ground domain is already a field
        cn, dn = 1, 1

    cf, c, d = cancel((c, d), opt.gens, order=opt.order)  # canonicalize signs
    r = cf * Rational(cn, dn)

    return (c * r.q) / (d * r.p)
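
# Usage sketch, added for illustration (not part of the original example): the
# same input as in the docstring, but with polynomial=True, which skips the
# degree-minimizing search and only normalizes numerator and denominator
# modulo G (faster, but possibly a larger result).
from diofant.abc import x, y

r = ratsimpmodprime((x + y**5 + y)/(x - y), [x*y**5 - x - y],
                    x, y, order='lex', polynomial=True)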