def test_roots_quartic(): assert roots_quartic(Poly(x ** 4, x)) == [0, 0, 0, 0] assert roots_quartic(Poly(x ** 4 + x ** 3, x)) in [[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]] assert roots_quartic(Poly(x ** 4 - x ** 3, x)) in [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] lhs = roots_quartic(Poly(x ** 4 + x, x)) rhs = [S.Half + I * sqrt(3) / 2, S.Half - I * sqrt(3) / 2, S.Zero, -S.One] assert sorted(lhs, key=hash) == sorted(rhs, key=hash) # test of all branches of roots quartic for i, (a, b, c, d) in enumerate( [(1, 2, 3, 0), (3, -7, -9, 9), (1, 2, 3, 4), (1, 2, 3, 4), (-7, -3, 3, -6), (-3, 5, -6, -4), (6, -5, -10, -3)] ): if i == 2: c = -a * (a ** 2 / S(8) - b / S(2)) elif i == 3: d = a * (a * (3 * a ** 2 / S(256) - b / S(16)) + c / S(4)) eq = x ** 4 + a * x ** 3 + b * x ** 2 + c * x + d ans = roots_quartic(Poly(eq, x)) assert all(eq.subs(x, ai).n(chop=True) == 0 for ai in ans) # not all symbolic quartics are unresolvable eq = Poly(q * x + q / 4 + x ** 4 + x ** 3 + 2 * x ** 2 - Rational(1, 3), x) sol = roots_quartic(eq) assert all(test_numerically(eq.subs(x, i), 0) for i in sol) # but some are (see also iss 1890) raises(PolynomialError, lambda: roots_quartic(Poly(y * x ** 4 + x + z, x)))
def test_RootsOf(): f = Poly((x-4)**4, x) roots = RootsOf(f) assert roots.count == 4 assert list(roots.roots()) == [ Integer(4), Integer(4), Integer(4), Integer(4) ] assert RootSum(lambda r: r**2, f) == 64 roots = RootsOf(x**5+x+1, x) assert roots.count == 5 f = Poly(x**5+x+1, x) assert list(roots.roots()) == [ RootOf(f, 0), RootOf(f, 1), RootOf(f, 2), RootOf(f, 3), RootOf(f, 4) ] assert RootSum(lambda r: r**2, f).doit() == RootOf(f, 0)**2 + \ RootOf(f, 1)**2 + RootOf(f, 2)**2 + RootOf(f, 3)**2 + RootOf(f, 4)**2 assert RootSum(Lambda(x, x), Poly(0, x), evaluate=True) == S.Zero assert RootSum(Lambda(x, x), Poly(0, x), evaluate=False) != S.Zero assert RootSum(Lambda(x, x), Poly(x-1, x), evaluate=False).doit() == S.One y = symbols('y') f = Poly(x**6 - x**5 - 1, x) assert RootOf(f, 0).subs(x, y) == RootOf(f.subs(x, y), 0)
def _sqrt_symbolic_denest(a, b, r):
    """Given an expression, sqrt(a + b*sqrt(r)), return the denested
    expression or None.

    Algorithm:
    If r = ra + rb*sqrt(rr), try replacing sqrt(rr) in ``a`` with
    (y**2 - ra)/rb, and if the result is a quadratic, ca*y**2 + cb*y + cc, and
    (cb + b)**2 - 4*ca*cc is 0, then sqrt(a + b*sqrt(r)) can be rewritten as
    sqrt(ca*(sqrt(r) + (cb + b)/(2*ca))**2).

    Examples
    ========

    >>> from sympy.simplify.sqrtdenest import _sqrt_symbolic_denest, sqrtdenest
    >>> from sympy import sqrt, Symbol
    >>> from sympy.abc import x

    >>> a, b, r = 16 - 2*sqrt(29), 2, -10*sqrt(29) + 55
    >>> _sqrt_symbolic_denest(a, b, r)
    sqrt(-2*sqrt(29) + 11) + sqrt(5)

    If the expression is numeric, it will be simplified:

    >>> w = sqrt(sqrt(sqrt(3) + 1) + 1) + 1 + sqrt(2)
    >>> sqrtdenest(sqrt((w**2).expand()))
    1 + sqrt(2) + sqrt(1 + sqrt(1 + sqrt(3)))

    Otherwise, it will only be simplified if assumptions allow:

    >>> w = w.subs(sqrt(3), sqrt(x + 3))
    >>> sqrtdenest(sqrt((w**2).expand()))
    sqrt((sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2))**2)

    Notice that the argument of the sqrt is a square. If x is made positive
    then the sqrt of the square is resolved:

    >>> _.subs(x, Symbol('x', positive=True))
    sqrt(sqrt(sqrt(x + 3) + 1) + 1) + 1 + sqrt(2)
    """
    a, b, r = map(sympify, (a, b, r))
    rval = _sqrt_match(r)
    if not rval:
        return None
    ra, rb, rr = rval
    if rb:
        y = Dummy('y', positive=True)
        try:
            newa = Poly(a.subs(sqrt(rr), (y**2 - ra)/rb), y)
        except PolynomialError:
            return None
        if newa.degree() == 2:
            ca, cb, cc = newa.all_coeffs()
            cb += b
            if _mexpand(cb**2 - 4*ca*cc).equals(0):
                z = sqrt(ca*(sqrt(r) + cb/(2*ca))**2)
                if z.is_number:
                    z = _mexpand(Mul._from_args(z.as_content_primitive()))
                return z
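A quick numeric cross-check of the denesting shown in the docstring above, going through the public sqrtdenest() entry point rather than the private helper; this is an illustrative sketch, not part of the module.

# Hedged sketch: confirm numerically that sqrtdenest() agrees with the nested
# radical for the docstring's (a, b, r) example; values are from the example above.
from sympy import sqrt
from sympy.simplify.sqrtdenest import sqrtdenest

a, b, r = 16 - 2*sqrt(29), 2, -10*sqrt(29) + 55
nested = sqrt(a + b*sqrt(r))
denested = sqrtdenest(nested)    # expected: sqrt(11 - 2*sqrt(29)) + sqrt(5)
assert abs((denested - nested).evalf()) < 1e-12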
def reduce_poly_inequalities(exprs, gen, assume=True, relational=True): """Reduce a system of polynomial inequalities with rational coefficients. """ exact = True polys = [] for _exprs in exprs: _polys = [] for expr in _exprs: if isinstance(expr, tuple): expr, rel = expr else: if expr.is_Relational: expr, rel = expr.lhs - expr.rhs, expr.rel_op else: expr, rel = expr, '==' poly = Poly(expr, gen) if not poly.get_domain().is_Exact: poly, exact = poly.to_exact(), False domain = poly.get_domain() if not (domain.is_ZZ or domain.is_QQ): raise NotImplementedError("inequality solving is not supported over %s" % domain) _polys.append((poly, rel)) polys.append(_polys) solution = solve_poly_inequalities(polys) if isinstance(solution, Union): intervals = list(solution.args) elif isinstance(solution, Interval): intervals = [solution] else: intervals = [] if not exact: intervals = map(interval_evalf, intervals) if not relational: return intervals real = ask(gen, 'real', assume) def relationalize(gen): return Or(*[ i.as_relational(gen) for i in intervals ]) if not real: result = And(relationalize(re(gen)), Eq(im(gen), 0)) else: result = relationalize(gen) return result
def test_issue_8438():
    p = Poly([1, y, -2, -3], x).as_expr()
    roots = roots_cubic(Poly(p, x), x)
    z = -S(3)/2 - 7*I/2  # this will fail in code given in commit msg
    post = [r.subs(y, z) for r in roots]
    assert set(post) == \
        set(roots_cubic(Poly(p.subs(y, z), x)))
    # /!\ if p is not made an expression, this is *very* slow
    assert all(p.subs({y: z, x: i}).n(2, chop=True) == 0 for i in post)
def test_roots_quartic(): assert roots_quartic(Poly(x**4, x)) == [0, 0, 0, 0] assert roots_quartic(Poly(x**4 + x**3, x)) in [ [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1] ] assert roots_quartic(Poly(x**4 - x**3, x)) in [ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1] ] lhs = roots_quartic(Poly(x**4 + x, x)) rhs = [S.Half + I*sqrt(3)/2, S.Half - I*sqrt(3)/2, S.Zero, -S.One] assert sorted(lhs, key=hash) == sorted(rhs, key=hash) # test of all branches of roots quartic for i, (a, b, c, d) in enumerate([(1, 2, 3, 0), (3, -7, -9, 9), (1, 2, 3, 4), (1, 2, 3, 4), (-7, -3, 3, -6), (-3, 5, -6, -4), (6, -5, -10, -3)]): if i == 2: c = -a*(a**2/S(8) - b/S(2)) elif i == 3: d = a*(a*(3*a**2/S(256) - b/S(16)) + c/S(4)) eq = x**4 + a*x**3 + b*x**2 + c*x + d ans = roots_quartic(Poly(eq, x)) assert all(eq.subs(x, ai).n(chop=True) == 0 for ai in ans) # not all symbolic quartics are unresolvable eq = Poly(q*x + q/4 + x**4 + x**3 + 2*x**2 - Rational(1, 3), x) sol = roots_quartic(eq) assert all(verify_numerically(eq.subs(x, i), 0) for i in sol) z = symbols('z', negative=True) eq = x**4 + 2*x**3 + 3*x**2 + x*(z + 11) + 5 zans = roots_quartic(Poly(eq, x)) assert all([verify_numerically(eq.subs(((x, i), (z, -1))), 0) for i in zans]) # but some are (see also issue 4989) # it's ok if the solution is not Piecewise, but the tests below should pass eq = Poly(y*x**4 + x**3 - x + z, x) ans = roots_quartic(eq) assert all(type(i) == Piecewise for i in ans) reps = ( dict(y=-Rational(1, 3), z=-Rational(1, 4)), # 4 real dict(y=-Rational(1, 3), z=-Rational(1, 2)), # 2 real dict(y=-Rational(1, 3), z=-2)) # 0 real for rep in reps: sol = roots_quartic(Poly(eq.subs(rep), x)) assert all([verify_numerically(w.subs(rep) - s, 0) for w, s in zip(ans, sol)])
def _is_negative_or_zero(term):
    if getattr(term, 'is_number', False):
        return term <= 0
    elif isinstance(term, Pow):
        if term.args[1] % 2 == 1:
            return _is_negative_or_zero(term.args[0])
    t = Poly(term).as_dict()
    if (all(c < 0 for c in t.values()) and
            all(i % 2 == 0 for d in t.keys() for i in d)):
        return True
    return ask_is_negative(term)
def _solve_as_poly(f, symbol, solveset_solver, invert_func): """ Solve the equation using polynomial techniques if it already is a polynomial equation or, with a change of variables, can be made so. """ result = None if f.is_polynomial(symbol): solns = roots(f, symbol, cubics=True, quartics=True, quintics=True, domain='EX') num_roots = sum(solns.values()) if degree(f, symbol) <= num_roots: result = FiniteSet(*solns.keys()) else: poly = Poly(f, symbol) solns = poly.all_roots() if poly.degree() <= len(solns): result = FiniteSet(*solns) else: result = ConditionSet(symbol, Eq(f, 0), S.Complexes) else: poly = Poly(f) if poly is None: result = ConditionSet(symbol, Eq(f, 0), S.Complexes) gens = [g for g in poly.gens if g.has(symbol)] if len(gens) == 1: poly = Poly(poly, gens[0]) gen = poly.gen deg = poly.degree() poly = Poly(poly.as_expr(), poly.gen, composite=True) poly_solns = FiniteSet(*roots(poly, cubics=True, quartics=True, quintics=True).keys()) if len(poly_solns) < deg: result = ConditionSet(symbol, Eq(f, 0), S.Complexes) if gen != symbol: y = Dummy('y') lhs, rhs_s = invert_func(gen, y, symbol) if lhs is symbol: result = Union(*[rhs_s.subs(y, s) for s in poly_solns]) else: result = ConditionSet(symbol, Eq(f, 0), S.Complexes) else: result = ConditionSet(symbol, Eq(f, 0), S.Complexes) if result is not None: if isinstance(result, FiniteSet): # this is to simplify solutions like -sqrt(-I) to sqrt(2)/2 # - sqrt(2)*I/2. We are not expanding for solution with free # variables because that makes the solution more complicated. For # example expand_complex(a) returns re(a) + I*im(a) if all([s.free_symbols == set() and not isinstance(s, RootOf) for s in result]): s = Dummy('s') result = imageset(Lambda(s, expand_complex(s)), result) return result else: return ConditionSet(symbol, Eq(f, 0), S.Complexes)
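The helper above is the polynomial branch behind solveset; a tiny public-API sketch follows (membership checks avoid depending on the exact output form, and the example equation is illustrative only).

# Hedged sketch: solveset resolves polynomial equations through a routine like the one above.
from sympy import solveset, S, Rational, sqrt, I
from sympy.abc import x

sols = solveset(x**3 - 1, x, domain=S.Complexes)
assert S.One in sols
assert Rational(-1, 2) + sqrt(3)*I/2 in sols
assert len(sols) == 3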
def test_nroots2(): p = Poly(x**5+3*x+1, x) roots = p.nroots(n=3) # The order of roots matters. The roots are ordered by their real # components (if they agree, then by their imaginary components). assert [str(r) for r in roots] == \ ['-0.839 - 0.944*I', '-0.839 + 0.944*I', '-0.332', '1.01 - 0.937*I', '1.01 + 0.937*I'] roots = p.nroots(n=5) assert [str(r) for r in roots] == \ ['-0.83907 - 0.94385*I', '-0.83907 + 0.94385*I', '-0.33199', '1.0051 - 0.93726*I', '1.0051 + 0.93726*I']
def ratint_ratpart(f, g, x): """Horowitz-Ostrogradsky algorithm. Given a field K and polynomials f and g in K[x], such that f and g are coprime and deg(f) < deg(g), returns fractions A and B in K(x), such that f/g = A' + B and B has square-free denominator. """ f = Poly(f, x) g = Poly(g, x) u, v, _ = g.cofactors(g.diff()) n = u.degree() m = v.degree() A_coeffs = [ Dummy('a' + str(n-i)) for i in xrange(0, n) ] B_coeffs = [ Dummy('b' + str(m-i)) for i in xrange(0, m) ] C_coeffs = A_coeffs + B_coeffs A = Poly(A_coeffs, x, domain=ZZ[C_coeffs]) B = Poly(B_coeffs, x, domain=ZZ[C_coeffs]) H = f - A.diff()*v + A*(u.diff()*v).quo(u) - B*u result = solve(H.coeffs(), C_coeffs) A = A.as_expr().subs(result) B = B.as_expr().subs(result) rat_part = cancel(A/u.as_expr(), x) log_part = cancel(B/v.as_expr(), x) return rat_part, log_part
def max_onepiece(x, f: Poly, g: Poly, l, u): roots = sorted(set((f - g).real_roots())) new_polynomial_pieces = [] new_bounds = [l] for r in roots: if l < r < u: m = (r + new_bounds[-1]) / 2 if f.subs(x, m) >= g.subs(x, m): new_polynomial_pieces.append(f) else: new_polynomial_pieces.append(g) new_bounds.append(r) new_bounds.append(u) return PiecewisePolynomial(new_polynomial_pieces, new_bounds)
def ratint_ratpart(f, g, x): """ Horowitz-Ostrogradsky algorithm. Given a field K and polynomials f and g in K[x], such that f and g are coprime and deg(f) < deg(g), returns fractions A and B in K(x), such that f/g = A' + B and B has square-free denominator. Examples ======== >>> from sympy.integrals.rationaltools import ratint_ratpart >>> from sympy.abc import x, y >>> from sympy import Poly >>> ratint_ratpart(Poly(1, x, domain='ZZ'), ... Poly(x + 1, x, domain='ZZ'), x) (0, 1/(x + 1)) >>> ratint_ratpart(Poly(1, x, domain='EX'), ... Poly(x**2 + y**2, x, domain='EX'), x) (0, 1/(x**2 + y**2)) >>> ratint_ratpart(Poly(36, x, domain='ZZ'), ... Poly(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2, x, domain='ZZ'), x) ((12*x + 6)/(x**2 - 1), 12/(x**2 - x - 2)) See Also ======== ratint, ratint_logpart """ from sympy import solve f = Poly(f, x) g = Poly(g, x) u, v, _ = g.cofactors(g.diff()) n = u.degree() m = v.degree() A_coeffs = [ Dummy('a' + str(n - i)) for i in range(0, n) ] B_coeffs = [ Dummy('b' + str(m - i)) for i in range(0, m) ] C_coeffs = A_coeffs + B_coeffs A = Poly(A_coeffs, x, domain=ZZ[C_coeffs]) B = Poly(B_coeffs, x, domain=ZZ[C_coeffs]) H = f - A.diff()*v + A*(u.diff()*v).quo(u) - B*u result = solve(H.coeffs(), C_coeffs) A = A.as_expr().subs(result) B = B.as_expr().subs(result) rat_part = cancel(A/u.as_expr(), x) log_part = cancel(B/v.as_expr(), x) return rat_part, log_part
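A small verification sketch for the Horowitz-Ostrogradsky split described above: the derivative of the rational part plus the log-part integrand must reproduce f/g. The polynomials are taken from the docstring example.

# Hedged sketch: check f/g == A' + B for the docstring example.
from sympy import Poly, diff, simplify
from sympy.abc import x
from sympy.integrals.rationaltools import ratint_ratpart

f = Poly(36, x)
g = Poly(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2, x)
A, B = ratint_ratpart(f, g, x)
assert simplify(diff(A, x) + B - f.as_expr()/g.as_expr()) == 0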
def _is_function_class_equation(func_class, f, symbol): """ Tests whether the equation is an equation of the given function class. The given equation belongs to the given function class if it is comprised of functions of the function class which are multiplied by or added to expressions independent of the symbol. In addition, the arguments of all such functions must be linear in the symbol as well. Examples ======== >>> from sympy.solvers.solveset import _is_function_class_equation >>> from sympy import tan, sin, tanh, sinh, exp >>> from sympy.abc import x >>> from sympy.functions.elementary.trigonometric import (TrigonometricFunction, ... HyperbolicFunction) >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x) True >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x) False >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x) True >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x) True """ if f.is_Mul or f.is_Add: return all(_is_function_class_equation(func_class, arg, symbol) for arg in f.args) if f.is_Pow: if not f.exp.has(symbol): return _is_function_class_equation(func_class, f.base, symbol) else: return False if not f.has(symbol): return True if isinstance(f, func_class): try: g = Poly(f.args[0], symbol) return g.degree() <= 1 except PolynomialError: return False else: return False
def cancel(f, *symbols): """Cancel common factors in a given rational function. Given a quotient of polynomials, performing only gcd and quo operations in polynomial algebra, return rational function with numerator and denominator of minimal total degree in an expanded form. For all other kinds of expressions the input is returned in an unchanged form. Note however, that 'cancel' function can thread over sums and relational operators. Additionally you can specify a list of variables to perform cancelation more efficiently using only those symbols. >>> from sympy import * >>> x,y = symbols('xy') >>> cancel((x**2-1)/(x-1)) 1 + x >>> cancel((x**2-y**2)/(x-y), x) x + y >>> cancel((x**2-2)/(x+sqrt(2))) x - 2**(1/2) """ return Poly.cancel(f, *symbols)
def _is_negative(term):
    if getattr(term, 'is_number', False):
        return term < 0
    elif isinstance(term, Pow):
        if term.args[1] % 2 == 1:
            return _is_negative(term.args[0])
        else:
            return False
    l = len(term.free_symbols)
    t = Poly(term).as_dict()
    if (all(c < 0 for c in t.values()) and
            all(i % 2 == 0 for d in t.keys() for i in d) and
            (0,)*l in t.keys()):
        return True
    return ask_is_negative(term)
def reduce_poly_inequalities(exprs, gen, assume=True, relational=True): """Reduce a system of polynomial inequalities with rational coefficients. """ exact = True polys = [] for _exprs in exprs: _polys = [] for expr in _exprs: if isinstance(expr, tuple): expr, rel = expr else: if expr.is_Relational: expr, rel = expr.lhs - expr.rhs, expr.rel_op else: expr, rel = expr, "==" poly = Poly(expr, gen) if not poly.get_domain().is_Exact: poly, exact = poly.to_exact(), False domain = poly.get_domain() if not (domain.is_ZZ or domain.is_QQ): raise NotImplementedError("inequality solving is not supported over %s" % domain) _polys.append((poly, rel)) polys.append(_polys) solution = solve_poly_inequalities(polys) if not exact: solution = solution.evalf() if not relational: return solution real = ask(Q.real(gen), assumptions=assume) if not real: result = And(solution.as_relational(re(gen)), Eq(im(gen), 0)) else: result = solution.as_relational(gen) return result
def weak_normalizer(a, d, DE, z=None): """ Weak normalization. Given a derivation D on k[t] and f == a/d in k(t), return q in k[t] such that f - Dq/q is weakly normalized with respect to t. f in k(t) is said to be "weakly normalized" with respect to t if residue_p(f) is not a positive integer for any normal irreducible p in k[t] such that f is in R_p (Definition 6.1.1). If f has an elementary integral, this is equivalent to no logarithm of integral(f) whose argument depends on t has a positive integer coefficient, where the arguments of the logarithms not in k(t) are in k[t]. Returns (q, f - Dq/q) """ z = z or Dummy('z') dn, ds = splitfactor(d, DE) # Compute d1, where dn == d1*d2**2*...*dn**n is a square-free # factorization of d. g = gcd(dn, dn.diff(DE.t)) d_sqf_part = dn.quo(g) d1 = d_sqf_part.quo(gcd(d_sqf_part, g)) a1, b = gcdex_diophantine(d.quo(d1).as_poly(DE.t), d1.as_poly(DE.t), a.as_poly(DE.t)) r = (a - Poly(z, DE.t)*derivation(d1, DE)).as_poly(DE.t).resultant( d1.as_poly(DE.t)) r = Poly(r, z) if not r.has(z): return (Poly(1, DE.t), (a, d)) N = [i for i in r.real_roots() if i in ZZ and i > 0] q = reduce(mul, [gcd(a - Poly(n, DE.t)*derivation(d1, DE), d1) for n in N], Poly(1, DE.t)) dq = derivation(q, DE) sn = q*a - d*dq sd = q*d sn, sd = sn.cancel(sd, include=True) return (q, (sn, sd))
def _solve_inequality(ie, s): """ A hacky replacement for solve, since the latter only works for univariate inequalities. """ from sympy import Poly if not ie.rel_op in ('>', '>=', '<', '<='): raise NotImplementedError expr = ie.lhs - ie.rhs p = Poly(expr, s) if p.degree() != 1: raise NotImplementedError('%s' % ie) a, b = p.all_coeffs() if a.is_positive: return ie.func(s, -b/a) elif a.is_negative: return ie.func(-b/a, s) else: raise NotImplementedError
def test_roots_quadratic(): assert roots_quadratic(Poly(2*x**2, x)) == [0, 0] assert roots_quadratic(Poly(2*x**2 + 3*x, x)) == [-Rational(3, 2), 0] assert roots_quadratic(Poly(2*x**2 + 3, x)) == [-I*sqrt(6)/2, I*sqrt(6)/2] assert roots_quadratic(Poly(2*x**2 + 4*x + 3, x)) == [-1 - I*sqrt(2)/2, -1 + I*sqrt(2)/2] f = x**2 + (2*a*e + 2*c*e)/(a - c)*x + (d - b + a*e**2 - c*e**2)/(a - c) assert roots_quadratic(Poly(f, x)) == \ [-e*(a + c)/(a - c) - sqrt((a*b + c*d - a*d - b*c + 4*a*c*e**2)/(a - c)**2), -e*(a + c)/(a - c) + sqrt((a*b + c*d - a*d - b*c + 4*a*c*e**2)/(a - c)**2)] # check for simplification f = Poly(y*x**2 - 2*x - 2*y, x) assert roots_quadratic(f) == \ [-sqrt(2*y**2 + 1)/y + 1/y, sqrt(2*y**2 + 1)/y + 1/y] f = Poly(x**2 + (-y**2 - 2)*x + y**2 + 1, x) assert roots_quadratic(f) == \ [y**2/2 - sqrt(y**4)/2 + 1, y**2/2 + sqrt(y**4)/2 + 1] f = Poly(sqrt(2)*x**2 - 1, x) r = roots_quadratic(f) assert r == _nsort(r) # issue 8255 f = Poly(-24*x**2 - 180*x + 264) assert [w.n(2) for w in f.all_roots(radicals=True)] == \ [w.n(2) for w in f.all_roots(radicals=False)] for _a, _b, _c in cartes((-2, 2), (-2, 2), (0, -1)): f = Poly(_a*x**2 + _b*x + _c) roots = roots_quadratic(f) assert roots == _nsort(roots)
def root_factors(f, *gens, **args): """Returns all factors of a univariate polynomial. Examples ======== >>> from sympy.abc import x, y >>> from sympy.polys.polyroots import root_factors >>> root_factors(x**2-y, x) [x - y**(1/2), x + y**(1/2)] """ F = Poly(f, *gens, **args) if not F.is_Poly: return [f] if F.is_multivariate: raise ValueError('multivariate polynomials not supported') x = F.gens[0] if 'multiple' in args: del args['multiple'] zeros = roots(F, **args) if not zeros: factors = [F] else: factors, N = [], 0 for r, n in zeros.iteritems(): factors, N = factors + [Poly(x-r, x)]*n, N + n if N < F.degree(): g = reduce(lambda p,q: p*q, factors) factors.append(f.exquo(g)) if not isinstance(f, Poly): return [ f.as_basic() for f in factors ] else: return factors
def _solve_inequality(ie, s): """ A hacky replacement for solve, since the latter only works for univariate inequalities. """ if not ie.rel_op in (">", ">=", "<", "<="): raise NotImplementedError expr = ie.lhs - ie.rhs try: p = Poly(expr, s) except PolynomialError: raise NotImplementedError if p.degree() != 1: raise NotImplementedError("%s" % ie) a, b = p.all_coeffs() if a.is_positive: return ie.func(s, -b / a) elif a.is_negative: return ie.func(-b / a, s) else: raise NotImplementedError
def _solve_inequality(ie, s): """ A hacky replacement for solve, since the latter only works for univariate inequalities. """ expr = ie.lhs - ie.rhs try: p = Poly(expr, s) if p.degree() != 1: raise NotImplementedError except (PolynomialError, NotImplementedError): try: return reduce_rational_inequalities([[ie]], s) except PolynomialError: return solve_univariate_inequality(ie, s) a, b = p.all_coeffs() if a.is_positive or ie.rel_op in ('!=', '=='): return ie.func(s, -b/a) elif a.is_negative: return ie.reversed.func(s, -b/a) else: raise NotImplementedError
def apart_undetermined_coeffs(P, Q): """Partial fractions via method of undetermined coefficients. """ X = numbered_symbols(cls=Dummy) partial, symbols = [], [] _, factors = Q.factor_list() for f, k in factors: n, q = f.degree(), Q for i in xrange(1, k + 1): coeffs, q = take(X, n), q.quo(f) partial.append((coeffs, q, f, i)) symbols.extend(coeffs) dom = Q.get_domain().inject(*symbols) F = Poly(0, Q.gen, domain=dom) for i, (coeffs, q, f, k) in enumerate(partial): h = Poly(coeffs, Q.gen, domain=dom) partial[i] = (h, f, k) q = q.set_domain(dom) F += h * q system, result = [], S(0) for (k,), coeff in F.terms(): system.append(coeff - P.nth(k)) from sympy.solvers import solve solution = solve(system, symbols) for h, f, k in partial: h = h.as_expr().subs(solution) result += h / f.as_expr() ** k return result
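For context, the undetermined-coefficients routine above sits behind the public apart() front end; a minimal sketch that checks a decomposition round-trips (the example expression is illustrative).

# Hedged sketch: a partial fraction decomposition recombines to the input.
from sympy import apart, together, simplify
from sympy.abc import x

expr = 1/((x + 1)*(x + 2))
decomp = apart(expr, x)    # expected: 1/(x + 1) - 1/(x + 2)
assert simplify(together(decomp) - expr) == 0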
def simplify(expr): """Naively simplifies the given expression. Simplification is not a well defined term and the exact strategies this function tries can change in the future versions of SymPy. If your algorithm relies on "simplification" (whatever it is), try to determine what you need exactly - is it powsimp(), or radsimp(), or together(), or something else? And use this particular function directly, because those are well defined and thus your algorithm will be robust. """ expr = Poly.cancel(powsimp(expr)) return together(expr.expand())
def test_nroots2(): p = Poly(x ** 5 + 3 * x + 1, x) roots = p.nroots(n=3) # The order of roots matters. The roots are ordered by their real # components (if they agree, then by their imaginary components), # with real roots appearing first. assert [str(r) for r in roots] == [ "-0.332", "-0.839 - 0.944*I", "-0.839 + 0.944*I", "1.01 - 0.937*I", "1.01 + 0.937*I", ] roots = p.nroots(n=5) assert [str(r) for r in roots] == [ "-0.33199", "-0.83907 - 0.94385*I", "-0.83907 + 0.94385*I", "1.0051 - 0.93726*I", "1.0051 + 0.93726*I", ]
def _solve_inequality(ie, s, assume=True): """ A hacky replacement for solve, since the latter only works for univariate inequalities. """ if not ie.rel_op in ('>', '>=', '<', '<='): raise NotImplementedError expr = ie.lhs - ie.rhs try: p = Poly(expr, s) if p.degree() != 1: raise NotImplementedError except (PolynomialError, NotImplementedError): try: n, d = expr.as_numer_denom() return reduce_rational_inequalities([[ie]], s, assume=assume) except PolynomialError: return solve_univariate_inequality(ie, s, assume=assume) a, b = p.all_coeffs() if a.is_positive: return ie.func(s, -b/a) elif a.is_negative: return ie.func(-b/a, s) else: raise NotImplementedError
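When the expression is not linear, the helper above hands off to the rational-inequality machinery; the same path can be exercised directly through reduce_rational_inequalities (a sketch against the current public API, whose signature may differ from the older assume-based version shown above).

# Hedged sketch: reduce a simple quadratic inequality and spot-check the
# resulting relational by substitution.
from sympy import Symbol
from sympy.solvers.inequalities import reduce_rational_inequalities

x = Symbol('x', real=True)
sol = reduce_rational_inequalities([[x**2 - 4 <= 0]], x)   # -2 <= x <= 2
assert sol.subs(x, 0)
assert not sol.subs(x, 3)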
def simplify(expr):
    """Naively simplifies the given expression.

       Simplification is not a well defined term and the exact strategies
       this function tries can change in the future versions of SymPy. If
       your algorithm relies on "simplification" (whatever it is), try to
       determine what you need exactly - is it powsimp(), radsimp(),
       together(), logcombine(), or something else? And use that particular
       function directly, because those are well defined and thus your
       algorithm will be robust.
    """
    expr = Poly.cancel(powsimp(expr))
    expr = powsimp(together(expr.expand()), combine='exp', deep=True)
    if expr.could_extract_minus_sign():
        n, d = expr.as_numer_denom()
        if d != 0:
            expr = -n/(-d)
    return expr
def hypersimp(f, k): """Given combinatorial term f(k) simplify its consecutive term ratio ie. f(k+1)/f(k). The input term can be composed of functions and integer sequences which have equivalent representation in terms of gamma special function. The algorithm performs three basic steps: (1) Rewrite all functions in terms of gamma, if possible. (2) Rewrite all occurrences of gamma in terms of products of gamma and rising factorial with integer, absolute constant exponent. (3) Perform simplification of nested fractions, powers and if the resulting expression is a quotient of polynomials, reduce their total degree. If f(k) is hypergeometric then as result we arrive with a quotient of polynomials of minimal degree. Otherwise None is returned. For more information on the implemented algorithm refer to: [1] W. Koepf, Algorithms for m-fold Hypergeometric Summation, Journal of Symbolic Computation (1995) 20, 399-417 """ f = sympify(f) g = f.subs(k, k+1) / f g = g.rewrite(gamma) g = expand_func(g) g = powsimp(g, deep=True, combine='exp') if g.is_rational_function(k): return Poly.cancel(g, k) else: return None
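The returned ratio can be checked against a hand-computed one; a brief sketch using two standard hypergeometric terms (the comparisons go through simplify so the exact returned form does not matter).

# Hedged sketch: f(k+1)/f(k) for factorial(k) is k + 1, and for binomial(n, k)
# it is (n - k)/(k + 1).
from sympy import hypersimp, factorial, binomial, simplify
from sympy.abc import k, n

assert simplify(hypersimp(factorial(k), k) - (k + 1)) == 0
assert simplify(hypersimp(binomial(n, k), k) - (n - k)/(k + 1)) == 0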
def splitter(p): for y in V: if not p.has_any_symbols(y): continue if derivation(y) is not S.Zero: c, q = p.as_poly(y).as_primitive() q = q.as_basic() h = gcd(q, derivation(q), y) s = quo(h, gcd(q, q.diff(y), y), y) c_split = splitter(c) if s.as_poly(y).degree == 0: return (c_split[0], q * c_split[1]) q_split = splitter(Poly.cancel((q, s), *V)) return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1]) else: return (S.One, p)
def test_issue_13340():
    eq = Poly(y**3 + exp(x) * y + x, y, domain='EX')
    roots_d = roots(eq)
    assert len(roots_d) == 3
def dispersionset(p, q=None, *gens, **args): r"""Compute the *dispersion set* of two polynomials. For two polynomials `f(x)` and `g(x)` with `\deg f > 0` and `\deg g > 0` the dispersion set `\operatorname{J}(f, g)` is defined as: .. math:: \operatorname{J}(f, g) & := \{a \in \mathbb{N}_0 | \gcd(f(x), g(x+a)) \neq 1\} \\ & = \{a \in \mathbb{N}_0 | \deg \gcd(f(x), g(x+a)) \geq 1\} For a single polynomial one defines `\operatorname{J}(f) := \operatorname{J}(f, f)`. Examples ======== >>> from sympy import poly >>> from sympy.polys.dispersion import dispersion, dispersionset >>> from sympy.abc import x Dispersion set and dispersion of a simple polynomial: >>> fp = poly((x - 3)*(x + 3), x) >>> sorted(dispersionset(fp)) [0, 6] >>> dispersion(fp) 6 Note that the definition of the dispersion is not symmetric: >>> fp = poly(x**4 - 3*x**2 + 1, x) >>> gp = fp.shift(-3) >>> sorted(dispersionset(fp, gp)) [2, 3, 4] >>> dispersion(fp, gp) 4 >>> sorted(dispersionset(gp, fp)) [] >>> dispersion(gp, fp) -oo Computing the dispersion also works over field extensions: >>> from sympy import sqrt >>> fp = poly(x**2 + sqrt(5)*x - 1, x, domain='QQ<sqrt(5)>') >>> gp = poly(x**2 + (2 + sqrt(5))*x + sqrt(5), x, domain='QQ<sqrt(5)>') >>> sorted(dispersionset(fp, gp)) [2] >>> sorted(dispersionset(gp, fp)) [1, 4] We can even perform the computations for polynomials having symbolic coefficients: >>> from sympy.abc import a >>> fp = poly(4*x**4 + (4*a + 8)*x**3 + (a**2 + 6*a + 4)*x**2 + (a**2 + 2*a)*x, x) >>> sorted(dispersionset(fp)) [0, 1] See Also ======== dispersion References ========== .. [1] [ManWright94]_ .. [2] [Koepf98]_ .. [3] [Abramov71]_ .. [4] [Man93]_ """ # Check for valid input same = False if q is not None else True if same: q = p p = Poly(p, *gens, **args) q = Poly(q, *gens, **args) if not p.is_univariate or not q.is_univariate: raise ValueError("Polynomials need to be univariate") # The generator if not p.gen == q.gen: raise ValueError("Polynomials must have the same generator") gen = p.gen # We define the dispersion of constant polynomials to be zero if p.degree() < 1 or q.degree() < 1: return {0} # Factor p and q over the rationals fp = p.factor_list() fq = q.factor_list() if not same else fp # Iterate over all pairs of factors J = set() for s, unused in fp[1]: for t, unused in fq[1]: m = s.degree() n = t.degree() if n != m: continue an = s.LC() bn = t.LC() if not (an - bn).is_zero: continue # Note that the roles of `s` and `t` below are switched # w.r.t. the original paper. This is for consistency # with the description in the book of W. Koepf. anm1 = s.coeff_monomial(gen**(m - 1)) bnm1 = t.coeff_monomial(gen**(n - 1)) alpha = (anm1 - bnm1) / S(n * bn) if not alpha.is_integer: continue if alpha < 0 or alpha in J: continue if n > 1 and not (s - t.shift(alpha)).is_zero: continue J.add(alpha) return J
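The defining property can be checked directly with a gcd after shifting; a short sketch reusing the docstring's first example.

# Hedged sketch: a shift a is in the dispersion set exactly when
# gcd(f(x), f(x + a)) is non-trivial.
from sympy import poly, gcd
from sympy.abc import x
from sympy.polys.dispersion import dispersionset

fp = poly((x - 3)*(x + 3), x)
assert sorted(dispersionset(fp)) == [0, 6]
assert gcd(fp, fp.shift(6)).degree() == 1    # shared factor x + 3
assert gcd(fp, fp.shift(1)).degree() == 0    # 1 is not in the set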
def _get_euler_characteristic_eq_sols(eq, func, match_obj): r""" Returns the solution of homogeneous part of the linear euler ODE and the list of roots of characteristic equation. The parameter ``match_obj`` is a dict of order:coeff terms, where order is the order of the derivative on each term, and coeff is the coefficient of that derivative. """ x = func.args[0] f = func.func # First, set up characteristic equation. chareq, symbol = S.Zero, Dummy('x') for i in match_obj: if i >= 0: chareq += (match_obj[i] * diff(x**symbol, x, i) * x**-symbol).expand() chareq = Poly(chareq, symbol) chareqroots = [rootof(chareq, k) for k in range(chareq.degree())] collectterms = [] # A generator of constants constants = list(get_numbered_constants(eq, num=chareq.degree() * 2)) constants.reverse() # Create a dict root: multiplicity or charroots charroots = defaultdict(int) for root in chareqroots: charroots[root] += 1 gsol = S.Zero ln = log for root, multiplicity in charroots.items(): for i in range(multiplicity): if isinstance(root, RootOf): gsol += (x**root) * constants.pop() if multiplicity != 1: raise ValueError("Value should be 1") collectterms = [(0, root, 0)] + collectterms elif root.is_real: gsol += ln(x)**i * (x**root) * constants.pop() collectterms = [(i, root, 0)] + collectterms else: reroot = re(root) imroot = im(root) gsol += ln(x)**i * ( x**reroot) * (constants.pop() * sin(abs(imroot) * ln(x)) + constants.pop() * cos(imroot * ln(x))) collectterms = [(i, reroot, imroot)] + collectterms gsol = Eq(f(x), gsol) gensols = [] # Keep track of when to use sin or cos for nonzero imroot for i, reroot, imroot in collectterms: if imroot == 0: gensols.append(ln(x)**i * x**reroot) else: sin_form = ln(x)**i * x**reroot * sin(abs(imroot) * ln(x)) if sin_form in gensols: cos_form = ln(x)**i * x**reroot * cos(imroot * ln(x)) gensols.append(cos_form) else: gensols.append(sin_form) return gsol, gensols
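The characteristic-root construction above is what dsolve uses for Cauchy-Euler equations; a short end-to-end sketch through the public API, validated with checkodesol (the particular ODE is illustrative).

# Hedged sketch: solve a classic Euler equation and verify the solution.
from sympy import Function, symbols
from sympy.solvers.ode import dsolve, checkodesol

x = symbols('x')
f = Function('f')
ode = x**2*f(x).diff(x, 2) + x*f(x).diff(x) - f(x)   # characteristic roots +1, -1
sol = dsolve(ode, f(x))                              # ~ C1*x + C2/x
assert checkodesol(ode, sol)[0]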
def test_ExtensionElement():
    A = FiniteExtension(Poly(x**2 + 1, x))
    assert srepr(A.generator) == \
        "ExtElem(DMP([1, 0], ZZ, ring=GlobalPolynomialRing(ZZ, Symbol('x'))), FiniteExtension(Poly(x**2 + 1, x, domain='ZZ')))"
def normal_lines(self, p, prec=None): """Normal lines between `p` and the ellipse. Parameters ========== p : Point Returns ======= normal_lines : list with 1, 2 or 4 Lines Examples ======== >>> from sympy import Line, Point, Ellipse >>> e = Ellipse((0, 0), 2, 3) >>> c = e.center >>> e.normal_lines(c + Point(1, 0)) [Line(Point(0, 0), Point(1, 0))] >>> e.normal_lines(c) [Line(Point(0, 0), Point(0, 1)), Line(Point(0, 0), Point(1, 0))] Off-axis points require the solution of a quartic equation. This often leads to very large expressions that may be of little practical use. An approximate solution of `prec` digits can be obtained by passing in the desired value: >>> e.normal_lines((3, 3), prec=2) [Line(Point(-38/47, -85/31), Point(9/47, -21/17)), Line(Point(19/13, -43/21), Point(32/13, -8/3))] Whereas the above solution has an operation count of 12, the exact solution has an operation count of 2020. """ p = Point(p) # XXX change True to something like self.angle == 0 if the arbitrarily # rotated ellipse is introduced. # https://github.com/sympy/sympy/issues/2815) if True: rv = [] if p.x == self.center.x: rv.append(Line(self.center, slope=oo)) if p.y == self.center.y: rv.append(Line(self.center, slope=0)) if rv: # at these special orientations of p either 1 or 2 normals # exist and we are done return rv # find the 4 normal points and construct lines through them with # the corresponding slope x, y = Dummy('x', real=True), Dummy('y', real=True) eq = self.equation(x, y) dydx = idiff(eq, y, x) norm = -1 / dydx slope = Line(p, (x, y)).slope seq = slope - norm points = [] if prec is not None: yis = solve(seq, y)[0] xeq = eq.subs(y, yis).as_numer_denom()[0].expand() try: iv = list(zip(*Poly(xeq).intervals()))[0] # bisection is safest here since other methods may miss root xsol = [ S(nroot(lambdify(x, xeq), i, solver="anderson")) for i in iv ] points = [ Point(i, solve(eq.subs(x, i), y)[0]).n(prec) for i in xsol ] except PolynomialError: pass if not points: points = [Point(*s) for s in solve((seq, eq), (x, y))] # complicated expressions may not be decidably real so evaluate to # check whether they are real or not points = [ i.n(prec) if prec is not None else i for i in points if all( j.n(2).is_real for j in i.args) ] slopes = [norm.subs(zip((x, y), pt.args)) for pt in points] if prec is not None: slopes = [ i.n(prec) if i not in (-oo, oo, zoo) else i for i in slopes ] return [Line(pt, slope=s) for pt, s in zip(points, slopes)]
def test_Pow_Expr_args():
    x = Symbol('x')
    bases = [Basic(), Poly(x, x), FiniteSet(x)]
    for base in bases:
        with warns_deprecated_sympy():
            Pow(base, S.One)
def ratint(f, x, **flags): """Performs indefinite integration of rational functions. Given a field :math:`K` and a rational function :math:`f = p/q`, where :math:`p` and :math:`q` are polynomials in :math:`K[x]`, returns a function :math:`g` such that :math:`f = g'`. >>> from sympy.integrals.rationaltools import ratint >>> from sympy.abc import x >>> ratint(36/(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2), x) (12*x + 6)/(x**2 - 1) + 4*log(x - 2) - 4*log(x + 1) References ========== .. [Bro05] M. Bronstein, Symbolic Integration I: Transcendental Functions, Second Edition, Springer-Verlag, 2005, pp. 35-70 See Also ======== sympy.integrals.integrals.Integral.doit ratint_logpart, ratint_ratpart """ if type(f) is not tuple: p, q = f.as_numer_denom() else: p, q = f p, q = Poly(p, x, composite=False, field=True), Poly(q, x, composite=False, field=True) coeff, p, q = p.cancel(q) poly, p = p.div(q) result = poly.integrate(x).as_expr() if p.is_zero: return coeff * result g, h = ratint_ratpart(p, q, x) P, Q = h.as_numer_denom() P = Poly(P, x) Q = Poly(Q, x) q, r = P.div(Q) result += g + q.integrate(x).as_expr() if not r.is_zero: symbol = flags.get('symbol', 't') if not isinstance(symbol, Symbol): t = Dummy(symbol) else: t = symbol.as_dummy() L = ratint_logpart(r, Q, x, t) real = flags.get('real') if real is None: if type(f) is not tuple: atoms = f.atoms() else: p, q = f atoms = p.atoms() | q.atoms() for elt in atoms - set([x]): if not elt.is_real: real = False break else: real = True eps = S(0) if not real: for h, q in L: eps += RootSum(q, Lambda(t, t * log(h.as_expr())), quadratic=True) else: for h, q in L: R = log_to_real(h, q, x, t) if R is not None: eps += R else: eps += RootSum(q, Lambda(t, t * log(h.as_expr())), quadratic=True) result += eps return coeff * result
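Whatever branch ratint() takes (rational part, logarithmic part, real or complex form), the output can always be validated by differentiation; a brief sketch using the docstring's integrand.

# Hedged sketch: d/dx of the antiderivative must equal the integrand.
from sympy import diff, simplify
from sympy.abc import x
from sympy.integrals.rationaltools import ratint

integrand = 36/(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2)
antideriv = ratint(integrand, x)
assert simplify(diff(antideriv, x) - integrand) == 0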
def test_issue_21287():
    assert not any(isinstance(i, Piecewise) for i in roots_quartic(
        Poly(x**4 - x**2 * (3 + 5 * I) + 2 * x * (-1 + I) - 1 + 3 * I, x)))
def test_roots_quadratic(): assert roots_quadratic(Poly(2 * x**2, x)) == [0, 0] assert roots_quadratic(Poly(2 * x**2 + 3 * x, x)) == [Rational(-3, 2), 0] assert roots_quadratic(Poly(2 * x**2 + 3, x)) == [-I * sqrt(6) / 2, I * sqrt(6) / 2] assert roots_quadratic( Poly(2 * x**2 + 4 * x + 3, x)) == [-1 - I * sqrt(2) / 2, -1 + I * sqrt(2) / 2] _check(Poly(2 * x**2 + 4 * x + 3, x).all_roots()) f = x**2 + (2 * a * e + 2 * c * e) / (a - c) * x + (d - b + a * e**2 - c * e**2) / (a - c) assert roots_quadratic(Poly(f, x)) == \ [-e*(a + c)/(a - c) - sqrt((a*b + c*d - a*d - b*c + 4*a*c*e**2))/(a - c), -e*(a + c)/(a - c) + sqrt((a*b + c*d - a*d - b*c + 4*a*c*e**2))/(a - c)] # check for simplification f = Poly(y * x**2 - 2 * x - 2 * y, x) assert roots_quadratic(f) == \ [-sqrt(2*y**2 + 1)/y + 1/y, sqrt(2*y**2 + 1)/y + 1/y] f = Poly(x**2 + (-y**2 - 2) * x + y**2 + 1, x) assert roots_quadratic(f) == \ [1,y**2 + 1] f = Poly(sqrt(2) * x**2 - 1, x) r = roots_quadratic(f) assert r == _nsort(r) # issue 8255 f = Poly(-24 * x**2 - 180 * x + 264) assert [w.n(2) for w in f.all_roots(radicals=True)] == \ [w.n(2) for w in f.all_roots(radicals=False)] for _a, _b, _c in cartes((-2, 2), (-2, 2), (0, -1)): f = Poly(_a * x**2 + _b * x + _c) roots = roots_quadratic(f) assert roots == _nsort(roots)
def test_roots_linear():
    assert roots_linear(Poly(2 * x + 1, x)) == [Rational(-1, 2)]
def test_roots0():
    assert roots(1, x) == {}
    assert roots(x, x) == {S.Zero: 1}
    assert roots(x**9, x) == {S.Zero: 9}
    assert roots(((x - 2) * (x + 3) * (x - 4)).expand(), x) == \
        {-S(3): 1, S(2): 1, S(4): 1}

    assert roots(2 * x + 1, x) == {Rational(-1, 2): 1}
    assert roots((2 * x + 1)**2, x) == {Rational(-1, 2): 2}
    assert roots((2 * x + 1)**5, x) == {Rational(-1, 2): 5}
    assert roots((2 * x + 1)**10, x) == {Rational(-1, 2): 10}

    assert roots(x**4 - 1, x) == {I: 1, S.One: 1, -S.One: 1, -I: 1}
    assert roots((x**4 - 1)**2, x) == {I: 2, S.One: 2, -S.One: 2, -I: 2}

    assert roots(((2 * x - 3)**2).expand(), x) == {Rational(3, 2): 2}
    assert roots(((2 * x + 3)**2).expand(), x) == {Rational(-3, 2): 2}
    assert roots(((2 * x - 3)**3).expand(), x) == {Rational(3, 2): 3}
    assert roots(((2 * x + 3)**3).expand(), x) == {Rational(-3, 2): 3}
    assert roots(((2 * x - 3)**5).expand(), x) == {Rational(3, 2): 5}
    assert roots(((2 * x + 3)**5).expand(), x) == {Rational(-3, 2): 5}
    assert roots(((a * x - b)**5).expand(), x) == {b / a: 5}
    assert roots(((a * x + b)**5).expand(), x) == {-b / a: 5}

    assert roots(x**2 + (-a - 1) * x + a, x) == {a: 1, S.One: 1}
    assert roots(x**4 - 2 * x**2 + 1, x) == {S.One: 2, S.NegativeOne: 2}

    assert roots(x**6 - 4*x**4 + 4*x**3 - x**2, x) == \
        {S.One: 2, -1 - sqrt(2): 1, S.Zero: 2, -1 + sqrt(2): 1}

    assert roots(x**8 - 1, x) == {
        sqrt(2) / 2 + I * sqrt(2) / 2: 1,
        sqrt(2) / 2 - I * sqrt(2) / 2: 1,
        -sqrt(2) / 2 + I * sqrt(2) / 2: 1,
        -sqrt(2) / 2 - I * sqrt(2) / 2: 1,
        S.One: 1, -S.One: 1, I: 1, -I: 1
    }

    f = -2016*x**2 - 5616*x**3 - 2056*x**4 + 3324*x**5 + 2176*x**6 - \
        224*x**7 - 384*x**8 - 64*x**9
    assert roots(f) == {
        S.Zero: 2, -S(2): 2, S(2): 1, Rational(-7, 2): 1,
        Rational(-3, 2): 1, Rational(-1, 2): 1, Rational(3, 2): 1
    }

    assert roots((a + b + c) * x - (a + b + c + d), x) == {
        (a + b + c + d) / (a + b + c): 1
    }

    assert roots(x**3 + x**2 - x + 1, x, cubics=False) == {}
    assert roots(((x - 2) * (x + 3) * (x - 4)).expand(), x, cubics=False) == {
        -S(3): 1, S(2): 1, S(4): 1
    }
    assert roots(((x - 2)*(x + 3)*(x - 4)*(x - 5)).expand(), x, cubics=False) == \
        {-S(3): 1, S(2): 1, S(4): 1, S(5): 1}
    assert roots(x**3 + 2 * x**2 + 4 * x + 8, x) == {
        -S(2): 1, -2 * I: 1, 2 * I: 1
    }
    assert roots(x**3 + 2*x**2 + 4*x + 8, x, cubics=True) == \
        {-2*I: 1, 2*I: 1, -S(2): 1}
    assert roots((x**2 - x)*(x**3 + 2*x**2 + 4*x + 8), x) == \
        {S.One: 1, S.Zero: 1, -S(2): 1, -2*I: 1, 2*I: 1}

    r1_2, r1_3 = S.Half, Rational(1, 3)

    x0 = (3 * sqrt(33) + 19)**r1_3
    x1 = 4 / x0 / 3
    x2 = x0 / 3
    x3 = sqrt(3) * I / 2
    x4 = x3 - r1_2
    x5 = -x3 - r1_2
    assert roots(x**3 + x**2 - x + 1, x, cubics=True) == {
        -x1 - x2 - r1_3: 1,
        -x1 / x4 - x2 * x4 - r1_3: 1,
        -x1 / x5 - x2 * x5 - r1_3: 1,
    }

    f = (x**2 + 2 * x + 3).subs(x, 2 * x**2 + 3 * x).subs(x, 5 * x - 4)

    r13_20, r1_20 = [Rational(*r) for r in ((13, 20), (1, 20))]
    s2 = sqrt(2)
    assert roots(f, x) == {
        r13_20 + r1_20 * sqrt(1 - 8 * I * s2): 1,
        r13_20 - r1_20 * sqrt(1 - 8 * I * s2): 1,
        r13_20 + r1_20 * sqrt(1 + 8 * I * s2): 1,
        r13_20 - r1_20 * sqrt(1 + 8 * I * s2): 1,
    }

    f = x**4 + x**3 + x**2 + x + 1

    r1_4, r1_8, r5_8 = [Rational(*r) for r in ((1, 4), (1, 8), (5, 8))]
    assert roots(f, x) == {
        -r1_4 + r1_4 * 5**r1_2 + I * (r5_8 + r1_8 * 5**r1_2)**r1_2: 1,
        -r1_4 + r1_4 * 5**r1_2 - I * (r5_8 + r1_8 * 5**r1_2)**r1_2: 1,
        -r1_4 - r1_4 * 5**r1_2 + I * (r5_8 - r1_8 * 5**r1_2)**r1_2: 1,
        -r1_4 - r1_4 * 5**r1_2 - I * (r5_8 - r1_8 * 5**r1_2)**r1_2: 1,
    }

    f = z**3 + (-2 - y) * z**2 + (1 + 2 * y - 2 * x**2) * z - y + 2 * x**2

    assert roots(f, z) == {
        S.One: 1,
        S.Half + S.Half * y + S.Half * sqrt(1 - 2 * y + y**2 + 8 * x**2): 1,
        S.Half + S.Half * y - S.Half * sqrt(1 - 2 * y + y**2 + 8 * x**2): 1,
    }

    assert roots(a * b * c * x**3 + 2 * x**2 + 4 * x + 8, x, cubics=False) == {}
    assert roots(a * b * c * x**3 + 2 * x**2 + 4 * x + 8, x, cubics=True) != {}

    assert roots(x**4 - 1, x, filter='Z') == {S.One: 1, -S.One: 1}
    assert roots(x**4 - 1, x, filter='I') == {I: 1, -I: 1}

    assert roots((x - 1) * (x + 1), x) == {S.One: 1, -S.One: 1}
    assert roots((x - 1) * (x + 1), x, predicate=lambda r: r.is_positive) == {
        S.One: 1
    }

    assert roots(x**4 - 1, x, filter='Z', multiple=True) == [-S.One, S.One]
    assert roots(x**4 - 1, x, filter='I', multiple=True) == [I, -I]

    ar, br = symbols('a, b', real=True)
    p = x**2 * (ar - br)**2 + 2 * x * (br - ar) + 1
    assert roots(p, x, filter='R') == {1 / (ar - br): 2}

    assert roots(x**3, x, multiple=True) == [S.Zero, S.Zero, S.Zero]
    assert roots(1234, x, multiple=True) == []

    f = x**6 - x**5 + x**4 - x**3 + x**2 - x + 1
    assert roots(f) == {
        -I * sin(pi / 7) + cos(pi / 7): 1,
        -I * sin(pi * Rational(2, 7)) - cos(pi * Rational(2, 7)): 1,
        -I * sin(pi * Rational(3, 7)) + cos(pi * Rational(3, 7)): 1,
        I * sin(pi / 7) + cos(pi / 7): 1,
        I * sin(pi * Rational(2, 7)) - cos(pi * Rational(2, 7)): 1,
        I * sin(pi * Rational(3, 7)) + cos(pi * Rational(3, 7)): 1,
    }

    g = ((x**2 + 1) * f**2).expand()
    assert roots(g) == {
        -I * sin(pi / 7) + cos(pi / 7): 2,
        -I * sin(pi * Rational(2, 7)) - cos(pi * Rational(2, 7)): 2,
        -I * sin(pi * Rational(3, 7)) + cos(pi * Rational(3, 7)): 2,
        I * sin(pi / 7) + cos(pi / 7): 2,
        I * sin(pi * Rational(2, 7)) - cos(pi * Rational(2, 7)): 2,
        I * sin(pi * Rational(3, 7)) + cos(pi * Rational(3, 7)): 2,
        -I: 1, I: 1,
    }

    r = roots(x**3 + 40 * x + 64)
    real_root = [rx for rx in r if rx.is_real][0]
    cr = 108 + 6 * sqrt(1074)
    assert real_root == -2 * root(cr, 3) / 3 + 20 / root(cr, 3)

    eq = Poly((7 + 5 * sqrt(2)) * x**3 + (-6 - 4 * sqrt(2)) * x**2 +
              (-sqrt(2) - 1) * x + 2, x, domain='EX')
    assert roots(eq) == {-1 + sqrt(2): 1, -2 + 2 * sqrt(2): 1, -sqrt(2) + 1: 1}

    eq = Poly(41 * x**5 + 29 * sqrt(2) * x**5 - 153 * x**4 - 108 * sqrt(2) * x**4 +
              175 * x**3 + 125 * sqrt(2) * x**3 - 45 * x**2 - 30 * sqrt(2) * x**2 -
              26 * sqrt(2) * x - 26 * x + 24, x, domain='EX')
    assert roots(eq) == {
        -sqrt(2) + 1: 1, -2 + 2 * sqrt(2): 1, -1 + sqrt(2): 1,
        -4 + 4 * sqrt(2): 1, -3 + 3 * sqrt(2): 1
    }

    eq = Poly(x**3 - 2 * x**2 + 6 * sqrt(2) * x**2 - 8 * sqrt(2) * x + 23 * x -
              14 + 14 * sqrt(2), x, domain='EX')
    assert roots(eq) == {
        -2 * sqrt(2) + 2: 1, -2 * sqrt(2) + 1: 1, -2 * sqrt(2) - 1: 1
    }

    assert roots(Poly((x + sqrt(2))**3 - 7, x, domain='EX')) == \
        {-sqrt(2) - root(7, 3)/2 - sqrt(3)*root(7, 3)*I/2: 1,
         -sqrt(2) - root(7, 3)/2 + sqrt(3)*root(7, 3)*I/2: 1,
         -sqrt(2) + root(7, 3): 1}
def test_roots_preprocessing(): f = a * y * x**2 + y - b coeff, poly = preprocess_roots(Poly(f, x)) assert coeff == 1 assert poly == Poly(a * y * x**2 + y - b, x) f = c**3 * x**3 + c**2 * x**2 + c * x + a coeff, poly = preprocess_roots(Poly(f, x)) assert coeff == 1 / c assert poly == Poly(x**3 + x**2 + x + a, x) f = c**3 * x**3 + c**2 * x**2 + a coeff, poly = preprocess_roots(Poly(f, x)) assert coeff == 1 / c assert poly == Poly(x**3 + x**2 + a, x) f = c**3 * x**3 + c * x + a coeff, poly = preprocess_roots(Poly(f, x)) assert coeff == 1 / c assert poly == Poly(x**3 + x + a, x) f = c**3 * x**3 + a coeff, poly = preprocess_roots(Poly(f, x)) assert coeff == 1 / c assert poly == Poly(x**3 + a, x) E, F, J, L = symbols("E,F,J,L") f = -21601054687500000000*E**8*J**8/L**16 + \ 508232812500000000*F*x*E**7*J**7/L**14 - \ 4269543750000000*E**6*F**2*J**6*x**2/L**12 + \ 16194716250000*E**5*F**3*J**5*x**3/L**10 - \ 27633173750*E**4*F**4*J**4*x**4/L**8 + \ 14840215*E**3*F**5*J**3*x**5/L**6 + \ 54794*E**2*F**6*J**2*x**6/(5*L**4) - \ 1153*E*J*F**7*x**7/(80*L**2) + \ 633*F**8*x**8/160000 coeff, poly = preprocess_roots(Poly(f, x)) assert coeff == 20 * E * J / (F * L**2) assert poly == 633*x**8 - 115300*x**7 + 4383520*x**6 + 296804300*x**5 - 27633173750*x**4 + \ 809735812500*x**3 - 10673859375000*x**2 + 63529101562500*x - 135006591796875 f = Poly(-y**2 + x**2 * exp(x), y, domain=ZZ[x, exp(x)]) g = Poly(-y**2 + exp(x), y, domain=ZZ[exp(x)]) assert preprocess_roots(f) == (x, g)
def test_roots_quartic(): assert roots_quartic(Poly(x**4, x)) == [0, 0, 0, 0] assert roots_quartic(Poly(x**4 + x**3, x)) in [[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]] assert roots_quartic(Poly(x**4 - x**3, x)) in [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] lhs = roots_quartic(Poly(x**4 + x, x)) rhs = [S.Half + I * sqrt(3) / 2, S.Half - I * sqrt(3) / 2, S.Zero, -S.One] assert sorted(lhs, key=hash) == sorted(rhs, key=hash) # test of all branches of roots quartic for i, (a, b, c, d) in enumerate([(1, 2, 3, 0), (3, -7, -9, 9), (1, 2, 3, 4), (1, 2, 3, 4), (-7, -3, 3, -6), (-3, 5, -6, -4), (6, -5, -10, -3)]): if i == 2: c = -a * (a**2 / S(8) - b / S(2)) elif i == 3: d = a * (a * (a**2 * Rational(3, 256) - b / S(16)) + c / S(4)) eq = x**4 + a * x**3 + b * x**2 + c * x + d ans = roots_quartic(Poly(eq, x)) assert all(eq.subs(x, ai).n(chop=True) == 0 for ai in ans) # not all symbolic quartics are unresolvable eq = Poly(q * x + q / 4 + x**4 + x**3 + 2 * x**2 - Rational(1, 3), x) sol = roots_quartic(eq) assert all(verify_numerically(eq.subs(x, i), 0) for i in sol) z = symbols('z', negative=True) eq = x**4 + 2 * x**3 + 3 * x**2 + x * (z + 11) + 5 zans = roots_quartic(Poly(eq, x)) assert all( [verify_numerically(eq.subs(((x, i), (z, -1))), 0) for i in zans]) # but some are (see also issue 4989) # it's ok if the solution is not Piecewise, but the tests below should pass eq = Poly(y * x**4 + x**3 - x + z, x) ans = roots_quartic(eq) assert all(type(i) == Piecewise for i in ans) reps = ( dict(y=Rational(-1, 3), z=Rational(-1, 4)), # 4 real dict(y=Rational(-1, 3), z=Rational(-1, 2)), # 2 real dict(y=Rational(-1, 3), z=-2)) # 0 real for rep in reps: sol = roots_quartic(Poly(eq.subs(rep), x)) assert all( [verify_numerically(w.subs(rep) - s, 0) for w, s in zip(ans, sol)])
def test_roots_composite():
    assert len(roots(Poly(y**3 + y**2 * sqrt(x) + y + x, y, composite=True))) == 3
def ratint_logpart(f, g, x, t=None): """ Lazard-Rioboo-Trager algorithm. Given a field K and polynomials f and g in K[x], such that f and g are coprime, deg(f) < deg(g) and g is square-free, returns a list of tuples (s_i, q_i) of polynomials, for i = 1..n, such that s_i in K[t, x] and q_i in K[t], and: ___ ___ d f d \ ` \ ` -- - = -- ) ) a log(s_i(a, x)) dx g dx /__, /__, i=1..n a | q_i(a) = 0 Examples ======== >>> from sympy.integrals.rationaltools import ratint_logpart >>> from sympy.abc import x >>> from sympy import Poly >>> ratint_logpart(Poly(1, x, domain='ZZ'), ... Poly(x**2 + x + 1, x, domain='ZZ'), x) [(Poly(x + 3*_t/2 + 1/2, x, domain='QQ[_t]'), ...Poly(3*_t**2 + 1, _t, domain='ZZ'))] >>> ratint_logpart(Poly(12, x, domain='ZZ'), ... Poly(x**2 - x - 2, x, domain='ZZ'), x) [(Poly(x - 3*_t/8 - 1/2, x, domain='QQ[_t]'), ...Poly(-_t**2 + 16, _t, domain='ZZ'))] See Also ======== ratint, ratint_ratpart """ f, g = Poly(f, x), Poly(g, x) t = t or Dummy('t') a, b = g, f - g.diff() * Poly(t, x) res, R = resultant(a, b, includePRS=True) res = Poly(res, t, composite=False) assert res, "BUG: resultant(%s, %s) can't be zero" % (a, b) R_map, H = {}, [] for r in R: R_map[r.degree()] = r def _include_sign(c, sqf): if (c < 0) == True: h, k = sqf[0] sqf[0] = h * c, k C, res_sqf = res.sqf_list() _include_sign(C, res_sqf) for q, i in res_sqf: _, q = q.primitive() if g.degree() == i: H.append((g, q)) else: h = R_map[i] h_lc = Poly(h.LC(), t, field=True) c, h_lc_sqf = h_lc.sqf_list(all=True) _include_sign(c, h_lc_sqf) for a, j in h_lc_sqf: h = h.quo(Poly(a.gcd(q)**j, x)) inv, coeffs = h_lc.invert(q), [S(1)] for coeff in h.coeffs()[1:]: T = (inv * coeff).rem(q) coeffs.append(T.as_expr()) h = Poly(dict(list(zip(h.monoms(), coeffs))), x) H.append((h, q)) return H
def test_Poly(): assert str(Poly(0, x)) == "Poly(0, x, domain='ZZ')" assert str(Poly(1, x)) == "Poly(1, x, domain='ZZ')" assert str(Poly(x, x)) == "Poly(x, x, domain='ZZ')" assert str(Poly(2 * x + 1, x)) == "Poly(2*x + 1, x, domain='ZZ')" assert str(Poly(2 * x - 1, x)) == "Poly(2*x - 1, x, domain='ZZ')" assert str(Poly(-1, x)) == "Poly(-1, x, domain='ZZ')" assert str(Poly(-x, x)) == "Poly(-x, x, domain='ZZ')" assert str(Poly(-2 * x + 1, x)) == "Poly(-2*x + 1, x, domain='ZZ')" assert str(Poly(-2 * x - 1, x)) == "Poly(-2*x - 1, x, domain='ZZ')" assert str(Poly(x - 1, x)) == "Poly(x - 1, x, domain='ZZ')" assert str(Poly(x**2 + 1 + y, x)) == "Poly(x**2 + y + 1, x, domain='ZZ[y]')" assert str(Poly(x**2 - 1 + y, x)) == "Poly(x**2 + y - 1, x, domain='ZZ[y]')" assert str(Poly(x**2 + I * x, x)) == "Poly(x**2 + I*x, x, domain='EX')" assert str(Poly(x**2 - I * x, x)) == "Poly(x**2 - I*x, x, domain='EX')" assert str(Poly(-x * y * z + x * y - 1, x, y, z)) == "Poly(-x*y*z + x*y - 1, x, y, z, domain='ZZ')" assert str(Poly(-w*x**21*y**7*z + (1 + w)*z**3 - 2*x*z + 1, x, y, z)) == \ "Poly(-w*x**21*y**7*z - 2*x*z + (w + 1)*z**3 + 1, x, y, z, domain='ZZ[w]')" assert str(Poly(x**2 + 1, x, modulus=2)) == "Poly(x**2 + 1, x, modulus=2)" assert str(Poly(2 * x**2 + 3 * x + 4, x, modulus=17)) == "Poly(2*x**2 + 3*x + 4, x, modulus=17)"
def _solve_inequality(ie, s, linear=False):
    """Return the inequality with s isolated on the left, if possible.
    If the relationship is non-linear, a solution involving And or Or
    may be returned. False or True are returned if the relationship
    is never True or always True, respectively.

    If `linear` is True (default is False) an `s`-dependent expression
    will be isolated on the left, if possible but it will not be solved
    for `s` unless the expression is linear in `s`. Furthermore, only
    "safe" operations which don't change the sense of the relationship
    are applied: no division by an unsigned value is attempted unless the
    relationship involves Eq or Ne and no division by a value not known
    to be nonzero is ever attempted.

    Examples
    ========

    >>> from sympy import Eq, Symbol
    >>> from sympy.solvers.inequalities import _solve_inequality as f
    >>> from sympy.abc import x, y

    For linear expressions, the symbol can be isolated:

    >>> f(x - 2 < 0, x)
    x < 2
    >>> f(-x - 6 < x, x)
    x > -3

    Sometimes nonlinear relationships will be False

    >>> f(x**2 + 4 < 0, x)
    False

    Or they may involve more than one region of values:

    >>> f(x**2 - 4 < 0, x)
    (-2 < x) & (x < 2)

    To restrict the solution to a relational, set linear=True
    and only the x-dependent portion will be isolated on the left:

    >>> f(x**2 - 4 < 0, x, linear=True)
    x**2 < 4

    Division of only nonzero quantities is allowed, so x cannot
    be isolated by dividing by y:

    >>> y.is_nonzero is None  # it is unknown whether it is 0 or not
    True
    >>> f(x*y < 1, x)
    x*y < 1

    And while an equality (or inequality) still holds after dividing by a
    non-zero quantity

    >>> nz = Symbol('nz', nonzero=True)
    >>> f(Eq(x*nz, 1), x)
    Eq(x, 1/nz)

    the sign must be known for other inequalities involving > or <:

    >>> f(x*nz <= 1, x)
    nz*x <= 1
    >>> p = Symbol('p', positive=True)
    >>> f(x*p <= 1, x)
    x <= 1/p

    When there are denominators in the original expression that
    are removed by expansion, conditions for them will be returned
    as part of the result:

    >>> f(x < x*(2/x - 1), x)
    (x < 1) & Ne(x, 0)
    """
    from sympy.solvers.solvers import denoms
    if s not in ie.free_symbols:
        return ie
    if ie.rhs == s:
        ie = ie.reversed
    if ie.lhs == s and s not in ie.rhs.free_symbols:
        return ie

    def classify(ie, s, i):
        # return True or False if ie evaluates when substituting s with
        # i else None (if unevaluated) or NaN (when there is an error
        # in evaluating)
        try:
            v = ie.subs(s, i)
            if v is S.NaN:
                return v
            elif v not in (True, False):
                return
            return v
        except TypeError:
            return S.NaN

    rv = None
    oo = S.Infinity
    expr = ie.lhs - ie.rhs
    try:
        p = Poly(expr, s)
        if p.degree() == 0:
            rv = ie.func(p.as_expr(), 0)
        elif not linear and p.degree() > 1:
            # handle in except clause
            raise NotImplementedError
    except (PolynomialError, NotImplementedError):
        if not linear:
            try:
                rv = reduce_rational_inequalities([[ie]], s)
            except PolynomialError:
                rv = solve_univariate_inequality(ie, s)
            # remove restrictions wrt +/-oo that may have been
            # applied when using sets to simplify the relationship
            okoo = classify(ie, s, oo)
            if okoo is S.true and classify(rv, s, oo) is S.false:
                rv = rv.subs(s < oo, True)
            oknoo = classify(ie, s, -oo)
            if (oknoo is S.true and
                    classify(rv, s, -oo) is S.false):
                rv = rv.subs(-oo < s, True)
                rv = rv.subs(s > -oo, True)
            if rv is S.true:
                rv = (s <= oo) if okoo is S.true else (s < oo)
                if oknoo is not S.true:
                    rv = And(-oo < s, rv)
        else:
            p = Poly(expr)

    conds = []
    if rv is None:
        e = p.as_expr()  # this is in expanded form
        # Do a safe inversion of e, moving non-s terms
        # to the rhs and dividing by a nonzero factor if
        # the relational is Eq/Ne; for other relationals
        # the sign must also be positive or negative
        rhs = 0
        b, ax = e.as_independent(s, as_Add=True)
        e -= b
        rhs -= b
        ef = factor_terms(e)
        a, e = ef.as_independent(s, as_Add=False)
        if (a.is_zero != False or  # don't divide by potential 0
                a.is_negative ==
                a.is_positive is None and  # if sign is not known then
                ie.rel_op not in ('!=', '==')):  # reject if not Eq/Ne
            e = ef
            a = S.One
        rhs /= a
        if a.is_positive:
            rv = ie.func(e, rhs)
        else:
            rv = ie.reversed.func(e, rhs)

        # return conditions under which the value is
        # valid, too.
        beginning_denoms = denoms(ie.lhs) | denoms(ie.rhs)
        current_denoms = denoms(rv)
        for d in beginning_denoms - current_denoms:
            c = _solve_inequality(Eq(d, 0), s, linear=linear)
            if isinstance(c, Eq) and c.lhs == s:
                if classify(rv, s, c.rhs) is S.true:
                    # rv is permitting this value but it shouldn't
                    conds.append(~c)
        for i in (-oo, oo):
            if (classify(rv, s, i) is S.true and
                    classify(ie, s, i) is not S.true):
                conds.append(s < i if i is oo else i < s)
    conds.append(rv)
    return And(*conds)
def ratint_ratpart(f, g, x): """ Horowitz-Ostrogradsky algorithm. Given a field K and polynomials f and g in K[x], such that f and g are coprime and deg(f) < deg(g), returns fractions A and B in K(x), such that f/g = A' + B and B has square-free denominator. Examples ======== >>> from sympy.integrals.rationaltools import ratint_ratpart >>> from sympy.abc import x, y >>> from sympy import Poly >>> ratint_ratpart(Poly(1, x, domain='ZZ'), ... Poly(x + 1, x, domain='ZZ'), x) (0, 1/(x + 1)) >>> ratint_ratpart(Poly(1, x, domain='EX'), ... Poly(x**2 + y**2, x, domain='EX'), x) (0, 1/(x**2 + y**2)) >>> ratint_ratpart(Poly(36, x, domain='ZZ'), ... Poly(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2, x, domain='ZZ'), x) ((12*x + 6)/(x**2 - 1), 12/(x**2 - x - 2)) See Also ======== ratint, ratint_logpart """ from sympy import solve f = Poly(f, x) g = Poly(g, x) u, v, _ = g.cofactors(g.diff()) n = u.degree() m = v.degree() A_coeffs = [Dummy('a' + str(n - i)) for i in range(0, n)] B_coeffs = [Dummy('b' + str(m - i)) for i in range(0, m)] C_coeffs = A_coeffs + B_coeffs A = Poly(A_coeffs, x, domain=ZZ[C_coeffs]) B = Poly(B_coeffs, x, domain=ZZ[C_coeffs]) H = f - A.diff() * v + A * (u.diff() * v).quo(u) - B * u result = solve(H.coeffs(), C_coeffs) A = A.as_expr().subs(result) B = B.as_expr().subs(result) rat_part = cancel(A / u.as_expr(), x) log_part = cancel(B / v.as_expr(), x) return rat_part, log_part
    gcd(a, t**2 + 1) == 1), return the quadruplet (A, B, C, 1/h) such that
    A, B, C, h in k[t] and for any solution q in k<t> of a*Dq + b*q == c,
    r = qh in k[t] satisfies A*Dr + B*r == C.

    For ``case == 'primitive'``, k<t> == k[t], so it returns (a, b, c, 1) in
    this case.

    This constitutes step 2 of the outline given in the rde.py docstring.
    """
    # TODO: finish writing this and write tests
    if case == 'auto':
        case = DE.case

    if case == 'exp':
        p = Poly(DE.t, DE.t)
    elif case == 'tan':
        p = Poly(DE.t**2 + 1, DE.t)
    elif case in ('primitive', 'base'):
        B = ba.to_field().quo(bd)
        C = ca.to_field().quo(cd)
        return (a, B, C, Poly(1, DE.t))
    else:
        raise ValueError("case must be one of {'exp', 'tan', 'primitive', "
                         "'base'}, not %s." % case)

    nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t)
    nc = order_at(ca, p, DE.t) - order_at(cd, p, DE.t)

    n = min(0, nc - min(0, nb))
    if not nb:
def test_Poly(): assert str(Poly(0, x)) == "Poly(0, x, domain='ZZ')" assert str(Poly(1, x)) == "Poly(1, x, domain='ZZ')" assert str(Poly(x, x)) == "Poly(x, x, domain='ZZ')" assert str(Poly(2 * x + 1, x)) == "Poly(2*x + 1, x, domain='ZZ')" assert str(Poly(2 * x - 1, x)) == "Poly(2*x - 1, x, domain='ZZ')" assert str(Poly(-1, x)) == "Poly(-1, x, domain='ZZ')" assert str(Poly(-x, x)) == "Poly(-x, x, domain='ZZ')" assert str(Poly(-2 * x + 1, x)) == "Poly(-2*x + 1, x, domain='ZZ')" assert str(Poly(-2 * x - 1, x)) == "Poly(-2*x - 1, x, domain='ZZ')" assert str(Poly(x - 1, x)) == "Poly(x - 1, x, domain='ZZ')" assert str(Poly(2 * x + x**5, x)) == "Poly(x**5 + 2*x, x, domain='ZZ')" assert str(Poly(3**(2 * x), 3**x)) == "Poly((3**x)**2, 3**x, domain='ZZ')" assert str(Poly((x**2)**x)) == "Poly(((x**2)**x), (x**2)**x, domain='ZZ')" assert str(Poly((x + y)**3, (x + y), expand=False)) == "Poly((x + y)**3, x + y, domain='ZZ')" assert str(Poly((x - 1)**2, (x - 1), expand=False)) == "Poly((x - 1)**2, x - 1, domain='ZZ')" assert str(Poly(x**2 + 1 + y, x)) == "Poly(x**2 + y + 1, x, domain='ZZ[y]')" assert str(Poly(x**2 - 1 + y, x)) == "Poly(x**2 + y - 1, x, domain='ZZ[y]')" assert str(Poly(x**2 + I * x, x)) == "Poly(x**2 + I*x, x, domain='EX')" assert str(Poly(x**2 - I * x, x)) == "Poly(x**2 - I*x, x, domain='EX')" assert str(Poly(-x * y * z + x * y - 1, x, y, z)) == "Poly(-x*y*z + x*y - 1, x, y, z, domain='ZZ')" assert str(Poly(-w*x**21*y**7*z + (1 + w)*z**3 - 2*x*z + 1, x, y, z)) == \ "Poly(-w*x**21*y**7*z - 2*x*z + (w + 1)*z**3 + 1, x, y, z, domain='ZZ[w]')" assert str(Poly(x**2 + 1, x, modulus=2)) == "Poly(x**2 + 1, x, modulus=2)" assert str(Poly(2 * x**2 + 3 * x + 4, x, modulus=17)) == "Poly(2*x**2 + 3*x + 4, x, modulus=17)"
def gosper_normal(f, g, n, polys=True): r""" Compute Gosper's normal form of ``f`` and ``g``. Given relatively prime univariate polynomials ``f`` and ``g``, rewrite their quotient to a normal form defined as follows: .. math:: \frac{f(n)}{g(n)} = Z \cdot \frac{A(n) C(n+1)}{B(n) C(n)} where ``Z`` is an arbitrary constant and ``A``, ``B``, ``C`` are monic polynomials in ``n`` with the following properties: 1. `\gcd(A(n), B(n+h)) = 1 \forall h \in \mathbb{N}` 2. `\gcd(B(n), C(n+1)) = 1` 3. `\gcd(A(n), C(n)) = 1` This normal form, or rational factorization in other words, is a crucial step in Gosper's algorithm and in solving difference equations. It can also be used to decide if two hypergeometric terms are similar or not. This procedure will return a tuple containing elements of this factorization in the form ``(Z*A, B, C)``. Examples ======== >>> from sympy.concrete.gosper import gosper_normal >>> from sympy.abc import n >>> gosper_normal(4*n+5, 2*(4*n+1)*(2*n+3), n, polys=False) (1/4, n + 3/2, n + 1/4) """ (p, q), opt = parallel_poly_from_expr((f, g), n, field=True, extension=True) a, A = p.LC(), p.monic() b, B = q.LC(), q.monic() C, Z = A.one, a / b h = Dummy('h') D = Poly(n + h, n, h, domain=opt.domain) R = A.resultant(B.compose(D)) roots = set(R.ground_roots().keys()) for r in set(roots): if not r.is_Integer or r < 0: roots.remove(r) for i in sorted(roots): d = A.gcd(B.shift(+i)) A = A.quo(d) B = B.quo(d.shift(-i)) for j in range(1, i + 1): C *= d.shift(-j) A = A.mul_ground(Z) if not polys: A = A.as_expr() B = B.as_expr() C = C.as_expr() return A, B, C
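# Sketch (hypothetical helper name) verifying the defining property of the
# normal form returned above for the docstring example:
#     f(n)/g(n) == (Z*A(n)) * C(n + 1) / (B(n) * C(n))
def _sketch_gosper_normal_property():
    from sympy import cancel
    from sympy.abc import n
    from sympy.concrete.gosper import gosper_normal

    f, g = 4*n + 5, 2*(4*n + 1)*(2*n + 3)
    ZA, B, C = gosper_normal(f, g, n, polys=False)
    # With polys=False the three components are plain expressions.
    assert cancel(f/g - ZA*C.subs(n, n + 1)/(B*C)) == 0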
def apart_list_full_decomposition(P, Q, dummygen): """ Bronstein's full partial fraction decomposition algorithm. Given a univariate rational function ``f``, performing only GCD operations over the algebraic closure of the initial ground domain of definition, compute full partial fraction decomposition with fractions having linear denominators. Note that no factorization of the initial denominator of ``f`` is performed. The final decomposition is formed in terms of a sum of :class:`RootSum` instances. References ========== 1. [Bronstein93]_ """ f, x, U = P / Q, P.gen, [] u = Function('u')(x) a = Dummy('a') partial = [] for d, n in Q.sqf_list_include(all=True): b = d.as_expr() U += [u.diff(x, n - 1)] h = cancel(f * b**n) / u**n H, subs = [h], [] for j in range(1, n): H += [H[-1].diff(x) / j] for j in range(1, n + 1): subs += [(U[j - 1], b.diff(x, j) / j)] for j in range(0, n): P, Q = cancel(H[j]).as_numer_denom() for i in range(0, j + 1): P = P.subs(*subs[j - i]) Q = Q.subs(*subs[0]) P = Poly(P, x) Q = Poly(Q, x) G = P.gcd(d) D = d.quo(G) B, g = Q.half_gcdex(D) b = (P * B.quo(g)).rem(D) Dw = D.subs(x, next(dummygen)) numer = Lambda(a, b.as_expr().subs(x, a)) denom = Lambda(a, (x - a)) exponent = n - j partial.append((Dw, numer, denom, exponent)) return partial
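# The routine above is an internal building block; a minimal sketch of the
# public entry point that drives it is apart(..., full=True), which yields the
# RootSum-based decomposition with linear denominators. The helper name is
# illustrative only.
def _sketch_full_apart():
    from sympy import apart, simplify
    from sympy.abc import x

    decomposed = apart(1/(x**2 + 1), x, full=True)
    # Evaluating the RootSum recovers an expression equivalent to the input.
    assert simplify(decomposed.doit() - 1/(x**2 + 1)) == 0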
def gosper_term(f, n): r""" Compute Gosper's hypergeometric term for ``f``. Suppose ``f`` is a hypergeometric term of the sum: .. math:: s_n = \sum_{k=0}^{n-1} f_k where `f_k` does not depend on `n`. Returns a rational function `R_n` such that `g_n = R_n f_n` is a hypergeometric term satisfying `g_{n+1} - g_n = f_n`. Examples ======== >>> from sympy.concrete.gosper import gosper_term >>> from sympy.functions import factorial >>> from sympy.abc import n >>> gosper_term((4*n + 1)*factorial(n)/factorial(2*n + 1), n) (-n - 1/2)/(n + 1/4) """ r = hypersimp(f, n) if r is None: return None # 'f' is *not* a hypergeometric term p, q = r.as_numer_denom() A, B, C = gosper_normal(p, q, n) B = B.shift(-1) N = S(A.degree()) M = S(B.degree()) K = S(C.degree()) if (N != M) or (A.LC() != B.LC()): D = set([K - max(N, M)]) elif not N: D = set([K - N + 1, S(0)]) else: D = set([K - N + 1, (B.nth(N - 1) - A.nth(N - 1)) / A.LC()]) for d in set(D): if not d.is_Integer or d < 0: D.remove(d) if not D: return None # 'f(n)' is *not* Gosper-summable d = max(D) coeffs = symbols('c:%s' % (d + 1), cls=Dummy) domain = A.get_domain().inject(*coeffs) x = Poly(coeffs, n, domain=domain) H = A * x.shift(1) - B * x - C solution = solve(H.coeffs(), coeffs) if solution is None: return None # 'f(n)' is *not* Gosper-summable x = x.as_expr().subs(solution) for coeff in coeffs: if coeff not in solution: x = x.subs(coeff, 0) if x is S.Zero: return None # 'f(n)' is *not* Gosper-summable else: return B.as_expr() * x / C.as_expr()
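# Sketch (hypothetical helper name) spot-checking the certificate returned
# above: with R = gosper_term(f, n), the term g = R*f is a hypergeometric
# antidifference of f, so g(k + 1) - g(k) == f(k) at integer points.
def _sketch_gosper_term_telescoping():
    from sympy import factorial
    from sympy.abc import n
    from sympy.concrete.gosper import gosper_term

    f = (4*n + 1)*factorial(n)/factorial(2*n + 1)
    g = gosper_term(f, n)*f
    for k in range(1, 6):
        # At integer arguments everything evaluates to exact rationals.
        assert g.subs(n, k + 1) - g.subs(n, k) - f.subs(n, k) == 0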
def test_FiniteExtension(): assert srepr(FiniteExtension(Poly(x**2 + 1, x))) == \ "FiniteExtension(Poly(x**2 + 1, x, domain='ZZ'))"
def _get_const_characteristic_eq_sols(r, func, order): r""" Returns the generating solutions (``gensols``) built from the roots of the characteristic equation of a constant coefficient linear ODE, together with a list of collect terms which is later used by the simplification code to run collect on the solution. The parameter `r` is a dict of order:coeff terms, where order is the order of the derivative on each term, and coeff is the coefficient of that derivative. """ x = func.args[0] # First, set up characteristic equation. chareq, symbol = S.Zero, Dummy('x') for i in r.keys(): if type(i) == str or i < 0: pass else: chareq += r[i] * symbol**i chareq = Poly(chareq, symbol) # Can't just call roots because it doesn't return rootof for unsolvable # polynomials. chareqroots = roots(chareq, multiple=True) if len(chareqroots) != order: chareqroots = [rootof(chareq, k) for k in range(chareq.degree())] chareq_is_complex = not all(i.is_real for i in chareq.all_coeffs()) # Create a dict root: multiplicity (charroots) charroots = defaultdict(int) for root in chareqroots: charroots[root] += 1 # We need to keep track of terms so we can run collect() at the end. # This is necessary for constantsimp to work properly. collectterms = [] gensols = [] conjugate_roots = [] # used to prevent double-use of conjugate roots # Loop over roots in the order provided by roots/rootof... for root in chareqroots: # but don't repeat multiple roots. if root not in charroots: continue multiplicity = charroots.pop(root) for i in range(multiplicity): if chareq_is_complex: gensols.append(x**i * exp(root * x)) collectterms = [(i, root, 0)] + collectterms continue reroot = re(root) imroot = im(root) if imroot.has(atan2) and reroot.has(atan2): # Remove this condition when re and im stop returning # circular atan2 usages. gensols.append(x**i * exp(root * x)) collectterms = [(i, root, 0)] + collectterms else: if root in conjugate_roots: collectterms = [(i, reroot, imroot)] + collectterms continue if imroot == 0: gensols.append(x**i * exp(reroot * x)) collectterms = [(i, reroot, 0)] + collectterms continue conjugate_roots.append(conjugate(root)) gensols.append(x**i * exp(reroot * x) * sin(abs(imroot) * x)) gensols.append(x**i * exp(reroot * x) * cos(imroot * x)) # This ordering is important collectterms = [(i, reroot, imroot)] + collectterms return gensols, collectterms
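# A short sketch of the behaviour this helper supports, seen through the
# public dsolve interface: the characteristic equation m**2 + 1 = 0 has the
# conjugate pair +/- I, which the root handling above turns into the sin/cos
# pair of the general solution. The helper name is illustrative only.
def _sketch_const_coeff_ode():
    from sympy import Function, dsolve, sin, cos
    from sympy.abc import x

    f = Function('f')
    sol = dsolve(f(x).diff(x, 2) + f(x), f(x))
    assert {sin(x), cos(x)} <= sol.rhs.atoms(sin, cos)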
def log_to_real(h, q, x, t): r""" Convert complex logarithms to real functions. Given a real field K and polynomials h in K[t,x] and q in K[t], returns a real function f such that: .. math:: \frac{df}{dx} = \frac{d}{dx} \sum_{a \mid q(a) = 0} a \log(h(a, x)) Examples ======== >>> from sympy.integrals.rationaltools import log_to_real >>> from sympy.abc import x, y >>> from sympy import Poly, sqrt, S >>> log_to_real(Poly(x + 3*y/2 + S(1)/2, x, domain='QQ[y]'), ... Poly(3*y**2 + 1, y, domain='ZZ'), x, y) 2*sqrt(3)*atan(2*sqrt(3)*x/3 + sqrt(3)/3)/3 >>> log_to_real(Poly(x**2 - 1, x, domain='ZZ'), ... Poly(-2*y + 1, y, domain='ZZ'), x, y) log(x**2 - 1)/2 See Also ======== log_to_atan """ from sympy import collect u, v = symbols('u,v', cls=Dummy) H = h.as_expr().subs({t: u + I * v}).expand() Q = q.as_expr().subs({t: u + I * v}).expand() H_map = collect(H, I, evaluate=False) Q_map = collect(Q, I, evaluate=False) a, b = H_map.get(S(1), S(0)), H_map.get(I, S(0)) c, d = Q_map.get(S(1), S(0)), Q_map.get(I, S(0)) R = Poly(resultant(c, d, v), u) R_u = roots(R, filter='R') if len(R_u) != R.count_roots(): return None result = S(0) for r_u in R_u.keys(): C = Poly(c.subs({u: r_u}), v) R_v = roots(C, filter='R') if len(R_v) != C.count_roots(): return None for r_v in R_v: if not r_v.is_positive: continue D = d.subs({u: r_u, v: r_v}) if D.evalf(chop=True) != 0: continue A = Poly(a.subs({u: r_u, v: r_v}), x) B = Poly(b.subs({u: r_u, v: r_v}), x) AB = (A**2 + B**2).as_expr() result += r_u * log(AB) + r_v * log_to_atan(A, B) R_q = roots(q, filter='R') if len(R_q) != q.count_roots(): return None for r in R_q.keys(): result += r * log(h.as_expr().subs(t, r)) return result
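# Sketch (hypothetical helper name) checking the first docstring example
# above by differentiation: the real atan form returned by log_to_real
# represents the complex logarithmic sum whose derivative is the rational
# integrand 1/(x**2 + x + 1).
def _sketch_log_to_real_check():
    from sympy import Poly, S, diff, simplify
    from sympy.abc import x, y
    from sympy.integrals.rationaltools import log_to_real

    result = log_to_real(Poly(x + 3*y/2 + S(1)/2, x, domain='QQ[y]'),
                         Poly(3*y**2 + 1, y, domain='ZZ'), x, y)
    assert simplify(diff(result, x) - 1/(x**2 + x + 1)) == 0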
def test_issue_15076(): sol = roots_quartic(Poly(t**4 - 6 * t**2 + t / x - 3, t)) assert sol[0].has(x)
def test_issue_14522(): eq = Poly( x**4 + x**3 * (16 + 32 * I) + x**2 * (-285 + 386 * I) + x * (-2824 - 448 * I) - 2058 - 6053 * I, x) roots_eq = roots(eq) assert all(eq(r) == 0 for r in roots_eq)
def trigsimp_groebner(expr, hints=[], quick=False, order="grlex", polynomial=False): """ Simplify trigonometric expressions using a groebner basis algorithm. This routine takes a fraction involving trigonometric or hyperbolic expressions, and tries to simplify it. The primary metric is the total degree. Some attempts are made to choose the simplest possible expression of the minimal degree, but this is non-rigorous, and also very slow (see the ``quick=True`` option). If ``polynomial`` is set to True, instead of simplifying numerator and denominator together, this function just brings numerator and denominator into a canonical form. This is much faster, but has potentially worse results. However, if the input is a polynomial, then the result is guaranteed to be an equivalent polynomial of minimal degree. The most important option is hints. Its entries can be any of the following: - a natural number - a function - an iterable of the form (func, var1, var2, ...) - anything else, interpreted as a generator A number is used to indicate that the search space should be increased. A function is used to indicate that said function is likely to occur in a simplified expression. An iterable is used to indicate that func(var1 + var2 + ...) is likely to occur in a simplified expression. An additional generator also indicates that it is likely to occur. (See examples below). This routine carries out various computationally intensive algorithms. The option ``quick=True`` can be used to suppress one particularly slow step (at the expense of potentially more complicated results, but never at the expense of increased total degree). Examples ======== >>> from sympy.abc import x, y >>> from sympy import sin, tan, cos, sinh, cosh, tanh >>> from sympy.simplify.trigsimp import trigsimp_groebner Suppose you want to simplify ``sin(x)*cos(x)``. Naively, nothing happens: >>> ex = sin(x)*cos(x) >>> trigsimp_groebner(ex) sin(x)*cos(x) This is because ``trigsimp_groebner`` only looks for a simplification involving just ``sin(x)`` and ``cos(x)``. You can tell it to also try ``2*x`` by passing ``hints=[2]``: >>> trigsimp_groebner(ex, hints=[2]) sin(2*x)/2 >>> trigsimp_groebner(sin(x)**2 - cos(x)**2, hints=[2]) -cos(2*x) Increasing the search space this way can quickly become expensive. A much faster way is to give a specific expression that is likely to occur: >>> trigsimp_groebner(ex, hints=[sin(2*x)]) sin(2*x)/2 Hyperbolic expressions are similarly supported: >>> trigsimp_groebner(sinh(2*x)/sinh(x)) 2*cosh(x) Note how no hints had to be passed, since the expression already involved ``2*x``. The tangent function is also supported. You can either pass ``tan`` in the hints, to indicate that tan should be tried whenever cosine or sine are, or you can pass a specific generator: >>> trigsimp_groebner(sin(x)/cos(x), hints=[tan]) tan(x) >>> trigsimp_groebner(sinh(x)/cosh(x), hints=[tanh(x)]) tanh(x) Finally, you can use the iterable form to suggest that angle sum formulae should be tried: >>> ex = (tan(x) + tan(y))/(1 - tan(x)*tan(y)) >>> trigsimp_groebner(ex, hints=[(tan, x, y)]) tan(x + y) """ # TODO # - preprocess by replacing everything by funcs we can handle # - optionally use cot instead of tan # - more intelligent hinting. # For example, if the ideal is small, and we have sin(x), sin(y), # add sin(x + y) automatically... ? # - algebraic numbers ... # - expressions of lowest degree are not distinguished properly # e.g.
1 - sin(x)**2 # - we could try to order the generators intelligently, so as to influence # which monomials appear in the quotient basis # THEORY # ------ # Ratsimpmodprime above can be used to "simplify" a rational function # modulo a prime ideal. "Simplify" mainly means finding an equivalent # expression of lower total degree. # # We intend to use this to simplify trigonometric functions. To do that, # we need to decide (a) which ring to use, and (b) modulo which ideal to # simplify. In practice, (a) means settling on a list of "generators" # a, b, c, ..., such that the fraction we want to simplify is a rational # function in a, b, c, ..., with coefficients in ZZ (integers). # (b) means that we have to decide what relations to impose on the # generators. There are two practical problems: # (1) The ideal has to be *prime* (a technical term). # (2) The relations have to be polynomials in the generators. # # We typically have two kinds of generators: # - trigonometric expressions, like sin(x), cos(5*x), etc # - "everything else", like gamma(x), pi, etc. # # Since this function is trigsimp, we will concentrate on what to do with # trigonometric expressions. We can also simplify hyperbolic expressions, # but the extensions should be clear. # # One crucial point is that all *other* generators really should behave # like indeterminates. In particular if (say) "I" is one of them, then # in fact I**2 + 1 = 0 and we may and will compute nonsensical # expressions. However, we can work with a dummy and add the relation # I**2 + 1 = 0 to our ideal, then substitute back in the end. # # Now regarding trigonometric generators. We split them into groups, # according to the argument of the trigonometric functions. We want to # organise this in such a way that most trigonometric identities apply in # the same group. For example, given sin(x), cos(2*x) and cos(y), we would # group as [sin(x), cos(2*x)] and [cos(y)]. # # Our prime ideal will be built in three steps: # (1) For each group, compute a "geometrically prime" ideal of relations. # Geometrically prime means that it generates a prime ideal in # CC[gens], not just ZZ[gens]. # (2) Take the union of all the generators of the ideals for all groups. # By the geometric primality condition, this is still prime. # (3) Add further inter-group relations which preserve primality. # # Step (1) works as follows. We will isolate common factors in the # argument, so that all our generators are of the form sin(n*x), cos(n*x) # or tan(n*x), with n an integer. Suppose first there are no tan terms. # The ideal [sin(x)**2 + cos(x)**2 - 1] is geometrically prime, since # X**2 + Y**2 - 1 is irreducible over CC. # Now, if we have a generator sin(n*x), then we can, using trig identities, # express sin(n*x) as a polynomial in sin(x) and cos(x). We can add this # relation to the ideal, preserving geometric primality, since the quotient # ring is unchanged. # Thus we have treated all sin and cos terms. # For tan(n*x), we add a relation tan(n*x)*cos(n*x) - sin(n*x) = 0. # (This requires of course that we already have relations for cos(n*x) and # sin(n*x).) It is not obvious, but it seems that this preserves geometric # primality. # XXX A real proof would be nice. HELP!
# Sketch that <S**2 + C**2 - 1, C*T - S> is a prime ideal of # CC[S, C, T]: # - it suffices to show that the projective closure in CP**3 is # irreducible # - using the half-angle substitutions, we can express sin(x), tan(x), # cos(x) as rational functions in tan(x/2) # - from this, we get a rational map from CP**1 to our curve # - this is a morphism, hence the curve is prime # # Step (2) is trivial. # # Step (3) works by adding selected relations of the form # sin(x + y) - sin(x)*cos(y) - sin(y)*cos(x), etc. Geometric primality is # preserved by the same argument as before. def parse_hints(hints): """Split hints into (n, funcs, iterables, gens).""" n = 1 funcs, iterables, gens = [], [], [] for e in hints: if isinstance(e, (SYMPY_INTS, Integer)): n = e elif isinstance(e, FunctionClass): funcs.append(e) elif iterable(e): iterables.append((e[0], e[1:])) # XXX sin(x+2y)? # Note: we go through polys so e.g. # sin(-x) -> -sin(x) -> sin(x) gens.extend(parallel_poly_from_expr( [e[0](x) for x in e[1:]] + [e[0](Add(*e[1:]))])[1].gens) else: gens.append(e) return n, funcs, iterables, gens def build_ideal(x, terms): """ Build generators for our ideal. Terms is an iterable with elements of the form (fn, coeff), indicating that we have a generator fn(coeff*x). If any of the terms is trigonometric, sin(x) and cos(x) are guaranteed to appear in terms. Similarly for hyperbolic functions. For tan(n*x), sin(n*x) and cos(n*x) are guaranteed. """ I = [] y = Dummy('y') for fn, coeff in terms: for c, s, t, rel in ( [cos, sin, tan, cos(x)**2 + sin(x)**2 - 1], [cosh, sinh, tanh, cosh(x)**2 - sinh(x)**2 - 1]): if coeff == 1 and fn in [c, s]: I.append(rel) elif fn == t: I.append(t(coeff*x)*c(coeff*x) - s(coeff*x)) elif fn in [c, s]: cn = fn(coeff*y).expand(trig=True).subs(y, x) I.append(fn(coeff*x) - cn) return list(set(I)) def analyse_gens(gens, hints): """ Analyse the generators ``gens``, using the hints ``hints``. The meaning of ``hints`` is described in the main docstring. Return a new list of generators, and also the ideal we should work with. """ # First parse the hints n, funcs, iterables, extragens = parse_hints(hints) debug('n=%s' % n, 'funcs:', funcs, 'iterables:', iterables, 'extragens:', extragens) # We just add the extragens to gens and analyse them as before gens = list(gens) gens.extend(extragens) # remove duplicates funcs = list(set(funcs)) iterables = list(set(iterables)) gens = list(set(gens)) # all the functions we can do anything with allfuncs = {sin, cos, tan, sinh, cosh, tanh} # sin(3*x) -> ((3, x), sin) trigterms = [(g.args[0].as_coeff_mul(), g.func) for g in gens if g.func in allfuncs] # Our list of new generators - start with anything that we cannot # work with (i.e. is not a trigonometric term) freegens = [g for g in gens if g.func not in allfuncs] newgens = [] trigdict = {} for (coeff, var), fn in trigterms: trigdict.setdefault(var, []).append((coeff, fn)) res = [] # the ideal for key, val in trigdict.items(): # We have now assembled a dictionary. Its keys are common # arguments in trigonometric expressions, and values are lists of # pairs (fn, coeff). x0, (fn, coeff) in trigdict means that we # need to deal with fn(coeff*x0). We take the rational gcd of the # coeffs, call it ``gcd``. We then use x = x0/gcd as "base symbol", # all other arguments are integral multiples thereof. # We will build an ideal which works with sin(x), cos(x). # If hint tan is provided, also work with tan(x).
Moreover, if # n > 1, also work with sin(k*x) for k <= n, and similarly for cos # (and tan if the hint is provided). Finally, any generators which # the ideal does not work with but we need to accommodate (either # because it was in expr or because it was provided as a hint) # we also build into the ideal. # This selection process is expressed in the list ``terms``. # build_ideal then generates the actual relations in our ideal, # from this list. fns = [x[1] for x in val] val = [x[0] for x in val] gcd = reduce(igcd, val) terms = [(fn, v/gcd) for (fn, v) in zip(fns, val)] fs = set(funcs + fns) for c, s, t in ([cos, sin, tan], [cosh, sinh, tanh]): if any(x in fs for x in (c, s, t)): fs.add(c) fs.add(s) for fn in fs: for k in range(1, n + 1): terms.append((fn, k)) extra = [] for fn, v in terms: if fn == tan: extra.append((sin, v)) extra.append((cos, v)) if fn in [sin, cos] and tan in fs: extra.append((tan, v)) if fn == tanh: extra.append((sinh, v)) extra.append((cosh, v)) if fn in [sinh, cosh] and tanh in fs: extra.append((tanh, v)) terms.extend(extra) x = gcd*Mul(*key) r = build_ideal(x, terms) res.extend(r) newgens.extend(set(fn(v*x) for fn, v in terms)) # Add generators for compound expressions from iterables for fn, args in iterables: if fn == tan: # Tan expressions are recovered from sin and cos. iterables.extend([(sin, args), (cos, args)]) elif fn == tanh: # Tanh expressions are recovered from sinh and cosh. iterables.extend([(sinh, args), (cosh, args)]) else: dummys = symbols('d:%i' % len(args), cls=Dummy) expr = fn( Add(*dummys)).expand(trig=True).subs(list(zip(dummys, args))) res.append(fn(Add(*args)) - expr) if myI in gens: res.append(myI**2 + 1) freegens.remove(myI) newgens.append(myI) return res, freegens, newgens myI = Dummy('I') expr = expr.subs(S.ImaginaryUnit, myI) subs = [(myI, S.ImaginaryUnit)] num, denom = cancel(expr).as_numer_denom() try: (pnum, pdenom), opt = parallel_poly_from_expr([num, denom]) except PolificationFailed: return expr debug('initial gens:', opt.gens) ideal, freegens, gens = analyse_gens(opt.gens, hints) debug('ideal:', ideal) debug('new gens:', gens, " -- len", len(gens)) debug('free gens:', freegens, " -- len", len(freegens)) # NOTE we force the domain to be ZZ to stop polys from injecting generators # (which is usually a sign of a bug in the way we build the ideal) if not gens: return expr G = groebner(ideal, order=order, gens=gens, domain=ZZ) debug('groebner basis:', list(G), " -- len", len(G)) # If our fraction is a polynomial in the free generators, simplify all # coefficients separately: from sympy.simplify.ratsimp import ratsimpmodprime if freegens and pdenom.has_only_gens(*set(gens).intersection(pdenom.gens)): num = Poly(num, gens=gens+freegens).eject(*gens) res = [] for monom, coeff in num.terms(): ourgens = set(parallel_poly_from_expr([coeff, denom])[1].gens) # We compute the transitive closure of all generators that can # be reached from our generators through relations in the ideal. changed = True while changed: changed = False for p in ideal: p = Poly(p) if not ourgens.issuperset(p.gens) and \ not p.has_only_gens(*set(p.gens).difference(ourgens)): changed = True ourgens.update(p.exclude().gens) # NOTE preserve order! realgens = [x for x in gens if x in ourgens] # The generators of the ideal have now been (implicitly) split # into two groups: those involving ourgens and those that don't. # Since we took the transitive closure above, these two groups # live in subrings generated by a *disjoint* set of variables.
# Any sensible groebner basis algorithm will preserve this disjoint # structure (i.e. the elements of the groebner basis can be split # similarly), and the two subsets of the groebner basis then # form groebner bases by themselves. (For the smaller generating # sets, of course.) ourG = [g.as_expr() for g in G.polys if g.has_only_gens(*ourgens.intersection(g.gens))] res.append(Mul(*[a**b for a, b in zip(freegens, monom)]) * \ ratsimpmodprime(coeff/denom, ourG, order=order, gens=realgens, quick=quick, domain=ZZ, polynomial=polynomial).subs(subs)) return Add(*res) # NOTE The following is simpler and makes fewer assumptions about the # groebner basis algorithm. If the above turns out to be broken, # use this. return Add(*[Mul(*[a**b for a, b in zip(freegens, monom)]) * \ ratsimpmodprime(coeff/denom, list(G), order=order, gens=gens, quick=quick, domain=ZZ) for monom, coeff in num.terms()]) else: return ratsimpmodprime( expr, list(G), order=order, gens=freegens+gens, quick=quick, domain=ZZ, polynomial=polynomial).subs(subs)
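# Minimal sketch of the ideal-based simplification the comments above
# describe: sin(x)**2 + cos(x)**2 - 1 generates the relation ideal, so the
# Pythagorean polynomial collapses to a constant. The helper name is
# illustrative, and the expected value of 1 is an assumption about this
# particular input rather than a documented guarantee.
def _sketch_trigsimp_groebner_ideal():
    from sympy import sin, cos
    from sympy.abc import x
    from sympy.simplify.trigsimp import trigsimp_groebner

    assert trigsimp_groebner(sin(x)**2 + cos(x)**2) == 1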
def test_issue_16589(): eq = Poly( x**4 - 8 * sqrt(2) * x**3 + 4 * x**3 - 64 * sqrt(2) * x**2 + 1024 * x, x) roots_eq = roots(eq) assert 0 in roots_eq