def test_issue_6367():
    z = -5*sqrt(2)/(2*sqrt(2*sqrt(29) + 29)) + sqrt(-sqrt(29)/29 + S.Half)
    assert Mul(*[powsimp(a) for a in Mul.make_args(z.normal())]) == 0
    assert powsimp(z.normal()) == 0
    assert simplify(z) == 0
    assert powsimp(sqrt(2 + sqrt(3))*sqrt(2 - sqrt(3)) + 1) == 2
    assert powsimp(z) != 0
def test_roots_binomial():
    assert roots_binomial(Poly(5 * x, x)) == [0]
    assert roots_binomial(Poly(5 * x**4, x)) == [0, 0, 0, 0]
    assert roots_binomial(Poly(5 * x + 2, x)) == [Rational(-2, 5)]

    A = 10**Rational(3, 4) / 10

    assert roots_binomial(Poly(5*x**4 + 2, x)) == \
        [-A - A*I, -A + A*I, A - A*I, A + A*I]
    _check(roots_binomial(Poly(x**8 - 2)))

    a1 = Symbol('a1', nonnegative=True)
    b1 = Symbol('b1', nonnegative=True)

    r0 = roots_quadratic(Poly(a1 * x**2 + b1, x))
    r1 = roots_binomial(Poly(a1 * x**2 + b1, x))

    assert powsimp(r0[0]) == powsimp(r1[0])
    assert powsimp(r0[1]) == powsimp(r1[1])

    for a, b, s, n in product((1, 2), (1, 2), (-1, 1), (2, 3, 4, 5)):
        if a == b and a != 1:  # a == b == 1 is sufficient
            continue
        p = Poly(a * x**n + s * b)
        ans = roots_binomial(p)
        assert ans == _nsort(ans)

    # issue 8813
    assert roots(Poly(2 * x**3 - 16 * y**3, x)) == {
        2 * y * (Rational(-1, 2) - sqrt(3) * I / 2): 1,
        2 * y: 1,
        2 * y * (Rational(-1, 2) + sqrt(3) * I / 2): 1
    }
def doit(self, **hints):
    # first make sure any definite limits have product
    # variables with matching assumptions
    reps = {}
    for xab in self.limits:
        d = _dummy_with_inherited_properties_concrete(xab)
        if d:
            reps[xab[0]] = d
    if reps:
        undo = {v: k for k, v in reps.items()}
        did = self.xreplace(reps).doit(**hints)
        if isinstance(did, tuple):  # when separate=True
            did = tuple([i.xreplace(undo) for i in did])
        else:
            did = did.xreplace(undo)
        return did

    f = self.function
    for index, limit in enumerate(self.limits):
        i, a, b = limit
        dif = b - a
        if dif.is_integer and dif.is_negative:
            a, b = b + 1, a - 1
            f = 1 / f

        g = self._eval_product(f, (i, a, b))
        if g in (None, S.NaN):
            return self.func(powsimp(f), *self.limits[index:])
        else:
            f = g

    if hints.get('deep', True):
        return f.doit(**hints)
    else:
        return powsimp(f)
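# Illustrative sketch (not part of the original source): the negative-difference
# branch above encodes the convention Product(f, (i, a, b)) == 1/Product(f, (i, b + 1, a - 1))
# when b < a, so reversed integer limits evaluate to a reciprocal.  The concrete
# values below are an assumed example chosen to exercise that branch.
def _sketch_reversed_product_limits():
    from sympy import Product, Rational, symbols
    k = symbols('k', integer=True)
    assert Product(k, (k, 3, 4)).doit() == 12
    # reversed limits: Product(k, (k, 5, 2)) == 1/Product(k, (k, 3, 4))
    assert Product(k, (k, 5, 2)).doit() == Rational(1, 12)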
def test_issue_from_PR1599():
    n1, n2, n3, n4 = symbols('n1 n2 n3 n4', negative=True)
    assert (powsimp(sqrt(n1)*sqrt(n2)*sqrt(n3)) ==
        -I*sqrt(-n1)*sqrt(-n2)*sqrt(-n3))
    assert (powsimp(root(n1, 3)*root(n2, 3)*root(n3, 3)*root(n4, 3)) ==
        -(-1)**Rational(1, 3)*
        (-n1)**Rational(1, 3)*(-n2)**Rational(1, 3)*(-n3)**Rational(1, 3)*(-n4)**Rational(1, 3))
def test_issue_10195():
    a = Symbol('a', integer=True)
    l = Symbol('l', even=True, nonzero=True)
    n = Symbol('n', odd=True)
    e_x = (-1)**(n/2 - S.Half) - (-1)**(n*Rational(3, 2) - S.Half)
    assert powsimp((-1)**(l/2)) == I**l
    assert powsimp((-1)**(n/2)) == I**n
    assert powsimp((-1)**(n*Rational(3, 2))) == -I**n
    assert powsimp(e_x) == (-1)**(n/2 - S.Half) + (-1)**(n*Rational(3, 2) + S.Half)
    assert powsimp((-1)**(a*Rational(3, 2))) == (-I)**a
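# Illustrative sketch (not part of the original suite): for an explicit odd value
# the rewrites above can be checked by direct evaluation, e.g. with n = 3 both
# (-1)**(3/2) and I**3 evaluate to -I on the principal branch.
def _sketch_issue_10195_numeric():
    from sympy import I, Integer, Rational
    n_val = Integer(3)
    assert (-1)**(n_val/2) == I**n_val                # both sides are -I
    assert (-1)**(n_val*Rational(3, 2)) == -I**n_val  # both sides are I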
def test_issue_22546():
    p1, p2 = symbols('p1, p2', positive=True)
    ref = powsimp(p1**z/p2**z)
    e = z + 1
    ans = ref.subs(z, e)
    assert ans.is_Pow
    assert powsimp(p1**e/p2**e) == ans
    i = symbols('i', integer=True)
    ref = powsimp(x**i/y**i)
    e = i + 1
    ans = ref.subs(i, e)
    assert ans.is_Pow
    assert powsimp(x**e/y**e) == ans
def mysimp(expr):
    from sympy.core.function import expand
    from sympy.simplify.powsimp import powsimp
    from sympy.simplify.simplify import logcombine
    return expand(
        powsimp(logcombine(expr, force=True), force=True, deep=True),
        force=True).replace(exp_polar, exp)
def test_OneMatrix_power():
    o = OneMatrix(3, 3)
    assert o**0 == Identity(3)
    assert o**1 == o
    assert o * o == o**2 == 3 * o
    assert o * o * o == o**3 == 9 * o

    o = OneMatrix(n, n)
    assert o * o == o**2 == n * o
    # powsimp necessary as n ** (n - 2) * n does not produce n ** (n - 1)
    assert powsimp(o**(n - 1) * o) == o**n == n**(n - 1) * o
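# Illustrative sketch (not part of the original suite): the comment above refers
# to the scalar coefficient -- n**(n - 2)*n is not combined automatically by Mul,
# but powsimp merges the same-base powers.  The helper name is ours, not SymPy's.
def _sketch_powsimp_symbolic_exponent():
    from sympy import Symbol, powsimp
    n = Symbol('n')
    expr = n**(n - 2)*n
    assert expr != n**(n - 1)            # no automatic combination
    assert powsimp(expr) == n**(n - 1)   # powsimp adds the exponents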
def taylor_term(n, x, *previous_terms):  # of log(1+x)
    r"""
    Returns the next term in the Taylor series expansion of `\log(1+x)`.
    """
    from sympy.simplify.powsimp import powsimp
    if n < 0:
        return S.Zero
    x = sympify(x)
    if n == 0:
        return x
    if previous_terms:
        p = previous_terms[-1]
        if p is not None:
            return powsimp((-n) * p * x / (n + 1), deep=True, combine='exp')
    return (1 - 2*(n % 2)) * x**(n + 1)/(n + 1)
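# Illustrative sketch (not part of the original source): the fallback branch above
# is the closed form (-1)**n * x**(n + 1)/(n + 1); summing the first few such terms
# reproduces the truncated series of log(1 + x).
def _sketch_log_taylor_terms():
    from sympy import Add, log, symbols
    x = symbols('x')
    terms = [(1 - 2*(n % 2))*x**(n + 1)/(n + 1) for n in range(5)]
    assert Add(*terms) == log(1 + x).series(x, 0, 6).removeO()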
def test_powsimp_negated_base():
    assert powsimp((-x + y)/sqrt(x - y)) == -sqrt(x - y)
    assert powsimp((-x + y)*(-z + y)/sqrt(x - y)/sqrt(z - y)) == \
        sqrt(x - y)*sqrt(z - y)
    p = symbols('p', positive=True)
    reps = {p: 2, a: S.Half}
    assert powsimp((-p)**a/p**a).subs(reps) == ((-1)**a).subs(reps)
    assert powsimp((-p)**a*p**a).subs(reps) == ((-p**2)**a).subs(reps)
    n = symbols('n', negative=True)
    reps = {p: -2, a: S.Half}
    assert powsimp((-n)**a/n**a).subs(reps) == (-1)**(-a).subs(a, S.Half)
    assert powsimp((-n)**a*n**a).subs(reps) == ((-n**2)**a).subs(reps)
    # if x is 0 then the lhs is 0**a*oo**a which is not (-1)**a
    eq = (-x)**a/x**a
    assert powsimp(eq) == eq
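# Illustrative sketch (not part of the original suite): without assumptions on x
# the quotient (-x)**a/x**a genuinely differs from (-1)**a -- substituting a
# negative x puts the branch cut on the other factor.  The values are an assumed
# example, not taken from the test above.
def _sketch_negated_base_counterexample():
    from sympy import I, Rational, Symbol, simplify
    x, a = Symbol('x'), Rational(1, 2)
    lhs = ((-x)**a/x**a).subs(x, -4)   # sqrt(4)/sqrt(-4) = -I
    assert simplify(lhs) == -I
    assert (-1)**a == I                # so (-1)**a would be the wrong answer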
def _eval_nseries(self, x, n, logx, cdir=0):
    # NOTE Please see the comment at the beginning of this file, labelled
    # IMPORTANT.
    from sympy.functions.elementary.complexes import sign
    from sympy.functions.elementary.integers import ceiling
    from sympy.series.limits import limit
    from sympy.series.order import Order
    from sympy.simplify.powsimp import powsimp
    arg = self.exp
    arg_series = arg._eval_nseries(x, n=n, logx=logx)
    if arg_series.is_Order:
        return 1 + arg_series
    arg0 = limit(arg_series.removeO(), x, 0)
    if arg0 is S.NegativeInfinity:
        return Order(x**n, x)
    if arg0 is S.Infinity:
        return self
    # checking for indecisiveness/ sign terms in arg0
    if any(isinstance(arg, (sign, ImaginaryUnit)) for arg in arg0.args):
        return self
    t = Dummy("t")
    nterms = n
    try:
        cf = Order(arg.as_leading_term(x, logx=logx), x).getn()
    except (NotImplementedError, PoleError):
        cf = 0
    if cf and cf > 0:
        nterms = ceiling(n/cf)
    exp_series = exp(t)._taylor(t, nterms)
    r = exp(arg0)*exp_series.subs(t, arg_series - arg0)
    if cf and cf > 1:
        r += Order((arg_series - arg0)**n, x)/x**((cf - 1)*n)
    else:
        r += Order((arg_series - arg0)**n, x)
    r = r.expand()
    r = powsimp(r, deep=True, combine='exp')
    # powsimp may introduce unexpanded (-1)**Rational; see PR #17201
    simplerat = lambda x: x.is_Rational and x.q in [3, 4, 6]
    w = Wild('w', properties=[simplerat])
    r = r.replace(S.NegativeOne**w, expand_complex(S.NegativeOne**w))
    return r
def test_TR2i():
    # just a reminder that ratios of powers only simplify if both
    # numerator and denominator satisfy the condition that each
    # has a positive base or an integer exponent; e.g. the following,
    # at y=-1, x=1/2 gives sqrt(2)*I != -sqrt(2)*I
    assert powsimp(2**x / y**x) != (2 / y)**x

    assert TR2i(sin(x) / cos(x)) == tan(x)
    assert TR2i(sin(x) * sin(y) / cos(x)) == tan(x) * sin(y)
    assert TR2i(1 / (sin(x) / cos(x))) == 1 / tan(x)
    assert TR2i(1 / (sin(x) * sin(y) / cos(x))) == 1 / tan(x) / sin(y)
    assert TR2i(sin(x) / 2 / (cos(x) + 1)) == sin(x) / (cos(x) + 1) / 2

    assert TR2i(sin(x) / 2 / (cos(x) + 1), half=True) == tan(x / 2) / 2
    assert TR2i(sin(1) / (cos(1) + 1), half=True) == tan(S.Half)
    assert TR2i(sin(2) / (cos(2) + 1), half=True) == tan(1)
    assert TR2i(sin(4) / (cos(4) + 1), half=True) == tan(2)
    assert TR2i(sin(5) / (cos(5) + 1), half=True) == tan(5 * S.Half)
    assert TR2i((cos(1) + 1) / sin(1), half=True) == 1 / tan(S.Half)
    assert TR2i((cos(2) + 1) / sin(2), half=True) == 1 / tan(1)
    assert TR2i((cos(4) + 1) / sin(4), half=True) == 1 / tan(2)
    assert TR2i((cos(5) + 1) / sin(5), half=True) == 1 / tan(5 * S.Half)
    assert TR2i((cos(1) + 1)**(-a) * sin(1)**a, half=True) == tan(S.Half)**a
    assert TR2i((cos(2) + 1)**(-a) * sin(2)**a, half=True) == tan(1)**a
    assert TR2i((cos(4) + 1)**(-a) * sin(4)**a, half=True) == \
        (cos(4) + 1)**(-a) * sin(4)**a
    assert TR2i((cos(5) + 1)**(-a) * sin(5)**a, half=True) == \
        (cos(5) + 1)**(-a) * sin(5)**a
    assert TR2i((cos(1) + 1)**a * sin(1)**(-a), half=True) == tan(S.Half)**(-a)
    assert TR2i((cos(2) + 1)**a * sin(2)**(-a), half=True) == tan(1)**(-a)
    assert TR2i((cos(4) + 1)**a * sin(4)**(-a), half=True) == \
        (cos(4) + 1)**a * sin(4)**(-a)
    assert TR2i((cos(5) + 1)**a * sin(5)**(-a), half=True) == \
        (cos(5) + 1)**a * sin(5)**(-a)

    i = symbols('i', integer=True)
    assert TR2i(((cos(5) + 1)**i * sin(5)**(-i)), half=True) == tan(5 * S.Half)**(-i)
    assert TR2i(1 / ((cos(5) + 1)**i * sin(5)**(-i)), half=True) == tan(5 * S.Half)**i
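# Illustrative sketch (not part of the original suite): the counterexample in the
# comment above made explicit -- at y = -1, x = 1/2 the two sides of
# 2**x/y**x == (2/y)**x land on opposite branches of the square root.
def _sketch_power_ratio_counterexample():
    from sympy import I, Integer, Rational, sqrt
    x_val, y_val = Rational(1, 2), Integer(-1)
    lhs = 2**x_val/y_val**x_val   # sqrt(2)/I  == -sqrt(2)*I
    rhs = (2/y_val)**x_val        # sqrt(-2)   ==  sqrt(2)*I
    assert lhs == -sqrt(2)*I
    assert rhs == sqrt(2)*I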
def hypersimp(f, k):
    """Given combinatorial term f(k) simplify its consecutive term ratio
       i.e. f(k+1)/f(k).  The input term can be composed of functions and
       integer sequences which have equivalent representation in terms
       of gamma special function.

       The algorithm performs three basic steps:

       1. Rewrite all functions in terms of gamma, if possible.

       2. Rewrite all occurrences of gamma in terms of products
          of gamma and rising factorial with integer, absolute
          constant exponent.

       3. Perform simplification of nested fractions, powers
          and if the resulting expression is a quotient of
          polynomials, reduce their total degree.

       If f(k) is hypergeometric then as result we arrive with a
       quotient of polynomials of minimal degree. Otherwise None
       is returned.

       For more information on the implemented algorithm refer to:

       1. W. Koepf, Algorithms for m-fold Hypergeometric Summation,
          Journal of Symbolic Computation (1995) 20, 399-417
    """
    f = sympify(f)

    g = f.subs(k, k + 1) / f

    g = g.rewrite(gamma)
    g = expand_func(g)
    g = powsimp(g, deep=True, combine='exp')

    if g.is_rational_function(k):
        return simplify(g, ratio=S.Infinity)
    else:
        return None
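# Illustrative sketch (not part of the original source): hypersimp applied to two
# standard hypergeometric terms.  The expected outputs follow from the docstring
# above (the term ratio f(k + 1)/f(k) reduced to a rational function of k); the
# helper name is ours.
def _sketch_hypersimp_usage():
    from sympy import Symbol, factorial, hypersimp
    k = Symbol('k')
    assert hypersimp(factorial(k), k) == k + 1   # (k + 1)!/k! = k + 1
    assert hypersimp(2**k, k) == 2               # 2**(k + 1)/2**k = 2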
def __post_init__(self):
    e_o, e_i, e_m, R, l_o, l_i, l_m, d, a_1, a_2, a_3, b_1, b_2, b_3 = symbols(
        'e_o e_i e_m R l_o l_i l_m d a_1 a_2 a_3 b_1 b_2 b_3')

    e_o = self.extracellular_permittivity * eps0  # S/m
    e_i = self.intracellular_permittivity * eps0  # S/m
    e_m = self.membrane_permittivity * eps0  # S/m
    R = self.cell_diameter / 2
    self.R = R
    l_o = self.extracellular_conductivity*Spm  # S/m
    l_i = self.intracellular_conductivity*Spm  # S/m
    l_m = self.membrane_conductivity*Spm  # S/m
    d = self.membrane_thickness  # epsilon_0

    sub1, sub2 = symbols('sub1 sub2')
    sub1 = (3 * (R**2) - 3 * d * R + d**2)
    sub2 = (3 * d * R - d**2)

    a_1 = 3 * d * l_o * ((l_i * (sub1)) + l_m*(sub2))  # eq.9a
    a_2 = 3 * d * ((l_i * e_o + l_o * e_i) * sub1 + (l_m * e_o + l_o * e_m) * sub2)
    a_3 = 3 * d * e_o * (e_i * (sub1) + e_m * sub2)
    b_1 = 2 * R**3 * (l_m + 2*l_o) * (l_m + (1/2) * l_i) + 2 * (R-d)**3 * (l_m - l_o) * (l_i - l_m)
    b_2 = 2 * R**3 * (l_i * ((1/2) * e_m + e_o) + l_m * ((1/2)*e_i + 2*e_m + 2*e_o) + l_o * (e_i + 2 * e_m)) + (2 * (R - d)**3
        * (l_i * (e_m - e_o) + l_m * (e_i - 2*e_m + e_o) - l_o * (e_i - e_m)))  # is this truly a multiply, or a cross?
    b_3 = 2 * R**3 * (e_m + 2*e_o) * (e_m + (1/2) * e_i) + 2 * (R-d)**3 * (e_m - e_o) * (e_i - e_m)

    sympy.pprint(powsimp(util.quantity_simplify(refine(a_1.simplify()))).as_coeff_Mul()[1])
    print()
    sympy.pprint(powsimp(util.quantity_simplify(refine(a_2.simplify()))).as_coeff_Mul()[1])
    print()
    sympy.pprint(powsimp(util.quantity_simplify(refine(a_3.simplify()))).as_coeff_Mul()[1])
    print()
    sympy.pprint(powsimp(util.quantity_simplify(refine(b_1.simplify()))).as_coeff_Mul()[1])
    print()
    sympy.pprint(powsimp(util.quantity_simplify(refine(b_2.simplify()))).as_coeff_Mul()[1])
    print()
    sympy.pprint(powsimp(util.quantity_simplify(refine(b_3.simplify()))).as_coeff_Mul()[1])
    print()
    print()
def rubi_powsimp(expr):
    """
    This function is needed to preprocess an expression as done in matchpy:
    `x^a*x^b` is automatically transformed to `x^(a+b)` in matchpy.

    Examples
    ========

    >>> from sympy.integrals.rubi.rubimain import rubi_powsimp
    >>> from sympy.abc import a, b, x
    >>> rubi_powsimp(x**a*x**b)
    x**(a + b)

    """
    lst_pow = []
    lst_non_pow = []
    if isinstance(expr, Mul):
        for i in expr.args:
            if isinstance(i, (Pow, rubi_exp, sym_exp)):
                lst_pow.append(i)
            else:
                lst_non_pow.append(i)
        return powsimp(Mul(*lst_pow)) * Mul(*lst_non_pow)
    return expr
def test_powsimp_polar():
    from sympy.functions.elementary.complexes import polar_lift
    from sympy.functions.elementary.exponential import exp_polar
    x, y, z = symbols('x y z')
    p, q, r = symbols('p q r', polar=True)

    assert (polar_lift(-1))**(2*x) == exp_polar(2*pi*I*x)
    assert powsimp(p**x * q**x) == (p*q)**x
    assert p**x * (1/p)**x == 1
    assert (1/p)**x == p**(-x)

    assert exp_polar(x)*exp_polar(y) == exp_polar(x)*exp_polar(y)
    assert powsimp(exp_polar(x)*exp_polar(y)) == exp_polar(x + y)
    assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y) == \
        (p*exp_polar(1))**(x + y)
    assert powsimp(exp_polar(x)*exp_polar(y)*p**x*p**y, combine='exp') == \
        exp_polar(x + y)*p**(x + y)
    assert powsimp(
        exp_polar(x)*exp_polar(y)*exp_polar(2)*sin(x) + sin(y) + p**x*p**y) \
        == p**(x + y) + sin(x)*exp_polar(2 + x + y) + sin(y)

    assert powsimp(sin(exp_polar(x)*exp_polar(y))) == \
        sin(exp_polar(x)*exp_polar(y))
    assert powsimp(sin(exp_polar(x)*exp_polar(y)), deep=True) == \
        sin(exp_polar(x + y))
def rewrite(e, Omega, x, wsym): """e(x) ... the function Omega ... the mrv set wsym ... the symbol which is going to be used for w Returns the rewritten e in terms of w and log(w). See test_rewrite1() for examples and correct results. """ from sympy import ilcm if not isinstance(Omega, SubsSet): raise TypeError("Omega should be an instance of SubsSet") if len(Omega) == 0: raise ValueError("Length can not be 0") #all items in Omega must be exponentials for t in Omega.keys(): if not t.func is exp: raise ValueError("Value should be exp") rewrites = Omega.rewrites Omega = list(Omega.items()) nodes = build_expression_tree(Omega, rewrites) Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True) # make sure we know the sign of each exp() term; after the loop, # g is going to be the "w" - the simplest one in the mrv set for g, _ in Omega: sig = sign(g.args[0], x) if sig != 1 and sig != -1: raise NotImplementedError('Result depends on the sign of %s' % sig) if sig == 1: wsym = 1 / wsym # if g goes to oo, substitute 1/w #O2 is a list, which results by rewriting each item in Omega using "w" O2 = [] denominators = [] for f, var in Omega: c = limitinf(f.args[0] / g.args[0], x) if c.is_Rational: denominators.append(c.q) arg = f.args[0] if var in rewrites: if not rewrites[var].func is exp: raise ValueError("Value should be exp") arg = rewrites[var].args[0] O2.append((var, exp((arg - c * g.args[0]).expand()) * wsym**c)) #Remember that Omega contains subexpressions of "e". So now we find #them in "e" and substitute them for our rewriting, stored in O2 # the following powsimp is necessary to automatically combine exponentials, # so that the .subs() below succeeds: # TODO this should not be necessary f = powsimp(e, deep=True, combine='exp') for a, b in O2: f = f.subs(a, b) for _, var in Omega: assert not f.has(var) #finally compute the logarithm of w (logw). logw = g.args[0] if sig == 1: logw = -logw # log(w)->log(1/w)=-log(w) # Some parts of sympy have difficulty computing series expansions with # non-integral exponents. The following heuristic improves the situation: exponent = reduce(ilcm, denominators, 1) f = f.subs(wsym, wsym**exponent) logw /= exponent return f, logw
def mrv(e, x): """Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e', and e rewritten in terms of these""" e = powsimp(e, deep=True, combine='exp') if not isinstance(e, Basic): raise TypeError("e should be an instance of Basic") if not e.has(x): return SubsSet(), e elif e == x: s = SubsSet() return s, s[x] elif e.is_Mul or e.is_Add: i, d = e.as_independent(x) # throw away x-independent terms if d.func != e.func: s, expr = mrv(d, x) return s, e.func(i, expr) a, b = d.as_two_terms() s1, e1 = mrv(a, x) s2, e2 = mrv(b, x) return mrv_max1(s1, s2, e.func(i, e1, e2), x) elif e.is_Pow: b, e = e.as_base_exp() if b == 1: return SubsSet(), b if e.has(x): return mrv(exp(e * log(b)), x) else: s, expr = mrv(b, x) return s, expr**e elif e.func is log: s, expr = mrv(e.args[0], x) return s, log(expr) elif e.func is exp: # We know from the theory of this algorithm that exp(log(...)) may always # be simplified here, and doing so is vital for termination. if e.args[0].func is log: return mrv(e.args[0].args[0], x) # if a product has an infinite factor the result will be # infinite if there is no zero, otherwise NaN; here, we # consider the result infinite if any factor is infinite li = limitinf(e.args[0], x) if any(_.is_infinite for _ in Mul.make_args(li)): s1 = SubsSet() e1 = s1[e] s2, e2 = mrv(e.args[0], x) su = s1.union(s2)[0] su.rewrites[e1] = exp(e2) return mrv_max3(s1, e1, s2, exp(e2), su, e1, x) else: s, expr = mrv(e.args[0], x) return s, exp(expr) elif e.is_Function: l = [mrv(a, x) for a in e.args] l2 = [s for (s, _) in l if s != SubsSet()] if len(l2) != 1: # e.g. something like BesselJ(x, x) raise NotImplementedError("MRV set computation for functions in" " several variables not implemented.") s, ss = l2[0], SubsSet() args = [ss.do_subs(x[1]) for x in l] return s, e.func(*args) elif e.is_Derivative: raise NotImplementedError("MRV set computation for derviatives" " not implemented yet.") return mrv(e.args[0], x) raise NotImplementedError("Don't know how to calculate the mrv of '%s'" % e)
def simp_pows(expr):
    return simplify(powsimp(expand_mul(expr, deep=False),
                            force=True)).replace(exp_polar, exp)
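# Illustrative sketch (not part of the original source): simp_pows is a small
# post-processing helper -- force powsimp to combine powers and exponentials,
# then drop polar markers.  The concrete input below is an assumed example and
# the expected output follows from the powsimp polar behaviour shown elsewhere
# in this collection.
def _sketch_simp_pows_usage():
    from sympy import exp, exp_polar, symbols
    x, y = symbols('x y')
    assert simp_pows(exp_polar(x)*exp_polar(y)) == exp(x + y)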
def doit(self, **hints): """Evaluates the limit. Parameters ========== deep : bool, optional (default: True) Invoke the ``doit`` method of the expressions involved before taking the limit. hints : optional keyword arguments To be passed to ``doit`` methods; only used if deep is True. """ e, z, z0, dir = self.args if z0 is S.ComplexInfinity: raise NotImplementedError("Limits at complex " "infinity are not implemented") if hints.get('deep', True): e = e.doit(**hints) z = z.doit(**hints) z0 = z0.doit(**hints) if e == z: return z0 if not e.has(z): return e if z0 is S.NaN: return S.NaN if e.has(*_illegal): return self if e.is_Order: return Order(limit(e.expr, z, z0), *e.args[1:]) cdir = 0 if str(dir) == "+": cdir = 1 elif str(dir) == "-": cdir = -1 def set_signs(expr): if not expr.args: return expr newargs = tuple(set_signs(arg) for arg in expr.args) if newargs != expr.args: expr = expr.func(*newargs) abs_flag = isinstance(expr, Abs) sign_flag = isinstance(expr, sign) if abs_flag or sign_flag: sig = limit(expr.args[0], z, z0, dir) if sig.is_zero: sig = limit(1 / expr.args[0], z, z0, dir) if sig.is_extended_real: if (sig < 0) == True: return -expr.args[0] if abs_flag else S.NegativeOne elif (sig > 0) == True: return expr.args[0] if abs_flag else S.One return expr if e.has(Float): # Convert floats like 0.5 to exact SymPy numbers like S.Half, to # prevent rounding errors which can lead to unexpected execution # of conditional blocks that work on comparisons # Also see comments in https://github.com/sympy/sympy/issues/19453 from sympy.simplify.simplify import nsimplify e = nsimplify(e) e = set_signs(e) if e.is_meromorphic(z, z0): if abs(z0) is S.Infinity: newe = e.subs(z, 1 / z) # cdir changes sign as oo- should become 0+ cdir = -cdir else: newe = e.subs(z, z + z0) try: coeff, ex = newe.leadterm(z, cdir=cdir) except ValueError: pass else: if ex > 0: return S.Zero elif ex == 0: return coeff if cdir == 1 or not (int(ex) & 1): return S.Infinity * sign(coeff) elif cdir == -1: return S.NegativeInfinity * sign(coeff) else: return S.ComplexInfinity if abs(z0) is S.Infinity: if e.is_Mul: e = factor_terms(e) newe = e.subs(z, 1 / z) # cdir changes sign as oo- should become 0+ cdir = -cdir else: newe = e.subs(z, z + z0) try: coeff, ex = newe.leadterm(z, cdir=cdir) except (ValueError, NotImplementedError, PoleError): # The NotImplementedError catching is for custom functions from sympy.simplify.powsimp import powsimp e = powsimp(e) if e.is_Pow: r = self.pow_heuristics(e) if r is not None: return r else: if isinstance(coeff, AccumBounds) and ex == S.Zero: return coeff if coeff.has(S.Infinity, S.NegativeInfinity, S.ComplexInfinity, S.NaN): return self if not coeff.has(z): if ex.is_positive: return S.Zero elif ex == 0: return coeff elif ex.is_negative: if ex.is_integer: if cdir == 1 or ex.is_even: return S.Infinity * sign(coeff) elif cdir == -1: return S.NegativeInfinity * sign(coeff) else: return S.ComplexInfinity else: if cdir == 1: return S.Infinity * sign(coeff) elif cdir == -1: return S.Infinity * sign(coeff) * S.NegativeOne**ex else: return S.ComplexInfinity # gruntz fails on factorials but works with the gamma function # If no factorial term is present, e should remain unchanged. # factorial is defined to be zero for negative inputs (which # differs from gamma) so only rewrite for positive z0. 
if z0.is_extended_positive: e = e.rewrite(factorial, gamma) l = None try: if str(dir) == '+-': r = gruntz(e, z, z0, '+') l = gruntz(e, z, z0, '-') if l != r: raise ValueError( "The limit does not exist since " "left hand limit = %s and right hand limit = %s" % (l, r)) else: r = gruntz(e, z, z0, dir) if r is S.NaN or l is S.NaN: raise PoleError() except (PoleError, ValueError): if l is not None: raise r = heuristics(e, z, z0, dir) if r is None: return self return r
def diop_simplify(eq):
    return _mexpand(powsimp(_mexpand(eq)))
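# Illustrative sketch (not part of the original source): the expand/powsimp/expand
# round trip above normalizes an equation before Diophantine classification.  The
# input below is an assumed example; it cancels to zero after expansion.
def _sketch_diop_simplify_usage():
    from sympy.abc import x, y
    assert diop_simplify((x + y)**2 - x**2 - 2*x*y - y**2) == 0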
def is_convergent(self): r"""Checks for the convergence of a Sum. We divide the study of convergence of infinite sums and products in two parts. First Part: One part is the question whether all the terms are well defined, i.e., they are finite in a sum and also non-zero in a product. Zero is the analogy of (minus) infinity in products as :math:`e^{-\infty} = 0`. Second Part: The second part is the question of convergence after infinities, and zeros in products, have been omitted assuming that their number is finite. This means that we only consider the tail of the sum or product, starting from some point after which all terms are well defined. For example, in a sum of the form: .. math:: \sum_{1 \leq i < \infty} \frac{1}{n^2 + an + b} where a and b are numbers. The routine will return true, even if there are infinities in the term sequence (at most two). An analogous product would be: .. math:: \prod_{1 \leq i < \infty} e^{\frac{1}{n^2 + an + b}} This is how convergence is interpreted. It is concerned with what happens at the limit. Finding the bad terms is another independent matter. Note: It is responsibility of user to see that the sum or product is well defined. There are various tests employed to check the convergence like divergence test, root test, integral test, alternating series test, comparison tests, Dirichlet tests. It returns true if Sum is convergent and false if divergent and NotImplementedError if it can not be checked. References ========== .. [1] https://en.wikipedia.org/wiki/Convergence_tests Examples ======== >>> from sympy import factorial, S, Sum, Symbol, oo >>> n = Symbol('n', integer=True) >>> Sum(n/(n - 1), (n, 4, 7)).is_convergent() True >>> Sum(n/(2*n + 1), (n, 1, oo)).is_convergent() False >>> Sum(factorial(n)/5**n, (n, 1, oo)).is_convergent() False >>> Sum(1/n**(S(6)/5), (n, 1, oo)).is_convergent() True See Also ======== Sum.is_absolutely_convergent() Product.is_convergent() """ from sympy import Interval, Integral, Limit, log, symbols, Ge, Gt, simplify p, q = symbols('p q', cls=Wild) sym = self.limits[0][0] lower_limit = self.limits[0][1] upper_limit = self.limits[0][2] sequence_term = self.function if len(sequence_term.free_symbols) > 1: raise NotImplementedError("convergence checking for more than one symbol " "containing series is not handled") if lower_limit.is_finite and upper_limit.is_finite: return S.true # transform sym -> -sym and swap the upper_limit = S.Infinity # and lower_limit = - upper_limit if lower_limit is S.NegativeInfinity: if upper_limit is S.Infinity: return Sum(sequence_term, (sym, 0, S.Infinity)).is_convergent() and \ Sum(sequence_term, (sym, S.NegativeInfinity, 0)).is_convergent() sequence_term = simplify(sequence_term.xreplace({sym: -sym})) lower_limit = -upper_limit upper_limit = S.Infinity sym_ = Dummy(sym.name, integer=True, positive=True) sequence_term = sequence_term.xreplace({sym: sym_}) sym = sym_ interval = Interval(lower_limit, upper_limit) # Piecewise function handle if sequence_term.is_Piecewise: for func, cond in sequence_term.args: # see if it represents something going to oo if cond == True or cond.as_set().sup is S.Infinity: s = Sum(func, (sym, lower_limit, upper_limit)) return s.is_convergent() return S.true ### -------- Divergence test ----------- ### try: lim_val = limit(sequence_term, sym, upper_limit) if lim_val.is_number and lim_val is not S.Zero: return S.false except NotImplementedError: pass try: lim_val_abs = limit(abs(sequence_term), sym, upper_limit) if lim_val_abs.is_number and lim_val_abs is not S.Zero: 
return S.false except NotImplementedError: pass order = O(sequence_term, (sym, S.Infinity)) ### ----------- ratio test ---------------- ### next_sequence_term = sequence_term.xreplace({sym: sym + 1}) ratio = combsimp(powsimp(next_sequence_term/sequence_term)) lim_ratio = limit(ratio, sym, upper_limit) if lim_ratio.is_number: if abs(lim_ratio) > 1: return S.false if abs(lim_ratio) < 1: return S.true ### --------- p-series test (1/n**p) ---------- ### p1_series_test = order.expr.match(sym**p) if p1_series_test is not None: if p1_series_test[p] < -1: return S.true if p1_series_test[p] >= -1: return S.false p2_series_test = order.expr.match((1/sym)**p) if p2_series_test is not None: if p2_series_test[p] > 1: return S.true if p2_series_test[p] <= 1: return S.false ### ------------- Limit comparison test -----------### # (1/n) comparison try: lim_comp = limit(sym*sequence_term, sym, S.Infinity) if lim_comp.is_number and lim_comp > 0: return S.false except NotImplementedError: pass ### ----------- root test ---------------- ### lim = Limit(abs(sequence_term)**(1/sym), sym, S.Infinity) lim_evaluated = lim.doit() if lim_evaluated.is_number: if lim_evaluated < 1: return S.true if lim_evaluated > 1: return S.false ### ------------- alternating series test ----------- ### dict_val = sequence_term.match((-1)**(sym + p)*q) if not dict_val[p].has(sym) and is_decreasing(dict_val[q], interval): return S.true ### ------------- comparison test ------------- ### # (1/log(n)**p) comparison log_test = order.expr.match(1/(log(sym)**p)) if log_test is not None: return S.false # (1/(n*log(n)**p)) comparison log_n_test = order.expr.match(1/(sym*(log(sym))**p)) if log_n_test is not None: if log_n_test[p] > 1: return S.true return S.false # (1/(n*log(n)*log(log(n))*p)) comparison log_log_n_test = order.expr.match(1/(sym*(log(sym)*log(log(sym))**p))) if log_log_n_test is not None: if log_log_n_test[p] > 1: return S.true return S.false # (1/(n**p*log(n))) comparison n_log_test = order.expr.match(1/(sym**p*log(sym))) if n_log_test is not None: if n_log_test[p] > 1: return S.true return S.false ### ------------- integral test -------------- ### maxima = solveset(sequence_term.diff(sym), sym, interval) if not maxima: check_interval = interval elif isinstance(maxima, FiniteSet) and maxima.sup.is_number: check_interval = Interval(maxima.sup, interval.sup) if ( is_decreasing(sequence_term, check_interval) or is_decreasing(-sequence_term, check_interval)): integral_val = Integral( sequence_term, (sym, lower_limit, upper_limit)) try: integral_val_evaluated = integral_val.doit() if integral_val_evaluated.is_number: return S(integral_val_evaluated.is_finite) except NotImplementedError: pass ### -------------- Dirichlet tests -------------- ### if order.expr.is_Mul: a_n, b_n = order.expr.args[0], order.expr.args[1] m = Dummy('m', integer=True) def _dirichlet_test(g_n): try: ing_val = limit(Sum(g_n, (sym, interval.inf, m)).doit(), m, S.Infinity) if ing_val.is_finite: return S.true except NotImplementedError: pass if is_decreasing(a_n, interval): dirich1 = _dirichlet_test(b_n) if dirich1 is not None: return dirich1 if is_decreasing(b_n, interval): dirich2 = _dirichlet_test(a_n) if dirich2 is not None: return dirich2 _sym = self.limits[0][0] sequence_term = sequence_term.xreplace({sym: _sym}) raise NotImplementedError("The algorithm to find the Sum convergence of %s " "is not yet implemented" % (sequence_term))
def mrv(e, x): """Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e', and e rewritten in terms of these""" e = powsimp(e, deep=True, combine='exp') if not isinstance(e, Basic): raise TypeError("e should be an instance of Basic") if not e.has(x): return SubsSet(), e elif e == x: s = SubsSet() return s, s[x] elif e.is_Mul or e.is_Add: i, d = e.as_independent(x) # throw away x-independent terms if d.func != e.func: s, expr = mrv(d, x) return s, e.func(i, expr) a, b = d.as_two_terms() s1, e1 = mrv(a, x) s2, e2 = mrv(b, x) return mrv_max1(s1, s2, e.func(i, e1, e2), x) elif e.is_Pow: b, e = e.as_base_exp() if b == 1: return SubsSet(), b if e.has(x): return mrv(exp(e * log(b)), x) else: s, expr = mrv(b, x) return s, expr**e elif isinstance(e, log): s, expr = mrv(e.args[0], x) return s, log(expr) elif isinstance(e, exp): # We know from the theory of this algorithm that exp(log(...)) may always # be simplified here, and doing so is vital for termination. if isinstance(e.args[0], log): return mrv(e.args[0].args[0], x) # if a product has an infinite factor the result will be # infinite if there is no zero, otherwise NaN; here, we # consider the result infinite if any factor is infinite li = limitinf(e.args[0], x) if any(_.is_infinite for _ in Mul.make_args(li)): s1 = SubsSet() e1 = s1[e] s2, e2 = mrv(e.args[0], x) su = s1.union(s2)[0] su.rewrites[e1] = exp(e2) return mrv_max3(s1, e1, s2, exp(e2), su, e1, x) else: s, expr = mrv(e.args[0], x) return s, exp(expr) elif e.is_Function: l = [mrv(a, x) for a in e.args] l2 = [s for (s, _) in l if s != SubsSet()] if len(l2) != 1: # e.g. something like BesselJ(x, x) raise NotImplementedError("MRV set computation for functions in" " several variables not implemented.") s, ss = l2[0], SubsSet() args = [ss.do_subs(x[1]) for x in l] return s, e.func(*args) elif e.is_Derivative: raise NotImplementedError("MRV set computation for derviatives" " not implemented yet.") return mrv(e.args[0], x) raise NotImplementedError( "Don't know how to calculate the mrv of '%s'" % e)
def rewrite(e, Omega, x, wsym): """e(x) ... the function Omega ... the mrv set wsym ... the symbol which is going to be used for w Returns the rewritten e in terms of w and log(w). See test_rewrite1() for examples and correct results. """ from sympy import ilcm if not isinstance(Omega, SubsSet): raise TypeError("Omega should be an instance of SubsSet") if len(Omega) == 0: raise ValueError("Length can not be 0") # all items in Omega must be exponentials for t in Omega.keys(): if not isinstance(t, exp): raise ValueError("Value should be exp") rewrites = Omega.rewrites Omega = list(Omega.items()) nodes = build_expression_tree(Omega, rewrites) Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True) # make sure we know the sign of each exp() term; after the loop, # g is going to be the "w" - the simplest one in the mrv set for g, _ in Omega: sig = sign(g.args[0], x) if sig != 1 and sig != -1: raise NotImplementedError('Result depends on the sign of %s' % sig) if sig == 1: wsym = 1/wsym # if g goes to oo, substitute 1/w # O2 is a list, which results by rewriting each item in Omega using "w" O2 = [] denominators = [] for f, var in Omega: c = limitinf(f.args[0]/g.args[0], x) if c.is_Rational: denominators.append(c.q) arg = f.args[0] if var in rewrites: if not isinstance(rewrites[var], exp): raise ValueError("Value should be exp") arg = rewrites[var].args[0] O2.append((var, exp((arg - c*g.args[0]).expand())*wsym**c)) # Remember that Omega contains subexpressions of "e". So now we find # them in "e" and substitute them for our rewriting, stored in O2 # the following powsimp is necessary to automatically combine exponentials, # so that the .xreplace() below succeeds: # TODO this should not be necessary f = powsimp(e, deep=True, combine='exp') for a, b in O2: f = f.xreplace({a: b}) for _, var in Omega: assert not f.has(var) # finally compute the logarithm of w (logw). logw = g.args[0] if sig == 1: logw = -logw # log(w)->log(1/w)=-log(w) # Some parts of sympy have difficulty computing series expansions with # non-integral exponents. The following heuristic improves the situation: exponent = reduce(ilcm, denominators, 1) f = f.xreplace({wsym: wsym**exponent}) logw /= exponent return f, logw
def contains(self, expr): r""" Return True if expr belongs to Order(self.expr, \*self.variables). Return False if self belongs to expr. Return None if the inclusion relation cannot be determined (e.g. when self and expr have different symbols). """ from sympy.simplify.powsimp import powsimp expr = sympify(expr) if expr.is_zero: return True if expr is S.NaN: return False point = self.point[0] if self.point else S.Zero if expr.is_Order: if (any(p != point for p in expr.point) or any(p != point for p in self.point)): return None if expr.expr == self.expr: # O(1) + O(1), O(1) + O(1, x), etc. return all(x in self.args[1:] for x in expr.args[1:]) if expr.expr.is_Add: return all(self.contains(x) for x in expr.expr.args) if self.expr.is_Add and point.is_zero: return any( self.func(x, *self.args[1:]).contains(expr) for x in self.expr.args) if self.variables and expr.variables: common_symbols = tuple( [s for s in self.variables if s in expr.variables]) elif self.variables: common_symbols = self.variables else: common_symbols = expr.variables if not common_symbols: return None if (self.expr.is_Pow and len(self.variables) == 1 and self.variables == expr.variables): symbol = self.variables[0] other = expr.expr.as_independent(symbol, as_Add=False)[1] if (other.is_Pow and other.base == symbol and self.expr.base == symbol): if point.is_zero: rv = (self.expr.exp - other.exp).is_nonpositive if point.is_infinite: rv = (self.expr.exp - other.exp).is_nonnegative if rv is not None: return rv r = None ratio = self.expr / expr.expr ratio = powsimp(ratio, deep=True, combine='exp') for s in common_symbols: from sympy.series.limits import Limit l = Limit(ratio, s, point).doit(heuristics=False) if not isinstance(l, Limit): l = l != 0 else: l = None if r is None: r = l else: if r != l: return return r if self.expr.is_Pow and len(self.variables) == 1: symbol = self.variables[0] other = expr.as_independent(symbol, as_Add=False)[1] if (other.is_Pow and other.base == symbol and self.expr.base == symbol): if point.is_zero: rv = (self.expr.exp - other.exp).is_nonpositive if point.is_infinite: rv = (self.expr.exp - other.exp).is_nonnegative if rv is not None: return rv obj = self.func(expr, *self.args[1:]) return self.contains(obj)
def is_convergent(self): r"""Checks for the convergence of a Sum. We divide the study of convergence of infinite sums and products in two parts. First Part: One part is the question whether all the terms are well defined, i.e., they are finite in a sum and also non-zero in a product. Zero is the analogy of (minus) infinity in products as :math:`e^{-\infty} = 0`. Second Part: The second part is the question of convergence after infinities, and zeros in products, have been omitted assuming that their number is finite. This means that we only consider the tail of the sum or product, starting from some point after which all terms are well defined. For example, in a sum of the form: .. math:: \sum_{1 \leq i < \infty} \frac{1}{n^2 + an + b} where a and b are numbers. The routine will return true, even if there are infinities in the term sequence (at most two). An analogous product would be: .. math:: \prod_{1 \leq i < \infty} e^{\frac{1}{n^2 + an + b}} This is how convergence is interpreted. It is concerned with what happens at the limit. Finding the bad terms is another independent matter. Note: It is responsibility of user to see that the sum or product is well defined. There are various tests employed to check the convergence like divergence test, root test, integral test, alternating series test, comparison tests, Dirichlet tests. It returns true if Sum is convergent and false if divergent and NotImplementedError if it can not be checked. References ========== .. [1] https://en.wikipedia.org/wiki/Convergence_tests Examples ======== >>> from sympy import factorial, S, Sum, Symbol, oo >>> n = Symbol('n', integer=True) >>> Sum(n/(n - 1), (n, 4, 7)).is_convergent() True >>> Sum(n/(2*n + 1), (n, 1, oo)).is_convergent() False >>> Sum(factorial(n)/5**n, (n, 1, oo)).is_convergent() False >>> Sum(1/n**(S(6)/5), (n, 1, oo)).is_convergent() True See Also ======== Sum.is_absolutely_convergent() Product.is_convergent() """ from sympy import Interval, Integral, Limit, log, symbols, Ge, Gt, simplify p, q = symbols('p q', cls=Wild) sym = self.limits[0][0] lower_limit = self.limits[0][1] upper_limit = self.limits[0][2] sequence_term = self.function if len(sequence_term.free_symbols) > 1: raise NotImplementedError( "convergence checking for more than one symbol " "containing series is not handled") if lower_limit.is_finite and upper_limit.is_finite: return S.true # transform sym -> -sym and swap the upper_limit = S.Infinity # and lower_limit = - upper_limit if lower_limit is S.NegativeInfinity: if upper_limit is S.Infinity: return Sum(sequence_term, (sym, 0, S.Infinity)).is_convergent() and \ Sum(sequence_term, (sym, S.NegativeInfinity, 0)).is_convergent() sequence_term = simplify(sequence_term.xreplace({sym: -sym})) lower_limit = -upper_limit upper_limit = S.Infinity sym_ = Dummy(sym.name, integer=True, positive=True) sequence_term = sequence_term.xreplace({sym: sym_}) sym = sym_ interval = Interval(lower_limit, upper_limit) # Piecewise function handle if sequence_term.is_Piecewise: for func, cond in sequence_term.args: # see if it represents something going to oo if cond == True or cond.as_set().sup is S.Infinity: s = Sum(func, (sym, lower_limit, upper_limit)) return s.is_convergent() return S.true ### -------- Divergence test ----------- ### try: lim_val = limit(sequence_term, sym, upper_limit) if lim_val.is_number and lim_val is not S.Zero: return S.false except NotImplementedError: pass try: lim_val_abs = limit(abs(sequence_term), sym, upper_limit) if lim_val_abs.is_number and lim_val_abs is not S.Zero: 
return S.false except NotImplementedError: pass order = O(sequence_term, (sym, S.Infinity)) ### ----------- ratio test ---------------- ### next_sequence_term = sequence_term.xreplace({sym: sym + 1}) ratio = combsimp(powsimp(next_sequence_term / sequence_term)) lim_ratio = limit(ratio, sym, upper_limit) if abs(lim_ratio) > 1: return S.false if abs(lim_ratio) < 1: return S.true ### --------- p-series test (1/n**p) ---------- ### p1_series_test = order.expr.match(sym**p) if p1_series_test is not None: if p1_series_test[p] < -1: return S.true if p1_series_test[p] >= -1: return S.false p2_series_test = order.expr.match((1 / sym)**p) if p2_series_test is not None: if p2_series_test[p] > 1: return S.true if p2_series_test[p] <= 1: return S.false ### ----------- root test ---------------- ### lim = Limit(abs(sequence_term)**(1 / sym), sym, S.Infinity) lim_evaluated = lim.doit() if lim_evaluated.is_number: if lim_evaluated < 1: return S.true if lim_evaluated > 1: return S.false ### ------------- alternating series test ----------- ### dict_val = sequence_term.match((-1)**(sym + p) * q) if not dict_val[p].has(sym) and is_decreasing(dict_val[q], interval): return S.true ### ------------- comparison test ------------- ### # (1/log(n)**p) comparison log_test = order.expr.match(1 / (log(sym)**p)) if log_test is not None: return S.false # (1/(n*log(n)**p)) comparison log_n_test = order.expr.match(1 / (sym * (log(sym))**p)) if log_n_test is not None: if log_n_test[p] > 1: return S.true return S.false # (1/(n*log(n)*log(log(n))*p)) comparison log_log_n_test = order.expr.match(1 / (sym * (log(sym) * log(log(sym))**p))) if log_log_n_test is not None: if log_log_n_test[p] > 1: return S.true return S.false # (1/(n**p*log(n))) comparison n_log_test = order.expr.match(1 / (sym**p * log(sym))) if n_log_test is not None: if n_log_test[p] > 1: return S.true return S.false ### ------------- integral test -------------- ### maxima = solveset(sequence_term.diff(sym), sym, interval) if not maxima: check_interval = interval elif isinstance(maxima, FiniteSet) and maxima.sup.is_number: check_interval = Interval(maxima.sup, interval.sup) if (is_decreasing(sequence_term, check_interval) or is_decreasing(-sequence_term, check_interval)): integral_val = Integral(sequence_term, (sym, lower_limit, upper_limit)) try: integral_val_evaluated = integral_val.doit() if integral_val_evaluated.is_number: return S(integral_val_evaluated.is_finite) except NotImplementedError: pass ### -------------- Dirichlet tests -------------- ### if order.expr.is_Mul: a_n, b_n = order.expr.args[0], order.expr.args[1] m = Dummy('m', integer=True) def _dirichlet_test(g_n): try: ing_val = limit( Sum(g_n, (sym, interval.inf, m)).doit(), m, S.Infinity) if ing_val.is_finite: return S.true except NotImplementedError: pass if is_decreasing(a_n, interval): dirich1 = _dirichlet_test(b_n) if dirich1 is not None: return dirich1 if is_decreasing(b_n, interval): dirich2 = _dirichlet_test(a_n) if dirich2 is not None: return dirich2 _sym = self.limits[0][0] sequence_term = sequence_term.xreplace({sym: _sym}) raise NotImplementedError( "The algorithm to find the Sum convergence of %s " "is not yet implemented" % (sequence_term))
def handle(expr): # Handle first reduces to the case # expr = 1/d, where d is an add, or d is base**p/2. # We do this by recursively calling handle on each piece. from sympy.simplify.simplify import nsimplify n, d = fraction(expr) if expr.is_Atom or (d.is_Atom and n.is_Atom): return expr elif not n.is_Atom: n = n.func(*[handle(a) for a in n.args]) return _unevaluated_Mul(n, handle(1/d)) elif n is not S.One: return _unevaluated_Mul(n, handle(1/d)) elif d.is_Mul: return _unevaluated_Mul(*[handle(1/d) for d in d.args]) # By this step, expr is 1/d, and d is not a mul. if not symbolic and d.free_symbols: return expr if ispow2(d): d2 = sqrtdenest(sqrt(d.base))**numer(d.exp) if d2 != d: return handle(1/d2) elif d.is_Pow and (d.exp.is_integer or d.base.is_positive): # (1/d**i) = (1/d)**i return handle(1/d.base)**d.exp if not (d.is_Add or ispow2(d)): return 1/d.func(*[handle(a) for a in d.args]) # handle 1/d treating d as an Add (though it may not be) keep = True # keep changes that are made # flatten it and collect radicals after checking for special # conditions d = _mexpand(d) # did it change? if d.is_Atom: return 1/d # is it a number that might be handled easily? if d.is_number: _d = nsimplify(d) if _d.is_Number and _d.equals(d): return 1/_d while True: # collect similar terms collected = defaultdict(list) for m in Add.make_args(d): # d might have become non-Add p2 = [] other = [] for i in Mul.make_args(m): if ispow2(i, log2=True): p2.append(i.base if i.exp is S.Half else i.base**(2*i.exp)) elif i is S.ImaginaryUnit: p2.append(S.NegativeOne) else: other.append(i) collected[tuple(ordered(p2))].append(Mul(*other)) rterms = list(ordered(list(collected.items()))) rterms = [(Mul(*i), Add(*j)) for i, j in rterms] nrad = len(rterms) - (1 if rterms[0][0] is S.One else 0) if nrad < 1: break elif nrad > max_terms: # there may have been invalid operations leading to this point # so don't keep changes, e.g. this expression is troublesome # in collecting terms so as not to raise the issue of 2834: # r = sqrt(sqrt(5) + 5) # eq = 1/(sqrt(5)*r + 2*sqrt(5)*sqrt(-sqrt(5) + 5) + 5*r) keep = False break if len(rterms) > 4: # in general, only 4 terms can be removed with repeated squaring # but other considerations can guide selection of radical terms # so that radicals are removed if all([x.is_Integer and (y**2).is_Rational for x, y in rterms]): nd, d = rad_rationalize(S.One, Add._from_args( [sqrt(x)*y for x, y in rterms])) n *= nd else: # is there anything else that might be attempted? keep = False break from sympy.simplify.powsimp import powsimp, powdenest num = powsimp(_num(rterms)) n *= num d *= num d = powdenest(_mexpand(d), force=symbolic) if d.is_Atom: break if not keep: return expr return _unevaluated_Mul(n, 1/d)
def test_issue_17524():
    a = symbols("a", real=True)
    e = (-1 - a**2)*sqrt(1 + a**2)
    assert signsimp(powsimp(e)) == signsimp(e) == -(a**2 + 1)**(S(3)/2)
def simplify(expr, ratio=1.7, measure=count_ops, rational=False): # type: (object, object, object, object) -> object """ Simplifies the given expression. Simplification is not a well defined term and the exact strategies this function tries can change in the future versions of SymPy. If your algorithm relies on "simplification" (whatever it is), try to determine what you need exactly - is it powsimp()?, radsimp()?, together()?, logcombine()?, or something else? And use this particular function directly, because those are well defined and thus your algorithm will be robust. Nonetheless, especially for interactive use, or when you don't know anything about the structure of the expression, simplify() tries to apply intelligent heuristics to make the input expression "simpler". For example: >>> from sympy import simplify, cos, sin >>> from sympy.abc import x, y >>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2) >>> a (x**2 + x)/(x*sin(y)**2 + x*cos(y)**2) >>> simplify(a) x + 1 Note that we could have obtained the same result by using specific simplification functions: >>> from sympy import trigsimp, cancel >>> trigsimp(a) (x**2 + x)/x >>> cancel(_) x + 1 In some cases, applying :func:`simplify` may actually result in some more complicated expression. The default ``ratio=1.7`` prevents more extreme cases: if (result length)/(input length) > ratio, then input is returned unmodified. The ``measure`` parameter lets you specify the function used to determine how complex an expression is. The function should take a single argument as an expression and return a number such that if expression ``a`` is more complex than expression ``b``, then ``measure(a) > measure(b)``. The default measure function is :func:`count_ops`, which returns the total number of operations in the expression. For example, if ``ratio=1``, ``simplify`` output can't be longer than input. :: >>> from sympy import sqrt, simplify, count_ops, oo >>> root = 1/(sqrt(2)+3) Since ``simplify(root)`` would result in a slightly longer expression, root is returned unchanged instead:: >>> simplify(root, ratio=1) == root True If ``ratio=oo``, simplify will be applied anyway:: >>> count_ops(simplify(root, ratio=oo)) > count_ops(root) True Note that the shortest expression is not necessary the simplest, so setting ``ratio`` to 1 may not be a good idea. Heuristically, the default value ``ratio=1.7`` seems like a reasonable choice. You can easily define your own measure function based on what you feel should represent the "size" or "complexity" of the input expression. Note that some choices, such as ``lambda expr: len(str(expr))`` may appear to be good metrics, but have other problems (in this case, the measure function may slow down simplify too much for very large expressions). If you don't know what a good metric would be, the default, ``count_ops``, is a good one. For example: >>> from sympy import symbols, log >>> a, b = symbols('a b', positive=True) >>> g = log(a) + log(b) + log(a)*log(1/b) >>> h = simplify(g) >>> h log(a*b**(-log(a) + 1)) >>> count_ops(g) 8 >>> count_ops(h) 5 So you can see that ``h`` is simpler than ``g`` using the count_ops metric. However, we may not like how ``simplify`` (in this case, using ``logcombine``) has created the ``b**(log(1/a) + 1)`` term. A simple way to reduce this would be to give more weight to powers as operations in ``count_ops``. 
We can do this by using the ``visual=True`` option: >>> print(count_ops(g, visual=True)) 2*ADD + DIV + 4*LOG + MUL >>> print(count_ops(h, visual=True)) 2*LOG + MUL + POW + SUB >>> from sympy import Symbol, S >>> def my_measure(expr): ... POW = Symbol('POW') ... # Discourage powers by giving POW a weight of 10 ... count = count_ops(expr, visual=True).subs(POW, 10) ... # Every other operation gets a weight of 1 (the default) ... count = count.replace(Symbol, type(S.One)) ... return count >>> my_measure(g) 8 >>> my_measure(h) 14 >>> 15./8 > 1.7 # 1.7 is the default ratio True >>> simplify(g, measure=my_measure) -log(a)*log(b) + log(a) + log(b) Note that because ``simplify()`` internally tries many different simplification strategies and then compares them using the measure function, we get a completely different result that is still different from the input expression by doing this. If rational=True, Floats will be recast as Rationals before simplification. If rational=None, Floats will be recast as Rationals but the result will be recast as Floats. If rational=False(default) then nothing will be done to the Floats. """ expr = sympify(expr) try: return expr._eval_simplify(ratio=ratio, measure=measure) except AttributeError: pass original_expr = expr = signsimp(expr) from sympy.simplify.hyperexpand import hyperexpand from sympy.functions.special.bessel import BesselBase from sympy import Sum, Product if not isinstance(expr, Basic) or not expr.args: # XXX: temporary hack return expr if not isinstance(expr, (Add, Mul, Pow, ExpBase)): if isinstance(expr, Function) and hasattr(expr, "inverse"): if len(expr.args) == 1 and len(expr.args[0].args) == 1 and \ isinstance(expr.args[0], expr.inverse(argindex=1)): return simplify(expr.args[0].args[0], ratio=ratio, measure=measure, rational=rational) return expr.func(*[simplify(x, ratio=ratio, measure=measure, rational=rational) for x in expr.args]) # TODO: Apply different strategies, considering expression pattern: # is it a purely rational function? Is there any trigonometric function?... # See also https://github.com/sympy/sympy/pull/185. def shorter(*choices): '''Return the choice that has the fewest ops. 
In case of a tie, the expression listed first is selected.''' if not has_variety(choices): return choices[0] return min(choices, key=measure) # rationalize Floats floats = False if rational is not False and expr.has(Float): floats = True expr = nsimplify(expr, rational=True) expr = bottom_up(expr, lambda w: w.normal()) expr = Mul(*powsimp(expr).as_content_primitive()) _e = cancel(expr) expr1 = shorter(_e, _mexpand(_e).cancel()) # issue 6829 expr2 = shorter(together(expr, deep=True), together(expr1, deep=True)) if ratio is S.Infinity: expr = expr2 else: expr = shorter(expr2, expr1, expr) if not isinstance(expr, Basic): # XXX: temporary hack return expr expr = factor_terms(expr, sign=False) # hyperexpand automatically only works on hypergeometric terms expr = hyperexpand(expr) expr = piecewise_fold(expr) if expr.has(BesselBase): expr = besselsimp(expr) if expr.has(TrigonometricFunction, HyperbolicFunction): expr = trigsimp(expr, deep=True) if expr.has(log): expr = shorter(expand_log(expr, deep=True), logcombine(expr)) if expr.has(CombinatorialFunction, gamma): # expression with gamma functions or non-integer arguments is # automatically passed to gammasimp expr = combsimp(expr) if expr.has(Sum): expr = sum_simplify(expr) if expr.has(Product): expr = product_simplify(expr) short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr), expr) short = shorter(short, cancel(short)) short = shorter(short, factor_terms(short), expand_power_exp(expand_mul(short))) if short.has(TrigonometricFunction, HyperbolicFunction, ExpBase): short = exptrigsimp(short) # get rid of hollow 2-arg Mul factorization hollow_mul = Transform( lambda x: Mul(*x.args), lambda x: x.is_Mul and len(x.args) == 2 and x.args[0].is_Number and x.args[1].is_Add and x.is_commutative) expr = short.xreplace(hollow_mul) numer, denom = expr.as_numer_denom() if denom.is_Add: n, d = fraction(radsimp(1/denom, symbolic=False, max_terms=1)) if n is not S.One: expr = (numer*n).expand()/d if expr.could_extract_minus_sign(): n, d = fraction(expr) if d != 0: expr = signsimp(-n/(-d)) if measure(expr) > ratio*measure(original_expr): expr = original_expr # restore floats if floats and rational is None: expr = nfloat(expr, exponent=False) return expr
def simplify(expr, ratio=1.7, measure=count_ops, fu=False): """ Simplifies the given expression. Simplification is not a well defined term and the exact strategies this function tries can change in the future versions of SymPy. If your algorithm relies on "simplification" (whatever it is), try to determine what you need exactly - is it powsimp()?, radsimp()?, together()?, logcombine()?, or something else? And use this particular function directly, because those are well defined and thus your algorithm will be robust. Nonetheless, especially for interactive use, or when you don't know anything about the structure of the expression, simplify() tries to apply intelligent heuristics to make the input expression "simpler". For example: >>> from sympy import simplify, cos, sin >>> from sympy.abc import x, y >>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2) >>> a (x**2 + x)/(x*sin(y)**2 + x*cos(y)**2) >>> simplify(a) x + 1 Note that we could have obtained the same result by using specific simplification functions: >>> from sympy import trigsimp, cancel >>> trigsimp(a) (x**2 + x)/x >>> cancel(_) x + 1 In some cases, applying :func:`simplify` may actually result in some more complicated expression. The default ``ratio=1.7`` prevents more extreme cases: if (result length)/(input length) > ratio, then input is returned unmodified. The ``measure`` parameter lets you specify the function used to determine how complex an expression is. The function should take a single argument as an expression and return a number such that if expression ``a`` is more complex than expression ``b``, then ``measure(a) > measure(b)``. The default measure function is :func:`count_ops`, which returns the total number of operations in the expression. For example, if ``ratio=1``, ``simplify`` output can't be longer than input. :: >>> from sympy import sqrt, simplify, count_ops, oo >>> root = 1/(sqrt(2)+3) Since ``simplify(root)`` would result in a slightly longer expression, root is returned unchanged instead:: >>> simplify(root, ratio=1) == root True If ``ratio=oo``, simplify will be applied anyway:: >>> count_ops(simplify(root, ratio=oo)) > count_ops(root) True Note that the shortest expression is not necessary the simplest, so setting ``ratio`` to 1 may not be a good idea. Heuristically, the default value ``ratio=1.7`` seems like a reasonable choice. You can easily define your own measure function based on what you feel should represent the "size" or "complexity" of the input expression. Note that some choices, such as ``lambda expr: len(str(expr))`` may appear to be good metrics, but have other problems (in this case, the measure function may slow down simplify too much for very large expressions). If you don't know what a good metric would be, the default, ``count_ops``, is a good one. For example: >>> from sympy import symbols, log >>> a, b = symbols('a b', positive=True) >>> g = log(a) + log(b) + log(a)*log(1/b) >>> h = simplify(g) >>> h log(a*b**(-log(a) + 1)) >>> count_ops(g) 8 >>> count_ops(h) 5 So you can see that ``h`` is simpler than ``g`` using the count_ops metric. However, we may not like how ``simplify`` (in this case, using ``logcombine``) has created the ``b**(log(1/a) + 1)`` term. A simple way to reduce this would be to give more weight to powers as operations in ``count_ops``. 
We can do this by using the ``visual=True`` option: >>> print(count_ops(g, visual=True)) 2*ADD + DIV + 4*LOG + MUL >>> print(count_ops(h, visual=True)) 2*LOG + MUL + POW + SUB >>> from sympy import Symbol, S >>> def my_measure(expr): ... POW = Symbol('POW') ... # Discourage powers by giving POW a weight of 10 ... count = count_ops(expr, visual=True).subs(POW, 10) ... # Every other operation gets a weight of 1 (the default) ... count = count.replace(Symbol, type(S.One)) ... return count >>> my_measure(g) 8 >>> my_measure(h) 14 >>> 15./8 > 1.7 # 1.7 is the default ratio True >>> simplify(g, measure=my_measure) -log(a)*log(b) + log(a) + log(b) Note that because ``simplify()`` internally tries many different simplification strategies and then compares them using the measure function, we get a completely different result that is still different from the input expression by doing this. """ expr = sympify(expr) try: return expr._eval_simplify(ratio=ratio, measure=measure) except AttributeError: pass original_expr = expr = signsimp(expr) from sympy.simplify.hyperexpand import hyperexpand from sympy.functions.special.bessel import BesselBase from sympy import Sum, Product if not isinstance(expr, Basic) or not expr.args: # XXX: temporary hack return expr if not isinstance(expr, (Add, Mul, Pow, ExpBase)): if isinstance(expr, Function) and hasattr(expr, "inverse"): if len(expr.args) == 1 and len(expr.args[0].args) == 1 and \ isinstance(expr.args[0], expr.inverse(argindex=1)): return simplify(expr.args[0].args[0], ratio=ratio, measure=measure, fu=fu) return expr.func(*[simplify(x, ratio=ratio, measure=measure, fu=fu) for x in expr.args]) # TODO: Apply different strategies, considering expression pattern: # is it a purely rational function? Is there any trigonometric function?... # See also https://github.com/sympy/sympy/pull/185. def shorter(*choices): '''Return the choice that has the fewest ops. 
In case of a tie, the expression listed first is selected.''' if not has_variety(choices): return choices[0] return min(choices, key=measure) expr = bottom_up(expr, lambda w: w.normal()) expr = Mul(*powsimp(expr).as_content_primitive()) _e = cancel(expr) expr1 = shorter(_e, _mexpand(_e).cancel()) # issue 6829 expr2 = shorter(together(expr, deep=True), together(expr1, deep=True)) if ratio is S.Infinity: expr = expr2 else: expr = shorter(expr2, expr1, expr) if not isinstance(expr, Basic): # XXX: temporary hack return expr expr = factor_terms(expr, sign=False) # hyperexpand automatically only works on hypergeometric terms expr = hyperexpand(expr) expr = piecewise_fold(expr) if expr.has(BesselBase): expr = besselsimp(expr) if expr.has(TrigonometricFunction) and not fu or expr.has( HyperbolicFunction): expr = trigsimp(expr, deep=True) if expr.has(log): expr = shorter(expand_log(expr, deep=True), logcombine(expr)) if expr.has(CombinatorialFunction, gamma): expr = combsimp(expr) if expr.has(Sum): expr = sum_simplify(expr) if expr.has(Product): expr = product_simplify(expr) short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr), expr) short = shorter(short, factor_terms(short), expand_power_exp(expand_mul(short))) if short.has(TrigonometricFunction, HyperbolicFunction, ExpBase): short = exptrigsimp(short, simplify=False) # get rid of hollow 2-arg Mul factorization hollow_mul = Transform( lambda x: Mul(*x.args), lambda x: x.is_Mul and len(x.args) == 2 and x.args[0].is_Number and x.args[1].is_Add and x.is_commutative) expr = short.xreplace(hollow_mul) numer, denom = expr.as_numer_denom() if denom.is_Add: n, d = fraction(radsimp(1/denom, symbolic=False, max_terms=1)) if n is not S.One: expr = (numer*n).expand()/d if expr.could_extract_minus_sign(): n, d = fraction(expr) if d != 0: expr = signsimp(-n/(-d)) if measure(expr) > ratio*measure(original_expr): expr = original_expr return expr
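# Illustrative sketch (not part of the original source): the ratio/measure
# contract described in the docstring above can be checked directly. For the
# docstring's own example input, the returned expression should never exceed
# ratio*measure(input) under the default measure; otherwise the input itself
# is handed back.
from sympy import count_ops, simplify, sqrt

_expr = 1/(sqrt(2) + 3)
_result = simplify(_expr)  # default ratio=1.7, measure=count_ops
# Either the rationalized form passed the size check, or the original was
# returned; both satisfy the documented bound for this input.
assert count_ops(_result) <= 1.7*count_ops(_expr)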
def test_probability(): # various integrals from probability theory from sympy.core.function import expand_mul from sympy.core.symbol import (Symbol, symbols) from sympy.functions.elementary.complexes import Abs from sympy.simplify.gammasimp import gammasimp from sympy.simplify.powsimp import powsimp mu1, mu2 = symbols('mu1 mu2', nonzero=True) sigma1, sigma2 = symbols('sigma1 sigma2', positive=True) rate = Symbol('lambda', positive=True) def normal(x, mu, sigma): return 1 / sqrt(2 * pi * sigma**2) * exp(-(x - mu)**2 / 2 / sigma**2) def exponential(x, rate): return rate * exp(-rate * x) assert integrate(normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == 1 assert integrate(x*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) == \ mu1 assert integrate(x**2*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \ == mu1**2 + sigma1**2 assert integrate(x**3*normal(x, mu1, sigma1), (x, -oo, oo), meijerg=True) \ == mu1**3 + 3*mu1*sigma1**2 assert integrate(normal(x, mu1, sigma1) * normal(y, mu2, sigma2), (x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 assert integrate(x * normal(x, mu1, sigma1) * normal(y, mu2, sigma2), (x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1 assert integrate(y * normal(x, mu1, sigma1) * normal(y, mu2, sigma2), (x, -oo, oo), (y, -oo, oo), meijerg=True) == mu2 assert integrate(x * y * normal(x, mu1, sigma1) * normal(y, mu2, sigma2), (x, -oo, oo), (y, -oo, oo), meijerg=True) == mu1 * mu2 assert integrate( (x + y + 1) * normal(x, mu1, sigma1) * normal(y, mu2, sigma2), (x, -oo, oo), (y, -oo, oo), meijerg=True) == 1 + mu1 + mu2 assert integrate((x + y - 1)*normal(x, mu1, sigma1)*normal(y, mu2, sigma2), (x, -oo, oo), (y, -oo, oo), meijerg=True) == \ -1 + mu1 + mu2 i = integrate(x**2 * normal(x, mu1, sigma1) * normal(y, mu2, sigma2), (x, -oo, oo), (y, -oo, oo), meijerg=True) assert not i.has(Abs) assert simplify(i) == mu1**2 + sigma1**2 assert integrate(y**2*normal(x, mu1, sigma1)*normal(y, mu2, sigma2), (x, -oo, oo), (y, -oo, oo), meijerg=True) == \ sigma2**2 + mu2**2 assert integrate(exponential(x, rate), (x, 0, oo), meijerg=True) == 1 assert integrate(x*exponential(x, rate), (x, 0, oo), meijerg=True) == \ 1/rate assert integrate(x**2*exponential(x, rate), (x, 0, oo), meijerg=True) == \ 2/rate**2 def E(expr): res1 = integrate(expr * exponential(x, rate) * normal(y, mu1, sigma1), (x, 0, oo), (y, -oo, oo), meijerg=True) res2 = integrate(expr * exponential(x, rate) * normal(y, mu1, sigma1), (y, -oo, oo), (x, 0, oo), meijerg=True) assert expand_mul(res1) == expand_mul(res2) return res1 assert E(1) == 1 assert E(x * y) == mu1 / rate assert E(x * y**2) == mu1**2 / rate + sigma1**2 / rate ans = sigma1**2 + 1 / rate**2 assert simplify(E((x + y + 1)**2) - E(x + y + 1)**2) == ans assert simplify(E((x + y - 1)**2) - E(x + y - 1)**2) == ans assert simplify(E((x + y)**2) - E(x + y)**2) == ans # Beta' distribution alpha, beta = symbols('alpha beta', positive=True) betadist = x**(alpha - 1)*(1 + x)**(-alpha - beta)*gamma(alpha + beta) \ /gamma(alpha)/gamma(beta) assert integrate(betadist, (x, 0, oo), meijerg=True) == 1 i = integrate(x * betadist, (x, 0, oo), meijerg=True, conds='separate') assert (gammasimp(i[0]), i[1]) == (alpha / (beta - 1), 1 < beta) j = integrate(x**2 * betadist, (x, 0, oo), meijerg=True, conds='separate') assert j[1] == (beta > 2) assert gammasimp(j[0] - i[0]**2) == (alpha + beta - 1)*alpha \ /(beta - 2)/(beta - 1)**2 # Beta distribution # NOTE: this is evaluated using antiderivatives. It also tests that # meijerint_indefinite returns the simplest possible answer. 
a, b = symbols('a b', positive=True) betadist = x**(a - 1) * (-x + 1)**(b - 1) * gamma(a + b) / (gamma(a) * gamma(b)) assert simplify(integrate(betadist, (x, 0, 1), meijerg=True)) == 1 assert simplify(integrate(x*betadist, (x, 0, 1), meijerg=True)) == \ a/(a + b) assert simplify(integrate(x**2*betadist, (x, 0, 1), meijerg=True)) == \ a*(a + 1)/(a + b)/(a + b + 1) assert simplify(integrate(x**y*betadist, (x, 0, 1), meijerg=True)) == \ gamma(a + b)*gamma(a + y)/gamma(a)/gamma(a + b + y) # Chi distribution k = Symbol('k', integer=True, positive=True) chi = 2**(1 - k / 2) * x**(k - 1) * exp(-x**2 / 2) / gamma(k / 2) assert powsimp(integrate(chi, (x, 0, oo), meijerg=True)) == 1 assert simplify(integrate(x*chi, (x, 0, oo), meijerg=True)) == \ sqrt(2)*gamma((k + 1)/2)/gamma(k/2) assert simplify(integrate(x**2 * chi, (x, 0, oo), meijerg=True)) == k # Chi^2 distribution chisquared = 2**(-k / 2) / gamma(k / 2) * x**(k / 2 - 1) * exp(-x / 2) assert powsimp(integrate(chisquared, (x, 0, oo), meijerg=True)) == 1 assert simplify(integrate(x * chisquared, (x, 0, oo), meijerg=True)) == k assert simplify(integrate(x**2*chisquared, (x, 0, oo), meijerg=True)) == \ k*(k + 2) assert gammasimp( integrate(((x - k) / sqrt(2 * k))**3 * chisquared, (x, 0, oo), meijerg=True)) == 2 * sqrt(2) / sqrt(k) # Dagum distribution a, b, p = symbols('a b p', positive=True) # XXX (x/b)**a does not work dagum = a * p / x * (x / b)**(a * p) / (1 + x**a / b**a)**(p + 1) assert simplify(integrate(dagum, (x, 0, oo), meijerg=True)) == 1 # XXX conditions are a mess arg = x * dagum assert simplify(integrate( arg, (x, 0, oo), meijerg=True, conds='none')) == a * b * gamma(1 - 1 / a) * gamma(p + 1 + 1 / a) / ( (a * p + 1) * gamma(p)) assert simplify(integrate( x * arg, (x, 0, oo), meijerg=True, conds='none')) == a * b**2 * gamma(1 - 2 / a) * gamma(p + 1 + 2 / a) / ( (a * p + 2) * gamma(p)) # F-distribution d1, d2 = symbols('d1 d2', positive=True) f = sqrt(((d1*x)**d1 * d2**d2)/(d1*x + d2)**(d1 + d2))/x \ /gamma(d1/2)/gamma(d2/2)*gamma((d1 + d2)/2) assert simplify(integrate(f, (x, 0, oo), meijerg=True)) == 1 # TODO conditions are a mess assert simplify(integrate(x * f, (x, 0, oo), meijerg=True, conds='none')) == d2 / (d2 - 2) assert simplify( integrate(x**2 * f, (x, 0, oo), meijerg=True, conds='none')) == d2**2 * (d1 + 2) / d1 / (d2 - 4) / (d2 - 2) # TODO gamma, rayleigh # inverse gaussian lamda, mu = symbols('lamda mu', positive=True) dist = sqrt(lamda / 2 / pi) * x**(Rational(-3, 2)) * exp( -lamda * (x - mu)**2 / x / 2 / mu**2) mysimp = lambda expr: simplify(expr.rewrite(exp)) assert mysimp(integrate(dist, (x, 0, oo))) == 1 assert mysimp(integrate(x * dist, (x, 0, oo))) == mu assert mysimp(integrate((x - mu)**2 * dist, (x, 0, oo))) == mu**3 / lamda assert mysimp(integrate((x - mu)**3 * dist, (x, 0, oo))) == 3 * mu**5 / lamda**2 # Levi c = Symbol('c', positive=True) assert integrate( sqrt(c / 2 / pi) * exp(-c / 2 / (x - mu)) / (x - mu)**S('3/2'), (x, mu, oo)) == 1 # higher moments oo # log-logistic alpha, beta = symbols('alpha beta', positive=True) distn = (beta/alpha)*x**(beta - 1)/alpha**(beta - 1)/ \ (1 + x**beta/alpha**beta)**2 # FIXME: If alpha, beta are not declared as finite the line below hangs # after the changes in: # https://github.com/sympy/sympy/pull/16603 assert simplify(integrate(distn, (x, 0, oo))) == 1 # NOTE the conditions are a mess, but correctly state beta > 1 assert simplify(integrate(x*distn, (x, 0, oo), conds='none')) == \ pi*alpha/beta/sin(pi/beta) # (similar comment for conditions applies) assert 
simplify(integrate(x**y*distn, (x, 0, oo), conds='none')) == \ pi*alpha**y*y/beta/sin(pi*y/beta) # weibull k = Symbol('k', positive=True) n = Symbol('n', positive=True) distn = k / lamda * (x / lamda)**(k - 1) * exp(-(x / lamda)**k) assert simplify(integrate(distn, (x, 0, oo))) == 1 assert simplify(integrate(x**n*distn, (x, 0, oo))) == \ lamda**n*gamma(1 + n/k) # rice distribution from sympy.functions.special.bessel import besseli nu, sigma = symbols('nu sigma', positive=True) rice = x / sigma**2 * exp(-(x**2 + nu**2) / 2 / sigma**2) * besseli( 0, x * nu / sigma**2) assert integrate(rice, (x, 0, oo), meijerg=True) == 1 # can someone verify higher moments? # Laplace distribution mu = Symbol('mu', real=True) b = Symbol('b', positive=True) laplace = exp(-abs(x - mu) / b) / 2 / b assert integrate(laplace, (x, -oo, oo), meijerg=True) == 1 assert integrate(x * laplace, (x, -oo, oo), meijerg=True) == mu assert integrate(x**2*laplace, (x, -oo, oo), meijerg=True) == \ 2*b**2 + mu**2 # TODO are there other distributions supported on (-oo, oo) that we can do? # misc tests k = Symbol('k', positive=True) assert gammasimp( expand_mul( integrate(log(x) * x**(k - 1) * exp(-x) / gamma(k), (x, 0, oo)))) == polygamma(0, k)
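# Standalone sketch, not part of the original test: the variance of the
# exponential distribution follows from the first two moments asserted above
# (E[x] = 1/lambda, E[x**2] = 2/lambda**2) using the same meijerg-based
# integration pattern.
from sympy import Symbol, exp, integrate, oo, simplify

_x = Symbol('x')
_rate = Symbol('lambda', positive=True)
_density = _rate*exp(-_rate*_x)

_mean = integrate(_x*_density, (_x, 0, oo), meijerg=True)        # 1/lambda
_second = integrate(_x**2*_density, (_x, 0, oo), meijerg=True)   # 2/lambda**2
assert simplify(_second - _mean**2) == _rate**(-2)               # variance 1/lambda**2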
def _compute_fps(f, x, x0, dir, hyper, order, rational, full): """Recursive wrapper to compute fps. See :func:`compute_fps` for details. """ if x0 in [S.Infinity, S.NegativeInfinity]: dir = S.One if x0 is S.Infinity else -S.One temp = f.subs(x, 1/x) result = _compute_fps(temp, x, 0, dir, hyper, order, rational, full) if result is None: return None return (result[0], result[1].subs(x, 1/x), result[2].subs(x, 1/x)) elif x0 or dir == -S.One: if dir == -S.One: rep = -x + x0 rep2 = -x rep2b = x0 else: rep = x + x0 rep2 = x rep2b = -x0 temp = f.subs(x, rep) result = _compute_fps(temp, x, 0, S.One, hyper, order, rational, full) if result is None: return None return (result[0], result[1].subs(x, rep2 + rep2b), result[2].subs(x, rep2 + rep2b)) if f.is_polynomial(x): k = Dummy('k') ak = sequence(Coeff(f, x, k), (k, 1, oo)) xk = sequence(x**k, (k, 0, oo)) ind = f.coeff(x, 0) return ak, xk, ind # Break instances of Add # this allows application of different # algorithms on different terms increasing the # range of admissible functions. if isinstance(f, Add): result = False ak = sequence(S.Zero, (0, oo)) ind, xk = S.Zero, None for t in Add.make_args(f): res = _compute_fps(t, x, 0, S.One, hyper, order, rational, full) if res: if not result: result = True xk = res[1] if res[0].start > ak.start: seq = ak s, f = ak.start, res[0].start else: seq = res[0] s, f = res[0].start, ak.start save = Add(*[z[0]*z[1] for z in zip(seq[0:(f - s)], xk[s:f])]) ak += res[0] ind += res[2] + save else: ind += t if result: return ak, xk, ind return None # The symbolic term - symb, if present, is being separated from the function # Otherwise symb is being set to S.One syms = f.free_symbols.difference({x}) (f, symb) = expand(f).as_independent(*syms) if symb.is_zero: symb = S.One symb = powsimp(symb) result = None # from here on it's x0=0 and dir=1 handling k = Dummy('k') if rational: result = rational_algorithm(f, x, k, order, full) if result is None and hyper: result = hyper_algorithm(f, x, k, order) if result is None: return None ak = sequence(result[0], (k, result[2], oo)) xk_formula = powsimp(x**k * symb) xk = sequence(xk_formula, (k, 0, oo)) ind = powsimp(result[1] * symb) return ak, xk, ind
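# Hedged usage sketch: ``fps`` is the public entry point that ultimately
# relies on this helper (via ``compute_fps``). The x0/dir arguments correspond
# to the substitution branches at the top of _compute_fps; exact printed forms
# may vary between SymPy versions.
from sympy import Symbol, fps, sin

_x = Symbol('x')
# Hypergeometric path: a closed-form coefficient sequence for sin.
print(fps(sin(_x), _x).truncate(6))      # x - x**3/6 + x**5/120 + O(x**6)
# Rational path: geometric series via rational_algorithm.
print(fps(1/(1 - _x), _x).truncate(4))   # 1 + x + x**2 + x**3 + O(x**4)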
def is_convergent(self): r"""Checks for the convergence of a Sum. We divide the study of convergence of infinite sums and products into two parts. First Part: One part is the question of whether all the terms are well defined, i.e., they are finite in a sum and also non-zero in a product. Zero is the analogy of (minus) infinity in products as :math:`e^{-\infty} = 0`. Second Part: The second part is the question of convergence after infinities (and zeros in products) have been omitted, assuming that their number is finite. This means that we only consider the tail of the sum or product, starting from some point after which all terms are well defined. For example, in a sum of the form: .. math:: \sum_{1 \leq n < \infty} \frac{1}{n^2 + an + b} where a and b are numbers, the routine will return true even if there are infinities in the term sequence (at most two). An analogous product would be: .. math:: \prod_{1 \leq n < \infty} e^{\frac{1}{n^2 + an + b}} This is how convergence is interpreted. It is concerned with what happens at the limit. Finding the bad terms is another independent matter. Note: It is the responsibility of the user to see that the sum or product is well defined. There are various tests employed to check convergence, such as the divergence test, root test, integral test, alternating series test, comparison tests, and Dirichlet tests. The method returns true if the Sum is convergent, false if it is divergent, and raises NotImplementedError if convergence cannot be checked. References ========== .. [1] https://en.wikipedia.org/wiki/Convergence_tests Examples ======== >>> from sympy import factorial, S, Sum, Symbol, oo >>> n = Symbol('n', integer=True) >>> Sum(n/(n - 1), (n, 4, 7)).is_convergent() True >>> Sum(n/(2*n + 1), (n, 1, oo)).is_convergent() False >>> Sum(factorial(n)/5**n, (n, 1, oo)).is_convergent() False >>> Sum(1/n**(S(6)/5), (n, 1, oo)).is_convergent() True See Also ======== Sum.is_absolutely_convergent() Product.is_convergent() """ from sympy import Interval, Integral, log, symbols, simplify p, q, r = symbols('p q r', cls=Wild) sym = self.limits[0][0] lower_limit = self.limits[0][1] upper_limit = self.limits[0][2] sequence_term = self.function if len(sequence_term.free_symbols) > 1: raise NotImplementedError("convergence checking for more than one symbol " "containing series is not handled") if lower_limit.is_finite and upper_limit.is_finite: return S.true # transform sym -> -sym and swap the upper_limit = S.Infinity # and lower_limit = - upper_limit if lower_limit is S.NegativeInfinity: if upper_limit is S.Infinity: return Sum(sequence_term, (sym, 0, S.Infinity)).is_convergent() and \ Sum(sequence_term, (sym, S.NegativeInfinity, 0)).is_convergent() sequence_term = simplify(sequence_term.xreplace({sym: -sym})) lower_limit = -upper_limit upper_limit = S.Infinity sym_ = Dummy(sym.name, integer=True, positive=True) sequence_term = sequence_term.xreplace({sym: sym_}) sym = sym_ interval = Interval(lower_limit, upper_limit) # Piecewise function handle if sequence_term.is_Piecewise: for func, cond in sequence_term.args: # see if it represents something going to oo if cond == True or cond.as_set().sup is S.Infinity: s = Sum(func, (sym, lower_limit, upper_limit)) return s.is_convergent() return S.true ### -------- Divergence test ----------- ### try: lim_val = limit_seq(sequence_term, sym) if lim_val is not None and lim_val.is_zero is False: return S.false except NotImplementedError: pass try: lim_val_abs = limit_seq(abs(sequence_term), sym) if lim_val_abs is not None and lim_val_abs.is_zero is False: return S.false 
except NotImplementedError: pass order = O(sequence_term, (sym, S.Infinity)) ### --------- p-series test (1/n**p) ---------- ### p1_series_test = order.expr.match(sym**p) if p1_series_test is not None: if p1_series_test[p] < -1: return S.true if p1_series_test[p] >= -1: return S.false p2_series_test = order.expr.match((1/sym)**p) if p2_series_test is not None: if p2_series_test[p] > 1: return S.true if p2_series_test[p] <= 1: return S.false ### ------------- comparison test ------------- ### # 1/(n**p*log(n)**q*log(log(n))**r) comparison n_log_test = order.expr.match(1/(sym**p*log(sym)**q*log(log(sym))**r)) if n_log_test is not None: if (n_log_test[p] > 1 or (n_log_test[p] == 1 and n_log_test[q] > 1) or (n_log_test[p] == n_log_test[q] == 1 and n_log_test[r] > 1)): return S.true return S.false ### ------------- Limit comparison test -----------### # (1/n) comparison try: lim_comp = limit_seq(sym*sequence_term, sym) if lim_comp is not None and lim_comp.is_number and lim_comp > 0: return S.false except NotImplementedError: pass ### ----------- ratio test ---------------- ### next_sequence_term = sequence_term.xreplace({sym: sym + 1}) ratio = combsimp(powsimp(next_sequence_term/sequence_term)) try: lim_ratio = limit_seq(ratio, sym) if lim_ratio is not None and lim_ratio.is_number: if abs(lim_ratio) > 1: return S.false if abs(lim_ratio) < 1: return S.true except NotImplementedError: pass ### ----------- root test ---------------- ### # lim = Limit(abs(sequence_term)**(1/sym), sym, S.Infinity) try: lim_evaluated = limit_seq(abs(sequence_term)**(1/sym), sym) if lim_evaluated is not None and lim_evaluated.is_number: if lim_evaluated < 1: return S.true if lim_evaluated > 1: return S.false except NotImplementedError: pass ### ------------- alternating series test ----------- ### dict_val = sequence_term.match((-1)**(sym + p)*q) if not dict_val[p].has(sym) and is_decreasing(dict_val[q], interval): return S.true ### ------------- integral test -------------- ### check_interval = None maxima = solveset(sequence_term.diff(sym), sym, interval) if not maxima: check_interval = interval elif isinstance(maxima, FiniteSet) and maxima.sup.is_number: check_interval = Interval(maxima.sup, interval.sup) if (check_interval is not None and (is_decreasing(sequence_term, check_interval) or is_decreasing(-sequence_term, check_interval))): integral_val = Integral( sequence_term, (sym, lower_limit, upper_limit)) try: integral_val_evaluated = integral_val.doit() if integral_val_evaluated.is_number: return S(integral_val_evaluated.is_finite) except NotImplementedError: pass ### ----- Dirichlet and bounded times convergent tests ----- ### # TODO # # Dirichlet_test # https://en.wikipedia.org/wiki/Dirichlet%27s_test # # Bounded times convergent test # It is based on comparison theorems for series. # In particular, if the general term of a series can # be written as a product of two terms a_n and b_n # and if a_n is bounded and if Sum(b_n) is absolutely # convergent, then the original series Sum(a_n * b_n) # is absolutely convergent and so convergent. # # The following code can grow like 2**n where n is the # number of args in order.expr. # This, possibly combined with the potentially slow checks # inside the loop, could make this test extremely slow # for larger summation expressions. 
if order.expr.is_Mul: args = order.expr.args argset = set(args) ### -------------- Dirichlet tests -------------- ### m = Dummy('m', integer=True) def _dirichlet_test(g_n): try: ing_val = limit_seq(Sum(g_n, (sym, interval.inf, m)).doit(), m) if ing_val is not None and ing_val.is_finite: return S.true except NotImplementedError: pass ### -------- bounded times convergent test ---------### def _bounded_convergent_test(g1_n, g2_n): try: lim_val = limit_seq(g1_n, sym) if lim_val is not None and (lim_val.is_finite or ( isinstance(lim_val, AccumulationBounds) and (lim_val.max - lim_val.min).is_finite)): if Sum(g2_n, (sym, lower_limit, upper_limit)).is_absolutely_convergent(): return S.true except NotImplementedError: pass for n in range(1, len(argset)): for a_tuple in itertools.combinations(args, n): b_set = argset - set(a_tuple) a_n = Mul(*a_tuple) b_n = Mul(*b_set) if is_decreasing(a_n, interval): dirich = _dirichlet_test(b_n) if dirich is not None: return dirich bc_test = _bounded_convergent_test(a_n, b_n) if bc_test is not None: return bc_test _sym = self.limits[0][0] sequence_term = sequence_term.xreplace({sym: _sym}) raise NotImplementedError("The algorithm to find the Sum convergence of %s " "is not yet implemented" % (sequence_term))
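# Hedged usage sketch of is_convergent, exercising a few of the tests named
# above (p-series, ratio test, alternating series); these reflect standard
# textbook results rather than the SymPy test suite itself.
from sympy import Sum, Symbol, oo

_n = Symbol('n', integer=True)
assert Sum(1/_n**2, (_n, 1, oo)).is_convergent()        # p-series, p = 2 > 1
assert not Sum(1/_n, (_n, 1, oo)).is_convergent()       # harmonic series diverges
assert Sum(1/2**_n, (_n, 1, oo)).is_convergent()        # geometric series, ratio test
assert Sum((-1)**_n/_n, (_n, 1, oo)).is_convergent()    # alternating series test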
def test_gammasimp(): R = Rational # was part of test_combsimp_gamma() in test_combsimp.py assert gammasimp(gamma(x)) == gamma(x) assert gammasimp(gamma(x + 1) / x) == gamma(x) assert gammasimp(gamma(x) / (x - 1)) == gamma(x - 1) assert gammasimp(x * gamma(x)) == gamma(x + 1) assert gammasimp((x + 1) * gamma(x + 1)) == gamma(x + 2) assert gammasimp(gamma(x + y) * (x + y)) == gamma(x + y + 1) assert gammasimp(x / gamma(x + 1)) == 1 / gamma(x) assert gammasimp((x + 1)**2 / gamma(x + 2)) == (x + 1) / gamma(x + 1) assert gammasimp(x*gamma(x) + gamma(x + 3)/(x + 2)) == \ (x + 2)*gamma(x + 1) assert gammasimp(gamma(2 * x) * x) == gamma(2 * x + 1) / 2 assert gammasimp(gamma(2 * x) / (x - S.Half)) == 2 * gamma(2 * x - 1) assert gammasimp(gamma(x) * gamma(1 - x)) == pi / sin(pi * x) assert gammasimp(gamma(x) * gamma(-x)) == -pi / (x * sin(pi * x)) assert gammasimp(1/gamma(x + 3)/gamma(1 - x)) == \ sin(pi*x)/(pi*x*(x + 1)*(x + 2)) assert gammasimp(factorial(n + 2)) == gamma(n + 3) assert gammasimp(binomial(n, k)) == \ gamma(n + 1)/(gamma(k + 1)*gamma(-k + n + 1)) assert powsimp(gammasimp( gamma(x)*gamma(x + S.Half)*gamma(y)/gamma(x + y))) == \ 2**(-2*x + 1)*sqrt(pi)*gamma(2*x)*gamma(y)/gamma(x + y) assert gammasimp(1/gamma(x)/gamma(x - Rational(1, 3))/gamma(x + Rational(1, 3))) == \ 3**(3*x - Rational(3, 2))/(2*pi*gamma(3*x - 1)) assert simplify( gamma(S.Half + x / 2) * gamma(1 + x / 2) / gamma(1 + x) / sqrt(pi) * 2**x) == 1 assert gammasimp(gamma(Rational(-1, 4)) * gamma(Rational(-3, 4))) == 16 * sqrt(2) * pi / 3 assert powsimp(gammasimp(gamma(2*x)/gamma(x))) == \ 2**(2*x - 1)*gamma(x + S.Half)/sqrt(pi) # issue 6792 e = (-gamma(k) * gamma(k + 2) + gamma(k + 1)**2) / gamma(k)**2 assert gammasimp(e) == -k assert gammasimp(1 / e) == -1 / k e = (gamma(x) + gamma(x + 1)) / gamma(x) assert gammasimp(e) == x + 1 assert gammasimp(1 / e) == 1 / (x + 1) e = (gamma(x) + gamma(x + 2)) * (gamma(x - 1) + gamma(x)) / gamma(x) assert gammasimp(e) == (x**2 + x + 1) * gamma(x + 1) / (x - 1) e = (-gamma(k) * gamma(k + 2) + gamma(k + 1)**2) / gamma(k)**2 assert gammasimp(e**2) == k**2 assert gammasimp(e**2 / gamma(k + 1)) == k / gamma(k) a = R(1, 2) + R(1, 3) b = a + R(1, 3) assert gammasimp(gamma(2 * k) / gamma(k) * gamma(k + a) * gamma(k + b)) == 3 * 2**(2 * k + 1) * 3**( -3 * k - 2) * sqrt(pi) * gamma(3 * k + R(3, 2)) / 2 # issue 9699 assert gammasimp( (x + 1) * factorial(x) / gamma(y)) == gamma(x + 2) / gamma(y) assert gammasimp(rf(x + n, k) * binomial(n, k)).simplify() == Piecewise( (gamma(n + 1) * gamma(k + n + x) / (gamma(k + 1) * gamma(n + x) * gamma(-k + n + 1)), n > -x), ((-1)**k * gamma(n + 1) * gamma(-n - x + 1) / (gamma(k + 1) * gamma(-k + n + 1) * gamma(-k - n - x + 1)), True)) A, B = symbols('A B', commutative=False) assert gammasimp(e * B * A) == gammasimp(e) * B * A # check iteration assert gammasimp(gamma(2 * k) / gamma(k) * gamma(-k - R(1, 2))) == (-2**(2 * k + 1) * sqrt(pi) / (2 * ((2 * k + 1) * cos(pi * k)))) assert gammasimp( gamma(k) * gamma(k + R(1, 3)) * gamma(k + R(2, 3)) / gamma(k * R(3, 2))) == (3 * 2**(3 * k + 1) * 3**(-3 * k - S.Half) * sqrt(pi) * gamma(k * R(3, 2) + S.Half) / 2) # issue 6153 assert gammasimp(gamma(Rational(1, 4)) / gamma(Rational(5, 4))) == 4 # was part of test_combsimp() in test_combsimp.py assert gammasimp(binomial(n + 2, k + S.Half)) == gamma(n + 3)/ \ (gamma(k + R(3, 2))*gamma(-k + n + R(5, 2))) assert gammasimp(binomial(n + 2, k + 2.0)) == \ gamma(n + 3)/(gamma(k + 3.0)*gamma(-k + n + 1)) # issue 11548 assert gammasimp(binomial(0, x)) == sin(pi * x) / (pi * x) e = 
gamma(n + Rational(1, 3)) * gamma(n + R(2, 3)) assert gammasimp(e) == e assert gammasimp(gamma(4*n + S.Half)/gamma(2*n - R(3, 4))) == \ 2**(4*n - R(5, 2))*(8*n - 3)*gamma(2*n + R(3, 4))/sqrt(pi) i, m = symbols('i m', integer=True) e = gamma(exp(i)) assert gammasimp(e) == e e = gamma(m + 3) assert gammasimp(e) == e e = gamma(m + 1) / (gamma(i + 1) * gamma(-i + m + 1)) assert gammasimp(e) == e p = symbols("p", integer=True, positive=True) assert gammasimp(gamma(-p + 4)) == gamma(-p + 4)
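# Hedged interactive sketch of the behaviour exercised above: gammasimp
# rewrites factorials in terms of the gamma function and collapses ratios of
# gammas whose arguments differ by an integer. Printed forms may differ
# slightly between SymPy versions.
from sympy import Symbol, factorial, gamma
from sympy.simplify.gammasimp import gammasimp

_m = Symbol('m')
print(gammasimp(gamma(_m + 2)/gamma(_m)))           # expected: m*(m + 1)
print(gammasimp(factorial(_m)/factorial(_m - 2)))   # expected: m*(m - 1)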