import numpy as np
import sympy as sym

# Data placeholders used inside the likelihoods below; in the original these
# were presumably defined at module scope.
a, t, s = sym.symbols('a t s')


def MLE_(a_i, t_i, s_i):
    l, p, m1, m2 = sym.symbols('l,p,m1,m2', positive=True)
    # Bernoulli likelihood for the a_i samples
    L1 = p ** a * (1 - p) ** (1 - a)
    J1 = np.prod([L1.subs(a, i) for i in a_i])
    print(J1)
    # Exponential likelihood for the t_i samples
    L2 = l * sym.exp(-l * t)
    J2 = np.prod([L2.subs(t, i) for i in t_i])
    print(J2)
    # Mixture of two exponentials, selected by the Bernoulli outcome a
    L3 = sym.Add((1 - a) * m1 * sym.exp(-m1 * s), a * m2 * sym.exp(-m2 * s))
    J3 = np.prod([L3.subs({a: i, s: s_i[j]}) for j, i in enumerate(a_i)])
    print(J3)
    print(sym.expand_log(sym.log(J3)))
    logJ = sym.expand_log(sym.log(J1 * J2 * J3))
    print(logJ)
    sol_p = float(sym.solve(sym.diff(logJ, p), p)[0])
    sol_l = float(sym.solve(sym.diff(logJ, l), l)[0])
    sol_m1 = float(sym.solve(sym.diff(logJ, m1), m1)[0])
    sol_m2 = float(sym.solve(sym.diff(logJ, m2), m2)[0])
    print(sol_p, sol_l, sol_m1, sol_m2)
    return sol_p, sol_l, sol_m1, sol_m2
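# A minimal usage sketch for MLE_ (added for illustration, not from the
# original source): the small data lists below are made up. a_i ~ Bernoulli(p),
# t_i ~ Exponential(l), and s_i is drawn from the exponential picked by a_i.
if __name__ == "__main__":
    a_i = [0, 1, 0, 0, 1, 1, 0, 1, 0, 0]
    t_i = [0.5, 1.2, 0.3, 2.1, 0.8, 0.6, 1.7, 0.2, 0.9, 1.1]
    s_i = [1.4, 0.1, 2.2, 0.7, 0.3, 0.2, 1.9, 0.4, 1.0, 0.8]
    print(MLE_(a_i, t_i, s_i))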
def test_log_simplify():
    x = Symbol("x", positive=True)

    assert log(x**2).expand() == 2*log(x)
    assert expand_log(log(x**(2 + log(2)))) == (2 + log(2))*log(x)

    z = Symbol('z')
    assert log(sqrt(z)).expand() == log(z)/2
    assert expand_log(log(z**(log(2) - 1))) == (log(2) - 1)*log(z)
    assert log(z**(-1)).expand() != -log(z)
    assert log(z**(x/(x+1))).expand() == x*log(z)/(x + 1)
def test_issue_8866():
    assert simplify(log(x, 10, evaluate=False)) == simplify(log(x, 10))
    assert expand_log(log(x, 10, evaluate=False)) == expand_log(log(x, 10))

    y = Symbol('y', positive=True)
    l1 = log(exp(y), exp(10))
    b1 = log(exp(y), exp(5))
    l2 = log(exp(y), exp(10), evaluate=False)
    b2 = log(exp(y), exp(5), evaluate=False)

    assert simplify(log(l1, b1)) == simplify(log(l2, b2))
    assert expand_log(log(l1, b1)) == expand_log(log(l2, b2))
def test_log_expand_factor():
    assert (log(18) / log(3) - 2).expand(factor=True) == log(2) / log(3)
    assert (log(12) / log(2)).expand(factor=True) == log(3) / log(2) + 2
    assert (log(15) / log(3)).expand(factor=True) == 1 + log(5) / log(3)
    assert (log(2) / (-log(12) + log(24))).expand(factor=True) == 1

    assert expand_log(log(12), factor=True) == log(3) + 2 * log(2)
    assert expand_log(log(21) / log(7), factor=False) == log(3) / log(7) + 1
    assert expand_log(log(45)/log(5) + log(20), factor=False) == \
        1 + 2*log(3)/log(5) + log(20)
    assert expand_log(log(45)/log(5) + log(26), factor=True) == \
        log(2) + log(13) + (log(5) + 2*log(3))/log(5)
def expands_log(expression):
    '''
    Uses sympy.expand_log() and applies identity (1) and (2)
    '''
    return expand_log(expression)
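# Illustrative check (added, not from the original source): with positive
# symbols, expand_log applies log(x*y) -> log(x) + log(y) and
# log(x**n) -> n*log(x).
from sympy import symbols, log, expand_log

x, y = symbols('x y', positive=True)
print(expands_log(log(x * y)))   # log(x) + log(y)
print(expands_log(log(x ** 3)))  # 3*log(x)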
def _eval_simplify(self, ratio, measure):
    from sympy.simplify.simplify import expand_log, simplify

    if (len(self.args) == 2):
        return simplify(self.func(*self.args), ratio=ratio, measure=measure)
    expr = self.func(simplify(self.args[0], ratio=ratio, measure=measure))
    expr = expand_log(expr, deep=True)
    return min([expr, self], key=measure)
def _eval_expand_log(self, deep=True, **hints):
    from sympy import unpolarify, expand_log, factorint
    from sympy.concrete import Sum, Product

    force = hints.get('force', False)
    factor = hints.get('factor', False)
    if (len(self.args) == 2):
        return expand_log(self.func(*self.args), deep=deep, force=force)
    arg = self.args[0]
    if arg.is_Integer:
        # remove perfect powers
        p = perfect_power(arg)
        logarg = None
        coeff = 1
        if p is not False:
            arg, coeff = p
            logarg = self.func(arg)
        # expand as product of its prime factors if factor=True
        if factor:
            p = factorint(arg)
            if arg not in p.keys():
                logarg = sum(n * log(val) for val, n in p.items())
        if logarg is not None:
            return coeff * logarg
    elif arg.is_Rational:
        return log(arg.p) - log(arg.q)
    elif arg.is_Mul:
        expr = []
        nonpos = []
        for x in arg.args:
            if force or x.is_positive or x.is_polar:
                a = self.func(x)
                if isinstance(a, log):
                    expr.append(self.func(x)._eval_expand_log(**hints))
                else:
                    expr.append(a)
            elif x.is_negative:
                a = self.func(-x)
                expr.append(a)
                nonpos.append(S.NegativeOne)
            else:
                nonpos.append(x)
        return Add(*expr) + log(Mul(*nonpos))
    elif arg.is_Pow or isinstance(arg, exp):
        if force or (arg.exp.is_extended_real and
                     (arg.base.is_positive or
                      ((arg.exp + 1).is_positive and
                       (arg.exp - 1).is_nonpositive))) or arg.base.is_polar:
            b = arg.base
            e = arg.exp
            a = self.func(b)
            if isinstance(a, log):
                return unpolarify(e) * a._eval_expand_log(**hints)
            else:
                return unpolarify(e) * a
    elif isinstance(arg, Product):
        if force or arg.function.is_positive:
            return Sum(log(arg.function), *arg.limits)

    return self.func(arg)
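# Quick demonstration of the two integer code paths above (added for
# illustration, not part of the library source): perfect powers are pulled out
# by default, while factor=True splits the argument into its prime factors.
from sympy import log, expand_log

print(log(8).expand())                   # 3*log(2), via perfect_power
print(expand_log(log(12)))               # log(12); 12 is not a perfect power
print(expand_log(log(12), factor=True))  # log(3) + 2*log(2)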
def is_equal(self, eq1, eq2):
    """Compare answers"""
    # answer=eq1, solution=eq2
    equation_types = [
        Equality, Unequality, StrictLessThan, LessThan, StrictGreaterThan,
        GreaterThan
    ]

    # Symbolic equality/Perfect match
    if self._comparison_type == "perfect_match":
        return eq1 == eq2

    # simplify is mandatory to counter expand_trig and expand_log weaknesses
    eq1 = factor(simplify(eq1))
    eq2 = factor(simplify(eq2))

    # Trigonometric simplifications
    if self._use_trigo:
        eq1 = expand_trig(eq1)
        eq2 = expand_trig(eq2)

    # Logarithmic simplifications
    if self._use_log:
        if self._use_complex:
            eq1 = expand_log(eq1)
            eq2 = expand_log(eq2)
        else:
            eq1 = expand_log(eq1, force=True)
            eq2 = expand_log(eq2, force=True)

    if self._tolerance:
        eq1 = eq1.subs([(E, math.e), (pi, math.pi)])
        eq2 = eq2.subs([(E, math.e), (pi, math.pi)])

    # Numbers
    if (isinstance(eq1, Number) and isinstance(eq2, Number)) or self._tolerance:
        return round(float(abs(N(eq1 - eq2))), 10) <= round(
            float(self._tolerance), 10) if self._tolerance else abs(
                N(eq1 - eq2)) == 0

    # Numerical Evaluation
    if not type(eq1) == type(eq2):
        return N(eq1) == N(eq2)

    # Equality and inequalities
    if type(eq1) in equation_types:
        return eq1 == eq2 or simplify(eq1) == simplify(eq2)

    # Direct match
    if eq1 == eq2 or simplify(eq1) == simplify(eq2):
        return True

    # Uncaught
    return abs(N(eq1 - eq2)) == 0
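# Why the force=True branch above matters (added for illustration, not from
# the original source): without positivity assumptions, expand_log refuses to
# split log(x*y), so two equivalent answers would not match symbolically.
from sympy import symbols, log, expand_log

x, y = symbols('x y')
print(expand_log(log(x * y)))              # log(x*y), unchanged
print(expand_log(log(x * y), force=True))  # log(x) + log(y)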
def _eval_simplify(self, **kwargs):
    from sympy.simplify.simplify import expand_log, simplify, inversecombine
    if len(self.args) == 2:  # it's unevaluated
        return simplify(self.func(*self.args), **kwargs)

    expr = self.func(simplify(self.args[0], **kwargs))
    if kwargs['inverse']:
        expr = inversecombine(expr)
    expr = expand_log(expr, deep=True)
    return min([expr, self], key=kwargs['measure'])
def _eval_simplify(self, ratio, measure, rational, inverse):
    from sympy.simplify.simplify import expand_log, simplify, inversecombine
    if (len(self.args) == 2):
        return simplify(self.func(*self.args), ratio=ratio, measure=measure,
                        rational=rational, inverse=inverse)
    expr = self.func(simplify(self.args[0], ratio=ratio, measure=measure,
                              rational=rational, inverse=inverse))
    if inverse:
        expr = inversecombine(expr)
    expr = expand_log(expr, deep=True)
    return min([expr, self], key=measure)
def canonise_log(equation):
    expanded_log = sympy.expand_log(equation, force=True)
    terms = expanded_log.as_ordered_terms()

    a, b = sympy.Wild('a'), sympy.Wild('b')

    total_interior = 1
    for term in terms:
        if sympy.ask(sympy.Q.complex(term)):
            term_interior *= -1
        else:
            term_interior = term.match(sympy.log(a) / b)[a]

        if term.could_extract_minus_sign():
            total_interior /= term_interior
        else:
            total_interior *= term_interior

    if isinstance(total_interior, sympy.Add):
        invert = False
    elif isinstance(total_interior, sympy.Mul):
        match = total_interior.together().match(x / b)
        # for some reason, (x/3).match(a/b) gives {a: 1/3, b: 1/x} so we have
        # to use a workaround
        if match is not None:
            invert = False
        else:
            match = total_interior.together().match(a / b)
            degree_numerator = 0 if isinstance(match[a], sympy.Rational) else match[a].as_poly().degree()
            degree_denominator = 0 if isinstance(match[b], sympy.Rational) else match[b].as_poly().degree()
            if degree_numerator < degree_denominator:
                invert = True
            else:
                invert = False
    elif isinstance(total_interior, sympy.Pow):
        index = total_interior.as_base_exp()[1]
        if index < 0:
            invert = True
        else:
            invert = False
    else:
        # for debugging - wtf kind of c-c-c-class is it???
        print(total_interior, type(total_interior))

    if invert:
        return -sympy.log((1 / total_interior).together(), evaluate=False) / terms[0].as_coeff_Mul()[0].q
    else:
        return sympy.log(total_interior.together(), evaluate=False) / terms[0].as_coeff_Mul()[0].q
def _eval_expand_log(self, deep=True, **hints):
    from sympy import unpolarify, expand_log
    from sympy.concrete import Sum, Product

    force = hints.get("force", False)
    if len(self.args) == 2:
        return expand_log(self.func(*self.args), deep=deep, force=force)
    arg = self.args[0]
    if arg.is_Integer:
        # remove perfect powers
        p = perfect_power(int(arg))
        if p is not False:
            return p[1] * self.func(p[0])
    elif arg.is_Rational:
        return log(arg.p) - log(arg.q)
    elif arg.is_Mul:
        expr = []
        nonpos = []
        for x in arg.args:
            if force or x.is_positive or x.is_polar:
                a = self.func(x)
                if isinstance(a, log):
                    expr.append(self.func(x)._eval_expand_log(**hints))
                else:
                    expr.append(a)
            elif x.is_negative:
                a = self.func(-x)
                expr.append(a)
                nonpos.append(S.NegativeOne)
            else:
                nonpos.append(x)
        return Add(*expr) + log(Mul(*nonpos))
    elif arg.is_Pow or isinstance(arg, exp):
        if (force or (arg.exp.is_extended_real and
                      (arg.base.is_positive or
                       ((arg.exp + 1).is_positive and
                        (arg.exp - 1).is_nonpositive))) or arg.base.is_polar):
            b = arg.base
            e = arg.exp
            a = self.func(b)
            if isinstance(a, log):
                return unpolarify(e) * a._eval_expand_log(**hints)
            else:
                return unpolarify(e) * a
    elif isinstance(arg, Product):
        if force or arg.function.is_positive:
            return Sum(log(arg.function), *arg.limits)

    return self.func(arg)
def extra_simple(mul):
    """Simplification of pysb rates

    Arguments:
        mul: pysb reaction (sympy.Mul object)

    Returns:
        a simplified version denested of exponents.

    TODO: make sure it fully simplifies!
    """
    return sp.powsimp((sp.expand_power_base(sp.powdenest(sp.logcombine(
        sp.expand_log(mul.simplify(), force=True), force=True), force=True),
        force=True)), force=True)
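# Illustrative check (added; symbol names are made up, not from the original
# source): grouped exponents in a rate-like expression get denested by the
# log/power round trip above.
import sympy as sp

k1, k2, n = sp.symbols('k1 k2 n', positive=True)
print(extra_simple((k1 * k2)**n / k1**n))  # expected to reduce to k2**n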
def _eval_expand_log(self, deep=True, **hints):
    from sympy import unpolarify, expand_log
    from sympy.concrete import Sum, Product

    force = hints.get('force', False)
    if (len(self.args) == 2):
        return expand_log(self.func(*self.args), deep=deep, force=force)
    arg = self.args[0]
    if arg.is_Integer:
        # remove perfect powers
        p = perfect_power(int(arg))
        if p is not False:
            return p[1]*self.func(p[0])
    elif arg.is_Rational:
        return log(arg.p) - log(arg.q)
    elif arg.is_Mul:
        expr = []
        nonpos = []
        for x in arg.args:
            if force or x.is_positive or x.is_polar:
                a = self.func(x)
                if isinstance(a, log):
                    expr.append(self.func(x)._eval_expand_log(**hints))
                else:
                    expr.append(a)
            elif x.is_negative:
                a = self.func(-x)
                expr.append(a)
                nonpos.append(S.NegativeOne)
            else:
                nonpos.append(x)
        return Add(*expr) + log(Mul(*nonpos))
    elif arg.is_Pow or isinstance(arg, exp):
        if force or (arg.exp.is_real and (arg.base.is_positive or
                ((arg.exp + 1).is_positive and
                 (arg.exp - 1).is_nonpositive))) or arg.base.is_polar:
            b = arg.base
            e = arg.exp
            a = self.func(b)
            if isinstance(a, log):
                return unpolarify(e) * a._eval_expand_log(**hints)
            else:
                return unpolarify(e) * a
    elif isinstance(arg, Product):
        if arg.function.is_positive:
            return Sum(log(arg.function), *arg.limits)

    return self.func(arg)
def conjugate(self, rep, symbolic=False, **kwargs):
    r"""Finds the conjugate representation of the U1 representation

    Examples
    =========
    >>> from liesym import U1
    >>> from sympy import *
    >>> u1 = U1()
    >>> u1.conjugate("1/6", symbolic=True)
    -1/6
    >>> print(u1.irrep_lookup("1/6"))
    exp(I*\theta/6)
    """
    if symbolic:
        cleaned_rep = sympify(rep) if isinstance(rep, str) else rep
        math_rep = self._from_charge(cleaned_rep)
        return expand_log(log(conjugate(math_rep)), force=True) / (I * self._theta)
    return conjugate(rep)
def test_risch_integrate():
    assert risch_integrate(t0*exp(x), x) == t0*exp(x)
    assert risch_integrate(sin(x), x, rewrite_complex=True) == \
        -exp(I*x)/2 - exp(-I*x)/2

    # From my GSoC writeup
    assert risch_integrate((1 + 2*x**2 + x**4 + 2*x**3*exp(2*x**2))/
        (x**4*exp(x**2) + 2*x**2*exp(x**2) + exp(x**2)), x) == \
        NonElementaryIntegral(exp(-x**2), x) + exp(x**2)/(1 + x**2)

    assert risch_integrate(0, x) == 0

    # also tests prde_cancel()
    e1 = log(x/exp(x) + 1)
    ans1 = risch_integrate(e1, x)
    assert ans1 == (x*log(x*exp(-x) + 1) +
        NonElementaryIntegral((x**2 - x)/(x + exp(x)), x))
    assert cancel(diff(ans1, x) - e1) == 0

    # also tests issue #10798
    e2 = (log(-1/y)/2 - log(1/y)/2)/y - (log(1 - 1/y)/2 - log(1 + 1/y)/2)/y
    ans2 = risch_integrate(e2, y)
    assert ans2 == log(1/y)*log(1 - 1/y)/2 - log(1/y)*log(1 + 1/y)/2 + \
        NonElementaryIntegral((I*pi*y**2 - 2*y*log(1/y) - I*pi)/(2*y**3 - 2*y), y)
    assert expand_log(cancel(diff(ans2, y) - e2), force=True) == 0

    # These are tested here in addition to in test_DifferentialExtension above
    # (symlogs) to test that backsubs works correctly. The integrals should be
    # written in terms of the original logarithms in the integrands.

    # XXX: Unfortunately, making backsubs work on this one is a little
    # trickier, because x**x is converted to exp(x*log(x)), and so log(x**x)
    # is converted to x*log(x). (x**2*log(x)).subs(x*log(x), log(x**x)) is
    # smart enough, the issue is that these splits happen at different places
    # in the algorithm. Maybe a heuristic is in order
    assert risch_integrate(log(x**x), x) == x**2*log(x)/2 - x**2/4
    assert risch_integrate(log(x**y), x) == x*log(x**y) - x*y
    assert risch_integrate(log(sqrt(x)), x) == x*log(sqrt(x)) - x/2
def simplify_multiple_exp_sum(expr, do_simplify=False, optims=None):
    if optims is None:
        # optims = sympy.codegen.rewriting.optims_c99 + (logsumexp_2terms_opt,)
        optims = (
            sympy.codegen.rewriting.log1p_opt,
            logsumexp_2terms_opt,
        )

    if not (ask(Q.positive(expr)) or ask(Q.negative(expr))):
        if expr.args:
            return expr.func(*[
                simplify_multiple_exp_sum(arg, do_simplify, optims)
                for arg in expr.args
            ])
        return expr

    sign = 1 if ask(Q.positive(expr)) else -1

    # Expand the log so that the resulting expression is a sum,
    # given that it was a multiplication/division before.
    # expand_log apparently is not aware of assumptions given by a context
    # manager, therefore use the force option for now.
    log_expr = sy.expand_log(sy.log(sign * expr), force=True)
    log_expr = sympy.codegen.rewriting.optimize(log_expr, optims)
    val = sign * sy.exp(log_expr, evaluate=False)
    return val
# <codecell>
import numpy as np
import sympy
from sympy.abc import x, z
# assumes the notebook's %pylab-style environment for plotting helpers
from matplotlib.pylab import linspace, plot, xlabel, ylabel, title

p = sympy.symbols('p', positive=True)
sympy.init_printing()

# <codecell>
# xs (the Bernoulli samples) and p_true come from earlier cells of the notebook.
L = p**x*(1 - p)**(1 - x)
J = np.prod([L.subs(x, i) for i in xs])  # objective function to maximize
J

# <codecell>
logJ = sympy.expand_log(sympy.log(J))
sol = sympy.solve(sympy.diff(logJ, p), p)[0]

x = linspace(0, 1, 100)
plot(x, list(map(sympy.lambdify(p, logJ, 'numpy'), x)),
     sol, logJ.subs(p, sol), 'o',
     p_true, logJ.subs(p, p_true), 's',)
xlabel('$p$', fontsize=18)
ylabel('Likelihood', fontsize=18)
title('Estimate not equal to true value', fontsize=18)

# <codecell>
L = p**x*(1 - p)**(1 - x)
J = np.prod([L.subs(x, i) for i in xs])  # objective function
kap = kap3 if region == 3 else kap12
# radiation pressure in the inner region, gas pressure everywhere else
P = P1 if region == 1 else P23

# left and right hand sides of 3 equations
L1 = 4 * cgs_pi * alpha * rho * H**3 * omega
R1 = mdot_cgs * f
L2 = (Rational(3, 4) * rho * H**2)**2 * alpha * omega**3 * kap
R2 = 4 * cgs_stef * T**4
L3 = P
R3 = rho * H**2 * omega**2

# rearrange so that the RHS = 1 everywhere and we can take the logarithm
A = [L1 / R1, L2 / R2, L3 / R3]

# take the logarithm of the LHS
logA = [expand_log(sympy_log(x)) for x in A]

# now collect the coefficients next to logarithms of each
# variable: rho, T, H
M = np.ndarray((3, 3))
for i in range(3):
    for j in range(3):
        M[i, j] = logA[i].coeff(log_yvar[j])
print(M)

# the above will constitute our new left hand sides
lhs = [sum([M[i, j] * log_yvar[j] for j in range(3)]) for i in range(3)]
# right hand sides shall be a remainder
rhs = [lhs[i] - logA[i] for i in range(3)]
for i in range(3):
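# Illustrative mini-example of the trick used above (added, not from the
# original source): expand_log turns a power-law product into a form linear in
# the logs, and .coeff() then reads off the exponents.
from sympy import symbols, log, expand_log

rho, T, H = symbols('rho T H', positive=True)
log_expr = expand_log(log(rho**2 * T**-4 * H**3))
print(log_expr)                # 2*log(rho) - 4*log(T) + 3*log(H), up to term order
print(log_expr.coeff(log(T)))  # -4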
             4 * x)
print(sympy.apart(expr))

# Trig simplification: trigsimp
expr = sympy.sin(x) / sympy.cos(x)
print(sympy.trigsimp(expr))

# Trig expansion: expand_trig
expr = sympy.sin(x + y)
print(sympy.expand_trig(expr))

# Power simplification: powsimp / power expansion: expand_power_exp
a, b = sympy.symbols('a b')
expr = x**a * x**b
print(sympy.powsimp(expr))

# Denesting nested exponents: powdenest
# requires the base to satisfy positive=True
x = sympy.symbols('x', positive=True)
expr = (x**a)**b
print(sympy.powdenest(expr))

# Log expansion: expand_log / log combination: logcombine
# note that both log and ln are the natural logarithm in sympy
# the symbols also need to satisfy assumptions
x, y = sympy.symbols('x y', positive=True)
n = sympy.symbols('n', real=True)
print(sympy.expand_log(sympy.log(x**n)))
print(sympy.expand_log(sympy.log(x * y)))

# more:
# series - Taylor series expansion
# diff - differentiation
# pprint - print a formula in a nicer format
sympy.pprint(x / (y + 1))
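# Complementary example (added for illustration, not in the original tutorial):
# logcombine is the inverse direction of expand_log, under the same assumptions.
print(sympy.logcombine(sympy.log(x) + sympy.log(y)))  # log(x*y)
print(sympy.logcombine(n * sympy.log(x)))             # log(x**n)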
def numeric_equality(test_expr, target_expr, *, complexify=False):
    """Test if two expressions are numerically equivalent to one another.

       The implementation of this method is liable to change and currently has
       several major flaws. It will sample the test and target functions over
       the free parameters of the target expression. If the test expression has
       more symbols, the parameter space is extended to include these (to test
       for cases where these parameters make no difference). Testing is
       performed on the interval [0, 1) and if 'complexify' is set then complex
       values are allowed, but the samples are still in the interval [0, 1) on
       the real line.

       Returns True if the two expressions are equal for the sampled points,
       and False otherwise.

        - 'test_expr' should be the untrusted sympy expression to check.
        - 'target_expr' should be the trusted sympy expression to match against.
        - 'complexify' is a boolean flag for sampling in the complex plane
          rather than just over the reals.
    """
    print("[NUMERIC TEST]" if not complexify else "[NUMERIC TEST (COMPLEX)]")
    SAMPLE_POINTS = 25
    lambdify_modules = [NUMPY_MISSING_FN, "numpy"]

    # Leave original expressions unchanged, and expand logarithms!
    # NumPy has a log(x) function that takes only one argument, whereas SymPy
    # has a log(x, base) function which would break when calling lambdify if it
    # was left unexpanded.
    target_expr_n = sympy.expand_log(target_expr)
    test_expr_n = sympy.expand_log(test_expr)

    # Replace any derivatives that exist with new dummy symbols, and treat them
    # as independent from the variables they involve. To avoid naming clashes,
    # just name them in ascending numeric order by length of arguments.
    # This ordering helps ensure something like d^2y/dx^2 gets substituted
    # before the implicit inner dy/dx gets replaced and breaks things.
    derivatives = target_expr.atoms(sympy.Derivative).union(
        test_expr.atoms(sympy.Derivative))
    for d, derivative in enumerate(
            sorted(derivatives, key=lambda d: len(d.args), reverse=True)):
        derivative_symbol = sympy.Symbol("Derivative_{}".format(d))
        print("Swapping '{0}' into variable '{1}' for numeric evaluation!".format(
            derivative, derivative_symbol))
        target_expr_n = target_expr_n.subs(derivative, derivative_symbol)
        test_expr_n = test_expr_n.subs(derivative, derivative_symbol)

    # If target has variables not in test, then test cannot possibly be equal.
    # This introduces an asymmetry; target is trusted to only contain necessary
    # symbols, but test is not.
    if len(target_expr_n.free_symbols.difference(test_expr_n.free_symbols)) > 0:
        print("Test expression doesn't contain all target expression variables! Can't be numerically tested.")
        return False

    # Evaluate over a domain, but if the test domain is larger, add in extra
    # dimensions; i.e. if target is f(x) but test is g(x, y) then we need to
    # sample over y too, in case it has no effect on the result [say
    # g(x,y) = (y/y) * f(x), which is mathematically identical to f(x) but may
    # have been missed by the symbolic part.]
    domain_target = numpy.random.random_sample(
        (len(target_expr_n.free_symbols), SAMPLE_POINTS))
    extra_test_freedom = numpy.random.random_sample(
        (len(test_expr_n.free_symbols) - len(target_expr_n.free_symbols),
         SAMPLE_POINTS))
    domain_test = numpy.concatenate((domain_target, extra_test_freedom))

    # If we're trying the samples in the complex plane, make these arrays
    # complex in the simplest way possible: adding 0 of the imaginary unit.
    # Also use the complex versions of the missing numpy functions (for cases
    # where there are no variables, only constants, this is essential!)
    if complexify:
        domain_target = domain_target + 0j
        domain_test = domain_test + 0j
        lambdify_modules = [NUMPY_COMPLEX_FN, "numpy"]

    # Make sure that the arguments are given in the same order to lambdify for
    # target and test, to ensure that when numbers are blindly passed in, the
    # same number goes to the same symbol when evaluated for both test and target.
    shared_variables = list(target_expr_n.free_symbols)  # We ensured above that all symbols in target are in test also
    extra_test_variables = list(
        test_expr_n.free_symbols.difference(target_expr_n.free_symbols))
    test_variables = shared_variables + extra_test_variables

    try:
        # Make the target expression into something numpy can evaluate, then
        # evaluate for the sample points. This *should* now be safe, but still
        # could be dangerous.
        f_target = sympy.lambdify(shared_variables, target_expr_n, lambdify_modules)
        eval_f_target = f_target(*domain_target)
        # Repeat for the test expression, to get an array containing
        # SAMPLE_POINTS values of test_expr_n to be compared to target_expr_n
        f_test = sympy.lambdify(test_variables, test_expr_n, lambdify_modules)
        eval_f_test = f_test(*domain_test)
    except OverflowError as e:
        raise NumericRangeException(e)

    # Output the function values at the sample points for debugging?
    # The actual domain arrays are probably too long to be worth ever printing.
    print("Target function value(s):")
    print(eval_f_target)
    print("Test function value(s):")
    print(eval_f_test)

    # Can we safely cast the values to 64 bit floats (2 x 64 bits for complex
    # values)? Real values that can be safely cast to 'float64' can always be
    # cast to 'complex128' safely as well, and since eval_f_test may be
    # complex, this errs on the side of caution.
    safe_datatype = "complex128"
    if not all([numpy.can_cast(a, safe_datatype, casting='safe')
                for a in [eval_f_target, eval_f_test]]):
        raise NumericRangeException("A function has values not representable by 64 bit floats!")

    # If we get any NaN's from the functions, things are looking bad:
    if not (numpy.all(numpy.isfinite(eval_f_target)) and
            numpy.all(numpy.isfinite(eval_f_test))):
        # If we have not tried using complex numbers, try using those:
        if not complexify:
            print("A function appears to be undefined in the interval [0,1). Trying again with complex values!")
            return numeric_equality(test_expr, target_expr, complexify=True)
        else:
            # If we have tried using complex numbers, we can't evaluate and
            # have gone badly wrong:
            raise NumericRangeException("A function in the test or target expression is undefined in the interval [0,1).")

    # Do some numeric sanity checking; 64-bit floating points are not perfect.
    numeric_range = numpy.abs(numpy.max(eval_f_target) - numpy.min(eval_f_target))
    # If the function is wildly different at these points, we probably can't
    # reliably conclude anything
    if numeric_range > 10E10:
        raise NumericRangeException("Too Large Range, numeric equality test unlikely to be accurate!")
    # If the function is the same at all of these points, we probably can't
    # conclude anything, unless the expected result is actually a constant
    # (no free symbols)
    if (numeric_range < 10E-10) and (len(target_expr.free_symbols) > 0):
        raise NumericRangeException("Too Small Range, numeric equality test unlikely to be accurate!")

    # Calculate the difference between the two arrays; if it is less than
    # 10E-8% of the largest value in the target function, the two things are
    # probably equal! This will cope perfectly with complex numbers too!
    diff = numpy.sum(numpy.abs(eval_f_target - eval_f_test))
    print("Numeric Equality Tested: absolute difference of {:.6E}".format(diff))
    if diff <= (1E-10 * numpy.max(numpy.abs(eval_f_target))):
        print("INFO: Adding known pair ({0}, {1})".format(target_expr, test_expr))
        KNOWN_PAIRS[(target_expr, test_expr)] = EqualityType.NUMERIC
        return True
    else:
        return False
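# Illustrative aside (added, not from the original source): the reason for the
# expand_log() calls above is that SymPy's two-argument log has no direct NumPy
# counterpart, while the expanded quotient-of-logs form lambdifies cleanly.
import sympy

x = sympy.Symbol('x')
expanded = sympy.expand_log(sympy.log(x, 10, evaluate=False))
print(expanded)  # log(x)/log(10)
f = sympy.lambdify(x, expanded, "numpy")
print(f(100.0))  # approximately 2.0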
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 8 11:41:19 2019

@author: Yao
"""
from scipy.stats import bernoulli

p = 1.0/2                # the target parameter to estimate
sample = bernoulli(p)
xs = sample.rvs(100)     # generate 100 samples
print(xs[:10])           # inspect the first 10 generated samples

import sympy
import numpy as np

x, p, z = sympy.symbols('x p z', positive=True)
phi = p ** x * (1-p) ** (1-x)
L = np.prod([phi.subs(x, i) for i in xs])
print(L)

logL = sympy.expand_log(sympy.log(L))
print(logL)

sol, = sympy.solve(sympy.diff(logL, p), p)  # solve for the MLE
print(sol)
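# Sanity check (added for illustration, not in the original script): for a
# Bernoulli likelihood the maximum-likelihood estimate is the sample mean, so
# the symbolic solution should agree with xs.mean().
print(float(sol), xs.mean())
assert abs(float(sol) - xs.mean()) < 1e-9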
        attempt = _try_expm1(looking_at)
        if looking_at == attempt:
            new_exp_terms.append(exp_term)
        else:
            done = True
            new_exp_terms.append(attempt)
    if not done:
        new_exp_terms.append(numsum)
    return e.func(*chain(new_exp_terms, non_num_other))


expm1_opt = ReplaceOptim(lambda e: e.is_Add, _expm1_value)


log1p_opt = ReplaceOptim(
    lambda e: isinstance(e, log),
    lambda l: expand_log(l.replace(
        log, lambda arg: log(arg.factor())
    )).replace(log(_u + 1), log1p(_u))
)


def create_expand_pow_optimization(limit):
    """ Creates an instance of :class:`ReplaceOptim` for expanding ``Pow``.

    The requirements for expansions are that the base needs to be a symbol
    and the exponent needs to be an integer (and be less than or equal to
    ``limit``).

    Parameters
    ==========

    limit : int
        The highest power which is expanded into multiplication.
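# Quick check of log1p_opt (added for illustration, not part of this module):
# it rewrites log(x + 1) into the numerically better-behaved log1p(x).
# This assumes the public optimize() helper from sympy.codegen.rewriting.
from sympy import Symbol, log
from sympy.codegen.rewriting import optimize, log1p_opt

x = Symbol('x')
print(optimize(log(x + 1), [log1p_opt]))    # log1p(x)
print(optimize(log(2*x + 1), [log1p_opt]))  # log1p(2*x)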
print(S.sqrt(2) * S.sqrt(3))

# expand brackets
print(S.expand((a + b)**2))

# system of equations
print(S.solve([x + y - 1, x - y - 3], [x, y]))

# summation
i = S.Symbol('i', integer=True)
fx = S.summation(x, (i, 1, 5)) + 10 * x - 15
print(S.solve(fx, x))

# solve a quadratic equation
print(S.solve(x**2 - 2, x))

f = S.solve(x * S.log(4, 3) - 2, x)
print(f)
# print(S.log(S.E))
# print(S.log(1000, 10))
print(S.expand_log(S.log(x * y), force=True))

# cancel a fraction
fx = (x**2 + 3 * x + 2) / (x**2 + x)
print(S.cancel(fx))

# trigonometric functions
print(S.solve([S.sin(x - y), S.cos(x + y)], [x, y]))

# definite integral
print(S.integrate(2 * x, (x, 0, 1)))

# indefinite integral
print(S.integrate((S.E**x + 2 * x), x))

# display the result as a decimal
print("exp is e sqrt x")
def calculate(config_str, internal_momenta): """ Calculate expression for configuration. """ latex = Latex() ################################################ ######## CONSTRUCT AMPLITUDE ########## ################################################ try: internal_momenta = internal_momenta.split() config, amp = make_amplitude(config_str, internal_momenta) except: raise ParseException("Error while parsing.") # Render latex.add_text("\\section*{Raw amplitude}") amp.latex_add(latex) if RENDER_ALL: latex.render() ################################################ ######## SIMPLIFY NUMERATOR ########## ################################################ amp.numer = amp.numer.expand() # Render latex.add_text("\\section*{Simplified numerator}") amp.latex_add(latex) if RENDER_ALL: latex.render() ################################################ ######## FEYNMAN'S TRICK ########## ################################################ denom_ = [] for arg in amp.denom.args: if type(arg) == sy.Pow: base, power = arg.args for _ in range(power): denom_.append(base) else: denom_.append(arg) n = len(denom_) amp.const *= gamma(n) zs = [sy.Symbol("{{ z_{{ {0} }} }}".format(i + 1)) for i in range(n)] amp.denom = sum([d * z for (d, z) in zip(denom_, zs)]).expand()**n for i, z in enumerate(zs): a = 0 b = 1 - sum(zs[:i]) amp.integrals_zs.append((z, a, b)) # Render latex.add_text("\\section*{Feynman parameterization}") latex.add_text("Here, we perform the following expansion:") latex.add_text("""$$ \\frac{1}{A_1} \\cdots \\frac{1}{A_n} = (n-1)! \\int\\limits_0^1 dz_1 \\int\\limits_0^{1-z_1} dz_2 \\cdots \\int\\limits_0^{1-z_1-\\cdots-z_{n-1}} dz_n \\frac{1}{(z_1 A_1 + \\cdots + z_n A_n)^n} $$""") latex.add_text( "We use this form because a single denominator raised to a power can be simplified with the Golden Integral." ) amp.latex_add(latex) if RENDER_ALL: latex.render() ################################################ ######## SPLIT NUMERATOR INTO TERMS ########## ################################################ amps = [] for numer_ in sy.Add.make_args(amp.numer): amp_ = amp.copy() amp_.numer = numer_ amps.append(amp_) # Render latex.add_text("\\section*{Expanded numerator}") latex.add_text( "We split the numerator into additive terms, to process individually. The following is a list of such terms:" ) for amp_ in amps: amp_.latex_add(latex) if RENDER_ALL: latex.render() ################################################ ######## EVAL. INTERNAL MOMENTA ########## ################################################ # TODO evaluate internal momenta integrals # At this point we stop with numer and denom and combine them into # one expression, `inner`, which is a sum of fractions. # TODO update this comment # Render an explanation latex.add_text("\\section*{{Golden Integral}}") latex.add_text("We resolve internal momentas with this transformation:") latex.add_text(""" $$\\int \\frac{d^d q}{(2 \pi)^d} \\frac{(q^2)^a}{(q^2 + D)^b} = i \\frac{\\Gamma (b-a-\\frac{1}{2}d) \\Gamma (a + \\frac{1}{2} d)}{(4 \\pi)^{d/2} \\Gamma(b) \\Gamma(\\frac{1}{2}d)} D^{-(b-a-d/2)}$$ """) latex.add_text( "After this section, all internal momenta should disappear. We will now resolve each term in a queue. Each term may produce additional terms, which are pushed to the back of the queue and resolved later." 
) integrated_amps = [] #for i, amp_ in enumerate(amps): i = 0 while len(amps) > 0: # Pop off one amplitude amp_ = amps[0] amps = amps[1:] i += 1 latex.add_text( "\\section*{{Evaluating internal momenta in this term ({0} terms left)}}" .format(len(amps))) amp_.latex_add(latex) if RENDER_ALL: latex.render() # Find an internal momenta if len(amp_.integrals_internal) > 0: (k, _, _) = amp_.integrals_internal[0] latex.add_text("Integrating over ${0}$\\\\".format(k)) # TODO cleanup weird namespacing k_down_dummy = Momentum(k, "DUMMY", 0) k_up_dummy = Momentum(k, "DUMMY", 1) k2_dummy = k_down_dummy * k_up_dummy # Decompose denominator # denom = denom_nopow ^ b denom_nopow, b = amp_.denom.args[0], amp_.denom.args[1] # Completing the square # Denominator is always quadratic in momenta G = denom_nopow # aliasing for convenience latex.add_text("Completing the square\\") A = G.collect(k2_dummy).coeff(k2_dummy) G = sy.simplify(G - A * k2_dummy) B_up = G.collect(k_down_dummy).coeff(k_down_dummy) B_down = G.collect(k_up_dummy).coeff(k_up_dummy) B = B_up + Amplitude.flip_variant(B_down) G = sy.simplify(G - B_up * k_down_dummy - B_down * k_up_dummy) C = G D = -(B_up * Amplitude.flip_variant(B_up)) / (4 * A) + C latex.add("A = " + latex.get(A)) latex.add("B = " + latex.get(B)) latex.add("C = " + latex.get(C)) """ The denominator is in the form: A k^2 + Bk + C We define a new variable, q, such that q = A^(1/2) k + B / (2 A^(1/2)) and replace k: k = q / A^(1/2) - B / (2A) d^d k = (1 / A^(1/2)) d^d q The substitution k -> q yields: A k^2 + Bk + C |-> q^2 + D where we define D = C - B^2 / (4A) """ # Prepare to replace numerator k_name = k q_name = "q_{0}".format(len(amp_.qs) + 1) # TODO sloppy af amp_.qs.append(q_name) q_up = Momentum(q_name, "DUMMY", 1) q_down = Momentum(q_name, "DUMMY", 0) any_name = sy.Wild("a") any_ind = sy.Wild("b") any_variant = sy.Wild("c") any_B_up = B_up.replace(Momentum(any_name, "DUMMY", any_variant), Momentum(any_name, any_ind, any_variant)) any_B_down = Amplitude.flip_variant(any_B_up) # Actually replace numerator amp_.numer = amp_.numer.replace( Momentum(k_name, any_ind, 1), Momentum(q_name, any_ind, 1) / (A**0.5) - any_B_up / (2 * A)) amp_.numer = amp_.numer.replace( Momentum(k_name, any_ind, 0), Momentum(q_name, any_ind, 0) / (A**0.5) - any_B_down / (2 * A)) amp_.numer = sy.simplify(amp_.numer) # Replace denominator amp_.denom = (q_down * q_up + D)**b # Replace integral # TODO replace integral amp_.integrals_internal[0] = (q_name, _, _) amp_.numer /= A**0.5 latex.add_text("After ${0} \\to {1}$ substitutions".format( k_name, q_name)) amp_.latex_add(latex) if RENDER_ALL: latex.render() # Expand the numerator into different amplitudes and multiply them # back in the end amps__ = [] for numer in sy.Add.make_args(amp_.numer.expand()): new_amp = amp_.copy() new_amp.numer = numer amps__.append(new_amp) # Render latex.add_text( "\\section*{{Expanding numerator into {0} term(s)}}".format( len(amps__))) for amp__ in amps__: amp__.latex_add(latex) if RENDER_ALL: latex.render() # Finish q_name integration for each amplitude separately for amp__ in amps__: prod = sy.Mul.make_args(amp__.numer) # Get k-vectors # TODO Figure out a way to collect qs nicely qs = [ q for q in prod if isinstance(q, Momentum) and q.args[0].name == q_name ] latex.add_text("\\subsection*{Integrating this term:}") amp__.latex_add(latex) latex.add_text( "Found {0} q-vector terms in the numerator.\\\\".format( len(qs))) # Simplify q vectors # Ward identity for odd tensors if len(qs) % 2 == 1: # TODO integral evaluates to 
zero latex.add_text("Term vanishes due to Ward identity\\\\") amp__.const = 0 continue if len(qs) > 0: # TODO convert higher-order even tensor integral to # scalar integral pass else: # TODO Assume a = 0 for now # This is obviously wrong in general but will be easier # to fix with a good test case a = 0 # Golden integral #c, a = term.as_coeff_exponent(sy.Symbol(k2)) c_ = sy.I * gamma(b - a - (4 - EPS) / 2) * gamma(a + (4 + EPS) / 2) c_ /= gamma(b) c_ /= (4 * sy.pi)**2 # Part of the Golden integral d^q factor c_ *= (2 * sy.pi)**4 amp__.const *= c_ amp__.denom = D**(b - a - 2 ) # TODO generalize to d-dimensions # with 2 -> D / 2 # Add to amps if nonzero amps.append(amp__) # Remove internal integral amp__.integrals_internal = amp__.integrals_internal[1:] # Render latex.add_text("Apply golden integral") amp__.latex_add(latex) if RENDER_ALL: latex.render() else: integrated_amps.append(amp_) ## Compress inners by term #inners_dict = {} #for (c_, expr_) in amp.inners: # if expr_ not in inners_dict: # inners_dict[expr_] = 0 # inners_dict[expr_] += c_ #amp.inners = [(v, k) for (k, v) in inners_dict.items()] #amp.latex_add2(latex) amps = integrated_amps latex.add_text("\\section*{Final amplitudes after momenta integration}") for amp_ in amps: amp_.latex_add(latex) if RENDER_ALL: latex.render() ########################################## ######## CUTOFF INTEGRATIONS ########## ########################################## latex.add_text("\\section*{Integrating cutoffs}") latex.add_text( "Here we integrate all $t$-variables, which represent the upper and lower cutoffs." ) uv = sy.Symbol(config["Lamb"]) integrated_amps = [] for amp_ in amps: latex.add_text("\\subsection*{Integrating this term}") amp_.latex_add(latex) expr_ = 1 / amp_.denom latex.add_text("Denominator only") latex.add(latex.get(expr_)) if RENDER_ALL: latex.render() # Integrate cutoffs for (t, a, b) in amp_.integrals_cutoffs: # Integrate w.r.t. cutoff expr_ = sy.integrate(expr_, (t, a, b)) latex.add_text("Integrating wrt ${0}$...".format(t)) latex.add(latex.get(expr_)) if RENDER_ALL: latex.render() # Collecting highest order term old_expr = expr_ while True: old_expr = expr_ expr_ = sy.expand_log(expr_, force=True) expr_ = get_highest_log_term(expr_, uv) expr_ = sy.simplify(expr_) if old_expr == expr_: # No more changes break latex.add_text("Keeping only highest order term...") latex.add(latex.get(expr_)) if RENDER_ALL: latex.render() amp_.numer *= expr_ amp_.denom = 1 amp_.integrals_cutoffs = [] integrated_amps.append(amp_) amps = integrated_amps ###################################### ######## Z INTEGRATIONS ########## ###################################### latex.add_text("\\section*{Integrating $z$-variables}") latex.add_text( "Here we integrate all $z$-variables, the Feynman parameters.") integrated_amps = [] for amp_ in amps: latex.add_text("\\subsection*{Integrating this term}") amp_.latex_add(latex) # Integrate cutoffs expr_ = amp_.numer for (z, a, b) in amp_.integrals_zs[::-1]: # Integrate w.r.t. 
cutoff # Rationalize decimal powers first, or sympy breaks expr_ = sy.nsimplify(expr_, tolerance=0.001, rational=True) expr_ = sy.integrate(expr_, (z, a, b)) latex.add_text("Integrating wrt ${0}$...".format(z)) latex.add(latex.get(expr_)) if RENDER_ALL: latex.render() amp_.numer = expr_ amp_.integrals_zs = [] integrated_amps.append(amp_) amps = integrated_amps latex.add_text( "\\section*{Final amplitudes after $z$-variable integration}") for amp_ in amps: amp_.latex_add(latex) if RENDER_ALL: latex.render() ################################################ ######## EVALUATE SPINS AND GAMMAS ########## ################################################ latex.add_text("\\section*{Evaluating spins and gamma matrices}") #latex.add_text("TODO jk do it yourself you slags, here's the sum, have fun. Don't forget to take traces/multiply by -1 for internal fermion loops.") latex.add_text("TODO. Here's the sum for now:") latex.add("\\; + \\;".join([amp_.get_latex(latex) for amp_ in amps])) if RENDER_ALL: latex.render() ################################################ ######## RENDER ########## ################################################ #amp.latex_add2(latex) latex.render() return "\\; + \\;".join([amp_.get_latex(latex) for amp_ in amps])