def solve_undetermined_coeffs(equ, coeffs, sym, **flags):
    """Solve an equation of the form p(x; a_1, ..., a_k) == q(x), where both
       p and q are univariate polynomials and p depends on k parameters.
       The result of this function is a dictionary with symbolic values of
       those parameters with respect to the coefficients in q.

       This function accepts both Equality instances and ordinary SymPy
       expressions. Specification of parameters and variable is obligatory
       for efficiency and simplicity reasons.

       >>> from sympy import *
       >>> a, b, c, x = symbols('a', 'b', 'c', 'x')

       >>> solve_undetermined_coeffs(Eq(2*a*x + a+b, x), [a, b], x)
       {a: 1/2, b: -1/2}

       >>> solve_undetermined_coeffs(Eq(a*c*x + a+b, x), [a, b], x)
       {a: 1/c, b: -1/c}

    """
    if isinstance(equ, Equality):
        # got equation, so move all the
        # terms to the left hand side
        equ = equ.lhs - equ.rhs

    system = collect(equ.expand(), sym, evaluate=False).values()

    if not any([ equ.has(sym) for equ in system ]):
        # consecutive powers in the input expressions have
        # been successfully collected, so solve the remaining
        # system using the Gaussian elimination algorithm
        return solve(system, *coeffs, **flags)
    else:
        return None # no solutions
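# A minimal standalone sketch (not from the original source) of the idea used
# above, assuming a recent SymPy: collect the residual p - q by powers of the
# variable with evaluate=False and solve the resulting linear system for the
# undetermined parameters.
from sympy import symbols, collect, solve, expand

a, b, x = symbols('a b x')
residual = expand((2*a*x + a + b) - x)            # p(x; a, b) - q(x)
by_power = collect(residual, x, evaluate=False)   # {x: 2*a - 1, 1: a + b}
print(solve(list(by_power.values()), [a, b]))     # {a: 1/2, b: -1/2}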
def _linear_neq_order1_type3(match_):
    r"""
    System of n first-order nonconstant-coefficient linear homogeneous differential equations

    .. math::
        X' = A(t) X

    where $X$ is the vector of $n$ dependent variables, $t$ is the independent
    variable, $X'$ is the first order differential of $X$ with respect to $t$
    and $A(t)$ is a $n \times n$ coefficient matrix.

    Let us define $B$ as antiderivative of coefficient matrix $A$:

    .. math::
        B(t) = \int A(t) dt

    If the system of ODEs defined above is such that its antiderivative $B(t)$
    commutes with $A(t)$ itself, then, the solution of the above system is given
    as:

    .. math::
        X = \exp(B(t)) C

    where $C$ is the vector of constants.

    """
    # Some parts of the code are repeated; this needs to be taken care of.
    # The constant vector obtained here could instead be constructed in the
    # match function itself.
    eq = match_['eq']
    func = match_['func']
    fc = match_['func_coeff']
    n = len(eq)
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    constants = numbered_symbols(prefix='C', cls=Symbol, start=1)

    # This needs to be modified in future so that fc is only of type Matrix
    M = -fc if type(fc) is Matrix else Matrix(n, n, lambda i, j: -fc[i, func[j], 0])

    Cvect = Matrix(list(next(constants) for _ in range(n)))

    # The code in the if block will be removed when it is made sure
    # that the code works without the statements in the if block.
    if "commutative_antiderivative" not in match_:
        B, is_commuting = _is_commutative_anti_derivative(M, t)

        # This course is subject to change
        if not is_commuting:
            return None
    else:
        B = match_['commutative_antiderivative']

    sol_vector = B.exp() * Cvect

    # The expand_mul is added to handle the solutions so that
    # the exponential terms are collected properly.
    sol_vector = [collect(expand_mul(s), ordered(s.atoms(exp)), exact=True)
                  for s in sol_vector]

    sol_dict = [Eq(func[i], sol_vector[i]) for i in range(n)]
    return sol_dict
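# Hedged illustrative sketch (not part of the original code): when the
# antiderivative B(t) = Integral(A(t)) commutes with A(t), the homogeneous
# solution is exp(B(t)) * C, as the docstring above states. A diagonal A(t)
# commutes trivially with its antiderivative, which makes a small check easy.
from sympy import symbols, Matrix, integrate, zeros

t, C1, C2 = symbols('t C1 C2')
A = Matrix([[t, 0], [0, 2*t]])                       # non-constant coefficient matrix
B = A.applyfunc(lambda entry: integrate(entry, t))   # antiderivative B(t)
assert (A*B - B*A) == zeros(2, 2)                    # commutativity holds here
X = B.exp() * Matrix([C1, C2])                       # X = exp(B(t)) C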
def match_2nd_hypergeometric(eq, func):
    x = func.args[0]
    df = func.diff(x)
    a3 = Wild('a3', exclude=[func, func.diff(x), func.diff(x, 2)])
    b3 = Wild('b3', exclude=[func, func.diff(x), func.diff(x, 2)])
    c3 = Wild('c3', exclude=[func, func.diff(x), func.diff(x, 2)])
    deq = a3*(func.diff(x, 2)) + b3*df + c3*func
    r = collect(eq, [func.diff(x, 2), func.diff(x), func]).match(deq)
    if r:
        if not all(val.is_polynomial() for val in r.values()):
            n, d = eq.as_numer_denom()
            eq = expand(n)
            r = collect(eq, [func.diff(x, 2), func.diff(x), func]).match(deq)

    if r and r[a3] != 0:
        A = cancel(r[b3]/r[a3])
        B = cancel(r[c3]/r[a3])
        return [A, B]
    else:
        return []
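# Hedged sketch (not from the original file) of the Wild-based matching above:
# collect the ODE on f'', f', f and read off the normalized coefficients
# A = b3/a3 and B = c3/a3.
from sympy import Function, symbols, Wild, collect, cancel

x = symbols('x')
f = Function('f')(x)
ode = x*f.diff(x, 2) + 2*f.diff(x) + x*f
a3, b3, c3 = [Wild(name, exclude=[f, f.diff(x), f.diff(x, 2)])
              for name in ('a3', 'b3', 'c3')]
pattern = a3*f.diff(x, 2) + b3*f.diff(x) + c3*f
r = collect(ode, [f.diff(x, 2), f.diff(x), f]).match(pattern)
print(cancel(r[b3]/r[a3]), cancel(r[c3]/r[a3]))   # 2/x 1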
def _linear_neq_order1_type2(match_):
    r"""
    System of n first-order constant-coefficient linear non-homogeneous differential equations

    .. math::
        X' = A X + b(t)

    where $X$ is the vector of $n$ dependent variables, $t$ is the independent
    variable, $X'$ is the first order differential of $X$ with respect to $t$,
    $A$ is a $n \times n$ constant coefficient matrix and $b(t)$ is the
    non-homogeneous term.

    The solution of the above system is:

    .. math::
        X = e^{A t} ( \int e^{- A t} b \,dt + C)

    where $C$ is the vector of constants.

    """
    eq = match_['eq']
    func = match_['func']
    fc = match_['func_coeff']
    b = match_['rhs']
    n = len(eq)
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    constants = numbered_symbols(prefix='C', cls=Symbol, start=1)

    # This needs to be modified in future so that fc is only of type Matrix
    M = -fc if type(fc) is Matrix else Matrix(n, n, lambda i, j: -fc[i, func[j], 0])

    P, J = matrix_exp_jordan_form(M, t)
    P = simplify(P)
    Cvect = Matrix(list(next(constants) for _ in range(n)))
    sol_vector = P * J * ((J.inv() * P.inv() * b).applyfunc(lambda x: Integral(x, t)) + Cvect)

    # sol_vector = sol_vector.applyfunc(_solsimp)

    # Removing the expand_mul can simplify the solutions of the ODEs
    # with symbolic coeffs. To be addressed in the future.
    sol_vector = [collect(expand_mul(s), sol_vector.atoms(exp), exact=True) for s in sol_vector]

    sol_dict = [Eq(func[i], sol_vector[i]) for i in range(n)]
    # sol_dict = [simpsol(eq) for eq in sol_dict]

    return sol_dict
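# Hedged sketch (not from the original code) of the variation-of-parameters
# formula in the docstring above, X = exp(A t) (Integral(exp(-A t) b) + C),
# written with plain SymPy matrices instead of the internal helpers.
from sympy import symbols, Matrix, integrate

t, C1, C2 = symbols('t C1 C2')
A = Matrix([[2, 0], [0, 3]])    # constant coefficient matrix
b = Matrix([t, 1])              # non-homogeneous term b(t)
C = Matrix([C1, C2])
eAt = (A*t).exp()               # exp(A t)
X = eAt * (((-A*t).exp() * b).applyfunc(lambda entry: integrate(entry, t)) + C)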
def _get_simplified_sol(sol, func, collectterms):
    r"""
    Helper function which collects the solution on collectterms. Ideally this
    should be handled by odesimp. It is used only when simplify is set to True
    in dsolve.

    The parameter ``collectterms`` is a list of tuples ``(i, reroot, imroot)``
    where ``i`` is the multiplicity of the root, ``reroot`` is the real part
    and ``imroot`` is the imaginary part.
    """
    f = func.func
    x = func.args[0]
    collectterms.sort(key=default_sort_key)
    collectterms.reverse()
    assert len(sol) == 1 and sol[0].lhs == f(x)
    sol = sol[0].rhs
    sol = expand_mul(sol)
    for i, reroot, imroot in collectterms:
        sol = collect(sol, x**i*exp(reroot*x)*sin(abs(imroot)*x))
        sol = collect(sol, x**i*exp(reroot*x)*cos(imroot*x))
    for i, reroot, imroot in collectterms:
        sol = collect(sol, x**i*exp(reroot*x))
    sol = powsimp(sol)
    return Eq(f(x), sol)
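# Hedged standalone illustration (not from the original code) of the collect
# calls above: terms sharing a repeated-root factor such as x**i*exp(r*x) are
# grouped so the arbitrary constants combine.
from sympy import symbols, collect, exp

x, C1, C2 = symbols('x C1 C2')
raw = C1*x*exp(2*x) + C2*x*exp(2*x)
print(collect(raw, x*exp(2*x)))   # (C1 + C2)*x*exp(2*x), up to printing order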
def _linear_neq_order1_type1(match_):
    r"""
    System of n first-order constant-coefficient linear homogeneous differential equations

    .. math::
        y'_k = a_{k1} y_1 + a_{k2} y_2 +...+ a_{kn} y_n; k = 1,2,...,n

    or that can be written as `\vec{y'} = A . \vec{y}`
    where `\vec{y}` is matrix of `y_k` for `k = 1,2,...n` and `A` is a `n \times n` matrix.

    These equations are equivalent to a first order homogeneous linear
    differential equation.

    The system of ODEs described above has a unique solution, namely:

    .. math ::
        \vec{y} = \exp(A t) C

    where $t$ is the independent variable and $C$ is a vector of n constants.
    These are constants from the integration.

    """
    eq = match_['eq']
    func = match_['func']
    fc = match_['func_coeff']
    n = len(eq)
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    constants = numbered_symbols(prefix='C', cls=Symbol, start=1)

    # This needs to be modified in future so that fc is only of type Matrix
    M = -fc if type(fc) is Matrix else Matrix(n, n, lambda i, j: -fc[i, func[j], 0])

    P, J = matrix_exp_jordan_form(M, t)
    P = simplify(P)
    Cvect = Matrix(list(next(constants) for _ in range(n)))
    sol_vector = P * (J * Cvect)

    gens = sol_vector.atoms(exp)
    sol_vector = [collect(s, ordered(gens), exact=True) for s in sol_vector]

    sol_dict = [Eq(func[i], sol_vector[i]) for i in range(n)]
    return sol_dict
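# Hedged sketch (not part of the original code): the constant-coefficient
# homogeneous case reduces to a matrix exponential, X = exp(A t) C; plain
# SymPy reproduces this without the internal matrix_exp_jordan_form helper.
from sympy import symbols, Matrix

t, C1, C2 = symbols('t C1 C2')
A = Matrix([[1, 1], [0, 1]])        # constant coefficient matrix (Jordan block)
X = (A*t).exp() * Matrix([C1, C2])  # [C1*exp(t) + C2*t*exp(t), C2*exp(t)]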
def _collect(expr, variables):
    """Collect terms with respect to a list of variables.

    This applies :func:`sympy.simplify.collect` to a :mod:`pymbolic` expression
    with respect to the iterable of names in *variables*.

    Returns a dictionary mapping variable names to terms.
    """
    from pymbolic.interop.sympy import PymbolicToSympyMapper, SympyToPymbolicMapper
    p2s = PymbolicToSympyMapper()
    s2p = SympyToPymbolicMapper()

    from sympy.simplify import collect
    sympy_variables = [sp.var(v) for v in variables]
    collect_result = collect(p2s(expr), sympy_variables, evaluate=False)

    result = {}
    for v in variables:
        try:
            result[v] = s2p(collect_result[sp.var(v)])
        except KeyError:
            continue

    return result
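# Hedged illustration (not from the original module) of the SymPy call that the
# wrapper above relies on: collect(..., evaluate=False) returns a dict mapping
# each collected symbol (and 1 for the leftover terms) to its coefficient.
from sympy import symbols, collect

x, y, a, b = symbols('x y a b')
print(collect(a*x + b*x + 3*y + 5, [x, y], evaluate=False))
# {x: a + b, y: 3, 1: 5}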
def test_noneuclidian_distance_calculation():
    from sympy import solve, sqrt

    GA_Printer.on()
    metric = '0 # #,# 0 #,# # 1'
    (X, Y, e) = MV.setup('X Y e', metric)

    assert str((X ^ Y) * (X ^ Y)) == '(X.Y)**2'

    L = X ^ Y ^ e
    B = L * e
    assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'
    Bsq = B * B
    assert str(Bsq) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
    Bsq = Bsq.scalar()
    assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'

    BeBr = B * e * B.rev()
    assert str(BeBr) == '((X.Y)*(-(X.Y) + 2*(X.e)*(Y.e)))*e'
    assert str(B * B) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
    assert str(L * L) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'

    (s, c, Binv, M, BigS, BigC, alpha, XdotY, Xdote, Ydote) = symbols('s c (1/B) M S C alpha (X.Y) (X.e) (Y.e)')

    Bhat = Binv * B
    R = c + s * Bhat
    assert str(R) == 'c + (1/B)*s*X^Y - (1/B)*(Y.e)*s*X^e + (1/B)*(X.e)*s*Y^e'

    Z = R * X * R.rev()
    Z.obj = expand(Z.obj)
    Z.obj = Z.obj.collect([Binv, s, c, XdotY])
    assert str(Z) == '((1/B)**2*(X.Y)**2*s**2 - 2*(1/B)**2*(X.Y)*(X.e)*(Y.e)*s**2 + 2*(1/B)*(X.Y)*c*s - 2*(1/B)*(X.e)*(Y.e)*c*s + c**2)*X + 2*(1/B)*(X.e)**2*c*s*Y + (2*(1/B)*(X.Y)*(X.e)*s*(-(1/B)*(X.Y)*s + 2*(1/B)*(X.e)*(Y.e)*s - c))*e'

    W = Z | Y
    # From this point forward all calculations are with sympy scalars
    W = W.scalar()
    assert str(W) == '(1/B)**2*(X.Y)**3*s**2 - 4*(1/B)**2*(X.Y)**2*(X.e)*(Y.e)*s**2 + 4*(1/B)**2*(X.Y)*(X.e)**2*(Y.e)**2*s**2 + 2*(1/B)*(X.Y)**2*c*s - 4*(1/B)*(X.Y)*(X.e)*(Y.e)*c*s + (X.Y)*c**2'
    W = expand(W)
    W = simplify(W)
    W = W.collect([s * Binv])

    M = 1 / Bsq
    W = W.subs(Binv**2, M)
    W = simplify(W)
    Bmag = sqrt(XdotY**2 - 2 * XdotY * Xdote * Ydote)
    W = W.collect([Binv * c * s, XdotY])

    # Double angle substitutions
    W = W.subs(2 * XdotY**2 - 4 * XdotY * Xdote * Ydote, 2 / (Binv**2))
    W = W.subs(2 * c * s, BigS)
    W = W.subs(c**2, (BigC + 1) / 2)
    W = W.subs(s**2, (BigC - 1) / 2)
    W = simplify(W)
    W = expand(W)
    W = W.subs(1 / Binv, Bmag)

    assert str(W) == '(X.Y)*C - (X.e)*(Y.e)*C + (X.e)*(Y.e) + S*sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'

    Wd = collect(W, [BigC, BigS], exact=True, evaluate=False)

    Wd_1 = Wd[S.One]
    Wd_C = Wd[BigC]
    Wd_S = Wd[BigS]

    assert str(Wd_1) == '(X.e)*(Y.e)'
    assert str(Wd_C) == '(X.Y) - (X.e)*(Y.e)'
    assert str(Wd_S) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
    assert str(Bmag) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'

    Wd_1 = Wd_1.subs(Bmag, 1 / Binv)
    Wd_C = Wd_C.subs(Bmag, 1 / Binv)
    Wd_S = Wd_S.subs(Bmag, 1 / Binv)

    lhs = Wd_1 + Wd_C * BigC
    rhs = -Wd_S * BigS
    lhs = lhs**2
    rhs = rhs**2
    W = expand(lhs - rhs)
    W = expand(W.subs(1 / Binv**2, Bmag**2))
    W = expand(W.subs(BigS**2, BigC**2 - 1))
    W = W.collect([BigC, BigC**2], evaluate=False)

    a = simplify(W[BigC**2])
    b = simplify(W[BigC])
    c = simplify(W[S.One])

    assert str(a) == '(X.e)**2*(Y.e)**2'
    assert str(b) == '2*(X.e)*(Y.e)*((X.Y) - (X.e)*(Y.e))'
    assert str(c) == '(X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e) + (X.e)**2*(Y.e)**2'

    x = Symbol('x')
    C = solve(a * x**2 + b * x + c, x)[0]
    assert str(expand(simplify(expand(C)))) == '-(X.Y)/((X.e)*(Y.e)) + 1'

    GA_Printer.off()
    return
def test_noneuclidian_distance_calculation():
    from sympy import solve, sqrt

    with GA_Printer():
        metric = '0 # #,# 0 #,# # 1'
        (X, Y, e) = MV.setup('X Y e', metric)

        assert str((X ^ Y)*(X ^ Y)) == '(X.Y)**2'

        L = X ^ Y ^ e
        B = L*e
        assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'
        Bsq = B*B
        assert str(Bsq) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
        Bsq = Bsq.scalar()
        assert str(B) == 'X^Y - (Y.e)*X^e + (X.e)*Y^e'

        BeBr = B*e*B.rev()
        assert str(BeBr) == '((X.Y)*(-(X.Y) + 2*(X.e)*(Y.e)))*e'
        assert str(B*B) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'
        assert str(L*L) == '(X.Y)*((X.Y) - 2*(X.e)*(Y.e))'

        (s, c, Binv, M, BigS, BigC, alpha, XdotY, Xdote, Ydote) = symbols('s c (1/B) M S C alpha (X.Y) (X.e) (Y.e)')

        Bhat = Binv*B
        R = c + s*Bhat
        assert str(R) == 'c + (1/B)*s*X^Y - (1/B)*(Y.e)*s*X^e + (1/B)*(X.e)*s*Y^e'

        Z = R*X*R.rev()
        Z.obj = expand(Z.obj)
        Z.obj = Z.obj.collect([Binv, s, c, XdotY])
        assert str(Z) == '((1/B)**2*(X.Y)**2*s**2 - 2*(1/B)**2*(X.Y)*(X.e)*(Y.e)*s**2 + 2*(1/B)*(X.Y)*c*s - 2*(1/B)*(X.e)*(Y.e)*c*s + c**2)*X + 2*(1/B)*(X.e)**2*c*s*Y + (2*(1/B)*(X.Y)*(X.e)*s*(-(1/B)*(X.Y)*s + 2*(1/B)*(X.e)*(Y.e)*s - c))*e'

        W = Z | Y
        # From this point forward all calculations are with sympy scalars
        W = W.scalar()
        assert str(W) == '(1/B)**2*(X.Y)**3*s**2 - 4*(1/B)**2*(X.Y)**2*(X.e)*(Y.e)*s**2 + 4*(1/B)**2*(X.Y)*(X.e)**2*(Y.e)**2*s**2 + 2*(1/B)*(X.Y)**2*c*s - 4*(1/B)*(X.Y)*(X.e)*(Y.e)*c*s + (X.Y)*c**2'
        W = expand(W)
        W = simplify(W)
        W = W.collect([s*Binv])

        M = 1/Bsq
        W = W.subs(Binv**2, M)
        W = simplify(W)
        Bmag = sqrt(XdotY**2 - 2*XdotY*Xdote*Ydote)
        W = W.collect([Binv*c*s, XdotY])

        #Double angle substitutions
        W = W.subs(2*XdotY**2 - 4*XdotY*Xdote*Ydote, 2/(Binv**2))
        W = W.subs(2*c*s, BigS)
        W = W.subs(c**2, (BigC + 1)/2)
        W = W.subs(s**2, (BigC - 1)/2)
        W = simplify(W)
        W = expand(W)
        W = W.subs(1/Binv, Bmag)

        assert str(W) == '(X.Y)*C - (X.e)*(Y.e)*C + (X.e)*(Y.e) + S*sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'

        Wd = collect(W, [BigC, BigS], exact=True, evaluate=False)

        Wd_1 = Wd[S.One]
        Wd_C = Wd[BigC]
        Wd_S = Wd[BigS]

        assert str(Wd_1) == '(X.e)*(Y.e)'
        assert str(Wd_C) == '(X.Y) - (X.e)*(Y.e)'
        assert str(Wd_S) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'
        assert str(Bmag) == 'sqrt((X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e))'

        Wd_1 = Wd_1.subs(Bmag, 1/Binv)
        Wd_C = Wd_C.subs(Bmag, 1/Binv)
        Wd_S = Wd_S.subs(Bmag, 1/Binv)

        lhs = Wd_1 + Wd_C*BigC
        rhs = -Wd_S*BigS
        lhs = lhs**2
        rhs = rhs**2
        W = expand(lhs - rhs)
        W = expand(W.subs(1/Binv**2, Bmag**2))
        W = expand(W.subs(BigS**2, BigC**2 - 1))
        W = W.collect([BigC, BigC**2], evaluate=False)

        a = simplify(W[BigC**2])
        b = simplify(W[BigC])
        c = simplify(W[S.One])

        assert str(a) == '(X.e)**2*(Y.e)**2'
        assert str(b) == '2*(X.e)*(Y.e)*((X.Y) - (X.e)*(Y.e))'
        assert str(c) == '(X.Y)**2 - 2*(X.Y)*(X.e)*(Y.e) + (X.e)**2*(Y.e)**2'

        x = Symbol('x')
        C = solve(a*x**2 + b*x + c, x)[0]
        assert str(expand(simplify(expand(C)))) == '-(X.Y)/((X.e)*(Y.e)) + 1'

    return
def _neq_linear_first_order_const_coeff_homogeneous(match_):
    r"""
    System of n first-order constant-coefficient linear homogeneous differential equations

    .. math::
        y'_k = a_{k1} y_1 + a_{k2} y_2 +...+ a_{kn} y_n; k = 1,2,...,n

    or that can be written as `\vec{y'} = A . \vec{y}`
    where `\vec{y}` is matrix of `y_k` for `k = 1,2,...n` and `A` is a `n \times n` matrix.

    These equations are equivalent to a first order homogeneous linear
    differential equation, so the general solution will contain `n` linearly
    independent parts and the solution will consist of some type of exponential
    functions. Assume `y = \vec{v} e^{rt}` is a solution of the system where
    `\vec{v}` is a vector of coefficients of `y_1,...,y_n`. Substituting `y` and
    `y' = r v e^{r t}` into the equation `\vec{y'} = A . \vec{y}`, we get

    .. math::
        r \vec{v} e^{rt} = A \vec{v} e^{rt}

    .. math::
        r \vec{v} = A \vec{v}

    where `r` comes out to be an eigenvalue of `A` and vector `\vec{v}` is the
    eigenvector of `A` corresponding to `r`. There are three possibilities for
    the eigenvalues of `A`

    - `n` distinct real eigenvalues
    - complex conjugate eigenvalues
    - eigenvalues with multiplicity `k`

    1. When all eigenvalues `r_1,..,r_n` are distinct with `n` different
    eigenvectors `v_1,...v_n` then the solution is given by

    .. math::
        \vec{y} = C_1 e^{r_1 t} \vec{v_1} + C_2 e^{r_2 t} \vec{v_2} +...+ C_n e^{r_n t} \vec{v_n}

    where `C_1,C_2,...,C_n` are arbitrary constants.

    2. When some eigenvalues are complex then in order to make the solution real,
    we take a linear combination: if `r = a + bi` has an eigenvector
    `\vec{v} = \vec{w_1} + i \vec{w_2}` then to obtain real-valued solutions to
    the system, replace the complex-valued solutions `e^{rx} \vec{v}` with the
    real-valued solution `e^{ax} (\vec{w_1} \cos(bx) - \vec{w_2} \sin(bx))` and
    for `r = a - bi` replace the solution `e^{-r x} \vec{v}` with
    `e^{ax} (\vec{w_1} \sin(bx) + \vec{w_2} \cos(bx))`

    3. If some eigenvalues are repeated, then we get fewer than `n` linearly
    independent eigenvectors; we miss some of the solutions and need to
    construct the missing ones. We do this via generalized eigenvectors, vectors
    which are not eigenvectors but are close enough that we can use them to write
    down the remaining solutions. For an eigenvalue `r` with eigenvector `\vec{w}`
    we obtain `\vec{w_2},...,\vec{w_k}` using

    .. math::
        (A - r I) . \vec{w_2} = \vec{w}

    .. math::
        (A - r I) . \vec{w_3} = \vec{w_2}

    .. math::
        \vdots

    .. math::
        (A - r I) . \vec{w_k} = \vec{w_{k-1}}

    Then the solutions to the system for the eigenspace are `e^{rt} [\vec{w}],
    e^{rt} [t \vec{w} + \vec{w_2}], e^{rt} [\frac{t^2}{2} \vec{w} + t \vec{w_2} + \vec{w_3}],
    ...,e^{rt} [\frac{t^{k-1}}{(k-1)!} \vec{w} + \frac{t^{k-2}}{(k-2)!} \vec{w_2} +...+ t \vec{w_{k-1}} + \vec{w_k}]`

    So, if `\vec{y_1},...,\vec{y_n}` are the `n` solutions obtained from the three
    categories of `A`, then the general solution to the system `\vec{y'} = A . \vec{y}` is

    .. math::
        \vec{y} = C_1 \vec{y_1} + C_2 \vec{y_2} + \cdots + C_n \vec{y_n}

    """
    eq = match_['eq']
    func = match_['func']
    fc = match_['func_coeff']
    n = len(eq)
    t = list(list(eq[0].atoms(Derivative))[0].atoms(Symbol))[0]
    constants = numbered_symbols(prefix='C', cls=Symbol, start=1)

    # This needs to be modified in future so that fc is only of type Matrix
    M = -fc if type(fc) is Matrix else Matrix(n, n, lambda i, j: -fc[i, func[j], 0])

    P, J = matrix_exp_jordan_form(M, t)
    P = simplify(P)
    Cvect = Matrix(list(next(constants) for _ in range(n)))
    sol_vector = P * (J * Cvect)

    sol_vector = [collect(s, ordered(J.atoms(exp)), exact=True) for s in sol_vector]

    sol_dict = [Eq(func[i], sol_vector[i]) for i in range(n)]
    return sol_dict
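# Hedged end-to-end sketch (not part of the original code): for a small
# constant-coefficient homogeneous system, dsolve produces solutions of the
# exponential/trigonometric form discussed in the docstring above.
from sympy import symbols, Function, Eq, dsolve

t = symbols('t')
x, y = symbols('x y', cls=Function)
eqs = [Eq(x(t).diff(t), 3*x(t) + 4*y(t)),
       Eq(y(t).diff(t), -4*x(t) + 3*y(t))]
print(dsolve(eqs))   # list of Eq objects built from exp, sin and cos terms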
def match_2nd_2F1_hypergeometric(I, k, sing_point, func):
    x = func.args[0]
    a = Wild("a")
    b = Wild("b")
    c = Wild("c")
    t = Wild("t")
    s = Wild("s")
    r = Wild("r")
    alpha = Wild("alpha")
    beta = Wild("beta")
    gamma = Wild("gamma")
    delta = Wild("delta")

    # I0 of the standard 2F1 equation.
    I0 = ((a-b+1)*(a-b-1)*x**2 + 2*((1-a-b)*c + 2*a*b)*x + c*(c-2))/(4*x**2*(x-1)**2)
    if sing_point != [0, 1]:
        # If the singular points are [0, 1] then we already have the standard equation.
        eqs = []
        sing_eqs = [-beta/alpha, -delta/gamma, (delta-beta)/(alpha-gamma)]
        # making equations for finding the Mobius transformation
        for i in range(3):
            if i < len(sing_point):
                eqs.append(Eq(sing_eqs[i], sing_point[i]))
            else:
                eqs.append(Eq(1/sing_eqs[i], 0))
        # solving the above equations for the Mobius transformation
        _beta = -alpha*sing_point[0]
        _delta = -gamma*sing_point[1]
        _gamma = alpha
        if len(sing_point) == 3:
            _gamma = (_beta + sing_point[2]*alpha)/(sing_point[2] - sing_point[1])
        mob = (alpha*x + beta)/(gamma*x + delta)
        mob = mob.subs(beta, _beta)
        mob = mob.subs(delta, _delta)
        mob = mob.subs(gamma, _gamma)
        mob = cancel(mob)
        t = (beta - delta*x)/(gamma*x - alpha)
        t = cancel(((t.subs(beta, _beta)).subs(delta, _delta)).subs(gamma, _gamma))
    else:
        mob = x
        t = x

    # applying the Mobius transformation in I to make it into I0.
    I = I.subs(x, t)
    I = I*(t.diff(x))**2
    I = factor(I)
    dict_I = {x**2: 0, x: 0, 1: 0}
    I0_num, I0_dem = I0.as_numer_denom()
    # collecting coeff of (x**2, x) of the standard equation,
    # substituting (a-b) = s, (a+b) = r
    dict_I0 = {x**2: s**2 - 1, x: (2*(1-r)*c + (r+s)*(r-s)), 1: c*(c-2)}
    # collecting coeff of (x**2, x) from I0 of the given equation.
    dict_I.update(collect(expand(cancel(I*I0_dem)), [x**2, x], evaluate=False))

    eqs = []
    # We are comparing the coeff of powers of different x, for finding the
    # values of the parameters of the standard equation.
    for key in [x**2, x, 1]:
        eqs.append(Eq(dict_I[key], dict_I0[key]))

    # We can have many possible roots for the equation.
    # I am selecting the root on the basis that when we have the standard
    # equation eq = x*(x-1)*f(x).diff(x, 2) + ((a+b+1)*x-c)*f(x).diff(x) + a*b*f(x)
    # then the roots should be a, b, c.
    _c = 1 - factor(sqrt(1+eqs[2].lhs))
    if not _c.has(Symbol):
        _c = min(list(roots(eqs[2], c)))
    _s = factor(sqrt(eqs[0].lhs + 1))
    _r = _c - factor(sqrt(_c**2 + _s**2 + eqs[1].lhs - 2*_c))
    _a = (_r + _s)/2
    _b = (_r - _s)/2
    rn = {'a': simplify(_a), 'b': simplify(_b), 'c': simplify(_c), 'k': k, 'mobius': mob, 'type': "2F1"}
    return rn
def linodesolve(A, t, b=None, B=None, type="auto", doit=False):
    r"""
    System of n linear first-order differential equations

    Explanation
    ===========

    This solver solves the system of ODEs of the following form:

    .. math::
        X'(t) = A(t) X(t) + b(t)

    Here, $A(t)$ is the coefficient matrix, $X(t)$ is the vector of n dependent
    variables, $b(t)$ is the non-homogeneous term and $X'(t)$ is the derivative
    of $X(t)$.

    Depending on the properties of $A(t)$ and $b(t)$, this solver evaluates
    the solution differently.

    When $A(t)$ is a constant coefficient matrix and $b(t)$ is a zero vector
    i.e. the system is homogeneous, the solution is:

    .. math::
        X(t) = \exp(A t) C

    Here, $C$ is a vector of constants and $A$ is the constant coefficient matrix.

    When $A(t)$ is a constant coefficient matrix and $b(t)$ is non-zero
    i.e. the system is non-homogeneous, the solution is:

    .. math::
        X(t) = e^{A t} ( \int e^{- A t} b \,dt + C)

    When $A(t)$ is a coefficient matrix such that it is commutative with its
    antiderivative $B(t)$ and $b(t)$ is a zero vector i.e. the system is
    homogeneous, the solution is:

    .. math::
        X(t) = \exp(B(t)) C

    When $A(t)$ is commutative with its antiderivative $B(t)$ and $b(t)$ is
    non-zero i.e. the system is non-homogeneous, the solution is:

    .. math::
        X(t) = e^{B(t)} ( \int e^{-B(t)} b(t) \,dt + C)

    The final solution is the general solution for all the four equations since
    a constant coefficient matrix is always commutative with its antiderivative.

    Parameters
    ==========

    A : Matrix
        Coefficient matrix of the system of linear first order ODEs.
    t : Symbol
        Independent variable in the system of ODEs.
    b : Matrix or None
        Non-homogeneous term in the system of ODEs. If None is passed,
        a homogeneous system of ODEs is assumed.
    B : Matrix or None
        Antiderivative of the coefficient matrix. If the antiderivative
        is not passed and the solution requires the term, then the solver
        would compute it internally.
    type : String
        Type of the system of ODEs passed. Depending on the type, the
        solution is evaluated. The type values allowed and the corresponding
        system it solves are: "type1" for constant coefficient homogeneous,
        "type2" for constant coefficient non-homogeneous, "type3" for
        non-constant coefficient homogeneous and "type4" for non-constant
        coefficient non-homogeneous. The default value is "auto" which will
        let the solver decide the correct type of the system passed.
    doit : Boolean
        Evaluate the solution if True, default value is False.

    Examples
    ========

    To solve the system of ODEs using this function directly, several things
    must be done in the right order. Wrong inputs to the function will lead to
    incorrect results.

    >>> from sympy import symbols, Function, Eq
    >>> from sympy.solvers.ode.systems import canonical_odes, linear_ode_to_matrix, linodesolve, linodesolve_type
    >>> from sympy.solvers.ode.subscheck import checkodesol
    >>> f, g = symbols("f, g", cls=Function)
    >>> x, a = symbols("x, a")
    >>> funcs = [f(x), g(x)]
    >>> eqs = [Eq(f(x).diff(x) - f(x), a*g(x) + 1), Eq(g(x).diff(x) + g(x), a*f(x))]

    Here, it is important to note that before we derive the coefficient matrix,
    it is important to get the system of ODEs into the desired form. For that we
    will use :obj:`sympy.solvers.ode.systems.canonical_odes()`.

    >>> eqs = canonical_odes(eqs, funcs, x)
    >>> eqs
    [[Eq(Derivative(f(x), x), a*g(x) + f(x) + 1), Eq(Derivative(g(x), x), a*f(x) - g(x))]]

    Now, we will use :obj:`sympy.solvers.ode.systems.linear_ode_to_matrix()` to
    get the coefficient matrix and the non-homogeneous term if it is there.

    >>> eqs = eqs[0]
    >>> (A1, A0), b = linear_ode_to_matrix(eqs, funcs, x, 1)
    >>> A = A0

    We have the coefficient matrices and the non-homogeneous term ready. Now, we
    can use :obj:`sympy.solvers.ode.systems.linodesolve_type()` to get the
    information for the system of ODEs to finally pass it to the solver.

    >>> system_info = linodesolve_type(A, x, b=b)
    >>> sol_vector = linodesolve(A, x, b=b, B=system_info['antiderivative'], type=system_info['type'])

    Now, we can check whether the solution is correct by using
    :obj:`sympy.solvers.ode.checkodesol()`

    >>> sol = [Eq(f, s) for f, s in zip(funcs, sol_vector)]
    >>> checkodesol(eqs, sol)
    (True, [0, 0])

    We can also use the doit method to evaluate the solutions passed by the function.

    >>> sol_vector_evaluated = linodesolve(A, x, b=b, type="type2", doit=True)

    Now, we will look at a system of ODEs which is non-constant.

    >>> eqs = [Eq(f(x).diff(x), f(x) + x*g(x)), Eq(g(x).diff(x), -x*f(x) + g(x))]

    The system defined above is already in the desired form, so we don't have to
    convert it.

    >>> (A1, A0), b = linear_ode_to_matrix(eqs, funcs, x, 1)
    >>> A = A0

    A user can also pass the commutative antiderivative required for type3 and
    type4 systems of ODEs. Passing an incorrect one will lead to incorrect results.
    If the coefficient matrix is not commutative with its antiderivative, then
    :obj:`sympy.solvers.ode.systems.linodesolve_type()` raises a
    NotImplementedError. If it does have a commutative antiderivative, then the
    function just returns the information about the system.

    >>> system_info = linodesolve_type(A, x, b=b)

    Now, we can pass the antiderivative as an argument to get the solution. If
    the system information is not passed, then the solver will compute the
    required arguments internally.

    >>> sol_vector = linodesolve(A, x, b=b)

    Once again, we can verify the solution obtained.

    >>> sol = [Eq(f, s) for f, s in zip(funcs, sol_vector)]
    >>> checkodesol(eqs, sol)
    (True, [0, 0])

    Returns
    =======

    List

    Raises
    ======

    ValueError
        This error is raised when the coefficient matrix, non-homogeneous term
        or the antiderivative, if passed, aren't a matrix or
        don't have correct dimensions
    NonSquareMatrixError
        When the coefficient matrix or its antiderivative, if passed, isn't a
        square matrix
    NotImplementedError
        If the coefficient matrix doesn't have a commutative antiderivative

    See Also
    ========

    linear_ode_to_matrix: Coefficient matrix computation function
    canonical_odes: System of ODEs representation change
    linodesolve_type: Getting information about systems of ODEs to pass in this solver

    """
    if not isinstance(A, MatrixBase):
        raise ValueError(filldedent('''\
            The coefficients of the system of ODEs should be of type Matrix
        '''))

    if not A.is_square:
        raise NonSquareMatrixError(filldedent('''\
            The coefficient matrix must be a square
        '''))

    if b is not None:
        if not isinstance(b, MatrixBase):
            raise ValueError(filldedent('''\
                The non-homogeneous terms of the system of ODEs should be of type Matrix
            '''))

        if A.rows != b.rows:
            raise ValueError(filldedent('''\
                The system of ODEs should have the same number of non-homogeneous terms and the number of equations
            '''))

    if B is not None:
        if not isinstance(B, MatrixBase):
            raise ValueError(filldedent('''\
                The antiderivative of coefficients of the system of ODEs should be of type Matrix
            '''))

        if not B.is_square:
            raise NonSquareMatrixError(filldedent('''\
                The antiderivative of the coefficient matrix must be a square
            '''))

        if A.rows != B.rows:
            raise ValueError(filldedent('''\
                The coefficient matrix and its antiderivative should have same dimensions
            '''))

    if not any(type == "type{}".format(i) for i in range(1, 5)) and not type == "auto":
        raise ValueError(filldedent('''\
            The input type should be a valid one
        '''))

    n = A.rows

    # constants = numbered_symbols(prefix='C', cls=Dummy, start=const_idx+1)
    Cvect = Matrix(list(Dummy() for _ in range(n)))

    if (type == "type2" or type == "type4") and b is None:
        b = zeros(n, 1)

    if type == "auto":
        system_info = linodesolve_type(A, t, b=b)
        type = system_info["type"]
        B = system_info["antiderivative"]

    if type == "type1" or type == "type2":
        P, J = matrix_exp_jordan_form(A, t)
        P = simplify(P)

        if type == "type1":
            sol_vector = P * (J * Cvect)
        else:
            sol_vector = P * J * ((J.inv() * P.inv() * b).applyfunc(lambda x: Integral(x, t)) + Cvect)
    else:
        if B is None:
            B, _ = _is_commutative_anti_derivative(A, t)

        if type == "type3":
            sol_vector = B.exp() * Cvect
        else:
            sol_vector = B.exp() * (((-B).exp() * b).applyfunc(lambda x: Integral(x, t)) + Cvect)

    gens = sol_vector.atoms(exp)

    if type != "type1":
        sol_vector = [expand_mul(s) for s in sol_vector]

    sol_vector = [collect(s, ordered(gens), exact=True) for s in sol_vector]

    if doit:
        sol_vector = [s.doit() for s in sol_vector]

    return sol_vector
def solve(eq, syms, simplified=True):
    """Solves univariate polynomial equations and linear systems with
       arbitrary symbolic coefficients. This function is just a wrapper
       which analyzes its arguments and executes more specific functions
       like 'roots' or 'solve_linear_system' etc.

       On input you have to specify an equation or a set of equations
       (in this case via a list) using '==' pretty syntax or via ordinary
       expressions, and a list of variables.

       On output you will get a list of solutions in the univariate case
       or a dictionary with variables as keys and solutions as values in
       the other case. If there were variables which can be assigned an
       arbitrary value, then they will be avoided in the output.

       Optionally it is possible to have the solutions preprocessed using
       simplification routines if the 'simplified' flag is set.

       To solve recurrence relations or differential equations use the
       'rsolve' or 'dsolve' functions respectively, which are also wrappers
       combining sets of problem specific methods.

       >>> from sympy import *
       >>> x, y, a = symbols('xya')

       >>> r = solve(x**2 - 3*x + 2, x)
       >>> r.sort()
       >>> print r
       [1, 2]

       >>> solve(x**2 == a, x)
       [-a**(1/2), a**(1/2)]

       >>> solve(x**4 == 1, x)
       [I, 1, -1, -I]

       >>> solve([x + 5*y == 2, -3*x + 6*y == 15], [x, y])
       {y: 1, x: -3}

    """
    if isinstance(syms, Basic):
        syms = [syms]

    if not isinstance(eq, list):
        if isinstance(eq, Equality):
            # got equation, so move all the
            # terms to the left hand side
            equ = eq.lhs - eq.rhs
        else:
            equ = Basic.sympify(eq)

        try:
            # 'roots' method will return all possible complex
            # solutions, however we have to remove duplicates
            solutions = list(set(roots(equ, syms[0])))
        except PolynomialException:
            raise "Not a polynomial equation. Can't solve it, yet."

        if simplified == True:
            return [ simplify(s) for s in solutions ]
        else:
            return solutions
    else:
        if eq == []:
            return {}
        else:
            # augmented matrix
            n, m = len(eq), len(syms)
            matrix = zeronm(n, m+1)

            index = {}

            for i in range(0, m):
                index[syms[i]] = i

            for i in range(0, n):
                if isinstance(eq[i], Equality):
                    # got equation, so move all the
                    # terms to the left hand side
                    equ = eq[i].lhs - eq[i].rhs
                else:
                    equ = Basic.sympify(eq[i])

                content = collect(equ.expand(), syms, evaluate=False)

                for var, expr in content.iteritems():
                    if isinstance(var, Symbol) and not expr.has(*syms):
                        matrix[i, index[var]] = expr
                    elif isinstance(var, Basic.One) and not expr.has(*syms):
                        matrix[i, m] = -expr
                    else:
                        raise "Not a linear system. Can't solve it, yet."
            else:
                return solve_linear_system(matrix, syms, simplified)