def test_vector_simplify():
    """Vector expressions should simplify component-wise."""
    A, s, k, m = symbols("A, s, k, m")

    v1 = (1 / a + 1 / b) * i
    assert (v1 & i) != (a + b) / (a * b)
    v1 = simplify(v1)
    assert (v1 & i) == (a + b) / (a * b)
    assert v1.simplify() == simplify(v1)

    v2 = simplify((A ** 2 * s ** 4 / (4 * pi * k * m ** 3)) * i)
    assert (v2 & i) == (A ** 2 * s ** 4 / (4 * pi * k * m ** 3))

    v3 = simplify(((4 + 4 * a - 2 * (2 + 2 * a)) / (2 + 2 * a)) * i)
    assert (v3 & i) == 0

    v4 = simplify(((-4 * a * b ** 2 - 2 * b ** 3 - 2 * a ** 2 * b) / (a + b) ** 2) * i)
    assert (v4 & i) == -2 * b

    # trigsimp should act on each vector component as well.
    v = (sin(a) + cos(a)) ** 2 * i - j
    assert trigsimp(v) == (2 * sin(a + pi / 4) ** 2) * i + (-1) * j
    assert trigsimp(v) == v.trigsimp()

    assert simplify(Vector.zero) == Vector.zero
def test_vector_simplify():
    """Check simplify/trigsimp on Vector instances."""
    A, s, k, m = symbols('A, s, k, m')

    expr = (1 / a + 1 / b) * i
    assert (expr & i) != (a + b) / (a * b)
    expr = simplify(expr)
    assert (expr & i) == (a + b) / (a * b)
    assert expr.simplify() == simplify(expr)

    # (vector, expected i-component after simplification) pairs
    cases = [
        ((A**2 * s**4 / (4 * pi * k * m**3)) * i,
         A**2 * s**4 / (4 * pi * k * m**3)),
        (((4 + 4 * a - 2 * (2 + 2 * a)) / (2 + 2 * a)) * i, 0),
        (((-4 * a * b**2 - 2 * b**3 - 2 * a**2 * b) / (a + b)**2) * i,
         -2 * b),
    ]
    for vec, expected in cases:
        assert (simplify(vec) & i) == expected

    v = (sin(a) + cos(a))**2 * i - j
    assert trigsimp(v) == (2 * sin(a + pi / 4)**2) * i + (-1) * j
    assert trigsimp(v) == v.trigsimp()

    assert simplify(Vector.zero) == Vector.zero
def test_functional_diffgeom_ch4():
    """One-form fields and differentials on the R2 manifold (ch. 4 cases)."""
    x0, y0, theta0 = symbols('x0, y0, theta0', real=True)
    x, y, r, theta = symbols('x, y, r, theta', real=True)
    r0 = symbols('r0', positive=True)
    f, b1, b2 = Function('f'), Function('b1'), Function('b2')
    p_r = R2_r.point([x0, y0])
    p_p = R2_p.point([r0, theta0])

    # Generic one-form field evaluated on the coordinate vector fields.
    f_field = b1(R2.x, R2.y)*R2.dx + b2(R2.x, R2.y)*R2.dy
    assert f_field(R2.e_x)(p_r) == b1(x0, y0)
    assert f_field(R2.e_y)(p_r) == b2(x0, y0)

    # Differential of a scalar field, rectangular coordinates.
    s_field_r = f(R2.x, R2.y)
    df = Differential(s_field_r)
    assert df(R2.e_x)(p_r).doit() == Derivative(f(x0, y0), x0)
    assert df(R2.e_y)(p_r).doit() == Derivative(f(x0, y0), y0)

    # ... and in polar coordinates.
    s_field_p = f(R2.r, R2.theta)
    df = Differential(s_field_p)
    assert trigsimp(df(R2.e_x)(p_p).doit()) == (
        cos(theta0)*Derivative(f(r0, theta0), r0) -
        sin(theta0)*Derivative(f(r0, theta0), theta0)/r0)
    assert trigsimp(df(R2.e_y)(p_p).doit()) == (
        sin(theta0)*Derivative(f(r0, theta0), r0) +
        cos(theta0)*Derivative(f(r0, theta0), theta0)/r0)

    assert R2.dx(R2.e_x)(p_r) == 1
    assert R2.dx(R2.e_y)(p_r) == 0

    circ = -R2.y*R2.e_x + R2.x*R2.e_y
    assert R2.dx(circ)(p_r).doit() == -y0
    assert R2.dy(circ)(p_r) == x0
    assert R2.dr(circ)(p_r) == 0
    assert simplify(R2.dtheta(circ)(p_r)) == 1
    assert (circ - R2.e_theta)(s_field_r)(p_r) == 0
def _solve_variation_of_parameters(eq, func, roots, homogen_sol, order, match_obj, simplify_flag=True):
    r"""
    Helper function for the method of variation of parameters and
    nonhomogeneous euler eq.

    See the
    :py:meth:`~sympy.solvers.ode.single.NthLinearConstantCoeffVariationOfParameters`
    docstring for more information on this method.

    The parameter ``match_obj`` should be a dictionary that has the following
    keys:

    ``list``
        A list of solutions to the homogeneous equation.

    ``sol``
        The general solution.
    """
    f = func.func
    x = func.args[0]
    r = match_obj
    psol = 0
    # Wronskian of the homogeneous solutions; nonzero iff they are
    # linearly independent.
    wr = wronskian(roots, x)

    if simplify_flag:
        # We need much better simplification for
        # some ODEs. See issue 4662, for example.
        wr = simplify(wr)
        # To reduce commonly occurring sin(x)**2 + cos(x)**2 to 1
        wr = trigsimp(wr, deep=True, recursive=True)
    if not wr:
        # The wronskian will be 0 iff the solutions are not linearly
        # independent.
        raise NotImplementedError("Cannot find " + str(order) +
        " solutions to the homogeneous equation necessary to apply " +
        "variation of parameters to " + str(eq) + " (Wronskian == 0)")
    if len(roots) != order:
        raise NotImplementedError("Cannot find " + str(order) +
        " solutions to the homogeneous equation necessary to apply " +
        "variation of parameters to " +
        str(eq) + " (number of terms != order)")

    # Build the particular solution as the standard alternating sum of
    # integrals of reduced Wronskians (each term drops one root).
    negoneterm = (-1)**(order)
    for i in roots:
        psol += negoneterm * Integral(
            wronskian([sol for sol in roots if sol != i], x) *
            r[-1] / wr, x) * i / r[order]
        negoneterm *= -1

    if simplify_flag:
        psol = simplify(psol)
        psol = trigsimp(psol, deep=True)
    return Eq(f(x), homogen_sol.rhs + psol)
def test_R2():
    """Consistency of the predefined R2 manifold's coordinate systems."""
    x0, y0, r0, theta0 = symbols('x0, y0, r0, theta0', real=True)
    point_r = R2_r.point([x0, y0])
    point_p = R2_p.point([r0, theta0])

    # r**2 = x**2 + y**2
    metric = R2.r**2 - R2.x**2 - R2.y**2
    assert metric(point_r) == 0
    assert trigsimp(metric(point_p)) == 0
    assert trigsimp(R2.e_r(R2.x**2 + R2.y**2)(point_p).doit()) == 2*r0

    # polar->rect->polar == Id
    a, b = symbols('a b', positive=True)
    m = Matrix([[a], [b]])

    #TODO assert m == R2_r.coord_tuple_transform_to(R2_p, R2_p.coord_tuple_transform_to(R2_r, [a, b])).applyfunc(simplify)
    roundtrip = R2_p.coord_tuple_transform_to(
        R2_r, R2_r.coord_tuple_transform_to(R2_p, m))
    assert m == roundtrip.applyfunc(simplify)
def express_coordinates(self, coordinate_system):
    """
    Returns the Cartesian/rectangular coordinates of this
    Point wrt the origin of the given CoordSysCartesian
    instance.

    Parameters
    ==========

    coordinate_system : CoordSysCartesian
        The coordinate system to express the coordinates of
        this Point in.

    Examples
    ========

    >>> from sympy.vector import Point, CoordSysCartesian
    >>> N = CoordSysCartesian('N')
    >>> p1 = N.origin.locate_new('p1', 10 * N.i)
    >>> p2 = p1.locate_new('p2', 5 * N.j)
    >>> p2.express_coordinates(N)
    (10, 5, 0)

    """
    # Position vector of this point wrt the target system's origin,
    # re-expressed in that system and trig-simplified.
    pos_vect = trigsimp(express(self.position_wrt(coordinate_system.origin),
                                coordinate_system, variables=True))
    # Project onto each base vector to get the coordinates.
    return tuple(pos_vect.dot(base)
                 for base in coordinate_system.base_vectors())
def express_coordinates(self, coordinate_system):
    """
    Returns the Cartesian/rectangular coordinates of this
    Point wrt the origin of the given CoordSysCartesian
    instance.

    Parameters
    ==========

    coordinate_system : CoordSysCartesian
        The coordinate system to express the coordinates of
        this Point in.

    Examples
    ========

    >>> from sympy.vector import Point, CoordSysCartesian
    >>> N = CoordSysCartesian('N')
    >>> p1 = N.origin.locate_new('p1', 10 * N.i)
    >>> p2 = p1.locate_new('p2', 5 * N.j)
    >>> p2.express_coordinates(N)
    (10, 5, 0)

    """
    # Determine the position vector
    pos_vect = self.position_wrt(coordinate_system.origin)
    # Express it in the given coordinate system.
    # (PEP 8: no spaces around '=' in a keyword argument.)
    pos_vect = trigsimp(express(pos_vect, coordinate_system,
                                variables=True))
    coords = []
    for vect in coordinate_system.base_vectors():
        coords.append(pos_vect.dot(vect))
    return tuple(coords)
def _eval_rewrite_as_cos(self, n, m, theta, phi):
    # This method can be expensive due to extensive use of simplification!
    from sympy.simplify import simplify, trigsimp
    # TODO: Make sure n \in N
    # TODO: Assert |m| <= n otherwise we should return 0
    expanded = self.expand(func=True)
    # We can drop Abs because of the range of theta.
    simplified = simplify(expanded).xreplace({Abs(sin(theta)): sin(theta)})
    return simplify(trigsimp(simplified))
def _eval_rewrite_as_cos(self, n, m, theta, phi):
    """Rewrite this spherical-harmonic-style expression via expansion,
    trig simplification and removal of redundant Abs."""
    # This method can be expensive due to extensive use of simplification!
    from sympy.simplify import simplify, trigsimp
    # TODO: Make sure n \in N
    # TODO: Assert |m| <= n otherwise we should return 0
    term = simplify(self.expand(func=True))
    # We can do this because of the range of theta
    # (PEP 8: space after ':' in the dict literal)
    term = term.xreplace({Abs(sin(theta)): sin(theta)})
    return simplify(trigsimp(term))
def __contains__(self, o):
    """Membership test: a Point on the curve, or this same Ellipse."""
    if isinstance(o, Point):
        x, y = Dummy('x', real=True), Dummy('y', real=True)
        # Substitute the point into the implicit equation; on the curve
        # the residue simplifies to exactly S.Zero (a singleton).
        residue = self.equation(x, y).subs({x: o.x, y: o.y})
        return trigsimp(simplify(residue)) is S.Zero
    if isinstance(o, Ellipse):
        return self == o
    return False
def __contains__(self, o):
    """Return True if ``o`` is a Point satisfying this curve's equation,
    or an Ellipse equal to this one."""
    if isinstance(o, Point):
        x = Basic.Symbol('x', real=True)
        y = Basic.Symbol('y', real=True)
        # BUG FIX: pass the Symbol objects themselves, not the strings
        # 'x'/'y'.  With strings, equation() builds its own plain symbols,
        # which are not the same objects as the real=True symbols used as
        # subs_dict keys, so the substitution below could never fire.
        res = self.equation(x, y).subs_dict({x: o[0], y: o[1]})
        res = trigsimp(simplify(res))
        return res == 0
    elif isinstance(o, Ellipse):
        return (self == o)
    return False
def __contains__(self, o):
    """Point-on-curve / same-ellipse membership test."""
    if isinstance(o, Ellipse):
        return self == o
    if not isinstance(o, Point):
        return False
    x = C.Dummy("x", real=True)
    y = C.Dummy("y", real=True)
    # A point lies on the curve iff the equation's residue is exactly zero.
    res = self.equation(x, y).subs({x: o[0], y: o[1]})
    return trigsimp(simplify(res)) is S.Zero
def __contains__(self, o):
    """Test whether ``o`` (a Point or an Ellipse) belongs to this curve."""
    if isinstance(o, Point):
        u = C.Symbol('x', real=True, dummy=True)
        v = C.Symbol('y', real=True, dummy=True)
        substitutions = {u: o[0], v: o[1]}
        res = trigsimp(simplify(self.equation(u, v).subs(substitutions)))
        return res == 0
    elif isinstance(o, Ellipse):
        return self == o
    return False
def __contains__(self, o):
    """Membership: Point satisfying the equation, or an equal Ellipse."""
    if isinstance(o, Point):
        dx = C.Dummy('x', real=True)
        dy = C.Dummy('y', real=True)
        value = trigsimp(
            simplify(self.equation(dx, dy).subs({dx: o[0], dy: o[1]})))
        return value == 0
    return self == o if isinstance(o, Ellipse) else False
def __contains__(self, o):
    """Test ``o in self`` for a Point or another Ellipse."""
    if isinstance(o, Ellipse):
        return self == o
    if not isinstance(o, Point):
        return False
    sx = C.Symbol("x", real=True, dummy=True)
    sy = C.Symbol("y", real=True, dummy=True)
    # Residue of the implicit equation at the point; zero means on-curve.
    res = trigsimp(simplify(self.equation(sx, sy).subs({sx: o[0], sy: o[1]})))
    return res == 0
def _(obj, parameters=('t', 's')):
    # Dispatch target: build a ParametricRegion list from the rational
    # parametrization of ``obj`` using the given parameter names.
    definition = obj.rational_parametrization(parameters)
    bounds = []
    for i in range(len(obj.variables) - 1):
        # Each parameter is replaced by its tangent to simplify integration
        parameter = _symbol(parameters[i], real=True)
        definition = [trigsimp(elem.subs(parameter, tan(parameter/2)))
                      for elem in definition]
        # Parameter range after the tangent half-angle substitution.
        bounds.append((parameter, 0, 2*pi),)
    definition = Tuple(*definition)
    return [ParametricRegion(definition, *bounds)]
def test_functional_diffgeom_ch4():
    """One-form fields and differentials on R2, using the rcall interface."""
    x0, y0, theta0 = symbols('x0, y0, theta0', real=True)
    x, y, r, theta = symbols('x, y, r, theta', real=True)
    r0 = symbols('r0', positive=True)
    f = Function('f')
    b1 = Function('b1')
    b2 = Function('b2')
    p_r = R2_r.point([x0, y0])
    p_p = R2_p.point([r0, theta0])

    # Generic one-form evaluated on the coordinate vector fields.
    f_field = b1(R2.x, R2.y)*R2.dx + b2(R2.x, R2.y)*R2.dy
    assert f_field.rcall(R2.e_x).rcall(p_r) == b1(x0, y0)
    assert f_field.rcall(R2.e_y).rcall(p_r) == b2(x0, y0)

    # Differential of a scalar field in rectangular coordinates.
    s_field_r = f(R2.x, R2.y)
    df = Differential(s_field_r)
    assert df(R2.e_x).rcall(p_r).doit() == Derivative(f(x0, y0), x0)
    assert df(R2.e_y).rcall(p_r).doit() == Derivative(f(x0, y0), y0)

    # ... and in polar coordinates.
    df = Differential(f(R2.r, R2.theta))
    assert trigsimp(df(R2.e_x).rcall(p_p).doit()) == (
        cos(theta0)*Derivative(f(r0, theta0), r0) -
        sin(theta0)*Derivative(f(r0, theta0), theta0)/r0)
    assert trigsimp(df(R2.e_y).rcall(p_p).doit()) == (
        sin(theta0)*Derivative(f(r0, theta0), r0) +
        cos(theta0)*Derivative(f(r0, theta0), theta0)/r0)

    assert R2.dx(R2.e_x).rcall(p_r) == 1
    assert R2.dx(R2.e_x) == 1
    assert R2.dx(R2.e_y).rcall(p_r) == 0
    assert R2.dx(R2.e_y) == 0

    circ = -R2.y*R2.e_x + R2.x*R2.e_y
    assert R2.dx(circ).rcall(p_r).doit() == -y0
    assert R2.dy(circ).rcall(p_r) == x0
    assert R2.dr(circ).rcall(p_r) == 0
    assert simplify(R2.dtheta(circ).rcall(p_r)) == 1
    assert (circ - R2.e_theta).rcall(s_field_r).rcall(p_r) == 0
def _eval_trigsimp(self, **opts):
    """Apply trigsimp to both sides of the relation, preserving its type."""
    from sympy.simplify import trigsimp
    lhs = trigsimp(self.lhs, **opts)
    rhs = trigsimp(self.rhs, **opts)
    return self.func(lhs, rhs)
particles = list(map(set_pot_grav_energy, particles)) links = list(map(set_pot_grav_energy, links)) # ---------------------------------------------------------------------------- # Equations of motion # ---------------------------------------------------------------------------- if __name__ == "__main__": # Do not perform derivation when imported simplify_exps = False T = 0 for obj in (*particles, *links): T += trigsimp(obj.kinetic_energy(N)) print(f"Included {obj} in T") #sum([obj.kinetic_energy(N).simplify() for obj in (*particles, *links)]) #T = kinetic_energy(N, *particles, *links) # Kinetic energy V = potential_energy(*particles, *links) # Potential energy # Add the contribution of the spring V += simplify(0.5*k*(A.pos_from(B).magnitude() - l0)**2) L = T - V # Lagrangian # -------------------------- Friction torques ----------------------------- # N.z is used because all z-axis are parallel torques = [(pend_frame, -b_joint*dq[1]*N.z),
def checkodesol(ode, sol, func=None, order="auto", solve_for_func=True):
    r"""
    Substitutes ``sol`` into ``ode`` and checks that the result is ``0``.

    This only works when ``func`` is one function, like `f(x)`.  ``sol`` can
    be a single solution or a list of solutions.  Each solution may be an
    :py:class:`~sympy.core.relational.Equality` that the solution satisfies,
    e.g. ``Eq(f(x), C1), Eq(f(x) + C1, 0)``; or simply an
    :py:class:`~sympy.core.expr.Expr`, e.g. ``f(x) - C1``. In most cases it
    will not be necessary to explicitly identify the function, but if the
    function cannot be inferred from the original equation it can be supplied
    through the ``func`` argument.

    If a sequence of solutions is passed, the same sort of container will be
    used to return the result for each solution.

    It tries the following methods, in order, until it finds zero
    equivalence:

    1. Substitute the solution for `f` in the original equation.  This only
       works if ``ode`` is solved for `f`.  It will attempt to solve it first
       unless ``solve_for_func == False``.
    2. Take `n` derivatives of the solution, where `n` is the order of
       ``ode``, and check to see if that is equal to the solution.  This only
       works on exact ODEs.
    3. Take the 1st, 2nd, ..., `n`\th derivatives of the solution, each time
       solving for the derivative of `f` of that order (this will always be
       possible because `f` is a linear operator).  Then back substitute each
       derivative into ``ode`` in reverse order.

    This function returns a tuple.  The first item in the tuple is ``True``
    if the substitution results in ``0``, and ``False`` otherwise.  The
    second item in the tuple is what the substitution results in.  It should
    always be ``0`` if the first item is ``True``.  Sometimes this function
    will return ``False`` even when an expression is identically equal to
    ``0``.  This happens when
    :py:meth:`~sympy.simplify.simplify.simplify` does not reduce the
    expression to ``0``.  If an expression returned by this function
    vanishes identically, then ``sol`` really is a solution to the ``ode``.

    If this function seems to hang, it is probably because of a hard
    simplification.

    To use this function to test, test the first item of the tuple.

    Examples
    ========

    >>> from sympy import Eq, Function, checkodesol, symbols
    >>> x, C1 = symbols('x,C1')
    >>> f = Function('f')
    >>> checkodesol(f(x).diff(x), Eq(f(x), C1))
    (True, 0)
    >>> assert checkodesol(f(x).diff(x), C1)[0]
    >>> assert not checkodesol(f(x).diff(x), x)[0]
    >>> checkodesol(f(x).diff(x, 2), x**2)
    (False, 2)

    """
    # Normalize the ODE to an Equality so lhs/rhs can be used uniformly.
    if not isinstance(ode, Equality):
        ode = Eq(ode, 0)
    if func is None:
        try:
            _, func = _preprocess(ode.lhs)
        except ValueError:
            # Could not infer the function from the ODE; look for exactly one
            # undefined function in the solution(s) instead.
            funcs = [
                s.atoms(AppliedUndef)
                for s in (sol if is_sequence(sol, set) else [sol])
            ]
            funcs = set().union(*funcs)
            if len(funcs) != 1:
                raise ValueError(
                    "must pass func arg to checkodesol for this case.")
            func = funcs.pop()
    if not isinstance(func, AppliedUndef) or len(func.args) != 1:
        raise ValueError(
            "func must be a function of one variable, not %s" % func)
    if is_sequence(sol, set):
        # Check each solution individually, returning the same container type.
        return type(sol)([
            checkodesol(ode, i, order=order, solve_for_func=solve_for_func)
            for i in sol
        ])
    if not isinstance(sol, Equality):
        sol = Eq(func, sol)
    elif sol.rhs == func:
        # Normalize so the function sits on the left-hand side.
        sol = sol.reversed
    if order == "auto":
        order = ode_order(ode, func)
    solved = sol.lhs == func and not sol.rhs.has(func)
    if solve_for_func and not solved:
        # Try to solve the solution for f explicitly, then re-check.
        rhs = solve(sol, func)
        if rhs:
            eqs = [Eq(func, t) for t in rhs]
            if len(rhs) == 1:
                eqs = eqs[0]
            return checkodesol(ode, eqs, order=order, solve_for_func=False)

    x = func.args[0]

    # Handle series solutions here
    if sol.has(Order):
        assert sol.lhs == func
        Oterm = sol.rhs.getO()
        solrhs = sol.rhs.removeO()

        Oexpr = Oterm.expr
        assert isinstance(Oexpr, Pow)
        sorder = Oexpr.exp
        assert Oterm == Order(x**sorder)

        odesubs = (ode.lhs - ode.rhs).subs(func, solrhs).doit().expand()

        # The residual must vanish up to the order of the truncated series.
        neworder = Order(x**(sorder - order))
        odesubs = odesubs + neworder
        assert odesubs.getO() == neworder
        residual = odesubs.removeO()

        return (residual == 0, residual)

    s = True
    testnum = 0
    while s:
        if testnum == 0:
            # First pass, try substituting a solved solution directly into the
            # ODE. This has the highest chance of succeeding.
            ode_diff = ode.lhs - ode.rhs

            if sol.lhs == func:
                s = sub_func_doit(ode_diff, func, sol.rhs)
                s = besselsimp(s)
            else:
                testnum += 1
                continue
            ss = simplify(s.rewrite(exp))
            if ss:
                # with the new numer_denom in power.py, if we do a simple
                # expansion then testnum == 0 verifies all solutions.
                s = ss.expand(force=True)
            else:
                s = 0
            testnum += 1
        elif testnum == 1:
            # Second pass. If we cannot substitute f, try seeing if the nth
            # derivative is equal, this will only work for odes that are exact,
            # by definition.
            s = simplify(
                trigsimp(diff(sol.lhs, x, order) - diff(sol.rhs, x, order)) -
                trigsimp(ode.lhs) + trigsimp(ode.rhs))
            # s2 = simplify(
            #     diff(sol.lhs, x, order) - diff(sol.rhs, x, order) - \
            #     ode.lhs + ode.rhs)
            testnum += 1
        elif testnum == 2:
            # Third pass. Try solving for df/dx and substituting that into the
            # ODE. Thanks to Chris Smith for suggesting this method.  Many of
            # the comments below are his, too.
            # The method:
            # - Take each of 1..n derivatives of the solution.
            # - Solve each nth derivative for d^(n)f/dx^(n)
            #   (the differential of that order)
            # - Back substitute into the ODE in decreasing order
            #   (i.e., n, n-1, ...)
            # - Check the result for zero equivalence
            if sol.lhs == func and not sol.rhs.has(func):
                diffsols = {0: sol.rhs}
            elif sol.rhs == func and not sol.lhs.has(func):
                diffsols = {0: sol.lhs}
            else:
                diffsols = {}
            sol = sol.lhs - sol.rhs
            for i in range(1, order + 1):
                # Differentiation is a linear operator, so there should always
                # be 1 solution. Nonetheless, we test just to make sure.
                # We only need to solve once.  After that, we automatically
                # have the solution to the differential in the order we want.
                if i == 1:
                    ds = sol.diff(x)
                    try:
                        sdf = solve(ds, func.diff(x, i))
                        if not sdf:
                            raise NotImplementedError
                    except NotImplementedError:
                        testnum += 1
                        break
                    else:
                        diffsols[i] = sdf[0]
                else:
                    # This is what the solution says df/dx should be.
                    diffsols[i] = diffsols[i - 1].diff(x)

            # Make sure the above didn't fail.
            if testnum > 2:
                continue
            else:
                # Substitute it into ODE to check for self consistency.
                lhs, rhs = ode.lhs, ode.rhs
                for i in range(order, -1, -1):
                    if i == 0 and 0 not in diffsols:
                        # We can only substitute f(x) if the solution was
                        # solved for f(x).
                        break
                    lhs = sub_func_doit(lhs, func.diff(x, i), diffsols[i])
                    rhs = sub_func_doit(rhs, func.diff(x, i), diffsols[i])
                    ode_or_bool = Eq(lhs, rhs)
                    ode_or_bool = simplify(ode_or_bool)

                    if isinstance(ode_or_bool, (bool, BooleanAtom)):
                        if ode_or_bool:
                            lhs = rhs = S.Zero
                    else:
                        lhs = ode_or_bool.lhs
                        rhs = ode_or_bool.rhs
                # No sense in overworking simplify -- just prove that the
                # numerator goes to zero
                num = trigsimp((lhs - rhs).as_numer_denom()[0])
                # since solutions are obtained using force=True we test
                # using the same level of assumptions
                ## replace function with dummy so assumptions will work
                _func = Dummy("func")
                num = num.subs(func, _func)
                ## posify the expression
                num, reps = posify(num)
                s = simplify(num).xreplace(reps).xreplace({_func: func})
                testnum += 1
        else:
            break

    if not s:
        return (True, s)
    elif s is True:  # The code above never was able to change s
        raise NotImplementedError("Unable to test if " + str(sol) +
                                  " is a solution to " + str(ode) + ".")
    else:
        return (False, s)
def trigsimp(self, deep=False, recursive=False):
    """See the trigsimp function in sympy.simplify.

    Parameters
    ==========

    deep : bool, optional
        Forwarded to ``sympy.simplify.trigsimp``.
    recursive : bool, optional
        Forwarded to ``sympy.simplify.trigsimp``.
    """
    from sympy.simplify import trigsimp
    # Forward options by keyword so this wrapper stays correct even if the
    # positional parameter order of trigsimp changes.
    return trigsimp(self, deep=deep, recursive=recursive)