Example #1
def test_trigsimp2():
    x, y = symbols('x,y')
    assert trigsimp(cos(x)**2 * sin(y)**2 + cos(x)**2 * cos(y)**2 + sin(x)**2,
                    recursive=True) == 1
    assert trigsimp(sin(x)**2 * sin(y)**2 + sin(x)**2 * cos(y)**2 + cos(x)**2,
                    recursive=True) == 1
    assert trigsimp(Subs(x, x, sin(y)**2 + cos(y)**2)) == Subs(x, x, 1)
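A minimal standalone sketch (assuming a standard SymPy setup) of the recursive keyword exercised above; expected outputs are noted in the comments.

from sympy import Subs, symbols, sin, cos, trigsimp

x, y = symbols('x y')
expr = cos(x)**2*sin(y)**2 + cos(x)**2*cos(y)**2 + sin(x)**2
print(trigsimp(expr, recursive=True))               # expected: 1
print(trigsimp(Subs(x, x, sin(y)**2 + cos(y)**2)))  # expected: Subs(x, x, 1)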
Example #2
def test_complete_simple_double_pendulum():
    q1, q2 = dynamicsymbols('q1 q2')
    u1, u2 = dynamicsymbols('u1 u2')
    m, l, g = symbols('m l g')
    C = Body('C')  # ceiling
    PartP = Body('P', mass=m)
    PartR = Body('R', mass=m)

    J1 = PinJoint('J1', C, PartP, speeds=u1, coordinates=q1,
                  child_joint_pos=-l*PartP.x, parent_axis=C.z,
                  child_axis=PartP.z)
    J2 = PinJoint('J2', PartP, PartR, speeds=u2, coordinates=q2,
                  child_joint_pos=-l*PartR.x, parent_axis=PartP.z,
                  child_axis=PartR.z)

    PartP.apply_force(m*g*C.x)
    PartR.apply_force(m*g*C.x)

    method = JointsMethod(C, J1, J2)
    method.form_eoms()

    assert expand(method.mass_matrix_full) == Matrix([[1, 0, 0, 0],
                                                      [0, 1, 0, 0],
                                                      [0, 0, 2*l**2*m*cos(q2) + 3*l**2*m, l**2*m*cos(q2) + l**2*m],
                                                      [0, 0, l**2*m*cos(q2) + l**2*m, l**2*m]])
    assert trigsimp(method.forcing_full) == trigsimp(Matrix([[u1], [u2], [-g*l*m*(sin(q1 + q2) + sin(q1)) -
                                           g*l*m*sin(q1) + l**2*m*(2*u1 + u2)*u2*sin(q2)],
                                          [-g*l*m*sin(q1 + q2) - l**2*m*u1**2*sin(q2)]]))
Example #3
def test_trigsimp_groebner():
    from sympy.simplify.trigsimp import trigsimp_groebner

    c = cos(x)
    s = sin(x)
    ex = (4 * s * c + 12 * s + 5 * c**3 + 21 * c**2 + 23 * c + 15) / (
        -s * c**2 + 2 * s * c + 15 * s + 7 * c**3 + 31 * c**2 + 37 * c + 21)
    resnum = (5 * s - 5 * c + 1)
    resdenom = (8 * s - 6 * c)
    results = [resnum / resdenom, (-resnum) / (-resdenom)]
    assert trigsimp_groebner(ex) in results
    assert trigsimp_groebner(s / c, hints=[tan]) == tan(x)
    assert trigsimp_groebner(c * s) == c * s
    assert trigsimp((-s + 1) / c + c / (-s + 1), method='groebner') == 2 / c
    assert trigsimp((-s + 1) / c + c / (-s + 1),
                    method='groebner',
                    polynomial=True) == 2 / c

    # Test quick=False works
    assert trigsimp_groebner(ex, hints=[2]) in results
    assert trigsimp_groebner(ex, hints=[int(2)]) in results

    # test "I"
    assert trigsimp_groebner(sin(I * x) / cos(I * x),
                             hints=[tanh]) == I * tanh(x)

    # test hyperbolic / sums
    assert trigsimp_groebner((tanh(x) + tanh(y)) / (1 + tanh(x) * tanh(y)),
                             hints=[(tanh, x, y)]) == tanh(x + y)
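A short, hedged sketch of the Groebner-based simplifier used above (same import path as in the test); hints tell it which target functions to aim for.

from sympy import symbols, sin, cos, tan, tanh, I, trigsimp
from sympy.simplify.trigsimp import trigsimp_groebner

x = symbols('x')
print(trigsimp_groebner(sin(x)/cos(x), hints=[tan]))       # expected: tan(x)
print(trigsimp_groebner(sin(I*x)/cos(I*x), hints=[tanh]))  # expected: I*tanh(x)
print(trigsimp((1 - sin(x))/cos(x) + cos(x)/(1 - sin(x)),
               method='groebner'))                         # expected: 2/cos(x)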
Example #4
def test_quaternion_conversions():
    q1 = Quaternion(1, 2, 3, 4)

    assert q1.to_axis_angle() == ((2 * sqrt(29)/29,
                                   3 * sqrt(29)/29,
                                   4 * sqrt(29)/29),
                                   2 * acos(sqrt(30)/30))

    assert q1.to_rotation_matrix() == Matrix([[Rational(-2, 3), Rational(2, 15), Rational(11, 15)],
                                              [Rational(2, 3), Rational(-1, 3), Rational(2, 3)],
                                              [Rational(1, 3), Rational(14, 15), Rational(2, 15)]])

    assert q1.to_rotation_matrix((1, 1, 1)) == Matrix([[Rational(-2, 3), Rational(2, 15), Rational(11, 15), Rational(4, 5)],
                                                       [Rational(2, 3), Rational(-1, 3), Rational(2, 3), S.Zero],
                                                       [Rational(1, 3), Rational(14, 15), Rational(2, 15), Rational(-2, 5)],
                                                       [S.Zero, S.Zero, S.Zero, S.One]])

    theta = symbols("theta", real=True)
    q2 = Quaternion(cos(theta/2), 0, 0, sin(theta/2))

    assert trigsimp(q2.to_rotation_matrix()) == Matrix([
                                               [cos(theta), -sin(theta), 0],
                                               [sin(theta),  cos(theta), 0],
                                               [0,           0,          1]])

    assert q2.to_axis_angle() == ((0, 0, sin(theta/2)/Abs(sin(theta/2))),
                                   2*acos(cos(theta/2)))

    assert trigsimp(q2.to_rotation_matrix((1, 1, 1))) == Matrix([
               [cos(theta), -sin(theta), 0, sin(theta) - cos(theta) + 1],
               [sin(theta),  cos(theta), 0, -sin(theta) - cos(theta) + 1],
               [0,           0,          1,  0],
               [0,           0,          0,  1]])
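A minimal usage sketch (standard SymPy assumed) of the conversion methods exercised above.

from sympy import Quaternion

q = Quaternion(1, 2, 3, 4)
axis, angle = q.to_axis_angle()       # unit rotation axis and rotation angle
R3 = q.to_rotation_matrix()           # 3x3 rotation matrix
R4 = q.to_rotation_matrix((1, 1, 1))  # 4x4 transformation about the point (1, 1, 1)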
Example #5
def test_issue_4280():
    a, x, y = symbols('a x y')
    assert trigsimp(cos(x)**2 + cos(y)**2 * sin(x)**2 +
                    sin(y)**2 * sin(x)**2) == 1
    assert trigsimp(a**2 * sin(x)**2 + a**2 * cos(y)**2 * cos(x)**2 +
                    a**2 * cos(x)**2 * sin(y)**2) == a**2
    assert trigsimp(a**2 * cos(y)**2 * sin(x)**2 +
                    a**2 * sin(y)**2 * sin(x)**2) == a**2 * sin(x)**2
Example #6
def test_issue_15129_trigsimp_methods():
    t1 = Matrix([sin(Rational(1, 50)), cos(Rational(1, 50)), 0])
    t2 = Matrix([sin(Rational(1, 25)), cos(Rational(1, 25)), 0])
    t3 = Matrix([cos(Rational(1, 25)), sin(Rational(1, 25)), 0])
    r1 = t1.dot(t2)
    r2 = t1.dot(t3)
    assert trigsimp(r1) == cos(Rational(1, 50))
    assert trigsimp(r2) == sin(Rational(3, 50))
Example #7
def test_issue_4661():
    a, x, y = symbols('a x y')
    eq = -4 * sin(x)**4 + 4 * cos(x)**4 - 8 * cos(x)**2
    assert trigsimp(eq) == -4
    n = sin(x)**6 + 4 * sin(x)**4 * cos(x)**2 + 5 * sin(x)**2 * cos(
        x)**4 + 2 * cos(x)**6
    d = -sin(x)**2 - 2 * cos(x)**2
    assert simplify(n / d) == -1
    assert trigsimp(-2 * cos(x)**2 + cos(x)**4 - sin(x)**4) == -1
    eq = (-sin(x)**3 / 4) * cos(x) + (cos(x)**3 / 4) * sin(x) - sin(
        2 * x) * cos(2 * x) / 8
    assert trigsimp(eq) == 0
Example #8
def test_trigsimp():
    # issue 16736
    s, c = sin(2 * x), cos(2 * x)
    eq = Eq(s, c)
    assert trigsimp(eq) == eq  # no rearrangement of sides
    # simplification of sides might result in
    # an unevaluated Eq
    changed = trigsimp(Eq(s + c, sqrt(2)))
    assert isinstance(changed, Eq)
    assert changed.subs(x, pi / 8) is S.true
    # or an evaluated one
    assert trigsimp(Eq(cos(x)**2 + sin(x)**2, 1)) is S.true
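A small sketch of the Eq behaviour checked above: trigsimp simplifies each side but never rearranges the equation, and an equation whose sides become identical evaluates to S.true.

from sympy import Eq, S, symbols, sin, cos, trigsimp

x = symbols('x')
print(trigsimp(Eq(sin(x)**2 + cos(x)**2, 1)) is S.true)  # expected: True
print(trigsimp(Eq(sin(2*x), cos(2*x))))                  # expected: Eq(sin(2*x), cos(2*x)), unchanged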
Example #9
def test_trigsimp3():
    x, y = symbols('x,y')
    assert trigsimp(sin(x) / cos(x)) == tan(x)
    assert trigsimp(sin(x)**2 / cos(x)**2) == tan(x)**2
    assert trigsimp(sin(x)**3 / cos(x)**3) == tan(x)**3
    assert trigsimp(sin(x)**10 / cos(x)**10) == tan(x)**10

    assert trigsimp(cos(x) / sin(x)) == 1 / tan(x)
    assert trigsimp(cos(x)**2 / sin(x)**2) == 1 / tan(x)**2
    assert trigsimp(cos(x)**10 / sin(x)**10) == 1 / tan(x)**10

    assert trigsimp(tan(x)) == trigsimp(sin(x) / cos(x))
Example #10
# Helper drawn from SymPy's Bessel-function tests; ``fro`` (the rewrite under
# test) and ``z0`` (a dummy argument) are presumably bound in the enclosing scope.
def repl(nu, z):
    if (nu % 1) == S(1)/2:
        return simplify(trigsimp(unpolarify(
                fro(nu, z0).rewrite(besselj).rewrite(jn).expand(
                    func=True)).subs(z0, z)))
    elif nu.is_Integer and nu > 1:
        return fro(nu, z).expand(func=True)
    return fro(nu, z)
Example #11
def test_issue_2827_trigsimp_methods():
    measure1 = lambda expr: len(str(expr))
    measure2 = lambda expr: -count_ops(expr)
    # Return the most complicated result
    expr = (x + 1) / (x + sin(x)**2 + cos(x)**2)
    ans = Matrix([1])
    M = Matrix([expr])
    assert trigsimp(M, method='fu', measure=measure1) == ans
    assert trigsimp(M, method='fu', measure=measure2) != ans
    # all methods should work with Basic expressions even if they
    # aren't Expr
    M = Matrix.eye(1)
    assert all(
        trigsimp(M, method=m) == M for m in 'fu matching groebner old'.split())
    # watch for E in exptrigsimp, not only exp()
    eq = 1 / sqrt(E) + E
    assert exptrigsimp(eq) == eq
Example #12
# Variant of the same test helper, post-processing with exptrigsimp; ``fro`` and
# ``z0`` are presumably bound in the enclosing scope.
def repl(nu, z):
    if (nu % 1) == S(1)/2:
        return exptrigsimp(trigsimp(unpolarify(
                fro(nu, z0).rewrite(besselj).rewrite(jn).expand(
                    func=True)).subs(z0, z)))
    elif nu.is_Integer and nu > 1:
        return fro(nu, z).expand(func=True)
    return fro(nu, z)
Example #13
def test_issue_6811_fail():
    # from doc/src/modules/physics/mechanics/examples.rst, the current `eq`
    # at Line 576 (in different variables) was formerly the equivalent and
    # shorter expression given below...it would be nice to get the short one
    # back again
    xp, y, x, z = symbols('xp, y, x, z')
    eq = 4 * (-19 * sin(x) * y + 5 * sin(3 * x) * y + 15 * cos(2 * x) * z -
              21 * z) * xp / (9 * cos(x) - 5 * cos(3 * x))
    assert trigsimp(eq) == -2 * (2 * cos(x) * tan(x) * y + 3 * z) * xp / cos(x)
Example #14
def test_Piecewise():
    e1 = x * (x + y) - y * (x + y)
    e2 = sin(x)**2 + cos(x)**2
    e3 = expand((x + y) * y / x)
    # s1 = simplify(e1)
    s2 = simplify(e2)
    # s3 = simplify(e3)

    # trigsimp tries not to touch non-trig containing args
    assert trigsimp(Piecewise((e1, e3 < e2), (e3, True))) == \
        Piecewise((e1, e3 < s2), (e3, True))
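A sketch mirroring the test above (standard SymPy assumed): only the trig-bearing condition is simplified, while the non-trig branches are left alone.

from sympy import Piecewise, symbols, sin, cos, expand, trigsimp

x, y = symbols('x y')
e1 = x*(x + y) - y*(x + y)
e2 = sin(x)**2 + cos(x)**2
e3 = expand((x + y)*y/x)
print(trigsimp(Piecewise((e1, e3 < e2), (e3, True))))
# expected: Piecewise((e1, e3 < 1), (e3, True))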
Example #15
    def to_axis_angle(self):
        """Returns the axis and angle of rotation of a quaternion

        Returns
        =======

        tuple
            Tuple of (axis, angle)

        Examples
        ========

        >>> from sympy import Quaternion
        >>> q = Quaternion(1, 1, 1, 1)
        >>> (axis, angle) = q.to_axis_angle()
        >>> axis
        (sqrt(3)/3, sqrt(3)/3, sqrt(3)/3)
        >>> angle
        2*pi/3

        """
        q = self
        if q.a.is_negative:
            q = q * -1

        q = q.normalize()
        angle = trigsimp(2 * acos(q.a))

        # Since quaternion is normalised, q.a is less than 1.
        s = sqrt(1 - q.a * q.a)

        x = trigsimp(q.b / s)
        y = trigsimp(q.c / s)
        z = trigsimp(q.d / s)

        v = (x, y, z)
        t = (v, angle)

        return t
Example #16
def test_quaternion_rotation_iss1593():
    """
    There was a sign mistake in the definition of the rotation matrix.
    This tests that particular sign mistake.
    See issue 1593 for reference.
    See wikipedia
    https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Quaternion-derived_rotation_matrix
    for the correct definition
    """
    q = Quaternion(cos(phi / 2), sin(phi / 2), 0, 0)
    assert (trigsimp(q.to_rotation_matrix()) == Matrix(
        [[1, 0, 0], [0, cos(phi), -sin(phi)], [0, sin(phi),
                                               cos(phi)]]))
Example #17
def test_quaternion_construction():
    q = Quaternion(w, x, y, z)
    assert q + q == Quaternion(2*w, 2*x, 2*y, 2*z)

    q2 = Quaternion.from_axis_angle((sqrt(3)/3, sqrt(3)/3, sqrt(3)/3),
                                    pi*Rational(2, 3))
    assert q2 == Quaternion(S.Half, S.Half,
                            S.Half, S.Half)

    M = Matrix([[cos(phi), -sin(phi), 0], [sin(phi), cos(phi), 0], [0, 0, 1]])
    q3 = trigsimp(Quaternion.from_rotation_matrix(M))
    assert q3 == Quaternion(sqrt(2)*sqrt(cos(phi) + 1)/2, 0, 0, sqrt(2 - 2*cos(phi))*sign(sin(phi))/2)

    nc = Symbol('nc', commutative=False)
    raises(ValueError, lambda: Quaternion(w, x, nc, z))
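A round-trip sketch of the constructors used above (expected values follow the assertions in the test).

from sympy import Quaternion, Rational, pi, sqrt

q = Quaternion.from_axis_angle((sqrt(3)/3, sqrt(3)/3, sqrt(3)/3), pi*Rational(2, 3))
print(q)                  # expected: 1/2 + 1/2*i + 1/2*j + 1/2*k
print(q.to_axis_angle())  # expected to recover the same axis and the angle 2*pi/3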
Example #18
def test_issue_3210():
    eqs = (
        sin(2) * cos(3) + sin(3) * cos(2),
        -sin(2) * sin(3) + cos(2) * cos(3),
        sin(2) * cos(3) - sin(3) * cos(2),
        sin(2) * sin(3) + cos(2) * cos(3),
        sin(2) * sin(3) + cos(2) * cos(3) + cos(2),
        sinh(2) * cosh(3) + sinh(3) * cosh(2),
        sinh(2) * sinh(3) + cosh(2) * cosh(3),
    )
    assert [trigsimp(e) for e in eqs] == [
        sin(5),
        cos(5),
        -sin(1),
        cos(1),
        cos(1) + cos(2),
        sinh(5),
        cosh(5),
    ]
Example #19
    def __and__(self, other):
        """Dot product of two vectors.

        Returns a scalar, the dot product of the two Vectors

        Parameters
        ==========

        other : Vector
            The Vector which we are dotting with

        Examples
        ========

        >>> from sympy.physics.vector import ReferenceFrame, dot
        >>> from sympy import symbols
        >>> q1 = symbols('q1')
        >>> N = ReferenceFrame('N')
        >>> dot(N.x, N.x)
        1
        >>> dot(N.x, N.y)
        0
        >>> A = N.orientnew('A', 'Axis', [q1, N.x])
        >>> dot(N.y, A.y)
        cos(q1)

        """

        from sympy.physics.vector.dyadic import Dyadic
        if isinstance(other, Dyadic):
            return NotImplemented
        other = _check_vector(other)
        out = S.Zero
        for i, v1 in enumerate(self.args):
            for j, v2 in enumerate(other.args):
                out += ((v2[0].T) * (v2[1].dcm(v1[1])) * (v1[0]))[0]
        if Vector.simp:
            return trigsimp(sympify(out), recursive=True)
        else:
            return sympify(out)
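A brief usage sketch of this dot product (the frame setup follows the docstring above; standard SymPy assumed).

from sympy import symbols
from sympy.physics.vector import ReferenceFrame, dot

q1 = symbols('q1')
N = ReferenceFrame('N')
A = N.orientnew('A', 'Axis', [q1, N.x])
print(dot(N.y, A.y))  # expected: cos(q1)
print(N.y & A.y)      # the & operator invokes this same __and__ dot product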
Example #20
    def scalar_map(self, other):
        """
        Returns a dictionary which expresses the coordinate variables
        (base scalars) of this frame in terms of the variables of
        otherframe.

        Parameters
        ==========

        otherframe : CoordSys3D
            The other system to map the variables to.

        Examples
        ========

        >>> from sympy.vector import CoordSys3D
        >>> from sympy import Symbol
        >>> A = CoordSys3D('A')
        >>> q = Symbol('q')
        >>> B = A.orient_new_axis('B', q, A.k)
        >>> A.scalar_map(B)
        {A.x: B.x*cos(q) - B.y*sin(q), A.y: B.x*sin(q) + B.y*cos(q), A.z: B.z}

        """

        relocated_scalars = []
        origin_coords = tuple(self.position_wrt(other).to_matrix(other))
        for i, x in enumerate(other.base_scalars()):
            relocated_scalars.append(x - origin_coords[i])

        vars_matrix = (self.rotation_matrix(other) *
                       Matrix(relocated_scalars))
        mapping = {}
        for i, x in enumerate(self.base_scalars()):
            mapping[x] = trigsimp(vars_matrix[i])
        return mapping
Example #21
def express(expr, frame, frame2=None, variables=False):
    """
    Global function for 'express' functionality.

    Re-expresses a Vector, scalar(sympyfiable) or Dyadic in given frame.

    Refer to the local methods of Vector and Dyadic for details.
    If 'variables' is True, then the coordinate variables (CoordinateSym
    instances) of other frames present in the vector/scalar field or
    dyadic expression are also substituted in terms of the base scalars of
    this frame.

    Parameters
    ==========

    expr : Vector/Dyadic/scalar(sympyfiable)
        The expression to re-express in ReferenceFrame 'frame'

    frame: ReferenceFrame
        The reference frame to express expr in

    frame2 : ReferenceFrame
        The other frame required for re-expression(only for Dyadic expr)

    variables : boolean
        Specifies whether to substitute the coordinate variables present
        in expr, in terms of those of frame

    Examples
    ========

    >>> from sympy.physics.vector import ReferenceFrame, outer, dynamicsymbols
    >>> from sympy.physics.vector import init_vprinting
    >>> init_vprinting(pretty_print=False)
    >>> N = ReferenceFrame('N')
    >>> q = dynamicsymbols('q')
    >>> B = N.orientnew('B', 'Axis', [q, N.z])
    >>> d = outer(N.x, N.x)
    >>> from sympy.physics.vector import express
    >>> express(d, B, N)
    cos(q)*(B.x|N.x) - sin(q)*(B.y|N.x)
    >>> express(B.x, N)
    cos(q)*N.x + sin(q)*N.y
    >>> express(N[0], B, variables=True)
    B_x*cos(q) - B_y*sin(q)

    """

    _check_frame(frame)

    if expr == 0:
        return expr

    if isinstance(expr, Vector):
        #Given expr is a Vector
        if variables:
            #If variables attribute is True, substitute
            #the coordinate variables in the Vector
            frame_list = [x[-1] for x in expr.args]
            subs_dict = {}
            for f in frame_list:
                subs_dict.update(f.variable_map(frame))
            expr = expr.subs(subs_dict)
        #Re-express in this frame
        outvec = Vector([])
        for i, v in enumerate(expr.args):
            if v[1] != frame:
                temp = frame.dcm(v[1]) * v[0]
                if Vector.simp:
                    temp = temp.applyfunc(lambda x: trigsimp(x, method='fu'))
                outvec += Vector([(temp, frame)])
            else:
                outvec += Vector([v])
        return outvec

    if isinstance(expr, Dyadic):
        if frame2 is None:
            frame2 = frame
        _check_frame(frame2)
        ol = Dyadic(0)
        for i, v in enumerate(expr.args):
            ol += express(v[0], frame, variables=variables) * \
                  (express(v[1], frame, variables=variables) |
                   express(v[2], frame2, variables=variables))
        return ol

    else:
        if variables:
            #Given expr is a scalar field
            frame_set = set()
            expr = sympify(expr)
            #Substitute all the coordinate variables
            for x in expr.free_symbols:
                if isinstance(x, CoordinateSym) and x.frame != frame:
                    frame_set.add(x.frame)
            subs_dict = {}
            for f in frame_set:
                subs_dict.update(f.variable_map(frame))
            return expr.subs(subs_dict)
        return expr
Example #22
def test_trigsimp_old():
    x, y = symbols('x,y')

    assert trigsimp(1 - sin(x)**2, old=True) == cos(x)**2
    assert trigsimp(1 - cos(x)**2, old=True) == sin(x)**2
    assert trigsimp(sin(x)**2 + cos(x)**2, old=True) == 1
    assert trigsimp(1 + tan(x)**2, old=True) == 1 / cos(x)**2
    assert trigsimp(1 / cos(x)**2 - 1, old=True) == tan(x)**2
    assert trigsimp(1 / cos(x)**2 - tan(x)**2, old=True) == 1
    assert trigsimp(1 + cot(x)**2, old=True) == 1 / sin(x)**2
    assert trigsimp(1 / sin(x)**2 - cot(x)**2, old=True) == 1

    assert trigsimp(5 * cos(x)**2 + 5 * sin(x)**2, old=True) == 5

    assert trigsimp(sin(x) / cos(x), old=True) == tan(x)
    assert trigsimp(2 * tan(x) * cos(x), old=True) == 2 * sin(x)
    assert trigsimp(cot(x)**3 * sin(x)**3, old=True) == cos(x)**3
    assert trigsimp(y * tan(x)**2 / sin(x)**2, old=True) == y / cos(x)**2
    assert trigsimp(cot(x) / cos(x), old=True) == 1 / sin(x)

    assert trigsimp(sin(x + y) + sin(x - y), old=True) == 2 * sin(x) * cos(y)
    assert trigsimp(sin(x + y) - sin(x - y), old=True) == 2 * sin(y) * cos(x)
    assert trigsimp(cos(x + y) + cos(x - y), old=True) == 2 * cos(x) * cos(y)
    assert trigsimp(cos(x + y) - cos(x - y), old=True) == -2 * sin(x) * sin(y)

    assert trigsimp(sinh(x + y) + sinh(x - y),
                    old=True) == 2 * sinh(x) * cosh(y)
    assert trigsimp(sinh(x + y) - sinh(x - y),
                    old=True) == 2 * sinh(y) * cosh(x)
    assert trigsimp(cosh(x + y) + cosh(x - y),
                    old=True) == 2 * cosh(x) * cosh(y)
    assert trigsimp(cosh(x + y) - cosh(x - y),
                    old=True) == 2 * sinh(x) * sinh(y)

    assert trigsimp(cos(0.12345)**2 + sin(0.12345)**2, old=True) == 1

    assert trigsimp(sin(x) / cos(x), old=True, method='combined') == tan(x)
    assert trigsimp(sin(x) / cos(x), old=True,
                    method='groebner') == sin(x) / cos(x)
    assert trigsimp(sin(x) / cos(x), old=True, method='groebner',
                    hints=[tan]) == tan(x)

    assert trigsimp(1 - sin(sin(x)**2 + cos(x)**2)**2, old=True,
                    deep=True) == cos(1)**2
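A quick sketch of the old=True pathway exercised above, including the groebner method with hints.

from sympy import symbols, sin, cos, tan, trigsimp

x, y = symbols('x y')
print(trigsimp(sin(x + y) + sin(x - y), old=True))  # expected: 2*sin(x)*cos(y)
print(trigsimp(sin(x)/cos(x), old=True,
               method='groebner', hints=[tan]))     # expected: tan(x)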
Example #23
def simplify(expr, ratio=1.7, measure=count_ops, fu=False):
    """
    Simplifies the given expression.

    Simplification is not a well defined term and the exact strategies
    this function tries can change in the future versions of SymPy. If
    your algorithm relies on "simplification" (whatever it is), try to
    determine what you need exactly  -  is it powsimp()?, radsimp()?,
    together()?, logcombine()?, or something else? And use this particular
    function directly, because those are well defined and thus your algorithm
    will be robust.

    Nonetheless, especially for interactive use, or when you don't know
    anything about the structure of the expression, simplify() tries to apply
    intelligent heuristics to make the input expression "simpler".  For
    example:

    >>> from sympy import simplify, cos, sin
    >>> from sympy.abc import x, y
    >>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2)
    >>> a
    (x**2 + x)/(x*sin(y)**2 + x*cos(y)**2)
    >>> simplify(a)
    x + 1

    Note that we could have obtained the same result by using specific
    simplification functions:

    >>> from sympy import trigsimp, cancel
    >>> trigsimp(a)
    (x**2 + x)/x
    >>> cancel(_)
    x + 1

    In some cases, applying :func:`simplify` may actually result in some more
    complicated expression. The default ``ratio=1.7`` prevents more extreme
    cases: if (result length)/(input length) > ratio, then input is returned
    unmodified.  The ``measure`` parameter lets you specify the function used
    to determine how complex an expression is.  The function should take a
    single argument as an expression and return a number such that if
    expression ``a`` is more complex than expression ``b``, then
    ``measure(a) > measure(b)``.  The default measure function is
    :func:`count_ops`, which returns the total number of operations in the
    expression.

    For example, if ``ratio=1``, ``simplify`` output can't be longer
    than input.

    ::

        >>> from sympy import sqrt, simplify, count_ops, oo
        >>> root = 1/(sqrt(2)+3)

    Since ``simplify(root)`` would result in a slightly longer expression,
    root is returned unchanged instead::

       >>> simplify(root, ratio=1) == root
       True

    If ``ratio=oo``, simplify will be applied anyway::

        >>> count_ops(simplify(root, ratio=oo)) > count_ops(root)
        True

    Note that the shortest expression is not necessarily the simplest, so
    setting ``ratio`` to 1 may not be a good idea.
    Heuristically, the default value ``ratio=1.7`` seems like a reasonable
    choice.

    You can easily define your own measure function based on what you feel
    should represent the "size" or "complexity" of the input expression.  Note
    that some choices, such as ``lambda expr: len(str(expr))`` may appear to be
    good metrics, but have other problems (in this case, the measure function
    may slow down simplify too much for very large expressions).  If you don't
    know what a good metric would be, the default, ``count_ops``, is a good
    one.

    For example:

    >>> from sympy import symbols, log
    >>> a, b = symbols('a b', positive=True)
    >>> g = log(a) + log(b) + log(a)*log(1/b)
    >>> h = simplify(g)
    >>> h
    log(a*b**(-log(a) + 1))
    >>> count_ops(g)
    8
    >>> count_ops(h)
    5

    So you can see that ``h`` is simpler than ``g`` using the count_ops metric.
    However, we may not like how ``simplify`` (in this case, using
    ``logcombine``) has created the ``b**(log(1/a) + 1)`` term.  A simple way
    to reduce this would be to give more weight to powers as operations in
    ``count_ops``.  We can do this by using the ``visual=True`` option:

    >>> print(count_ops(g, visual=True))
    2*ADD + DIV + 4*LOG + MUL
    >>> print(count_ops(h, visual=True))
    2*LOG + MUL + POW + SUB

    >>> from sympy import Symbol, S
    >>> def my_measure(expr):
    ...     POW = Symbol('POW')
    ...     # Discourage powers by giving POW a weight of 10
    ...     count = count_ops(expr, visual=True).subs(POW, 10)
    ...     # Every other operation gets a weight of 1 (the default)
    ...     count = count.replace(Symbol, type(S.One))
    ...     return count
    >>> my_measure(g)
    8
    >>> my_measure(h)
    14
    >>> 15./8 > 1.7 # 1.7 is the default ratio
    True
    >>> simplify(g, measure=my_measure)
    -log(a)*log(b) + log(a) + log(b)

    Note that because ``simplify()`` internally tries many different
    simplification strategies and then compares them using the measure
    function, we get a completely different result that is still different
    from the input expression by doing this.
    """
    expr = sympify(expr)

    try:
        return expr._eval_simplify(ratio=ratio, measure=measure)
    except AttributeError:
        pass

    original_expr = expr = signsimp(expr)

    from sympy.simplify.hyperexpand import hyperexpand
    from sympy.functions.special.bessel import BesselBase
    from sympy import Sum, Product

    if not isinstance(expr, Basic) or not expr.args:  # XXX: temporary hack
        return expr

    if not isinstance(expr, (Add, Mul, Pow, ExpBase)):
        if isinstance(expr, Function) and hasattr(expr, "inverse"):
            if len(expr.args) == 1 and len(expr.args[0].args) == 1 and \
               isinstance(expr.args[0], expr.inverse(argindex=1)):
                return simplify(expr.args[0].args[0], ratio=ratio,
                                measure=measure, fu=fu)
        return expr.func(*[simplify(x, ratio=ratio, measure=measure, fu=fu)
                         for x in expr.args])

    # TODO: Apply different strategies, considering expression pattern:
    # is it a purely rational function? Is there any trigonometric function?...
    # See also https://github.com/sympy/sympy/pull/185.

    def shorter(*choices):
        '''Return the choice that has the fewest ops. In case of a tie,
        the expression listed first is selected.'''
        if not has_variety(choices):
            return choices[0]
        return min(choices, key=measure)

    expr = bottom_up(expr, lambda w: w.normal())
    expr = Mul(*powsimp(expr).as_content_primitive())
    _e = cancel(expr)
    expr1 = shorter(_e, _mexpand(_e).cancel())  # issue 6829
    expr2 = shorter(together(expr, deep=True), together(expr1, deep=True))

    if ratio is S.Infinity:
        expr = expr2
    else:
        expr = shorter(expr2, expr1, expr)
    if not isinstance(expr, Basic):  # XXX: temporary hack
        return expr

    expr = factor_terms(expr, sign=False)

    # hyperexpand automatically only works on hypergeometric terms
    expr = hyperexpand(expr)

    expr = piecewise_fold(expr)

    if expr.has(BesselBase):
        expr = besselsimp(expr)

    if expr.has(TrigonometricFunction) and not fu or expr.has(
            HyperbolicFunction):
        expr = trigsimp(expr, deep=True)

    if expr.has(log):
        expr = shorter(expand_log(expr, deep=True), logcombine(expr))

    if expr.has(CombinatorialFunction, gamma):
        expr = combsimp(expr)

    if expr.has(Sum):
        expr = sum_simplify(expr)

    if expr.has(Product):
        expr = product_simplify(expr)

    short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr), expr)
    short = shorter(short, factor_terms(short), expand_power_exp(expand_mul(short)))
    if short.has(TrigonometricFunction, HyperbolicFunction, ExpBase):
        short = exptrigsimp(short, simplify=False)

    # get rid of hollow 2-arg Mul factorization
    hollow_mul = Transform(
        lambda x: Mul(*x.args),
        lambda x:
        x.is_Mul and
        len(x.args) == 2 and
        x.args[0].is_Number and
        x.args[1].is_Add and
        x.is_commutative)
    expr = short.xreplace(hollow_mul)

    numer, denom = expr.as_numer_denom()
    if denom.is_Add:
        n, d = fraction(radsimp(1/denom, symbolic=False, max_terms=1))
        if n is not S.One:
            expr = (numer*n).expand()/d

    if expr.could_extract_minus_sign():
        n, d = fraction(expr)
        if d != 0:
            expr = signsimp(-n/(-d))

    if measure(expr) > ratio*measure(original_expr):
        expr = original_expr

    return expr
Example #24
def test_issue_4373():
    x = Symbol("x")
    assert abs(trigsimp(2.0 * sin(x)**2 + 2.0 * cos(x)**2) - 2.0) < 1e-10
Example #25
def test_trigsimp1a():
    assert trigsimp(sin(2)**2 * cos(3) * exp(2) /
                    cos(2)**2) == tan(2)**2 * cos(3) * exp(2)
    assert trigsimp(tan(2)**2 * cos(3) * exp(2) *
                    cos(2)**2) == sin(2)**2 * cos(3) * exp(2)
    assert trigsimp(cot(2) * cos(3) * exp(2) *
                    sin(2)) == cos(3) * exp(2) * cos(2)
    assert trigsimp(tan(2) * cos(3) * exp(2) /
                    sin(2)) == cos(3) * exp(2) / cos(2)
    assert trigsimp(cot(2) * cos(3) * exp(2) /
                    cos(2)) == cos(3) * exp(2) / sin(2)
    assert trigsimp(cot(2) * cos(3) * exp(2) * tan(2)) == cos(3) * exp(2)
    assert trigsimp(sinh(2) * cos(3) * exp(2) /
                    cosh(2)) == tanh(2) * cos(3) * exp(2)
    assert trigsimp(tanh(2) * cos(3) * exp(2) *
                    cosh(2)) == sinh(2) * cos(3) * exp(2)
    assert trigsimp(coth(2) * cos(3) * exp(2) *
                    sinh(2)) == cosh(2) * cos(3) * exp(2)
    assert trigsimp(tanh(2) * cos(3) * exp(2) /
                    sinh(2)) == cos(3) * exp(2) / cosh(2)
    assert trigsimp(coth(2) * cos(3) * exp(2) /
                    cosh(2)) == cos(3) * exp(2) / sinh(2)
    assert trigsimp(coth(2) * cos(3) * exp(2) * tanh(2)) == cos(3) * exp(2)
Example #26
def simplify(expr, ratio=1.7, measure=count_ops, rational=False):
    # type: (object, object, object, object) -> object
    """
    Simplifies the given expression.

    Simplification is not a well defined term and the exact strategies
    this function tries can change in the future versions of SymPy. If
    your algorithm relies on "simplification" (whatever it is), try to
    determine what you need exactly  -  is it powsimp()?, radsimp()?,
    together()?, logcombine()?, or something else? And use this particular
    function directly, because those are well defined and thus your algorithm
    will be robust.

    Nonetheless, especially for interactive use, or when you don't know
    anything about the structure of the expression, simplify() tries to apply
    intelligent heuristics to make the input expression "simpler".  For
    example:

    >>> from sympy import simplify, cos, sin
    >>> from sympy.abc import x, y
    >>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2)
    >>> a
    (x**2 + x)/(x*sin(y)**2 + x*cos(y)**2)
    >>> simplify(a)
    x + 1

    Note that we could have obtained the same result by using specific
    simplification functions:

    >>> from sympy import trigsimp, cancel
    >>> trigsimp(a)
    (x**2 + x)/x
    >>> cancel(_)
    x + 1

    In some cases, applying :func:`simplify` may actually result in some more
    complicated expression. The default ``ratio=1.7`` prevents more extreme
    cases: if (result length)/(input length) > ratio, then input is returned
    unmodified.  The ``measure`` parameter lets you specify the function used
    to determine how complex an expression is.  The function should take a
    single argument as an expression and return a number such that if
    expression ``a`` is more complex than expression ``b``, then
    ``measure(a) > measure(b)``.  The default measure function is
    :func:`count_ops`, which returns the total number of operations in the
    expression.

    For example, if ``ratio=1``, ``simplify`` output can't be longer
    than input.

    ::

        >>> from sympy import sqrt, simplify, count_ops, oo
        >>> root = 1/(sqrt(2)+3)

    Since ``simplify(root)`` would result in a slightly longer expression,
    root is returned unchanged instead::

       >>> simplify(root, ratio=1) == root
       True

    If ``ratio=oo``, simplify will be applied anyway::

        >>> count_ops(simplify(root, ratio=oo)) > count_ops(root)
        True

    Note that the shortest expression is not necessarily the simplest, so
    setting ``ratio`` to 1 may not be a good idea.
    Heuristically, the default value ``ratio=1.7`` seems like a reasonable
    choice.

    You can easily define your own measure function based on what you feel
    should represent the "size" or "complexity" of the input expression.  Note
    that some choices, such as ``lambda expr: len(str(expr))`` may appear to be
    good metrics, but have other problems (in this case, the measure function
    may slow down simplify too much for very large expressions).  If you don't
    know what a good metric would be, the default, ``count_ops``, is a good
    one.

    For example:

    >>> from sympy import symbols, log
    >>> a, b = symbols('a b', positive=True)
    >>> g = log(a) + log(b) + log(a)*log(1/b)
    >>> h = simplify(g)
    >>> h
    log(a*b**(-log(a) + 1))
    >>> count_ops(g)
    8
    >>> count_ops(h)
    5

    So you can see that ``h`` is simpler than ``g`` using the count_ops metric.
    However, we may not like how ``simplify`` (in this case, using
    ``logcombine``) has created the ``b**(log(1/a) + 1)`` term.  A simple way
    to reduce this would be to give more weight to powers as operations in
    ``count_ops``.  We can do this by using the ``visual=True`` option:

    >>> print(count_ops(g, visual=True))
    2*ADD + DIV + 4*LOG + MUL
    >>> print(count_ops(h, visual=True))
    2*LOG + MUL + POW + SUB

    >>> from sympy import Symbol, S
    >>> def my_measure(expr):
    ...     POW = Symbol('POW')
    ...     # Discourage powers by giving POW a weight of 10
    ...     count = count_ops(expr, visual=True).subs(POW, 10)
    ...     # Every other operation gets a weight of 1 (the default)
    ...     count = count.replace(Symbol, type(S.One))
    ...     return count
    >>> my_measure(g)
    8
    >>> my_measure(h)
    14
    >>> 15./8 > 1.7 # 1.7 is the default ratio
    True
    >>> simplify(g, measure=my_measure)
    -log(a)*log(b) + log(a) + log(b)

    Note that because ``simplify()`` internally tries many different
    simplification strategies and then compares them using the measure
    function, we get a completely different result that is still different
    from the input expression by doing this.

    If rational=True, Floats will be recast as Rationals before simplification.
    If rational=None, Floats will be recast as Rationals but the result will
    be recast as Floats. If rational=False (default), then nothing will be done
    to the Floats.
    """
    expr = sympify(expr)

    try:
        return expr._eval_simplify(ratio=ratio, measure=measure)
    except AttributeError:
        pass

    original_expr = expr = signsimp(expr)

    from sympy.simplify.hyperexpand import hyperexpand
    from sympy.functions.special.bessel import BesselBase
    from sympy import Sum, Product

    if not isinstance(expr, Basic) or not expr.args:  # XXX: temporary hack
        return expr

    if not isinstance(expr, (Add, Mul, Pow, ExpBase)):
        if isinstance(expr, Function) and hasattr(expr, "inverse"):
            if len(expr.args) == 1 and len(expr.args[0].args) == 1 and \
               isinstance(expr.args[0], expr.inverse(argindex=1)):
                return simplify(expr.args[0].args[0], ratio=ratio,
                                measure=measure, rational=rational)
        return expr.func(*[simplify(x, ratio=ratio, measure=measure, rational=rational)
                         for x in expr.args])

    # TODO: Apply different strategies, considering expression pattern:
    # is it a purely rational function? Is there any trigonometric function?...
    # See also https://github.com/sympy/sympy/pull/185.

    def shorter(*choices):
        '''Return the choice that has the fewest ops. In case of a tie,
        the expression listed first is selected.'''
        if not has_variety(choices):
            return choices[0]
        return min(choices, key=measure)

    # rationalize Floats
    floats = False
    if rational is not False and expr.has(Float):
        floats = True
        expr = nsimplify(expr, rational=True)

    expr = bottom_up(expr, lambda w: w.normal())
    expr = Mul(*powsimp(expr).as_content_primitive())
    _e = cancel(expr)
    expr1 = shorter(_e, _mexpand(_e).cancel())  # issue 6829
    expr2 = shorter(together(expr, deep=True), together(expr1, deep=True))

    if ratio is S.Infinity:
        expr = expr2
    else:
        expr = shorter(expr2, expr1, expr)
    if not isinstance(expr, Basic):  # XXX: temporary hack
        return expr

    expr = factor_terms(expr, sign=False)

    # hyperexpand automatically only works on hypergeometric terms
    expr = hyperexpand(expr)

    expr = piecewise_fold(expr)

    if expr.has(BesselBase):
        expr = besselsimp(expr)

    if expr.has(TrigonometricFunction, HyperbolicFunction):
        expr = trigsimp(expr, deep=True)

    if expr.has(log):
        expr = shorter(expand_log(expr, deep=True), logcombine(expr))

    if expr.has(CombinatorialFunction, gamma):
        # expression with gamma functions or non-integer arguments is
        # automatically passed to gammasimp
        expr = combsimp(expr)

    if expr.has(Sum):
        expr = sum_simplify(expr)

    if expr.has(Product):
        expr = product_simplify(expr)

    short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr), expr)
    short = shorter(short, cancel(short))
    short = shorter(short, factor_terms(short), expand_power_exp(expand_mul(short)))
    if short.has(TrigonometricFunction, HyperbolicFunction, ExpBase):
        short = exptrigsimp(short)

    # get rid of hollow 2-arg Mul factorization
    hollow_mul = Transform(
        lambda x: Mul(*x.args),
        lambda x:
        x.is_Mul and
        len(x.args) == 2 and
        x.args[0].is_Number and
        x.args[1].is_Add and
        x.is_commutative)
    expr = short.xreplace(hollow_mul)

    numer, denom = expr.as_numer_denom()
    if denom.is_Add:
        n, d = fraction(radsimp(1/denom, symbolic=False, max_terms=1))
        if n is not S.One:
            expr = (numer*n).expand()/d

    if expr.could_extract_minus_sign():
        n, d = fraction(expr)
        if d != 0:
            expr = signsimp(-n/(-d))

    if measure(expr) > ratio*measure(original_expr):
        expr = original_expr

    # restore floats
    if floats and rational is None:
        expr = nfloat(expr, exponent=False)

    return expr
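A hedged sketch of the rational keyword described in this version's docstring; exact printed forms may vary, but the intent is as commented.

from sympy import symbols, sin, cos, simplify

x = symbols('x')
expr = 0.5*sin(x)**2 + 0.5*cos(x)**2
print(simplify(expr, rational=True))  # expected: 1/2 (Floats recast as Rationals and kept)
print(simplify(expr, rational=None))  # expected: a Float again (recast after simplifying)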
Example #27
def checkodesol(ode, sol, func=None, order='auto', solve_for_func=True):
    r"""
    Substitutes ``sol`` into ``ode`` and checks that the result is ``0``.

    This works when ``func`` is one function, like `f(x)` or a list of
    functions like `[f(x), g(x)]` when `ode` is a system of ODEs.  ``sol`` can
    be a single solution or a list of solutions.  Each solution may be an
    :py:class:`~sympy.core.relational.Equality` that the solution satisfies,
    e.g. ``Eq(f(x), C1), Eq(f(x) + C1, 0)``; or simply an
    :py:class:`~sympy.core.expr.Expr`, e.g. ``f(x) - C1``. In most cases it
    will not be necessary to explicitly identify the function, but if the
    function cannot be inferred from the original equation it can be supplied
    through the ``func`` argument.

    If a sequence of solutions is passed, the same sort of container will be
    used to return the result for each solution.

    It tries the following methods, in order, until it finds zero equivalence:

    1. Substitute the solution for `f` in the original equation.  This only
       works if ``ode`` is solved for `f`.  It will attempt to solve it first
       unless ``solve_for_func == False``.
    2. Take `n` derivatives of the solution, where `n` is the order of
       ``ode``, and check to see if that is equal to the solution.  This only
       works on exact ODEs.
    3. Take the 1st, 2nd, ..., `n`\th derivatives of the solution, each time
       solving for the derivative of `f` of that order (this will always be
       possible because `f` is a linear operator). Then back substitute each
       derivative into ``ode`` in reverse order.

    This function returns a tuple.  The first item in the tuple is ``True`` if
    the substitution results in ``0``, and ``False`` otherwise. The second
    item in the tuple is what the substitution results in.  It should always
    be ``0`` if the first item is ``True``. Sometimes this function will
    return ``False`` even when an expression is identically equal to ``0``.
    This happens when :py:meth:`~sympy.simplify.simplify.simplify` does not
    reduce the expression to ``0``.  If an expression returned by this
    function vanishes identically, then ``sol`` really is a solution to
    the ``ode``.

    If this function seems to hang, it is probably because of a hard
    simplification.

    To use this function to test, test the first item of the tuple.

    Examples
    ========

    >>> from sympy import (Eq, Function, checkodesol, symbols,
    ...     Derivative, exp)
    >>> x, C1, C2 = symbols('x,C1,C2')
    >>> f, g = symbols('f g', cls=Function)
    >>> checkodesol(f(x).diff(x), Eq(f(x), C1))
    (True, 0)
    >>> assert checkodesol(f(x).diff(x), C1)[0]
    >>> assert not checkodesol(f(x).diff(x), x)[0]
    >>> checkodesol(f(x).diff(x, 2), x**2)
    (False, 2)

    >>> eqs = [Eq(Derivative(f(x), x), f(x)), Eq(Derivative(g(x), x), g(x))]
    >>> sol = [Eq(f(x), C1*exp(x)), Eq(g(x), C2*exp(x))]
    >>> checkodesol(eqs, sol)
    (True, [0, 0])

    """
    if iterable(ode):
        return checksysodesol(ode, sol, func=func)

    if not isinstance(ode, Equality):
        ode = Eq(ode, 0)
    if func is None:
        try:
            _, func = _preprocess(ode.lhs)
        except ValueError:
            funcs = [s.atoms(AppliedUndef) for s in (
                sol if is_sequence(sol, set) else [sol])]
            funcs = set().union(*funcs)
            if len(funcs) != 1:
                raise ValueError(
                    'must pass func arg to checkodesol for this case.')
            func = funcs.pop()
    if not isinstance(func, AppliedUndef) or len(func.args) != 1:
        raise ValueError(
            "func must be a function of one variable, not %s" % func)
    if is_sequence(sol, set):
        return type(sol)([checkodesol(ode, i, order=order, solve_for_func=solve_for_func) for i in sol])

    if not isinstance(sol, Equality):
        sol = Eq(func, sol)
    elif sol.rhs == func:
        sol = sol.reversed

    if order == 'auto':
        order = ode_order(ode, func)
    solved = sol.lhs == func and not sol.rhs.has(func)
    if solve_for_func and not solved:
        rhs = solve(sol, func)
        if rhs:
            eqs = [Eq(func, t) for t in rhs]
            if len(rhs) == 1:
                eqs = eqs[0]
            return checkodesol(ode, eqs, order=order,
                solve_for_func=False)

    x = func.args[0]

    # Handle series solutions here
    if sol.has(Order):
        assert sol.lhs == func
        Oterm = sol.rhs.getO()
        solrhs = sol.rhs.removeO()

        Oexpr = Oterm.expr
        assert isinstance(Oexpr, Pow)
        sorder = Oexpr.exp
        assert Oterm == Order(x**sorder)

        odesubs = (ode.lhs-ode.rhs).subs(func, solrhs).doit().expand()

        neworder = Order(x**(sorder - order))
        odesubs = odesubs + neworder
        assert odesubs.getO() == neworder
        residual = odesubs.removeO()

        return (residual == 0, residual)

    s = True
    testnum = 0
    while s:
        if testnum == 0:
            # First pass, try substituting a solved solution directly into the
            # ODE. This has the highest chance of succeeding.
            ode_diff = ode.lhs - ode.rhs

            if sol.lhs == func:
                s = sub_func_doit(ode_diff, func, sol.rhs)
                s = besselsimp(s)
            else:
                testnum += 1
                continue
            ss = simplify(s.rewrite(exp))
            if ss:
                # with the new numer_denom in power.py, if we do a simple
                # expansion then testnum == 0 verifies all solutions.
                s = ss.expand(force=True)
            else:
                s = 0
            testnum += 1
        elif testnum == 1:
            # Second pass. If we cannot substitute f, try seeing if the nth
            # derivative is equal, this will only work for odes that are exact,
            # by definition.
            s = simplify(
                trigsimp(diff(sol.lhs, x, order) - diff(sol.rhs, x, order)) -
                trigsimp(ode.lhs) + trigsimp(ode.rhs))
            # s2 = simplify(
            #     diff(sol.lhs, x, order) - diff(sol.rhs, x, order) - \
            #     ode.lhs + ode.rhs)
            testnum += 1
        elif testnum == 2:
            # Third pass. Try solving for df/dx and substituting that into the
            # ODE. Thanks to Chris Smith for suggesting this method.  Many of
            # the comments below are his, too.
            # The method:
            # - Take each of 1..n derivatives of the solution.
            # - Solve each nth derivative for d^(n)f/dx^(n)
            #   (the differential of that order)
            # - Back substitute into the ODE in decreasing order
            #   (i.e., n, n-1, ...)
            # - Check the result for zero equivalence
            if sol.lhs == func and not sol.rhs.has(func):
                diffsols = {0: sol.rhs}
            elif sol.rhs == func and not sol.lhs.has(func):
                diffsols = {0: sol.lhs}
            else:
                diffsols = {}
            sol = sol.lhs - sol.rhs
            for i in range(1, order + 1):
                # Differentiation is a linear operator, so there should always
                # be 1 solution. Nonetheless, we test just to make sure.
                # We only need to solve once.  After that, we automatically
                # have the solution to the differential in the order we want.
                if i == 1:
                    ds = sol.diff(x)
                    try:
                        sdf = solve(ds, func.diff(x, i))
                        if not sdf:
                            raise NotImplementedError
                    except NotImplementedError:
                        testnum += 1
                        break
                    else:
                        diffsols[i] = sdf[0]
                else:
                    # This is what the solution says df/dx should be.
                    diffsols[i] = diffsols[i - 1].diff(x)

            # Make sure the above didn't fail.
            if testnum > 2:
                continue
            else:
                # Substitute it into ODE to check for self consistency.
                lhs, rhs = ode.lhs, ode.rhs
                for i in range(order, -1, -1):
                    if i == 0 and 0 not in diffsols:
                        # We can only substitute f(x) if the solution was
                        # solved for f(x).
                        break
                    lhs = sub_func_doit(lhs, func.diff(x, i), diffsols[i])
                    rhs = sub_func_doit(rhs, func.diff(x, i), diffsols[i])
                    ode_or_bool = Eq(lhs, rhs)
                    ode_or_bool = simplify(ode_or_bool)

                    if isinstance(ode_or_bool, (bool, BooleanAtom)):
                        if ode_or_bool:
                            lhs = rhs = S.Zero
                    else:
                        lhs = ode_or_bool.lhs
                        rhs = ode_or_bool.rhs
                # No sense in overworking simplify -- just prove that the
                # numerator goes to zero
                num = trigsimp((lhs - rhs).as_numer_denom()[0])
                # since solutions are obtained using force=True we test
                # using the same level of assumptions
                ## replace function with dummy so assumptions will work
                _func = Dummy('func')
                num = num.subs(func, _func)
                ## posify the expression
                num, reps = posify(num)
                s = simplify(num).xreplace(reps).xreplace({_func: func})
                testnum += 1
        else:
            break

    if not s:
        return (True, s)
    elif s is True:  # The code above never was able to change s
        raise NotImplementedError("Unable to test if " + str(sol) +
            " is a solution to " + str(ode) + ".")
    else:
        return (False, s)
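A minimal usage sketch following the docstring's own examples (standard SymPy assumed).

from sympy import Eq, Function, checkodesol, symbols, exp

x, C1 = symbols('x C1')
f = Function('f')
ode = Eq(f(x).diff(x), f(x))
print(checkodesol(ode, Eq(f(x), C1*exp(x))))  # expected: (True, 0)
print(checkodesol(ode, Eq(f(x), x**2)))       # expected: (False, <nonzero residual>)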
Example #28
def test_hyperbolic_simp():
    x, y = symbols('x,y')

    assert trigsimp(sinh(x)**2 + 1) == cosh(x)**2
    assert trigsimp(cosh(x)**2 - 1) == sinh(x)**2
    assert trigsimp(cosh(x)**2 - sinh(x)**2) == 1
    assert trigsimp(1 - tanh(x)**2) == 1 / cosh(x)**2
    assert trigsimp(1 - 1 / cosh(x)**2) == tanh(x)**2
    assert trigsimp(tanh(x)**2 + 1 / cosh(x)**2) == 1
    assert trigsimp(coth(x)**2 - 1) == 1 / sinh(x)**2
    assert trigsimp(1 / sinh(x)**2 + 1) == 1 / tanh(x)**2
    assert trigsimp(coth(x)**2 - 1 / sinh(x)**2) == 1

    assert trigsimp(5 * cosh(x)**2 - 5 * sinh(x)**2) == 5
    assert trigsimp(5 * cosh(x / 2)**2 -
                    2 * sinh(x / 2)**2) == 3 * cosh(x) / 2 + Rational(7, 2)

    assert trigsimp(sinh(x) / cosh(x)) == tanh(x)
    assert trigsimp(tanh(x)) == trigsimp(sinh(x) / cosh(x))
    assert trigsimp(cosh(x) / sinh(x)) == 1 / tanh(x)
    assert trigsimp(2 * tanh(x) * cosh(x)) == 2 * sinh(x)
    assert trigsimp(coth(x)**3 * sinh(x)**3) == cosh(x)**3
    assert trigsimp(y * tanh(x)**2 / sinh(x)**2) == y / cosh(x)**2
    assert trigsimp(coth(x) / cosh(x)) == 1 / sinh(x)

    for a in (pi / 6 * I, pi / 4 * I, pi / 3 * I):
        assert trigsimp(sinh(a) * cosh(x) + cosh(a) * sinh(x)) == sinh(x + a)
        assert trigsimp(-sinh(a) * cosh(x) + cosh(a) * sinh(x)) == sinh(x - a)

    e = 2 * cosh(x)**2 - 2 * sinh(x)**2
    assert trigsimp(log(e)) == log(2)

    # issue 19535:
    assert trigsimp(sqrt(cosh(x)**2 - 1)) == sqrt(sinh(x)**2)

    assert trigsimp(cosh(x)**2 * cosh(y)**2 - cosh(x)**2 * sinh(y)**2 -
                    sinh(x)**2,
                    recursive=True) == 1
    assert trigsimp(sinh(x)**2 * sinh(y)**2 - sinh(x)**2 * cosh(y)**2 +
                    cosh(x)**2,
                    recursive=True) == 1

    assert abs(trigsimp(2.0 * cosh(x)**2 - 2.0 * sinh(x)**2) - 2.0) < 1e-10

    assert trigsimp(sinh(x)**2 / cosh(x)**2) == tanh(x)**2
    assert trigsimp(sinh(x)**3 / cosh(x)**3) == tanh(x)**3
    assert trigsimp(sinh(x)**10 / cosh(x)**10) == tanh(x)**10
    assert trigsimp(cosh(x)**3 / sinh(x)**3) == 1 / tanh(x)**3

    assert trigsimp(cosh(x) / sinh(x)) == 1 / tanh(x)
    assert trigsimp(cosh(x)**2 / sinh(x)**2) == 1 / tanh(x)**2
    assert trigsimp(cosh(x)**10 / sinh(x)**10) == 1 / tanh(x)**10

    assert trigsimp(x * cosh(x) * tanh(x)) == x * sinh(x)
    assert trigsimp(-sinh(x) + cosh(x) * tanh(x)) == 0

    assert tan(x) != 1 / cot(x)  # cot doesn't auto-simplify

    assert trigsimp(tan(x) - 1 / cot(x)) == 0
    assert trigsimp(3 * tanh(x)**7 - 2 / coth(x)**7) == tanh(x)**7
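A two-line sketch of the hyperbolic identities covered above.

from sympy import symbols, sinh, cosh, trigsimp

x = symbols('x')
print(trigsimp(cosh(x)**2 - sinh(x)**2))  # expected: 1
print(trigsimp(sinh(x)/cosh(x)))          # expected: tanh(x)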
Example #29
    def _eval_trigsimp(self, **opts):
        # Hook used when trigsimp() is applied to a relational such as Eq:
        # each side is simplified independently; the sides are never rearranged.
        from sympy.simplify.trigsimp import trigsimp
        return self.func(trigsimp(self.lhs, **opts),
                         trigsimp(self.rhs, **opts))
Example #30
    def orient_new(self, name, orienters, location=None,
                   vector_names=None, variable_names=None):
        """
        Creates a new CoordSys3D oriented in the user-specified way
        with respect to this system.

        Please refer to the documentation of the orienter classes
        for more information about the orientation procedure.

        Parameters
        ==========

        name : str
            The name of the new CoordSys3D instance.

        orienters : iterable/Orienter
            An Orienter or an iterable of Orienters for orienting the
            new coordinate system.
            If an Orienter is provided, it is applied to get the new
            system.
            If an iterable is provided, the orienters will be applied
            in the order in which they appear in the iterable.

        location : Vector(optional)
            The location of the new coordinate system's origin wrt this
            system's origin. If not specified, the origins are taken to
            be coincident.

        vector_names, variable_names : iterable(optional)
            Iterables of 3 strings each, with custom names for base
            vectors and base scalars of the new system respectively.
            Used for simple str printing.

        Examples
        ========

        >>> from sympy.vector import CoordSys3D
        >>> from sympy import symbols
        >>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
        >>> N = CoordSys3D('N')

        Using an AxisOrienter

        >>> from sympy.vector import AxisOrienter
        >>> axis_orienter = AxisOrienter(q1, N.i + 2 * N.j)
        >>> A = N.orient_new('A', (axis_orienter, ))

        Using a BodyOrienter

        >>> from sympy.vector import BodyOrienter
        >>> body_orienter = BodyOrienter(q1, q2, q3, '123')
        >>> B = N.orient_new('B', (body_orienter, ))

        Using a SpaceOrienter

        >>> from sympy.vector import SpaceOrienter
        >>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
        >>> C = N.orient_new('C', (space_orienter, ))

        Using a QuaternionOrienter

        >>> from sympy.vector import QuaternionOrienter
        >>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
        >>> D = N.orient_new('D', (q_orienter, ))
        """
        if variable_names is None:
            variable_names = self._variable_names
        if vector_names is None:
            vector_names = self._vector_names

        if isinstance(orienters, Orienter):
            if isinstance(orienters, AxisOrienter):
                final_matrix = orienters.rotation_matrix(self)
            else:
                final_matrix = orienters.rotation_matrix()
            # TODO: trigsimp is needed here so that the matrix becomes
            # canonical (scalar_map also calls trigsimp; without this, you can
            # end up with the same CoordinateSystem that compares differently
            # due to a differently formatted matrix). However, this is
            # probably not so good for performance.
            final_matrix = trigsimp(final_matrix)
        else:
            final_matrix = Matrix(eye(3))
            for orienter in orienters:
                if isinstance(orienter, AxisOrienter):
                    final_matrix *= orienter.rotation_matrix(self)
                else:
                    final_matrix *= orienter.rotation_matrix()

        return CoordSys3D(name, rotation_matrix=final_matrix,
                          vector_names=vector_names,
                          variable_names=variable_names,
                          location=location,
                          parent=self)
Example #31
    def norm(self):
        """Returns the norm of the quaternion."""
        q = self
        # trigsimp is used to simplify sin(x)^2 + cos(x)^2 (these terms
        # arise when from_axis_angle is used).
        return sqrt(trigsimp(q.a**2 + q.b**2 + q.c**2 + q.d**2))
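A short sketch of why trigsimp matters here: for a quaternion built with from_axis_angle, the squared components sum to sin**2 + cos**2 terms that collapse to 1.

from sympy import Quaternion, symbols

t = symbols('t', real=True)
q = Quaternion.from_axis_angle((0, 0, 1), t)
print(q.norm())  # expected: 1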
Example #32
def test_trigsimp_noncommutative():
    x, y = symbols('x,y')
    A, B = symbols('A,B', commutative=False)

    assert trigsimp(A - A * sin(x)**2) == A * cos(x)**2
    assert trigsimp(A - A * cos(x)**2) == A * sin(x)**2
    assert trigsimp(A * sin(x)**2 + A * cos(x)**2) == A
    assert trigsimp(A + A * tan(x)**2) == A / cos(x)**2
    assert trigsimp(A / cos(x)**2 - A) == A * tan(x)**2
    assert trigsimp(A / cos(x)**2 - A * tan(x)**2) == A
    assert trigsimp(A + A * cot(x)**2) == A / sin(x)**2
    assert trigsimp(A / sin(x)**2 - A) == A / tan(x)**2
    assert trigsimp(A / sin(x)**2 - A * cot(x)**2) == A

    assert trigsimp(y * A * cos(x)**2 + y * A * sin(x)**2) == y * A

    assert trigsimp(A * sin(x) / cos(x)) == A * tan(x)
    assert trigsimp(A * tan(x) * cos(x)) == A * sin(x)
    assert trigsimp(A * cot(x)**3 * sin(x)**3) == A * cos(x)**3
    assert trigsimp(y * A * tan(x)**2 / sin(x)**2) == y * A / cos(x)**2
    assert trigsimp(A * cot(x) / cos(x)) == A / sin(x)

    assert trigsimp(A * sin(x + y) + A * sin(x - y)) == 2 * A * sin(x) * cos(y)
    assert trigsimp(A * sin(x + y) - A * sin(x - y)) == 2 * A * sin(y) * cos(x)
    assert trigsimp(A * cos(x + y) + A * cos(x - y)) == 2 * A * cos(x) * cos(y)
    assert trigsimp(A * cos(x + y) -
                    A * cos(x - y)) == -2 * A * sin(x) * sin(y)

    assert trigsimp(A * sinh(x + y) +
                    A * sinh(x - y)) == 2 * A * sinh(x) * cosh(y)
    assert trigsimp(A * sinh(x + y) -
                    A * sinh(x - y)) == 2 * A * sinh(y) * cosh(x)
    assert trigsimp(A * cosh(x + y) +
                    A * cosh(x - y)) == 2 * A * cosh(x) * cosh(y)
    assert trigsimp(A * cosh(x + y) -
                    A * cosh(x - y)) == 2 * A * sinh(x) * sinh(y)

    assert trigsimp(A * cos(0.12345)**2 + A * sin(0.12345)**2) == 1.0 * A
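A closing sketch of the noncommutative handling tested above (standard SymPy assumed).

from sympy import symbols, sin, cos, trigsimp

x = symbols('x')
A = symbols('A', commutative=False)
print(trigsimp(A*sin(x)**2 + A*cos(x)**2))  # expected: A
print(trigsimp(A*sin(x)/cos(x)))            # expected: A*tan(x)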