Example #1
def test_float_handling():
    def test(e1, e2):
        return len(e1.atoms(Float)) == len(e2.atoms(Float))

    assert solve(x - 0.5, rational=True)[0].is_Rational
    assert solve(x - 0.5, rational=False)[0].is_Float
    assert solve(x - S.Half, rational=False)[0].is_Rational
    assert solve(x - 0.5, rational=None)[0].is_Float
    assert solve(x - S.Half, rational=None)[0].is_Rational
    assert test(nfloat(1 + 2 * x), 1.0 + 2.0 * x)
    for contain in [list, tuple, set]:
        ans = nfloat(contain([1 + 2 * x]))
        assert type(ans) is contain and test(list(ans)[0], 1.0 + 2.0 * x)
    k, v = list(nfloat({2 * x: [1 + 2 * x]}).items())[0]
    assert test(k, 2 * x) and test(v[0], 1.0 + 2.0 * x)
    assert test(nfloat(cos(2 * x)), cos(2.0 * x))
    assert test(nfloat(3 * x**2), 3.0 * x**2)
    assert test(nfloat(3 * x**2, exponent=True), 3.0 * x**2.0)
    assert test(nfloat(exp(2 * x)), exp(2.0 * x))
    assert test(nfloat(x / 3), x / 3.0)
    assert test(nfloat(x**4 + 2 * x + cos(S(1) / 3) + 1),
                x**4 + 2.0 * x + 1.94495694631474)
    # don't call nfloat if there is no solution
    tot = 100 + c + z + t
    assert solve(
        ((.7 + c) / tot - .6, (.2 + z) / tot - .3, t / tot - .1)) == []
Example #2
    def gaussmfAreaU(x, params1, params2, prevInter):
        assert len(params1) == 2, 'sigma,c parameter must have exactly two \
                                   elements.'

        assert len(params2) == 2, 'sigma,c parameter must have exactly two \
                                   elements.'

        m1, s1 = np.r_[params1]  # unpack [center, sigma]
        m2, s2 = np.r_[params2]  # unpack [center, sigma]

        # Coefficients of the quadratic whose roots are the intersection points
        def solve(m1, m2, std1, std2):
            a = 1 / (2 * std1**2) - 1 / (2 * std2**2)
            b = m2 / (std2**2) - m1 / (std1**2)
            c = m1**2 / (2 * std1**2) - m2**2 / (2 * std2**2) - np.log(
                std2 / std1)
            return np.roots([a, b, c])

        flag = 2
        result = solve(m1, m2, s1, s2)
        if len(result) == 0:  # Completely non-overlapping
            area = 0.0
            return area
        if len(result) > 0:  # one or more intersection points
            for i in range(len(result)):
                #print("All Points of intersection")
                #print(result)
                if result[i] < 0:
                    # replace negative roots with a large sentinel and flag it
                    result[i] = 10000
                    flag = 0
            if np.min(result) > prevInter:
                r = np.min(result)
            else:
                r = np.max(result)
            if flag == 0:
                r = np.min(result)

            #print("POINT OF INTERSECTION")
            #print(r)

        x = sympy.var("x")
        func1 = sympy.exp(-(((x - params1[0]) / params1[1])**2) / 2)
        func2 = sympy.exp(-(((x - params2[0]) / params2[1])**2) / 2)
        int1 = sympy.integrate(func1, (x, r, 10))
        int2 = sympy.integrate(func2, (x, 0, r))
        area1 = nfloat(int1)
        area2 = nfloat(int2)
        areaTot = round(area1, 3) + round(area2, 3)

        return round(areaTot, 3), r
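The helper above locates the intersection of two Gaussian membership functions by equating the normalized Gaussian densities, taking logarithms, and solving the resulting quadratic. A minimal numerical check of those coefficients, using illustrative parameter values and scipy.stats (not part of the original code):

import numpy as np
from scipy.stats import norm

# illustrative [center, sigma] pairs (hypothetical values)
m1, s1 = 3.0, 1.0
m2, s2 = 5.0, 1.5

# coefficients of a*x**2 + b*x + c = 0, exactly as in solve() above
a = 1 / (2 * s1**2) - 1 / (2 * s2**2)
b = m2 / s2**2 - m1 / s1**2
c = m1**2 / (2 * s1**2) - m2**2 / (2 * s2**2) - np.log(s2 / s1)

# at each root the two normalized densities coincide
for r in np.roots([a, b, c]):
    assert np.isclose(norm.pdf(r, m1, s1), norm.pdf(r, m2, s2))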
Example #3
def test_evalf_relational():
    assert Eq(x / 5, y / 10).evalf() == Eq(0.2 * x, 0.1 * y)
    # if this first assertion fails it should be replaced with
    # one that doesn't
    assert unchanged(Eq, (3 - I)**2 / 2 + I, 0)
    assert Eq((3 - I)**2 / 2 + I, 0).n() is S.false
    assert nfloat(Eq((3 - I)**2 + I, 0)) == S.false
Example #4
def test_float_handling():
    def test(e1, e2):
        return len(e1.atoms(Float)) == len(e2.atoms(Float))
    assert solve(x - 0.5, rational=True)[0].is_Rational
    assert solve(x - 0.5, rational=False)[0].is_Float
    assert solve(x - S.Half, rational=False)[0].is_Rational
    assert solve(x - 0.5, rational=None)[0].is_Float
    assert solve(x - S.Half, rational=None)[0].is_Rational
    assert test(nfloat(1 + 2*x), 1.0 + 2.0*x)
    for contain in [list, tuple, set]:
        ans = nfloat(contain([1 + 2*x]))
        assert type(ans) is contain and test(list(ans)[0], 1.0 + 2.0*x)
    k, v = list(nfloat({2*x: [1 + 2*x]}).items())[0]
    assert test(k, 2*x) and test(v[0], 1.0 + 2.0*x)
    assert test(nfloat(cos(2*x)), cos(2.0*x))
    assert test(nfloat(3*x**2), 3.0*x**2)
    assert test(nfloat(3*x**2, exponent=True), 3.0*x**2.0)
    assert test(nfloat(exp(2*x)), exp(2.0*x))
    assert test(nfloat(x/3), x/3.0)
    assert test(nfloat(x**4 + 2*x + cos(S(1)/3) + 1),
                x**4 + 2.0*x + 1.94495694631474)
Example #5
    def gaussmfAreaC(x, params1, params2, pUnA, nUnA):
        assert len(params1) == 2, 'sigma,c parameter must have exactly two \
                                   elements.'

        assert len(params2) == 2, 'sigma,c parameter must have exactly two \
                                   elements.'

        m1, s1 = np.r_[params1]  # unpack [center, sigma]
        m2, s2 = np.r_[params2]  # unpack [center, sigma]

        x = sympy.var("x")
        func1 = sympy.exp(-(((x - params1[0]) / params1[1])**2) / 2)
        int1 = sympy.integrate(func1, (x, 0, 10))
        area1 = nfloat(int1)
        areaTot = round(area1, 3) - pUnA - nUnA

        return round(areaTot, 3)
Example #6
def test_return_root_of():
    f = x ** 5 - 15 * x ** 3 - 5 * x ** 2 + 10 * x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == RootOf

    # if one uses solve to get the roots of a polynomial that has a RootOf
    # solution, make sure that the use of nfloat during the solve process
    # doesn't fail. Note: if you want numerical solutions to a polynomial
    # it is *much* faster to use nroots to get them than to solve the
    # equation only to get RootOf solutions which are then numerically
    # evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
    # than [i.n() for i in solve(eq)] to get the numerical roots of eq.
    assert (
        nfloat(list(solveset_complex(x ** 5 + 3 * x ** 3 + 7, x))[0], exponent=False)
        == RootOf(x ** 5 + 3 * x ** 3 + 7, 0).n()
    )

    sol = list(solveset_complex(x ** 6 - 2 * x + 2, x))
    assert all(isinstance(i, RootOf) for i in sol) and len(sol) == 6

    f = x ** 5 - 15 * x ** 3 - 5 * x ** 2 + 10 * x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == RootOf

    s = x ** 5 + 4 * x ** 3 + 3 * x ** 2 + S(7) / 4
    assert solveset_complex(s, x) == FiniteSet(*Poly(s * 4, domain="ZZ").all_roots())

    # XXX: this comparison should work without converting the FiniteSet to list
    # See #7876
    eq = x * (x - 1) ** 2 * (x + 1) * (x ** 6 - x + 1)
    assert list(solveset_complex(eq, x)) == list(
        FiniteSet(
            -1,
            0,
            1,
            RootOf(x ** 6 - x + 1, 0),
            RootOf(x ** 6 - x + 1, 1),
            RootOf(x ** 6 - x + 1, 2),
            RootOf(x ** 6 - x + 1, 3),
            RootOf(x ** 6 - x + 1, 4),
            RootOf(x ** 6 - x + 1, 5),
        )
    )
Example #7
def test_return_root_of():
    f = x**5 - 15 * x**3 - 5 * x**2 + 10 * x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == CRootOf

    # if one uses solve to get the roots of a polynomial that has a CRootOf
    # solution, make sure that the use of nfloat during the solve process
    # doesn't fail. Note: if you want numerical solutions to a polynomial
    # it is *much* faster to use nroots to get them than to solve the
    # equation only to get CRootOf solutions which are then numerically
    # evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
    # than [i.n() for i in solve(eq)] to get the numerical roots of eq.
    assert nfloat(list(solveset_complex(x**5 + 3 * x**3 + 7, x))[0],
                  exponent=False) == CRootOf(x**5 + 3 * x**3 + 7, 0).n()

    sol = list(solveset_complex(x**6 - 2 * x + 2, x))
    assert all(isinstance(i, CRootOf) for i in sol) and len(sol) == 6

    f = x**5 - 15 * x**3 - 5 * x**2 + 10 * x + 20
    s = list(solveset_complex(f, x))
    for root in s:
        assert root.func == CRootOf

    s = x**5 + 4 * x**3 + 3 * x**2 + S(7) / 4
    assert solveset_complex(s, x) == \
        FiniteSet(*Poly(s*4, domain='ZZ').all_roots())

    # XXX: this comparison should work without converting the FiniteSet to list
    # See #7876
    eq = x * (x - 1)**2 * (x + 1) * (x**6 - x + 1)
    assert list(solveset_complex(eq, x)) == \
        list(FiniteSet(-1, 0, 1, CRootOf(x**6 - x + 1, 0),
                       CRootOf(x**6 - x + 1, 1),
                       CRootOf(x**6 - x + 1, 2),
                       CRootOf(x**6 - x + 1, 3),
                       CRootOf(x**6 - x + 1, 4),
                       CRootOf(x**6 - x + 1, 5)))
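The comment in the two examples above recommends Poly(eq).nroots() over numerically evaluating the roots returned by solve(). A minimal sketch of that comparison using the standard SymPy API (illustrative only, not part of the tests):

from sympy import Poly, solve
from sympy.abc import x

eq = x**5 + 3*x + 7
fast = Poly(eq, x).nroots()           # direct numerical root finding
slow = [i.n() for i in solve(eq, x)]  # exact CRootOf roots, then evalf
# both lists contain the same five numerical roots; nroots skips the
# symbolic CRootOf detour and is much faster for this purpose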
Example #8
def simplify(expr, ratio=1.7, measure=count_ops, rational=False, inverse=False):
    """Simplifies the given expression.

    Simplification is not a well-defined term and the exact strategies
    this function tries can change in future versions of SymPy. If
    your algorithm relies on "simplification" (whatever it is), try to
    determine what you need exactly - is it powsimp()?, radsimp()?,
    together()?, logcombine()?, or something else? If so, use that
    particular function directly, because those functions are well defined
    and thus your algorithm will be robust.

    Nonetheless, especially for interactive use, or when you don't know
    anything about the structure of the expression, simplify() tries to apply
    intelligent heuristics to make the input expression "simpler".  For
    example:

    >>> from sympy import simplify, cos, sin
    >>> from sympy.abc import x, y
    >>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2)
    >>> a
    (x**2 + x)/(x*sin(y)**2 + x*cos(y)**2)
    >>> simplify(a)
    x + 1

    Note that we could have obtained the same result by using specific
    simplification functions:

    >>> from sympy import trigsimp, cancel
    >>> trigsimp(a)
    (x**2 + x)/x
    >>> cancel(_)
    x + 1

    In some cases, applying :func:`simplify` may actually result in a more
    complicated expression. The default ``ratio=1.7`` prevents more extreme
    cases: if (result length)/(input length) > ratio, then the input is returned
    unmodified.  The ``measure`` parameter lets you specify the function used
    to determine how complex an expression is.  The function should take a
    single argument as an expression and return a number such that if
    expression ``a`` is more complex than expression ``b``, then
    ``measure(a) > measure(b)``.  The default measure function is
    :func:`count_ops`, which returns the total number of operations in the
    expression.

    For example, if ``ratio=1``, ``simplify`` output can't be longer
    than input.

    ::

        >>> from sympy import sqrt, simplify, count_ops, oo
        >>> root = 1/(sqrt(2)+3)

    Since ``simplify(root)`` would result in a slightly longer expression,
    root is returned unchanged instead::

       >>> simplify(root, ratio=1) == root
       True

    If ``ratio=oo``, simplify will be applied anyway::

        >>> count_ops(simplify(root, ratio=oo)) > count_ops(root)
        True

    Note that the shortest expression is not necessarily the simplest, so
    setting ``ratio`` to 1 may not be a good idea.
    Heuristically, the default value ``ratio=1.7`` seems like a reasonable
    choice.

    You can easily define your own measure function based on what you feel
    should represent the "size" or "complexity" of the input expression.  Note
    that some choices, such as ``lambda expr: len(str(expr))`` may appear to be
    good metrics, but have other problems (in this case, the measure function
    may slow down simplify too much for very large expressions).  If you don't
    know what a good metric would be, the default, ``count_ops``, is a good
    one.

    For example:

    >>> from sympy import symbols, log
    >>> a, b = symbols('a b', positive=True)
    >>> g = log(a) + log(b) + log(a)*log(1/b)
    >>> h = simplify(g)
    >>> h
    log(a*b**(-log(a) + 1))
    >>> count_ops(g)
    8
    >>> count_ops(h)
    5

    So you can see that ``h`` is simpler than ``g`` using the count_ops metric.
    However, we may not like how ``simplify`` (in this case, using
    ``logcombine``) has created the ``b**(-log(a) + 1)`` term.  A simple way
    to reduce this would be to give more weight to powers as operations in
    ``count_ops``.  We can do this by using the ``visual=True`` option:

    >>> print(count_ops(g, visual=True))
    2*ADD + DIV + 4*LOG + MUL
    >>> print(count_ops(h, visual=True))
    2*LOG + MUL + POW + SUB

    >>> from sympy import Symbol, S
    >>> def my_measure(expr):
    ...     POW = Symbol('POW')
    ...     # Discourage powers by giving POW a weight of 10
    ...     count = count_ops(expr, visual=True).subs(POW, 10)
    ...     # Every other operation gets a weight of 1 (the default)
    ...     count = count.replace(Symbol, type(S.One))
    ...     return count
    >>> my_measure(g)
    8
    >>> my_measure(h)
    14
    >>> 15./8 > 1.7 # 1.7 is the default ratio
    True
    >>> simplify(g, measure=my_measure)
    -log(a)*log(b) + log(a) + log(b)

    Note that because ``simplify()`` internally tries many different
    simplification strategies and then compares them using the measure
    function, this can produce a completely different result that is still
    different from the input expression.

    If ``rational=True``, Floats will be recast as Rationals before simplification.
    If ``rational=None``, Floats will be recast as Rationals but the result will
    be recast as Floats. If ``rational=False`` (the default), nothing will be done
    to the Floats.

    If inverse=True, it will be assumed that a composition of inverse
    functions, such as sin and asin, can be cancelled in any order.
    For example, ``asin(sin(x))`` will yield ``x`` without checking whether
    x belongs to the set where this relation is true. The default is
    False.
    """
    expr = sympify(expr)

    try:
        return expr._eval_simplify(ratio=ratio, measure=measure)
    except AttributeError:
        pass

    original_expr = expr = signsimp(expr)

    from sympy.simplify.hyperexpand import hyperexpand
    from sympy.functions.special.bessel import BesselBase
    from sympy import Sum, Product

    if not isinstance(expr, Basic) or not expr.args:  # XXX: temporary hack
        return expr

    if inverse and expr.has(Function):
        expr = inversecombine(expr)
        if not expr.args:  # simplified to atomic
            return expr

    if not isinstance(expr, (Add, Mul, Pow, ExpBase)):
        return expr.func(*[simplify(x, ratio=ratio, measure=measure, rational=rational)
                         for x in expr.args])

    # TODO: Apply different strategies, considering expression pattern:
    # is it a purely rational function? Is there any trigonometric function?...
    # See also https://github.com/sympy/sympy/pull/185.

    def shorter(*choices):
        '''Return the choice that has the fewest ops. In case of a tie,
        the expression listed first is selected.'''
        if not has_variety(choices):
            return choices[0]
        return min(choices, key=measure)

    # rationalize Floats
    floats = False
    if rational is not False and expr.has(Float):
        floats = True
        expr = nsimplify(expr, rational=True)

    expr = bottom_up(expr, lambda w: w.normal())
    expr = Mul(*powsimp(expr).as_content_primitive())
    _e = cancel(expr)
    expr1 = shorter(_e, _mexpand(_e).cancel())  # issue 6829
    expr2 = shorter(together(expr, deep=True), together(expr1, deep=True))

    if ratio is S.Infinity:
        expr = expr2
    else:
        expr = shorter(expr2, expr1, expr)
    if not isinstance(expr, Basic):  # XXX: temporary hack
        return expr

    expr = factor_terms(expr, sign=False)

    # hyperexpand automatically only works on hypergeometric terms
    expr = hyperexpand(expr)

    expr = piecewise_fold(expr)

    if expr.has(BesselBase):
        expr = besselsimp(expr)

    if expr.has(TrigonometricFunction, HyperbolicFunction):
        expr = trigsimp(expr, deep=True)

    if expr.has(log):
        expr = shorter(expand_log(expr, deep=True), logcombine(expr))

    if expr.has(CombinatorialFunction, gamma):
        # expression with gamma functions or non-integer arguments is
        # automatically passed to gammasimp
        expr = combsimp(expr)

    if expr.has(Sum):
        expr = sum_simplify(expr)

    if expr.has(Product):
        expr = product_simplify(expr)

    from sympy.physics.units import Quantity
    from sympy.physics.units.util import quantity_simplify

    if expr.has(Quantity):
        expr = quantity_simplify(expr)

    short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr), expr)
    short = shorter(short, cancel(short))
    short = shorter(short, factor_terms(short), expand_power_exp(expand_mul(short)))
    if short.has(TrigonometricFunction, HyperbolicFunction, ExpBase):
        short = exptrigsimp(short)

    # get rid of hollow 2-arg Mul factorization
    hollow_mul = Transform(
        lambda x: Mul(*x.args),
        lambda x:
        x.is_Mul and
        len(x.args) == 2 and
        x.args[0].is_Number and
        x.args[1].is_Add and
        x.is_commutative)
    expr = short.xreplace(hollow_mul)

    numer, denom = expr.as_numer_denom()
    if denom.is_Add:
        n, d = fraction(radsimp(1/denom, symbolic=False, max_terms=1))
        if n is not S.One:
            expr = (numer*n).expand()/d

    if expr.could_extract_minus_sign():
        n, d = fraction(expr)
        if d != 0:
            expr = signsimp(-n/(-d))

    if measure(expr) > ratio*measure(original_expr):
        expr = original_expr

    # restore floats
    if floats and rational is None:
        expr = nfloat(expr, exponent=False)

    return expr
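The ``rational`` and ``inverse`` flags described in the docstring above can be exercised directly. A small sketch, assuming current SymPy behavior (the ``asin(sin(x))`` result is exactly what the docstring promises; the ``rational`` output may vary between versions):

from sympy import simplify, sin, asin, Symbol

x = Symbol('x')
# inverse=True cancels compositions of inverse functions without domain
# checks, so asin(sin(x)) collapses to x as described in the docstring
print(simplify(asin(sin(x)), inverse=True))
# rational=True recasts Floats as Rationals before simplifying; the exact
# simplified form may differ between SymPy versions
print(simplify(0.5*x + 0.25, rational=True))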
Example #9
def simplify(expr, ratio=1.7, measure=count_ops, rational=False):
    # type: (object, object, object, object) -> object
    """
    Simplifies the given expression.

    Simplification is not a well-defined term and the exact strategies
    this function tries can change in future versions of SymPy. If
    your algorithm relies on "simplification" (whatever it is), try to
    determine what you need exactly - is it powsimp()?, radsimp()?,
    together()?, logcombine()?, or something else? If so, use that
    particular function directly, because those functions are well defined
    and thus your algorithm will be robust.

    Nonetheless, especially for interactive use, or when you don't know
    anything about the structure of the expression, simplify() tries to apply
    intelligent heuristics to make the input expression "simpler".  For
    example:

    >>> from sympy import simplify, cos, sin
    >>> from sympy.abc import x, y
    >>> a = (x + x**2)/(x*sin(y)**2 + x*cos(y)**2)
    >>> a
    (x**2 + x)/(x*sin(y)**2 + x*cos(y)**2)
    >>> simplify(a)
    x + 1

    Note that we could have obtained the same result by using specific
    simplification functions:

    >>> from sympy import trigsimp, cancel
    >>> trigsimp(a)
    (x**2 + x)/x
    >>> cancel(_)
    x + 1

    In some cases, applying :func:`simplify` may actually result in a more
    complicated expression. The default ``ratio=1.7`` prevents more extreme
    cases: if (result length)/(input length) > ratio, then the input is returned
    unmodified.  The ``measure`` parameter lets you specify the function used
    to determine how complex an expression is.  The function should take a
    single argument as an expression and return a number such that if
    expression ``a`` is more complex than expression ``b``, then
    ``measure(a) > measure(b)``.  The default measure function is
    :func:`count_ops`, which returns the total number of operations in the
    expression.

    For example, if ``ratio=1``, ``simplify`` output can't be longer
    than input.

    ::

        >>> from sympy import sqrt, simplify, count_ops, oo
        >>> root = 1/(sqrt(2)+3)

    Since ``simplify(root)`` would result in a slightly longer expression,
    root is returned unchanged instead::

       >>> simplify(root, ratio=1) == root
       True

    If ``ratio=oo``, simplify will be applied anyway::

        >>> count_ops(simplify(root, ratio=oo)) > count_ops(root)
        True

    Note that the shortest expression is not necessarily the simplest, so
    setting ``ratio`` to 1 may not be a good idea.
    Heuristically, the default value ``ratio=1.7`` seems like a reasonable
    choice.

    You can easily define your own measure function based on what you feel
    should represent the "size" or "complexity" of the input expression.  Note
    that some choices, such as ``lambda expr: len(str(expr))`` may appear to be
    good metrics, but have other problems (in this case, the measure function
    may slow down simplify too much for very large expressions).  If you don't
    know what a good metric would be, the default, ``count_ops``, is a good
    one.

    For example:

    >>> from sympy import symbols, log
    >>> a, b = symbols('a b', positive=True)
    >>> g = log(a) + log(b) + log(a)*log(1/b)
    >>> h = simplify(g)
    >>> h
    log(a*b**(-log(a) + 1))
    >>> count_ops(g)
    8
    >>> count_ops(h)
    5

    So you can see that ``h`` is simpler than ``g`` using the count_ops metric.
    However, we may not like how ``simplify`` (in this case, using
    ``logcombine``) has created the ``b**(-log(a) + 1)`` term.  A simple way
    to reduce this would be to give more weight to powers as operations in
    ``count_ops``.  We can do this by using the ``visual=True`` option:

    >>> print(count_ops(g, visual=True))
    2*ADD + DIV + 4*LOG + MUL
    >>> print(count_ops(h, visual=True))
    2*LOG + MUL + POW + SUB

    >>> from sympy import Symbol, S
    >>> def my_measure(expr):
    ...     POW = Symbol('POW')
    ...     # Discourage powers by giving POW a weight of 10
    ...     count = count_ops(expr, visual=True).subs(POW, 10)
    ...     # Every other operation gets a weight of 1 (the default)
    ...     count = count.replace(Symbol, type(S.One))
    ...     return count
    >>> my_measure(g)
    8
    >>> my_measure(h)
    14
    >>> 15./8 > 1.7 # 1.7 is the default ratio
    True
    >>> simplify(g, measure=my_measure)
    -log(a)*log(b) + log(a) + log(b)

    Note that because ``simplify()`` internally tries many different
    simplification strategies and then compares them using the measure
    function, this can produce a completely different result that is still
    different from the input expression.

    If ``rational=True``, Floats will be recast as Rationals before simplification.
    If ``rational=None``, Floats will be recast as Rationals but the result will
    be recast as Floats. If ``rational=False`` (the default), nothing will be done
    to the Floats.
    """
    expr = sympify(expr)

    try:
        return expr._eval_simplify(ratio=ratio, measure=measure)
    except AttributeError:
        pass

    original_expr = expr = signsimp(expr)

    from sympy.simplify.hyperexpand import hyperexpand
    from sympy.functions.special.bessel import BesselBase
    from sympy import Sum, Product

    if not isinstance(expr, Basic) or not expr.args:  # XXX: temporary hack
        return expr

    if not isinstance(expr, (Add, Mul, Pow, ExpBase)):
        if isinstance(expr, Function) and hasattr(expr, "inverse"):
            if len(expr.args) == 1 and len(expr.args[0].args) == 1 and \
               isinstance(expr.args[0], expr.inverse(argindex=1)):
                return simplify(expr.args[0].args[0], ratio=ratio,
                                measure=measure, rational=rational)
        return expr.func(*[simplify(x, ratio=ratio, measure=measure, rational=rational)
                         for x in expr.args])

    # TODO: Apply different strategies, considering expression pattern:
    # is it a purely rational function? Is there any trigonometric function?...
    # See also https://github.com/sympy/sympy/pull/185.

    def shorter(*choices):
        '''Return the choice that has the fewest ops. In case of a tie,
        the expression listed first is selected.'''
        if not has_variety(choices):
            return choices[0]
        return min(choices, key=measure)

    # rationalize Floats
    floats = False
    if rational is not False and expr.has(Float):
        floats = True
        expr = nsimplify(expr, rational=True)

    expr = bottom_up(expr, lambda w: w.normal())
    expr = Mul(*powsimp(expr).as_content_primitive())
    _e = cancel(expr)
    expr1 = shorter(_e, _mexpand(_e).cancel())  # issue 6829
    expr2 = shorter(together(expr, deep=True), together(expr1, deep=True))

    if ratio is S.Infinity:
        expr = expr2
    else:
        expr = shorter(expr2, expr1, expr)
    if not isinstance(expr, Basic):  # XXX: temporary hack
        return expr

    expr = factor_terms(expr, sign=False)

    # hyperexpand automatically only works on hypergeometric terms
    expr = hyperexpand(expr)

    expr = piecewise_fold(expr)

    if expr.has(BesselBase):
        expr = besselsimp(expr)

    if expr.has(TrigonometricFunction, HyperbolicFunction):
        expr = trigsimp(expr, deep=True)

    if expr.has(log):
        expr = shorter(expand_log(expr, deep=True), logcombine(expr))

    if expr.has(CombinatorialFunction, gamma):
        # expression with gamma functions or non-integer arguments is
        # automatically passed to gammasimp
        expr = combsimp(expr)

    if expr.has(Sum):
        expr = sum_simplify(expr)

    if expr.has(Product):
        expr = product_simplify(expr)

    short = shorter(powsimp(expr, combine='exp', deep=True), powsimp(expr), expr)
    short = shorter(short, cancel(short))
    short = shorter(short, factor_terms(short), expand_power_exp(expand_mul(short)))
    if short.has(TrigonometricFunction, HyperbolicFunction, ExpBase):
        short = exptrigsimp(short)

    # get rid of hollow 2-arg Mul factorization
    hollow_mul = Transform(
        lambda x: Mul(*x.args),
        lambda x:
        x.is_Mul and
        len(x.args) == 2 and
        x.args[0].is_Number and
        x.args[1].is_Add and
        x.is_commutative)
    expr = short.xreplace(hollow_mul)

    numer, denom = expr.as_numer_denom()
    if denom.is_Add:
        n, d = fraction(radsimp(1/denom, symbolic=False, max_terms=1))
        if n is not S.One:
            expr = (numer*n).expand()/d

    if expr.could_extract_minus_sign():
        n, d = fraction(expr)
        if d != 0:
            expr = signsimp(-n/(-d))

    if measure(expr) > ratio*measure(original_expr):
        expr = original_expr

    # restore floats
    if floats and rational is None:
        expr = nfloat(expr, exponent=False)

    return expr
Example #10
def test_issue_10395():
    eq = x * Max(0, y)
    assert nfloat(eq) == eq
    eq = x * Max(y, -1.1)
    assert nfloat(eq) == eq
    assert Max(y, 4).n() == Max(4.0, y)
Example #11
                              gaussUA[i - 1], gaussUA[i]))
    #print("AreaCA {} : {} ".format(i+1, gaussCA[i]))
    gaussTotCA = gaussTotCA + gaussCA[i]
#print("Total Certainity Area: {}".format(gaussTotCA))

# GAUSSIAN MF'S - FuzR MEASSURE
FuzRgaussOrig = (gaussTotCA) / (gaussTotUA)
print("FuzR Meassure - Gaussian: {}".format(FuzRgaussOrig))

# Test Something
x = sympy.var("x")
totAreaAgg = 0
for i in range(l):
    func1 = sympy.exp(-(((x - gaussParam[i][0]) / gaussParam[i][1])**2) / 2)
    int1 = sympy.integrate(func1, (x, 0, 10))
    areaGauss = nfloat(int1)
    areaTri = 0.5 * (triParam[i][2] - triParam[i][0])
    totAreaAgg = totAreaAgg + areaGauss - areaTri

# Test Scaling Measure
print("Uncertainty Difference")
print(gaussTotUA - triTotUA)
print("Certainty Difference")
print(gaussTotCA - triTotCA)
FuzRgaussMod = (triTotCA + 2) / (triTotUA + 0.5)
print("\nFuzR Meassure - GaussianMod : {}".format(FuzRgaussMod))

#plt.margins(0)
plt.tight_layout()
plt.show()
Example #12
def test_nfloat():
    from sympy.core.basic import _aresame
    from sympy.polys.rootoftools import rootof

    x = Symbol("x")
    eq = x**Rational(4, 3) + 4*x**(S.One/3)/3
    assert _aresame(nfloat(eq), x**Rational(4, 3) + (4.0/3)*x**(S.One/3))
    assert _aresame(nfloat(eq, exponent=True), x**(4.0/3) + (4.0/3)*x**(1.0/3))
    eq = x**Rational(4, 3) + 4*x**(x/3)/3
    assert _aresame(nfloat(eq), x**Rational(4, 3) + (4.0/3)*x**(x/3))
    big = 12345678901234567890
    # specify precision to match value used in nfloat
    Float_big = Float(big, 15)
    assert _aresame(nfloat(big), Float_big)
    assert _aresame(nfloat(big*x), Float_big*x)
    assert _aresame(nfloat(x**big, exponent=True), x**Float_big)
    assert nfloat(cos(x + sqrt(2))) == cos(x + nfloat(sqrt(2)))

    # issue 6342
    f = S('x*lamda + lamda**3*(x/2 + 1/2) + lamda**2 + 1/4')
    assert not any(a.free_symbols for a in solveset(f.subs(x, -0.139)))

    # issue 6632
    assert nfloat(-100000*sqrt(2500000001) + 5000000001) == \
        9.99999999800000e-11

    # issue 7122
    eq = cos(3*x**4 + y)*rootof(x**5 + 3*x**3 + 1, 0)
    assert str(nfloat(eq, exponent=False, n=1)) == '-0.7*cos(3.0*x**4 + y)'

    # issue 10933
    for ti in (dict, Dict):
        d = ti({S.Half: S.Half})
        n = nfloat(d)
        assert isinstance(n, ti)
        assert _aresame(list(n.items()).pop(), (S.Half, Float(.5)))
    for ti in (dict, Dict):
        d = ti({S.Half: S.Half})
        n = nfloat(d, dkeys=True)
        assert isinstance(n, ti)
        assert _aresame(list(n.items()).pop(), (Float(.5), Float(.5)))
    d = [S.Half]
    n = nfloat(d)
    assert type(n) is list
    assert _aresame(n[0], Float(.5))
    assert _aresame(nfloat(Eq(x, S.Half)).rhs, Float(.5))
    assert _aresame(nfloat(S(True)), S(True))
    assert _aresame(nfloat(Tuple(S.Half))[0], Float(.5))
    assert nfloat(Eq((3 - I)**2/2 + I, 0)) == S.false
    # pass along kwargs
    assert nfloat([{S.Half: x}], dkeys=True) == [{Float(0.5): x}]

    # Issue 17706
    A = MutableMatrix([[1, 2], [3, 4]])
    B = MutableMatrix(
        [[Float('1.0', precision=53), Float('2.0', precision=53)],
        [Float('3.0', precision=53), Float('4.0', precision=53)]])
    assert _aresame(nfloat(A), B)
    A = ImmutableMatrix([[1, 2], [3, 4]])
    B = ImmutableMatrix(
        [[Float('1.0', precision=53), Float('2.0', precision=53)],
        [Float('3.0', precision=53), Float('4.0', precision=53)]])
    assert _aresame(nfloat(A), B)

    # issue 22524
    f = Function('f')
    assert not nfloat(f(2)).atoms(Float)
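For quick reference, the nfloat options exercised by the test above can be summarized in a short sketch (standard SymPy API; the comments describe the expected behavior rather than exact reprs):

from sympy import nfloat, Rational, Symbol

x = Symbol('x')
e = x**Rational(4, 3) + Rational(1, 2)
print(nfloat(e))                   # constants become Floats, the exponent stays Rational
print(nfloat(e, exponent=True))    # exponents are floated as well
print(nfloat({Rational(1, 2): x}, dkeys=True))   # with dkeys=True, dict keys are floated too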