Example No. 1
def checksol(f, symbol, sol=None, **flags):
    """Checks whether sol is a solution of equation f == 0.

    Input can be either a single symbol and corresponding value
    or a dictionary of symbols and values. ``f`` can be a single
    equation or an iterable of equations. A solution must satisfy
    all equations in ``f`` to be considered valid; if the solution
    fails any one of the equations, False is returned; if one or
    more checks are inconclusive (and none is False) then None
    is returned.

    Examples
    ========

       >>> from sympy import symbols
       >>> from sympy.solvers import checksol
       >>> x, y = symbols('x,y')
       >>> checksol(x**4-1, x, 1)
       True
       >>> checksol(x**4-1, x, 0)
       False
       >>> checksol(x**2 + y**2 - 5**2, {x: 3, y: 4})
       True

       None is returned if checksol() could not conclude.

       flags:
           'numerical=True (default)'
               do a fast numerical check if f has only one symbol.
           'minimal=True (default is False)'
                do a very fast, minimal test.
           'warning=True (default is False)'
               print a warning if checksol() could not conclude.
           'simplify=True (default)'
                simplify the solution before substituting it into the function
                (which will then be simplified regardless of the flag setting)
           'force=True (default is False)'
                make all symbols positive that have no assumptions regarding sign.
    """

    if sol is not None:
        sol = {symbol: sol}
    elif isinstance(symbol, dict):
        sol = symbol
    else:
        msg = 'Expecting (sym, val) or ({sym: val}, None) but got (%s, %s)'
        raise ValueError(msg % (symbol, sol))

    if is_sequence(f):
        if not f:
            raise ValueError('no functions to check')
        rv = True
        for fi in f:
            check = checksol(fi, sol, **flags)
            if check:
                continue
            if check is False:
                return False
            rv = None # don't return, wait to see if there's a False
        return rv

    if isinstance(f, Poly):
        f = f.as_expr()
    elif isinstance(f, Equality):
        f = f.lhs - f.rhs

    if not f:
        return True

    if not f.has(*sol.keys()):
        return False

    attempt = -1
    numerical = flags.get('numerical', True)
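    # Escalate through progressively heavier rewrites of the residual --
    # plain substitution, numeric evaluation, simplify (with optional posify),
    # powsimp, cancel, expand, together -- until it is provably zero or nonzero.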
    while True:
        attempt += 1
        if attempt == 0:
            val = f.subs(sol)
        elif attempt == 1:
            if not val.atoms(Symbol) and numerical:
                # val is a constant, so a fast numerical test may suffice
                if val not in [S.Infinity, S.NegativeInfinity]:
                    # issue 2088 shows that +/-oo chops to 0
                    val = val.evalf(36).n(30, chop=True)
        elif attempt == 2:
            if flags.get('minimal', False):
                return
            # the flag 'simplify=False' is used in solve to avoid
            # simplifying the solution. So if it is set to False there
            # the simplification will not be attempted here, either. But
            # if the simplification is done here then the flag should be
            # set to False so it isn't done again there.
            # FIXME: this can't work, since `flags` is not passed to
            # `checksol()` as a dict, but as keywords.
            # So, any modification to `flags` here will be lost when returning
            # from `checksol()`.
            for k in sol:
                sol[k] = simplify(sympify(sol[k]))
            val = simplify(f.subs(sol))
            if flags.get('force', False):
                val = posify(val)[0]
        elif attempt == 3:
            val = powsimp(val)
        elif attempt == 4:
            val = cancel(val)
        elif attempt == 5:
            val = val.expand()
        elif attempt == 6:
            val = together(val)
        elif attempt == 7:
            val = powsimp(val)
        else:
            break
        if val.is_zero:
            return True
        elif attempt > 0 and numerical and val.is_nonzero:
            return False

    if flags.get('warning', False):
        print("\n\tWarning: could not verify solution %s." % sol)
Example No. 2
def checkodesol(ode, sol, func=None, order="auto", solve_for_func=True):
    r"""
    Substitutes ``sol`` into ``ode`` and checks that the result is ``0``.

    This only works when ``func`` is one function, like `f(x)`.  ``sol`` can
    be a single solution or a list of solutions.  Each solution may be an
    :py:class:`~sympy.core.relational.Equality` that the solution satisfies,
    e.g. ``Eq(f(x), C1), Eq(f(x) + C1, 0)``; or simply an
    :py:class:`~sympy.core.expr.Expr`, e.g. ``f(x) - C1``. In most cases it
    will not be necessary to explicitly identify the function, but if the
    function cannot be inferred from the original equation it can be supplied
    through the ``func`` argument.

    If a sequence of solutions is passed, the same sort of container will be
    used to return the result for each solution.

    It tries the following methods, in order, until it finds zero equivalence:

    1. Substitute the solution for `f` in the original equation.  This only
       works if ``ode`` is solved for `f`.  It will attempt to solve it first
       unless ``solve_for_func == False``.
    2. Take `n` derivatives of the solution, where `n` is the order of
       ``ode``, and check to see if that is equal to the solution.  This only
       works on exact ODEs.
    3. Take the 1st, 2nd, ..., `n`\th derivatives of the solution, each time
       solving for the derivative of `f` of that order (this will always be
       possible because `f` is a linear operator). Then back substitute each
       derivative into ``ode`` in reverse order.

    This function returns a tuple.  The first item in the tuple is ``True`` if
    the substitution results in ``0``, and ``False`` otherwise. The second
    item in the tuple is what the substitution results in.  It should always
    be ``0`` if the first item is ``True``. Sometimes this function will
    return ``False`` even when an expression is identically equal to ``0``.
    This happens when :py:meth:`~sympy.simplify.simplify.simplify` does not
    reduce the expression to ``0``.  If an expression returned by this
    function vanishes identically, then ``sol`` really is a solution to
    the ``ode``.

    If this function seems to hang, it is probably because of a hard
    simplification.

    To use this function to test a solution, check the first item of the tuple.

    Examples
    ========

    >>> from sympy import Eq, Function, checkodesol, symbols
    >>> x, C1 = symbols('x,C1')
    >>> f = Function('f')
    >>> checkodesol(f(x).diff(x), Eq(f(x), C1))
    (True, 0)
    >>> assert checkodesol(f(x).diff(x), C1)[0]
    >>> assert not checkodesol(f(x).diff(x), x)[0]
    >>> checkodesol(f(x).diff(x, 2), x**2)
    (False, 2)

    """
    if not isinstance(ode, Equality):
        ode = Eq(ode, 0)
    if func is None:
        try:
            _, func = _preprocess(ode.lhs)
        except ValueError:
            funcs = [
                s.atoms(AppliedUndef)
                for s in (sol if is_sequence(sol, set) else [sol])
            ]
            funcs = set().union(*funcs)
            if len(funcs) != 1:
                raise ValueError(
                    "must pass func arg to checkodesol for this case.")
            func = funcs.pop()
    if not isinstance(func, AppliedUndef) or len(func.args) != 1:
        raise ValueError("func must be a function of one variable, not %s" %
                         func)
    if is_sequence(sol, set):
        return type(sol)([
            checkodesol(ode, i, order=order, solve_for_func=solve_for_func)
            for i in sol
        ])

    if not isinstance(sol, Equality):
        sol = Eq(func, sol)
    elif sol.rhs == func:
        sol = sol.reversed

    if order == "auto":
        order = ode_order(ode, func)
    solved = sol.lhs == func and not sol.rhs.has(func)
    if solve_for_func and not solved:
        rhs = solve(sol, func)
        if rhs:
            eqs = [Eq(func, t) for t in rhs]
            if len(rhs) == 1:
                eqs = eqs[0]
            return checkodesol(ode, eqs, order=order, solve_for_func=False)

    x = func.args[0]

    # Handle series solutions here
    if sol.has(Order):
        assert sol.lhs == func
        Oterm = sol.rhs.getO()
        solrhs = sol.rhs.removeO()

        Oexpr = Oterm.expr
        assert isinstance(Oexpr, Pow)
        sorder = Oexpr.exp
        assert Oterm == Order(x**sorder)

        odesubs = (ode.lhs - ode.rhs).subs(func, solrhs).doit().expand()

        neworder = Order(x**(sorder - order))
        odesubs = odesubs + neworder
        assert odesubs.getO() == neworder
        residual = odesubs.removeO()

        return (residual == 0, residual)

    s = True
    testnum = 0
    while s:
        if testnum == 0:
            # First pass, try substituting a solved solution directly into the
            # ODE. This has the highest chance of succeeding.
            ode_diff = ode.lhs - ode.rhs

            if sol.lhs == func:
                s = sub_func_doit(ode_diff, func, sol.rhs)
                s = besselsimp(s)
            else:
                testnum += 1
                continue
            ss = simplify(s.rewrite(exp))
            if ss:
                # with the new numer_denom in power.py, if we do a simple
                # expansion then testnum == 0 verifies all solutions.
                s = ss.expand(force=True)
            else:
                s = 0
            testnum += 1
        elif testnum == 1:
            # Second pass. If we cannot substitute f, try seeing if the nth
            # derivative is equal, this will only work for odes that are exact,
            # by definition.
            s = simplify(
                trigsimp(diff(sol.lhs, x, order) - diff(sol.rhs, x, order)) -
                trigsimp(ode.lhs) + trigsimp(ode.rhs))
            # s2 = simplify(
            #     diff(sol.lhs, x, order) - diff(sol.rhs, x, order) - \
            #     ode.lhs + ode.rhs)
            testnum += 1
        elif testnum == 2:
            # Third pass. Try solving for df/dx and substituting that into the
            # ODE. Thanks to Chris Smith for suggesting this method.  Many of
            # the comments below are his, too.
            # The method:
            # - Take each of 1..n derivatives of the solution.
            # - Solve each nth derivative for d^(n)f/dx^(n)
            #   (the differential of that order)
            # - Back substitute into the ODE in decreasing order
            #   (i.e., n, n-1, ...)
            # - Check the result for zero equivalence
            if sol.lhs == func and not sol.rhs.has(func):
                diffsols = {0: sol.rhs}
            elif sol.rhs == func and not sol.lhs.has(func):
                diffsols = {0: sol.lhs}
            else:
                diffsols = {}
            sol = sol.lhs - sol.rhs
            for i in range(1, order + 1):
                # Differentiation is a linear operator, so there should always
                # be 1 solution. Nonetheless, we test just to make sure.
                # We only need to solve once.  After that, we automatically
                # have the solution to the differential in the order we want.
                if i == 1:
                    ds = sol.diff(x)
                    try:
                        sdf = solve(ds, func.diff(x, i))
                        if not sdf:
                            raise NotImplementedError
                    except NotImplementedError:
                        testnum += 1
                        break
                    else:
                        diffsols[i] = sdf[0]
                else:
                    # This is what the solution says df/dx should be.
                    diffsols[i] = diffsols[i - 1].diff(x)

            # Make sure the above didn't fail.
            if testnum > 2:
                continue
            else:
                # Substitute it into ODE to check for self consistency.
                lhs, rhs = ode.lhs, ode.rhs
                for i in range(order, -1, -1):
                    if i == 0 and 0 not in diffsols:
                        # We can only substitute f(x) if the solution was
                        # solved for f(x).
                        break
                    lhs = sub_func_doit(lhs, func.diff(x, i), diffsols[i])
                    rhs = sub_func_doit(rhs, func.diff(x, i), diffsols[i])
                    ode_or_bool = Eq(lhs, rhs)
                    ode_or_bool = simplify(ode_or_bool)

                    if isinstance(ode_or_bool, (bool, BooleanAtom)):
                        if ode_or_bool:
                            lhs = rhs = S.Zero
                    else:
                        lhs = ode_or_bool.lhs
                        rhs = ode_or_bool.rhs
                # No sense in overworking simplify -- just prove that the
                # numerator goes to zero
                num = trigsimp((lhs - rhs).as_numer_denom()[0])
                # since solutions are obtained using force=True we test
                # using the same level of assumptions
                ## replace function with dummy so assumptions will work
                _func = Dummy("func")
                num = num.subs(func, _func)
                ## posify the expression
                num, reps = posify(num)
                s = simplify(num).xreplace(reps).xreplace({_func: func})
                testnum += 1
        else:
            break

    if not s:
        return (True, s)
    elif s is True:  # The code above never was able to change s
        raise NotImplementedError("Unable to test if " + str(sol) +
                                  " is a solution to " + str(ode) + ".")
    else:
        return (False, s)
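
A short usage sketch mirroring the doctest above; it assumes the familiar
general solution C1*exp(x) + C2*exp(-x) of f'' - f = 0 as the expression to
verify:

from sympy import Eq, Function, checkodesol, exp, symbols

x, C1, C2 = symbols('x C1 C2')
f = Function('f')

ode = Eq(f(x).diff(x, 2) - f(x), 0)

# The general solution substitutes to zero, so the first item is True.
print(checkodesol(ode, Eq(f(x), C1*exp(x) + C2*exp(-x))))  # (True, 0)

# A non-solution returns False together with the non-zero residual.
print(checkodesol(ode, Eq(f(x), x**2))[0])  # False
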
Example No. 3
def _tsolve(eq, sym, **flags):
    """
    Helper for _solve that solves a transcendental equation with respect
    to the given symbol. Various equations containing powers and logarithms
    can be solved.

    Only a single solution is returned. This solution is generally
    not unique. In some cases, a complex solution may be returned
    even though a real solution exists.

        >>> from sympy import log
        >>> from sympy.solvers.solvers import _tsolve as tsolve
        >>> from sympy.abc import x

        >>> tsolve(3**(2*x+5)-4, x)
        [(-5*log(3) + 2*log(2))/(2*log(3))]

        >>> tsolve(log(x) + 2*x, x)
        [LambertW(2)/2]

    """
    if _patterns is None:
        _generate_patterns()
    eq2 = eq.subs(sym, _x)
    for p, sol in _patterns:
        m = eq2.match(p)
        if m:
            soln = sol.subs(m).subs(_x, sym)
            if not (soln is S.NaN or
                   soln.has(S.Infinity) or
                   soln.has(S.NegativeInfinity) or
                   sym in soln.free_symbols):
                return [soln]

    # let's also try to invert the equation
    lhs = eq
    rhs = S.Zero

    while True:
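        # Strip sym-independent additive and multiplicative parts off lhs and
        # fold them into rhs until only the sym-dependent core remains.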
        indep, dep = lhs.as_independent(sym)

        # dep + indep == rhs
        if lhs.is_Add:
            # this indicates we have done it all
            if indep is S.Zero:
                break

            lhs = dep
            rhs -= indep

        # dep * indep == rhs
        else:
            # this indicates we have done it all
            if indep is S.One:
                break

            lhs = dep
            rhs /= indep

    #                    -1
    # f(x) = g  ->  x = f  (g)
    if lhs.is_Function and lhs.nargs == 1 and hasattr(lhs, 'inverse'):
        rhs = lhs.inverse()(rhs)
        lhs = lhs.args[0]

        sol = solve(lhs - rhs, sym)
        return sol

    elif lhs.is_Add:
        # just a simple case - we do variable substitution for first function,
        # and if it removes all functions - let's call solve.
        #      x    -x                   -1
        # UC: e  + e   = y      ->  t + t   = y
        t = Dummy('t')
        terms = lhs.args

        # find first term which is a Function
        for f1 in lhs.args:
            if f1.is_Function:
                ok = True
                break
        else:
            ok = False # didn't find a function
        if ok:
            # perform the substitution
            lhs_ = lhs.subs(f1, t)

            # if no Functions left, we can proceed with usual solve
            if not (lhs_.is_Function or
                    any(term.is_Function for term in lhs_.args)):
                cv_sols = solve(lhs_ - rhs, t)
                for sol in cv_sols:
                    if sol.has(sym):
                        # there is more than one function
                        break
                else:
                    cv_inv = solve(t - f1, sym)[0]
                    return [cv_inv.subs(t, sol) for sol in cv_sols]

    if flags.pop('posify', True):
        flags['posify'] = False
        pos, reps = posify(lhs)
        u = sym
        for u, s in reps.items():
            if s == sym:
                break
        try:
            soln = _solve(pos - rhs, u, **flags)
        except NotImplementedError:
            return
        return [s.subs(reps) for s in soln]
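
Since ``_tsolve`` is a private helper for ``_solve``, the same equations are
normally reached through the public ``solve``; a small sketch (the results
correspond to the doctest values above, possibly in a different but
equivalent form):

from sympy import log, solve
from sympy.abc import x

# Transcendental equations are routed to _tsolve internally.
print(solve(3**(2*x + 5) - 4, x))
print(solve(log(x) + 2*x, x))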