Example #1
    def test_line_search_bsc(self):
        # There is at least one function R^20 -> R to be tested, but this leads to s=None
        for name, f, fprime, x, p, old_f in self.line_iter():
            jac = lambda x: fprime(x)
            x0 = nl._as_inexact(x)
            func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()
            x = x0.flatten()
            jacobian = nl.asjacobian(jac)
            jacobian.setup(x.copy(), f(x), func)
            options = {
                'jacobian': jacobian,
                'jac_tol': min(1e-03, 1e-03 * norm(f(x))),
                'amin': 1e-8
            }
            Fx = func(x)
            dx = -jacobian.solve(Fx, tol=options['jac_tol'])

            ### Check with the ENM step, as for RMT
            s, f_new = ls.scalar_search_bsc(func,
                                            x,
                                            dx,
                                            Fx,
                                            parameters=options)

            assert_fp_equal(f_new, f(x + s * dx), name)
            assert_bsc(s, x, dx, func, jacobian, options, err_msg="%s" % name)

            ### Check a different descent direction (not the ENM step)
            s, f_new = ls.scalar_search_bsc(func, x, p, Fx, parameters=options)

            assert_fp_equal(f_new, f(x + s * p), name)
            assert_bsc(s, x, p, func, jacobian, options, err_msg="%s" % name)
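
The test above builds the inexact-Newton step (dx solved from the Jacobian only
to tolerance jac_tol) and then asks ls.scalar_search_bsc for a step length along
it. Below is a minimal standalone sketch of that call pattern; it assumes the
same nl/ls modules and the scalar_search_bsc signature exercised above, and the
residual F is a made-up example, not part of the original tests.

import numpy as np

def F(z):
    # Hypothetical residual, for illustration only
    return np.array([z[0]**2 - 2.0, z[1]**2 - 3.0])

x = np.array([1.0, 1.0])
jacobian = nl.asjacobian(lambda z: np.diag(2.0 * z))  # analytic Jacobian of F
jacobian.setup(x.copy(), F(x), F)
options = {'jacobian': jacobian,
           'jac_tol': min(1e-3, 1e-3 * np.linalg.norm(F(x))),
           'amin': 1e-8}

Fx = F(x)
dx = -jacobian.solve(Fx, tol=options['jac_tol'])  # inexact-Newton direction
s, f_new = ls.scalar_search_bsc(F, x, dx, Fx, parameters=options)
x_new = x + s * dx  # accepted point along the search direction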
Example #2
    def test_line_search_bsc(self):
        # There is at least one function R^20 -> R to be tested, but this leads to s=None
        for name, f, fprime, x, p, old_f in self.line_iter():
            jac = lambda x: fprime(x)
            x0 = nl._as_inexact(x)
            func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()
            x = x0.flatten()
            jacobian = nl.asjacobian(jac)
            jacobian.setup(x.copy(), f(x), func)
            options = {
                'jacobian': jacobian,
                'jac_tol': min(1e-03, 1e-03 * norm(f(x))),
                'amin': 1e-8
            }
            # Search along fprime(x), with f(x) as the old value
            s, f_new = ls.scalar_search_bsc(func,
                                            x,
                                            fprime(x),
                                            f(x),
                                            parameters=options)
            # The returned value must match the function at the accepted point
            assert_fp_equal(f_new, f(x + s * fprime(x)), name)
            assert_bsc(s,
                       x,
                       fprime(x),
                       func,
                       jacobian,
                       options,
                       err_msg="%s" % name)
Example #3
    def test_line_search_rmt(self):
        # There is at least one function R^20 -> R to be tested, but this leads to s=None
        for name, f, fprime, x, p, old_f in self.line_iter():
            jac = lambda x: fprime(x)
            x0 = nl._as_inexact(x)
            func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()
            x = x0.flatten()
            jacobian = nl.asjacobian(jac)
            jacobian.setup(x.copy(), f(x), func)
            options = {
                'jacobian': jacobian,
                'jac_tol': min(1e-03, 1e-03 * norm(f(x))),
                'amin': 1e-8
            }

            ### We need a special search direction; otherwise we get problems when calculating omega_F
            Fx = func(x)
            dx = -jacobian.solve(Fx, tol=options['jac_tol'])

            s, dxbar, f_new = ls.scalar_search_rmt(func,
                                                   x,
                                                   dx,
                                                   parameters=options)
            # Here the step size s is often (always?) equal to 1, because
            # rmt_eta_lower > t_dx_omega is fulfilled and alpha == 1.0 (rmt_func).
            # Is this an expected step size, or is this wrong?

            if s is None:
                s = 1
            assert_fp_equal(f_new, f(x + s * dx), name)
            assert_rmt(s,
                       dx,
                       f(x),
                       f_new,
                       jacobian,
                       options,
                       err_msg="%s" % name)
Example #4
# Assumed imports: this function appears to live in a modified copy of
# scipy.optimize.nonlin, referenced below as nls; asjacobian, NoConvergence
# and _nonlin_line_search would be that module's own (local) names.
import sys
import numpy as np
from numpy.linalg import norm

def nonlin_solve(
    F,
    x0,
    jacobian="krylov",
    iter=None,
    verbose=False,
    maxiter=None,
    f_tol=None,
    f_rtol=None,
    x_tol=None,
    x_rtol=None,
    tol_norm=None,
    line_search="armijo",
    callback=None,
    full_output=True,
    raise_exception=True,
):
    """
    Find a root of a function, in a way suitable for large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    jacobian : Jacobian
        A Jacobian approximation: `Jacobian` object or something that
        `asjacobian` can transform to one. Alternatively, a string specifying
        which of the builtin Jacobian approximations to use:

            krylov, broyden1, broyden2, anderson
            diagbroyden, linearmixing, excitingmixing

    %(params_extra)s
    full_output : bool
        If true, returns a dictionary `info` containing convergence
        information.
    raise_exception : bool
        If True, a `NoConvergence` exception is raised if no solution is found.

    See Also
    --------
    asjacobian, Jacobian

    Notes
    -----
    This algorithm implements the inexact Newton method, with
    backtracking or full line searches. Several Jacobian
    approximations are available, including Krylov and Quasi-Newton
    methods.

    References
    ----------
    .. [KIM] C. T. Kelley, "Iterative Methods for Linear and Nonlinear
       Equations". Society for Industrial and Applied Mathematics. (1995)
       http://www.siam.org/books/kelley/fr16/index.php

    """

    tol_norm = nls.maxnorm if tol_norm is None else tol_norm
    condition = nls.TerminationCondition(f_tol=f_tol,
                                         f_rtol=f_rtol,
                                         x_tol=x_tol,
                                         x_rtol=x_rtol,
                                         iter=iter,
                                         norm=tol_norm)

    x0 = nls._as_inexact(x0)
    # Unlike the stock scipy version, func is not flattened here, and F is
    # expected to return a 3-tuple whose first element is the residual.
    func = lambda z: F(nls._array_like(z, x0))
    x = x0.flatten()

    dx = np.inf
    Fx, _, _ = func(x)  # only the residual (first tuple element) is used
    Fx_norm = norm(Fx)

    jacobian = asjacobian(jacobian)
    jacobian.setup(x.copy(), Fx, func)

    if maxiter is None:
        if iter is not None:
            maxiter = iter + 1
        else:
            maxiter = 100 * (x.size + 1)

    if line_search is True:
        line_search = "armijo"
    elif line_search is False:
        line_search = None

    if line_search not in (None, "armijo", "wolfe"):
        raise ValueError("Invalid line search")

    # Solver tolerance selection
    gamma = 0.9
    eta_max = 0.9999
    eta_threshold = 0.1
    eta = 1e-3

    for n in range(maxiter):
        status = condition.check(Fx, x, dx)
        if status:
            break

        # The tolerance, as computed for scipy.sparse.linalg.* routines
        tol = min(eta, eta * Fx_norm)
        dx = -jacobian.solve(Fx, tol=tol)

        if norm(dx) == 0:
            raise ValueError("Jacobian inversion yielded zero vector. "
                             "This indicates a bug in the Jacobian "
                             "approximation.")

        # Line search, or Newton step
        if line_search:
            s, x, Fx, Fx_norm_new = _nonlin_line_search(
                func, x, Fx, dx, line_search)
        else:
            s = 1.0
            x = x + dx
            Fx, _, _ = func(x)  # again, only the residual is used
            Fx_norm_new = norm(Fx)

        jacobian.update(x.copy(), Fx)

        if callback:
            callback(x, Fx)

        # Adjust forcing parameters for inexact methods
        # (Eisenstat-Walker-style update, safeguarded so the tolerance
        # does not decrease too sharply between iterations)
        eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_threshold:
            eta = min(eta_max, eta_A)
        else:
            eta = min(eta_max, max(eta_A, gamma * eta**2))

        Fx_norm = Fx_norm_new

        # Print status
        if verbose:
            sys.stdout.write("%d:  |F(x)| = %g; step %g; tol %g\n" %
                             (n, norm(Fx), s, eta))
            sys.stdout.flush()
    else:  # for-else: reached maxiter without the break above firing
        if raise_exception:
            raise NoConvergence(nls._array_like(x, x0))
        else:
            status = 2

    if full_output:
        info = {
            "nit": condition.iteration,
            "fun": Fx,
            "status": status,
            "success": status == 1,
            "message": {
                1: "A solution was found at the specified "
                "tolerance.",
                2: "The maximum number of iterations allowed "
                "has been reached.",
            }[status],
        }
        return nls._array_like(x, x0), info
    else:
        return nls._array_like(x, x0)
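
Since the solver unpacks Fx, _, _ = func(x), this variant of nonlin_solve
expects F to return a 3-tuple whose first element is the residual. A minimal
usage sketch under that assumption (the residual system is made up, and the
extra tuple slots are placeholders; line_search=None keeps the call on the
code path shown above):

import numpy as np

def F(x):
    # Residual first; the other tuple slots stand in for whatever
    # auxiliary data this modified solver's callers are expected to pass
    r = np.array([x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
                  0.5 * (x[1] - x[0])**3 + x[1]])
    return r, None, None

x_root, info = nonlin_solve(F, x0=np.zeros(2), jacobian='krylov',
                            line_search=None, f_tol=1e-8)
print(info['message'], x_root)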