Example #1
File: tnc.py Project: minrk/scipy
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
             bounds=None, epsilon=1e-8, scale=None, offset=None,
             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
             rescale=-1):
    """Minimize a function with variables subject to bounds, using
    gradient information.

    Parameters
    ----------
    func : callable func(x, *args)
        Function to minimize.  Should return f and g, where f is
        the value of the function and g its gradient (a list of
        floats).  If the function returns None, the minimization
        is aborted.
    x0 : list of floats
        Initial estimate of minimum.
    fprime : callable fprime(x, *args)
        Gradient of func. If None, then func must return the
        function value and the gradient (f,g = func(x, *args)).
    args : tuple
        Arguments to pass to function.
    approx_grad : bool
        If true, approximate the gradient numerically.
    bounds : list
        (min, max) pairs for each element in x, defining the
        bounds on that parameter. Use None or +/-inf for one of
        min or max when there is no bound in that direction.
    epsilon : float
        Used if approx_grad is True.  The stepsize in a finite
        difference approximation for fprime.
    scale : list of floats
        Scaling factors to apply to each variable.  If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others.  Defaults to None.
    offset : float
        Value to subtract from each variable.  If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    messages : int
        Bit mask used to select messages displayed during
        minimization; values are defined in the MSGS dict.
        Defaults to MSG_ALL.
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration.  If maxCGit == 0, the direction chosen is
        -gradient.  If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)).  Defaults to -1.
    maxfun : int
        Maximum number of function evaluations.  If None, maxfun is
        set to max(100, 10*len(x0)).  Defaults to None.
    eta : float
        Severity of the line search.  If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search.  May be increased during
        call.  If too small, it will be set to 10.0.  Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations.  If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    fmin : float
        Minimum function value estimate.  Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0.  Defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors).  If xtol <
        0.0, xtol is set to sqrt(machine_precision).  Defaults to
        -1.
    pgtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended.  Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling.  If 0, rescale at each iteration.  If a large
        value, never rescale.  If < 0, rescale is set to 1.3.

    Returns
    -------
    x : list of floats
        The solution.
    nfeval : int
        The number of function evaluations.
    rc : int
        Return code as defined in the RCSTRINGS dict.

    """
    x0 = asarray(x0, dtype=float).tolist()
    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if approx_grad:
        def func_and_grad(x):
            x = asarray(x)
            f = func(x, *args)
            g = approx_fprime(x, func, epsilon, *args)
            return f, list(g)
    elif fprime is None:
        def func_and_grad(x):
            x = asarray(x)
            f, g = func(x, *args)
            return f, list(g)
    else:
        def func_and_grad(x):
            x = asarray(x)
            f = func(x, *args)
            g = fprime(x, *args)
            return f, list(g)

    """
    low, up   : the bounds (lists of floats)
                if low is None, the lower bounds are removed.
                if up is None, the upper bounds are removed.
                low and up defaults to None
    """
    low = [0]*n
    up = [0]*n
    for i in range(n):
        if bounds[i] is None: l, u = -inf, inf
        else:
            l,u = bounds[i]
            if l is None:
                low[i] = -inf
            else:
                low[i] = l
            if u is None:
                up[i] = inf
            else:
                up[i] = u

    if scale is None:
        scale = []

    if offset is None:
        offset = []

    if maxfun is None:
        maxfun = max(100, 10*len(x0))

    rc, nf, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale, offset,
            messages, maxCGit, maxfun, eta, stepmx, accuracy,
            fmin, ftol, xtol, pgtol, rescale)
    return array(x), nf, rc
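
As a quick orientation before the remaining variants, here is a minimal usage sketch of this wrapper. The quadratic objective and the bounds are invented for illustration; only scipy.optimize.fmin_tnc itself is assumed.

from scipy.optimize import fmin_tnc

def f_and_g(x):
    # f(x) = (x0 - 1)^2 + (x1 - 2.5)^2 together with its analytic
    # gradient; returning (f, g) matches the path where fprime is
    # None and approx_grad is False
    f = (x[0] - 1.0)**2 + (x[1] - 2.5)**2
    g = [2.0*(x[0] - 1.0), 2.0*(x[1] - 2.5)]
    return f, g

# The unconstrained minimum (1, 2.5) violates the upper bound on the
# second variable, so the solver should stop on the boundary near (1, 2).
x, nfeval, rc = fmin_tnc(f_and_g, [0.0, 0.0], bounds=[(0, 2), (0, 2)])
print(x, nfeval, rc)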
Example #2
def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None, options=None):
    """
    Minimize a scalar function of one or more variables using a truncated
    Newton (TNC) algorithm.

    Options for the TNC algorithm are:
        eps : float
            Step size used for numerical approximation of the jacobian.
        scale : list of floats
            Scaling factors to apply to each variable.  If None, the
            factors are up-low for interval bounded variables and
            1+|x| for the others.  Defaults to None.
        offset : float
            Value to subtract from each variable.  If None, the
            offsets are (up+low)/2 for interval bounded variables
            and x for the others.
        disp : bool
           Set to True to print convergence messages.
        maxCGit : int
            Maximum number of hessian*vector evaluations per main
            iteration.  If maxCGit == 0, the direction chosen is
            -gradient.  If maxCGit < 0, maxCGit is set to
            max(1,min(50,n/2)).  Defaults to -1.
        maxfev : int
            Maximum number of function evaluations.  If None, `maxfev` is
            set to max(100, 10*len(x0)).  Defaults to None.
        eta : float
            Severity of the line search.  If < 0 or > 1, set to 0.25.
            Defaults to -1.
        stepmx : float
            Maximum step for the line search.  May be increased during
            call.  If too small, it will be set to 10.0.  Defaults to 0.
        accuracy : float
            Relative precision for finite difference calculations.  If
            <= machine_precision, set to sqrt(machine_precision).
            Defaults to 0.
        minfev : float
            Minimum function value estimate.  Defaults to 0.
        ftol : float
            Precision goal for the value of f in the stopping criterion.
            If ftol < 0.0, ftol is set to 0.0.  Defaults to -1.
        xtol : float
            Precision goal for the value of x in the stopping
            criterion (after applying x scaling factors).  If xtol <
            0.0, xtol is set to sqrt(machine_precision).  Defaults to
            -1.
        pgtol : float
            Precision goal for the value of the projected gradient in
            the stopping criterion (after applying x scaling factors).
            If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
            Setting it to 0.0 is not recommended.  Defaults to -1.
        rescale : float
            Scaling factor (in log10) used to trigger f value
            rescaling.  If 0, rescale at each iteration.  If a large
            value, never rescale.  If < 0, rescale is set to 1.3.

    This function is called by the `minimize` function with `method='TNC'`.
    It is not supposed to be called directly.
    """
    if options is None:
        options = {}
    # retrieve useful options
    epsilon  = options.get('eps', 1e-8)
    scale    = options.get('scale')
    offset   = options.get('offset')
    mesg_num = options.get('mesg_num')
    maxCGit  = options.get('maxCGit', -1)
    maxfun   = options.get('maxfev')
    eta      = options.get('eta', -1)
    stepmx   = options.get('stepmx', 0)
    accuracy = options.get('accuracy', 0)
    fmin     = options.get('minfev', 0)
    ftol     = options.get('ftol', -1)
    xtol     = options.get('xtol', -1)
    pgtol    = options.get('pgtol', -1)
    rescale  = options.get('rescale', -1)
    disp     = options.get('disp', False)

    x0 = asarray(x0, dtype=float).tolist()
    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if mesg_num is not None:
        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
    elif disp:
        messages = MSG_ALL
    else:
        messages = MSG_NONE

    if jac is None:
        def func_and_grad(x):
            x = asarray(x)
            f = fun(x, *args)
            g = approx_fprime(x, fun, epsilon, *args)
            return f, list(g)
    else:
        def func_and_grad(x):
            x = asarray(x)
            f = fun(x, *args)
            g = jac(x, *args)
            return f, list(g)

    """
    low, up   : the bounds (lists of floats)
                if low is None, the lower bounds are removed.
                if up is None, the upper bounds are removed.
                low and up defaults to None
    """
    low = [0]*n
    up = [0]*n
    for i in range(n):
        if bounds[i] is None: l, u = -inf, inf
        else:
            l,u = bounds[i]
            if l is None:
                low[i] = -inf
            else:
                low[i] = l
            if u is None:
                up[i] = inf
            else:
                up[i] = u

    if scale is None:
        scale = []

    if offset is None:
        offset = []

    if maxfun is None:
        maxfun = max(100, 10*len(x0))

    rc, nf, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale, offset,
            messages, maxCGit, maxfun, eta, stepmx, accuracy,
            fmin, ftol, xtol, pgtol, rescale)

    xopt = array(x)
    funv, jacv = func_and_grad(xopt)

    return Result(x=xopt, fun=funv, jac=jacv, nfev=nf, status=rc,
                  message=RCSTRINGS[rc], success=(-1 < rc < 3))
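
This variant is reached through scipy.optimize.minimize. Below is a sketch of how it gets exercised via the public API, assuming a scipy build where method 'TNC' accepts the option names documented above (the Rosenbrock objective is a standard test function; the tolerance value is arbitrary).

import numpy as np
from scipy.optimize import minimize

def rosen(x):
    # classic banana-shaped test function with minimum at (1, 1)
    return (1 - x[0])**2 + 100.0*(x[1] - x[0]**2)**2

def rosen_grad(x):
    return np.array([-2.0*(1 - x[0]) - 400.0*x[0]*(x[1] - x[0]**2),
                     200.0*(x[1] - x[0]**2)])

res = minimize(rosen, [-1.0, 1.0], jac=rosen_grad, method='TNC',
               bounds=[(-2, 2), (-2, 2)], options={'ftol': 1e-10})
print(res.x, res.nfev, res.message)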
Example #3
def _minimize_tnc(fun, x0, args=(), jac=None, bounds=None,
                  eps=1e-8, scale=None, offset=None, mesg_num=None,
                  maxCGit=-1, maxiter=None, eta=-1, stepmx=0, accuracy=0,
                  minfev=0, ftol=-1, xtol=-1, gtol=-1, rescale=-1, disp=False,
                  callback=None, **unknown_options):
    """
    Minimize a scalar function of one or more variables using a truncated
    Newton (TNC) algorithm.

    Options
    -------
    eps : float
        Step size used for numerical approximation of the jacobian.
    scale : list of floats
        Scaling factors to apply to each variable.  If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others.  Defaults to None.
    offset : float
        Value to subtract from each variable.  If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    disp : bool
       Set to True to print convergence messages.
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration.  If maxCGit == 0, the direction chosen is
        -gradient.  If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)).  Defaults to -1.
    maxiter : int
        Maximum number of function evaluations.  If None, `maxiter` is
        set to max(100, 10*len(x0)).  Defaults to None.
    eta : float
        Severity of the line search.  If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search.  May be increased during
        call.  If too small, it will be set to 10.0.  Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations.  If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    minfev : float
        Minimum function value estimate.  Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0.  Defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors).  If xtol <
        0.0, xtol is set to sqrt(machine_precision).  Defaults to
        -1.
    gtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended.  Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling.  If 0, rescale at each iteration.  If a large
        value, never rescale.  If < 0, rescale is set to 1.3.

    """
    _check_unknown_options(unknown_options)
    epsilon = eps
    maxfun = maxiter
    fmin = minfev
    pgtol = gtol

    x0 = asfarray(x0).flatten()
    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if mesg_num is not None:
        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(mesg_num, MSG_ALL)
    elif disp:
        messages = MSG_ALL
    else:
        messages = MSG_NONE

    if jac is None:
        def func_and_grad(x):
            f = fun(x, *args)
            g = approx_fprime(x, fun, epsilon, *args)
            return f, g
    else:
        def func_and_grad(x):
            f = fun(x, *args)
            g = jac(x, *args)
            return f, g

    """
    low, up   : the bounds (lists of floats)
                if low is None, the lower bounds are removed.
                if up is None, the upper bounds are removed.
                low and up defaults to None
    """
    low = zeros(n)
    up = zeros(n)
    for i in range(n):
        if bounds[i] is None:
            l, u = -inf, inf
        else:
            l,u = bounds[i]
            if l is None:
                low[i] = -inf
            else:
                low[i] = l
            if u is None:
                up[i] = inf
            else:
                up[i] = u

    if scale is None:
        scale = array([])

    if offset is None:
        offset = array([])

    if maxfun is None:
        maxfun = max(100, 10*len(x0))

    rc, nf, nit, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale,
                                        offset, messages, maxCGit, maxfun,
                                        eta, stepmx, accuracy, fmin, ftol,
                                        xtol, pgtol, rescale, callback)

    funv, jacv = func_and_grad(x)

    return OptimizeResult(x=x, fun=funv, jac=jacv, nfev=nf, nit=nit, status=rc,
                          message=RCSTRINGS[rc], success=(-1 < rc < 3))
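
When jac is None, the gradient above comes from approx_fprime. For reference, here is a self-contained sketch of that one-sided finite-difference scheme; it mirrors the idea, not scipy's exact implementation, which is what the code actually calls.

import numpy as np

def forward_difference_grad(fun, x, epsilon, *args):
    # g[i] ~= (f(x + eps*e_i) - f(x)) / eps; one extra function
    # evaluation per coordinate on top of the base evaluation
    x = np.asarray(x, dtype=float)
    f0 = fun(x, *args)
    g = np.zeros_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step[i] = epsilon
        g[i] = (fun(x + step, *args) - f0) / epsilon
    return g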
Example #4
File: tnc.py Project: stefanv/scipy
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
             bounds=None, epsilon=1e-8, scale=None, offset=None,
             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
             rescale=-1, disp=None):
    """
    Minimize a function with variables subject to bounds, using
    gradient information in a truncated Newton algorithm. This 
    method wraps a C implementation of the algorithm. 

    Parameters
    ----------
    func : callable ``func(x, *args)``
        Function to minimize.  Must do one of:

        1. Return f and g, where f is the value of the function
           and g its gradient (a list of floats).
        2. Return the function value, and supply the gradient
           function separately as `fprime`.
        3. Return the function value, and set ``approx_grad=True``.

        If the function returns None, the minimization is aborted.
    x0 : list of floats
        Initial estimate of minimum.
    fprime : callable ``fprime(x, *args)``
        Gradient of func. If None, then either func must return the
        function value and the gradient (``f,g = func(x, *args)``)
        or approx_grad must be True.
    args : tuple
        Arguments to pass to function.
    approx_grad : bool
        If true, approximate the gradient numerically.
    bounds : list
        (min, max) pairs for each element in x0, defining the
        bounds on that parameter. Use None or +/-inf for one of
        min or max when there is no bound in that direction.
    epsilon : float
        Used if approx_grad is True.  The stepsize in a finite
        difference approximation for fprime.
    scale : list of floats
        Scaling factors to apply to each variable.  If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others.  Defaults to None.
    offset : float
        Value to subtract from each variable.  If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    messages : int
        Bit mask used to select messages displayed during
        minimization; values are defined in the MSGS dict.
        Defaults to MSG_ALL.
    disp : int
        Integer interface to messages.  0 = no message, 5 = all messages
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration.  If maxCGit == 0, the direction chosen is
        -gradient.  If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)).  Defaults to -1.
    maxfun : int
        Maximum number of function evaluations.  If None, maxfun is
        set to max(100, 10*len(x0)).  Defaults to None.
    eta : float
        Severity of the line search.  If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search.  May be increased during
        call.  If too small, it will be set to 10.0.  Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations.  If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    fmin : float
        Minimum function value estimate.  Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0.  Defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors).  If xtol <
        0.0, xtol is set to sqrt(machine_precision).  Defaults to
        -1.
    pgtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended.  Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling.  If 0, rescale at each iteration.  If a large
        value, never rescale.  If < 0, rescale is set to 1.3.

    Returns
    -------
    x : list of floats
        The solution.
    nfeval : int
        The number of function evaluations.
    rc : int
        Return code as defined in the RCSTRINGS dict.


    Notes
    -----
    The underlying algorithm is truncated Newton, also called 
    Newton Conjugate-Gradient. This method differs from 
    scipy.optimize.fmin_ncg in that
    
    1. It wraps a C implementation of the algorithm
    2. It allows each variable to be given an upper and lower bound.


    The algorithm incorporates the bound constraints by determining
    the descent direction as in an unconstrained truncated Newton,
    but never taking a step-size large enough to leave the space
    of feasible x's. The algorithm keeps track of a set of
    currently active constraints, and ignores them when computing
    the minimum allowable step size. (The x's associated with the
    active constraints are kept fixed.) If the maximum allowable
    step size is zero then a new constraint is added. At the end
    of each iteration one of the constraints may be deemed no
    longer active and removed. A constraint is considered
    no longer active if it is currently active
    but the gradient for that variable points inward from the
    constraint. The specific constraint removed is the one
    associated with the variable of largest index whose
    constraint is no longer active.


    References
    ----------
    Wright S., Nocedal J. (2006), 'Numerical Optimization'

    Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
    SIAM Journal of Numerical Analysis 21, pp. 770-778


    """
    x0 = asarray(x0, dtype=float).tolist()
    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if disp is not None:
        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(disp, MSG_ALL)

    if approx_grad:
        def func_and_grad(x):
            x = asarray(x)
            f = func(x, *args)
            g = approx_fprime(x, func, epsilon, *args)
            return f, list(g)
    elif fprime is None:
        def func_and_grad(x):
            x = asarray(x)
            f, g = func(x, *args)
            return f, list(g)
    else:
        def func_and_grad(x):
            x = asarray(x)
            f = func(x, *args)
            g = fprime(x, *args)
            return f, list(g)

    """
    low, up   : the bounds (lists of floats)
                if low is None, the lower bounds are removed.
                if up is None, the upper bounds are removed.
                low and up defaults to None
    """
    low = [0]*n
    up = [0]*n
    for i in range(n):
        if bounds[i] is None: l, u = -inf, inf
        else:
            l,u = bounds[i]
            if l is None:
                low[i] = -inf
            else:
                low[i] = l
            if u is None:
                up[i] = inf
            else:
                up[i] = u

    if scale is None:
        scale = []

    if offset is None:
        offset = []

    if maxfun is None:
        maxfun = max(100, 10*len(x0))

    rc, nf, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale, offset,
            messages, maxCGit, maxfun, eta, stepmx, accuracy,
            fmin, ftol, xtol, pgtol, rescale)
    return array(x), nf, rc
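
The Notes above say the line search never takes a step large enough to leave the feasible box. Here is a schematic of that computation, as a sketch of the idea only; the real logic lives in the wrapped C code.

import numpy as np

def max_feasible_step(x, d, low, up):
    # Largest t >= 0 such that low <= x + t*d <= up holds elementwise;
    # assumes x is already feasible.
    t_max = np.inf
    for xi, di, lo, hi in zip(x, d, low, up):
        if di > 0:
            t_max = min(t_max, (hi - xi) / di)
        elif di < 0:
            t_max = min(t_max, (lo - xi) / di)
    return max(t_max, 0.0)

# e.g. from x = (1, 1) along d = (1, 2) inside the box [0, 2] x [0, 3]:
print(max_feasible_step([1.0, 1.0], [1.0, 2.0], [0.0, 0.0], [2.0, 3.0]))
# -> 1.0 (the x1 bound binds first)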
Example #5
0
File: tnc.py Project: gpaulbr/ERelp
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=0,
             bounds=None, epsilon=1e-8, scale=None, offset=None,
             messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1,
             stepmx=0, accuracy=0, fmin=0, ftol=-1, xtol=-1, pgtol=-1,
             rescale=-1, disp=None):
    """
    Minimize a function with variables subject to bounds, using
    gradient information in a truncated Newton algorithm. This
    method wraps a C implementation of the algorithm.

    Parameters
    ----------
    func : callable ``func(x, *args)``
        Function to minimize.  Must do one of:

        1. Return f and g, where f is the value of the function
           and g its gradient (a list of floats).
        2. Return the function value, and supply the gradient
           function separately as `fprime`.
        3. Return the function value, and set ``approx_grad=True``.

        If the function returns None, the minimization is aborted.
    x0 : list of floats
        Initial estimate of minimum.
    fprime : callable ``fprime(x, *args)``
        Gradient of func. If None, then either func must return the
        function value and the gradient (``f,g = func(x, *args)``)
        or approx_grad must be True.
    args : tuple
        Arguments to pass to function.
    approx_grad : bool
        If true, approximate the gradient numerically.
    bounds : list
        (min, max) pairs for each element in x0, defining the
        bounds on that parameter. Use None or +/-inf for one of
        min or max when there is no bound in that direction.
    epsilon : float
        Used if approx_grad is True.  The stepsize in a finite
        difference approximation for fprime.
    scale : list of floats
        Scaling factors to apply to each variable.  If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others.  Defaults to None.
    offset : float
        Value to subtract from each variable.  If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    messages : int
        Bit mask used to select messages displayed during
        minimization; values are defined in the MSGS dict.
        Defaults to MSG_ALL.
    disp : int
        Integer interface to messages.  0 = no message, 5 = all messages
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration.  If maxCGit == 0, the direction chosen is
        -gradient.  If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)).  Defaults to -1.
    maxfun : int
        Maximum number of function evaluations.  If None, maxfun is
        set to max(100, 10*len(x0)).  Defaults to None.
    eta : float
        Severity of the line search.  If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search.  May be increased during
        call.  If too small, it will be set to 10.0.  Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations.  If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    fmin : float
        Minimum function value estimate.  Defaults to 0.
    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0.  Defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors).  If xtol <
        0.0, xtol is set to sqrt(machine_precision).  Defaults to
        -1.
    pgtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended.  Defaults to -1.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling.  If 0, rescale at each iteration.  If a large
        value, never rescale.  If < 0, rescale is set to 1.3.

    Returns
    -------
    x : list of floats
        The solution.
    nfeval : int
        The number of function evaluations.
    rc : int
        Return code as defined in the RCSTRINGS dict.


    Notes
    -----
    The underlying algorithm is truncated Newton, also called
    Newton Conjugate-Gradient. This method differs from
    scipy.optimize.fmin_ncg in that

    1. It wraps a C implementation of the algorithm
    2. It allows each variable to be given an upper and lower bound.


    The algorithm incorporates the bound constraints by determining
    the descent direction as in an unconstrained truncated Newton,
    but never taking a step-size large enough to leave the space
    of feasible x's. The algorithm keeps track of a set of
    currently active constraints, and ignores them when computing
    the minimum allowable step size. (The x's associated with the
    active constraints are kept fixed.) If the maximum allowable
    step size is zero then a new constraint is added. At the end
    of each iteration one of the constraints may be deemed no
    longer active and removed. A constraint is considered
    no longer active if it is currently active
    but the gradient for that variable points inward from the
    constraint. The specific constraint removed is the one
    associated with the variable of largest index whose
    constraint is no longer active.


    References
    ----------
    Wright S., Nocedal J. (2006), 'Numerical Optimization'

    Nash S.G. (1984), "Newton-Type Minimization Via the Lanczos Method",
    SIAM Journal of Numerical Analysis 21, pp. 770-778


    """
    x0 = asarray(x0, dtype=float).tolist()
    n = len(x0)

    if bounds is None:
        bounds = [(None,None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if disp is not None:
        messages = {0:MSG_NONE, 1:MSG_ITER, 2:MSG_INFO, 3:MSG_VERS,
                    4:MSG_EXIT, 5:MSG_ALL}.get(disp, MSG_ALL)

    if approx_grad:
        def func_and_grad(x):
            x = asarray(x)
            f = func(x, *args)
            g = approx_fprime(x, func, epsilon, *args)
            return f, list(g)
    elif fprime is None:
        def func_and_grad(x):
            x = asarray(x)
            f, g = func(x, *args)
            return f, list(g)
    else:
        def func_and_grad(x):
            x = asarray(x)
            f = func(x, *args)
            g = fprime(x, *args)
            return f, list(g)

    """
    low, up   : the bounds (lists of floats)
                if low is None, the lower bounds are removed.
                if up is None, the upper bounds are removed.
                low and up defaults to None
    """
    low = [0]*n
    up = [0]*n
    for i in range(n):
        if bounds[i] is None: l, u = -inf, inf
        else:
            l,u = bounds[i]
            if l is None:
                low[i] = -inf
            else:
                low[i] = l
            if u is None:
                up[i] = inf
            else:
                up[i] = u

    if scale is None:
        scale = []

    if offset is None:
        offset = []

    if maxfun is None:
        maxfun = max(100, 10*len(x0))

    rc, nf, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale, offset,
            messages, maxCGit, maxfun, eta, stepmx, accuracy,
            fmin, ftol, xtol, pgtol, rescale)
    return array(x), nf, rc
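
Both fmin_tnc listings map the integer disp onto the MSG_* bit masks. Here is a sketch with the mask values spelled out; the numeric values mirror scipy.optimize.tnc as I understand it, so treat the exact numbers as an assumption.

MSG_NONE = 0       # no messages
MSG_ITER = 1       # one line per iteration
MSG_INFO = 2       # informational messages
MSG_VERS = 4       # version info
MSG_EXIT = 8       # exit reasons
MSG_ALL = MSG_ITER | MSG_INFO | MSG_VERS | MSG_EXIT

def messages_from_disp(disp):
    # disp=0 silences everything, disp=5 enables everything; unknown
    # values fall back to MSG_ALL, matching the .get() default above
    return {0: MSG_NONE, 1: MSG_ITER, 2: MSG_INFO, 3: MSG_VERS,
            4: MSG_EXIT, 5: MSG_ALL}.get(disp, MSG_ALL)

print(messages_from_disp(1))  # -> 1 (iteration messages only)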
Example #6
def _minimize_tnc(fun,
                  x0,
                  args=(),
                  jac=None,
                  bounds=None,
                  options=None,
                  full_output=False):
    """
    Minimize a scalar function of one or more variables using a truncated
    Newton (TNC) algorithm.

    Options for the TNC algorithm are:
        eps : float
            Step size used for numerical approximation of the jacobian.
        scale : list of floats
            Scaling factors to apply to each variable.  If None, the
            factors are up-low for interval bounded variables and
            1+|x| for the others.  Defaults to None.
        offset : float
            Value to subtract from each variable.  If None, the
            offsets are (up+low)/2 for interval bounded variables
            and x for the others.
        disp : bool
           Set to True to print convergence messages.
        maxCGit : int
            Maximum number of hessian*vector evaluations per main
            iteration.  If maxCGit == 0, the direction chosen is
            -gradient.  If maxCGit < 0, maxCGit is set to
            max(1,min(50,n/2)).  Defaults to -1.
        maxfev : int
            Maximum number of function evaluations.  If None, `maxfev` is
            set to max(100, 10*len(x0)).  Defaults to None.
        eta : float
            Severity of the line search.  If < 0 or > 1, set to 0.25.
            Defaults to -1.
        stepmx : float
            Maximum step for the line search.  May be increased during
            call.  If too small, it will be set to 10.0.  Defaults to 0.
        accuracy : float
            Relative precision for finite difference calculations.  If
            <= machine_precision, set to sqrt(machine_precision).
            Defaults to 0.
        minfev : float
            Minimum function value estimate.  Defaults to 0.
        ftol : float
            Precision goal for the value of f in the stopping criterion.
            If ftol < 0.0, ftol is set to 0.0.  Defaults to -1.
        xtol : float
            Precision goal for the value of x in the stopping
            criterion (after applying x scaling factors).  If xtol <
            0.0, xtol is set to sqrt(machine_precision).  Defaults to
            -1.
        pgtol : float
            Precision goal for the value of the projected gradient in
            the stopping criterion (after applying x scaling factors).
            If pgtol < 0.0, pgtol is set to 1e-2 * sqrt(accuracy).
            Setting it to 0.0 is not recommended.  Defaults to -1.
        rescale : float
            Scaling factor (in log10) used to trigger f value
            rescaling.  If 0, rescale at each iteration.  If a large
            value, never rescale.  If < 0, rescale is set to 1.3.

    This function is called by the `minimize` function with `method='TNC'`.
    It is not supposed to be called directly.
    """
    if options is None:
        options = {}
    # retrieve useful options
    epsilon = options.get('eps', 1e-8)
    scale = options.get('scale')
    offset = options.get('offset')
    mesg_num = options.get('mesg_num')
    maxCGit = options.get('maxCGit', -1)
    maxfun = options.get('maxfev')
    eta = options.get('eta', -1)
    stepmx = options.get('stepmx', 0)
    accuracy = options.get('accuracy', 0)
    fmin = options.get('minfev', 0)
    ftol = options.get('ftol', -1)
    xtol = options.get('xtol', -1)
    pgtol = options.get('pgtol', -1)
    rescale = options.get('rescale', -1)
    disp = options.get('disp', False)

    x0 = asarray(x0, dtype=float).tolist()
    n = len(x0)

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if mesg_num is not None:
        messages = {
            0: MSG_NONE,
            1: MSG_ITER,
            2: MSG_INFO,
            3: MSG_VERS,
            4: MSG_EXIT,
            5: MSG_ALL
        }.get(mesg_num, MSG_ALL)
    elif disp:
        messages = MSG_ALL
    else:
        messages = MSG_NONE

    if jac is None:

        def func_and_grad(x):
            x = asarray(x)
            f = fun(x, *args)
            g = approx_fprime(x, fun, epsilon, *args)
            return f, list(g)
    else:

        def func_and_grad(x):
            x = asarray(x)
            f = fun(x, *args)
            g = jac(x, *args)
            return f, list(g)

    """
    low, up   : the bounds (lists of floats)
                if low is None, the lower bounds are removed.
                if up is None, the upper bounds are removed.
                low and up defaults to None
    """
    low = [0] * n
    up = [0] * n
    for i in range(n):
        if bounds[i] is None: l, u = -inf, inf
        else:
            l, u = bounds[i]
            if l is None:
                low[i] = -inf
            else:
                low[i] = l
            if u is None:
                up[i] = inf
            else:
                up[i] = u

    if scale is None:
        scale = []

    if offset is None:
        offset = []

    if maxfun is None:
        maxfun = max(100, 10 * len(x0))

    rc, nf, x = moduleTNC.minimize(func_and_grad, x0, low, up, scale, offset,
                                   messages, maxCGit, maxfun, eta, stepmx,
                                   accuracy, fmin, ftol, xtol, pgtol, rescale)

    xopt = array(x)
    if full_output:
        funv, jacv = func_and_grad(xopt)
        info = {
            'solution': xopt,
            'fun': funv,
            'jac': jacv,
            'nfev': nf,
            'status': rc,
            'message': RCSTRINGS[rc],
            'success': -1 < rc < 3
        }
        return xopt, info
    else:
        return xopt
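
Every listing above normalizes bounds into explicit low/up sequences the same way before calling moduleTNC.minimize. Here is a self-contained version of that logic; split_bounds is a hypothetical helper name, written to mirror the loop above.

from numpy import inf

def split_bounds(bounds):
    # Map a list of (min, max) pairs, possibly containing None or a
    # None entry for a whole pair, onto explicit low/up lists with
    # -inf/+inf standing in for the missing sides.
    low, up = [], []
    for b in bounds:
        l, u = (None, None) if b is None else b
        low.append(-inf if l is None else l)
        up.append(inf if u is None else u)
    return low, up

print(split_bounds([(0, 1), None, (None, 5.0)]))
# -> ([0, -inf, -inf], [1, inf, 5.0])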