def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
                  approx_grad=0,
                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
                  epsilon=1e-8,
                  iprint=-1, maxfun=15000, disp=None):
    """
    Minimize a function func using the L-BFGS-B algorithm.

    Parameters
    ----------
    func : callable f(x,*args)
        Function to minimise.
    x0 : ndarray
        Initial guess.
    fprime : callable fprime(x,*args)
        The gradient of `func`.  If None, then `func` returns the function
        value and the gradient (``f, g = func(x, *args)``), unless
        `approx_grad` is True in which case `func` returns only ``f``.
    args : sequence
        Arguments to pass to `func` and `fprime`.
    approx_grad : bool
        Whether to approximate the gradient numerically (in which case
        `func` returns only the function value).
    bounds : list
        ``(min, max)`` pairs for each element in ``x``, defining
        the bounds on that parameter.  Use None for one of ``min`` or
        ``max`` when there is no bound in that direction.
    m : int
        The maximum number of variable metric corrections
        used to define the limited memory matrix.  (The limited memory BFGS
        method does not store the full hessian but uses this many terms in
        an approximation to it.)
    factr : float
        The iteration stops when
        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
        where ``eps`` is the machine precision, which is automatically
        generated by the code.  Typical values for `factr` are: 1e12 for
        low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
        high accuracy.
    pgtol : float
        The iteration will stop when
        ``max{|proj g_i | i = 1, ..., n} <= pgtol``
        where ``pg_i`` is the i-th component of the projected gradient.
    epsilon : float
        Step size used when `approx_grad` is True, for numerically
        calculating the gradient.
    iprint : int
        Controls the frequency of output.  ``iprint < 0`` means no output;
        ``iprint == 0`` means write messages to stdout; ``iprint > 1`` in
        addition means write logging information to a file named
        ``iterate.dat`` in the current working directory.
    disp : int, optional
        If zero, then no output.  If a positive number, then this
        overrides `iprint` (i.e., `iprint` gets the value of `disp`).
    maxfun : int
        Maximum number of function evaluations.

    Returns
    -------
    x : array_like
        Estimated position of the minimum.
    f : float
        Value of `func` at the minimum.
    d : dict
        Information dictionary.

        * d['warnflag'] is

          - 0 if converged,
          - 1 if too many function evaluations,
          - 2 if stopped for another reason, given in d['task']

        * d['grad'] is the gradient at the minimum (should be 0 ish)
        * d['funcalls'] is the number of function calls made.

    Notes
    -----
    License of L-BFGS-B (Fortran code):

    The version included here (in fortran code) is 2.1 (released in 1997).
    It was written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal
    <*****@*****.**>.  It carries the following condition for use:

    This software is freely available, but we expect that all publications
    describing work using this software, or all commercial products using
    it, quote at least one of the references given below.

    References
    ----------
    * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
      Bound Constrained Optimization, (1995), SIAM Journal on Scientific
      and Statistical Computing, 16, 5, pp. 1190-1208.
    * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778:
      L-BFGS-B, FORTRAN routines for large scale bound constrained
      optimization (1997), ACM Transactions on Mathematical Software,
      Vol 23, Num. 4, pp. 550-560.

    """
    x0 = asarray(x0).ravel()
    n, = x0.shape

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if disp is not None:
        if disp == 0:
            iprint = -1
        else:
            iprint = disp

    if approx_grad:
        def func_and_grad(x):
            f = func(x, *args)
            g = approx_fprime(x, func, epsilon, *args)
            return f, g
    elif fprime is None:
        def func_and_grad(x):
            f, g = func(x, *args)
            return f, g
    else:
        def func_and_grad(x):
            f = func(x, *args)
            g = fprime(x, *args)
            return f, g

    nbd = zeros(n, int32)
    low_bnd = zeros(n, float64)
    upper_bnd = zeros(n, float64)
    bounds_map = {(None, None): 0,
                  (1, None): 1,
                  (1, 1): 2,
                  (None, 1): 3}
    for i in range(0, n):
        l, u = bounds[i]
        if l is not None:
            low_bnd[i] = l
            l = 1
        if u is not None:
            upper_bnd[i] = u
            u = 1
        nbd[i] = bounds_map[l, u]

    x = array(x0, float64)
    f = array(0.0, float64)
    g = zeros((n,), float64)
    wa = zeros(2*m*n + 4*n + 12*m**2 + 12*m, float64)
    iwa = zeros(3*n, int32)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros(4, int32)
    isave = zeros(44, int32)
    dsave = zeros(29, float64)

    task[:] = 'START'

    n_function_evals = 0
    while 1:
        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave)
        task_str = task.tostring()
        if task_str.startswith(asbytes('FG')):
            # The minimization routine wants f and g at the current x.
            n_function_evals += 1
            # Overwrite f and g:
            f, g = func_and_grad(x)
        elif task_str.startswith(asbytes('NEW_X')):
            # new iteration
            if n_function_evals > maxfun:
                task[:] = ('STOP: TOTAL NO. of f AND g '
                           'EVALUATIONS EXCEEDS LIMIT')
        else:
            break

    task_str = task.tostring().strip(asbytes('\x00')).strip()
    if task_str.startswith(asbytes('CONV')):
        warnflag = 0
    elif n_function_evals > maxfun:
        warnflag = 1
    else:
        warnflag = 2

    d = {'grad': g,
         'task': task_str,
         'funcalls': n_function_evals,
         'warnflag': warnflag}
    return x, f, d


def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
                     options={}, full_output=False):
    """
    Minimize a scalar function of one or more variables using the L-BFGS-B
    algorithm.

    Options for the L-BFGS-B algorithm are:

    disp : bool
        Set to True to print convergence messages.
    maxcor : int
        The maximum number of variable metric corrections used to define
        the limited memory matrix.  (The limited memory BFGS method does
        not store the full hessian but uses this many terms in an
        approximation to it.)
    factr : float
        The iteration stops when
        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``, where
        ``eps`` is the machine precision, which is automatically generated
        by the code.  Typical values for `factr` are: 1e12 for low
        accuracy; 1e7 for moderate accuracy; 10.0 for extremely high
        accuracy.
    pgtol : float
        The iteration will stop when
        ``max{|proj g_i | i = 1, ..., n} <= pgtol`` where ``pg_i`` is the
        i-th component of the projected gradient.
    eps : float
        Step size used for numerical approximation of the jacobian.
    maxfev : int
        Maximum number of function evaluations.

    This function is called by the `minimize` function with
    `method=L-BFGS-B`.  It is not supposed to be called directly.
    """
    # retrieve useful options
    disp = options.get('disp', None)
    m = options.get('maxcor', 10)
    factr = options.get('factr', 1e7)
    pgtol = options.get('pgtol', 1e-5)
    epsilon = options.get('eps', 1e-8)
    maxfun = options.get('maxfev', 15000)
    iprint = options.get('iprint', -1)

    x0 = asarray(x0).ravel()
    n, = x0.shape

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if disp is not None:
        if disp == 0:
            iprint = -1
        else:
            iprint = disp

    if jac is None:
        def func_and_grad(x):
            f = fun(x, *args)
            g = approx_fprime(x, fun, epsilon, *args)
            return f, g
    else:
        def func_and_grad(x):
            f = fun(x, *args)
            g = jac(x, *args)
            return f, g

    nbd = zeros(n, int32)
    low_bnd = zeros(n, float64)
    upper_bnd = zeros(n, float64)
    bounds_map = {(None, None): 0,
                  (1, None): 1,
                  (1, 1): 2,
                  (None, 1): 3}
    for i in range(0, n):
        l, u = bounds[i]
        if l is not None:
            low_bnd[i] = l
            l = 1
        if u is not None:
            upper_bnd[i] = u
            u = 1
        nbd[i] = bounds_map[l, u]

    x = array(x0, float64)
    f = array(0.0, float64)
    g = zeros((n,), float64)
    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
    iwa = zeros(3*n, int32)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros(4, int32)
    isave = zeros(44, int32)
    dsave = zeros(29, float64)

    task[:] = 'START'

    n_function_evals = 0
    while 1:
        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave)
        task_str = task.tostring()
        if task_str.startswith(asbytes('FG')):
            # The minimization routine wants f and g at the current x.
            n_function_evals += 1
            # Overwrite f and g:
            f, g = func_and_grad(x)
        elif task_str.startswith(asbytes('NEW_X')):
            # new iteration
            if n_function_evals > maxfun:
                task[:] = ('STOP: TOTAL NO. of f AND g '
                           'EVALUATIONS EXCEEDS LIMIT')
        else:
            break

    task_str = task.tostring().strip(asbytes('\x00')).strip()
    if task_str.startswith(asbytes('CONV')):
        warnflag = 0
    elif n_function_evals > maxfun:
        warnflag = 1
    else:
        warnflag = 2

    if full_output:
        info = {'fun': f,
                'jac': g,
                'nfev': n_function_evals,
                'status': warnflag,
                'message': task_str,
                'solution': x,
                'success': warnflag == 0}
        return x, info
    else:
        return x
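

# --- Usage sketch (illustrative, not part of the original module) --------
# Shows how the options-dict variant of `_minimize_lbfgsb` above would be
# driven; in practice it is reached through `minimize(..., method='L-BFGS-B')`.
# `_example_minimize_options` is a hypothetical helper and assumes the
# options-dict definition above is the `_minimize_lbfgsb` in scope.
def _example_minimize_options():
    import numpy as np

    def sphere(z):
        # Simple convex test function with its minimum at the origin.
        return float(np.dot(z, z))

    # jac=None, so the gradient is approximated by forward differences
    # with step size options['eps'] (default 1e-8).
    x, info = _minimize_lbfgsb(sphere, np.array([1.0, -3.0]),
                               options={'pgtol': 1e-6, 'maxfev': 200},
                               full_output=True)
    return x, info['fun'], info['success']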


def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
                     disp=None, maxcor=10, ftol=2.2204460492503131e-09,
                     gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
                     iprint=-1, callback=None, **unknown_options):
    """
    Minimize a scalar function of one or more variables using the L-BFGS-B
    algorithm.

    Options for the L-BFGS-B algorithm are:

    disp : bool
        Set to True to print convergence messages.
    maxcor : int
        The maximum number of variable metric corrections used to define
        the limited memory matrix.  (The limited memory BFGS method does
        not store the full hessian but uses this many terms in an
        approximation to it.)
    ftol : float
        The iteration stops when
        ``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
    gtol : float
        The iteration will stop when
        ``max{|proj g_i | i = 1, ..., n} <= gtol`` where ``pg_i`` is the
        i-th component of the projected gradient.
    eps : float
        Step size used for numerical approximation of the jacobian.
    maxfun : int
        Maximum number of function evaluations.
    maxiter : int
        Maximum number of iterations.

    This function is called by the `minimize` function with
    `method=L-BFGS-B`.  It is not supposed to be called directly.
    """
    _check_unknown_options(unknown_options)
    m = maxcor
    epsilon = eps
    pgtol = gtol
    # The Fortran code expects `factr`; `ftol` is `factr` scaled by the
    # machine precision.
    factr = ftol / np.finfo(float).eps

    x0 = asarray(x0).ravel()
    n, = x0.shape

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if disp is not None:
        if disp == 0:
            iprint = -1
        else:
            iprint = disp

    if jac is None:
        def func_and_grad(x):
            f = fun(x, *args)
            g = approx_fprime(x, fun, epsilon, *args)
            return f, g
    else:
        def func_and_grad(x):
            f = fun(x, *args)
            g = jac(x, *args)
            return f, g

    nbd = zeros(n, int32)
    low_bnd = zeros(n, float64)
    upper_bnd = zeros(n, float64)
    bounds_map = {(None, None): 0,
                  (1, None): 1,
                  (1, 1): 2,
                  (None, 1): 3}
    for i in range(0, n):
        l, u = bounds[i]
        if l is not None:
            low_bnd[i] = l
            l = 1
        if u is not None:
            upper_bnd[i] = u
            u = 1
        nbd[i] = bounds_map[l, u]

    x = array(x0, float64)
    f = array(0.0, float64)
    g = zeros((n,), float64)
    wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
    iwa = zeros(3*n, int32)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros(4, int32)
    isave = zeros(44, int32)
    dsave = zeros(29, float64)

    task[:] = 'START'

    n_function_evals = 0
    n_iterations = 0
    while 1:
        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave)
        task_str = task.tostring()
        if task_str.startswith(asbytes('FG')):
            if n_function_evals > maxfun:
                task[:] = ('STOP: TOTAL NO. of f AND g '
                           'EVALUATIONS EXCEEDS LIMIT')
            else:
                # The minimization routine wants f and g at the current x.
                n_function_evals += 1
                # Overwrite f and g:
                f, g = func_and_grad(x)
        elif task_str.startswith(asbytes('NEW_X')):
            # new iteration
            if n_iterations > maxiter:
                task[:] = 'STOP: TOTAL NO. of ITERATIONS EXCEEDS LIMIT'
            else:
                n_iterations += 1
                if callback is not None:
                    callback(x)
        else:
            break

    task_str = task.tostring().strip(asbytes('\x00')).strip()
    if task_str.startswith(asbytes('CONV')):
        warnflag = 0
    elif n_function_evals > maxfun:
        warnflag = 1
    elif n_iterations > maxiter:
        warnflag = 1
    else:
        warnflag = 2

    return Result(fun=f, jac=g, nfev=n_function_evals,
                  nit=n_iterations, status=warnflag,
                  message=task_str, x=x,
                  success=(warnflag == 0))
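

# --- Usage sketch (illustrative, not part of the original module) --------
# Drives the keyword-argument variant of `_minimize_lbfgsb` above and
# records the iterates through `callback`.  `_example_minimize_result` is
# a hypothetical helper; attribute access on the returned `Result`
# (res.x, res.fun, ...) is assumed to be supported.
def _example_minimize_result():
    import numpy as np

    iterates = []

    def sphere(z):
        return float(np.dot(z, z))

    # gtol bounds the projected gradient; callback is invoked once per new
    # iterate.  x is updated in place by the Fortran wrapper, so copy it.
    res = _minimize_lbfgsb(sphere, np.array([1.0, -3.0]),
                           gtol=1e-6, maxiter=100,
                           callback=lambda xk: iterates.append(xk.copy()))
    return res.x, res.fun, res.nit, len(iterates)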


def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
                  approx_grad=0,
                  bounds=None, m=10, factr=1e7, pgtol=1e-5,
                  epsilon=1e-8,
                  iprint=-1, maxfun=15000):
    """
    Minimize a function func using the L-BFGS-B algorithm.

    Arguments:

    func -- function to minimize. Called as func(x, *args)

    x0 -- initial guess to minimum

    fprime -- gradient of func. If None, then func returns the function
              value and the gradient ( f, g = func(x, *args) ), unless
              approx_grad is True then func returns only f.
              Called as fprime(x, *args)

    args -- arguments to pass to function

    approx_grad -- if true, approximate the gradient numerically and func
                   returns only function value.

    bounds -- a list of (min, max) pairs for each element in x, defining
              the bounds on that parameter.  Use None for one of min or
              max when there is no bound in that direction

    m -- the maximum number of variable metric corrections used to define
         the limited memory matrix.  (the limited memory BFGS method does
         not store the full hessian but uses this many terms in an
         approximation to it).

    factr -- The iteration stops when
             (f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr*epsmch
             where epsmch is the machine precision, which is automatically
             generated by the code.  Typical values for factr: 1e12 for
             low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
             high accuracy.

    pgtol -- The iteration will stop when
             max{|proj g_i | i = 1, ..., n} <= pgtol
             where pg_i is the ith component of the projected gradient.

    epsilon -- step size used when approx_grad is true, for numerically
               calculating the gradient

    iprint -- controls the frequency of output. <0 means no output.

    maxfun -- maximum number of function evaluations.

    Returns:
    x, f, d = fmin_l_bfgs_b(func, x0, ...)

    x -- position of the minimum
    f -- value of func at the minimum
    d -- dictionary of information from routine
        d['warnflag'] is
            0 if converged,
            1 if too many function evaluations,
            2 if stopped for another reason, given in d['task']
        d['grad'] is the gradient at the minimum (should be 0 ish)
        d['funcalls'] is the number of function calls made.

    License of L-BFGS-B (Fortran code)
    ==================================

    The version included here (in fortran code) is 2.1 (released in 1997).
    It was written by Ciyou Zhu, Richard Byrd, and Jorge Nocedal
    <*****@*****.**>.  It carries the following condition for use:

    This software is freely available, but we expect that all publications
    describing work using this software, or all commercial products using
    it, quote at least one of the references given below.

    References

    * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
      Bound Constrained Optimization, (1995), SIAM Journal on Scientific
      and Statistical Computing, 16, 5, pp. 1190-1208.

    * C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778:
      L-BFGS-B, FORTRAN routines for large scale bound constrained
      optimization (1997), ACM Transactions on Mathematical Software,
      Vol 23, Num. 4, pp. 550-560.

    See also:

    scikits.openopt, which offers a unified syntax to call this and other
    solvers

    fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg --
        multivariate local optimizers

    leastsq -- nonlinear least squares minimizer

    fmin_l_bfgs_b, fmin_tnc, fmin_cobyla -- constrained multivariate
        optimizers

    anneal, brute -- global optimizers

    fminbound, brent, golden, bracket -- local scalar minimizers

    fsolve -- n-dimensional root-finding

    brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding

    fixed_point -- scalar fixed-point finder

    """
    n = len(x0)

    if bounds is None:
        bounds = [(None, None)] * n
    if len(bounds) != n:
        raise ValueError('length of x0 != length of bounds')

    if approx_grad:
        def func_and_grad(x):
            f = func(x, *args)
            g = approx_fprime(x, func, epsilon, *args)
            return f, g
    elif fprime is None:
        def func_and_grad(x):
            f, g = func(x, *args)
            return f, g
    else:
        def func_and_grad(x):
            f = func(x, *args)
            g = fprime(x, *args)
            return f, g

    nbd = zeros((n,), int32)
    low_bnd = zeros((n,), float64)
    upper_bnd = zeros((n,), float64)
    bounds_map = {(None, None): 0,
                  (1, None): 1,
                  (1, 1): 2,
                  (None, 1): 3}
    for i in range(0, n):
        l, u = bounds[i]
        if l is not None:
            low_bnd[i] = l
            l = 1
        if u is not None:
            upper_bnd[i] = u
            u = 1
        nbd[i] = bounds_map[l, u]

    x = array(x0, float64)
    f = array(0.0, float64)
    g = zeros((n,), float64)
    wa = zeros((2*m*n + 4*n + 12*m**2 + 12*m,), float64)
    iwa = zeros((3*n,), int32)
    task = zeros(1, 'S60')
    csave = zeros(1, 'S60')
    lsave = zeros((4,), int32)
    isave = zeros((44,), int32)
    dsave = zeros((29,), float64)

    task[:] = 'START'

    n_function_evals = 0
    while 1:
        # x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave)
        task_str = task.tostring()
        if task_str.startswith('FG'):
            # The minimization routine wants f and g at the current x.
            n_function_evals += 1
            # Overwrite f and g:
            f, g = func_and_grad(x)
        elif task_str.startswith('NEW_X'):
            # new iteration
            if n_function_evals > maxfun:
                task[:] = ('STOP: TOTAL NO. of f AND g '
                           'EVALUATIONS EXCEEDS LIMIT')
        else:
            break

    task_str = task.tostring().strip('\x00').strip()
    if task_str.startswith('CONV'):
        warnflag = 0
    elif n_function_evals > maxfun:
        warnflag = 1
    else:
        warnflag = 2

    d = {'grad': g,
         'task': task_str,
         'funcalls': n_function_evals,
         'warnflag': warnflag}
    return x, f, d
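

# --- Usage sketch (illustrative, not part of the original module) --------
# Same routine as above, but with approx_grad=1: only the function value is
# supplied and the gradient is estimated numerically with step `epsilon`.
# `_example_fmin_approx_grad` is a hypothetical helper.
def _example_fmin_approx_grad():
    import numpy as np

    def sphere(z):
        return float(np.dot(z, z))

    # The first coordinate is bounded below by 1, so the constrained
    # minimum sits on that bound, near (1, 0).
    xmin, fval, info = fmin_l_bfgs_b(sphere, np.array([3.0, -4.0]),
                                     approx_grad=1,
                                     bounds=[(1.0, None), (None, None)])
    return xmin, fval, info['funcalls']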