from typing import Optional, Tuple

from scipy.optimize import linesearch  # `scipy.optimize._linesearch` in newer SciPy


def line_search_wolfe1(
    state: OptimisationState,
    old_state: Optional[OptimisationState] = None,
    c1=1e-4,
    c2=0.9,
    amax=50,
    amin=1e-8,
    xtol=1e-14,
    extra_condition=None,
    **kwargs,
) -> Tuple[Optional[float], OptimisationState]:
    """
    As `scalar_search_wolfe1`, but driven by an `OptimisationState` that
    encapsulates the current point and search direction.

    Parameters
    ----------
    state : OptimisationState
        Current state; must expose `value`, `phi(a)`, `derphi(a)` and
        `step(a)`.
    old_state : OptimisationState, optional
        State at the preceding point; its value is used to estimate the
        initial step size.
    extra_condition : callable, optional
        Predicate `extra_condition(stepsize, next_state)`; if it returns
        False, the step is rejected.

    The remaining parameters are passed through to `scalar_search_wolfe1`.

    Returns
    -------
    stepsize : float or None
        Step size satisfying the Wolfe conditions, or None if the search
        failed or `extra_condition` rejected the step.
    next_state : OptimisationState
        The state obtained by stepping `state` by `stepsize`.
    """
    derphi0 = state.derphi(0)
    old_fval = state.value
    stepsize, _, _ = linesearch.scalar_search_wolfe1(
        state.phi,
        state.derphi,
        -old_fval,  # negated because we are actually performing maximisation
        old_state and -old_state.value,  # None when there is no previous state
        derphi0,
        c1=c1,
        c2=c2,
        amax=amax,
        amin=amin,
        xtol=xtol,
    )
    next_state = state.step(stepsize)
    if stepsize is not None and extra_condition is not None:
        if not extra_condition(stepsize, next_state):
            stepsize = None
    return stepsize, next_state
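# Usage sketch (illustrative, not part of the module): any object that
# duck-types `OptimisationState` -- exposing `value`, `phi(a)`, `derphi(a)`
# and `step(a)` -- can drive the search. `QuadraticState` below is a
# hypothetical stand-in that maximises f(x) = -(x - 3)**2 from x = 0 along
# the direction p = 1; note that `phi` is the *negated* objective along the
# ray, matching the sign convention the wrapper expects.
class QuadraticState:
    def __init__(self, x=0.0, p=1.0):
        self.x, self.p = x, p

    @property
    def value(self):
        # Objective being maximised, evaluated at the current point.
        return -(self.x - 3.0) ** 2

    def phi(self, a):
        # Negated objective along the ray x + a*p (minimised by the search).
        return (self.x + a * self.p - 3.0) ** 2

    def derphi(self, a):
        # Derivative of phi with respect to the step length a.
        return 2.0 * (self.x + a * self.p - 3.0) * self.p

    def step(self, a):
        # The wrapper calls step() even on failure, so tolerate a = None.
        a = 0.0 if a is None else a
        return QuadraticState(self.x + a * self.p, self.p)


stepsize, next_state = line_search_wolfe1(QuadraticState())
# stepsize should land near 3.0, where the objective attains its maximum.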
def test_scalar_search_wolfe1(self):
    c = 0
    for name, phi, derphi, old_phi0 in self.scalar_iter():
        c += 1
        s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),
                                                old_phi0, derphi(0))
        assert_fp_equal(phi0, phi(0), name)
        assert_fp_equal(phi1, phi(s), name)
        assert_wolfe(s, phi, derphi, err_msg=name)

    assert_(c > 3)  # check that the iterator really works...
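# For reference, a minimal sketch of what `assert_wolfe` is assumed to
# verify (mirroring SciPy's line-search test helper): the strong Wolfe
# conditions at the returned step. `check_wolfe` is a hypothetical name,
# not the helper used above.
def check_wolfe(s, phi, derphi, c1=1e-4, c2=0.9):
    """Return True iff step s satisfies the strong Wolfe conditions."""
    phi0, derphi0 = phi(0), derphi(0)
    sufficient_decrease = phi(s) <= phi0 + c1 * s * derphi0  # Armijo rule
    curvature = abs(derphi(s)) <= c2 * abs(derphi0)          # curvature rule
    return sufficient_decrease and curvature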