def test_relative_parameters(self):
    """Criterion built with xtol stops on the iteration cap or on a small
    relative parameter change, and keeps going otherwise."""
    crit = criterion(iterations_max=1000, xtol=0.1)

    # Beyond the iteration cap: must stop regardless of the parameter move.
    state = {'iteration': 1001,
             'old_parameters': numpy.ones((1,)),
             'new_parameters': numpy.zeros((1,))}
    assert crit(state)

    # Few iterations, large relative move (1 -> 0): must not stop.
    state = {'iteration': 5,
             'old_parameters': numpy.ones((1,)),
             'new_parameters': numpy.zeros((1,))}
    assert not crit(state)

    # Relative move of 0.1 (1 -> 0.9) meets the xtol threshold: must stop.
    state = {'iteration': 5,
             'old_parameters': numpy.ones((1,)),
             'new_parameters': numpy.ones((1,)) * 0.9}
    assert crit(state)
def test_relative_gradient(self):
    """Criterion built with gtol stops on the iteration cap or when the
    gradient (via the state's function object) is small enough."""
    crit = criterion(iterations_max=1000, gtol=0.1)

    # Beyond the iteration cap: must stop even with a large gradient.
    state = {'iteration': 1001,
             'function': Function(1.),
             'new_parameters': numpy.zeros((1,))}
    assert crit(state)

    # Few iterations and gradient magnitude 1.0: must not stop.
    state = {'iteration': 5,
             'function': Function(1.),
             'new_parameters': numpy.zeros((1,))}
    assert not crit(state)

    # Gradient magnitude 0.09 is below gtol: must stop.
    state = {'iteration': 5,
             'function': Function(0.09),
             'new_parameters': numpy.zeros((1,))}
    assert crit(state)
def test_relative_value(self):
    """Criterion built with ftol stops on the iteration cap or on a small
    relative change of the objective value."""
    crit = criterion(iterations_max=1000, ftol=0.1)

    # Beyond the iteration cap: must stop regardless of the value change.
    state = {'iteration': 1001, 'old_value': 1., 'new_value': 0.}
    assert crit(state)

    # Few iterations, large value drop (1 -> 0): must not stop.
    state = {'iteration': 5, 'old_value': 1., 'new_value': 0.}
    assert not crit(state)

    # Relative drop of 0.1 (1 -> 0.9) meets the ftol threshold: must stop.
    state = {'iteration': 5, 'old_value': 1., 'new_value': 0.9}
    assert crit(state)
def test_swp_frgradient_relative():
    """Fletcher-Reeves conjugate gradient with a strong Wolfe-Powell line
    search converges to the quadratic's optimum [1, 3]."""
    # numpy.float (an alias of the builtin float) was removed in NumPy 1.24;
    # plain float keeps the identical float64 dtype on every NumPy version.
    start_point = numpy.zeros(2, dtype=float)
    optimi = optimizer.StandardOptimizer(
        function=Quadratic(),
        step=step.FRConjugateGradientStep(),
        criterion=criterion.criterion(ftol=0.000001, iterations_max=1000,
                                      gtol=0.0001),
        x0=start_point,
        line_search=line_search.StrongWolfePowellRule())
    assert_almost_equal(optimi.optimize(), numpy.array([1, 3], dtype=float))
def test_simple_marquardt():
    """Marquardt step with a backtracking line search converges to the
    quadratic's optimum [1, 3] (to 5 decimals)."""
    # numpy.float (an alias of the builtin float) was removed in NumPy 1.24;
    # plain float keeps the identical float64 dtype on every NumPy version.
    start_point = numpy.zeros(2, dtype=float)
    optimi = optimizer.StandardOptimizer(
        function=Quadratic(),
        step=step.MarquardtStep(),
        criterion=criterion.criterion(gtol=0.00001, iterations_max=200),
        x0=start_point,
        line_search=line_search.BacktrackingSearch())
    # Previously optimize() was called twice (once into an unused `opt`,
    # once inside the assertion); a single converged run is asserted now.
    assert_almost_equal(optimi.optimize(),
                        numpy.array([1, 3], dtype=float),
                        decimal=5)
def test_wpr_cwgradient():
    """CW conjugate gradient with a Wolfe-Powell line search converges to
    the 2-D Rosenbrock minimum at (1, 1)."""
    # numpy.float (an alias of the builtin float) was removed in NumPy 1.24;
    # plain float keeps the identical float64 dtype on every NumPy version.
    start_point = numpy.empty(2, dtype=float)
    start_point[0] = -1.01
    start_point[-1] = 1.01
    optimi = optimizer.StandardOptimizer(
        function=Rosenbrock(2),
        step=step.CWConjugateGradientStep(),
        criterion=criterion.criterion(iterations_max=1000, ftol=0.00000001,
                                      gtol=0.0001),
        x0=start_point,
        line_search=line_search.WolfePowellRule())
    assert_array_almost_equal(optimi.optimize(), numpy.ones(2, dtype=float))
def test_simple_marquardt():
    """Marquardt step with a backtracking line search converges to the
    quadratic's optimum [1, 3] (to 5 decimals).

    NOTE(review): an identically named test_simple_marquardt appears earlier
    in this file; under test collection this later definition shadows it --
    consider renaming one of them.
    """
    # numpy.float (an alias of the builtin float) was removed in NumPy 1.24;
    # plain float keeps the identical float64 dtype on every NumPy version.
    start_point = numpy.zeros(2, dtype=float)
    optimi = optimizer.StandardOptimizer(
        function=Quadratic(),
        step=step.MarquardtStep(),
        criterion=criterion.criterion(gtol=0.00001, iterations_max=200),
        x0=start_point,
        line_search=line_search.BacktrackingSearch())
    # Previously optimize() was called twice (once into an unused `opt`,
    # once inside the assertion); a single converged run is asserted now.
    assert_almost_equal(optimi.optimize(),
                        numpy.array([1, 3], dtype=float),
                        decimal=5)
def test_swp_dypgradient_relative():
    """Dai-Yuan conjugate gradient with a strong Wolfe-Powell line search
    converges to the quadratic's optimum [1, 3]."""
    # numpy.float (an alias of the builtin float) was removed in NumPy 1.24;
    # plain float keeps the identical float64 dtype on every NumPy version.
    start_point = numpy.zeros(2, dtype=float)
    optimi = optimizer.StandardOptimizer(
        function=Quadratic(),
        step=step.DYConjugateGradientStep(),
        criterion=criterion.criterion(ftol=0.000001, iterations_max=1000,
                                      gtol=0.0001),
        x0=start_point,
        line_search=line_search.StrongWolfePowellRule())
    assert_almost_equal(optimi.optimize(), numpy.array([1, 3], dtype=float))
def test_swpr_dygradient():
    """Dai-Yuan conjugate gradient with a strong Wolfe-Powell line search
    converges to the 2-D Rosenbrock minimum at (1, 1) (to 4 decimals)."""
    # numpy.float (an alias of the builtin float) was removed in NumPy 1.24;
    # plain float keeps the identical float64 dtype on every NumPy version.
    start_point = numpy.empty(2, dtype=float)
    start_point[0] = -1.01
    start_point[-1] = 1.01
    optimi = optimizer.StandardOptimizer(
        function=Rosenbrock(2),
        step=step.DYConjugateGradientStep(),
        criterion=criterion.criterion(iterations_max=1000, ftol=0.00000001,
                                      gtol=0.0001),
        x0=start_point,
        line_search=line_search.StrongWolfePowellRule())
    assert_array_almost_equal(optimi.optimize(), numpy.ones(2, dtype=float),
                              decimal=4)