def test_alpha_beta_rc():
    """Pre-release ordering: rc sorts above alpha/beta, below final."""
    rc1 = Version('1.8.0rc1')
    assert rc1 == Version('1.8.0rc1')
    # anything that is a later release than 1.8.0rc1
    for newer in ('1.8.0', '1.8.0rc2'):
        assert rc1 < Version(newer)
    # anything that is an earlier release than 1.8.0rc1
    for older in ('1.8.0a2', '1.8.0b3', '1.7.2rc4'):
        assert rc1 > Version(older)
    assert Version('1.8.0b1') > Version('1.8.0a2')
def test_main_versions():
    """Ordering of plain major.minor.micro release versions."""
    base = Version('1.8.0')
    assert base == Version('1.8.0')
    for newer in ('1.9.0', '2.0.0', '1.8.1'):
        assert base < Version(newer)
    for older in ('1.7.0', '1.7.1', '0.9.9'):
        assert base > Version(older)
def test_dev0_version():
    """dev0 builds (with local version label) sort below every release."""
    dev0_version = '1.9.0.dev0+f16acvda'
    assert Version('1.9.0.dev0+Unknown') < Version('1.9.0')
    # a dev0 build precedes the final release and all pre-releases of it
    for newer in ('1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff'):
        assert Version(dev0_version) < Version(newer)
    assert Version(dev0_version) == Version(dev0_version)
def test_dev_version():
    """Un-numbered dev builds sort below releases and numbered dev builds."""
    assert Version('1.9.0.dev+Unknown') < Version('1.9.0')
    newer_versions = (
        '1.9.0',
        '1.9.0a1',
        '1.9.0b2',
        '1.9.0b2.dev+ffffffff',
        '1.9.0.dev1',
    )
    for ver in newer_versions:
        assert Version('1.9.0.dev+f16acvda') < Version(ver)
    assert Version('1.9.0.dev+f16acvda') == Version('1.9.0.dev+f16acvda')
class TestDifferentialEvolutionSolver(object): def setup_method(self): self.old_seterr = np.seterr(invalid='raise') self.limits = np.array([[0., 0.], [2., 2.]]) self.bounds = [(0., 2.), (0., 2.)] self.dummy_solver = DifferentialEvolutionSolver(self.quadratic, [(0, 100)]) # dummy_solver2 will be used to test mutation strategies self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic, [(0, 1)], popsize=7, mutation=0.5) # create a population that's only 7 members long # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T self.dummy_solver2.population = population def teardown_method(self): np.seterr(**self.old_seterr) def quadratic(self, x): return x[0]**2 def test__strategy_resolves(self): # test that the correct mutation function is resolved by # different requested strategy arguments solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='best1exp') assert_equal(solver.strategy, 'best1exp') assert_equal(solver.mutation_func.__name__, '_best1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='best1bin') assert_equal(solver.strategy, 'best1bin') assert_equal(solver.mutation_func.__name__, '_best1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand1bin') assert_equal(solver.strategy, 'rand1bin') assert_equal(solver.mutation_func.__name__, '_rand1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand1exp') assert_equal(solver.strategy, 'rand1exp') assert_equal(solver.mutation_func.__name__, '_rand1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand2exp') assert_equal(solver.strategy, 'rand2exp') assert_equal(solver.mutation_func.__name__, '_rand2') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='best2bin') assert_equal(solver.strategy, 'best2bin') assert_equal(solver.mutation_func.__name__, '_best2') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand2bin') assert_equal(solver.strategy, 
'rand2bin') assert_equal(solver.mutation_func.__name__, '_rand2') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='rand2exp') assert_equal(solver.strategy, 'rand2exp') assert_equal(solver.mutation_func.__name__, '_rand2') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='randtobest1bin') assert_equal(solver.strategy, 'randtobest1bin') assert_equal(solver.mutation_func.__name__, '_randtobest1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='randtobest1exp') assert_equal(solver.strategy, 'randtobest1exp') assert_equal(solver.mutation_func.__name__, '_randtobest1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='currenttobest1bin') assert_equal(solver.strategy, 'currenttobest1bin') assert_equal(solver.mutation_func.__name__, '_currenttobest1') solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='currenttobest1exp') assert_equal(solver.strategy, 'currenttobest1exp') assert_equal(solver.mutation_func.__name__, '_currenttobest1') def test__mutate1(self): # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc. result = np.array([0.05]) trial = self.dummy_solver2._best1((2, 3, 4, 5, 6)) assert_allclose(trial, result) result = np.array([0.25]) trial = self.dummy_solver2._rand1((2, 3, 4, 5, 6)) assert_allclose(trial, result) def test__mutate2(self): # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc. 
# [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] result = np.array([-0.1]) trial = self.dummy_solver2._best2((2, 3, 4, 5, 6)) assert_allclose(trial, result) result = np.array([0.1]) trial = self.dummy_solver2._rand2((2, 3, 4, 5, 6)) assert_allclose(trial, result) def test__randtobest1(self): # strategies randtobest/1/* result = np.array([0.15]) trial = self.dummy_solver2._randtobest1((2, 3, 4, 5, 6)) assert_allclose(trial, result) def test__currenttobest1(self): # strategies currenttobest/1/* result = np.array([0.1]) trial = self.dummy_solver2._currenttobest1(1, (2, 3, 4, 5, 6)) assert_allclose(trial, result) def test_can_init_with_dithering(self): mutation = (0.5, 1) solver = DifferentialEvolutionSolver(self.quadratic, self.bounds, mutation=mutation) assert_equal(solver.dither, list(mutation)) def test_invalid_mutation_values_arent_accepted(self): func = rosen mutation = (0.5, 3) assert_raises(ValueError, DifferentialEvolutionSolver, func, self.bounds, mutation=mutation) mutation = (-1, 1) assert_raises(ValueError, DifferentialEvolutionSolver, func, self.bounds, mutation=mutation) mutation = (0.1, np.nan) assert_raises(ValueError, DifferentialEvolutionSolver, func, self.bounds, mutation=mutation) mutation = 0.5 solver = DifferentialEvolutionSolver(func, self.bounds, mutation=mutation) assert_equal(0.5, solver.scale) assert_equal(None, solver.dither) def test__scale_parameters(self): trial = np.array([0.3]) assert_equal(30, self.dummy_solver._scale_parameters(trial)) # it should also work with the limits reversed self.dummy_solver.limits = np.array([[100], [0.]]) assert_equal(30, self.dummy_solver._scale_parameters(trial)) def test__unscale_parameters(self): trial = np.array([30]) assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) # it should also work with the limits reversed self.dummy_solver.limits = np.array([[100], [0.]]) assert_equal(0.3, self.dummy_solver._unscale_parameters(trial)) def test__ensure_constraint(self): trial = np.array([1.1, -100, 0.9, 2., 
300., -0.00001]) self.dummy_solver._ensure_constraint(trial) assert_equal(trial[2], 0.9) assert_(np.logical_and(trial >= 0, trial <= 1).all()) def test_differential_evolution(self): # test that the Jmin of DifferentialEvolutionSolver # is the same as the function evaluation solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)]) result = solver.solve() assert_almost_equal(result.fun, self.quadratic(result.x)) def test_best_solution_retrieval(self): # test that the getter property method for the best solution works. solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)]) result = solver.solve() assert_almost_equal(result.x, solver.x) def test_callback_terminates(self): # test that if the callback returns true, then the minimization halts bounds = [(0, 2), (0, 2)] def callback(param, convergence=0.): return True result = differential_evolution(rosen, bounds, callback=callback) assert_string_equal(result.message, 'callback function requested stop early ' 'by returning True') def test_args_tuple_is_passed(self): # test that the args tuple is passed to the cost function properly. bounds = [(-10, 10)] args = (1., 2., 3.) def quadratic(x, *args): if type(args) != tuple: raise ValueError('args should be a tuple') return args[0] + args[1] * x + args[2] * x**2. result = differential_evolution(quadratic, bounds, args=args, polish=True) assert_almost_equal(result.fun, 2 / 3.) 
def test_init_with_invalid_strategy(self): # test that passing an invalid strategy raises ValueError func = rosen bounds = [(-3, 3)] assert_raises(ValueError, differential_evolution, func, bounds, strategy='abc') def test_bounds_checking(self): # test that the bounds checking works func = rosen bounds = [(-3)] assert_raises(ValueError, differential_evolution, func, bounds) bounds = [(-3, 3), (3, 4, 5)] assert_raises(ValueError, differential_evolution, func, bounds) # test that we can use a new-type Bounds object result = differential_evolution(rosen, Bounds([0, 0], [2, 2])) assert_almost_equal(result.x, (1., 1.)) def test_select_samples(self): # select_samples should return 5 separate random numbers. limits = np.arange(12., dtype='float64').reshape(2, 6) bounds = list(zip(limits[0, :], limits[1, :])) solver = DifferentialEvolutionSolver(None, bounds, popsize=1) candidate = 0 r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5) assert_equal( len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6) def test_maxiter_stops_solve(self): # test that if the maximum number of iterations is exceeded # the solver stops. solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1) result = solver.solve() assert_equal(result.success, False) assert_equal(result.message, 'Maximum number of iterations has been exceeded.') def test_maxfun_stops_solve(self): # test that if the maximum number of function evaluations is exceeded # during initialisation the solver stops solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1, polish=False) result = solver.solve() assert_equal(result.nfev, 2) assert_equal(result.success, False) assert_equal(result.message, 'Maximum number of function evaluations has ' 'been exceeded.') # test that if the maximum number of function evaluations is exceeded # during the actual minimisation, then the solver stops. # Have to turn polishing off, as this will still occur even if maxfun # is reached. 
For popsize=5 and len(bounds)=2, then there are only 10 # function evaluations during initialisation. solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=5, polish=False, maxfun=40) result = solver.solve() assert_equal(result.nfev, 41) assert_equal(result.success, False) assert_equal(result.message, 'Maximum number of function evaluations has ' 'been exceeded.') # now repeat for updating='deferred version solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=5, polish=False, maxfun=40, updating='deferred') result = solver.solve() assert_equal(result.nfev, 40) assert_equal(result.success, False) assert_equal(result.message, 'Maximum number of function evaluations has ' 'been reached.') def test_quadratic(self): # test the quadratic function from object solver = DifferentialEvolutionSolver(self.quadratic, [(-100, 100)], tol=0.02) solver.solve() assert_equal(np.argmin(solver.population_energies), 0) def test_quadratic_from_diff_ev(self): # test the quadratic function from differential_evolution function differential_evolution(self.quadratic, [(-100, 100)], tol=0.02) def test_seed_gives_repeatability(self): result = differential_evolution(self.quadratic, [(-100, 100)], polish=False, seed=1, tol=0.5) result2 = differential_evolution(self.quadratic, [(-100, 100)], polish=False, seed=1, tol=0.5) assert_equal(result.x, result2.x) assert_equal(result.nfev, result2.nfev) @pytest.mark.skipif(Version(np.__version__) < Version('1.17'), reason='Generator not available for numpy, < 1.17') def test_random_generator(self): # check that np.random.Generator can be used (numpy >= 1.17) # obtain a np.random.Generator object rng = np.random.default_rng() inits = ['random', 'latinhypercube'] for init in inits: differential_evolution(self.quadratic, [(-100, 100)], polish=False, seed=rng, tol=0.5, init=init) def test_exp_runs(self): # test whether exponential mutation loop runs solver = DifferentialEvolutionSolver(rosen, self.bounds, strategy='best1exp', maxiter=1) 
solver.solve() def test_gh_4511_regression(self): # This modification of the differential evolution docstring example # uses a custom popsize that had triggered an off-by-one error. # Because we do not care about solving the optimization problem in # this test, we use maxiter=1 to reduce the testing time. bounds = [(-5, 5), (-5, 5)] # result = differential_evolution(rosen, bounds, popsize=1815, # maxiter=1) # the original issue arose because of rounding error in arange, with # linspace being a much better solution. 1815 is quite a large popsize # to use and results in a long test time (~13s). I used the original # issue to figure out the lowest number of samples that would cause # this rounding error to occur, 49. differential_evolution(rosen, bounds, popsize=49, maxiter=1) def test_calculate_population_energies(self): # if popsize is 3, then the overall generation has size (6,) solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3) solver._calculate_population_energies(solver.population) solver._promote_lowest_energy() assert_equal(np.argmin(solver.population_energies), 0) # initial calculation of the energies should require 6 nfev. assert_equal(solver._nfev, 6) def test_iteration(self): # test that DifferentialEvolutionSolver is iterable # if popsize is 3, then the overall generation has size (6,) solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3, maxfun=12) x, fun = next(solver) assert_equal(np.size(x, 0), 2) # 6 nfev are required for initial calculation of energies, 6 nfev are # required for the evolution of the 6 population members. 
assert_equal(solver._nfev, 12) # the next generation should halt because it exceeds maxfun assert_raises(StopIteration, next, solver) # check a proper minimisation can be done by an iterable solver solver = DifferentialEvolutionSolver(rosen, self.bounds) _, fun_prev = next(solver) for i, soln in enumerate(solver): x_current, fun_current = soln assert(fun_prev >= fun_current) _, fun_prev = x_current, fun_current # need to have this otherwise the solver would never stop. if i == 50: break def test_convergence(self): solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2, polish=False) solver.solve() assert_(solver.convergence < 0.2) def test_maxiter_none_GH5731(self): # Pre 0.17 the previous default for maxiter and maxfun was None. # the numerical defaults are now 1000 and np.inf. However, some scripts # will still supply None for both of those, this will raise a TypeError # in the solve method. solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None, maxfun=None) solver.solve() def test_population_initiation(self): # test the different modes of population initiation # init must be either 'latinhypercube' or 'random' # raising ValueError is something else is passed in assert_raises(ValueError, DifferentialEvolutionSolver, *(rosen, self.bounds), **{'init': 'rubbish'}) solver = DifferentialEvolutionSolver(rosen, self.bounds) # check that population initiation: # 1) resets _nfev to 0 # 2) all population energies are np.inf solver.init_population_random() assert_equal(solver._nfev, 0) assert_(np.all(np.isinf(solver.population_energies))) solver.init_population_lhs() assert_equal(solver._nfev, 0) assert_(np.all(np.isinf(solver.population_energies))) # we should be able to initialize with our own array population = np.linspace(-1, 3, 10).reshape(5, 2) solver = DifferentialEvolutionSolver(rosen, self.bounds, init=population, strategy='best2bin', atol=0.01, seed=1, popsize=5) assert_equal(solver._nfev, 0) 
assert_(np.all(np.isinf(solver.population_energies))) assert_(solver.num_population_members == 5) assert_(solver.population_shape == (5, 2)) # check that the population was initialized correctly unscaled_population = np.clip(solver._unscale_parameters(population), 0, 1) assert_almost_equal(solver.population[:5], unscaled_population) # population values need to be clipped to bounds assert_almost_equal(np.min(solver.population[:5]), 0) assert_almost_equal(np.max(solver.population[:5]), 1) # shouldn't be able to initialize with an array if it's the wrong shape # this would have too many parameters population = np.linspace(-1, 3, 15).reshape(5, 3) assert_raises(ValueError, DifferentialEvolutionSolver, *(rosen, self.bounds), **{'init': population}) def test_infinite_objective_function(self): # Test that there are no problems if the objective function # returns inf on some runs def sometimes_inf(x): if x[0] < .5: return np.inf return x[1] bounds = [(0, 1), (0, 1)] differential_evolution(sometimes_inf, bounds=bounds, disp=False) def test_deferred_updating(self): # check setting of deferred updating, with default workers bounds = [(0., 2.), (0., 2.)] solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred') assert_(solver._updating == 'deferred') assert_(solver._mapwrapper._mapfunc is map) solver.solve() @knownfail_on_py38 def test_immediate_updating(self): # check setting of immediate updating, with default workers bounds = [(0., 2.), (0., 2.)] solver = DifferentialEvolutionSolver(rosen, bounds) assert_(solver._updating == 'immediate') # should raise a UserWarning because the updating='immediate' # is being overridden by the workers keyword with warns(UserWarning): solver = DifferentialEvolutionSolver(rosen, bounds, workers=2) assert_(solver._updating == 'deferred') del solver gc.collect() # ensure MapWrapper cleans up properly @knownfail_on_py38 def test_parallel(self): # smoke test for parallelization with deferred updating bounds = [(0., 2.), (0., 2.)] 
with multiprocessing.Pool(2) as p, DifferentialEvolutionSolver( rosen, bounds, updating='deferred', workers=p.map) as solver: assert_(solver._mapwrapper.pool is not None) assert_(solver._updating == 'deferred') solver.solve() with DifferentialEvolutionSolver(rosen, bounds, updating='deferred', workers=2) as solver: assert_(solver._mapwrapper.pool is not None) assert_(solver._updating == 'deferred') solver.solve() del solver gc.collect() # ensure MapWrapper cleans up properly def test_converged(self): solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)]) solver.solve() assert_(solver.converged()) def test_constraint_violation_fn(self): def constr_f(x): return [x[0] + x[1]] def constr_f2(x): return [x[0]**2 + x[1], x[0] - x[1]] nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc)) cv = solver._constraint_violation_fn([1.0, 1.0]) assert_almost_equal(cv, 0.1) nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc, nlc2)) # for multiple constraints the constraint violations should # be concatenated. cv = solver._constraint_violation_fn([1.2, 1.]) assert_almost_equal(cv, [0.3, 0.64, 0]) cv = solver._constraint_violation_fn([2., 2.]) assert_almost_equal(cv, [2.1, 4.2, 0]) # should accept valid values cv = solver._constraint_violation_fn([0.5, 0.5]) assert_almost_equal(cv, [0., 0., 0.]) def test_constraint_population_feasibilities(self): def constr_f(x): return [x[0] + x[1]] def constr_f2(x): return [x[0]**2 + x[1], x[0] - x[1]] nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc)) # are population feasibilities correct # [0.5, 0.5] corresponds to scaled values of [1., 1.] 
feas, cv = solver._calculate_population_feasibilities( np.array([[0.5, 0.5], [1., 1.]])) assert_equal(feas, [False, False]) assert_almost_equal(cv, np.array([[0.1], [2.1]])) assert cv.shape == (2, 1) nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc, nlc2)) feas, cv = solver._calculate_population_feasibilities( np.array([[0.5, 0.5], [0.6, 0.5]])) assert_equal(feas, [False, False]) assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]])) feas, cv = solver._calculate_population_feasibilities( np.array([[0.5, 0.5], [1., 1.]])) assert_equal(feas, [False, False]) assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]])) assert cv.shape == (2, 3) feas, cv = solver._calculate_population_feasibilities( np.array([[0.25, 0.25], [1., 1.]])) assert_equal(feas, [True, False]) assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]])) assert cv.shape == (2, 3) def test_constraint_solve(self): def constr_f(x): return np.array([x[0] + x[1]]) nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc)) # trust-constr warns if the constraint function is linear with warns(UserWarning): res = solver.solve() assert constr_f(res.x) <= 1.9 assert res.success def test_impossible_constraint(self): def constr_f(x): return np.array([x[0] + x[1]]) nlc = NonlinearConstraint(constr_f, -np.inf, -1) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc), popsize=3, seed=1) # a UserWarning is issued because the 'trust-constr' polishing is # attempted on the least infeasible solution found. with warns(UserWarning): res = solver.solve() assert res.maxcv > 0 assert not res.success # test _promote_lowest_energy works when none of the population is # feasible. In this case, the solution with the lowest constraint # violation should be promoted. 
solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc), polish=False) next(solver) assert not solver.feasible.all() assert not np.isfinite(solver.population_energies).all() # now swap two of the entries in the population l = 20 cv = solver.constraint_violation[0] solver.population_energies[[0, l]] = solver.population_energies[[l, 0]] solver.population[[0, l], :] = solver.population[[l, 0], :] solver.constraint_violation[[0, l], :] = ( solver.constraint_violation[[l, 0], :]) solver._promote_lowest_energy() assert_equal(solver.constraint_violation[0], cv) def test_accept_trial(self): # _accept_trial(self, energy_trial, feasible_trial, cv_trial, # energy_orig, feasible_orig, cv_orig) def constr_f(x): return [x[0] + x[1]] nlc = NonlinearConstraint(constr_f, -np.inf, 1.9) solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)], constraints=(nlc)) fn = solver._accept_trial # both solutions are feasible, select lower energy assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.])) assert (fn(1.0, True, np.array([0.]), 0.1, True, np.array([0.])) == False) assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.])) # trial is feasible, original is not assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.])) # trial and original are infeasible # cv_trial have to be <= cv_original to be better assert (fn(0.1, False, np.array([0.5, 0.5]), 1.0, False, np.array([1., 1.0]))) assert (fn(0.1, False, np.array([0.5, 0.5]), 1.0, False, np.array([1., 0.50]))) assert (fn(1.0, False, np.array([0.5, 0.5]), 1.0, False, np.array([1., 0.4])) == False) def test_constraint_wrapper(self): lb = np.array([0, 20, 30]) ub = np.array([0.5, np.inf, 70]) x0 = np.array([1, 2, 3]) pc = _ConstraintWrapper(Bounds(lb, ub), x0) assert (pc.violation(x0) > 0).any() assert (pc.violation([0.25, 21, 31]) == 0).all() x0 = np.array([1, 2, 3, 4]) A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]]) pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0) 
assert (pc.violation(x0) > 0).any() assert (pc.violation([-10, 2, -10, 4]) == 0).all() def fun(x): return A.dot(x) nonlinear = NonlinearConstraint(fun, -np.inf, 0) pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4]) assert (pc.violation(x0) > 0).any() assert (pc.violation([-10, 2, -10, 4]) == 0).all() def test_constraint_wrapper_violation(self): def cons_f(x): return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]]) nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2]) pc = _ConstraintWrapper(nlc, [0.5, 1]) assert np.size(pc.bounds[0]) == 2 assert_array_equal(pc.violation([0.5, 1]), [0., 0.]) assert_almost_equal(pc.violation([0.5, 1.2]), [0., 0.1]) assert_almost_equal(pc.violation([1.2, 1.2]), [0.64, 0]) assert_almost_equal(pc.violation([0.1, -1.2]), [0.19, 0]) assert_almost_equal(pc.violation([0.1, 2]), [0.01, 1.14]) def test_L1(self): # Lampinen ([5]) test problem 1 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:]) return fun A = np.zeros((10, 14)) # 1-indexed to match reference A[1, [1, 2, 10, 11]] = 2, 2, 1, 1 A[2, [1, 10]] = -8, 1 A[3, [4, 5, 10]] = -2, -1, 1 A[4, [1, 3, 10, 11]] = 2, 2, 1, 1 A[5, [2, 11]] = -8, 1 A[6, [6, 7, 11]] = -2, -1, 1 A[7, [2, 3, 11, 12]] = 2, 2, 1, 1 A[8, [3, 12]] = -8, 1 A[9, [8, 9, 12]] = -2, -1, 1 A = A[1:, 1:] b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0]) L = LinearConstraint(A, -np.inf, b) bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)] # using a lower popsize to speed the test up res = differential_evolution(f, bounds, strategy='best1bin', seed=1234, constraints=(L), popsize=2) x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1) f_opt = -15 assert_allclose(f(x_opt), f_opt) assert res.success assert_allclose(res.x, x_opt, atol=5e-4) assert_allclose(res.fun, f_opt, atol=5e-3) assert_(np.all([email protected] <= b)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) # now repeat the same solve, using the same overall 
constraints, # but specify half the constraints in terms of LinearConstraint, # and the other half by NonlinearConstraint def c1(x): x = np.hstack(([0], x)) return [2*x[2] + 2*x[3] + x[11] + x[12], -8*x[3] + x[12]] def c2(x): x = np.hstack(([0], x)) return -2*x[8] - x[9] + x[12] L = LinearConstraint(A[:5, :], -np.inf, b[:5]) L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6]) N = NonlinearConstraint(c1, -np.inf, b[6:8]) N2 = NonlinearConstraint(c2, -np.inf, b[8:9]) constraints = (L, N, L2, N2) with suppress_warnings() as sup: sup.filter(UserWarning) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints, popsize=2) assert_allclose(res.x, x_opt, atol=5e-4) assert_allclose(res.fun, f_opt, atol=5e-3) assert_(np.all([email protected] <= b)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L2(self): # Lampinen ([5]) test problem 2 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 + 10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] - 8*x[7]) return fun def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5], 196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7], 282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5], -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 - 5*x[6] + 11*x[7]] N = NonlinearConstraint(c1, 0, np.inf) bounds = [(-10, 10)]*7 constraints = (N) with suppress_warnings() as sup: sup.filter(UserWarning) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints) f_opt = 680.6300599487869 x_opt = (2.330499, 1.951372, -0.4775414, 4.365726, -0.6244870, 1.038131, 1.594227) assert_allclose(f(x_opt), f_opt) assert_allclose(res.fun, f_opt) assert_allclose(res.x, x_opt, atol=1e-5) assert res.success assert_(np.all(np.array(c1(res.x)) >= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) 
assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L3(self): # Lampinen ([5]) test problem 3 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] + (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 + 5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 + (x[10] - 7)**2 + 45 ) return fun # maximize A = np.zeros((4, 11)) A[1, [1, 2, 7, 8]] = -4, -5, 3, -9 A[2, [1, 2, 7, 8]] = -10, 8, 17, -2 A[3, [1, 2, 9, 10]] = 8, -2, -5, 2 A = A[1:, 1:] b = np.array([-105, 0, -12]) def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10], -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120, -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6], -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40, -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30] L = LinearConstraint(A, b, np.inf) N = NonlinearConstraint(c1, 0, np.inf) bounds = [(-10, 10)]*10 constraints = (L, N) with suppress_warnings() as sup: sup.filter(UserWarning) res = differential_evolution(f, bounds, seed=1234, constraints=constraints, popsize=3) x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548, 1.430574, 1.321644, 9.828726, 8.280092, 8.375927) f_opt = 24.3062091 assert_allclose(f(x_opt), f_opt, atol=1e-5) assert_allclose(res.x, x_opt, atol=1e-6) assert_allclose(res.fun, f_opt, atol=1e-5) assert res.success assert_(np.all(A @ res.x >= b)) assert_(np.all(np.array(c1(res.x)) >= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L4(self): # Lampinen ([5]) test problem 4 def f(x): return np.sum(x[:3]) A = np.zeros((4, 9)) A[1, [4, 6]] = 0.0025, 0.0025 A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025 A[3, [8, 5]] = 0.01, -0.01 A = A[1:, 1:] b = np.array([1, 1, 1]) def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333, x[2]*x[7] - 1250*x[5] - x[2]*x[4] 
+ 1250*x[4], x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]] L = LinearConstraint(A, -np.inf, 1) N = NonlinearConstraint(c1, 0, np.inf) bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5 constraints = (L, N) with suppress_warnings() as sup: sup.filter(UserWarning) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints, popsize=3) f_opt = 7049.248 x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172, 217.9823, 286.416528, 395.601172] assert_allclose(f(x_opt), f_opt, atol=0.001) assert_allclose(res.fun, f_opt, atol=0.001) assert_allclose(res.x, x_opt, atol=0.002) assert res.success assert_(np.all(A @ res.x <= b)) assert_(np.all(np.array(c1(res.x)) >= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L5(self): # Lampinen ([5]) test problem 5 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) / (x[1]**3*(x[1]+x[2]))) return -fun # maximize def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [x[1]**2 - x[2] + 1, 1 - x[1] + (x[2]-4)**2] N = NonlinearConstraint(c1, -np.inf, 0) bounds = [(0, 10)]*2 constraints = (N) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints) x_opt = (1.22797135, 4.24537337) f_opt = -0.095825 print(res) assert_allclose(f(x_opt), f_opt, atol=2e-5) assert_allclose(res.fun, f_opt, atol=1e-4) assert res.success assert_(np.all(np.array(c1(res.x)) <= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L6(self): # Lampinen ([5]) test problem 6 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = (x[1]-10)**3 + (x[2] - 20)**3 return fun def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [(x[1]-5)**2 + (x[2] - 5)**2 - 100, -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81] N = NonlinearConstraint(c1, 0, np.inf) bounds = 
[(13, 100), (0, 100)] constraints = (N) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints, tol=1e-7) x_opt = (14.095, 0.84296) f_opt = -6961.814744 assert_allclose(f(x_opt), f_opt, atol=1e-6) assert_allclose(res.fun, f_opt, atol=0.001) assert_allclose(res.x, x_opt, atol=1e-4) assert res.success assert_(np.all(np.array(c1(res.x)) >= 0)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L7(self): # Lampinen ([5]) test problem 7 def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] + 37.293239*x[1] - 40792.141) return fun def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [ 85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] - 0.0022053*x[3]*x[5], 80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] + 0.0021813*x[3]**2, 9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] + 0.0019085*x[3]*x[4] ] N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25]) bounds = [(78, 102), (33, 45)] + [(27, 45)]*3 constraints = (N) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints) # using our best solution, rather than Lampinen/Koziel. Koziel solution # doesn't satisfy constraints, Lampinen f_opt just plain wrong. 
x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971, 36.77579979] f_opt = -30665.537578 assert_allclose(f(x_opt), f_opt) assert_allclose(res.x, x_opt, atol=1e-3) assert_allclose(res.fun, f_opt, atol=1e-3) assert res.success assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20]))) assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25]))) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) @pytest.mark.slow @pytest.mark.xfail(platform.machine() == 'ppc64le', reason="fails on ppc64le") def test_L8(self): def f(x): x = np.hstack(([0], x)) # 1-indexed to match reference fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3 return fun A = np.zeros((3, 5)) A[1, [4, 3]] = 1, -1 A[2, [3, 4]] = 1, -1 A = A[1:, 1:] b = np.array([-.55, -.55]) def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [ 1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) + 894.8 - x[1], 1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) + 894.8 - x[2], 1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) + 1294.8 ] L = LinearConstraint(A, b, np.inf) N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001)) bounds = [(0, 1200)]*2+[(-.55, .55)]*2 constraints = (L, N) with suppress_warnings() as sup: sup.filter(UserWarning) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints, maxiter=5000) x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336) f_opt = 5126.4981 assert_allclose(f(x_opt), f_opt, atol=1e-3) assert_allclose(res.x[:2], x_opt[:2], atol=2e-3) assert_allclose(res.x[2:], x_opt[2:], atol=2e-3) assert_allclose(res.fun, f_opt, atol=2e-2) assert res.success assert_(np.all([email protected] >= b)) assert_(np.all(np.array(c1(res.x)) >= -0.001)) assert_(np.all(np.array(c1(res.x)) <= 0.001)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1])) def test_L9(self): # Lampinen ([5]) test problem 9 def f(x): x = 
np.hstack(([0], x)) # 1-indexed to match reference return x[1]**2 + (x[2]-1)**2 def c1(x): x = np.hstack(([0], x)) # 1-indexed to match reference return [x[2] - x[1]**2] N = NonlinearConstraint(c1, [-.001], [0.001]) bounds = [(-1, 1)]*2 constraints = (N) res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234, constraints=constraints) x_opt = [np.sqrt(2)/2, 0.5] f_opt = 0.75 assert_allclose(f(x_opt), f_opt) assert_allclose(np.abs(res.x), x_opt, atol=1e-3) assert_allclose(res.fun, f_opt, atol=1e-3) assert res.success assert_(np.all(np.array(c1(res.x)) >= -0.001)) assert_(np.all(np.array(c1(res.x)) <= 0.001)) assert_(np.all(res.x >= np.array(bounds)[:, 0])) assert_(np.all(res.x <= np.array(bounds)[:, 1]))
class TestDualAnnealing:
    """Tests for ``scipy.optimize.dual_annealing`` and its internal helpers
    (``VisitingDistribution``, ``EnergyState``, ``StrategyChain``)."""

    def setup_method(self):
        # A function that returns always infinity for initialization tests
        self.weirdfunc = lambda x: np.inf
        # 2-D bounds for testing function
        self.ld_bounds = [(-5.12, 5.12)] * 2
        # 4-D bounds for testing function
        self.hd_bounds = self.ld_bounds * 4
        # Number of values to be generated for testing visit function
        self.nbtestvalues = 5000
        self.high_temperature = 5230
        self.low_temperature = 0.1
        self.qv = 2.62
        self.seed = 1234
        self.rs = check_random_state(self.seed)
        # counters updated by self.func / self.rosen_der_wrapper so tests can
        # compare against the nfev/njev reported in the result object
        self.nb_fun_call = 0
        self.ngev = 0

    def callback(self, x, f, context):
        # For testing callback mechanism. Should stop for e <= 1 as
        # the callback function returns True
        if f <= 1.0:
            return True

    def func(self, x, args=()):
        # Using Rastrigin function for performing tests
        if args:
            shift = args
        else:
            shift = 0
        y = np.sum((x - shift)**2 - 10 * np.cos(2 * np.pi * (
            x - shift))) + 10 * np.size(x) + shift
        self.nb_fun_call += 1
        return y

    def rosen_der_wrapper(self, x, args=()):
        # Rosenbrock gradient that also counts how often it is evaluated.
        self.ngev += 1
        return rosen_der(x, *args)

    # FIXME: there are some discontinuities in behaviour as a function of
    # `qv`, this needs investigating - see gh-12384
    @pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9])
    def test_visiting_stepping(self, qv):
        lu = list(zip(*self.ld_bounds))
        lower = np.array(lu[0])
        upper = np.array(lu[1])
        dim = lower.size
        vd = VisitingDistribution(lower, upper, qv, self.rs)
        values = np.zeros(dim)
        x_step_low = vd.visiting(values, 0, self.high_temperature)
        # Make sure that only the first component is changed
        assert_equal(np.not_equal(x_step_low, 0), True)
        values = np.zeros(dim)
        x_step_high = vd.visiting(values, dim, self.high_temperature)
        # Make sure that component other than at dim has changed
        assert_equal(np.not_equal(x_step_high[0], 0), True)

    @pytest.mark.parametrize('qv', [2.25, 2.62, 2.9])
    def test_visiting_dist_high_temperature(self, qv):
        lu = list(zip(*self.ld_bounds))
        lower = np.array(lu[0])
        upper = np.array(lu[1])
        vd = VisitingDistribution(lower, upper, qv, self.rs)
        # values = np.zeros(self.nbtestvalues)
        # for i in np.arange(self.nbtestvalues):
        #     values[i] = vd.visit_fn(self.high_temperature)
        values = vd.visit_fn(self.high_temperature, self.nbtestvalues)

        # Visiting distribution is a distorted version of Cauchy-Lorentz
        # distribution, and as no 1st and higher moments (no mean defined,
        # no variance defined).
        # Check that big tails values are generated
        assert_array_less(np.min(values), 1e-10)
        assert_array_less(1e+10, np.max(values))

    def test_reset(self):
        # reset() must refuse a function that is infinite everywhere
        owf = ObjectiveFunWrapper(self.weirdfunc)
        lu = list(zip(*self.ld_bounds))
        lower = np.array(lu[0])
        upper = np.array(lu[1])
        es = EnergyState(lower, upper)
        assert_raises(ValueError, es.reset, owf, check_random_state(None))

    def test_low_dim(self):
        ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
        assert_allclose(ret.fun, 0., atol=1e-12)
        assert ret.success

    def test_high_dim(self):
        ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed)
        assert_allclose(ret.fun, 0., atol=1e-12)
        assert ret.success

    def test_low_dim_no_ls(self):
        ret = dual_annealing(self.func, self.ld_bounds,
                             no_local_search=True, seed=self.seed)
        assert_allclose(ret.fun, 0., atol=1e-4)

    def test_high_dim_no_ls(self):
        ret = dual_annealing(self.func, self.hd_bounds,
                             no_local_search=True, seed=self.seed)
        assert_allclose(ret.fun, 0., atol=1e-4)

    def test_nb_fun_call(self):
        # result nfev must match the number of actual objective evaluations
        ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
        assert_equal(self.nb_fun_call, ret.nfev)

    def test_nb_fun_call_no_ls(self):
        ret = dual_annealing(self.func, self.ld_bounds,
                             no_local_search=True, seed=self.seed)
        assert_equal(self.nb_fun_call, ret.nfev)

    def test_max_reinit(self):
        # an always-infinite objective exhausts re-initialization attempts
        assert_raises(ValueError, dual_annealing, self.weirdfunc,
                      self.ld_bounds)

    def test_reproduce(self):
        res1 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
        res2 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
        res3 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
        # If we have reproducible results, x components found has to
        # be exactly the same, which is not the case with no seeding
        assert_equal(res1.x, res2.x)
        assert_equal(res1.x, res3.x)

    @pytest.mark.skipif(Version(np.__version__) < Version('1.17'),
                        reason='Generator not available for numpy, < 1.17')
    def test_rand_gen(self):
        # check that np.random.Generator can be used (numpy >= 1.17)
        # obtain a np.random.Generator object
        rng = np.random.default_rng(1)

        res1 = dual_annealing(self.func, self.ld_bounds, seed=rng)
        # seed again
        rng = np.random.default_rng(1)
        res2 = dual_annealing(self.func, self.ld_bounds, seed=rng)
        # If we have reproducible results, x components found has to
        # be exactly the same, which is not the case with no seeding
        assert_equal(res1.x, res2.x)

    def test_bounds_integrity(self):
        # lower bound > upper bound in the second pair must be rejected
        wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)]
        assert_raises(ValueError, dual_annealing, self.func, wrong_bounds)

    def test_bound_validity(self):
        # non-finite bounds (inf / nan) must be rejected
        invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)]
        assert_raises(ValueError, dual_annealing, self.func, invalid_bounds)
        invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)]
        assert_raises(ValueError, dual_annealing, self.func, invalid_bounds)
        invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)]
        assert_raises(ValueError, dual_annealing, self.func, invalid_bounds)

    def test_local_search_option_bounds(self):
        func = lambda x: np.sum((x - 5) * (x - 1))
        bounds = list(zip([-6, -5], [6, 5]))
        # Test bounds can be passed (see gh-10831)
        with np.testing.suppress_warnings() as sup:
            sup.record(RuntimeWarning, "Values in x were outside bounds ")
            dual_annealing(func, bounds=bounds,
                           local_search_options={"method": "SLSQP",
                                                 "bounds": bounds})

        with np.testing.suppress_warnings() as sup:
            sup.record(RuntimeWarning, "Method CG cannot handle ")
            dual_annealing(func, bounds=bounds,
                           local_search_options={"method": "CG",
                                                 "bounds": bounds})
            # Verify warning happened for Method cannot handle bounds.
            assert sup.log

    def test_max_fun_ls(self):
        ret = dual_annealing(self.func, self.ld_bounds, maxfun=100,
                             seed=self.seed)

        # the local search may overshoot maxfun by at most one LS run
        ls_max_iter = min(max(len(self.ld_bounds) *
                              LocalSearchWrapper.LS_MAXITER_RATIO,
                              LocalSearchWrapper.LS_MAXITER_MIN),
                          LocalSearchWrapper.LS_MAXITER_MAX)
        assert ret.nfev <= 100 + ls_max_iter
        assert not ret.success

    def test_max_fun_no_ls(self):
        ret = dual_annealing(self.func, self.ld_bounds,
                             no_local_search=True, maxfun=500, seed=self.seed)
        assert ret.nfev <= 500
        assert not ret.success

    def test_maxiter(self):
        ret = dual_annealing(self.func, self.ld_bounds, maxiter=700,
                             seed=self.seed)
        assert ret.nit <= 700

    # Testing that args are passed correctly for dual_annealing
    def test_fun_args_ls(self):
        ret = dual_annealing(self.func, self.ld_bounds,
                             args=((3.14159, )), seed=self.seed)
        assert_allclose(ret.fun, 3.14159, atol=1e-6)

    # Testing that args are passed correctly for pure simulated annealing
    def test_fun_args_no_ls(self):
        ret = dual_annealing(self.func, self.ld_bounds,
                             args=((3.14159, )), no_local_search=True,
                             seed=self.seed)
        assert_allclose(ret.fun, 3.14159, atol=1e-4)

    def test_callback_stop(self):
        # Testing that callback make the algorithm stop for
        # fun value <= 1.0 (see callback method)
        ret = dual_annealing(self.func, self.ld_bounds,
                             callback=self.callback, seed=self.seed)
        assert ret.fun <= 1.0
        assert 'stop early' in ret.message[0]
        assert not ret.success

    @pytest.mark.parametrize('method, atol', [
        ('Nelder-Mead', 2e-5),
        ('COBYLA', 1e-5),
        ('Powell', 1e-8),
        ('CG', 1e-8),
        ('BFGS', 1e-8),
        ('TNC', 1e-8),
        ('SLSQP', 2e-7),
    ])
    def test_multi_ls_minimizer(self, method, atol):
        # each supported local-search minimizer should still reach the optimum
        ret = dual_annealing(self.func, self.ld_bounds,
                             local_search_options=dict(method=method),
                             seed=self.seed)
        assert_allclose(ret.fun, 0., atol=atol)

    def test_wrong_restart_temp(self):
        # restart_temp_ratio must lie strictly in (0, 1)
        assert_raises(ValueError, dual_annealing, self.func,
                      self.ld_bounds, restart_temp_ratio=1)
        assert_raises(ValueError, dual_annealing, self.func,
                      self.ld_bounds, restart_temp_ratio=0)

    def test_gradient_gnev(self):
        minimizer_opts = {
            'jac': self.rosen_der_wrapper,
        }
        ret = dual_annealing(rosen, self.ld_bounds,
                             local_search_options=minimizer_opts,
                             seed=self.seed)
        # reported njev must match the wrapper's own evaluation count
        assert ret.njev == self.ngev

    def test_from_docstring(self):
        # regression check for the example shown in the docstring
        func = lambda x: np.sum(x * x - 10 * np.cos(2 * np.pi * x)
                                ) + 10 * np.size(x)
        lw = [-5.12] * 10
        up = [5.12] * 10
        ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
        assert_allclose(ret.x, [
            -4.26437714e-09, -3.91699361e-09, -1.86149218e-09,
            -3.97165720e-09, -6.29151648e-09, -6.53145322e-09,
            -3.93616815e-09, -6.55623025e-09, -6.05775280e-09,
            -5.00668935e-09
        ], atol=4e-8)
        assert_allclose(ret.fun, 0.000000, atol=5e-13)

    @pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [
        (0, 100, 1000, 1.0097587941791923),
        (0, 2, 1000, 1.2599210498948732),
        (10, 100, 878, 0.8786035869128718),
        (10, 60, 695, 0.6812920690579612),
        (2, 100, 990, 0.9897404249173424),
    ])
    def test_accept_reject_probabilistic(self, new_e, temp_step, accepted,
                                         accept_rate):
        # Test accepts unconditionally with e < current_energy and
        # probabilistically with e > current_energy

        rs = check_random_state(123)

        count_accepted = 0
        iterations = 1000

        accept_param = -5
        current_energy = 1
        for _ in range(iterations):
            energy_state = EnergyState(lower=None, upper=None)
            # Set energy state with current_energy, any location.
            energy_state.update_current(current_energy, [0])

            chain = StrategyChain(accept_param, None, None, None, rs,
                                  energy_state)
            # Normally this is set in run()
            chain.temperature_step = temp_step

            # Check if update is accepted.
            chain.accept_reject(j=1, e=new_e, x_visit=[2])
            if energy_state.current_energy == new_e:
                count_accepted += 1

        assert count_accepted == accepted

        # Check accept rate
        pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step
        rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param))

        assert_allclose(rate, accept_rate)
"""`uarray` provides functions for generating multimethods that dispatch to multiple different backends This should be imported, rather than `_uarray` so that an installed version could be used instead, if available. This means that users can call `uarray.set_backend` directly instead of going through SciPy. """ # Prefer an installed version of uarray, if available try: import uarray as _uarray except ImportError: _has_uarray = False else: from scipy._lib._pep440 import Version _has_uarray = Version(_uarray.__version__) >= Version("0.5") del _uarray if _has_uarray: from uarray import * from uarray import _Function else: from ._uarray import * from ._uarray import _Function del _has_uarray
class TestBasinHopping:
    """Tests for ``scipy.optimize.basinhopping`` using the module-level 1-D
    and 2-D test functions (``func1d``, ``func2d``, ...)."""

    def setup_method(self):
        """ Tests setup.

        Run tests based on the 1-D and 2-D functions described above.
        """
        # x0/sol are indexed by dimensionality: [0] -> 1-D, [1] -> 2-D
        self.x0 = (1.0, [1.0, 1.0])
        self.sol = (-0.195, np.array([-0.195, -0.1]))

        self.tol = 3  # number of decimal places

        self.niter = 100
        self.disp = False

        # fix random seed
        np.random.seed(1234)

        self.kwargs = {"method": "L-BFGS-B", "jac": True}
        self.kwargs_nograd = {"method": "L-BFGS-B"}

    def test_TypeError(self):
        # test the TypeErrors are raised on bad input
        i = 1
        # if take_step is passed, it must be callable
        assert_raises(TypeError, basinhopping, func2d, self.x0[i],
                      take_step=1)
        # if accept_test is passed, it must be callable
        assert_raises(TypeError, basinhopping, func2d, self.x0[i],
                      accept_test=1)

    def test_input_validation(self):
        # both rate/factor parameters must lie strictly in (0, 1)
        msg = 'target_accept_rate has to be in range \\(0, 1\\)'
        with assert_raises(ValueError, match=msg):
            basinhopping(func1d, self.x0[0], target_accept_rate=0.)
        with assert_raises(ValueError, match=msg):
            basinhopping(func1d, self.x0[0], target_accept_rate=1.)

        msg = 'stepwise_factor has to be in range \\(0, 1\\)'
        with assert_raises(ValueError, match=msg):
            basinhopping(func1d, self.x0[0], stepwise_factor=0.)
        with assert_raises(ValueError, match=msg):
            basinhopping(func1d, self.x0[0], stepwise_factor=1.)

    def test_1d_grad(self):
        # test 1-D minimizations with gradient
        i = 0
        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_2d(self):
        # test 2d minimizations with gradient
        i = 1
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        assert_almost_equal(res.x, self.sol[i], self.tol)
        assert_(res.nfev > 0)

    def test_njev(self):
        # test njev is returned correctly
        i = 1
        minimizer_kwargs = self.kwargs.copy()
        # L-BFGS-B doesn't use njev, but BFGS does
        minimizer_kwargs["method"] = "BFGS"
        res = basinhopping(func2d, self.x0[i],
                           minimizer_kwargs=minimizer_kwargs,
                           niter=self.niter, disp=self.disp)
        assert_(res.nfev > 0)
        # func2d returns (f, grad) jointly, so every call counts for both
        assert_equal(res.nfev, res.njev)

    def test_jac(self):
        # test Jacobian returned
        minimizer_kwargs = self.kwargs.copy()
        # BFGS returns a Jacobian
        minimizer_kwargs["method"] = "BFGS"

        res = basinhopping(func2d_easyderiv, [0.0, 0.0],
                           minimizer_kwargs=minimizer_kwargs,
                           niter=self.niter, disp=self.disp)

        assert_(hasattr(res.lowest_optimization_result, "jac"))

        # in this case, the Jacobian is just [df/dx, df/dy]
        _, jacobian = func2d_easyderiv(res.x)
        assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
                            self.tol)

    def test_2d_nograd(self):
        # test 2-D minimizations without gradient
        i = 1
        res = basinhopping(func2d_nograd, self.x0[i],
                           minimizer_kwargs=self.kwargs_nograd,
                           niter=self.niter, disp=self.disp)
        assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_all_minimizers(self):
        # Test 2-D minimizations with gradient. Nelder-Mead, Powell, and
        # COBYLA don't accept jac=True, so aren't included here.
        i = 1
        methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
        minimizer_kwargs = copy.copy(self.kwargs)
        for method in methods:
            minimizer_kwargs["method"] = method
            res = basinhopping(func2d, self.x0[i],
                               minimizer_kwargs=minimizer_kwargs,
                               niter=self.niter, disp=self.disp)
            assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_all_nograd_minimizers(self):
        # Test 2-D minimizations without gradient. Newton-CG requires
        # jac=True, so not included here.
        i = 1
        methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
                   'Nelder-Mead', 'Powell', 'COBYLA']
        minimizer_kwargs = copy.copy(self.kwargs_nograd)
        for method in methods:
            minimizer_kwargs["method"] = method
            res = basinhopping(func2d_nograd, self.x0[i],
                               minimizer_kwargs=minimizer_kwargs,
                               niter=self.niter, disp=self.disp)
            tol = self.tol
            # COBYLA is less precise; loosen the tolerance for it
            if method == 'COBYLA':
                tol = 2
            assert_almost_equal(res.x, self.sol[i], decimal=tol)

    def test_pass_takestep(self):
        # test that passing a custom takestep works
        # also test that the stepsize is being adjusted
        takestep = MyTakeStep1()
        initial_step_size = takestep.stepsize
        i = 1
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp,
                           take_step=takestep)
        assert_almost_equal(res.x, self.sol[i], self.tol)
        assert_(takestep.been_called)
        # make sure that the build in adaptive step size has been used
        assert_(initial_step_size != takestep.stepsize)

    def test_pass_simple_takestep(self):
        # test that passing a custom takestep without attribute stepsize
        takestep = myTakeStep2
        i = 1
        res = basinhopping(func2d_nograd, self.x0[i],
                           minimizer_kwargs=self.kwargs_nograd,
                           niter=self.niter, disp=self.disp,
                           take_step=takestep)
        assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_pass_accept_test(self):
        # test passing a custom accept test
        # makes sure it's being used and ensures all the possible return
        # values are accepted.
        accept_test = MyAcceptTest()
        i = 1
        # there's no point in running it more than a few steps.
        basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                     niter=10, disp=self.disp, accept_test=accept_test)
        assert_(accept_test.been_called)

    def test_pass_callback(self):
        # test passing a custom callback function
        # This makes sure it's being used. It also returns True after 10
        # steps to ensure that it's stopping early.
        callback = MyCallBack()
        i = 1
        # there's no point in running it more than a few steps.
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=30, disp=self.disp, callback=callback)
        assert_(callback.been_called)
        assert_("callback" in res.message[0])
        # One of the calls of MyCallBack is during BasinHoppingRunner
        # construction, so there are only 9 remaining before MyCallBack stops
        # the minimization.
        assert_equal(res.nit, 9)

    def test_minimizer_fail(self):
        # test if a minimizer fails
        i = 1
        # maxiter=0 forces every local minimization to fail
        self.kwargs["options"] = dict(maxiter=0)
        self.niter = 10
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        # the number of failed minimizations should be the number of
        # iterations + 1
        assert_equal(res.nit + 1, res.minimization_failures)

    def test_niter_zero(self):
        # gh5915, what happens if you call basinhopping with niter=0
        i = 0
        basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                     niter=0, disp=self.disp)

    def test_seed_reproducibility(self):
        # seed should ensure reproducibility between runs
        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}

        f_1 = []

        def callback(x, f, accepted):
            f_1.append(f)

        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
                     niter=10, callback=callback, seed=10)

        f_2 = []

        def callback2(x, f, accepted):
            f_2.append(f)

        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
                     niter=10, callback=callback2, seed=10)
        # identical seeds -> identical sequence of accepted minima
        assert_equal(np.array(f_1), np.array(f_2))

    @pytest.mark.skipif(Version(np.__version__) < Version('1.17'),
                        reason='Generator not available for numpy, < 1.17')
    def test_random_gen(self):
        # check that np.random.Generator can be used (numpy >= 1.17)
        rng = np.random.default_rng(1)

        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}

        res1 = basinhopping(func2d, [1.0, 1.0],
                            minimizer_kwargs=minimizer_kwargs,
                            niter=10, seed=rng)

        # re-seed and run again; results must match exactly
        rng = np.random.default_rng(1)
        res2 = basinhopping(func2d, [1.0, 1.0],
                            minimizer_kwargs=minimizer_kwargs,
                            niter=10, seed=rng)
        assert_equal(res1.x, res2.x)

    def test_monotonic_basin_hopping(self):
        # test 1-D minimizations with gradient and T=0
        i = 0
        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp, T=0)
        assert_almost_equal(res.x, self.sol[i], self.tol)
def test_legacy_version():
    """Non-PEP-440 (legacy) identifiers always compare less than any valid
    version.

    For NumPy this only occurs on dev builds prior to 1.10.0, which are
    unsupported anyway.
    """
    cases = [
        ('invalid', '0.0.0'),
        ('1.9.0-f16acvda', '1.0.0'),
    ]
    for legacy_id, valid_id in cases:
        assert parse(legacy_id) < Version(valid_id)
def test_dev0_a_b_rc_mixed():
    """A ``devN`` pre-release equals itself and sorts below the bare
    pre-release it leads up to, regardless of the local (+hash) segment."""
    a2_dev = Version('1.9.0a2.dev0+f16acvda')
    assert a2_dev == Version('1.9.0a2.dev0+f16acvda')
    assert Version('1.9.0a2.dev0+6acvda54') < Version('1.9.0a2')
def test_version_1_point_10():
    """Two-digit minor/micro components compare numerically, not
    lexicographically (regression test for gh-2998)."""
    ordered_pairs = [
        ('1.9.0', '1.10.0'),
        ('1.11.0', '1.11.1'),
        ('1.99.11', '1.99.12'),
    ]
    for lower, higher in ordered_pairs:
        assert Version(lower) < Version(higher)
    assert Version('1.11.0') == Version('1.11.0')
class TestUtils:
    """Tests for ``scipy.stats.qmc`` utilities: ``scale``, ``discrepancy``,
    ``update_discrepancy`` and the prime helpers."""

    def test_scale(self):
        # 1d scalar
        space = [[0], [1], [0.5]]
        out = [[-2], [6], [2]]
        scaled_space = qmc.scale(space, l_bounds=-2, u_bounds=6)
        assert_allclose(scaled_space, out)

        # 2d space
        space = [[0, 0], [1, 1], [0.5, 0.5]]
        bounds = np.array([[-2, 0], [6, 5]])
        out = [[-2, 0], [6, 5], [2, 2.5]]

        scaled_space = qmc.scale(space, l_bounds=bounds[0],
                                 u_bounds=bounds[1])
        assert_allclose(scaled_space, out)

        # reverse=True must map the scaled points back to the unit cube
        scaled_back_space = qmc.scale(scaled_space, l_bounds=bounds[0],
                                      u_bounds=bounds[1], reverse=True)
        assert_allclose(scaled_back_space, space)

        # broadcast
        space = [[0, 0, 0], [1, 1, 1], [0.5, 0.5, 0.5]]
        l_bounds, u_bounds = 0, [6, 5, 3]
        out = [[0, 0, 0], [6, 5, 3], [3, 2.5, 1.5]]

        scaled_space = qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds)
        assert_allclose(scaled_space, out)

    def test_scale_random(self):
        # scale followed by reverse-scale must round-trip random samples
        np.random.seed(0)
        sample = np.random.rand(30, 10)
        a = -np.random.rand(10) * 10
        b = np.random.rand(10) * 10
        scaled = qmc.scale(sample, a, b, reverse=False)
        unscaled = qmc.scale(scaled, a, b, reverse=True)
        assert_allclose(unscaled, sample)

    def test_scale_errors(self):
        with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
            space = [0, 1, 0.5]
            qmc.scale(space, l_bounds=-2, u_bounds=6)

        with pytest.raises(ValueError, match=r"Bounds are not consistent"
                                             r" a < b"):
            space = [[0, 0], [1, 1], [0.5, 0.5]]
            bounds = np.array([[-2, 6], [6, 5]])
            qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])

        with pytest.raises(ValueError,
                           match=r"shape mismatch: objects cannot "
                                 r"be broadcast to a "
                                 r"single shape"):
            space = [[0, 0], [1, 1], [0.5, 0.5]]
            l_bounds, u_bounds = [-2, 0, 2], [6, 5]
            qmc.scale(space, l_bounds=l_bounds, u_bounds=u_bounds)

        with pytest.raises(ValueError,
                           match=r"Sample dimension is different "
                                 r"than bounds dimension"):
            space = [[0, 0], [1, 1], [0.5, 0.5]]
            bounds = np.array([[-2, 0, 2], [6, 5, 5]])
            qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])

        with pytest.raises(ValueError, match=r"Sample is not in unit "
                                             r"hypercube"):
            space = [[0, 0], [1, 1.5], [0.5, 0.5]]
            bounds = np.array([[-2, 0], [6, 5]])
            qmc.scale(space, l_bounds=bounds[0], u_bounds=bounds[1])

        with pytest.raises(ValueError, match=r"Sample is out of bounds"):
            out = [[-2, 0], [6, 5], [8, 2.5]]
            bounds = np.array([[-2, 0], [6, 5]])
            qmc.scale(out, l_bounds=bounds[0], u_bounds=bounds[1],
                      reverse=True)

    def test_discrepancy(self):
        # map integer lattice points into the open unit hypercube
        space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
        space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0)
        space_2 = np.array([[1, 5], [2, 4], [3, 3], [4, 2], [5, 1], [6, 6]])
        space_2 = (2.0 * space_2 - 1.0) / (2.0 * 6.0)

        # From Fang et al. Design and modeling for computer experiments, 2006
        assert_allclose(qmc.discrepancy(space_1), 0.0081, atol=1e-4)
        assert_allclose(qmc.discrepancy(space_2), 0.0105, atol=1e-4)

        # From Zhou Y.-D. et al. Mixture discrepancy for quasi-random point
        # sets. Journal of Complexity, 29 (3-4), pp. 283-301, 2013.
        # Example 4 on Page 298
        sample = np.array([[2, 1, 1, 2, 2, 2],
                           [1, 2, 2, 2, 2, 2],
                           [2, 1, 1, 1, 1, 1],
                           [1, 1, 1, 1, 2, 2],
                           [1, 2, 2, 2, 1, 1],
                           [2, 2, 2, 2, 1, 1],
                           [2, 2, 2, 1, 2, 2]])
        sample = (2.0 * sample - 1.0) / (2.0 * 2.0)

        assert_allclose(qmc.discrepancy(sample, method='MD'), 2.5000,
                        atol=1e-4)
        assert_allclose(qmc.discrepancy(sample, method='WD'), 1.3680,
                        atol=1e-4)
        assert_allclose(qmc.discrepancy(sample, method='CD'), 0.3172,
                        atol=1e-4)

        # From Tim P. et al. Minimizing the L2 and Linf star discrepancies
        # of a single point in the unit hypercube. JCAM, 2005
        # Table 1 on Page 283
        for dim in [2, 4, 8, 16, 32, 64]:
            ref = np.sqrt(3**(-dim))
            assert_allclose(qmc.discrepancy(np.array([[1]*dim]),
                                            method='L2-star'), ref)

    def test_discrepancy_errors(self):
        sample = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])

        with pytest.raises(
            ValueError, match=r"Sample is not in unit hypercube"
        ):
            qmc.discrepancy(sample)

        with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
            qmc.discrepancy([1, 3])

        sample = [[0, 0], [1, 1], [0.5, 0.5]]
        with pytest.raises(ValueError, match=r"'toto' is not a valid ..."):
            qmc.discrepancy(sample, method="toto")

    def test_discrepancy_parallel(self, monkeypatch):
        # parallel (workers > 1 / workers=-1) results must match serial ones
        sample = np.array([[2, 1, 1, 2, 2, 2],
                           [1, 2, 2, 2, 2, 2],
                           [2, 1, 1, 1, 1, 1],
                           [1, 1, 1, 1, 2, 2],
                           [1, 2, 2, 2, 1, 1],
                           [2, 2, 2, 2, 1, 1],
                           [2, 2, 2, 1, 2, 2]])
        sample = (2.0 * sample - 1.0) / (2.0 * 2.0)

        assert_allclose(qmc.discrepancy(sample, method='MD', workers=8),
                        2.5000, atol=1e-4)
        assert_allclose(qmc.discrepancy(sample, method='WD', workers=8),
                        1.3680, atol=1e-4)
        assert_allclose(qmc.discrepancy(sample, method='CD', workers=8),
                        0.3172, atol=1e-4)

        # From Tim P. et al. Minimizing the L2 and Linf star discrepancies
        # of a single point in the unit hypercube. JCAM, 2005
        # Table 1 on Page 283
        for dim in [2, 4, 8, 16, 32, 64]:
            ref = np.sqrt(3 ** (-dim))
            assert_allclose(qmc.discrepancy(np.array([[1] * dim]),
                                            method='L2-star', workers=-1),
                            ref)

        # workers=-1 needs os.cpu_count(); simulate it being unavailable
        monkeypatch.setattr(os, 'cpu_count', lambda: None)
        with pytest.raises(NotImplementedError,
                           match="Cannot determine the"):
            qmc.discrepancy(sample, workers=-1)

        with pytest.raises(ValueError,
                           match="Invalid number of workers..."):
            qmc.discrepancy(sample, workers=-2)

    @pytest.mark.skipif(Version(np.__version__) < Version('1.17'),
                        reason='default_rng not available for numpy, < 1.17')
    def test_update_discrepancy(self):
        # From Fang et al. Design and modeling for computer experiments, 2006
        space_1 = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]])
        space_1 = (2.0 * space_1 - 1.0) / (2.0 * 6.0)

        # iterative update with the last point must match the full value
        disc_init = qmc.discrepancy(space_1[:-1], iterative=True)
        disc_iter = update_discrepancy(space_1[-1], space_1[:-1], disc_init)

        assert_allclose(disc_iter, 0.0081, atol=1e-4)

        # n<d
        rng = np.random.default_rng(241557431858162136881731220526394276199)
        space_1 = rng.random((4, 10))

        disc_ref = qmc.discrepancy(space_1)
        disc_init = qmc.discrepancy(space_1[:-1], iterative=True)
        disc_iter = update_discrepancy(space_1[-1], space_1[:-1], disc_init)

        assert_allclose(disc_iter, disc_ref, atol=1e-4)

        # errors
        with pytest.raises(ValueError, match=r"Sample is not in unit "
                                             r"hypercube"):
            update_discrepancy(space_1[-1], space_1[:-1] + 1, disc_init)

        with pytest.raises(ValueError, match=r"Sample is not a 2D array"):
            update_discrepancy(space_1[-1], space_1[0], disc_init)

        x_new = [1, 3]
        with pytest.raises(ValueError, match=r"x_new is not in unit "
                                             r"hypercube"):
            update_discrepancy(x_new, space_1[:-1], disc_init)

        x_new = [[0.5, 0.5]]
        with pytest.raises(ValueError, match=r"x_new is not a 1D array"):
            update_discrepancy(x_new, space_1[:-1], disc_init)

        x_new = [0.3, 0.1, 0]
        with pytest.raises(ValueError, match=r"x_new and sample must be "
                                             r"broadcastable"):
            update_discrepancy(x_new, space_1[:-1], disc_init)

    def test_discrepancy_alternative_implementation(self):
        """Alternative definitions from Matt Haberland."""
        def disc_c2(x):
            # centered L2 discrepancy written out from its definition
            n, s = x.shape
            xij = x
            disc1 = np.sum(np.prod((1
                                    + 1/2*np.abs(xij-0.5)
                                    - 1/2*np.abs(xij-0.5)**2), axis=1))
            xij = x[None, :, :]
            xkj = x[:, None, :]
            disc2 = np.sum(np.sum(np.prod(1
                                          + 1/2*np.abs(xij - 0.5)
                                          + 1/2*np.abs(xkj - 0.5)
                                          - 1/2*np.abs(xij - xkj),
                                          axis=2), axis=0))
            return (13/12)**s - 2/n * disc1 + 1/n**2*disc2

        def disc_wd(x):
            # wrap-around discrepancy
            n, s = x.shape
            xij = x[None, :, :]
            xkj = x[:, None, :]
            disc = np.sum(np.sum(np.prod(3/2
                                         - np.abs(xij - xkj)
                                         + np.abs(xij - xkj)**2,
                                         axis=2), axis=0))
            return -(4/3)**s + 1/n**2 * disc

        def disc_md(x):
            # mixture discrepancy
            n, s = x.shape
            xij = x
            disc1 = np.sum(np.prod((5/3
                                    - 1/4*np.abs(xij-0.5)
                                    - 1/4*np.abs(xij-0.5)**2), axis=1))
            xij = x[None, :, :]
            xkj = x[:, None, :]
            disc2 = np.sum(np.sum(np.prod(15/8
                                          - 1/4*np.abs(xij - 0.5)
                                          - 1/4*np.abs(xkj - 0.5)
                                          - 3/4*np.abs(xij - xkj)
                                          + 1/2*np.abs(xij - xkj)**2,
                                          axis=2), axis=0))
            return (19/12)**s - 2/n * disc1 + 1/n**2*disc2

        def disc_star_l2(x):
            # L2-star discrepancy
            n, s = x.shape
            return np.sqrt(
                3 ** (-s) - 2 ** (1 - s) / n
                * np.sum(np.prod(1 - x ** 2, axis=1))
                + np.sum([
                    np.prod(1 - np.maximum(x[k, :], x[j, :]))
                    for k in range(n) for j in range(n)
                ]) / n ** 2
            )

        np.random.seed(0)
        sample = np.random.rand(30, 10)

        disc_curr = qmc.discrepancy(sample, method='CD')
        disc_alt = disc_c2(sample)
        assert_allclose(disc_curr, disc_alt)

        disc_curr = qmc.discrepancy(sample, method='WD')
        disc_alt = disc_wd(sample)
        assert_allclose(disc_curr, disc_alt)

        disc_curr = qmc.discrepancy(sample, method='MD')
        disc_alt = disc_md(sample)
        assert_allclose(disc_curr, disc_alt)

        disc_curr = qmc.discrepancy(sample, method='L2-star')
        disc_alt = disc_star_l2(sample)
        assert_allclose(disc_curr, disc_alt)

    def test_n_primes(self):
        # n_primes(k) returns the first k primes; check the k-th prime value
        primes = n_primes(10)
        assert primes[-1] == 29

        primes = n_primes(168)
        assert primes[-1] == 997

        primes = n_primes(350)
        assert primes[-1] == 2357

    def test_primes(self):
        primes = primes_from_2_to(50)
        out = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
        assert_allclose(primes, out)