def test_eggholder_python():
    """Minimize the 2-d Eggholder function with the Python CMA-ES and
    validate the returned result object against a monitoring wrapper."""
    popsize = 1000
    dim = 2
    testfun = Eggholder()
    sdevs = [1.0] * dim
    max_eval = 100000
    limit = -800
    # Stochastic optimization may miss the target; retry up to 5 times.
    for _ in range(5):
        # The wrapper counts evaluations and records the best point seen.
        wrapper = Wrapper(testfun.fun, dim)
        ret = cmaes.minimize(
            wrapper.eval,
            testfun.bounds,
            input_sigma=sdevs,
            max_evaluations=max_eval,
            popsize=popsize,
        )
        if limit > ret.fun:
            break
    assert limit > ret.fun  # optimization target not reached
    assert max_eval + popsize >= ret.nfev  # too much function calls
    assert ret.nfev == wrapper.get_count()  # wrong number of function calls returned
    assert almost_equal(ret.x, wrapper.get_best_x())  # wrong best X returned
    assert ret.fun == wrapper.get_best_y()  # wrong best y returned
def test_rosen_python():
    """Minimize the 5-d Rosenbrock function with the Python CMA-ES and
    validate the returned result object against a monitoring wrapper."""
    popsize = 31
    dim = 5
    testfun = Rosen(dim)
    sdevs = [1.0] * dim
    max_eval = 100000
    limit = 0.00001
    # Retry loop: a stochastic run may fail to reach the target once.
    for _ in range(5):
        wrapper = Wrapper(testfun.fun, dim)  # monitors function evaluations
        ret = cmaes.minimize(
            wrapper.eval,
            testfun.bounds,
            input_sigma=sdevs,
            max_evaluations=max_eval,
            popsize=popsize,
        )
        if limit > ret.fun:
            break
    assert limit > ret.fun  # optimization target not reached
    assert max_eval + popsize >= ret.nfev  # too much function calls
    assert max_eval / popsize + 2 > ret.nit  # too much iterations
    assert ret.nfev == wrapper.get_count()  # wrong number of function calls returned
    assert almost_equal(ret.x, wrapper.get_best_x())  # wrong best X returned
    assert ret.fun == wrapper.get_best_y()  # wrong best y returned
def test_rosen_parallel():
    """Minimize the 2-d Rosenbrock function using parallel objective
    evaluation and validate the returned result object.

    Parallel execution slows down this test since the test function is
    very fast to evaluate; popsize defines the maximal number of used
    threads.
    """
    # BUG FIX: on Windows sys.platform is 'win32', so the original
    # startswith('windows') check never matched and the guard was dead.
    # Windows cannot pickle function objects, hence the skip.
    if sys.platform.startswith('win'):
        return
    popsize = 8
    dim = 2
    testfun = Rosen(dim)
    sdevs = [1.0] * dim
    max_eval = 10000
    limit = 0.00001
    for _ in range(5):
        # use a wrapper to monitor function evaluations
        wrapper = Wrapper(testfun.fun, dim)
        ret = cmaes.minimize(
            wrapper.eval,
            testfun.bounds,
            input_sigma=sdevs,
            max_evaluations=max_eval,
            popsize=popsize,
            is_parallel=True,
        )
        if limit > ret.fun:
            break
    assert limit > ret.fun  # optimization target not reached
    assert max_eval + popsize > ret.nfev  # too much function calls
    assert max_eval // popsize + 2 > ret.nit  # too much iterations
    assert ret.nfev == wrapper.get_count()  # wrong number of function calls returned
    assert almost_equal(ret.x, wrapper.get_best_x())  # wrong best X returned
    assert ret.fun == wrapper.get_best_y()  # wrong best y returned
def test_rastrigin_python():
    """Minimize the 3-d Rastrigin function with the Python CMA-ES and
    validate the result object, including the termination status code."""
    popsize = 100
    dim = 3
    testfun = Rastrigin(dim)
    sdevs = [1.0] * dim
    max_eval = 100000
    limit = 0.0001
    # stochastic optimization may fail the first time
    for _ in range(5):
        # use a wrapper to monitor function evaluations
        wrapper = Wrapper(testfun.fun, dim)
        ret = cmaes.minimize(
            wrapper.eval,
            testfun.bounds,
            input_sigma=sdevs,
            max_evaluations=max_eval,
            popsize=popsize,
        )
        if limit > ret.fun:
            break
    assert limit > ret.fun  # optimization target not reached
    assert max_eval + popsize >= ret.nfev  # too much function calls
    assert max_eval / popsize + 2 > ret.nit  # too much iterations
    assert ret.status == 4  # wrong cma termination code
    assert ret.nfev == wrapper.get_count()  # wrong number of function calls returned
    assert almost_equal(ret.x, wrapper.get_best_x())  # wrong best X returned
    assert ret.fun == wrapper.get_best_y()  # wrong best y returned
def cma_python(self, fun, guess, bounds, sdevs, rg):
    """CMA_ES Python implementation.

    Delegates to cmaes.minimize, drawing the evaluation budget, popsize,
    stop threshold and run id from this object's configuration/store.
    Returns the tuple (best x, best f(x), number of evaluations).
    """
    # NOTE(review): 'stop_fittness' spelling matches this fcmaes
    # version's keyword argument — verify before "correcting" it.
    result = cmaes.minimize(
        fun,
        bounds,
        guess,
        input_sigma=sdevs,
        max_evaluations=self.store.eval_num(),
        popsize=self.popsize,
        stop_fittness=self.stop_fittness,
        rg=rg,
        runid=self.store.get_count_runs(),
    )
    return result.x, result.fun, result.nfev
def test_cma_parallel(problem, num): best = math.inf t0 = time.perf_counter(); for i in range(num): ret = cmaes.minimize(problem.fun, bounds = problem.bounds, workers = mp.cpu_count()) if best > ret.fun or i % 100 == 99: print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}" .format(i+1, dtime(t0), best, ret.fun)) best = min(ret.fun, best)
def test_cma_python(problem, num): best = math.inf t0 = time.perf_counter(); for i in range(num): ret = cmaes.minimize(problem.fun, max_evaluations = 100000, bounds = problem.bounds) if best > ret.fun or i % 100 == 99: print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}" .format(i+1, dtime(t0), best, ret.fun)) best = min(ret.fun, best)
def test_cma_python(problem, num):
    """Run the Python CMA-ES `num` times on `problem`, printing the
    evaluation count / status of each run and the running best value."""
    best = math.inf
    start = time.perf_counter()
    for run in range(num):
        ret = cmaes.minimize(problem.fun, bounds=problem.bounds)
        print(ret.nfev, ret.status)
        best = min(ret.fun, best)
        print("{0}: time = {1:.1f} best = {2:.1f} f(xmin) = {3:.1f}".format(
            run + 1, dtime(start), best, ret.fun))
def parallel_execution_example(dim, n):
    """Compare wall-clock time of CMA-ES on RastriginMean with parallel
    versus sequential objective evaluation; prints f(xmin) and duration."""
    maxEval = 10000
    popsize = 32
    testfun = RastriginMean(dim, n)
    sdevs = [1] * dim

    # Parallel objective evaluation.
    t0 = time.perf_counter()
    ret = cmaes.minimize(
        testfun.fun,
        testfun.bounds,
        max_evaluations=maxEval,
        popsize=popsize,
        input_sigma=sdevs,
        is_parallel=True,
    )
    print(ret.fun, dtime(t0))

    # Sequential objective evaluation, same settings.
    t0 = time.perf_counter()
    ret = cmaes.minimize(
        testfun.fun,
        testfun.bounds,
        max_evaluations=maxEval,
        popsize=popsize,
        input_sigma=sdevs,
        is_parallel=False,
    )
    print(ret.fun, dtime(t0))
def minimize(self, fun, bounds, guess=None, sdevs=0.3,
             rg=Generator(MT19937()), store=None):
    """Run cmaes.minimize with this object's configuration.

    Falls back to self.guess when no explicit guess is given; budget,
    popsize, stop threshold, run id and update gap come from the
    configuration/store. Returns (best x, best f(x), evaluation count).
    """
    start = self.guess if guess is None else guess
    result = cmaes.minimize(
        fun,
        bounds,
        start,
        input_sigma=sdevs,
        max_evaluations=self.max_eval_num(store),
        popsize=self.popsize,
        stop_fitness=self.stop_fitness,
        rg=rg,
        runid=self.get_count_runs(store),
        update_gap=self.update_gap,
    )
    return result.x, result.fun, result.nfev
def test_rosen_delayed():
    """Minimize the 2-d Rosenbrock function using delayed state update
    with as many workers as the population size, and check the result."""
    popsize = 8
    dim = 2
    testfun = Rosen(dim)
    sdevs = [1.0] * dim
    max_eval = 10000
    limit = 0.00001
    # Retry loop: a stochastic run may fail to reach the target once.
    for _ in range(5):
        wrapper = Wrapper(testfun.fun, dim)  # monitors function evaluations
        ret = cmaes.minimize(
            wrapper.eval,
            testfun.bounds,
            input_sigma=sdevs,
            max_evaluations=max_eval,
            popsize=popsize,
            workers=popsize,
            delayed_update=True,
        )
        if limit > ret.fun:
            break
    assert limit > ret.fun  # optimization target not reached
    assert max_eval + popsize >= ret.nfev  # too much function calls
    assert max_eval // popsize + 2 > ret.nit  # too much iterations
# standard evolutionary algorithms
if evolutionary and __name__ == '__main__':
    mp.freeze_support()
    from scipy.optimize import Bounds
    # FIX: cmaes and de were imported twice more on separate lines;
    # the combined import below already provides both.
    from fcmaes import decpp, cmaescpp, bitecpp, de, cmaes

    bounds = Bounds([0.4, 0, 1.5, 0.07, 3, 1e-5, 1e-5, 0.6],
                    [0.8, 0.3, 10, 0.1, 5.99, 0.75, 0.45, 0.95])
    # Alternative optimizers, kept for reference:
    #ret = bitecpp.minimize(obj_f, bounds, max_evaluations = 20000)
    # for cmaescpp, cmaes and de with multiple workers set n_jobs=1 in XGBRegressor
    #ret = cmaescpp.minimize(obj_f, bounds, popsize=32, max_evaluations = 20000, workers=mp.cpu_count())
    #ret = decpp.minimize(obj_f, 8, bounds, popsize=16, max_evaluations = 20000)
    # delayed state update
    ret = cmaes.minimize(obj_f, bounds, popsize=16, max_evaluations=20000,
                         workers=mp.cpu_count(), delayed_update=True)
    #ret = de.minimize(obj_f, 8, bounds, popsize = 16, max_evaluations = 20000, workers=mp.cpu_count())
# standard evolutionary algorithms
if evolutionary and __name__ == '__main__':
    mp.freeze_support()
    from scipy.optimize import Bounds
    # FIX: cmaes and de were imported twice more on separate lines;
    # the combined import below already provides both.
    from fcmaes import decpp, cmaescpp, bitecpp, de, cmaes

    bounds = Bounds([0.4, 0, 1.5, 0.07, 3, 1e-5, 1e-5, 0.6],
                    [0.8, 0.3, 10, 0.1, 5.99, 0.75, 0.45, 0.95])
    problem = cv_problem(obj_f, bounds)
    # Alternative optimizers, kept for reference:
    #ret = bitecpp.minimize(problem.fun, problem.bounds, max_evaluations = 20000)
    # for cmaescpp, cmaes and de with multiple workers set n_jobs=1 in XGBRegressor
    #ret = cmaescpp.minimize(problem.fun, problem.bounds, popsize=32, max_evaluations = 20000, workers=mp.cpu_count())
    #ret = decpp.minimize(problem.fun, problem.dim, problem.bounds, popsize=16, max_evaluations = 20000)
    # delayed state update
    ret = cmaes.minimize(problem.fun, problem.bounds, popsize=16, max_evaluations=20000,
                         workers=mp.cpu_count(), delayed_update=True)
    #ret = de.minimize(problem.fun, problem.dim, problem.bounds, popsize = 16, max_evaluations = 20000, workers=mp.cpu_count())