import numpy as np
import pygmo as pg


def sa(objective_function, Ts=10.0, Tf=0.1, n_T_adj=10, n_range_adj=10,
       bin_size=10, start_range=1.0, pop_size=15):
    """
    Simulated Annealing (Corana's version).

    Parameters
    - Ts (float): starting temperature
    - Tf (float): final temperature
    - n_T_adj (int): number of temperature adjustments in the annealing schedule
    - n_range_adj (int): number of adjustments of the search range performed at a constant temperature
    - bin_size (int): number of mutations used to compute the acceptance rate
    - start_range (float): starting range for mutating the decision vector
    - pop_size (int): number of individuals in the population
    """
    problem = pg.problem(objective_function)
    algorithm = pg.algorithm(
        pg.simulated_annealing(Ts=Ts, Tf=Tf, n_T_adj=n_T_adj,
                               n_range_adj=n_range_adj, bin_size=bin_size,
                               start_range=start_range))
    algorithm.set_verbosity(50)
    population = pg.population(prob=problem, size=pop_size)
    solution = algorithm.evolve(population)

    # get_log() returns a list of tuples with the following structure:
    # - Fevals (int): number of function evaluations made
    # - Best (float): the best fitness found so far
    # - Current (float): last fitness sampled
    # - Mean range (float): the mean search range across the decision vector
    #   components (relative to the box-bounds width)
    # - Temperature (float): the current temperature
    logs = np.array(algorithm.extract(pg.simulated_annealing).get_log())[:, (0, 1)]
    # keep only function evaluations and best fitness

    return {
        'champion solution': solution.champion_f,
        'champion coordinates': solution.champion_x,
        'log': logs,
        'algorithm': algorithm.get_name(),
        'problem': objective_function.get_name(),
    }
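# A minimal usage sketch of the sa() wrapper above. The sphere_udp class and
# its parameter values are hypothetical, defined here only so the example is
# self-contained; any UDP exposing fitness(), get_bounds() and get_name()
# would work the same way.
import numpy as np
import pygmo as pg


class sphere_udp:
    """Hypothetical 5-dimensional sphere problem, used only for illustration."""

    def fitness(self, x):
        return [float(np.sum(np.asarray(x) ** 2))]

    def get_bounds(self):
        return ([-5.0] * 5, [5.0] * 5)

    def get_name(self):
        return "Sphere (d=5)"


result = sa(sphere_udp(), Ts=10.0, Tf=0.1, pop_size=20)
print(result['algorithm'], 'on', result['problem'])
print('best fitness:', result['champion solution'])
print('(fevals, best) log entries:', result['log'].shape[0])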
from math import sqrt

from pygmo import island as pg_island
from pygmo import problem, rosenbrock, simulated_annealing


def benchmark_simulated_annealing():
    island = pg_island(algo=simulated_annealing(Ts=1., Tf=.01),
                       prob=problem(rosenbrock(5)),
                       size=10)
    N = 10
    print('Simulated Annealing (pop. size {})'.format(
        island.get_population().get_f().size))
    for k in range(N):
        # island.evolve() is asynchronous, so wait() before reading the population
        island.evolve()
        island.wait()
        # RMS deviation of the champion from the known Rosenbrock optimum
        d = sqrt(float(((island.get_population().champion_x -
                         rosenbrock(5).best_known()) ** 2).mean()))
        print('SA {:2}/{}: best fitness {:9.2f}, deviation {:9.2f}, fevals {}'.format(
            k, N, float(island.get_population().champion_f[0]), d,
            island.get_population().problem.get_fevals()))
def benchmark_simulated_annealing():
    # B2_UDP, getLowerBound and getUpperBound are project-specific helpers
    # defined elsewhere in this codebase.
    island = pg_island(
        algo=simulated_annealing(Ts=1., Tf=.01),
        prob=problem(B2_UDP(getLowerBound(), getUpperBound(),
                            '../../../../../sbml/b2.xml')),
        size=10)
    N = 10

    import arrow
    time_start = arrow.utcnow()
    print('Simulated Annealing (pop. size {})'.format(
        island.get_population().get_f().size))
    for k in range(N):
        island.evolve()
        island.wait()
        delta_t = arrow.utcnow() - time_start
        print('SA {:2}/{}: best fitness {:9.2f}, fevals {}, duration {}'.format(
            k, N, float(island.get_population().champion_f[0]),
            island.get_population().problem.get_fevals(), delta_t))
import pygmo as pg

if __name__ == "__main__":
    #prob = pg.problem(pg.rosenbrock(dim = 30))
    udp = pg.schwefel(dim=19)
    prob = pg.problem(udp)
    #pop1 = pg.population(prob, size=73)
    #algo = pg.algorithm(pg.sade(gen=500))
    #algo = pg.algorithm(pg.scipy_optimize(method="Nelder-Mead"))
    algo = pg.algorithm(pg.simulated_annealing())
    #algo.set_verbosity(10)

    for i in range(3):
        algo.set_verbosity(1)
        pop = pg.population(prob=prob, size=22)
        pop = algo.evolve(pop)
        print(pop.champion_f)

    #archi = pg.archipelago(n=4,algo=algo, pop=pop1)
    archi = pg.archipelago(n=4, algo=algo, prob=prob, pop_size=22)
    print(archi)
    archi.evolve()
    archi.wait()
    archi.wait_check()
    print(archi)


import pygmo as pg
# The user-defined problem
udp = pg.schwefel(dim=20)
# The pygmo problem
prob = pg.problem(udp)
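# The fragment above stops after constructing the Schwefel problem. A minimal
# continuation of the usual pygmo workflow; the parameter values below are
# illustrative, not taken from the original script.
algo = pg.algorithm(pg.simulated_annealing(Ts=10., Tf=.1, n_T_adj=20))
algo.set_verbosity(100)

# a population of 20 candidate solutions on the problem defined above
pop = pg.population(prob, size=20)

# evolve once and inspect the champion
pop = algo.evolve(pop)
print(pop.champion_f, pop.champion_x)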
def _parameter_value_selection(self, trajectories, _maxiter=100):
    '''
    Given a set of trajectories, it is assumed that the raw trajectories have
    already been processed through _approximate_trajectory_partitioning()
    before calling this.

    Break them up into line segments and wrap them in line_segment objects,
    then use simulated annealing to find an optimal value for epsilon, and
    from it minlns. Note that dual annealing is used in the scipy branch, as
    it should be superior to traditional simulated annealing for this task.
    The optimal epsilon and the minlns range are returned.

    _maxiter: the maximum number of iterations for the simulated annealing
    process.
    '''
    assert len(trajectories) > 0

    lines = list()
    for t in trajectories:
        _lss = self._convert_trajectory_to_line_segments(t)
        lines.extend(_lss)

    # First, find the optimal epsilon with these functions and simulated annealing.
    def H(line_segments, epsilon):
        '''
        line_segments: the set of line segments
        epsilon: the distance used to compute the epsilon neighborhood
        '''
        def p(x, line_segments, epsilon):
            '''
            x: a single line segment to find the epsilon neighborhood around
            line_segments: the set of line segments where neighbors are found
            epsilon: the distance used to compute the epsilon neighborhood
            '''
            Ne_xi = len(x.get_epsilon_neighborhood(line_segments, epsilon))
            Ne_xj_sum = 0
            for ls in line_segments:
                Ne_xj = len(ls.get_epsilon_neighborhood(line_segments, epsilon))
                Ne_xj_sum += Ne_xj
            return float(Ne_xi / Ne_xj_sum)

        import numpy as np
        ret = 0
        for ls in line_segments:
            ret += -p(ls, line_segments, epsilon) * np.log2(p(ls, line_segments, epsilon))
        return ret

    def func(x):
        '''a wrapper function to use with scipy.optimize.dual_annealing'''
        return H(lines, epsilon=x)

    # Find the largest and smallest distances between consecutive line segments,
    # to use as bounds for the annealing search.
    ds = list()
    prev = None
    for l in lines:
        if prev is None:
            prev = l
            continue
        ds.append(self._distance(prev.get_line_segment(), l.get_line_segment()))
        prev = l
    _max = max(ds)
    _min = min(ds)

    # Then find the optimal epsilon with simulated annealing.
    optimal_epsilon = None
    #approach = 'scipy'
    approach = 'pygmo'
    if approach == 'scipy':
        from scipy.optimize import dual_annealing
        result = dual_annealing(func, [[_min, _max]], maxiter=_maxiter)
        optimal_epsilon = result.x[0]
    if approach == 'pygmo':
        import pygmo as pg

        algo = pg.algorithm(pg.simulated_annealing())

        class epsilon_problem:
            def fitness(self, x):
                return [func(x)]

            def get_bounds(self):
                return ([_min], [_max])

        prob = pg.problem(epsilon_problem())
        pop = pg.population(prob, 1300)
        # run the annealing on the population before reading off the champion
        pop = algo.evolve(pop)
        optimal_epsilon = pop.champion_x[0]

    # With the optimal epsilon in hand, compute the average size of the
    # epsilon neighborhoods.
    Nes = list()
    for l in lines:
        Nes.append(len(l.get_epsilon_neighborhood(lines, optimal_epsilon)))
    avg_Ne = float(sum(Nes) / len(Nes))

    # Given this average, a range for minlns can be formulated.
    minlns_range = [avg_Ne + 1, avg_Ne + 3]

    return [optimal_epsilon, minlns_range]
#end TRACLUS
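# The 'scipy' branch above relies on scipy.optimize.dual_annealing. A
# self-contained sketch of that call pattern on a toy 1-D objective; the
# function and bounds are stand-ins for H(lines, epsilon=x) and [[_min, _max]].
import numpy as np
from scipy.optimize import dual_annealing


def toy_objective(x):
    # x arrives as a length-1 array, just as in the wrapper func() above
    return float((x[0] - 2.0) ** 2 + np.sin(5.0 * x[0]))


result = dual_annealing(toy_objective, bounds=[(0.0, 5.0)], maxiter=100)
print('optimal x:', result.x[0], 'objective value:', result.fun)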
def __call__(self, function):
    scanner_options = {
        'sade': dict(gen=self.gen, variant=self.variant, variant_adptv=self.variant_adptv,
                     ftol=self.ftol, xtol=self.xtol, memory=self.memory, seed=self.seed),
        'gaco': dict(gen=self.gen, ker=self.ker, q=self.q, oracle=self.oracle, acc=self.acc,
                     threshold=self.threshold, n_gen_mark=self.n_gen_mark, impstop=self.impstop,
                     evalstop=self.evalstop, focus=self.focus, memory=self.memory, seed=self.seed),
        'maco': dict(gen=self.gen, ker=self.ker, q=self.q, threshold=self.threshold,
                     n_gen_mark=self.n_gen_mark, evalstop=self.evalstop, focus=self.focus,
                     memory=self.memory, seed=self.seed),
        'gwo': dict(gen=self.gen, seed=self.seed),
        'bee_colony': dict(gen=self.gen, limit=self.limit, seed=self.seed),
        'de': dict(gen=self.gen, F=self.F, CR=self.CR, variant=self.variant,
                   ftol=self.ftol, xtol=self.xtol, seed=self.seed),
        'sea': dict(gen=self.gen, seed=self.seed),
        'sga': dict(gen=self.gen, cr=self.cr, eta_c=self.eta_c, m=self.m, param_m=self.param_m,
                    param_s=self.param_s, crossover=self.crossover, mutation=self.mutation,
                    selection=self.selection, seed=self.seed),
        'de1220': dict(gen=self.gen, allowed_variants=self.allowed_variants,
                       variant_adptv=self.variant_adptv, ftol=self.ftol, xtol=self.xtol,
                       memory=self.memory, seed=self.seed),
        'cmaes': dict(gen=self.gen, cc=self.cc, cs=self.cs, c1=self.c1, cmu=self.cmu,
                      sigma0=self.sigma0, ftol=self.ftol, xtol=self.xtol, memory=self.memory,
                      force_bounds=self.force_bounds, seed=self.seed),
        'moead': dict(gen=self.gen, weight_generation=self.weight_generation,
                      decomposition=self.decomposition, neighbours=self.neighbours,
                      CR=self.CR, F=self.F, eta_m=self.eta_m, realb=self.realb,
                      limit=self.limit, preserve_diversity=self.preserve_diversity,
                      seed=self.seed),
        'compass_search': dict(max_fevals=self.max_fevals, start_range=self.start_range,
                               stop_range=self.stop_range, reduction_coeff=self.reduction_coeff),
        'simulated_annealing': dict(Ts=self.Ts, Tf=self.Tf, n_T_adj=self.n_T_adj,
                                    n_range_adj=self.n_range_adj, bin_size=self.bin_size,
                                    start_range=self.start_range, seed=self.seed),
        'pso': dict(gen=self.gen, omega=self.omega, eta1=self.eta1, eta2=self.eta2,
                    max_vel=self.max_vel, variant=self.variant, neighb_type=self.neighb_type,
                    neighb_param=self.neighb_param, memory=self.memory, seed=self.seed),
        'pso_gen': dict(gen=self.gen, omega=self.omega, eta1=self.eta1, eta2=self.eta2,
                        max_vel=self.max_vel, variant=self.variant, neighb_type=self.neighb_type,
                        neighb_param=self.neighb_param, memory=self.memory, seed=self.seed),
        'nsga2': dict(gen=self.gen, cr=self.cr, eta_c=self.eta_c, m=self.m, eta_m=self.eta_m,
                      seed=self.seed),
        'nspso': dict(gen=self.gen, omega=self.omega, c1=self.c1, c2=self.c2, chi=self.chi,
                      v_coeff=self.v_coeff, leader_selection_range=self.leader_selection_range,
                      diversity_mechanism=self.diversity_mechanism, memory=self.memory,
                      seed=self.seed),
        'mbh': dict(algo=self.algo, stop=self.stop, perturb=self.perturb, seed=self.seed),
        'cstrs_self_adaptive': dict(iters=self.iters, algo=self.algo, seed=self.seed),
        'ihs': dict(gen=self.gen, phmcr=self.phmcr, ppar_min=self.ppar_min,
                    ppar_max=self.ppar_max, bw_min=self.bw_min, bw_max=self.bw_max,
                    seed=self.seed),
        'xnes': dict(gen=self.gen, eta_mu=self.eta_mu, eta_sigma=self.eta_sigma,
                     eta_b=self.eta_b, sigma0=self.sigma0, ftol=self.ftol, xtol=self.xtol,
                     memory=self.memory, force_bounds=self.force_bounds, seed=self.seed)
    }

    if self.log_data:
        xl = []
        yl = []
    log_data = self.log_data

    # thin pygmo UDP wrapper around the scanned function
    class interf_function:
        def __init__(self, dim):
            self.dim = dim

        def fitness(self, x):
            x = np.expand_dims(x, axis=0)
            y = function(x)
            # x = x[0]
            y = y.tolist()
            if log_data:
                xl.append(x)
                yl.append(y)
            # print(x, y[0])
            return y[0]

        if function.is_differentiable():
            def gradient(self, x):
                x = np.expand_dims(x, axis=0)
                g = function(x)
                g = g.tolist()
                return g[0]

        def get_bounds(self):
            lb = []
            ub = []
            bounds = function.get_ranges()
            # warning: check for infinities
            for i in range(len(bounds)):
                lb.append(bounds[i, 0])
                ub.append(bounds[i, 1])
            r = (np.array(lb), np.array(ub))
            return r

    # I need to call pygmo functions directly
    prob = pg.problem(interf_function(function))
    # print(prob.get_thread_safety())

    if self.scanner == "sade":
        # I need a dictionary with algorithms and options
        algo = pg.algorithm(pg.sade(**scanner_options[self.scanner]))
    elif self.scanner == "gaco":
        algo = pg.algorithm(pg.gaco(**scanner_options[self.scanner]))
    # elif self.scanner == "maco":  # is not implemented though in webpage; looks it is
    #     algo = pg.algorithm(pg.maco(**scanner_options[self.scanner]))
    elif self.scanner == "gwo":
        algo = pg.algorithm(pg.gwo(**scanner_options[self.scanner]))
    elif self.scanner == "bee_colony":
        algo = pg.algorithm(pg.bee_colony(**scanner_options[self.scanner]))
    elif self.scanner == "de":
        algo = pg.algorithm(pg.de(**scanner_options[self.scanner]))
    elif self.scanner == "sea":
        algo = pg.algorithm(pg.sea(**scanner_options[self.scanner]))
    elif self.scanner == "sga":
        algo = pg.algorithm(pg.sga(**scanner_options[self.scanner]))
    elif self.scanner == "de1220":
        algo = pg.algorithm(pg.de1220(**scanner_options[self.scanner]))
    elif self.scanner == "cmaes":
        algo = pg.algorithm(pg.cmaes(**scanner_options[self.scanner]))
    # elif self.scanner == "moead":  # multiobjective algorithm
    #     algo = pg.algorithm(pg.moead(**scanner_options[self.scanner]))
    elif self.scanner == "compass_search":
        algo = pg.algorithm(pg.compass_search(**scanner_options[self.scanner]))
    elif self.scanner == 'simulated_annealing':
        algo = pg.algorithm(pg.simulated_annealing(**scanner_options[self.scanner]))
    elif self.scanner == 'pso':
        algo = pg.algorithm(pg.pso(**scanner_options[self.scanner]))
    elif self.scanner == 'pso_gen':
        algo = pg.algorithm(pg.pso_gen(**scanner_options[self.scanner]))
    # elif self.scanner == 'nsga2':  # multiobjective algorithm
    #     algo = pg.algorithm(pg.nsga2(**scanner_options[self.scanner]))
    # elif self.scanner == 'nspso':  # is not implemented though in webpage; looks it is
    #     algo = pg.algorithm(pg.nspso(**scanner_options[self.scanner]))
    elif self.scanner == 'mbh':
        if scanner_options[self.scanner]['algo'] == 'de':
            algo = pg.algorithm(pg.mbh(pg.algorithm(pg.de(**scanner_options['de']))))
    # elif self.scanner == 'ihs':  # does not work
    #     algo = pg.algorithm(ihs(**scanner_options[self.scanner]))
    # elif self.scanner == 'xnes':  # does not work
    #     algo = pg.algorithm(xnes(**scanner_options[self.scanner]))
    #     uda = algo.extract(xnes)
    else:
        # `algorithms` is the module-level list of supported scanner names
        print('The ' + self.scanner + ' algorithm is not implemented. The '
              'list of algorithms available is', algorithms)
        sys.exit()

    # add verbosity flag
    if self.verbose > 1:
        algo.set_verbosity(self.verbose)

    pop = pg.population(prob, self.size)
    if self.verbose > 9:
        print('prob', prob)
    opt = algo.evolve(pop)
    if self.verbose > 9:
        print('algo', algo)
    # best_x = np.expand_dims(opt.champion_x, axis=0)
    # best_fitness = np.expand_dims(opt.get_f()[opt.best_idx()], axis=0)
    best_x = np.expand_dims(opt.champion_x, axis=0)
    best_fitness = np.expand_dims(opt.champion_f, axis=0)
    if self.verbose > 0:
        print('best fit:', best_x, best_fitness)

    if self.log_data:
        x = np.squeeze(xl, axis=(1,))
        y = np.squeeze(yl, axis=(2,))

    if self.log_data:
        return (x, y)
    else:
        return (best_x, best_fitness)
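# The if/elif chain above only maps self.scanner onto the matching pygmo
# constructor and unpacks its option dict. A compact sketch of the same
# dispatch pattern; the entries and option values below are illustrative,
# not the scanner_options defaults used above.
import pygmo as pg

constructors = {
    'de': pg.de,
    'pso': pg.pso,
    'simulated_annealing': pg.simulated_annealing,
    'compass_search': pg.compass_search,
}
options = {
    'de': dict(gen=100, F=0.8, CR=0.9),
    'pso': dict(gen=100, omega=0.7298),
    'simulated_annealing': dict(Ts=10., Tf=.1, n_T_adj=10),
    'compass_search': dict(max_fevals=2000, start_range=0.1),
}

scanner = 'simulated_annealing'
algo = pg.algorithm(constructors[scanner](**options[scanner]))
print(algo.get_name())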
#b=pg.mp_bfe(), b=pg.bfe(udbfe=multi_bfre2()), size=16)
arc.push_back(isl)

# addIsland is a project-specific helper that pushes an island onto the archipelago
for i in range(2):
    addIsland(arc=archi,
              algo=pg.algorithm(pg.cstrs_self_adaptive(iters=1000, algo=pg.de(gen=50))))
    addIsland(arc=archi,
              algo=pg.algorithm(pg.cstrs_self_adaptive(iters=1000, algo=pg.pso(gen=50))))
    addIsland(arc=archi,
              algo=pg.algorithm(pg.cstrs_self_adaptive(iters=1000, algo=pg.simulated_annealing())))
    addIsland(arc=archi,
              algo=pg.algorithm(pg.cstrs_self_adaptive(iters=1000, algo=pg.sea(gen=50))))

# ThreadPool comes from multiprocessing.pool; pandas is imported as pd elsewhere in this script
bpool = ThreadPool(processes=3)


def logger():
    #while True:
    print(archi.get_champions_x())
    print(sorted(list(map(lambda a: a[0], archi.get_champions_f()))))
    #print(archi.get_champions_f())
    jx = pd.DataFrame(archi.get_champions_x()).to_json(orient='values')
    jf = pd.DataFrame(archi.get_champions_f()).to_json(orient='values')