import pygmo as pg
from math import exp

# Rocket_build, Traj_Optimization2, init_trajectory, Dynamic_pressure, and the
# mission/general/trajectory objects and g0 are assumed defined at module level.
def design_model(x, rocket, stage):
    # Assign the candidate delta-V split to each stage.
    DV_sum = 0
    for i in range(len(stage)):
        # stage[i].TWR = x[i]
        stage[i].DV_Ratio = x[i]
        DV_sum = DV_sum + stage[i].DV_Ratio

    # Rebuild the vehicle for the new delta-V split.
    rocket, stage = Rocket_build(mission, rocket, stage, general)
    # Information(mission, rocket, stage)
    # print(rocket.mass)
    rocket.count = rocket.count + 1

    # Inner loop: optimize the trajectory for this vehicle with generational PSO.
    prob = pg.problem(Traj_Optimization2())
    algo = pg.algorithm(pg.pso_gen(gen=50))  # choice of heuristic algorithm and number of generations
    pop = pg.population(prob, 100)           # choice of number of individuals
    pop = algo.evolve(pop)                   # evolve the population
    y = pop.champion_x                       # best trajectory parameters found

    trajectory.alpha[0] = y[0]
    trajectory.alpha[1] = y[1]
    trajectory.coast_time = y[2]
    trajectory.pitch = y[3]
    trajectory.l[0] = y[4]
    trajectory.l[1] = y[5]
    trajectory.l[2] = y[6]
    trajectory.aux = y[7]

    Data, new, Obj = init_trajectory(mission, rocket, stage, trajectory, general)

    # Path constraints: axial acceleration below 5 g and dynamic pressure below 50 kPa.
    if general.constraints:
        for i in range(len(Data[0])):
            if (Data[12][i] / Data[5][i] / g0 > 5
                    or Dynamic_pressure(1.225 * exp(-Data[2][i] / 8440), Data[3][i]) > 50000):
                return 1e20  # large penalty for infeasible designs

    rocket.optim2 = True
    Obj = pop.champion_f[0]  # scalar objective of the inner optimization
    # print(y, Obj)
    if Obj == 0:
        print(rocket.count, rocket.mass, "Worked")
        print(stage[0].TWR, stage[1].TWR, stage[2].TWR,
              stage[0].DV_Ratio, stage[1].DV_Ratio, stage[2].DV_Ratio)
        return rocket.mass
    else:
        print(rocket.count, rocket.mass)
        return rocket.mass * 10  # penalize designs whose trajectory did not converge
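# A minimal sketch of how design_model could be wrapped as a pygmo UDP for the
# outer mass-minimization loop. The class name and the bounds on the per-stage
# delta-V ratios are illustrative assumptions, not taken from the original code.
class MassOptimization:
    def __init__(self, rocket, stage):
        self.rocket = rocket
        self.stage = stage

    def fitness(self, x):
        # design_model already folds constraint violations into the objective
        return [design_model(x, self.rocket, self.stage)]

    def get_bounds(self):
        n = 3  # assumed three-stage vehicle, one delta-V ratio per stage
        return ([0.1] * n, [0.8] * n)

# prob = pg.problem(MassOptimization(rocket, stage))
# pop = pg.algorithm(pg.pso_gen(gen=30)).evolve(pg.population(prob, 50))
# print(pop.champion_f, pop.champion_x)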
import numpy as np
import pygmo as pg

def pso(objective_function, gen=2000, omega=.7, eta1=0.5, eta2=4, max_vel=.05,
        variant=6, neighb_type=2, neighb_param=4, memory=False, pop_size=15):
    """
    Particle Swarm Optimization (generational) is identical to pso, but updates
    the velocities of every particle before the new particle positions are
    computed (taking into consideration all updated particle velocities). Each
    particle is thus evaluated on the same seed within a generation, as opposed
    to standard PSO, which evaluates one particle at a time. Consequently, the
    generational PSO algorithm is suited for stochastic optimization problems.

    Parameters:
    - objective_function: instance of the objective-function class
    - gen (int): number of generations
    - omega (float): inertia weight (or constriction factor)
    - eta1 (float): social component
    - eta2 (float): cognitive component
    - max_vel (float): maximum allowed particle velocity (normalized with
      respect to the bounds width)
    - variant (int): algorithmic variant
    - neighb_type (int): swarm topology (defining each particle's neighbours)
    - neighb_param (int): topology parameter (defines how many neighbours to consider)
    - memory (bool): when True, velocities are not reset between successive
      calls to the evolve method
    - pop_size (int): number of individuals
    """
    problem = pg.problem(objective_function)
    algorithm = pg.algorithm(
        pg.pso_gen(gen=gen, omega=omega, eta1=eta1, eta2=eta2, max_vel=max_vel,
                   variant=variant, neighb_type=neighb_type,
                   neighb_param=neighb_param, memory=memory))
    algorithm.set_verbosity(50)

    solution = pg.population(prob=problem, size=pop_size)
    solution = algorithm.evolve(solution)

    # get_log() returns a list of tuples with the following structure:
    # - Gen (int): generation number
    # - Fevals (int): number of function evaluations made
    # - gbest (float): best fitness found so far by the swarm
    # - Mean Vel. (float): average particle velocity (normalized)
    # - Mean lbest (float): average fitness of the current particle locations
    # - Avg. Dist. (float): average distance between particles (normalized)
    logs = np.array(algorithm.extract(pg.pso_gen).get_log())[:, (
        1, 2)]  # keep only function evaluations and best fitness

    return {
        'champion solution': solution.champion_f,
        'champion coordinates': solution.champion_x,
        'log': logs,
        'algorithm': algorithm.get_name(),
        'problem': objective_function.get_name()
    }
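# Usage sketch for the wrapper above. The Sphere UDP is invented here for
# illustration; the wrapper only requires fitness, get_bounds and get_name.
class Sphere:
    def fitness(self, x):
        return [sum(xi ** 2 for xi in x)]

    def get_bounds(self):
        return ([-5.0] * 4, [5.0] * 4)

    def get_name(self):
        return "Sphere, 4-D"

result = pso(Sphere(), gen=200, pop_size=30)
print(result['problem'], result['champion solution'])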
    # ... tail of model(x): report the residual and return its magnitude
    print(Obj, gf)
    return abs(Obj)

# Requires: import numpy as np; import pygmo as pg;
# from scipy.integrate import solve_ivp

class Optimization:
    def fitness(self, x):
        fit = model(x)
        return [fit]

    def get_bounds(self):
        return ([-1, -1, -1, 250], [1, 1, 1, 400])

prob = pg.problem(Optimization())
algo = pg.algorithm(pg.pso_gen(gen=300))  # choice of heuristic algorithm and number of generations
pop = pg.population(prob, 100)            # choice of number of individuals
pop = algo.evolve(pop)                    # evolve the population
y = pop.champion_x                        # best solution found

# Initial costates and final time recovered from the champion.
P_lx = 0
P_ly = y[0]
P_lv = y[1]
P_lg = y[2]
tf = y[3]

# x0, y0, V0, ga0 and the dynamics traj(t, s) are assumed defined elsewhere.
sol = solve_ivp(traj, [0, tf], [x0, y0, V0, ga0, P_lx, P_ly, P_lv, P_lg],
                events=[], t_eval=np.linspace(0, tf, 1000))
xf = sol.y[0][-1]
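# The solve_ivp call above passes events=[]. If the integration should stop at
# a boundary condition instead of a fixed tf, SciPy supports terminal events;
# a sketch with a hypothetical event on the flight-path angle (state index 3):
def gamma_crossing(t, s):
    return s[3]

gamma_crossing.terminal = True   # stop integration at the first zero crossing
gamma_crossing.direction = -1    # trigger only when gamma is decreasing

# sol = solve_ivp(traj, [0, tf], s0, events=[gamma_crossing],
#                 t_eval=np.linspace(0, tf, 1000))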
# Requires numpy as np, pygmo as pg and sys; `algorithms` (the list printed on
# error below) is assumed defined at module level.
def __call__(self, function):
    # Map every supported pygmo algorithm to its option dictionary.
    scanner_options = {
        'sade': dict(gen=self.gen, variant=self.variant, variant_adptv=self.variant_adptv,
                     ftol=self.ftol, xtol=self.xtol, memory=self.memory, seed=self.seed),
        'gaco': dict(gen=self.gen, ker=self.ker, q=self.q, oracle=self.oracle, acc=self.acc,
                     threshold=self.threshold, n_gen_mark=self.n_gen_mark, impstop=self.impstop,
                     evalstop=self.evalstop, focus=self.focus, memory=self.memory, seed=self.seed),
        'maco': dict(gen=self.gen, ker=self.ker, q=self.q, threshold=self.threshold,
                     n_gen_mark=self.n_gen_mark, evalstop=self.evalstop, focus=self.focus,
                     memory=self.memory, seed=self.seed),
        'gwo': dict(gen=self.gen, seed=self.seed),
        'bee_colony': dict(gen=self.gen, limit=self.limit, seed=self.seed),
        'de': dict(gen=self.gen, F=self.F, CR=self.CR, variant=self.variant,
                   ftol=self.ftol, xtol=self.xtol, seed=self.seed),
        'sea': dict(gen=self.gen, seed=self.seed),
        'sga': dict(gen=self.gen, cr=self.cr, eta_c=self.eta_c, m=self.m, param_m=self.param_m,
                    param_s=self.param_s, crossover=self.crossover, mutation=self.mutation,
                    selection=self.selection, seed=self.seed),
        'de1220': dict(gen=self.gen, allowed_variants=self.allowed_variants,
                       variant_adptv=self.variant_adptv, ftol=self.ftol, xtol=self.xtol,
                       memory=self.memory, seed=self.seed),
        'cmaes': dict(gen=self.gen, cc=self.cc, cs=self.cs, c1=self.c1, cmu=self.cmu,
                      sigma0=self.sigma0, ftol=self.ftol, xtol=self.xtol, memory=self.memory,
                      force_bounds=self.force_bounds, seed=self.seed),
        'moead': dict(gen=self.gen, weight_generation=self.weight_generation,
                      decomposition=self.decomposition, neighbours=self.neighbours,
                      CR=self.CR, F=self.F, eta_m=self.eta_m, realb=self.realb,
                      limit=self.limit, preserve_diversity=self.preserve_diversity,
                      seed=self.seed),
        'compass_search': dict(max_fevals=self.max_fevals, start_range=self.start_range,
                               stop_range=self.stop_range, reduction_coeff=self.reduction_coeff),
        'simulated_annealing': dict(Ts=self.Ts, Tf=self.Tf, n_T_adj=self.n_T_adj,
                                    n_range_adj=self.n_range_adj, bin_size=self.bin_size,
                                    start_range=self.start_range, seed=self.seed),
        'pso': dict(gen=self.gen, omega=self.omega, eta1=self.eta1, eta2=self.eta2,
                    max_vel=self.max_vel, variant=self.variant, neighb_type=self.neighb_type,
                    neighb_param=self.neighb_param, memory=self.memory, seed=self.seed),
        'pso_gen': dict(gen=self.gen, omega=self.omega, eta1=self.eta1, eta2=self.eta2,
                        max_vel=self.max_vel, variant=self.variant, neighb_type=self.neighb_type,
                        neighb_param=self.neighb_param, memory=self.memory, seed=self.seed),
        'nsga2': dict(gen=self.gen, cr=self.cr, eta_c=self.eta_c, m=self.m,
                      eta_m=self.eta_m, seed=self.seed),
        'nspso': dict(gen=self.gen, omega=self.omega, c1=self.c1, c2=self.c2, chi=self.chi,
                      v_coeff=self.v_coeff, leader_selection_range=self.leader_selection_range,
                      diversity_mechanism=self.diversity_mechanism, memory=self.memory,
                      seed=self.seed),
        'mbh': dict(algo=self.algo, stop=self.stop, perturb=self.perturb, seed=self.seed),
        'cstrs_self_adaptive': dict(iters=self.iters, algo=self.algo, seed=self.seed),
        'ihs': dict(gen=self.gen, phmcr=self.phmcr, ppar_min=self.ppar_min,
                    ppar_max=self.ppar_max, bw_min=self.bw_min, bw_max=self.bw_max,
                    seed=self.seed),
        'xnes': dict(gen=self.gen, eta_mu=self.eta_mu, eta_sigma=self.eta_sigma,
                     eta_b=self.eta_b, sigma0=self.sigma0, ftol=self.ftol, xtol=self.xtol,
                     memory=self.memory, force_bounds=self.force_bounds, seed=self.seed)
    }

    if self.log_data:
        xl = []
        yl = []
    log_data = self.log_data

    class interf_function:
        """Adapter exposing `function` as a pygmo UDP."""

        def __init__(self, dim):
            self.dim = dim

        def fitness(self, x):
            x = np.expand_dims(x, axis=0)
            y = function(x)
            # x = x[0]
            y = y.tolist()
            if log_data:
                xl.append(x)
                yl.append(y)
            # print(x, y[0])
            return y[0]

        # gradient is only defined when the wrapped function supports it
        if function.is_differentiable():
            def gradient(self, x):
                x = np.expand_dims(x, axis=0)
                g = function(x)
                g = g.tolist()
                return g[0]

        def get_bounds(self):
            lb = []
            ub = []
            bounds = function.get_ranges()
            # warning: should also check for infinities here
            for i in range(len(bounds)):
                lb.append(bounds[i, 0])
                ub.append(bounds[i, 1])
            return (np.array(lb), np.array(ub))

    # pygmo functions need to be called directly
    prob = pg.problem(interf_function(function))
    # print(prob.get_thread_safety())

    # Dispatch on the requested scanner; each branch builds the matching pygmo algorithm.
    if self.scanner == "sade":
        algo = pg.algorithm(pg.sade(**scanner_options[self.scanner]))
    elif self.scanner == "gaco":
        algo = pg.algorithm(pg.gaco(**scanner_options[self.scanner]))
    # elif self.scanner == "maco":  # multi-objective; not wired up yet
    #     algo = pg.algorithm(pg.maco(**scanner_options[self.scanner]))
    elif self.scanner == "gwo":
        algo = pg.algorithm(pg.gwo(**scanner_options[self.scanner]))
    elif self.scanner == "bee_colony":
        algo = pg.algorithm(pg.bee_colony(**scanner_options[self.scanner]))
    elif self.scanner == "de":
        algo = pg.algorithm(pg.de(**scanner_options[self.scanner]))
    elif self.scanner == "sea":
        algo = pg.algorithm(pg.sea(**scanner_options[self.scanner]))
    elif self.scanner == "sga":
        algo = pg.algorithm(pg.sga(**scanner_options[self.scanner]))
    elif self.scanner == "de1220":
        algo = pg.algorithm(pg.de1220(**scanner_options[self.scanner]))
    elif self.scanner == "cmaes":
        algo = pg.algorithm(pg.cmaes(**scanner_options[self.scanner]))
    # elif self.scanner == "moead":  # multi-objective algorithm
    #     algo = pg.algorithm(pg.moead(**scanner_options[self.scanner]))
    elif self.scanner == "compass_search":
        algo = pg.algorithm(pg.compass_search(**scanner_options[self.scanner]))
    elif self.scanner == 'simulated_annealing':
        algo = pg.algorithm(pg.simulated_annealing(**scanner_options[self.scanner]))
    elif self.scanner == 'pso':
        algo = pg.algorithm(pg.pso(**scanner_options[self.scanner]))
    elif self.scanner == 'pso_gen':
        algo = pg.algorithm(pg.pso_gen(**scanner_options[self.scanner]))
    # elif self.scanner == 'nsga2':  # multi-objective algorithm
    #     algo = pg.algorithm(pg.nsga2(**scanner_options[self.scanner]))
    # elif self.scanner == 'nspso':  # multi-objective; not wired up yet
    #     algo = pg.algorithm(pg.nspso(**scanner_options[self.scanner]))
    elif self.scanner == 'mbh':
        if scanner_options[self.scanner]['algo'] == 'de':
            algo = pg.algorithm(pg.mbh(pg.algorithm(pg.de(**scanner_options['de']))))
    # elif self.scanner == 'ihs':  # does not work
    #     algo = pg.algorithm(ihs(**scanner_options[self.scanner]))
    # elif self.scanner == 'xnes':  # does not work
    #     algo = pg.algorithm(xnes(**scanner_options[self.scanner]))
    #     uda = algo.extract(xnes)
    else:
        print('The ' + self.scanner + ' algorithm is not implemented. '
              'The list of algorithms available is', algorithms)
        sys.exit()

    # add verbosity flag
    if self.verbose > 1:
        algo.set_verbosity(self.verbose)
    pop = pg.population(prob, self.size)
    if self.verbose > 9:
        print('prob', prob)
    opt = algo.evolve(pop)
    if self.verbose > 9:
        print('algo', algo)
    # best_x = np.expand_dims(opt.champion_x, axis=0)
    # best_fitness = np.expand_dims(opt.get_f()[opt.best_idx()], axis=0)
    best_x = np.expand_dims(opt.champion_x, axis=0)
    best_fitness = np.expand_dims(opt.champion_f, axis=0)
    if self.verbose > 0:
        print('best fit:', best_x, best_fitness)

    if self.log_data:
        x = np.squeeze(xl, axis=(1,))
        y = np.squeeze(yl, axis=(2,))
        return (x, y)
    return (best_x, best_fitness)
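# The if/elif chain above mirrors the keys of scanner_options one-to-one. A
# sketch of a table-driven alternative (invented here) that looks the
# constructor up on the pygmo module itself; mbh stays special-cased because
# it wraps an inner algorithm:
import pygmo as pg

def build_algorithm(scanner, scanner_options):
    if scanner == 'mbh':
        return pg.algorithm(pg.mbh(pg.algorithm(pg.de(**scanner_options['de']))))
    if scanner not in scanner_options or not hasattr(pg, scanner):
        raise ValueError('The ' + scanner + ' algorithm is not implemented.')
    return pg.algorithm(getattr(pg, scanner)(**scanner_options[scanner]))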
    def get_name(self):
        return "SEIR Unsectorial"

# Benchmark runs. prob is assumed to be a pg.problem built from the SEIR UDP
# above; requires: import pygmo as pg; from time import time

# Standard PSO
algo = pg.algorithm(pg.pso(gen=20))
pop = pg.population(prob, 50)
t0 = time()
pop = algo.evolve(pop)
t1 = time()
print('Optimization takes %f seconds' % (t1 - t0))
print(pop.champion_f)
print(pop.champion_x)

# Improved PSO (generational)
algo = pg.algorithm(pg.pso_gen(gen=50, memory=True, variant=6))
pop = pg.population(prob, 50)
t0 = time()
pop = algo.evolve(pop)
t1 = time()
print('Optimization takes %f seconds' % (t1 - t0))
print(pop.champion_f)
print(pop.champion_x)

# Study: differential evolution for comparison
algo = pg.algorithm(pg.de(gen=50))
pop = pg.population(prob, 50)
t0 = time()
pop = algo.evolve(pop)
t1 = time()
print('Optimization takes %f seconds' % (t1 - t0))
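# The three benchmark runs above repeat the same timing boilerplate; a small
# helper (invented here) makes the comparison a one-liner per algorithm:
def benchmark(uda, prob, pop_size=50):
    pop = pg.population(prob, pop_size)
    t0 = time()
    pop = pg.algorithm(uda).evolve(pop)
    print('Optimization takes %f seconds' % (time() - t0))
    print(pop.champion_f, pop.champion_x)
    return pop

# for uda in (pg.pso(gen=20), pg.pso_gen(gen=50, memory=True, variant=6), pg.de(gen=50)):
#     benchmark(uda, prob)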
import pygmo as pg
from datetime import datetime

def pso_gen(problem, population_size, params):
    '''
    Execute the pygmo PSO_GEN algorithm on an optimisation problem with the
    population size and parameters specified.

    The possible PSO_GEN parameters are:
    * omega: the inertia weight (or constriction factor, depending on the
      algorithmic variant)
    * eta1: the social component
    * eta2: the cognitive component
    * max_vel: maximum allowed particle velocity
    * variant: algorithmic variant:
        1 -> canonical (with inertia weight)
        2 -> same social and cognitive random
        3 -> same random for all components
        4 -> only one random
        5 -> canonical (with constriction factor)
        6 -> fully informed (FIPS)
    * neighb_type: the swarm topology:
        1 -> gbest (global best)
        2 -> lbest (local best)
        3 -> Von Neumann
        4 -> adaptive random
    * neighb_param: topology parameter (number of neighbours to consider)

    Parameters
    ----------
    - problem: the problem to optimise. It must comply with the pygmo
      requirements, i.e. be an instance of a UDP class
    - population_size: the size of the swarm
    - params: dictionary of parameters for the PSO_GEN algorithm

    Return
    ------
    - log: the log of the optimisation run
    - duration: the total duration of the resolution of the problem
    - champion_f, champion_x: the best fitness and decision vector found
    '''
    # Extract algorithm parameters
    nb_generation = params["nb_generation"]
    omega = params["omega"]
    eta1 = params["eta1"]
    eta2 = params["eta2"]
    max_vel = params["max_vel"]
    variant = params["variant"]
    neighb_type = params["neighb_type"]
    neighb_param = params["neighb_param"]

    algo = pg.algorithm(
        pg.pso_gen(gen=nb_generation, omega=omega, eta1=eta1, eta2=eta2,
                   max_vel=max_vel, variant=variant, neighb_type=neighb_type,
                   neighb_param=neighb_param, memory=False))
    algo.set_verbosity(1)

    solution = pg.population(problem, size=population_size, b=None)
    startt = datetime.now()
    solution = algo.evolve(solution)
    duration = datetime.now() - startt

    uda = algo.extract(pg.pso_gen)
    log = uda.get_log()
    return (log, duration, solution.champion_f, solution.champion_x)
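# Illustrative call of the wrapper above on a built-in pygmo problem; the
# parameter values are examples, not tuned recommendations.
params = {
    "nb_generation": 100,
    "omega": 0.7,
    "eta1": 2.05,
    "eta2": 2.05,
    "max_vel": 0.5,
    "variant": 5,      # canonical, with constriction factor
    "neighb_type": 2,  # lbest topology
    "neighb_param": 4,
}
prob = pg.problem(pg.rosenbrock(dim=10))
log, duration, champion_f, champion_x = pso_gen(prob, 40, params)
print(duration, champion_f)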
# Requires: from itertools import islice, cycle; from pygmo import (de, de1220,
# sade, ihs, pso, pso_gen, bee_colony, cmaes, nsga2, xnes). Archipelago and
# FullyConnectedTopology are project-specific classes.
def create_variants(self, n, desc, category, constructor):
    def assign_2nd_alg(archipelago, algo):
        if category == 'rings':
            for island in archipelago.topology.every_other_island():
                island.algorithm = algo
        elif hasattr(archipelago.topology, 'endpoints'):
            for island in archipelago.topology.endpoints:
                island.algorithm = algo
        elif isinstance(archipelago.topology, FullyConnectedTopology):
            for island in islice(archipelago.topology.islands, None, None, 2):
                island.algorithm = algo
        return archipelago

    def assign_algs(archipelago, algos):
        '''Evenly partitions and assigns algorithms to islands.'''
        for island, algo in zip(archipelago.topology.islands, cycle(algos)):
            island.algorithm = algo
        return archipelago

    g = self.generations

    # Single-algorithm archipelagos.
    self.new_topology(
        desc='{}, de'.format(desc),
        category=category,
        algorithms=['de'],
        archipelago=Archipelago(constructor(de(gen=g), n)))
    self.new_topology(
        desc='{}, de1220'.format(desc),
        category=category,
        algorithms=['de1220'],
        archipelago=Archipelago(constructor(de1220(gen=g), n)))
    self.new_topology(
        desc='{}, sade'.format(desc),
        category=category,
        algorithms=['sade'],
        archipelago=Archipelago(constructor(sade(gen=g), n)))
    self.new_topology(
        desc='{}, ihs'.format(desc),
        category=category,
        algorithms=['ihs'],
        archipelago=Archipelago(constructor(ihs(gen=g), n)))
    self.new_topology(
        desc='{}, pso'.format(desc),
        category=category,
        algorithms=['pso'],
        archipelago=Archipelago(constructor(pso(gen=g), n)))
    self.new_topology(
        desc='{}, pso_gen'.format(desc),
        category=category,
        algorithms=['pso_gen'],
        archipelago=Archipelago(constructor(pso_gen(gen=g), n)))
    # self.new_topology(
    #     desc='{}, simulated_annealing'.format(desc),
    #     category=category,
    #     algorithms=['simulated_annealing'],
    #     archipelago=Archipelago(constructor(simulated_annealing(), n)))
    self.new_topology(
        desc='{}, bee_colony'.format(desc),
        category=category,
        algorithms=['bee_colony'],
        archipelago=Archipelago(constructor(bee_colony(gen=g), n)))
    self.new_topology(
        desc='{}, cmaes'.format(desc),
        category=category,
        algorithms=['cmaes'],
        archipelago=Archipelago(constructor(cmaes(gen=g), n)))
    self.new_topology(
        desc='{}, nsga2'.format(desc),
        category=category,
        algorithms=['nsga2'],
        archipelago=Archipelago(constructor(nsga2(gen=g), n)))
    self.new_topology(
        desc='{}, xnes'.format(desc),
        category=category,
        algorithms=['xnes'],
        archipelago=Archipelago(constructor(xnes(gen=g), n)))

    # Two-algorithm combos: a second algorithm on alternate islands.
    self.new_topology(
        desc='{}, de+nelder mead'.format(desc),
        category=category,
        algorithms=['de', 'neldermead'],
        archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g), n)),
                                   self.make_nelder_mead()))
    self.new_topology(
        desc='{}, de+praxis'.format(desc),
        category=category,
        algorithms=['de', 'praxis'],
        archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g), n)),
                                   self.make_praxis()))
    self.new_topology(
        desc='{}, de+nsga2'.format(desc),
        category=category,
        algorithms=['de', 'nsga2'],
        archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g), n)),
                                   nsga2(gen=g)))
    self.new_topology(
        desc='{}, de+de1220'.format(desc),
        category=category,
        algorithms=['de', 'de1220'],
        archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g), n)),
                                   de1220(gen=g)))
    self.new_topology(
        desc='{}, de+sade'.format(desc),
        category=category,
        algorithms=['de', 'sade'],
        archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g), n)),
                                   sade(gen=g)))
    self.new_topology(
        desc='{}, de+pso'.format(desc),
        category=category,
        algorithms=['de', 'pso'],
        archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g), n)),
                                   pso(gen=g)))

    # Extra configurations for the fully connected topology.
    if constructor is self.factory.createFullyConnected:
        self.new_topology(
            desc='{}, de+pso+praxis'.format(desc),
            category=category,
            algorithms=['de', 'pso', 'praxis'],
            archipelago=assign_algs(Archipelago(constructor(de(gen=g), n)),
                                    (de(gen=g), pso(gen=g), self.make_praxis())))
        self.new_topology(
            desc='{}, de+pso+praxis+nsga2'.format(desc),
            category=category,
            algorithms=['de', 'pso', 'praxis', 'nsga2'],
            archipelago=assign_algs(Archipelago(constructor(de(gen=g), n)),
                                    (de(gen=g), pso(gen=g), self.make_praxis(),
                                     nsga2(gen=g))))
        self.new_topology(
            desc='{}, de+pso+praxis+cmaes'.format(desc),
            category=category,
            algorithms=['de', 'pso', 'praxis', 'cmaes'],
            archipelago=assign_algs(Archipelago(constructor(de(gen=g), n)),
                                    (de(gen=g), pso(gen=g), self.make_praxis(),
                                     cmaes(gen=g))))
        self.new_topology(
            desc='{}, de+pso+praxis+xnes'.format(desc),
            category=category,
            algorithms=['de', 'pso', 'praxis', 'xnes'],
            archipelago=assign_algs(Archipelago(constructor(de(gen=g), n)),
                                    (de(gen=g), pso(gen=g), self.make_praxis(),
                                     xnes(gen=g))))
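# For reference, pygmo ships its own island model; a minimal sketch with the
# built-in archipelago, without the project-specific Archipelago/topology
# wrappers used above:
import pygmo as pg

archi = pg.archipelago(n=8,
                       algo=pg.pso_gen(gen=100),
                       prob=pg.rosenbrock(dim=10),
                       pop_size=20)
archi.evolve()      # launch asynchronous evolution on every island
archi.wait_check()  # block until all islands finish, re-raising any error
print(min(f[0] for f in archi.get_champions_f()))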
# Requires: import numpy as np; import pygmo;
# from timeit import default_timer as timer
def pygmo(self, x0, bnds, options):
    class pygmo_objective_fcn:
        def __init__(self, obj_fcn, bnds):
            self.obj_fcn = obj_fcn
            self.bnds = bnds

        def fitness(self, x):
            return [self.obj_fcn(x)]

        def get_bounds(self):
            return self.bnds

        def gradient(self, x):
            return pygmo.estimate_gradient_h(lambda x: self.fitness(x), x)

    timer_start = timer()

    pop_size = int(np.max([35, 5 * (len(x0) + 1)]))
    if options['stop_criteria_type'] == 'Iteration Maximum':
        num_gen = int(np.ceil(options['stop_criteria_val'] / pop_size))
    elif options['stop_criteria_type'] == 'Maximum Time [min]':
        num_gen = int(np.ceil(1E20 / pop_size))  # effectively no generation limit when a wall-time criterion is used

    prob = pygmo.problem(pygmo_objective_fcn(self.obj_fcn, tuple(bnds)))
    pop = pygmo.population(prob, pop_size)
    pop.push_back(x=x0)  # puts initial guess into the initial population

    # all coefficients/rules should be optimized if they're to be used
    if options['algorithm'] == 'pygmo_DE':
        # F = (0.107 - 0.141)/(1 + (num_gen/225)**7.75)
        F = 0.2
        CR = 0.8032 * np.exp(-1.165E-3 * num_gen)
        algo = pygmo.algorithm(pygmo.de(gen=num_gen, F=F, CR=CR, variant=6))
    elif options['algorithm'] == 'pygmo_SaDE':
        algo = pygmo.algorithm(pygmo.sade(gen=num_gen, variant=6))
    elif options['algorithm'] == 'pygmo_PSO':  # using generational version
        algo = pygmo.algorithm(pygmo.pso_gen(gen=num_gen))
    elif options['algorithm'] == 'pygmo_GWO':
        algo = pygmo.algorithm(pygmo.gwo(gen=num_gen))
    elif options['algorithm'] == 'pygmo_IPOPT':
        algo = pygmo.algorithm(pygmo.ipopt())

    pop = algo.evolve(pop)
    x = pop.champion_x

    obj_fcn, x, shock_output = self.Scaled_Fit_Fun(x, optimizing=False)

    msg = 'Optimization terminated successfully.'
    success = True

    res = {
        'x': x,
        'shock': shock_output,
        'fval': obj_fcn,
        'nfev': pop.problem.get_fevals(),
        'success': success,
        'message': msg,
        'time': timer() - timer_start
    }
    return res
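# Hypothetical call of the method above; the options keys mirror those read
# inside it, and the solver instance and bounds are placeholders.
options = {
    'stop_criteria_type': 'Iteration Maximum',
    'stop_criteria_val': 5000,
    'algorithm': 'pygmo_PSO',
}
# res = solver.pygmo(x0=[0.5, 1.0, 2.0],
#                    bnds=([0.0, 0.0, 0.0], [1.0, 2.0, 4.0]),  # (lb, ub), as returned by get_bounds
#                    options=options)
# print(res['fval'], res['x'], res['time'])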