def cmaES(funcs_l, weights, lambd, mu, var, sigma, ngen):
    """Run a multi-objective CMA-ES over the swarm's genotype.

    Parameters
    ----------
    funcs_l : list of callables; each maps an individual to one scalar objective.
    weights : DEAP fitness weights tuple, one entry per objective.
    lambd   : per-dimension offspring multiplier (lambda_ = lambd * len(var)).
    mu      : NOTE(review): accepted but never used -- not forwarded to the strategy.
    var     : model/parameter vector; len(var) sets the problem dimension.
    sigma   : initial step size, used both for the init sampler and the strategy.
    ngen    : number of generations passed to eaGenerateUpdate.

    Returns
    -------
    (stats, hof) : the Statistics object and a HallOfFame holding the best individual.
    """
    creator.create("MaFitness", base.Fitness, weights=weights)
    creator.create("Individual", list, fitness=creator.MaFitness)
    toolbox = base.Toolbox()
    # Evaluate every objective at once; DEAP expects a tuple of fitness values.
    eval_funcs = lambda x: tuple([f(x) for f in funcs_l])
    toolbox.register("evaluate", eval_funcs)
    # Reset the swarm controller and extract the current genotype as the start point.
    S.Swarm.controller.rez_params()
    S.model = var
    c = S.extract_genotype()
    logbook = tools.Logbook()  # NOTE(review): created but never written to or returned
    # Gaussian initializer around the extracted genotype.
    init_func = lambda c, sigma, size: np.random.normal(c, sigma, size)
    toolbox.register("attr_float", init_func, c, sigma, len(var))
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    # NOTE(review): 'c * len(var)' replicates a Python list len(var) times, but
    # SCALES elementwise if extract_genotype() returns a numpy array -- confirm
    # the intended centroid here.
    strategy = cma.Strategy(centroid=c * len(var), sigma=sigma, lambda_=lambd * len(var))
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    algorithms.eaGenerateUpdate(toolbox, ngen=ngen, stats=stats, halloffame=hof)
    return stats, hof
def main(): np.random.seed(64) # The CMA-ES algorithm strategy = cma.Strategy(centroid=[5.0] * N, sigma=3.0, lambda_=150) toolbox.register("generate", strategy.generate, creator.Individual) toolbox.register("update", strategy.update) halloffame = tools.HallOfFame(1) halloffame_array = [] C_array = [] centroid_array = [] ok_count = 0 stop_gen = 200 #print("gen ","min ","max ","mean ","std") for gen in range(NGEN): # 新たな世代の個体群を生成 population = toolbox.generate() # 個体群の評価 fitnesses = toolbox.map( func, population, ) for ind, fit in zip(population, fitnesses): ind.fitness.values = fit fits = [ind.fitness.values[0] for ind in population] length = len(population) mean = sum(fits) / length sum2 = sum(x * x for x in fits) std = abs(sum2 / length - mean**2)**0.5 #print(gen ,min(fits) ,max(fits) ,mean,std) with open('dc.txt', mode='a') as f: f.write(str(gen)) f.write(" ") f.write(str(min(fits))) f.write(" ") f.write(str(mean)) f.write("\n") """ if min(fits) <= np.exp(-10) : ok_count = 1 stop_gen = gen break """ # 個体群の評価から次世代の計算のためのパラメタ更新 toolbox.update(population) # hall-of-fameの更新 halloffame.update(population) halloffame_array.append(halloffame[0]) C_array.append(strategy.C) centroid_array.append(strategy.centroid) #print(halloffame) return population, ok_count, stop_gen, halloffame
def main():
    """Run CMA-ES with settings from config.txt and dump the best individual.

    config.txt line format: sigma,lambda,top,ngen (comma separated).
    Writes the best individual's genes to output.txt, comma separated.
    """
    # Seed numpy's RNG so runs are reproducible.
    numpy.random.seed(128)
    with open('config.txt', 'r') as cfg:
        fields = cfg.readline().strip('\n').strip('\r').split(',')
    pred_link_eval.top = int(fields[2])
    strategy = cma.Strategy(centroid=[5.0] * N,
                            sigma=float(fields[0]),
                            lambda_=int(fields[1]))
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, fn in (("avg", numpy.mean), ("std", numpy.std),
                      ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, fn)
    algorithms.eaGenerateUpdate(toolbox, ngen=int(fields[3]), stats=stats,
                                halloffame=hof, verbose=True)
    with open('output.txt', 'w') as out:
        for item in hof[0]:
            out.write("%s," % item)
def find_best_model(strategy, ngen=100, pop_size=300, ind_size=1000, sigma=0.001):
    """Search for the best binary model vector with CMA-ES.

    Parameters
    ----------
    strategy : signal strategy forwarded to eval_individual (NOT the CMA strategy).
    ngen     : number of CMA-ES generations.
    pop_size : lambda_ (offspring per generation).
    ind_size : dimensionality of the individual.
    sigma    : initial CMA step size.

    Returns the best individual rounded to an integer numpy array.

    Fixes vs. original: `np.int` (removed in NumPy 1.24) -> builtin `int`;
    the CMA strategy no longer shadows the `strategy` parameter; the
    multiprocessing pool is released when the run finishes.
    """
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    pool = multiprocessing.Pool()
    toolbox.register("map", pool.map)
    toolbox.register("evaluate", eval_individual, get_strategy_signal=strategy)
    # Renamed: the original rebound `strategy`, shadowing the parameter above.
    cma_strategy = cma.Strategy(centroid=[0.5] * ind_size, sigma=sigma, lambda_=pop_size)
    toolbox.register("generate", cma_strategy.generate, creator.Individual)
    toolbox.register("update", cma_strategy.update)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("sig/noise", lambda x: np.mean(x) / np.std(x))
    stats.register("std", np.std)
    stats.register("max", np.max)
    try:
        algorithms.eaGenerateUpdate(toolbox, ngen=ngen, stats=stats, halloffame=hof)
    finally:
        # fix: the worker pool was never closed, leaking processes.
        pool.close()
        pool.join()
    # fix: np.int was removed in NumPy 1.24; the builtin int is the documented
    # replacement and produces the same integer dtype.
    return np.round(hof[0]).astype(int)
def set_up(self):
    """Initialise the DEAP toolbox, CMA strategy, stats and hall-of-fame.

    Uses self.maximise to pick the fitness sign, self.solution_size for the
    problem dimension, and self.sigma / self.pop_size for the CMA strategy.

    Fix vs. original: the bare `except:` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to `except Exception`. The intent (ignore
    re-creation of the creator classes on repeated set_up calls) is preserved.
    """
    try:
        if self.maximise:
            creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        else:
            creator.create("FitnessMax", base.Fitness, weights=(-1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMax)
    except Exception:
        # Classes may already exist from a previous set_up call; that is fine.
        pass
    self.toolbox = base.Toolbox()
    self.logbook = tools.Logbook()
    self.toolbox.register("evaluate", self.eval_function)
    # Define the strategy the algorithm will use, along with the population
    # generation and update methods.
    self.strategy = cma.Strategy(centroid=[0] * self.solution_size,
                                 sigma=self.sigma,
                                 lambda_=self.pop_size)
    self.toolbox.register("generate", self.strategy.generate, creator.Individual)
    self.toolbox.register("update", self.strategy.update)
    # Establish stats to be recorded - default standard stats are set below.
    self.stats = tools.Statistics(lambda ind: ind.fitness.values)
    self.stats.register("avg", np.mean)
    self.stats.register("std", np.std)
    self.stats.register("min", np.min)
    self.stats.register("max", np.max)
    self.hof = tools.HallOfFame(1)
def main():
    """Run CMA-ES from a random population's first individual.

    Fixes vs. original:
    - `print map(...)` is Python 2 syntax and cannot run under Python 3
      (the rest of this function already uses print() calls); replaced with
      `print(list(map(...)))` so the lazy map is materialised.
    - `lambda_` is an offspring COUNT and must be an integer, not 5.0.
    """
    numpy.random.seed(128)
    # population count = 50 individuals
    pop = toolbox.population(n=50)
    print("Before\n\n")
    # NOTE(review): the second map argument "" is an empty iterable, so under
    # Python 3 this produces zero calls -- confirm what toolbox.check expects.
    print(list(map(toolbox.check, pop, "")))
    print("\n\n")
    print(pop[0])
    # Define cma strategy: sigma is step-size and lambda_ the offspring count.
    strategy = cma.Strategy(centroid=pop[0], sigma=3.0, lambda_=5)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    # Get the best of the results - here I am getting every one from population for testing
    hof = tools.HallOfFame(50)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    # Define number of generations and stats, and pass in the hallOfFame.
    algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof)
    print("After\n\n")
    print(list(map(toolbox.check, hof, "")))
    print("\n\n")
def main():
    """Configure and run CMA-ES; return (pop, logbook, hof).

    The centroid starts at a uniform random point in [-1, 1]^IND_SIZE;
    LAMBDA, MU and NGEN are module-level settings.
    """
    # CMA-ES configuration
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    initial_centroid = [np.random.uniform(-1, 1) for _ in range(IND_SIZE)]
    strategy = cma.Strategy(centroid=initial_centroid,
                            sigma=5.0,
                            lambda_=LAMBDA,
                            mu=MU)
    toolbox = base.Toolbox()
    toolbox.register("evaluate", evalFitness)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, fn in (("avg", np.mean), ("std", np.std),
                      ("min", np.min), ("max", np.max)):
        stats.register(label, fn)
    hof = tools.HallOfFame(1)
    pop, logbook = algorithms.eaGenerateUpdate(toolbox,
                                               ngen=NGEN,
                                               stats=stats,
                                               halloffame=hof)
    return pop, logbook, hof
def mycmaes():
    """Run one CMA-ES repetition, print timing and result, return the best individual.

    Uses module-level state: count, vec_def, D, sigma, pop_CMAES, ngen_CMAES,
    toolbox1 and creator.Individual1.
    """
    global count
    print("CMA-ES REPETITION", count)
    count += 1
    # The cma module draws from numpy's RNG; reseed from the OS each repetition.
    numpy.random.seed()
    # Start the search from the first D entries of the default vector.
    # See http://www.lri.fr/~hansen/cmaes_inmatlab.html for background on the
    # rastrigin and other CMA-ES test setups.
    strategy = cma.Strategy(centroid=vec_def[0:D], sigma=sigma, lambda_=pop_CMAES)
    toolbox1.register("generate", strategy.generate, creator.Individual1)
    toolbox1.register("update", strategy.update)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for tag, fn in (("avg1", numpy.mean), ("std1", numpy.std),
                    ("min1", numpy.min), ("max1", numpy.max)):
        stats.register(tag, fn)
    #logger = tools.EvolutionLogger(stats.functions.keys())
    # With these settings the algorithm converges with good probability.
    started = time.time()
    algorithms.eaGenerateUpdate(toolbox1, ngen=ngen_CMAES, stats=stats, halloffame=hof)
    elapsed = time.time() - started
    print(elapsed)
    # print "Best individual is %s, %s" % (hof[0], hof[0].fitness.values)
    print(hof[0])
    print(hof[0].fitness.values[0])
    return hof[0]
def reset(self, fun, lambda_=20, size=36):
    """Rebuild toolbox, CMA strategy, stats and logbook for a fresh run.

    fun     : fitness function registered as "evaluate".
    lambda_ : offspring per generation.
    size    : problem dimension (centroid starts at 5.0 per dimension).
    """
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    self.toolbox = base.Toolbox()
    # For parallel processing:
    # pool = multiprocessing.Pool(8)
    # self.toolbox.register("map", pool.map)
    self.toolbox.register("evaluate", fun)
    dimension = size
    cma_strategy = cma.Strategy(centroid=[5.0] * dimension, sigma=5.0, lambda_=lambda_)
    self.toolbox.register("generate", cma_strategy.generate, creator.Individual)
    self.toolbox.register("update", cma_strategy.update)
    self.hof = tools.HallOfFame(1)
    self.stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, fn in (("avg", np.mean), ("std", np.std),
                      ("min", np.min), ("max", np.max)):
        self.stats.register(label, fn)
    self.logbook = tools.Logbook()
    self.logbook.header = ["gen", "evals"] + self.stats.fields
def main():
    """CMA-ES loop for BipedalWalker; logs per-generation stats to file.

    Fix vs. original: `cma.Strategy(..., _lambda=250)` used the wrong keyword.
    DEAP's Strategy takes `lambda_`; unknown keys are silently absorbed into
    its **params dict, so the population size fell back to the default
    (4 + 3*log(N)) instead of 250.
    """
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    #pop,log = algorithms.cma.Strategy(pop,toolbox,cxpb=0.5,mutpb=0.2,ngen=10000,stats=stats,halloffame=hof,verbose=True)
    np.random.seed(64)
    # The CMA-ES algorithm (fix: keyword is lambda_, not _lambda)
    strategy = cma.Strategy(centroid=[5.0] * N, sigma=3.0, lambda_=250)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    halloffame = tools.HallOfFame(1)
    halloffame_array = []
    C_array = []
    centroid_array = []
    for i in range(NGEN):
        # Generate the new generation's population
        population = toolbox.generate()
        # Evaluate the population
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
        fits = [ind.fitness.values[0] for ind in population]
        length = len(population)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        #print("gen:",i," Min %s" % min(fits)," Max %s" % max(fits)," Avg %s" % mean," Std %s" % std)
        print(i, max(fits), mean)
        with open('BipedalWalker-v2.txt', mode='a') as f:
            f.write(str(i))
            f.write(" ")
            f.write(str(max(fits)))
            f.write(" ")
            f.write(str(mean))
            f.write("\n")
        # Update strategy parameters from the evaluated population
        toolbox.update(population)
        # Update the hall of fame
        halloffame.update(population)
        halloffame_array.append(halloffame[0])
        C_array.append(strategy.C)
        centroid_array.append(strategy.centroid)
        record = stats.compile(population)
        # NOTE(review): `logbook` is not defined in this function -- it must be
        # a module-level global or this line raises NameError. Confirm.
        logbook.record(gen=i, nevals=1, **record)
def main(): numpy.random.seed(64) # The CMA-ES algorithm strategy = cma.Strategy(centroid=[5.0] * N, sigma=3.0, lambda_=20 * N) toolbox.register("generate", strategy.generate, creator.Individual) toolbox.register("update", strategy.update) halloffame = tools.HallOfFame(1) halloffame_array = [] C_array = [] centroid_array = [] for gen in range(NGEN): # 新たな世代の個体群を生成 population = toolbox.generate() # 個体群の評価 fitnesses = toolbox.map(toolbox.evaluate, population) for ind, fit in zip(population, fitnesses): ind.fitness.values = fit # 個体群の評価から次世代の計算のためのパラメタ更新 toolbox.update(population) # hall-of-fameの更新 halloffame.update(population) halloffame_array.append(halloffame[0]) C_array.append(strategy.C) centroid_array.append(strategy.centroid) # 計算結果を描画 import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.patches import Ellipse plt.ion() fig = plt.figure() ax = fig.add_subplot(111) X = numpy.arange(-5.12, 5.12, 0.1) Y = numpy.arange(-5.12, 5.12, 0.1) X, Y = numpy.meshgrid(X, Y) Z = [[benchmarks.rastrigin((x, y))[0] for x, y in zip(xx, yy)] for xx, yy in zip(X, Y)] ax.imshow(Z, cmap=cm.jet, extent=[-5.12, 5.12, -5.12, 5.12]) for x, sigma, xmean in zip(halloffame_array, C_array, centroid_array): # 多変量分布の分散を楕円で描画 Darray, Bmat = numpy.linalg.eigh(sigma) ax.add_artist( Ellipse((xmean[0], xmean[1]), numpy.sqrt(Darray[0]), numpy.sqrt(Darray[1]), numpy.arctan2(Bmat[1, 0], Bmat[0, 0]) * 180 / numpy.pi, color="g", alpha=0.7)) ax.plot([x[0]], [x[1]], c='r', marker='o') ax.axis([-5.12, 5.12, -5.12, 5.12]) plt.draw() plt.show(block=True)
def cmaes(dim, f, y_target=0.0):
    '''Return (x, y) for which y = f(x) is minimal. Stop early when y_target is reached.

    Runs `nhops` independent CMA-ES restarts from random integer centroids in
    [-3, 3]^dim.

    Fixes vs. original:
    - the best solution is now tracked across ALL hops (previously only the
      last hop's best was returned, discarding better earlier results);
    - once y_target is reached no further hops are started;
    - bare `except:` narrowed to `except Exception` so Ctrl-C still interrupts;
    - removed the dead `if False:` rounding block.
    '''
    if dim < 2 or 10000 < dim:
        print("nparams value is invalid, must be in [2, 10000]")
        return None
    population_size = max(
        math.ceil(4 + 3 * math.log(dim) + 0.5),
        dim // 2)  # dim/2 is result of experiments by Maarten on dim > 100
    population_size *= 2
    ngen = 600  # 25 + int(0.2*dim) # result of experiments by Maarten
    print("dimension of problem space ", dim)
    print("population size ", population_size)
    print("generations ", ngen)
    #random.seed(42)
    #np.random.seed(42)
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    nhops = 10
    overall_x, overall_y = [1] * dim, None  # best over all hops
    for hop in range(nhops):
        centroid = [random.randint(-3, 3) for i in range(dim)]
        strategy = cma.Strategy(centroid=centroid,
                                sigma=0.5,
                                lambda_=population_size)
        toolbox = base.Toolbox()
        toolbox.register("generate", strategy.generate, creator.Individual)
        toolbox.register("update", strategy.update)
        evaluation_count, best_x, best_y = 0, [1] * dim, None
        try:
            for gen in range(ngen):
                population = toolbox.generate()
                for x in population:
                    y = f(x)
                    x.fitness.values = (y, )
                    evaluation_count += 1
                    if best_y is None or best_y > y:
                        best_x, best_y = copy.deepcopy(x), copy.deepcopy(y)
                if best_y <= y_target:
                    break
                toolbox.update(population)
            x_str = ", ".join([
                f"{xi:.3f}" if xi != round(xi) else f"{int(round(xi))}"
                for xi in best_x
            ])
            print(
                f"evaluations {evaluation_count} evaluations f({x_str}) = {best_y:.3f}"
            )
        except Exception:
            # Best-effort restart: a failing hop is skipped, not fatal.
            pass
        # Fold this hop's result into the overall best.
        if best_y is not None and (overall_y is None or best_y < overall_y):
            overall_x, overall_y = best_x, best_y
        if overall_y is not None and overall_y <= y_target:
            break
    return overall_x, overall_y
def __init__(
        self,
        centroid=None,
        sigma=None,
        popSize=200,  # lambda_ in the algorithm
        evalFunc=defaultEvaluate,
        hofn=5):
    """Set up a scaled CMA-ES run.

    centroid : starting point (defaultInitializer() when None); rescaled to
               [-1, 1] per coordinate via MaxAbsScaler before the search.
    sigma    : initial step size (0.20 when None).
    popSize  : offspring per generation (lambda_).
    evalFunc : fitness callable, registered with the fitted scaler.
    hofn     : hall-of-fame capacity.
    """
    global randomizers
    randomizers = InitRandomizers()
    if centroid is None:
        centroid = defaultInitializer()
    if sigma is None:
        sigma = 0.20
    # Fit the scaler on the start point, then express the centroid in the
    # scaled coordinate system.
    self.scaler = MaxAbsScaler()
    self.scaler.fit([
        centroid,
    ])
    centroid = self.scaler.transform([
        centroid,
    ])[0]
    self.hof = tools.HallOfFame(hofn)
    hof = self.hof
    self.popSize = popSize
    toolbox = base.Toolbox()
    # Statistics over inverse fitness values.
    stats = tools.Statistics(key=lambda ind: 1.0 / ind.fitness.values[0])
    for label, fn in (("avg", np.mean), ("std", np.std),
                      ("min", np.min), ("max", np.max)):
        stats.register(label, fn)
    self.stats = stats
    # Our fitness already takes into account all the molecules simultaneously.
    # Therefore, there is no need for a multi-objective optimization.
    creator.create("FitnessMin", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    toolbox.register("evaluate", evalFunc, scaler=self.scaler)
    strategy = cma.Strategy(centroid=centroid, sigma=sigma, lambda_=popSize)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    self.toolbox = toolbox
def test_cma():
    """Smoke test: CMA-ES on the 5-D sphere must converge below 1e-8."""
    dimensions = 5
    strategy = cma.Strategy(centroid=[0.0] * dimensions, sigma=1.0)
    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.sphere)
    toolbox.register("generate", strategy.generate, creator.__dict__[INDCLSNAME])
    toolbox.register("update", strategy.update)
    pop, _ = algorithms.eaGenerateUpdate(toolbox, ngen=100)
    best, = tools.selBest(pop, k=1)
    assert best.fitness.values < (1e-8,), "CMA algorithm did not converged properly."
def main(dim, nworkers, script, folder, ngen):
    """Validate CLI-style arguments, then run a parallel CMA-ES via Worker.

    dim      : problem dimension, must be in [2, 10000].
    nworkers : parallel evaluators, must be in [1, 200].
    script   : path of the evaluation script (must exist).
    folder   : working folder (must exist).
    ngen     : number of generations, must be >= 2.

    Fix vs. original: the ngen error message was truncated ("must be >= ").
    """
    print("nparams ", dim)
    print("nworkers ", nworkers)
    print("script ", script)
    print("folder ", folder)
    print("ngenerations ", ngen)
    if dim < 2 or 10000 < dim:
        print("nparams value is invalid, must be in [2, 10000]")
        return
    if nworkers < 1 or 200 < nworkers:
        print("nworkers value is invalid, must be in [1, 200]")
        return
    if not os.path.exists(script):
        print("script does not exist")
        return
    if not os.path.exists(folder):
        print("folder does not exist")
        return
    if ngen <= 1:
        # fix: message previously ended at "must be >= " with no bound.
        print("ngen value is invalid, must be >= 2")
        print("Hint : the old pySOT 'incl-centre' setting is replaced by ngen setting.")
        return
    Worker.set_params(dim, script, folder)
    random.seed(42)
    np.random.seed(42)
    lambda_ = int(4 + 3 * math.log(dim))
    if dim > 100:
        lambda_ *= 2  # for dim == 1000, 4 + 3 * log(dim) is not enough
    lambda_ = max(lambda_, nworkers)
    print("population ", lambda_, "(is evaluated in parallel)")
    strategy = cma.Strategy(centroid=[0.5] * dim, sigma=0.5, lambda_=lambda_)
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    toolbox = base.Toolbox()
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    for gen in range(ngen):
        population = Worker.generate(toolbox)
        # NOTE(review): 'objfunctionn' (double n) is taken verbatim from the
        # Worker API -- confirm the spelling against that class.
        fitnesses = Worker.objfunctionn(population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = (fit,)
        print(f"generations {gen+1} evaluations {Worker.call_count} best {Worker.best_y} sigma {strategy.sigma}")
        if Worker.best_y < 0.1:
            break
        toolbox.update(population)
def __init__(self, ind_size, problem):
    """Wire a DEAP toolbox whose generate/update hooks delegate to this object.

    ind_size : individual length (CMA dimension).
    problem  : fitness callable registered as "evaluate".
    """
    self.ind_size = ind_size
    # Fitness / individual classes.
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    # Toolbox: generation and update are routed through this object's own
    # gen_new_pop / update_new_pop wrappers rather than the raw strategy.
    self.toolbox = base.Toolbox()
    self.toolbox.register("evaluate", problem)
    self.toolbox.register("generate", self.gen_new_pop, creator.Individual)
    self.toolbox.register("update", self.update_new_pop)
    # Underlying CMA strategy: centroid 0.5 per gene, sigma 0.15, 30 offspring.
    self.strategy = cma.Strategy(centroid=[0.5] * self.ind_size,
                                 sigma=0.15,
                                 lambda_=30)
def run_cma(vertices, edges, alpha, beta, gamma, cb, lambda_=200, generations=250):
    """Run CMA-ES over graph vertex coordinates; return the hall of fame."""
    # --- setup ---
    x, y, theta, xy, n_v, n_e, args = get_initial_arguments(
        vertices, edges, alpha, beta, gamma, cb)
    fitness_function = make_fitness_function(args)
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    toolbox = base.Toolbox()
    toolbox.register("evaluate", fitness_function)
    # --- run ---
    np.random.seed(128)
    N = len(xy)  # problem dimension (kept for parity with the original)
    strategy = cma.Strategy(centroid=xy, sigma=0.001, lambda_=lambda_)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, fn in (("avg", np.mean), ("std", np.std),
                      ("min", np.min), ("max", np.max)):
        stats.register(label, fn)
    algorithms.eaGenerateUpdate(toolbox,
                                ngen=generations,
                                stats=stats,
                                halloffame=hof)
    return hof
def main(): numpy.random.seed(64) # The CMA-ES algorithm strategy = cma.Strategy(centroid=[5.0]*N, sigma=3.0, lambda_=20*N) toolbox.register("generate", strategy.generate, creator.Individual) toolbox.register("update", strategy.update) halloffame = tools.HallOfFame(1) halloffame_array = [] C_array = [] centroid_array = [] print("gen ","min ","max ","mean ","std") for gen in range(NGEN): # 新たな世代の個体群を生成 population = toolbox.generate() # 個体群の評価 """ for ind in population: ax.scatter(ind[0],ind[1],c='blue',marker='.') """ fitnesses = toolbox.map(toolbox.evaluate, population) for ind, fit in zip(population, fitnesses): ind.fitness.values = fit fits = [ind.fitness.values[0] for ind in population] length = len(population) mean = sum(fits) / length sum2 = sum(x*x for x in fits) std = abs(sum2 / length - mean**2)**0.5 print(gen ,min(fits) ,max(fits) ,mean,std) # 個体群の評価から次世代の計算のためのパラメタ更新 toolbox.update(population) # hall-of-fameの更新 halloffame.update(population) halloffame_array.append(halloffame[0]) C_array.append(strategy.C) centroid_array.append(strategy.centroid) """
def optimize(self, expression: base.Expression, problem_size, generations, storages, evaluation_time):
    """Tune global relaxation factors with CMA-ES using generated-code evaluations.

    expression      : GP expression (unused directly here; part of the caller contract).
    problem_size    : number of relaxation factors to optimize (CMA dimension).
    generations     : number of CMA-ES generations.
    storages, evaluation_time : accepted but not referenced in this body --
        NOTE(review): confirm whether they are consumed elsewhere.

    Returns the best weight vector found (hof[0]).
    """

    def evaluate(weights):
        # Each evaluation regenerates, compiles and runs the solver with the
        # candidate weights, then restores the original initializations.
        program_generator = self._gp_optimizer.program_generator
        output_path = program_generator._output_path_generated
        program_generator.generate_global_weight_initializations(
            output_path, weights)
        program_generator.run_c_compiler(output_path)
        runtime, convergence_factor, _ = program_generator.evaluate(
            output_path,
            infinity=self._gp_optimizer.infinity,
            number_of_samples=1)
        program_generator.restore_global_initializations(output_path)
        # Single-objective fitness: the convergence factor (runtime is ignored).
        return convergence_factor,

    self._toolbox.register("evaluate", evaluate)
    # Standard CMA population-size heuristic (4 + 3 ln n), doubled.
    lambda_ = int(round((4 + 3 * log(problem_size)) * 2))
    if self._gp_optimizer.is_root():
        print("Running CMA-ES", flush=True)
    strategy = cma.Strategy(centroid=[1.0] * problem_size,
                            sigma=0.3,
                            lambda_=lambda_)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    self._toolbox.register("generate", strategy.generate,
                           creator.RelaxationFactors)
    self._toolbox.register("update", strategy.update)
    hof = tools.HallOfFame(1)
    generator = self._gp_optimizer.program_generator
    # The code generator must be primed once before evaluations can run.
    if generator.run_exastencils_compiler(
            knowledge_path=generator.knowledge_path_generated,
            settings_path=generator.settings_path_generated) != 0:
        raise RuntimeError(
            "Could not initialize code generator for relaxation factor optimization"
        )
    _, logbook = algorithms.eaGenerateUpdate(self._toolbox,
                                             ngen=generations,
                                             halloffame=hof,
                                             verbose=False,
                                             stats=stats)
    if self._gp_optimizer.is_root():
        print(logbook, flush=True)
    return hof[0]
def __init__(self,
             eval_fitness: Callable,
             individual_size: int,
             random_seed: int,
             conf: OptimizerCmaEsCfg,
             stats,
             map_func=map,
             from_checkpoint=None,
             reset_hof=False):
    """CMA-ES optimizer: restore state from a checkpoint or start fresh.

    from_checkpoint : checkpoint path/id; when set, generation counter, hall
        of fame (unless reset_hof), recorded individuals, logbook, seed and
        strategy are all restored from it.
    reset_hof       : keep a fresh hall of fame even when restoring.
    """
    super(OptimizerCmaEs, self).__init__(eval_fitness, individual_size,
                                         random_seed, conf, stats, map_func,
                                         from_checkpoint)
    toolbox = self.toolbox
    if from_checkpoint:
        # todo: DRY. Some parts of this are also in the other optimizer, and can be moved to the parent class
        cp = get_checkpoint(from_checkpoint)
        toolbox.initial_generation = cp["generation"] + 1
        if not reset_hof:
            self.hof = cp["halloffame"]
        toolbox.recorded_individuals = cp["recorded_individuals"]
        toolbox.logbook = cp["logbook"]
        toolbox.initial_seed = cp["last_seed"]
        toolbox.strategy = cp["strategy"]
    else:
        toolbox.recorded_individuals = []
        toolbox.initial_generation = 0
        toolbox.initial_seed = random_seed
        # NOTE(review): this assignment is immediately overwritten by the
        # create_logbook call below -- the first line appears redundant.
        toolbox.logbook = tools.Logbook()
        toolbox.logbook = self.create_logbook(conf)
        # mu: explicit from config, else half the population (min 1).
        if conf.mu:
            mu = conf.mu
            assert conf.population_size >= mu, "The population size must be higher or equal to the chosen mu."
        else:
            mu = conf.population_size // 2 if conf.population_size // 2 > 0 else 1
        toolbox.strategy = cma.Strategy(centroid=[0.0] * individual_size,
                                        sigma=conf.sigma,
                                        lambda_=conf.population_size,
                                        mu=mu)
    toolbox.register("generate", toolbox.strategy.generate, Individual)
    toolbox.register("update", toolbox.strategy.update)
    toolbox.register("strip_strategy_from_population",
                     self.strip_strategy_from_population,
                     mutation_learned=False)
def run_cmaes_p_concat(v, k, r, seed, generations, sig=None):
    """CMA-ES search for Trotter-style approximation values on a Heisenberg chain.

    v           : chain couplings; len(v) sets the chain length.
    k           : order passed to approx.suzuki_vals to build the start centroid.
    r           : number of repetitions used when expanding an individual.
    seed        : seeds both `random` and numpy RNGs for reproducibility.
    generations : number of CMA-ES generations.
    sig         : initial step size; defaults to 1e-5 / len(suzuki).

    Returns (pop, log, hof) from eaGenerateUpdate.
    """
    suzuki = approx.suzuki_vals(k)
    if sig == None:
        sig = 1e-5 / len(suzuki)
    chain = hchain.HeisenbergChain(len(v), v)
    random.seed(seed)
    np.random.seed(seed)

    # Error from target
    def target_error(ind):
        if NORMALISE:
            norm_ind = norm_f(ind)
        else:
            norm_ind = ind
        # NOTE(review): `norm_ind` is computed but never used -- expand_vals
        # receives the raw `ind`, so the NORMALISE branch currently has no
        # effect. Confirm whether `norm_ind` was meant to be passed here.
        final_ind = approx.r_copies(approx.expand_vals(ind), r)
        return approx.error(chain, final_ind, t=2 * chain.n),

    toolbox = base.Toolbox()
    toolbox.register("evaluate", target_error)
    strategy = cma.Strategy(centroid=suzuki, sigma=sig)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    pop, log = algorithms.eaGenerateUpdate(toolbox,
                                           ngen=generations,
                                           stats=stats,
                                           halloffame=hof,
                                           verbose=True)
    return pop, log, hof
def main(N, out_sol_dict): ''' Procedure setting up all the necessary parameters and components for CMAES evolution Parameters: ----------- N: Dimension of the problem (number of variables) out_sol_dict: Dictionnary to store the results ''' # CMAES strategy strategy = cma.Strategy(centroid=[5.0] * N, sigma=5.0, lambda_=20 * N) # Register the generation and update procedure for the algorithm workflow toolbox.register("generate", strategy.generate, creator.Individual) toolbox.register("update", strategy.update) # Create a set containing the best individual recorded hof = tools.HallOfFame(1) # Create a statistical object and tell it what you want to monitor stats = tools.Statistics(lambda ind: ind.fitness.values) stats.register("avg", numpy.mean) stats.register("std", numpy.std) stats.register("min", numpy.min) stats.register("max", numpy.max) # Start the generation and update the population of solutions:w _, logbook = algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof) # Get best solution and save it best_sol = tools.selBest(hof, 1)[0] out_sol_dict["solution"] = list(best_sol) out_sol_dict["fit"] = float(best_sol.fitness.values[0]) # Plot convergence gen, avg = logbook.select("gen", "avg") plt.figure() plt.title("Convergence curve") plt.xlabel("Generations") plt.ylabel("Best obtained Fitness value at gen N") plt.grid(True) plt.plot(gen, avg, "r--") plt.savefig("conv.pdf", dpi=600)
def CMAOpt(X, Y, Adj):
    """Locally refine each row of X with an independent CMA-ES run.

    X   : (n_points, dim) start points; one CMA run per row.
    Y   : (n_points,) fitness values matching X's rows.
    Adj : adjacency matrix; a row's neighbours set the initial sigma.

    Returns (Xopt, Yopt, nevals) where failed runs fall back to the input row.

    Fixes vs. original: DEAP creator classes are created once instead of on
    every loop iteration (re-creation churns the creator module and triggers
    warnings); bare `except:` narrowed to `except Exception`; removed the
    unused local `fopt`.
    """
    Xopt = np.zeros(X.shape)
    Yopt = np.zeros(Y.shape)
    nevals = X.shape[0] * 50 * 50  #10*X.shape[1]
    # fix: hoisted out of the loop -- one creation is sufficient.
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", np.ndarray, fitness=creator.FitnessMax)
    for i in range(X.shape[0]):
        toolbox = base.Toolbox()
        toolbox.register("evaluate", f)
        toolbox.decorate("evaluate", tupleize)
        # Initial step size from the spread of the point's neighbours.
        neigh = np.where(Adj[i, :])[0]
        if neigh.shape[0] > 2:
            sigma = 2.0 * ((X[i] - X[neigh])**2).max()
        else:
            sigma = 0.2
        strategy = cma.Strategy(centroid=X[i], sigma=sigma,
                                lambda_=50)  #10*X.shape[1])
        toolbox.register("generate", strategy.generate, creator.Individual)
        toolbox.register("update", strategy.update)
        toolbox.decorate("generate", checkBounds(f.lb, f.ub))
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("max", np.max)
        hof = tools.HallOfFame(1, similar=np.array_equal)
        try:
            algorithms.eaGenerateUpdate(toolbox,
                                        ngen=100,
                                        stats=stats,
                                        halloffame=hof,
                                        verbose=False)
            #algorithms.eaGenerateUpdate(toolbox, ngen=50, stats=stats, halloffame=hof, verbose=False)
            #algorithms.eaGenerateUpdate(toolbox, ngen=50, stats=stats, halloffame=hof, verbose=False)
            Xopt[i, :] = hof[0]
            Yopt[i] = f(hof[0])
        except Exception:
            # fix: narrowed from bare 'except:' so Ctrl-C still interrupts.
            # On any failure, keep the unoptimised input point.
            Xopt[i, :] = X[i, :]
            Yopt[i] = Y[i]
    return Xopt, Yopt, nevals
def __init__(self, evalFitness, individual_size, conf, map_func=map, hof=None,
             trainer_parameters=None, checkpoint=None):
    """Build toolbox + CMA strategy, optionally restoring from a checkpoint.

    evalFitness     : fitness callable registered as "evaluate".
    individual_size : CMA dimension (centroid starts at 0.0 per gene).
    conf            : dict with "sigma" and "population_size".
    map_func        : mapping function registered as "map" (e.g. pool.map).
    hof             : hall of fame; a fresh HallOfFame(5) when None.
    trainer_parameters, checkpoint : optional; checkpoint restores the strategy.

    Fix vs. original: the default `hof=tools.HallOfFame(5)` was a mutable
    default argument evaluated once at definition time, so every instance
    created without an explicit hof SHARED the same hall of fame. A fresh one
    is now built per instance.
    """
    if hof is None:
        hof = tools.HallOfFame(5)
    self.toolbox = toolbox = base.Toolbox()
    self.conf = conf
    self.hof = hof
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, typecode='b', fitness=creator.FitnessMax)
    if checkpoint:
        cp = algorithms.get_checkpoint(checkpoint)
        toolbox.strategy = strategy = cp["strategy"]
    else:
        toolbox.strategy = strategy = cma.Strategy(centroid=[0.0] * individual_size,
                                                   sigma=conf["sigma"],
                                                   lambda_=conf["population_size"])
    toolbox.register("map", map_func)
    toolbox.register("evaluate", evalFitness)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
def cmaes01(dim, f, y_target=0.0):
    '''Return x for which f(x) is minimal, with x[i] only 0 or 1. Stop early when y-target is reached.'''
    if dim < 2 or 10000 < dim:
        print("nparams value is invalid, must be in [2, 10000]")
        return None
    # Population-size heuristic; dim/2 floor from Maarten's experiments (dim > 100).
    population_size = max(
        round(4 + 3 * math.log(dim) + 0.5),
        dim // 2)  # dim/2 is result of experiments by Maarten on dim > 100
    ngen = 25 + int(0.2 * dim)  # result of experiments by Maarten
    print("dimension of problem space ", dim)
    print("population size ", population_size)
    print("generations ", ngen)
    random.seed(42)
    np.random.seed(42)
    strategy = cma.Strategy(centroid=[1] * dim,
                            sigma=0.5,
                            lambda_=population_size)
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    toolbox = base.Toolbox()
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    evaluation_count, best_x, best_y = 0, None, None
    for gen in range(ngen):
        population = toolbox.generate()
        # Threshold every gene to {0, 1} before evaluation.
        for ind in population:
            for j, gene in enumerate(ind):
                ind[j] = 1 if gene >= 0.5 else 0
        # Evaluate, tracking the best-ever solution.
        for x in population:
            y = f(x)
            x.fitness.values = (y, )
            evaluation_count += 1
            if best_y is None or best_y > y:
                best_x, best_y = copy.deepcopy(x), copy.deepcopy(y)
        print(
            f"generation {gen+1}, evaluations {evaluation_count}, best {best_y}, sigma {strategy.sigma}"
        )
        if best_y <= y_target:
            break
        toolbox.update(population)
    return best_x, best_y
def __init__(self, individual_size: int, configuration: dict):
    """Create the DEAP classes, CMA strategy and toolbox for this optimizer.

    individual_size : CMA dimension (centroid starts at 0.0 per gene).
    configuration   : raw dict validated through OptimizerCmaEsDeapCfg.
    """
    self.individual_size = individual_size
    cfg = OptimizerCmaEsDeapCfg(**configuration)
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual",
                   list,
                   typecode='b',
                   fitness=creator.FitnessMax)
    cma_strategy = cma.Strategy(centroid=[0.0] * individual_size,
                                sigma=cfg.sigma,
                                lambda_=cfg.population_size)
    self.toolbox = base.Toolbox()
    self.toolbox.register("generate", cma_strategy.generate, creator.Individual)
    self.toolbox.register("update", cma_strategy.update)
    # Populated lazily on the first generate call.
    self.population = None
def fit(self, X, y):
    """Evolve on (X, y) with CMA-ES for self.n_gen generations; return self.

    Stores the resulting population, logbook and a size-1 hall of fame on
    the instance, then runs cleanup().
    """
    cma_strategy = cma.Strategy(
        centroid=[self.mean] * self.n_dim,
        sigma=self.sigma,
    )
    toolbox = self.create_toolbox(X, y)
    # Generation is wrapped so fitness is attached as part of generate.
    toolbox.register("generate", self._generate_pop_with_fitness,
                     cma_strategy.generate)
    toolbox.register("update", cma_strategy.update)
    self.hall_of_fame = tools.HallOfFame(1)
    self.pop, self.logbook = algorithms.eaGenerateUpdate(
        toolbox,
        ngen=self.n_gen,
        stats=self._build_stats(),
        halloffame=self.hall_of_fame,
        verbose=self.verbose,
    )
    self.cleanup()
    return self
def do_cmaes(gp_ind, toolbox, data_t):
    """Tune the ephemeral constants of a GP individual with CMA-ES.

    Returns the best constant vector found, or None when the individual
    contains no ephemeral constants.
    """
    ephemerals_indxs = collect_ephemeral_indices(gp_ind)  # [(0,5),....(1,4),...]
    ephemerals_vals = [gp_ind[i][j].value for i, j in ephemerals_indxs]
    if not ephemerals_vals:
        return None
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    # A dedicated toolbox so the GP toolbox stays untouched.
    cma_toolbox = base.Toolbox()
    cma_toolbox.register("evaluate", eval_cmaes, gp_ind, ephemerals_indxs,
                         toolbox, data_t)
    strategy = cma.Strategy(ephemerals_vals, sigma=0.1)
    cma_toolbox.register("generate", strategy.generate, creator.Individual)
    cma_toolbox.register("update", strategy.update)
    best = tools.HallOfFame(1)
    run_stats = tools.Statistics(lambda ind: ind.fitness.values)
    for tag, fn in (("avg", np.mean), ("std", np.std),
                    ("min", np.min), ("max", np.max)):
        run_stats.register(tag, fn)
    algorithms.eaGenerateUpdate(cma_toolbox, ngen=250, stats=run_stats,
                                halloffame=best)
    return best[0]
def setup():
    """Build and return the GA (optionally CMA-ES) toolbox from opal_options.

    Registers attribute/individual/population factories, variation operators
    with bound checking, a selection operator chosen by ga_selection_method
    (1 = selBest, 2 = selNSGA2, otherwise selSPEA2), the fitness evaluator,
    and - when ga_CMA is set - CMA-ES generate/update hooks.

    Fix vs. original: the SPEA2 branch registered on `opal_options.toolbox`
    instead of the local toolbox being built, so the selection operator never
    reached the returned toolbox.
    """
    toolbox = base.Toolbox()
    toolbox.register("attribute", space_search_random, MIN, MAX)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attribute,
                     n=len(opal_options.ga_variables) *
                     opal_options.ga_locations_to_study)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("mate", tools.cxOnePoint)
    toolbox.register("mutate", mutate, mutpb=opal_options.ga_mutation_prob)
    toolbox.decorate("mate", checkBounds(MIN, MAX))
    toolbox.decorate("mutate", checkBounds(MIN, MAX))
    if opal_options.ga_selection_method == 1:
        toolbox.register("select", tools.selBest)
    elif opal_options.ga_selection_method == 2:
        toolbox.register("select", tools.selNSGA2)
    else:
        # fix: was 'opal_options.toolbox.register(...)', registering on the
        # wrong object and leaving the local toolbox without "select".
        toolbox.register("select", tools.selSPEA2)
    toolbox.register("evaluate", fitness)
    # To parallelise using SCOOP
    #toolbox.register("map", futures.map)
    if opal_options.ga_CMA:
        # Covariance Matrix Adaptation Evolution Strategy (CMA-ES) [Hansen2001]
        strategy = cma.Strategy(centroid=opal_options.ga_centroid,
                                sigma=opal_options.ga_sigma)
        toolbox.register("generate", strategy.generate, creator.Individual)
        toolbox.register("update", strategy.update)
    return toolbox
def main():
    """Run CMA-ES on the N-dimensional test problem; return the best fitness value."""
    # The cma module draws from numpy's RNG; seed for reproducibility.
    numpy.random.seed(128)
    # Centroid at 5.0 per dimension; see http://www.lri.fr/~hansen/cmaes_inmatlab.html
    # for more details about the rastrigin and other tests for CMA-ES.
    strategy = cma.Strategy(centroid=[5.0] * N, sigma=5.0, lambda_=20 * N)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, fn in (("avg", numpy.mean), ("std", numpy.std),
                      ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, fn)
    #logger = tools.EvolutionLogger(stats.functions.keys())
    # With these settings the algorithm converges with good probability.
    algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof)
    # print "Best individual is %s, %s" % (hof[0], hof[0].fitness.values)
    return hof[0].fitness.values[0]