def cma_es(params):
    global mmin, p_rastrigin
    # The cma module uses the numpy random number generator
    numpy.random.seed(128)
    cc, ccov1, ccovmu = params

    # The CMA-ES algorithm takes a population of one individual as argument.
    # The centroid is set to a vector of 5.0; see http://www.lri.fr/~hansen/cmaes_inmatlab.html
    # for more details about the rastrigin and other tests for CMA-ES
    strategy = cma.Strategy(centroid=[5.0] * N, sigma=5.0, lambda_=20 * N)
    strategy.setParams(cc, ccov1, ccovmu)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    # logger = tools.EvolutionLogger(stats.functions.keys())

    # The CMA-ES algorithm converges with good probability with these settings
    algorithms.eaGenerateUpdate(toolbox, ngen=500, stats=stats, halloffame=hof)

    # print "Best individual is %s, %s" % (hof[0], hof[0].fitness.values)
    if hof[0].fitness.values[0] < mmin:
        mmin = hof[0].fitness.values[0]
        p_rastrigin = params
    return hof[0].fitness.values[0]
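# Several of the snippets here refer to module-level globals (N, toolbox,
# creator.Individual, an "evaluate" registration) that are not shown. The block
# below is a minimal sketch of that shared setup, assuming the rastrigin
# benchmark and a minimisation fitness as in the canonical DEAP CMA-ES example;
# the actual projects may define these differently.
import numpy
from deap import algorithms, base, benchmarks, cma, creator, tools

N = 30  # problem dimensionality (assumed value)

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.rastrigin)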
def find_best_model(strategy, ngen=100, pop_size=300, ind_size=1000, sigma=0.001):
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    pool = multiprocessing.Pool()
    toolbox.register("map", pool.map)
    toolbox.register("evaluate", eval_individual, get_strategy_signal=strategy)

    strategy = cma.Strategy(centroid=[0.5] * ind_size, sigma=sigma, lambda_=pop_size)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("sig/noise", lambda x: np.mean(x) / np.std(x))
    stats.register("std", np.std)
    stats.register("max", np.max)

    algorithms.eaGenerateUpdate(toolbox, ngen=ngen, stats=stats, halloffame=hof)
    # np.int was removed in recent NumPy releases; the builtin int behaves the same here
    return np.round(hof[0]).astype(int)
def main():
    IND_SIZE = 25
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    toolbox.register("indices", random.sample, range(IND_SIZE), IND_SIZE)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.indices)
    # toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    # toolbox.population(n=100)
    # print(" ", toolbox.individual())

    umda = umda_tsp(100)

    toolbox = base.Toolbox()
    toolbox.register("evaluate", umda.evaluate)
    toolbox.register("generate", umda.generate, creator.Individual)
    toolbox.register("update", umda.update)
    # toolbox.register("evaluate", umda.evaluate)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", np.mean)
    # stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    algorithms.eaGenerateUpdate(toolbox, ngen=30, stats=stats)
def main():
    # a fixed seed so the random values are reproducible
    numpy.random.seed(128)

    file1 = open('config.txt', 'r')
    line = file1.readline()
    line = line.strip('\n').strip('\r').split(',')
    pred_link_eval.top = int(line[2])

    strategy = cma.Strategy(centroid=[5.0] * N, sigma=float(line[0]), lambda_=int(line[1]))
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    algorithms.eaGenerateUpdate(toolbox, ngen=int(line[3]), stats=stats,
                                halloffame=hof, verbose=True)
    file1.close()

    output = open('output.txt', 'w')
    for item in hof[0]:
        output.write("%s," % item)
    output.close()
def run(): """Run the evolution.""" if args.verbose and __name__ == '__main__': print "objective: minimise", eval_func.__doc__ if args.seed is not None: np.random.seed(args.seed) hof = tools.HallOfFame(1) stats = tools.Statistics(lambda ind: ind.fitness.values) stats.register("min", np.min) try: algorithms.eaGenerateUpdate(toolbox, ngen=args.generations, stats=stats, halloffame=hof, verbose=True) except KeyboardInterrupt: print 'user terminated early' (score,) = hof[0].fitness.values print 'Score: %.2f $/MWh' % score print 'List:', [max(0, param) for param in hof[0]] set_generators(hof[0]) nem.run(context) context.verbose = True print context if args.transmission: x = context.exchanges.max(axis=0) print np.array_str(x, precision=1, suppress_small=True) f = open('results.json', 'w') obj = {'exchanges': x.tolist(), 'generators': context} json.dump(obj, f, cls=nem.Context.JSONEncoder) f.close()
def main():
    numpy.random.seed(128)

    # population of 50 individuals
    pop = toolbox.population(n=50)
    print("Before\n\n")
    print map(toolbox.check, pop, "")
    print("\n\n")
    print(pop[0])

    # define the CMA strategy: sigma is the step-size and lambda_ the number of offspring
    strategy = cma.Strategy(centroid=pop[0], sigma=3.0, lambda_=5.0)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    # keep the best of the results - here I am keeping everyone from the population for testing
    hof = tools.HallOfFame(50)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    # set the number of generations and stats, and pass in the HallOfFame for CMA-ES to do its work
    algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof)
    print("After\n\n")
    print map(toolbox.check, hof, "")
    print("\n\n")
def cmaES(funcs_l, weights, lambd, mu, var, sigma, ngen):
    creator.create("MaFitness", base.Fitness, weights=weights)
    creator.create("Individual", list, fitness=creator.MaFitness)

    toolbox = base.Toolbox()
    eval_funcs = lambda x: tuple([f(x) for f in funcs_l])
    toolbox.register("evaluate", eval_funcs)

    S.Swarm.controller.rez_params()
    S.model = var
    c = S.extract_genotype()

    logbook = tools.Logbook()

    init_func = lambda c, sigma, size: np.random.normal(c, sigma, size)
    toolbox.register("attr_float", init_func, c, sigma, len(var))
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    strategy = cma.Strategy(centroid=c * len(var), sigma=sigma, lambda_=lambd * len(var))
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    algorithms.eaGenerateUpdate(toolbox, ngen=ngen, stats=stats, halloffame=hof)
    return stats, hof
def run(): """Run the evolution.""" if args.verbose and __name__ == '__main__': print "objective: minimise", eval_func.__doc__ np.random.seed(args.seed) hof = tools.HallOfFame(1) stats_fit = tools.Statistics(lambda ind: ind.fitness.values) stats_hof = tools.Statistics(lambda ignored: hof[0].fitness.values) mstats = tools.MultiStatistics(fitness=stats_fit, hallfame=stats_hof) mstats.register("min", np.min) try: algorithms.eaGenerateUpdate(toolbox, ngen=args.generations, stats=mstats, halloffame=hof, verbose=True) except KeyboardInterrupt: # pragma: no cover print 'user terminated early' context.set_capacities(hof[0]) nemo.run(context) context.verbose = True print print context score, penalty, reason = cost(context) print 'Score: %.2f $/MWh' % score constraints_violated = [] if reason > 0: print 'Penalty: %.2f $/MWh' % penalty print 'Constraints violated:', for label, code in reasons.iteritems(): if reason & code: constraints_violated += [label] print label, print if args.transmission: np.set_printoptions(precision=5) x = context.exchanges.max(axis=0) print np.array_str(x, precision=1, suppress_small=True) obj = {'exchanges': x.tolist(), 'generators': context} with open('results.json', 'w') as f: json.dump(obj, f, cls=nemo.Context.JSONEncoder, indent=True) with open(args.output, 'w') as f: bundle = { 'options': vars(args), 'parameters': [max(0, cap) for cap in hof[0]], 'score': score, 'penalty': penalty, 'constraints_violated': constraints_violated } json.dump(bundle, f) print 'Done'
def rfCMAOptim_helper(train, target, max_evals, lb, ub, population=500):
    """
    Optimizer for hyperOptim. Uses a random forest surrogate along with CMA.

    :param train:
    :param target:
    :param max_evals:
    :param lb:
    :param ub:
    :param population:
    :return:
    """
    train = np.array(train)
    N = train.shape[1]
    model = RandomForestRegressor(n_estimators=(40 + 2 * N))
    model.fit(train, target)

    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    toolbox.register("attr_float", random.random)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_float, n=train.shape[0])

    # both the feasible and the infeasible branches directly access lb and ub
    middle_point = [np.mean(k) for k in zip(lb, ub)]

    def evaluator(individual):
        """Return the surrogate prediction for a feasible individual, otherwise a
        penalty based on the distance to the middle of the bounds."""
        # it is important to test with >= and <=, not > and <;
        # chained numpy comparisons with `and` raise, so test each bound separately
        ind = np.asarray(individual)
        if np.all(ind >= lb) and np.all(ind <= ub):
            return model.predict([individual])[0],
        else:
            return -1.0 * np.sqrt(sum((ind - middle_point) ** 2)),

    toolbox.register("evaluate", evaluator)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    strategy = cma.Strategy(centroid=middle_point, sigma=5.0, lambda_=population)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    hof = tools.HallOfFame(1)

    algorithms.eaGenerateUpdate(toolbox, ngen=50, halloffame=hof, stats=stats)
    return (hof[0], evaluator(hof[0]))
def main():
    # CMA-ES CONFIGURATION
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    strategy = cma.Strategy(
        centroid=[np.random.uniform(-1, 1) for _ in range(IND_SIZE)],
        sigma=5.0,
        lambda_=LAMBDA,
        mu=MU)

    toolbox = base.Toolbox()
    toolbox.register("evaluate", evalFitness)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    hof = tools.HallOfFame(1)
    pop, logbook = algorithms.eaGenerateUpdate(toolbox, ngen=NGEN,
                                               stats=stats, halloffame=hof)
    return pop, logbook, hof
def optimize(self, GEN=100, disp=True):
    pop, logbook = algorithms.eaGenerateUpdate(self.toolbox, ngen=GEN,
                                               stats=self.stats,
                                               halloffame=self.hof,
                                               verbose=disp)
    for i in range(len(logbook)):
        self.logbook.record(**logbook[i])
    return pop, self.logbook
def QAPEA(fName, pobSize, genNums):
    """
    Uses the UMDA algorithm to find an optimum for an instance of the
    quadratic assignment problem.
    """
    createMinRace()
    vertices, mDistance, mFlux = qap.Read_QAP_Instance(fName)
    n, N = vertices, pobSize
    trunc_par = 0.5
    strategy = UMDA(n, N, trunc_par)

    toolbox = base.Toolbox()
    toolbox.register("evaluate", evalQAPEA, mDistance, mFlux, n=n)
    # UMDA generate function
    toolbox.register("generate", strategy.generate, creator.Individual)
    # UMDA update function
    toolbox.register("update", strategy.update)

    # Numpy equality function (operators.eq) between two arrays returns the
    # equality element wise, which raises an exception in the if similar()
    # check of the hall of fame. Using a different equality function like
    # np.array_equal or np.allclose solves this issue.
    hof = tools.HallOfFame(1, similar=np.array_equal)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", np.min)
    stats.register("max", np.max)

    rdo = algorithms.eaGenerateUpdate(toolbox, ngen=genNums, stats=stats,
                                      halloffame=hof, verbose=True)
    evals = [dic['min'] for dic in rdo[1]]
    return (hof[0], evals)
def main(seed):
    random.seed(seed)

    NGEN = 50

    # Initialize the PBIL EDA
    pbil = PBIL(ndim=50, learning_rate=0.3, mut_prob=0.1,
                mut_shift=0.05, lambda_=20)

    toolbox.register("generate", pbil.generate, creator.Individual)
    toolbox.register("update", pbil.update)

    # Statistics computation
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    pop, logbook = algorithms.eaGenerateUpdate(toolbox, NGEN, stats=stats, verbose=True)
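# The PBIL class used above is project code that is not shown. Below is a minimal
# sketch of a population-based incremental learning EDA with the same constructor
# signature and the generate/update ("ask/tell") interface eaGenerateUpdate needs;
# the exact implementation used here may differ.
import random

class PBILSketch(object):
    def __init__(self, ndim, learning_rate, mut_prob, mut_shift, lambda_):
        self.prob_vector = [0.5] * ndim
        self.learning_rate = learning_rate
        self.mut_prob = mut_prob
        self.mut_shift = mut_shift
        self.lambda_ = lambda_

    def generate(self, ind_init):
        # Sample lambda_ binary individuals from the current probability vector
        return [ind_init(int(random.random() < p) for p in self.prob_vector)
                for _ in range(self.lambda_)]

    def update(self, population):
        # Pull the probability vector toward the best individual of the generation
        best = max(population, key=lambda ind: ind.fitness.values[0])
        for i, bit in enumerate(best):
            self.prob_vector[i] = ((1.0 - self.learning_rate) * self.prob_vector[i]
                                   + self.learning_rate * bit)
            # Occasionally mutate the probability vector itself
            if random.random() < self.mut_prob:
                self.prob_vector[i] = ((1.0 - self.mut_shift) * self.prob_vector[i]
                                       + self.mut_shift * random.randint(0, 1))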
def fit(self, X, y):
    strategy = cma.Strategy(
        centroid=[self.mean] * self.n_dim,
        sigma=self.sigma,
    )
    toolbox = self.create_toolbox(X, y)
    toolbox.register("generate", self._generate_pop_with_fitness, strategy.generate)
    toolbox.register("update", strategy.update)
    self.hall_of_fame = tools.HallOfFame(1)

    self.pop, self.logbook = algorithms.eaGenerateUpdate(
        toolbox,
        ngen=self.n_gen,
        stats=self._build_stats(),
        halloffame=self.hall_of_fame,
        verbose=self.verbose
    )
    self.cleanup()
    return self
def train(self, stats, number_generations, checkpoint, cb_before_each_generation=None):
    if self.conf["use_original_cma_trainer"]:
        return orig_algorithms.eaGenerateUpdate(self.toolbox, ngen=number_generations,
                                                halloffame=self.hof, stats=stats)
    else:
        return algorithms.eaGenerateUpdate(self.toolbox, ngen=number_generations,
                                           stats=stats, halloffame=self.hof,
                                           checkpoint=checkpoint,
                                           cb_before_each_generation=cb_before_each_generation)
def run_cma(vertices, edges, alpha, beta, gamma, cb, lambda_=200, generations=250):
    """ Runs a CMA-ES """
    ### SETUP
    x, y, theta, xy, n_v, n_e, args = get_initial_arguments(
        vertices, edges, alpha, beta, gamma, cb)
    fitness_function = make_fitness_function(args)

    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    toolbox.register("evaluate", fitness_function)

    ### RUNNING
    np.random.seed(128)
    N = len(xy)
    strategy = cma.Strategy(centroid=xy, sigma=0.001, lambda_=lambda_)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    algorithms.eaGenerateUpdate(toolbox, ngen=generations, stats=stats, halloffame=hof)
    return hof
def main():
    # toolbox.population(n=100)
    # print(" ", toolbox.individual())
    ehsba_tsp = deap_ehsba_tsp(100)

    toolbox = base.Toolbox()
    toolbox.register("evaluate", ehsba_tsp.evaluate)
    toolbox.register("generate", ehsba_tsp.generate, creator.Individual)
    toolbox.register("update", ehsba_tsp.update)
    # toolbox.register("evaluate", umda.evaluate)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", np.mean)
    # stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    algorithms.eaGenerateUpdate(toolbox, ngen=200, stats=stats)
def main():
    numpy.random.seed()

    # The CMA-ES (1+lambda) algorithm takes an initialized parent as argument
    parent = creator.Individual((numpy.random.rand() * 5) - 1 for _ in range(N))
    parent.fitness.values = toolbox.evaluate(parent)

    strategy = cma.StrategyOnePlusLambda(parent, sigma=5.0, lambda_=10)
    toolbox.register("generate", strategy.generate, ind_init=creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    algorithms.eaGenerateUpdate(toolbox, ngen=200, halloffame=hof, stats=stats)
def CMAOpt(X, Y, Adj):
    Xopt = np.zeros(X.shape)
    Yopt = np.zeros(Y.shape)
    fopt = lambda x: -f(x)
    nevals = X.shape[0] * 50 * 50  # 10*X.shape[1]

    for i in range(X.shape[0]):
        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual", np.ndarray, fitness=creator.FitnessMax)

        toolbox = base.Toolbox()
        toolbox.register("evaluate", f)
        toolbox.decorate("evaluate", tupleize)

        neigh = np.where(Adj[i, :])[0]
        if neigh.shape[0] > 2:
            sigma = 2.0 * ((X[i] - X[neigh]) ** 2).max()
        else:
            sigma = 0.2

        strategy = cma.Strategy(centroid=X[i], sigma=sigma, lambda_=50)  # 10*X.shape[1]
        toolbox.register("generate", strategy.generate, creator.Individual)
        toolbox.register("update", strategy.update)
        toolbox.decorate("generate", checkBounds(f.lb, f.ub))

        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("max", np.max)
        hof = tools.HallOfFame(1, similar=np.array_equal)

        try:
            algorithms.eaGenerateUpdate(toolbox, ngen=100, stats=stats,
                                        halloffame=hof, verbose=False)
            # algorithms.eaGenerateUpdate(toolbox, ngen=50, stats=stats, halloffame=hof, verbose=False)
            Xopt[i, :] = hof[0]
            Yopt[i] = f(hof[0])
        except:
            Xopt[i, :] = X[i, :]
            Yopt[i] = Y[i]

    return Xopt, Yopt, nevals
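# checkBounds above is assumed to exist elsewhere in that project. Below is a minimal
# sketch of a bounds-clipping decorator in the style of the DEAP documentation, applied
# to "generate" so every sampled individual is clipped back into [lb, ub]. It assumes
# scalar bounds; per-dimension bounds would index lb[i] / ub[i] instead.
def checkBoundsSketch(lb, ub):
    def decorator(func):
        def wrapper(*args, **kwargs):
            offspring = func(*args, **kwargs)
            for child in offspring:
                for i in range(len(child)):
                    if child[i] < lb:
                        child[i] = lb
                    elif child[i] > ub:
                        child[i] = ub
            return offspring
        return wrapper
    return decorator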
def main():
    N, LAMBDA = 30, 1000
    MU = int(LAMBDA / 4)
    strategy = EDA(centroid=[5.0] * N, sigma=[5.0] * N, mu=MU, lambda_=LAMBDA)

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.rastrigin)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)

    algorithms.eaGenerateUpdate(toolbox, ngen=150, stats=stats, halloffame=hof)

    return hof[0].fitness.values[0]
def run(self, nGens, nprocs=1):
    # Start worker processes
    if nprocs > 1:
        pool = multiprocessing.Pool(processes=nprocs)
        self.toolbox.register("map", pool.map)

    # Run CMA-ES and store the final results
    self.output = algorithms.eaGenerateUpdate(self.toolbox, ngen=nGens,
                                              stats=self.stats,
                                              halloffame=self.hof,
                                              verbose=True)
def run(self):
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, logbook = algorithms.eaGenerateUpdate(self.toolbox, ngen=200,
                                               stats=stats, halloffame=hof)
    return pop, logbook, hof
def do_cmaes(gp_ind, toolbox, data_t):
    ephemerals_indxs = collect_ephemeral_indices(gp_ind)  # [(0,5),....(1,4),...]
    ephemerals_vals = [gp_ind[indx[0]][indx[1]].value for indx in ephemerals_indxs]
    if not ephemerals_vals:
        return None

    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    c_toolbox = base.Toolbox()
    c_toolbox.register("evaluate", eval_cmaes, gp_ind, ephemerals_indxs, toolbox, data_t)

    strategy = cma.Strategy(ephemerals_vals, sigma=0.1)
    c_toolbox.register("generate", strategy.generate, creator.Individual)
    c_toolbox.register("update", strategy.update)

    hof_2 = tools.HallOfFame(1)
    stats_2 = tools.Statistics(lambda ind: ind.fitness.values)
    stats_2.register("avg", np.mean)
    stats_2.register("std", np.std)
    stats_2.register("min", np.min)
    stats_2.register("max", np.max)

    algorithms.eaGenerateUpdate(c_toolbox, ngen=250, stats=stats_2, halloffame=hof_2)
    return hof_2[0]
def test_cma():
    NDIM = 5

    strategy = cma.Strategy(centroid=[0.0] * NDIM, sigma=1.0)

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.sphere)
    toolbox.register("generate", strategy.generate, creator.__dict__[INDCLSNAME])
    toolbox.register("update", strategy.update)

    pop, _ = algorithms.eaGenerateUpdate(toolbox, ngen=100)
    best, = tools.selBest(pop, k=1)

    assert best.fitness.values < (1e-8,), "CMA algorithm did not converge properly."
def main():
    # The cma module uses the numpy random number generator
    numpy.random.seed(128)

    # The CMA-ES algorithm takes a population of one individual as argument.
    # The centroid is set to a vector of 5.0; see http://www.lri.fr/~hansen/cmaes_inmatlab.html
    # for more details about the rastrigin and other tests for CMA-ES
    strategy = cma.Strategy(centroid=[5.0] * N, sigma=5.0, lambda_=20 * N)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    # logger = tools.EvolutionLogger(stats.functions.keys())

    # The CMA-ES algorithm converges with good probability with these settings
    algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof)

    # print "Best individual is %s, %s" % (hof[0], hof[0].fitness.values)
    return hof[0].fitness.values[0]
def run(): """Run the evolution.""" if args.verbose and __name__ == '__main__': print "objective: minimise", eval_func.__doc__ if args.seed is not None: np.random.seed(args.seed) hof = tools.HallOfFame(1) stats = tools.Statistics(lambda ind: ind.fitness.values) stats.register("min", np.min) algorithms.eaGenerateUpdate(toolbox, ngen=args.generations, stats=stats, halloffame=hof, verbose=True) (score,) = hof[0].fitness.values print 'Score: %.2f $/MWh' % score print 'List:', hof[0] set_generators(hof[0]) nem.run(context) context.verbose = True print context if args.transmission: print context.exchanges.max(axis=0)
def main():
    N, LAMBDA = 30, 1000
    MU = int(LAMBDA / 4)
    strategy = EMNA(centroid=[5.0] * N, sigma=5.0, mu=MU, lambda_=LAMBDA)

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.sphere)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    # Numpy equality function (operators.eq) between two arrays returns the
    # equality element wise, which raises an exception in the if similar()
    # check of the hall of fame. Using a different equality function like
    # numpy.array_equal or numpy.allclose solves this issue.
    hof = tools.HallOfFame(1, similar=numpy.array_equal)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    algorithms.eaGenerateUpdate(toolbox, ngen=150, stats=stats, halloffame=hof)

    return hof[0].fitness.values[0]
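# EMNA above (like the EDA class in the earlier snippet) is a user-supplied strategy.
# Below is a minimal sketch of an estimation-of-multivariate-normal-algorithm strategy
# with the same constructor arguments and the generate/update interface, close to the
# EDA example shipped with DEAP; the exact class used here is not shown.
import numpy
from operator import attrgetter

class EMNASketch(object):
    def __init__(self, centroid, sigma, mu, lambda_):
        self.dim = len(centroid)
        self.centroid = numpy.array(centroid)
        self.sigma = numpy.array(sigma)
        self.lambda_ = lambda_
        self.mu = mu

    def generate(self, ind_init):
        # Sample lambda_ individuals around the current centroid
        arz = self.centroid + self.sigma * numpy.random.randn(self.lambda_, self.dim)
        return [ind_init(row) for row in arz]

    def update(self, population):
        # Keep the mu best individuals and re-estimate the mean and (isotropic) std-dev
        sorted_pop = sorted(population, key=attrgetter("fitness"), reverse=True)
        z = numpy.asarray(sorted_pop[:self.mu]) - self.centroid
        avg = numpy.mean(z, axis=0)
        self.sigma = numpy.sqrt(numpy.sum((z - avg) ** 2) / (self.mu * self.dim))
        self.centroid = self.centroid + avg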
def main():
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(maxsize=THOF)
    stats = None  # no statistics object; None lets the algorithms skip stats cleanly
    if METHODE == 1:
        pop, logbook = algorithms.eaSimple(pop, toolbox, CXPB, MUTPB, NGEN,
                                           halloffame=hof)
    elif METHODE == 2:
        pop, logbook = algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB,
                                                 NGEN, stats, halloffame=hof)
    elif METHODE == 3:
        # pass stats and halloffame by keyword so they bind to the intended parameters
        pop, logbook = algorithms.eaGenerateUpdate(toolbox, NGEN, stats=stats,
                                                   halloffame=hof)
    elif METHODE == 4:
        pop, logbook = algorithms.eaMuCommaLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB,
                                                  NGEN, halloffame=hof)
    return pop, hof, logbook
def optimize(self, expression: base.Expression, problem_size, generations, storages, evaluation_time):
    def evaluate(weights):
        program_generator = self._gp_optimizer.program_generator
        output_path = program_generator._output_path_generated
        program_generator.generate_global_weight_initializations(output_path, weights)
        program_generator.run_c_compiler(output_path)
        runtime, convergence_factor, _ = program_generator.evaluate(
            output_path, infinity=self._gp_optimizer.infinity, number_of_samples=1)
        program_generator.restore_global_initializations(output_path)
        return convergence_factor,

    self._toolbox.register("evaluate", evaluate)
    lambda_ = int(round((4 + 3 * log(problem_size)) * 2))
    if self._gp_optimizer.is_root():
        print("Running CMA-ES", flush=True)

    strategy = cma.Strategy(centroid=[1.0] * problem_size, sigma=0.3, lambda_=lambda_)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    self._toolbox.register("generate", strategy.generate, creator.RelaxationFactors)
    self._toolbox.register("update", strategy.update)
    hof = tools.HallOfFame(1)

    generator = self._gp_optimizer.program_generator
    if generator.run_exastencils_compiler(
            knowledge_path=generator.knowledge_path_generated,
            settings_path=generator.settings_path_generated) != 0:
        raise RuntimeError(
            "Could not initialize code generator for relaxation factor optimization")

    _, logbook = algorithms.eaGenerateUpdate(self._toolbox, ngen=generations,
                                             halloffame=hof, verbose=False, stats=stats)
    if self._gp_optimizer.is_root():
        print(logbook, flush=True)
    return hof[0]
def run_cmaes_p_concat(v, k, r, seed, generations, sig=None):
    suzuki = approx.suzuki_vals(k)
    if sig is None:
        sig = 1e-5 / len(suzuki)
    chain = hchain.HeisenbergChain(len(v), v)
    random.seed(seed)
    np.random.seed(seed)

    # Error from target
    def target_error(ind):
        if NORMALISE:
            norm_ind = norm_f(ind)
        else:
            norm_ind = ind
        final_ind = approx.r_copies(approx.expand_vals(ind), r)
        return approx.error(chain, final_ind, t=2 * chain.n),

    toolbox = base.Toolbox()
    toolbox.register("evaluate", target_error)

    strategy = cma.Strategy(centroid=suzuki, sigma=sig)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, log = algorithms.eaGenerateUpdate(toolbox, ngen=generations, stats=stats,
                                           halloffame=hof, verbose=True)
    return pop, log, hof
def main(N, out_sol_dict):
    '''
    Procedure setting up all the necessary parameters and components for the CMA-ES evolution.

    Parameters:
    -----------
    N: Dimension of the problem (number of variables)
    out_sol_dict: Dictionary to store the results
    '''
    # CMA-ES strategy
    strategy = cma.Strategy(centroid=[5.0] * N, sigma=5.0, lambda_=20 * N)
    # Register the generation and update procedures for the algorithm workflow
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    # Create a container holding the best individual recorded
    hof = tools.HallOfFame(1)
    # Create a statistics object and tell it what you want to monitor
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    # Start the generations and update the population of solutions
    _, logbook = algorithms.eaGenerateUpdate(toolbox, ngen=250, stats=stats, halloffame=hof)
    # Get the best solution and save it
    best_sol = tools.selBest(hof, 1)[0]
    out_sol_dict["solution"] = list(best_sol)
    out_sol_dict["fit"] = float(best_sol.fitness.values[0])
    # Plot convergence
    gen, avg = logbook.select("gen", "avg")
    plt.figure()
    plt.title("Convergence curve")
    plt.xlabel("Generations")
    plt.ylabel("Best obtained fitness value at generation N")
    plt.grid(True)
    plt.plot(gen, avg, "r--")
    plt.savefig("conv.pdf", dpi=600)
def main(seed):
    random.seed(seed)

    NGEN = 50

    # Initialize the PBIL EDA
    pbil = PBIL(ndim=50, learning_rate=0.3, mut_prob=0.1,
                mut_shift=0.05, lambda_=20)

    toolbox.register("generate", pbil.generate, creator.Individual)
    toolbox.register("update", pbil.update)

    # Statistics computation
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("Avg", tools.mean)
    stats.register("Std", tools.std)
    stats.register("Min", min)
    stats.register("Max", max)

    pop = algorithms.eaGenerateUpdate(toolbox, NGEN, stats=stats, verbose=True)
def train(self, maximize_fitness=True):
    # Experiment management - create a logs directory for this run
    description = "centroid_0_sigma_50_ngen_2000_3_episodes"
    if not os.path.exists("logs"):
        os.makedirs("logs")
    this_run_directory_name = type(self.fitness_obj).__name__ + "_" + \
        self.fitness_obj.game_name() + \
        str(datetime.now().strftime("_%Y-%m-%d-%H-%M-")) + description
    this_run_directory_full_path = os.path.join("logs", this_run_directory_name)
    os.mkdir(this_run_directory_full_path)
    shutil.copyfile("TrainEs.py",
                    os.path.join(this_run_directory_full_path, "TrainEs.py"))
    shutil.copyfile("GameFitness.py",
                    os.path.join(this_run_directory_full_path, "GameFitness.py"))
    open(os.path.join(this_run_directory_full_path, "params.txt"),
         'w').write(str(self.params))

    # ES definitions and initialization according to the params
    if maximize_fitness:
        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMax,
                       n=self.fitness_obj.num_features())
    else:
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMin,
                       n=self.fitness_obj.num_features())

    toolbox = base.Toolbox()
    toolbox.register("evaluate", self.fitness_obj.evaluate_task)
    strategy = cma.Strategy(
        centroid=[self.params["centroid"]] * self.fitness_obj.num_features(),
        sigma=self.params["sigma"],
        lambda_=self.params["gen_size_factor"] * self.fitness_obj.num_features())
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("Avg", np.mean)
    stats.register("Std", np.std)
    stats.register("Min", np.min)
    stats.register("Max", np.max)

    start_time = time()
    pop, logbook = algorithms.eaGenerateUpdate(toolbox, ngen=self.params["ngen"],
                                               stats=stats, halloffame=hof)
    elapsed_time = time() - start_time
    print('%.2f seconds' % elapsed_time)
    print(hof)
    print("final fitness: " + str(self.fitness_obj.evaluate_task(hof[0])))
    open(os.path.join(this_run_directory_full_path, "final_model.txt"),
         'w').write(str(hof[0]))
    open(os.path.join(this_run_directory_full_path, "time_to_train.txt"),
         'w').write('%.2f seconds' % elapsed_time)

    # Plot the training process
    plt.plot([stat['Avg'] for stat in logbook])
    plt.plot([stat['Min'] for stat in logbook])
    plt.plot([stat['Max'] for stat in logbook])
    plt.title('Fitness over generations')
    plt.ylabel('Fitness')
    plt.xlabel('Generation')
    plt.legend(['avg', 'min', 'max'], loc='upper left')
    plt.savefig(os.path.join(this_run_directory_full_path, "fitness_graph"))
    plt.show()
    return stats, hof
import multiprocessing

pool = multiprocessing.Pool()
toolbox.register("map", pool.map)

num_genomes_in_hof = 3
hof = evo_utils.HallOfFamePriorityYoungest(num_genomes_in_hof)

stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)

num_gens = 1000
population, logbook = algorithms.eaGenerateUpdate(toolbox, ngen=num_gens,
                                                  stats=stats, halloffame=hof)

# Save video of best agent
save_video(hof[0], dummy_agent, dummy_env, num_steps=200,
           file_name='hof_best_agent.mp4')

# Save best agent
dummy_agent.set_weights(hof[0])
dummy_agent.save_agent(obs_normalise=normalise_obs,
                       domain_params_in_obs=domain_params_in_obs)
parameters_dict.update(experiment_dict)
with open(paramsfile, 'w') as fp:
    json.dump(parameters_dict, fp)

cluster = LocalCluster(n_workers=n_workers)
client = Client(cluster)

def dask_map(func, *seqs, **kwargs):
    results_future = client.map(func, *seqs, **kwargs)
    return client.gather(results_future)

toolbox.register("map", dask_map)

start = time()
pop, logbook = algorithms.eaGenerateUpdate(toolbox, ngen=n_gen,
                                           stats=stats, verbose=True)
end = time()
print(f"Total time taken: {end-start:.2f} seconds")
print("Final Population:\n", *pop, sep='\n')

if strategy.track_fitnesses:
    fig, axes = plt.subplots(figsize=(12, 6))
    axes.plot(np.arange(len(strategy.fitness_max)),
              strategy.fitness_max, label='maximum')
    axes.plot(np.arange(len(strategy.fitness_min)),
              strategy.fitness_min, label='minimum')
    axes.set_title('Fitness Across the Generations')
strategy = cma.Strategy(
    centroid=nolearn_genome,
    sigma=args.sigma
)

hof = tools.HallOfFame(10)

toolbox = base.Toolbox()
toolbox.register("evaluate", fitness_EA)
toolbox.register("generate", strategy.generate, creator.Individual)
toolbox.register("update", strategy.update)
if args.pm:
    toolbox.register("map", map_dask)
elif args.db:
    toolbox.register("map", map_dask_bag)

if __name__ == '__main__':
    start = time()
    pop, logbook = algorithms.eaGenerateUpdate(
        toolbox,
        ngen=args.n_gen,
        stats=stats,
        halloffame=hof,
        verbose=args.verbose
    )
    end = time()
    # print(logbook)
    print("\nEvolutionary algorithm complete with args:")
    print(*map(lambda x: f"{x[0]}:{x[1]}", vars(args).items()))
    print(f"Total time taken: {end-start:.2f}s")
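# map_dask and map_dask_bag above are project helpers that are not shown. One plausible
# sketch of each follows: the futures version assumes a dask.distributed Client named
# `client` (as created in the previous snippet), while the bag version only needs dask.
import dask.bag as db

def map_dask_sketch(func, iterable):
    futures = client.map(func, list(iterable))
    return client.gather(futures)

def map_dask_bag_sketch(func, iterable, npartitions=8):
    return db.from_sequence(list(iterable), npartitions=npartitions).map(func).compute()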
creator.create("Individual", np.ndarray, fitness=creator.FitnessMax) if __name__ == '__main__': n, N = 100, 100 trunc_par = 0.5 strategy = UMDA(n, N, trunc_par) toolbox = base.Toolbox() toolbox.register("evaluate", evalOneMax) toolbox.register("generate", strategy.generate, creator.Individual) toolbox.register("update", strategy.update) # Np equality function (operators.eq) between two arrays returns the # equality element wise, which raises an exception in the if similar() # check of the hall of fame. Using a different equality function like # np.array_equal or np.allclose solve this issue. hof = tools.HallOfFame(1, similar=np.array_equal) stats = tools.Statistics(lambda ind: ind.fitness.values) stats.register("avg", np.mean) stats.register("std", np.std) stats.register("min", np.min) stats.register("max", np.max) algorithms.eaGenerateUpdate(toolbox, ngen=15, stats=stats, halloffame=hof, verbose=True) print(hof[0].fitness.values[0])