def run_evol_alg(self, ea_pars={}, of="mlu", parallel=None, max_cpus=1, save_file=None):
    ## parallel & max_cpus - being ignored for now !!!!
    ## save_file as well
    self.of = of
    popsize = ea_pars.get("pop_size", 100)
    maxevals = ea_pars.get("max_evaluations", 10000)
    localrate = ea_pars.get("local_opt_rate", 0.2)
    numgen = int(maxevals / popsize) + 1

    indsize = len(self.ord_users) * len(self.ord_services)
    if self.opt_rout_weig:
        indsize += self.composite.network.number_edges()

    #lower_limits = [0]*(len(self.ord_users)*len(self.ord_services))
    upper_limits = []
    for u in range(len(self.ord_users)):
        for k in range(len(self.ord_services)):
            upper_limits.append(self.number_servers[k] - 1)
    if self.opt_rout_weig:
        #lower_limits.extend([0]*self.composite.network.number_edges())
        upper_limits.extend([self.maxw] * self.composite.network.number_edges())

    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     self.generate_solution_random)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", self.evaluate_serv_assig)
    # toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mate", tools.cxUniform, indpb=0.05)
    toolbox.register("mutate", self.single_inteligent_mut, localrate=localrate, maxtries=50)
    # toolbox.register("mutate", tools.mutUniformInt, low=1, up=upper_limits, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)

    pop = toolbox.population(n=popsize)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=1.0, ngen=numgen,
                                   stats=stats, halloffame=hof, verbose=True)
    #print(pop)
    #print(log)

    best_sol = hof[0]
    res = {}
    res["popsize"] = popsize
    res["evals"] = maxevals

    if self.opt_rout_weig:
        size_assig = len(self.ord_users) * len(self.ord_services)
        assig = self.decode_serv_assig(best_sol[:size_assig])
        w = self.decode_weights(best_sol[size_assig:])
        res["of_value"] = self.composite.of_normalized_penalty(assig, True, self.mincost, sp_weights=w)
        of = self.composite.of_normalized_penalty(assig, False, self.mincost,
                                                  sp_weights=w, return_dic=True)
        res["of_fh"] = of["fh"]
        res["of_e2e"] = of["e2e"]
        res["of_cost"] = of["cost"]
        res["of_cong"] = of["cong"]
    else:
        assig = self.decode_serv_assig(best_sol)
        res["of_value"] = self.composite.of_normalized_penalty(assig, True, self.mincost)
        of = self.composite.of_normalized_penalty(assig, False, self.mincost, return_dic=True)
        res["of_fh"] = of["fh"]
        res["of_e2e"] = of["e2e"]
        res["of_cost"] = of["cost"]
        if not self.composite.congestion_cost:
            ## reporting congestion even if not used in the optimization
            dem = self.composite.generate_e2e_demands(assig.dic_assig)
            l = Loads(self.composite.network)
            l.add_loads_from_paths(self.composite.del_paths, dem)
            if self.composite.cong_of == "mlu":
                cong = l.mlu()[0]
            elif self.composite.cong_of == "alu":
                cong = l.alu()
            elif self.composite.cong_of == "fortz":
                co = CongestionOptimization(self.composite.network, dem)
                phiu = co.phi_uncap()
                cong = l.fortz_of(phiu) - 1.0
            print("Congestion (not optimized):", cong)
            res["of_cong"] = cong
        else:
            res["of_cong"] = of["cong"]

    print("Objective function value: ", res["of_value"])

    # NEED TO CHECK HOW TO SAVE Logbook to file
    # if save_file is not None:
    #     f = open(save_file, "w")
    #     f.write(log)
    #     f.close()

    return assig, res
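
# --- Hedged sketch (not part of the original snippet) ---
# One way to address the "how to save the Logbook" TODO above: a DEAP Logbook is a
# plain list subclass, so it can be pickled as-is, and str(log) gives the same table
# printed by verbose=True. `save_file` here is the optional argument the method
# currently ignores; this is only an illustrative helper.
def save_logbook_sketch(log, save_file):
    import pickle
    with open(save_file + ".pkl", "wb") as f:
        pickle.dump(log, f)        # full object, reload later with pickle.load()
    with open(save_file + ".txt", "w") as f:
        f.write(str(log))          # human-readable statistics table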
def main(difficulty, p_cross, p_mutate, p_flip, bias, n_pop, n_gen, load_dir=None, start_game=False):
    # combined resolution of all monitors
    # screen_res = 3*1920, 1080
    screen_res = 2560, 1440
    # location of the retry button
    retry_pos = screen_res[0] / 2 + 200, screen_res[1] / 2 + 240
    # somewhere else inside the game window
    click_pos = screen_res[0] / 2 + 280, screen_res[1] / 2 + 240
    # delay added between actions
    click_delay = 0.01

    # path to where the game stores high scores and death logs
    root = Path('/home/j/.wine/drive_c/users/j/Local Settings/Application Data/')
    root = os.path.join(root, 'Chicken_Wings_2020_test_ver_{}'.format(difficulty))
    hiscores = os.path.join(root, 'hiscores.txt')
    deathlog = os.path.join(root, 'deathlog.txt')

    # path to where results will be stored
    results_dir = 'results'
    bot_hiscores = os.path.join(results_dir, 'bot_hiscores.txt')
    bot_deathlog = os.path.join(results_dir, 'bot_deathlog.txt')

    # define fitness
    creator.create('Fitness', base.Fitness, weights=(1., 1.))
    creator.create('Individual', list, fitness=creator.Fitness)

    # register an individual and a population
    t = base.Toolbox()
    t.register('individual', tools.initRepeat, creator.Individual, 0, n=0)
    t.register('population', tools.initRepeat, list, t.individual, n=n_pop)

    # register evolutionary operators
    t.register("mate", cxOnePointBiased, bias=bias)
    t.register("mutate", mutUniformIntBiased, low=-1, up=1, indpb=p_flip, bias=bias)
    t.register("select", tools.selTournament, tournsize=3)
    t.register("evaluate",
               partial(evaluate,
                       click_delay=click_delay,
                       click_pos=click_pos,
                       retry_pos=retry_pos,
                       hiscores=hiscores,
                       deathlog=deathlog,
                       bot_hiscores=bot_hiscores,
                       bot_deathlog=bot_deathlog))

    # create folder to save results
    if not os.path.isdir(results_dir):
        os.mkdir(results_dir)
        open(bot_hiscores, 'w').close()
        open(bot_deathlog, 'w').close()
    res = Results(results_dir)

    # initialise the algorithm
    if load_dir:
        history = load_all(load_dir, creator.Individual)
        res.gen, init_pop = history[-1]
        for gen, pop in history:
            print_stats(pop, gen)
    else:
        init_pop = t.population()

    # start the game
    if start_game:
        exe_path = Path('/home/j/chicken_wings/{}.exe'.format(difficulty))
        game = Thread(target=wine, args=(exe_path,), daemon=True)
        game.start()
        time.sleep(10)

    # begin training
    # positional args map to eaMuPlusLambda(pop, toolbox, mu, lambda_, cxpb, mutpb,
    # ngen, stats, halloffame, verbose)
    ea = algorithms.eaMuPlusLambda
    ea(init_pop, t, n_gen, n_gen, p_cross, p_mutate, n_gen, None, res, False)
def defensegan_ga(fgsm_image, params, netG, z_array):
    initial_population = torch.tensor(np.asarray(z_array), device=device)
    initial_population = initial_population.view(params['r'], params['nz']).numpy()

    def evalFunc(individual):
        individual = torch.from_numpy(individual).view(1, params['nz'], 1, 1)
        fitness = (np.linalg.norm(
            netG(individual).view(28, 28).detach().numpy() -
            fgsm_image.view(28, 28).detach().numpy(), ord=2) ** 2,)  # DEAP expects a tuple
        return fitness

    def initIndividual(icls, content):
        return icls(content)

    def initPopulation(pcls, ind_init):
        return pcls(ind_init(c) for c in initial_population)

    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", np.ndarray, fitness=creator.FitnessMin)  # minimizing the fitness value

    toolbox = base.Toolbox()
    CXPB, MUTPB = 0.95, 0.05
    toolbox.register("attr_float", random.random)
    toolbox.register("individual", initIndividual, creator.Individual)
    toolbox.register("population", initPopulation, list, toolbox.individual)
    toolbox.register("evaluate", evalFunc)
    toolbox.register("mate", tools.cxUniform, indpb=0.1)
    toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=0.1, indpb=0.1)
    toolbox.register("select", tools.selRoulette)

    random.seed(777)

    # pop = toolbox.population(n=POPULATION)
    pop = toolbox.population()

    # print("Start of evolution")
    # Evaluate the entire population
    # print(fitnesses) -> stored as [(84,), (105,), (96,), (104,), (94,), ...]
    fitnesses = list(map(toolbox.evaluate, pop))
    minfit = 1000000.0
    elit = None
    for ind, fit in zip(pop, fitnesses):
        if fit[0] < minfit:
            minfit = fit[0]
            elit = ind
        ind.fitness.values = fit

    # Extracting all the fitnesses of
    fits = [ind.fitness.values[0] for ind in pop]

    # Select the next generation individuals
    # len(pop) -> 50, len(pop[0]) -> 5
    offspring = toolbox.select(pop, len(pop) - 1)
    # Clone the selected individuals
    offspring = [elit] + list(map(toolbox.clone, offspring))

    # Apply crossover and mutation on the offspring
    '''
    they modify those individuals within the toolbox container and we
    do not need to reassign their results.
    '''
    # TODO: gaussian mutation maybe better..
    # want to customize mutation method... there is no proper mutation operator in deap.tools...
    for child1, child2 in zip(offspring[::2], offspring[1::2]):
        if random.random() < MUTPB:
            size = min(len(child1), len(child2))
            for i in range(5):
                cxpoint = random.randint(2, size - 1)
                mtpoint = cxpoint - 1  # mutate the gene at position cxpoint - 1
                beta = random.random()
                child1[mtpoint] = child1[mtpoint] - beta * (child1[mtpoint] - child2[mtpoint])
                child2[mtpoint] = child1[mtpoint] + beta * (child1[mtpoint] - child2[mtpoint])
                # crossover : one point crossover (temporary crossover algorithm)
                # child1[cxpoint:], child2[cxpoint:] = child2[cxpoint:], child1[cxpoint:]
            del child1.fitness.values
            del child2.fitness.values

    for child1, child2 in zip(offspring[::2], offspring[1::2]):
        if random.random() < CXPB:
            toolbox.mate(child1, child2)
            toolbox.mate(child1, child2)
            toolbox.mate(child1, child2)
            toolbox.mate(child1, child2)
            del child1.fitness.values
            del child2.fitness.values

    for mutant in offspring:
        if random.random() < MUTPB:
            toolbox.mutate(mutant)
            del mutant.fitness.values

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
    fitnesses = map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # The population is entirely replaced by the offspring
    pop[:] = offspring

    # Gather all the fitnesses in one list and print the stats
    fits = [ind.fitness.values[0] for ind in pop]
    length = len(pop)
    mean = sum(fits) / length
    sum2 = sum(x * x for x in fits)
    std = abs(sum2 / length - mean ** 2) ** 0.5
    # print("mean:{}, std:{}\n".format(mean, std))

    return [z for z in pop]
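
# --- Hedged alternative sketch (not part of the original snippet) ---
# The block above tracks the best individual by hand in `elit`. DEAP's HallOfFame
# can do the same bookkeeping; with numpy-array individuals it needs a custom
# `similar` comparison, as shown here. This is an illustrative alternative, not
# the author's code.
def track_elite_sketch(pop, fitnesses):
    hof = tools.HallOfFame(1, similar=np.array_equal)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    hof.update(pop)      # keeps the best individual (lowest value under FitnessMin)
    return hof[0]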
def generaGenetico():
    creator.create("FitnessMin", base.Fitness, weights=(-1,))  # -1 because we want to minimize
    creator.create("Individuo", list, fitness=creator.FitnessMin)  # make the fitness be FitnessMin

    toolbox1 = base.Toolbox()
    # each gene of the chromosome is an INT in [0, NUMERO_COLORES-1]
    toolbox1.register("attr_int", random.randint, 0, NUMERO_COLORES - 1)
    toolbox1.register("individuo", tools.initRepeat, creator.Individuo, toolbox1.attr_int, n=LONGITUD_CROMOSOMA)

    mejorSolucionEncontrada = None
    # set slightly above the maximum fitness
    temperatura = LONGITUD_CROMOSOMA * 250 + NUMERO_COLORES

    # the user requested SA (simulated annealing) iterations
    if ITERACIONES_SA > 0:
        poblacion_sa = list()
        for i in range(0, POBLACION_INICIAL):
            mejorSolucionEncontrada, temperatura, estado = simulatedAnnelingGeneraPoblacion(mejorSolucionEncontrada, temperatura)
            poblacion_sa.append(estado)
        for i in range(0, ITERACIONES_SA - 1):
            mejorSolucionEncontrada, temperatura, poblacion_sa = ejecutaSimulatedAnneling(poblacion_sa, mejorSolucionEncontrada, temperatura)

    # the user requested GA iterations
    if ITERACIONES_AG > 0:
        toolbox1.register('población', tools.initRepeat, container=list, func=toolbox1.individuo, n=POBLACION_INICIAL)
        toolbox1.register('evaluate', evaluacion1)
        toolbox1.register('mate', tools.cxOnePoint)
        # mutation changes a gene of the individual to a value between 0 and number of colours - 1
        toolbox1.register('mutate', tools.mutUniformInt, low=0, up=NUMERO_COLORES - 1, indpb=PROB_GENMUT)
        toolbox1.register('select', tools.selTournament, tournsize=3)

        salon_fama1 = tools.HallOfFame(1)
        random.seed(12345)

        if ITERACIONES_SA > 0:
            poblacion_inicial = poblacion_sa
        else:
            poblacion_inicial = toolbox1.población()

        población, registro = algorithms.eaSimple(poblacion_inicial, toolbox1,
                                                  cxpb=PROB_CRUCE,      # probability that two adjacent individuals are crossed
                                                  mutpb=PROB_MUTACION,  # probability that an individual mutates
                                                  ngen=ITERACIONES_AG,  # number of generations
                                                  halloffame=salon_fama1)

    # pick the winner
    if ITERACIONES_AG > 0:
        ganador = salon_fama1[0]
        if ITERACIONES_SA > 0 and evaluacion1(ganador)[0] > evaluacion1(mejorSolucionEncontrada)[0]:
            ganador = mejorSolucionEncontrada
    else:
        ganador = mejorSolucionEncontrada

    print("LONGITUD DEL CROMOSOMA = " + str(LONGITUD_CROMOSOMA))
    print("NÚMERO DE COLORES = " + str(NUMERO_COLORES))
    print("POBLACIÓN INCIAL = " + str(POBLACION_INICIAL))
    print("PROBABILIDAD DE CRUCE = " + str(PROB_CRUCE))
    print("PROBABILIDAD DE MUTACION = " + str(PROB_MUTACION))
    print("La solución se representará como una lista donde la posición indica el vértice y el valor en la lista indicará un tipo de color")
    print('La mejor solución encontrada ha sido:')
    print(ganador)
    print('Individuo con fitness: ' + str(evaluacion1(ganador)[0]))
    return ganador
def run():
    for i in range(number_of_runs):
        ###################################################################
        # EVOLUTIONARY ALGORITHM
        ###################################################################
        # TYPE
        # Create minimizing fitness class w/ single objective:
        creator.create('FitnessMin', base.Fitness, weights=(-1.0,))
        # Create individual class:
        creator.create('Individual', list, fitness=creator.FitnessMin)

        # TOOLBOX
        toolbox = base.Toolbox()
        # Register function to create a number in the interval [1-100?]:
        # toolbox.register('init_params', )
        # Register function to use initRepeat to fill individual w/ n calls to rand_num:
        toolbox.register('individual', tools.initRepeat, creator.Individual,
                         np.random.random, n=number_of_params)
        # Register function to use initRepeat to fill population with individuals:
        toolbox.register('population', tools.initRepeat, list, toolbox.individual)

        # GENETIC OPERATORS:
        # Register evaluate fxn = evaluation function, individual to evaluate given later
        toolbox.register('evaluate', scorefxn_helper)
        # Register mate fxn = two points crossover function
        toolbox.register('mate', tools.cxTwoPoint)
        # Register mutate = bounded polynomial mutation on each gene with probability indpb:
        toolbox.register('mutate', tools.mutPolynomialBounded,
                         eta=0.1, low=0.0, up=1.0, indpb=0.2)
        # Register select = size of tournament set to 3
        toolbox.register('select', tools.selTournament, tournsize=3)

        # EVOLUTION!
        pop = toolbox.population(n=number_of_individuals)
        hof = tools.HallOfFame(1)
        stats = tools.Statistics(key=lambda ind: [ind.fitness.values, ind])
        stats.register('all', np.copy)

        # using built in eaSimple algo
        pop, logbook = algorithms.eaSimple(pop, toolbox,
                                           cxpb=crossover_rate,
                                           mutpb=mutation_rate,
                                           ngen=number_of_generations,
                                           stats=stats, halloffame=hof,
                                           verbose=False)
        # print(f'Run number completed: {i}')

        ###################################################################
        # MAKE LISTS
        ###################################################################
        # Find best scores and individuals in population
        arr_best_score = []
        arr_best_ind = []
        for a in range(len(logbook)):
            scores = []
            for b in range(len(logbook[a]['all'])):
                scores.append(logbook[a]['all'][b][0][0])
            # print(a, np.nanmin(scores), np.nanargmin(scores))
            arr_best_score.append(np.nanmin(scores))
            # logbook entry is of type 'deap.creator.Individual' and must be loaded later
            # don't want to have to load it to view data everytime, thus numpy
            ind_np = np.asarray(logbook[a]['all'][np.nanargmin(scores)][1])
            ind_np_conv = convert_individual(ind_np, arr_conversion_matrix, number_of_params)
            arr_best_ind.append(ind_np_conv)
            # arr_best_ind.append(np.asarray(logbook[a]['all'][np.nanargmin(scores)][1]))

        # print('Best individual is:\n %s\nwith fitness: %s' % (arr_best_ind[-1], arr_best_score[-1]))

        ###################################################################
        # PICKLE
        ###################################################################
        arr_to_pickle = [arr_best_score, arr_best_ind]

        def get_filename(val):
            filename_base = dir_to_use + '/' + stripped_name + '_'
            if val < 10:
                toret = '000' + str(val)
            elif 10 <= val < 100:
                toret = '00' + str(val)
            elif 100 <= val < 1000:
                toret = '0' + str(val)
            else:
                toret = str(val)
            return filename_base + toret + '.pickled'

        counter = 0
        filename = get_filename(counter)
        while os.path.isfile(filename):
            counter += 1
            filename = get_filename(counter)

        pickle.dump(arr_to_pickle, open(filename, 'wb'))
lower_w = -1
sigma = 1
tau = 1 / np.sqrt(n_weights)
mut_prob = 0.1
mate_prob = 0.8

average_pops = []
std_pops = []
best_per_gen = []
player_means = []
best_overall = 0
noimprove = 0

log = tools.Logbook()
tlbx = base.Toolbox()
env.state_to_log()

###############################################################################
################################ Functions ####################################
###############################################################################


# Register and create deap functions and classes
def register_deap_functions():
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", np.ndarray, fitness=creator.FitnessMax, lifepoints=1)
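
# --- Hedged sketch (not part of the original snippet) ---
# The `sigma` and `tau` constants above suggest an ES-style self-adaptive mutation
# step (tau = 1/sqrt(n_weights) is the usual learning rate). This is a hypothetical
# operator illustrating that idea, not the author's implementation; it assumes
# numpy-array individuals as created by register_deap_functions().
def self_adaptive_gauss_mut_sketch(individual, tau):
    # log-normal update of the per-individual step size, then perturb every weight
    individual.sigma = getattr(individual, "sigma", 1.0) * np.exp(tau * np.random.normal())
    individual += np.random.normal(0.0, individual.sigma, size=individual.shape)
    return individual,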
def cmaes(dim, f, y_target=0.0):
    '''Return x for which f(x) is minimal. Stop early when y_target is reached.'''
    if dim < 2 or 10000 < dim:
        print("nparams value is invalid, must be in [2, 10000]")
        return None
    population_size = max(
        math.ceil(4 + 3 * math.log(dim) + 0.5),
        dim // 2)  # dim/2 is result of experiments by Maarten on dim > 100
    population_size *= 2
    ngen = 600  # 25 + int(0.2*dim) # result of experiments by Maarten
    print("dimension of problem space ", dim)
    print("population size ", population_size)
    print("generations ", ngen)
    # random.seed(42)
    # np.random.seed(42)
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    nhops = 10
    for hop in range(nhops):
        centroid = [random.randint(-3, 3) for i in range(dim)]
        strategy = cma.Strategy(centroid=centroid, sigma=0.5, lambda_=population_size)
        toolbox = base.Toolbox()
        toolbox.register("generate", strategy.generate, creator.Individual)
        toolbox.register("update", strategy.update)
        evaluation_count, best_x, best_y = 0, [1] * dim, None
        try:
            for gen in range(ngen):
                population = toolbox.generate()
                if False:
                    for i in range(len(population)):
                        for j in range(dim):
                            population[i][j] = round(population[i][j] * 1000) / 1000
                for x in population:
                    y = f(x)
                    x.fitness.values = (y,)
                    evaluation_count += 1
                    if best_y is None or best_y > y:
                        best_x, best_y = copy.deepcopy(x), copy.deepcopy(y)
                        if False:
                            x_str = ", ".join([
                                f"{xi:.6f}" if xi != round(xi) else f"{int(round(xi))}"
                                for xi in best_x
                            ])
                            print(f"    gen {gen}, f({x_str}) = {best_y:.6f}")
                if best_y <= y_target:
                    break
                toolbox.update(population)
            x_str = ", ".join([
                f"{xi:.12f}" if xi != round(xi) else f"{int(round(xi))}"
                for xi in best_x
            ])
            print(f"evaluations {evaluation_count}: f({x_str}) = {best_y:.12f}")
        except:
            pass
    return best_x, best_y
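
# --- Hedged usage sketch (not part of the original snippet) ---
# Assuming the imports the function above relies on (deap.cma, creator, base,
# math, random, copy), cmaes() can be driven with any scalar objective; the
# sphere function below is only an illustrative stand-in. Not called automatically.
def _cmaes_usage_sketch():
    def sphere(x):
        # convex test function whose minimum value 0 is at the origin
        return sum(xi * xi for xi in x)

    best_x, best_y = cmaes(dim=5, f=sphere, y_target=1e-8)
    print("best point:", best_x, "best value:", best_y)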
def driver(NGEN=NGEN, CXPB=CXPB, MUTPB=MUTPB, POP_SIZE=POP_SIZE):
    # Initialize our fitness function
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    # Set our individual class to be composed of resort maps
    creator.create("Individual", Resort_Map, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    # Register a function to make random maps for population generation
    toolbox.register("individual", gen_algoth.rand_map)
    # Register the population
    toolbox.register("population", tools.initRepeat, list, toolbox.individual, n=POP_SIZE)
    toolbox.register("mate", gen_algoth.cross)
    toolbox.register("mutate", gen_algoth.mutate)
    toolbox.register("select", tools.selTournament, tournsize=5)
    toolbox.register("evaluate", gen_algoth.call_fitness)

    pop = toolbox.population()
    invalid = [ind for ind in pop if ind.fitness is None]
    fitnesses = list(toolbox.map(toolbox.evaluate, invalid))
    for ind, fit in zip(invalid, fitnesses):
        ind.fitness = fit

    total_fitness = 0
    progression_avg = []
    progression_max = []
    for g in range(NGEN):
        sys.stdout.write("\rRunning generation " + str(g) +
                         " Average Fitness: {}".format(total_fitness / POP_SIZE) + "\033[K")
        selected = toolbox.select(pop, POP_SIZE)
        total_fitness = 0
        # Clone selected
        offspring = list(map(toolbox.clone, selected))
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                child1.fitness = None
                child2.fitness = None
        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                mutant.fitness = None
        invalid = [ind for ind in offspring if ind.fitness is None]
        fitnesses = list(toolbox.map(toolbox.evaluate, invalid))
        for ind, fit in zip(invalid, fitnesses):
            ind.fitness = fit
        # Replace the population with the offspring
        pop[:] = offspring
        fitness_list = [ind.fitness for ind in pop]
        total_fitness = sum(fitness_list)
        max_fitness = max(fitness_list)
        progression_avg.append(total_fitness / POP_SIZE)
        progression_max.append(max_fitness)
    print("")

    fittest = pop[0]
    fit = fittest.fitness
    for ind in pop[1:]:
        if ind.fitness > fit:
            fit = ind.fitness
            fittest = ind
    # return the fittest individual (the original returned the loop variable `ind`)
    return (fittest, fit, progression_avg, progression_max)
def optimization(p_data, p_model, p_type, p_params, p_iter):
    """
    optimization with genetic algorithms

    Parameters
    ----------
    p_data: pd.DataFrame
    p_model: str
    p_type: str
        'classification' or 'regression'
    p_params: dict
    p_iter: int

    Returns
    ----------

    References
    ----------
    https://deap.readthedocs.io

    """

    # Delete, if they exist, the genetic algorithm functional classes
    try:
        del creator.FitnessMax_en
        del creator.Individual_en
    except AttributeError:
        pass

    # Initialize genetic algorithm object
    creator.create("FitnessMax_en", base.Fitness, weights=(1.0,))
    creator.create("Individual_en", list, fitness=creator.FitnessMax_en)
    toolbox_en = base.Toolbox()

    # Define how each gene will be generated (e.g. criterion is a random choice from the criterion list).
    toolbox_en.register("attr_ratio", random.choice, p_params['ratio'])
    toolbox_en.register("attr_c", random.choice, p_params['c'])

    # This is the order in which genes will be combined to create a chromosome
    toolbox_en.register("Individual_en", tools.initCycle, creator.Individual_en,
                        (toolbox_en.attr_ratio, toolbox_en.attr_c), n=1)

    # Population definition
    toolbox_en.register("population", tools.initRepeat, list, toolbox_en.Individual_en)

    # -------------------------------------------------------------------------------- Mutation function -- #
    def mutate_en(individual):
        # select which parameter to mutate
        gene = random.randint(0, len(p_params) - 1)
        if gene == 0:
            individual[0] = random.choice(p_params['ratio'])
        elif gene == 1:
            individual[1] = random.choice(p_params['c'])
        return individual,

    # ------------------------------------------------------------------------------ Evaluation function -- #
    def evaluate_en(eva_individual):
        # output of genetic algorithm chromosome
        chromosome = {'ratio': eva_individual[0], 'c': eva_individual[1]}

        # evaluation with fitness metric for classification model
        if p_type == 'classification':
            # model results
            model = logistic_reg(p_data=p_data, p_params=chromosome, p_model=p_model, p_iter=p_iter)
            # fitness measure
            model_fit = model['metrics']['auc']
            # always return a tuple
            return model_fit,

        # evaluation with fitness metric for regression model
        elif p_type == 'regression':
            # model results
            model = ols_reg(p_data=p_data, p_params=chromosome, p_model=p_model, p_iter=p_iter)
            # fitness measure
            model_fit = model['metrics']['r2']
            # always return a tuple
            return model_fit,

        # error in type of model
        else:
            return 'error: invalid type of model'

    toolbox_en.register("mate", tools.cxOnePoint)
    toolbox_en.register("mutate", mutate_en)
    toolbox_en.register("select", tools.selTournament, tournsize=10)
    toolbox_en.register("evaluate", evaluate_en)

    population_size = 50
    crossover_probability = 0.8
    mutation_probability = 0.1
    number_of_generations = 4

    en_pop = toolbox_en.population(n=population_size)
    en_hof = tools.HallOfFame(10)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    # Genetic Algorithm Implementation
    en_pop, en_log = algorithms.eaSimple(population=en_pop, toolbox=toolbox_en,
                                         stats=stats,
                                         cxpb=crossover_probability,
                                         mutpb=mutation_probability,
                                         ngen=number_of_generations,
                                         halloffame=en_hof, verbose=True)

    # transform the deap objects into lists so they can be serialized and stored with pickle
    en_pop = [list(pop) for pop in list(en_pop)]
    en_log = [list(log) for log in list(en_log)]
    en_hof = [list(hof) for hof in list(en_hof)]

    return {'population': en_pop, 'logs': en_log, 'hof': en_hof}
def generate(x, y, points_classes, mode='fast', verbose=False,
             return_fitness_solution=False, return_evolution_data=False,
             NGEN=20, TAM_POPULATION=10, mating_prob=0.5,
             mating_mutation_prob=0.7, mutation_prob=0.1, tournsize=3):
    """
    Generate a color for each point class so that nearby points of different
    classes receive easily distinguishable colors.

    Parameters
    ----------
    x : float list
        list of x components.
    y : float list
        list of y components.
    points_classes : list
        class label of each point.
    mode : string
        Available values: 'fast', 'custom' and 'optimized'. Default value is 'fast'.
    verbose: bool
        Flag for displaying log messages. Default value is False.
    return_fitness_solution: bool
        If true, the function will also return the fitness value of the solution.
        Default value is False.
    return_evolution_data: bool
        If true, the function will also return a list with the best fitness value
        for every generation. Default value is False.
    NGEN: int
        Number of generations in the genetic algorithm. Default value for 'fast'
        mode is 20 and for 'optimized' is 300.
    TAM_POPULATION: int
        Size of the genetic algorithm population. Default value for 'fast' and
        'optimized' modes is 10.
    mating_prob: float
        Probability of mating in the crossover function. Value between 0 and 1.
        Default value for 'fast' and 'optimized' modes is 0.5.
    mating_mutation_prob: float
        Probability of mutation during the crossover function. Value between 0 and 1.
        Default value for 'fast' and 'optimized' modes is 0.7.
    mutation_prob: float
        Probability of mutation in the mutation function. Value between 0 and 1.
        Default value for 'fast' and 'optimized' modes is 0.1.
    tournsize: int
        Size of the tournament in the selection function. Default value for 'fast'
        and 'optimized' modes is 3.

    Returns
    -------
    list : [c0, c1, c2, ..., ck-1]
        List of size K (the number of point classes) with the generated colors in
        RGB space. Each color is a three element list with values inside the
        interval [0-1].

    Notes
    -----
    There are three modes for using this function:
    - 'fast': option by default. Uses default values for algorithm params.
    - 'optimized': same as 'fast', but with more generations. This option is
      indicated for cases with K >= 20. Notice that it will increase execution time.
    - 'custom': it will take the params indicated in the call of the function.

    Fast mode is indicated for 20 different classes or below. It will provide the
    best solution generated in 2.5 s or at least 20 generations. In case there is a
    need for using different params, custom mode allows customization of all
    parameters. If more generations are needed, optimized mode uses 300 generations,
    but that will also notably increase execution time.
    """
    classes = list(OrderedDict.fromkeys(points_classes))
    points_classes = numpy.array([classes.index(i) for i in points_classes])
    points = numpy.array(list(zip(x, y)))
    counter_classes = Counter(points_classes)

    # Number of points and number of classes
    N = points.shape[0]
    K = len(classes)

    if K == 1:
        return randcolor_RGB()

    distances = _distances
    objective = _objective_norm
    if N > 100:
        distances = _distances_centroid
        objective = _objective_cent

    NGEN, TAM_POPULATION, mating_prob, mating_mutation_prob, mutation_prob, tournsize, use_time = _values_params(
        mode, NGEN, TAM_POPULATION, mating_prob, mating_mutation_prob, mutation_prob, tournsize)

    # Configuration for optimization - maximization
    creator.create('FitnessMax', base.Fitness, weights=(1.0,))
    # Individual definition
    creator.create('Individual', list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    toolbox.register('color', randcolor_oRGB)
    toolbox.register('individual', tools.initRepeat, creator.Individual, toolbox.color, n=K)
    toolbox.register('Population', tools.initRepeat, list, toolbox.individual)

    # Geometric distances
    geom_distances = distances(N, K, points, points_classes)

    # Toolbox configuration
    toolbox.register('evaluate', objective, geom_distances, points_classes, counter_classes)
    toolbox.register('select', tools.selTournament, tournsize=tournsize)
    toolbox.register('mate', tools.cxOnePoint)
    toolbox.register('mutate', _mutation, indpb=mutation_prob)

    # Initial population
    population = toolbox.Population(TAM_POPULATION)

    MIN_GEN = NGEN
    gen = 0

    # Genetic Algorithm
    maxims = []
    best_indivs = []
    start_time = time()
    while (use_time and (time() - start_time < 2.5 or gen < MIN_GEN)) or (not use_time and gen < MIN_GEN):
        offspring = algorithms.varAnd(population, toolbox, cxpb=mating_prob, mutpb=mating_mutation_prob)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = toolbox.select(offspring, k=len(population))
        top = tools.selBest(population, k=1)
        top_fitness = objective(geom_distances, points_classes, counter_classes, top[0])
        if verbose:
            print("Generation:", gen, "Best fitness:", top_fitness)
        maxims.append(top_fitness)
        best_indivs.append(top[0])
        gen += 1

    max_fitness = max(maxims)
    winner = best_indivs[maxims.index(max_fitness)]
    winner_RGB = [list(oRGB_to_RGB(color[0], color[1], color[2])) for color in winner]
    array_colores = [winner_RGB[points_classes[i]] for i in range(0, N)]
    evolution_data = [list(range(0, gen)), maxims]

    if return_fitness_solution or return_evolution_data:
        res = list()
        res.append(array_colores)
        if return_fitness_solution:
            res.append(max_fitness)
        if return_evolution_data:
            res.append(evolution_data)
        return res
    return array_colores
def final_result_table(self):
    # problem constants:
    HARD_CONSTRAINT_PENALTY = 10  # the penalty factor for a hard-constraint violation

    # Genetic Algorithm constants:
    POPULATION_SIZE = 300
    P_CROSSOVER = 0.9  # probability for crossover
    P_MUTATION = 0.1   # probability for mutating an individual
    MAX_GENERATIONS = 300
    HALL_OF_FAME_SIZE = 30

    # set the random seed:
    RANDOM_SEED = 42
    random.seed(RANDOM_SEED)

    toolbox = base.Toolbox()

    # create the nurse scheduling problem instance to be used:
    nsp = staffSchedulingProblem(HARD_CONSTRAINT_PENALTY, fileNAME=self.original_file)

    # define a single objective, minimizing fitness strategy:
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))

    # create the Individual class based on list:
    creator.create("Individual", list, fitness=creator.FitnessMin)

    # create an operator that randomly returns an integer in [0, 4]:
    toolbox.register("Integers", random.randint, 0, 4)

    # create the individual operator to fill up an Individual instance:
    toolbox.register("individualCreator", tools.initRepeat, creator.Individual, toolbox.Integers, len(nsp))

    # create the population operator to generate a list of individuals:
    toolbox.register("populationCreator", tools.initRepeat, list, toolbox.individualCreator)

    # fitness calculation
    def getCost(individual):
        return nsp.getCost(individual),  # return a tuple

    toolbox.register("evaluate", getCost)

    # genetic operators:
    toolbox.register("select", tools.selTournament, tournsize=2)
    toolbox.register("mate", tools.cxTwoPoint)
    # toolbox.register("mutate", tools.mutFlipBit, indpb=1.0/len(nsp))
    # note: registering "mutate" twice keeps only the last registration,
    # so mutUniformInt is the operator actually used
    toolbox.register("mutate", tools.mutShuffleIndexes, indpb=1.0 / len(nsp))
    toolbox.register("mutate", tools.mutUniformInt, low=0, up=4, indpb=1.0 / len(nsp))

    # create initial population (generation 0):
    population = toolbox.populationCreator(n=POPULATION_SIZE)

    # prepare the statistics object:
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min)
    stats.register("avg", numpy.mean)

    # define the hall-of-fame object:
    hof = tools.HallOfFame(HALL_OF_FAME_SIZE)

    # perform the Genetic Algorithm flow with hof feature added:
    population, logbook = eaSimpleWithElitism(population, toolbox,
                                              cxpb=P_CROSSOVER,
                                              mutpb=P_MUTATION,
                                              ngen=MAX_GENERATIONS,
                                              stats=stats,
                                              halloffame=hof,
                                              verbose=True)

    # print best solution found:
    best = hof.items[0]
    print("-- Best Individual = ", best)
    print("-- Best Fitness = ", best.fitness.values[0])
    print()
    print("-- Schedule = ")
    table_html_str = nsp.printScheduleInfo(best)

    # # extract statistics:
    # minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
    # # plot statistics:
    # sns.set_style("whitegrid")
    # plt.plot(minFitnessValues, color='red')
    # plt.plot(meanFitnessValues, color='green')
    # plt.xlabel('Generation')
    # plt.ylabel('Min / Average Fitness')
    # plt.title('Min and Average fitness over Generations')
    # plt.show()

    return table_html_str
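
# --- Hedged sketch (not part of the original snippet) ---
# eaSimpleWithElitism() is called above but not defined here. A common implementation
# (e.g. in Eyal Wirsansky's "Hands-On Genetic Algorithms") is DEAP's eaSimple with the
# hall-of-fame members re-injected each generation; this sketch shows that idea and is
# an assumption, not necessarily the author's code. A HallOfFame must be supplied.
def eaSimpleWithElitism_sketch(population, toolbox, cxpb, mutpb, ngen,
                               stats=None, halloffame=None, verbose=True):
    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

    # evaluate the initial population and seed the hall of fame
    invalid = [ind for ind in population if not ind.fitness.valid]
    for ind, fit in zip(invalid, toolbox.map(toolbox.evaluate, invalid)):
        ind.fitness.values = fit
    halloffame.update(population)
    hof_size = len(halloffame.items)

    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid), **record)
    if verbose:
        print(logbook.stream)

    for gen in range(1, ngen + 1):
        # select and vary everyone except the elite slots
        offspring = toolbox.select(population, len(population) - hof_size)
        offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)

        invalid = [ind for ind in offspring if not ind.fitness.valid]
        for ind, fit in zip(invalid, toolbox.map(toolbox.evaluate, invalid)):
            ind.fitness.values = fit

        # elitism: copy the hall-of-fame members back into the offspring
        offspring.extend(halloffame.items)
        halloffame.update(offspring)
        population[:] = offspring

        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid), **record)
        if verbose:
            print(logbook.stream)
    return population, logbook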
def ga(self, pop_count, generations_number, cx_prob, mut_prob,
       mut_change_exam_prob, evaluate_func, available_timeslots, exams,
       timeslot_to_day, timeslot_to_dayslot, print_best, select_method=None,
       meals1_indexes=None, meals2_indexes=None, meals3_indexes=None,
       meals4_indexes=None, meals5_indexes=None, meals=None):
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))  # base.Fitness is a DEAP type
    logging.info('creator.FitnessMax: {}'.format(creator.FitnessMax))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    toolbox.register("random_timeslot", random.randint, 0, max(meals1_indexes))
    toolbox.register("random_timeslot2", random.randint, max(meals1_indexes) + 1, max(meals2_indexes))
    toolbox.register("random_timeslot3", random.randint, max(meals2_indexes) + 1, max(meals3_indexes))
    toolbox.register("random_timeslot4", random.randint, max(meals3_indexes) + 1, max(meals4_indexes))
    toolbox.register("random_timeslot5", random.randint, max(meals4_indexes) + 1, max(meals5_indexes))
    # each random_timeslot* wraps random.randint() with its own pair of bounds,
    # which defines the value range to draw from
    toolbox.register("individual", tools.initCycle, creator.Individual, [
        toolbox.random_timeslot,
        toolbox.random_timeslot2,
        toolbox.random_timeslot3,
        toolbox.random_timeslot4,
        toolbox.random_timeslot5,
    ], n=10)
    a = toolbox.individual()
    # the individual function calls the random_timeslot* generators and stores the
    # results into creator.Individual, repeating the cycle n times
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    # the population is built by repeating toolbox.individual

    # mate/crossover function: pairing and recombining individuals
    toolbox.register("mate", tools.cxOnePoint)
    # cxOnePoint takes a pair of individuals and swaps their tails at a random point
    # (it uses Python's random int internally)

    logging.info('available_timeslots: {}'.format(available_timeslots))
    logging.info('mut_change_exam_prob: {}'.format(mut_change_exam_prob))
    # toolbox.register("mutate", tools.mutUniformInt, low=0, up=1, indpb=mut_change_exam_prob)
    toolbox.register("mutate", tools.mutUniformInt, low=0, up=1, indpb=mut_change_exam_prob)
    # toolbox.register("mutate", tools.mutShuffleIndexes, indpb=mut_change_exam_prob)

    toolbox.register("evaluate", evaluate_func)  # call evaluate_func

    if select_method:
        select_func, select_kwargs = select_method
    else:
        select_func, select_kwargs = tools.selTournament, {'tournsize': 3}
    toolbox.register("select", select_func, **select_kwargs)

    pop = toolbox.population(n=pop_count)
    # randomly pick pop_count individuals: the sample set used to select
    # individuals from; here it has 50 elements, each of length 14
    best_ever = pop[0]
    # Calculate fitness for the first generation: evaluate the individuals and
    # keep the best one found so far
    list_pop = []
    list_best_ever = []
    for ind in pop:
        ind.fitness.values = toolbox.evaluate(ind)
        list_pop.append((ind, ind.fitness.values))
        if ind.fitness.values > best_ever.fitness.values:
            best_ever = toolbox.clone(ind)
    list_pop.sort(key=lambda x: x[1], reverse=True)
    for i in range(50):
        list_best_ever.append(list_pop[i][0])
    # breed from the top 50

    logging.info("Algorithm start")
    for gen in range(generations_number):
        # while best_ever.fitness.values[0] < 0.02:
        offspring = toolbox.select(list_best_ever, len(list_best_ever))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            # offspring[::2]: the elements at even positions 0, 2, 4, 6, ...
            # offspring[1::2]: the elements at odd positions 1, 3, 5, 7, ...
            a = random.random()
            if a < cx_prob:  # cx_prob = 0.2
                toolbox.mate(child1, child2)
                # del removes the now-stale fitness values
                del child1.fitness.values
                del child2.fitness.values

        # Apply mutation on the offspring
        for mutant in offspring:
            self.check_ind(mutant)
            if random.random() < mut_prob:
                # mut_prob = 0.1 limits how often mutation happens
                logging.info('mutant: {}'.format(mutant))
                # toolbox.mutate(mutant)
                self.Mutate_new(mutant, low=0, up=1,
                                indpb=mut_change_exam_prob,
                                meals1_indexes=max(meals1_indexes),
                                meals2_indexes=max(meals2_indexes),
                                meals3_indexes=max(meals3_indexes),
                                meals4_indexes=max(meals4_indexes),
                                meals5_indexes=max(meals5_indexes))
                print('mutant: {}'.format(mutant))
                print('toolbox.mutate(mutant): ',
                      self.Mutate_new(mutant, low=0, up=1,
                                      indpb=mut_change_exam_prob,
                                      meals1_indexes=max(meals1_indexes),
                                      meals2_indexes=max(meals2_indexes),
                                      meals3_indexes=max(meals3_indexes),
                                      meals4_indexes=max(meals4_indexes),
                                      meals5_indexes=max(meals5_indexes)))
                up = available_timeslots - 1
                indpb = mut_change_exam_prob
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness, i.e. those that were
        # changed earlier (by crossover or mutation)
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = list(toolbox.map(toolbox.evaluate, invalid_ind))  # list() so printing does not exhaust the iterator
        print('fitnesses 2: ', fitnesses)
        # compute fitness for those individuals and assign the values back to the
        # corresponding individuals found in invalid_ind
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring
        pop[:] = offspring
        current_best = tools.selBest(pop, k=1)[0]
        if current_best.fitness.values > best_ever.fitness.values:
            best_ever = toolbox.clone(current_best)  # was clone(ind), which copied the wrong individual

    logging.info("Algorithm end")

    # Printing result
    def print_individual(ind):
        logging.info("Printing individual")
        logging.info("Raw:{}".format(ind))
        logging.info("Fitness:{}".format(ind.fitness.values))
        logging.info(toolbox.evaluate(ind, inverse=False))
        logging.info('meals: {}'.format(meals))
        type_food = ''
        for index in ind:
            if meals[index].is_breakfast == 1:
                type_food = 'Sáng: '
            if meals[index].is_brunch == 1:
                type_food = 'Xế: '
            if meals[index].is_soup == 1:
                type_food = 'Canh : '
            if meals[index].is_main_lunch == 1:
                type_food = 'Mặn: '
            if meals[index].is_lunch == 1:
                type_food = 'Tráng miệng: '
            print('Stt: {} Món ăn: {} {}'.format(index, type_food, meals[index].name))

    if print_best:
        print_individual(best_ever)

    detail = self.create({
        'date_start': date.today() + timedelta(days=-date.today().weekday()),
        'date_end': date.today() + timedelta(days=-date.today().weekday() + 4),
    })
    menu_line_env = self.env['menu.automatic.weekly.line']
    menu_food_line = self.env['meal.food.line']
    list_meals = []
    for i in range(5):
        line_weekly = menu_line_env.create({
            'menu_automatic_weekly_id': detail.id,
            'day_in_week': i + 2,
            'breakfast1': meals[best_ever[0 + i * 10]].id,
            'breakfast2': meals[best_ever[5 + i * 10]].id,
            'main_lunch': meals[best_ever[3 + i * 10]].id,
            'soup1': meals[best_ever[2 + i * 10]].id,
            'soup2': meals[best_ever[7 + i * 10]].id,
            'lunch': meals[best_ever[4 + i * 10]].id,
            'tea1': meals[best_ever[1 + i * 10]].id,
            'tea2': meals[best_ever[6 + i * 10]].id,
        })
        list_meals.append(meals[best_ever[0 + i * 10]])
        list_meals.append(meals[best_ever[5 + i * 10]])
        list_meals.append(meals[best_ever[3 + i * 10]])
        list_meals.append(meals[best_ever[2 + i * 10]])
        list_meals.append(meals[best_ever[7 + i * 10]])
        list_meals.append(meals[best_ever[4 + i * 10]])
        list_meals.append(meals[best_ever[1 + i * 10]])
        list_meals.append(meals[best_ever[6 + i * 10]])

    for line in list_meals:
        for nutrition in line.line_ids:
            temp = menu_food_line.create({
                'nutrition_id': nutrition.nutrition_id.id,
                'quantity': nutrition.quantity,
                'protein_a': nutrition.protein_a,
                'protein_v': nutrition.protein_v,
                'lipit_a': nutrition.lipit_a,
                'lipit_v': nutrition.lipit_v,
                'gluco': nutrition.gluco,
                'calo': nutrition.calo,
                'menu_automatic_id': detail.id
            })

    return best_ever.fitness.values[0]
def main(problem: Problem = None, seed=None):
    config = problem.config
    random.seed(seed)

    # DEAP framework setup
    # We define a bi-objective fitness function.
    # 1. Maximize the sparseness minus an amount due to the distance between members
    # 2. Minimize the distance to the decision boundary
    creator.create("FitnessMulti", base.Fitness, weights=config.fitness_weights)
    creator.create("Individual", problem.deap_individual_class(), fitness=creator.FitnessMulti)

    toolbox = base.Toolbox()
    problem.toolbox = toolbox
    # We need to define the individual, the evaluation function (OOBs), mutation
    # toolbox.register("individual", tools.initIterate, creator.Individual)
    toolbox.register("individual", problem.deap_generate_individual)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", problem.deap_evaluate_individual)
    toolbox.register("mutate", problem.deap_mutate_individual)
    toolbox.register("select", tools.selNSGA2)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "min", "max", "avg", "std"

    # Generate initial population.
    log.info("### Initializing population....")
    pop = toolbox.population(n=config.POPSIZE)

    # Evaluate the initial population.
    # Note: the fitness functions are all invalid before the first iteration since they have not been evaluated.
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    problem.pre_evaluate_members(invalid_ind)
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    problem.archive.process_population(pop)

    # This is just to assign the crowding distance to the individuals (no actual selection is done).
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Initialize the archive.
    problem.on_iteration(0, pop, logbook)

    # Begin the generational process
    for gen in range(1, config.NUM_GENERATIONS):
        # invalid_ind = [ind for ind in pop]
        # fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        # for ind, fit in zip(invalid_ind, fitnesses):
        #     ind.fitness.values = fit

        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [ind.clone() for ind in offspring]
        problem.reseed(pop, offspring)

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        to_eval = offspring + pop
        invalid_ind = [ind for ind in to_eval]
        problem.pre_evaluate_members(invalid_ind)
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        problem.archive.process_population(offspring + pop)

        # Select the next generation population
        pop = toolbox.select(pop + offspring, config.POPSIZE)
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)
        problem.on_iteration(gen, pop, logbook)

    return pop, logbook
def run_mo(self, ea_pars={}, display=True):
    popsize = ea_pars.get("pop_size", 100)
    maxevals = ea_pars.get("max_evaluations", 10000)
    # mutprob = ea_pars.get("mut_rate", 0.05)
    localrate = ea_pars.get("local_opt_rate", 0.2)
    algorithm = ea_pars.get("algorithm", "eaMuPlusLambda")
    numgen = int(maxevals / popsize) + 1

    indsize = len(self.ord_users) * len(self.ord_services)
    if self.opt_rout_weig:
        indsize += self.composite.network.number_edges()

    #lower_limits = [0]*(len(self.ord_users)*len(self.ord_services))
    upper_limits = []
    for u in range(len(self.ord_users)):
        for k in range(len(self.ord_services)):
            upper_limits.append(self.number_servers[k] - 1)
    if self.opt_rout_weig:
        #lower_limits.extend([0]*self.composite.network.number_edges())
        upper_limits.extend([self.maxw] * self.composite.network.number_edges())

    if self.composite.congestion_cost:
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0, -1.0))
    else:
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     self.generate_solution_random)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", self.evaluate_serv_mo)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", self.single_inteligent_mut, localrate=localrate, maxtries=50)
    # toolbox.register("mutate", tools.mutUniformInt, low=1, up=upper_limits, indpb=mutprob)
    toolbox.register("select", tools.selNSGA2)
    if algorithm == "spea":
        toolbox.register("select", tools.selSPEA2)

    pop = toolbox.population(n=popsize)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    # stats.register("max", numpy.max, axis=0)

    if algorithm == "eaMuPlusLambda" or algorithm == "spea":
        pop = toolbox.select(pop, len(pop))
        hof = tools.ParetoFront()
        algorithms.eaMuPlusLambda(pop, toolbox, mu=popsize, lambda_=popsize,
                                  cxpb=0.5, mutpb=0.5, stats=stats, ngen=numgen,
                                  halloffame=hof, verbose=display)
        front = numpy.array([ind.fitness.values for ind in hof])
    elif algorithm == "nsga":
        invalid_ind = [ind for ind in pop if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        pop = toolbox.select(pop, len(pop))

        logbook = tools.Logbook()
        logbook.header = "gen", "evals", "avg", "min"
        record = stats.compile(pop)
        logbook.record(gen=0, evals=len(invalid_ind), **record)
        print(logbook.stream)

        for gen in range(1, numgen):
            offspring = tools.selTournamentDCD(pop, len(pop))
            offspring = [toolbox.clone(ind) for ind in offspring]
            for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
                if random.random() <= 0.9:
                    toolbox.mate(ind1, ind2)
                toolbox.mutate(ind1)
                toolbox.mutate(ind2)
                del ind1.fitness.values, ind2.fitness.values
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit
            pop = toolbox.select(pop + offspring, len(pop))
            record = stats.compile(pop)
            logbook.record(gen=gen, evals=len(invalid_ind), **record)
            print(logbook.stream)
        front = numpy.array([ind.fitness.values for ind in pop])

    print(front)
    return pop, stats, front
def SPEA2():
    creator.create("FitnessMax", base.Fitness, weights=(-1.0, -1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)  #@UndefinedVariable

    toolbox = base.Toolbox()
    toolbox.register("attr_float", my_rand)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_float, n=20)  #@UndefinedVariable
    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("mate", tools.cxSimulatedBinaryBounded, eta=0.5, low=U, up=V)
    toolbox.register("mutate", tools.mutPolynomialBounded, eta=0.5, low=U, up=V, indpb=1)
    toolbox.register("select", tools.selSPEA2)
    # toolbox.register("select", tools.selNSGA2)
    # binary tournament selection
    toolbox.register("selectTournament", tools.selTournament, tournsize=2)

    # Step 1 Initialization
    pop = toolbox.population(n=N)
    archive = []
    curr_gen = 1

    while True:
        # Step 2 Fitness assignment
        for ind in pop:
            ind.fitness.values = toolbox.evaluate(ind)
        for ind in archive:
            ind.fitness.values = toolbox.evaluate(ind)

        # Step 3 Environmental selection
        archive = toolbox.select(pop + archive, k=Nbar)

        # Step 4 Termination
        if curr_gen >= GEN:
            final_set = archive
            break

        # Step 5 Mating Selection
        mating_pool = toolbox.selectTournament(archive, k=N)
        offspring_pool = list(map(toolbox.clone, mating_pool))  # list() so it can be sliced below

        # Step 6 Variation
        # crossover 100% and mutation 6%
        for child1, child2 in zip(offspring_pool[::2], offspring_pool[1::2]):
            toolbox.mate(child1, child2)
        for mutant in offspring_pool:
            if random.random() < 0.06:
                toolbox.mutate(mutant)

        pop = offspring_pool

        print("gen", curr_gen)
        curr_gen += 1

    # x, y = [], []
    # for ind in pop:
    #     x.append(ind.fitness.values[0])
    #     y.append(ind.fitness.values[1])
    # pylab.scatter(x, y)
    # pylab.show()

    x, y = [], []
    for ind in final_set:
        x.append(ind.fitness.values[0])
        y.append(ind.fitness.values[1])
    return x, y
#
# Evaluation parameters
#
NUM_JOINTS = num_joints
NDIM = 3
AMP, FRQ, PHS = 0, 1, 2
TOURN_SIZE = 3

#
# Register individual creator
#  - amplitude : [0.1 1.5]
#  - frequency : [0.1 3.0]
#  - phase     : [0.0 2*pi]
#
tb = base.Toolbox()
BOUND_LO = [0.1] + [0.1] + [0.0]
BOUND_HI = [1.5] + [3.0] + [math.pi * 2]


#
# Create individual object (array of floating point values)
#
def uniform(lo, hi):
    """Return a list of uniform values given the lists of hi and lo bounds."""
    return [random.uniform(a, b) for a, b in zip(lo, hi)]


tb.register("attr_uniform", uniform, BOUND_LO, BOUND_HI)
tb.register("individual", tools.initIterate, creator.Individual, tb.attr_uniform)
#
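
#
# Hedged sketch (not part of the original snippet): the "individual" registration
# above refers to creator.Individual, but the creator.create(...) calls are not
# shown here. A setup consistent with that registration could look like the
# following; treating this as a maximization problem is an assumption, and these
# calls would need to run before the registrations above.
#
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
tb.register("population", tools.initRepeat, list, tb.individual)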
def init_individual(index, columns, initializer=None):
    """
    Initializes the networks belonging to each individual as a pandas DataFrame.
    This is what the similarities are calculated against.

    Parameters:
    :param index: the index names that you want
    :param columns: the column names you want
    :param initializer: determines if individuals are random or initialized;
        if True, initialized from the initializer input, else random

    :Returns: pandas DataFrame
    """
    ind = pd.DataFrame(0, index=index, columns=columns)
    if initializer is not None:
        # sets up the DataFrame with the initializer data
        ind.loc[:, 2:] = initializer.loc[:, 1:]
        ind.loc[:, 'in'] = initializer.loc[:, 'in']
        # sets the age
        for i in index:
            if ind.loc[i, 'in'] != 0:
                ind.loc[i, 'age'] = 1
            else:
                ind.loc[i, 'age'] = 0
            # randomly flips a company in or out of the system
            if random.random() < 0.05:
                if ind.loc[i, 'in'] == 0:
                    ind.loc[i, 'in'] = 1
                    ind.loc[i, 'age'] = 1
                    for j in index:
                        if i == j:
                            ind.loc[i, j] = 0
                        else:
                            if random.random() < 0.2:
                                ind.loc[i, j] = 1
                                ind.loc[j, i] = 1
                else:
                    ind.loc[i, :] = 0
                    ind.loc[:, i] = 0
            # randomly flips correlations
            if ind.loc[i, 'in'] == 1:
                for j in index:
                    if random.random() < 0.05 and i != j:
                        ind.loc[i, j] = abs(ind.loc[i, j] - 1)
                        ind.loc[j, i] = ind.at[i, j]
    else:
        for i in index:
            # randomly places companies in or out of the network
            if random.random() < 0.2:
                ind.loc[i, 'in'] = 1
                ind.loc[i, 'age'] = 1
            # randomly assigns correlations for companies in the network
            if ind.loc[i, 'in'] == 1:
                for j in index:
                    if i == j:
                        ind.loc[i, j] = 0
                    else:
                        if random.random() < 0.2:  # the original was missing this colon
                            ind.loc[i, j] = 1
                            ind.loc[j, i] = ind.at[i, j]
    ind = ind.fillna(0)  # fillna() is not in-place, so keep the returned frame
    return ind


class Individual(object):
    """
    Creates the individual class.

    init parameters:
    :param index: the index names that you want
    :param columns: the column names you want
    :param initializer: determines if individuals are random or initialized;
        if True, initialized from the initializer input, else random

    Attributes:
    :attr network: adjacency network
    :attr age: age of each individual

    :Returns: list of n-sized chunks
    """

    def __init__(self, index, columns, initializer=None):
        self.network = init_individual(index, columns, initializer)
        self.age = 1


# structuring initializers
# FitnessMax => weights fitness by whoever is highest
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
## WITHOUT INITIALIZATION ##
creator.create("Individual", Individual, fitness=creator.FitnessMax)
## WITH INITIALIZATION ##
# creator.create("Individual", Individual, fitness=creator.FitnessMax, comparisons[0])

toolbox = base.Toolbox()
# creates the population
toolbox.register("individual", creator.Individual, stocks, full_cols, comparisons[0])
toolbox.register("population", tools.initRepeat, list, toolbox.individual)


def eval_fitness(individual, comparison):
    """
    Evaluates the fitness of each individual, in terms of similarity.

    Parameters:
    :param individual: the individual to be evaluated
    :param comparison: the reference that similarity will be calculated against

    :Returns: float with similarity measure in range [0,1]
    """
    adjacency = individual.network.loc[:, ['in'] + stocks]
    for i in individual.network.index:
        individual.network.loc[i, 'age'] += 1
    return jaccard(adjacency, comparison),


def mut_individual(individual, pexist):
    """
    Mutates the individual network

    init parameters:
    :param individual: individual to be mutated
    :param pexist: the probability of adding/removing a company from the network

    the pseudo-code goes as follows:
        for each stock, with probability pexist
            switch in/out
            if now out, reset everything to 0
            if now in, randomly initialize adjacencies and set age=1
        choose 10 adjacency terms from stocks that are in, and change them
        reset the ages

    :Returns: new individual network with age reset
    """
    network = individual.network
    for i in network.index.values:
        age = network.loc[i, 'age']
        if random.random() < AGEDEP(age, pexist):
            if network.loc[i, 'in'] == 1:
                network.loc[i, :] = 0
                network.loc[:, i] = 0
            if network.loc[i, 'in'] == 0:
                network.loc[i, 'in'] = 1
                network.loc[i, 'age'] = 1
                for j in network.columns.values[2:]:
                    if random.random() < 0.1 and i != j:
                        network.loc[i, j] = 1
                        network.loc[j, i] = network.at[i, j]
    relevant = network.loc[network['in'] == 1]
    for _ in range(10):
        i = random.choice(relevant.index.values)
        j = random.choice(relevant.columns.values[2:])
        network.loc[i, j] = abs(network.at[i, j] - 1)
        network.loc[j, i] = network.at[i, j]
        if network.loc[i][1:].sum() == 0:
            network.loc[i, 'in'] = 0
            network.loc[i, 'age'] = 0
    individual.network = network
    individual.age = 1
    return individual,


def cross_over(ind1, ind2):
    """
    Crossing-over with 2 individuals

    init parameters:
    :param ind1: individual 1
    :param ind2: individual 2

    the pseudo-code goes as follows:
        randomly choose a crossing-over point
        swap elements [0:cx, 0:cx] from the 2 individuals
        reset the ages

    :Returns: new individual network with age reset
    """
    network1 = ind1.network
    network2 = ind2.network
    size = min(len(network1.index), len(network2.index))
    cx = random.randint(1, size - 1)
    temp = network1.copy()
    temp.iloc[:cx, :cx] = network2.iloc[:cx, :cx]
    network2.iloc[:cx, :cx] = network1.iloc[:cx, :cx]
    network1 = temp
    ind1.network = network1
    ind2.network = network2
    ind1.age = 1
    ind2.age = 1
    return ind1, ind2


# set the DEAP equations
toolbox.register("evaluate", eval_fitness)
toolbox.register("mutate", mut_individual, pexist=0.3)
toolbox.register("select", tools.selBest)
toolbox.register("select2", tools.selTournament, tournsize=4)
toolbox.register("mate", cross_over)


def main():
    """
    Genetic algorithm

    the pseudo-code goes as follows:
        initialize the population and parameters
        initialize the statistics desired
        GA_algorithm:
            calculate fitnesses and print statistics
            for NGEN generations:
                create the offspring
                re-evaluate fitnesses and select population
                update ages of individuals
                print statistics
            return the max-fitness and oldest individuals at the end

    :Returns: new individual network with age reset
    """
    NGEN = len(comparisons)
    MU = 50
    CXPB = 0.5
    MUTPB = 0.5

    pop = toolbox.population(n=MU)
    # hof = tools.HallOfFame(15)
    hof = None
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    # stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    def GA_algorithm(population, toolbox, cxpb, mutpb, ngen, stats=None,
                     halloffame=None, verbose=__debug__):
        # original algorithm
        logbook = tools.Logbook()
        logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

        fitnesses = toolbox.map(toolbox.evaluate, population,
                                [comparisons[0]] * len(population))
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
        if halloffame is not None:
            halloffame.update(population)
        record = stats.compile(population) if stats else {}
        logbook.record(gen=0, nevals=len(population), **record)
        if verbose:
            print(logbook.stream)

        for g in range(1, NGEN):
            # Vary the pool of individuals
            offspring = algorithms.varAnd(population, toolbox, cxpb, mutpb)

            # Evaluate the individuals with an invalid fitness
            fitnesses = list(toolbox.map(toolbox.evaluate, population + offspring,
                                         [comparisons[g]] * len(population + offspring)))  # list() so it can be reused after the loop
            for ind, fit in zip(population + offspring, fitnesses):
                ind.fitness.values = fit
            if halloffame is not None:
                halloffame.update(offspring)

            population[:10] = toolbox.select(population + offspring, 10)
            population[10:] = toolbox.select2(population + offspring, 40)
            for ind in population:
                ind.age += 1

            # Append the current generation statistics to the logbook
            record = stats.compile(population) if stats else {}
            logbook.record(gen=g, nevals=len(population + offspring), **record)
            if verbose:
                print(logbook.stream)

        ages = [ind.age for ind in population]
        max_age = max(ages)
        oldest_pop = [i.network for i in population if i.age == max_age]
        oldest_pop[0].to_csv('oldest_GA_init.csv')

        max_fit = max(fitnesses)
        max_pop = [i.network for i in population if i.fitness.values == max_fit]
        max_pop[0].to_csv('max_pop_GA_init.csv')
        return population, logbook

    GA_algorithm(pop, toolbox, cxpb=CXPB, mutpb=MUTPB, ngen=NGEN, stats=stats,
                 halloffame=hof)

    return pop, stats, hof


if __name__ == "__main__":
    main()
def _fit(self, X, y): X, y = check_X_y(X, y, "csr") # Initialization cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) scorer = check_scoring(self.estimator, scoring=self.scoring) n_features = X.shape[1] if self.max_features is not None: if not isinstance(self.max_features, numbers.Integral): raise TypeError( "'max_features' should be an integer between 1 and {} features." " Got {!r} instead.".format(n_features, self.max_features)) elif self.max_features < 1 or self.max_features > n_features: raise ValueError( "'max_features' should be between 1 and {} features." " Got {} instead.".format(n_features, self.max_features)) max_features = self.max_features else: max_features = n_features if not isinstance(self.n_gen_no_change, (numbers.Integral, np.integer, type(None))): raise ValueError( "'n_gen_no_change' should either be None or an integer." " {} was passed.".format(self.n_gen_no_change)) estimator = clone(self.estimator) # Genetic Algorithm toolbox = base.Toolbox() toolbox.register("individual", _createIndividual, creator.Individual, n=n_features, max_features=max_features) toolbox.register("population", tools.initRepeat, list, toolbox.individual) toolbox.register("evaluate", _evalFunction, estimator=estimator, X=X, y=y, cv=cv, scorer=scorer, fit_params=self.fit_params, max_features=max_features, caching=self.caching, scores_cache=self.scores_cache) toolbox.register("mate", tools.cxUniform, indpb=self.crossover_independent_proba) toolbox.register("mutate", tools.mutFlipBit, indpb=self.mutation_independent_proba) toolbox.register("select", tools.selTournament, tournsize=self.tournament_size) if self.n_jobs == 0: raise ValueError("n_jobs == 0 has no meaning.") elif self.n_jobs > 1: pool = multiprocessing.Pool(processes=self.n_jobs) toolbox.register("map", pool.map) elif self.n_jobs < 0: pool = multiprocessing.Pool( processes=max(cpu_count() + 1 + self.n_jobs, 1)) toolbox.register("map", pool.map) pop = toolbox.population(n=self.n_population) hof = tools.HallOfFame(1, similar=np.array_equal) stats = tools.Statistics(lambda ind: ind.fitness.values) stats.register("avg", np.mean, axis=0) stats.register("std", np.std, axis=0) stats.register("min", np.min, axis=0) stats.register("max", np.max, axis=0) if self.verbose > 0: print("Selecting features with genetic algorithm.") _, log = _eaFunction(pop, toolbox, cxpb=self.crossover_proba, mutpb=self.mutation_proba, ngen=self.n_generations, ngen_no_change=self.n_gen_no_change, stats=stats, halloffame=hof, verbose=self.verbose) if self.n_jobs != 1: pool.close() pool.join() # Set final attributes support_ = np.array(hof, dtype=np.bool)[0] self.estimator_ = clone(self.estimator) self.estimator_.fit(X[:, support_], y) self.generation_scores_ = np.array( [score for score, _ in log.select("max")]) self.n_features_ = support_.sum() self.support_ = support_ return self
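# The GA feature-selection routine above registers _createIndividual, _evalFunction
# and _eaFunction, which are defined elsewhere in that package. As a hedged,
# hypothetical sketch (not the package's actual code), an individual factory
# consistent with the registration call could look like this: a boolean mask of
# length n with at most max_features positions switched on.
import random

import numpy as np


def _createIndividual(icls, n, max_features):
    n_selected = random.randint(1, max_features)         # how many features to enable
    genome = np.zeros(n, dtype=bool)
    genome[random.sample(range(n), n_selected)] = True    # pick their positions
    return icls(genome)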
def run_ga_optimization(self, optimization_setting: OptimizationSetting, population_size=100, ngen_size=30, output=True):
    """"""
    # Get optimization setting and target
    settings = optimization_setting.generate_setting_ga()
    target_name = optimization_setting.target_name

    if not settings:
        self.output("Optimization parameter space is empty, please check")
        return

    if not target_name:
        self.output("Optimization target is not set, please check")
        return

    # Define parameter generation function
    def generate_parameter():
        """"""
        return random.choice(settings)

    def mutate_individual(individual, indpb):
        """"""
        size = len(individual)
        paramlist = generate_parameter()
        for i in range(size):
            if random.random() < indpb:
                individual[i] = paramlist[i]
        return individual,

    # Expose the settings as module-level globals consumed by ga_optimize
    global ga_target_name
    global ga_strategy_class
    global ga_setting
    global ga_vt_symbol
    global ga_interval
    global ga_start
    global ga_rate
    global ga_slippage
    global ga_size
    global ga_pricetick
    global ga_capital
    global ga_end
    global ga_mode
    global ga_inverse

    ga_target_name = target_name
    ga_strategy_class = self.strategy_class
    ga_setting = settings[0]
    ga_vt_symbol = self.vt_symbol
    ga_interval = self.interval
    ga_start = self.start
    ga_rate = self.rate
    ga_slippage = self.slippage
    ga_size = self.size
    ga_pricetick = self.pricetick
    ga_capital = self.capital
    ga_end = self.end
    ga_mode = self.mode
    ga_inverse = self.inverse

    # Set up the genetic algorithm
    toolbox = base.Toolbox()
    toolbox.register("individual", tools.initIterate, creator.Individual, generate_parameter)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", mutate_individual, indpb=1)
    toolbox.register("evaluate", ga_optimize)
    toolbox.register("select", tools.selNSGA2)

    total_size = len(settings)
    pop_size = population_size          # number of individuals in each generation
    lambda_ = pop_size                  # number of children to produce at each generation
    mu = int(pop_size * 0.8)            # number of individuals to select for the next generation

    cxpb = 0.95         # probability that an offspring is produced by crossover
    mutpb = 1 - cxpb    # probability that an offspring is produced by mutation
    ngen = ngen_size    # number of generations

    pop = toolbox.population(pop_size)
    hof = tools.ParetoFront()           # end result of pareto front

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    np.set_printoptions(suppress=True)
    stats.register("mean", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)

    # Multiprocessing is not supported yet.
    # pool = multiprocessing.Pool(multiprocessing.cpu_count())
    # toolbox.register("map", pool.map)

    # Run ga optimization
    self.output(f"Parameter optimization space: {total_size}")
    self.output(f"Population size per generation: {pop_size}")
    self.output(f"Number of individuals selected: {mu}")
    self.output(f"Number of generations: {ngen}")
    self.output(f"Crossover probability: {cxpb:.0%}")
    self.output(f"Mutation probability: {mutpb:.0%}")

    start = time()

    algorithms.eaMuPlusLambda(
        pop,
        toolbox,
        mu,
        lambda_,
        cxpb,
        mutpb,
        ngen,
        stats,
        halloffame=hof
    )

    end = time()
    cost = int((end - start))

    self.output(f"GA optimization finished, time cost: {cost} seconds")

    # Return result list
    results = []

    for parameter_values in hof:
        setting = dict(parameter_values)
        target_value = ga_optimize(parameter_values)[0]
        results.append((setting, target_value, {}))

    return results
def parameter_tuning(X_train, feat_vec, seizure_times, f_s, window_length, window_overlap, num_channels=145): #creating types creator.create("FitnessMax", base.Fitness, weights=(1.0, )) creator.create("Individual", list, fitness=creator.FitnessMax) toolbox = base.Toolbox() #defining genes #ranges are given by Gardner paper t_per_min = 10 * f_s t_per_max = 200 * f_s MIN = np.tile([0.02, 0.25, 0.3, 0, 10, t_per_min], num_channels) MAX = np.tile([.2, 10, 1, 1, 100, t_per_max], num_channels) toolbox.register("attr_v", random.uniform, MIN[0], MAX[0]) toolbox.register("attr_g", random.uniform, MIN[1], MAX[1]) toolbox.register("attr_p", random.uniform, MIN[2], MAX[2]) toolbox.register("attr_w", random.uniform, MIN[3], MAX[3]) toolbox.register("attr_N", random.uniform, MIN[4], MAX[4]) toolbox.register("attr_T", random.uniform, MIN[5], MAX[5]) #defining an individual as a group of the four genes toolbox.register("individual", tools.initCycle, creator.Individual, (toolbox.attr_v, toolbox.attr_g, toolbox.attr_p, toolbox.attr_w, toolbox.attr_N, toolbox.attr_T), num_channels) #defining the population as a list of individuals toolbox.register("population", tools.initRepeat, list, toolbox.individual) toolbox.decorate("population", within_constraints(MIN, MAX)) # register the fitness function toolbox.register( "evaluate", lambda x: fitness_fn(x, X_train, feat_vec, seizure_times, f_s, window_length, window_overlap, num_channels)) # register the crossover operator # other options are: cxOnePoint, cxUniform (requires an indpb input, probably can just use CXPB) # there are others, more particular than these options toolbox.register("mate", tools.cxTwoPoint) toolbox.decorate("mate", within_constraints(MIN, MAX)) # register a mutation operator with a probability to mutate of 0.05 # can change: mu, sigma, and indpb # there are others, more particular than this toolbox.register("mutate", tools.mutGaussian, mu=1, sigma=10, indpb=0.03) toolbox.decorate("mutate", within_constraints(MIN, MAX)) # operator for selecting individuals for breeding the next generation # other options are: tournament: randonly picks tournsize out of population, chosses fittest, and has that be # a parent. continues until number of parents is equal to size of population. 
    # there are others, more particular than this
    toolbox.register("select", tools.selTournament, tournsize=3)
    # toolbox.register("select", tools.selRoulette)

    # create an initial population of size 20
    pop = toolbox.population(n=20)

    # CXPB is the probability with which two individuals are crossed
    CXPB = 0.3
    # MUTPB is the probability for mutating an individual
    MUTPB = 0.5
    # NGEN is the number of generations until final parameters are picked
    NGEN = 40

    print("Start of evolution")

    # find the fitness of every individual in the population
    fitnesses = list(map(toolbox.evaluate, pop))
    # assigning each fitness to the individual it represents
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    # defining variables to keep track of the best individuals throughout the species
    best_species_genes = tools.selBest(pop, 1)[0]
    best_species_value = best_species_genes.fitness.values
    best_gen = 0
    next_mean = 1
    prev_mean = 0

    # start evolution
    for g in range(NGEN):
        if abs(next_mean - prev_mean) > 0.005:
            prev_mean = next_mean

            # Select the next generation's parents
            parents = toolbox.select(pop, len(pop))

            # Clone the parents and call them offspring: crossover and mutation will be performed below
            offspring = list(map(toolbox.clone, parents))

            # Apply crossover to children in offspring with probability CXPB
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                # cross two individuals with probability CXPB
                if random.random() < CXPB:
                    toolbox.mate(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values

            # Apply mutation to children in offspring with probability MUTPB
            for mutant in offspring:
                if random.random() < MUTPB:
                    toolbox.mutate(mutant)
                    del mutant.fitness.values

            # Find the fitnesses for all the children for whom fitness changed;
            # list() so the fitnesses survive the zip and can be reused below
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = list(map(toolbox.evaluate, invalid_ind))
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            # Offspring becomes the new population
            pop[:] = offspring

            # updating best species values
            if fitnesses and max(fitnesses) > best_species_value:
                best_species_genes = tools.selBest(pop, 1)[0]
                best_species_value = best_species_genes.fitness.values
                best_gen = g
                # best_next_obj = max(fitnesses)

            fits = [ind.fitness.values[0] for ind in pop]
            length = len(pop)
            next_mean = sum(fits) / length

    best_ind_finalgen = tools.selBest(pop, 1)[0]
    print("Best individual in final population is %s with fitness value %s" %
          (best_ind_finalgen, best_ind_finalgen.fitness.values))
    print("Best individual in species is %s and occurred during generation %s with fitness %s" %
          (best_species_genes, best_gen, best_species_value))

    # will return v for all channels, g for all channels, p for all channels, weights for all
    # channels, adapt_rate for all channels, and Tper for all channels
    return best_species_genes[0::6], best_species_genes[1::6], best_species_genes[2::6], \
        best_species_genes[3::6], best_species_genes[4::6], best_species_genes[5::6]
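# parameter_tuning above decorates its operators with within_constraints(MIN, MAX),
# which is not shown here. A minimal sketch in the spirit of DEAP's checkBounds
# recipe (an assumption about the original helper): wrap an operator so that every
# gene of every individual it returns is clipped back into [MIN[i], MAX[i]].
import functools


def within_constraints(minimums, maximums):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            offspring = func(*args, **kwargs)
            for child in offspring:
                for i, gene in enumerate(child):
                    child[i] = min(max(gene, minimums[i]), maximums[i])
            return offspring
        return wrapper
    return decorator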
def SelectPictureGA(para_grid):
    """ Tune picture (creative) selection with a genetic algorithm """
    # Initialization
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    # Initial population generation
    toolbox = base.Toolbox()
    toolbox.register("attr_float", select_parameter, para_grid)
    toolbox.register("population", tools.initRepeat, list, toolbox.attr_float)

    # Register the evolutionary operators
    toolbox.register("evaluate", eva_max)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", mutCounter, para_grid=para_grid)
    toolbox.register("select", tools.selTournament, tournsize=3)

    # Initialize the GA parameters
    population = toolbox.population(n=40)
    CXPB, MUTPB, NGEN = 0.5, 0.2, 50

    # Simulated click-through-rate function starts here
    WriteRandom(population)
    # Simulated click-through-rate function ends here

    # Evaluate the initial population
    fitnesses = map(toolbox.evaluate, population)
    for ind, fit in zip(population, fitnesses):
        ind.fitness.values = fit

    for g in range(NGEN):
        # Select the next generation individuals
        offspring = toolbox.select(population, len(population))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]

        # Simulated click-through-rate function starts here
        WriteRandom(invalid_ind)
        # Simulated click-through-rate function ends here

        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring
        population[:] = offspring

    return population
import random

from deap import base
from deap import creator
from deap import tools

# base.Fitness is DEAP's fitness base class; weights=(1.0,) declares a
# single-objective maximisation fitness
creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
# An Individual is a plain list that carries a FitnessMax instance in its
# .fitness attribute
creator.create("Individual", list, fitness=creator.FitnessMax)

# The Toolbox is the central DEAP container where operators are registered
toolbox = base.Toolbox()

# Attribute generator: define 'attr_bool' to be an attribute ('gene')
# which corresponds to integers sampled uniformly
# from the range [0,1] (i.e. 0 or 1 with equal
# probability)
toolbox.register("attr_bool", random.randint, 0, 1)

# Structure initializers: define 'individual' to be an individual
# consisting of 100 'attr_bool' elements ('genes');
# tools.initRepeat builds an Individual by calling attr_bool 100 times
toolbox.register(
    "individual",
    tools.initRepeat,
    creator.Individual,
    toolbox.attr_bool,
    100)

# define the population to be a list of 'individual's
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
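# The tutorial fragment above stops after registering the representation. A minimal
# sketch of how such a one-max setup is typically completed and run with DEAP's
# built-in eaSimple; the operator choices below are assumptions, not part of the
# original snippet.
from deap import algorithms


def eval_one_max(individual):
    # fitness is the number of ones in the bit string; DEAP expects a tuple
    return (sum(individual),)


toolbox.register("evaluate", eval_one_max)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)

if __name__ == "__main__":
    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(1)
    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2,
                                   ngen=40, halloffame=hof, verbose=False)
    print("Best individual has %d ones out of 100" % sum(hof[0]))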
def es_toolbox(strategy_name, i_shape, evaluate, model_type, imutpb=None, imutmu=None, imutsigma=None): """Initializes and configures the DEAP toolbox for evolving the parameters of a model. Args: strategy_name (str): The strategy that is being used for evolution i_shape (int or tuple): Size or shape of an individual in the population evaluate (function): Function to evaluate an entire population model_type (str): A string specifying whether we're optimizing on a neural network or field programmable gate array imutpb (float): Mutation probability for each individual's attribute imutmu (float): Mean parameter for the Gaussian Distribution we're mutating an attribute from imutsigma (float): Sigma parameter for the Gaussian Distribution we're mutating an attribute from Returns: toolbox (deap.base.Toolbox): Configured DEAP Toolbox for the algorithm. """ logger.log("Initializing toolbox...") # Set seed seed = int(time.time()) random.seed(seed) logger.log('TOOLBOX.PY random seed is {}'.format(seed)) logger.start_timer() # Initialize Toolbox toolbox = base.Toolbox() logger.stop_timer('TOOLBOX.PY Initializing toolbox') logger.start_timer() # Defining tools specific to model size = np.prod(i_shape) if model_type == "nn": # ATTRIBUTE toolbox.register("attribute", random.random) logger.stop_timer('TOOLBOX.PY register("attribute")') # INDIVIDUAL logger.start_timer() toolbox.register("individual", getattr(tools, 'initRepeat'), getattr(creator, 'Individual'), getattr(toolbox, 'attribute'), n=i_shape) logger.stop_timer('TOOLBOX.PY register("individual")') # MUTATION logger.start_timer() toolbox.register("mutate", getattr(tools, 'mutGaussian'), mu=imutmu, sigma=imutsigma, indpb=imutpb) logger.stop_timer('TOOLBOX.PY register("mutate")') # POPULATION logger.start_timer() def init_population(ind_class, n): pop = np.random.uniform(low=-1, high=1, size=(n, size)) return [ind_class(ind) for ind in pop] toolbox.register("population", init_population, getattr(creator, 'Individual')) logger.stop_timer('TOOLBOX.PY register("population")') # MATING logger.start_timer() toolbox.register("mate", getattr(tools, 'cxTwoPoint')) logger.stop_timer('TOOLBOX.PY register("mate")') elif model_type == "fpga": # ATTRIBUTE logger.start_timer() toolbox.register("attribute", np.random.choice, [False, True]) logger.stop_timer('TOOLBOX.PY register("attribute")') # MUTATION logger.start_timer() def mutate_individual(ind, indpb): idx = np.argwhere( np.random.choice([False, True], size, p=[1 - indpb, indpb])) ind[idx] = np.invert(ind[idx]) return ind toolbox.register("mutate", mutate_individual, indpb=imutpb) logger.stop_timer('TOOLBOX.PY register("mutate")') # POPULATION logger.start_timer() def init_population(ind_class, n): pop = np.random.choice([False, True], size=(n, size)) return [ind_class(ind) for ind in pop] toolbox.register("population", init_population, getattr(creator, 'Individual')) logger.stop_timer('TOOLBOX.PY register("population")') # MATING logger.start_timer() from varro.fpga.cross_over import cross_over toolbox.register("mate", cross_over) logger.stop_timer('TOOLBOX.PY register("mate")') # SELECTION METHOD logger.start_timer() if strategy_name == 'nsr-es': toolbox.register("select_elite", getattr( tools, 'selSPEA2')) # Use Multi-objective selection method toolbox.register("select", getattr(tools, 'selRandom')) else: toolbox.register("select_elite", getattr(tools, 'selTournament'), tournsize=3) toolbox.register("select", getattr(tools, 'selRandom')) logger.stop_timer('TOOLBOX.PY register("select")') # EVALUATE 
logger.start_timer() toolbox.register("evaluate", evaluate) logger.stop_timer('TOOLBOX.PY register("evaluate")') return toolbox
def gaVRPTW(instName, unitCost, initCost, waitCost, delayCost, indSize, popSize, cxPb, mutPb, NGen, exportCSV=False, customizeData=False): if customizeData: jsonDataDir = os.path.join(BASE_DIR, 'data', 'json_customize') else: jsonDataDir = os.path.join(BASE_DIR, 'data', 'json') jsonFile = os.path.join(jsonDataDir, '%s.json' % instName) with open(jsonFile) as f: instance = load(f) creator.create('FitnessMax', base.Fitness, weights=(1.0, )) creator.create('Individual', list, fitness=creator.FitnessMax) toolbox = base.Toolbox() # Attribute generator toolbox.register('indexes', random.sample, range(1, indSize + 1), indSize) # Structure initializers toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.indexes) toolbox.register('population', tools.initRepeat, list, toolbox.individual) # Operator registering toolbox.register('evaluate', evalVRPTW, instance=instance, unitCost=unitCost, initCost=initCost, waitCost=waitCost, delayCost=delayCost) toolbox.register('select', tools.selRoulette) toolbox.register('mate', cxPartialyMatched) toolbox.register('mutate', mutInverseIndexes) pop = toolbox.population(n=popSize) # Results holders for exporting results to CSV file csvData = [] print('Start of evolution') # Evaluate the entire population fitnesses = list(map(toolbox.evaluate, pop)) for ind, fit in zip(pop, fitnesses): ind.fitness.values = fit print(' Evaluated %d individuals' % len(pop)) # Begin the evolution for g in range(NGen): print('-- Generation %d --' % g) # Select the next generation individuals offspring = toolbox.select(pop, len(pop)) # Clone the selected individuals offspring = list(map(toolbox.clone, offspring)) # Apply crossover and mutation on the offspring for child1, child2 in zip(offspring[::2], offspring[1::2]): if random.random() < cxPb: toolbox.mate(child1, child2) del child1.fitness.values del child2.fitness.values for mutant in offspring: if random.random() < mutPb: toolbox.mutate(mutant) del mutant.fitness.values # Evaluate the individuals with an invalid fitness invalidInd = [ind for ind in offspring if not ind.fitness.valid] fitnesses = map(toolbox.evaluate, invalidInd) for ind, fit in zip(invalidInd, fitnesses): ind.fitness.values = fit print(' Evaluated %d individuals' % len(invalidInd)) # The population is entirely replaced by the offspring pop[:] = offspring # Gather all the fitnesses in one list and print the stats fits = [ind.fitness.values[0] for ind in pop] length = len(pop) mean = sum(fits) / length sum2 = sum(x * x for x in fits) std = abs(sum2 / length - mean**2)**0.5 print(' Min %s' % min(fits)) print(' Max %s' % max(fits)) print('Avg %s' % mean) print(' Std %s' % std) # Write data to holders for exporting results to CSV file if exportCSV: csvRow = { 'generation': g, 'evaluated_individuals': len(invalidInd), 'min_fitness': min(fits), 'max_fitness': max(fits), 'avg_fitness': mean, 'std_fitness': std, } csvData.append(csvRow) print('-- End of (successful) evolution --') bestInd = tools.selBest(pop, 1)[0] print('Best individual: %s' % bestInd) print('Fitness: %s' % bestInd.fitness.values[0]) printRoute(ind2route(bestInd, instance)) print('Total cost: %s' % (1 / bestInd.fitness.values[0])) if exportCSV: csvFilename = '%s_uC%s_iC%s_wC%s_dC%s_iS%s_pS%s_cP%s_mP%s_nG%s.csv' % ( instName, unitCost, initCost, waitCost, delayCost, indSize, popSize, cxPb, mutPb, NGen) csvPathname = os.path.join(BASE_DIR, 'results', csvFilename) print('Write to file: %s' % csvPathname) makeDirsForFile(pathname=csvPathname) if not exist(pathname=csvPathname, 
overwrite=True): with open(csvPathname, 'w') as f: fieldnames = [ 'generation', 'evaluated_individuals', 'min_fitness', 'max_fitness', 'avg_fitness', 'std_fitness' ] writer = DictWriter(f, fieldnames=fieldnames, dialect='excel') writer.writeheader() for csvRow in csvData: writer.writerow(csvRow)
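# A hypothetical invocation of gaVRPTW (the instance name and cost/GA parameters
# below are illustrative only, not taken from the original script, and must match a
# Solomon-style JSON instance available under the data directory):
if __name__ == '__main__':
    random.seed(64)
    gaVRPTW(instName='C101', unitCost=8.0, initCost=60.0, waitCost=0.5, delayCost=1.5,
            indSize=25, popSize=80, cxPb=0.85, mutPb=0.02, NGen=100, exportCSV=True)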
def run_ga(logger): if logger is None: raise Exception("Error: logger is None!") stats = tools.Statistics(lambda individual: individual.fitness.values) stats.register("best", np.min, axis=0) logbook = tools.Logbook() logbook.header = 'ga', "best" result = [] toolbox = base.Toolbox() toolbox.register("individual", init_individual) toolbox.register("population", tools.initRepeat, list, toolbox.individual) # toolbox.register("mate", tools.cxTwoPoint) toolbox.register("mate", Utils.get_instance().crossover_new_method) toolbox.register("mutate", Utils.get_instance().mutate_new_method) toolbox.register("select", tools.selTournament, tournsize=10) toolbox.register("evaluate", Utils.get_instance().cal_fitness) for time in range(Utils.get_instance().num_run): pop = toolbox.population(Utils.get_instance().pop_size) best_ind = toolbox.clone(pop[0]) prev = -1 # use for termination count_term = 0 # use for termination for _ in range(Utils.get_instance().num_generation): offsprings = map(toolbox.clone, toolbox.select(pop, len(pop) - 1)) offsprings = algorithms.varAnd(offsprings, toolbox, Utils.get_instance().cx_pb, Utils.get_instance().mut_pb) offsprings.append(best_ind) min_value = float('inf') invalid_individuals = [] fitness = toolbox.map(toolbox.evaluate, offsprings) for ind, fit in zip(offsprings, fitness): if fit == float('inf'): print("!!!") else: invalid_individuals.append(ind) fitness = toolbox.map(toolbox.evaluate, invalid_individuals) for ind, fit in zip(invalid_individuals, fitness): ind.fitness.values = [fit] if min_value > fit: min_value = fit best_ind = toolbox.clone(ind) b = round(min_value, 6) if prev == b: count_term += 1 else: count_term = 0 pop[:] = invalid_individuals[:] logger.info(f"{time} - {_} : {b}") prev = b if count_term == Utils.get_instance().terminate: break record = stats.compile(pop) logbook.record(ga=time + 1, **record) logger.info(logbook.stream) result.append(min_value) avg = np.mean(result) std = np.std(result) mi = np.min(result) ma = np.max(result) logger.info([mi, ma, avg, std])
def CMAES_MO(var,weights,funcs_l,sigma,verbose = True, MAXITER = 100, STAGNATION_ITER =10, lambda_=3, mu =5): NRESTARTS = 10 # Initialization + 9 I-POP restarts creator.create("MaFitness", base.Fitness, weights=weights) creator.create("Individual", list, fitness=creator.MaFitness) toolbox = base.Toolbox() eval_funcs = lambda x: tuple([f(x) for f in funcs_l]) toolbox.register("evaluate", eval_funcs) S.Swarm.controller.rez_params() S.model = var c = S.extract_genotype() init_func = lambda c, sigma, size: np.random.normal(c, sigma, size) toolbox.register("attr_float", init_func, c, sigma, len(var)) toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float) toolbox.register("population", tools.initRepeat, list, toolbox.individual) toolbox.register("evaluate", eval_funcs) halloffame = tools.HallOfFame(1) stats = tools.Statistics(lambda ind: np.array([ind.fitness.values])) stats.register("average", np.mean) stats.register("std", np.std) stats.register("min", np.min) stats.register("max", np.max) logbooks = list() bestvalues = list() medianvalues = list() i = 0 t=0 while i < (NRESTARTS): pop = toolbox.population(n=mu) strategy = cma.StrategyMultiObjective(centroid=c, sigma=sigma, lambda_=lambda_,population=pop) toolbox.register("generate", strategy.generate, creator.Individual) toolbox.register("update", strategy.update) logbooks.append(tools.Logbook()) logbooks[-1].header = "gen", "evals", "restart", "regime", "std", "min", "avg", "max" conditions = {"MaxIter": False, "TolHistFun": False, "EqualFunVals": False, "TolX": False, "TolUpSigma": False, "Stagnation": False, "ConditionCov": False, "NoEffectAxis": False, "NoEffectCoor": False} while not any(conditions.values()): if(t%5 == 0): S.Swarm.controller.visibility = True else: S.Swarm.controller.visibility = False t = t + 1 # Generate a new population population = pop + toolbox.generate() # Evaluate the individuals fitnesses = toolbox.map(toolbox.evaluate, population) for ind, fit in zip(population, fitnesses): ind.fitness.values = fit halloffame.update(population) record = stats.compile(population) logbooks[-1].record(gen=t, restart = i, **record) if verbose: print(logbooks[-1].stream) # Update the strategy with the evaluated individuals toolbox.update(population) # Log the best and median value of this population bestvalues.append(population[-1].fitness.values) medianvalues.append(population[int(round(len(population)/2.))].fitness.values) if t >= MAXITER: # The maximum number of iteration per CMA-ES ran conditions["MaxIter"] = True if len(bestvalues) > STAGNATION_ITER and len(medianvalues) > STAGNATION_ITER and \ np.median(bestvalues[-20:]) >= np.median( bestvalues[-STAGNATION_ITER:-STAGNATION_ITER + 20]) and \ np.median(medianvalues[-20:]) >= np.median( medianvalues[-STAGNATION_ITER:-STAGNATION_ITER + 20]): # Stagnation occured conditions["Stagnation"] = True pop = [p.fitness.values for p in population[-1:-mu]] stop_causes = [k for k, v in conditions.items() if v] print( "Stopped because of condition%s %s" % ((":" if len(stop_causes) == 1 else "s:"), ",".join(stop_causes))) i += 1
def __init__( self, problem: Problem, mutation: Union[Mutation, DeapMutation] = None, crossover: DeapCrossover = None, selection: DeapSelection = None, encoding: Optional[Union[str, Dict[str, Any]]] = None, objectives: Optional[Union[str, List[str]]] = None, objective_weights: Optional[List[float]] = None, pop_size: int = None, max_evals: int = None, mut_rate: float = None, crossover_rate: float = None, deap_verbose: bool = None, ): self._default_crossovers = { TypeAttribute.LIST_BOOLEAN: DeapCrossover.CX_UNIFORM, TypeAttribute.LIST_INTEGER: DeapCrossover.CX_ONE_POINT, TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY: DeapCrossover.CX_ONE_POINT, TypeAttribute.PERMUTATION: DeapCrossover.CX_UNIFORM_PARTIALY_MATCHED, } self._default_mutations = { TypeAttribute.LIST_BOOLEAN: DeapMutation.MUT_FLIP_BIT, TypeAttribute.LIST_INTEGER: DeapMutation.MUT_UNIFORM_INT, TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY: DeapMutation.MUT_UNIFORM_INT, TypeAttribute.PERMUTATION: DeapMutation.MUT_SHUFFLE_INDEXES, } self._default_selection = DeapSelection.SEL_TOURNAMENT self.params_objective_function = ParamsObjectiveFunction( objective_handling=ObjectiveHandling.MULTI_OBJ, objectives=objectives, weights=objective_weights, sense_function=ModeOptim.MAXIMIZATION, ) self.evaluate_sol, _ = build_evaluate_function_aggregated( problem=problem, params_objective_function=self.params_objective_function ) self.problem = problem if pop_size is not None: self._pop_size = pop_size else: self._pop_size = 100 if max_evals is not None: self._max_evals = max_evals else: self._max_evals = 100 * self._pop_size print( "No value specified for max_evals. Using the default 10*pop_size - This should really be set carefully" ) if mut_rate is not None: self._mut_rate = mut_rate else: self._mut_rate = 0.1 if crossover_rate is not None: self._crossover_rate = crossover_rate else: self._crossover_rate = 0.9 self.problem = problem if deap_verbose is not None: self._deap_verbose = deap_verbose else: self._deap_verbose = True # set encoding register_solution: EncodingRegister = problem.get_attribute_register() self._encoding_name = None self._encoding_variable_name = None if encoding is not None and isinstance(encoding, str): # check name specified is in problem register print(encoding) if encoding in register_solution.dict_attribute_to_type.keys(): self._encoding_name = encoding self._encoding_variable_name = register_solution.dict_attribute_to_type[ self._encoding_name ]["name"] self._encoding_type = register_solution.dict_attribute_to_type[ self._encoding_name ]["type"][0] self.n = register_solution.dict_attribute_to_type[self._encoding_name][ "n" ] if self._encoding_type == TypeAttribute.LIST_INTEGER: self.arrity = register_solution.dict_attribute_to_type[ self._encoding_name ]["arrity"] self.arrities = [self.arrity for i in range(self.n)] else: self.arrity = None if self._encoding_type == TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY: self.arrities = register_solution.dict_attribute_to_type[ self._encoding_name ]["arrities"] # else: # self.arrities = None if encoding is not None and isinstance(encoding, Dict): # check there is a type key and a n key if ( "name" in encoding.keys() and "type" in encoding.keys() and "n" in encoding.keys() ): self._encoding_name = "custom" self._encoding_variable_name = encoding["name"] self._encoding_type = encoding["type"][0] self.n = encoding["n"] if "arrity" in encoding.keys(): self.arrity = encoding["arrity"] self.arrities = [self.arrity for i in range(self.n)] if "arrities" in encoding.keys(): self.arrities = 
register_solution.dict_attribute_to_type[ self._encoding_name ]["arrities"] else: print( "Erroneous encoding provided as input (encoding name not matching encoding of problem or custom " "definition not respecting encoding dict entry format, trying to use default one instead" ) if self._encoding_name is None: if len(register_solution.dict_attribute_to_type.keys()) == 0: raise Exception( "An encoding of type TypeAttribute should be specified or at least 1 TypeAttribute " "should be defined in the RegisterSolution of your Problem" ) print(register_solution.dict_attribute_to_type) print(register_solution.dict_attribute_to_type.keys()) self._encoding_name = list(register_solution.dict_attribute_to_type.keys())[ 0 ] self._encoding_variable_name = register_solution.dict_attribute_to_type[ self._encoding_name ]["name"] self._encoding_type = register_solution.dict_attribute_to_type[ self._encoding_name ]["type"][ 0 ] # TODO : while it's usually a list we could also have a unique value(not a list) self.n = register_solution.dict_attribute_to_type[self._encoding_name]["n"] if self._encoding_type == TypeAttribute.LIST_INTEGER: self.arrity = register_solution.dict_attribute_to_type[ self._encoding_name ]["arrity"] self.arrities = [self.arrity for i in range(self.n)] else: self.arrity = None if self._encoding_type == TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY: self.arrities = register_solution.dict_attribute_to_type[ self._encoding_name ]["arrities"] # else: # self.arrities = None if self._encoding_type == TypeAttribute.LIST_BOOLEAN: self.arrity = 2 self.arities = [2 for i in range(self.n)] print( "Encoding used by the GA: " + self._encoding_name + ": " + str(self._encoding_type) + " of length " + str(self.n) ) self._objectives = objectives print("_objectives: ", self._objectives) self._objective_weights = objective_weights if ( (self._objective_weights is None) or self._objective_weights is not None and (len(self._objective_weights) != len(self._objectives)) ): print( "Objective weight issue: no weight given or size of weights and objectives lists mismatch. " "Setting all weights to default 1 value." ) self._objective_weights = [1 for i in range(len(self._objectives))] if selection is None: self._selection_type = self._default_selection else: self._selection_type = selection nobj = len(self._objectives) ref_points = tools.uniform_reference_points(nobj=nobj) # DEAP toolbox setup self._toolbox = base.Toolbox() # Define representation creator.create("fitness", base.Fitness, weights=tuple(self._objective_weights)) creator.create( "individual", list, fitness=creator.fitness ) # associate the fitness function to the individual type # Create the individuals required by the encoding if self._encoding_type == TypeAttribute.LIST_BOOLEAN: self._toolbox.register( "bit", random.randint, 0, 1 ) # Each element of a solution is a bit (i.e. an int between 0 and 1 incl.) 
self._toolbox.register( "individual", tools.initRepeat, creator.individual, self._toolbox.bit, n=self.n, ) # An individual (aka solution) contains n bits elif self._encoding_type == TypeAttribute.PERMUTATION: self._toolbox.register( "permutation_indices", random.sample, range(self.n), self.n ) self._toolbox.register( "individual", tools.initIterate, creator.individual, self._toolbox.permutation_indices, ) elif self._encoding_type == TypeAttribute.LIST_INTEGER: self._toolbox.register("int_val", random.randint, 0, self.arrity - 1) self._toolbox.register( "individual", tools.initRepeat, creator.individual, self._toolbox.int_val, n=self.n, ) elif self._encoding_type == TypeAttribute.LIST_INTEGER_SPECIFIC_ARRITY: gen_idx = lambda: [ random.randint(0, arrity - 1) for arrity in self.arrities ] self._toolbox.register( "individual", tools.initIterate, creator.individual, gen_idx ) self._toolbox.register( "population", tools.initRepeat, list, self._toolbox.individual, n=self._pop_size, ) # A population is made of pop_size individuals # Define objective function self._toolbox.register( "evaluate", self.evaluate_problem, ) # Define crossover if crossover is None: self._crossover = self._default_crossovers[self._encoding_type] else: self._crossover = crossover # if self._encoding_type == TypeAttribute.LIST_BOOLEAN: if self._crossover == DeapCrossover.CX_UNIFORM: self._toolbox.register("mate", tools.cxUniform, indpb=self._crossover_rate) elif self._crossover == DeapCrossover.CX_ONE_POINT: self._toolbox.register("mate", tools.cxOnePoint) elif self._crossover == DeapCrossover.CX_TWO_POINT: self._toolbox.register("mate", tools.cxTwoPoint) # elif self._encoding_type == TypeAttribute.PERMUTATION: elif self._crossover == DeapCrossover.CX_UNIFORM_PARTIALY_MATCHED: self._toolbox.register("mate", tools.cxUniformPartialyMatched, indpb=0.5) elif self._crossover == DeapCrossover.CX_ORDERED: self._toolbox.register("mate", tools.cxOrdered) elif self._crossover == DeapCrossover.CX_PARTIALY_MATCHED: self._toolbox.register("mate", tools.cxPartialyMatched) else: print("Crossover of specified type not handled!") # Define mutation if mutation is None: self._mutation = self._default_mutations[self._encoding_type] else: self._mutation = mutation if isinstance(self._mutation, Mutation): self._toolbox.register( "mutate", generic_mutate_wrapper, problem=self.problem, encoding_name=self._encoding_variable_name, indpb=self._mut_rate, solution_fn=self.problem.get_solution_type(), custom_mutation=mutation, ) elif isinstance(self._mutation, DeapMutation): if self._mutation == DeapMutation.MUT_FLIP_BIT: self._toolbox.register( "mutate", tools.mutFlipBit, indpb=self._mut_rate ) # Choice of mutation operator elif self._mutation == DeapMutation.MUT_SHUFFLE_INDEXES: self._toolbox.register( "mutate", tools.mutShuffleIndexes, indpb=self._mut_rate ) # Choice of mutation operator elif self._mutation == DeapMutation.MUT_UNIFORM_INT: # self._toolbox.register("mutate", tools.mutUniformInt, low=0, up=self.arrity-1, indpb=self._mut_rate) self._toolbox.register( "mutate", tools.mutUniformInt, low=0, up=self.arrities, indpb=self._mut_rate, ) # No choice of selection: In NSGA, only 1 selection: Non Dominated Sorted Selection self._toolbox.register("select", tools.selNSGA3, ref_points=ref_points)
def NSGA(funcs_l, weights, var, sigma, MU=4, NGEN=50, wide_search=1.5):
    IND_SIZE = len(var)
    creator.create("MaFitness", base.Fitness, weights=weights)
    creator.create("Individual", list, fitness=creator.MaFitness)
    toolbox = base.Toolbox()
    records = {}

    eval_funcs = lambda x: tuple([f(x) for f in funcs_l])
    toolbox.register("evaluate", eval_funcs)

    S.model = var
    c = S.extract_genotype()
    print("c = ", c)
    init_func = lambda c, sigma, size: np.random.normal(c, sigma, size)
    bound_max = list(wide_search * c)
    bound_min = list(-wide_search * c)

    toolbox.register("attr_float", init_func, c, sigma, len(var))
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    paretofront = tools.ParetoFront()

    # toolbox.register("mutate", tools.mutGaussian, mu=c, sigma=sigma, indpb=1.0 / IND_SIZE)
    toolbox.register("mutate", tools.mutPolynomialBounded, low=bound_min, up=bound_max,
                     eta=20.0, indpb=1.0 / IND_SIZE)
    # up must be bound_max; passing bound_min here would collapse the crossover range
    toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=bound_min, up=bound_max, eta=20.0)
    toolbox.register("select", tools.selNSGA2)

    CXPB = 0.6
    L = []
    turing_spot = tools.Statistics(lambda ind: ind.fitness.values[0])
    rectanglitude = tools.Statistics(lambda ind: ind.fitness.values[1])
    mstats = tools.MultiStatistics(Rectanglitude=rectanglitude, Turing_Spot=turing_spot)
    mstats.register("avg", np.mean, axis=0)
    mstats.register("max", np.max, axis=0)
    logbook = tools.Logbook()

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))
    paretofront.update(pop)  # keep the Pareto archive up to date (it is returned at the end)

    record = mstats.compile(pop)
    # print("Record =", record)
    logbook.record(gen=0, **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        if gen % 5 == 0:
            S.Swarm.controller.withVisiblite(True)
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)
            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        paretofront.update(pop)
        record = mstats.compile(pop)
        logbook.record(gen=gen, **record)
        records[gen] = record
        print(logbook.stream)

    return pop, paretofront, records
data = input_output.read_dataset_list(DATASET_FOLDER, files) data_test = input_output.read_dataset_list(DATASET_FOLDER, [file_test]) X_train, y_train, X_test, y_test = input_output.balance_dataset( data, data_columns=list(range(0, n_att)), label_column=75, test_size=0) #_, _, X_test, y_test = input_output.balance_dataset(data_test, data_columns = list(range(0,n_att)), label_column = 76, test_size = 0.999) X_test = data_test[data_test.columns[0:75]].values y_test = data_test[data_test.columns[76]].values # One hot vector y_train = np.array([[x, int(not x)] for x in y_train]) y_test = np.array([[x, int(not x)] for x in y_test]) toolbox = base.Toolbox() # Individual and population toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=3) toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr) toolbox.register("population", tools.initRepeat, list, toolbox.individual) toolbox.register("compile", gp.compile, pset=pset) # Evaluation method toolbox.register("evaluate", fitness.eval_tree, clf=classifier, X_train=X_train, y_train=y_train, X_test=X_test, y_true=y_test,
def evolution( env: Environment, number_of_rays: int, ray_distribution: str, angle_lower_bound: int, angle_upper_bound: int, length_lower_bound: int, length_upper_bound: int, no_of_reflective_segments: int, distance_limit: int, length_limit: int, population_size: int, number_of_generations: int, xover_prob: float, mut_angle_prob: float, mut_length_prob: float, shift_segment_prob: float, rotate_segment_prob: float, resize_segment_prob: float, tilt_base_prob: float, base_length: int, base_slope: int, base_angle_limit_min: int, base_angle_limit_max: int): # Initiating evolutionary algorithm creator.create("Fitness", base.Fitness, weights=(1.0, )) base.Fitness.weights = (1.0, 10.0, 5.0, -1.0) creator.create("Individual", Component, fitness=creator.Fitness) toolbox = base.Toolbox() toolbox.register("individual", creator.Individual, env=env, number_of_rays=number_of_rays, ray_distribution=ray_distribution, angle_lower_bound=angle_lower_bound, angle_upper_bound=angle_upper_bound, length_lower_bound=length_lower_bound, length_upper_bound=length_upper_bound, no_of_reflective_segments=no_of_reflective_segments, distance_limit=distance_limit, length_limit=length_limit, base_length=base_length, base_slope=base_slope) toolbox.register("population", tools.initRepeat, list, toolbox.individual) toolbox.register("evaluate", evaluate) if env.quality_criterion == "nsgaii": toolbox.register("select", tools.selNSGA2) else: toolbox.register("select", tools.selTournament, tournsize=2) # Initiating first population pop = toolbox.population(n=population_size) # Evaluating fitness fitnesses = [] for item in pop: fitnesses.append(evaluate(item, env)) for ind, fit in zip(pop, fitnesses): ind.fitness = fit if env.quality_criterion != "nsgaii": if env.configuration == "two connected": stats_line = f"generation, best fitness, average fitness, fitness array, left segment angle, " \ f"left segment length, right segment angle, right segment length \n" else: stats_line = f"generation, best fitness, average fitness, fitness array, reflective segments \n" log_stats_init(f"stats", stats_line) # Initiating elitism if env.quality_criterion == "nsgaii": pop = toolbox.select(pop, len(pop)) hof = tools.ParetoFront() hof.update(pop) else: hof = HallOfFame(1) hof.update(pop) print("Start of evolution") # Begin the evolution for g in range(number_of_generations): # A new generation print(f"-- Generation {g} --") # Select the next generation individuals if env.quality_criterion == "nsgaii": offspring = tools.selTournamentDCD(pop, len(pop)) offspring = [toolbox.clone(ind) for ind in offspring] else: offspring = toolbox.select(pop, len(pop)) offspring = list(map(toolbox.clone, offspring)) # Apply crossover and mutation on the offspring for child1, child2 in zip(offspring[::2], offspring[1::2]): # cross two individuals with probability xover_prob if env.configuration == "multiple free": if random.random() < xover_prob: x_over_multiple_segments(child1, child2) # fitness values of the children must be recalculated later child1.fitness = None child2.fitness = None if env.configuration == "two connected": if random.random() < xover_prob: x_over_two_segments(child1, child2) # fitness values of the children must be recalculated later child1.fitness = None child2.fitness = None for mutant in offspring: if env.configuration == "multiple free": if random.random() < shift_segment_prob: mutant.reflective_segments = shift_one_segment( mutant.reflective_segments, "x") mutant.fitness = None if random.random() < shift_segment_prob: 
mutant.reflective_segments = shift_one_segment( mutant.reflective_segments, "y") mutant.fitness = None if random.random() < rotate_segment_prob: mutant.reflective_segments = rotate_one_segment( mutant.reflective_segments) mutant.fitness = None if random.random() < resize_segment_prob: mutant.reflective_segments = resize_one_segment( mutant.reflective_segments) mutant.fitness = None if random.random() < tilt_base_prob: mutant.base_slope = tilt_base(mutant.base_slope, base_angle_limit_min, base_angle_limit_max) mutant.calculate_base() mutant.original_rays = mutant.sample_rays( number_of_rays, ray_distribution) mutant.fitness = None if env.configuration == "two connected": if random.random() < mut_angle_prob: mutate_angle(mutant) mutant.fitness = None if random.random() < mut_length_prob: mutate_length(mutant, length_upper_bound, length_lower_bound) mutant.fitness = None # Evaluate the individuals with an invalid fitness invalid_ind = [ind for ind in offspring if ind.fitness is None] fitnesses = [] for item in invalid_ind: fitnesses.append(evaluate(item, env)) for ind, fit in zip(invalid_ind, fitnesses): ind.fitness = fit if env.quality_criterion == "nsgaii": pop = toolbox.select(offspring + pop, population_size) else: pop[:] = offspring hof.update(pop) best_ind = hof[0] fitnesses = [] for item in pop: fitnesses.append(item.fitness) print(fitnesses) if env.configuration == "two connected" and env.quality_criterion != "nsgaii": stats_line = f"{g+1}, {best_ind.fitness}, {sum(fitnesses) / population_size}, {best_ind.fitness_array}, " \ f"left angle: {180-best_ind.left_angle+best_ind.base_slope}, " \ f"left length: {best_ind.left_length_coef*best_ind.base_length}, " \ f"right angle: {best_ind.right_angle-best_ind.base_slope}, " \ f"right length: {best_ind.right_length_coef*best_ind.base_length} " log_stats_append(f"stats", stats_line) if env.configuration == "multiple free" and env.quality_criterion != "nsgaii": stats_line = f"{g + 1}, {best_ind.fitness}, {sum(fitnesses) / population_size}, {best_ind.fitness_array}, " for reflective_segment in best_ind.reflective_segments: dimensions = f" start: {reflective_segment.p1}, end: {reflective_segment.p2}" stats_line = stats_line + dimensions log_stats_append(f"stats", stats_line) print(f"Best individual has fitness: {best_ind.fitness}") draw(best_ind, f"best{g}", env) if env.quality_criterion == "nsgaii": unique = choose_unique(hof, env.configuration) stats_line = f"index, fitness array" log_stats_init(f"stats", stats_line) for index in range(len(unique)): draw(unique[index], f"unique{index}", env) if env.configuration == "two connected": stats_line = f"{index}, {unique[index].fitness}," \ f"left angle: {180-unique[index].left_angle+unique[index].base_slope}, " \ f"left length: {unique[index].left_length_coef*unique[index].base_length}, " \ f"right angle: {unique[index].right_angle-unique[index].base_slope}, " \ f"right length: {unique[index].right_length_coef*unique[index].base_length} " else: stats_line = f"{index}, {unique[index].fitness}, {unique[index].base_slope}" for reflective_segment in unique[index].reflective_segments: dimensions = f" start: {reflective_segment.p1}, end: {reflective_segment.p2}" stats_line = stats_line + dimensions log_stats_append(f"stats", stats_line) print("-- End of (successful) evolution --") print("--")