def optimize(self):
    """Run an NSGA-II multi-objective optimisation of the strategy parameters.

    Optimisation directions (see ``object_func``): maximise the
    return/max-drawdown ratio and maximise the Sharpe ratio.

    Returns:
        list: the final population of parameter individuals.
    """
    # Toolbox is DEAP's registry for every operator used by the GA.
    toolbox = base.Toolbox()
    # Evaluate individuals in parallel, leaving one core free for the OS.
    pool = multiprocessing.Pool(processes=(multiprocessing.cpu_count() - 1))
    toolbox.register("map", pool.map)
    try:
        # Individuals are random parameter sets from parameter_generate().
        toolbox.register("individual", tools.initIterate, creator.Individual,
                         self.parameter_generate)
        # A population is a list of such individuals.
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)
        toolbox.register("mate", tools.cxTwoPoint)  # two-point crossover
        # Mutation: re-draw parameter groups within their valid ranges.
        toolbox.register("mutate", self.mutArrayGroup,
                         parameterlist=self.parameter_generate, indpb=0.6)
        toolbox.register("evaluate", object_func)   # objective function
        toolbox.register("select", tools.selNSGA2)  # NSGA-II (elitist non-dominated sort)

        # GA parameters
        MU = 8      # individuals selected each generation
        LAMBDA = 5  # children produced each generation
        pop = toolbox.population(20)  # initial population size
        CXPB, MUTPB, NGEN = 0.5, 0.3, 10  # crossover prob., mutation prob., generations

        hof = tools.ParetoFront()  # Pareto front (non-dominated optimal set)

        # Descriptive statistics of the objective values; mean/std/min/max
        # indicate convergence — if convergence is poor, increase NGEN.
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        np.set_printoptions(suppress=True)  # suppress scientific notation in output
        stats.register("mean", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)

        # (mu + lambda) multi-objective evolutionary algorithm.
        algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN,
                                  stats, halloffame=hof, verbose=True)
    finally:
        # BUGFIX: the worker pool was never released; shut it down even when
        # the evolution raises, so worker processes cannot leak.
        pool.close()
        pool.join()
    return pop
def multi_objetivo_ga(c, m):
    """Run a (mu + lambda) multi-objective GA.

    Args:
        c: crossover probability.
        m: mutation probability.

    Returns:
        tuple: (final population, Pareto-front hall of fame).
    """
    generations = 100
    mu = 2000       # parents kept per generation
    lam = 2000      # children produced per generation
    pop = toolbox.ini_poblacion(n=mu)
    hof = tools.ParetoFront()
    algorithms.eaMuPlusLambda(pop, toolbox, mu, lam, c, m, generations,
                              halloffame=hof)
    return pop, hof
def _check_mo_improvements(self, offspring: List[Any]) -> Tuple[bool, bool]:
    """Compare the offspring's Pareto front against the stored archive.

    Args:
        offspring: candidate individuals of the current generation.

    Returns:
        Tuple[bool, bool]: ``(fitness_improved, complexity_decreased)`` —
        whether the best main-metric value improved, and whether some
        non-dominated offspring matches the previous best on the main
        metric while strictly improving the supplementary metric.
    """
    complexity_decreased = False
    fitness_improved = False
    # Build the non-dominated set of the offspring only.
    offspring_archive = tools.ParetoFront()
    offspring_archive.update(offspring)
    # Any change in the non-dominated set counts as an archive improvement.
    is_archive_improved = not is_equal_archive(self.archive, offspring_archive)
    if is_archive_improved:
        # "Best" = minimal main metric (lower is better here).
        best_ind_in_prev = min(self.archive.items, key=self.get_main_metric)
        best_ind_in_current = min(offspring_archive.items, key=self.get_main_metric)
        fitness_improved = self.get_main_metric(best_ind_in_current) < self.get_main_metric(best_ind_in_prev)
        for offspring_ind in offspring_archive.items:
            # <= on the main metric allows equal fitness with lower
            # complexity (supplementary metric) to count as progress.
            if self.get_main_metric(offspring_ind) <= self.get_main_metric(best_ind_in_prev) \
                    and self.get_suppl_metric(offspring_ind) < self.get_suppl_metric(best_ind_in_prev):
                complexity_decreased = True
                break
    return fitness_improved, complexity_decreased
def main(folder_name, instrumented_app_dir, seed=None): random.seed(seed) # generate initial population print "### Initialising population ...." population = toolbox.population(n=settings.POPULATION_SIZE) hof = tools.ParetoFront() stats = tools.Statistics(lambda ind: ind.fitness.values) stats.register("avg", numpy.mean, axis=0) stats.register("std", numpy.std, axis=0) stats.register("min", numpy.min, axis=0) stats.register("max", numpy.max, axis=0) population, logbook = evolve(population, toolbox, settings.POPULATION_SIZE, settings.OFFSPRING_SIZE, cxpb=settings.CXPB, mutpb=settings.MUTPB, ngen=settings.GENERATION, stats=stats, halloffame=hof, verbose=True) # draw graphs two_d_line.plot(logbook, 0, instrumented_app_dir) two_d_line.plot(logbook, 1, instrumented_app_dir) two_d_line.plot(logbook, 2, instrumented_app_dir) two_d_line.plotProportionParetoOptimal(logbook, instrumented_app_dir) two_d_line.plotPopulationDiameter(logbook, instrumented_app_dir) two_d_line.plotRelativeDiameter(logbook, instrumented_app_dir) two_d_line.plotPopulationDiversity(logbook, instrumented_app_dir) two_d_line.plotStats(logbook, instrumented_app_dir, 'kconnec', 'kconnec', 'weight') two_d_line.plotHypervolume(logbook, instrumented_app_dir) time.sleep(5) os.system('cd ' + instrumented_app_dir + '/intermediate && mkdir ' + folder_name) os.system('cd ' + instrumented_app_dir + '/intermediate && mv *.pdf ' + folder_name + '/') os.system( 'cd ' + instrumented_app_dir + '/intermediate/ ' + folder_name + ' && pdfunite obj_0.pdf obj_1.pdf obj_2.pdf hv_standard.pdf ' 'proportion_pareto_optimal.pdf population_diameter.pdf rel_diameter.pdf ' 'population_diversity.pdf kconnec.pdf summary.pdf')
def main():
    """Seeded (50 + 100) evolutionary run over 50 generations.

    Returns:
        tuple: (final population, statistics object, Pareto front).
    """
    random.seed(64)
    generations = 50
    mu, lam = 50, 100
    cx_prob, mut_prob = 0.7, 0.2
    pop = toolbox.population(n=mu)
    front = tools.ParetoFront()
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # Column-wise summaries over every objective.
    for label, reducer in (("avg", numpy.mean), ("std", numpy.std),
                           ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, reducer, axis=0)
    algorithms.eaMuPlusLambda(pop, toolbox, mu, lam, cx_prob, mut_prob,
                              generations, stats, halloffame=front)
    return pop, stats, front
def main():
    """Seeded (100 + 100) run over 10 generations.

    Returns:
        tuple: (final population, statistics object, Pareto front).
    """
    random.seed(64)
    mu = lam = 100
    pop = toolbox.population(n=mu)
    front = tools.ParetoFront()
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # Per-objective summaries, computed column-wise.
    for label, reducer in (("avg", np.mean), ("std", np.std),
                           ("min", np.min), ("max", np.max)):
        stats.register(label, reducer, axis=0)
    algorithms.eaMuPlusLambda(pop, toolbox, mu=mu, lambda_=lam, cxpb=0.5,
                              mutpb=0.2, ngen=10, stats=stats, halloffame=front)
    return pop, stats, front
def main():
    """(300 + 300) run over 200 generations, silent logging.

    Returns:
        tuple: (final population, logbook, Pareto front).
    """
    cx_prob, mut_prob, generations = 0.7, 0.3, 200
    mu = lam = 300
    pop = toolbox.population(mu)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # Scalar summaries of the fitness values each generation.
    for label, reducer in (("avg", np.mean), ("std", np.std),
                           ("min", np.min), ("max", np.max)):
        stats.register(label, reducer)
    pareto = tools.ParetoFront()
    pop, logbook = algorithms.eaMuPlusLambda(pop, toolbox, mu=mu, lambda_=lam,
                                             cxpb=cx_prob, mutpb=mut_prob,
                                             ngen=generations, stats=stats,
                                             halloffame=pareto, verbose=False)
    return pop, logbook, pareto
def main():
    """Run a seeded (200 + 100) evolution for 2000 generations, print the
    elapsed time and the Pareto front, optionally pickle the front to
    ``<instance>.solution`` (flags ``-s`` / ``--save``), and return a string
    describing the hall of fame and its individuals.
    """
    start_time = time.time()
    random.seed(64)
    NGEN = 2000
    MU = 200
    LAMBDA = 100
    CXPB = 0.5
    MUTPB = 0.5
    pop = toolbox.population(n=MU)
    hall = tools.ParetoFront()
    algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN,
                              halloffame=hall)
    print "\nExecution stopped after : " + str(time.time() - start_time)
    #print the hall of fame
    hall_string = ""
    inds_string = ";"
    for i in hall:
        # d = distance of the tour, e/n = the two fitness objectives.
        hall_string += "(d : %f e: %f n: %d)" % (calculate_dist(
            problem, i)[0], i.fitness.values[0], i.fitness.values[1])
        inds_string += str(i) + ";"
    if ("-s" in sys.argv) or ("--save" in sys.argv):
        # NOTE(review): dump_file is never closed — relies on interpreter
        # cleanup; consider a with-block.
        dump_file = open(instance + ".solution", "wb")
        pareto_list = []
        for i in hall:
            # also check if valid solution
            pareto_list.append([j for j in i])
        pickle.dump(pareto_list, dump_file)
    for i in pop:
        # Individuals with a huge first objective are treated as invalid
        # and skipped — presumably a penalty value; confirm with evaluator.
        if i.fitness.values[0] < 100000.0:
            print calculate_dist(
                problem, i)[0], i.fitness.values[0], i.fitness.values[1]
            print str(i) + ";"
    return hall_string + inds_string
def main(exp_id, checkpoint_name=None):
    """Run (or resume) a GA experiment.

    Args:
        exp_id: experiment identifier forwarded to the algorithm.
        checkpoint_name: optional pickle file to resume from; when given,
            population, generation counter, hall of fame, logbook and RNG
            state are restored from it.

    Returns:
        tuple: (final population, log, hall of fame).
    """
    if "random_seed" in config.global_config:
        random.seed(config.global_config["random_seed"])
    if checkpoint_name:
        # A file name has been given, then load the data from the file
        cp = pickle.load(open(checkpoint_name, "rb"))
        pop = cp["population"]
        start_gen = cp["generation"] + 1  # resume from the next generation
        hof = cp["halloffame"]
        logbook = cp["logbook"]
        # Restore the RNG state so the resumed run continues deterministically.
        random.setstate(cp["rndstate"])
        if "config" in cp:
            config.global_config.update(cp["config"])
    else:
        # Fresh run: build everything from the configured population size.
        pop_size = config.global_config["ga"]["pop_size"]
        pop = toolbox.population(n=pop_size)
        start_gen = 0
        hof = tools.ParetoFront()
        logbook = tools.Logbook()
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)
    # nsga_number 0 stands for vanilla GA
    algorithm = alg.nsga if nsga_number != 0 else alg.vanilla_ga
    pop, log = algorithm(pop, start_gen, toolbox, cxpb=0.6, mutpb=0.2,
                         ngen=config.global_config["ga"]["n_gen"], stats=stats,
                         halloffame=hof, logbook=logbook, verbose=True,
                         exp_id=exp_id)
    return pop, log, hof
def run_many():
    """Repeat the term-prediction experiment 20 times for later analysis.

    Each iteration resets the module-level logbook/Pareto front, reopens the
    log file, and re-derives the per-run data/image file names before calling
    ``termPrediction()`` (which reads these globals).
    """
    global logbook
    global pareto_front
    global log_file
    global DATA_FILE_NAME
    global IMAGE1_FILE_NAME
    global IMAGE2_FILE_NAME
    # run many times to analysis the result
    for i in range(20):
        print('The ' + str(i + 1) + ' run!')
        # Define Log object
        logbook = tools.Logbook()
        pareto_front = tools.ParetoFront()
        # NOTE(review): each run's log_file is opened but never explicitly
        # closed here — presumably termPrediction() closes it; confirm.
        log_file = open(LOG_FILE_NAME + str(i + 1) + LOG_FILE_NAME_END, 'w+')
        # Per-run output file names (suffixed with the 1-based run index).
        DATA_FILE_NAME = DATA_FILE_NAME_INIT + str(i + 1)
        IMAGE1_FILE_NAME = IMAGE1_FILE_NAME_INIT + str(i + 1)
        IMAGE2_FILE_NAME = IMAGE2_FILE_NAME_INIT + str(i + 1)
        termPrediction()
def main(population_size, max_generations):
    """Main genetic algorithm: evolve items toward a Pareto-optimal set.

    Prints per-generation statistics while running.

    :param population_size: the size of the population
    :param max_generations: the number of generations before terminating
    :return: the population, statistics, and the best individuals
    """
    # Wire up the evolutionary operators.
    toolbox.register("evaluate", evaluate_fitness)
    toolbox.register("mate", crossover)
    toolbox.register("mutate", tools.mutUniformInt, low=1,
                     up=MAX_ATTRIBUTE_SIZE, indpb=0.05)
    toolbox.register("select", tools.selNSGA2)
    children_per_gen = 100
    cx_prob, mut_prob = 0.7, 0.2
    pop = toolbox.population(n=population_size)
    front = tools.ParetoFront()
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, reducer in (("avg", numpy.mean), ("std", numpy.std),
                           ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, reducer, axis=0)
    algorithms.eaMuPlusLambda(pop, toolbox, population_size, children_per_gen,
                              cx_prob, mut_prob, max_generations, stats,
                              halloffame=front)
    return pop, stats, front
def main():
    """Evolve card decks with GA parameters taken from Streamlit sliders.

    Returns:
        tuple: (final population, statistics object, Pareto front).
    """
    random.seed(64)
    # Slider values fall back to their defaults if the widget yields falsy.
    generations = (st.sidebar.slider("How many deck generations to evolve?",
                                     min_value=3, max_value=100, value=30) or 30)
    deck_count = (st.sidebar.slider(
        "How many decks should a generation have?",
        min_value=2,
        max_value=50,
        value=15,
    ) or 15)
    children_per_gen = 100
    cx_prob, mut_prob = 0.8, 0.2
    pop = toolbox.population(n=deck_count)
    front = tools.ParetoFront()
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, reducer in (("avg", np.mean), ("std", np.std),
                           ("min", np.min), ("max", np.max)):
        stats.register(label, reducer, axis=0)
    with st.spinner(
            "Simulating, evolving, mutating, changing, procreating..."):
        algorithms.eaMuPlusLambda(pop, toolbox, deck_count, children_per_gen,
                                  cx_prob, mut_prob, generations, stats,
                                  halloffame=front)
    st.success("Simulation finished!!")
    # Display stats for the best deck found.
    best_deck_list = list(front[0])  # list of cards in best deck
    df_best_deck = get_deck_val(best_deck_list)
    df_best_deck.compute_deck_stats()
    return pop, stats, front
def test_filter_duplicates():
    """filter_duplicates keeps only archive members absent from the population."""
    archive = tools.ParetoFront()
    archive_items = [chain_first(), chain_second(), chain_third()]
    population = [chain_first(), chain_second(), chain_third(), chain_fourth()]
    archive_items_fitness = ((-0.80001, 0.25), (-0.7, 0.1), (-0.9, 0.7))
    population_fitness = ((-0.8, 0.25), (-0.59, 0.25), (-0.9, 0.7), (-0.7, 0.1))
    # All objectives are minimised.
    weights = (-1,) * len(population_fitness[0])
    for individual, values in zip(archive_items, archive_items_fitness):
        individual.fitness = MultiObjFitness(values=values, weights=weights)
    for individual, values in zip(population, population_fitness):
        individual.fitness = MultiObjFitness(values=values, weights=weights)
    archive.update(archive_items)
    filtered_archive = filter_duplicates(archive, population)
    # Only the (-0.80001, 0.25) member has no duplicate in the population.
    assert len(filtered_archive) == 1
    assert filtered_archive[0].fitness.values[0] == -0.80001
    assert filtered_archive[0].fitness.values[1] == 0.25
def search(self):
    """Run the simple EA over the configured number of generations.

    Returns:
        tuple: (final population, Pareto-front hall of fame).
    """
    # A pre-configured run builds its population without an explicit size.
    pop = (self.toolbox.population() if self.pre_config
           else self.toolbox.population(n=self.pop_size))
    hof = tools.ParetoFront()
    algorithms.eaSimple(pop, self.toolbox, cxpb=0.7, mutpb=0.3,
                        ngen=self.generations, verbose=True, halloffame=hof)
    return pop, hof
def optimize(data):
    """Optimise strategy parameters on *data* with NSGA-II.

    :param data: data set used by the ``capital`` evaluation function
    :return: the final population of parameter individuals
    """
    # Two maximisation objectives (weights both +1.0).
    creator.create("FitnessMax", base.Fitness, weights=(1.0, 1.0))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    # Structure initializers
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     mul_parameter)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", capital, data)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", mut_flip_bit, indpb=0.05)
    toolbox.register("select", tools.selNSGA2)
    random.seed(64)
    pool = multiprocessing.Pool(processes=config.processes)
    toolbox.register("map", pool.map)
    try:
        MU = 20       # individuals selected each generation
        LAMBDA = 100  # children produced each generation
        pop = toolbox.population(config.population_num)
        # Crossover prob., mutation prob., number of generations.
        CXPB, MUTPB, NGEN = 0.5, 0.2, config.ngen_num
        hof = tools.ParetoFront()  # non-dominated optimal set
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean, axis=0)
        stats.register("std", np.std, axis=0)
        stats.register("min", np.min, axis=0)
        stats.register("max", np.max, axis=0)
        algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN,
                                  stats, halloffame=hof)
    finally:
        # BUGFIX: join after close (close alone does not wait for workers),
        # and run cleanup even if the evolution raises.
        pool.close()
        pool.join()
    return pop
def my_nsga2(n, nbgen, evaluate, ref_point=np.array([1, 1]), IND_SIZE=5, weights=(-1.0, -1.0)):
    """NSGA-2 (exercise skeleton — the evolutionary steps are left to complete).

    :param n: population size
    :param nbgen: number of generations
    :param evaluate: the evaluation function
    :param ref_point: reference point for the hypervolume computation
    :param IND_SIZE: size of an individual
    :param weights: fitness weights ((-1.0,) to minimise, (1.0,) to maximise)

    NOTE(review): ``ref_point=np.array([1, 1])`` is a mutable default
    argument shared across calls — confirm it is never mutated.
    """
    creator.create("MaFitness", base.Fitness, weights=weights)
    creator.create("Individual", list, fitness=creator.MaFitness)
    toolbox = base.Toolbox()
    paretofront = tools.ParetoFront()
    # TODO: to be completed (register operators, create the population).
    # Hypervolume values across generations are collected in s_hv.
    pointset = [np.array(ind.fitness.getValues()) for ind in paretofront]
    s_hv = [hv.hypervolume(pointset, ref_point)]
    # Begin the generational process
    for gen in range(1, nbgen):
        # Progress indicator: '+' every 10th generation, '.' otherwise.
        if (gen % 10 == 0):
            print("+", end="", flush=True)
        else:
            print(".", end="", flush=True)
        # TODO: to be completed (variation, evaluation, selection, front update).
        pointset = [np.array(ind.fitness.getValues()) for ind in paretofront]
        s_hv.append(hv.hypervolume(pointset, ref_point))
    # NOTE(review): ``population`` is never defined in this skeleton, so the
    # return statement raises NameError until the TODOs above are completed.
    return population, paretofront, s_hv
def build_toolbox(self) -> None:
    """Assemble the DEAP toolbox from the parsed GA parameters.

    Registers the candidate/individual/population factories and the bounded
    mate/mutate operators plus the selection method, stores the toolbox on
    ``self.toolbox``, and picks the hall of fame: a Pareto front for
    multi-objective runs, a plain HallOfFame otherwise.

    Returns:
        None - the toolbox is written onto self.toolbox.
    """
    tb = base.Toolbox()
    tb.register("candidate", self.make_candidate)
    tb.register("individual", tools.initIterate,
                deepcopy(self.default_individual), tb.candidate)
    tb.register("population", tools.initRepeat, list, tb.individual)
    # Bounded crossover/mutation: parameters cannot leave [low, up].
    tb.register("mate", self.mate_method, eta=self.eta, low=self.low, up=self.up)
    tb.register("mutate", self.mutate_method, eta=self.eta, low=self.low,
                up=self.up, indpb=self.indpb)
    # Selection method (multi-objective, e.g. selNSGA2).
    tb.register("select", self.select_method)
    self.toolbox = tb
    # Multiple weights => multi-objective => Pareto front archive.
    self.hall_of_fame = (tools.ParetoFront() if len(self.weights) > 1
                         else tools.HallOfFame(maxsize=10))
def genetic_algorithm(dataset: datasets.Dataset, population_size: int,
                      crossover_rate: float, mutation_rate: float,
                      no_generations: int, verbose=False):
    """Run a simple EA on the square-packing dataset and return its Pareto front.

    Args:
        dataset: problem instance providing ``no_squares`` and ``square_sizes``.
        population_size: number of individuals per generation.
        crossover_rate: crossover probability.
        mutation_rate: mutation probability.
        no_generations: number of generations to run.
        verbose: when True, per-generation statistics are computed and logged.

    Returns:
        tools.ParetoFront: the non-dominated individuals found.
    """
    toolbox = base.Toolbox()
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     initIndividual, n=dataset.no_squares)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate", evaluate, dataset.square_sizes)
    toolbox.register("mate", crossover)
    toolbox.register("mutate", mutation, dataset)
    toolbox.register("select", tools.selNSGA2)
    toolbox.register("selectBest", tools.selBest)
    # Statistics are only tracked when verbose output is requested.
    stats = None
    if verbose:
        stats = tools.Statistics(key=lambda ind: ind.fitness.values)
        for label, reducer in (("avg", np.mean), ("std", np.std),
                               ("min", np.min), ("max", np.max)):
            stats.register(label, reducer, axis=0)
    pop = toolbox.population(n=population_size)
    hof = tools.ParetoFront()
    algorithms.eaSimple(pop, toolbox, crossover_rate, mutation_rate,
                        ngen=no_generations, stats=stats, halloffame=hof,
                        verbose=verbose)
    return hof
def run_mu_lambda(self):
    """Run a (2 + 10) evolution for 1000 generations and print the final minima.

    Uses the instance toolbox; per-generation avg/std/min/max of the fitness
    values are recorded, and the minimum of the last generation is printed.
    """
    MU, LAMBDA = 2, 10
    pop = self.toolbox.population(n=MU)
    hof = tools.ParetoFront()
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
    # eaMuPlusLambda returns the final population and a logbook holding the
    # per-generation statistics registered above.
    pop, logbook = algorithms.eaMuPlusLambda(
        pop, self.toolbox, mu=MU, lambda_=LAMBDA, cxpb=0.2, mutpb=0.6,
        ngen=1000, stats=stats, halloffame=hof)
    # BUGFIX: tools.Statistics has no ``min`` attribute — ``stats.min``
    # raised AttributeError; read the recorded minima from the logbook.
    print("Min:" + str(logbook.select("min")[-1]))
def hibachi(pop, gen, rseed):
    """Seed both RNGs, run a (pop + pop) evolution for *gen* generations.

    :param pop: population size (used for both mu and lambda)
    :param gen: number of generations
    :param rseed: seed applied to numpy and the stdlib RNG
    :return: (final population, statistics, Pareto front, logbook)
    """
    mu = lam = pop
    np.random.seed(rseed)
    random.seed(rseed)
    population = toolbox.population(n=mu)
    # Custom equality predicate decides Pareto-front membership.
    front = tools.ParetoFront(similar=pareto_eq)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, reducer in (("avg", np.mean), ("std", np.std),
                           ("min", np.min), ("max", np.max)):
        stats.register(label, reducer, axis=0)
    population, log = algorithms.eaMuPlusLambda(population, toolbox, mu=mu,
                                                lambda_=lam, cxpb=0.7,
                                                mutpb=0.3, ngen=gen,
                                                stats=stats, verbose=True,
                                                halloffame=front)
    return population, stats, front, log
def getHof():
    """Run a (mu + lambda) GA and return the resulting Pareto front.

    Returns:
        tools.ParetoFront: the hall of fame of non-dominated individuals.
    """
    # Population size and generation count for the run.
    numPop = 730
    numGen = 100
    pop = toolbox.population(n=numPop)
    hof = tools.ParetoFront()
    # Track the two objectives (loss, size) separately.
    stats_loss = tools.Statistics(key=lambda ind: ind.fitness.values[0])
    stats_size = tools.Statistics(key=lambda ind: ind.fitness.values[1])
    mstats = tools.MultiStatistics(fitness=stats_loss, size=stats_size)
    mstats.register("avgLoss", np.mean)
    mstats.register("stdLoss", np.std)
    mstats.register("minLoss", np.min)
    mstats.register("maxLoss", np.max)
    # Launch genetic algorithm (eaMuPlusLambda; the old comment wrongly said
    # eaSimple). CONSISTENCY FIX: reuse numPop for mu/lambda_ instead of
    # repeating the magic number 730.
    pop, log = algorithms.eaMuPlusLambda(pop, toolbox, mu=numPop,
                                         lambda_=numPop, cxpb=0.4, mutpb=0.6,
                                         ngen=numGen, stats=mstats,
                                         halloffame=hof, verbose=True)
    # Return the hall of fame
    return hof
def main():
    """Evolve a (100 + 200) population, tracking four of the six objectives.

    Returns:
        tuple: (final population, multi-statistics object, Pareto front).
    """
    # Shorter runs while testing; full length otherwise.
    if config.testing:
        NGEN = 40
    else:
        NGEN = config.number_of_generations
    MU = 100
    LAMBDA = 200
    CXPB = 0.3
    MUTPB = 0.6
    pop = toolbox.population(n=MU)
    hof = tools.ParetoFront()
    # NOTE: objectives 2 (lang links) and 3 (page views) are currently
    # disabled below, but indices 4 and 5 are still used — individuals must
    # therefore still carry all six fitness values.
    size_stats = tools.Statistics(key=lambda ind: ind.fitness.values[0])
    page_link_stats = tools.Statistics(key=lambda ind: ind.fitness.values[1])
    # lang_link_stats = tools.Statistics(key=lambda ind: ind.fitness.values[2])
    # page_view_stats = tools.Statistics(key=lambda ind: ind.fitness.values[3])
    quality_stats = tools.Statistics(key=lambda ind: ind.fitness.values[4])
    importance_stats = tools.Statistics(key=lambda ind: ind.fitness.values[5])
    stats = tools.MultiStatistics(
        size=size_stats,
        page_links=page_link_stats,
        # lang_links=lang_link_stats,
        # page_views=page_view_stats,
        quality=quality_stats,
        importance=importance_stats)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN,
                              stats, halloffame=hof)
    return pop, stats, hof
def experimento():
    """Run one GP experiment: build the primitive set, evolve until the stop
    condition, and print the points achieved by each Pareto-front member.

    Returns:
        tuple: (final population, logbook, toolbox, Pareto front).
    """
    toolbox = base.Toolbox()
    # Arithmetic/trigonometric primitives over a single input variable x.
    pset = gp.PrimitiveSet("MAIN", 1)
    for primitive, arity in ((operator.add, 2), (operator.sub, 2),
                             (operator.mul, 2), (protectedDiv, 2),
                             (operator.neg, 1), (math.cos, 1), (math.sin, 1)):
        pset.addPrimitive(primitive, arity)
    pset.addEphemeralConstant("rand101", lambda: random.randint(-1, 1))
    pset.renameArguments(ARG0='x')
    cs.configuraPoblacion(toolbox, pset)
    ce.configuracionAlgoritmo(toolbox, pset)
    # Algorithm parameters (pop_size kept for reference; the initial
    # population size below is derived from the number of years).
    alg_param = {'cxpb': 0.75, 'mutpb': 0.2, 'pop_size': 25, 'ngen': 100}
    init_pop = toolbox.population(n=len(dc.anos))
    # Run the evolution until the stop condition.
    population, logbook = condicionParada(
        init_pop, toolbox, cxpb=alg_param['cxpb'], mutpb=alg_param['mutpb'],
        ngen=alg_param['ngen'], verbose=True,
        stats=ee.configuraEstadisticasEvolucion())
    pareto = tools.ParetoFront()
    pareto.update(population)
    for individual in pareto:
        print(ee.calcularPuntos(toolbox, individual))
    return population, logbook, toolbox, pareto
def test_pareto_front():
    """Chains with identical fitness collapse to one front member."""
    population = [chain_third(), chain_first(), chain_third()]
    # First and last entries share the same objective values on purpose.
    eval_fitness = [(-0.9821, 0.8), (-0.8215, 0.6), (-0.9821, 0.8)]
    for chain, values in zip(population, eval_fitness):
        chain.fitness = MultiObjFitness(values=values,
                                        weights=(-1,) * len(values))
    front = tools.ParetoFront()
    front.__class__ = FedotParetoFront
    front.update(population)
    # Three inputs, but only two distinct non-dominated fitness values.
    assert len(front) == 2
def main():
    """Bi-objective (price, time) run: (100 + 200) over 40 generations.

    Returns:
        tuple: (final population, multi-statistics object, Pareto front).
    """
    generations = 40
    mu, lam = 100, 200
    cx_prob, mut_prob = 0.3, 0.6
    pop = toolbox.population(n=mu)
    front = tools.ParetoFront()
    # Track each objective separately for logging.
    stats = tools.MultiStatistics(
        price=tools.Statistics(key=lambda ind: ind.fitness.values[0]),
        time=tools.Statistics(key=lambda ind: ind.fitness.values[1]))
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    algorithms.eaMuPlusLambda(pop, toolbox, mu, lam, cx_prob, mut_prob,
                              generations, stats, halloffame=front)
    return pop, stats, front
def _explain(self, cp, lambda_, mu, mut, ngen, stats, verbose):
    """Run a (mu + lambda) evolution to build an explanation.

    Args:
        cp: crossover probability.
        lambda_: children produced per generation.
        mu: individuals selected per generation.
        mut: mutation probability (also stored on the operators object).
        stats: truthy to record a deep copy of each generation's population.
        ngen: number of generations (also used as the initial population size).
        verbose: forwarded to DEAP's logging.

    Returns:
        tuple: (Pareto-front hall of fame, logbook).
    """
    self._ops.mut_prob = mut
    if stats:
        stats = tools.Statistics()
        stats.register("pop", copy.deepcopy)
    else:
        stats = None
    # BUGFIX: the 'similar' predicate compared ind1's fitness with itself
    # (always True), so every candidate was treated as a duplicate; compare
    # the two distinct individuals instead.
    hof = tools.ParetoFront(
        lambda ind1, ind2: ind1.fitness.values == ind2.fitness.values)
    # NOTE: the population size is tied to ngen here — confirm intentional.
    pop = self.toolbox.population(n=ngen)
    res, logbook = algorithms.eaMuPlusLambda(pop, self.toolbox, mu=mu,
                                             lambda_=lambda_, cxpb=cp,
                                             mutpb=mut, ngen=ngen,
                                             stats=stats, halloffame=hof,
                                             verbose=verbose)
    return hof, logbook
def multi_objetivo_ga():
    """Run a seeded (100 + 100) multi-objective GA for 100 generations.

    Returns:
        tuple: (final population, Pareto-front hall of fame).
    """
    NGEN = 100
    MU = 100
    LAMBDA = 100
    CXPB = 0.6
    MUTPB = 0.4
    # BUGFIX: seed the RNG *before* generating the initial population —
    # previously it was seeded afterwards, so the initial population (and
    # hence the whole run) was not reproducible.
    random.seed(64)
    pop = toolbox.ini_poblacion(n=MU)
    hof = tools.ParetoFront()
    algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN,
                              halloffame=hof)
    return pop, hof
def main(objectives=5):
    """Run MOEA/D on the DZLT5 problem and plot the resulting Pareto front.

    Args:
        objectives: number of minimised objectives. GENERALISATION: this
            parameter was previously ignored and the weight vector was
            hard-coded to five entries; the default of 5 preserves the old
            behaviour.

    Returns:
        tuple: (final population, Pareto-front hall of fame).
    """
    # Build the minimisation weights from the parameter.
    creator.create("Fitness", base.Fitness, weights=(-1,) * objectives)
    creator.create("Individual", list, fitness=creator.Fitness)
    toolbox = base.Toolbox()
    toolbox.register("attr_float", random.random)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_float, IND_SIZE)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    CXPB, MUTPB, NGEN = 0.7, 0.2, 50
    toolbox.register("mate", crxover)
    toolbox.register("mutate", mutate)
    toolbox.register("evaluate", evalDZLT5)
    toolbox.register("select", tools.selNSGA2)
    pop = toolbox.population(n=n_subproblem)
    hof = tools.ParetoFront()
    # Run MOEA/D with one subproblem per weight vector.
    ea_w2 = moead_dzlt5(pop, toolbox, n_subproblem, CXPB, MUTPB, ngen=NGEN,
                        halloffame=hof)
    paretoPoints, weight = ea_w2.execute()
    # Evaluate the Pareto points and plot the resulting front.
    pf = np.array(toolbox.map(toolbox.evaluate, paretoPoints))
    plotResult(pf)
    weight = np.array(weight)
    return pop, hof
def init_evolution(self, mu, lambda_, cxpb, mutpb, ngen): """初始化遗传算法,设置""" # 一套参数是一个个体,产生个体的编码 self.toolbox.register("individual", tools.initIterate, creator.Individual, self.ma_gen_parameter) # 创建种群 self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual) # 计算适应度的函数 self.toolbox.register("evaluate", indicator_fitness, self.signal, self.signal_bin, self._apply_price, ma_cross) # 交叉、变异和选择 self.toolbox.register("mate", tools.cxTwoPoint) self.toolbox.register("mutate", tools.mutGaussian, mu=0.0, sigma=0.5, indpb=0.5) self.toolbox.register("select", tools.selNSGA2) pop = self.toolbox.population(n=10) self.toolbox.register("map", self.pool.map) hof = tools.ParetoFront() # 非占优最优集 self.stats.register("avg", np.mean, axis=0) self.stats.register("std", np.std, axis=0) self.stats.register("min", np.min, axis=0) self.stats.register("max", np.max, axis=0) pop, logbook = algorithms.eaMuPlusLambda(pop, self.toolbox, mu, lambda_, cxpb, mutpb, ngen, stats=self.stats, halloffame=hof, verbose=True) self.pool.close() return pop
def print_pop_fitness(app_folder_path):
    """Rebuild individuals from a saved logbook's per-generation fitness
    values, then print the Pareto front over all generations.

    :param app_folder_path: folder containing the logbook to analyse
    """
    logbook = postprocess.logbook_analysis.get_logbook(app_folder_path)
    print logbook
    print ""
    hof = tools.ParetoFront()
    iteration = 0
    pop = []
    for gen_pop in logbook.select("pop_fitness"):
        print("Iteration " + str(iteration))
        print("Population: " + str(gen_pop))
        # Wrap each recorded fitness tuple in a fresh (empty) individual so
        # the ParetoFront machinery can compare them.
        for pop_ind_fitness in gen_pop:
            ind = creator.Individual([])
            ind.fitness.values = pop_ind_fitness
            pop.append(ind)
        iteration = iteration + 1
    # Update once over the accumulated individuals of every generation.
    hof.update(pop)
    print "HoF"
    for hof_member in hof:
        print(hof_member.fitness)