def __init__(self, objfunc, var_bounds, individual_size, max_iter, max_or_min, **kwargs):
    """Build a gaft GA engine around *objfunc*.

    :param objfunc: objective function called as ``objfunc(x, **kwargs)``.
    :param var_bounds: value ranges for each solution entry (gaft format).
    :param individual_size: population size.
    :param max_iter: iteration budget stored for later use by the caller.
    :param max_or_min: ``'max'`` to maximize the objective, anything else minimizes.
    :param kwargs: extra keyword arguments forwarded to ``objfunc``.
    """
    super().__init__(objfunc)
    self.max_iter = max_iter
    # Define individual / population (binary-encoded, precision 0.001).
    self.individual = BinaryIndividual(ranges=var_bounds, eps=0.001)
    self.population = Population(indv_template=self.individual, size=individual_size).init()

    # Create genetic operators.
    # selection = RouletteWheelSelection()
    selection = TournamentSelection()
    crossover = UniformCrossover(pc=0.8, pe=0.5)
    mutation = FlipBitBigMutation(pm=0.1, pbm=0.55, alpha=0.6)
    self.engine = GAEngine(population=self.population, selection=selection,
                           crossover=crossover, mutation=mutation,
                           analysis=[FitnessStore])

    @self.engine.fitness_register
    def fitness(indv):
        """
        Fitness function.  Note: by default (max_or_min != 'max') the
        objective is minimized — gaft maximizes fitness, so we negate.

        :param indv: gaft individual whose decoded solution is evaluated.
        :return: fitness value to be maximized by the engine.
        """
        x = indv.solution
        if max_or_min == 'max':
            return objfunc(x, **kwargs)
        else:
            return -objfunc(x, **kwargs)

    @self.engine.analysis_register
    class ConsoleOutputAnalysis(OnTheFlyAnalysis):
        # Per-generation reporting hook; logging is currently disabled
        # (the logger calls are commented out), only the messages are built.
        interval = 1
        master_only = True

        def register_step(self, g, population, engine):
            best_indv = population.best_indv(engine.fitness)
            msg = 'Generation: {}, best fitness: {:.3f}'.format(
                g, engine.fitness(best_indv))
            # self.logger.info(msg)

        def finalize(self, population, engine):
            # Build (but do not emit) the final-solution summary message.
            best_indv = population.best_indv(engine.fitness)
            x = best_indv.solution
            y = engine.fitness(best_indv)
            msg = 'Optimal solution: ({}, {})'.format(x, y)
def tune_weights(self):
    """Tune the individual's subtree scaling/translation weights with a GA.

    Runs a decimal-encoded GA over one bound per scaling weight followed by
    one bound per translation weight, and adopts the best solution only if
    it improves on the individual's current fitness.

    :return: a deep copy of (possibly updated) ``self.individual``.
    """
    old_fitness = self.individual.fitness
    weights_scaling = self.individual.get_subtree_scaling()
    weights_translation = self.individual.get_subtree_translation()

    # One range per scaling parameter, then one per translation parameter.
    # (Renamed from `range` — the original shadowed the builtin.)
    param_ranges = ([self.scale_range] * len(weights_scaling)
                    + [self.translation_range] * len(weights_translation))

    indv_template = DecimalIndividual(ranges=param_ranges, eps=0.1)
    population = Population(indv_template=indv_template, size=self.pop_size)
    population.init()
    engine = GAEngine(
        population=population,
        selection=TournamentSelection(),
        crossover=GaussianCrossover(pc=1.0),
        mutation=NoMutation(),
        fitness=self.fitness_function_GAFT,
        analysis=[
            new_early_stopping_analysis(scale_range=self.scale_range)
        ])
    engine.logger = NoLoggingLogger()

    # Run the GA with the specified number of iterations.  ValueError is
    # swallowed deliberately — presumably the early-stopping analysis
    # raises it to terminate the run early (TODO confirm).
    try:
        engine.run(ng=self.max_iterations)
    except ValueError:
        pass

    # Get the best individual.
    best_indv = engine.population.best_indv(engine.fitness)

    # Log the tuning process (old fitness vs. tuned fitness).
    print(
        f"Tuner {np.round(old_fitness, 3)} {np.round(-engine.ori_fmax, 3)}"
    )

    # Only use the new individual if it was really improved.  gaft
    # maximizes fitness, hence the sign flip on `ori_fmax`.
    if old_fitness > -engine.ori_fmax:
        weights_scaling, weights_translation = self.split_list(
            best_indv.solution)
        self.individual.set_subtree_scaling(weights_scaling)
        self.individual.set_subtree_translation(weights_translation)
        self.individual.fitness = -engine.ori_fmax
    return deepcopy(self.individual)
def __init__(self, k, total_implied_variance, slice_before, slice_after, tau):
    """Set up a GA to calibrate the 5 raw-SVI parameters to a variance slice.

    :param k: log-moneyness grid.
    :param total_implied_variance: observed total implied variance at `k`.
    :param slice_before: earlier-expiry slice (for no-arbitrage checks, unused here).
    :param slice_after: later-expiry slice (for no-arbitrage checks, unused here).
    :param tau: time to expiry of this slice.
    """
    self.k = k
    self.total_implied_variance = total_implied_variance
    self.slice_before = slice_before
    self.slice_after = slice_after
    self.tau = tau

    # Define population.  Raw SVI has FIVE parameters (a, b, m, rho, sigma);
    # the original template supplied only three ranges, which made the
    # 5-way unpack in `fitness` raise ValueError at the first evaluation.
    # NOTE(review): rho is a correlation and normally lies in (-1, 1);
    # confirm the intended bound and tighten that entry if so.
    indv_template = BinaryIndividual(ranges=[(1e-5, 20)] * 5, eps=0.001)
    self.population = Population(indv_template=indv_template, size=30).init()

    # Create genetic operators.
    selection = TournamentSelection()
    crossover = UniformCrossover(pc=0.8, pe=0.5)
    mutation = FlipBitMutation(pm=0.1)

    # Create genetic algorithm engine.
    self.engine = GAEngine(population=self.population, selection=selection,
                           crossover=crossover, mutation=mutation,
                           analysis=[FitnessStore])

    # Define fitness function.
    @self.engine.fitness_register
    @self.engine.minimize
    def fitness(indv):
        """L2 distance between market and model total implied variance."""
        a, b, m, rho, sigma = indv.solution
        model_total_implied_variance = svi_raw(
            self.k, np.array([a, b, m, rho, sigma]), self.tau)
        value = norm(self.total_implied_variance - model_total_implied_variance, ord=2)
        # Guard against NaN from degenerate parameter combinations so the
        # engine ranks such individuals as very poor instead of crashing.
        if np.isnan(value):
            value = 1e6
        value = float(value)
        return value
from gaft.analysis.fitness_store import FitnessStore # Define population. # 先对你所指定的初始种群进行编码 indv_template = BinaryIndividual(ranges=[(0, 10)], eps=0.001) ''' :param ranges: value ranges for all entries in solution. :type ranges: list of range tuples. e.g. [(0, 1), (-1, 1)] :param eps: decrete precisions for binary encoding, default is 0.001. :type eps: float or float list with the same length with ranges. ''' population = Population(indv_template=indv_template, size=30).init() # Create genetic operators. selection = TournamentSelection() crossover = UniformCrossover(pc=0.8, pe=0.5) ''' Crossover operator with uniform crossover algorithm, see https://en.wikipedia.org/wiki/Crossover_(genetic_algorithm) :param pc: The probability of crossover (usaully between 0.25 ~ 1.0) :type pc: float in (0.0, 1.0] :param pe: Gene exchange probability. ''' mutation = FlipBitMutation(pm=0.1) # pm is the possibility of the mutation # Create genetic algorithm engine. engine = GAEngine(population=population, selection=selection, crossover=crossover, mutation=mutation,
# NOTE(review): these plotting statements reference `geracoes` / `n_nines`,
# which are not defined in this chunk — they look like the tail of a
# plotting routine defined earlier in the file; confirm their enclosing scope.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(geracoes, n_nines)
plt.show()


if __name__ == "__main__":
    # GA over two variables in [-100, 100] with fine binary precision.
    individuo = BinaryIndividual(ranges=[(-100, 100), (-100, 100)], eps=0.000001)
    populacao = Population(indv_template=individuo, size=100)
    populacao.init()
    selecao = TournamentSelection()
    crossover = UniformCrossover(pc=0.65, pe=0.65)
    mutacao = FlipBitMutation(pm=0.008)
    engine = GAEngine(population=populacao, selection=selecao,
                      crossover=crossover, mutation=mutacao,
                      analysis=[FitnessStore, ConsoleOutput])

    @engine.fitness_register
    def aptidao(ind):
        # Schaffer F6-style benchmark: peak of 1.0 at (0, 0); gaft
        # maximizes fitness, so no sign flip is needed.
        x, y = ind.solution
        return 0.5 - ((sin(sqrt(x**2 + y**2))**2 - 0.5) /
                      (1 + 0.001 * (x**2 + y**2))**2)
def generate():
    """Run a GA that searches 10-gene policies against the challenge
    environment, seeded from ``sys.argv[1]``, then plot and save the
    per-generation best rewards.
    """
    import random

    good_seed = int(sys.argv[1])
    print('currrrrrrrrrrrrrrrrrrrrrrrrrrrrr', good_seed)

    # 1. Initialise a new challenge environment to post the entire policy.
    envSeqDec = ChallengeProveEnvironment()
    # env = ChallengeEnvironment(experimentCount = 20000)

    eps = 0.1  # action-space resolution (range / eps)
    pop_size = 4
    cross_prob = 0.6
    exchange_prob = 0.7
    mutation_pob = 0.8
    generation = 4

    REWARDS = []            # best reward per generation (filled by analysis hook)
    POLICY = []
    tmp_reward = []         # every evaluated reward, in evaluation order
    tmp_policy = []         # every evaluated policy, in evaluation order
    policy_450 = []
    reward_generation = []  # same as tmp_reward; sliced per generation below

    random.seed(good_seed)
    turb = 0  # fitness noise amplitude; 0 disables the perturbation below
    test_action = ([[i / 10 for i in range(0, 11)],
                    [i / 10 for i in range(0, 11)]])
    # test_action = ([0, 0.1, 0.2, 0.3, 0.4, 0.5], [0.6, 0.7, 0.8, 0.9, 1])

    # 2. Define population: ten genes in [0, 1] (low_bit and high_bit pairs).
    indv_template = OrderIndividual(ranges=[(0, 1)] * 10, eps=eps,
                                    actions=test_action)
    population = Population(indv_template=indv_template, size=pop_size)
    population.init()  # Initialize population with individuals.

    # 3. Create genetic operators (built-in operators).
    # selection = RouletteWheelSelection()
    selection = TournamentSelection()
    crossover = UniformCrossover(pc=cross_prob, pe=exchange_prob)  # pe: gene exchange probability
    mutation = FlipBitMutation(pm=mutation_pob)

    # 4. Create genetic algorithm engine to run optimization.
    engine = GAEngine(population=population, selection=selection,
                      crossover=crossover, mutation=mutation,)
    # analysis=[FitnessStore])

    # 5. Define and register fitness function.
    @engine.fitness_register
    # @engine.dynamic_linear_scaling(target='max', ksi0=2, r=0.9)
    def fitness(indv):
        # Decode the 10 genes into a 5-year, 2-action-per-year policy.
        p = indv.solution
        policy = {'1': [p[0], p[1]], '2': [p[2], p[3]], '3': [p[4], p[5]],
                  '4': [p[6], p[7]], '5': [p[8], p[9]]}
        reward = envSeqDec.evaluatePolicy(policy)  # Action in Year 1 only
        tmp_reward.append(reward)
        reward_generation.append(reward)
        tmp_policy.append(policy)
        # With turb == 0 the perturbation is a no-op; nonzero turb adds
        # uniform noise to the fitness without touching the logged reward.
        return reward + uniform(-turb, turb)

    @engine.analysis_register
    class ConsoleOutput(OnTheFlyAnalysis):
        master_only = True
        interval = 1

        def register_step(self, g, population, engine):
            best_indv = population.best_indv(engine.fitness)
            msg = 'Generation: {}, best fitness: {:.3f}'.format(g + 1, engine.fmax)
            # Best raw reward among this generation's pop_size evaluations.
            REWARDS.append(max(reward_generation[pop_size * g: pop_size * (g + 1)]))
            engine.logger.info(msg)

    engine.run(ng=generation)

    # Plot best reward per generation and save the figure.
    x = list(range(len(REWARDS)))
    plt.plot(x, REWARDS)
    plt.title(f'Sequential Rewards {good_seed}')
    plt.savefig(f'./GA_trick/GA_seed_{good_seed}.jpg')
    plt.show()
# Load the trained SOM model and its ground-truth labels.
obsSOM, true_labels = utils.loadSOM(
    save_dir=config.OUTPUT_TRAINED_MODELS_PATH,
    file_name=config.ZSEL_SOM_3D_MODEL_NAME)
setupEnv()
# print(MODELS_VALUES.shape)
# print(MODELS_DICT)

# One decimal gene in [0, 1] per model (precision 0.001).
indv_template = DecimalIndividual(ranges=[
    (0, 1) for _ in range(config.NB_MODELS)
], eps=0.001)
population = Population(indv_template=indv_template, size=POPULATION_SIZE)
population.init()
selection = TournamentSelection(
    tournament_size=10)  # RouletteWheelSelection()
crossover = UniformCrossover(pc=CROSSOVER_PROBABILITY, pe=GE_PROBABILITY)
mutation = FlipBitMutation(pm=MUTATION_PROBABILITY)
engine = GAEngine(population=population, selection=selection,
                  crossover=crossover, mutation=mutation,
                  analysis=[FitnessStore])


@engine.fitness_register
def fitness(indv):
    # NOTE(review): the body of this function is truncated here — it
    # continues beyond this chunk after the global declarations.
    global MODELS_VALUES
    global MODELS_DICT
    global CASE
    global true_labels