def ga(df, start, end, _positionList, ranges=None, eps=0.01):
    """Optimise KAMA trading parameters (n, upper, lower, adds, cutoff) with a GA.

    Args:
        df: price DataFrame with a ``close`` column. Modified in place:
            ``KAMA`` and ``VAR`` columns are (re)written on every fitness call.
        start, end: backtest window, passed through to ``profitsCal``.
        _positionList: position state, passed through to ``profitsCal``.
        ranges: per-parameter search ranges; defaults to
            ``[(20, 100), (0.01, 1), (0.01, 1), (0.01, 1), (1, 5)]``.
        eps: decoding precision of the binary individual.

    Returns:
        Tuple ``(best_solution, _positionList)`` after 30 generations.
    """
    # BUG FIX: the original used a mutable list as the default for `ranges`,
    # which is shared across calls; use a None sentinel instead.
    if ranges is None:
        ranges = [(20, 100), (0.01, 1), (0.01, 1), (0.01, 1), (1, 5)]

    indv_template = BinaryIndividual(ranges=ranges, eps=eps)
    population = Population(indv_template=indv_template, size=100)
    population.init()  # Initialize population with individuals.

    # Use built-in operators here.
    selection = RouletteWheelSelection()
    crossover = UniformCrossover(pc=0.8, pe=0.5)
    mutation = FlipBitMutation(pm=0.3)

    engine = GAEngine(population=population, selection=selection,
                      crossover=crossover, mutation=mutation,
                      analysis=[FitnessStore])

    @engine.fitness_register
    def fitness(indv):
        # Decode the candidate solution into strategy parameters.
        n, upper, lower, adds, cutoff = indv.solution
        df['KAMA'] = talib.KAMA(df.close, int(n))
        # 10-bar variance of the KAMA-adjusted one-bar price change.
        df['VAR'] = talib.VAR(df.close - df.KAMA.shift(1)
                              - df.close.shift(1) + df.KAMA.shift(2), 10)
        profitsList, buypriceList, sellpriceList, fits, positionList = profitsCal(
            df, start, end, _positionList,
            upper=upper, lower=lower, adds=adds, cutoff=cutoff)
        return float(fits)

    @engine.analysis_register
    class ConsoleOutput(OnTheFlyAnalysis):
        master_only = True
        interval = 1

        def register_step(self, g, population, engine):
            best_indv = population.best_indv(engine.fitness)
            msg = 'Generation: {}, best fitness: {:.3f}'.format(g, engine.fmax)
            print(best_indv.solution)
            engine.logger.info(msg)

    engine.run(ng=30)
    return population.best_indv(engine.fitness).solution, _positionList
def test_new_population(self):
    """Cloning a population yields one of equal size with no individuals yet."""
    original = Population(indv_template=self.indv_template, size=10)
    original.init()
    clone = original.new()
    self.assertEqual(clone.size, 10)
    self.assertListEqual(clone.individuals, [])
def tain_svm():
    """Tune SVR hyper-parameters (C, epsilon, gamma) with a GA, then fit.

    Evolves three decimal genes in [-8, 8], reads the best individual after
    the run, and trains an RBF-kernel SVR on the PCA-preprocessed data.
    """
    indv_template = BinaryIndividual(ranges=[(-8, 8), (-8, 8), (-8, 8)],
                                     eps=[0.001, 0.001, 0.001])
    population = Population(indv_template=indv_template, size=1000)
    population.init()  # Initialize population with individuals.

    selection = RouletteWheelSelection()
    crossover = UniformCrossover(pc=0.8, pe=0.5)
    # mutation = FlipBitMutation(pm=0.1)
    mutation = FlipBitBigMutation(pm=0.1, pbm=0.55, alpha=0.6)

    engine = GAEngine(population=population, selection=selection,
                      crossover=crossover, mutation=mutation,
                      analysis=[ConsoleOutput, FitnessStore])

    #############################################################
    # BUG FIX: the GA must run *before* the best individual is read;
    # the original called engine.run(ng=100) at the very end.
    # NOTE(review): assumes the fitness function is registered elsewhere
    # in this module before tain_svm() is called — verify.
    engine.run(ng=100)

    # BUG FIX: best_indv() already returns the individual; the original
    # applied .variants twice (list has no .variants attribute).
    indv = engine.population.best_indv(engine.fitness)
    # BUG FIX: indices [1], [2], [-1] skipped gene 0 and duplicated the
    # last gene; the three genes map to C, epsilon, gamma in order.
    c, e, g = indv.variants[0], indv.variants[1], indv.variants[2]

    # BUG FIX: svm.svR does not exist; the estimator is svm.SVR.
    clf = svm.SVR(C=c, epsilon=e, gamma=g, kernel='rbf')
    data_x, data_y = preprocess_pca()
    clf.fit(data_x, data_y)
    predictval = clf.predict(data_x)
    print(predictval)
def test_all_fits(self):
    """all_fits returns exactly one float fitness value per individual."""
    pop = Population(indv_template=self.indv_template, size=10)
    pop.init()
    fits = pop.all_fits(fitness=self.fitness)
    self.assertEqual(len(fits), 10)
    for value in fits:
        self.assertTrue(type(value) is float)
def generate(self):
    """Search for a 5-year, 2-action-per-year policy with a GA.

    Returns:
        Tuple ``(best_policy, best_reward)``; ``(None, -inf)`` when
        interrupted before any evaluation completed.
    """
    best_policy = None
    best_reward = -float('Inf')
    eps = 1           # equal to actions space resolution, eps is step size
    pop_size = 4
    cross_prob = 1
    exchange_prob = 1
    mutation_pob = 1
    generation = 4
    tmp_reward = []   # true (noise-free) reward of every fitness evaluation
    tmp_policy = []   # policy of every fitness evaluation, same order
    random.seed(54)
    turb = 5          # amplitude of the noise added to the GA fitness
    try:
        # Agents should make use of 20 episodes in each training run,
        # if making sequential decisions.
        # Define population: ten decimal genes, two per year.
        indv_template = DecimalIndividual(
            ranges=[(0, 1), (0, 1), (0, 1), (0, 1), (0, 1),
                    (0, 1), (0, 1), (0, 1), (0, 1), (0, 1)], eps=eps)
        population = Population(indv_template=indv_template, size=pop_size)
        population.init()  # Initialize population with individuals.

        # Create genetic operators — use built-in operators here.
        selection = RouletteWheelSelection()
        crossover = UniformCrossover(pc=cross_prob, pe=exchange_prob)  # pe = gene exchange probability
        mutation = FlipBitMutation(pm=mutation_pob)  # probability of mutation

        # Create genetic algorithm engine to run optimization.
        engine = GAEngine(population=population, selection=selection,
                          crossover=crossover, mutation=mutation,)

        # Define and register fitness function.
        @engine.fitness_register
        def fitness(indv):
            p = indv.solution
            # BUG FIX: the original had stray characters ("xw") after this
            # dict literal, which was a syntax error.
            policy = {'1': [p[0], p[1]], '2': [p[2], p[3]], '3': [p[4], p[5]],
                      '4': [p[6], p[7]], '5': [p[8], p[9]]}
            reward = self.environment.evaluatePolicy(policy)  # Action in Year 1 only
            print('Sequential Result : ', reward)
            tmp_reward.append(reward)
            tmp_policy.append(policy)
            # The GA sees a noisy fitness; the true reward is in tmp_reward.
            return reward + uniform(-turb, turb)

        # run
        engine.run(ng=generation)
        best_reward = max(tmp_reward)
        # BUG FIX: tmp_policy[-pop_size] is merely the first member of the
        # last generation; pick the policy that produced best_reward.
        best_policy = tmp_policy[tmp_reward.index(best_reward)]
    except (KeyboardInterrupt, SystemExit):
        print(exc_info())
    return best_policy, best_reward
def test_selection(self):
    """Tournament selection returns two BinaryIndividual parents."""
    template = BinaryIndividual(ranges=[(0, 30)])
    population = Population(template)
    population.init()
    chosen = TournamentSelection().select(population, fitness=self.fitness)
    father, mother = chosen
    self.assertTrue(isinstance(father, BinaryIndividual))
    self.assertTrue(isinstance(mother, BinaryIndividual))
def test_selection(self):
    """Roulette-wheel selection returns two distinct parent individuals."""
    template = BinaryIndividual(ranges=[(0, 30)])
    population = Population(template)
    population.init()
    father, mother = RouletteWheelSelection().select(population,
                                                    fitness=self.fitness)
    for parent in (father, mother):
        self.assertTrue(isinstance(parent, BinaryIndividual))
    # gaft spells the attribute "chromsome" (sic).
    self.assertNotEqual(father.chromsome, mother.chromsome)
def test_initialization(self):
    """A population starts empty and holds `size` individuals after init()."""
    pop = Population(indv_template=self.indv_template, size=10)
    self.assertListEqual(pop.individuals, [])
    pop.init()
    self.assertEqual(len(pop.individuals), 10)
    # Check the type of a contained individual.
    self.assertTrue(isinstance(pop[0], BinaryIndividual))
def tune_weights(self):
    """Tune the individual's subtree scaling/translation weights with a GA.

    Runs a GA over one decimal gene per scaling weight (range
    ``self.scale_range``) followed by one per translation weight (range
    ``self.translation_range``).

    Returns:
        A deep copy of ``self.individual``; its weights and fitness are
        updated only when the GA found a strictly better fitness.
    """
    old_fitness = self.individual.fitness
    weights_scaling = self.individual.get_subtree_scaling()
    weights_translation = self.individual.get_subtree_translation()

    # Create array with range for each scaling and translation parameter.
    # FIX (idiom): the original named this list ``range``, shadowing the
    # builtin for the rest of the method.
    param_ranges = [
        self.scale_range,
    ] * len(weights_scaling) + [
        self.translation_range,
    ] * len(weights_translation)
    indv_template = DecimalIndividual(ranges=param_ranges, eps=0.1)
    population = Population(indv_template=indv_template, size=self.pop_size)
    population.init()
    engine = GAEngine(
        population=population,
        selection=TournamentSelection(),
        crossover=GaussianCrossover(pc=1.0),
        mutation=NoMutation(),
        fitness=self.fitness_function_GAFT,
        analysis=[
            new_early_stopping_analysis(scale_range=self.scale_range)
        ])
    engine.logger = NoLoggingLogger()

    # Run the GA with the specified number of iterations.
    # NOTE(review): ValueError appears to be the early-stopping analysis'
    # termination signal — confirm against new_early_stopping_analysis.
    try:
        engine.run(ng=self.max_iterations)
    except ValueError:
        pass

    # Get the best individual.
    best_indv = engine.population.best_indv(engine.fitness)

    # Log the tuning process (fitness is negated via ori_fmax below).
    print(
        f"Tuner {np.round(old_fitness, 3)} {np.round(-engine.ori_fmax, 3)}"
    )

    # Only use the new individual if it was really improved.
    if old_fitness > -engine.ori_fmax:
        weights_scaling, weights_translation = self.split_list(
            best_indv.solution)
        self.individual.set_subtree_scaling(weights_scaling)
        self.individual.set_subtree_translation(weights_translation)
        self.individual.fitness = -engine.ori_fmax
    return deepcopy(self.individual)
from math import sin

from gaft import GAEngine
from gaft.components import BinaryIndividual, Population
from gaft.operators import RouletteWheelSelection, UniformCrossover, FlipBitMutation
from gaft.analysis import ConsoleOutput

# Analysis plugin base class.
from gaft.plugin_interfaces.analysis import OnTheFlyAnalysis

# Search space: one binary-encoded variable x in [0, 15], 0.001 precision.
indv_template = BinaryIndividual(ranges=[(0, 15)], eps=0.001)
population = Population(indv_template=indv_template, size=50)
population.init()  # Fill the population with random individuals.

# Built-in genetic operators.
selection = RouletteWheelSelection()
crossover = UniformCrossover(pc=0.8, pe=0.5)
mutation = FlipBitMutation(pm=0.1)

engine = GAEngine(population=population,
                  selection=selection,
                  crossover=crossover,
                  mutation=mutation,
                  analysis=[ConsoleOutput])


@engine.fitness_register
def fitness(indv):
    """Objective to maximise: f(x) = -3 * (x - 30)^2 * sin(x)."""
    x, = indv.solution
    return (-3) * (x - 30)**2 * sin(x)
def generate(self):
    """Seed a GA with the best greedy first-year action, then evolve a policy.

    First grid-searches the year-1 action on a coarse 0.2 grid, then runs a
    GA (seeded via ``best_1``) over ten genes (two per year, five years).

    Returns:
        Tuple ``(best_policy, best_reward)``; ``(None, -inf)`` when
        interrupted before any evaluation completed.
    """
    # BUG FIX: the originals were only assigned inside `try`, so an early
    # KeyboardInterrupt made the final `return` raise NameError.
    best_policy = None
    best_reward = -float('Inf')
    eps = 0.2         # equal to actions space resolution (range/eps)
    pop_size = 2
    cross_prob = 0.6
    exchange_prob = 0.7
    mutation_pob = 0.8
    generation = 6
    REWARDS = []      # best true reward observed per generation
    tmp_reward = []   # reward of every fitness evaluation
    tmp_policy = []   # policy of every fitness evaluation, same order
    turb = 0          # fitness noise amplitude (disabled)
    try:
        # Agents should make use of 20 episodes in each training run,
        # if making sequential decisions.
        # 1. Grid-search the first-year action on a coarse 0.2 grid.
        first_action = []
        for a1 in [i / 10 for i in range(0, 11, 2)]:
            for a2 in [i / 10 for i in range(0, 11, 2)]:
                first_action.append([a1, a2])
        # [0, 0] is absolutely bad action.
        first_action = first_action[1:]
        action_reward = []
        for i in range(len(first_action)):
            ar = self.environment.evaluateAction(first_action[i])
            self.environment.reset()
            action_reward.append(ar[1])
        # Get the best policy for the first year; seeds the individuals.
        best_action = first_action[action_reward.index(max(action_reward))]

        test_action = ([[i / 10 for i in range(0, 11, 2)],
                        [i / 10 for i in range(0, 11, 2)]])

        # 2. Define population.
        indv_template = OrderIndividual(
            ranges=[(0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1), (0, 1),
                    (0, 1), (0, 1), (0, 1)],
            eps=eps,
            actions=test_action,
            best_1=best_action)  # low_bit and high_bit
        population = Population(indv_template=indv_template, size=pop_size)
        population.init()  # Initialize population with individuals.

        # 3. Create genetic operators — use built-in operators here.
        selection = LinearRankingSelection()
        crossover = UniformCrossover(
            pc=cross_prob, pe=exchange_prob)  # pe = gene exchange probability
        mutation = FlipBitMutation(
            pm=mutation_pob)  # probability of mutation

        # 4. Create genetic algorithm engine to run optimization.
        engine = GAEngine(population=population, selection=selection,
                          crossover=crossover, mutation=mutation)

        # 5. Define and register fitness function.
        @engine.fitness_register
        def fitness(indv):
            p = indv.solution  # encode: two genes per year, five years
            policy = {
                '1': [p[0], p[1]],
                '2': [p[2], p[3]],
                '3': [p[4], p[5]],
                '4': [p[6], p[7]],
                '5': [p[8], p[9]]
            }
            reward = self.environment.evaluatePolicy(policy)
            tmp_reward.append(reward)
            tmp_policy.append(policy)
            return reward + uniform(-turb, turb)

        @engine.analysis_register
        class ConsoleOutput(OnTheFlyAnalysis):
            master_only = True
            interval = 1

            def register_step(self, g, population, engine):
                best_indv = population.best_indv(engine.fitness)
                msg = 'Generation: {}, best fitness: {:.3f}'.format(
                    g + 1, engine.fmax)
                # NOTE(review): this slice assumes exactly pop_size fitness
                # evaluations per generation — confirm against gaft internals.
                REWARDS.append(
                    max(tmp_reward[pop_size * (g - 0):pop_size * (g + 1)]))
                engine.logger.info(msg)

        engine.run(ng=generation)
        best_reward = max(tmp_reward)
        # BUG FIX: tmp_policy[-pop_size] is merely the first member of the
        # last generation; pick the policy that produced best_reward.
        best_policy = tmp_policy[tmp_reward.index(best_reward)]
    except (KeyboardInterrupt, SystemExit):
        print(exc_info())
    return best_policy, best_reward
from gaft.components import Population # 人口 from gaft.operators import FlipBitMutation # 翻转突变 from gaft.operators import UniformCrossover # 均匀交叉 from gaft.components import BinaryIndividual # 二元个体 from gaft.operators import RouletteWheelSelection # 轮盘选择 from gaft.analysis.console_output import ConsoleOutput # 输出 # 定义编码 individual_template = BinaryIndividual(ranges=[(0, 10)], eps=0.001) # 定义种群 _population = Population(indv_template=individual_template, size=20) # 种群初始化 _population.init() # 遗传操作 selection = RouletteWheelSelection() # 个体选择:轮盘赌 crossover = UniformCrossover(pc=0.8, pe=0.5) # 交叉算子:均匀交叉 mutation = FlipBitMutation(pm=0.1) # 变异算子:翻转突变 # 遗传算法引擎 _engine = GAEngine(population=_population, selection=selection, crossover=crossover, mutation=mutation, analysis=[ConsoleOutput]) # 适应度:目标 @_engine.fitness_register # @_engine.minimize def fitness(individual):
# NOTE(review): this `elif` belongs to an if-chain whose start is before
# this excerpt; it is a fragment, not a standalone statement.
elif CASE.upper() == 'SEL':
    # Load the trained 3-D SOM model and its labels for the 'SEL' case.
    obsSOM, true_labels = utils.loadSOM(
        save_dir=config.OUTPUT_TRAINED_MODELS_PATH,
        file_name=config.ZSEL_SOM_3D_MODEL_NAME)
    setupEnv()
    # print(MODELS_VALUES.shape)
    # print(MODELS_DICT)

    # One probabilistic gene in [0, 1] per model.
    indv_template = ProbabilisticIndividual(ranges=[
        (0, 1) for _ in range(config.NB_MODELS)
    ], eps=0.001)
    population = Population(indv_template=indv_template,
                            size=POPULATION_SIZE)
    population.init()

    # Built-in genetic operators, parameterised by module-level constants.
    selection = RouletteWheelSelection()
    crossover = UniformCrossover(pc=CROSSOVER_PROBABILITY, pe=GE_PROBABILITY)
    mutation = FlipBitMutation(pm=MUTATION_PROBABILITY)
    engine = GAEngine(population=population,
                      selection=selection,
                      crossover=crossover,
                      mutation=mutation,
                      analysis=[FitnessStore])

    @engine.fitness_register
    def fitness(indv):
        # Fitness reads/writes these module-level globals.
        global MODELS_VALUES
        global MODELS_DICT
        global CASE
        # NOTE(review): the function body continues beyond this excerpt.
# NOTE(review): this fragment starts mid-function — `precisao`, `n_nines`
# and `geracoes` are defined before this excerpt.
noves = re.search(r'(^9*)', precisao)  # count the leading '9' digits
n_nines.append(len(noves[0]))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(geracoes, n_nines)  # nines-of-precision per generation
plt.show()


if __name__ == "__main__":
    # Two binary-encoded variables on [-100, 100]^2 with 1e-6 resolution.
    individuo = BinaryIndividual(ranges=[(-100, 100), (-100, 100)],
                                 eps=0.000001)
    populacao = Population(indv_template=individuo, size=100)
    populacao.init()
    selecao = TournamentSelection()
    crossover = UniformCrossover(pc=0.65, pe=0.65)
    mutacao = FlipBitMutation(pm=0.008)
    engine = GAEngine(population=populacao,
                      selection=selecao,
                      crossover=crossover,
                      mutation=mutacao,
                      analysis=[FitnessStore, ConsoleOutput])

    @engine.fitness_register
    def aptidao(ind):
        # Appears to be the Schaffer F6 objective; the expression is
        # truncated at the excerpt boundary.
        x, y = ind.solution
        return 0.5 - ((sin(sqrt(x**2 + y**2))**2 - 0.5) / (1 + 0.001 *
def generate():
    # Tune a 5-year policy with a GA seeded from the command line, then
    # plot and save the per-generation best reward.
    import random
    good_seed = int(sys.argv[1])  # RNG seed taken from the command line
    print('currrrrrrrrrrrrrrrrrrrrrrrrrrrrr', good_seed)
    envSeqDec = ChallengeProveEnvironment()  # Initialise a New Challenge Environment to post entire policy
    #env = ChallengeEnvironment(experimentCount = 20000)
    eps = 0.1  # equal to actions space resolution # range/eps
    pop_size = 4
    cross_prob = 0.6
    exchange_prob = 0.7
    mutation_pob = 0.8
    generation = 4
    REWARDS = []           # best true reward per generation (plotted below)
    NEW = []
    POLICY = []
    tmp_reward = []        # reward of every fitness evaluation
    tmp_policy = []        # policy of every fitness evaluation, same order
    policy_450 = []
    reward_generation = [] # same rewards again, used for the per-gen slice
    time = []
    random.seed(good_seed)
    # best_action = ([0], [0.8, 1])
    turb = 0               # fitness noise amplitude (disabled)
    test_action = ([[i/10 for i in range(0, 11)],[i/10 for i in range(0,11)]])
    # test_action = ([0, 0.1, 0.2, 0.3, 0.4, 0.5], [0.6, 0.7, 0.8, 0.9, 1])

    # 2. Define population
    indv_template = OrderIndividual(ranges=[(0, 1), (0, 1), (0, 1), (0, 1),
                                            (0, 1), (0, 1), (0, 1), (0, 1),
                                            (0, 1), (0, 1)],
                                    eps=eps,
                                    actions = test_action)  # low_bit and high_bit
    population = Population(indv_template=indv_template, size = pop_size)
    population.init()  # Initialize population with individuals.

    # 3. Create genetic operators
    # Use built-in operators here.
    #selection = RouletteWheelSelection()
    selection = TournamentSelection()
    crossover = UniformCrossover(pc=cross_prob, pe=exchange_prob)  # PE = Gene exchange probability
    mutation = FlipBitMutation(pm=mutation_pob)  # 0.1 todo The probability of mutation

    # 4. Create genetic algorithm engine to run optimization
    engine = GAEngine(population=population, selection=selection,
                      crossover=crossover, mutation=mutation,)
    # analysis=[FitnessStore])

    # 5. Define and register fitness function
    @engine.fitness_register
    #@engine.dynamic_linear_scaling(target='max', ksi0=2, r=0.9)
    def fitness(indv):
        p = [0 for _ in range(10)]
        p = indv.solution  # encode
        # Two genes per year, five years.
        policy = {'1': [p[0], p[1]], '2': [p[2], p[3]], '3': [p[4], p[5]],
                  '4': [p[6], p[7]], '5': [p[8], p[9]]}
        reward = envSeqDec.evaluatePolicy(policy)  # Action in Year 1 only
        #print('Sequential Result : ', reward)
        tmp_reward.append(reward)
        reward_generation.append(reward)
        tmp_policy.append(policy)
        #print('Policy : ', policy)
        #print(policy_450,'**************************good solution***************')
        #print(policy_bad,'**************************bad solution***************')
        return reward + uniform(-turb, turb)

    @engine.analysis_register
    class ConsoleOutput(OnTheFlyAnalysis):
        master_only = True
        interval = 1

        def register_step(self, g, population, engine):
            best_indv = population.best_indv(engine.fitness)
            msg = 'Generation: {}, best fitness: {:.3f}'.format(g + 1, engine.fmax)
            #best_reward = max(tmp_reward[g + pop_size * (generation - 1): g + pop_size * generation])
            #print(pop_size * (g - 0), pop_size * (g + 1),'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
            # NOTE(review): this slice assumes exactly pop_size fitness
            # evaluations per generation — confirm against gaft internals.
            REWARDS.append(max(reward_generation[pop_size * (g - 0): pop_size * (g + 1)]))
            #best_policy = POLICY[tmp_reward.index(best_reward)]
            #POLICY.append(best_policy)
            engine.logger.info(msg)

    engine.run(ng = generation)
    # print(policy_450)

    # Plot the best reward of every generation and save it per seed.
    x = list(range(len(REWARDS)))
    plt.plot(x, REWARDS)
    plt.title(f'Sequential Rewards {good_seed}')
    plt.savefig(f'./GA_trick/GA_seed_{good_seed}.jpg')
    # #plt.savefig(f'./res_geneticAlgorithm/Sequential_Rewards_eps:{eps}_popsize:{pop_size}_generation:{generation}_mutation_pob:{mutation_pob}_exchange_prob:{exchange_prob}_cross_prob:{cross_prob}.jpg')
    plt.show()