def main():
    """Run a (μ=15, λ=100) evolutionary search over the CNF instance "input.cnf"."""
    parent_count, offspring_count = 15, 100
    # The CNF problem file is shared by all individuals via a class attribute.
    Individual.cnf_filename = "input.cnf"
    EA(parent_count, offspring_count).search()
def main():
    """Run a (μ=15, λ=100) evolutionary search toward the secret message."""
    parent_count, offspring_count = 15, 100
    # Target message is shared by all individuals via a class attribute.
    Individual.message = SecretMessage("FreneticArray")
    EA(parent_count, offspring_count).search()
def main():
    """Search "input.cnf" with μ=15, λ=100, stopping on fitness 100,
    an 8-hour wall-clock deadline, or 10000 generations — whichever first.
    """
    parent_count, offspring_count = 15, 100
    Individual.cnf_filename = "input.cnf"
    stop_conditions = [
        FitnessTarget(100),
        DateTarget(datetime.datetime.now() + datetime.timedelta(hours=8)),
        NumberOfGenerations(10000),
    ]
    EA(parent_count, offspring_count).search(stop_conditions)
def mpi_main(args):
    """Evolutionary ask/evaluate/tell loop fanned out over MPI.

    Each generation: ask the EA for a population, evaluate every individual
    through an MPICommExecutor, reduce the results, and tell the EA.
    Expects an argparse-style namespace providing: debug, config, reseed,
    task_num, generation, num_workers, task, output.
    """
    DEBUG_FLAG = args.debug  # NOTE(review): assigned but never read below -- confirm it is meant to set a module-level flag
    ea = EA(1)
    ea.load_config(args.config)
    reseedPeriod = int(args.reseed)
    taskNum = int(args.task_num)
    # Fixed seed stream: every process derives the same per-task seed list.
    np.random.seed(1)
    seed = np.random.randint(0, 2**32 - 1, size=(taskNum), dtype=np.uint32)
    seed = seed.tolist()
    print(seed)
    for i in range(int(args.generation)):
        # Refresh the per-task seeds every reseedPeriod generations (<= 0 disables).
        if ((reseedPeriod > 0) and (i % reseedPeriod == 0)):
            for j in range(taskNum):
                seed[j] = random.randint(0, 2**32 - 1)
        ea_time = time.time()
        pop = ea.ask()
        ea_time = time.time() - ea_time
        fitnesses = []  # NOTE(review): never used in this function
        workloads = []
        num_workers = int(args.num_workers) - 1  # NOTE(review): never used in this function
        gc.collect()
        prep_time = time.time()
        # One workload tuple per individual; `eval` below is presumably the
        # project's evaluation function shadowing the builtin -- confirm.
        for j in range(len(pop)):
            workloads.append((pop[j], args.task, seed, args.debug))
        prep_time = time.time() - prep_time
        eval_time = time.time()
        success = False
        # Retry the whole scatter/gather if an OverflowError escapes the executor.
        while (success is False):
            try:
                with MPICommExecutor(MPI.COMM_WORLD, root=0) as executor:
                    # mpi4py: only the root rank gets a real executor; other
                    # ranks receive None and serve tasks inside the context.
                    if executor is not None:
                        results = executor.map(eval, workloads)
                success = True
            except OverflowError:
                success = False
        eval_time = time.time() - eval_time
        # NOTE(review): `results` is unbound on ranks where executor was None --
        # presumably only the root rank executes this function; confirm.
        reducedResult = EvalSummary()
        reducedResult.reduce(results, 'pfit')
        ea.tell(reducedResult, args.task, seed)
        ea.write_history(args.output)
        #print(ea.fitnesses)
        print(
            'iter: {0} fit: {1}, pfit:{7} Q: {2}, ea_time: {3}, prep_time: {4}, eval_time: {5}, max_depth:{6}'
            .format(i, ea.fitnesses[0], np.mean(reducedResult.get_res('Q')[0]),
                    ea_time, prep_time, eval_time, ea.pop[0].maxDepth,
                    np.mean(reducedResult.get_res('pfit')[0])))
def main(args):
    """Evolutionary ask/evaluate/tell loop run locally (optionally on a pool).

    Each generation: ask the EA for a population, evaluate every individual
    (in parallel via multiprocessing when more than one worker is available),
    and tell the EA the results.  Expects an argparse-style namespace
    providing: config, reseed, task_num, generation, num_workers, task, output.
    """
    ea = EA(1)
    ea.load_config(args.config)
    reseedPeriod = int(args.reseed)
    taskNum = int(args.task_num)
    # Fixed seed stream so runs are reproducible; one seed per task.
    np.random.seed(0)
    seed = np.random.randint(0, 2**32 - 1, size=(taskNum), dtype=np.uint32)
    seed = seed.tolist()
    print(seed)
    for i in range(int(args.generation)):
        # Refresh the per-task seeds every reseedPeriod generations (<= 0 disables).
        if reseedPeriod > 0 and i % reseedPeriod == 0:
            for j in range(taskNum):
                seed[j] = random.randint(0, 2**32 - 1)
        ea_time = time.time()
        pop = ea.ask()
        ea_time = time.time() - ea_time
        # One worker is reserved for this parent process -- TODO confirm intent;
        # note num_workers == 1 falls through to the serial branch.
        num_workers = int(args.num_workers) - 1
        gc.collect()
        prep_time = time.time()
        # One workload tuple per individual; `eval` here is presumably the
        # project's evaluation function shadowing the builtin -- confirm.
        # (Removed the unused `fitnesses` accumulator from the original.)
        workloads = [(individual, args.task, seed) for individual in pop]
        prep_time = time.time() - prep_time
        eval_time = time.time()
        if num_workers > 1:
            with mp.Pool(num_workers) as pool:
                results = pool.map(eval, workloads)
        else:
            results = [eval(w) for w in workloads]
        eval_time = time.time() - eval_time
        ea.tell(results, args.task, seed)
        ea.write_history(args.output)
        print(
            'iter: {0} fit: {1}, Q: {2}, ea_time: {3}, prep_time: {4}, eval_time: {5}, max_depth:{6}'
            .format(i, ea.fitnesses[0], np.mean(ea.Q), ea_time, prep_time,
                    eval_time, ea.pop[0].maxDepth))
def run_ea(self):
    """Run the EA self.ea_iterations times using the GUI's current settings,
    plotting fitness statistics and (for problems 5/6) running a simulation view.
    """
    iterations_completed = 0
    # PlotEvolution.x_limit = self.maximum_generations.get()
    while self.ea_iterations != iterations_completed:
        # Snapshot every GUI control into an EA configuration object.
        ea_config = EAConfig(
            self.child_pool_size.get(), self.adult_pool_size.get(),
            self.crossover_rate.get(), self.crossover_points.get(),
            self.mutation_scheme.get(), self.mutation_rate.get(),
            self.adult_selection_scheme.get(),
            self.parent_selection_scheme.get(), self.elitism.get(),
            self.inferiorism.get(), self.maximum_generations.get(),
            self.tournament_size.get(),
            self.tournament_random_choice_rate.get(),
            self.boltzmann_temperature.get())
        # Genotype/fitness classes are keyed by the selected problem id (1-6).
        genotype_class = self.genotype_classes[self.problem_selected.get()]
        fitness_class = self.fitness_classes[self.problem_selected.get()]
        ea = EA(genotype_class, fitness_class, ea_config)
        # Configure problems
        if self.problem_selected.get() == 1:  # one-max controls
            self.genotype_classes[1].bit_vector_length = self.one_max_length.get()
            self.fitness_classes[1].random = bool(self.one_max_random.get())
        elif self.problem_selected.get() == 2:  # LOLZ-prefix controls
            self.genotype_classes[2].bit_vector_length = self.lolz_prefix_length.get()
            self.fitness_classes[2].z = self.lolz_prefix_z.get()
        elif self.problem_selected.get() == 3:  # gss controls
            self.genotype_classes[3].symbols = self.gss_symbols.get()
            self.genotype_classes[3].length = self.gss_length.get()
        elif self.problem_selected.get() == 4:  # lss controls
            self.genotype_classes[4].symbols = self.lss_symbols.get()
            self.genotype_classes[4].length = self.lss_length.get()
        elif self.problem_selected.get() == 5:  # Flatland controls
            self.fitness_classes[5].max_time_steps = self.flatland_time_steps.get()
            self.fitness_classes[5].number_of_scenarios = self.flatland_num_scenarios.get()
            self.fitness_classes[5].dynamic_scenarios = bool(self.flatland_dynamic_scenarios.get())
        elif self.problem_selected.get() == 6:  # Beer Tracker (CTRNN) controls
            self.fitness_classes[6].pulling = bool(self.beer_tracker_pulling.get())
            self.fitness_classes[6].world_wrap = bool(self.beer_tracker_world_wrap.get())
            # Configure CTRNN for pulling scenario
            if self.fitness_classes[6].pulling:
                self.genotype_classes[6].topology[1] = 3
                self.genotype_classes[6].topology[-1] = 3
                # Adjust scale for scenario
                self.fitness_classes[6].captured_scale = 0.75
                self.fitness_classes[6].avoided_scale = 0.25
                self.genotype_classes[6].weight_lower_bound = -7.0
                self.genotype_classes[6].weight_upper_bound = 7.0
            # Configure CTRNN for no world wrap scenario
            if not self.fitness_classes[6].world_wrap:
                self.genotype_classes[6].topology[0] = 7
                self.genotype_classes[6].weight_lower_bound = -7.0
                self.genotype_classes[6].weight_upper_bound = 7.0
                # Adjust scale for scenario
                self.fitness_classes[6].captured_scale = 0.9
                self.fitness_classes[6].avoided_scale = 0.1
            self.genotype_classes[6].calculate_ctrnn_intervals()
        # Report settings and start evolution
        solution = ea.evolve()
        genotype_class.report_genotype_settings()
        ea_config.report()
        # # Plot aggregated data
        if self.aggregate_plot_data.get() == 1:
            # Accumulate per-run statistics and plot only once the
            # accumulation bound is reached, then reset the accumulator.
            PlotEvolution.accumulate_average_data(
                ea.gen_avg_fitness, ea.gen_best_fitness,
                ea.gen_standard_deviation)
            self.accumulations += 1
            if self.accumulations == self.accumulation_bound.get():
                PlotEvolution.plot_evolution(
                    PlotEvolution.aggregated_avg_fitness,
                    PlotEvolution.aggregated_best_fitness,
                    PlotEvolution.aggregated_standard_deviation)
                self.accumulations = 0
                PlotEvolution.clear_aggregated_data()
        else:
            PlotEvolution.plot_evolution(ea.gen_avg_fitness,
                                         ea.gen_best_fitness,
                                         ea.gen_standard_deviation)
        # If Flatland, run simulation
        if self.problem_selected.get() == 5:
            view = FlatlandView(fitness_class.flatland_scenarios, solution,
                                self.flatland_time_steps.get())
            # NOTE(review): agenda_loop() is invoked immediately here and its
            # RETURN VALUE is handed to after(); if a deferred callback was
            # intended, that should be `view.agenda_loop` without parentheses
            # -- confirm before changing, the loop may reschedule itself.
            view.after(20, view.agenda_loop())
            view.mainloop()
        # If Beer Tracker, run simulation
        if self.problem_selected.get() == 6:
            view = BeerTrackerView(solution.translate_to_phenotype(),
                                   self.beer_tracker_world_wrap.get(),
                                   self.beer_tracker_pulling.get())
            # NOTE(review): same immediate-call pattern as above -- confirm.
            view.after(20, view.agenda_loop())
            view.mainloop()
        iterations_completed += 1
#! /usr/bin/env python3
"""Entry script: run the evolutionary algorithm and keep the best network."""
from ea import EA

# FIX: the original bound the instance to the name `EA`, shadowing the class
# it had just imported; the instance is now named `ea`.
ea = EA(NO_GENERATIONS=60,
        POPULATION_SIZE=300,
        INIT_NO_FRAMES=300,
        FINAL_NO_FRAMES=4000,
        FRACTION_MUTATE=0.3,
        NUMBER_OF_PARTNERS=300)
best_network = ea.evolve_best_individual()
# NOTE(review): this appears to be a chunk of a larger script -- `ea`, `nkcs`,
# `r`, `evals`, `perf_best`, `perf_avg` and `N_RES` look like they are defined
# earlier in the file, and the final loop likely continues past this view.
if cons.EXPERIMENT_LOAD:  # reuse fitness landscapes and initial populations
    with open('experiment.pkl', 'rb') as fp:
        # Two sequential dill payloads: the EA runs, then the NKCS landscapes.
        ea = dill.load(fp)
        nkcs = dill.load(fp)
    # Sanity-check the loaded experiment against the current constants.
    if len(nkcs) != cons.F or len(ea) != N_RES:
        print('loaded experiment does not match constants')
        sys.exit()
    # Record initial performance for every loaded run.
    # NOTE(review): `r` is not initialised in this visible chunk -- presumably
    # set to 0 earlier in the file; confirm.
    for _ in range(cons.F):
        for _ in range(cons.E):
            ea[r].update_perf(evals[r], perf_best[r], perf_avg[r])
            r += 1
else:  # create new fitness landscapes and initial populations
    for f in range(cons.F):
        nkcs.append(NKCS())
        for _ in range(cons.E):
            ea.append(EA())
            ea[r].run_initial(nkcs[f])
            ea[r].update_perf(evals[r], perf_best[r], perf_avg[r])
            r += 1
    if cons.EXPERIMENT_SAVE:  # save initial populations
        with open('experiment.pkl', 'wb') as fp:
            dill.dump(ea, fp)
# run the experiments
r = 0
bar = tqdm(total=N_RES)  #: progress bar
for f in range(cons.F):  # F NKCS functions
    for e in range(cons.E):  # E experiments
        if cons.ACQUISITION == 'ea':
            ea[r].run_ea(nkcs[f], evals[r], perf_best[r], perf_avg[r])
def error_filter(xcptn):
    """Return the captured stderr for subprocess failures; None for anything else."""
    return xcptn.stderr if isinstance(xcptn, subprocess.CalledProcessError) else None


if __name__ == "__main__":
    # Problem definition
    dimensions = 10
    evaluator = Rastrigin(dimensions=dimensions)

    # Evolutionary Algorithm configuration (mu/lambda strategy parameters).
    config = {
        'population_size': 40,  # mu
        'offspring': 40,        # lambda
        'generations': 10,
        'initial_sigma': 0.01,
        'learning_rate': 1.0 / np.sqrt(dimensions),
    }
    ea = EA(config=config, fitness_evaluator=evaluator)
    g = ea.next_generation(ea.initialize())

    print("╭─(Running evolution...)")
    with SimpleDisplay(error_filter) as display:
        answer = run_logging(g, 1, display)
        # answer = run_single(g)
    for individual in answer.individuals:
        print(individual.fitness)
import argparse
import os

from ea import EA

# Command-line entry point for the Texas hold'em bot evolutionary run.
parser = argparse.ArgumentParser(description="Texas holdem bot EA")
# FIX: FileType('r', 0) requested unbuffered text I/O, which Python 3's
# open() rejects with ValueError as soon as the type converter runs
# (argparse also applies it to the string default).
parser.add_argument('--config', type=argparse.FileType('r'),
                    default=os.path.abspath("../config_starter.json"))
args = parser.parse_args()

ea = EA(128, 64, 100, 10, args.config)
#ea = EA(16, 8, 6, 10, args.config)
ea.run()

# Report the final population, best fitness first.
print("\n\n")
print("Age\tFit\tWin\tLoss\tTime")
for s in sorted(ea.this_generation.population, key=lambda p: p.fitness, reverse=True):
    print("{0}\t{1}\t{2}\t{3}\t{4}\n {5}\n".format(s.generation, round(s.fitness),
                                                   s.wins, s.losses,
                                                   s.average_time, s.get_config_file()))