def create_pso_alg(pf_schedule, generate_, **params):
    def fit_converter(func):
        def wrap(*args, **kwargs):
            x = func(*args, **kwargs)
            m = Utility.makespan(x)
            return FitnessStd(values=(m, 0.0))
        return wrap

    def compound_update(w, c1, c2, p, best, pop, min=-1, max=1):
        mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
        ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)

    toolbox = Toolbox()
    toolbox.register("population", generate_)
    toolbox.register("fitness", fit_converter(pf_schedule))
    toolbox.register("update", compound_update)
    pso_alg = partial(run_pso, toolbox=toolbox, **params)
    return pso_alg
def toolbox(self, mapMatrix, rankList, ordFilter):
    _wf, rm, estimator = self.env()
    estimator.transfer_time = 500
    heft_schedule = self.heft_schedule()
    heft_particle = generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter, heft_schedule)

    heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00
                           else generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter)
                           for _ in range(n - 1)] + [deepcopy(heft_particle)])
    # heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter) for _ in range(n)]

    def compound_update(w, c1, c2, p, best, pop):
        # doMap = random.random()
        # if doMap < 0.1:
        mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
        # else:
        ordering_update(w, c1, c2, p.ordering, best.ordering, pop)

    toolbox = Toolbox()
    toolbox.register("population", heft_gen)
    toolbox.register("fitness", fitness, _wf, rm, estimator)
    toolbox.register("update", compound_update)
    return toolbox
def toolbox(self):
    _wf, rm, estimator = self.env()
    heft_schedule = self.heft_schedule()
    heft_particle = om_order.generate(_wf, rm, estimator, heft_schedule)
    heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00
                          else om_order.generate(_wf, rm, estimator)
                          for _ in range(n)]

    def compound_update(w, c1, c2, p, best, pop, min=-1, max=1):
        om_map.update(w, c1, c2, p.mapping, best.mapping, pop)
        om_order.ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)

    toolbox = Toolbox()
    toolbox.register("population", heft_gen)
    toolbox.register("fitness", om_order.fitness, _wf, rm, estimator)
    toolbox.register("update", compound_update)
    return toolbox
def toolbox(self, mapMatrix, rankList, ordFilter):
    _wf, rm, estimator = self.env()
    heft_schedule = self.heft_schedule()
    heft_particle = rd_order.generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter, heft_schedule)
    heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00
                          else rd_order.generate(_wf, rm, estimator, mapMatrix, rankList, ordFilter)
                          for _ in range(n)]

    # def compound_update(w, c1, c2, p, best, pop, g):
    def compound_update(w, c1, c2, p, best, pop):
        # if g % 2 == 0:
        rd_map.update(w, c1, c2, p.mapping, best.mapping, pop)
        # else:
        rd_order.ordering_update(w, c1, c2, p.ordering, best.ordering, pop)

    toolbox = Toolbox()
    toolbox.register("population", heft_gen)
    toolbox.register("fitness", rd_order.fitness, _wf, rm, estimator)
    toolbox.register("update", compound_update)
    return toolbox
def toolbox(self, transfer):
    _wf, rm, estimator = self.env()
    estimator.transfer_time = transfer
    heft_schedule = self.heft_schedule()
    heft_particle = generate(_wf, rm, estimator, heft_schedule)
    heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00
                           else generate(_wf, rm, estimator)
                           for _ in range(n - 1)] + [deepcopy(heft_particle)])
    # heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator) for _ in range(n)])

    def compound_update(w, c1, c2, p, best, pop, min=-1, max=1):
        # doMap = random.random()
        # if doMap < 0.5:
        mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
        ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)

    toolbox = Toolbox()
    toolbox.register("population", heft_gen)
    toolbox.register("fitness", fitness, _wf, rm, estimator)
    toolbox.register("update", compound_update)
    return toolbox
def alg(fixed_schedule_part, initial_schedule, current_time=0.0, initial_population=None):
    ga_functions = GAFunctions2(wf, rm, estimator)
    generate = partial(ga_generate,
                       ga_functions=ga_functions,
                       fixed_schedule_part=fixed_schedule_part,
                       current_time=current_time,
                       init_sched_percent=init_sched_percent,
                       initial_schedule=initial_schedule)

    toolbox = Toolbox()
    toolbox.register("generate", generate)
    toolbox.register("evaluate",
                     fit_converter(ga_functions.build_fitness(fixed_schedule_part, current_time)))
    toolbox.register("clone", deepcopy)
    toolbox.register("mate", ga_functions.crossover)
    toolbox.register("sweep_mutation", ga_functions.sweep_mutation)
    toolbox.register("mutate", ga_functions.mutation)
    # toolbox.register("select_parents", )
    toolbox.register("select", tools.selRoulette)

    pop, logbook, best = run_ga(toolbox=toolbox, **alg_params)

    resulted_schedule = ga_functions.build_schedule(best, fixed_schedule_part, current_time)
    result = (best, pop, resulted_schedule, None), logbook
    return result
def toolbox(self):
    _wf, rm, estimator = self.env()
    heft_schedule = self.heft_schedule()
    heft_particle = generate(_wf, rm, estimator, heft_schedule)
    heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00
                          else generate(_wf, rm, estimator)
                          for _ in range(n)]

    def compound_update(w, c1, c2, p, best, pop, min=-1, max=1):
        mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
        ordering_update(w, c1, c2, p.ordering, best.ordering, pop, min=min, max=max)

    def compound_force_vector_matrix():
        raise NotImplementedError()

    def compound_velocity_and_position():
        raise NotImplementedError()

    toolbox = Toolbox()
    toolbox.register("generate", heft_gen)
    toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
    toolbox.register("force_vector_matrix", compound_force_vector_matrix)
    toolbox.register("velocity_and_position", compound_velocity_and_position, beta=0.0)
    toolbox.register("G", G)
    toolbox.register("kbest", Kbest)
    return toolbox
def test_fixed_ordering(self):
    _wf = wf("Montage_25")
    rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
    estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
                                        ideal_flops=20, transfer_time=100)
    sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)

    heft_schedule = run_heft(_wf, rm, estimator)
    heft_mapping = schedule_to_position(heft_schedule)

    heft_gen = lambda: heft_mapping if random.random() > 0.95 else generate(_wf, rm, estimator)

    toolbox = Toolbox()
    # toolbox.register("generate", generate, _wf, rm, estimator)
    toolbox.register("generate", heft_gen)
    toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
    toolbox.register("force_vector_matrix", force_vector_matrix, rm)
    toolbox.register("velocity_and_position", velocity_and_position, _wf, rm, estimator)
    toolbox.register("G", G)
    toolbox.register("kbest", Kbest)

    statistics = Statistics()
    statistics.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
    statistics.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
    statistics.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
    statistics.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

    logbook = Logbook()
    logbook.header = ("gen", "G", "kbest", "min", "avr", "max", "std")

    pop_size = 100
    iter_number = 100
    kbest = pop_size
    ginit = 5

    final_pop = run_gsa(toolbox, statistics, logbook, pop_size, iter_number, kbest, ginit)

    best = min(final_pop, key=lambda x: toolbox.fitness(x).mofit)
    solution = {MAPPING_SPECIE: list(zip(sorted_tasks, best)),
                ORDERING_SPECIE: sorted_tasks}
    schedule = build_schedule(_wf, estimator, rm, solution)
    Utility.validate_static_schedule(_wf, schedule)
    makespan = Utility.makespan(schedule)
    print("Final makespan: {0}".format(makespan))
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
    self.domain = domain_factory()

    tournament_ratio = self.params_gphh.tournament_ratio
    pop_size = self.params_gphh.pop_size
    n_gen = self.params_gphh.n_gen
    min_tree_depth = self.params_gphh.min_tree_depth
    max_tree_depth = self.params_gphh.max_tree_depth
    crossover_rate = self.params_gphh.crossover_rate
    mutation_rate = self.params_gphh.mutation_rate

    creator.create("FitnessMin", Fitness, weights=(self.weight,))
    creator.create("Individual", PrimitiveTree, fitness=creator.FitnessMin)

    self.toolbox = Toolbox()
    self.toolbox.register("expr", genHalfAndHalf, pset=self.pset, min_=min_tree_depth, max_=max_tree_depth)
    self.toolbox.register("individual", tools.initIterate, creator.Individual, self.toolbox.expr)
    self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
    self.toolbox.register("compile", gp.compile, pset=self.pset)

    if self.evaluation_method == EvaluationGPHH.SGS:
        self.toolbox.register("evaluate", self.evaluate_heuristic, domains=self.training_domains)
    # if self.evaluation_method == EvaluationGPHH.SGS_DEVIATION:
    #     self.toolbox.register("evaluate", self.evaluate_heuristic_sgs_deviation, domains=self.training_domains)
    elif self.evaluation_method == EvaluationGPHH.PERMUTATION_DISTANCE:
        self.toolbox.register("evaluate", self.evaluate_heuristic_permutation, domains=self.training_domains)
    # self.toolbox.register("evaluate", self.evaluate_heuristic, domains=[self.training_domains[1]])

    self.toolbox.register("select", tools.selTournament, tournsize=int(tournament_ratio * pop_size))
    self.toolbox.register("mate", gp.cxOnePoint)
    self.toolbox.register("expr_mut", gp.genFull, min_=0, max_=max_tree_depth)
    self.toolbox.register("mutate", gp.mutUniform, expr=self.toolbox.expr_mut, pset=self.pset)

    self.toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
    self.toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", np.mean)
    mstats.register("std", np.std)
    mstats.register("min", np.min)
    mstats.register("max", np.max)

    pop = self.toolbox.population(n=pop_size)
    hof = tools.HallOfFame(1)
    self.hof = hof

    pop, log = algorithms.eaSimple(pop, self.toolbox, crossover_rate, mutation_rate, n_gen,
                                   stats=mstats, halloffame=hof, verbose=True)

    self.best_heuristic = hof[0]
    print('best_heuristic: ', self.best_heuristic)

    self.func_heuristic = self.toolbox.compile(expr=self.best_heuristic)
    self.policy = GPHHPolicy(self.domain, self.domain_model, self.func_heuristic,
                             features=self.list_feature,
                             params_gphh=self.params_gphh,
                             recompute_cpm=True)
def do_exp(wf_name):
    _wf = wf(wf_name)
    rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
    estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0, transferMx=None,
                                        ideal_flops=20, transfer_time=100)

    empty_fixed_schedule_part = Schedule({node: [] for node in rm.get_nodes()})

    heft_schedule = run_heft(_wf, rm, estimator)

    ga_functions = GAFunctions2(_wf, rm, estimator)

    generate = partial(ga_generate,
                       ga_functions=ga_functions,
                       fixed_schedule_part=empty_fixed_schedule_part,
                       current_time=0.0,
                       init_sched_percent=0.05,
                       initial_schedule=heft_schedule)

    stats = tools.Statistics(lambda ind: ind.fitness.values[0])
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    toolbox = Toolbox()
    toolbox.register("generate", generate)
    toolbox.register("evaluate",
                     fit_converter(ga_functions.build_fitness(empty_fixed_schedule_part, 0.0)))
    toolbox.register("clone", deepcopy)
    toolbox.register("mate", ga_functions.crossover)
    toolbox.register("sweep_mutation", ga_functions.sweep_mutation)
    toolbox.register("mutate", ga_functions.mutation)
    # toolbox.register("select_parents", )
    # toolbox.register("select", tools.selTournament, tournsize=4)
    toolbox.register("select", tools.selRoulette)

    pop, logbook, best = run_ga(toolbox=toolbox, logbook=logbook, stats=stats, **GA_PARAMS)

    resulted_schedule = ga_functions.build_schedule(best, empty_fixed_schedule_part, 0.0)
    Utility.validate_static_schedule(_wf, resulted_schedule)
    ga_makespan = Utility.makespan(resulted_schedule)
    return ga_makespan
def make_toolbox(self, g: igraph.Graph) -> Toolbox:
    toolbox = Toolbox()
    individual_size = g.vcount()
    toolbox.register("individual", self.new_individual_func,
                     container=creator.Individual, graph=g, n=individual_size)
    toolbox.register("mate", self.crossover_func)
    toolbox.register("mutate", self.mutate_func, graph=g, probability=self.mutation_rate)
    toolbox.register("select", self.selection_func)
    toolbox.register("dominance", self.dominance_func)
    toolbox.register("evaluate", self.evaluate_func, graph=g)
    return toolbox
def toolbox(self, rankList):
    _wf, rm, estimator = self.env()
    heft_schedule = self.heft_schedule()
    heft_particle = generate(_wf, rm, estimator, rankList, heft_schedule)
    # heft_gen = lambda n: ([deepcopy(heft_particle) if random.random() > 1.00 else generate(_wf, rm, estimator, rankList) for _ in range(n - 1)] + [deepcopy(heft_particle)])
    heft_gen = lambda n: [deepcopy(heft_particle) if random.random() > 1.00
                          else generate(_wf, rm, estimator, rankList)
                          for _ in range(n)]

    def compound_update(w, c1, c2, p, best, pop):
        mapping_update(w, c1, c2, p.mapping, best.mapping, pop)
        ordering_update(w, c1, c2, p.ordering, best.ordering, pop)

    toolbox = Toolbox()
    toolbox.register("population", heft_gen)
    toolbox.register("fitness", fitness, _wf, rm, estimator)
    toolbox.register("update", compound_update)
    return toolbox
def create_gsa_alg(pf_schedule, generate_, **params):
    def fit_converter(func):
        def wrap(*args, **kwargs):
            x = func(*args, **kwargs)
            m = Utility.makespan(x)
            return FitnessStd(values=(m, 0.0))
        return wrap

    def compound_force(p, pop, kbest, G):
        mapping_force = force(p.mapping, (p.mapping for p in pop), kbest, G)
        ordering_force = force(p.ordering, (p.ordering for p in pop), kbest, G)
        return (mapping_force, ordering_force)

    def compound_update(w, c, p, min=-1, max=1):
        gsa_mapping_update(w, c, p.mapping)
        gsa_ordering_update(w, c, p.ordering, min, max)

    W, C = params["w"], params["w"]

    if "generations_count_before_merge" in params and "generations_count_after_merge" in params:
        all_iterations_count = int(params["generations_count_before_merge"]) + \
                               int(params["generations_count_after_merge"])
    else:
        all_iterations_count = None

    toolbox = Toolbox()
    toolbox.register("generate", generate_)
    toolbox.register("fitness", fit_converter(pf_schedule))
    toolbox.register("estimate_force", compound_force)
    toolbox.register("update", compound_update, W, C)
    toolbox.register("G", partial(G, all_iter_number=all_iterations_count))
    toolbox.register("kbest", partial(Kbest, all_iter_number=all_iterations_count))
    gsa_alg = partial(run_gsa, toolbox=toolbox, **params)
    return gsa_alg
def run_dcga(wf, estimator, rm, heft_mapping, heft_ordering, **params):
    cxpb = params["cxpb"]          # 0.9
    mutpb = params["mutpb"]        # 0.9
    ngen = params["ngen"]          # 100
    pop_size = params["pop_size"]  # 100

    ctx = {'env': Env(wf, rm, estimator)}

    toolbox = Toolbox()
    toolbox.register("select", tools.selTournament, tournsize=2)
    toolbox.register("mate", _mate, ctx)
    toolbox.register("mutate", _mutate, ctx)
    toolbox.register("evaluate",
                     lambda x: [fitness_mapping_and_ordering(ctx, {MAPPING_SPECIE: x[0],
                                                                   ORDERING_SPECIE: x[1]})])

    # heft_mapping = extract_mapping_from_ga_file("../../temp/heft_etalon_full_tr100_m100.json", rm)

    pop_mapping = mapping_heft_based_initialize(ctx, pop_size, heft_mapping, 3)
    pop_ordering = ordering_heft_based_initialize(ctx, pop_size, heft_ordering, 3)

    pop = [ListBasedIndividual(el) for el in zip(pop_mapping, pop_ordering)]
    for p in pop:
        p.fitness = Fitness(0)

    stat = tools.Statistics(key=lambda x: x.fitness)
    stat.register("solsstat", lambda pop: [{"best": numpy.max(pop).values[0]}])

    final_pop, logbook = deap.algorithms.eaSimple(pop, toolbox, cxpb, mutpb, ngen, stat)

    best = max(final_pop, key=lambda x: x.fitness)
    return best.fitness.values[0], logbook
def alg(fixed_schedule_part, initial_schedule, current_time=0.0, initial_population=None, only_new_pops=False):
    n = alg_params["n"]

    ### generate heft-based population
    init_ind_count = int(n * init_sched_percent)
    heft_particle = initial_schedule if isinstance(initial_schedule, (CompoundParticle, GsaCompoundParticle)) \
        else generate_func(wf, rm, estimator, initial_schedule, fixed_schedule_part, current_time)
    init_arr = [deepcopy(heft_particle) for _ in range(init_ind_count)]
    generated_arr = [generate_func(wf, rm, estimator,
                                   schedule=None,
                                   fixed_schedule_part=fixed_schedule_part,
                                   current_time=current_time)
                     for _ in range(n - init_ind_count)]
    heft_based_population = init_arr + generated_arr

    ### generate new population
    random_population = [generate_func(wf, rm, estimator,
                                       schedule=None,
                                       fixed_schedule_part=fixed_schedule_part,
                                       current_time=current_time)
                         for _ in range(n)]

    populations = {
        "inherited": initial_population,
        "heft_based": heft_based_population,
        "random": random_population
    }

    # drop the inherited population when it is missing or empty
    if "inherited" in populations and (populations["inherited"] is None or len(populations["inherited"]) == 0):
        del populations["inherited"]

    def migration(populations, k):
        pops = [pop for name, pop in sorted(populations.items(), key=lambda x: x[0])]
        migRing(pops, k, selection=emigrant_selection)

    task_map = {task.id: task for task in wf.get_all_unique_tasks()}
    node_map = {node.name: node for node in rm.get_nodes()}
    schedule_builder = ParticleScheduleBuilder(wf, rm, estimator, task_map, node_map, fixed_schedule_part)
    pf_schedule = partial(schedule_builder, current_time=current_time)

    toolbox = Toolbox()
    toolbox.register("run_alg", algorithm(pf_schedule=pf_schedule, generate_=lambda n: None, **alg_params))
    toolbox.register("migration", migration)

    lb, st = deepcopy(log_book), deepcopy(stats)

    pop, logbook, best = run_mpga(toolbox=toolbox, logbook=lb, stats=st,
                                  initial_populations=populations, **alg_params)

    resulted_schedule = pf_schedule(best)
    result = (best, pop, resulted_schedule, None), logbook
    return result
heft_schedule = run_heft(_wf, rm, estimator)

heft_mapping = schedule_to_position(heft_schedule)
heft_mapping.velocity = MappingParticle.Velocity({})

heft_gen = lambda n: [deepcopy(heft_mapping) if random.random() > 1.0
                      else generate(_wf, rm, estimator, 1)[0]
                      for _ in range(n)]

W, C1, C2 = 0.1, 0.6, 0.2
GEN, N = 300, 50

toolbox = Toolbox()
toolbox.register("population", heft_gen)
toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
toolbox.register("update", update)

stats = tools.Statistics(lambda ind: ind.fitness.values[0])
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)

logbook = tools.Logbook()
logbook.header = ["gen", "evals"] + stats.fields


def do_exp():
def provide_features() -> None:
    # define subjects
    features.provide('instrumented_subjects_path', args.instrumented_subjects_path)
    features.provide('continue_on_subject_failure', args.continue_on_subject_failure)
    features.provide('continue_on_repetition_failure', args.continue_on_repetition_failure)

    # define budget and repetitions
    features.provide('repetitions', args.repetitions)
    features.provide('repetitions_offset', args.repetitions_offset)
    features.provide('budget_manager',
                     BudgetManager(time_budget=args.time_budget,
                                   evaluations_budget=args.evaluations_budget))

    # define devices configuration
    features.provide('emulators_number', args.emulators_number)
    features.provide('real_devices_number', args.real_devices_number)
    features.provide('avd_series', args.avd_series)
    features.provide('avd_manager', AvdManager())

    features.provide('strategy', possible_strategies[args.strategy])
    features.provide('test_suite_evaluator', possible_test_suite_evaluators[args.evaluator])

    # define test runner
    test_runner = possible_test_runners[args.test_runner]
    features.provide('test_runner', test_runner)
    test_runner.register_minimum_api()

    # define coverage fetcher and app instrumentator
    coverage_fetcher = possible_coverage_fetchers[args.coverage]
    features.provide('coverage_fetcher', coverage_fetcher)
    coverage_fetcher.register_app_instrumentator()
    features.provide('emma_instrument_path', args.emma_instrument_path)

    # define individual and population generators
    features.provide('individual_generator', possible_individual_generators[args.individual_generator])
    features.provide('population_generator', PopulationGenerator)

    # define extras
    features.provide('verbose_level', args.verbose)
    features.provide('write_logbook', args.write_logbook)
    features.provide('write_history', args.write_history)
    features.provide('write_hall_of_fame', args.write_hall_of_fame)
    features.provide('compress', args.compress)

    # singletons
    toolbox = Toolbox()
    toolbox.register("selectBest", tools.selBest)
    features.provide('toolbox', toolbox)
    features.provide('device_manager', DeviceManager())

    features.provide('evaluate_scripts_folder_path', args.evaluate_scripts_folder_path)
    features.provide('evaluate_scripts_repetition_number', args.evaluate_scripts_repetition_number)
    features.provide('evaluate_scripts_algorithm_name', args.evaluate_scripts_algorithm_name)
    features.provide('skip_subject_if_logbook_in_results', args.skip_subject_if_logbook_in_results)
def mainPart(x_, y_, pset, pop_n=100, random_seed=1, cxpb=0.8, mutpb=0.1, ngen=5,
             alpha=1, tournsize=3, max_value=10, double=False, score=None, **kargs):
    """

    Parameters
    ----------
    score
    double
    x_
    y_
    pset
    pop_n
    random_seed
    cxpb
    mutpb
    ngen
    alpha
    tournsize
    max_value
    kargs

    Returns
    -------

    """
    max_ = pset.max_
    if score is None:
        score = [r2_score, explained_variance_score]
    random.seed(random_seed)
    toolbox = Toolbox()

    if isinstance(pset, PrimitiveSet):
        PTrees = ExpressionTree
        Generate = genHalfAndHalf
        mutate = mutNodeReplacement
        mate = cxOnePoint
    elif isinstance(pset, FixedPrimitiveSet):
        PTrees = FixedExpressionTree
        Generate = generate_
        mate = partial(cxOnePoint_index, pset=pset)
        mutate = mutUniForm_index
    else:
        raise NotImplementedError("got an unsupported pset")

    if double:
        creator.create("Fitness_", Fitness, weights=(1.0, 1.0))
    else:
        creator.create("Fitness_", Fitness, weights=(1.0,))
    creator.create("PTrees_", PTrees, fitness=creator.Fitness_)

    toolbox.register("generate_", Generate, pset=pset, min_=None, max_=max_)
    toolbox.register("individual", initIterate, container=creator.PTrees_, generator=toolbox.generate_)
    toolbox.register('population', initRepeat, container=list, func=toolbox.individual)
    # selection
    toolbox.register("select_gs", selTournament, tournsize=tournsize)
    # mate
    toolbox.register("mate", mate)
    # mutate
    toolbox.register("mutate", mutate, pset=pset)
    if isinstance(pset, PrimitiveSet):
        toolbox.decorate("mate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))
        toolbox.decorate("mutate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))
    # evaluate
    toolbox.register("evaluate", calculate, pset=pset, x=x_, y=y_, score_method=score[0], **kargs)
    toolbox.register("evaluate2", calculate, pset=pset, x=x_, y=y_, score_method=score[1], **kargs)

    stats1 = Statistics(lambda ind: ind.fitness.values[0])
    stats = MultiStatistics(score1=stats1)
    stats.register("avg", np.mean)
    stats.register("max", np.max)

    pop = toolbox.population(n=pop_n)

    haln = 5
    hof = HallOfFame(haln)

    if double:
        population, logbook = multiEaSimple(pop, toolbox, cxpb=cxpb, mutpb=mutpb, ngen=ngen,
                                            stats=stats, alpha=alpha, halloffame=hof, pset=pset)
    else:
        population, logbook = eaSimple(pop, toolbox, cxpb=cxpb, mutpb=mutpb, ngen=ngen,
                                       stats=stats, halloffame=hof, pset=pset)

    return population, logbook, hof
def mainPart(x_, y_, pset, max_=5, pop_n=100, random_seed=2, cxpb=0.8, mutpb=0.1, ngen=5,
             tournsize=3, max_value=10, double=False, score=None, cal_dim=True, target_dim=None,
             inter_add=True, iner_add=True, random_add=False, store=True):
    """

    Parameters
    ----------
    target_dim
    max_
    inter_add
    iner_add
    random_add
    cal_dim
    score
    double
    x_
    y_
    pset
    pop_n
    random_seed
    cxpb
    mutpb
    ngen
    tournsize
    max_value

    Returns
    -------

    """
    if score is None:
        score = [r2_score, explained_variance_score]
    if cal_dim:
        assert all([isinstance(i, Dim) for i in pset.dim_list]), \
            "all dims imported into pset should be Dim objects"

    random.seed(random_seed)
    toolbox = Toolbox()

    if isinstance(pset, ExpressionSet):
        PTrees = ExpressionTree
        Generate = genHalfAndHalf
        mutate = mutNodeReplacement
        mate = cxOnePoint
    elif isinstance(pset, FixedSet):
        PTrees = FixedTree
        Generate = generate_index
        mutate = mutUniForm_index
        mate = partial(cxOnePoint_index, pset=pset)
    else:
        raise NotImplementedError("got an unsupported pset")

    if double:
        Fitness_ = creator.create("Fitness_", Fitness, weights=(1.0, 1.0))
    else:
        Fitness_ = creator.create("Fitness_", Fitness, weights=(1.0,))
    PTrees_ = creator.create("PTrees_", PTrees, fitness=Fitness_, dim=dnan, withdim=0)

    toolbox.register("generate", Generate, pset=pset, min_=1, max_=max_)
    toolbox.register("individual", initIterate, container=PTrees_, generator=toolbox.generate)
    toolbox.register('population', initRepeat, container=list, func=toolbox.individual)
    # selection
    toolbox.register("select_gs", selTournament, tournsize=tournsize)
    toolbox.register("select_kbest_target_dim", selKbestDim, dim_type=target_dim, fuzzy=True)
    toolbox.register("select_kbest_dimless", selKbestDim, dim_type="integer")
    toolbox.register("select_kbest", selKbestDim, dim_type='ignore')
    # mate
    toolbox.register("mate", mate)
    # mutate
    toolbox.register("mutate", mutate, pset=pset)
    if isinstance(pset, ExpressionSet):
        toolbox.decorate("mate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))
        toolbox.decorate("mutate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))
    # evaluate
    toolbox.register("evaluate", calculatePrecision, pset=pset, x=x_, y=y_, scoring=score[0],
                     cal_dim=cal_dim, inter_add=inter_add, iner_add=iner_add, random_add=random_add)
    toolbox.register("evaluate2", calculatePrecision, pset=pset, x=x_, y=y_, scoring=score[1],
                     cal_dim=cal_dim, inter_add=inter_add, iner_add=iner_add, random_add=random_add)
    toolbox.register("parallel", parallelize, n_jobs=1, func=toolbox.evaluate, respective=False)
    toolbox.register("parallel2", parallelize, n_jobs=1, func=toolbox.evaluate2, respective=False)

    pop = toolbox.population(n=pop_n)

    haln = 10
    hof = HallOfFame(haln)

    stats1 = Statistics(lambda ind: ind.fitness.values[0] if ind and ind.y_dim in target_dim else 0)
    stats1.register("max", np.max)
    stats2 = Statistics(lambda ind: ind.y_dim in target_dim if ind else 0)
    stats2.register("countable_number", np.sum)
    stats = MultiStatistics(score1=stats1, score2=stats2)

    population, logbook = eaSimple(pop, toolbox, cxpb=cxpb, mutpb=mutpb, ngen=ngen,
                                   stats=stats, halloffame=hof, pset=pset, store=store)

    return population, hof
def _set_toolbox(self, value):
    """setter of toolbox"""
    if value == -1:
        value = Toolbox()
    check_var("toolbox", value, "Toolbox")
    self._toolbox = value
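# --- Hedged usage sketch (illustration only, not taken from any project above) ---
# Every snippet in this section relies on the same DEAP pattern: Toolbox.register()
# stores a callable under an alias with some arguments pre-bound (much like
# functools.partial), and the alias is later invoked as a toolbox attribute.
# The minimal, self-contained example below assumes only the public deap.base API;
# the alias names ("attr_weight", "clone") are illustrative, not project conventions.
import random
from copy import deepcopy

from deap.base import Toolbox

toolbox = Toolbox()
toolbox.register("attr_weight", random.uniform, -1.0, 1.0)  # alias with frozen bounds
toolbox.register("clone", deepcopy)                         # alias for copying individuals

sample = [toolbox.attr_weight() for _ in range(5)]          # each call runs random.uniform(-1.0, 1.0)
backup = toolbox.clone(sample)                              # deep copy via the registered alias
toolbox.unregister("attr_weight")                           # aliases can also be removed later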