def mainPart(x_, y_, pset, max_=5, pop_n=100, random_seed=2, cxpb=0.8, mutpb=0.1, ngen=5,
             tournsize=3, max_value=10, double=False, score=None, cal_dim=True, target_dim=None,
             inter_add=True, iner_add=True, random_add=False, store=True):
    """Run one symbolic-regression GP search and return the final population and hall of fame.

    Builds a DEAP ``Toolbox`` (generation, selection, mate, mutate, evaluate operators)
    for the given primitive set and drives ``eaSimple``.

    Parameters
    ----------
    x_ : array-like
        Input features, forwarded to ``calculatePrecision`` as ``x``.
    y_ : array-like
        Target values, forwarded to ``calculatePrecision`` as ``y``.
    pset : ExpressionSet or FixedSet
        Primitive set; its concrete type selects the tree class and GP operators.
    max_ : int
        Maximum tree depth/size passed to the generator.
    pop_n : int
        Population size.
    random_seed : int
        Seed for the global ``random`` module (affects all later random draws).
    cxpb, mutpb : float
        Crossover and mutation probabilities for ``eaSimple``.
    ngen : int
        Number of generations.
    tournsize : int
        Tournament size for ``selTournament``.
    max_value : int
        Height limit enforced by ``staticLimit`` on mate/mutate (ExpressionSet only).
    double : bool
        If True, use a two-objective fitness with weights (1.0, 1.0).
    score : list of callables or None
        Two scoring functions; defaults to ``[r2_score, explained_variance_score]``.
    cal_dim : bool
        If True, require every ``pset.dim_list`` entry to be a ``Dim`` and propagate
        dimension checking into evaluation.
    target_dim : container or None
        Target dimension(s); used both for ``select_kbest_target_dim`` and inside the
        statistics lambdas (``ind.y_dim in target_dim``).
        NOTE(review): the default ``None`` would make ``in target_dim`` raise TypeError
        when statistics are compiled — callers presumably always pass a container; confirm.
    inter_add, iner_add, random_add : bool
        Flags forwarded to ``calculatePrecision``.
        NOTE(review): ``iner_add`` looks like a typo for ``inner_add`` but is part of the
        public signature (and of ``calculatePrecision``'s), so it is kept as-is.
    store : bool
        Forwarded to ``eaSimple`` (presumably enables result storage — confirm).

    Returns
    -------
    tuple
        ``(population, hall_of_fame)`` — note the logbook from ``eaSimple`` is discarded.
    """
    if score is None:
        score = [r2_score, explained_variance_score]
    if cal_dim:
        # Dimension-aware evaluation requires every primitive to carry a Dim object.
        assert all([isinstance(i, Dim) for i in pset.dim_list
                    ]), "all import dim of pset should be Dim object"

    random.seed(random_seed)
    toolbox = Toolbox()

    # Pick tree representation and GP operators from the concrete pset type.
    if isinstance(pset, ExpressionSet):
        PTrees = ExpressionTree
        Generate = genHalfAndHalf
        mutate = mutNodeReplacement
        mate = cxOnePoint
    elif isinstance(pset, FixedSet):
        PTrees = FixedTree
        Generate = generate_index
        mutate = mutUniForm_index
        mate = partial(cxOnePoint_index, pset=pset)
    else:
        raise NotImplementedError("get wrong pset")

    # creator.create registers classes globally (module-level side effect in DEAP).
    if double:
        Fitness_ = creator.create("Fitness_", Fitness, weights=(1.0, 1.0))
    else:
        Fitness_ = creator.create("Fitness_", Fitness, weights=(1.0, ))
    PTrees_ = creator.create("PTrees_", PTrees, fitness=Fitness_, dim=dnan, withdim=0)

    toolbox.register("generate", Generate, pset=pset, min_=1, max_=max_)
    toolbox.register("individual", initIterate, container=PTrees_, generator=toolbox.generate)
    toolbox.register('population', initRepeat, container=list, func=toolbox.individual)
    # def selection
    toolbox.register("select_gs", selTournament, tournsize=tournsize)
    toolbox.register("select_kbest_target_dim", selKbestDim, dim_type=target_dim, fuzzy=True)
    toolbox.register("select_kbest_dimless", selKbestDim, dim_type="integer")
    toolbox.register("select_kbest", selKbestDim, dim_type='ignore')
    # def mate
    toolbox.register("mate", mate)
    # def mutate
    toolbox.register("mutate", mutate, pset=pset)
    if isinstance(pset, ExpressionSet):
        # Cap tree height after crossover/mutation to prevent bloat.
        toolbox.decorate(
            "mate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))
        toolbox.decorate(
            "mutate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))
    # def evaluate
    toolbox.register("evaluate", calculatePrecision, pset=pset, x=x_, y=y_, scoring=score[0],
                     cal_dim=cal_dim, inter_add=inter_add, iner_add=iner_add,
                     random_add=random_add)
    toolbox.register("evaluate2", calculatePrecision, pset=pset, x=x_, y=y_, scoring=score[1],
                     cal_dim=cal_dim, inter_add=inter_add, iner_add=iner_add,
                     random_add=random_add)
    # n_jobs=1: evaluation is effectively serial despite the "parallel" name.
    toolbox.register("parallel", parallelize, n_jobs=1, func=toolbox.evaluate, respective=False)
    toolbox.register("parallel2", parallelize, n_jobs=1, func=toolbox.evaluate2, respective=False)

    pop = toolbox.population(n=pop_n)

    haln = 10  # hall-of-fame capacity
    hof = HallOfFame(haln)

    # score1: best fitness among individuals whose dimension hits the target;
    # score2: count of individuals whose dimension hits the target.
    stats1 = Statistics(lambda ind: ind.fitness.values[0] if ind and ind.y_dim in target_dim else 0)
    stats1.register("max", np.max)
    stats2 = Statistics(lambda ind: ind.y_dim in target_dim if ind else 0)
    stats2.register("countable_number", np.sum)
    stats = MultiStatistics(score1=stats1, score2=stats2)

    population, logbook = eaSimple(pop, toolbox, cxpb=cxpb, mutpb=mutpb, ngen=ngen,
                                   stats=stats, halloffame=hof, pset=pset, store=store)

    return population, hof
def execute(toolbox: base.Toolbox, cases: int = 100) -> List[str]:
    """Evolve a population of string test cases and return the ``cases`` best ones.

    Runs an evolutionary loop (selection, crossover, mutation, replacement) until
    either ``MAX_GENERATIONS`` is reached or the summed fitness of the current best
    individuals stops changing between consecutive generations.

    Parameters
    ----------
    toolbox : base.Toolbox
        DEAP toolbox providing ``population``, ``evaluate``, ``select``, ``mate``,
        ``mutate``, ``clone`` and ``map``.
    cases : int
        Number of best individuals to keep and return each generation.

    Returns
    -------
    List[str]
        The final ``cases`` best individuals, each joined into a string.
    """
    population = toolbox.population(n=POPULATION_SIZE)
    hall_of_fame = tools.ParetoFront()

    stats = tools.Statistics(lambda i: i.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max", "best"

    # Evaluate every individual of the initial population.
    for individual in population:
        individual.fitness.values = toolbox.evaluate(individual)

    hall_of_fame.update(population)
    # NOTE(review): the hall of fame is never updated inside the loop below and is
    # unused afterwards — presumably kept for debugging; confirm before removing.

    record = stats.compile(population)
    logbook.record(gen=0, evals=len(population), **record)
    print(logbook.stream)

    # BUGFIX: was `generated_cases = list`, which bound the builtin type — if the
    # loop body never ran, the final comprehension would raise TypeError.
    generated_cases = []
    last_fitness = float('inf')
    current_fitness = None
    generation_count = 1

    # Stop when fitness stagnates between generations (unless it is still infinite).
    while generation_count <= MAX_GENERATIONS and (
            last_fitness != current_fitness or current_fitness == float('inf')):
        last_fitness = current_fitness

        # Select the next generation individuals (90% of the population) …
        offspring = toolbox.select(population, floor(POPULATION_SIZE * 0.9))
        # … clone them so genetic operators don't mutate the originals …
        offspring = list(toolbox.map(toolbox.clone, offspring))
        # … and top up with fresh random individuals.
        offspring += toolbox.population(n=POPULATION_SIZE - len(offspring))

        # Apply crossover on consecutive pairs of the offspring.
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if not random() < MATE_RATIO:
                continue
            toolbox.mate(child1, child2)
            # Invalidate fitness of modified individuals.
            del child1.fitness.values
            del child2.fitness.values

        # Apply mutation on the offspring.
        for mutant in offspring:
            if not random() < MUTATION_RATIO:
                continue
            toolbox.mutate(mutant)
            del mutant.fitness.values

        # Evaluate ONLY the individuals with an invalid fitness.
        # BUGFIX: the original computed invalid_ind but then re-evaluated every
        # offspring, defeating the fitness-invalidation protocol above.
        invalid_ind = [
            individual for individual in offspring if not individual.fitness.valid
        ]
        for individual in invalid_ind:
            individual.fitness.values = toolbox.evaluate(individual)

        generated_cases = tools.selBest(population, k=cases)
        current_fitness = sum(
            toolbox.map(op.itemgetter(0),
                        toolbox.map(toolbox.evaluate, generated_cases)))

        best = choice(generated_cases)
        word = "".join(best)

        # Select the next generation population from parents + offspring.
        population = toolbox.select(population + offspring, POPULATION_SIZE)
        record = stats.compile(population)
        logbook.record(gen=generation_count, evals=len(invalid_ind), best=word, **record)
        print(logbook.stream)
        generation_count += 1

    return [''.join(case) for case in generated_cases]
def learn_causal_structure(
    toolbox: base.Toolbox,
    pop_size: int = 10,
    crossover_pr: float = 1,
    mutation_pr: float = 0.2,
    num_elites: int = 1,
    max_gens: int = 50,
):
    """
    Run an elitist genetic algorithm for the structure-learning task.

    Each generation: tournament-select parents, clone them, apply crossover to
    consecutive pairs and mutation to single individuals (marking touched fitness
    values as stale), re-evaluate, then replace the population with the fittest
    offspring while carrying over the top elites unchanged.

    :param toolbox: registry of tools provided by DEAP
    :param pop_size: the number of individuals per generation
    :param crossover_pr: the crossover rate for every (monogamous) couple
    :param mutation_pr: the mutation rate for every individual
    :param num_elites: how many of the fittest individuals survive unchanged
    :param max_gens: the maximum number of generations (sole stopping criterion)
    :return: the final population and the instrumentation record
    """
    # Instrumentation utilities collected up front so every generation can log into them.
    instrumentation = initialize_instrumentation()

    # -- initialization + first evaluation --------------------------------------
    population = toolbox.population(n=pop_size)
    n_evals = evaluate_population(population, toolbox)
    log_generation_stats(0, population, n_evals, **instrumentation)

    # -- main generational loop (fixed number of generations) -------------------
    for generation in range(max_gens):
        elites = get_fittest_individuals(population, num_elites)

        # Parent selection, then clone so operators act on copies.
        offspring = [toolbox.clone(parent)
                     for parent in toolbox.select(population, len(population))]

        # Crossover: one probability draw per consecutive (even, odd) couple.
        for idx in range(1, len(offspring), 2):
            if random.random() < crossover_pr:
                first, second = offspring[idx - 1], offspring[idx]
                toolbox.mate(first, second)
                # Mark both mates' fitness as stale so they get re-evaluated.
                first.fitness = np.nan
                second.fitness = np.nan

        # Mutation: one probability draw per individual.
        for candidate in offspring:
            if random.random() < mutation_pr:
                toolbox.mutate(candidate)
                candidate.fitness = np.nan

        # Re-evaluate (only stale individuals are recomputed by the helper).
        n_evals = evaluate_population(offspring, toolbox)
        log_generation_stats(generation + 1, population, n_evals, **instrumentation)

        # Generational replacement with elitism: keep the elites, fill the rest
        # with the best offspring, mutating the population list in place.
        survivors = get_fittest_individuals(offspring, pop_size - num_elites)
        population[:] = elites + survivors

    return population, instrumentation
class GPHH(Solver, DeterministicPolicies):
    """Genetic-Programming Hyper-Heuristic solver for scheduling domains.

    Evolves a priority-dispatch heuristic (a GP expression tree over task
    features) with DEAP, then wraps the best evolved heuristic in a
    ``GPHHPolicy`` used to answer ``get_next_action`` queries.
    """

    T_domain = D

    training_domains: List[T_domain]   # domains used to evaluate candidate heuristics
    verbose: bool
    weight: int                        # fitness weight sign/scale (e.g. -1 to minimize)
    pset: PrimitiveSet                 # GP primitive set (arguments renamed to feature names)
    toolbox: Toolbox
    policy: DeterministicPolicies
    params_gphh: ParametersGPHH
    # policy: GPHHPolicy
    evaluation_method: EvaluationGPHH
    reference_permutations: Dict       # per-domain reference permutations (CP solutions)
    permutation_distance: PermutationDistance

    def __init__(
        self,
        training_domains: List[T_domain],
        domain_model: SchedulingDomain,
        weight: int,
        # set_feature: Set[FeatureEnum]=None,
        params_gphh: ParametersGPHH = ParametersGPHH.default(),
        reference_permutations=None,
        # reference_makespans=None,
        training_domains_names=None,
        verbose: bool = False,
    ):
        """Store configuration, build the primitive set, and precompute CPM data.

        NOTE(review): ``params_gphh`` uses a mutable-ish default evaluated once at
        class definition time (``ParametersGPHH.default()``); all default-argument
        callers share that instance — confirm this is intended.
        """
        self.training_domains = training_domains
        self.domain_model = domain_model
        self.params_gphh = params_gphh
        # self.set_feature = set_feature
        # Features come from the params object, not from a constructor argument.
        self.set_feature = self.params_gphh.set_feature
        print("self.set_feature: ", self.set_feature)
        print("Evaluation: ", self.params_gphh.evaluation)
        # (Commented-out default feature-set fallback removed; see VCS history.)
        self.list_feature = list(self.set_feature)
        self.verbose = verbose
        # NOTE(review): "set_primitves" (sic) is the attribute name on ParametersGPHH.
        self.pset = self.init_primitives(self.params_gphh.set_primitves)
        self.weight = weight
        self.evaluation_method = self.params_gphh.evaluation
        self.initialize_cpm_data_for_training()
        if self.evaluation_method == EvaluationGPHH.PERMUTATION_DISTANCE:
            # Permutation-distance evaluation needs reference permutations per domain.
            self.init_reference_permutations(
                reference_permutations, training_domains_names
            )
            self.permutation_distance = self.params_gphh.permutation_distance
        # if self.evaluation_method == EvaluationGPHH.SGS_DEVIATION:
        #     self.init_reference_makespans(reference_makespans, training_domains_names)

    def init_reference_permutations(
        self, reference_permutations={}, training_domains_names=[]
    ) -> None:
        """Populate ``self.reference_permutations`` for every training domain.

        For domains with no precomputed reference, runs a CP solver and converts its
        permutation to the full 1-based form (source/sink activities added).

        NOTE(review): mutable default arguments (``{}``, ``[]``) are shared across
        calls — harmless here because they are only read, but worth confirming.
        """
        self.reference_permutations = {}
        for i in range(len(self.training_domains)):
            td = self.training_domains[i]
            td_name = training_domains_names[i]
            if td_name not in reference_permutations.keys():
                # Run CP
                td.set_inplace_environment(False)
                solver = DOSolver(
                    policy_method_params=PolicyMethodParams(
                        base_policy_method=BasePolicyMethod.SGS_PRECEDENCE,
                        delta_index_freedom=0,
                        delta_time_freedom=0,
                    ),
                    method=SolvingMethod.CP,
                )
                solver.solve(domain_factory=lambda: td)
                raw_permutation = solver.best_solution.rcpsp_permutation
                # Shift by 2 and wrap with the (dummy) source task 1 and sink task,
                # matching the full RCPSP permutation convention.
                full_permutation = [x + 2 for x in raw_permutation]
                full_permutation.insert(0, 1)
                full_permutation.append(np.max(full_permutation) + 1)
                print("full_perm: ", full_permutation)
                self.reference_permutations[td] = full_permutation
            else:
                self.reference_permutations[td] = reference_permutations[td_name]

    # (Commented-out legacy init_reference_makespans implementation removed;
    #  see VCS history. It ran CP + rollout to record a reference makespan per
    #  training domain for the SGS_DEVIATION evaluation mode.)

    def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
        """Run the GP evolution with DEAP's ``eaSimple`` and build the final policy.

        Side effects: registers ``FitnessMin``/``Individual`` in DEAP's global
        ``creator`` module, and sets ``self.toolbox``, ``self.hof``,
        ``self.best_heuristic``, ``self.func_heuristic`` and ``self.policy``.
        """
        self.domain = domain_factory()

        # Unpack GP hyper-parameters from the params object.
        tournament_ratio = self.params_gphh.tournament_ratio
        pop_size = self.params_gphh.pop_size
        n_gen = self.params_gphh.n_gen
        min_tree_depth = self.params_gphh.min_tree_depth
        max_tree_depth = self.params_gphh.max_tree_depth
        crossover_rate = self.params_gphh.crossover_rate
        mutation_rate = self.params_gphh.mutation_rate

        creator.create("FitnessMin", Fitness, weights=(self.weight,))
        creator.create("Individual", PrimitiveTree, fitness=creator.FitnessMin)

        self.toolbox = Toolbox()
        self.toolbox.register(
            "expr",
            genHalfAndHalf,
            pset=self.pset,
            min_=min_tree_depth,
            max_=max_tree_depth,
        )
        self.toolbox.register(
            "individual", tools.initIterate, creator.Individual, self.toolbox.expr
        )
        self.toolbox.register(
            "population", tools.initRepeat, list, self.toolbox.individual
        )
        self.toolbox.register("compile", gp.compile, pset=self.pset)

        # Evaluation operator depends on the configured evaluation mode.
        if self.evaluation_method == EvaluationGPHH.SGS:
            self.toolbox.register(
                "evaluate", self.evaluate_heuristic, domains=self.training_domains
            )
        # if self.evaluation_method == EvaluationGPHH.SGS_DEVIATION:
        #     self.toolbox.register("evaluate", self.evaluate_heuristic_sgs_deviation, domains=self.training_domains)
        elif self.evaluation_method == EvaluationGPHH.PERMUTATION_DISTANCE:
            self.toolbox.register(
                "evaluate",
                self.evaluate_heuristic_permutation,
                domains=self.training_domains,
            )
        # self.toolbox.register("evaluate", self.evaluate_heuristic, domains=[self.training_domains[1]])

        self.toolbox.register(
            "select", tools.selTournament, tournsize=int(tournament_ratio * pop_size)
        )
        self.toolbox.register("mate", gp.cxOnePoint)
        self.toolbox.register("expr_mut", gp.genFull, min_=0, max_=max_tree_depth)
        self.toolbox.register(
            "mutate", gp.mutUniform, expr=self.toolbox.expr_mut, pset=self.pset
        )
        # Height limit 17 is the customary DEAP GP bloat-control bound.
        self.toolbox.decorate(
            "mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17)
        )
        self.toolbox.decorate(
            "mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17)
        )

        stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
        stats_size = tools.Statistics(len)
        mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
        mstats.register("avg", np.mean)
        mstats.register("std", np.std)
        mstats.register("min", np.min)
        mstats.register("max", np.max)

        pop = self.toolbox.population(n=pop_size)
        hof = tools.HallOfFame(1)
        self.hof = hof
        pop, log = algorithms.eaSimple(
            pop,
            self.toolbox,
            crossover_rate,
            mutation_rate,
            n_gen,
            stats=mstats,
            halloffame=hof,
            verbose=True,
        )

        self.best_heuristic = hof[0]
        print("best_heuristic: ", self.best_heuristic)

        self.func_heuristic = self.toolbox.compile(expr=self.best_heuristic)
        self.policy = GPHHPolicy(
            self.domain,
            self.domain_model,
            self.func_heuristic,
            features=self.list_feature,
            params_gphh=self.params_gphh,
            recompute_cpm=True,
        )

    def _get_next_action(
        self, observation: D.T_agent[D.T_observation]
    ) -> D.T_agent[D.T_concurrency[D.T_event]]:
        """Delegate action selection to the evolved policy."""
        action = self.policy.sample_action(observation)
        # print('action_1: ', action.action)
        return action

    def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
        # The evolved heuristic applies to any observation.
        return True

    def init_primitives(self, pset) -> PrimitiveSet:
        """Rename the pset's ARG0..ARGn arguments to the configured feature names."""
        for i in range(len(self.list_feature)):
            pset.renameArguments(**{"ARG" + str(i): self.list_feature[i].value})
        return pset

    def evaluate_heuristic(self, individual, domains) -> float:
        """SGS-based fitness: mean makespan of the individual's schedules.

        Compiles ``individual`` into a priority function, scores every available
        task from each domain's initial state, turns the scores into a permutation,
        and measures the resulting RCPSP schedule's makespan.

        NOTE(review): returns a one-element list (DEAP fitness convention), despite
        the ``-> float`` annotation.
        """
        vals = []
        func_heuristic = self.toolbox.compile(expr=individual)
        # print('individual', individual)
        for domain in domains:
            ###
            initial_state = domain.get_initial_state()
            do_model = build_do_domain(domain)
            modes = [
                initial_state.tasks_mode.get(j, 1)
                for j in sorted(domain.get_tasks_ids())
            ]
            # Drop source and sink task modes.
            modes = modes[1:-1]
            cpm = self.cpm_data[domain]["cpm"]
            cpm_esd = self.cpm_data[domain]["cpm_esd"]
            raw_values = []
            for task_id in domain.get_available_tasks(initial_state):
                input_features = [
                    feature_function_map[lf](
                        domain=domain,
                        cpm=cpm,
                        cpm_esd=cpm_esd,
                        task_id=task_id,
                        state=initial_state,
                    )
                    for lf in self.list_feature
                ]
                output_value = func_heuristic(*input_features)
                raw_values.append(output_value)
            # Rank tasks by heuristic score (ascending) -> 1-based permutation.
            normalized_values = [
                x + 1
                for x in sorted(
                    range(len(raw_values)), key=lambda k: raw_values[k], reverse=False
                )
            ]
            # Strip source/sink ranks and shift to the 0-based DO permutation.
            normalized_values_for_do = [
                normalized_values[i] - 2
                for i in range(len(normalized_values))
                if normalized_values[i] not in {1, len(normalized_values)}
            ]
            solution = RCPSPSolution(
                problem=do_model,
                rcpsp_permutation=normalized_values_for_do,
                rcpsp_modes=modes,
            )
            last_activity = max(list(solution.rcpsp_schedule.keys()))
            do_makespan = solution.rcpsp_schedule[last_activity]["end_time"]
            vals.append(do_makespan)
        fitness = [np.mean(vals)]
        # fitness = [np.max(vals)]
        return fitness

    # (Commented-out legacy evaluate_heuristic_sgs_deviation implementation
    #  removed; see VCS history. It rolled out a GPHHPolicy per domain and scored
    #  the makespan deviation from the reference makespans.)

    def initialize_cpm_data_for_training(self):
        """Precompute critical-path (CPM) data once per training domain."""
        self.cpm_data = {}
        for domain in self.training_domains:
            do_model = build_do_domain(domain)
            cpm, cpm_esd = compute_cpm(do_model)
            self.cpm_data[domain] = {"cpm": cpm, "cpm_esd": cpm_esd}

    def evaluate_heuristic_permutation(self, individual, domains) -> float:
        """Permutation-distance fitness: mean distance to the reference permutations.

        Scores tasks with the compiled heuristic, ranks them into a permutation,
        compares it to the CP reference via the configured distance (Kendall tau,
        Hamming, or their sum), and adds a tie penalty proportional to the most
        common raw score.

        NOTE(review): returns a one-element list (DEAP fitness convention); if
        ``self.permutation_distance`` matches none of the three branches, ``dist``
        is unbound and this raises NameError.
        """
        vals = []
        func_heuristic = self.toolbox.compile(expr=individual)
        # print('individual', individual)
        for domain in domains:
            raw_values = []
            initial_state = domain.get_initial_state()
            regenerate_cpm = False  # kept as a debug switch; cached data used by default
            if regenerate_cpm:
                do_model = build_do_domain(domain)
                cpm, cpm_esd = compute_cpm(do_model)
            else:
                cpm = self.cpm_data[domain]["cpm"]
                cpm_esd = self.cpm_data[domain]["cpm_esd"]
            for task_id in domain.get_available_tasks(state=initial_state):
                input_features = [
                    feature_function_map[lf](
                        domain=domain,
                        cpm=cpm,
                        cpm_esd=cpm_esd,
                        task_id=task_id,
                        state=initial_state,
                    )
                    for lf in self.list_feature
                ]
                output_value = func_heuristic(*input_features)
                raw_values.append(output_value)
            # Degree of score ties: used below to penalize non-discriminating heuristics.
            most_common_raw_val = max(raw_values, key=raw_values.count)
            most_common_count = raw_values.count(most_common_raw_val)
            heuristic_permutation = [
                x + 1
                for x in sorted(
                    range(len(raw_values)), key=lambda k: raw_values[k], reverse=False
                )
            ]
            if self.permutation_distance == PermutationDistance.KTD:
                # Kendall tau is a correlation: negate so smaller = better.
                dist, p_value = stats.kendalltau(
                    heuristic_permutation, self.reference_permutations[domain]
                )
                dist = -dist
            if self.permutation_distance == PermutationDistance.HAMMING:
                dist = distance.hamming(
                    heuristic_permutation, self.reference_permutations[domain]
                )
            if self.permutation_distance == PermutationDistance.KTD_HAMMING:
                ktd, _ = stats.kendalltau(
                    heuristic_permutation, self.reference_permutations[domain]
                )
                dist = -ktd + distance.hamming(
                    heuristic_permutation, self.reference_permutations[domain]
                )
            penalty = most_common_count / len(raw_values)
            # penalty = 0.
            penalized_distance = dist + penalty
            vals.append(penalized_distance)
        fitness = [np.mean(vals)]
        # fitness = [np.max(vals)]
        return fitness

    def test_features(self, domain, task_id, observation):
        """Debug helper: print every feature value for one task in one state."""
        for f in FeatureEnum:
            print("feature: ", f)
            calculated_feature = feature_function_map[f](
                domain=domain, task_id=task_id, state=observation
            )
            print("\tcalculated_feature: ", calculated_feature)

    def set_domain(self, domain):
        """Point the solver (and its policy, if built) at a new domain."""
        self.domain = domain
        if self.policy is not None:
            self.policy.domain = domain
def mainPart(x_, y_, pset, pop_n=100, random_seed=1, cxpb=0.8, mutpb=0.1, ngen=5, alpha=1,
             tournsize=3, max_value=10, double=False, score=None, **kargs):
    """Run one symbolic-regression GP search; return population, logbook and hall of fame.

    Simpler variant without dimension handling: builds a DEAP ``Toolbox`` for the
    given primitive set and drives ``multiEaSimple`` (two-objective) or ``eaSimple``.

    Parameters
    ----------
    x_ : array-like
        Input features, forwarded to ``calculate`` as ``x``.
    y_ : array-like
        Target values, forwarded to ``calculate`` as ``y``.
    pset : PrimitiveSet or FixedPrimitiveSet
        Primitive set; its concrete type selects the tree class and GP operators.
        Must expose ``max_`` (used as the generator's depth bound).
    pop_n : int
        Population size.
    random_seed : int
        Seed for the global ``random`` module.
    cxpb, mutpb : float
        Crossover and mutation probabilities.
    ngen : int
        Number of generations.
    alpha : numeric
        Forwarded to ``multiEaSimple`` only (presumably an objective trade-off
        weight — confirm in multiEaSimple's docs).
    tournsize : int
        Tournament size for ``selTournament``.
    max_value : int
        Height limit enforced by ``staticLimit`` on mate/mutate (PrimitiveSet only).
    double : bool
        If True, use a two-objective fitness and ``multiEaSimple``.
    score : list of callables or None
        Two scoring functions; defaults to ``[r2_score, explained_variance_score]``.
    kargs
        Extra keyword arguments forwarded to ``calculate``.

    Returns
    -------
    tuple
        ``(population, logbook, hof)``.
    """
    max_ = pset.max_
    if score is None:
        score = [r2_score, explained_variance_score]
    random.seed(random_seed)

    toolbox = Toolbox()

    # Pick tree representation and GP operators from the concrete pset type.
    if isinstance(pset, PrimitiveSet):
        PTrees = ExpressionTree
        Generate = genHalfAndHalf
        mutate = mutNodeReplacement
        mate = cxOnePoint
    elif isinstance(pset, FixedPrimitiveSet):
        PTrees = FixedExpressionTree
        Generate = generate_
        mate = partial(cxOnePoint_index, pset=pset)
        mutate = mutUniForm_index
    else:
        raise NotImplementedError("get wrong pset")

    # creator.create registers classes globally (module-level side effect in DEAP).
    if double:
        creator.create("Fitness_", Fitness, weights=(1.0, 1.0))
    else:
        creator.create("Fitness_", Fitness, weights=(1.0,))
    creator.create("PTrees_", PTrees, fitness=creator.Fitness_)

    toolbox.register("generate_", Generate, pset=pset, min_=None, max_=max_)
    toolbox.register("individual", initIterate, container=creator.PTrees_,
                     generator=toolbox.generate_)
    toolbox.register('population', initRepeat, container=list, func=toolbox.individual)
    # def selection
    toolbox.register("select_gs", selTournament, tournsize=tournsize)
    # def mate
    toolbox.register("mate", mate)
    # def mutate
    toolbox.register("mutate", mutate, pset=pset)
    if isinstance(pset, PrimitiveSet):
        # Cap tree height after crossover/mutation to prevent bloat.
        toolbox.decorate("mate", staticLimit(key=operator.attrgetter("height"),
                                             max_value=max_value))
        toolbox.decorate("mutate", staticLimit(key=operator.attrgetter("height"),
                                               max_value=max_value))
    # def evaluate
    toolbox.register("evaluate", calculate, pset=pset, x=x_, y=y_,
                     score_method=score[0], **kargs)
    toolbox.register("evaluate2", calculate, pset=pset, x=x_, y=y_,
                     score_method=score[1], **kargs)

    stats1 = Statistics(lambda ind: ind.fitness.values[0])
    stats = MultiStatistics(score1=stats1, )
    stats.register("avg", np.mean)
    stats.register("max", np.max)

    pop = toolbox.population(n=pop_n)

    haln = 5  # hall-of-fame capacity
    hof = HallOfFame(haln)

    if double:
        population, logbook = multiEaSimple(pop, toolbox, cxpb=cxpb, mutpb=mutpb,
                                            ngen=ngen, stats=stats, alpha=alpha,
                                            halloffame=hof, pset=pset)
    else:
        population, logbook = eaSimple(pop, toolbox, cxpb=cxpb, mutpb=mutpb,
                                       ngen=ngen, stats=stats, halloffame=hof,
                                       pset=pset)

    return population, logbook, hof