def statsa():
    stats = Statistics(key=lambda ind: sum(ind.fitness.values))
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    stats.register("len", len)
    return stats

def statsa():
    stats = Statistics(key=lambda ind: soft_maximum_worst_case(ind))
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    stats.register("len", len)
    return stats

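# The two factories above differ only in the key: the first aggregates a
# (possibly multi-objective) fitness with sum(), the second delegates to
# soft_maximum_worst_case(). A minimal usage sketch follows; the _Fit/_Ind
# stand-ins are hypothetical and only illustrate that any object exposing
# ind.fitness.values works with these key functions.
import numpy
from deap.tools import Statistics

class _Fit:
    def __init__(self, values):
        self.values = values

class _Ind:
    def __init__(self, values):
        self.fitness = _Fit(values)

stats = Statistics(key=lambda ind: sum(ind.fitness.values))
stats.register("avg", numpy.mean)
stats.register("max", numpy.max)

population = [_Ind((0.2,)), _Ind((0.7,)), _Ind((0.5,))]
record = stats.compile(population)  # e.g. {'avg': 0.4666..., 'max': 0.7}
print(record)
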
def Statis_func(stats=None):
    if stats is None:
        stats = {"fitness_dim_max": ("max",), "dim_is_target": ("sum",)}

    func = {"max": np.max, "mean": np.mean, "min": np.min, "std": np.std, "sum": np.sum}
    att = {
        "fitness": lambda ind: ind.fitness.values[0],
        "fitness_dim_max": lambda ind: ind.fitness.values[0] if ind.dim_score else -np.inf,
        "fitness_dim_min": lambda ind: ind.fitness.values[0] if ind.dim_score else np.inf,
        "dim_is_target": lambda ind: 1 if ind.dim_score else 0,
        # special
        "coef": lambda ind: score_dim(ind.y_dim, "coef", fuzzy=False),
        "integer": lambda ind: score_dim(ind.y_dim, "integer", fuzzy=False),
        "length": lambda ind: len(ind),
        "height": lambda ind: ind.height,
        "h_bgp": lambda ind: ind.h_bgp,
        # multi-target
        "weight_fitness": lambda ind: ind.fitness.wvalues,
        "weight_fitness_dim": lambda ind: ind.fitness.wvalues if ind.dim_score else -np.inf,
        # wvalues have already been multiplied by the weights' signs
    }

    sa_all = {}

    for a, f in stats.items():
        if a in att:
            a_s = att[a]
        elif callable(a):
            a_s = a
            a = getattr(a, "__name__", str(a))
        else:
            raise TypeError("the key must be one of the predefined attribute names or a callable")

        sa = Statistics(a_s)

        if isinstance(f, str):
            f = [f, ]

        for fi in f:
            assert fi in func, "unknown aggregation: %s" % fi
            sa.register(fi, func[fi])

        sa_all["Cal_%s" % a] = sa

    stats = MultiStatistics(sa_all)
    return stats

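# A hedged usage sketch for the factory above: the argument maps an attribute
# name (or any callable taking an individual) to one or more of the aggregation
# names ("max", "mean", "min", "std", "sum"). The keys shown here come from the
# `att` table above; the result is a MultiStatistics whose chapters are named
# Cal_<attribute>.
stats = Statis_func({"fitness": ("max", "mean"), "length": "min"})
# Pass it wherever a `stats` argument is expected by the evolutionary loop;
# the logbook will then carry Cal_fitness and Cal_length chapters.
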
def train(self, pop=20, gen=10):
    from deap import algorithms
    from deap import base
    from deap import creator
    from deap import tools
    import random
    import numpy as np
    from deap.tools import Statistics

    # creator.create("FitnessMulti", base.Fitness, weights=(1.0, -1.0))
    # creator.create("Individual", list, fitness=creator.FitnessMulti)
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()

    # Attribute generator
    toolbox.register("attr_bool", random.randint, 0, 1)

    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_bool, n=len(self.X.columns))
    toolbox.register("population", tools.initRepeat, list, toolbox.individual, n=pop)

    # Operator registering
    toolbox.register("evaluate", self.eval_classifer)
    toolbox.register("mate", tools.cxUniform, indpb=0.1)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    # toolbox.register("select", tools.selNSGA2)

    MU, LAMBDA = pop, pop
    population = toolbox.population(n=MU)
    # hof = tools.ParetoFront()

    s = Statistics(key=lambda ind: ind.fitness.values)
    s.register("mean", np.mean)
    s.register("max", max)

    # pop, logbook = algorithms.eaMuPlusLambda(pop, toolbox, mu=MU, lambda_=LAMBDA,
    #                                          cxpb=0.7, mutpb=0.3, ngen=gen,
    #                                          stats=s, halloffame=hof)

    for i in range(gen):
        offspring = algorithms.varAnd(population, toolbox, cxpb=0.95, mutpb=0.1)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = tools.selBest(offspring, int(0.05 * len(offspring))) + \
            tools.selTournament(offspring, len(offspring) - int(0.05 * len(offspring)),
                                tournsize=3)
        # population = toolbox.select(offspring, k=len(population))
        print(s.compile(population))

    top10 = tools.selBest(population, k=10)
    print(top10)
    return top10[0]

def create_statistics(fit_index, index):
    """
    Create a deap.tools.Statistics instance for the given name and index.

    :param fit_index: string name for the statistic
    :param index: int, the index of the statistic in a multi-objective fitness tuple
    :return: deap.tools.Statistics
    """
    stats = Statistics(lambda ind: ind.fitness.values[index])
    stats.register("{}_avg".format(fit_index), np.mean)
    stats.register("{}_std".format(fit_index), np.std)
    stats.register("{}_min".format(fit_index), np.min)
    stats.register("{}_max".format(fit_index), np.max)
    return stats

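# For a multi-objective fitness, one Statistics object per objective (as built
# by create_statistics above) can be grouped into a deap.tools.MultiStatistics.
# A sketch under assumed names: "accuracy" and "size" stand in for a
# two-objective fitness tuple and are not taken from the original code.
from deap.tools import MultiStatistics

mstats = MultiStatistics(accuracy=create_statistics("accuracy", 0),
                         size=create_statistics("size", 1))
# mstats can be passed anywhere a Statistics object is accepted, e.g. as the
# `stats` argument of algorithms.eaSimple, and mstats.compile(population)
# returns one sub-record per objective.
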
def train(self, pop=20, gen=10):
    from deap import algorithms
    from deap import base
    from deap import creator
    from deap import tools
    from deap.tools import Statistics
    # import random
    from scipy.stats import rv_discrete

    # creator.create("FitnessMulti", base.Fitness, weights=(1.0, -1.0))
    # creator.create("Individual", list, fitness=creator.FitnessMulti)
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()

    # Attribute generator
    custm = rv_discrete(name='custm', values=(self.a_w.index, self.a_w.values))
    toolbox.register("attr_int", custm.rvs)

    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_int, n=len(self.s))
    toolbox.register("population", tools.initRepeat, list, toolbox.individual, n=pop)

    # Operator registering
    toolbox.register("evaluate", self.eval_classifer)
    toolbox.register("mate", tools.cxUniform, indpb=0.5)
    toolbox.register("mutate", tools.mutUniformInt, low=min(self.a.index),
                     up=max(self.a.index), indpb=0.1)
    toolbox.register("select", tools.selNSGA2)

    MU, LAMBDA = pop, pop
    population = toolbox.population(n=MU)
    hof = tools.ParetoFront()

    s = Statistics(key=lambda ind: ind.fitness.values)
    s.register("mean", np.mean)
    s.register("min", min)

    # pop, logbook = algorithms.eaMuPlusLambda(pop, toolbox, mu=MU, lambda_=LAMBDA,
    #                                          cxpb=0.7, mutpb=0.3, ngen=gen,
    #                                          stats=s, halloffame=hof)

    for i in range(gen):
        offspring = algorithms.varAnd(population, toolbox, cxpb=0.95, mutpb=0.1)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = tools.selBest(offspring, int(0.05 * len(offspring))) + \
            tools.selTournament(offspring, len(offspring) - int(0.05 * len(offspring)),
                                tournsize=3)
        # population = toolbox.select(offspring, k=len(population))
        print(s.compile(population))

    top10 = tools.selBest(population, k=10)
    return top10

def genetic_algorithm(verbose=False, hack=False):
    pop = toolbox.population(n=MU)
    hof = ParetoFront()  # retrieve the best non-dominated individuals of the evolution

    # Statistics created for compiling four different statistics over the generations
    stats = Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)  # axis=0: compute the statistics on each objective independently
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)

    if hack:
        _, logbook, all_generations = \
            eaMuPlusLambda_hack(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,
                                halloffame=hof, verbose=verbose)
        return pop, stats, hof, logbook, all_generations
    else:
        _, logbook = \
            eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,
                           halloffame=hof, verbose=verbose)
        return pop, stats, hof, logbook

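# After a run, the per-generation records can be read back from the logbook.
# A hedged sketch: because the statistics above use axis=0, each logbook entry
# holds one value per objective. The call below assumes the module-level
# toolbox/MU/LAMBDA/CXPB/MUTPB/NGEN used by genetic_algorithm are already set up.
import numpy as np

pop, stats, hof, logbook = genetic_algorithm(verbose=True)
gen = logbook.select("gen")
min_history = np.array(logbook.select("min"))  # shape: (number of records, n_objectives)
print(gen[-1], min_history[-1])                # per-objective minima in the last generation
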
class Population(list):
    """ A collection of individuals """
    INDIVIDUAL_CLASS = Individual
    POPULATION_SIZE = 100
    CLONE_BEST = 5
    MAX_MATE_ATTEMPTS = 10
    MATE_MUTATE_CLONE = (80, 18, 2)

    def __init__(self, bset):
        self.bset = bset
        pop = [Population.INDIVIDUAL_CLASS(self.bset) for _ in range(self.POPULATION_SIZE)]
        super(Population, self).__init__(pop)
        self.stats = Statistics(lambda ind: ind.fitness.values)
        self.stats.register("avg", np.mean)
        self.stats.register("std", np.std)
        self.stats.register("min", np.min)
        self.stats.register("max", np.max)
        self.logbook = Logbook()
        self.logbook.header = ['gen'] + self.stats.fields
        self.hof = HallOfFame(1)
        self.generation = 0

        # do an initial evaluation
        for ind in self:
            ind.fitness.values = ind.evaluate()

    def select(self, k):
        """Probabilistically select *k* individuals among the input *individuals*.
        The list returned contains references to the input *individuals*.

        :param k: The number of individuals to select.
        :returns: A list containing k individuals.

        The individuals returned are randomly selected from individuals according
        to their fitness such that the more fit the individual the more likely
        that individual will be chosen. Less fit individuals are less likely, but
        still possibly, selected.
        """
        # adjusted_pop is a list of tuples (adjusted fitness, individual)
        adjusted_pop = [(1.0 / (1.0 + i.fitness.values[0]), i) for i in self]

        # normalised_pop is a list of tuples (float, individual) where the float
        # indicates a 'share' of 1.0 that the individual deserves based on its
        # fitness relative to the other individuals. It is sorted so the best
        # chances are at the front of the list.
        denom = sum([fit for fit, ind in adjusted_pop])
        normalised_pop = [(fit / denom, ind) for fit, ind in adjusted_pop]
        normalised_pop = sorted(normalised_pop, key=lambda i: i[0], reverse=True)

        # randomly select with a fitness bias
        # FIXME: surely this can be optimized?
        selected = []
        for x in range(k):
            rand = random.random()
            accumulator = 0.0
            for share, ind in normalised_pop:
                accumulator += share
                if rand <= accumulator:
                    selected.append(ind)
                    break
        if len(selected) == 1:
            return selected[0]
        else:
            return selected

    def evolve(self):
        """ Evolve this population by one generation """
        self.logbook.record(gen=self.generation, **self.stats.compile(self))
        self.hof.update(self)
        print(self.logbook.stream)

        # the best x of the population are cloned directly into the next generation
        offspring = self[:self.CLONE_BEST]

        # rest of the population clone, mate, or mutate at random
        for _ in range(len(self) - self.CLONE_BEST):
            # decide how to alter this individual
            rand = random.randint(0, 100)
            for _ in range(0, self.MAX_MATE_ATTEMPTS):
                try:
                    if rand < self.MATE_MUTATE_CLONE[0]:
                        # MATE/CROSSOVER
                        receiver, contributor = self.select(2)
                        child = receiver.clone()
                        child.mate(contributor)
                        break
                    elif rand < (self.MATE_MUTATE_CLONE[0] + self.MATE_MUTATE_CLONE[1]):
                        # MUTATE
                        ind = self.select(1)
                        child = ind.clone()
                        child.mutate()
                        break
                    else:
                        child = self.select(1).clone()
                        break
                except BirthError:
                    continue
            else:
                # generate new blood when reproduction fails so badly
                child = Population.INDIVIDUAL_CLASS(self.bset)
            offspring.append(child)

        self[:] = offspring
        self.generation += 1

        # evaluate every individual and sort
        for ind in self:
            if not len(ind.fitness.values):
                ind.fitness.values = ind.evaluate()
        self.sort(key=lambda i: i.fitness.values[0])

def mainPart(x_, y_, pset, max_=5, pop_n=100, random_seed=2, cxpb=0.8, mutpb=0.1,
             ngen=5, tournsize=3, max_value=10, double=False, score=None, cal_dim=True,
             target_dim=None, inter_add=True, iner_add=True, random_add=False, store=True):
    """
    Parameters
    ----------
    target_dim
    max_
    inter_add
    iner_add
    random_add
    cal_dim
    score
    double
    x_
    y_
    pset
    pop_n
    random_seed
    cxpb
    mutpb
    ngen
    tournsize
    max_value

    Returns
    -------
    """
    if score is None:
        score = [r2_score, explained_variance_score]

    if cal_dim:
        assert all([isinstance(i, Dim) for i in pset.dim_list]), \
            "all import dim of pset should be Dim object"

    random.seed(random_seed)
    toolbox = Toolbox()

    if isinstance(pset, ExpressionSet):
        PTrees = ExpressionTree
        Generate = genHalfAndHalf
        mutate = mutNodeReplacement
        mate = cxOnePoint
    elif isinstance(pset, FixedSet):
        PTrees = FixedTree
        Generate = generate_index
        mutate = mutUniForm_index
        mate = partial(cxOnePoint_index, pset=pset)
    else:
        raise NotImplementedError("get wrong pset")

    if double:
        Fitness_ = creator.create("Fitness_", Fitness, weights=(1.0, 1.0))
    else:
        Fitness_ = creator.create("Fitness_", Fitness, weights=(1.0,))

    PTrees_ = creator.create("PTrees_", PTrees, fitness=Fitness_, dim=dnan, withdim=0)

    toolbox.register("generate", Generate, pset=pset, min_=1, max_=max_)
    toolbox.register("individual", initIterate, container=PTrees_, generator=toolbox.generate)
    toolbox.register('population', initRepeat, container=list, func=toolbox.individual)

    # selection
    toolbox.register("select_gs", selTournament, tournsize=tournsize)
    toolbox.register("select_kbest_target_dim", selKbestDim, dim_type=target_dim, fuzzy=True)
    toolbox.register("select_kbest_dimless", selKbestDim, dim_type="integer")
    toolbox.register("select_kbest", selKbestDim, dim_type='ignore')

    # mate
    toolbox.register("mate", mate)

    # mutate
    toolbox.register("mutate", mutate, pset=pset)
    if isinstance(pset, ExpressionSet):
        toolbox.decorate("mate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))
        toolbox.decorate("mutate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))

    # evaluate
    toolbox.register("evaluate", calculatePrecision, pset=pset, x=x_, y=y_, scoring=score[0],
                     cal_dim=cal_dim, inter_add=inter_add, iner_add=iner_add, random_add=random_add)
    toolbox.register("evaluate2", calculatePrecision, pset=pset, x=x_, y=y_, scoring=score[1],
                     cal_dim=cal_dim, inter_add=inter_add, iner_add=iner_add, random_add=random_add)
    toolbox.register("parallel", parallelize, n_jobs=1, func=toolbox.evaluate, respective=False)
    toolbox.register("parallel2", parallelize, n_jobs=1, func=toolbox.evaluate2, respective=False)

    pop = toolbox.population(n=pop_n)

    haln = 10
    hof = HallOfFame(haln)

    stats1 = Statistics(lambda ind: ind.fitness.values[0] if ind and ind.y_dim in target_dim else 0)
    stats1.register("max", np.max)
    stats2 = Statistics(lambda ind: ind.y_dim in target_dim if ind else 0)
    stats2.register("countable_number", np.sum)
    stats = MultiStatistics(score1=stats1, score2=stats2)

    population, logbook = eaSimple(pop, toolbox, cxpb=cxpb, mutpb=mutpb, ngen=ngen,
                                   stats=stats, halloffame=hof, pset=pset, store=store)

    return population, hof

def compound_update(w, c, p, min=-1, max=1):
    mapping_update(w, c, p.mapping)
    ordering_update(w, c, p.ordering, min, max)


toolbox = Toolbox()
# toolbox.register("generate", generate, _wf, rm, estimator)
toolbox.register("generate", heft_gen)
toolbox.register("fitness", fitness, _wf, rm, estimator)
toolbox.register("estimate_force", compound_force)
toolbox.register("update", compound_update, W, C)
toolbox.register("G", G)
toolbox.register("kbest", Kbest)

stats = Statistics()
stats.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
stats.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
stats.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
stats.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

logbook = Logbook()
logbook.header = ["gen", "G", "kbest"] + stats.fields


def do_exp():
    pop, _logbook, best = run_gsa(toolbox, stats, logbook, pop_size, 0, iter_number,
                                  None, kbest, ginit, w=W, c=C)

heft_schedule = run_heft(_wf, rm, estimator)
heft_mapping = schedule_to_position(heft_schedule)

heft_gen = lambda n: [deepcopy(heft_mapping) if random.random() > 1.0
                      else generate(_wf, rm, estimator, 1)[0]
                      for _ in range(n)]

toolbox = Toolbox()
# toolbox.register("generate", generate, _wf, rm, estimator)
toolbox.register("generate", heft_gen)
toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)
toolbox.register("force_vector_matrix", force_vector_matrix)
toolbox.register("velocity_and_position", velocity_and_position, beta=0.0)
toolbox.register("G", G)
toolbox.register("kbest", Kbest)

stats = Statistics()
stats.register("min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
stats.register("avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
stats.register("max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
stats.register("std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

logbook = Logbook()
logbook.header = ("gen", "G", "kbest", "min", "avr", "max", "std")

pop_size = 40
iter_number = 200
kbest = pop_size
ginit = 2

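# Because the Statistics object above is created without a key, every registered
# function receives the whole sequence of individuals, which is why each lambda
# iterates over `pop` itself. A hedged sketch of how one generation might be
# compiled and logged (e.g. inside run_gsa), assuming `pop` holds evaluated
# particles whose fitness exposes a `mofit` value:
record = stats.compile(pop)  # {'min': ..., 'avr': ..., 'max': ..., 'std': ...}
logbook.record(gen=0, G=ginit, kbest=kbest, **record)
print(logbook.stream)
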
def mainPart(x_, y_, pset, pop_n=100, random_seed=1, cxpb=0.8, mutpb=0.1, ngen=5,
             alpha=1, tournsize=3, max_value=10, double=False, score=None, **kargs):
    """
    Parameters
    ----------
    score
    double
    x_
    y_
    pset
    pop_n
    random_seed
    cxpb
    mutpb
    ngen
    alpha
    tournsize
    max_value
    kargs

    Returns
    -------
    """
    max_ = pset.max_
    if score is None:
        score = [r2_score, explained_variance_score]
    random.seed(random_seed)
    toolbox = Toolbox()

    if isinstance(pset, PrimitiveSet):
        PTrees = ExpressionTree
        Generate = genHalfAndHalf
        mutate = mutNodeReplacement
        mate = cxOnePoint
    elif isinstance(pset, FixedPrimitiveSet):
        PTrees = FixedExpressionTree
        Generate = generate_
        mate = partial(cxOnePoint_index, pset=pset)
        mutate = mutUniForm_index
    else:
        raise NotImplementedError("get wrong pset")

    if double:
        creator.create("Fitness_", Fitness, weights=(1.0, 1.0))
    else:
        creator.create("Fitness_", Fitness, weights=(1.0,))
    creator.create("PTrees_", PTrees, fitness=creator.Fitness_)

    toolbox.register("generate_", Generate, pset=pset, min_=None, max_=max_)
    toolbox.register("individual", initIterate, container=creator.PTrees_,
                     generator=toolbox.generate_)
    toolbox.register('population', initRepeat, container=list, func=toolbox.individual)

    # selection
    toolbox.register("select_gs", selTournament, tournsize=tournsize)

    # mate
    toolbox.register("mate", mate)

    # mutate
    toolbox.register("mutate", mutate, pset=pset)
    if isinstance(pset, PrimitiveSet):
        toolbox.decorate("mate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))
        toolbox.decorate("mutate", staticLimit(key=operator.attrgetter("height"), max_value=max_value))

    # evaluate
    toolbox.register("evaluate", calculate, pset=pset, x=x_, y=y_, score_method=score[0], **kargs)
    toolbox.register("evaluate2", calculate, pset=pset, x=x_, y=y_, score_method=score[1], **kargs)

    stats1 = Statistics(lambda ind: ind.fitness.values[0])
    stats = MultiStatistics(score1=stats1)
    stats.register("avg", np.mean)
    stats.register("max", np.max)

    pop = toolbox.population(n=pop_n)

    haln = 5
    hof = HallOfFame(haln)

    if double:
        population, logbook = multiEaSimple(pop, toolbox, cxpb=cxpb, mutpb=mutpb, ngen=ngen,
                                            stats=stats, alpha=alpha, halloffame=hof, pset=pset)
    else:
        population, logbook = eaSimple(pop, toolbox, cxpb=cxpb, mutpb=mutpb, ngen=ngen,
                                       stats=stats, halloffame=hof, pset=pset)

    return population, logbook, hof