Example #1
def main():
    random.seed()
    MU, LAMBDA = 100, 200
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)
    
    algorithms.eaMuCommaLambda(pop, toolbox, mu=MU, lambda_=LAMBDA, 
                               cxpb=0.6, mutpb=0.3, ngen=2000, 
                               stats=stats, halloffame=hof)
    
    return pop, stats, hof
Example #2
def main():
    global guessedY, chosenChromosome
    random.seed()
    MU, LAMBDA = 10, 100
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, logbook = algorithms.eaMuCommaLambda(pop,
                                              toolbox,
                                              mu=MU,
                                              lambda_=LAMBDA,
                                              cxpb=0.6,
                                              mutpb=0.3,
                                              ngen=10,
                                              stats=stats,
                                              halloffame=hof)

    chosenChromosome = pop[0]  # we have the best chromosome in final pop
    fillG(pop[0])
    guessedY = np.matmul(gMatrix, wMatrix)

    saveLearned("current_generation.txt", pop, MU, IND_SIZE)
    saveLearned("current_weights.txt", wMatrix, numOfClusters, numOfClasses)

    return pop, logbook, hof
Example #3
def main():
    random.seed()
    MU, LAMBDA = 10, 100
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, logbook = algorithms.eaMuCommaLambda(pop,
                                              toolbox,
                                              mu=MU,
                                              lambda_=LAMBDA,
                                              cxpb=0.6,
                                              mutpb=0.3,
                                              ngen=25,
                                              stats=stats,
                                              halloffame=hof)

    vs, gamas, w = rbf.validation(p.train_data, p.train_ystar, pop)
    answer = rbf.test(p.test_data, w, vs, gamas)
    plt.plot(p.test_data, p.test_cn, vs, gamas, answer)
    return pop, logbook, hof
def solver(problem: SubProblem, genetic_functions: ClientOrientedGenome, pop=100, gen=500, verbose=False,
           chart=False, min_decrease=-1000000, decrease_step=2) -> PartialMatch:
    population = pop
    toolbox = base.Toolbox()
    toolbox.register("rand_warehouse", random.randint, 0, problem.warehouses_num() - 1)
    toolbox.register("individual", tools.initRepeat, creator.HIndividual, toolbox.rand_warehouse, problem.clients_num())
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", genetic_functions.eval_individual)
    # toolbox.register("mate", genetic_functions.crossover)
    toolbox.register("mate", genetic_functions.crossover_one_point)
    # toolbox.register("mate", tools.cxOnePoint)
    toolbox.register("mutate", genetic_functions.mutate, percentage_clients=0.05)
    #toolbox.register("mutate", tools.mutUniformInt, low=0, up=(len(problem.warehouses)-1), indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=int(population * 0.15))
    #toolbox.register("select", tools.selBest)


    pop = toolbox.population(n=population)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    stats.register("decrease", GeneticInterruptor(decrease_step=decrease_step, min_decrease=min_decrease))

    try:
        pop, log = algorithms.eaMuCommaLambda(pop, toolbox, mu=int(population * 0.3), lambda_=int(population * 0.5),
                                              cxpb=0.1, mutpb=0.8, ngen=gen, stats=stats, halloffame=hof,
                                              verbose=verbose)
    except NotDecreasingInterrupt:
        pass

    if chart:
        gen = log.select("gen")
        fit_mins = log.select("min")
        size_avgs = log.select("avg")
        #print (fit_mins, " ", size_avgs)
        fig, ax1 = plt.subplots()
        line1 = ax1.plot(gen, fit_mins, "b-", label="Minimum Fitness")
        ax1.set_xlabel("Generation")
        ax1.set_ylabel("Fitness", color="b")
        for tl in ax1.get_yticklabels():
            tl.set_color("b")

        ax2 = ax1.twinx()
        line2 = ax2.plot(gen, size_avgs, "r-", label="Average")
        ax2.set_ylabel("Average", color="r")
        for tl in ax2.get_yticklabels():
            tl.set_color("r")

        lns = line1 + line2
        labs = [l.get_label() for l in lns]
        ax1.legend(lns, labs, loc="center right")

        plt.show()

    return individual_to_partial_match(hof[0], problem)
Example #5
def eaMuCommaLamda_with_stats(cxpb,mutpb,m,l,tb):      
    pop = tb.population(n=m+l)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("min", np.min)
    stats.register("max", np.max)
    pop, logbook = algorithms.eaMuCommaLambda(pop, tb, m, l, cxpb, mutpb, ngen=100, stats=stats, halloffame=hof, verbose=False)
    return pop, logbook, hof
Example #6
 def create_ga_hall_of_fame(self):
     self.show('Create population')
     pop = self.ga_toolbox_.population(n=self.pop_size)
     self.show('\tpopulation: OK')
     self.show('Create hof')
     hof = tools.HallOfFame(self.hof_size)
     self.show('\thof: OK')
     self.show('\nSTART GA SEARCH')
     algorithms.eaMuCommaLambda(pop,
                                self.ga_toolbox_,
                                mu=self.mu,
                                lambda_=self.lambda_,
                                cxpb=self.cxpb,
                                mutpb=self.mutpb,
                                ngen=self.num_generations,
                                stats=self.ga_stats_,
                                halloffame=hof,
                                verbose=self.verbose)
     return hof
Example #7
 def create_ga_hall_of_fame(self):
     self.show("Create population")
     pop = self.ga_toolbox_.population(n=self.pop_size)
     self.show("\tpopulation: OK")
     self.show("Create hof")
     hof = tools.HallOfFame(self.hof_size)
     self.show("\thof: OK")
     self.show("\nSTART GA SEARCH")
     algorithms.eaMuCommaLambda(
         pop,
         self.ga_toolbox_,
         mu=self.mu,
         lambda_=self.lambda_,
         cxpb=self.cxpb,
         mutpb=self.mutpb,
         ngen=self.num_generations,
         stats=self.ga_stats_,
         halloffame=hof,
         verbose=self.verbose,
     )
     return hof
Example #8
def main():
    np.random.seed(64)
    MU, LAMBDA = 15,150
    population = toolbox.population(n=MU)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    """
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    """
    stop_gen = 200
    ok_count = 0
    """
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    if halloffame is not None:
        halloffame.update(population)

    for gen in range(NGEN):
        offspring = varOr(population, toolbox, LAMBDA, cxpb = 0.6, mutpb = 0.3)
        invalid_ind = [ind for ind in population if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        if halloffame is not None:
            halloffame.update(offspring)
        fits = [ind.fitness.values[0] for ind in population]
        length = len(population)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        print(gen ,min(fits) ,max(fits) ,mean,std)

        if min(fits) <= np.exp(-10) :
            ok_count = 1
            stop_gen = gen
            break

        population = toolbox.select(offspring, MU)
    """
    pop,logbook,stop_gen,ok_count = algorithms.eaMuCommaLambda(population,toolbox,mu=MU,lambda_=LAMBDA,cxpb=0.6,mutpb=0.3,ngen=200,stats=stats,halloffame=hof)
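    # NOTE: the stock deap.algorithms.eaMuCommaLambda returns only (population, logbook);
    # the 4-value unpacking above assumes a locally modified variant that also returns
    # stop_gen and ok_count.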
    print(logbook.select("min"))
    return population,stop_gen,ok_count
Example #9
	def organize(self):
		self.resetGenAlg()
		algorithms.eaMuCommaLambda(
			population = self.pop, 
			toolbox = self.toolbox, 

			lambda_ = self.numChildrenPerIteration, 
			mu = self.numSurvivors, 
			
			cxpb = self.probOfCross, 
			mutpb = self.probOfMutation, 
			
			ngen = self.numberOfEvolutionsPerIteration, 
			
			halloffame = self.hof, 
			verbose = False
		)


		self.completionPerc = len(tools.Logbook())/ self.numberOfEvolutionsPerIteration

		bestGroups = self.hof[0][0]
		bestConfigProfiles = self.hof[0][1]

		# print(bestGroups)
		# print(bestConfigProfiles[0].dimensions)
		# breakpoint()

		avgCharacteristicsArray = []
		for group in bestGroups:
			groupSize = len(group)
			avgCharacteristics = PlayerCharacteristics()
			for currPlayer in group:
				currState = self.playerModelBridge.getPlayerCurrState(currPlayer)
				avgCharacteristics.ability += currState.characteristics.ability / groupSize
				avgCharacteristics.engagement += currState.characteristics.engagement / groupSize
			avgCharacteristicsArray.append(avgCharacteristics)

		return {"groups": bestGroups, "profiles": bestConfigProfiles, "avgCharacteristics": avgCharacteristicsArray}
Example #10
def main():
    random.seed()
    MU, LAMBDA = 100, 200
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", tools.mean)
    stats.register("std", tools.std)
    stats.register("min", min)
    stats.register("max", max)

    algorithms.eaMuCommaLambda(pop,
                               toolbox,
                               mu=MU,
                               lambda_=LAMBDA,
                               cxpb=0.6,
                               mutpb=0.3,
                               ngen=2000,
                               stats=stats,
                               halloffame=hof)

    return pop, stats, hof
Example #11
def main():
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(maxsize=THOF)
    stats = None
    if METHODE == 1:
        pop, logbook = algorithms.eaSimple(pop, toolbox, CXPB, MUTPB, NGEN, halloffame=hof)
    
    elif METHODE == 2:
        pop, logbook = algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats, halloffame=hof)
        
    elif METHODE == 3:
        pop, logbook = algorithms.eaGenerateUpdate(toolbox, NGEN, stats, hof) 
        
    elif METHODE == 4:
        pop, logbook = algorithms.eaMuCommaLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, halloffame=hof)    

    return pop, hof, logbook
Example #12
def main():
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
    halloffame = tools.HallOfFame(maxsize=1)
    population = toolbox.population(n=NPOPULATION)
    if ALGORITHM == 0:
        population, logbook = sgaAlgorithm(population, toolbox, NGEN, CXPB,
                                           MUTPB, halloffame, stats)
    else:
        population, logbook = algorithms.eaMuCommaLambda(
            population, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,
            halloffame)

    return population, halloffame, logbook
Example #13
def ea_m_comma_l_withstats(m, l, population, cxpb, mutpb, ngen):
    pop = toolbox.population(population)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    #stats.register("avg", np.mean)
    stats.register("min", np.min)
    #stats.register("max", np.max)
    pop, logbook = algorithms.eaMuCommaLambda(pop,
                                              toolbox,
                                              m,
                                              l,
                                              cxpb=cxpb,
                                              mutpb=mutpb,
                                              ngen=ngen,
                                              stats=stats,
                                              halloffame=hof,
                                              verbose=True)
    return pop, logbook, hof
 def run_algorithm(self, cxpb=0.5, mutpb=0.3, ngen=1, mu=10):
     hof = tools.HallOfFame(1)
     stats = tools.Statistics(lambda ind: ind.fitness.values)
     stats.register("avg", np.mean)
     stats.register("min", np.min)
     stats.register("max", np.max)
     pop = self.toolbox.population(n=mu)
     pop, logbook = algorithms.eaMuCommaLambda(pop,
                                               self.toolbox,
                                               mu=mu,
                                               lambda_=100,
                                               cxpb=cxpb,
                                               mutpb=mutpb,
                                               ngen=ngen,
                                               stats=stats,
                                               halloffame=hof)
     train_acc = self.evaluate(hof[0])[0]
     return pop, hof, train_acc
Example #15
def main():
    random.seed()
    MU, LAMBDA = 15, 150
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    pop, logbook,stop_gen,ok_count = algorithms.eaMuCommaLambda(pop, toolbox, mu=MU, lambda_=LAMBDA,
        cxpb=0.6, mutpb=0.3, ngen=200, stats=stats, halloffame=hof)
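    # NOTE: the stock deap.algorithms.eaMuCommaLambda returns only (population, logbook);
    # the 4-value unpacking above assumes a locally modified variant that also returns
    # stop_gen and ok_count.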
    gen,fitmin = logbook.select("gen","min")
    for i,fit in enumerate(fitmin):
        print(i, fit)
    print(pop)
    return pop, logbook, hof
Example #16
def main():
    random.seed()
    MU, LAMBDA = 10, 100
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    pop, logbook = algorithms.eaMuCommaLambda(pop, toolbox, mu=MU, lambda_=LAMBDA,
                                              cxpb=0.6, mutpb=0.3, ngen=20, stats=stats, halloffame=hof)

    with open("ind.csv", "w+") as ind_csv:
        for i in range(number_of_groups * dimension + number_of_groups):
            ind_csv.write(str(pop[0][i]) + ',')

    print("Accuracy= ", (accuracy(ind=pop[0]) / len(test)))

    return pop, logbook, hof
Example #17
def train(toolbox, mu=10, m_lambda=100, cxpb=0.6, mutpb=0.3, ngen=10):
    pop = toolbox.population_guess()
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, logbook = al.eaMuCommaLambda(pop,
                                      toolbox,
                                      mu=mu,
                                      lambda_=m_lambda,
                                      cxpb=cxpb,
                                      mutpb=mutpb,
                                      ngen=ngen,
                                      stats=stats,
                                      halloffame=hof)

    return pop, logbook, hof
Example #18
    def solve_problem(self, MU=10, LAMBDA=20, NGEN=30):

        pop = self.toolbox.population(n=MU)
        hof = tools.HallOfFame(1)
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("std", np.std)
        stats.register("min", np.min)
        stats.register("max", np.max)

        pop, logbook = algorithms.eaMuCommaLambda(pop,
                                                  self.toolbox,
                                                  mu=MU,
                                                  lambda_=LAMBDA,
                                                  cxpb=0.6,
                                                  mutpb=0.3,
                                                  ngen=NGEN,
                                                  stats=stats,
                                                  halloffame=hof)

        return pop, logbook, hof
Example #19
def main():
    random.seed()
    MU, LAMBDA = 10, 100
    pop = toolbox.population(n=pop_num)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, logbook = algorithms.eaMuCommaLambda(pop,
                                              toolbox,
                                              mu=MU,
                                              lambda_=LAMBDA,
                                              cxpb=0.6,
                                              mutpb=0.3,
                                              ngen=number_of_generation,
                                              stats=stats,
                                              halloffame=hof)

    return pop
Example #20
def computeComma():
    random.seed(47)
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(1)
    algorithms.eaMuCommaLambda(pop, toolbox, MU, LAMBDA, 0.3, 0.5, N_GEN, halloffame=hof)
    return sorted(list(hof[-1]))
Example #21
    def run_optimization_GA(self):

        # Shape of optimization parameters
        OPT_SHAPE = (len(self.opt_vec))
        
        # flattening of optimization parameters (size of an individual genome)
        IND_SIZE = np.prod(OPT_SHAPE)
        
        # population size for parameter optimization
        # 4 * # attributes per individual
        POP_SIZE = IND_SIZE * 4
        
        # number of islands (subpopulations that evolve independently until a migration)
        NISLANDS = 3
        
        # set max number of generations to run for
        NGEN = 60
        
        # Migrations frequency
        MIG_FREQ = 20
        
        # Evolution strategy variables
        MIN_VALUE = 0.0            # individual attribute min 
        MAX_VALUE = 7.0     # individual attribute max
        MIN_STRATEGY = 0.0         # min value of strength of mutation
        MAX_STRATEGY = 1.5      # max value of strength of mutation
        
        # If we want to run optimization in parallel, all information must be accessed
        # through picklable data types in python
        #ffobj.optimization_shape=(ffobj.guest.ncomp, ffobj.grid.ncomp, ffobj.model.num_params)
        #pickled = convert_ffobj_to_dict(ffobj)
        
        opt_weights = (-1.0,)
        
        
        
        
        creator.create("FitnessMin", base.Fitness, weights = opt_weights)
        creator.create("Individual", list, fitness=creator.FitnessMin, strategy = None)
        creator.create("Strategy", list)
        
        toolbox = base.Toolbox()
        
        # function calls to chromosome intialization (random vs intelligent assignment)
        #toolbox.register("rand_float", np.random.uniform)
        #toolbox.register("assign_guess", self.assign_UFF_starting) 
        
        # create individual intialization method (random vs intelligent assignment)
        toolbox.register("individual", self.generateES, self.opt_vec, creator.Individual, creator.Strategy,
                                                                                IND_SIZE,
                                                                                MIN_VALUE,
                                                                                MAX_VALUE,
                                                                                MIN_STRATEGY,
                                                                                MAX_STRATEGY)
        #toolbox.register("individual", toolbox.assign_guess, creator.Individual)
        
        
        
        # objective function for this minimization 
        # toolbox.register("evaluate", self.deap_multi_evalFitness)
        toolbox.register("evaluate", self.construct_curr_UC_GA)
        
        # define evolution strategies
        toolbox.register("mate", tools.cxESBlend, alpha=0.5)
        toolbox.decorate("mate", self.checkStrategy(MIN_VALUE,
                                               MAX_VALUE,
                                               MAX_STRATEGY,
                                               MAX_STRATEGY)
                        )

        ###toolbox.register("mutate", tools.mutPolynomialBounded, eta = 0.0001, low = 0.0, up = 10000.0, indpb = 0.1)
        toolbox.register("mutate", tools.mutESLogNormal, c = 1.0, indpb = 0.9)
        toolbox.decorate("mutate", self.checkStrategy(MIN_VALUE,
                                                 MAX_VALUE,
                                                 MAX_STRATEGY,
                                                 MAX_STRATEGY)
                        )
        ###toolbox.register("mutate", tools.mutESLogNormal, c = 1, indpb = 0.1)
        
        toolbox.register("select", tools.selTournament, tournsize = int(POP_SIZE/2))
        ###toolbox.register("select", tools.selTournament, k = 10, tournsize = 64)
        
        
        # parallelize or no
        #pool = multiprocessing.Pool(processes = 7)
        #toolbox.register("map", pool.map)
        
        
        
        # create a population of individuals
        toolbox.register("population", tools.initRepeat, list, toolbox.individual, n = POP_SIZE)
        population = toolbox.population()

        # create islands to contain distinct populations
        islands = [toolbox.population() for i in range(NISLANDS)]
        
        # create a hall of fame for each island
        hofsize = max(1, int(POP_SIZE/10))
        famous = [tools.HallOfFame(maxsize = hofsize) for i in range(NISLANDS)]
        
        # create a stats log for each island
        stats = [tools.Statistics(lambda ind: ind.fitness.values) for i in range(NISLANDS)]
        
        for i in range(NISLANDS):
            stats[i].register("avg", np.mean)
            stats[i].register("std", np.std)
            stats[i].register("min", np.min)
            stats[i].register("max", np.max)
        
        
        # MU, LAMDA parameters
        MU, LAMBDA = POP_SIZE, POP_SIZE*2
        
        # run optimization with periodic migration between islands
        for i in range(int(NGEN/MIG_FREQ)):
            print("----------------")
            print("Evolution period: " + str(i))
            print("----------------")
            for k in range(len(islands)):
                print("------------------------")
                print("Island " + str(k) + " evolution:")
                print("------------------------")
                #islands[k], log = algorithms.eaGenerateUpdate(toolbox, ngen = MIG_FREQ, halloffame = famous[k], stats = stats[k])
                islands[k], log = algorithms.eaMuCommaLambda(islands[k], toolbox, mu=MU, lambda_ = LAMBDA, cxpb = 0.4, mutpb = 0.6, ngen = MIG_FREQ, halloffame = famous[k], stats = stats[k])
            print("---------------")
            print("MIGRATION!")
            print("---------------")
            self.custom_migRing(islands, 10, tools.selBest, replacement = tools.selWorst)
        
        # Create final population for the last run
        final_famous = tools.HallOfFame(maxsize = 1)
        final_stats = tools.Statistics(lambda ind: ind.fitness.values)
        final_stats.register("avg", np.mean)
        final_stats.register("std", np.std)
        final_stats.register("min", np.min)
        final_stats.register("max", np.max)
        toolbox.register("final_population", tools.initRepeat, list, toolbox.individual, n = hofsize * NISLANDS)
        final_population = toolbox.final_population()
        
        # copy over each island's famous individuals into last 
        for i in range(NISLANDS):
            for j in range(hofsize):
                final_population[i*hofsize + j] = famous[i][j]
        
        # make sure our ultimate hall of fame starts out as the best we've ever seen
        final_famous.update(final_population)
        
        # reset MU, LAMBDA and rerun final evolution
        MU, LAMBDA = hofsize*NISLANDS, hofsize*NISLANDS*2
        final_pop, log = algorithms.eaMuCommaLambda(final_population, toolbox, mu=MU, lambda_ = LAMBDA, cxpb = 0.4, mutpb = 0.6, ngen = MIG_FREQ, halloffame = final_famous, stats = final_stats)


        self.opt_vec = np.array(final_famous[0])
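The island loop above relies on a project-specific custom_migRing helper that is not shown. DEAP ships a generic tools.migRing that performs the same ring-topology exchange; the following minimal sketch (an assumption, not the author's helper) shows how that migration step could be written with the built-in function, reusing the example's islands list:

from deap import tools

# After each evolution period, send every island's 10 best individuals to the
# next island in the ring, replacing that island's 10 worst (done in place).
tools.migRing(islands, k=10, selection=tools.selBest, replacement=tools.selWorst)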
Example #22
def main():
    print("executable name: ", sys.argv[0])
    print("algorithm type to use: ", tipoAlgoritmo)
    random.seed()
    MU, LAMBDA = 10, 100
    pop = toolbox.population(n=MU)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    if tipoAlgoritmo == 'eaSimple':
        '''
eaSimple:
The algorithm takes a population and evolves it in place using the varAnd() method.*

Part of an evolutionary algorithm applying only the variation part (crossover and mutation). The modified individuals
have their fitness invalidated. The individuals are cloned, so the returned population is independent
of the input population.

1. evaluate the individuals with an invalid fitness
2. enter the generational loop, where the selection procedure is applied to
   entirely replace the parental population. The 1:1 replacement ratio of this algorithm
   requires the selection procedure to be stochastic and able to select the same individual several times

3. apply the varAnd function to produce the next-generation population.
4. evaluate the new individuals and compute the statistics on this population.
5. return a tuple with the final population and a Logbook of the evolution.
'''
        print("Running the eaSimple algorithm")
        pop, logbook = algorithms.eaSimple(pop,
                                           toolbox,
                                           cxpb=cx_pb,
                                           mutpb=mut_pb,
                                           ngen=num_gen,
                                           stats=stats,
                                           halloffame=hof)

    elif tipoAlgoritmo == 'eaMuPlusLambda':
        '''
eaMuPlusLambda:
the algorithm takes a population and evolves it in place using the varOr() function*
as above, except that it uses (crossover, mutation or reproduction)

The variation proceeds as follows:
for lambda_ iterations, one of the three operations is selected: crossover, mutation or reproduction.
In the case of a crossover, two individuals are selected at random from the parental population P_\mathrm{p};
these individuals are cloned using the toolbox.clone() method and then mated using the toolbox.mate() method.
Only the first child is added to the offspring population P_\mathrm{o};
the second child is discarded.
In the case of a mutation, an individual is selected at random from P_\mathrm{p},
cloned and then mutated using the toolbox.mutate() method.
In the case of a reproduction, an individual is selected at random from P_\mathrm{p},
cloned and added to P_\mathrm{o}.

This variation is called Or because an offspring never results from both the crossover
and mutation operations;
the sum of both probabilities must be in [0,1], and the reproduction probability is 1 - cxpb - mutpb.

1. the individuals with an invalid fitness are evaluated.
2. the evolutionary loop starts by producing the population's offspring; the offspring are generated by the varOr() function.
3. the offspring are then evaluated and the next-generation population is selected
from both the offspring and the population.
'''
        print("Running the eaMuPlusLambda algorithm")
        pop, logbook = algorithms.eaMuPlusLambda(pop,
                                                 toolbox,
                                                 mu=MU,
                                                 lambda_=LAMBDA,
                                                 cxpb=cx_pb,
                                                 mutpb=mut_pb,
                                                 ngen=num_gen,
                                                 stats=stats,
                                                 halloffame=hof)

    elif tipoAlgoritmo == 'eaMuCommaLambda':
        '''
eaMuCommaLambda ( uses varOr() ):
1. the individuals with an invalid fitness are evaluated
2. the evolutionary loop starts by producing the population's offspring; the offspring
are generated by the varOr() function. The offspring are then evaluated and the new generation
of the population is selected from the offspring only
3. once ngen generations have been done, the algorithm returns a tuple with the
final population and a logbook of the evolution.

NOTE: care must be taken when the lambda:mu ratio is 1:1, since a non-stochastic
selection will result in no selection at all, as the operator selects lambda individuals from a pool of mu.
'''
        print("Running the eaMuCommaLambda algorithm")
        pop, logbook = algorithms.eaMuCommaLambda(pop,
                                                  toolbox,
                                                  mu=MU,
                                                  lambda_=LAMBDA,
                                                  cxpb=cx_pb,
                                                  mutpb=mut_pb,
                                                  ngen=num_gen,
                                                  stats=stats,
                                                  halloffame=hof)
    else:
        print("invalid algorithm value")

    # print("HALL OF FAME:\n", hof)

    listaProva = []
    listaProva.append(hof)
    arrayHOF = []
    for i in range(0, 7):
        arrayHOF.append(listaProva[0][0][i])

    # print(arrayHOF)
    avg_list = []
    avg_list.append(logbook)

    arrayGen = []
    arrayNevals = []
    arrayAvg = []
    arrayStd = []
    arrayMin = []
    arrayMax = []

    for i in range(0, len(logbook)):
        arrayGen.append(avg_list[0][i]["gen"])
        arrayNevals.append(avg_list[0][i]["nevals"])
        arrayAvg.append(avg_list[0][i]["avg"])
        arrayStd.append(avg_list[0][i]["std"])
        arrayMin.append(avg_list[0][i]["min"])
        arrayMax.append(avg_list[0][i]["max"])

    with open('risultato.json', 'w') as f:
        json.dump(arrayGen, f, ensure_ascii=True, indent=4)

    print(arrayHOF)
    # for i in range(0,len(arrayMax)):
    #     print("{x: ",i ,",y: ", arrayMax[i],"},")
    # they are already ordered as they come out of the logbook
    # FitnessEAltre.stampaGrafico(arrayGen, arrayHOF)
    # FitnessEAltre.stampaTuttiGrafici(arrayGen, arrayNevals, arrayAvg, arrayStd, arrayMin, arrayMax)

    return pop, logbook, hof
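The docstrings above describe the plus and comma strategies in prose. As a minimal sketch of the survivor-selection step that distinguishes them (not part of the original example; toolbox, population, MU and LAMBDA are assumed to be set up as in the surrounding snippets):

from deap import algorithms

# One generation, paraphrased: varOr produces LAMBDA offspring, each via
# crossover OR mutation OR reproduction, never a combination of the first two.
offspring = algorithms.varOr(population, toolbox, LAMBDA, cxpb=0.6, mutpb=0.3)
invalid = [ind for ind in offspring if not ind.fitness.valid]
for ind, fit in zip(invalid, toolbox.map(toolbox.evaluate, invalid)):
    ind.fitness.values = fit

# (mu, lambda): survivors are selected from the offspring only, so LAMBDA should exceed MU.
population = toolbox.select(offspring, MU)
# (mu + lambda) would instead select from parents and offspring combined:
# population = toolbox.select(population + offspring, MU)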
def main():
    pop = toolbox.population(n=MU)
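    # NOTE: the stock deap.algorithms.eaMuCommaLambda requires an integer lambda_;
    # passing None below assumes a locally modified variant that tolerates it.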
    pop, logbook = algorithms.eaMuCommaLambda(pop, toolbox, MU,
                                                  None, CXPB, 1 - CXPB,
                                                  NGEN, verbose=True)
    return pop, logbook
Example #24
    def run_optimization_GA(self):

        # Shape of optimization parameters
        OPT_SHAPE = (len(self.opt_vec))

        # flattening of optimization parameters (size of an individual genome)
        IND_SIZE = np.prod(OPT_SHAPE)

        # population size for parameter optimization
        # 4 * # attributes per individual
        POP_SIZE = IND_SIZE * 4

        # number of islands (subpopulations that evolve independently until a migration)
        NISLANDS = 3

        # set max number of generations to run for
        NGEN = 60

        # Migrations frequency
        MIG_FREQ = 20

        # Evolution strategy variables
        MIN_VALUE = 0.0  # individual attribute min
        MAX_VALUE = 7.0  # individual attribute max
        MIN_STRATEGY = 0.0  # min value of strength of mutation
        MAX_STRATEGY = 1.5  # max value of strength of mutation

        # If we want to run optimization in parallel, all information must be accessed
        # through picklable data types in python
        #ffobj.optimization_shape=(ffobj.guest.ncomp, ffobj.grid.ncomp, ffobj.model.num_params)
        #pickled = convert_ffobj_to_dict(ffobj)

        opt_weights = (-1.0, )

        creator.create("FitnessMin", base.Fitness, weights=opt_weights)
        creator.create("Individual",
                       list,
                       fitness=creator.FitnessMin,
                       strategy=None)
        creator.create("Strategy", list)

        toolbox = base.Toolbox()

        # function calls to chromosome intialization (random vs intelligent assignment)
        #toolbox.register("rand_float", np.random.uniform)
        #toolbox.register("assign_guess", self.assign_UFF_starting)

        # create individual intialization method (random vs intelligent assignment)
        toolbox.register("individual", self.generateES, self.opt_vec,
                         creator.Individual, creator.Strategy, IND_SIZE,
                         MIN_VALUE, MAX_VALUE, MIN_STRATEGY, MAX_STRATEGY)
        #toolbox.register("individual", toolbox.assign_guess, creator.Individual)

        # objective function for this minimization
        # toolbox.register("evaluate", self.deap_multi_evalFitness)
        toolbox.register("evaluate", self.construct_curr_UC_GA)

        # define evolution strategies
        toolbox.register("mate", tools.cxESBlend, alpha=0.5)
        toolbox.decorate(
            "mate",
            self.checkStrategy(MIN_VALUE, MAX_VALUE, MAX_STRATEGY,
                               MAX_STRATEGY))

        ###toolbox.register("mutate", tools.mutPolynomialBounded, eta = 0.0001, low = 0.0, up = 10000.0, indpb = 0.1)
        toolbox.register("mutate", tools.mutESLogNormal, c=1.0, indpb=0.9)
        toolbox.decorate(
            "mutate",
            self.checkStrategy(MIN_VALUE, MAX_VALUE, MAX_STRATEGY,
                               MAX_STRATEGY))
        ###toolbox.register("mutate", tools.mutESLogNormal, c = 1, indpb = 0.1)

        toolbox.register("select",
                         tools.selTournament,
                         tournsize=int(POP_SIZE / 2))
        ###toolbox.register("select", tools.selTournament, k = 10, tournsize = 64)

        # parallelize or no
        #pool = multiprocessing.Pool(processes = 7)
        #toolbox.register("map", pool.map)

        # create a population of individuals
        toolbox.register("population",
                         tools.initRepeat,
                         list,
                         toolbox.individual,
                         n=POP_SIZE)
        population = toolbox.population()

        # create islands to contain distinct populations
        islands = [toolbox.population() for i in range(NISLANDS)]

        # create a hall of fame for each island
        hofsize = max(1, int(POP_SIZE / 10))
        famous = [tools.HallOfFame(maxsize=hofsize) for i in range(NISLANDS)]

        # create a stats log for each island
        stats = [
            tools.Statistics(lambda ind: ind.fitness.values)
            for i in range(NISLANDS)
        ]

        for i in range(NISLANDS):
            stats[i].register("avg", np.mean)
            stats[i].register("std", np.std)
            stats[i].register("min", np.min)
            stats[i].register("max", np.max)

        # MU, LAMBDA parameters
        MU, LAMBDA = POP_SIZE, POP_SIZE * 2

        # run optimization with periodic migration between islands
        for i in range(int(NGEN / MIG_FREQ)):
            print("----------------")
            print("Evolution period: " + str(i))
            print("----------------")
            for k in range(len(islands)):
                print("------------------------")
                print("Island " + str(k) + " evolution:")
                print("------------------------")
                #islands[k], log = algorithms.eaGenerateUpdate(toolbox, ngen = MIG_FREQ, halloffame = famous[k], stats = stats[k])
                islands[k], log = algorithms.eaMuCommaLambda(
                    islands[k],
                    toolbox,
                    mu=MU,
                    lambda_=LAMBDA,
                    cxpb=0.4,
                    mutpb=0.6,
                    ngen=MIG_FREQ,
                    halloffame=famous[k],
                    stats=stats[k])
            print("---------------")
            print("MIGRATION!")
            print("---------------")
            self.custom_migRing(islands,
                                10,
                                tools.selBest,
                                replacement=tools.selWorst)

        # Create final population for the last run
        final_famous = tools.HallOfFame(maxsize=1)
        final_stats = tools.Statistics(lambda ind: ind.fitness.values)
        final_stats.register("avg", np.mean)
        final_stats.register("std", np.std)
        final_stats.register("min", np.min)
        final_stats.register("max", np.max)
        toolbox.register("final_population",
                         tools.initRepeat,
                         list,
                         toolbox.individual,
                         n=hofsize * NISLANDS)
        final_population = toolbox.final_population()

        # copy over each island's famous individuals into last
        for i in range(NISLANDS):
            for j in range(hofsize):
                final_population[i * hofsize + j] = famous[i][j]

        # make sure our ultimate hall of fame starts out as the best we've ever seen
        final_famous.update(final_population)

        # reset MU, LAMBDA and rerun final evolution
        MU, LAMBDA = hofsize * NISLANDS, hofsize * NISLANDS * 2
        final_pop, log = algorithms.eaMuCommaLambda(final_population,
                                                    toolbox,
                                                    mu=MU,
                                                    lambda_=LAMBDA,
                                                    cxpb=0.4,
                                                    mutpb=0.6,
                                                    ngen=MIG_FREQ,
                                                    halloffame=final_famous,
                                                    stats=final_stats)

        self.opt_vec = np.array(final_famous[0])
    def _genetic_optimalisation(self, optimalisation_type='multi'):
        if optimalisation_type == 'diversity_single':
            creator.create("FitnessMulti", base.Fitness, weights=(-1.0, ))
        elif optimalisation_type == 'quality_single':
            creator.create("FitnessMulti", base.Fitness, weights=(1.0, ))
        elif optimalisation_type == 'precision_single':
            creator.create("FitnessMulti", base.Fitness, weights=(1.0, ))
        elif optimalisation_type == 'recall_single':
            creator.create("FitnessMulti", base.Fitness, weights=(1.0, ))
        else:
            creator.create("FitnessMulti", base.Fitness, weights=(1.0, -1.0))
        creator.create("Individual", np.ndarray, fitness=creator.FitnessMulti)

        IND_SIZE = len(self.ensemble_)

        toolbox = base.Toolbox()
        toolbox.register("attr_bool", randint, 0, 1)
        toolbox.register("individual",
                         tools.initRepeat,
                         creator.Individual,
                         toolbox.attr_bool,
                         n=IND_SIZE)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)
        toolbox.register("mate", tools.cxTwoPoint)
        toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
        if optimalisation_type == 'multi':
            toolbox.register("select", tools.selNSGA2)
            toolbox.register("evaluate",
                             MCE._evaluate,
                             y_predicts=self._y_predict,
                             y_true=self._y_valid,
                             pairwise_div_stat=self._pairwise_diversity_stats)
        else:
            toolbox.register("select", tools.selTournament, tournsize=50)
            if optimalisation_type == 'quality_single':
                toolbox.register("evaluate",
                                 MCE._evaluate_q,
                                 y_predicts=self._y_predict,
                                 y_true=self._y_valid)
            elif optimalisation_type == 'precision_single':
                toolbox.register("evaluate",
                                 MCE._evaluate_p,
                                 y_predicts=self._y_predict,
                                 y_true=self._y_valid)
            elif optimalisation_type == 'recall_single':
                toolbox.register("evaluate",
                                 MCE._evaluate_r,
                                 y_predicts=self._y_predict,
                                 y_true=self._y_valid)
            else:
                toolbox.register(
                    "evaluate",
                    MCE._evaluate_d,
                    pairwise_div_stat=self._pairwise_diversity_stats)

        result = algorithms.eaMuCommaLambda(toolbox.population(n=100), toolbox,
                                            100, 100, 0.2, 0.1, 500)[0]
        fitnesses = list(map(toolbox.evaluate, result))

        return result, fitnesses
 ax2.plot(x, promedios - desviacion, linestyle='--', color='b')
 ax2.plot(x, promedios + desviacion, linestyle='--', color='g')
 mejor = 0
 for i in range(len(pop)):
     if (evaluacion(pop[i]) > evaluacion(pop[mejor])):
         mejor = i
 mejorPlus = pop[mejor]
 #The best result from the second algorithm is stored
 #Algorithm 3
 print("Running the eaMuCommaLambda algorithm...")
 for i in range(10):
     pop, log = algorithms.eaMuCommaLambda(pop,
                                           toolbox,
                                           10,
                                           10,
                                           0.5,
                                           0.5,
                                           100,
                                           stats=stats,
                                           verbose=False)
     df2 = pd.DataFrame(log)
     df2['algoritmo'] = 'eaMuCommaLambda'
     df2['corrida'] = i
     dfC = pd.concat([dfC, df2])
 dfC = dfC.reset_index(drop=True)
 dfPromediosC = dfC.groupby(['algoritmo',
                             'gen']).agg({'max': ['mean', 'std']})
 x = dfC['gen'].unique()
 promedios = dfPromediosC['max']['mean'].values
 desviacion = dfPromediosC['max']['std'].values
 #The relevant values are saved and plotted
Example #27

toolbox.register("evaluate", fitness, Controller)
toolbox.register("mate", tools.cxBlend, alpha=0.1)
toolbox.register("mutate", tools.mutESLogNormal, c=1., indpb=0.3)
toolbox.register("select", tools.selTournament, tournsize=3)

stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
fittest = tools.HallOfFame(10)

population = toolbox.population(n=MU)

population, logbook = algorithms.eaMuCommaLambda(population,
                                                 toolbox,
                                                 mu=MU,
                                                 lambda_=LAMBDA,
                                                 cxpb=0.2,
                                                 mutpb=0.7,
                                                 ngen=NUMBER_OF_GENERATIONS,
                                                 stats=stats,
                                                 halloffame=fittest,
                                                 verbose=True)

pd.DataFrame(logbook).to_csv("./logbookseed{}.csv".format(seed), index=False)
pd.DataFrame(np.array(fittest)[0, ]).to_csv(
    "./fittestseed{}mixedFull.csv".format(seed), header=False, index=False)
Example #28
            lambda_=population_size,
            halloffame=hof,
            cxpb=0.5,
            mutpb=0.5,
            ngen=num_generations,
            stats=stats,
            verbose=True)
        print(hof)

        # run eaMuCommaLambda algorithm and save stats logbook in log_mucommalambda
        _, log_mucommalambda = algorithms.eaMuCommaLambda(
            population=deepcopy(population),
            toolbox=toolbox,
            mu=population_size,
            lambda_=population_size,
            halloffame=hof,
            cxpb=0.5,
            mutpb=0.5,
            ngen=num_generations,
            stats=stats,
            verbose=True)
        print(hof)

        # create column of Algorithm for DataFrame (hard way)
        algorithm_column = ["eaSimple"] * (num_generations + 1) + [
            "eaMuPlusLambda"
        ] * (num_generations + 1) + ["eaMuCommaLambda"] * (num_generations + 1)
        # create column of Iteration for DataFrame (hard way)
        iteration_column = [iteration] * (num_generations + 1) * 3
        # create column of Population sizes for DataFrame
        population_size_column = log_simple.select(
Example #29
toolbox.register("mate", crossover)

toolbox.decorate("mate", checkStrategy(MIN_STRAT))
toolbox.decorate("mutate", checkStrategy(MAX_STRAT))

stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
hof = tools.HallOfFame(1)

pop = toolbox.population(n=10)
bestGen = algorithms.eaMuCommaLambda(pop,
                                     toolbox,
                                     mu=10,
                                     lambda_=100,
                                     cxpb=0.6,
                                     mutpb=0.3,
                                     ngen=10,
                                     stats=stats,
                                     halloffame=hof)

# h = [0.27410710813427114, 0.9429356773357587, -0.4972272296162646, -0.383643504427578, -0.049263178659204675, 1.2095610712762808, -0.08520108713144234, 0.2015061304259363, -1.0912474851945488, -0.26995261833327494, 1.6228039021635832, 0.5890993706358603, 0.30464525370334306, 1.1859646242939061, 0.23004490426038354, 1.0756668040623485, 2.5099395970808027, 1.509158177912565, 0.12177842923276472, 0.7258487240251309, -1.5438305279695512]
h = hof[0]
print(h)
err = evaluateInd(h)
print(err)

printRes(h)
Example #30
def main():

    pop = toolbox.population(n=10)

    #statistics
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("max", numpy.max)
    stats.register("min", numpy.min)
    stats.register("mean", numpy.mean)
    stats.register("std", numpy.std)

    # define the hall-of-fame object:
    hof = tools.HallOfFame(1)

    #eaSimple
    print("------- EaSimple -------")

    df = pd.DataFrame()
    for i in range(10):
        pop, log = algorithms.eaSimple(pop,
                                       toolbox,
                                       1.0,
                                       0.5,
                                       100,
                                       stats=stats,
                                       halloffame=hof)
        df2 = pd.DataFrame(log)
        df2['algoritmo'] = 'eaSimple'
        df2['corrida'] = i
        df = pd.concat([df, df2])

    df = df.reset_index(drop=True)

    for i in range(1010):
        if i > 0 and df.at[i, 'max'] < df.at[i - 1, 'max']:
            df.at[i, 'max'] = df.at[i - 1, 'max']

    print(df.to_string())

    # print best solution found:
    best = hof.items[0]
    print("Best Ever Individual = ", best)
    print("Best Ever Fitness = ", best.fitness.values[0])

    print("--- TSP best route --- ")
    tsp.printResult(best)

    df_promedios = df.groupby(['algoritmo',
                               'gen']).agg({'max': ['mean', 'std']})
    print(df_promedios.to_string())

    x = df['gen'].unique()

    promedios = df_promedios['max']['mean'].values
    desviacion = df_promedios['max']['std'].values
    plt.plot(x, promedios, color='r')
    plt.plot(x, promedios - desviacion, linestyle='--', color='b')
    plt.plot(x, promedios + desviacion, linestyle='--', color='g')

    plt.show()

    #--------------------------------------------------------------------------------------------

    #eaMuPlusLambda
    print("------- EaMuPlusLambda -------")

    hof2 = tools.HallOfFame(1)
    df = pd.DataFrame()
    for i in range(10):
        pop, log = algorithms.eaMuPlusLambda(pop,
                                             toolbox,
                                             5,
                                             10,
                                             0.5,
                                             0.5,
                                             100,
                                             stats=stats,
                                             halloffame=hof2)
        df2 = pd.DataFrame(log)
        df2['algoritmo'] = 'eaMuPlusLambda'
        df2['corrida'] = i
        df = pd.concat([df, df2])

    df = df.reset_index(drop=True)

    for i in range(1010):
        if i > 0 and df.at[i, 'max'] < df.at[i - 1, 'max']:
            df.at[i, 'max'] = df.at[i - 1, 'max']

    print(df.to_string())

    # print best solution found:
    best = hof2.items[0]
    print("Best Ever Individual = ", best)
    print("Best Ever Fitness = ", best.fitness.values[0])

    print("--- TSP best route --- ")
    tsp.printResult(best)

    df_promedios = df.groupby(['algoritmo',
                               'gen']).agg({'max': ['mean', 'std']})
    print(df_promedios.to_string())

    x = df['gen'].unique()

    promedios = df_promedios['max']['mean'].values
    desviacion = df_promedios['max']['std'].values
    plt.plot(x, promedios, color='r')
    plt.plot(x, promedios - desviacion, linestyle='--', color='b')
    plt.plot(x, promedios + desviacion, linestyle='--', color='g')

    plt.show()

    #--------------------------------------------------------------------------------------------

    #eaMuCommaLambda

    print("------- EaMuCommaLambda -------")
    hof3 = tools.HallOfFame(1)
    df = pd.DataFrame()
    for i in range(10):
        pop, log = algorithms.eaMuCommaLambda(pop,
                                              toolbox,
                                              5,
                                              10,
                                              0.5,
                                              0.5,
                                              100,
                                              stats=stats,
                                              halloffame=hof3)
        df2 = pd.DataFrame(log)
        df2['algoritmo'] = 'eaMuCommaLambda'
        df2['corrida'] = i
        df = pd.concat([df, df2])

    df = df.reset_index(drop=True)

    for i in range(1010):
        if i > 0 and df.at[i, 'max'] < df.at[i - 1, 'max']:
            df.at[i, 'max'] = df.at[i - 1, 'max']

    print(df.to_string())

    # print best solution found:
    best = hof3.items[0]
    print("Best Ever Individual = ", best)
    print("Best Ever Fitness = ", best.fitness.values[0])

    print("--- TSP best route --- ")
    tsp.printResult(best)

    df_promedios = df.groupby(['algoritmo',
                               'gen']).agg({'max': ['mean', 'std']})
    print(df_promedios.to_string())

    x = df['gen'].unique()

    promedios = df_promedios['max']['mean'].values
    desviacion = df_promedios['max']['std'].values
    plt.plot(x, promedios, color='r')
    plt.plot(x, promedios - desviacion, linestyle='--', color='b')
    plt.plot(x, promedios + desviacion, linestyle='--', color='g')

    plt.show()
Example #31
def ES(evaluate, myparams, pool=None, run_name="runXXX"):
    """Mu plus/comma lambda ES (the variant is chosen via params["VARIANT"])."""
    
    params={"IND_SIZE":1,
            "MU":100,
            "LAMBDA":1000,
            "CXPB":1,
            "MUTPB":1,
            "NGEN":1000,
            "STATS":stats,
            "MIN": 0,
            "MAX": 1,
            "MIN_STRATEGY":0,
            "MAX_STRATEGY":1,
            "ALPHA": 0.1,
            "C": 1.0,
            "INDPB": 0.03,
            "TOURNSIZE":3,
            "VARIANT": "+"
           }
    
    
    for key in myparams.keys():
        params[key]=myparams[key]

    if ("MU" in myparams) and ("LAMBDA" not in myparams):
        params["LAMBDA"]=int(2*params["MU"])
        
    toolbox = base.Toolbox()
    toolbox.register("individual", generateES, creator.Individual, creator.Strategy,
        params["IND_SIZE"], params["MIN"], params["MAX"], params["MIN_STRATEGY"], params["MAX_STRATEGY"])
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("mate", tools.cxESBlend, alpha=params["ALPHA"])
    toolbox.register("mutate", tools.mutESLogNormal, c=params["C"], indpb=params["INDPB"])
    toolbox.register("select", tools.selTournament, tournsize=params["TOURNSIZE"])
    toolbox.register("evaluate", evaluate)

    # Parallelism
#    if(not in_ipython):
#        toolbox.register("map", futures.map)

#    toolbox.register("map", mymap)
    if(pool):
        toolbox.register("map", pool.map)

    
#    toolbox.decorate("mate", checkStrategy(params["MIN_STRATEGY"], params["MAX_STRATEGY"]))
#    toolbox.decorate("mutate", checkStrategy(params["MIN_STRATEGY"], params["MAX_STRATEGY"]))
    toolbox.decorate("mate", checkStrategyMin(params["MIN_STRATEGY"]))
    toolbox.decorate("mutate", checkStrategyMin(params["MIN_STRATEGY"]))

    pop = toolbox.population(n=params["MU"])
    hof = tools.HallOfFame(1)
    
    if (params["VARIANT"]=="+"):
        print("Mu+Lambda ES")
        rpop, logbook = algorithms.eaMuPlusLambda(pop, toolbox, mu=params["MU"], lambda_=params["LAMBDA"], 
                                                  cxpb=params["CXPB"], mutpb=params["MUTPB"], ngen=params["NGEN"], stats=params["STATS"], halloffame=hof, verbose=False)
    else:
        print("Mu,Lambda ES")
        rpop, logbook = algorithms.eaMuCommaLambda(pop, toolbox, mu=params["MU"], lambda_=params["LAMBDA"], 
                                                   cxpb=params["CXPB"], mutpb=params["MUTPB"], ngen=params["NGEN"], stats=params["STATS"], halloffame=hof, verbose=False)
        
    return rpop, logbook, hof


    def get_worst_individual_from_pop(individuals):
        return tools.selWorst(individuals, 1)

    



        

algo_dict = {
        "simple" : algorithms.eaSimple,
        "mu+lambda" : lambda population, toolbox, cxpb, mutpb, ngen,  halloffame, verbose : algorithms.eaMuPlusLambda(population=population, toolbox=toolbox, mu=Algorithms.mu, lambda_=Algorithms.lambda_, cxpb=cxpb, mutpb=mutpb, ngen=ngen, halloffame=halloffame, verbose=verbose),
        "mu,lambda" : lambda population, toolbox, cxpb, mutpb, ngen,  halloffame, verbose : algorithms.eaMuCommaLambda(population=population, toolbox=toolbox, mu=Algorithms.mu, lambda_=Algorithms.lambda_, cxpb=cxpb, mutpb=mutpb, ngen=ngen, halloffame=halloffame, verbose=verbose),        
        "custom"    : Algorithms.basic_self,
        "lgml"      : Algorithms.lgml_algorithm,
        "earlyswitcher": Algorithms.early_switcher
        }



def get_algorithm(key):
        """
        Returns the algorithm function associated with the key
        Defaults to eaSimple if key not found
        """
        if key not in algo_dict.keys():
            print(f"Key {key} not found among the available algorithm options. Using the simple algorithm")
            return algorithms.eaSimple
        return algo_dict[key]
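A short usage sketch for the dispatcher above (pop, toolbox and hof are hypothetical objects assumed to be built elsewhere):

algo = get_algorithm("mu,lambda")
# pop, log = algo(pop, toolbox, cxpb=0.5, mutpb=0.3, ngen=50, halloffame=hof, verbose=True)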
population = toolbox.population(n=MU)
population2 = toolbox2.population(n=MU)
best_pred = min(population, key=attrgetter("fitness"))
best_prey = max(population2, key=attrgetter("fitness"))
globalLogbook = pd.DataFrame(
    columns=["gen", "nevals", "avg", "std", "min", "max"])
globalLogbook2 = pd.DataFrame(
    columns=["gen", "nevals", "avg", "std", "min", "max"])

for i in range(NUMBER_OF_GENERATIONS):
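    # NOTE: `start` is not a parameter of the stock deap.algorithms.eaMuCommaLambda;
    # the calls below assume a locally modified variant that accepts it.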
    population, logbook = algorithms.eaMuCommaLambda(population,
                                                     toolbox,
                                                     mu=MU,
                                                     lambda_=LAMBDA,
                                                     cxpb=0.2,
                                                     mutpb=0.7,
                                                     ngen=1,
                                                     stats=stats,
                                                     halloffame=fittest,
                                                     verbose=True,
                                                     start=start1)
    population2, logbook2 = algorithms.eaMuCommaLambda(population2,
                                                       toolbox2,
                                                       mu=MU,
                                                       lambda_=LAMBDA,
                                                       cxpb=0.2,
                                                       mutpb=0.7,
                                                       ngen=1,
                                                       stats=stats2,
                                                       halloffame=fittest2,
                                                       verbose=True,