def main():
    pop = toolbox.population(n = POPULATION_SIZE)

    CXPB, MUTPB, NGEN = 0.6, 0.2, 100

    if DEBUG: print("-- Life Span --")
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    for g in xrange(NGEN):
        print("-- Generation %i --" % (g+1))

        elite = toolbox.elite(pop)
        elite = list(map(toolbox.clone, elite))
        offspring = toolbox.select(pop, POPULATION_SIZE-2)
        offspring = list(map(toolbox.clone, offspring))

        for child1, child2 in zip(offspring[::2],offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values


        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        offspring = elite + offspring

        if DEBUG: print("-- Calculating fitness for prole --")
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate,invalid_ind)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        fits = [ind.fitness.values[0] for ind in offspring]
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        std = abs(sum2/length - mean**2)**0.5

        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)

        if DEBUG: print("   Best Chromosome: %s" % tools.selBest(offspring, k=1))

        pop[:] = offspring

    print("   The Walker: %s"% tools.selBest(pop, k = 1))
Example #2
	def train(self, pop = 20, gen = 10):
		from deap import algorithms
		from deap import base
		from deap import creator
		from deap import tools
		from deap.tools import Statistics
		# import random
		

		from scipy.stats import rv_discrete

		# creator.create("FitnessMulti", base.Fitness, weights=(1.0, -1.0))
		# creator.create("Individual", list, fitness=creator.FitnessMulti)

		creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
		creator.create("Individual", list, fitness=creator.FitnessMin)

		toolbox = base.Toolbox()
		# Attribute generator
		custm = rv_discrete(name='custm', values=(self.a_w.index, self.a_w.values))

		toolbox.register("attr_int", custm.rvs)
		# Structure initializers
		toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_int, n=len(self.s))
		toolbox.register("population", tools.initRepeat, list, toolbox.individual, n=pop)

		# Operator registering
		toolbox.register("evaluate", self.eval_classifer)
		toolbox.register("mate", tools.cxUniform, indpb=0.5)
		toolbox.register("mutate", tools.mutUniformInt, low=min(self.a.index), up=max(self.a.index), indpb=0.1)
		toolbox.register("select", tools.selNSGA2)

		MU, LAMBDA = pop, pop
		population = toolbox.population(n=MU)
		hof = tools.ParetoFront()
		
		s = Statistics(key=lambda ind: ind.fitness.values)
		s.register("mean", np.mean)
		s.register("min", min)

		# pop, logbook = algorithms.eaMuPlusLambda(pop, toolbox, mu=MU, lambda_=LAMBDA, cxpb=0.7, mutpb=0.3, ngen=gen, stats=s, halloffame=hof)
		for i in range(gen):
			offspring = algorithms.varAnd(population, toolbox, cxpb=0.95, mutpb=0.1)
			fits = toolbox.map(toolbox.evaluate, offspring)
			for fit, ind in zip(fits, offspring):
				ind.fitness.values = fit

			population = tools.selBest(offspring, int(0.05*len(offspring))) + tools.selTournament(offspring, len(offspring)-int(0.05*len(offspring)), tournsize=3)
			# population = toolbox.select(offspring, k=len(population))
			print s.compile(population)
		top10 = tools.selBest(population, k=10)
		return top10
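
The attribute generator in the example above draws genes from a custom discrete distribution built from self.a_w, apparently a pandas Series mapping candidate values to probabilities. A standalone sketch of that mechanism, with made-up index and probability values, is:

import pandas as pd
from scipy.stats import rv_discrete

a_w = pd.Series([0.5, 0.3, 0.2], index=[1, 2, 3])   # value -> probability (illustrative)
custm = rv_discrete(name='custm', values=(a_w.index, a_w.values))
print(custm.rvs(size=10))   # e.g. array([1, 1, 3, 2, 1, 1, 2, 3, 1, 2])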
Example #3
def main():
    global sut, rgen, nbugs, start, CXPB, MUTPB, NGEN
    sut = sut.sut()
    rgen = random.Random(opts.seed)
    CXPB, MUTPB, NGEN = 0.5, 0.2, 1
    best, timeclock = [], []
    nbugs, g, prev_best = 0, 0, 1

    init()
    start = time.time()

    while True:
        print("Building population")
        pop = toolbox.population(n=100)
        print("  n=%d" % len(pop))

        print("Start of evolution")

        fitnesses = list(map(toolbox.evaluate, pop))
        for ind, fit in zip(pop, fitnesses):
            ind.fitness.values = fit

        while True:
            g += 1
            generation(g, pop)

            best_test = tools.selBest(pop, 1)[0]
            elapsed = time.time()-start
            if 0.02 > (best_test.fitness.values[0] - prev_best) / prev_best or elapsed > (0.95 * opts.timeout):
                break
            else:
                prev_best = max(best_test.fitness.values[0], prev_best)

        best_test = tools.selBest(pop, 1)[0]
        print("Best test has %s lines covered" % int(best_test.fitness.values[0]))
        best.append(best_test.fitness.values[0])

        elapsed = time.time()-start
        if not timeclock:
            timeclock.append(elapsed)
        else:
            timeclock.append(elapsed - timeclock[-1])
        average_time = sum(timeclock) / len(timeclock)
        print("elapsed: %f seconds out of %f seconds" % (elapsed, opts.timeout))

        if (elapsed + average_time) > opts.timeout:
            break

    overall_best = max(best)
    print("Overall best test has %s lines covered" % int(overall_best))

    if (opts.coverage):
        sut.internalReport()
Example #4
	def train(self, pop = 20, gen = 10):
		from deap import algorithms
		from deap import base
		from deap import creator
		from deap import tools
		import random
		import numpy as np

		from deap.tools import Statistics

		# creator.create("FitnessMulti", base.Fitness, weights=(1.0, -1.0))
		# creator.create("Individual", list, fitness=creator.FitnessMulti)

		creator.create("FitnessMax", base.Fitness, weights=(1.0,))
		creator.create("Individual", list, fitness=creator.FitnessMax)

		toolbox = base.Toolbox()
		# Attribute generator
		toolbox.register("attr_bool", random.randint, 0, 1)
		# Structure initializers
		toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=len(self.X.columns))
		toolbox.register("population", tools.initRepeat, list, toolbox.individual, n=pop)

		# Operator registering
		toolbox.register("evaluate", self.eval_classifer)
		toolbox.register("mate", tools.cxUniform, indpb=0.1)
		toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
		# toolbox.register("select", tools.selNSGA2)

		MU, LAMBDA = pop, pop
		population = toolbox.population(n=MU)
		# hof = tools.ParetoFront()
		
		s = Statistics(key=lambda ind: ind.fitness.values)
		s.register("mean", np.mean)
		s.register("max", max)

		# pop, logbook = algorithms.eaMuPlusLambda(pop, toolbox, mu=MU, lambda_=LAMBDA, cxpb=0.7, mutpb=0.3, ngen=gen, stats=s, halloffame=hof)
		for i in range(gen):
			offspring = algorithms.varAnd(population, toolbox, cxpb=0.95, mutpb=0.1)
			fits = toolbox.map(toolbox.evaluate, offspring)
			for fit, ind in zip(fits, offspring):
				ind.fitness.values = fit

			population = tools.selBest(offspring, int(0.05*len(offspring))) + tools.selTournament(offspring, len(offspring)-int(0.05*len(offspring)), tournsize=3)
			# population = toolbox.select(offspring, k=len(population))
			print s.compile(population)
		top10 = tools.selBest(population, k=10)
		print top10
		return top10[0]
Example #5
File: tpot.py Project: kharkovsailing/tpot
 def _combined_selection_operator(self, individuals, k):
     """Regular selection + elitism."""
     best_inds = int(0.1 * k)
     rest_inds = k - best_inds
     return (tools.selBest(individuals, 1) * best_inds +
             tools.selDoubleTournament(individuals, k=rest_inds, fitness_size=3,
                                       parsimony_size=2, fitness_first=True))
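
The expression tools.selBest(individuals, 1) * best_inds replicates the single fittest individual best_inds times, so roughly 10% of the returned slots are elitist copies and the remainder come from a double tournament. A small self-contained sketch of the same pattern (the toy individuals and fitness are assumptions) is:

import random
from deap import base, creator, tools

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

pop = [creator.Individual([random.random() for _ in range(5)]) for _ in range(20)]
for ind in pop:
    ind.fitness.values = (sum(ind),)

k = 10
best_inds = int(0.1 * k)          # 1 elitist copy
rest_inds = k - best_inds         # 9 double-tournament picks
selected = (tools.selBest(pop, 1) * best_inds +
            tools.selDoubleTournament(pop, k=rest_inds, fitness_size=3,
                                      parsimony_size=2, fitness_first=True))
print(len(selected))              # 10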
Example #6
File: idsfunc.py Project: nixor/GANIDS
def selElites(pop): #Selector function

    attkTypes = len(attkUniqs) # e.g. 4; the number of distinct attack types
    attkPop = []
    elitesSub = []
    elitesAll = []

    for i in xrange(attkTypes): #create lists within the attkPop list,
        attkPop.append([]) #one for each attack type

    for i in xrange(attkTypes):
        for j, k in enumerate(pop):
            if k[-1] == attkUniqs[i]: #if last field is the same attack
                attkPop[i].append(k) #type then add to the attkPop

    for i in attkPop:
        elitesSub.append(tools.selBest(i, elitesNo))


    for i in elitesSub: #appending all elites to elitesAll list
        i = list(i for i,_ in itertools.groupby(i)) #eliminate duplicate elites by attk type
        for j in i:
            elitesAll.append(j)

    #for i in elitesAll:
    #    i = list(i for i,_ in itertools.groupby(i))

    return elitesAll #This will be returned to create part of the new generation
Example #7
File: ga.py Project: Piggelinus/Project
def compose(toolbox, window, tonic):
    global chords_in_vector

    population = toolbox.population(n=pop_size)

    NGEN = num_gens

    for gen in range(NGEN):
        offspring = algorithms.varAnd(population, toolbox, cxpb=cx_pb, mutpb=mut_pb)
        fits = toolbox.map(toolbox.evaluate, offspring)

        total_fitness = 0

        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
            total_fitness += fit

        population = toolbox.select(offspring, k=len(population))

        #print("Total fitness in gen %s : %s" % (gen, total_fitness))
        #print("Best : %s" % evaluate(tools.selBest(population, k=1)[0]))

    top = tools.selBest(population, k=1)[0]

    np.set_printoptions(threshold=np.nan)
    #pprint("%s, %s" % (window, top))
    #pprint(evaluate(top))

    return top, evaluate(top)
Example #8
def main_program(pop):    
    HOF = []
    fitnesses = toolbox.map(toolbox.evaluate, pop) # eval. fitness of pop
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    
    for g in range(ngen):  
        if g%5==0:
            print(str(g) + ' of ' + str(ngen))       
        offspring = toolbox.select(pop, len(pop)) #select which individuals to mate
        offspring = list(map(toolbox.clone, offspring))
        
        for child1, child2 in zip(offspring[::2], offspring[1::2]): #determine whether to have a cross over
            if random.random() < cxpb:
                toolbox.mate(child1[0], child2[0])
                del child1.fitness.values, child2.fitness.values
    
        for mutant in offspring: #determine whether to mutate
            if random.random() < mutpb:
                toolbox.mutate(mutant[0])
                del mutant.fitness.values      
        
        invalids = [ind for ind in offspring if not ind.fitness.valid] #assign fitness scores to new offspring
        fitnesses = toolbox.map(toolbox.evaluate, invalids)
        for ind, fit in zip(invalids, fitnesses):
            ind.fitness.values = fit  
        
        pop[:] = offspring #update population with offspring
        log.record(gen=g,**stats.compile(pop))
    return tools.selBest(pop,k=1)[0][0], log, HOF
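
main_program() above records into module-level objects (ngen, cxpb, mutpb, stats, log) that are not shown. A minimal, hypothetical version of those globals, consistent with how the loop uses them, could be:

import numpy as np
from deap import tools

ngen, cxpb, mutpb = 50, 0.5, 0.2   # assumed run parameters
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("min", np.min)
stats.register("max", np.max)
log = tools.Logbook()
log.header = ["gen"] + stats.fields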
Example #9
    def learn(self):
        """ Runs genetic algorithms.

        By using DEAP, generates an initial population and runs for n generations.

        Returns:
            A dict with the features weight and others params.
        """
        solution = {}

        # Generate Population
        population = self.deap_toolbox.population(n=self.population)

        # Evolve
        for gen in range(self.generations):
            offspring = algorithms.varAnd(population, self.deap_toolbox, cxpb=0.5, mutpb=0.1)
            fits = self.deap_toolbox.map(self.deap_toolbox.evaluate, offspring)
            for fit, ind in zip(fits, offspring):
                ind.fitness.values = fit
            population = self.deap_toolbox.select(offspring, k=len(population))

        # Select best
        best = tools.selBest(population, k=1)[0]
        best_fitness, = self.fitness_wrapper(best)
        revisions_weight, fixes_weight, authors_weight = self.decode_individual(best)

        solution["revisions"] = revisions_weight
        solution["fixes"] = fixes_weight
        solution["authors"] = authors_weight
        solution["fitness"] = best_fitness
        solution["bits"] = self.bits
        solution["generations"] = self.generations

        return solution
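
decode_individual() and fitness_wrapper() are class methods that this snippet does not show. As a purely hypothetical illustration of the kind of decoding such a method might perform, the sketch below splits a bit-list into three fixed-width integer weights; the field width and the lack of scaling are assumptions, not the project's actual encoding.

def decode_individual(individual, bits=8):
    # split the chromosome into three equal-width binary fields
    fields = [individual[i * bits:(i + 1) * bits] for i in range(3)]
    to_int = lambda field: int("".join(map(str, field)), 2)
    revisions_weight, fixes_weight, authors_weight = (to_int(f) for f in fields)
    return revisions_weight, fixes_weight, authors_weight

print(decode_individual([0, 1] * 12))   # three 8-bit fields -> (85, 85, 85)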
Example #10
File: acegafunc.py Project: nixor/GANIDS
def aceComparison(elites):
    supremes = []

    for i in uniq_attack:
        space = []
        jail = []
        for j in elites:
            if j[-1] == i:
                space.append(j)
        global ace
        ace = tools.selBest(space, 1)
        ace = ace[0]

        if fitnessDiff_opt == True:
            for idx, ind in enumerate(space):
                if (((ace.fitness.values[0] - ind.fitness.values[0]) <= fitnessDiff_value) and (idx > 0)):
                    jail.append(ind)
            for ind in jail:
                space.remove(ind)

        if matchEliminate_opt == True:
            jail = []
            for idx, ind in enumerate(space):
                if matchEliminate(ace, ind) and ind != ace:
                    jail.append(ind)
            for ind in jail:
                space.remove(ind)

        for i in space:
            supremes.append(i)

    return supremes
Example #11
def example_nevzpominam():
    # Creating appropriate type
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    # Initialization
    IND_SIZE = 100
    toolbox = base.Toolbox()
    toolbox.register("attr_bool", random.randint, 0, 1)
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=IND_SIZE)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", evalOneMax)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)

    population = toolbox.population(n=300)

    NGEN=40

    for gen in range(NGEN):
        offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.1)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = toolbox.select(offspring, k=len(population))
    top10 = tools.selBest(population, k=10)
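
The evalOneMax function registered above is not shown in this snippet; in DEAP's standard OneMax example it simply counts the 1-bits of the individual and returns the count as a 1-tuple:

def evalOneMax(individual):
    return sum(individual),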
Example #12
def main():
    NGEN = 10
    MU = 25
    LAMBDA = 25
    CXPB = 0.7
    MUTPB = 0.2
    
    random.seed(60)

    print("Beginning Initial Learning\n")
    apriori.learn(MIN_SUPPORT_THRESHOLD, MIN_CONF_THRESHOLD, DEFAULT_COVERAGE_THRESHOLD, verbose = True)
    print("\n\nInitial Learning complete")
    
    pop = toolbox.population(n=MU)
    hof = tools.ParetoFront()
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)
    
    result = algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,
                              halloffame=hof)
    
    
    best_ind = tools.selBest(pop, 1)[0]

    logbook = result[1]
    print("Best individual found using GA is %s, %s" % (best_ind, best_ind.fitness.values))
    return logbook.select("gen", "avg", "min", "max")
Example #13
File: gasnids.py Project: nixor/GANIDS
def selElites(pop): #Selector function
    """ Needs **UPDATE**
The basic idea of this selector is to:
1. Accept the generated individuals
2. Categorize individuals into different attack type lists
3. Then append them back together to pass on as elites
for the next generation.
"""
    attkTypes = len(attkUniqs) # e.g. 4; the number of distinct attack types
    attkPop = []
    elitesSub = []
    elitesAll = []

    for i in xrange(attkTypes): #create lists within the attkPop list,
        attkPop.append([]) #one for each attack type

    for i in xrange(attkTypes):
        for j, k in enumerate(pop):
            if k[-1] == attkUniqs[i]: #if last field is the same attack
                attkPop[i].append(k) #type then add to the attkPop

    for i in attkPop:
        elitesSub.append(tools.selBest(i, elitesNo))


    for i in elitesSub: #appending all elites to elitesAll list
        i = list(i for i,_ in itertools.groupby(i)) #eliminate duplicate elites by attk type
        for j in i:
            elitesAll.append(j)

    #for i in elitesAll:
    #    i = list(i for i,_ in itertools.groupby(i))

    return elitesAll #This will be returned to create part of the new generation
Example #14
def compose(params):
    global window, chords_in_vector

    start = time()

    pop_size = 35     # int(params[0])
    num_gens = 65     # int(params[1])
    tourn_size = int(params[0])
    ind_pb = params[1]
    cx_pb = params[2]
    mut_pb = params[3]

    creator.create("FitnessMulti", base.Fitness, weights=(-1.0, 1.0))
    creator.create("Individual", list, fitness=creator.FitnessMulti)

    toolbox = base.Toolbox()

    IND_SIZE = chords_in_vector - len(window)

    toolbox.register("attr_chord", cg.generate_chord)
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_chord, n=IND_SIZE)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    toolbox.register("evaluate", predict)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", mutate_individual, indpb=ind_pb)
    toolbox.register("select", tools.selTournament, tournsize=tourn_size)

    population = toolbox.population(n=pop_size)

    NGEN = num_gens
    for gen in range(NGEN):
        offspring = algorithms.varAnd(population, toolbox, cxpb=cx_pb, mutpb=mut_pb)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = toolbox.select(offspring, k=len(population))

    top = tools.selBest(population, k=1)[0]

    np.set_printoptions(threshold=np.nan, suppress=True)
    #pprint("%s, %s" % (window, top))
    #pprint(predict(top))

    score = predict(top)

    end = time()

    run_time = end - start

    #time_dist = power(absolute(20 - run_time), 2) + 1
    #f = -log(score[1] / time_dist)

    f = -log(score[1]) * 100000000

    print(params)
    print("{:2.10f}".format(f))
    results.append([list(params), f])
    return f
Example #15
    def select_nextGen(self):
        """
        Selects the components for the next generation
        """

        #TODO: Complete this function
        next_gen_pop = dict();


        model_pick = int(self.params['percentNextGen'] * self.params['model_pop_size']);
        conn_pick = int(self.params['percentNextGen'] * self.params['conn_pop_size']);


        ##Select node
        next_gen_pop['node_pop'] = self.pop['node_pop']; #move all to next generation

        ##Select weights
        next_gen_pop['connWeights_IH_pop'] = tools.selBest(self.pop['connWeights_IH_pop'],conn_pick);
        next_gen_pop['connWeights_HH_pop'] = tools.selBest(self.pop['connWeights_HH_pop'],conn_pick);
        next_gen_pop['connWeights_HO_pop'] = tools.selBest(self.pop['connWeights_HO_pop'],conn_pick);

        ##Select connectivity
        next_gen_pop['connActive_IH_pop'] = tools.selBest(self.pop['connActive_IH_pop'],conn_pick);
        next_gen_pop['connActive_HH_pop'] = tools.selBest(self.pop['connActive_HH_pop'],conn_pick);
        next_gen_pop['connActive_HO_pop'] = tools.selBest(self.pop['connActive_HO_pop'],conn_pick);

        ##select model
        next_gen_pop['model_pop'] = tools.selBest(self.pop['model_pop'],model_pick);

        return next_gen_pop;
Example #16
 def learn(self):
     weights = {}
     population = self.toolbox.population(n=FeatureWeightLearner.POPULATION)
     NGEN = FeatureWeightLearner.GENERATIONS
     for gen in range(NGEN):
         offspring = algorithms.varAnd(population, self.toolbox, cxpb=0.5, mutpb=0.1)
         fits = self.toolbox.map(self.toolbox.evaluate, offspring)
         for fit, ind in zip(fits, offspring):
             ind.fitness.values = fit
         population = self.toolbox.select(offspring, k=len(population))
     top10 = tools.selBest(population, k=10)
     best = tools.selBest(population, k=1)[0]
     #print(top10)
     revisions_weight, fixes_weight, authors_weight = FeatureWeightLearner.decode_individual(best)
     weights["revisions"] = revisions_weight
     weights["fixes"] = fixes_weight
     weights["authors"] = authors_weight
     return weights
Example #17
def main():
    pop = toolbox.population(n=50)  # number of chromosomes (population size)
    print(pop)
    CXPB, MUTPB, NGEN = 0.5, 0.2, 40  # crossover rate, mutation rate, number of generations
    '''
    # CXPB  is the probability with which two individuals
    #       are crossed
    #
    # MUTPB is the probability for mutating an individual
    #
    # NGEN  is the number of generations for which the
    #       evolution runs
    '''
    # Evaluate the entire population
    fitnesses = map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
        #print(ind.fitness.values )
    print("  Evaluated %i individuals" % len(pop))
    print("-- Iterative %i times --" % NGEN)

    for g in range(NGEN):
        #if g % 1 == 0:
            #print("-- Generation %i --" % g)
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))
        # Convert the map object to a list (the example in the official documentation omits this)
        # Apply crossover and mutation on the offspring to produce the new generation
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Re-evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
            print(ind.fitness.values)

        # The population is entirely replaced by the offspring
        pop[:] = offspring

    print("-- End of (successful) evolution --")

    best_ind = tools.selBest(pop, 1)[0]

    return best_ind, best_ind.fitness.values  # return the best individual and its fitness value
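
A minimal usage sketch for this main(), assuming the module-level creator/toolbox setup that the snippet relies on is already in place:

if __name__ == "__main__":
    best_ind, best_fit = main()
    print("Best individual is %s with fitness %s" % (best_ind, best_fit))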
Example #18
def output_results(pop, logbook):
    print logbook    
    print '\n'
    for p in pop:
        for i in individual_to_tollgates(p):
            print i
    print '\nBEST\n\n'
    for i in individual_to_tollgates(tools.selBest(pop, 1)[0]):
        print i
Example #19
def main(dictionary, g1, g2, g3):
    global features
    preprocess_data(dictionary, g1, g2, g3)
    data = pd.read_csv('student-mat-pre.csv', delimiter = ';')
    cols = list(data.columns)
    cols.remove('G1')
    cols.remove('G2')
    cols.remove('G3')

    for col in cols:
        data[col] = (data[col] - data[col].mean()) / data[col].std(ddof=0)

    data.to_csv('temp')
    features = []

    with open('temp') as f:
        for index, line in enumerate(f):
            params = line.strip().split(',')

            if index != 0:
                for i in range(len(params)):
                    params[i] = float(params[i])

            features.append(params[1:-3])

    features = features[1:len(features)]

    tree_c = DecisionTreeClassifier(criterion='entropy')
    gnb_c = GaussianNB()

    creator.create('FitnessMax', base.Fitness, weights=(1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()
    toolbox.register('bit', random.random)
    toolbox.register('individual', tools.initRepeat, creator.Individual, toolbox.bit, n=30)
    toolbox.register('population',tools.initRepeat, list, toolbox.individual, n=200)
    toolbox.register('evaluate', fitness_value)
    toolbox.register('mate', tools.cxUniform, indpb=0.1)
    toolbox.register('mutate', tools.mutFlipBit, indpb=0.05)
    toolbox.register('select', tools.selNSGA2)

    population = toolbox.population()
    fits = toolbox.map(toolbox.evaluate, population)

    for fit, ind in zip(fits, population):
        ind.fitness.values = (fit,)

    for gen in range(100):
        offspring = algorithms.varOr(population, toolbox, lambda_ = 10, cxpb=0.5, mutpb=0.1)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = (fit,)
        population = toolbox.select(offspring+population, k=20)

    individual = tools.selBest(population, k=1)
Example #20
File: advanced.py Project: Gab0/e-vchess
def evolve_machines(POP):
    population = POP

    offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.1)

    # Evaluate the varied offspring and assign fitness values as tuples
    fits = toolbox.map(toolbox.evaluate, offspring)
    for fit, ind in zip(fits, offspring):
        ind.fitness.values = fit
    population = toolbox.select(offspring, k=len(population))
    top10 = tools.selBest(population, k=10)

    return population
Example #21
	def evaluate_possible_solutions(self):
		
		invalid_ind = [ind for ind in self.offspring if not ind.fitness.valid]
		fitnesses = map(self.toolbox.evaluate, invalid_ind)
		for ind, fit in zip(invalid_ind, fitnesses):
			ind.fitness.values = fit
		
		if self.offspring != []:
			self.pop[:] = self.offspring
		
		self.steady_state_crossover()
		
		self.best_solution = tools.selBest(self.pop, 1)[0]
Example #22
def loop(pop, c_prob, m_prob, max_loop):
    for i in xrange(max_loop):
        # Display the best individual
        best = tools.selBest(pop, 1)[0]
        print i, "generation:", best.fitness.values
        # Next-generation population
        next_pop = toolbox.select(pop, len(pop))
        next_pop = list(map(toolbox.clone, next_pop))
        # Crossover
        for i in xrange(len(next_pop)):
            for j in xrange(i, len(next_pop)):
                if random.random() < c_prob:
                    toolbox.crossover(next_pop[i], next_pop[j])
        # Mutation
        for x in next_pop:
            if random.random() < m_prob:
                toolbox.mutate(x)
        # Evaluation
        evaluate_pop(next_pop)
        # Generational replacement
        pop = next_pop
    return tools.selBest(pop, 1)[0] # return the best individual
Example #23
    def getBestIndividual(self):
        """Determines the best individual.
        """
        best_ind = tools.selBest(self.population, 1)[0]

        # If the first or the last point is not present, the original line should be returned
        if best_ind.fitness.values[0] > len(self.original.coords):
#             print "Returning the original"
            return self.original.to_wkb()

        wkb = self.createSimplifiedLine(best_ind)

        return wkb
Example #24
def evolve(toolbox):
    pop = toolbox.population(n = POPULATION_SIZE)
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    for g in range(NGEN):
        print("-- Generation %i --" % g)

        elite = toolbox.elite(pop)
        elite = list(map(toolbox.clone, elite))
        offspring = toolbox.select(pop, POPULATION_SIZE-2)
        offspring = list(map(toolbox.clone, offspring))

        for child1, child2 in zip(offspring[::2],offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values


        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        offspring = elite + offspring

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        if len(offspring) > 0:
            pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length

        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)

        if min(fits) < 2:
            break

    return tools.selBest(pop, k = 1)[0]
Example #25
File: main.py Project: 4sp1r3/monad
def main():
    # Generate the population
    pop = toolBox.toolbox.population(n=POPULATION_SIZE)

    hof = tools.HallOfFame(1)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop, toolBox.toolbox, cxpb=CROSS_OVER_PROB,
                                   mutpb=MUTATION_PROB, ngen=NO_OF_GENERATION, stats=stats,
                                   halloffame=hof, verbose=True)



    ## Evaluate the entire population
    #fitnesses = list(map(toolBox.toolbox.evaluate, pop))
    #for ind, fit in zip(pop, fitnesses):

    #    ind.fitness.values = fit

    # Iterate through a number of generations
    # for g in range(NGEN):
    #    print("-- Generation %i --" % g)
    #    # Select individuals based on their fitness
    #    offspring = toolBox.toolbox.select(pop, len(pop))
    #    # Cloning those individuals into a new population
    #    offspring = list(map(toolBox.toolbox.clone, offspring))

    #    # Calling the crossover function
    #    crossover(offspring)
    #    mutation(offspring)

    #    invalidfitness(offspring)

    # The Best Individual found
    best_ind = tools.selBest(pop, 1)[0]
    individual = sorted(best_ind, key=itemgetter(3))
    individual = sorted(individual, key=itemgetter(0))
    #print "InsertBusTrip and TimeTable......"
    print("Best individual is %s, %s" % (individual, best_ind.fitness.values))
    print ("Length of best individual: " + str(len(best_ind)))
    fitnessClass = Fitness()
    timetable = fitnessClass.genTimetable(best_ind)
    databaseClass = DB()
    #databaseClass.insertBusTrip(timetable)
    evaluate_timetable.eval(best_ind)
Example #26
def test_cma():
    NDIM = 5

    strategy = cma.Strategy(centroid=[0.0]*NDIM, sigma=1.0)
    
    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.sphere)
    toolbox.register("generate", strategy.generate, creator.__dict__[INDCLSNAME])
    toolbox.register("update", strategy.update)

    pop, _ = algorithms.eaGenerateUpdate(toolbox, ngen=100)
    best, = tools.selBest(pop, k=1)

    assert best.fitness.values < (1e-8,), "CMA algorithm did not converge properly."
Example #27
def eaSimple(population, toolbox, cxpb, mutpb, ngen, stats=None,
             halloffame=None, verbose=__debug__):
    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    if halloffame is not None:
        halloffame.update(population)

    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print logbook.stream

    # Begin the generational process
    gen = 1
    while evalNumThreaten(tools.selBest(population, k=1)[0])[0] != 0:
        gen += 1
        # Select the next generation individuals
        offspring = toolbox.select(population, len(population))

        # Vary the pool of individuals
        offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)

        # Replace the current population by the offspring
        population[:] = offspring

        # Append the current generation statistics to the logbook
        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print logbook.stream
    print 'total generations is',gen
    return population, logbook
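
This eaSimple variant keeps evolving until the best individual threatens zero queens, so the ngen argument is effectively ignored. The invocation sketch below is hedged: the board size, the operators and the evalNumThreaten implementation are assumptions made only so the snippet is self-contained.

import random
from deap import base, creator, tools

N = 8   # assumed board size

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)

def evalNumThreaten(individual):
    # count pairs of queens that share a diagonal (columns form a permutation)
    threats = sum(1 for i in range(N) for j in range(i + 1, N)
                  if abs(individual[i] - individual[j]) == j - i)
    return threats,

toolbox = base.Toolbox()
toolbox.register("permutation", random.sample, range(N), N)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.permutation)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", evalNumThreaten)
toolbox.register("mate", tools.cxPartialyMatched)
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)

pop, logbook = eaSimple(toolbox.population(n=100), toolbox, cxpb=0.7, mutpb=0.2, ngen=0)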
Example #28
    def run(self):
        meta_spec = self.experiment.spec['meta']
        ray.init(**meta_spec.get('resources', {}))
        max_generation = meta_spec['max_generation']
        pop_size = meta_spec['max_trial'] or calc_population_size(self.experiment)
        logger.info(f'EvolutionarySearch max_generation: {max_generation}, population size: {pop_size}')
        trial_data_dict = {}
        config_hash = {}  # config hash_str to trial_index

        toolbox = self.init_deap()
        population = toolbox.population(n=pop_size)
        for gen in range(1, max_generation + 1):
            logger.info(f'Running generation: {gen}/{max_generation}')
            ray_id_to_config = {}
            pending_ids = []
            for individual in population:
                config = dict(individual.items())
                hash_str = util.to_json(config, indent=0)
                if hash_str not in config_hash:
                    trial_index = self.experiment.info_space.tick('trial')['trial']
                    config_hash[hash_str] = config['trial_index'] = trial_index
                    ray_id = run_trial.remote(self.experiment, config)
                    ray_id_to_config[ray_id] = config
                    pending_ids.append(ray_id)
                individual['trial_index'] = config_hash[hash_str]

            trial_data_dict.update(get_ray_results(pending_ids, ray_id_to_config))

            for individual in population:
                trial_index = individual.pop('trial_index')
                trial_data = trial_data_dict.get(trial_index, {'fitness': 0})  # if trial errored
                individual.fitness.values = trial_data['fitness'],

            preview = 'Fittest of population preview:'
            for individual in tools.selBest(population, k=min(10, pop_size)):
                preview += f'\nfitness: {individual.fitness.values[0]}, {individual}'
            logger.info(preview)

            # prepare offspring for next generation
            if gen < max_generation:
                population = toolbox.select(population, len(population))
                # Vary the pool of individuals
                population = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.5)

        ray.worker.cleanup()
        return trial_data_dict
Example #29
def showResults(World):
    ValidationDataset =\
        promoterz.evaluation.gekko.globalEvaluationDataset(World.EnvironmentParameters,
                                                           World.genconf.deltaDays,
                                                           World.genconf.proofSize)
    for LOCALE in World.locales:
        LOCALE.population = [ ind for ind in LOCALE.population if ind.fitness.valid ]
        B = World.genconf.finaltest['NBBESTINDS']
        BestIndividues = tools.selBest(LOCALE.population,B)

        Z = min(World.genconf.finaltest['NBADDITIONALINDS'], len(LOCALE.population)-B)
        Z = max(0, Z)  # clamp at zero so a negative count is never requested
        print("Selecting %i+%i individuals, random test;" % (B, Z))
        AdditionalIndividues = promoterz.evolutionHooks.Tournament(LOCALE.population, Z, Z*2)

        print("%i selected;" % len(AdditionalIndividues))
        AdditionalIndividues = [ x for x in AdditionalIndividues\
                                 if x not in BestIndividues ]

        FinalIndividues = BestIndividues + AdditionalIndividues

        print("%i selected;" % len(FinalIndividues))
        for FinalIndividue in FinalIndividues:
            proof = stratSettingsProofOfViability
            AssertFitness, FinalProfit = proof(World,
                                              FinalIndividue,
                                              ValidationDataset)
            print("Testing Strategy:\n")
            if AssertFitness or FinalProfit > 50:
                print("Following strategy is viable.")
            else:
                print("Strategy Fails.")
            FinalIndividueSettings = World.tools.constructPhenotype(
                    FinalIndividue)

            Show = json.dumps(FinalIndividueSettings, indent=2)
            logInfo("~" * 18)
            
            logInfo(" %.3f final profit ~~~~" % FinalProfit)
            print(" -- Settings for Gekko config.js -- ")
            print(Show)
            print(" -- Settings for Gekko --ui webpage -- ")
            logInfo(pasteSettingsToUI(FinalIndividueSettings))
            
            print("\nRemember to check MAX and MIN values for each parameter.")
            print("\tresults may improve with extended ranges.")
Example #30
def main():
    pop = toolbox.population(n=50)
    print(pop)
    CXPB, MUTPB, NGEN = 0.5, 0.2, 40
    
    fitnesses = map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
        #print(ind.fitness.values )
    print("  Evaluated %i individuals" % len(pop))
    print("-- Iterative %i times --" % NGEN)

    for g in range(NGEN):
        offspring = toolbox.select(pop, len(pop))
        
        offspring = list(map(toolbox.clone, offspring))
        
        
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)

        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
            print(ind.fitness.values)

        
        pop[:] = offspring

    print("-- End of (successful) evolution --")

    best_ind = tools.selBest(pop, 1)[0]

    return best_ind, best_ind.fitness.values
Example #31
def main():
    global snake
    global pset

    ## THIS IS WHERE YOUR CORE EVOLUTIONARY ALGORITHM WILL GO #
    #random.seed(318)
    pop = toolbox.population(n=POP_SIZE)
    hof = tools.HallOfFame(5)

    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", lambda val: round(numpy.mean(val), 2))
    mstats.register("std", lambda val: round(numpy.std(val), 2))
    mstats.register("min", numpy.min)
    mstats.register("max", numpy.max)

    start = timer()

    try:
        pop, log = algorithms.eaSimple(
            pop,
            toolbox,
            CRX_PB,  # CHANCE OF CROSSOVER
            MUT_PB,  # CHANCE OF MUTATION
            TOTAL_GENS,  # NO Generations
            halloffame=hof,
            verbose=True,
            stats=mstats)
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()
        raise KeyboardInterrupt

    end = timer()

    # Total score as..
    # Attempt parsimony length prevention first
    best = tools.selBest(pop, 1)
    for ind in best:
        runs = []

        for run in range(500):
            #displayStrategyRun(ind)
            runs.append(runGame(ind)[0])

        print("Max:   " + str(max(runs)))
        print("Mean:  " + str(numpy.mean(runs)))
        print("St.dv: " + str(numpy.std(runs)))

        nodes, edges, labels = gp.graph(ind)
        g = nx.Graph()
        g.add_nodes_from(nodes)
        g.add_edges_from(edges)
        pos = nx.graphviz_layout(g, prog="dot")

        nx.draw_networkx_nodes(g, pos)
        nx.draw_networkx_edges(g, pos)
        nx.draw_networkx_labels(g, pos, labels)
        plt.show()

        while True:
            #displayRunPythonista(ind)
            displayStrategyRun(ind)
Example #32
def onGA(fleet: Fleet,
         hp: HyperParameters,
         critical_points: Dict,
         save_to: str = None,
         best_ind: IndividualType = None,
         savefig=False):
    customers_to_visit = {
        ev_id: fleet.vehicles[ev_id].assigned_customers
        for ev_id in fleet.vehicles_to_route
    }
    indices = block_indices(customers_to_visit, hp.r)

    # Fitness objects
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual",
                   list,
                   fitness=creator.FitnessMin,
                   feasible=False,
                   acceptable=False)

    # Toolbox
    toolbox = base.Toolbox()

    toolbox.register("individual",
                     random_individual,
                     indices=indices,
                     starting_points=critical_points,
                     customers_to_visit=customers_to_visit,
                     charging_stations=fleet.network.charging_stations,
                     allowed_charging_operations=hp.r)
    toolbox.register("evaluate",
                     fitness,
                     indices=indices,
                     critical_points=critical_points,
                     fleet=fleet,
                     hp=hp)
    toolbox.register("mate",
                     crossover,
                     indices=indices,
                     allowed_charging_operations=hp.r,
                     index=None)
    toolbox.register("mutate",
                     mutate,
                     indices=indices,
                     starting_points=critical_points,
                     customers_to_visit=customers_to_visit,
                     charging_stations=fleet.network.charging_stations,
                     allowed_charging_operations=hp.r,
                     index=None)
    toolbox.register("select",
                     tools.selTournament,
                     tournsize=hp.tournament_size)
    toolbox.register("select_worst", tools.selWorst)
    toolbox.register("decode",
                     decode,
                     indices=indices,
                     critical_points=critical_points,
                     fleet=fleet,
                     hp=hp)

    # BEGIN ALGORITHM
    t_init = time.time()

    # Random population
    if best_ind is None:
        pop = [
            creator.Individual(toolbox.individual())
            for i in range(hp.num_individuals)
        ]
    else:
        pop = [creator.Individual(best_ind)]
        pop += [
            creator.Individual(toolbox.individual())
            for i in range(hp.num_individuals - 1)
        ]

    # Evaluate the initial population and get fitness of each individual
    for k, ind in enumerate(pop):
        fit, feasible, acceptable = toolbox.evaluate(ind)
        ind.fitness.values = (fit, )
        ind.feasible = feasible
        ind.acceptable = acceptable

    print(f'  Evaluated {len(pop)} individuals')
    bestOfAll = tools.selBest(pop, 1)[0]
    print(
        f"Best individual  : {bestOfAll}\n Fitness: {bestOfAll.fitness.wvalues[0]} Feasible: {bestOfAll.feasible}"
    )

    # These will save statistics
    cs_capacity = fleet.network.nodes[
        fleet.network.charging_stations[0]].capacity
    opt_data = GenerationsData([], [], [], [], [], [], fleet, hp, bestOfAll,
                               bestOfAll.feasible, bestOfAll.acceptable,
                               len(fleet), cs_capacity)
    print("################  Start of evolution  ################")
    # Begin the evolution
    for g in range(hp.max_generations):
        # A new generation
        print(f"-- Generation {g}/{hp.max_generations} --")
        opt_data.generations.append(g)

        # Select the best individuals, if given
        if hp.elite_individuals:
            best_individuals = list(
                map(toolbox.clone, tools.selBest(pop, hp.elite_individuals)))

        # Select and clone the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        offspring = list(map(toolbox.clone, offspring))

        # Mutation
        for mutant in offspring:
            if np.random.random() < hp.MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Crossover
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if np.random.random() < hp.MUTPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        # Evaluate the individuals with invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        for ind in invalid_ind:
            fit, feasible, acceptable = toolbox.evaluate(ind)
            ind.fitness.values = (fit, )
            ind.feasible = feasible
            ind.acceptable = acceptable

        print(f'  Evaluated {len(invalid_ind)} individuals')

        # The population is entirely replaced by a sorted offspring
        pop[:] = offspring
        pop[:] = tools.selBest(pop, len(pop))

        # Insert best individuals from previous generation
        if hp.elite_individuals:
            pop[:] = best_individuals + pop[:-hp.elite_individuals]

        # Update best individual
        bestInd = tools.selBest(pop, 1)[0]
        if bestInd.fitness.wvalues[0] > bestOfAll.fitness.wvalues[0]:
            bestOfAll = bestInd

        # Real-time info
        print(
            f"Best individual  : {bestInd}\n Fitness: {bestInd.fitness.wvalues[0]} Feasible: {bestInd.feasible}"
        )

        worstInd = tools.selWorst(pop, 1)[0]
        print(
            f"Worst individual : {worstInd}\n Fitness: {worstInd.fitness.wvalues[0]} Feasible: {worstInd.feasible}"
        )

        print(
            f"Curr. best-of-all: {bestOfAll}\n Fitness: {bestOfAll.fitness.wvalues[0]} Feasible: {bestOfAll.feasible}"
        )

        # Statistics
        fits = [sum(ind.fitness.wvalues) for ind in pop]
        mean = np.average(fits)
        std = np.std(fits)

        print(f"Max {max(fits)}")
        print(f"Min {min(fits)}")
        print(f"Avg {mean}")
        print(f"Std {std}")

        opt_data.best_fitness.append(-max(fits))
        opt_data.worst_fitness.append(-min(fits))
        opt_data.average_fitness.append(mean)
        opt_data.std_fitness.append(std)
        opt_data.best_individuals.append(bestInd)

        print()

    t_end = time.time()
    print("################  End of (successful) evolution  ################")

    algo_time = t_end - t_init
    print('Algorithm time:', algo_time)

    fit, feasible, acceptable = toolbox.evaluate(bestOfAll)
    routes = toolbox.decode(bestOfAll)

    opt_data.bestOfAll = bestOfAll
    opt_data.feasible = feasible
    opt_data.acceptable = acceptable
    opt_data.algo_time = algo_time
    opt_data.fleet = fleet

    if save_to:
        path = save_to
        try:
            os.mkdir(path)
        except FileExistsError:
            pass
        opt_data.save_opt_data(path, savefig=savefig)

    for r in routes.values():
        L = r[0][1]
        if sum([1 for Lk in L if Lk < 0]):
            print('negative')
    return routes, opt_data, toolbox
Example #33
if __name__ == "__main__":
#    random.seed(64)
    N_POPULATION = N_PROCESSES
    
    pool = multiprocessing.Pool(processes=N_PROCESSES)
    toolbox.register("map", pool.map)
    
    pop = toolbox.population(n=N_POPULATION)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    algorithms.eaSimple(pop, toolbox, cxpb=0.3, mutpb=0.1, ngen=N_GENERATIONS, 
                        stats=stats, halloffame=hof, verbose=0)

    pool.close()

    best_ind = tools.selBest(pop, 1)[0]

    

#    print("Best individual of current population is %s, %s" % (best_ind, best_ind.fitness.values))
#    print("Best individual ever is %s, %s" % (hof[0],hof[0].fitness.values))
    if N_GENERATIONS > 20:
        with open(r'output/results_ga_3.txt', 'a') as output_file:
            output_file.write("\n" + str(best_ind.fitness.values[0]) + "    " + str(N_BLOCKS) + "    " + str(N_GENERATIONS) + "    " + str(DAYS) + "    " + str(time.ctime()) + "    " + str(hof[0]))

Example #34
    def run_algorithm(self, seed):
        # Set up custom hall of fame to keep track of the top chromosomes and their generations
        hall_of_fame = hof.HallOfFame(self.HOF_SIZE)
        # set up standard hall of fame that comes with DEAP
        hall_of_fame_with_dupes = tools.HallOfFame(self.HOF_SIZE)
        # Get date and time
        date_time = datetime.datetime.now().strftime("%m-%d_%I%M%p")

        # print out to file
        file_name = date_time + '.txt'
        sys.stdout = open(file_name, 'w')

        print 'Seed:', seed

        i = 0
        while i < self.GENERATIONS and not self.isConverged:
            i += 1
            print('--------------------------------' + 'Generation: ' +
                  str(i) + '-----------------------------------')
            # evaluate each chromosome in the population and assign its fitness score
            for index, x in enumerate(self.population):
                # update the chromosome, write out to JSON tactical file
                chromosome_parameters.update_chromosome(
                    x[0].value, x[1].value, x[2].value, x[3].value, x[4].value)
                # use Ace0 to evaluate the chromosome
                x.fitness.values = helper.evaluate(self.repetitions)

            # Select the best chromosome from this generation and display it
            best_chromosome = tools.selBest(self.population, 1)[0]
            print "Best chromosome is: ", helper.list_to_string(
                best_chromosome), best_chromosome.fitness.values

            # Select worst chromosome and display
            worst_chromosome = tools.selWorst(self.population, 1)[0]
            print "Worst chromosome is: ", helper.list_to_string(
                worst_chromosome), worst_chromosome.fitness.values

            # Get the over all fitness values
            sum_fits = sum(ind.fitness.values[0] for ind in self.population)
            average_fitness = sum_fits / self.POP
            print 'Generation average fitness: ', average_fitness

            # save best and average fitness to plot lists
            self.plot_best_fitness.append(best_chromosome.fitness.values)
            self.plot_average_fitness.append(average_fitness)
            self.plot_worst_fitness.append(worst_chromosome.fitness.values)

            # Update the hall of fame to track the best chromosomes from each generation
            hall_of_fame.update(self.population, i)
            hall_of_fame_with_dupes.update(self.population)

            # hall_of_fame.print_hof()

            # this is where we evolve the population
            # Select the next generation individuals
            offspring = self.toolbox.select(self.population,
                                            len(self.population))
            # Clone the selected individuals so we can apply crossover
            offspring = list(map(self.toolbox.clone, offspring))

            # Apply crossover on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < self.CROSSOVER_PROBABILITY:
                    # mate the two children
                    self.toolbox.mate(child1, child2)

            # Apply mutation on the offspring
            for mutant in offspring:
                if random.random() < self.MUTATION_PROBABILITY:
                    # print 'Mutated Chromosome before: ', helper.list_to_string(mutant)
                    for index, x in enumerate(mutant):
                        mutant[index].value = helper.convert_range(
                            mutant[index].value, mutant[index].min,
                            mutant[index].max)

                    self.toolbox.mutate(mutant)

                    for index, x in enumerate(mutant):
                        mutant[index].value = helper.change_back(
                            mutant[index].value, mutant[index].min,
                            mutant[index].max)
                        helper.bounds_check(mutant[index])

                    # print 'Mutated Chromosome after: ', helper.list_to_string(mutant)

            # The population is entirely replaced by the offspring
            self.population[:] = offspring

            if float(best_chromosome.fitness.values[0]
                     ) - average_fitness < 0.0001:
                self.converge_tracker += 1
                if self.converge_tracker >= self.converge_tracker_max:
                    print 'CONVERGED'
                    self.isConverged = True
            else:
                self.converge_tracker = 0

            # # Elitism
            self.population[0] = hall_of_fame_with_dupes[0]

        print(
            '-------------------------------------Hall Of Fame Regular----------------------------------------'
        )
        for chromosomes in hall_of_fame_with_dupes:
            print 'Chromosome: ', helper.list_to_string(
                chromosomes), 'Fitness: ', chromosomes.fitness

        print(
            '-------------------------------------Hall Of Fame with Gen----------------------------------------'
        )
        hall_of_fame.print_hof()

        print(
            '-------------------------------------Stats----------------------------------------'
        )
        print("Pop size: " + str(self.POP))
        print("Generations: " + str(self.GENERATIONS))
        print("Crossover Prob: " + str(self.CROSSOVER_PROBABILITY))
        print("Mutation Prob: " + str(self.MUTATION_PROBABILITY))

        # Select the best chromosome from this generation and display it
        best_chromosome = tools.selBest(self.population, 1)[0]
        print "Best chromosome is: ", helper.list_to_string(
            best_chromosome), best_chromosome.fitness.values

        # Select worst chromosome and display
        worst_chromosome = tools.selWorst(self.population, 1)[0]
        print "Worst chromosome is: ", helper.list_to_string(
            worst_chromosome), worst_chromosome.fitness.values

        # Get the over all fitness values
        sum_fits = sum(ind.fitness.values[0] for ind in self.population)
        average_fitness = sum_fits / self.POP
        print 'Generation average fitness: ', average_fitness

        title = 'Seed: ' + str(seed)

        visualization.draw_plot(title, self.plot_average_fitness,
                                self.plot_best_fitness,
                                self.plot_worst_fitness,
                                'average per generation', 'best fitness',
                                'worst fitness', 'generation', 'fitness', 1,
                                250, 150, date_time)

        del creator.fitness
        del creator.Tactic
Example #35
def parameter_tuning(X_train, feat_vec, seizure_times, f_s, window_length,
                     window_overlap):

    #creating types
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMax)

    toolbox = base.Toolbox()

    t_per_min = 10 * f_s
    t_per_max = 200 * f_s

    #defining genes
    #ranges are given by Gardner paper
    toolbox.register("attr_v", random.uniform, .02, .2)
    toolbox.register("attr_g", random.uniform, .25, 10)
    toolbox.register("attr_p", random.uniform, .3, 1)
    toolbox.register("attr_N", random.uniform, 10, 100)
    toolbox.register("attr_T", random.uniform, t_per_min, t_per_max)

    #defining an individual as a group of the five genes
    toolbox.register("individual", tools.initCycle, creator.Individual,
                     (toolbox.attr_v, toolbox.attr_g, toolbox.attr_p,
                      toolbox.attr_N, toolbox.attr_T), 1)

    #defining the population as a list of individuals
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # register the fitness function
    toolbox.register(
        "evaluate",
        fitness_fn(X_train, feat_vec, seizure_times, f_s, window_length,
                   window_overlap))

    MIN = [0.02, 0.25, 0.3, 10, t_per_min]
    MAX = [.2, 10, 1, 100, t_per_max]
    # register the crossover operator
    # other options include cxOnePoint and cxUniform (cxUniform requires an indpb argument; CXPB could probably be reused for it)
    # other, more specialized crossover operators are also available
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.decorate("mate", within_constraints(MIN, MAX))

    # register a Gaussian mutation operator; each gene mutates independently with probability indpb=0.03
    # mu, sigma, and indpb can all be tuned
    # other, more specialized mutation operators are also available
    # (a sketch of the within_constraints bounds decorator used here appears after this function)
    toolbox.register("mutate", tools.mutGaussian, mu=1, sigma=10, indpb=0.03)
    toolbox.decorate("mutate", within_constraints(MIN, MAX))

    # operator for selecting individuals for breeding the next generation
    # tournament selection randomly picks tournsize individuals from the population, chooses the fittest,
    # and makes it a parent; this repeats until the number of parents equals the population size.
    # other, more specialized selection operators are also available (e.g. selRoulette below)
    toolbox.register("select", tools.selTournament, tournsize=3)
    #toolbox.register("select", tools.selRoulette)

    #create an initial population of size 20
    pop = toolbox.population(n=20)

    # CXPB  is the probability with which two individuals are crossed
    CXPB = 0.3

    # MUTPB is the probability for mutating an individual
    MUTPB = 0.5

    # NGEN  is the number of generations until final parameters are picked
    NGEN = 40

    print("Start of evolution")

    # find the fitness of every individual in the population
    fitnesses = list(map(toolbox.evaluate, pop))

    #assigning each fitness to the individual it represents
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    # defining variables to keep track of the best individual seen over the whole run
    best_species_genes = tools.selBest(pop, 1)[0]
    best_species_value = best_species_genes.fitness.values
    best_gen = 0

    next_mean = 1
    prev_mean = 0

    #start evolution
    for g in range(NGEN):
        if abs(next_mean - prev_mean) > 0.005:

            prev_mean = next_mean
            # Select the next generation's parents
            parents = toolbox.select(pop, len(pop))

            # Clone the parents and call them offspring: crossover and mutation will be performed below
            offspring = list(map(toolbox.clone, parents))

            # Apply crossover to children in offspring with probability CXPB
            for child1, child2 in zip(offspring[::2], offspring[1::2]):

                # cross two individuals with probability CXPB
                if random.random() < CXPB:
                    toolbox.mate(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values

            # Apply mutation to children in offspring with probability MUTPB
            for mutant in offspring:
                if random.random() < MUTPB:
                    toolbox.mutate(mutant)
                    del mutant.fitness.values

            # Find the fitnesses for all the children whose fitness changed
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = list(map(toolbox.evaluate, invalid_ind))
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            # Offspring becomes the new population
            pop[:] = offspring

            # updating best species values (fitnesses is a list now, so it can be reused here)
            if fitnesses:
                if max(fitnesses) > best_species_value:
                    best_species_genes = tools.selBest(pop, 1)[0]
                    best_species_value = best_species_genes.fitness.values
                    best_gen = g
                best_next_obj = max(fitnesses)

            fits = [ind.fitness.values[0] for ind in pop]
            length = len(pop)
            next_mean = sum(fits) / length

    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual in final population is %s with fitness value %s" %
          (best_ind, best_ind.fitness.values))
    print(
        "Best individual in species is %s and occurred during generation %s with fitness %s"
        % (best_species_genes, best_gen, best_species_value))
    return best_species_genes[0], best_species_genes[1], best_species_genes[
        2], best_species_genes[3], best_species_genes[4]
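

# The within_constraints(MIN, MAX) decorator used in parameter_tuning above is
# defined elsewhere in this project and is not shown here.  A minimal sketch of
# such a bounds decorator, assuming its only job is to clip every gene of every
# returned individual back into [MIN[i], MAX[i]] (the checkBounds pattern from
# the DEAP documentation), could look like the following; the name
# within_constraints_sketch is hypothetical:
def within_constraints_sketch(minima, maxima):
    def decorator(operator):
        def wrapper(*args, **kwargs):
            # Run the wrapped variation operator (mate/mutate) first.
            offspring = operator(*args, **kwargs)
            # Then clip every gene of every returned individual into bounds.
            for child in offspring:
                for i, gene in enumerate(child):
                    child[i] = min(max(gene, minima[i]), maxima[i])
            return offspring
        return wrapper
    return decorator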
Example #36
0
    length = len(population)
    mean = sum(fits) / length
    sum2 = sum(x * x for x in fits)
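    # std below is the population standard deviation, sqrt(E[x^2] - E[x]^2),
    # computed from the two running sums above.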
    std = abs(sum2 / length - mean**2)**0.5

    gensMin.append(min(fits))
    gensMax.append(max(fits))
    gensAvg.append(mean)
    gensStd.append(std)

    # print("  Min %s" % int(min(fits)))
    print("  Max %s" % int(max(fits)))
    # print("  Avg %s" % mean)
    # print("  Std %s" % std)
    population = toolbox.select(offspring, k=len(population))
topk = tools.selBest(population, k=1)
#print(top10)
for solution in topk:
    print("Pontos: %i/243" % int(fitnessFromDNA64(solution)[0]))
    printBoardFromDNA64(solution)
    print("")

plt.subplot(111)
plt.plot(gensMax, label="Max")
plt.plot(gensAvg, label="Avg")
plt.plot(gensMin, label="Min")
plt.legend(bbox_to_anchor=(0.8, 0.0, 0.2, .102),
           loc=3,
           ncol=1,
           mode="expand",
           borderaxespad=0.)
Example #37
0
def gaVRPTW(instName,
            unitCost,
            initCost,
            waitCost,
            delayCost,
            indSize,
            popSize,
            cxPb,
            mutPb,
            NGen,
            exportCSV=False,
            customizeData=False):
    if customizeData:
        jsonDataDir = os.path.join(BASE_DIR, 'data', 'json_customize')
    else:
        jsonDataDir = os.path.join(BASE_DIR, 'data', 'json')
    jsonFile = os.path.join(jsonDataDir, '%s.json' % instName)
    with open(jsonFile) as f:
        instance = load(f)
    creator.create('FitnessMax', base.Fitness, weights=(1.0, ))
    creator.create('Individual', list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register('indexes', random.sample, range(1, indSize + 1), indSize)
    # Structure initializers
    toolbox.register('individual', tools.initIterate, creator.Individual,
                     toolbox.indexes)
    toolbox.register('population', tools.initRepeat, list, toolbox.individual)
    # Operator registering
    toolbox.register('evaluate',
                     evalVRPTW,
                     instance=instance,
                     unitCost=unitCost,
                     initCost=initCost,
                     waitCost=waitCost,
                     delayCost=delayCost)
    toolbox.register('select', tools.selRoulette)
    toolbox.register('mate', cxPartialyMatched)
    toolbox.register('mutate', mutInverseIndexes)
    pop = toolbox.population(n=popSize)
    # Results holders for exporting results to CSV file
    csvData = []
    print 'Start of evolution'
    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    print '  Evaluated %d individuals' % len(pop)
    # Begin the evolution
    for g in range(NGen):
        print '-- Generation %d --' % g
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < cxPb:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            if random.random() < mutPb:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Evaluate the individuals with an invalid fitness
        invalidInd = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalidInd)
        for ind, fit in zip(invalidInd, fitnesses):
            ind.fitness.values = fit
        print '  Evaluated %d individuals' % len(invalidInd)
        # The population is entirely replaced by the offspring
        pop[:] = offspring
        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        print '  Min %s' % min(fits)
        print '  Max %s' % max(fits)
        print '  Avg %s' % mean
        print '  Std %s' % std
        # Write data to holders for exporting results to CSV file
        if exportCSV:
            csvRow = {
                'generation': g,
                'evaluated_individuals': len(invalidInd),
                'min_fitness': min(fits),
                'max_fitness': max(fits),
                'avg_fitness': mean,
                'std_fitness': std,
            }
            csvData.append(csvRow)
    print '-- End of (successful) evolution --'
    bestInd = tools.selBest(pop, 1)[0]
    #print 'Best individual: %s' % bestInd
    #print 'Fitness: %s' % bestInd.fitness.values[0]
    #printRoute(ind2route(bestInd, instance))
    #print 'Total cost: %s' % (1 / bestInd.fitness.values[0])

    # We need to return JSON rather than only print to the console / write a CSV
    return_json = {}
    our_order = bestInd[:]
    return_json['order'] = order_map(our_order)
    return_json['route'] = route_map(ind2route(bestInd, instance))
    return_json['Fitness'] = bestInd.fitness.values[0]
    return_json['cost'] = 1 / bestInd.fitness.values[0]
    if exportCSV:
        csvFilename = '%s_uC%s_iC%s_wC%s_dC%s_iS%s_pS%s_cP%s_mP%s_nG%s.csv' % (
            instName, unitCost, initCost, waitCost, delayCost, indSize,
            popSize, cxPb, mutPb, NGen)
        csvPathname = os.path.join(BASE_DIR, 'results', csvFilename)
        print 'Write to file: %s' % csvPathname
        makeDirsForFile(pathname=csvPathname)
        if not exist(pathname=csvPathname, overwrite=True):
            with open(csvPathname, 'w') as f:
                fieldnames = [
                    'generation', 'evaluated_individuals', 'min_fitness',
                    'max_fitness', 'avg_fitness', 'std_fitness'
                ]
                writer = DictWriter(f, fieldnames=fieldnames, dialect='excel')
                writer.writeheader()
                for csvRow in csvData:
                    writer.writerow(csvRow)
    return return_json
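
# A hypothetical example of calling gaVRPTW (the instance name and the cost /
# GA parameters below are illustrative placeholders, not values taken from
# this project):
#
#   result = gaVRPTW(instName='R101', unitCost=8.0, initCost=60.0,
#                    waitCost=0.5, delayCost=1.5, indSize=25, popSize=80,
#                    cxPb=0.85, mutPb=0.02, NGen=100, exportCSV=False)
#   print(result['order'], result['cost'])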
Example #38
0
File: GA.py Project: jrbustosm/GA_espresso
            del mutant.fitness.values

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
    fitnesses = map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    print("  Evaluated %i individuals" % len(invalid_ind))

    # The population is entirely replaced by the offspring
    pop[:] = offspring

    # Gather all the fitnesses in one list and print the stats
    fits = [ind.fitness.values[0] for ind in pop]

    length = len(pop)
    mean = sum(fits) / length
    sum2 = sum(x * x for x in fits)
    std = abs(sum2 / length - mean**2)**0.5

    print("".join(tools.selBest(pop, 1)[0]))
    print("  Min %s" % min(fits))
    print("  Max %s" % max(fits))
    print("  Avg %s" % mean)
    print("  Std %s" % std)

best_ind = tools.selBest(pop, 1)[0]
print("Best individual is %s, %s" %
      ("".join(best_ind), best_ind.fitness.values))
Example #39
0
                     n=len(data))
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    population = toolbox.population(n=pop_size)

    NGEN = 500
    best_fit = 0
    best_gen = 0
    differential = 30
    start_time = time.time()
    for gen in range(NGEN):
        offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.1)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = toolbox.select(offspring, k=len(population))
        current_gen_best_individual = tools.selBest(population, k=1)[0]
        current_gen_best_fit = current_gen_best_individual.fitness.values[0]
        if current_gen_best_fit > best_fit:
            best_individual = current_gen_best_individual
            best_fit = current_gen_best_fit
            best_gen = gen
    time_diff = time.time() - start_time
    results_list.append([
        time_diff, best_individual.fitness.values[0], best_gen, best_individual
    ])
    print(time_diff)
    print(best_individual.fitness.values[0], best_individual)
print(results_list)
with open("DEAP_results.csv", "w", newline="") as file:
    writer = csv.writer(file, delimiter=';')
    writer.writerows(results_list)
Example #40
0
def evolve(predict_type,
           fitness_measure,
           trX,
           trY,
           max_layers=5,
           num_gens=10,
           gen_size=40,
           teX=[],
           teY=[],
           layer_types=[
               'relu', 'softplus', 'dropout', 'bias_add', 'sigmoid', 'tanh',
               'none', 'normalize'
           ],
           layer_sizes=[0, 0, 10, 50, 100, 200, 500, 1000],
           end_types=[
               'sum', 'prod', 'min', 'max', 'mean', 'none', 'sigmoid', 'tanh'
           ],
           train_types=[
               'GradientDescent', 'GradientDescent', 'GradientDescent',
               'Adagrad', 'Momentum', 'Adam', 'Ftrl', 'RMSProp'
           ],
           cross_prob=0.2,
           mut_prob=0.2,
           tourn_size=5,
           train_iters=5,
           squash_errors=True):
    '''

    :param predict_type: String denoting the type of neural network to evolve. Two options: 'regression' and
        'classification'.
    :param fitness_measure: String denoting the type of measurement to use for evaluating the performance of the network
        type. Options:
		- 'rmse': Root mean squared error between the predicted values and known values. Use for regression.
		- 'r_squared': Coefficient of determination for determining how well the data fits the model. Use for
		    regression.
		- 'accuracy': Fraction of samples that were classified correctly. Use for classification, and can be used for
		    multi-class classification.
		- 'sensitivity': Fraction of positive samples correctly identified as positive. Use for classification with two
		    classes, and the second class is the positive class.
		- 'specificity': Fraction of negative samples correctly identified as negative. Use for classification with two
		    classes, and the first class is the negative class.
    :param trX: Numpy array with input data to use for training. Will pull randomly from this array to create test and
        training sets.
    :param trY: Numpy array with output data to use for training.
    :param max_layers: Integer denoting the maximum number of layers that exist between the input and output layer. Set
        at 5 by default.
    :param num_gens: Number of generations to simulate. Set at 10 by default.
    :param gen_size: Number of individual members per generation. Set at 40 by default.
    :param teX: If a specific test set is desired, enter the input data here as a numpy array.
    :param teY: Test output data as a numpy array.
    :param layer_types: List of strings denoting the layer types possible to be used. Set to ['relu', 'softplus',
        'dropout', 'bias_add', 'sigmoid', 'tanh', 'none', 'normalize'] by default.
    :param layer_sizes: List of integers denoting the layer sizes possible to be used. List must be of the same length
        as layer_types. Set to [0, 0, 10, 50, 100, 200, 500, 1000] by default.
    :param end_types: List of strings denoting the options for the type of transformation that gives the output. List
        must be of same length as layer_types. Set to ['sum', 'prod', 'min', 'max', 'mean', 'none', 'sigmoid', 'tanh']
        by default.
    :param train_types: List of strings denoting the optimizer types possible to be used. List must be of same length as
        layer_types. Set to ['GradientDescent', 'GradientDescent', 'GradientDescent', 'Adagrad', 'Momentum', 'Adam',
        'Ftrl', 'RMSProp'] by default.
    :param cross_prob: Float value denoting the probability of crossing the genetics of different individuals. Set at
        0.2 by default.
    :param mut_prob: Float value denoting the probability of changing the genetics of a single individual. Set at 0.2 by
        default.
    :param tourn_size: Integer denoting the tournament size used when selecting parents. Set at 5 by default.
    :param train_iters: Integer denoting the number of training iterations to use for each neural network. Set at 5 by
        default.
    :param squash_errors: Boolean value denoting whether to give a fail value if the network results in an error. Set to
        True by default.
    :return: List describing the best net_type (alternating layer types and sizes, plus the output transform), a
        string denoting the best optimizer, and the fitness value(s) of the best network type.
    '''

    # Checks that the different options have the same size.
    if not len(layer_types) == len(layer_sizes) == len(end_types) == len(
            train_types):
        print('Input attribute lists have different sizes.')
        return None

    # Gets the type of network to check.
    if predict_type == 'regression':
        predictor = tf_functions.Regresser
    elif predict_type == 'classification':
        predictor = tf_functions.Classifier
        end_types = ['softmax'] * len(layer_types)

    # Gets the type of success measure to use.
    if fitness_measure == 'rmse':
        measure = rmse
        creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMin)
        fail_val = np.inf
    elif fitness_measure == 'r_squared':
        measure = r_squared
        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMax)
        fail_val = 0
    elif fitness_measure == 'accuracy':
        measure = accuracy
        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMax)
        fail_val = 0
    elif fitness_measure == 'specificity':
        measure = specificity
        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMax)
        fail_val = 0
    elif fitness_measure == 'sensitivity':
        measure = sensitivity
        creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
        creator.create("Individual", list, fitness=creator.FitnessMax)
        fail_val = 0
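
    # For reference, the usual definitions behind the measures selected above
    # (the rmse / r_squared / accuracy / sensitivity / specificity helpers
    # themselves are defined or imported elsewhere in this module):
    #   rmse        = sqrt(mean((y_pred - y_true) ** 2))
    #   r_squared   = 1 - SS_res / SS_tot
    #   accuracy    = (TP + TN) / (TP + TN + FP + FN)
    #   sensitivity = TP / (TP + FN)    # true positive rate
    #   specificity = TN / (TN + FP)    # true negative rate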

    toolbox = base.Toolbox()

    # Attribute generator.
    toolbox.register("attr_ints", random.randint, 0, len(layer_types) - 1)

    # Structure initializers.
    toolbox.register("individual",
                     tools.initRepeat,
                     creator.Individual,
                     toolbox.attr_ints,
                     n=(max_layers * 2) + 2)
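    # Genome layout implied by the decoding below: entries 0 .. 2*max_layers-1
    # alternate (layer_type index, layer_size index) for each layer, entry [-2]
    # selects the end/output transform and entry [-1] selects the optimizer.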
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Operator registering.
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate",
                     tools.mutUniformInt,
                     low=0,
                     up=len(layer_types) - 1,
                     indpb=0.5)
    toolbox.register("select", tools.selTournament, tournsize=tourn_size)

    # Gets the initial population.
    pop = toolbox.population(n=gen_size)

    # Performs an initial selection.
    for ind in pop:

        # Turns the individual's genes into a net_type and an optimizer.
        net_type = []
        for i in range(max_layers):
            net_type.append(layer_types[ind[i * 2]])
            net_type.append(layer_sizes[ind[i * 2 + 1]])

        net_type.append(end_types[ind[-2]])

        # Splits into test and train if needed.
        if len(teX) == 0:
            ttrX, ttrY, tteX, tteY = test_train_split(trX, trY)
        else:
            ttrX, ttrY, tteX, tteY = trX, trY, teX, teY

        # Attempts to test network if errors to be squashed.
        if squash_errors:
            try:

                # Sets up, trains, and tests network.
                ind_predictor = predictor(net_type,
                                          optimizer=train_types[ind[-1]])
                ind_predictor.train(ttrX, ttrY, train_iters)

                p = ind_predictor.predict(tteX)
                m = measure(p, tteY)

                ind_predictor.close()

            # Upon an error, gives the worst possible value.
            except:
                m = fail_val

            if np.isnan(m):
                m = fail_val

        else:
            ind_predictor = predictor(net_type, optimizer=train_types[ind[-1]])
            ind_predictor.train(ttrX, ttrY, train_iters)

            p = ind_predictor.predict(tteX)
            m = measure(p, tteY)

            ind_predictor.close()

        ind.fitness.values = (m, )

    # Begins the evolution.
    for g in range(num_gens):

        # Selects the next generation individuals.
        offspring = toolbox.select(pop, len(pop))
        # Clones the selected individuals.
        offspring = list(map(toolbox.clone, offspring))

        # Applies crossover and mutation on the offspring.
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < cross_prob:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < mut_prob:

                toolbox.mutate(mutant)

                del mutant.fitness.values

        # Evaluates the individuals with an invalid fitness.
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        for ind in invalid_ind:
            net_type = []
            for i in range(max_layers):
                net_type.append(layer_types[ind[i * 2]])
                net_type.append(layer_sizes[ind[i * 2 + 1]])

            net_type.append(end_types[ind[-2]])

            if squash_errors:
                try:
                    ind_predictor = predictor(net_type,
                                              optimizer=train_types[ind[-1]])
                    ind_predictor.train(ttrX, ttrY, train_iters)

                    p = ind_predictor.predict(tteX)
                    m = measure(p, tteY)

                    ind_predictor.close()

                except:
                    m = fail_val

                if np.isnan(m):
                    m = fail_val

            else:
                ind_predictor = predictor(net_type,
                                          optimizer=train_types[ind[-1]])
                ind_predictor.train(ttrX, ttrY, train_iters)

                p = ind_predictor.predict(tteX)
                m = measure(p, tteY)

                ind_predictor.close()

            ind.fitness.values = (m, )

        # The population is entirely replaced by the offspring.
        pop[:] = offspring

    # Gets the best individual remaining after the final generation.
    best_ind = tools.selBest(pop, 1)[0]

    net_type = []
    for i in range(max_layers):
        net_type.append(layer_types[best_ind[i * 2]])
        net_type.append(layer_sizes[best_ind[i * 2 + 1]])

    net_type.append(end_types[best_ind[-2]])
    optimizer = train_types[best_ind[-1]]

    return net_type, optimizer, best_ind.fitness.values
    
    pop, logbook = algorithms.eaMuCommaLambda(pop, toolbox, mu=MU, lambda_=LAMBDA, 
        cxpb=0.6, mutpb=0.3, ngen=500, stats=stats, halloffame=hof)
    
    return pop, logbook, hof
    




pop, logbook, hof = main()




best = tools.selBest(pop, 1)[0]
G = get_G_from_individual(best)
W = calculate_W(G, y)
temp2 = G.dot(W)
y_pred = np.argmax(temp2, axis = 1)
y_pred = y_pred[:, np.newaxis]

i = np.where(y_pred == classes)[0]
j = np.where(y_pred != classes)[0]
correct = X[i, :]
incorrect = X[j, :]
v, r = find_centers_radial(best)

# c = plt.Circle((0,0), 2, facecolor='none', edgecolor='red')

Example #42
0
def gaROADEF2003(instName, iniMethod='RS', indSize=0, popSize=100, cxPb=0.5, mutPb=0.05, NGen=100, \
                 simple_flag=False, Mul_Flag=False, strip_flag=False, track_flag=False, exportCSV=False):
    '''
    :param iniMethod: one of ['RS', 'RNDS', 'HRHS', 'FS']
    :param simple_flag: True to drop the stereo constraint and the pair constraint
    :param strip_flag: True for a strip instance built from customized data
    :param Mul_Flag: True for NSGA-II
        [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
        non-dominated sorting genetic algorithm for multi-objective
        optimization: NSGA-II", 2002.
    '''

    if strip_flag or track_flag:
        jsonDataDir = os.path.join(BASE_DIR, 'data', 'json_customize')
        jsonFile = os.path.join(jsonDataDir, '%s.json' % instName)
        with open(jsonFile) as f:
            instance = load(f)
        indSize = instance['strip-number']
    else:
        jsonDataDir = os.path.join(BASE_DIR, 'data', 'json_ROADEF2003')
        jsonFile = os.path.join(jsonDataDir, '%s.json' % instName)
        with open(jsonFile) as f:
            instance = load(f)
        indSize = instance['strip-number'] * 2

    creator.create('FitnessMax',
                   base.Fitness,
                   weights=(1.0, -1.0) if Mul_Flag else (1.0, ))
    creator.create('Individual', list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register('indexes', random.sample, range(1, indSize + 1), indSize)
    # Evaluate define
    toolbox.register('evaluate', evalMulROADEF2003 if Mul_Flag else evalROADEF2003, \
                     simple_flag = simple_flag, strip_flag = strip_flag, \
                     track_flag = track_flag, instance=instance)
    # Structure initializers
    toolbox.register('individual', tools.initIterate, creator.Individual,
                     toolbox.indexes)
    toolbox.register('heuristic',
                     Heuristic,
                     instance,
                     simple_flag=simple_flag,
                     strip_flag=strip_flag)
    toolbox.register('individual_heuri', tools.initIterate, creator.Individual,
                     toolbox.heuristic)
    if iniMethod == 'RS':
        # STRATEGY 1: RS
        toolbox.register('population', tools.initRepeat, list,
                         toolbox.individual)
        pop = toolbox.population(n=popSize)
    elif iniMethod == 'RNDS':
        # STRATEGY 2: RNDS
        toolbox.register('population', initRNDS, list, toolbox.individual,
                         toolbox.evaluate)
        pop = toolbox.population(n=popSize)
    elif iniMethod == 'HRHS':
        # STRATEGY 3: HRHS
        toolbox.register('population', tools.initRepeat, list,
                         toolbox.individual_heuri)
        pop = toolbox.population(n=popSize)
    elif iniMethod == 'FS':
        # STRATEGY 4 : FS
        toolbox.register('population', tools.initRepeat, list,
                         toolbox.individual)
        omega = 2
        pop_big = toolbox.population(n=omega * popSize)
        fitnesses = list(map(toolbox.evaluate, pop_big))
        for ind, fit in zip(pop_big, fitnesses):
            ind.fitness.values = fit
        pop = tools.selBest(pop_big, popSize)
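        # i.e. FS oversamples omega * popSize random individuals, evaluates them
        # once, and keeps only the best popSize as the initial population.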
    else:
        print('Currently unsupported initialization method!')
        exit(1)
    # Operator registering
    toolbox.register('select',
                     tools.selNSGA2 if Mul_Flag else tools.selRoulette)
    toolbox.register('mate', cxSameSitCopyFirst)
    toolbox.register('mutate', mutExchangeLocation)

    if iniMethod == 'RNDS':
        # check if the pop successfully initialized with RNDS
        fitnesses = list(map(toolbox.evaluate, pop))
        for fitness in fitnesses:
            if fitnesses.count(fitness) > 1:
                print('***Error for same fitness value')

    # check the validity of pop initialization
    if not checkPopValidity(pop): print('not a valid pop initialization!!!')
    csvData = []
    print('Start of evolution')
    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    print('  Evaluated %d individuals' % len(pop))
    # Begin the evolution
    for g in range(NGen):
        print('-- Generation %d --' % g)
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2, i in zip(offspring[::2], offspring[1::2],
                                     range(1, len(offspring), 2)):
            if random.random() < cxPb and child1 != child2:
                #                print(offspring[i - 1], offspring[i])
                child1_oldFit = 0
                child2_oldFit = 0
                count = 0
                #  Preserve the solution with greater fitness
                while (child1_oldFit <= child1.fitness.values[0] or child2_oldFit <= child2.fitness.values[0]) \
                and count < 10:
                    child1_oldFit = child1.fitness.values[0]
                    child2_oldFit = child2.fitness.values[0]
                    toolbox.mate(child1, child2)
                    count = count + 1
#                if not count < 10: print('crossover 10 times')
                del child1.fitness.values
                del child2.fitness.values
                #                print (offspring[i - 1], offspring[i] )
                # check the validity of pop
                if not checkPopValidity(offspring):
                    print('not a valid pop after cross over!!!')

        for mutant in offspring:
            if random.random() < mutPb and mutant.fitness.valid:
                #                print(mutant)
                mutant_oldFit = 0
                count = 0
                while (mutant_oldFit <= mutant.fitness.values[0]) \
                and count < 10:
                    mutant_oldFit = mutant.fitness.values[0]
                    toolbox.mutate(mutant)
                    count = count + 1
#                if not count < 10: print('mutation 10 times')
                del mutant.fitness.values
                #                print(mutant)
                # check the validity of pop
                if not checkPopValidity(offspring):
                    print('not a valid pop after mutation!!!')

        # Evaluate the individuals with an invalid fitness
        invalidInd = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalidInd)
        for ind, fit in zip(invalidInd, fitnesses):
            ind.fitness.values = fit
        print('  Evaluated %d individuals' % len(invalidInd))
        # The population is entirely replaced by the offspring
        pop[:] = toolbox.select(offspring + pop, popSize)
        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0]
                for ind in pop]  #if not [0], returns tuple instead of float
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        print('  Min %s' % min(fits))
        print('  Max %s' % max(fits))
        print('  Avg %s' % mean)
        print('  Std %s' % std)
        # Write data to holders for exporting results to CSV file
        if exportCSV:
            csvRow = {
                'generation': g,
                'evaluated_individuals': len(invalidInd),
                'min_fitness': min(fits),
                'max_fitness': max(fits),
                'avg_fitness': mean,
                'std_fitness': std,
            }
            csvData.append(csvRow)

    print('-- End of (successful) evolution --')
    bestInd = tools.selBest(pop, 1)[0]
    print('Best individual: %s' % bestInd)
    print('Fitness: ', bestInd.fitness.values)
    print(
        ind2solution(bestInd,
                     instance,
                     simple_flag=simple_flag,
                     strip_flag=strip_flag,
                     track_flag=track_flag))
    print('Total cost: %s' % (1 / bestInd.fitness.values[0]))
    print('End of evolution')

    if not strip_flag:
        solutionFile = os.path.join(BASE_DIR, 'Solutions',
                                    'solution' + instName[8:])
        #testSolution = [4, 6, 8, 2, 10, 12, 14]
        testSolution = readsolution2solution(solutionFile, instance)
        verifySolution(testSolution, instance)
        print(toolbox.evaluate(testSolution))
    if exportCSV:
        csvFilename = '%s_iS%s_pS%s_cP%s_mP%s_nG%s.csv' % (
            instName, indSize, popSize, cxPb, mutPb, NGen)
        csvPathname = os.path.join(BASE_DIR, 'results', csvFilename)
        print('Write to file: %s' % csvPathname)
        makeDirsForFile(pathname=csvPathname)
        if not exist(pathname=csvPathname, overwrite=True):
            with open(csvPathname, 'w') as f:
                fieldnames = [
                    'generation', 'evaluated_individuals', 'min_fitness',
                    'max_fitness', 'avg_fitness', 'std_fitness'
                ]
                writer = DictWriter(f, fieldnames=fieldnames, dialect='excel')
                writer.writeheader()
                for csvRow in csvData:
                    writer.writerow(csvRow)
Example #43
0
    toolbox,
    cxpb=probabilidadCruce,
    mutpb=probabilidadMutacion,  # crossover and mutation probabilities
    ngen=numeroGeneraciones,
    verbose=False,
    stats=mstats
)  # number of generations to run and statistics to collect
# For each generation, the logbook structure stores a summary of the
# algorithm's progress.

file = open("estadisticas_" + nombreArchivo + ".txt", 'a')
file.write(str(logbook) + os.linesep)
file.close()

file = open("fenotipo_" + nombreArchivo + ".txt", 'a')
file.write(str(feno(tools.selBest(population, 1)[0])) + os.linesep)
file.close()

file = open("solucion_" + nombreArchivo + ".txt", 'a')
file.write(str(len(tools.selBest(population, 1)[0])) + os.linesep)
salida = set(tools.selBest(population, 1)[0])
a = 0
for i in salida:
    if a == 0:
        if len(i) > 1:
            file.write(str(i)[1:-1])
        else:
            file.write(str(i)[1:-2])
        a = 1
    else:
        if len(i) > 1:
Example #44
0
    def run(self):
        """Run the optimization."""
        pool = multiprocessing.Pool(processes=self.nproc)
        self.toolbox.register("map", pool.map)

        random.seed(42)
        pop = self.toolbox.population(n=self.pop)

        opt_log.info('Starting optimization')

        # fitnesses = self.toolbox.map(self.toolbox.evaluate, pop)
        # for ind, fit in zip(pop, fitnesses):
        #     ind.fitness.values = fit

        # fits = [ind.fitness.values[0] for ind in pop]
        fits = [.0]

        # Begin the evolution
        opt_log.info('Starting the evolution')
        opt_log.info(f'Max generations={self.max_gen} Pop={len(pop)}')
        gen = 0
        while max(fits) < 1 and gen < self.max_gen:
            # A new generation
            gen += 1

            # Select the next generation individuals
            offspring = self.toolbox.select(pop, len(pop))

            # Clone the selected individuals
            offspring = list(map(self.toolbox.clone, offspring))

            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):

                # cross two individuals with probability CXPB
                if random.random() < self.cxpb:
                    self.toolbox.mate(child1, child2)

                    # fitness values of the children
                    # must be recalculated later
                    del child1.fitness.values
                    del child2.fitness.values

            for mutant in offspring:

                # mutate an individual with probability MUTPB
                if random.random() < self.mutpb:
                    self.toolbox.mutate(mutant)
                    # https://stackoverflow.com/a/44722548
                    del mutant.fitness.values

            # Evaluate the individuals with an invalid fitness
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = self.toolbox.map(self.toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit

            opt_log.debug(f"Evaluated {len(invalid_ind)} individuals")

            # The population is entirely replaced by the offspring
            pop[:] = offspring

            # Gather all the fitnesses in one list and print the stats
            fits = [ind.fitness.values[0] for ind in pop]

            length = len(pop)
            mean = sum(fits) / length
            sum2 = sum(x * x for x in fits)
            std = abs(sum2 / length - mean**2)**0.5

            opt_log.info(f"Gen {gen} {mean:.2f} +- {std:.2f} ({min(fits):.2f},"
                         f"{max(fits):.2f})")

        pool.close()
        pool.join()

        opt_log.info("Optimization complete")

        best_ind = tools.selBest(pop, 1)[0]
        opt_log.info(f"Optimal scoring function is: (energy * "
                     f"{best_ind[0]:.2f}) / (satisfaction *"
                     f" {best_ind[1]:.2f})")
        opt_log.info(f"This function will give a correlation of"
                     f" {best_ind.fitness.values[0]:.2f}")
Example #45
0
creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()

toolbox.register("indices", random.sample, range(NUM_CITIES), NUM_CITIES)
toolbox.register("individual", tools.initIterate, creator.Individual,
                 toolbox.indices)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

toolbox.register("evaluate", evalF)
toolbox.register("mate", tools.cxOrdered)
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)

CXPB, MUTPB, NGEN = 0.5, 0.2, 1000
population = toolbox.population(n=50)

print("\n\tBest 20 of the initial population:")
printPop(tools.selBest(population, k=20))

for _ in itertools.repeat(None, NGEN):
    offspring = algorithms.varAnd(population, toolbox, CXPB, MUTPB)
    fits = toolbox.map(toolbox.evaluate, offspring)
    for fit, ind in zip(fits, offspring):
        ind.fitness.values = fit
    population = toolbox.select(offspring, k=len(population))

print("\n\tBest 5 of the final population:")
printPop(tools.selBest(population, k=5))
    stats = tools.Statistics(lambda ind: ind.fitness.values)

    stats.register("avg", numpy.mean, axis=0)

    stats.register("std", numpy.std, axis=0)

    stats.register("min", numpy.min, axis=0)

    stats.register("max", numpy.max, axis=0)

    algorithms.eaMuPlusLambda(pop,
                              toolbox,
                              MU,
                              LAMBDA,
                              CXPB,
                              MUTPB,
                              NGEN,
                              stats,
                              halloffame=hof)

    return pop, stats, hof


if __name__ == "__main__":

    pop = main()[0]
    best_ind = tools.selBest(pop, -1)
    for i in best_ind:
        print("best_ind", i)
        print("best_value", i.fitness.values)
Example #47
0
        if min(fits) == 0:
            break
        #print("gen:",i,"  Min %s" % min(fits),"  Max %s" % max(fits),"  Avg %s" % mean)
        #print("gen:",i,"  Min %s" % min(fits),"  Max %s" % max(fits),"  Avg %s" % mean,"  Std %s" % std)
        #print(i,max(fits),mean)

    #nvmap.fig.show()
    #time.sleep(100000000)
    return pop, hof


if __name__ == '__main__':
    print("pop_num = ", POPNUM)
    print("gen_num ", NGEN)
    pop, hof = main()
    expr = tools.selBest(pop, 1)[0]
    print(expr)
"""
If the minimum value is identical for two consecutive generations, run novelty_search.

0 4.075023050190184 1156.1453120427354 166.4136630695671
1 0.022914707811938422 659.4723034874191 87.58800368033285
2 0.022914707811938422 565.286954344603 46.92552132727429
3 0.022914707811938422 1155.8698271088651 32.86141289300736
4 0.16929126310766854 130.72871631851797 10.603308629914354
5 0.26926918751105927 116.1383051077541 13.440188120269836
6 0.0790438329543217 226.68121785607093 11.451844704279138
7 0.04616787809424775 126.51163292258249 8.864807308145636
8 0.042111253129909064 69.50050986331198 3.726453945503491
9 0.005903559769173528 86.89761470235598 1.2720889780771634
10 0.005863780260742529 0.6259415090656157 0.12919107959593795
Example #48
0
toolbox = base.Toolbox()

toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual",
                 tools.initRepeat,
                 creator.Individual,
                 toolbox.attr_bool,
                 n=100)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)


def evalOneMax(individual):
    return sum(individual),


toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)

population = toolbox.population(n=300)

NGEN = 40
for gen in range(NGEN):
    offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.1)
    fits = toolbox.map(toolbox.evaluate, offspring)
    for fit, ind in zip(fits, offspring):
        ind.fitness.values = fit
    population = toolbox.select(offspring, k=len(population))
top10 = tools.selBest(population, k=10)
    def main(game, enemy):
        file_aux=open(experiment_name+'/results_enemy' + \
                          str(enemy) + str(algorithm) + '.txt', 'a')
        file_aux.write(f'\ngame {game} \n')
        file_aux.write('gen, best, mean, std, median, q1, q3, life')
        file_aux.close()

        #Creating the population
        pop = toolbox.populationCreator(n=pop_size) # Population is created as a list object
        pop_array = np.array(pop)
        generationCounter=0
        print("Start of evolution")

        # Evaluating all the population
        # fitnessValues=list(map(toolbox.evaluate, pop_array)) -> Won't work. Used Kamiel's
        fitnessValue = evaluate(pop_array)
        fitnessValue = fitnessValue[0].tolist()
        fitnesses = []
        lifes = []
        for value in fitnessValue:
            fitnesses.append(value[0])
            lifes.append(value[1])
        for count, individual in enumerate(fitnesses):
            # Rewrites the fitness value in a way the DEAP algorithm can understand
            fitnesses[count] = (-individual, )

        # Assigning the fitness value to each individual
        for individual, fitnessValue in zip(pop, fitnesses):
            individual.fitness.values=fitnessValue

        # Extract each fitness value
        fitnessValues=[individual.fitness.values[0]
                       for individual in pop]

        # Saves first generation
        fits = fitnessValues
        g = generationCounter
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        std = abs(sum2 / length - abs(mean)**2)**0.5
        q1 = np.percentile(fits, 25)
        median = np.percentile(fits, 50)
        q3 = np.percentile(fits, 75)
        max_life = max(lifes)
        file_aux = open(experiment_name+'/results_enemy' +
                        str(enemy)+'Tournement.txt', 'a')
        file_aux.write(
            f'\n{str(g)}, {str(round(max(fits)))}, {str(round(mean,6))}, {str(round(std,6))}, {str(round(median,6))}, {str(round(q1,6))}, {str(round(q3,6))}, {str(round(max_life,6))}')
        file_aux.close()


        # Begin the genetic loop
        # First, we start with the stopping condition
        while max(fitnessValues) < 100 and generationCounter < max_generations:
            begin_time = datetime.datetime.now()
            print("Begin evolution time:", begin_time, "!!!")
            # Update generation counter
            generationCounter=generationCounter + 1
            print("-- Generation %i --" % generationCounter)

            # Begin genetic operators
            # 1. Selection: since we already defined the tournament before
            # we only need to select the population and its length
            # Selected individuals now will be in a list
            print("selection...")
            offspring=toolbox.select(pop, len(pop))
            for i in offspring:
                print(i.fitness.values[0])

            # Cloning the selected individuals so we can apply the next genetic operators without affecting the original population
            offspring=list(map(toolbox.clone, offspring))
            print("done")
            # 2. Crossover. Note that the mate function takes two individuals as arguments and
            # modifies them in place, meaning they don't need to be reassigned
            print("Crossover...")
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < p_crossover:
                    toolbox.mate(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values
            print("done")
            # 3. Mutation
            print("Mutation...")
            for mutant in offspring:
                # if random.random() < p_mutation:
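                # NOTE: the mutation probability below decays linearly from
                # roughly 1 in the first generation to 0 at max_generations,
                # instead of using the fixed p_mutation above.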
                if random.random() < (1 - (generationCounter/max_generations)):
                    toolbox.mutate(mutant)
                    del mutant.fitness.values
            # Individuals that weren't mutated remain intact; their fitness values don't need to be recalculated
            # The rest of the individuals will have this value EMPTY
            # We now find those individuals and calculate the new fitness
            print("...re-evaluating fitness...")
            freshIndividuals=[ind for ind in offspring if not ind.fitness.valid]
            # Eval not work!!! :(( used Kamiels
            # freshFitnessValues=list(map(toolbox.evaluate, freshIndividuals))
            # for individual, fitnessValue in zip(freshIndividuals, freshFitnessValues):
            #     individual.fitness.values=fitnessValue
            pop_array = np.array(freshIndividuals)
            values = evaluate(pop_array)
            values = values[0].tolist()
            fitnesses = []
            for value in values:
                fitnesses.append(value[0])
                lifes.append(value[1])

            for count, individual in enumerate(fitnesses):
                fitnesses[count] = (individual, )

            for ind, fit in zip(freshIndividuals, fitnesses):
                ind.fitness.values = fit
            print("done")
            # Elitism: replace the worst individuals of the offspring with the
            # best individuals of the current population
            amount_switched_individuals=int(len(pop)/10)
            worst_offspring=deap.tools.selWorst(
                offspring, amount_switched_individuals, fit_attr='fitness')
            best_gen=deap.tools.selBest(
                pop, amount_switched_individuals, fit_attr='fitness')
            for count, individual in enumerate(worst_offspring):
                index=offspring.index(individual)
                offspring[index]=best_gen[count]

            # End of the process -> replace the old population with the new one
            pop[:]=offspring
            print(f"There are {len(pop)} individuals in the population ")

            # Gather all the fitnesses in one list and print the stats
            fits=[ind.fitness.values[0] for ind in pop]

            length = len(pop)
            mean = sum(fits) / length
            sum2 = sum(x*x for x in fits)
            std = abs(sum2 / length - abs(mean)**2)**0.5
            q1 = np.percentile(fits, 25)
            median = np.percentile(fits, 50)
            q3 = np.percentile(fits, 75)
            max_life = max(lifes)

            # For plotting
            maxFitness = max(fits)
            meanFitness = sum(fits)/len(pop)
            maxFitnessValues.append(maxFitness)
            meanFitnessValues.append(meanFitness)

            print("  Min %s" % min(fits))
            print("  Max %s" % max(fits))
            print("  Avg %s" % mean)
            print("  Std %s" % std)
            # Plot
            plt.plot(maxFitnessValues)
            plt.plot(meanFitnessValues)
            ymax = max(maxFitnessValues)
            plt.ylabel("Values")
            plt.xlabel("Generations")
            plt.title("Maximum fitness: "+ str(ymax) + " Algorithm :" + str(algorithm))
            plt.savefig("adrian-testing" + "-algorithm-" + str(algorithm) +"-enemy-"+ str(enemy)+".png")
            plt.show()
            # DataFrame
            df.loc[int(algorithm)]=[min(fits),max(fits),mean,std]
            df.to_csv("adrian-testing-4" + "-algorithm-" + str(algorithm) +"-enemy-"+ str(enemy)+".csv", index=False)
            print("Saving...")
            # save the results for this generation
            file_aux=open(experiment_name+'/results_enemy' + \
                        str(enemy)+'Tournement.txt', 'a')
            file_aux.write(
                f'\n{str(generationCounter)}, {str(round(max(fits),6))}, {str(round(mean,6))}, {str(round(std,6))}, {str(round(median,6))}, {str(round(q1,6))}, {str(round(q3,6))}, {str(round(max_life,6))}')
            file_aux.close()
            print("Evolution ended in:", datetime.datetime.now() - begin_time)
            print("-- End of (successful) evolution --")
            best_ind=tools.selBest(pop, 1)[0]
            print("Best individual is %s, %s" %
                            (best_ind, best_ind.fitness.values))
            np.savetxt(experiment_name+'/best_game_'+str(game) + \
                            ',enemy_'+str(enemy)+'Tournement.txt', best_ind)
            print("Done. New generation...")
Example #50
0
    def generate(self,
                 n_pop,
                 cxpb=0.8,
                 mutxpb=0.3,
                 ngen=20,
                 set_toolbox=False):

        if self.verbose == 1:
            print(
                "Population: {}, crossover_probablity: {}, mutation_probablity: {}, total generations: {}"
                .format(n_pop, cxpb, mutxpb, ngen))

        if not set_toolbox:
            self.toolbox = self._default_toolbox()
        else:
            raise Exception(
                "Please create a toolbox.Use create_toolbox to create and register_toolbox to register. Else set set_toolbox = False to use defualt toolbox"
            )
        pop = self.toolbox.population(n_pop)
        CXPB, MUTPB, NGEN = cxpb, mutxpb, ngen

        # Evaluate the entire population
        print("EVOLVING.......")
        fitnesses = list(map(self.toolbox.evaluate, pop))

        for ind, fit in zip(pop, fitnesses):
            ind.fitness.values = fit

        for g in range(NGEN):
            print("-- GENERATION {} --".format(g + 1))
            offspring = self.toolbox.select(pop, len(pop))
            self.fitness_in_generation[str(g + 1)] = max(
                [ind.fitness.values[0] for ind in pop])
            # Clone the selected individuals
            offspring = list(map(self.toolbox.clone, offspring))

            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    self.toolbox.mate(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values

            for mutant in offspring:
                if random.random() < MUTPB:
                    self.toolbox.mutate(mutant)
                    del mutant.fitness.values

            # Evaluate the individuals with an invalid fitness
            weak_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = list(map(self.toolbox.evaluate, weak_ind))
            for ind, fit in zip(weak_ind, fitnesses):
                ind.fitness.values = fit
            print("Evaluated %i individuals" % len(weak_ind))

            # The population is entirely replaced by the offspring
            pop[:] = offspring

        # Gather all the fitnesses in one list
        fits = [ind.fitness.values[0] for ind in pop]

        print("-- Only the fittest survives --")

        self.best_ind = tools.selBest(pop, 1)[0]
        print("Best individual is %s, %s" %
              (self.best_ind, self.best_ind.fitness.values))
        self.get_final_scores(pop, fits)
        f_obj = ff.keshihua()
        np_ind = np.asarray(self.best_ind)
        feature_idx = np.where(np_ind == 1)[0]
        fitness = f_obj.huatu \
            (self.model, self.x[:, feature_idx], self.y, self.x_test[:, feature_idx], self.y_test,
             self.x_development[:, feature_idx], self.y_development)
        return pop
Example #51
0
    def solve(self):
        """Setup the deap module and find the best permutation."""

        maximum_score = self.maximum_score()

        if maximum_score <= 0.0:
            print("Nothing to solve, aborting.")
            exit()

        toolbox = self.setup_deap()

        # Create population
        population = toolbox.population(n=self.population)

        # Perform evolutionary algorithm
        result = None
        self.solution_iterator.initialize_progressbar()

        maximum_fit = -10 ** 6  # start from a very low fitness
        maximum_score_object = None

        for step in self.solution_iterator:
            self.current_step = step
            self.solution_iterator.update_progressbar(100 * maximum_fit /
                                                      maximum_score)
            offspring = algorithms.varAnd(population,
                                          toolbox,
                                          cxpb=0.5,
                                          mutpb=0.1)

            fits = toolbox.map(toolbox.evaluate, offspring)
            for fit, ind in zip(fits, offspring):
                score = fit[0].score()
                # Update maximum fit
                if score > maximum_fit:
                    maximum_fit = score
                    maximum_score_object = fit[0]

                if int(score) == maximum_score:
                    result = ind
                    break
                ind.fitness.values = score,

            population = toolbox.select(offspring, k=len(population))
            if maximum_score_object:
                self.solution_iterator.register_fitness(maximum_score_object)

            if result:
                break
        else:
            result = tools.selBest(population, k=1)[0]

        self.solution_iterator.update_progressbar(100 * maximum_fit /
                                                  maximum_score,
                                                  final=True)

        self.solution = finalize_solution(result, self)
        new_score = evaluate_permutation(self.solution, self)
        if new_score[0].score() > maximum_fit:
            print("Improved final solution: {} > {}".format(
                new_score[0].score(), maximum_fit))
        else:
            print("Could not improve final solution")

        self.solution_generated_groups = sorted_teams_from_solution(
            self.solution, self.assignable_individuals,
            self.generated_group_prefix)
        self.solution_groups = self.assignable_groups + self.solution_generated_groups
        self.solution_schedule = self.generate_schedule_from_solution(
            self.solution, self.solution_groups)

        return result
Example #52
0
def main():
    random.seed(128)
    # multiprocessing pool
    p = Pool(4)
    pop = toolbox.population(n=100)
    # CXPB  is the probability with which two individuals
    #       are crossed
    #
    # MUTPB is the probability for mutating an individual
    #
    # NGEN  is the number of generations for which the
    #       evolution runs
    CXPB, MUTPB, NGEN = 0.5, 0.2, 40

    print("Start of evolution")

    # Evaluate the entire population
    fitnesses = list(p.map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    print("  Evaluated %i individuals" % len(pop))

    # Begin the evolution
    for g in range(NGEN):
        print("-- Generation %i --" % g)

        offspring = toolbox.select(pop, len(pop))
        offspring = list(p.map(toolbox.clone, offspring))

        for child1, child2 in zip(offspring[::2], offspring[1::2]):

            if random.random() < CXPB:
                toolbox.mate(child1, child2)

                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:

            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = p.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        print("  Evaluated %i individuals" % len(invalid_ind))

        pop[:] = offspring

        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5

        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)

    print("-- End of (successful) evolution --")

    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
    e.calc_labels(best_ind)
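# The loop above drives the Pool explicitly via p.map.  DEAP's documented alternative is
# to register the pool's map on the toolbox, so toolbox.map (and the built-in algorithms
# that use it) evaluate in parallel.  A self-contained sketch with a placeholder OneMax
# evaluator; the 4-worker pool mirrors the snippet, everything else is an assumption.
import random
from multiprocessing import Pool
from deap import base, creator, tools

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

def eval_onemax(individual):
    return (sum(individual),)

toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, 50)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", eval_onemax)

if __name__ == "__main__":
    pool = Pool(4)
    toolbox.register("map", pool.map)               # DEAP hook: parallel toolbox.map
    pop = toolbox.population(n=100)
    fitnesses = list(toolbox.map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    pool.close()
    pool.join()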
예제 #53
0
def evolve(Xsource, Ysource, Xtarget, Ytarget, file, mutation_rate, full_init, dim_p=20, eta_p=0.1):
    """
    Running GA algorithms, where each individual is a set of target pseudo labels.
    :return: the best solution of GAs.
    """
    exe_time = 0
    start = time.time()
    global ns, nt, C, Xs, Ys, Xt, Yt, YY, K, A, e, M0, L, dim, eta
    dim = dim_p
    eta = eta_p
    archive = []
    Xs = Xsource
    Ys = Ysource
    Xt = Xtarget
    Yt = Ytarget
    ns, nt = Xs.shape[0], Xt.shape[0]
    C = len(np.unique(Ys))

    # Transform data using gfk
    gfk = GFK.GFK(dim=dim)
    _, Xs_new, Xt_new = gfk.fit(Xs, Xt)
    Xs_new, Xt_new = Xs_new.T, Xt_new.T
    X = np.hstack((Xs_new, Xt_new))
    X /= np.linalg.norm(X, axis=0)
    Xs = X[:, :ns].T
    Xt = X[:, ns:].T

    # do not use any feature transformation
    # X = np.hstack((Xs.T, Xt.T))
    # X /= np.linalg.norm(X, axis=0)
    # Xs = X[:, :ns].T
    # Xt = X[:, ns:].T

    # coral = CORAL.CORAL()
    # Xs = coral.fit(Xs, Xt)
    # X = np.hstack((Xs.T, Xt.T))
    # X /= np.linalg.norm(X, axis=0)
    # Xs = X[:, :ns].T
    # Xt = X[:, ns:].T

    # tca = TCA.TCA(kernel_type='linear', dim=dim, lamb=1, gamma=1.0)
    # Xs_new, Xt_new = tca.fit(Xs, Xt)
    # X = np.hstack((Xs_new.T, Xt_new.T))
    # X /= np.linalg.norm(X, axis=0)
    # Xs = X[:, :ns].T
    # Xt = X[:, ns:].T

    # build some matrices that are not changed
    K = kernel(kernel_type, X, X2=None, gamma=gamma)
    A = np.diagflat(np.vstack((np.ones((ns, 1)), np.zeros((nt, 1)))))
    e = np.vstack((1.0 / ns * np.ones((ns, 1)), -1.0 / nt * np.ones((nt, 1))))
    M0 = e * e.T * C
    L = Helpers.laplacian_matrix(X.T, p)

    YY = np.zeros((ns, C))
    for c in range(1, C + 1):
        ind = np.where(Ys == c)
        YY[ind, c - 1] = 1
    YY = np.vstack((YY, np.zeros((nt, C))))

    pos_min = 1
    pos_max = C

    # parameters for GA
    N_BIT = nt
    MUTATION_RATE = 1.0/N_BIT*C
    MPB = mutation_rate
    CXPB = 1-mutation_rate

    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Ind", list, fitness=creator.FitnessMin)
    creator.create("Pop", list)

    # for creating the population
    toolbox.register("bit", random.randint, a=pos_min, b=pos_max)
    toolbox.register("ind", tools.initRepeat, creator.Ind, toolbox.bit, n=N_BIT)
    toolbox.register("pop", tools.initRepeat, creator.Pop, toolbox.ind)
    # for evaluation
    toolbox.register("evaluate", fitness_evaluation)
    # for genetic operators
    toolbox.register("select", tools.selTournament, tournsize=2)
    toolbox.register("crossover", tools.cxUniform, indpb=0.5)
    toolbox.register("mutate", tools.mutUniformInt, low=pos_min, up=pos_max,
                     indpb=MUTATION_RATE)
    # pool = multiprocessing.Pool(4)
    # toolbox.register("map", pool.map)
    # initialize some individuals by predefined classifiers
    pop = toolbox.pop(n=N_IND)

    classifiers = list([])
    classifiers.append(KNeighborsClassifier(1))
    # classifiers.append(SVC(kernel="linear", C=0.025, random_state=np.random.randint(2 ** 10)))
    # classifiers.append(GaussianProcessClassifier(1.0 * RBF(1.0), random_state=np.random.randint(2 ** 10)))
    # classifiers.append(KNeighborsClassifier(3))
    # classifiers.append(SVC(kernel="rbf", C=1, gamma=2, random_state=np.random.randint(2 ** 10)))
    # classifiers.append(DecisionTreeClassifier(max_depth=5, random_state=np.random.randint(2 ** 10)))
    # classifiers.append(KNeighborsClassifier(5))
    # classifiers.append(GaussianNB())
    # classifiers.append(RandomForestClassifier(max_depth=5, n_estimators=10, random_state=np.random.randint(2 ** 10)))
    # classifiers.append(AdaBoostClassifier(random_state=np.random.randint(2 ** 10)))

    step = N_IND // len(classifiers)  # integer stride so it can index into the population
    for ind_index, classifier in enumerate(classifiers):
        classifier.fit(Xs, Ys)
        Yt_pseu = classifier.predict(Xt)
        for bit_index, value in enumerate(Yt_pseu):
            pop[ind_index*step][bit_index] = value

    if full_init:
        Helpers.opposite_init(pop, pos_min, pos_max)

    # evaluate the initialized populations
    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit,

    hof = tools.HallOfFame(maxsize=1)
    hof.update(pop)
    exe_time = exe_time + time.time()-start

    for g in range(N_GEN):
        file.write("*****Iteration %d*****\n" % (g+1))
        start = time.time()
        # selection
        offspring = toolbox.select(pop, len(pop))
        offspring = list(map(toolbox.clone, offspring))  # materialise as a list so it can be sliced and appended to

        # applying crossover
        for c1, c2 in zip(offspring[::2], offspring[1::2]):
            if np.random.rand() < CXPB:
                toolbox.crossover(c1, c2)
                del c1.fitness.values
                del c2.fitness.values

        # applying mutation
        for mutant in offspring:
            if np.random.rand() < MPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # opposite_local(offspring)

        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fitness in zip(invalid_ind, fitnesses):
            ind.fitness.values = fitness,

        # now select the best individual from offspring
        # pass it to the single step meda to refine the label
        best_inds = tools.selBest(offspring, N_IND // 10)  # selBest needs an integer count

        for best_ind in best_inds:
            Yt_pseu = label_evolve(best_ind)
            new_ind = toolbox.ind()
            for index, label in enumerate(Yt_pseu):
                new_ind[index] = label
            new_ind.fitness.values = fitness_evaluation(new_ind),
            offspring.append(new_ind)
            archive.append(new_ind)

        # The population is entirely replaced by the offspring
        exe_time = exe_time + time.time() - start
        pop[:] = tools.selBest(offspring + list(hof), len(pop))
        hof.update(pop)
        file.write('Average distance: %f\n' %(Helpers.pop_distance(pop)))
        file.write('Best fitness: %f\n' %(hof[0].fitness.values[0]))

        best_ind = tools.selBest(pop, 1)[0]
        acc = np.mean(best_ind == Yt)
        file.write("Accuracy of the best individual: %f\n" % acc)

        no_10p = int(0.1*N_IND)
        top10 = tools.selBest(pop, no_10p)
        vote_label = Helpers.voting(top10)
        acc = np.mean(vote_label == Yt)
        file.write("Accuracy of the 10%% population: %f\n" % acc)

        # Use the whole population
        vote_label = Helpers.voting(pop)
        acc = np.mean(vote_label == Yt)
        file.write("Accuracy of the population: %f\n" % acc)

    file.write("*****Final result*****\n")
    best_ind = tools.selBest(pop, 1)[0]
    acc = np.mean(best_ind == Yt)
    file.write("Accuracy of the best individual: %f\n" % acc)

    best_evolve = label_evolve(best_ind)
    acc = np.mean(best_evolve == Yt)
    return_acc = acc
    file.write("Accuracy of the evovled best individual: %f\n" % acc)

    top10 = tools.selBest(pop, 10)
    vote_label = Helpers.voting(top10)
    acc = np.mean(vote_label == Yt)
    file.write("Accuracy of the 10%% population: %f\n" % acc)

    # Use the whole population
    vote_label = Helpers.voting(pop)
    acc = np.mean(vote_label == Yt)
    file.write("Accuracy of the population: %f\n" % acc)

    # Use the archive
    if len(archive)>0:
        vote_label = Helpers.voting(archive)
        acc = np.mean(vote_label == Yt)
        file.write("Accuracy of the archive: %f\n" % acc)

    file.write('GA-MEDA time: %f' % (exe_time)+'\n')
    return return_acc
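# Helpers.voting above is not shown.  A hedged sketch of the per-instance majority vote
# it is assumed to perform: each individual is a vector of pseudo labels for the target
# set, and the population votes column by column.  The helper name and its exact
# behaviour in GA-MEDA are assumptions.
import numpy as np

def majority_vote(individuals):
    labels = np.asarray(individuals)               # shape: (n_individuals, n_target_instances)
    voted = []
    for column in labels.T:                         # one target instance at a time
        values, counts = np.unique(column, return_counts=True)
        voted.append(values[np.argmax(counts)])     # most frequent label wins
    return np.asarray(voted)

# tiny usage check: three voters, three target instances
print(majority_vote([[1, 2, 2], [1, 2, 3], [1, 3, 3]]))   # -> [1 2 3]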
예제 #54
0
def main():
    random.seed(42)

    # create an initial population of 300 individuals (where
    # each individual is a list of integers)
    pop = toolbox.population(n=300)

    # CXPB  is the probability with which two individuals
    #       are crossed
    #
    # MUTPB is the probability for mutating an individual
    CXPB, MUTPB = 0.5, 0.2

    print("Start of evolution")

    # Evaluate the entire population
    fitnesses = [toolbox.evaluate(ind) for ind in pop]
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    print("  Evaluated %i individuals" % len(pop))

    # Extracting all the fitnesses of the individuals
    fits = [ind.fitness.values[0] for ind in pop]

    # Variable keeping track of the number of generations
    g = 0

    # Begin the evolution
    while max(fits) < 100 and g < 1000:
        # A new generation
        g = g + 1
        print("-- Generation %i --" % g)

        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = [toolbox.clone(ind) for ind in offspring]

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):

            # cross two individuals with probability CXPB
            if random.random() < CXPB:
                toolbox.mate(child1, child2)

                # fitness values of the children
                # must be recalculated later
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:

            # mutate an individual with probability MUTPB
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        print("Evaluated %i individuals" % len(invalid_ind))

        # The population is entirely replaced by the offspring
        pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5

        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)

    print("-- End of (successful) evolution --")

    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
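# The manual min/max/avg/std bookkeeping used in this loop (and in several of the other
# examples here) can equivalently be delegated to DEAP's Statistics and Logbook
# utilities.  A minimal sketch; the population handling itself is unchanged.
import numpy
from deap import tools

stats = tools.Statistics(key=lambda ind: ind.fitness.values[0])
stats.register("min", numpy.min)
stats.register("max", numpy.max)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)

logbook = tools.Logbook()
logbook.header = "gen", "evals", "min", "max", "avg", "std"
# inside the generation loop, after pop[:] = offspring:
#     record = stats.compile(pop)
#     logbook.record(gen=g, evals=len(invalid_ind), **record)
#     print(logbook.stream)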
예제 #55
0
toolbox.register(
    "mutate", tools.mutFlipBit, indpb=0.05
)  # the registered method "mutate" will launch tools.mutFlipBit with indpb fixed to 0.05 (the probability of each attribute to flip)
toolbox.register(
    "select", tools.selTournament, tournsize=3
)  # the registered method "select" will launch selTournament with the argument tournsize fixed to 3 -> see "selTournament" for more info.
"""
START OF EXPERIMENT
"""

# generate a population of 300 "Individual" objects, each of them a list of 100 random values in [0, 1]
pop = toolbox.population(n=300)

# evolve the population for ngen generations, evaluating each one; eaSimple uses the operators registered above (lines 34 to 37) and needs these exact names to work correctly.
result = algorithms.eaSimple(pop,
                             toolbox,
                             cxpb=0.5,
                             mutpb=0.2,
                             ngen=10,
                             verbose=False)  # NGEN = 10
# print results
print('Current best fitness:', evalOneMax(tools.selBest(pop, k=1)[0]))

# continue evolving the same population for 50 more generations with the same operators.
result = algorithms.eaSimple(pop,
                             toolbox,
                             cxpb=0.5,
                             mutpb=0.2,
                             ngen=50,
                             verbose=False)  # NGEN = 50
# print results
print('Current best fitness:', evalOneMax(tools.selBest(pop, k=1)[0]))
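# eaSimple also accepts a HallOfFame, which keeps the best individual ever seen even if
# it is later lost from the population; tools.selBest(pop, k=1) only looks at the final
# generation.  A hedged sketch reusing pop, toolbox and evalOneMax from the snippet above.
hof = tools.HallOfFame(1)
pop, logbook = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=0.5,
                                   mutpb=0.2,
                                   ngen=10,
                                   halloffame=hof,
                                   verbose=False)
print('Best fitness ever seen:', evalOneMax(hof[0]))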
예제 #56
0
def main():
    random.seed(64)
    pop = toolbox.population(n=30)
    
    CXPB, MUTPB, NGEN = 0.5, 0.2, 10
    
    print("Start of evolution")
    
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    
    print("  Evaluated %i individuals" % len(pop))
    
    for g in range(NGEN):
        print("-- Generation %i --" % g)
        
        offspring = toolbox.select(pop, len(pop))
        
        offspring = list(map(toolbox.clone, offspring))
    
        
        for child1, child2 in zip(offspring[::2], offspring[1::2]):

            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:

            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
    
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        
        print("  Evaluated %i individuals" % len(invalid_ind))
        
        pop[:] = offspring
        
        fits = [ind.fitness.values[0] for ind in pop]
        
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        
        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)
        print('Total time is ' + str(time.time() - start_time) + ' seconds.')
    
    print("-- End of (successful) evolution --")
    
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
    
    new_features = []
    for i in range(len(best_ind)):  # check every gene of the best chromosome
        if best_ind[i] == 1:
            new_features.append(i)
    ##############################################################
    ##############################################################
    ##############################################################
    ####Ensemble Learning
    print('#####################################################')
    new_X_train = Xtrain[new_features] 
    new_Y_train = Ytrain

    new_X_test = Xtest[new_features]
    new_Y_test = Ytest

    clf = MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(150,50,15,5,3), random_state=1)
    y_pred = clf.fit(new_X_train, new_Y_train).predict(new_X_test)
    f = open('MLP_GA.txt','w')
    print(classification_report(Ytest,y_pred))
    f.write(classification_report(Ytest,y_pred))
    f.write('\n')
    f.write('ROC = ' + str(roc_auc_score(Ytest,y_pred)))
    f.write('\n')
    f.close()
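# The "evaluate" operator used by the GA above is registered elsewhere in this script.
# A hedged sketch of the kind of mask-based fitness such a feature-selection GA commonly
# uses; Xtrain and Ytrain are the names from the script above, while the classifier,
# the cross-validation folds and the scoring are assumptions.
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

def eval_feature_mask(individual):
    # column indices switched on in the binary chromosome
    selected = [i for i, bit in enumerate(individual) if bit == 1]
    if not selected:                    # an empty mask cannot be scored
        return (0.0,)
    score = cross_val_score(KNeighborsClassifier(),
                            Xtrain[selected], Ytrain, cv=3).mean()
    return (score,)

# toolbox.register("evaluate", eval_feature_mask) would wire it into the GA loop above.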
    def generate(self, n_pop, cxpb=0.5, mutxpb=0.2, ngen=5, set_toolbox=False):
        """
            Generate evolved population
            Parameters
            -----------
                n_pop : {int}
                        population size
                cxpb  : {float}
                        crossover probability
                mutxpb: {float}
                        mutation probability
                ngen  : {int}
                        number of generations
                set_toolbox : {boolean}
                              If True, a custom toolbox must be created and registered
                              before calling this method. If False, the default toolbox is used.
            Returns
            --------
                Fittest population
        """

        if self.verbose == 1:
            print(
                "Population: {}, crossover_probability: {}, mutation_probability: {}, total generations: {}"
                .format(n_pop, cxpb, mutxpb, ngen))

        if not set_toolbox:
            self.toolbox = self._default_toolbox()
        else:
            raise Exception(
                "Please create a toolbox. Use create_toolbox to create one and register_toolbox to register it, or set set_toolbox=False to use the default toolbox."
            )
        pop = self.toolbox.population(n_pop)
        CXPB, MUTPB, NGEN = cxpb, mutxpb, ngen

        # Evaluate the entire population
        print("EVOLVING.......")
        fitnesses = list(map(self.toolbox.evaluate, pop))

        for ind, fit in zip(pop, fitnesses):
            ind.fitness.values = fit

        for g in range(NGEN):
            print("-- GENERATION {} --".format(g + 1))
            offspring = self.toolbox.select(pop, len(pop))
            self.fitness_in_generation[str(g + 1)] = max(
                [ind.fitness.values[0] for ind in pop])
            # Clone the selected individuals
            offspring = list(map(self.toolbox.clone, offspring))

            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    self.toolbox.mate(child1, child2)
                    del child1.fitness.values
                    del child2.fitness.values

            for mutant in offspring:
                if random.random() < MUTPB:
                    self.toolbox.mutate(mutant)
                    del mutant.fitness.values

            # Evaluate the individuals with an invalid fitness
            weak_ind = [ind for ind in offspring if not ind.fitness.valid]
            fitnesses = list(map(self.toolbox.evaluate, weak_ind))
            for ind, fit in zip(weak_ind, fitnesses):
                ind.fitness.values = fit
            print("Evaluated %i individuals" % len(weak_ind))

            # The population is entirely replaced by the offspring
            pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        if self.verbose == 1:
            print("  Min %s" % min(fits))
            print("  Max %s" % max(fits))
            print("  Avg %s" % mean)
            print("  Std %s" % std)

        print("-- Only the fittest survives --")

        self.best_ind = tools.selBest(pop, 1)[0]
        print("Best individual is %s, %s" %
              (self.best_ind, self.best_ind.fitness.values))
        self.get_final_scores(pop, fits)

        return pop
예제 #58
0
if (verbose):
    print("Building first part of training dataset...")
clfin = []
tops = []
for population in training:
    pareto = tools.sortNondominated(population, len(population))
    top = pareto[0]  #the actual pareto front
    #using best and worst elements to build training data
    for member in top:
        #careful avoiding duplicates
        if member not in tops:
            tops.append(member)
        if member not in clfin:
            clfin.append(member)
    elems = len(clfin)
    for member in tools.selBest(population, k=len(population))[:-elems]:
        if member not in clfin:
            clfin.append(member)

##############
# running nsga

if (verbose):
    print("Starting nsga runs")


def uniform(low, up, size=None):
    try:
        return [random.uniform(a, b) for a, b in zip(low, up)]
    except TypeError:
        # scalar bounds: broadcast them to the requested size
        return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
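# In DEAP's NSGA-II example a helper like uniform() is registered as the attribute
# initializer.  A hedged sketch of that registration, assuming the toolbox and
# creator.Individual defined elsewhere in this script; BOUND_LOW, BOUND_UP and NDIM
# stand in for the problem's real bounds and dimensionality.
BOUND_LOW, BOUND_UP, NDIM = 0.0, 1.0, 30

toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("select", tools.selNSGA2)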
예제 #59
0
    def main(self,NGen=1000,NIndiv=100,DoPlot=True):
        #os.system("rm png/*.png")
        #random.seed(64)
        #np.random.seed(64)
        toolbox=self.toolbox
        # pool = multiprocessing.Pool(processes=6)
        # toolbox.register("map", pool.map)
        self.pop = toolbox.population(n=NIndiv)
        self.hof = tools.HallOfFame(1, similar=numpy.array_equal)
        #self.hof = tools.ParetoFront(1, similar=numpy.array_equal)

        # stats = tools.Statistics(lambda ind: ind.fitness.values)
        # stats.register("avg", numpy.mean)
        # stats.register("std", numpy.std)
        # stats.register("min", numpy.min)
        # stats.register("max", numpy.max)


        for indiv in self.pop:
            indiv.fill(0)

        #print "Best indiv start",
        #self.ArrayMethodsMachine.PM.PrintIndiv(self.IslandBestIndiv)
        #print
        if self.IslandBestIndiv is not None:
            #SModelArrayMP,Alpha=self.ArrayMethodsMachine.DeconvCLEAN()
            #AModelArrayMP=None
            DicoModelMP=self.ListInitIslands[self.iIsland]
            if DicoModelMP is not None:
                SModelArrayMP,AModelArrayMP=DicoModelMP["S"],DicoModelMP["Alpha"]
            else:
                SModelArrayMP,_=self.ArrayMethodsMachine.DeconvCLEAN()
                AModelArrayMP=np.zeros_like(SModelArrayMP)

            if NGen==0: 
                self.ArrayMethodsMachine.PM.ReinitPop(self.pop,SModelArrayMP,AlphaModel=AModelArrayMP)
                self.ArrayMethodsMachine.KillWorkers()
                return self.pop[0]


            if np.max(np.abs(self.IslandBestIndiv))==0:
                #print "NEW"
                self.ArrayMethodsMachine.PM.ReinitPop(self.pop,SModelArrayMP,AlphaModel=AModelArrayMP)
            else:
                #print "MIX"
                NIndiv=len(self.pop)//10  # integer count for the slices below
                pop0=self.pop[0:NIndiv]
                pop1=self.pop[NIndiv::]

                pop1=self.pop
                pop0=[]

                pop1=self.pop[0:1]
                pop0=self.pop[1::]

                # self.ArrayMethodsMachine.PM.ReinitPop(pop0,SModelArray)

                # half with the best indiv
                SModelArrayBest=self.ArrayMethodsMachine.PM.ArrayToSubArray(self.IslandBestIndiv,"S")
                AlphaModel=None
                if "Alpha" in self.ArrayMethodsMachine.PM.SolveParam:
                    AlphaModel=self.ArrayMethodsMachine.PM.ArrayToSubArray(self.IslandBestIndiv,"Alpha")
                GSigModel=None
                if "GSig" in self.ArrayMethodsMachine.PM.SolveParam:
                    GSigModel=self.ArrayMethodsMachine.PM.ArrayToSubArray(self.IslandBestIndiv,"GSig")
                self.ArrayMethodsMachine.PM.ReinitPop(pop1,SModelArrayBest,AlphaModel=AlphaModel,GSigModel=GSigModel)

                # half of the pop with the MP model
                #SModelArrayBest0=SModelArrayBest.copy()
                #mask=(SModelArrayBest0==0)
                #SModelArrayBest0[mask]=SModelArrayMP[mask]
                #self.ArrayMethodsMachine.PM.ReinitPop(pop0,SModelArrayBest0,AlphaModel=AlphaModel,GSigModel=GSigModel)
                self.ArrayMethodsMachine.PM.ReinitPop(pop0,SModelArrayMP,AlphaModel=AModelArrayMP)

                # _,Chi20=self.ArrayMethodsMachine.GiveFitnessPop(pop0)
                # _,Chi21=self.ArrayMethodsMachine.GiveFitnessPop(pop1)
                # print
                # print Chi20
                # print Chi21
                # stop



                self.pop=pop1+pop0
        #print




        # if self.IslandBestIndiv is not None:

        #     if np.max(np.abs(self.IslandBestIndiv))==0:
        #         #print "deconv"
        #         SModelArray,Alpha=self.ArrayMethodsMachine.DeconvCLEAN()

        #         #print "Estimated alpha",Alpha
        #         AlphaModel=np.zeros_like(SModelArray)+Alpha
        #         #AlphaModel[SModelArray==np.max(SModelArray)]=0

        #         self.ArrayMethodsMachine.PM.ReinitPop(self.pop,SModelArray)#,AlphaModel=AlphaModel)

        #         #print self.ArrayMethodsMachine.GiveFitness(self.pop[0],DoPlot=True)
        #         #stop
        #         #print self.pop
        #     else:
        #         SModelArray=self.ArrayMethodsMachine.PM.ArrayToSubArray(self.IslandBestIndiv,"S")
        #         AlphaModel=None
        #         if "Alpha" in self.ArrayMethodsMachine.PM.SolveParam:
        #             AlphaModel=self.ArrayMethodsMachine.PM.ArrayToSubArray(self.IslandBestIndiv,"Alpha")
                
        #         GSigModel=None
        #         if "GSig" in self.ArrayMethodsMachine.PM.SolveParam:
        #             GSigModel=self.ArrayMethodsMachine.PM.ArrayToSubArray(self.IslandBestIndiv,"GSig")
                
        #         self.ArrayMethodsMachine.PM.ReinitPop(self.pop,SModelArray,AlphaModel=AlphaModel,GSigModel=GSigModel)

        # set best Chi2
        # _=self.ArrayMethodsMachine.GiveFitnessPop([self.IslandBestIndiv])
        _=self.ArrayMethodsMachine.GiveFitnessPop(self.pop)



        self.pop, log= algorithms.eaSimple(self.pop, toolbox, cxpb=0.3, mutpb=0.5, ngen=NGen, 
                                           halloffame=self.hof, 
                                           #stats=stats,
                                           verbose=False, 
                                           ArrayMethodsMachine=self.ArrayMethodsMachine,
                                           DoPlot=DoPlot,
                                           MutConfig=self.MutConfig)

        self.ArrayMethodsMachine.KillWorkers()

        # #:param mu: The number of individuals to select for the next generation.
        # #:param lambda\_: The number of children to produce at each generation.
        # #:param cxpb: The probability that an offspring is produced by crossover.
        # #:param mutpb: The probability that an offspring is produced by mutation.

        # mu=70
        # lambda_=50
        # cxpb=0.3
        # mutpb=0.5
        # ngen=1000

        # self.pop, log= algorithms.eaMuPlusLambda(self.pop, toolbox, mu, lambda_, cxpb, mutpb, ngen,
        #                               stats=None, halloffame=None, verbose=__debug__,
        #                               ArrayMethodsMachine=self.ArrayMethodsMachine)

        V = tools.selBest(self.pop, 1)[0]

        #print "Best indiv end"
        #self.ArrayMethodsMachine.PM.PrintIndiv(V)
        
        # V.fill(0)
        # S=self.ArrayMethodsMachine.PM.ArrayToSubArray(V,"S")
        # G=self.ArrayMethodsMachine.PM.ArrayToSubArray(V,"GSig")
        
        # S[0]=1.
        # #S[1]=2.
        # G[0]=1.
        # #G[1]=2.

        # MA=self.ArrayMethodsMachine.PM.GiveModelArray(V)

        # # print "Sum best indiv",MA.sum(axis=1)
        # # print "Size indiv",V.size
        # # print "indiv",V
        # # print self.ArrayMethodsMachine.ListPixData
        # # print MA[0,:]

        return V
예제 #60
0
toolbox.register('mutate', tools.mutFlipBit, indpb= 0.01)
toolbox.register('select', tools.selRoulette)

populacao = toolbox.population(20)
prob_crossover = 1.0
prob_mutacao = 0.01
num_geracoes = 100

estatisticas = tools.Statistics(key= lambda individuo: individuo.fitness.values)
estatisticas.register('max', numpy.max)
estatisticas.register('min', numpy.min)
estatisticas.register('med', numpy.mean)
estatisticas.register('std', numpy.std)

populacao, info = algorithms.eaSimple(populacao, toolbox, prob_crossover, prob_mutacao, num_geracoes, estatisticas)

melhores = tools.selBest(populacao, 1)

for i in melhores:
  print(i)
  print(i.fitness)
  soma = 0
  for s in range(len(lista_produtos)):
    if(i[s] == 1):
      soma += valores[s]
      print(f'Name: {nomes[s]} - Value: {valores[s]}')
  print(f'Best solution: {soma}')

valores_grafico = info.select('max')
plt.plot(valores_grafico)
plt.title('Tracking of the values')