Example No. 1
    def steady_state_crossover(self):
        self.offspring = list(map(self.toolbox.clone, self.pop))
        lista1 = list(self.offspring)
        children = []
        parents = tools.selRandom(self.offspring, self.ss_gap)
        for parent1, parent2 in zip(parents[::2], parents[1::2]):
            child1, child2 = self.toolbox.clone(parent1), self.toolbox.clone(
                parent2)
            self.toolbox.mate(child1, child2)

            if random.random() < self.mutation_rate:
                self.toolbox.mutate(child1)

            if random.random() < self.mutation_rate:
                self.toolbox.mutate(child2)

            del child1.fitness.values
            del child2.fitness.values

            children.append(child1)
            children.append(child2)

        for child in children:
            self.offspring.remove(tools.selRandom(self.offspring, 1)[0])
            self.offspring.append(child)
Example No. 2
def selection(individuals, k, tournsize, alpha, fit_attr="fitness"):
    chosen = []
    N = int(k*alpha)
    for i in range(N):
        aspirants = tools.selRandom(individuals, tournsize)
        chosen.append(max(aspirants, key=attrgetter(fit_attr)))

    new = tools.selRandom(individuals,k-N)
    return chosen+new
Example No. 3
def Tournament(individuals, finalselect, tournsize):
    chosen = []
    for i in range(finalselect):
        aspirants = tools.selRandom(individuals, tournsize)
        chosen.append(max(aspirants, key=selectCriteria))
    return chosen
Example No. 4
def _selMetaTournament(individuals,
                       k,
                       tournsize,
                       meta_model,
                       primitives_to_hash_dic,
                       gptree,
                       pset,
                       df,
                       le,
                       max_pipeline_size,
                       fit_attr="fitness"):
    """Select the best individual among *tournsize* randomly chosen
    individuals, *k* times. The list returned contains
    references to the input *individuals*.

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :param tournsize: The number of individuals participating in each tournament.
    :param fit_attr: The attribute of individuals to use as selection criterion
    :returns: A list of selected individuals.

    This function uses the :func:`~random.choice` function from the python base
    :mod:`random` module.
    """
    chosen = []
    for i in range(k):
        aspirants = selRandom(individuals, tournsize)
        test_df = create_ranking_df_from_pop(aspirants, primitives_to_hash_dic,
                                             gptree, pset, df,
                                             max_pipeline_size)
        top_offspring_index = rank_pop(meta_model, test_df, 1, le)
        top_offspring = [aspirants[i] for i in top_offspring_index]
        chosen.append(top_offspring[0])
    return chosen
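For reference, the generic pattern this docstring describes, and that most examples on this page share, is plain tournament selection: draw tournsize aspirants uniformly with tools.selRandom and keep the fittest, k times. A minimal self-contained sketch (essentially what DEAP's built-in tools.selTournament already does):

from operator import attrgetter

from deap import tools


def sel_tournament_sketch(individuals, k, tournsize, fit_attr="fitness"):
    # Draw tournsize aspirants at random and keep the fittest, repeated k times.
    chosen = []
    for _ in range(k):
        aspirants = tools.selRandom(individuals, tournsize)
        chosen.append(max(aspirants, key=attrgetter(fit_attr)))
    return chosen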
Example No. 5
    def mutate_nodes(self, pop = None):
        """
        Mutates the nodes - fnParameters, including bias and bias weight
        """
        #TODO: consider mutating the prodConstant [EXPERIMENT]
        if verbose:
            print ">> Mutation -> Nodes";

        if pop == None:
            pop = self.pop['node_pop'];

        if random.random() < self.params['prob_mutation']:
            ind = tools.selRandom(pop,1).pop();
            mutant = self.tbox.clone(ind);
            self.tbox.mutate_fnParams(mutant);
            mutant.id = None;
            del mutant.fitness.values;
            if verbose:
                print "parent", ind;
                print "child", mutant;


            return mutant;

        else:
            return None;
Example No. 6
    def mutate_models(self, pop = None):
        """
        Mutates the models  - {lateral connection, gaussian mutation of inputs, context layer}

        """
        if verbose:
            print ">> Mutation -> Models";

        if pop == None:
            pop = self.pop['model_pop'];

        if random.random() < self.params['prob_mutation']:
            ind = tools.selRandom(pop,1).pop();
            mutant = self.tbox.clone(ind);
            self.tbox.mutate_conn(mutant);
            mutant.id = None;
            del mutant.fitness.values;
            if verbose:
                print "parent", ind;
                print "child", mutant;

            return mutant;

        else:
            return None;
Example No. 7
    def mutate_connHO(self, pop = None):
        """
        Mutates the connection components (Hidden to Output layers weights)
        """

        if verbose:
            print ">> Mutation -> connHO";

        if pop == None:
            pop = self.pop['connActive_HO_pop'];

        if random.random() < self.params['prob_mutation']:
            ind = tools.selRandom(pop,1).pop();
            mutant = self.tbox.clone(ind);
            for i in xrange(len(mutant)):
                self.tbox.mutate_conn(mutant[i]);
            mutant.id = None;
            del mutant.fitness.values;
            if verbose:
                print "parent", ind;
                print "child", mutant;


            return mutant;

        else:
            return None;
Example No. 8
    def mutate_weightsHO(self, pop = None):
        """
        Mutates the weight components (Hidden to Output layers weights)
        """

        if verbose:
            print ">> Mutation -> WeightHO";

        if pop == None:
            pop = self.pop['connWeights_HO_pop'];

        if random.random() < self.params['prob_mutation']:
            ind = tools.selRandom(pop,1).pop();
            mutant = self.tbox.clone(ind);
            self.tbox.mutate_weights(mutant);
            mutant.id = None;
            del mutant.fitness.values;
            if verbose:
                print "parent", ind;
                print "child", mutant;


            return mutant;

        else:
            return None;
Example No. 9
def batch_tournament_selection(individuals,
                               k,
                               tournsize,
                               batch_size,
                               fit_attr="fitness"):
    fitness_cases_num = len(individuals[0].case_values)
    idx_cases_batch = np.arange(0, fitness_cases_num)
    np.random.shuffle(idx_cases_batch)
    _batches = np.array_split(idx_cases_batch,
                              max(fitness_cases_num // batch_size, 1))
    batch_ids = np.arange(0, len(_batches))
    assert len(_batches[0]) >= batch_size or fitness_cases_num < batch_size

    chosen = []
    while len(chosen) < k:
        batches: list = copy.deepcopy(batch_ids.tolist())
        while len(batches) > 0 and len(chosen) < k:
            idx_candidates = selRandom(individuals, tournsize)
            cand_fitness_for_this_batch = []
            for idx in idx_candidates:
                cand_fitness_for_this_batch.append(
                    np.mean(idx.case_values[_batches[batches[0]]]))
            idx_winner = np.argmin(cand_fitness_for_this_batch)
            winner = idx_candidates[idx_winner]
            chosen.append(winner)
            batches.pop(0)
    return chosen
Example No. 10
    def get_subset(self):
        if self.population:

            # random selection
            #sample = tools.selRandom(self.population, self.subset_size)

            # best selection
            #sample = tools.selBest(self.population, self.subset_size)

            # worst selection
            #sample = tools.selWorst(self.population, self.subset_size)

            # bestish selection
            sample = tools.selTournament(self.population, self.subset_size, 3)

            if self.best:
                sample[0] = self.best

            # if not unique, sample one more time
            sample_strings = [ind.__str__() for ind in sample]
            if len(set(sample_strings)) < len(sample_strings):
                for i, ind_i in enumerate(sample):
                    for j, ind_j in enumerate(sample[i + 1:]):
                        if ind_i == ind_j:
                            sample[i + 1 + j] = tools.selRandom(
                                self.population, 1)[0]

            print 'sample', sample
            self.subset = sample
            sample = self.pre_process(sample)
        else:
            sample = self.get_default()

        return sample
Example No. 11
	def selTournament(self, individuals, k, tournsize, fit_attr="fitness"):
		
		chosen = []
		for i in xrange(k):
			aspirants = tools.selRandom(individuals, tournsize)
			best = self.utils.getBest(aspirants)
			chosen.append(best)
		return chosen
Example No. 12
def _evolve(toolbox, optimizer, seed, gen=0, mu=1, lambda_=1, cxpb=1, mutp=1):
    random.seed(seed)
    np.random.seed(seed)

    pop = remove_twins(toolbox.population(n=mu))
    pop = list(toolbox.map(optimizer, pop))

    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitness = list(toolbox.map(toolbox.evaluate, invalid_ind))
    for ind, fit in zip(invalid_ind, fitness):
        ind.fitness.values = fit

    pop = toolbox.select(pop, mu)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", np.nanmin, axis=0)
    stats.register("max", np.nanmax, axis=0)
    stats.register("diversity", lambda pop: len(set(map(str, pop))))

    logbook = tools.Logbook()
    logbook.header = "gen", 'evals', 'min', 'max', 'diversity'

    record = stats.compile(pop)
    logbook.record(gen=0, evals=(len(invalid_ind)), **record)
    print(logbook.stream)
    if record['min'][0] == 0.0:
        return pop, logbook

    for g in range(1, gen):
        offspring = tools.selRandom(pop, mu)
        offspring = [toolbox.clone(ind) for ind in offspring]

        # mate
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= cxpb:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values
        # mutate
        for mutant in offspring:
            if random.random() <= mutp:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # re-evaluate
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitness = list(toolbox.map(toolbox.evaluate, invalid_ind))
        for ind, fit in zip(invalid_ind, fitness):
            ind.fitness.values = fit

        # select
        pop = toolbox.select(remove_twins(pop + offspring), mu)
        record = stats.compile(pop)
        logbook.record(gen=g, evals=len(invalid_ind), **record)
        print(logbook.stream)
        if record['min'][0] < 1E-4:
            break

    return pop, logbook
Example No. 14
def selTournamentPlus(individuals, k, tournsize):
    """
    Select individuals based on the sum of case values
    :param individuals: population
    :param k: number of offspring
    :param tournsize: tournament size
    :return:
    """
    chosen = []
    for i in range(k):
        aspirants = selRandom(individuals, tournsize)
        chosen.append(min(aspirants, key=lambda x: np.sum(x.case_values)))
    return chosen
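A hedged usage sketch for selTournamentPlus: it only reads each individual's case_values array (per-case errors assumed to be filled in during evaluation, with selRandom imported from deap.tools), so any object carrying that attribute works. The dummy class below is purely illustrative:

import numpy as np


class _DummyInd:
    # Stand-in exposing the only attribute selTournamentPlus touches.
    def __init__(self, case_values):
        self.case_values = np.asarray(case_values)


pop = [_DummyInd(np.random.rand(20)) for _ in range(50)]
parents = selTournamentPlus(pop, k=10, tournsize=5)  # winners have the lowest summed case error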
Example No. 15
def selCustom(individuals, k):

    #using DEAP's sortNondominated, we get a list of fronts,
    #where each front 'i' dominates front 'i+1'.
    #we use this to create a new attribute for each individual, called "rank"
    #the rank is then used as the fitness value for the tournament selection,
    #as specified by Ombuki et al.
    pareto_fronts = tools.sortNondominated(individuals, k)
    for front_rank in range(1, len(pareto_fronts) + 1):
        front = pareto_fronts[front_rank - 1]
        for ind in front:
            setattr(ind, 'rank', front_rank)

    #the first rank is the "elite" (Pareto-optimal) set of solutions
    #to which we want to guarantee a spot in the next generation
    #therefore, we extract them before the tournament selection takes place
    elite = pareto_fronts.pop(0)
    individuals_excluding_elite = [i for i in individuals if i not in elite]
    #we update k, the number of individuals to be chosen by the tournament selection
    k -= len(elite)

    #as specified by the paper
    tournsize = 4
    r_thresh = 0.8

    chosen = []
    for i in range(k):
        aspirants = tools.selRandom(individuals_excluding_elite, tournsize)
        if random.random() < r_thresh:
            chosen_individual = min(aspirants, key=attrgetter("rank"))
        else:
            chosen_individual = tools.selRandom(aspirants, 1)[0]
        chosen.append(chosen_individual)

    #add in the elite solutions
    chosen += elite
    return chosen
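To make the rank bookkeeping above concrete, here is a small self-contained illustration (hypothetical fitness values, two minimized objectives) of what tools.sortNondominated returns: front 0 holds the non-dominated individuals that selCustom treats as the elite, and each later front is dominated by the one before it.

from deap import base, creator, tools

creator.create("FitnessMin2", base.Fitness, weights=(-1.0, -1.0))
creator.create("Ind2", list, fitness=creator.FitnessMin2)

pop = [creator.Ind2([i]) for i in range(4)]
for ind, fit in zip(pop, [(1, 4), (2, 3), (3, 3), (4, 1)]):
    ind.fitness.values = fit

fronts = tools.sortNondominated(pop, len(pop))
# fronts[0] -> the three mutually non-dominated points; fronts[1] -> [(3, 3)], dominated by (2, 3)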
Example No. 16
    def selTournament(individuals, k, tournsize, noise_factor = 0):

        # add noise if necessary
        if noise_factor > 0:

            # add some noise to the fitness values
            pop_std = np.std([ind.fitness.values[0] for ind in individuals])
            if (pop_std == 0):
                pop_std = 1
            noise = np.random.normal(0,  np.sqrt(noise_factor) * pop_std,
                                    len(individuals))
            noisy_individuals = []

            for index, ind in enumerate(individuals):
                noisy_individuals.append((ind,
                                ind.fitness.values[0] + noise[index]))

            chosen = []
            for i in range(k):
                aspirants = tools.selRandom(individuals, tournsize)
                indices = [individuals.index(j) for j in aspirants]
                best = min([noisy_individuals[j] for j in indices],
                            key = lambda x:x[1])

                chosen.append(best[0])
            return chosen

        else:

            # normal tournament selection
            chosen = []

            for i in range(k):

                aspirants = tools.selRandom(individuals, tournsize)
                chosen.append(max(aspirants, key=attrgetter("fitness")))
            return chosen
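A hedged registration sketch, assuming the method above is reachable as a plain function (it takes no self) and that its module already imports numpy, tools and attrgetter: freezing noise_factor at registration time makes the noisy variant a drop-in replacement for ordinary tournament selection, with noise standard deviation sqrt(noise_factor) times the population's fitness standard deviation.

from deap import base

toolbox = base.Toolbox()
# sqrt(0.25) = 0.5, i.e. Gaussian noise with half the population's fitness std-dev.
toolbox.register("select", selTournament, tournsize=3, noise_factor=0.25)
# Later, inside the generational loop:
# parents = toolbox.select(pop, k=len(pop))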
Example No. 17
def improveTournament(individuals, k, tournsize, fit_attr="fitness"):
    chosen = []
    for i in xrange(k):
        aspirants = tools.selRandom(individuals, tournsize)

        # keep the aspirant with the lowest counter total
        chosenTemp = None
        totalCountersTemp = float('inf')
        for aspira in aspirants:
            totalCounters = checkCounters(aspira)
            if totalCounters <= totalCountersTemp:
                chosenTemp = aspira
                totalCountersTemp = totalCounters
        chosen.append(chosenTemp)
    return chosen
Example No. 18
	def steady_state_crossover(self):
		self.offspring = list(map(self.toolbox.clone, self.pop))
		lista1 = list(self.offspring)
		children = []
		parents = tools.selRandom(self.offspring, self.ss_gap)
		for parent1, parent2 in zip(parents[::2], parents[1::2]):
			child1, child2 = self.toolbox.clone(parent1), self.toolbox.clone(parent2)
			self.toolbox.mate(child1, child2)

			if random.random() < self.mutation_rate:
				self.toolbox.mutate(child1)

			if random.random() < self.mutation_rate:
				self.toolbox.mutate(child2)
				
			del child1.fitness.values
			del child2.fitness.values
			
			children.append(child1)
			children.append(child2)
			
		for child in children:
			self.offspring.remove(tools.selRandom(self.offspring, 1)[0])
			self.offspring.append(child)
Example No. 19
def selTournamentRemove(individuals, k, tournsize):
    """Select *k* individuals from the input *individuals* using *k*
    tournaments of *tournsize* individuals and remove them from the initial list.
    The list returned contains references to the input *individuals*.
    
    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :param tournsize: The number of individuals participating in each tournament.
    :returns: A list of selected individuals.
    
    This function uses the :func:`~random.choice` function from the python base
    :mod:`random` module.
    """
    chosen = []
    for i in xrange(k):
        aspirants = tools.selRandom(individuals, tournsize)
        best = max(aspirants, key=attrgetter("fitness"))
        chosen.append(best)
        individuals.remove(best)
    return chosen
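Because each winner is removed from individuals in place, repeated calls on the same list return disjoint winners and shrink the pool, so it is worth passing a copy when the original population must survive. A small hedged usage sketch (pop is assumed to be an already-evaluated population):

pool = list(pop)  # work on a copy; the pool shrinks in place
breeders = selTournamentRemove(pool, k=10, tournsize=3)
reserves = selTournamentRemove(pool, k=10, tournsize=3)  # disjoint from breeders
# len(pool) has now shrunk by 20.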
Example No. 20
    def replace_rand_by_new_inds(self, pop, num):

        if MPI_RANK != 0: return None

        r_pop = tools.selRandom(pop, num)

        nd = self.get_nondominated_inds(pop)
        nd_num = len(nd)
        n_pop_f = int(self.n_pop * 0.3)

        for ppp in r_pop:

            new_ind = self.toolbox.individual()

            if nd_num < n_pop_f and ppp in nd:
                continue

            for j in range(len(ppp)):
                ppp[j] = new_ind[j]

            del ppp.fitness.values

        return pop
Example No. 21
def selTournament(individuals, k, tournsize, fit_attr="fitness"):

    # reproducing selTournament here because I can't find a way to get
    # algorithms.selTournament to take more than one fitness value
    # into account

    chosen = []
    for i in xrange(k):
        aspirants = tools.selRandom(individuals, tournsize)

        # get DEAP's best
        best = max(aspirants, key=attrgetter(fit_attr))
        bestFitness = calculateFitness(best.fitness.getValues())

        # check for individuals with a better fitness once depth penalty has been applied
        for individual in aspirants:
            thisFitness = calculateFitness(individual.fitness.getValues())
            if (thisFitness > bestFitness):
                best = individual
                bestFitness = thisFitness

        chosen.append(best)
    return chosen
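The comments above rely on an external calculateFitness that collapses the (possibly multi-valued) fitness tuple into a single depth-penalised score. A purely hypothetical illustration of such a helper; the real one in this project may differ:

def calculateFitness(values):
    # Hypothetical layout: first value is the raw score, second is a tree-depth penalty.
    raw_score, depth_penalty = values
    return raw_score - depth_penalty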
Example No. 22
                #cc1, cc2 are brand-new individuals
                cc1, cc2 = toolbox.mateG(c1, c2)
                cxOut.append(cc1)
                cxOut.append(cc2)

        # mutation
        mutantOut = []
        for mutant in offspring:
            if random.random() < GMUTPB:
                mut = toolbox.mutateG(mutant, dest)
                if mut != None:
                    mutantOut.append(mut)   
        #compose a big group of population
        #bigPop = pop + mutantOut + cxOut
        bigPop = mutantOut + cxOut + lPop
        gPop = tools.selRandom(bigPop, GPOPU)

        """ L part !!!"""
        # select
        #offspring = toolbox.selectL(lPop, 2) 
        offspring = tools.selBest(lPop, LPOPU) 
        # deep copy out
        # offspring = list(map(toolbox.clone, offspring))
        # cross over
        cxOut = []
        for c1 in offspring:
            if random.random() < LCXPB:
                #cc1, cc2 are brand-new individuals
                #randomly select c2 from gPop
                c2 = tools.selRandom(gPop, 1)[0]
                cc1, cc2 = toolbox.mateL(c1, c2)
Example No. 23
def GAEEII_IVFm(data, dimensions, number_endmembers):
    start_time = time.time()

    number_rows = int(dimensions[0])
    number_columns = int(dimensions[1])
    number_bands = int(dimensions[2])

    number_pixels = number_rows * number_columns

    sigma_MAX = max(number_rows, number_generations)

    data_proj = numpy.asarray(affine_projection(data, number_endmembers))

    creator.create("max_fitness", base.Fitness, weights=(-1.0, -1.0))
    creator.create("individual", list, fitness=creator.max_fitness)

    toolbox = base.Toolbox()
    toolbox.register("create_individual", generate_individual, creator,
                     number_endmembers, number_pixels, number_rows,
                     number_columns)
    toolbox.register("initialize_population", tools.initRepeat, list,
                     toolbox.create_individual)
    toolbox.register("evaluate_individual",
                     multi_fitness,
                     data=data,
                     data_proj=data_proj,
                     number_endmembers=number_endmembers)

    toolbox.register("cross_twopoints", tools.cxTwoPoint)
    toolbox.register("selNSGA2", tools.selNSGA2)

    toolbox.register("gaussian_mutation_op",
                     tools.mutGaussian,
                     mu=0,
                     sigma=0,
                     indpb=mutation_probability)
    toolbox.register("gaussian_mutation",
                     gaussian_mutation,
                     toolbox=toolbox,
                     number_rows=number_rows,
                     number_columns=number_columns)
    toolbox.register("random_mutation",
                     random_mutation,
                     number_pixels=number_pixels,
                     number_endmembers=number_endmembers,
                     mutation_probability=mutation_probability)

    population = toolbox.initialize_population(n=population_size)

    population_fitnesses = [
        toolbox.evaluate_individual(individual) for individual in population
    ]

    for individual, fitness in zip(population, population_fitnesses):
        individual.fitness.values = fitness

    hof = tools.HallOfFame(3)
    hof.update(population)

    current_generation = 0
    current_sigma = sigma_MAX
    generations_fitness_1 = []
    generations_fitness_2 = []
    generations_population = []
    stop_criteria = deque(maxlen=5)
    stop_criteria.extend([1, 2, 3, 4, 5])

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean, axis=0)
    stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    record = stats.compile(population)
    logbook.record(gen=0, evals=len(population), **record)

    while current_generation < number_generations and numpy.var(
            numpy.array(stop_criteria)) > 0.000001:

        toolbox.unregister("gaussian_mutation_op")
        toolbox.register("gaussian_mutation_op",
                         tools.mutGaussian,
                         mu=0,
                         sigma=current_sigma,
                         indpb=mutation_probability)

        offspring = tools.selRandom(population, k=int(population_size / 2))
        offspring = list(map(toolbox.clone, offspring))

        # Crossing
        for child_1, child_2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < crossing_probability:
                toolbox.cross_twopoints(child_1, child_2)
                del child_1.fitness.values
                del child_2.fitness.values
        # Mutation
        for mutant in offspring:
            if random.random() < mutation_probability:
                toolbox.gaussian_mutation(mutant)
                del mutant.fitness.values

        # Fitness
        offspring_fitnesses = [
            toolbox.evaluate_individual(individual) for individual in offspring
        ]
        for individual, fitness in zip(offspring, offspring_fitnesses):
            individual.fitness.values = fitness

        # In Vitro Fertilization Module
        ivfoffspring = list(map(toolbox.clone, population))
        ivffits = [ind.fitness.values[0] for ind in ivfoffspring]
        fatheridx = numpy.argmax(ivffits)
        # fatherfit = numpy.max(ivffits)
        father = creator.individual(ivfoffspring[fatheridx].copy())

        for ind in ivfoffspring[::2]:
            toolbox.random_mutation(ind)
            del ind.fitness.values

        for child1 in ivfoffspring:
            child2 = creator.individual(father.copy())
            toolbox.cross_twopoints(child1, child2)
            del child1.fitness.values
            del child2.fitness.values

        ivffitnesses = [
            toolbox.evaluate_individual(ind) for ind in ivfoffspring
        ]
        for ind, fit in zip(ivfoffspring, ivffitnesses):
            ind.fitness.values = fit

        popmax = max(offspring_fitnesses)
        for ind in ivfoffspring:
            if (ind.fitness.values >= popmax):
                population.append(ind)

        new_population = population + offspring

        # Selection
        selected = toolbox.selNSGA2(new_population, population_size)
        selected = list(map(toolbox.clone, selected))

        population[:] = selected

        hof.update(population)

        record = stats.compile(population)
        logbook.record(gen=current_generation, evals=len(population), **record)
        # print(logbook.stream)

        # Statistics
        fits_1 = [ind.fitness.values[0] for ind in population]
        fits_2 = [ind.fitness.values[1] for ind in population]

        mean_1_offspring = sum(fits_1) / len(population)
        mean_2_offspring = sum(fits_2) / len(population)

        generations_fitness_1.append(numpy.log10(mean_1_offspring))
        generations_fitness_2.append(numpy.log(mean_2_offspring))

        stop_criteria.append(numpy.log10(mean_1_offspring))

        generations_population.append(population.copy())

        current_generation += 1
        current_sigma = sigma_MAX / ((current_generation + 1) / 1.5)

    best_individual = tools.selNSGA2(population, 1)[0]

    # print('Result:', multi_fitness(best_individual,data, data_proj, number_endmembers))

    M = data[:, best_individual]
    duration = time.time() - start_time

    return M, duration, [
        generations_fitness_1, generations_fitness_2, generations_population,
        current_generation
    ]
Example No. 24
def GAEEII(data, dimensions, number_endmembers):
    start_time = time.time()
    population_size = 10
    number_generations = 100
    crossing_probability = 1
    mutation_probability = 0.5
    stop_criteria_MAX = 20
    random.seed(64)

    number_endmembers = number_endmembers

    number_rows = int(dimensions[0])
    number_columns = int(dimensions[1])
    number_bands = int(dimensions[2])

    number_pixels = number_rows * number_columns

    sigma_MAX = max(number_rows, number_generations)

    data_proj = numpy.asarray(affine_tranform(data, number_endmembers))

    # data = numpy.asarray(data)
    # _coeff, score, _latent = princomp(data.T)
    # data_proj = numpy.squeeze(score[0:number_endmembers,:])

    creator.create("min_fitness", base.Fitness, weights=(1.0, ))
    creator.create("individual", list, fitness=creator.min_fitness)

    toolbox = base.Toolbox()
    toolbox.register("create_individual", generate_individual, creator,
                     number_endmembers, number_pixels, number_rows,
                     number_columns)
    toolbox.register("initialize_population", tools.initRepeat, list,
                     toolbox.create_individual)
    toolbox.register("evaluate_individual",
                     multi_fitness,
                     data=data,
                     data_proj=data_proj,
                     number_endmembers=number_endmembers)

    toolbox.register("cross_twopoints", tools.cxTwoPoint)
    toolbox.register("selNSGA2", tools.selNSGA2)

    toolbox.register("gaussian_mutation_op",
                     tools.mutGaussian,
                     mu=0,
                     sigma=0,
                     indpb=mutation_probability)
    toolbox.register("gaussian_mutation",
                     gaussian_mutation,
                     toolbox=toolbox,
                     number_rows=number_rows,
                     number_columns=number_columns)

    population = toolbox.initialize_population(n=population_size)

    ensemble_pop = []
    for i in range(0, 5):
        [_, duration, other] = classical.VCA(data, dimensions,
                                             number_endmembers)
        # print(other[0])
        # print('Time GAEE:',duration)
        ensemble_pop.append(creator.individual(other[0]))

    population[5:] = ensemble_pop

    population_fitnesses = [
        toolbox.evaluate_individual(individual) for individual in population
    ]

    for individual, fitness in zip(population, population_fitnesses):
        individual.fitness.values = fitness

    hof = tools.HallOfFame(3)
    hof.update(population)

    current_generation = 0
    current_sigma = sigma_MAX
    generations_fitness_1 = []
    generations_fitness_2 = []
    generations_population = []
    stop_criteria = deque(maxlen=stop_criteria_MAX)
    stop_criteria.extend(list(range(1, stop_criteria_MAX)))

    while current_generation < number_generations and numpy.var(
            numpy.array(stop_criteria)) > 0.000001:

        toolbox.unregister("gaussian_mutation_op")
        toolbox.register("gaussian_mutation_op",
                         tools.mutGaussian,
                         mu=0,
                         sigma=current_sigma,
                         indpb=mutation_probability)

        offspring = tools.selRandom(population, k=int(population_size / 2))
        offspring = list(map(toolbox.clone, offspring))

        # Crossing
        for child_1, child_2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < crossing_probability:
                toolbox.cross_twopoints(child_1, child_2)
                del child_1.fitness.values
                del child_2.fitness.values
        # Mutation
        for mutant in offspring:
            if random.random() < mutation_probability:
                toolbox.gaussian_mutation(mutant)
                del mutant.fitness.values

        # Fitness
        offspring_fitnesses = [
            toolbox.evaluate_individual(individual) for individual in offspring
        ]
        for individual, fitness in zip(offspring, offspring_fitnesses):
            individual.fitness.values = fitness

        new_population = population + offspring

        # Selection
        selected = toolbox.selNSGA2(new_population, population_size)
        selected = list(map(toolbox.clone, selected))

        population[:] = selected

        hof.update(population)

        # Statistics
        fits_1 = [ind.fitness.values[0] for ind in population]
        # fits_2 = [ind.fitness.values[1] for ind in population]

        mean_1_offspring = sum(fits_1) / len(population)
        # mean_2_offspring = sum(fits_2) / len(population)

        generations_fitness_1.append(numpy.log10(mean_1_offspring))
        # generations_fitness_2.append(numpy.log(mean_2_offspring))

        generations_population.append(population.copy())

        stop_criteria.append(numpy.log10(mean_1_offspring))

        current_generation += 1
        current_sigma = sigma_MAX / ((current_generation + 1) / 4)

    best_individual = tools.selBest(population, 1)[0]
    # best_individual = hof[0]

    # print('Result:', multi_fitness(best_individual,data, data_proj,number_endmembers))

    M = data[:, best_individual]

    duration = time.time() - start_time

    return M, duration, [
        generations_fitness_1, generations_fitness_2, generations_population,
        current_generation
    ]
Example No. 25
File: main.py Project: TpouHuK/vlf
    childs = []
    for i in range(DIES_EVERY_DAY - NEW_EVERY_DAY):
        a, b = tools.selTournament(survivors,
                                   2,
                                   BREED_TOURN_SIZE,
                                   fit_attr="fitness")
        child = breed_brains(a, b)
        child.fitness = None
        childs.append(child)

    for i in range(NEW_EVERY_DAY):
        new = vlf.neural_network.FFNeuralNetwork()
        new.fitness = None
        survivors.append(new)

    mutants = tools.selRandom(survivors, MUTATED_EVERY_DAY)
    for m in mutants:
        mutate_brain(m)
        m.fitness = None

    population = survivors + childs

    for p in population:
        if not p.fitness:
            p.fitness = get_fitness(p)

    f = [p.fitness for p in population]
    best = max(population, key=lambda x: x.fitness)
    print(best.get_flattened_array())
    show(best)
    print("GENERATION", g)
Example No. 26
def Tournament(individuals, finalselect, tournsize):
    chosen = []
    for i in range(finalselect):
        aspirants = tools.selRandom(individuals, tournsize)
        chosen.append(max(aspirants, key=selectCriteria))
    return chosen
Example No. 27
def main(seed=None):
    random.seed(seed)

    NGEN = 250
    MU = 100
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selRandom(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = pop + offspring
        fronts = toolbox.sort(pop, len(pop))
        chosen = []
        for i, front in enumerate(fronts):
            # Move this front into the chosen population until it is almost full
            if len(chosen) + len(front) <= MU:
                chosen.extend(front)
            else:
                # Assign hypervolume contribution to individuals of the front that
                # cannot be completely moved over to the chosen individuals
                fitness_hv = hypervolume_contrib(front)
                for ind, fit_hv in zip(front, fitness_hv):
                    ind.fitness_hv.values = (fit_hv, )
                # Fill chosen with the best individuals from the inspected front
                # (based on hypervolume contribution)
                chosen.extend(toolbox.select(front, MU - len(chosen)))
                break

        pop = chosen

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    print("Final population hypervolume is %f" %
          hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
Example No. 28
def main(epsilon, delta):
	global lambda_values
	global population_size
	global population
	global population_obj_func_vals
	global iterations

	outputs = []

	for lambda_index in range(1, number_of_lambdas+1):
		_lambda = float(lambda_index-1)/(number_of_lambdas-1)

		improved_solutions = []							#improved solutions for a particular lambda
		v_lambda = float('Inf')

		for i in range(1 , population_size+1):
			sample = [[0,0] for x in range(number_of_assets)]
			Q = random.sample([x for x in range(1, number_of_assets+1)],10)

			for asset in Q:
				sample[asset-1] = [1, myRandom(0,1)]
			
			population[i-1] = list(sample)

		for ind,S in enumerate(population):
			[population_obj_func_vals[ind], v_lambda, improved] = evaluate(S, _lambda, 0, v_lambda, False, improved_solutions, UPDATE_H)

		for itr in range(iterations):
			#binary tournament for selecting parents S_star and S_double_star
			chosen = []
			for i in xrange(2):
				aspirants = tools.selRandom(population, 40)
				for i,aspirant in enumerate(aspirants):
					h = []
					f_aspirant = 0
					v = float('Inf')
					imp = False
					[f_aspirant, v, imp] = evaluate(aspirant, _lambda, 0, v, False, h, DONT_UPDATE_H)
					aspirants[i] = [aspirant, f_aspirant]
					
				chosen.append(min(aspirants, key=lambda x: x[1])[0])
			
			S_star = list(chosen[0])
			S_double_star = list(chosen[1])
			
			#Uniform crossover to find child C
			C = []
			[C1, C2] = tools.cxUniform(S_star, S_double_star, ind_prob)
			f1 = f2 = 0
			v = float('Inf')
			imp = False
			h = []
			[f1, v, imp] = evaluate(C1, _lambda, 0, v, False, h, DONT_UPDATE_H)
			[f2, v, imp] = evaluate(C2, _lambda, 0, v, False, h, DONT_UPDATE_H)

			if f1<f2:
				C = C1
			else:
				C = C2

			#find assets in parents but not in child
			A_star = []
			for i in range(number_of_assets):
				if S_star[i][0]==1 and S_double_star[i][0]==0 and C[i][0]==0:
					A_star.append([i+1, S_star[i][1]])
				elif S_star[i][0]==0 and S_double_star[i][0]==1 and C[i][0]==0:
					A_star.append([i+1, S_double_star[i][1]])

			#Mutation
			mutation_index = random.randint(0, number_of_assets-1)
			while C[mutation_index][0]==0:
				mutation_index = random.randint(0, number_of_assets-1)
			m = random.randint(0,1)

			if m == 0:
				# C_i = 0.9*(epsilon + C[mutation_index][1]) - epsilon
				C_i = 0.9*(epsilon[mutation_index] + C[mutation_index][1]) - epsilon[mutation_index]
			else:
				# C_i = 1.1*(epsilon + C[mutation_index][1]) - epsilon
				C_i = 1.1*(epsilon[mutation_index] + C[mutation_index][1]) - epsilon[mutation_index]
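			# Worked micro-example (illustrative numbers, not from the source): with
			# epsilon[i] = 0.01 and current weight 0.05, m = 0 gives
			# 0.9*(0.01 + 0.05) - 0.01 = 0.044 (shrink), while m = 1 gives
			# 1.1*(0.01 + 0.05) - 0.01 = 0.056 (grow).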

			if C_i<0:
				C[mutation_index][0] = 0
				C[mutation_index][1] = 0

			#check if child contains more or less than K assets and fix it
			total_assets_in_child = 0
			child_copy = list(C)
			sorted_child_copy = sorted(child_copy, key=itemgetter(1))
			
			for item in sorted_child_copy:
				if item[0]==1:
					total_assets_in_child += 1
			if total_assets_in_child > K:
				for item in sorted_child_copy[:-K]:
					# C.index(item) = [0,0]
					C[C.index(item)] = [0,0]
			elif total_assets_in_child < K:
				while total_assets_in_child < K:
					if len(A_star) > 0:
						random_index = random.randint(0,len(A_star)-1)
						random_element = A_star.pop(random_index)
						C[random_element[0]-1] = [1, random_element[1]]
					else:
						random_index = random.randint(0,30)
						while C[random_index][0]==1:
							random_index = random.randint(0,30)
						C[random_index] = [1, 0]
					total_assets_in_child += 1
					
			#change the population by adding child to it
			obj_func_val_child = 0
			[obj_func_val_child, v_lambda, improved] = evaluate(C, _lambda, 0, v_lambda, False, improved_solutions, UPDATE_H)
			
			if improved:
				population[population_obj_func_vals.index(max(population_obj_func_vals))] = C
		
		print lambda_index

		outputs.append(evaluate(improved_solutions[-1], _lambda, 0, float('Inf'), False, None, FINAL_SAMPLE))

		H.append(improved_solutions)

	np_out_matrix = np.matrix(outputs)
	np_out_matrix = np_out_matrix.T
	outputs = np_out_matrix.tolist()

	f = open("out2.csv", "w")
	writer = csv.writer(f)
	for row in outputs:
		writer.writerow(tuple(row))
	f.close()
Example No. 29
def generate_aspirant_pairs(pop_size):
    pop_idx = list(range(pop_size))
    return [tools.selRandom(pop_idx, 2) for _ in range(pop_size)]
Example No. 30
def select(pop, mu, lambda_):
    # pop - the parent population (a list of creator.SRESindividual instances)
    offspring = tools.selRandom(tools.selBest(pop, mu), lambda_)
    offspring.sort(key=lambda ind: ind.fitness.values[0])
    return offspring
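A hedged usage sketch: with an already-evaluated population of creator.SRESindividual instances, this returns lambda_ individuals drawn uniformly from the mu best and then ordered by their first fitness value.

offspring = select(pop, mu=15, lambda_=100)  # mu and lambda_ values are illustrative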
Example No. 31
def main():
    """Complete generational algorithm
    mapを使うとジェネレータが帰ってくる"""
    n=100
    pop = toolbox.population(n)
    CXPB, MUTPB, NGEN = 0.6, 0.3, 100
    elite = 0.16
    random_elite = 0.04

    #Evaluate the initial individuals
    fitnesses = toolbox.map(toolbox.evaluate, pop) #ends up like [(6782,),(2342,)...]
    
    for ind, fit in zip(pop, fitnesses): #fitnesses is lazily evaluated here
        #print ind, fit
        ind.fitness.values = fit

    #Select the next generation individuals
    for g in xrange(NGEN):
        
        #Record the values configured in statistics for the generation-g individuals
        logbook.add_dictionary(g)
        #Gather the fitness of every individual into a list
        fits = [ind.fitness.values for ind in pop]

        # The values passed to record correspond to the functions being executed;
        #functions with multiple arguments are passed as (data, args)
        logbook.record(fits, (pop, 1))
        #Keep the top 16% of individuals
        bestoffspring = tools.selBest(pop, int(n*elite))
        top = tools.selBest(pop, 1)
        print top


        #Randomly pick 4% of the individuals from the bottom 84% and add them to the elite
        worstoffspring = tools.selWorst(pop, int(n-(n*elite)))
        random_ind = tools.selRandom(worstoffspring, int(n*random_elite))
        save_offspring = bestoffspring + random_ind

        #The original version, offspring = [toolbox.clone(ind) for ind in pop], should be avoided:
        #it distorts the average

        #Elite preservation (20%)
        save_offspring = list(toolbox.map(toolbox.clone, save_offspring))

        #Individuals the operators are applied to (80%)
        #random version
        operated_offspring = tools.selRandom(worstoffspring, 
                                             int(n - len(save_offspring)))

        all_offspring = list(toolbox.map(toolbox.clone, 
                                         save_offspring + operated_offspring))

        #print len(operated_offspring), len(save_offspring), len(all_offspring)
        
        #Apply crossover and mutation on the offspring
        # Pair up even- and odd-indexed individuals and cross them over
        for child1, child2 in zip(operated_offspring[::2], 
                                  operated_offspring[1::2]):
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in operated_offspring:
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        #Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in operated_offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        
        #Evaluate the individuals that have not been evaluated yet.
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        
        #Select individuals according to their fitness values
        #The population is entirely replaced by the offspring
        #pop = toolbox.select(pop + offspring, len(offspring))
        pop[:] = save_offspring + operated_offspring
        

    return logbook
Example No. 32
def main():

    MU, CXPB, MUTPB, NGEN = 200, 0.6, 0.1, 50
     
    pop_long = toolbox.population(n=MU)
    pop_short = toolbox.population(n=MU)
    
    hof_l = tools.HallOfFame(1)
    hof_s = tools.HallOfFame(1)
    
    
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    
    logbook = tools.Logbook()
    logbook.header = "gen", "type", "evals", "std", "min", "avg", "max"
    
    best_long = tools.selRandom(pop_long, 1)[0]
    best_short = tools.selRandom(pop_short, 1)[0]
    
    for ind in pop_long:
        ind.fitness.values = toolbox.evaluate(ind, best_short,  points=train)  
    
    for ind in pop_short:
        ind.fitness.values = toolbox.evaluate(best_long, ind, points=train)
    
    hof_l.update(pop_long)
    hof_s.update(pop_short)    
    
    
    record = stats.compile(pop_long)
    logbook.record(gen=0, type='long', evals=len(pop_long), **record)
    
    record = stats.compile(pop_short)
    logbook.record(gen=0, type='short', evals=len(pop_short), **record)
    
    print(logbook.stream)
      
    
    # Begin the evolution
    for g in range(1, NGEN):
        # select and clone the offspring
        off_long = toolbox.select(pop_long, MU)
        off_short = toolbox.select(pop_short, MU)
    
        off_long = [toolbox.clone(ind) for ind in off_long]        
        off_short = [toolbox.clone(ind) for ind in off_short]
    
    
        # Apply crossover and mutation
        for ind1, ind2 in zip(off_long[::2], off_long[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)
                del ind1.fitness.values
                del ind2.fitness.values
            elif random.random() <= MUTPB:
                toolbox.mutate(ind1)
                toolbox.mutate(ind2)
                del ind1.fitness.values
                del ind2.fitness.values
    
        for ind1, ind2 in zip(off_short[::2], off_short[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)
                del ind1.fitness.values
                del ind2.fitness.values
            elif random.random() <= MUTPB:
                toolbox.mutate(ind1)
                toolbox.mutate(ind2)
                del ind1.fitness.values
                del ind2.fitness.values
        
     
        # Evaluate the individuals     
        #long_representative = tools.selTournament(pop_long, 1, tournsize=3)[0]    
        #short_representative = tools.selTournament(pop_short, 1, tournsize=3)[0]    
        best_long = tools.selBest(pop_long+off_long, 1)[0]    
        best_short = tools.selBest(pop_short+off_short, 1)[0]    
     
          
        for ind in off_long:
            ind.fitness.values = toolbox.evaluate(ind, best_short, points=train)
        
        for ind in off_short:
            ind.fitness.values = toolbox.evaluate(ind, best_long, points=train)
                
        # Replace the old population by the offspring
        pop_long = toolbox.select(pop_long+off_long, MU)
        pop_short = toolbox.select(pop_short+off_short, MU)
        
        record = stats.compile(pop_long)
        logbook.record(gen=g, type='long', evals=len(pop_long), **record)
        
        record = stats.compile(pop_short)
        logbook.record(gen=g, type='short', evals=len(pop_short), **record)
        print(logbook.stream)
        
        hof_l.update(pop_long)
        hof_s.update(pop_short)    
    
                
    print("Best Long individual is %s, %s" % (best_long, best_long.fitness.values))
    print("Best Short individual is %s, %s" % (best_short, best_short.fitness.values))

    return pop_long, pop_short, best_long, best_short, hof_l, hof_s, logbook
Example No. 33
File: mo_rhv.py Project: DEAP/deap
def main(seed=None):
    random.seed(seed)

    NGEN = 250
    MU = 100
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring = tools.selRandom(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = pop + offspring
        fronts = toolbox.sort(pop, len(pop))
        chosen = []
        for i, front in enumerate(fronts):
            # Move this front into the chosen population until it is almost full
            if len(chosen) + len(front) <= MU:
                chosen.extend(front)
            else:
                # Assign hypervolume contribution to individuals of the front that
                # cannot be completely moved over to the chosen individuals
                fitness_hv = hypervolume_contrib(front)
                for ind, fit_hv in zip(front, fitness_hv):
                    ind.fitness_hv.values = (fit_hv,)
                # Fill chosen with the best individuals from the inspected front
                # (based on hypervolume contribution)
                chosen.extend(toolbox.select(front, MU - len(chosen)))
                break

        pop = chosen

        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        print(logbook.stream)

    print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0]))

    return pop, logbook
Example No. 34
def main():
    pop_ga = toolbox_ga.population(n=200)
    pop_gp = toolbox_gp.population(n=200)
    
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    
    logbook = tools.Logbook()
    logbook.header = "gen", "type", "evals", "std", "min", "avg", "max"
    
    best_ga = tools.selRandom(pop_ga, 1)[0]
    best_gp = tools.selRandom(pop_gp, 1)[0]
    
    for ind in pop_gp:
        ind.fitness.values = toolbox_gp.evaluate(ind, points=best_ga)  
    
    for ind in pop_ga:
        ind.fitness.values = toolbox_gp.evaluate(best_gp, points=ind)
    
    record = stats.compile(pop_ga)
    logbook.record(gen=0, type='ga', evals=len(pop_ga), **record)
    
    record = stats.compile(pop_gp)
    logbook.record(gen=0, type='gp', evals=len(pop_gp), **record)
    
    print(logbook.stream)
    
    CXPB, MUTPB, NGEN = 0.5, 0.2, 50
    
    # Begin the evolution
    for g in range(1, NGEN):
        # Select and clone the offspring
        off_ga = toolbox_ga.select(pop_ga, len(pop_ga))
        off_gp = toolbox_gp.select(pop_gp, len(pop_gp))
        off_ga = [toolbox_ga.clone(ind) for ind in off_ga]        
        off_gp = [toolbox_gp.clone(ind) for ind in off_gp]
    
    
        # Apply crossover and mutation
        for ind1, ind2 in zip(off_ga[::2], off_ga[1::2]):
            if random.random() < CXPB:
                toolbox_ga.mate(ind1, ind2)
                del ind1.fitness.values
                del ind2.fitness.values
    
        for ind1, ind2 in zip(off_gp[::2], off_gp[1::2]):
            if random.random() < CXPB:
                toolbox_gp.mate(ind1, ind2)
                del ind1.fitness.values
                del ind2.fitness.values
    
        for ind in off_ga:
            if random.random() < MUTPB:
                toolbox_ga.mutate(ind)
                del ind.fitness.values
    
        for ind in off_gp:
            if random.random() < MUTPB:
                toolbox_gp.mutate(ind)
                del ind.fitness.values
    
        # Evaluate the individuals with an invalid fitness
        for ind in off_ga:
            ind.fitness.values = toolbox_gp.evaluate(best_gp, points=ind)
        
        for ind in off_gp:
            ind.fitness.values = toolbox_gp.evaluate(ind, points=best_ga)
                
        # Replace the old population by the offspring
        pop_ga = off_ga
        pop_gp = off_gp
        
        record = stats.compile(pop_ga)
        logbook.record(gen=g, type='ga', evals=len(pop_ga), **record)
        
        record = stats.compile(pop_gp)
        logbook.record(gen=g, type='gp', evals=len(pop_gp), **record)
        print(logbook.stream)
        
        
        best_ga = tools.selBest(pop_ga, 1)[0]
        best_gp = tools.selBest(pop_gp, 1)[0]
    

    print("Best individual GA is %s, %s" % (best_ga, best_ga.fitness.values))
    print("Best individual GP is %s, %s" % (best_gp, best_gp.fitness.values))

    return pop_ga, pop_gp, best_ga, best_gp, logbook
Example No. 35
        def optimize(self, pop):
            mstats, log = support.statistics(), support.logbook(
            )  # DEAP statistics and logbook
            front = tools.ParetoFront()  # Initialize ParetoFront class
            self.evals = len(pop)
            gds = []  # Initialize generational distance list
            gen = totalEvals = gd = 0
            while True:
                # Step 3: Update global Pareto front
                prevFront = front.items[:]
                front.update(pop)

                # Record statistics
                record = mstats.compile(front)
                log.record(gen=gen, evals=self.evals, gd=gd, **record)
                print('\r', end='')
                print(log.stream)

                totalEvals += self.evals
                self.evals = 0

                # Step 4: Local search
                for i, c in enumerate(pop):
                    # Fill local archive with solutions from Pareto front that do not dominate c
                    archive = [
                        ind for ind in front if
                        not ind.fitness.dominates(c.fitness) and ind is not c
                    ]
                    archive.append(c)  # Copy candidate into H
                    pop[i] = self.paes(
                        c, archive, front
                    )  # Replace c with improved version by local search

                # Step 5: Recombination
                popInter = []  # Initialize intermediate population
                while len(popInter) < self.sizePop:
                    r = 0
                    while True:
                        # Randomly choose two parents from P + G
                        mom, dad = tools.selRandom(pop + front.items, 2)
                        mom2, dad2 = self.tb.clone(mom), self.tb.clone(dad)

                        # Recombine to form offspring, evaluate
                        child, _ = self.tb.mate(mom2, dad2)
                        child.fitness.values = self.evaluator.evaluate(child)
                        self.evals += 1
                        childInMoreCrowdedArea = childDominated = False
                        for ind in front:
                            if ind.fitness.dominates(child.fitness):
                                childDominated = True
                                break

                        if not childDominated:
                            crowding_distance(pop + front.items + [child])
                            # Check if c is in more crowded grid location than both parents
                            if child.fitness.crowding_dist < mom.fitness.crowding_dist and\
                                    child.fitness.crowding_dist < dad.fitness.crowding_dist:
                                childInMoreCrowdedArea = True

                            # Update pareto front with c as necessary
                            front.update([child])
                        r += 1
                        if not ((childDominated or childInMoreCrowdedArea)
                                and r < self.cr_trials):
                            break

                    childAsList = tools.selTournament(
                        front.items, k=1,
                        tournsize=2) if childDominated else [child]

                    popInter.extend(childAsList)

                # Step 6: Termination
                gd = self.termination(prevFront, front, gen, gds, totalEvals)
                if not isinstance(gd, float):
                    return front, log, totalEvals

                pop = popInter
                gen += 1
Example No. 36
def selBestOrRandom(individuals, k):
    chosen = []
    if random.random() <= 0.1:
        return tools.selRandom(individuals, k)
    else:
        return tools.selBest(individuals, k)
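
A minimal registration sketch (an assumption, not part of the original snippet): with an already-configured deap.base.Toolbox named toolbox, this operator can stand in for any built-in DEAP selector.

# Hypothetical registration (sketch only); 'toolbox' is assumed to exist.
toolbox.register("select", selBestOrRandom)

# Later, in the generational loop:
# offspring = toolbox.select(pop, len(pop))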
Example No. 37
def varDE(population,
          toolbox,
          cxpb=1.0,
          mutpb=1.0,
          jitter=0,
          low=None,
          up=None):
    """Part of an evolutionary algorithm applying the variation part
    of the differential evolution algorithm. The modified individuals have their
    fitness invalidated. The individuals are cloned so returned population is
    independent of the input population.

    :param population: A list of individuals to vary.
    :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
                    operators.
    :param cxpb: The crossover probability CR: probability of a 'base' parameter
                 value being replaced with a differentially evolved parameter
                 value
    :param mutpb: The mutation rate F: a scaling of the differential vector that
                  gets added to the base parameter
    :param jitter: Width of the uniform random perturbation applied to F; the
                   effective scaling factor is jittered by +/- jitter/2
    :param low: The lower bound of each parameter
    :param up: The upper bound of each parameter
    :returns: A list of varied individuals that are independent of their
              parents.

    The variation goes as follows. For each member of the population we
    generate a new offspring. For each offspring we select three random members
    of the parent population: a base and two parents for the differential. We
    then pick a random parameter 'index' that is always changed. For each
    parameter, if a random number in [0, 1) is < CR, or if the parameter is
    'index', we replace the base value with the base value plus the difference
    of that parameter between the two other parents, multiplied by the rate
    factor F and jittered by +/- jitter/2.
    That's it!
    """

    offspring = [toolbox.clone(ind) for ind in population]

    # Differentially evolve each base offspring:
    for individual in offspring:
        # Keep mutating until all parameters are within all boundaries:
        inside = False
        while not inside:
            base, a, b = tools.selRandom(population, 3)
            while (base == a or base == b or a == b):
                base, a, b = tools.selRandom(population, 3)
            index = random.randrange(len(individual))
            for j, parameter in enumerate(individual):
                if j == index or random.random() < cxpb:
                    diff = (a[j] - b[j])
                    individual[j] = base[j] + (mutpb * diff) + ((
                        (random.random() * jitter) - (jitter / 2)) * diff)
                    # perform 'bounce' away from boundaries to maintain parameter diversity
                    if individual[j] < low[j]:
                        individual[j] = low[j] + (low[j] - individual[j])
                    elif individual[j] > up[j]:
                        individual[j] = up[j] - (individual[j] - up[j])
            # Check boundary
            inside = True
            for j, parameter in enumerate(individual):
                if individual[j] < low[j]:
                    inside = False
                elif individual[j] > up[j]:
                    inside = False
        del individual.fitness.values

    return offspring
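
A minimal driver sketch around varDE, assuming the usual DEAP setup; NGEN, LOW, UP, pop and the registered toolbox.evaluate below are illustrative placeholders, not names taken from the original source.

# Hypothetical generational loop (sketch only).
# LOW / UP are per-parameter bound lists; toolbox.evaluate is assumed registered.
for gen in range(NGEN):
    offspring = varDE(pop, toolbox, cxpb=0.9, mutpb=0.8, jitter=0.1,
                      low=LOW, up=UP)
    for ind in offspring:
        ind.fitness.values = toolbox.evaluate(ind)
    # Classic DE replacement: keep the better of each parent/trial pair
    # (offspring[i] was derived from population member i).
    pop = [off if off.fitness >= par.fitness else par
           for par, off in zip(pop, offspring)]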
Example No. 38
HoF = tools.HallOfFame(36)
HoF.update(pop)

best = tools.selBest(pop, 1)
print('Best seeded outcome:')
print(best[0].fitness.values)

for i in range(N_GEN):
    print('Start Gen: ', str(i), '...')
    # Define new population
    pop_new = toolbox.population(n=0)

    winners = tools.selBest(pop, int(POP_SIZE *
                                     0.25))  # Keep 25% best in population
    rand = tools.selRandom(pop, int(POP_SIZE *
                                    0.5))  # randomly select 50% to crossover
    new = toolbox.population(n=int(
        POP_SIZE * 0.25))  # introduce 25% of new randomness into population

    #     print(len(winners))
    #     print(len(rand))
    #     print(len(new))

    # SELECT WINNERS
    winners = toolbox.clone(winners)
    for w in winners:
        del w.fitness.values
        mutant = toolbox.clone(w)
        mutant = tools.mutGaussian(mutant, mu=0.0, sigma=0.1, indpb=0.2)
        pop_new.append(mutant[0])
Example No. 39
    def optim(self):
        """

        """

        ##Create population of species to work with
        self.createPopulations();

        #select representatives
        repr = dict();
        repr['hidden_nodes'] = tools.selRandom(self.pop['node_pop'],
                                                                 self.params['numHiddenNodes']);
        repr['out_nodes'] = tools.selRandom(self.pop['node_pop'],
                                                  self.params['numOutputNodes']);
        #select random connections and weights
        repr['model'] = tools.selRandom(self.pop['model_pop'], 1);
        repr['connActive_IH'] = tools.selRandom(self.pop['connActive_IH_pop'], 1);
        repr['connActive_HH'] = tools.selRandom(self.pop['connActive_HH_pop'], 1);
        repr['connActive_HO'] = tools.selRandom(self.pop['connActive_HO_pop'], 1);
        repr['connWeights_IH'] = tools.selRandom(self.pop['connWeights_IH_pop'], 1);
        repr['connWeights_HH'] = tools.selRandom(self.pop['connWeights_HH_pop'], 1);
        repr['connWeights_HO'] = tools.selRandom(self.pop['connWeights_HO_pop'], 1);
        # print "Representatives", repr;

        #Co-op Coevolution
        g = 0;
        for g in  xrange(self.params['NGEN']):

            #Go through components population
            for comp_key  in self.pop.keys():

                next_repr = self.tbox.clone(repr);

                if comp_key == 'node_pop':
                    #Output units
                    for ind in self.pop['node_pop']:
                        #clone representative
                        components = self.tbox.clone(repr);
                        #evaluate the components population
                        components['out_nodes'] = [ind];
                        #assign fitness
                        ind.fitness.values = self.evaluate(components),-1;
                        print "n", ind.fitness.values[0];
                        del components;

                    self.pop['node_pop'] = tools.selTournament(self.pop['node_pop'], len(self.pop['node_pop']), 3);
                    next_repr['out_nodes'] = tools.selBest(self.pop['node_pop'],1);

                    # #clone
                    # components = self.tbox.clone(repr);
                    # #Hidden units
                    # for nodes in self.pop['node_pop']:
                    #     #get representatives
                    #     node_repr = self.tbox.clone(repr['hidden_nodes']);
                    #     for i, n in enumerate(nodes):
                    #         r1 = (node_repr[:i]);
                    #         r2 = (node_repr[i+1:]);
                    #         r1.extend(r2);
                    #         print "Representative:", r1;
                    #         #evaluate the components population
                    #         components['hidden_nodes'] = r1.extend([n]);
                    #         #assign fitness
                    #         ind.fitness.values = self.evaluate(components),-1;
                    #         # print "fitness:", ind.fitness.values[0];


                if comp_key == 'connActive_IH_pop':

                    #CONN IH
                    for ind in self.pop['connActive_IH_pop']:
                        #clone representative
                        components = self.tbox.clone(repr);
                        #evaluate the components population
                        components['connActive_IH'] = ind;
                        #assign fitness
                        ind.fitness.values = self.evaluate(components),-1;
                        print "fitness:", ind.fitness.values[0];
                        del components;
                self.pop['connActive_IH_pop'] = tools.selTournament(self.pop['connActive_IH_pop'],
                                                                    len(self.pop['connActive_IH_pop']), 3);
                next_repr['connActive_IH'] = tools.selBest(self.pop['connActive_IH_pop'],1);

                if comp_key == 'connActive_HH_pop':

                    #CONN HH
                    for ind in self.pop['connActive_HH_pop']:
                        #clone representative
                        components = self.tbox.clone(repr);
                        #evaluate the components population
                        components['connActive_HH'] = ind;
                        #assign fitness
                        ind.fitness.values = self.evaluate(components),-1;
                        print "fitness:", ind.fitness.values[0];
                        del components;

                self.pop['connActive_HH_pop'] = tools.selTournament(self.pop['connActive_HH_pop'],
                                                                    len(self.pop['connActive_HH_pop']), 3);
                next_repr['connActive_HH'] = tools.selBest(self.pop['connActive_HH_pop'],1);


                if comp_key == 'connActive_HO_pop':

                    #CONN HO
                    for ind in self.pop['connActive_HO_pop']:
                        #clone representative
                        components = self.tbox.clone(repr);
                        #evaluate the components population
                        components['connActive_HO'] = ind;
                        #assign fitness
                        ind.fitness.values = self.evaluate(components),-1;
                        print "fitness:", ind.fitness.values[0];

                        del components;

                self.pop['connActive_HO_pop'] = tools.selTournament(self.pop['connActive_HO_pop'],
                                                                    len(self.pop['connActive_HO_pop']), 3);
                next_repr['connActive_HO'] = tools.selBest(self.pop['connActive_HO_pop'],1);


                if comp_key == 'connWeights_IH_pop':

                    #CONN IH weights
                    for ind in self.pop['connWeights_IH_pop']:
                        #clone representative
                        components = self.tbox.clone(repr);
                        #evaluate the components population
                        components['connWeights_IH'] = ind;
                        #assign fitness
                        ind.fitness.values = self.evaluate(components),-1;
                        print "fitness:", ind.fitness.values[0];

                        del components;

                self.pop['connWeights_IH_pop'] = tools.selTournament(self.pop['connWeights_IH_pop'],
                                                                    len(self.pop['connWeights_IH_pop']), 3);
                next_repr['connWeights_IH'] = tools.selBest(self.pop['connWeights_IH_pop'],1);


                if comp_key == 'connWeights_HH_pop':
                    #clone representative
                    components = self.tbox.clone(repr);
                    #CONN HH weights
                    for ind in self.pop['connWeights_HH_pop']:
                        #evaluate the components population
                        components['connWeights_HH'] = ind;
                        #assign fitness
                        ind.fitness.values = self.evaluate(components),-1;
                        print "fitness:", ind.fitness.values[0];

                self.pop['connWeights_HH_pop'] = tools.selTournament(self.pop['connWeights_HH_pop'],
                                                                    len(self.pop['connWeights_HH_pop']), 3);
                next_repr['connWeights_HH'] = tools.selBest(self.pop['connWeights_HH_pop'],1);

                if comp_key == 'connWeights_HO_pop':
                    #clone representative
                    components = self.tbox.clone(repr);
                    #CONN HO weights
                    for ind in self.pop['connWeights_HO_pop']:
                        #evaluate the components population
                        components['connWeights_HO'] = ind;
                        #assign fitness
                        ind.fitness.values = self.evaluate(components),-1;
                        print "fitness:", ind.fitness.values[0];

                self.pop['connWeights_HO_pop'] = tools.selTournament(self.pop['connWeights_HO_pop'],
                                                                    len(self.pop['connWeights_HO_pop']), 3);
                next_repr['connWeights_HO'] = tools.selBest(self.pop['connWeights_HO_pop'],1);


            repr = next_repr;

##### TEST ########
# p = pyNDMOptim();
# p.optim();
# sys.exit();

#TODO: Coevolution of neural computation paths
#TODO:
Example No. 40
                #cc1 cc2 is totally new
                cc1, cc2 = toolbox.mateG(c1, c2)
                cxOut.append(cc1)
                cxOut.append(cc2)

        # mutation
        mutantOut = []
        for mutant in offspring:
            if random.random() < GMUTPB:
                mut = toolbox.mutateG(mutant, dest)
                if mut is not None:
                    mutantOut.append(mut)
        #compose a big group of population
        #bigPop = pop + mutantOut + cxOut
        bigPop = mutantOut + cxOut + lPop
        gPop = tools.selRandom(bigPop, GPOPU)
        """ L part !!!"""
        # select
        #offspring = toolbox.selectL(lPop, 2)
        offspring = tools.selBest(lPop, LPOPU)
        # deep copy out
        # offspring = list(map(toolbox.clone, offspring))
        # cross over
        cxOut = []
        for c1 in offspring:
            if random.random() < LCXPB:
                #cc1 cc2 is totally new
                #random select c2 from gPop
                c2 = tools.selRandom(gPop, 1)[0]
                cc1, cc2 = toolbox.mateL(c1, c2)
                cxOut.append(cc1)
Example No. 41
def run_pso(instance_name,
            particle_size,
            pop_size,
            max_iteration,
            cognitive_coef,
            social_coef,
            s_limit=3,
            plot=False,
            save=False,
            logs=False):

    instance = load_problem_instance(instance_name)

    if instance is None:
        return

    if plot:
        plot_instance(instance_name=instance_name,
                      customer_number=particle_size)

    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Particle",
                   list,
                   fitness=creator.FitnessMax,
                   speed=list,
                   smin=None,
                   smax=None,
                   best=None)

    toolbox = base.Toolbox()
    toolbox.register("particle",
                     generate_particle,
                     size=particle_size,
                     val_min=1,
                     val_max=particle_size,
                     s_min=-s_limit,
                     s_max=s_limit)
    toolbox.register("population", tools.initRepeat, list, toolbox.particle)
    toolbox.register("update",
                     update_particle,
                     phi1=cognitive_coef,
                     phi2=social_coef)
    toolbox.register('evaluate', calculate_fitness, data=instance)

    pop = toolbox.population(n=pop_size)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    best = None
    iter_num = 0
    previous_best = 0

    print('### EVOLUTION START ###')
    start = time.time()

    for g in range(max_iteration):

        fit_count = 0
        for part in pop:
            part.fitness.values = toolbox.evaluate(part)
            if part.fitness.values[0] > previous_best:
                previous_best = part.fitness.values[0]
                iter_num = g + 1
            elif part.fitness.values[0] == previous_best:
                fit_count += 1

        if fit_count > int(numpy.ceil(pop_size * 0.15)):
            rand_pop = toolbox.population(n=pop_size)
            for part in rand_pop:
                part.fitness.values = toolbox.evaluate(part)
            some_inds = tools.selRandom(
                rand_pop, int(numpy.ceil(pop_size * 0.1)))  # random pop here
            mod_pop = tools.selWorst(pop, int(numpy.ceil(pop_size * 0.9)))
        else:
            some_inds = tools.selBest(pop, int(numpy.ceil(
                pop_size * 0.05)))  # elite pop here
            mod_pop = tools.selRandom(pop, int(numpy.ceil(pop_size * 0.95)))

        mod_pop = list(map(toolbox.clone, mod_pop))

        for part in mod_pop:
            if not part.best or part.best.fitness < part.fitness:
                part.best = creator.Particle(part)
                part.best.fitness.values = part.fitness.values
            if not best or best.fitness < part.fitness:
                best = creator.Particle(part)
                best.fitness.values = part.fitness.values

        for part in mod_pop:
            toolbox.update(part, best)

        mod_pop.extend(some_inds)
        pop[:] = mod_pop

        # Gather all the stats in one list and print them
        logbook.record(gen=g + 1, evals=len(pop), **stats.compile(pop))
        print(logbook.stream)

    end = time.time()
    print('### EVOLUTION END ###')
    best_ind = tools.selBest(pop, 1)[0]
    print(f'Best individual: {best_ind}')
    route = create_route_from_ind(best_ind, instance)
    print_route(route)
    print(f'Fitness: { round(best_ind.fitness.values[0],2) }')
    print(f'Total cost: { round(calculate_fitness(best_ind, instance)[1],2) }')
    print(f'Found in (iteration): { iter_num }')
    print(f'Execution time (s): { round(end - start,2) }')
    # print(f'{round(best_ind.fitness.values[0], 2)} & {round(calculate_fitness(best_ind, instance)[1],2)} & {iter_num} & {round(end - start, 2)}')

    if plot:
        plot_route(route=route, instance_name=instance_name)

    return route
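
An invocation sketch; the instance name and parameter values below are illustrative placeholders (not taken from the original project), shown only to illustrate the expected argument shapes.

# Hypothetical call (sketch only); 'C101' stands in for a real instance name.
if __name__ == '__main__':
    run_pso(instance_name='C101',
            particle_size=25,
            pop_size=50,
            max_iteration=200,
            cognitive_coef=1.5,
            social_coef=2.0,
            s_limit=3,
            plot=False)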
Example No. 42
def main(epsilon, delta):
	global lambda_values
	global population_size
	global population
	global population_obj_func_vals
	global iterations

	outputs = []

	for lambda_index in range(1, number_of_lambdas+1):
		_lambda = float(lambda_index-1)/(number_of_lambdas-1)

		improved_solutions = []							#improved solutions for a particular lambda
		v_lambda = float('Inf')

		for i in range(1 , population_size+1):
			sample = [[0,0] for x in range(number_of_assets)]
			Q = random.sample([x for x in range(1, number_of_assets+1)],10)

			for asset in Q:
				sample[asset-1] = [1, myRandom(0,1)]
			
			population[i-1] = list(sample)

		for ind,S in enumerate(population):
			[population_obj_func_vals[ind], v_lambda, improved] = evaluate(S, _lambda, 0, v_lambda, False, improved_solutions, UPDATE_H)

		for itr in range(iterations):
			#binary tournament for selecting parents S_star and S_double_star
			chosen = []
			for i in xrange(2):
				aspirants = tools.selRandom(population, 40)
				for i,aspirant in enumerate(aspirants):
					h = []
					f_aspirant = 0
					v = float('Inf')
					imp = False
					[f_aspirant, v, imp] = evaluate(aspirant, _lambda, 0, v, False, h, DONT_UPDATE_H)
					aspirants[i] = [aspirant, f_aspirant]
					
				chosen.append(min(aspirants, key=lambda x: x[1])[0])
			
			S_star = list(chosen[0])
			S_double_star = list(chosen[1])
			
			#Uniform crossover to find child C
			C = []
			[C1, C2] = tools.cxUniform(S_star, S_double_star, ind_prob)
			f1 = f2 = 0
			v = float('Inf')
			imp = False
			h = []
			[f1, v, imp] = evaluate(C1, _lambda, 0, v, False, h, DONT_UPDATE_H)
			[f2, v, imp] = evaluate(C2, _lambda, 0, v, False, h, DONT_UPDATE_H)

			if f1<f2:
				C = C1
			else:
				C = C2

			#find assets in parents but not in child
			A_star = []
			for i in range(number_of_assets):
				if S_star[i][0]==1 and S_double_star[i][0]==0 and C[i][0]==0:
					A_star.append([i+1, S_star[i][1]])
				elif S_star[i][0]==0 and S_double_star[i][0]==1 and C[i][0]==0:
					A_star.append([i+1, S_double_star[i][1]])

			#Mutation
			mutation_index = random.randint(0, number_of_assets-1)
			while C[mutation_index][0]==0:
				mutation_index = random.randint(0, number_of_assets-1)
			m = random.randint(0,1)

			if m == 0:
				# C_i = 0.9*(epsilon + C[mutation_index][1]) - epsilon
				C_i = 0.9*(epsilon[mutation_index] + C[mutation_index][1]) - epsilon[mutation_index]
			else:
				# C_i = 1.1*(epsilon + C[mutation_index][1]) - epsilon
				C_i = 1.1*(epsilon[mutation_index] + C[mutation_index][1]) - epsilon[mutation_index]

			if C_i<0:
				C[mutation_index][0] = 0
				C[mutation_index][1] = 0

			#check if child contains more or less than K assets and fix it
			total_assets_in_child = 0
			child_copy = list(C)
			sorted_child_copy = sorted(child_copy, key=itemgetter(1))
			
			for item in sorted_child_copy:
				if item[0]==1:
					total_assets_in_child += 1
			if total_assets_in_child > K:
				for item in sorted_child_copy[:-K]:
					# C.index(item) = [0,0]
					C[C.index(item)] = [0,0]
			elif total_assets_in_child < K:
				while total_assets_in_child < K:
					if len(A_star) > 0:
						random_index = random.randint(0,len(A_star)-1)
						random_element = A_star.pop(random_index)
						C[random_element[0]-1] = [1, random_element[1]]
					else:
						random_index = random.randint(0,30)
						while C[random_index][0]==1:
							random_index = random.randint(0,30)
						C[random_index] = [1, 0]
					total_assets_in_child += 1
					
			#change the population by adding child to it
			obj_func_val_child = 0
			[obj_func_val_child, v_lambda, improved] = evaluate(C, _lambda, 0, v_lambda, False, improved_solutions, UPDATE_H)
			
			if improved:
				population[population_obj_func_vals.index(max(population_obj_func_vals))] = C
		
		print lambda_index

		outputs.append(evaluate(improved_solutions[-1], _lambda, 0, float('Inf'), False, None, FINAL_SAMPLE))

		H.append(improved_solutions)

	np_out_matrix = np.matrix(outputs)
	np_out_matrix = np_out_matrix.T
	outputs = np_out_matrix.tolist()

	f = open("out5.csv", "w")
	writer = csv.writer(f)
	for row in outputs:
		writer.writerow(tuple(row))
	f.close()
Example No. 43
    def random_subset(self):
        # return random.sample(self.population, self.subset_size)
        return tools.selRandom(self.population, self.subset_size)
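
A minimal context sketch, assuming the method belongs to a class holding population and subset_size (only those two attribute names come from the snippet; the rest is illustrative).

from deap import tools

# Hypothetical wrapper class (sketch only).
class SubsetSampler:
    def __init__(self, population, subset_size):
        self.population = population
        self.subset_size = subset_size

    def random_subset(self):
        # tools.selRandom samples with replacement, unlike random.sample.
        return tools.selRandom(self.population, self.subset_size)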
Example No. 44
def main():
    pop_ga = toolbox_ga.population(n=200)
    pop_gp = tools_gp.population(n=200)
    
    stats_ga = tools.Statistics(lambda ind: ind.fitness.values)
    stats_ga.register("avg", tools.mean)
    stats_ga.register("std", tools.std)
    stats_ga.register("min", min)
    stats_ga.register("max", max)
    
    stats_gp = tools.Statistics(lambda ind: ind.fitness.values)
    stats_gp.register("avg", tools.mean)
    stats_gp.register("std", tools.std)
    stats_gp.register("min", min)
    stats_gp.register("max", max)
    
    column_names = ["gen", "evals"]
    column_names.extend(stats_ga.functions.keys())
    logger = tools.EvolutionLogger(column_names)
    logger.logHeader()
    
    best_ga = tools.selRandom(pop_ga, 1)[0]
    best_gp = tools.selRandom(pop_gp, 1)[0]
    
    for ind in pop_gp:
        ind.fitness.values = evalSymbReg(ind, best_ga)  
    
    for ind in pop_ga:
        ind.fitness.values = evalSymbReg(best_gp, ind)
    
    stats_ga.update(pop_ga)
    stats_gp.update(pop_gp)
    
    logger.logGeneration(gen="0 (ga)", evals=len(pop_ga), stats=stats_ga)
    logger.logGeneration(gen="0 (gp)", evals=len(pop_gp), stats=stats_gp)
    
    CXPB, MUTPB, NGEN = 0.5, 0.2, 50
    
    # Begin the evolution
    for g in range(1, NGEN):
        # Select and clone the offspring
        off_ga = toolbox_ga.select(pop_ga, len(pop_ga))
        off_gp = tools_gp.select(pop_gp, len(pop_gp))
        off_ga = [toolbox_ga.clone(ind) for ind in off_ga]        
        off_gp = [tools_gp.clone(ind) for ind in off_gp]
    
    
        # Apply crossover and mutation
        for ind1, ind2 in zip(off_ga[::2], off_ga[1::2]):
            if random.random() < CXPB:
                toolbox_ga.mate(ind1, ind2)
                del ind1.fitness.values
                del ind2.fitness.values
    
        for ind1, ind2 in zip(off_gp[::2], off_gp[1::2]):
            if random.random() < CXPB:
                tools_gp.mate(ind1, ind2)
                del ind1.fitness.values
                del ind2.fitness.values
    
        for ind in off_ga:
            if random.random() < MUTPB:
                toolbox_ga.mutate(ind)
                del ind.fitness.values
    
        for ind in off_gp:
            if random.random() < MUTPB:
                tools_gp.mutate(ind)
                del ind.fitness.values
    
        # Evaluate the individuals with an invalid fitness
        for ind in off_ga:
            ind.fitness.values = evalSymbReg(best_gp, ind)
        
        for ind in off_gp:
            ind.fitness.values = evalSymbReg(ind, best_ga)
                
        # Replace the old population by the offspring
        pop_ga = off_ga
        pop_gp = off_gp
        
        stats_ga.update(pop_ga)
        stats_gp.update(pop_gp)
        
        best_ga = tools.selBest(pop_ga, 1)[0]
        best_gp = tools.selBest(pop_gp, 1)[0]    
    
        logger.logGeneration(gen="%d (ga)" % g, evals=len(off_ga), stats=stats_ga)
        logger.logGeneration(gen="%d (gp)" % g, evals=len(off_gp), stats=stats_gp)

    print("Best individual GA is %s, %s" % (best_ga, best_ga.fitness.values))
    print("Best individual GP is %s, %s" % (gp.stringify(best_gp), best_gp.fitness.values))

    return pop_ga, pop_gp, stats_ga, stats_gp, best_ga, best_gp
Example No. 45
def main():
    pop_ga = toolbox_ga.population(n=200)
    pop_gp = toolbox_gp.population(n=200)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = "gen", "type", "evals", "std", "min", "avg", "max"

    best_ga = tools.selRandom(pop_ga, 1)[0]
    best_gp = tools.selRandom(pop_gp, 1)[0]

    for ind in pop_gp:
        ind.fitness.values = toolbox_gp.evaluate(ind, points=best_ga)

    for ind in pop_ga:
        ind.fitness.values = toolbox_gp.evaluate(best_gp, points=ind)

    record = stats.compile(pop_ga)
    logbook.record(gen=0, type='ga', evals=len(pop_ga), **record)

    record = stats.compile(pop_gp)
    logbook.record(gen=0, type='gp', evals=len(pop_gp), **record)

    print(logbook.stream)

    CXPB, MUTPB, NGEN = 0.5, 0.2, 50

    # Begin the evolution
    for g in range(1, NGEN):
        # Select and clone the offspring
        off_ga = toolbox_ga.select(pop_ga, len(pop_ga))
        off_gp = toolbox_gp.select(pop_gp, len(pop_gp))
        off_ga = [toolbox_ga.clone(ind) for ind in off_ga]
        off_gp = [toolbox_gp.clone(ind) for ind in off_gp]

        # Apply crossover and mutation
        for ind1, ind2 in zip(off_ga[::2], off_ga[1::2]):
            if random.random() < CXPB:
                toolbox_ga.mate(ind1, ind2)
                del ind1.fitness.values
                del ind2.fitness.values

        for ind1, ind2 in zip(off_gp[::2], off_gp[1::2]):
            if random.random() < CXPB:
                toolbox_gp.mate(ind1, ind2)
                del ind1.fitness.values
                del ind2.fitness.values

        for ind in off_ga:
            if random.random() < MUTPB:
                toolbox_ga.mutate(ind)
                del ind.fitness.values

        for ind in off_gp:
            if random.random() < MUTPB:
                toolbox_gp.mutate(ind)
                del ind.fitness.values

        # Evaluate the individuals with an invalid fitness
        for ind in off_ga:
            ind.fitness.values = toolbox_gp.evaluate(best_gp, points=ind)

        for ind in off_gp:
            ind.fitness.values = toolbox_gp.evaluate(ind, points=best_ga)

        # Replace the old population by the offspring
        pop_ga = off_ga
        pop_gp = off_gp

        record = stats.compile(pop_ga)
        logbook.record(gen=g, type='ga', evals=len(pop_ga), **record)

        record = stats.compile(pop_gp)
        logbook.record(gen=g, type='gp', evals=len(pop_gp), **record)
        print(logbook.stream)

        best_ga = tools.selBest(pop_ga, 1)[0]
        best_gp = tools.selBest(pop_gp, 1)[0]

    print("Best individual GA is %s, %s" % (best_ga, best_ga.fitness.values))
    print("Best individual GP is %s, %s" % (best_gp, best_gp.fitness.values))

    return pop_ga, pop_gp, best_ga, best_gp, logbook
Example No. 46
def selBestOrRandom(individuals, k):
    chosen = []
    if random.random() <= 0.1:
        return tools.selRandom(individuals, k)
    else:
        return tools.selBest(individuals, k)