Example #1
def genetic():

    population_size = 5   # number of solutions in the population
    num_generations = 10  # number of times a new population is generated

    # create a minimizing fitness (negative weight), because we want to minimize RMSE;
    # note that the name 'FitnessMax' is misleading -- the -1.0 weight makes this a minimization
    creator.create('FitnessMax', base.Fitness, weights=(-1.0, ))
    # create an Individual class: a list that encodes one solution (integer genes)
    creator.create('Individual', list, fitness=creator.FitnessMax)

    # create an object of Toolbox class
    toolbox = base.Toolbox()

    toolbox.register("attr_int2", random.randint, 100, 2000)
    toolbox.register("attr_int3", random.randint, 1, 5)
    toolbox.register("attr_int4", random.randint, 1, 5)
    toolbox.register("attr_int5", random.randint, 1, 5)
    toolbox.register("attr_int6", random.randint, 1, 6)
    toolbox.register("individual",
                     tools.initCycle,
                     creator.Individual,
                     (toolbox.attr_int2, toolbox.attr_int3, toolbox.attr_int4,
                      toolbox.attr_int5, toolbox.attr_int6),
                     n=1)

    toolbox.register('population', tools.initRepeat, list, toolbox.individual)

    toolbox.register('mate', tools.cxTwoPoint)
    toolbox.register('mutate',
                     tools.mutUniformInt,
                     low=[100, 1, 1, 1, 1],
                     up=[2000, 5, 5, 5, 6],
                     indpb=0.6)
    # selRoulette cannot handle a minimizing fitness (DEAP's roulette weighting
    # favours larger raw fitness values); tools.selTournament is the safer choice here
    toolbox.register('select', tools.selRoulette)
    toolbox.register('evaluate', train_evaluate)

    # create population by calling population function
    population = toolbox.population(n=population_size)

    hof = tools.HallOfFame(3)

    # start GA
    r = algorithms.eaSimple(population,
                            toolbox,
                            cxpb=0.4,
                            mutpb=0.1,
                            ngen=num_generations,
                            halloffame=hof,
                            verbose=False)

    # Print top N solutions
    best_individuals = tools.selBest(hof, k=3)
    best_epochs = None
    best_neuron1 = None
    best_neuron2 = None
    best_neuron3 = None
    best_window = None

    print("\nBest solution is:")
    for bi in best_individuals:
        best_epochs = bi[0]
        best_neuron1 = bi[1]
        best_neuron2 = bi[2]
        best_neuron3 = bi[3]
        best_window = bi[4]

        print('\n epochs = ', best_epochs, ", neurons = [", best_neuron1, ",",
              best_neuron2, ",", best_neuron3, "],", "window size = ",
              best_window)
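
This snippet assumes the DEAP imports and a train_evaluate function that scores one [epochs, neurons1, neurons2, neurons3, window] candidate and returns a one-element tuple, as DEAP requires. A minimal sketch of that contract (the score below is a placeholder, not the author's model training):

import random
from deap import algorithms, base, creator, tools

def train_evaluate(individual):
    # placeholder evaluator: unpack the genes and return a 1-tuple
    epochs, n1, n2, n3, window = individual
    rmse = 1.0 / (epochs + n1 + n2 + n3 + window)  # stand-in for a real training run
    return rmse,
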
Example #2
def main(teacher, paper, M, K, F, CR):

    threshold = 1 - 7.5 / 50
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    toolbox.register("paper_assignment", numpy.random.random_sample, paper)
    toolbox.register("individual",
                     tools.initRepeat,
                     creator.Individual,
                     toolbox.paper_assignment,
                     n=teacher)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("select", tools.selRandom, k=3)
    toolbox.register("selectBest", tools.selBest, k=1)
    toolbox.register("selectWorst", tools.selWorst, k=1)
    toolbox.register("evaluate", evalCost, teacher, paper, threshold)

    pop = toolbox.population(n=M)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"

    # Evaluate the individuals
    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(pop), **record)
    print(logbook.stream)

    result = []
    for g in range(1, K):
        for k, agent in enumerate(pop):
            a, b, c = toolbox.select(pop)
            maxp, = toolbox.selectBest(pop)
            minp, = toolbox.selectWorst(pop)
            y = toolbox.clone(agent)
            # one dimension whose value is always taken from the donor vector
            index = random.randrange(paper)
            for idx1, arr in enumerate(agent):
                for idx2, j in enumerate(arr):
                    if idx2 == index or random.random() < CR:
                        y[idx1][idx2] = a[idx1][idx2] + F * (b[idx1][idx2] -
                                                             c[idx1][idx2])
                        if y[idx1][idx2] > 1 or y[idx1][idx2] < 0:
                            # out of bounds: redraw a random value in [0, 1)
                            y[idx1][idx2] = numpy.random.random_sample()
            y.fitness.values = toolbox.evaluate(y)
            if y.fitness > agent.fitness:
                pop[k] = y
        hof.update(pop)
        record = stats.compile(pop)
        logbook.record(gen=g, evals=len(pop), **record)
        result.append(record["min"])
        print(logbook.stream)

    result.append(hof[0].fitness.values[0])
    final = getFinalMatrix(hof[0], teacher, paper, threshold)
    print("Best individual calculated by DE algorithm is ", final,
          hof[0].fitness.values[0])
    return result
Example #3
def run():

    for i in range(number_of_runs):
        ###################################################################
        #EVOLUTIONARY ALGORITHM
        ###################################################################
        #TYPE
        #Create minimizing fitness class w/ single objective:
        creator.create('FitnessMin', base.Fitness, weights=(-1.0, ))
        #Create individual class:
        creator.create('Individual', list, fitness=creator.FitnessMin)

        #TOOLBOX
        toolbox = base.Toolbox()
        #Register function to create a random number in the interval [0, 1):
        #toolbox.register('init_params', )
        #Register function to use initRepeat to fill individual w/ n calls to np.random.random:
        toolbox.register('individual',
                         tools.initRepeat,
                         creator.Individual,
                         np.random.random,
                         n=number_of_params)
        #Register function to use initRepeat to fill population with individuals:
        toolbox.register('population', tools.initRepeat, list,
                         toolbox.individual)

        #GENETIC OPERATORS:
        # Register evaluate fxn = evaluation function, individual to evaluate given later
        toolbox.register('evaluate', scorefxn_helper)
        # Register mate fxn = two points crossover function
        toolbox.register('mate', tools.cxTwoPoint)
        # Register mutate = bounded polynomial mutation on [0, 1]:
        toolbox.register('mutate',
                         tools.mutPolynomialBounded,
                         eta=0.1,
                         low=0.0,
                         up=1.0,
                         indpb=0.2)
        # Register select = size of tournament set to 3
        toolbox.register('select', tools.selTournament, tournsize=3)

        #EVOLUTION!
        pop = toolbox.population(n=number_of_individuals)
        hof = tools.HallOfFame(1)

        stats = tools.Statistics(key=lambda ind: [ind.fitness.values, ind])
        stats.register('all', np.copy)

        # using built in eaSimple algo
        pop, logbook = algorithms.eaSimple(pop,
                                           toolbox,
                                           cxpb=crossover_rate,
                                           mutpb=mutation_rate,
                                           ngen=number_of_generations,
                                           stats=stats,
                                           halloffame=hof,
                                           verbose=False)
        # print(f'Run number completed: {i}')

        ###################################################################
        #MAKE LISTS
        ###################################################################
        # Find best scores and individuals in population
        arr_best_score = []
        arr_best_ind = []
        for a in range(len(logbook)):
            scores = []
            for b in range(len(logbook[a]['all'])):
                scores.append(logbook[a]['all'][b][0][0])
            #print(a, np.nanmin(scores), np.nanargmin(scores))
            arr_best_score.append(np.nanmin(scores))
            #the stored individuals are of type 'deap.creator.Individual' and must be loaded later;
            #we don't want to load DEAP just to view the data every time, thus numpy
            ind_np = np.asarray(logbook[a]['all'][np.nanargmin(scores)][1])
            ind_np_conv = convert_individual(ind_np, arr_conversion_matrix,
                                             number_of_params)
            arr_best_ind.append(ind_np_conv)
            #arr_best_ind.append(np.asarray(logbook[a]['all'][np.nanargmin(scores)][1]))

        # print('Best individual is:\n %s\nwith fitness: %s' %(arr_best_ind[-1],arr_best_score[-1]))

        ###################################################################
        #PICKLE
        ###################################################################
        arr_to_pickle = [arr_best_score, arr_best_ind]

        def get_filename(val):
            # zero-pad the run counter to four digits
            filename_base = dir_to_use + '/' + stripped_name + '_'
            return filename_base + str(val).zfill(4) + '.pickled'

        counter = 0
        filename = get_filename(counter)
        while os.path.isfile(filename):
            counter += 1
            filename = get_filename(counter)

        pickle.dump(arr_to_pickle, open(filename, 'wb'))
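
The pickled file written above can be read back with pickle.load; a short sketch (the filename below is hypothetical, following the zero-padded pattern produced by get_filename):

import pickle

with open('stripped_name_0000.pickled', 'rb') as fh:  # hypothetical path
    arr_best_score, arr_best_ind = pickle.load(fh)
print(arr_best_score[-1])  # best score of the final generation
print(arr_best_ind[-1])    # corresponding converted individual
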
Example #4
    #scaler.fit(data)
    #datos = scaler.transform(data)
    datos = data

    k = 2

    toolbox = base.Toolbox()

    toolbox.register("evaluate", operadores.ev_silhouette, datos=datos,
                     n_clusters=k)
    toolbox.register("mate", tools.cxOnePoint)
    toolbox.register("mutate", operadores.mut_centroide_random, datos=datos,
                     indpb=0.2)
    toolbox.register("select", tools.selRandom, k=100)

    halloffame = tools.HallOfFame(5, similar=np.array_equal)

    inicio = time.time()
    pob_inicial = generar_poblacion_centroides(k, 100, [np.min(datos, axis=0),
                                                       np.max(datos, axis=0)])
    fin = time.time()

    print("\nTiempo generacion poblacion {0}".format(fin - inicio))

    inicio = time.time()
    poblacion = clustering_genetico_simple(pob_inicial, toolbox, 0.5, 0.2, 20,
                                           halloffame=halloffame)

    fin = time.time()
    print('\nBest individual:\n', halloffame[0])
    print("\n Puntaje: {0}".format(halloffame[0].fitness.values))
Example #5
    def _fit(self, X, y, parameter_dict):

        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

        n_samples = _num_samples(X)
        X, y = indexable(X, y)

        if y is not None:
            if len(y) != n_samples:
                raise ValueError(
                    "Target variable (y) has a different number "
                    "of samples (%i) than data (X: %i samples)" % (len(y), n_samples)
                )
        cv = self.cv

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            #  * y.shape[1]
            creator.create("FitnessMax", base.Fitness, weights=(1.0,))
            creator.create(
                "Individual", list, est=clone(self.estimator), fitness=creator.FitnessMax
            )

        toolbox = base.Toolbox()

        name_values, gene_type, maxints = _get_param_types_maxint(parameter_dict)
        if self.gene_type is None:
            self.gene_type = gene_type

        if self.verbose:
            print("Types %s and maxint %s detected" % (self.gene_type, maxints))

        toolbox.register("individual", _initIndividual, creator.Individual, maxints=maxints)
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)

        toolbox.register(
            "evaluate",
            _evalFunction,
            name_values=name_values,
            X=X,
            y=y,
            scorer=self.scorer_,
            cv=cv,
            iid=self.iid,
            verbose=self.verbose,
            error_score=self.error_score,
            fit_params=self.fit_params,
        )

        toolbox.register(
            "mate", _cxIndividual, indpb=self.gene_crossover_prob, gene_type=self.gene_type
        )

        toolbox.register("mutate", _mutIndividual, indpb=self.gene_mutation_prob, up=maxints)
        toolbox.register("select", tools.selTournament, tournsize=self.tournament_size)

        if self.n_jobs > 1:
            pool = Pool(processes=self.n_jobs)
            toolbox.register("map", pool.map)
        pop = toolbox.population(n=self.population_size)
        hof = tools.HallOfFame(1)
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("min", np.min)
        stats.register("max", np.max)

        if self.verbose:
            msg_template = "--- Evolve in {0} possible combinations ---"
            print(msg_template.format(np.prod(np.array(maxints) + 1)))

        pop, logbook = algorithms.eaSimple(
            pop,
            toolbox,
            cxpb=0.5,
            mutpb=0.2,
            ngen=self.generations_number,
            stats=stats,
            halloffame=hof,
            verbose=self.verbose,
        )
        print(hof[0].fitness.values)

        current_best_score_ = hof[0].fitness.values[0]
        current_best_params_ = _individual_to_params(hof[0], name_values)

        print("cbp", current_best_params_)
        if self.verbose:
            print(
                "Best individual is: %s\nwith fitness: %s"
                % (current_best_params_, current_best_score_)
            )

        if current_best_score_ > self.best_score_:
            self.best_score_ = current_best_score_
            self.best_params_ = current_best_params_
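
The helpers _get_param_types_maxint and _individual_to_params are not shown here; the encoding they imply is that every gene is an index into one parameter's list of candidate values. A sketch of that mapping under an assumed parameter grid:

# hypothetical grid; each gene indexes one parameter's list of candidate values
param_grid = {'alpha': [0.01, 0.1, 1.0], 'fit_intercept': [True, False]}
name_values = sorted(param_grid.items())
maxints = [len(values) - 1 for _, values in name_values]   # e.g. [2, 1]
individual = [2, 0]                                        # one integer-encoded candidate
params = {name: values[gene] for (name, values), gene in zip(name_values, individual)}
print(params)  # {'alpha': 1.0, 'fit_intercept': True}
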
Example #6
	def evolveGeneration(self):
		"""
		A method to evolve a species of boid using a 
		generational genetic algorithm. Authored by Zach Sipper,
		modified from evolveSteady() by Gianni Orlando. Edited
		by Ryan McArdle.

		:returns: the best individual found by the evolution
		"""
		
		name = "Generation"

		self.prepareEvolution()
		
		# Initialize fitness goal and individual type
		creator.create("FitnessMax", base.Fitness, weights=(1.0,))
		creator.create("Individual", array.array, typecode='d',fitness=creator.FitnessMax)
		
		#Initialize individuals, populations, and evolution operators
		toolbox = base.Toolbox()
		toolbox.register("individual", self.initBoids, creator.Individual, *self.parameter_bounds)
		toolbox.register("population", tools.initRepeat, list, toolbox.individual)
		toolbox.register("evaluate", self.boidFitness)
		toolbox.register("mate", tools.cxOnePoint)
		toolbox.register("mutate", tools.mutGaussian, indpb=0.5, mu=25.5, sigma=12.5) # 50% chance for each value to mutate
		toolbox.register("mutate2", tools.mutShuffleIndexes, indpb=0.5) # 50% chance for each value to mutate
		toolbox.register("select", tools.selTournament, tournsize= 5)

		toolbox.decorate('mate', self.checkBounds(self.parameter_bounds))
		toolbox.decorate('mutate', self.checkBounds(self.parameter_bounds))
		toolbox.decorate('mutate2', self.checkBounds(self.parameter_bounds))

		stats = tools.Statistics(lambda ind: ind.fitness.values)
		stats.register("avg", np.mean)
		stats.register("std", np.std)
		stats.register("min", np.min)
		stats.register("max", np.max)
		self.logbook = tools.Logbook()
		self.logbook.header = 'gen','evals','min','max','avg','std'
		hof = tools.HallOfFame(1)

		
		pop = toolbox.population(n=self.mu)

		#Evaluate the entire population
		seed = random.randint(1, 10**10)
		## Evaluate the entire population for fitness in parallel
		if __name__=="__main__":
			with mp.Pool(self.num_processes) as pool:
				fitnesses = pool.starmap(self.boidFitness, [(boid.tolist(),seed,self.current_evals,self.eval_limit) for boid in pop])

		for ind, fit in zip(pop, fitnesses):
			ind.fitness.values = fit,

		#Extract all of the fitness values in the population
		fits = [ind.fitness.values[0] for ind in pop]

		#Variable keeping track of the number of generations
		g = 0

		## Record the initial population
		self.current_evals += len(pop)
		record = stats.compile(pop)
		logbook = tools.Logbook()
		logbook.header = 'gen','evals','min','max','avg','std'
		logbook.record(gen=0, evals=self.current_evals, **record)
		print(logbook.stream)
		hof.update(pop)
		print(hof[0])
		print(hof[0].fitness.values[0])
		
		#Begin the evolution
		while hof[0].fitness.values[0] < 1.0 and self.current_evals < self.eval_limit:
			#A new generation
			g = g + 1

			# Gather all the fitnesses in one list and print the stats
			fits = [ind.fitness.values for ind in pop]
			
			############################################################
			####	Code between hash lines produced by Zach Sipper and 
			####	edited by Ryan McArdle.
			####
			# Select the next generation individuals
			bestIndv = tools.selBest(pop, k=2)

			# Elitism: put top two individuals in next generation
			nextGeneration = [*bestIndv]
			#nextGeneration += bestIndv

			# perform crossover on pairs until nextGeneration is equal in size to pop
			# case that len(pop) is even
			if len(pop) % 2 == 0:
				while len(nextGeneration) < (len(pop)):
					parents = toolbox.select(pop,2)
					parents = list(map(toolbox.clone,parents))
					toolbox.mate(parents[0], parents[1])
					for parent in parents:
						del parent.fitness.values

					# mutation of children with 'MUTPB' chance
					for parent in parents:
						if random.random() < self.MUTPB:
							toolbox.mutate(parent)
							del parent.fitness.values

					# add the pair of children to nextGeneration
					nextGeneration.extend(parents)

			# case that len(pop) is odd
			if len(pop) % 2 == 1:
				while len(nextGeneration) < len(pop)-2:
					parents = toolbox.select(pop, 2)
					parents = list(map(toolbox.clone, parents))
					toolbox.mate(parents[0], parents[1])
					for parent in parents:
						del parent.fitness.values

					# mutation of children with 'MUTPB' chance
					for parent in parents:
						if random.random() < self.MUTPB:
							toolbox.mutate(parent)
							del parent.fitness.values

					# add the pair of children to nextGeneration
					nextGeneration.extend(parents)

				# add one more child to nextGeneration
				parents = toolbox.select(pop,2)
				toolbox.mate(parents[0], parents[1])
				child = parents[0]
				del child.fitness.values

				# mutation of child
				if random.random() < self.MUTPB:
					toolbox.mutate(child)
					del child.fitness.values

				# add the child to nextGeneration
				nextGeneration.append(toolbox.clone(child))

			pop = list(map(toolbox.clone,nextGeneration))
			####
			####
			############################################################

			#Evaluate the population with invalid fitnesses
			invalid_ind = [ind for ind in pop if not ind.fitness.valid]
			seed = random.randint(1, 10**10)
			if __name__=="__main__":
				with mp.Pool(self.num_processes) as pool:
					fitnesses = pool.starmap(self.boidFitness, [(boid.tolist(),seed,self.current_evals,self.eval_limit) for boid in invalid_ind])

			## Apply found fitness values
			for ind, fit in zip(invalid_ind, fitnesses):
				ind.fitness.values = fit,

			# Extract all of the fitness values in the population
			fits = [ind.fitness.values[0] for ind in pop]

			##################################################
			## Record the new generation
			hof.update(pop)
			record = stats.compile(pop)
			self.current_evals += len(invalid_ind)
			logbook.record(gen=g, evals=self.current_evals, **record)
			print(logbook.stream)
			print(hof[0])
			print(hof[0].fitness.values[0])
			#bestFit, bestDetailFit, bestFitWeight = self.boidFitness(hof[0].tolist(),seed,self.current_evals-len(invalid_ind),self.eval_limit,detail=True)
			#print(bestDetailFit)
			##################################################


		self.plotAndRecord(logbook,hof,name)

		return hof[0]
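
The checkBounds decorator used to wrap 'mate' and 'mutate' above is not part of this snippet. A sketch of the usual DEAP bound-enforcing decorator, assuming parameter_bounds is a (lows, highs) pair of per-gene sequences (that layout is an assumption):

def checkBounds(self, bounds):
    # clip every gene of every returned child into [low, high]
    lows, highs = bounds  # assumed layout, not taken from the original class
    def decorator(func):
        def wrapper(*args, **kwargs):
            offspring = func(*args, **kwargs)
            for child in offspring:
                for i in range(len(child)):
                    child[i] = min(max(child[i], lows[i]), highs[i])
            return offspring
        return wrapper
    return decorator
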
Example #7
	def evolveMuPlusLambda(self,lambda_ratio=7):
		"""
		A method to evolve a species of boid using a 
		(Mu + Lambda) evolutionary strategy. Method authored
		by Olusade Calhoun. Edited by Ryan McArdle.

		:returns: the best individual found by the evolution
		"""

		name = "MuPlusLambda"+str(lambda_ratio)

		self.prepareEvolution(lambda_ratio)


		creator.create("FitnessMax", base.Fitness, weights=(1.0,))
		creator.create("Individual", array.array, typecode="d", fitness=creator.FitnessMax, strategy=None)
		creator.create("Strategy", array.array, typecode="d")

		toolbox = base.Toolbox()
		toolbox.register("individual", self.initBoidsStrat, creator.Individual, creator.Strategy, *self.parameter_bounds, *self.strategy_bounds) 
		toolbox.register("population", tools.initRepeat, list, toolbox.individual)
		toolbox.register("mate", tools.cxESBlend, alpha=0.333)
		toolbox.register("mutate", tools.mutESLogNormal, c=0.01, indpb=0.1)
		toolbox.register("select", tools.selRandom)
		toolbox.register("evaluate", self.boidFitness)

		toolbox.decorate('mate', self.checkBounds(self.parameter_bounds))
		toolbox.decorate('mutate', self.checkBounds(self.parameter_bounds))
		toolbox.decorate('mate', self.checkStrategy(self.strategy_bounds))
		toolbox.decorate('mutate', self.checkStrategy(self.strategy_bounds))

		stats = tools.Statistics(lambda ind: ind.fitness.values)
		stats.register("avg", np.mean)
		stats.register("std", np.std)
		stats.register("min", np.min)
		stats.register("max", np.max)
		self.logbook = tools.Logbook()
		self.logbook.header = 'gen','evals','min','max','avg','std'
		hof = tools.HallOfFame(1)

		pop = toolbox.population(n=self.mu)
		seed = random.randint(1, 10**10)

		## Evaluate the entire population for fitness in parallel
		if __name__=="__main__":
			with mp.Pool(self.num_processes) as pool:
				fitnesses = pool.starmap(self.boidFitness, [(boid.tolist(),seed,self.current_evals,self.eval_limit) for boid in pop])

		for ind, fit in zip(pop, fitnesses):
			ind.fitness.values = fit,

		g = 0

		self.current_evals += len(pop)
		record = stats.compile(pop)
		logbook = tools.Logbook()
		logbook.header = 'gen','evals','min','max','avg','std'
		logbook.record(gen=0, evals=self.current_evals, **record)
		print(logbook.stream)
		hof.update(pop)
		print(hof[0])
		print(hof[0].fitness.values[0])

		## Main loop
		while True:
			if self.current_evals < (self.eval_limit - self.lambda_):
				g+=1
				offspring = toolbox.select(pop, self.lambda_)
				offspring = list(map(toolbox.clone, offspring))

				#crossover and mutation
				for child1, child2 in zip(offspring[::2], offspring[1::2]):
					if random.random() < self.CXPB:
						toolbox.mate(child1, child2)
						del child1.fitness.values
						del child2.fitness.values

				for mutant in offspring:
					if random.random() < self.MUTPB:
						toolbox.mutate(mutant)
						del mutant.fitness.values

				invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
				seed = random.randint(1, 10**10)
				if __name__=="__main__":
					with mp.Pool(self.num_processes) as pool:
						fitnesses = pool.starmap(self.boidFitness, [(boid.tolist(),seed,self.current_evals,self.eval_limit) for boid in invalid_ind])

				for ind, fit in zip(invalid_ind, fitnesses):
					ind.fitness.values = fit,

				# keep the best mu of the combined parents and offspring (mu + lambda)
				newGen = list(map(toolbox.clone, [*pop, *offspring]))

				newGen.sort(key = lambda x: x.fitness.values[0])

				pop = list(map(toolbox.clone, newGen[-self.mu:])) #selects mu highest fitness in population

				##################################################
				## Record the new generation
				hof.update(pop)
				record = stats.compile(pop)
				self.current_evals += len(invalid_ind)
				logbook.record(gen=g, evals=self.current_evals, **record)
				print(logbook.stream)
				print(hof[0])
				print(hof[0].fitness.values[0])
				#bestFit, bestDetailFit, bestFitWeight = self.boidFitness(hof[0].tolist(),seed,self.current_evals-len(invalid_ind),self.eval_limit,detail=True)
				#print(bestDetailFit)
				##################################################

			else:
				break

		self.plotAndRecord(logbook,hof,name)

		return hof[0]
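
The checkStrategy decorator is likewise assumed; in DEAP's evolution-strategy examples it keeps every strategy value above a floor so the self-adapted step sizes cannot collapse to zero. A sketch, treating strategy_bounds as that minimum (its real layout in this class is not shown):

def checkStrategy(self, minstrategy):
    # enforce a minimum mutation step size on every child after mate/mutate
    def decorator(func):
        def wrapper(*args, **kwargs):
            children = func(*args, **kwargs)
            for child in children:
                for i, s in enumerate(child.strategy):
                    if s < minstrategy:
                        child.strategy[i] = minstrategy
            return children
        return wrapper
    return decorator
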
Example #8
    def init_algorithm(self, params):

        if params['algorithm_class'] not in ['simple', 'multiobjective']:
            raise ValueError('Non-existent algorithm class.')

        toolbox = base.Toolbox()
        ngen = params['generations']
        nind = params['num_individuals']
        cxpb = 0.5 if params['algorithm_class'] == 'simple' else 0.9
        lb, ub = -1.0, 1.0
        ind_size = self.problem.ind_size

        if nind % 4 != 0:
            raise ValueError('Number of individuals must be multiple of four')

        if not hasattr(creator, 'FitnessMin'):
            creator.create('FitnessMin', base.Fitness, weights=(-1.0, -1.0))

        if not hasattr(creator, 'Individual'):
            creator.create('Individual',
                           array.array,
                           typecode='d',
                           fitness=creator.FitnessMin)

        atr = lambda: [random.uniform(lb, ub) for _ in range(ind_size)]
        ind = lambda: tools.initIterate(creator.Individual, atr)
        population = [ind() for _ in range(nind)]

        if params['algorithm_class'] == 'simple':
            self.hof = tools.HallOfFame(1)
            mut = lambda xs: tools.mutGaussian(xs, mu=0, sigma=1, indpb=0.1)
            crs = tools.cxTwoPoint
            sel = lambda p, n: tools.selTournament(p, n, tournsize=3)
        else:
            self.hof = tools.ParetoFront()
            mut = lambda xs: tools.mutPolynomialBounded(
                xs, low=lb, up=ub, eta=20.0, indpb=1.0 / ind_size)
            crs = lambda ind1, ind2: tools.cxSimulatedBinaryBounded(
                ind1, ind2, low=lb, up=ub, eta=20.0)
            sel = tools.selNSGA2

        toolbox.register('evaluate', self.problem.objective_function)
        toolbox.register('mate', crs)
        toolbox.register('mutate', mut)
        toolbox.register('select', sel)

        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register('avg', np.mean, axis=0)
        stats.register('std', np.std, axis=0)
        stats.register('min', np.min, axis=0)
        stats.register('max', np.max, axis=0)

        args = (population, toolbox)
        kwargs = {'cxpb': cxpb, 'ngen': ngen, 'stats': stats}
        kwargs['halloffame'] = self.hof
        if params['algorithm_class'] == 'simple':
            kwargs['mutpb'] = 0.2
            kwargs['verbose'] = True
            self.algorithm = lambda: algorithms.eaSimple(*args, **kwargs)
        else:
            self.algorithm = lambda: self.multiobjective(*args, **kwargs)
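
The 'multiobjective' branch defers to self.multiobjective, which is not included in this snippet. A sketch of the standard DEAP NSGA-II loop it could wrap (an assumed implementation, not the author's), and the reason init_algorithm insists on a population size divisible by four:

from deap import algorithms, tools

def nsga2_loop(population, toolbox, cxpb, ngen, stats=None, halloffame=None):
    # evaluate the initial population and assign crowding distances
    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)
    population = tools.selNSGA2(population, len(population))
    for gen in range(1, ngen + 1):
        # selTournamentDCD requires len(population) % 4 == 0
        offspring = tools.selTournamentDCD(population, len(population))
        offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb=1.0)
        for ind in offspring:
            if not ind.fitness.valid:
                ind.fitness.values = toolbox.evaluate(ind)
        if halloffame is not None:
            halloffame.update(offspring)
        population = tools.selNSGA2(population + offspring, len(population))
        if stats is not None:
            print(gen, stats.compile(population))
    return population
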
Example #9
    "mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=10))


stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
stats_size = tools.Statistics(len)
mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
mstats.register("avg", np.mean)
mstats.register("std", np.std)
mstats.register("min", np.min)
mstats.register("max", np.max)

population = toolbox.population(n=300)
cxpb, mutpb, ngen, mu, lambda_ = 0.6, 0.15, 300, 200, 400
stats = mstats
halloffame = tools.HallOfFame(1)
verbose = True
wholeFitness = []
singleFitness = []
earlyStoppingThresh = 0.01
earlyStoppingGens = 150
print("Start of evolution")

logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
    ind.fitness.values = fit
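
The fragment stops right after the initial evaluation; earlyStoppingThresh and earlyStoppingGens hint at an early-stopping rule in the generational loop that is not included. One plausible continuation is sketched below, assuming a select operator was registered earlier, a (mu + lambda) scheme matching the mu/lambda_ constants above, and a minimizing first fitness objective:

from deap import algorithms  # assumed alongside the existing deap imports

halloffame.update(population)
best_so_far = min(ind.fitness.values[0] for ind in population)
stalled_gens = 0

for gen in range(1, ngen + 1):
    # breed lambda_ offspring, evaluate only the changed ones, keep the best mu
    offspring = algorithms.varOr(population, toolbox, lambda_, cxpb, mutpb)
    invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
    for ind, fit in zip(invalid_ind, toolbox.map(toolbox.evaluate, invalid_ind)):
        ind.fitness.values = fit
    halloffame.update(offspring)
    population[:] = toolbox.select(population + offspring, mu)

    logbook.record(gen=gen, nevals=len(invalid_ind), **mstats.compile(population))
    print(logbook.stream)

    # stop after earlyStoppingGens generations without an improvement
    # of at least earlyStoppingThresh in the best fitness
    gen_best = halloffame[0].fitness.values[0]
    if best_so_far - gen_best > earlyStoppingThresh:
        best_so_far, stalled_gens = gen_best, 0
    else:
        stalled_gens += 1
    if stalled_gens >= earlyStoppingGens:
        print('Early stopping at generation', gen)
        break
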
Example #10
def getParamter(real_matrix, multiple_matrix, testPosition):
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual",
                   array.array,
                   typecode='d',
                   fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register("attr_float", random.uniform, 0, 1)
    # Structure initializers
    variable_num = len(multiple_matrix)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_float, variable_num)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    #################################################################################################
    real_labels = []
    for i in range(0, len(testPosition)):
        real_labels.append(real_matrix[testPosition[i][0], testPosition[i][1]])

    multiple_prediction = []
    for i in range(0, len(multiple_matrix)):
        predicted_probability = []
        predict_matrix = multiple_matrix[i]
        for j in range(0, len(testPosition)):
            predicted_probability.append(predict_matrix[testPosition[j][0],
                                                        testPosition[j][1]])
        normalize = MinMaxScaler()
        predicted_probability = normalize.fit_transform(
            np.array(predicted_probability).reshape(-1, 1))
        predicted_probability = predicted_probability.flatten()
        multiple_prediction.append(predicted_probability)

    #################################################################################################
    toolbox.register("evaluate",
                     fitFunction,
                     parameter1=real_labels,
                     parameter2=multiple_prediction)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
    toolbox.register("select", tools.selTournament, tournsize=3)

    random.seed(0)
    pop = toolbox.population(n=100)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=0.5,
                                   mutpb=0.2,
                                   ngen=50,
                                   stats=stats,
                                   halloffame=hof,
                                   verbose=True)
    pop.sort(key=lambda ind: ind.fitness, reverse=True)
    print(pop[0])
    return pop[0]
Example #11
        os.makedirs(experiment_name + '/' + str(enemy))

    # default environment fitness is assumed for experiment
    env.state_to_log()  # checks environment state

    strategy = cma.Strategy(centroid=[0] * n_vars, sigma=1, lambda_=20)

    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    # multi processing
    pool = multiprocessing.Pool()
    toolbox.register("map", partial(mymap, pool.map))

    for i in range(10):
        hof = tools.HallOfFame(1, similar=np.array_equal)

        fitness_stats = tools.Statistics(lambda ind: ind.fitness.values)
        fitness_stats.register("time", timeit)
        energy_stats = tools.Statistics(lambda ind: ind.energy)
        stats = tools.MultiStatistics(fitness=fitness_stats,
                                      energy=energy_stats)
        stats.register("avg", np.mean)
        stats.register("std", np.std)
        stats.register("min", np.min)
        stats.register("max", np.max)

        ini = time.time()  # sets total time marker
        gen_start_time = time.time()  # sets gen time marker

        # The CMA-ES algorithm converges with good probability with these settings
Example #12
def main():
    np.random.seed(19) 
    x_p = [0] * N    # placeholder; overwritten with the rounded solution below
    fbest = []       # best f(x) per generation
    vbest = []       # best V (constraint violation) per generation
    Fbest = []       # best fitness value F per generation
    a = 0.99
    b = 1.01
    y_start = (b - a) * np.random.rand(N) + a
    y_p, fbest_nelder, Vbest_nelder, Fbest_nelder = nelder_mead(f_n, y_start, step=1.0, no_improve_thr=1.0e-5, no_improv_break=nib_n, max_iter=max_iter_n)  # Nelder-Mead method
    y_p = P1.y_01(y_p)    # round the y values to 0 or 1
    x_p = P1.y_to_x(y_p)  # derive the x values from y
        # for i in range(lambda_cmaes):
        #     x_p[i] = creator.Individual(x_p[i])
        # population = x_p
    fbest.extend(fbest_nelder)
    vbest.extend(Vbest_nelder)
    Fbest.extend(Fbest_nelder)
    '''cnt = 0 #x to xc (convert the ordinary x into the x used by CMA-ES)
    for i in range(P1.I*P1.N_t):
        if(1 < x_p[cnt]):
            x_p[cnt] -= P1.Q_t_min[0] - 1
        cnt += 1
    for i in range(P1.I*P1.N_s):
        if(1 < x_p[cnt]):
            x_p[cnt] -= P1.Q_s_min[0] - 1
        cnt += 1
    for i in range(P1.I):
        if(1 < x_p[cnt]):
            x_p[cnt] -= P1.E_g_min/P1.a_ge - 1
        cnt += 1
    for i in range(P1.I):
        if(1 < x_p[cnt]):
            x_p[cnt] -= P1.S_b_min/P1.a_b - 1
        cnt += 1'''
    # x_p, fbest_nelder = nelder_mead(f_n_x, np.array(x_p), step=0.1, no_improve_thr=1.0e-5, no_improv_break=100, max_iter=0)   # if Nelder-Mead were applied again
    # The CMA-ES algorithm
    strategy = cma.Strategy(centroid=x_p, sigma=0.1, lambda_=lambda_cmaes)  # use the final Nelder-Mead solution as the mean vector
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    halloffame = tools.HallOfFame(1)

        # halloffame_array = []
        # C_array = []
        # centroid_array = []
        # best = np.ndarray((NGEN, N))     # best x per generation

    for gen in range(NGEN):  # main CMA-ES loop
        # generate a new generation of individuals
        population = toolbox.generate()
        if gen == 0:
            population[0] = creator.Individual(x_p)  # seed the population with the Nelder-Mead solution
        # evaluate the population
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # update the strategy parameters for the next generation from the evaluated population
        toolbox.update(population)


        # update the hall of fame
        halloffame.update(population)

        V_c, f_c = P1.evaluate_f(halloffame[0])
        fbest.append(f_c)  # becomes 1 when V and F are used as the inputs
        vbest.append(V_c)
        bestF = halloffame[0].fitness.values[0]
        Fbest.append(bestF)
        print("{} generation's (bestF, f, V) =({}, {}, {})".format(gen+1, bestF, f_c, V_c))
            # halloffame_array.append(halloffame[0])
            # C_array.append(strategy.C)
            # centroid_array.append(strategy.centroid)

        if (gen+1)%100 == 0:
            y = []
            f = [0]*P1.P
            g = [0]*P1.M
            h = [0]*int(P1.Q)

            x = halloffame[0]# best[gen]
            for n in range(P1.N_x):
                if x[n] < P1.eps[0]:
                    y.append(0.0)
                else:
                    y.append(1.0)
            
            #evaluation
            f, g, h = P1.evaluation(x, y, f, g, h)

            #output
            print(x)
            print(y)
            for p in range(P1.P):
                print("f%d = %.10g " % (p+1, f[p]))

            V = 0.0
            for m in range(P1.M):
                # print("g%d = %.10g" % (m+1, g[m]))
                if g[m] > 0.0:
                    V += g[m]

            for q in range(P1.Q):
                # print("h%d = %.10g" % (q+1, h[q]))
                V += abs(h[q])

            #check feasibility
            print('Sum of violation = {:.10g}'.format(V))
            print("Tolerance = {:.2g} ".format(P1.eps[0]))
            if P1.checkFeasibility(x, y):
                print("Input solution is feasible.")
            else:
                print("Input solution is infeasible.")

    # plot the results
    x = np.arange(1, len(fbest)+1)
    y = np.array(fbest)

    fig = plt.figure(1)
    fig.subplots_adjust(left=0.2)
    plt.yscale('log')
    plt.plot(x, y)
    plt.xlabel("step, generation")
    plt.ylabel("f")
    fig.savefig("img_f.pdf")

    x = np.arange(1, len(vbest)+1)
    y = np.array(vbest)
    fig = plt.figure(2)
    fig.subplots_adjust(left=0.2)
    plt.yscale('log')
    plt.plot(x, y)
    plt.xlabel("step, generation")
    plt.ylabel("V")
    fig.savefig("img_V.pdf")

    x = np.arange(1, len(Fbest)+1)
    y = np.array(Fbest)

    fig = plt.figure(3)
    fig.subplots_adjust(left=0.2)
    plt.yscale('log')
    plt.plot(x, y)
    plt.xlabel("step, generation")
    plt.ylabel("F")
    fig.savefig("img_F.pdf")
Example #13
    def main(self, NGen=1000, NIndiv=100, DoPlot=True):
        #os.system("rm png/*.png")
        #random.seed(64)
        #np.random.seed(64)
        toolbox = self.toolbox

        # pool = multiprocessing.Pool(processes=6)
        # toolbox.register("map", pool.map)
        self.pop = toolbox.population(n=NIndiv)
        self.hof = tools.HallOfFame(1, similar=numpy.array_equal)
        #self.hof = tools.ParetoFront(1, similar=numpy.array_equal)

        # stats = tools.Statistics(lambda ind: ind.fitness.values)
        # stats.register("avg", numpy.mean)
        # stats.register("std", numpy.std)
        # stats.register("min", numpy.min)
        # stats.register("max", numpy.max)

        for indiv in self.pop:
            indiv.fill(0)

        #print "Best indiv start",
        #self.ArrayMethodsMachine.PM.PrintIndiv(self.IslandBestIndiv)
        #print
        if self.IslandBestIndiv is not None:
            #SModelArrayMP,Alpha=self.ArrayMethodsMachine.DeconvCLEAN()
            #AModelArrayMP=None
            DicoModelMP = self.ListInitIslands[self.iIsland]
            if DicoModelMP is not None:
                SModelArrayMP, AModelArrayMP = DicoModelMP["S"], DicoModelMP[
                    "Alpha"]
            else:
                SModelArrayMP, _ = self.ArrayMethodsMachine.DeconvCLEAN()
                AModelArrayMP = np.zeros_like(SModelArrayMP)

            if NGen == 0:
                self.ArrayMethodsMachine.PM.ReinitPop(self.pop,
                                                      SModelArrayMP,
                                                      AlphaModel=AModelArrayMP)
                self.ArrayMethodsMachine.KillWorkers()
                return self.pop[0]

            if np.max(np.abs(self.IslandBestIndiv)) == 0:
                #print "NEW"
                self.ArrayMethodsMachine.PM.ReinitPop(self.pop,
                                                      SModelArrayMP,
                                                      AlphaModel=AModelArrayMP)
            else:
                #print "MIX"
                NIndiv = len(self.pop) // 10  # integer division so the slices below get int indices
                pop0 = self.pop[0:NIndiv]
                pop1 = self.pop[NIndiv::]

                pop1 = self.pop
                pop0 = []

                pop1 = self.pop[0:1]
                pop0 = self.pop[1::]

                # self.ArrayMethodsMachine.PM.ReinitPop(pop0,SModelArray)

                # half with the best indiv
                SModelArrayBest = self.ArrayMethodsMachine.PM.ArrayToSubArray(
                    self.IslandBestIndiv, "S")
                AlphaModel = None
                if "Alpha" in self.ArrayMethodsMachine.PM.SolveParam:
                    AlphaModel = self.ArrayMethodsMachine.PM.ArrayToSubArray(
                        self.IslandBestIndiv, "Alpha")
                GSigModel = None
                if "GSig" in self.ArrayMethodsMachine.PM.SolveParam:
                    GSigModel = self.ArrayMethodsMachine.PM.ArrayToSubArray(
                        self.IslandBestIndiv, "GSig")
                self.ArrayMethodsMachine.PM.ReinitPop(pop1,
                                                      SModelArrayBest,
                                                      AlphaModel=AlphaModel,
                                                      GSigModel=GSigModel)

                # half of the pop with the MP model
                #SModelArrayBest0=SModelArrayBest.copy()
                #mask=(SModelArrayBest0==0)
                #SModelArrayBest0[mask]=SModelArrayMP[mask]
                #self.ArrayMethodsMachine.PM.ReinitPop(pop0,SModelArrayBest0,AlphaModel=AlphaModel,GSigModel=GSigModel)
                self.ArrayMethodsMachine.PM.ReinitPop(pop0,
                                                      SModelArrayMP,
                                                      AlphaModel=AModelArrayMP)

                # _,Chi20=self.ArrayMethodsMachine.GiveFitnessPop(pop0)
                # _,Chi21=self.ArrayMethodsMachine.GiveFitnessPop(pop1)
                # print
                # print Chi20
                # print Chi21
                # stop

                self.pop = pop1 + pop0
        #print

        # if self.IslandBestIndiv is not None:

        #     if np.max(np.abs(self.IslandBestIndiv))==0:
        #         #print "deconv"
        #         SModelArray,Alpha=self.ArrayMethodsMachine.DeconvCLEAN()

        #         #print "Estimated alpha",Alpha
        #         AlphaModel=np.zeros_like(SModelArray)+Alpha
        #         #AlphaModel[SModelArray==np.max(SModelArray)]=0

        #         self.ArrayMethodsMachine.PM.ReinitPop(self.pop,SModelArray)#,AlphaModel=AlphaModel)

        #         #print self.ArrayMethodsMachine.GiveFitness(self.pop[0],DoPlot=True)
        #         #stop
        #         #print self.pop
        #     else:
        #         SModelArray=self.ArrayMethodsMachine.PM.ArrayToSubArray(self.IslandBestIndiv,"S")
        #         AlphaModel=None
        #         if "Alpha" in self.ArrayMethodsMachine.PM.SolveParam:
        #             AlphaModel=self.ArrayMethodsMachine.PM.ArrayToSubArray(self.IslandBestIndiv,"Alpha")

        #         GSigModel=None
        #         if "GSig" in self.ArrayMethodsMachine.PM.SolveParam:
        #             GSigModel=self.ArrayMethodsMachine.PM.ArrayToSubArray(self.IslandBestIndiv,"GSig")

        #         self.ArrayMethodsMachine.PM.ReinitPop(self.pop,SModelArray,AlphaModel=AlphaModel,GSigModel=GSigModel)

        # set best Chi2
        # _=self.ArrayMethodsMachine.GiveFitnessPop([self.IslandBestIndiv])
        _ = self.ArrayMethodsMachine.GiveFitnessPop(self.pop)

        self.pop, log = algorithms.eaSimple(
            self.pop,
            toolbox,
            cxpb=0.3,
            mutpb=0.5,
            ngen=NGen,
            halloffame=self.hof,
            #stats=stats,
            verbose=False,
            ArrayMethodsMachine=self.ArrayMethodsMachine,
            DoPlot=DoPlot,
            MutConfig=self.MutConfig)

        self.ArrayMethodsMachine.KillWorkers()

        # #:param mu: The number of individuals to select for the next generation.
        # #:param lambda\_: The number of children to produce at each generation.
        # #:param cxpb: The probability that an offspring is produced by crossover.
        # #:param mutpb: The probability that an offspring is produced by mutation.

        # mu=70
        # lambda_=50
        # cxpb=0.3
        # mutpb=0.5
        # ngen=1000

        # self.pop, log= algorithms.eaMuPlusLambda(self.pop, toolbox, mu, lambda_, cxpb, mutpb, ngen,
        #                               stats=None, halloffame=None, verbose=__debug__,
        #                               ArrayMethodsMachine=self.ArrayMethodsMachine)

        V = tools.selBest(self.pop, 1)[0]

        #print "Best indiv end"
        #self.ArrayMethodsMachine.PM.PrintIndiv(V)

        # V.fill(0)
        # S=self.ArrayMethodsMachine.PM.ArrayToSubArray(V,"S")
        # G=self.ArrayMethodsMachine.PM.ArrayToSubArray(V,"GSig")

        # S[0]=1.
        # #S[1]=2.
        # G[0]=1.
        # #G[1]=2.

        # MA=self.ArrayMethodsMachine.PM.GiveModelArray(V)

        # # print "Sum best indiv",MA.sum(axis=1)
        # # print "Size indiv",V.size
        # # print "indiv",V
        # # print self.ArrayMethodsMachine.ListPixData
        # # print MA[0,:]

        return V
Example #14
def evolve_weights(toolbox,
                   centroids,
                   initial_weights,
                   values,
                   ngen,
                   npop,
                   stop_after,
                   cxpb,
                   mutpb,
                   force,
                   verbose=False):
    NC, ND = centroids.shape
    N = len(values)

    pop = [WeightIndividual(NC, ND, None) for _ in range(npop)]
    for i, ind in enumerate(pop):
        if i == 0:
            ind.weights[:, :] = initial_weights
        else:
            #random
            for k in range(NC):
                ind.weights[k, :] = np.random.random(ND)
                ind.weights[k, :] /= np.sum(ind.weights[k, :])
                ind.weights[k, :] = fix_weights(ind.weights[k, :], force=force)

        ind.check()

    hof = tools.HallOfFame(1, similar=similar_op_weights)

    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("min", np.min)
    stats.register("max", np.max)

    logbook = tools.Logbook()
    logbook.header = "gen", "min", "avg", "max", "best"

    #logger.info("Evaluating initial population")
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    hof.update(pop)

    record = stats.compile(pop) if stats else {}

    logbook.record(gen=0, best=hof[0].fitness.values[0], **record)
    if verbose: print(logbook.stream)

    evaluations = 0
    no_improvements = 0

    #logger.info("Starting evolution!")
    for gen in range(1, ngen + 1):
        prev_fitness = hof[0].fitness.values[0]
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))

        # Vary the pool of individuals
        offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        evals_gen = len(invalid_ind)

        # Update the hall of fame with the generated individuals
        if hof is not None:
            hof.update(offspring)

        # Replace the current population by the offspring
        pop[:] = offspring

        # Append the current generation statistics to the logbook
        record = stats.compile(pop) if stats else {}
        logbook.record(gen=gen, best=hof[0].fitness.values[0], **record)
        #logger.info(logbook.stream)
        if verbose: print(logbook.stream)

        evaluations += evals_gen

        current_fitness = hof[0].fitness.values[0]

        if current_fitness >= prev_fitness:
            no_improvements += 1
        else:
            no_improvements = 0

        if no_improvements > stop_after:
            break

    return pop, stats, hof, logbook, gen, evaluations
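
tools.HallOfFame(1, similar=similar_op_weights) relies on a comparator that is not shown; a sketch of what it presumably checks, treating two WeightIndividuals as duplicates when their weight matrices match:

import numpy as np

def similar_op_weights(ind1, ind2):
    # hypothetical duplicate test for the HallOfFame: weight matrices that are
    # equal within numerical tolerance count as the same individual
    return np.allclose(ind1.weights, ind2.weights)
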
Example #15
def main():
    # Seed our random number generator
    random.seed(random.SystemRandom().random())
    # We start by importing SCHEDULE.txt with each team specifics.
    print("Importing team schedules")
    teams_to_schedule = []  # Master list of teams to schedule
    conflicting_teams = []  # Master list of teams that can't play at same time
    general_population = []  # Holds our current population of schedules
    with open("SCHEDULE.txt", "r") as input_file:
        team_number = 1
        for line in input_file:
            if not line.strip():
                continue
            single_data_line = line.strip().split("-")
            schedule_write = []  # single-line list to add to the master list once populated

            # String has been split. Check if we're adding 1 or 2 teams to the schedule.
            # Teams with both a V and JV require two separate teams
            # Each team needs a unique number, issued by team_number

            if (single_data_line[1] == '1') or (single_data_line[1] == '2'):
                # Single Team Case
                if single_data_line[1] == '1':
                    single_data_line[0] = single_data_line[0] + " V"
                else:
                    single_data_line[0] = single_data_line[0] + " JV"
                # print (single_data_line[0], " has just a V or JV to play")
                schedule_write.append(single_data_line[0])  # Team Name
                schedule_write.append(team_number)  # Unique Team Number
                schedule_write.append(int(
                    single_data_line[1]))  # 1 for V, 2 for JV
                schedule_write.append(int(
                    single_data_line[3]))  # Rank from 1-3
                schedule_write.append(int(single_data_line[4]))  # Start time
                schedule_write.append(int(single_data_line[5]))  # End time
                # print ("Importing :", schedule_write)
                teams_to_schedule.append(schedule_write)
                cop_to_rank = [
                    int(single_data_line[1]),
                    int(single_data_line[3])
                ]
                lvl_and_rank.append(cop_to_rank[:])  # Specify copy
            elif single_data_line[1] == '3':
                # Varsity and JV team, [2] will be Y if they can play at the same time
                if single_data_line[2] in ('N', 'n'):
                    # Add team numbers to conflict pool
                    add_conflict = []
                    add_conflict.append(team_number)
                    add_conflict.append(team_number + 1)
                    conflicting_teams.append(add_conflict)
                    global num_of_conflicts
                    num_of_conflicts += 1
                # Parse rank structure for later addition to schedule_write
                parsed_rank = single_data_line[3].strip().split(",")
                # Create varsity team first
                temp_name_string = single_data_line[0] + " V"
                schedule_write.append(temp_name_string)  # Team Name
                schedule_write.append(team_number)  # Unique Team Number
                schedule_write.append(int(1))  # 1 for V, 2 for JV
                schedule_write.append(int(parsed_rank[0]))  # Rank from 1-3
                schedule_write.append(int(single_data_line[4]))  # Start time
                schedule_write.append(int(single_data_line[5]))  # End time
                teams_to_schedule.append(schedule_write)
                cop_to_rank = [int(1), int(parsed_rank[0])]
                lvl_and_rank.append(cop_to_rank[:])  # Specify copy
                # print ("Importing :", schedule_write)
                team_number += 1  # Increment our team counter for special case
                schedule_write = []  # Clear out our list to create 2nd JV team
                temp_name_string = single_data_line[0] + " JV"
                schedule_write.append(temp_name_string)  # Team Name
                schedule_write.append(team_number)  # Unique Team Number
                schedule_write.append(int(2))  # 1 for V, 2 for JV
                schedule_write.append(int(parsed_rank[1]))  # Rank from 1-3
                schedule_write.append(int(single_data_line[4]))  # Start time
                schedule_write.append(int(single_data_line[5]))  # End time
                teams_to_schedule.append(schedule_write)
                cop_to_rank = [int(2), int(parsed_rank[1])]
                lvl_and_rank.append(cop_to_rank[:])  # Specify copy
                # print ("Importing :", schedule_write)
                # print (single_data_line[0], " has both V and JV to play")
            else:
                print("Problem with SCHEDULE.txt, please fix team named:",
                      single_data_line[0])
                exit()
            # store off current team number (also # of teams imported)
            global num_of_teams
            num_of_teams = team_number
            # increment our team_number counter
            team_number += 1
    print("Import successful. Starting Genetic Algorithm.")
    print("Number of teams to schedule: ", num_of_teams)
    # We are done reading our file in...
    # print("\n\nOur conflicting teams: ")
    # print(conflicting_teams)
    print("Our individual teams: ")
    print(teams_to_schedule)

    # Time to set up our Genetic Algo. We have a single objective for fitness,
    # which is to maximize it.
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))

    # Our individual is a list (with nested lists, needs not be specified)
    creator.create("Individual", list, fitness=creator.FitnessMax)

    # Initialize our toolbox
    toolbox = base.Toolbox()

    # Register our individual and population, call custom individual creation function.
    # Single_help is used to prevent the same ID being used, and creates individual
    # hourly time slots for all courts
    toolbox.register("single_help", single_slot)
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.single_help, tot_slots)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    # Register custom evaluate, mutate, and crossover. Use tournament selection.
    toolbox.register("evaluate", calc_fitness)
    toolbox.register("mate", schedule_cx)
    toolbox.register("mutate", schedule_mut)
    toolbox.register("select", tools.selTournament, tournsize=tour_size)

    pop = toolbox.population(n=pop_size)
    # References to our population are as follows:
    # pop[Individual][TimeSegment][Court][TeamSide]
    # eg pop[4][0][0][0] would reference the 5th individual schedule, first time
    # slot, first court, and the first team scheduled for that court.
    generate_schedule(pop, teams_to_schedule, conflicting_teams)
    print("Initial population successfully generated")
    print("Population Size: ", pop_size, "   Number of Generations: ",
          num_of_gens)
    print("Mutation Prob: ", mutpb, "   Crossover Prob: ", cxpb)
    print("BEGIN GENETIC ALGORITHM")
    # print("Member 1: \n", pop[0])
    hof = tools.HallOfFame(1)

    # After everything has been set, register stats and run gen algo
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   cxpb=cxpb,
                                   mutpb=mutpb,
                                   ngen=num_of_gens,
                                   stats=stats,
                                   halloffame=hof,
                                   verbose=True)

    print("Best last iteration: \n", hof)
    print("Level and rank: \n", lvl_and_rank)
    # print("Our individual teams: ")
    # print(teams_to_schedule)
    return pop, log, hof
Example #16
def main(celda, tbs, sinr, capacidad_data):
    print("Carga ok ...ajustando variables")
    tbs_run1 = tbs[celda]
    tbs_run1t = np.transpose(tbs_run1)
    sinr_run1 = sinr[celda]
    sinr_run1t = np.transpose(sinr_run1)

    # other variables
    maximo = np.max(sinr_run1t[celda])
    minimo = np.min(sinr_run1t[celda])
    #
    multiplicador = (sinr_run1t[celda] - minimo) / maximo
    #
    throughput30 = np.zeros(30)

    #

    def eval_Throughput(individual):
        for cromosoma in range(30):
            throughput30[cromosoma] = int(
                capacidad_data.iloc[int(tbs_run1t[celda][cromosoma]) + 1,
                                    individual[cromosoma]])

        for cromosoma in range(30):

            throughput30[cromosoma] = throughput30[cromosoma] * (
                multiplicador[cromosoma])

        throughput = np.sum(throughput30)
        recursos = np.sum(individual)
        thinit = throughput
        # maximum, 0.8 and 0.45
        if recursos > 100:
            throughput = throughput - 0.81 * throughput
        if np.count_nonzero(individual) != 29:
            throughput = throughput - 0.56 * throughput
        return throughput,

    #
    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual",
                   array.array,
                   typecode='i',
                   fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    toolbox.register("indices", random.choices, range(9), k=equipos_terminales)

    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.indices)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("mate", tools.cxOrdered)
    toolbox.register("mutate", tools.mutShuffleIndexes,
                     indpb=0.09)  # previously 0.05
    toolbox.register("select", tools.selTournament, tournsize=5)
    #
    toolbox.register("evaluate", eval_Throughput)

    #start with a population of 300 individuals
    pop = toolbox.population(n=300)
    #only save the very best one
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    # use one of the built-in GAs with a probability of mating of 0.7,
    # a probability of mutating of 0.2, and 200 generations.
    algorithms.eaSimple(pop,
                        toolbox,
                        0.7,
                        0.2,
                        200,
                        stats=stats,
                        halloffame=hof)  # previously 0.1

    return pop, stats, hof
Example #17
0
	def evolveSteady(self):
		"""
		A method to evolve a species of boid using a 
		steady-state genetic algorithm. Method authored by 
		Gianni Orlando. Edited by Ryan McArdle.

		:returns: the evolved species, and its fitness
		"""
		name = "Steady"

		self.prepareEvolution()

		self.eval_limit = self.eval_limit_steady

		# Initialize fitness goal and individual type
		creator.create("FitnessMax", base.Fitness, weights=(1.0,))
		creator.create("Individual", array.array, typecode='d',fitness=creator.FitnessMax)

		# Initialize individuals, populations, and evolution operators
		toolbox = base.Toolbox()
		toolbox.register("individual", self.initBoids, creator.Individual, *self.parameter_bounds)
		toolbox.register("population", tools.initRepeat, list, toolbox.individual)
		toolbox.register("evaluate", self.boidFitness)
		toolbox.register("mate", tools.cxOnePoint)
		toolbox.register("mutate", tools.mutGaussian, indpb=0.5, mu=25.5, sigma=12.5) # 50% chance for each value to mutate
		toolbox.register("mutate2", tools.mutShuffleIndexes, indpb=0.5) # 50% chance for each value to mutate
		toolbox.register("select", tools.selTournament, tournsize= 5)

		toolbox.decorate('mate', self.checkBounds(self.parameter_bounds))
		toolbox.decorate('mutate', self.checkBounds(self.parameter_bounds))
		toolbox.decorate('mutate2', self.checkBounds(self.parameter_bounds))

		stats = tools.Statistics(lambda ind: ind.fitness.values)
		stats.register("avg", np.mean)
		stats.register("std", np.std)
		stats.register("min", np.min)
		stats.register("max", np.max)
		self.logbook = tools.Logbook()
		self.logbook.header = 'gen','evals','min','max','avg','std'
		hof = tools.HallOfFame(1)

		pop = toolbox.population(n=self.mu)

		seed = random.randint(1, 10**10)
		## Evaluate the entire population for fitness in parallel
		if __name__=="__main__":
			with mp.Pool(self.num_processes) as pool:
				fitnesses = pool.starmap(self.boidFitness, [(boid.tolist(),seed,self.current_evals,self.eval_limit) for boid in pop])

		for ind, fit in zip(pop, fitnesses):
			ind.fitness.values = fit,

		# Extract all the fitnesses of the initial population
		fits = [ind.fitness.values[0] for ind in pop]

		# Variable keeping track of the number of generations
		g = 0

		## Record the initial population
		self.current_evals += len(pop)
		record = stats.compile(pop)
		logbook = tools.Logbook()
		logbook.header = 'gen','evals','min','max','avg','std'
		logbook.record(gen=0, evals=self.current_evals, **record)
		print(logbook.stream)
		hof.update(pop)
		print(hof[0])
		print(hof[0].fitness.values[0])
		
		# Begin the evolution
		while hof[0].fitness.values[0] < 1.0 and self.current_evals < self.eval_limit:
			# A new generation
			g = g + 1

			# Gather all the fitnesses in one list
			fits = [ind.fitness.values for ind in pop]

			# Select the next generation individuals
			offspring = pop
			bestIndv = tools.selBest(offspring,k=2)
			worstIndv = tools.selWorst(offspring,k=2)
			# Clone the selected individuals
			offspring = list(map(toolbox.clone, offspring))

			# Apply crossover to the two individuals chosen by tournament selection
			parents = toolbox.select(offspring,2)
			parent1, parent2 = parents[0], parents[1]
			replace1 = offspring[offspring.index(worstIndv[0])]
			replace2 = offspring[offspring.index(worstIndv[1])]
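			# Steady-state replacement: when crossover fires below, the two tournament
			# parents are crossed over (and possibly mutated) in place, and the resulting
			# children overwrite the slots currently holding the two worst individuals.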
			for child1, child2 in zip([parent1], [parent2]):
				if random.random() < self.CXPB:
					toolbox.mate(child1, child2)
					if random.random() < self.MUTPB:
						toolbox.mutate(child1)
						del child1.fitness.values
					if random.random() < self.MUTPB:
						toolbox.mutate(child2)
						del child2.fitness.values
					if random.random() < self.MUTPB:
						toolbox.mutate2(child1)
						del child1.fitness.values
					if random.random() < self.MUTPB:
						toolbox.mutate2(child2)
						del child2.fitness.values
					offspring[offspring.index(replace1)] = parent1
					offspring[offspring.index(replace2)] = parent2

			pop[:] = offspring

			# Evaluate the population with invalid fitnesses
			invalid_ind = [ind for ind in pop if not ind.fitness.valid]
			seed = random.randint(1, 10**10)
			if __name__=="__main__":
				with mp.Pool(self.num_processes) as pool:
					fitnesses = pool.starmap(self.boidFitness, [(boid.tolist(),seed,self.current_evals,self.eval_limit) for boid in invalid_ind])

			## Apply found fitness values
			for ind, fit in zip(invalid_ind, fitnesses):
				ind.fitness.values = fit,

			# Extract all the fitnesses of the new population
			fits = [ind.fitness.values[0] for ind in pop]

			##################################################
			## Record the new generation
			hof.update(pop)
			record = stats.compile(pop)
			self.current_evals += len(invalid_ind)
			logbook.record(gen=g, evals=self.current_evals, **record)
			print(logbook.stream)
			print(hof[0])
			print(hof[0].fitness.values[0])
			#bestFit, bestDetailFit, bestFitWeight = self.boidFitness(hof[0].tolist(),seed,self.current_evals-len(invalid_ind),self.eval_limit,detail=True)
			#print(bestDetailFit)
			##################################################

		self.plotAndRecord(logbook,hof,name)

		return hof[0]
Example #18
0
File: griewank.py  Project: yoshinobc/EA
def main():

    np.random.seed(64)
    pop = toolbox.population(n=POPNUM)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind:ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    ok_count = 0

    #pop,hof = algorithms.eaSimple(pop,toolbox,cxpb=0.5,mutpb=0.01,ngen=200,stats=stats,halloffame=hof,verbose=True)
    fitness = list(map(toolbox.evaluate,pop))

    for ind, fit in zip(pop, fitness):
        ind.fitness.values = fit

    #print("gen ","min ","max ","mean","std")
    stop_gen = 200
    for gen in range(NGEN):
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            # cross two individuals with probability CXPB
            if random.random() < CXPB:
                toolbox.mate(child1, child2)

                # fitness values of the children
                # must be recalculated later
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:

            # mutate an individual with probability MUTPB
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]

        fitness = list(map(toolbox.evaluate,invalid_ind))
        for ind, fit in zip(invalid_ind, fitness):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring
        pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        hof.update(pop)
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5
        #print(gen  ,min(fits) ,max(fits) ,mean ,std)

        with open('griewank.txt',mode='a') as f:
            f.write(str(gen))
            f.write(" ")
            f.write(str(min(fits)))
            f.write(" ")
            f.write(str(mean))
            f.write("\n")
        """
        time 20.42633295059204
        [-3.191757675565447, 2.158503392015575e-09, 16.294035978157076, -6.375366880774475, -7.618792813179073, 1.1405896238647264e-08, 0.00027775284370455254]
        """
        if min(fits) <= np.exp(-10):
            stop_gen = gen
            ok_count = 1
            break
        
    return pop,hof,ok_count,stop_gen
Example #19
0
	def evolveMuCommaLambda(self,lambda_ratio=7):
		"""
		A method to evolve a species of boid using a 
		(Mu,Lambda) evolutionary strategy. Authored by Ryan 
		McArdle.

		:returns: the best individual found by the evolution
		"""
		
		name = "muCommaLambda"+str(lambda_ratio)

		self.prepareEvolution(lambda_ratio)

		## Create Fitness, Individuals, and Strategy variables
		creator.create("FitnessMax", base.Fitness, weights=(1.0,))
		creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMax, strategy=None)
		creator.create("Strategy", array.array, typecode="d")

		## Sets up our evolutionary approach
		toolbox = base.Toolbox()

		toolbox.register('individual', self.initBoidsStrat, creator.Individual, creator.Strategy, *self.parameter_bounds, *self.strategy_bounds)
		toolbox.register('population', tools.initRepeat, list, toolbox.individual)

		toolbox.register("mate", tools.cxESBlend, alpha=0.333)
		toolbox.register("mutate", tools.mutESLogNormal, c=1.0, indpb=1/6)
		toolbox.register('select', tools.selRandom)
		toolbox.register('evaluate', self.boidFitness)
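		## Note: cxESBlend and mutESLogNormal act on both the parameter values and
		## each individual's attached strategy vector (self-adaptive step sizes).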

		toolbox.decorate('mate', self.checkBounds(self.parameter_bounds))
		toolbox.decorate('mutate', self.checkBounds(self.parameter_bounds))

		toolbox.decorate('mate', self.checkStrategy(self.strategy_bounds))
		toolbox.decorate('mutate', self.checkStrategy(self.strategy_bounds))

		
		stats = tools.Statistics(lambda ind: ind.fitness.values)
		stats.register("avg", np.mean)
		stats.register("std", np.std)
		stats.register("min", np.min)
		stats.register("max", np.max)
		self.logbook = tools.Logbook()
		self.logbook.header = 'gen','evals','min','max','avg','std'
		hof = tools.HallOfFame(1)
		
		## Initializes the population with n individuals
		g = 0
		pop = toolbox.population(n=self.mu)
		seed = random.randint(1, 10**10)

		## Evaluate the entire population for fitness in parallel
		if __name__=="__main__":
			with mp.Pool(self.num_processes) as pool:
				fitnesses = pool.starmap(self.boidFitness, [(boid.tolist(),seed,self.current_evals,self.eval_limit) for boid in pop])

		## Apply fitness values
		for ind, fit in zip(pop, fitnesses):
			ind.fitness.values = fit,


		## Record initial population in stats logbook
		self.current_evals += len(pop)
		record = stats.compile(pop)
		logbook = tools.Logbook()
		logbook.header = 'gen','evals','min','max','avg','std'
		logbook.record(gen=0, evals=self.current_evals, **record)
		print(logbook.stream)
		hof.update(pop)
		print(hof[0])
		print(hof[0].fitness.values[0])

		## Loop for each generation until stopping criteria
		while self.current_evals < (self.eval_limit - self.lambda_):
			g+=1
			## Select the next generation individuals
			offspring = toolbox.select(pop, self.lambda_)

			## Clone the selected individuals
			offspring = list(map(toolbox.clone, offspring))

			## Apply crossover and mutation on the offspring
			for child1, child2 in zip(offspring[::2], offspring[1::2]):
				if random.random() < self.CXPB:
					toolbox.mate(child1, child2)
					del child1.fitness.values
					del child2.fitness.values

			for mutant in offspring:
				if random.random() < self.MUTPB:
					toolbox.mutate(mutant)
					del mutant.fitness.values

			## Evaluate the individuals with an invalid fitness
			invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
			seed = random.randint(1, 10**10)
			if __name__=="__main__":
				with mp.Pool(self.num_processes) as pool:
					fitnesses = pool.starmap(self.boidFitness, [(boid.tolist(),seed,self.current_evals,self.eval_limit) for boid in invalid_ind])

			## Apply found fitness values
			for ind, fit in zip(invalid_ind, fitnesses):
				ind.fitness.values = fit,


			##### MuPlusLambda
			### New generation
			#new_gen = list(map(toolbox.clone, [*pop,*offspring]))

			### Sort the new generation by fitness
			#new_gen.sort(key=lambda x: x.fitness.values[0])

			### Replace population with top mu of new generation
			#pop = list(map(toolbox.clone, new_gen[-self.mu:]))
			#####


			#### MuCommaLambda
			## Sort the new generation by fitness
			offspring.sort(key=lambda x: x.fitness.values[0])

			## Replace population with top mu of new generation
			pop = list(map(toolbox.clone, offspring[-self.mu:]))
			####

			##################################################
			## Record the new generation
			hof.update(pop)
			record = stats.compile(pop)
			self.current_evals += len(invalid_ind)
			logbook.record(gen=g, evals=self.current_evals, **record)
			print(logbook.stream)
			print(hof[0])
			print(hof[0].fitness.values[0])
			#bestFit, bestDetailFit, bestFitWeight = self.boidFitness(hof[0].tolist(),seed,self.current_evals-len(invalid_ind),self.eval_limit,detail=True)
			#print(bestDetailFit)
			##################################################

		self.plotAndRecord(logbook,hof,name)

		return hof[0]
Example #20
0
def search_expression(input_values,
                      output_values,
                      pset,
                      max_height=50,
                      population_size=10,
                      cxpb=0.5,
                      mutpb=0.1,
                      num_evals_limit=500,
                      leading_at_0=None,
                      leading_at_inf=None,
                      hard_penalty_default_value=None,
                      include_leading_powers=False,
                      default_value=50.):
    """Searches expression using evolutionary algorithm.

  Args:
    input_values: Numpy array with shape [num_input_values]. List of input
        values to univariate function.
    output_values: Numpy array with shape [num_output_values]. List of output
        values from the univariate function.
    pset: deap.gp.PrimitiveSet.
    max_height: Integer, the max value of the height of tree.
    population_size: Integer, the size of population.
    cxpb: Float, the probability of mating two individuals.
    mutpb: Float, the probability of mutating an individual.
    num_evals_limit: Integer, the limit of the number of evaluations.
    leading_at_0: Float, desired leading power at 0.
    leading_at_inf: Float, desired leading power at inf.
    hard_penalty_default_value: Float, the default value for hard penalty.
        Default None, the individual will be evaluated by soft penalty instead
        of hard penalty.
    include_leading_powers: Boolean, whether to include leading powers in
        evaluation.
    default_value: Float, default value if leading power error is nan.

  Returns:
    individual: creator.Individual, the best individual in population.
    toolbox: deap.base.Toolbox, it contains the evolution operators.
  """
    toolbox = get_toolbox(pset, max_height)
    toolbox.register('evaluate',
                     evaluate_individual,
                     input_values=input_values,
                     output_values=output_values,
                     toolbox=toolbox,
                     leading_at_0=leading_at_0,
                     leading_at_inf=leading_at_inf,
                     hard_penalty_default_value=hard_penalty_default_value,
                     include_leading_powers=include_leading_powers,
                     default_value=default_value)
    population = toolbox.population(n=population_size)
    halloffame = tools.HallOfFame(1)

    evolutionary_algorithm_with_num_evals_limit(
        population=population,
        toolbox=toolbox,
        cxpb=cxpb,
        mutpb=mutpb,
        num_evals_limit=num_evals_limit,
        halloffame=halloffame)
    return halloffame[0], toolbox
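# A minimal usage sketch (not part of the original example); it assumes this
# module's get_toolbox and evaluate_individual helpers are available and that
# the primitive set below is compatible with them:
#
#   import numpy as np
#   from deap import gp
#
#   pset = gp.PrimitiveSet('main', 1)
#   pset.addPrimitive(np.add, 2)
#   pset.addPrimitive(np.multiply, 2)
#   xs = np.linspace(0.1, 10.0, 50)
#   ys = 3.0 * xs ** 2
#   best, toolbox = search_expression(xs, ys, pset,
#                                     population_size=20, num_evals_limit=200)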
Example #21
0
def main(args):
    global SUPERVISED, NO_SUB_IND, toolbox

    #For elitism, at least the best individual
    #is recorded
    NO_ELI = int(POP_SIZE * GP_ELI)
    if NO_ELI < 10:
        NO_ELI = 10
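    # The NO_ELI hall-of-fame members are appended back into the offspring each
    # generation (see the "Now put HOF back to offspring" loop below).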

    filename = "iteration"+str(args[0])+".txt"
    file = open(filename,'w+')

    run_index = int(args[0])
    supervised = int(args[1])

    if supervised == 0:
        SUPERVISED = False
    else:
        SUPERVISED = True

    #setWeight()

    NO_SUB_IND = int(args[2])

    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.sub_individual, n=NO_SUB_IND)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual, n=POP_SIZE)

    random.seed(1617**2*run_index)

    #FitnessFunction.setWeight(src_feature=Core.src_feature, src_label=Core.src_label,
    #                          tarU_feature=Core.tarU_feature, tarU_label=Core.tarU_soft_label)
    time_start = time.perf_counter()  # time.clock() was removed in Python 3.8
    pop = toolbox.population()
    hof = tools.HallOfFame(NO_ELI)

    #evaluate the population
    fitness = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitness):
        ind.fitness.values = fit

    #Update the HoF
    hof.update(pop)

    towrite = "Supervised: %r \n" \
              "Number of sub tree: %d\n" \
              "Source weight: %f\n" \
              "Diff source and target weight: %f\n" \
              "Target weight: %g \n" % (SUPERVISED, NO_SUB_IND,
                                        FitnessFunction.srcWeight,
                                        FitnessFunction.margWeight,
                                        FitnessFunction.tarWeight)

    for gen in range(NGEN):
        print(gen)

        towrite = towrite + ("----Generation %i -----\n" %gen)

        #Select the next generation individuals
        #Leave space for elitism
        offspringS = toolbox.select(pop, len(pop)-NO_ELI)
        # Clone the selected individuals
        offspring = [toolbox.clone(ind) for ind in offspringS]

        #go through each individual
        for i in range(1, len(offspring), 2):
            if random.random() < GP_CXPB:
                #perform crossover for all the features
                first = offspring[i-1]
                second = offspring[i]
                first, second = crossoverEach(first, second)
                del first.fitness.values
                del second.fitness.values

        for i in range(len(offspring)):
            if random.random() < GP_MUTBP:
                parent = pop[i]
                for j in range(1, len(parent)):
                    if random.random() < GP_MUTSUB:
                        parent[j] = toolbox.mutate(parent[j])
                del parent.fitness.values

        #Now put HOF back to offspring
        for ind in hof:
            offspring.append(toolbox.clone(ind))

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        #Now update the hof for the next iteration
        hof.update(offspring)

        pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x*x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5

        towrite = towrite + ("  Min %s\n" % min(fits))
        towrite = towrite + ("  Max %s\n" % max(fits))
        towrite = towrite + ("  Avg %s\n" % mean)
        towrite = towrite + ("  Std %s\n" % std)

        bestInd = hof[0]

        funcs = [toolbox.compile(expr=tree) for tree in bestInd]
        src_feature = GPUtility.buildNewFeatures(Core.src_feature, funcs)
        tarU_feature = GPUtility.buildNewFeatures(Core.tarU_feature, funcs)
        tarL_feature = GPUtility.buildNewFeatures(Core.tarL_feature, funcs)

        if SUPERVISED:
            src_err, diff_marg, tar_err = FitnessFunction.domain_differece(src_feature=src_feature, src_label=Core.src_label,
                                                                           classifier=Core.classifier,
                                                                           tarU_feature=tarU_feature, tarU_soft_label=Core.tarU_soft_label,
                                                                           tarL_feature=tarL_feature, tarL_label=Core.tarL_label)
        else:
            src_err, diff_marg, tar_err = FitnessFunction.domain_differece(src_feature=src_feature, src_label=Core.src_label,
                                                                           classifier=Core.classifier,
                                                                           tarU_feature=tarU_feature, tarU_soft_label=Core.tarU_soft_label)

        towrite = towrite + ("  Source Error: %f \n  Diff Marg: %f \n  Target Error: %f \n" %(src_err, diff_marg, tar_err))

        acc = 1.0 - FitnessFunction.classification_error(training_feature=src_feature, training_label=Core.src_label,
                                                         classifier=Core.classifier,
                                                         testing_feature=tarU_feature, testing_label=Core.tarU_label)
        towrite = towrite + ("  Accuracy on unlabel target: "+str(acc) + "\n")

        # Update the pseudo label and weight
        Core.classifier.fit(src_feature, Core.src_label)
        Core.tarU_soft_label = Core.classifier.predict(tarU_feature)
        #FitnessFunction.setWeight(Core.src_feature, Core.src_label, Core.tarU_feature, Core.tarU_SoftLabel)

    time_elapsed = (time.perf_counter() - time_start)

    #process the result
    bestInd = hof[0]
    towrite = towrite + "----Final -----\n"

    funcs = [toolbox.compile(expr=tree) for tree in bestInd]
    src_feature = GPUtility.buildNewFeatures(Core.src_feature, funcs)
    tarU_feature = GPUtility.buildNewFeatures(Core.tarU_feature, funcs)
    acc = 1.0 - FitnessFunction.classification_error(training_feature=src_feature, training_label=Core.src_label,
                                                     classifier=Core.classifier,
                                                     testing_feature=tarU_feature, testing_label=Core.tarU_label)
    towrite = towrite + ("Accuracy on the target (TL): %f\n" % acc)
    towrite = towrite + "Accuracy on the target (No TL): %f\n" % (
                    1.0 - FitnessFunction.classification_error(training_feature=Core.src_feature, training_label=Core.src_label,
                                                               classifier=Core.classifier,
                                                               testing_feature=Core.tarU_feature, testing_label=Core.tarU_label))

    towrite = towrite + ("Computation time: %f\n" % time_elapsed)
    towrite = towrite + ("Number of features: %d\n" % len(bestInd))

    file.write(towrite)
    file.close()
Example #22
0
def main():
    # Initialize dictionary to keep track of Hall Of Fame (HOF) data
    hofData = {}
    for oneName in NAME_TO_FACTOR:
        hofData[oneName] = []
    # Keep track of HOF over time, so that we can evaluate efficiency over different pT ranges
    bestIndividuals = []
    # Data for plotting efficiency as a function of pT
    pTCutData = {}
    for i, cut in enumerate(PT_CUTS):
        cutRange = 0, 100
        if i == len(PT_CUTS) - 1:
            cutRange = cut, 12345
        else:
            cutRange = cut, PT_CUTS[i + 1]
        pTCutData[cutRange] = []
    # Objects to keep track of seeding algorithm metrics for HOF
    scores = []
    efficiencies = []
    fakeRateList = []
    dupRateList = []
    # Objects that will compile the data for population graphs
    logbook = tools.Logbook()
    popData = {}
    popData["Score"] = tools.Statistics(key=lambda ind: ind.fitness.values[0])
    popData["Efficiency"] = tools.Statistics(
        key=lambda ind: ind.fitness.values[1])
    popData["FakeRate"] = tools.Statistics(
        key=lambda ind: ind.fitness.values[2])
    popData["DuplicateRate"] = tools.Statistics(
        key=lambda ind: ind.fitness.values[3])
    for oneName in NAME_TO_FACTOR:
        # Bind oneName per iteration; a plain closure would capture only the
        # last name in the loop.
        popData[oneName] = tools.Statistics(
            key=lambda ind, name=oneName: ind[NAME_TO_INDEX[name]])
    # mstats = tools.MultiStatistics(popData)
    # mstats = tools.MultiStatistics(Score=popData["Score"], Efficiency=popData["Efficiency"], FakeRate=popData["FakeRate"], DuplicateRate=popData["DuplicateRate"], sigmaScattering=popData["sigmaScattering"],
    #                                 maxSeedsPerSpM=popData["maxSeedsPerSpM"], maxPt=popData["maxPt"], impactMax=popData["impactMax"], deltaRMax=popData["deltaRMax"], deltaRMin=popData["deltaRMin"], radLengthPerSeed=popData["radLengthPerSeed"])
    stats_score = tools.Statistics(key=lambda ind: ind.fitness.values[0])
    stats_eff = tools.Statistics(key=lambda ind: ind.fitness.values[1])
    stats_fake = tools.Statistics(key=lambda ind: ind.fitness.values[2])
    stats_dup = tools.Statistics(key=lambda ind: ind.fitness.values[3])
    stats_sigmaScattering = tools.Statistics(
        key=lambda ind: ind[NAME_TO_INDEX["sigmaScattering"]])
    stats_maxSeedsPerSPM = tools.Statistics(
        key=lambda ind: ind[NAME_TO_INDEX["maxSeedsPerSpM"]])
    stats_maxPt = tools.Statistics(key=lambda ind: ind[NAME_TO_INDEX["maxPt"]])
    stats_impactMax = tools.Statistics(
        key=lambda ind: ind[NAME_TO_INDEX["impactMax"]])
    stats_deltaRMin = tools.Statistics(
        key=lambda ind: ind[NAME_TO_INDEX["deltaRMin"]])
    stats_deltaRMax = tools.Statistics(
        key=lambda ind: ind[NAME_TO_INDEX["deltaRMax"]])
    stats_radLengthPerSeed = tools.Statistics(
        key=lambda ind: ind[NAME_TO_INDEX["radLengthPerSeed"]])
    mstats = tools.MultiStatistics(Score=stats_score,
                                   Efficiency=stats_eff,
                                   FakeRate=stats_fake,
                                   DuplicateRate=stats_dup,
                                   sigmaScattering=stats_sigmaScattering,
                                   maxSeedsPerSpM=stats_maxSeedsPerSPM,
                                   maxPt=stats_maxPt,
                                   impactMax=stats_impactMax,
                                   deltaRMax=stats_deltaRMax,
                                   deltaRMin=stats_deltaRMin,
                                   radLengthPerSeed=stats_radLengthPerSeed)
    mstats.register("avg", np.mean)
    mstats.register("std", np.std)
    mstats.register("min", np.min)
    mstats.register("max", np.max)

    hof = tools.HallOfFame(1)
    # initialize NPOP copies of initial guess
    pop = toolbox.population_guess()
    indPrint(pop[0])
    firstFit = toolbox.evaluate(pop[0])
    print(firstFit)
    for ind in pop:
        ind.fitness.values = firstFit
    g = 0
    bestEff = 0
    bestDup = 20
    bestFake = 100
    # Stop once the efficiency exceeds 99.4%, the duplicate rate is below 60%, and
    # the fake rate is below 10%, or the number of generations reaches NGEN (100).
    while g < NGEN and ((bestEff < 99.4) or bestDup > 60 or bestFake > 10):
        g = g + 1
        print("-- Generation %i --" % g)
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))
        # Apply mutation on the offspring
        maxMutants = 16
        mutantsCount = 1
        numParamsDiffCount = []  # for debugging
        for i, mutant in enumerate(offspring):
            prevInd = []
            for j in range(len(mutant)):
                prevInd.append(mutant[j])
            if mutantsCount == maxMutants:
                break  # I only have 16 cores so evaluating more individuals will slow down
            if random.random() < MUTPB:
                mutantsCount += 1
                toolbox.mutate(mutant)
                numMutatedParams = 0
                for j in range(len(mutant)):  # for debugging
                    if prevInd[j] != mutant[j]:
                        numMutatedParams += 1
                del mutant.fitness.values
                numParamsDiffCount.append(numMutatedParams)
        print(
            f"List of # of mutated params in each individual: {numParamsDiffCount}"
        )
        # Evaluate the individuals with an invalid fitness (the mutated individuals)
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        print(f"Evaluating {len(invalid_ind)} individuals...")
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        # Print and store data
        toPrint = 1
        printCounter = 0
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
            if (printCounter < 1 and fit[1] == -1):
                print("This ind broke the seeding algo")
                indPrint(
                    ind
                )  # print the first toPrint individual(s) that cause the seeding algo to break
                printCounter += 1

        pop[:] = offspring
        hof.update(pop)
        # Gather all the fitnesses in one list and print the stats
        scoreList = [ind.fitness.values[0] for ind in pop]
        effs = [ind.fitness.values[1] for ind in pop]
        fakeRates = [ind.fitness.values[2] for ind in pop]
        dupRates = [ind.fitness.values[3] for ind in pop]
        length = len(pop)
        mean = sum(effs) / length
        print("Efficiency:")
        printStats(effs, length, "Percent")
        print("Duplicate Rate:")
        printStats(dupRates, length, "Percent")
        print("Fake Rate:")
        printStats(fakeRates, length, "Percent")
        print("The best one so far:", end=" ")
        # record data for analyzing best individual
        goodOne = hof[0]
        bestIndividuals.append(goodOne)
        for oneName in hofData:
            paramVal = goodOne[
                NAME_TO_INDEX[oneName]] * NAME_TO_FACTOR[oneName]
            if oneName == "maxSeedsPerSpM":
                hofData[oneName].append(int(paramVal))
            else:
                hofData[oneName].append(paramVal)
        scores.append(goodOne.fitness.values[0])
        bestEff = goodOne.fitness.values[1]
        efficiencies.append(bestEff)
        bestFake = goodOne.fitness.values[2]
        fakeRateList.append(bestFake)
        bestDup = goodOne.fitness.values[3]
        dupRateList.append(bestDup)
        indPrint(goodOne)
        print("Best score (Score, efficiency, fakeRate, dupRate):", end=" ")
        print(goodOne.fitness.values)
        # record data for analyzing the population
        logbook.record(gen=g, **mstats.compile(pop))
    # record efficiency over different pT ranges
    for cutRange in pTCutData:
        cutRangeInputs = []
        for goodOne in bestIndividuals:
            names, params = createNamesAndParams(goodOne)
            names.append("fltPrtPtMin")
            params.append(cutRange[0])
            names.append("fltPrtPtMax")
            params.append(cutRange[1])
            args = paramsToInput(params, names)
            cutRangeInputs.append(args)
        print(f"Evaluating pT range {cutRange}")
        avgScores = toolbox.map(executeAlg, cutRangeInputs)
        for avgScore in avgScores:
            pTCutData[cutRange].append(avgScore["efficiency"])
    plotPtRange(pTCutData)
    # Make plots for the population
    for oneName in NAME_TO_FACTOR:
        #print("Length of data is " + str(len(logbook.chapters[oneName].select("max"))))
        plotLogbook("Score", oneName, logbook)
        plotLogbook("Efficiency", oneName, logbook)
        plotLogbook("DuplicateRate", oneName, logbook)
        plotLogbook("FakeRate", oneName, logbook)

    # Make plots for the best individual
    for oneName in hofData:
        plotHOF("Score", scores, oneName, hofData[oneName])
        plotHOF("Efficiency", efficiencies, oneName, hofData[oneName])
        plotHOF("DuplicateRate", dupRateList, oneName, hofData[oneName])
        plotHOF("FakeRate", fakeRateList, oneName, hofData[oneName])
    # Plot the score, efficiency, duplicate rate and fake rate here
    plotScores(efficiencies, fakeRateList, dupRateList)
    print(f"Wrote plots to {plotDirectory}")
    return logbook, hof
Example #23
0
def main():

    with open('data.csv') as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        for row in readCSV:

            nCheckboard = int(row[0])
            mCheckboard = int(row[1])
            sizesel = int(row[2])
            populSize = int(row[3])
            numgen = int(row[4])

            # Get the cardinalities from the second line of data.csv

            row = next(readCSV)
            for line in range(len(row)):
                cardinalities.append(int(row[line]))
            print(cardinalities)
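            # Note: 'cardinalities' is assumed to be a module-level list
            # defined outside this snippet.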

    # Dimensions of the checkboard which determine the size of the individual

    numberOfVariables = nCheckboard * mCheckboard

    creator.create("FitnessMax", base.Fitness, weights=(1.0, ))
    creator.create("Individual",
                   array.array,
                   typecode='b',
                   fitness=creator.FitnessMax)

    toolbox = base.Toolbox()

    # Structure initializers
    toolbox.register("individual", initRepeatWithCardinalities,
                     creator.Individual, randomUnderCardinality,
                     int(nCheckboard) * int(mCheckboard))
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("evaluate",
                     bd.evalCheckboardNeighbours,
                     nCB=nCheckboard,
                     mCB=mCheckboard)
    toolbox.register("select", tools.selBest)

    random.seed(64)

    pop = toolbox.population(n=populSize)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    pop, log = ad.treeEDA(pop,
                          toolbox,
                          sizesel,
                          cardinalities,
                          ngen=numgen,
                          stats=stats,
                          halloffame=hof,
                          vbse=True)

    return pop, log, hof
Example #24
0
creator.create("FitnessMax", base.Fitness, weights=(-1.0, ))
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 4)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_bool, MAX_MOVES)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", evalMaze)
toolbox.register("mutate", tools.mutUniformInt, indpb=0.05, low=0, up=4)
toolbox.register("mate", tools.cxOnePoint)
toolbox.register("select", tools.selTournament, tournsize=3)

pop = toolbox.population(n=500)
hof1 = tools.HallOfFame(1)

pop, log = algorithms.eaSimple(pop,
                               toolbox,
                               cxpb=0.50,
                               mutpb=0.25,
                               ngen=50,
                               halloffame=hof1,
                               verbose=False)

first_agent = hof1[0]
print(hof1[0])

pop2 = toolbox.population(n=500)
hof2 = tools.HallOfFame(1)
Example #25
0
def main(verbose=True):
    NRESTARTS = 10  # Initialization + 9 I-POP restarts
    SIGMA0 = 2.0    # 1/5th of the domain [-5 5]
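    # N (the problem dimensionality) and creator.Individual are assumed to be
    # defined at module level outside this snippet.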

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.rastrigin)

    halloffame = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)
    
    logbooks = list()
    
    nsmallpopruns = 0
    smallbudget = list()
    largebudget = list()
    lambda0 = 4 + int(3 * numpy.log(N))
    regime = 1
    i = 0

    while i < (NRESTARTS + nsmallpopruns):
        # The first regime is enforced on the first and last restart
        # The second regime is run if its allocated budget is smaller than the allocated
        # large population regime budget
        if i > 0 and i < (NRESTARTS + nsmallpopruns) - 1 and sum(smallbudget) < sum(largebudget):
            lambda_ = int(lambda0 * (0.5 * (2**(i - nsmallpopruns) * lambda0) / lambda0)**(numpy.random.rand()**2))
            sigma = 2 * 10**(-2 * numpy.random.rand())
            nsmallpopruns += 1
            regime = 2
            smallbudget += [0]
        else:
            lambda_ = 2**(i - nsmallpopruns) * lambda0
            sigma = SIGMA0
            regime = 1
            largebudget += [0]
        
        t = 0
        
        # Set the termination criterion constants
        if regime == 1:
            MAXITER = 100 + 50 * (N + 3)**2 / numpy.sqrt(lambda_)
        elif regime == 2:
            MAXITER = 0.5 * largebudget[-1] / lambda_
        TOLHISTFUN = 10**-12
        TOLHISTFUN_ITER = 10 + int(numpy.ceil(30. * N / lambda_))
        EQUALFUNVALS = 1. / 3.
        EQUALFUNVALS_K = int(numpy.ceil(0.1 + lambda_ / 4.))
        TOLX = 10**-12
        TOLUPSIGMA = 10**20
        CONDITIONCOV = 10**14
        STAGNATION_ITER = int(numpy.ceil(0.2 * t + 120 + 30. * N / lambda_))
        NOEFFECTAXIS_INDEX = t % N

        equalfunvalues = list()
        bestvalues = list()
        medianvalues = list()
        mins = deque(maxlen=TOLHISTFUN_ITER)

        # We start with a centroid in [-4, 4]**D
        strategy = cma.Strategy(centroid=numpy.random.uniform(-4, 4, N), sigma=sigma, lambda_=lambda_)
        toolbox.register("generate", strategy.generate, creator.Individual)
        toolbox.register("update", strategy.update)
        
        logbooks.append(tools.Logbook())
        logbooks[-1].header = "gen", "evals", "restart", "regime", "std", "min", "avg", "max"
        
        conditions = {"MaxIter" : False, "TolHistFun" : False, "EqualFunVals" : False,
                      "TolX" : False, "TolUpSigma" : False, "Stagnation" : False,
                      "ConditionCov" : False, "NoEffectAxis" : False, "NoEffectCoor" : False}

        # Run the current regime until one of the following is true:
        ## Note that the algorithm won't stop by itself on the optimum (0.0 on rastrigin).
        while not any(conditions.values()):
            # Generate a new population
            population = toolbox.generate()
            
            # Evaluate the individuals
            fitnesses = toolbox.map(toolbox.evaluate, population)
            for ind, fit in zip(population, fitnesses):
                ind.fitness.values = fit
            
            halloffame.update(population)
            record = stats.compile(population)
            logbooks[-1].record(gen=t, evals=lambda_, restart=i, regime=regime, **record)
            if verbose:
                print((logbooks[-1].stream))

            # Update the strategy with the evaluated individuals
            toolbox.update(population)
                
            # Count the number of times the k'th best solution is equal to the best solution
            # At this point the population is sorted (method update)
            if population[-1].fitness == population[-EQUALFUNVALS_K].fitness:
                equalfunvalues.append(1)
            
            # Log the best and median value of this population
            bestvalues.append(population[-1].fitness.values)
            medianvalues.append(population[int(round(len(population)/2.))].fitness.values)

            # First run does not count into the budget
            if regime == 1 and i > 0:
                largebudget[-1] += lambda_
            elif regime == 2:
                smallbudget[-1] += lambda_

            t += 1
            STAGNATION_ITER = int(numpy.ceil(0.2 * t + 120 + 30. * N / lambda_))
            NOEFFECTAXIS_INDEX = t % N

            if t >= MAXITER:
                # The maximum number of iteration per CMA-ES ran
                conditions["MaxIter"] = True
            
            mins.append(record["min"])
            if (len(mins) == mins.maxlen) and max(mins) - min(mins) < TOLHISTFUN:
                # The range of the best values is smaller than the threshold
                conditions["TolHistFun"] = True

            if t > N and sum(equalfunvalues[-N:]) / float(N) > EQUALFUNVALS:
                # In 1/3rd of the last N iterations the best and k'th best solutions are equal
                conditions["EqualFunVals"] = True

            if all(strategy.pc < TOLX) and all(numpy.sqrt(numpy.diag(strategy.C)) < TOLX):
                # All components of pc and sqrt(diag(C)) are smaller than the threshold
                conditions["TolX"] = True
            
            # Need to convert strategy.diagD[-1]**2 from numpy.float64 to a
            # Python float to avoid OverflowError
            if strategy.sigma / sigma > float(strategy.diagD[-1]**2) * TOLUPSIGMA:
                # The sigma ratio is bigger than a threshold
                conditions["TolUpSigma"] = True
            
            if len(bestvalues) > STAGNATION_ITER and len(medianvalues) > STAGNATION_ITER and \
               numpy.median(bestvalues[-20:]) >= numpy.median(bestvalues[-STAGNATION_ITER:-STAGNATION_ITER + 20]) and \
               numpy.median(medianvalues[-20:]) >= numpy.median(medianvalues[-STAGNATION_ITER:-STAGNATION_ITER + 20]):
                # Stagnation occurred
                conditions["Stagnation"] = True

            if strategy.cond > 10**14:
                # The condition number is bigger than a threshold
                conditions["ConditionCov"] = True

            if all(strategy.centroid == strategy.centroid + 0.1 * strategy.sigma * strategy.diagD[-NOEFFECTAXIS_INDEX] * strategy.B[-NOEFFECTAXIS_INDEX]):
                # The coordinate axis std is too low
                conditions["NoEffectAxis"] = True

            if any(strategy.centroid == strategy.centroid + 0.2 * strategy.sigma * numpy.diag(strategy.C)):
                # The main axis std has no effect
                conditions["NoEffectCoor"] = True

        stop_causes = [k for k, v in list(conditions.items()) if v]
        print(("Stopped because of condition%s %s" % ((":" if len(stop_causes) == 1 else "s:"), ",".join(stop_causes))))
        i += 1

    return halloffame
Example #26
0
def main():
    random.seed(1)
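    # NGEN, CXPB, MUTPB and the toolbox (with its registered operators) are
    # assumed to be module-level globals defined outside this snippet.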

    pop = toolbox.population(n=250)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    #pop,log = algorithms.eaSimple(pop,toolbox,cxpb=0.5,mutpb=0.2,ngen=300,stats=stats,halloffame=hof,verbose=True)
    for i in range(NGEN):
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))

        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):

            # cross two individuals with probability CXPB
            if random.random() < CXPB:
                toolbox.mate(child1, child2)

                # fitness values of the children
                # must be recalculated later
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:

            # mutate an individual with probability MUTPB
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]

        fitness = list(map(toolbox.evaluate, invalid_ind))
        for ind, fit in zip(invalid_ind, fitness):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring
        pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        hof.update(pop)
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5

        #print("gen:",i,"  Min %s" % min(fits),"  Max %s" % max(fits),"  Avg %s" % mean,"  Std %s" % std)
        print(i, max(fits), mean)

    with open('CartPole-v2.txt', mode='a') as f:
        f.write(str(i))
        f.write(" ")
        f.write(str(max(fits)))
        f.write(" ")
        f.write(str(mean))
        f.write("\n")
    """
    if i%50 == 0:
        with open('gen'+str(i)+'checkpoint_maxstepsliner_hardcore','wb') as fp:
            pickle.dump(pop,fp)
    """
    return pop, hof  # 'log' is only produced by the commented-out eaSimple call above
Example #27
0
    return toolbox

if __name__ == "__main__":
    # Problem size
    num_individuals = 10
    num_generations = 125

    # Create a strategy using CMA-ES algorithm
    strategy = cma.Strategy(centroid=[5.0]*num_individuals, sigma=5.0, 
            lambda_=20*num_individuals)

    # Create toolbox based on the above strategy
    toolbox = create_toolbox(strategy)

    # Create hall of fame object
    hall_of_fame = tools.HallOfFame(1)

    # Register the relevant stats
    stats = tools.Statistics(lambda x: x.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    
    # Objects that will compile the data
    sigma = np.ndarray((num_generations, 1))
    axis_ratio = np.ndarray((num_generations, 1))
    diagD = np.ndarray((num_generations, num_individuals))
Example #28
0
def create_new_features(X, Y, n=4):
    def hellinger(individual, X, Y):
        func = toolbox.compile(expr=individual)

        X0 = X[Y == -1]
        X1 = X[Y == 1]

        genX0 = np.array([func(*X0[i]) for i in range(X0.shape[0])])
        genX1 = np.array([func(*X1[i]) for i in range(X1.shape[0])])

        genX = np.array([func(*X[i]) for i in range(X.shape[0])])

        X0_all = np.all(genX0[0] == genX0)
        X1_all = np.all(genX1[0] == genX1)

        # genX = np.linspace(np.min(genX), np.max(genX), 10000)

        # print(gp.PrimitiveTree(individual))
        # print(genX0)
        # print(genX1)

        if X0_all:
            k0 = np.vectorize(lambda x: 1 if x == genX0[0] else 0)
        else:
            k0 = ss.gaussian_kde(genX0)

        if X1_all:
            k1 = np.vectorize(lambda x: 1 if x == genX1[0] else 0)
        else:
            k1 = ss.gaussian_kde(genX1)

        p_x0 = k0(genX)
        p_x0 = p_x0 / np.sum(p_x0)
        p_x1 = k1(genX)
        p_x1 = p_x1 / np.sum(p_x1)
        # p_y0 = X0.shape[0] / (X0.shape[0] + X1.shape[0])
        # p_y1 = X1.shape[0] / (X0.shape[0] + X1.shape[0])
        # p_x = (p_y0 * p_x0) + (p_y1 * p_x1)

        # p_y0x = (p_x0 * p_y0) / p_x
        # p_y1x = (p_x1 * p_y1) / p_x

        # print('-'*79)
        # print(np.sum(p_x0))
        # print(np.sum(p_x1))
        # print((p_x0 * p_y0).shape)
        # print(p_x.shape)
        # print(p_x0[0])
        # print(p_x[0])
        # print(p_y0x[0])
        # print(np.sum(p_x))
        # print(np.sum(p_y0x))
        # print(np.sum(p_y1x))
        # print('-'*79)

        # dist = np.sqrt(np.sum(np.square(np.sqrt(p_y0x) - np.sqrt(p_y1x))))
        dist = np.sqrt(np.sum(np.square(np.sqrt(p_x0) - np.sqrt(p_x1))))
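        # This is the discrete Hellinger distance between the two class-conditional
        # density estimates, without the conventional 1/sqrt(2) normalization.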

        return dist,

    toolbox.register('evaluate', hellinger, Y=Y, X=X)
    toolbox.register('select', tools.selTournament, tournsize=3)
    toolbox.register('mate', gp.cxOnePoint)
    toolbox.register('expr_mut', gp.genFull, min_=0, max_=2)
    toolbox.register('mutate', gp.mutUniform, expr=toolbox.expr_mut, pset=pset)

    toolbox.decorate(
        'mate', gp.staticLimit(key=operator.attrgetter('height'),
                               max_value=17))
    toolbox.decorate(
        'mutate',
        gp.staticLimit(key=operator.attrgetter('height'), max_value=17))

    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(n)

    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register('avg', np.mean)
    mstats.register('std', np.std)
    mstats.register('min', np.min)
    mstats.register('max', np.max)

    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   0.8,
                                   0.1,
                                   20,
                                   stats=mstats,
                                   halloffame=hof,
                                   verbose=True)

    features_expr = [str(gp.PrimitiveTree(ind)) for ind in hof]
    features_func = [toolbox.compile(expr=ind) for ind in hof]

    return features_expr, features_func, str(log)
Example #29
0
                               max_value=17))
    toolbox.decorate(
        "mutate",
        gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", numpy.mean)
    mstats.register("std", numpy.std)
    mstats.register("min", numpy.min)
    mstats.register("max", numpy.max)

    # Run algorithm
    pop = toolbox.population(n=300)
    hof = tools.HallOfFame(5)
    pop, log = algorithms.eaSimple(pop,
                                   toolbox,
                                   0.5,
                                   0.1,
                                   1000,
                                   stats=mstats,
                                   halloffame=hof,
                                   verbose=True)

    #print(log)

    # Display solutions as function code and score
    for solution in hof:
        tree = gp.PrimitiveTree(solution)
        print('Program:', str(tree), 'Scores:', solution.fitness)
Example #30
0
    def fit(self, X, y):
        print(
            f'fit(): pop_size={self.pop_size}, n_gen={self.n_gen}, cxpb={self.cxpb}, mutpb={self.mutpb},',
            f'n_sim={self.n_sim}, max_time={self.max_time}')

        # The primitives of the tree and their arities.
        pset = gp.PrimitiveSet("main", 4, prefix='x')
        pset.renameArguments(x0='dz', x1='temp', x2='hum', x3='wind')
        pset.addPrimitive(operator.add, 2)
        pset.addPrimitive(operator.sub, 2)
        pset.addPrimitive(operator.mul, 2)
        pset.addPrimitive(protectedDiv, 2)
        pset.addPrimitive(operator.neg, 1)
        pset.addPrimitive(math.cos, 1)
        pset.addPrimitive(math.sin, 1)
        # Issue: log fails when passed negative numbers.
        # pset.addPrimitive(math.log, 1)
        # Ephemeral constants randomly generate constants to be inserted into trees
        pset.addEphemeralConstant("randunif", ephemeral_uniform)
        pset.addEphemeralConstant("randnorm", ephemeral_normal)

        # HACK: sklearn.model_selection.GridSearchCV calls fit multiple times in parallel.
        # creator complains when creating the same class multiple times.
        # checking to see if the class exists already, while addressing race conditions, fixes the problem.
        if not hasattr(creator, 'FitnessMax'):
            creator.create("FitnessMax", base.Fitness, weights=(1.0, ))

        # HACK: sklearn.model_selection.GridSearchCV calls fit multiple times in parallel.
        # creator complains when creating the same class multiple times.
        # checking to see if the class exists already, while addressing race conditions, fixes the problem.
        if not hasattr(creator, 'Individual'):
            creator.create("Individual",
                           gp.PrimitiveTree,
                           fitness=creator.FitnessMax,
                           pset=pset)

        toolbox = base.Toolbox()
        toolbox.register("expr", gp.genFull, pset=pset, min_=1, max_=3)
        toolbox.register("individual", tools.initIterate, creator.Individual,
                         toolbox.expr)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)
        self.compile_context = dict(
            **pset.context)  # convert to normal objects for pickling
        self.compile_arguments = list(
            pset.arguments)  # convert to normal objects for pickling
        toolbox.register("compile",
                         compile,
                         arguments=self.compile_arguments,
                         context=self.compile_context)
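        # 'compile' here is presumably a module-level helper rather than the
        # Python builtin, since it accepts 'arguments' and 'context' keywords
        # taken from the primitive set.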

        evaluate = EvaluateWildfire(x=X, y=y, model=self)
        toolbox.register("evaluate", evaluate)
        toolbox.register("select", tools.selTournament, tournsize=2)
        toolbox.register("mate", gp.cxOnePoint)
        toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
        toolbox.register("mutate",
                         gp.mutUniform,
                         expr=toolbox.expr_mut,
                         pset=pset)

        toolbox.decorate(
            "mate",
            gp.staticLimit(key=operator.attrgetter("height"), max_value=17))
        toolbox.decorate(
            "mutate",
            gp.staticLimit(key=operator.attrgetter("height"), max_value=17))

        stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
        stats_size = tools.Statistics(len)
        mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
        mstats.register("avg", np.mean)
        mstats.register("std", np.std)
        mstats.register("min", np.min)
        mstats.register("max", np.max)

        pop = toolbox.population(n=self.pop_size)
        hof = tools.HallOfFame(1)
        pop, log = algorithms.eaSimple(population=pop,
                                       toolbox=toolbox,
                                       cxpb=self.cxpb,
                                       mutpb=self.mutpb,
                                       ngen=self.n_gen,
                                       stats=mstats,
                                       halloffame=hof,
                                       verbose=False)
        tree = gp.PrimitiveTree(hof[0])
        print('tree', tree)
        print('hof', hof)
        print('log', log)
        print('fitnesses', evaluate.fitnesses)

        # save best model
        self.best_ind = {
            'expr': str(tree),
            'context': self.compile_context,
            'arguments': self.compile_arguments
        }
        # self.hof = hof
        # self.best_ind = hof[0]
        # self.best_tree = gp.PrimitiveTree(self.best_ind)
        self.log = log
        self.fitnesses = evaluate.fitnesses