Example #1
def run_algorithm(index, db, **settings):
    import sqlalchemy as sql
    import pandas as pd
    import json
    import pickle
    bests = []
    objectives = []
    recursive = True
    fronts = []
    setup_ea(**settings)
    algorithm = settings['algorithm']
    print(settings)
    node_data = []
    prediction_data = []
    with open(settings['datapath']) as f:
        node_data = json.load(f)

    with open(settings['predpath']) as f:
        prediction_data = json.load(f)
    # initial run:
    print(node_data[0])
    settings.update({'network_status': node_data[0]})
    settings.update({'posList': node_data[0]})
    settings.update({'prediction_data': []})
    enl = settings['energy_list']

    settings.update({'energy_list_sim': enl})
    pop, stats, best, archive, archivestats = main(archive=[],
                                                   archivestats={},
                                                   **settings)
    best = best[0]
    bests.append(best)
    front = tools.sortNondominated(pop, len(pop), True)[0]
    fronts.append(front)
    #new_lifetime, new_latency, new_received, new_energy_list, new_node_status, new_missed = evaluate(best, **settings)
    nl, l, nmissed, new_energy_list = evaluate(best, **settings)
    nsteps = min([len(node_data), len(prediction_data)]) - 2
    nsteps = min([nsteps, 20])
    try:
        for i in range(1, nsteps):
            print(f"planning reallocations: {nsteps-i} to go")
            settings.update({'energy_list_sim': new_energy_list})
            settings.update({'network_status': node_data[i]})
            settings.update({'posList': node_data[i]})
            settings.update({'prediction': prediction_data[i]})
            print(node_data[i])
            print(prediction_data[i])
            NGEN = settings.get('NGEN_realloc', 10)
            settings.update({'NGEN': NGEN})

            pop, stats, best, archive, archivestats = main(
                archive=archive, archivestats=archivestats, **settings)
            best = best[0]
            bests.append(best)
            front = tools.sortNondominated(pop, len(pop), True)[0]
            fronts.append(front)
    except exceptions.NetworkDeadException:
        save(index, db, bests, fronts, settings)
    save(index, db, bests, fronts, settings)
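
The call pattern shared by the examples on this page, as a minimal self-contained sketch (DEAP assumed installed; the class names here are illustrative, not taken from any example above):

import random
from deap import base, creator, tools

# Two minimized objectives; FitnessMin/Individual are illustrative names.
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)

pop = [creator.Individual([random.random(), random.random()])
       for _ in range(20)]
for ind in pop:
    ind.fitness.values = tuple(ind)  # toy objectives

# k=len(pop) sorts the whole population into ranked fronts...
fronts = tools.sortNondominated(pop, len(pop))
# ...while first_front_only=True returns just the Pareto front, as a
# one-element list of fronts.
first_front = tools.sortNondominated(pop, len(pop), first_front_only=True)[0]
print(len(fronts), len(first_front))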
Example #2
	def print_best(self, pop, num_to_print, recalc=False, non_dom=False, prevent_dupes=True, print_dominates=False, print_crowding=False):
		if non_dom:
			to_print = tools.sortNondominated(pop, len(pop), first_front_only=True)[0]
		else:
			# Flatten all fronts in rank order, then truncate.
			to_print = [item for sublist in tools.sortNondominated(pop, len(pop)) for item in sublist]
			to_print = to_print[:num_to_print]

		dupe = False
		for k, ind in enumerate(sorted(to_print, key=attrgetter("fitness"), reverse=True)):
			if k > 0 and ind == to_print[k - 1]:
				dupe = True
			else:
				if dupe:
					print()
				dupe = False
			if not (prevent_dupes and dupe):
				print(str(k + 1) + ":", end=" ")
				if self.use_CGA:
					# Grab and print the means from the decoder network (an older
					# version sampled a single draw from the network instead).
					recipe = self.model.features_from_design_vector(self.model.construct_from_hidden(np.atleast_2d(np.array(ind)))[1].tolist()[0]).items()
					for f, v in sorted(recipe, key=lambda x: x[1], reverse=True):
						if v > self.print_thresh:
							if self.use_means_not_sampling:
								print(f[2:] + ",", end=" ")
							else:
								print(f[2:] + ":{0:.2f},".format(v), end=" ")
				else:
					for f, v in self.model.features_from_design_vector(ind).items():
						if v:
							print(f[2:] + ",", end=" ")
				print("(", end=" ")
				if recalc:
					for v in self.toolbox.evaluateSeparately(ind):
						print("{0:.3f}".format(v), end=" ")
				else:
					for v in ind.fitness.values:
						print("{0:.3f}".format(v), end=" ")
				print(")", end=" ")
				if print_crowding:
					print("Crowding: {0:.3f},".format(ind.fitness.crowding_dist), end=" ")
				if print_dominates and k + 1 < len(to_print):
					print("Dominates next:", ind.fitness.dominates(to_print[k + 1].fitness))
				else:
					print()
			else:
				print(".", end=" ")
		print()
Example #3
def pareto_ucb1(individuals, k, nr_samples=500):
    """ Implementation of the pareto Upper-Confidence-Bound1 (pareto UCB1) pseudocode [Drugan&Nowe(2013)]

    :param individuals: individuals provided by the ga (must have fitness attributes)
    :param k: The number of individuals to select.
    :return: [(mean of samples, nr_samples)] - will return the mean of the sampled values
                                               and the amount of samples taken for this dp.
    :return: [(idx, sampled_value)]
    """
    n = len(individuals)
    simulators = {individuals[i]: Simulator(individuals[i]) for i in range(n)}

    # Samples per individual
    N = {individuals[i]: 1 for i in range(n)}

    # Empirical mean vector per individual
    ui = {
        individuals[i]: list(normalize(simulators[individuals[i]].run()))
        for i in range(n)
    }

    # individual fitness values are empirical means
    for i in individuals:
        mttf, pow_usage, size = ui[i]
        i.fitness.values = (mttf, pow_usage, size)

    samples = len(individuals)

    while samples < nr_samples:
        A_star = sortNondominated(individuals, k, first_front_only=True)[0]
        # Adds confidence interval
        add_confidence_interval(individuals, list(N.values()), len(A_star))

        A_p = sortNondominated(individuals, k, first_front_only=True)[0]
        # Removes confidence interval
        add_confidence_interval(individuals,
                                list(N.values()),
                                len(A_star),
                                subtract=True)

        a = np.random.choice(A_p)

        N[a] += 1
        samples += 1

        ui[a] = update_empirical_mean(normalize(simulators[a].run()), ui[a],
                                      N[a])
        a.fitness.values = ui[a]

    return [normalize(ui[individuals[i]], invert=True)
            for i in range(n)], [N[individuals[i]] for i in range(n)]
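
Neither `add_confidence_interval` nor the exact bound is shown above; the following is only a sketch of what such a helper might compute, using the standard UCB1-style bonus. Scaling the log term by the front size follows the spirit of Drugan & Nowé (2013), but the exact constant there differs, so treat this as illustrative rather than a faithful reproduction:

import math

def add_confidence_interval(individuals, counts, pareto_size, subtract=False):
    # UCB1-style exploration bonus per individual: sqrt(2 ln(total) / n_i).
    # pareto_size scales the log term (a hypothetical choice); subtract=True
    # undoes a previous call, mirroring the usage in pareto_ucb1 above.
    total = sum(counts)
    sign = -1.0 if subtract else 1.0
    for ind, n_i in zip(individuals, counts):
        bonus = math.sqrt(2.0 * math.log(total * max(pareto_size, 1)) / n_i)
        ind.fitness.values = tuple(v + sign * bonus
                                   for v in ind.fitness.values)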
def test_it_with_bp(play=1, NGEN=100, MU=4 * 25):

    pop, stats = main(play=play, NGEN=NGEN, MU=MU)
    stringh = "_with_bp_without_clustring" + str(play) + "_" + str(NGEN)
    fronts = tools.sortNondominated(pop, len(pop))
    if len(fronts[0]) < 30:
        pareto_front = fronts[0]
    else:
        pareto_front = random.sample(fronts[0], 30)
    print("Pareto Front: ")
    for i in range(len(pareto_front)):
        print(pareto_front[i].fitness.values)

    neter = Neterr(indim, outdim, n_hidden, random)

    print("\ntest: test on one with min validation error",
          neter.test_err(min(pop, key=lambda x: x.fitness.values[1])))
    tup = neter.test_on_pareto_patch_correctone(pareto_front)

    print("\n test: avg on sampled pareto set", tup)

    st = str(neter.test_err(min(
        pop, key=lambda x: x.fitness.values[1]))) + " " + str(tup)
    print(note_this_string(st, stringh))
Example #5
def test_it_with_bp(play=1, NGEN=100, MU=4 * 25, play_with_whole_pareto=0, post_st = ''):
	pop, stats = main(play=play, NGEN=NGEN, MU=MU)
	stringh = "_with_bp" + str(play) + "_" + str(NGEN)+post_st
	fronts = tools.sortNondominated(pop, len(pop))

	'''file_ob = open("./log_folder/log_for_graph.txt", "w+")
	for item in fronts[0]:
		st = str(item.fitness.values[0]) + " " + str(item.fitness.values[1])+"\n"
		file_ob.write( st )
	file_ob.close()'''

	if play_with_whole_pareto or len(fronts[0]) < 30:
		pareto_front = fronts[0]
	else:
		pareto_front = random.sample(fronts[0], 30)

	print("Pareto Front: ")
	for i in range(len(pareto_front)):
		print(pareto_front[i].fitness.values)

	print("\ntest: test on one with min validation error",
		  network_obj_tar.test_err(min(pop, key=lambda x: x.fitness.values[1])))
	tup = network_obj_tar.test_on_pareto_patch_correctone(pareto_front)

	print("\n test: avg on sampled pareto set", tup)

	st = str(network_obj_tar.test_err(min(pop, key=lambda x: x.fitness.values[1]))) + " " + str(tup)
	print(note_this_string(st, stringh))
def test_it_with_bp(play=1, NGEN=100, MU=4 * 25, play_with_whole_pareto=0):
    pop, stats = main(play=play, NGEN=NGEN, MU=MU)
    stringh = "_with_bp_main2_just_src_nll_mse_misc_com" + str(
        play) + "_" + str(NGEN)
    fronts = tools.sortNondominated(pop, len(pop))

    if play_with_whole_pareto or len(fronts[0]) < 30:
        pareto_front = fronts[0]
    else:
        pareto_front = random.sample(fronts[0], 30)
    print("Pareto Front: ")
    for i in range(len(pareto_front)):
        print(pareto_front[i].fitness.values)

    print(
        "\ntest: test on one with min validation error",
        network_obj_tar.test_err(min(pop, key=lambda x: x.fitness.values[1])))
    tup = network_obj_tar.test_on_pareto_patch_correctone(pareto_front)

    print("\n test: avg on sampled pareto set", tup)

    st = str(
        network_obj_tar.test_err(min(
            pop, key=lambda x: x.fitness.values[1]))) + " " + str(tup)
    print(note_this_string(st, stringh))
def get_nondominated(population):
    # k=1 because if k=0 the function returns []; if first_front_only=True, then k is not used
    all_nondominated = tools.sortNondominated(population,
                                              k=1,
                                              first_front_only=True)[0]
    unique_nondominated = set(tuple(ind) for ind in all_nondominated)
    return unique_nondominated
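
A quick self-contained check of the `k` semantics described in the comment above (toy two-objective minimization; the class names are illustrative):

from deap import base, creator, tools

creator.create("FitMin2", base.Fitness, weights=(-1.0, -1.0))
creator.create("Ind2", list, fitness=creator.FitMin2)

pts = [creator.Ind2(p) for p in ([1, 2], [2, 1], [3, 3])]
for ind in pts:
    ind.fitness.values = tuple(ind)

assert tools.sortNondominated(pts, k=0) == []          # k=0 -> empty list
# With first_front_only=True, k only needs to be nonzero; the whole first
# front ([1, 2] and [2, 1]) comes back even for k=1:
assert len(tools.sortNondominated(pts, k=1, first_front_only=True)[0]) == 2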
Example #8
def migSelFrontsContsInslands(populations, numOfIslands):
    wholePopulation = []

    for population in populations:
        wholePopulation += population

    pareto_fronts = tools.sortNondominated(wholePopulation,
                                           len(wholePopulation))

    wholePopulation = []

    for population in pareto_fronts:
        wholePopulation += population

    islandSize = int(len(wholePopulation) / numOfIslands)

    newIslands = []

    for i in range(0, len(wholePopulation), islandSize):
        newIslands.append(wholePopulation[i:i + islandSize])
        lastIndex = i + islandSize

    newIslands[-1] += wholePopulation[lastIndex:]

    for i, newIs in enumerate(newIslands):
        if (i >= len(populations)):
            populations.append(newIs)
        else:
            populations[i] = newIs

    if (len(populations) > len(newIslands)):
        del populations[len(newIslands):]
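
A usage sketch for the migration above, on toy islands of evaluated DEAP individuals (`toy_island` and the class names are illustrative, not part of the original):

import random
from deap import base, creator, tools

creator.create("FitMig", base.Fitness, weights=(-1.0, -1.0))
creator.create("IndMig", list, fitness=creator.FitMig)

def toy_island(n):
    island = [creator.IndMig([random.random(), random.random()])
              for _ in range(n)]
    for ind in island:
        ind.fitness.values = tuple(ind)
    return island

islands = [toy_island(10) for _ in range(3)]
migSelFrontsContsInslands(islands, numOfIslands=3)
# 'islands' is rebalanced in place: equal-sized, front-ordered slices of
# the merged population, with any remainder appended to the last island.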
Example #9
def test_it_without_bp():
    pop, stats = main()
    stringh = "_without_bp"
    fronts = tools.sortNondominated(pop, len(pop))
    if len(fronts[0]) < 30:
        pareto_front = fronts[0]
    else:
        pareto_front = random.sample(fronts[0], 30)
    print("Pareto Front: ")
    for i in range(len(pareto_front)):
        print(pareto_front[i].fitness.values)

    neter = Neterr(indim, outdim, n_hidden, random)

    print("\ntest: test on one with min validation error",
          neter.test_err(min(pop, key=lambda x: x.fitness.values[1])))
    tup = neter.test_on_pareto_patch(pareto_front)

    print("\n test: avg on sampled pareto set", tup[0], "least found avg",
          tup[1])

    st = str(neter.test_err(min(
        pop, key=lambda x: x.fitness.values[1]))) + " " + str(
            tup[0]) + " " + str(tup[1])
    print(note_this_string(st, stringh))
Example #10
    def AttachContrib_into_Pop(self, individuals, ind_contrib, world,  qtde_pop):

        # attach the contributed individuals to the population
        k = len(individuals) + len(ind_contrib)
        individuals.extend(ind_contrib[:])


        pareto_fronts = tools.sortNondominated(individuals, k)
        #print len(pareto_fronts)


        for front in pareto_fronts:
            #here is the difference
            self.assignCOINdist(front, world)
            ###self.assignCrowdingDist_PURE(front)

        #NOW, choose only qtde_pop individuals
        chosen=[]
        clean_space = qtde_pop
        #loop fronts
        for i in pareto_fronts:
            #if space, push it
            tam=len(i)
            if tam < clean_space:
                chosen.extend(i)
                clean_space -= tam

            else:
                sorted_front = sorted(i, key=attrgetter("fitness.crowding_dist"), reverse=False)
                chosen.extend(sorted_front[:clean_space])
                clean_space -= len(sorted_front[:clean_space])
                break


        return chosen
Example #11
    def get_nondominated_inds(self, toolbox, pop):

        if MPI_RANK != 0: return None

        nd_inds = tools.sortNondominated(pop, k=len(pop), first_front_only=True)[0]
        nd_inds = [toolbox.clone(x) for x in nd_inds]

        if self.log_print: print("non dominated %i" % len(nd_inds))

        return nd_inds
Example #13
def analysis_result():
    """
        This is used to analysis the data
    """
    data_file = file(DATA_FILE_NAME + DATA_FILE_NAME_END, "rb")
    result = pickle.load(data_file)
    result_vector = pickle.load(data_file)
    logbook = pickle.load(data_file)

    individuals = tools.sortNondominated(result, len(result))

    front = np.array([ind.fitness.values for ind in result])
    plt.figure()
    plt.subplot(1, 3, 1)
    plt.scatter(front[:, 0], front[:, 1], c="b")
    plt.grid(True)

    gen = logbook.select("gen")
    tmax = np.array(logbook.select("max"))

    plt.subplot(1, 3, 2)
    plt.plot(gen, tmax[:, 0])
    plt.grid(True)

    plt.subplot(1, 3, 3)
    plt.plot(gen, tmax[:, 1])
    plt.grid(True)

    plt.savefig(IMAGE1_FILE_NAME + IMGAGE_FINE_NAME_END,
                dpi=(300),
                figsize=(50, 10))

    plt.figure()
    avg = np.array(logbook.select("avg"))

    plt.subplot(1, 3, 1)
    plt.scatter(front[:, 0], front[:, 1], c="b")
    plt.grid(True)

    plt.subplot(1, 3, 2)
    plt.plot(gen, avg[:, 0])
    plt.grid(True)

    plt.subplot(1, 3, 3)
    plt.plot(gen, avg[:, 1])
    plt.grid(True)

    plt.savefig(IMAGE2_FILE_NAME + IMGAGE_FINE_NAME_END,
                dpi=(300),
                figsize=(50, 10))
Example #14
def migSelOneFrontOneIsland(populations):
    wholePopulation = []

    for population in populations:
        wholePopulation += population

    pareto_fronts = tools.sortNondominated(wholePopulation,
                                           len(wholePopulation))

    for i, newIs in enumerate(pareto_fronts):
        if (i >= len(populations)):
            populations.append(newIs)
        else:
            populations[i] = newIs

    if (len(populations) > len(pareto_fronts)):
        del populations[len(pareto_fronts):]
Example #15
def get_pareto_front(population):
    """Calculates the first pareto-front.

    :population: the population to process
    :returns: a list with the individuals and a set with their fitness values

    """
    # k is ignored when first_front_only=True (it only needs to be nonzero),
    # so the hard-coded 500 here acts as a dummy value.
    pareto_front = tools.sortNondominated(population, 500, True)
    fitness = set()
    solutions = list()
    fitness_size = 0
    for ind in pareto_front[0]:
        fitness.add(ind.fitness.values)
        # add only the first solution with the same fitness
        if fitness_size < len(fitness):
            solutions.append(ind.get_values())
            fitness_size = len(fitness)

    return solutions, fitness
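
An equivalent way to express the dedup-by-fitness step above, which may read more directly (same behavior and return types; `get_values()` is the accessor used by the original, and `tools` is assumed imported as above):

def get_pareto_front_unique(population):
    front = tools.sortNondominated(population, len(population),
                                   first_front_only=True)[0]
    seen, solutions = set(), []
    for ind in front:
        # Keep only the first solution seen for each distinct fitness vector.
        if ind.fitness.values not in seen:
            seen.add(ind.fitness.values)
            solutions.append(ind.get_values())
    return solutions, seen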
Example #16
def test_it_with_bp():
    pop, stats = main( play = 1, NGEN = 40)
    stringh = "_with_bp"
    fronts = tools.sortNondominated(pop, len(pop))

    if len(fronts[0]) < 30:
        pareto_front = fronts[0]
    else:
        pareto_front = random.sample(fronts[0], 30)
    print("Pareto Front: ")
    for i in range(len(pareto_front)):
        print(pareto_front[i].fitness.values)

    print("\ntest: test on one with min validation error",
          network_obj.test_err(min(pop, key=lambda x: x.fitness.values[1])))
    tup = network_obj.test_on_pareto_patch_correctone(pareto_front)

    print("\n test: avg on sampled pareto set", tup)

    st = str(network_obj.test_err(min(pop, key=lambda x: x.fitness.values[1]))) + " " + str(tup)
    print(note_this_string(st, stringh))
Example #17
def get_par_front_list_of_final_pop(final_pop):
    """
    Returns list with individuals of final pareto frontier

    Parameters
    ----------
    final_pop : object
        Final population object

    Returns
    -------
    list_inds_pareto : list
        List holding Pareto-optimal individual solutions (sorted by fitness
        values, beginning with the lowest cost value)
    """

    print()
    print('Non-dominated pareto-frontier results:')
    print('##############################################################')

    #  Get pareto frontier results (first/best pareto frontier)
    lists_pareto_frontier = tools.sortNondominated(final_pop,
                                                   len(final_pop),
                                                   first_front_only=True)

    list_inds_pareto = []

    #  Add all solutions of lists_pareto_frontier to single list
    for list_par in lists_pareto_frontier:
        for sol in list_par:
            list_inds_pareto.append(sol)

    #  Sort by fitness value
    list_inds_pareto = sorted(list_inds_pareto,
                              key=lambda x: x.fitness.values[0],
                              reverse=False)

    return list_inds_pareto
Example #18
    def pop_to_df(self, population):
        data = []
        fronts = tools.sortNondominated(population, len(population))
        for i, front in enumerate(fronts):
            for j, ind in enumerate(front):
                f = self.problem.evaluate(ind)
                feasible = "feasible"
                if f[0] > self.settings['feasiblity_threshold']:
                    feasible = "infeasible"
                i_data = {
                    'front': i,
                    'non_dominated': i == 0,
                    'crowding_distance': np.min([2, ind.fitness.crowding_dist]),
                    'individual': j,
                    'value': json.dumps(ind),
                    'robustness': f[0],
                    'flowtime': f[1],
                    'makespan': f[2],
                    'collision': feasible,
                }
                data.append(i_data)
        return pd.DataFrame(data)
Example #19
def main():
    creator.create("FitnessMax", base.Fitness, weights=(-1.0, -1.0))
    creator.create("Individual", list, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()

    # Operator registering
    toolbox.register("individual", tools.initRepeat, creator.Individual)
    # toolbox.register("population", tools.initRepeat, list, toolbox.individual )

    toolbox.register("evaluate", specializedEval)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", specializedMutate)
    toolbox.register("select", tools.selSPEA2)

    NGEN = 20
    MU = 40  #number of indi for the next gen
    LAMBDA = 10  #number of children
    CXPB = 0.7
    MUTPB = 0.3

    # ---- generating the population
    allConfs = [list(x) for x in zip(range(200), range(300, 500, 1))]
    population = []
    for index in range(len(allConfs)):
        myGenerator = return_conf(allConfs[index])
        population.append(
            toolbox.individual(lambda: next(myGenerator),
                               len(allConfs[index])))

    for el in population:
        print(type(el))
    # population = eaMuPlusLambda_redefined(population, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN)
    algorithms.eaMuPlusLambda(population, toolbox, MU, LAMBDA, CXPB, MUTPB,
                              NGEN)
    nonDominatedSort = tools.sortNondominated(population, len(population))
    print(nonDominatedSort)
    return population
def selCustom(individuals, k):

    # Using DEAP's sortNondominated, we get a list of fronts,
    # where each front 'i' dominates front 'i+1'.
    # We use this to create a new attribute for each individual, called "rank";
    # the rank is then used as the fitness value for the tournament selection,
    # as specified by Ombuki et al.
    pareto_fronts = tools.sortNondominated(individuals, k)
    for front_rank in range(1, len(pareto_fronts) + 1):
        front = pareto_fronts[front_rank - 1]
        for ind in front:
            setattr(ind, 'rank', front_rank)

    #the first rank is the "elite" (Pareto-optimal) set of solutions
    #to which we want to guarantee a spot in the next generation
    #therefore, we extract them before the tournament selection takes place
    elite = pareto_fronts.pop(0)
    individuals_excluding_elite = [i for i in individuals if i not in elite]
    #we update k, the number of individuals to be chosen by the tournament selection
    k -= len(elite)

    #as specified by the paper
    tournsize = 4
    r_thresh = 0.8

    chosen = []
    for i in range(k):
        aspirants = tools.selRandom(individuals_excluding_elite, tournsize)
        if random.random() < r_thresh:
            chosen_individual = min(aspirants, key=attrgetter("rank"))
        else:
            chosen_individual = tools.selRandom(aspirants, 1)[0]
        chosen.append(chosen_individual)

    #add in the elite solutions
    chosen += elite
    return chosen
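
selCustom matches DEAP's `select(individuals, k)` signature, so it can be registered like any built-in selection operator; a sketch (the surrounding toolbox setup and MU are assumed elsewhere):

from deap import base

toolbox = base.Toolbox()
toolbox.register("select", selCustom)
# Inside the generational loop it then drops in like tools.selNSGA2:
# pop = toolbox.select(pop + offspring, k=MU)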
Example #21
def main(chrom_gen):

    toolbox = base.Toolbox()

    class ClassContainer:
        def __init__(self, ch: Chromosome):
            """ A container that generates a Chromosome object
            """
            self.chromosome = ch

    creator.create("FitnessMulti", base.Fitness, weights=(1.0, 1.0))
    creator.create("Individual", ClassContainer, fitness=creator.FitnessMulti)

    toolbox.register("attr_float", chrom_gen.generate_100cov_chromosome)
    toolbox.register("individual", tools.initIterate, creator.Individual,
                     toolbox.attr_float)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    object2D = Objective2D(chrom_gen)

    def evaluate_individual(indiv):
        return object2D.compute_objective(indiv.chromosome)

    toolbox.register("evaluate", evaluate_individual)
    toolbox.register("select", tools.selNSGA2)

    mutation = ChromosomeMutator100cov(chrom_gen)
    toolbox.register("mutate", mutation.apply_mutation)
    toolbox.register("mate", multipoint_cx)

    MU = 20  # population size (should be a multiple of 4 because we are using the selTournamentDCD)
    NGEN = 200  # number of generations
    CXPB = 0.7
    online_plot = False  # decide whether to show a dynamic plot with Pareto front across generations or not

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("max", numpy.max, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "pop", "max"

    pop = toolbox.population(n=MU)

    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)

    for ind1, fit in zip(invalid_ind, fitnesses):
        ind1.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, pop=len(invalid_ind), **record)
    # print('Results: \n ============================================================= \n ', logbook.stream)

    ################################################################################################################
    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        offspring_pop = list()
        while len(offspring_pop) < len(pop):
            # select parent 1
            parent1 = apply_tournament_selection(pop, 2)
            offspring1 = toolbox.clone(parent1)
            # select parent 2
            parent2 = apply_tournament_selection(pop, 2)
            offspring2 = toolbox.clone(parent2)

            # apply crossover
            r = random.random()
            if r <= CXPB:
                toolbox.mate(offspring1.chromosome, offspring2.chromosome)

            # apply mutation
            toolbox.mutate(offspring1.chromosome)
            toolbox.mutate(offspring2.chromosome)

            # calculate objectives scores
            offspring1.fitness.values = toolbox.evaluate(offspring1)
            offspring2.fitness.values = toolbox.evaluate(offspring2)

            # add offsprings to the new population
            offspring_pop.append(offspring1)
            offspring_pop.append(offspring2)

        # Select the next generation population
        pop = toolbox.select(pop + offspring_pop, MU)

        # if online_plot:
        #     frontier = numpy.array([ind.fitness.values for ind in pop])
        #     plt.close()
        #     plt.scatter(frontier[:, 0], frontier[:, 1], c="r", marker='o',edgecolors='k')
        #     plt.xlabel('Specificity')
        #     plt.ylabel('Frequency')
        #     plt.pause(0.01)

        record = stats.compile(pop)
        logbook.record(gen=gen, pop=len(invalid_ind), **record)

        # print(logbook.stream)

    for ind in pop:
        # apply corrections
        check_variable_parts(ind.chromosome, chrom_gen.messages)

    # 1. Let's first extract only the Pareto front
    fitnesses = toolbox.map(toolbox.evaluate, pop)
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    ParetoSet = sortNondominated(pop, len(pop), first_front_only=True)
    pop = ParetoSet[0]

    front = numpy.array([ind.fitness.values for ind in pop])

    # get the max value for each objective
    # Specificity
    max_spec = 0.0
    min_spec = 1.0
    max_freq = 0.0
    min_freq = 1.0
    for individual in pop:
        if individual.fitness.values[0] > max_spec:
            max_spec = individual.fitness.values[0]
        if individual.fitness.values[0] < min_spec:
            min_spec = individual.fitness.values[0]
        if individual.fitness.values[1] > max_freq:
            max_freq = individual.fitness.values[1]
        if individual.fitness.values[1] < min_freq:
            min_freq = individual.fitness.values[1]

    #############################
    # search for the mid point
    # between the corner points
    mid_x = (max_spec + min_spec) / 2
    mid_y = (max_freq + min_freq) / 2
    mid = (mid_x, mid_y)
    # get the closest point to the mid_pt
    distance = []
    for opt in front:
        dist = numpy.sqrt(((mid[0] - opt[0])**2) + ((mid[1] - opt[1])**2))
        distance.append(dist)

    mid_pt = front[distance.index(min(distance))]
    # print('Middle point = ', mid_pt)

    #############################
    # search for the knee point, the closest point to the max objectives

    min_dist = 100.0
    for opt1 in front:
        dist = numpy.sqrt(((max_spec - opt1[0])**2) +
                          ((max_freq - opt1[1])**2))
        if dist < min_dist:
            min_dist = dist
            knee_pt = opt1
    # print('Knee point = ', knee_pt)

    min_dist = 100.0
    for opt1 in front:
        dist = numpy.sqrt(((1.0 - opt1[0])**2) + ((1.0 - opt1[1])**2))
        if dist < min_dist:
            min_dist = dist
            knee_pt1 = opt1
    # print('Knee point11 = ', knee_pt1)

    for ch in pop:
        if ch.fitness.values[0] == knee_pt[0] and ch.fitness.values[
                1] == knee_pt[1]:
            knee_solution = ch
            break

    for ch in pop:
        if ch.fitness.values[0] == knee_pt1[0] and ch.fitness.values[
                1] == knee_pt1[1]:
            knee_solution1 = ch
            break

    for ch in pop:
        if ch.fitness.values[0] == mid_pt[0] and ch.fitness.values[
                1] == mid_pt[1]:
            mid_solution = ch
            break

    ## plot the pareto front
    # plt.scatter(front[:,0], front[:,1], c="r", marker='o')
    # # print the mid_pt
    # plt.scatter(mid_pt[0], mid_pt[1], c='b', marker='*')
    # plt.scatter(mid_x, mid_y, c='black', marker='*')
    # # print the knee point
    # plt.scatter(max_spec, max_freq, c='black', marker='^')
    # plt.scatter(knee_pt[0], knee_pt[1], c='g', marker='^')
    # # print the knee11 point
    # plt.scatter(knee_pt1[0], knee_pt1[1], c='y', marker='+')
    #
    # plt.xlabel('Specificity')
    # plt.ylabel('Frequency')
    #
    # plt.show()

    # pareto is a dict with three elements
    # key : name of the point from the pareto front
    # value: chromosome
    pareto = {
        'Knee_Solution': knee_solution.chromosome,
        'Knee_Solution_1': knee_solution1.chromosome,
        'Mid_Solution': mid_solution.chromosome
    }

    #return the three best points
    #       the logbook to see the variation of specificity and frequency values
    #       the execution time
    #       the Non dominated points from the last population
    # return pareto, logbook, execution_time, pop
    return pareto  #, logbook, pop
Example #22
    def run(self, verbose=False):

        creator.create("FitnessMulti",
                       base.Fitness,
                       weights=(-1.0, -1.0, -1.0))
        creator.create("Individual",
                       gp.PrimitiveTree,
                       fitness=creator.FitnessMulti)
        start = time.time()
        self.population = self.init_pop()
        elapsed = time.time() - start
        #print(f'elapsed seconds for population initialization: {elapsed}')

        self.population = [
            creator.Individual(indv) for indv in self.population
        ]
        toolbox = base.Toolbox()

        toolbox.register("expr",
                         self.genRampedRegexTree,
                         min_=self.min_ht,
                         max_=self.max_ht,
                         ratio=self.term_ratio,
                         classes=self.classes,
                         pset=self.pset)
        toolbox.register("individual", tools.initIterate, creator.Individual,
                         toolbox.expr)
        toolbox.register("population", tools.initRepeat, list,
                         toolbox.individual)
        toolbox.register("mutate",
                         gp.mutUniform,
                         expr=toolbox.expr,
                         pset=self.pset)

        hof = tools.HallOfFame(1)
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("std", np.std)
        stats.register("min", np.min)
        stats.register("max", np.max)

        logbook = tools.Logbook()
        logbook.header = ['gen'] + stats.fields

        for g in range(self.ngen):

            start_time = time.time()
            # get the fitnesses for every individual in the population
            fitnesses = futures.map(self.evaluate_regex, self.population)
            for indv, fits in zip(self.population, fitnesses):
                indv.fitness.values = fits

            # log and record progress
            record = stats.compile(self.population)
            logbook.record(gen=g, **record)
            if verbose:
                print(logbook.stream)
            hof.update(self.population)

            # sort by Pareto-fronts (NSGA-II, Deb et al.)
            self.population = [
                indv for front in tools.sortNondominated(
                    self.population, self.pop_size) for indv in front
            ]

            keep_num = int(self.pop_size * 0.9)  # keep 90% of old gen
            new_pop = []
            while len(new_pop) < keep_num:
                rnum = random.random()
                if rnum < self.CXPB:
                    cx_indv1 = tools.selTournament(self.population,
                                                   k=1,
                                                   tournsize=7)[0]
                    cx_indv2 = tools.selTournament(self.population,
                                                   k=1,
                                                   tournsize=7)[0]

                    # cx_indv1, cx_indv2 = gp.cxOnePointLeafBiased(cx_indv1, cx_indv2, self.term_ratio)
                    cx_indv1, cx_indv2 = self.cxLeafOrSubTree(
                        cx_indv1, cx_indv2, self.term_ratio)
                    new_pop.append(cx_indv1)
                    new_pop.append(cx_indv2)
                elif rnum < self.CXPB + self.MUTPB:
                    mutant = toolbox.mutate(
                        tools.selTournament(self.population, k=1,
                                            tournsize=7)[0])[0]
                    new_pop.append(mutant)
                else:
                    new_pop.append(
                        tools.selTournament(self.population, k=1,
                                            tournsize=7)[0])

            self.population = new_pop + toolbox.population(n=self.pop_size -
                                                           keep_num)

            best = tools.selBest(self.population, k=1)[0]
            tree = gp.PrimitiveTree(best)
            print('Best of that gen:')
            print(
                gp.compile(tree, pset=self.pset).s + '\nFitness: ' +
                str(best.fitness.values))
            elapsed_time = time.time() - start_time
            remaining_min = (elapsed_time * (self.ngen - g)) / 60
            remaining_hours = remaining_min / 60
            print(
                f"Time for last gen: {elapsed_time} secs, Remaining: {remaining_min} minutes, {remaining_hours} hours."
            )
            pct = int(100 * g / self.ngen)  # percent complete
            print('[' + ('*' * pct) + ((100 - pct) * ' ') + ']')

        return hof, logbook
def main(seed=None, play=0, NGEN=40, MU=4 * 10):
    # random.seed(seed)

    # MU has to be a multiple of 4. period.
    CXPB = 0.9

    stats = tools.Statistics(lambda ind: ind.fitness.values[1])
    # stats.register("avg", numpy.mean, axis=0)
    # stats.register("std", numpy.std, axis=0)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    toolbox.register("evaluate", minimize_src)
    time1 = time.time()
    pop_src = toolbox.population(n=MU)
    time2 = time.time()
    print("After population initialisation", time2 - time1)
    print(type(pop_src))
    # print("population initialized")
    # network_obj = Neterr(indim, outdim, n_hidden, np.random)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop_src if not ind.fitness.valid]

    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    time3 = time.time()
    print("After feedforward", time3 - time2)
    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop_src = toolbox.select(pop_src, len(pop_src))
    # print( "first population selected, still outside main loop")
    # print(pop)
    record = stats.compile(pop_src)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)
    maxi = 0
    stri = ''
    flag = 0
    # Begin the generational process
    # print(pop.__dir__())
    time4 = time.time()
    for gen in range(1, NGEN):

        # Vary the population
        if gen == 1:
            time6 = time.time()
        if gen == NGEN - 1:
            time7 = time.time()
        print()
        print("here in gen no.", gen)
        offspring = tools.selTournamentDCD(pop_src, len(pop_src))
        offspring = [toolbox.clone(ind) for ind in offspring]
        if play:
            if play == 1:
                pgen = NGEN * 0.1
            elif play == 2:
                pgen = NGEN * 0.9

            if gen == int(pgen):
                print("gen:", gen, "doing clustering")
                to_bp_lis = cluster.give_cluster_head(offspring,
                                                      int(MU * bp_rate))
                assert (to_bp_lis[0] in offspring)
                print("doing bp")
                [
                    item.modify_thru_backprop(indim,
                                              outdim,
                                              network_obj_src.rest_setx,
                                              network_obj_src.rest_sety,
                                              epochs=10,
                                              learning_rate=0.1,
                                              n_par=10) for item in to_bp_lis
                ]

                # Evaluate the individuals with an invalid fitness
                invalid_ind = [
                    ind for ind in offspring if not ind.fitness.valid
                ]
                fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
                for ind, fit in zip(invalid_ind, fitnesses):
                    ind.fitness.values = fit
        if gen == 1:
            time8 = time.time()
        if gen == NGEN - 1:
            time9 = time.time()
        dum_ctr = 0
        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            flag = 0
            if random.random() <= CXPB:
                ind1, ind2 = toolbox.mate(ind1, ind2, gen)
                ind1 = creator.Individual(indim, outdim, ind1)
                ind2 = creator.Individual(indim, outdim, ind2)
                flag = 1
            maxi = max(maxi, ind1.node_ctr, ind2.node_ctr)
            toolbox.mutate(ind1)
            toolbox.mutate(ind2)

            offspring[dum_ctr] = ind1
            offspring[dum_ctr + 1] = ind2
            del offspring[dum_ctr].fitness.values, offspring[dum_ctr +
                                                             1].fitness.values
            dum_ctr += 2
        if gen == 1:
            print("1st gen after newpool", time.time() - time8)
        if gen == NGEN - 1:
            print("last gen after newpool", time.time() - time9)

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop_src = toolbox.select(pop_src + offspring, MU)

        record = stats.compile(pop_src)
        logbook.record(gen=gen, evals=len(invalid_ind), **record)
        anost = logbook.stream
        liso = [item.rstrip() for item in anost.split("\t")]
        mse = float(liso[3])

        print(anost)
        stri += anost + '\n'
        print("generation done")
        # file_ob.write(str(logbook.stream))
        # print(len(pop))
        # file_ob.close()
    time5 = time.time()
    print("Overall time", time5 - time4)
    # print(stri)
    print(
        ' ------------------------------------src done------------------------------------------- '
    )
    fronts = tools.sortNondominated(pop_src, len(pop_src))
    pareto_front = fronts[0]
    print(pareto_front)
    st = '\n\n'
    pareto_log_fileo = open(
        "./log_folder/log_pareto_just_src_nll_mse_misc_com" + str(NGEN) +
        ".txt", "a")
    for i in range(len(pareto_front)):
        print(pareto_front[i].fitness.values)
        st += str(pareto_front[i].fitness.values)
        pareto_log_fileo.write(st + '\n')
    pareto_log_fileo.close()

    return pop_src, logbook
        cdf2 = pd.DataFrame([{
            'gen': gen,
            'avg_res': np.mean(df2['res']),
            'min_res': min(df2['res']),
            'avg_accel': np.mean(df2['accel']),
            'min_accel': min(df2['accel'])
        }])
        print('df to be added is     ')
        print(cdf2)
        cdf3 = df1.append(cdf2, ignore_index=True)
        cdf3.to_csv('../src/static/data/convergence.csv', index=False)
        print('total dataframe to date is    ')
        print(cdf3)

pareto_ = tools.sortNondominated(offspring,
                                 len(all_inds),
                                 first_front_only=True)

paretoFits_ = list(
    map(lambda x: x.fitness.values,
        pareto_[0]))  # TODO this is returning a list index out of range error
pareto_res_ = [x[0] for x in paretoFits_]
pareto_stab_ = [x[1] for x in paretoFits_]
pareto_res.append(pareto_res_)
pareto_stab.append(pareto_stab_)

allinds_ = offspring
allfits_ = list(map(lambda x: x.fitness.values, allinds_))
allres_ = [x[0] for x in allfits_]
allstab_ = [x[1] for x in allfits_]
allres.append(allres_)
Example #25
def main(seed=None):
    random.seed(seed)

    NGEN = 250
    MU = 100

    log_interval = 25

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean, axis=0)
    stats.register("std", np.std, axis=0)
    stats.register("min", np.min, axis=0)
    stats.register("max", np.max, axis=0)
    stats.register("median", np.median, axis=0)

    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max", "median"

    pop = toolbox.population(n=MU)
    hof = tools.ParetoFront()

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    record = stats.compile(pop)
    logbook.record(gen=0, evals=len(invalid_ind), **record)
    print(logbook.stream)
    hof.update(pop)

    basepath = os.path.dirname(os.path.abspath(__file__))
    log_dir = '{}/logs/{}/'.format(basepath, time.strftime('%y%m%d-%H%M%S'))
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # log initial population
    os.makedirs('{}0'.format(log_dir))
    for i, agent in enumerate(pop):
        agent[0].save_weights('{}0/{}_weights.csv'.format(log_dir, i), overwrite=True)
    log_population(pop, '{}0'.format(log_dir))

    # Begin the generational process
    for gen in range(1, NGEN+1):
        # Get Offspring
        # first get pareto front
        pareto_fronts = tools.sortNondominated(pop, len(pop))
        selection = pareto_fronts[0]
        len_pareto = len(pareto_fronts[0])

        rest = list(chain(*pareto_fronts[1:]))
        if len(rest) % 4:
            rest.extend(random.sample(selection, 4 - (len(rest) % 4)))

        selection.extend(tools.selTournamentDCD(rest, len(rest)))
        offspring = [toolbox.mutate(toolbox.clone(ind)) for ind in selection[:len(pop)]]

        # Revaluate the individuals in last population
        fitnesses = toolbox.map(toolbox.evaluate, pop)
        for ind, fit in zip(pop, fitnesses):
            ind.fitness.values = fit

        # Evaluate the new offspring
        fitnesses = toolbox.map(toolbox.evaluate, offspring)
        for ind, fit in zip(offspring, fitnesses):
            ind.fitness.values = fit

        # Update the hall of fame with the generated individuals
        hof.update(offspring)

        plot_population(pop, offspring, lim = [[10,120],[0,0],[0,4]])

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)
        pareto_fronts = tools.sortNondominated(pop, len(pop))
        plot_selection(pop, pareto_front_size=len(pareto_fronts[0]), lim = [[10,120],[0,0],[0,4]])
        
        record = stats.compile(pop)
        logbook.record(gen=gen, evals=len(offspring)+len(pop), **record)
        print(logbook.stream)

        if gen % log_interval == 0 or gen == NGEN:
            os.makedirs('{}{}'.format(log_dir, gen))
            for i, agent in enumerate(pop):
                agent[0].save_weights('{}{}/{}_weights.csv'.format(log_dir, gen, i), overwrite=True)
            log_population(pop, '{}{}'.format(log_dir, gen))

    with open('{}/gen_stats.txt'.format(log_dir), 'w') as fp:
        np.savetxt(fp, logbook, fmt="%s")

    plot_population(pop)
    print("Final population hypervolume is %f" % hypervolume(pop, [11.0, 11.0, 11.0]))

    os.makedirs('{}hof'.format(log_dir))
    for i, agent in enumerate(hof):
        agent[0].save_weights('{}hof/{}_weights.csv'.format(log_dir, i), overwrite=True)
    log_population(hof, '{}hof'.format(log_dir))

    return pop, logbook
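
A note on the hard-coded reference point in the hypervolume call above: it must be weakly dominated by every individual (for minimization, component-wise worse than all observed values). A sketch of deriving one from the population instead of hard-coding it (numpy assumed; minimization of all objectives is an assumption here):

import numpy as np

def reference_point(pop, margin=1.0):
    # Worst observed value per objective plus a margin, so the reference
    # point is strictly worse than every individual (assuming all
    # objectives are minimized).
    fits = np.array([ind.fitness.values for ind in pop])
    return (fits.max(axis=0) + margin).tolist()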
Example #26
def nsga2(seed=None, NGEN=100, MU=100, CXPB=0.5, dump_intrval=0):
    """Execute NSGA-II-based algorithm for the M2-2E-VRP.

    Taken and adapted from DEAP example.
    Keyword arguments:
        seed -- seed for randomness
        NGEN -- number of generations (default 100)
        MU -- size of [initial] population (default 100)
        CXPB -- crossover probability (default 0.5)
        dump_intrval -- interval at which dump files are generated (default 0)
    """
    random.seed(seed)

    # begin recording time
    t0 = default_timer()

    # file io time to be subtracted
    file_time = 0

    # initial population generation
    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    pop = toolbox.select(pop, MU)

    # Begin the generational process
    for gen in range(1, NGEN + 1):

        # print during each generation in order to follow execution flow
        print("Generation " + str(gen))

        # Vary the population
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)

            toolbox.mutate(ind1)
            toolbox.mutate(ind2)
            del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)

        # dump every 'intrval' (usually 50) generations
        if dump_intrval > 0 and gen % dump_intrval == 0:
            # determine execution time
            tx = default_timer()
            total_time = (tx - t0)
            exec_time = total_time - file_time
            # obtain pareto front
            fronts = tools.sortNondominated(pop,
                                            len(pop),
                                            first_front_only=True)
            front = fronts[0]
            # generate dump
            generate_dump(MU, gen, total_time, exec_time, front)
            ty = default_timer()
            file_time += (ty - tx)

    return pop
Example #27
def run_algorithm(index, db, **settings):
    import sqlalchemy as sql
    import pandas as pd
    import json
    import pickle
    bests = []
    objectives = []
    recursive = True
    fronts = []
    setup_ea(**settings)

    actual_times = []
    actual_latencies = []
    missed_packages = []
    algorithm = settings['algorithm']
    # initial run:
    pop, stats, best, archive, archivestats = main(archive=[],
                                                   archivestats={},
                                                   **settings)
    best = best[0]
    settings.update({'enable_errors': True})
    new_lifetime, new_latency, new_received, new_energy_list, new_node_status, new_missed = evaluate(
        best, **settings)
    actual_times.append(new_lifetime)
    actual_latencies.append(new_latency)
    settings.update({'energy_list_sim': new_energy_list})
    settings.update({'network_status': new_node_status})
    NGEN = settings.get('NGEN_realloc', 10)
    if settings['algorithm'] == 'dtas':
        NGEN = int(NGEN * 2.5)
    settings.update({'NGEN': NGEN})
    bests.append(list(best))
    front = tools.sortNondominated(pop, len(pop), True)[0]
    fronts.append(front)
    if algorithm == 'nsga2' or algorithm == 'rmota':
        objectives.append(best.fitness.values)
    else:
        objectives.append((best.fitness.values[0], best.latency))
    run_number = 2
    while True:
        try:
            settings.update({'run_number': run_number})
            try:
                new_alloc = getNewValidAssignment(best, new_node_status,
                                                  archive, **settings)
            except exceptions.NoValidNodeException:
                raise exceptions.NetworkDeadException

            if len(archive) == 0:
                #print("arhcive len 0!")
                new_alloc = creator.Individual(new_alloc)
                archive.append(new_alloc)

            #print(new_alloc)
            settings.update({'enable_errors': False})
            settings.update({'NGEN': NGEN})
            #run for 10 iterations to find suitable new allocation
            pop, stats, best, archive, archivestats = main(
                archive=archive, archivestats=archivestats, **settings)
            best = best[0]
            settings.update({'next_alloc': best})
            settings.update({'enable_errors': True})
            #run real sim to check next failure
            new_lifetime, new_latency, new_received, new_energy_list, new_node_status, new_missed = evaluate(
                allocation=new_alloc, repeat=True, **settings)
            actual_times.append(new_lifetime)
            actual_latencies.append(new_latency)
            missed_packages.append(new_missed)
            settings.update({'energy_list_sim': new_energy_list})
            settings.update({'network_status': new_node_status})
            #print(f"runtimes so far: {actual_times}")
            #print(f"latencies so far: {actual_latencies}")
            bests.append(list(best))
            bests.append(list(new_alloc))
            front = tools.sortNondominated(pop, len(pop), True)[0]
            fronts.append(front)
            if algorithm == 'nsga2' or algorithm == 'rmota':
                objectives.append(best.fitness.values)
            else:
                objectives.append((best.fitness.values[0], best.latency))
            run_number += 1
        except exceptions.NetworkDeadException:
            #print(f"Time Elapsed for iteration {i}: {time.time() - start}")
            #old_results = pd.read_sql("results", con=db)
            #min_index = old_results.index.max() + 1 if len(old_results) > 0 else 0
            cleaned_latencies = [x for x in actual_latencies if x < 99999]
            results = {
                'index': index,
                'lifetime': [sum([x[0] for x in objectives])],
                'latency': [max([x[1] for x in objectives])],
                'actual_lifetime': [sum(actual_times)],
                'actual_latency': [max(cleaned_latencies)]
                if len(cleaned_latencies) > 0 else 99999,
                'actual_lifetimes': json.dumps(actual_times),
                'actual_latencies': json.dumps(actual_latencies),
                'missed_packages': json.dumps(missed_packages),
                'algorithm': algorithm,
                'settings': json.dumps(settings),
                'ntasks': settings['nTasks'],
                'nnodes': settings['nNodes'],
                'task_topology': settings['task_creator'],
                'network_topology': settings['network_creator'],
                'front': pickle.dumps(fronts),
                'bests': pickle.dumps(bests)
            }
            df = pd.DataFrame(results, index=[index])
            df.set_index('index', inplace=True)
            #print(df)
            try:
                df.to_sql('results', db, if_exists='append')
            except Exception as e:
                df.to_csv(f"results/{index}.csv")
            print(f"{actual_times}, {actual_latencies}")
            #print(stats)
            break
Example #28
def mainNSGA(seed=None):
    toolbox.register("evaluate", evalOneMax)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate",
                     tools.mutUniformInt,
                     low=knobs_low,
                     up=knobs_up,
                     indpb=0.05)
    toolbox.register("select", tools.selNSGA2)
    random.seed(seed)

    # MU  is the size of population, total number of individuals
    #	    in each generation
    #
    # CXPB  is the probability with which two individuals
    #       are crossed
    #
    # MUTPB is the probability for mutating an individual
    #
    # NGEN  is the number of generations for which the
    #       evolution runs

    MU = 200
    CXPB = 0.8
    MUTPB = 0.8
    NGEN = 20

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    # Begin the generational process
    for gen in range(1, NGEN):
        # Vary the population
        print(" ======Beginning %i th generation======: " % gen)
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)
                del ind1.fitness.values, ind2.fitness.values
            if random.random() <= MUTPB:
                toolbox.mutate(ind1)
                toolbox.mutate(ind2)
                del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)

        fitness_list = []
        fronts_lists = tools.sortNondominated(pop,
                                              len(pop),
                                              first_front_only=True)[0]
        fronts = []

        for i in range(len(fronts_lists)):
            if fronts_lists[i] not in fronts:
                fronts.append(fronts_lists[i])
                fitness_list.append(fronts_lists[i].fitness.values)
        print " Pareto front is:"
        ga_data[gen] = {"fitness": fitness_list, "front": fronts}
        pprint.pprint(fitness_list)
        pprint.pprint(fronts)

        print("  Evaluated %i individuals\n" % len(invalid_ind))

    jsonConfigFile = "./ga_data.json"
    with open(jsonConfigFile, "w") as jFile:
        json.dump(ga_data, jFile, indent=4, separators=(',', ': '))

    print("-- End of (successful) evolution --")

    return pop
Example #29
def mainNSGA(seed=None):
    with open("evaluated_inds.log", "w") as myfile:
        myfile.write("===Evaluated genomes===:\n")

    with open("runtime_pareto.log", "w") as myfile:
        myfile.write("===runtime_pareto===:\n")

    with open("runtime_time.log", "w") as myfile:
        myfile.write("===runtime_time===:\n")

    ga_data = {}

    toolbox.register("evaluate", evalOneMax)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate",
                     tools.mutUniformInt,
                     low=knobs_low,
                     up=knobs_up,
                     indpb=0.05)
    toolbox.register("select", tools.selNSGA2)
    random.seed(seed)
    #MU = 40
    #CXPB = 0.8
    #MUTPB = 0.8
    #NGEN = 30

    MU = 80
    CXPB = 0.8
    MUTPB = 0.8
    NGEN = 30

    pop = toolbox.population(n=MU)

    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in pop if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit

    # This is just to assign the crowding distance to the individuals;
    # no actual selection is done
    pop = toolbox.select(pop, len(pop))

    # Begin the generational process
    for gen in range(1, NGEN):
        start = time.time()
        # Vary the population
        print(" ======Beginning %i th generation======: " % gen)
        offspring = tools.selTournamentDCD(pop, len(pop))
        offspring = [toolbox.clone(ind) for ind in offspring]

        for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
            if random.random() <= CXPB:
                toolbox.mate(ind1, ind2)
                del ind1.fitness.values, ind2.fitness.values
            if random.random() <= MUTPB:
                toolbox.mutate(ind1)
                toolbox.mutate(ind2)
                del ind1.fitness.values, ind2.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        # Select the next generation population
        pop = toolbox.select(pop + offspring, MU)

        fitness_list = []
        fronts_lists = tools.sortNondominated(pop,
                                              len(pop),
                                              first_front_only=True)[0]
        fronts = []

        for ind in fronts_lists:
            if ind not in fronts:
                fronts.append(ind)
                fitness_list.append(ind.fitness.values)
        print(" Pareto front is:")
        ga_data[gen] = {"fitness": fitness_list, "front": fronts}
        with open("runtime_pareto.log", "a") as myfile:
            myfile.write(str(ga_data[gen]) + "\n")
        end = time.time()
        with open("runtime_time.log", "a") as myfile:
            myfile.write(str(end - start) + "\n")
        pprint.pprint(fitness_list)
        pprint.pprint(fronts)

        print("  Evaluated %i individuals\n" % len(invalid_ind))

    jsonConfigFile = "./ga_data.json"
    with open(jsonConfigFile, "w") as jFile:
        json.dump(ga_data, jFile, indent=4, separators=(',', ': '))

    #with open(jsonConfigFile) as jFile:
    #   ga_data = json.load(jFile)
    #   pprint.pprint(ga_data)

    print("-- End of (successful) evolution --")

    return pop
Example #30
0
        print("Run", n, "out of", NRUN)
    for gen in range(NGEN_1):
        offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.1)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = toolbox.select(offspring, k=len(population))
    training.append(population)

#building training dataset
if verbose:
    print("Building first part of training dataset...")
clfin = []
tops = []
for population in training:
    pareto = tools.sortNondominated(population, len(population))
    top = pareto[0] #the actual pareto front
    #using best and worst elements to build training data
    for member in top:
        # careful to avoid duplicates
        if member not in tops:
            tops.append(member)
        if member not in clfin:
            clfin.append(member)
    elems = len(clfin)
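    # NOTE: elems counts everything accumulated in clfin so far (across all
    # populations in `training`), so the slice below keeps fewer and fewer
    # non-front members as the loop proceeds.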
    for member in tools.selBest(population, k=len(population))[:-elems]:
        if member not in clfin:
            clfin.append(member)
        
##############
# running nsga
Example #31
0
def deap_evolve(loLWL, loB, loT, loVolDisp, loCwp, hiLWL, hiB, hiT, hiVolDisp,
                hiCwp, LCB, V, popsize, maxgen):
    originalPopsize = popsize

    def evaluate(individual):
        LWL, B, T, VolDisp, Cwp = individual
        Rt = calculate_holtrop_resistance(
            LWL, B, T, LCB, VolDisp, V, Cm, Cwp
        )  # TODO check ...lwl, bwl, draft, lcb, vol_disp, velocity, r_n, f_n, c_b, c_p, c_m, c_wp, rho, g
        KM = calculate_jensen_acceleration(
            LWL,
            B,
            T,
            VolDisp,
            V,
            heading=0,
            wave_amplitude=1,
            long_position=0
        )  # TODO change this so the heading and velocity can be passed in by the user along with wave amplitude, longitudinal position etc. Also change from KM to proper variable name.

        return (Rt, KM)

    def valid(individual):
        LWL, B, T, VolDisp, Cwp = individual

        if (LWL > hiLWL or LWL < loLWL or B > hiB or B < loB
                or T > hiT or T < loT
                or VolDisp > hiVolDisp or VolDisp < loVolDisp
                or Cwp > hiCwp or Cwp < loCwp or Cwp > 1):
            return False
        elif isinstance(evaluate(individual)[0], complex):
            return False
        else:
            return True

    # TODO is this the best way to handle constraints?
    def valid_initial(individual):
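        # NOTE: as written this is identical to valid() above; presumably kept
        # separate so that initial-population constraints can diverge later.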
        LWL, B, T, VolDisp, Cwp = individual

        if (LWL > hiLWL or LWL < loLWL or B > hiB or B < loB
                or T > hiT or T < loT
                or VolDisp > hiVolDisp or VolDisp < loVolDisp
                or Cwp > hiCwp or Cwp < loCwp or Cwp > 1):
            return False
        elif isinstance(evaluate(individual)[0], complex):
            return False
        else:
            return True

    # Set individuals to minimise the function
    # TODO is this correct, should I be minimising both?
    creator.create("FitnessMinMax", base.Fitness, weights=(-1.0, +1.0))
    creator.create("Individual", list, fitness=creator.FitnessMinMax)
    # Setting random values for parameters within the correct ranges (for the first generation)
    toolbox = base.Toolbox()
    toolbox.register("attributeLWL", np.random.uniform, loLWL, hiLWL)
    toolbox.register("attributeBeam", np.random.uniform, loB, hiB)
    toolbox.register("attributeDraft", np.random.uniform, loT, hiT)
    toolbox.register("attributeVolDisp", np.random.uniform, loVolDisp,
                     hiVolDisp)
    toolbox.register("attributeCwp", np.random.uniform, loCwp, hiCwp)
    # Registering individuals and population in the toolbox
    toolbox.register(
        "individual",
        tools.initCycle,
        creator.Individual,
        (
            toolbox.attributeLWL,
            toolbox.attributeBeam,
            toolbox.attributeDraft,
            toolbox.attributeVolDisp,
            toolbox.attributeCwp,
            # TODO need to add more variables into the individual so that it can be checked if the Holtrop method is valid also where is LCB coming from?
        ),
        n=1)
    toolbox.register("population",
                     tools.initRepeat,
                     list,
                     toolbox.individual,
                     n=popsize)
    # Register evolutionary operators in the toolbox
    toolbox.register("mate",
                     tools.cxSimulatedBinaryBounded,
                     eta=0.5,
                     low=[loLWL, loB, loT, loVolDisp, loCwp],
                     up=[hiLWL, hiB, hiT, hiVolDisp, hiCwp])
    toolbox.register("mutate",
                     tools.mutPolynomialBounded,
                     eta=0.5,
                     low=[loLWL, loB, loT, loVolDisp, loCwp],
                     up=[hiLWL, hiB, hiT, hiVolDisp, hiCwp],
                     indpb=mutprob)
    toolbox.register("select", tools.selNSGA2)  ## This final value of "k" may be wrong, this is a pure guess
    toolbox.register("evaluate", evaluate)
    # Registering statistics recording in the toolbox
    stats = tools.Statistics(key=lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    # Creating first generation

    pop = toolbox.population()

    pop = list(filter(valid_initial, pop))
    #    pop = list(filter(valid2, pop)) # TODO do this valid2 function so that it filters out those with complex resistance values maybe this should be for the offspring really

    while len(pop) < originalPopsize:
        popsize = originalPopsize - len(pop)
        newpop = toolbox.population()
        pop.extend(newpop)
        pop = list(filter(valid_initial, pop))
        # print("popsize = " + popsize)

    # Initialising best_inds list
    best_inds = []
    worst_inds = []
    all_inds = []
    all_fits = []
    pareto_res = []
    pareto_stab = []
    all_indi = []
    allres = []
    allstab = []
    # Creating logbook for statistics
    logbook = tools.Logbook()

    for gen in range(maxgen):
        print("Current generation is:   ", gen)
        offspring = []

        elite = tools.selBest(pop, 1)
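        # NOTE: `elite` is selected here but never re-inserted into the
        # offspring below, so this is not true elitism as written.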
        # Select and clone next generation individuals
        offspring = map(toolbox.clone, toolbox.select(pop, popsize - 1))

        # Apply crossover and mutation on the offspring
        offspring = algorithms.varAnd(offspring, toolbox, crossprob, mutprob)

        # Filtering out individuals that violate Fn and Length/Disp constraints
        offspring = list(filter(valid, offspring))

        # Topping the offspring back up to the original population size
        while len(offspring) < originalPopsize:
            popsize = originalPopsize - len(offspring)
            toolbox.register("population",
                             tools.initRepeat,
                             list,
                             toolbox.individual,
                             n=popsize)
            newpop = toolbox.population()  # TODO how are the resistance results stored by DEAP? How can I access them for use in the validity check?
            offspring.extend(newpop)
            offspring = list(filter(valid, offspring))

        # Evaluate individuals with invalid (i.e. not-yet-computed) fitness
        # TODO I don't get what this is doing, why am I evaluating the invalid
        # ones? -- NB this has been changed from "if not" to "if"
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        # TODO what exactly am I doing with these invalid individuals? I don't
        # want to append them to the list of individuals, do I? Especially if
        # the validity check is now checking for out-of-bounds Holtrop results
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
            # Append all fitnesses from the current generation to all_fits
            all_fits.append(fit)
            # Append all individuals from the current generation to all_inds
            all_inds.append(ind)
            #print(all_fits)

        # TODO this may not be the right thing in the right place
        if gen == 0:
            df0 = pd.DataFrame(all_fits, columns=['res', 'accel'])  # This is a df of all the fitnesses
            #cdf0 = pd.DataFrame(columns=['gen', 'avg_res', 'min_res', 'avg_accel', 'min_accel']) # this is an empty dataframe to store the convergence statistics
            # need to append things to this empty df
            cdf0 = pd.DataFrame([{
                'gen': gen,
                'avg_res': np.mean(df0['res']),
                'min_res': min(df0['res']),
                'avg_accel': np.mean(df0['accel']),
                'min_accel': min(df0['accel'])
            }])
            # cdf0['gen'] = gen # adding columns to convergence df with stats computed from the fitness df
            # cdf0['avg_res'] = np.mean(df0['res'])
            # cdf0['min_res'] = min(df0['res'])
            # cdf0['avg_accel'] = np.mean(df0['accel'])
            # cdf0['min_accel'] = min(df0['accel'])
            cdf0.to_csv('../src/static/data/convergence.csv',
                        index=False)  # saving convergence df
            print('initial df is     ')
            print(cdf0)
        else:
            df1 = pd.read_csv('../src/static/data/convergence.csv')
            df2 = pd.DataFrame(all_fits, columns=['res', 'accel'])
            # TODO something is making the number of individuals saved in the CSV grow massively
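            # The growth flagged above happens because all_fits is never
            # reset, so each generation's stats cover every fitness recorded
            # so far.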
            cdf2 = pd.DataFrame([{
                'gen': gen,
                'avg_res': np.mean(df2['res']),
                'min_res': min(df2['res']),
                'avg_accel': np.mean(df2['accel']),
                'min_accel': min(df2['accel'])
            }])
            print('df to be added is     ')
            print(cdf2)
            cdf3 = pd.concat([df1, cdf2], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
            cdf3.to_csv('../src/static/data/convergence.csv', index=False)
            print('total dataframe to date is    ')
            print(cdf3)

    pareto_ = tools.sortNondominated(offspring,
                                     len(all_inds),
                                     first_front_only=True)

    paretoFits_ = list(map(
        lambda x: x.fitness.values,
        pareto_[0]))  # TODO this is returning a list index out of range error
    pareto_res_ = [x[0] for x in paretoFits_]
    pareto_stab_ = [x[1] for x in paretoFits_]
    pareto_res.append(pareto_res_)
    pareto_stab.append(pareto_stab_)

    allinds_ = offspring
    allfits_ = list(map(lambda x: x.fitness.values, allinds_))
    allres_ = [x[0] for x in allfits_]
    allstab_ = [x[1] for x in allfits_]
    allres.append(allres_)
    allstab.append(allstab_)

    # Replace population with offspring
    pop[:] = offspring[:]
    # Record statistics for the current population
    record = stats.compile(pop)
    logbook.record(gen=gen, nevals=len(invalid_ind), **record)

    #Plotting
    resFitAll = [x[0] for x in all_fits]
    stabFitAll = [x[1] for x in all_fits]
    LWLAll = [x[0] for x in all_inds]
    BeamAll = [x[1] for x in all_inds]
    DraftAll = [x[2] for x in all_inds]
    DispAll = [x[3] for x in all_inds]
    CwpAll = [x[4] for x in all_inds]
    # TODO how do I save all the other parameters?

    number_of_runs = len(all_inds)

    return record, logbook, resFitAll, stabFitAll, LWLAll, BeamAll, DraftAll, DispAll, CwpAll, number_of_runs
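
The validity handling in deap_evolve repeatedly filters and regenerates individuals until the population is back at full size. A compact sketch of that filter-and-refill pattern; valid(ind) and make_population(n) are assumed interfaces here, not names from the example above:

def refill_valid(pop, valid, make_population, target):
    """Keep only valid individuals, then top the list back up to target.

    valid(ind) -> bool and make_population(n) -> list of n fresh individuals
    are hypothetical interfaces standing in for the example's own helpers.
    """
    pop = [ind for ind in pop if valid(ind)]
    while len(pop) < target:
        fresh = make_population(target - len(pop))
        pop.extend(ind for ind in fresh if valid(ind))
    return pop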
Example #32
0
    def selNSGA2COIN(self, individuals, k, world, type_selection='R', exp_type='B', best_gmm=None, kmm=None, ga=None):

        #the difference is that I use the COIN distance instead of Crowding Distance

        """Apply NSGA-II selection operator on the *individuals*. Usually, the
        size of *individuals* will be larger than *k* because any individual
        present in *individuals* will appear in the returned list at most once.
        Having the size of *individuals* equals to *k* will have no effect other
        than sorting the population according to their front rank. The
        list returned contains references to the input *individuals*. For more
        details on the NSGA-II operator see [Deb2002]_.

        :param individuals: A list of individuals to select from.
        :param k: The number of individuals to select.
        :returns: A list of selected individuals.

        .. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
           non-dominated sorting genetic algorithm for multi-objective
           optimization: NSGA-II", 2002.
        """
        pareto_fronts = tools.sortNondominated(individuals, k)
        #print len(pareto_fronts)


        for front in pareto_fronts:
            #here is the difference
            self.assignCOINdist(front, world)
            ###self.assignCrowdingDist_PURE(front)

            # herd=[]
            # for i in front:
            #     if i not in herd:
            #         herd.append(i)

        # if only one front and Tour type is not Random (it is NSGA-II cluster diversity)
        # if len(pareto_fronts)==1:
        #     print "here"
        chosen = []
        if len(pareto_fronts) == 1 and type_selection != 'R': #modified
            #quit()
            #keep some minimum diversity

            #sort the unique front
            sorted_front = sorted(pareto_fronts[0], key=attrgetter("fitness.crowding_dist"), reverse=False)

            #make fitness list
            fitnesses = ga.GetFitness(pareto_fronts[0])
            ga.AttachFitness(pareto_fronts[0], fitnesses)
            # create a list of fitness values
            front_fit = []
            for i in pareto_fronts[0]:
                front_fit.append([i.fitness.values[0], i.fitness.values[1]])


            # if Gaussian, get the prediction
            if exp_type == 'B':
                print "gaussian mix"
                result_clusters = best_gmm.predict(front_fit)

            # if Kmeans (not implemented here)
            elif exp_type == 'C':
                print "kmeans"
                quit()


            # now, I get one list for each cluster... in order of the COIN dist
            # (the number of clusters is already available from the gmm)
            final_front = [[] for _ in xrange(best_gmm.n_components)]
            for i in xrange(len(front_fit)):
                final_front[result_clusters[i]].append(pareto_fronts[0][i])

            #here final_front is clustered and ordered by COIN dist

            if type_selection == 'C':

                #divide between the clusters
                count = 0
                i = 0
                while i < k:

                    #loop between clusters
                    for j in xrange(best_gmm.n_components):

                        #still a place to fill
                        if i < k:

                            #insert
                            #but check if it is possible
                            if len(final_front[j]) > count:
                                #insert and increment
                                chosen.append(final_front[j][count])
                                i += 1

                        #else, go away and exit loop
                        else:
                            break

                    count += 1

            elif type_selection == 'A':

                count = 0
                for j in xrange(best_gmm.n_components):

                    #insert
                    if len(final_front[j]) > 0:
                        chosen.append(final_front[j][0])
                        count += 1
                k = k - count
                for i in xrange(k):
                    chosen.append(pareto_fronts[0][i])




            #chosen.extend(herd[:])
            #k = k - len(herd)

        else:  # regular case
            chosen = list(chain(*pareto_fronts[:-1]))
            k = k - len(chosen)
            # print "..." +  str(k)

            if k > 0:
                sorted_front = sorted(pareto_fronts[-1], key=attrgetter("fitness.crowding_dist"), reverse=False)
                ###sorted_front = sorted(pareto_fronts[-1], key=attrgetter("fitness.crowding_dist"), reverse=True)
                chosen.extend(sorted_front[:k])

        return chosen, sorted_front[:k], sorted_front[k:]
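
For contrast with the COIN-distance variant above, here is a sketch of the standard NSGA-II tail step (the behaviour DEAP's own tools.selNSGA2 implements); note that vanilla NSGA-II sorts the last front by descending crowding distance, whereas the code above deliberately sorts ascending:

from itertools import chain
from operator import attrgetter
from deap import tools

def sel_nsga2_sketch(individuals, k):
    # Sort into fronts, then assign the standard crowding distance per front
    # (assignCrowdingDist is DEAP's helper in deap.tools.emo).
    fronts = tools.sortNondominated(individuals, k)
    for front in fronts:
        tools.emo.assignCrowdingDist(front)
    chosen = list(chain(*fronts[:-1]))
    remaining = k - len(chosen)
    if remaining > 0:
        # keep the least crowded individuals (largest distance) first
        last = sorted(fronts[-1], key=attrgetter("fitness.crowding_dist"),
                      reverse=True)
        chosen.extend(last[:remaining])
    return chosen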
Example #33
0
if __name__ == "__main__":
    first_echelon, second_echelon, demand, vehicles = obtain_input_data(
        instance_file)
    Solution.set_environment(vehicles, demand, first_echelon, second_echelon)
    pareto_front = obtain_pareto_front(pareto_file)
    pop = []
    for i in range(10):
        print "Run " + str(i)
        pop.append(nsga2(NGEN=200, MU=200, dump_intrval=50))
    pop_final = []
    for i in range(10):
        pop_final += pop[i]
    pop_final = set(pop_final)
    pop_fronts = tools.sortNondominated(pop_final,
                                        len(pop_final),
                                        first_front_only=True)
    pop_front = pop_fronts[0]
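    # Python's list.sort is stable, so sorting by objectives 3, 2, 1, 0 in
    # turn leaves the front in lexicographic order of its fitness values.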
    for i in range(4):
        pop_front = sorted(pop_front,
                           key=lambda ind: ind.fitness.values[3 - i])
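    # `dominates` below is assumed to be a user-defined Pareto-dominance
    # predicate over fitness tuples (DEAP itself exposes dominance only as
    # the Fitness.dominates method).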
    dom_count = 0
    pareto_error = 0
    for sol in pop_front:
        if sol not in pareto_front:
            dom_count += 1
            for par_sol in pareto_front:
                if dominates(sol.fitness.values, par_sol.fitness.values):
                    pareto_error += 1
    with open("tiny_comparison_test.txt", 'w') as file:
        file.write("pop front\n")