Example #1
def optimize(results, dir):
    pg.mp_bfe.init_pool(50)
    prob = pg.problem(MotGunOptimizationProblem(dir))
    bfe = pg.bfe(pg.mp_bfe())
    nsga2 = pg.nsga2()
    nsga2.set_bfe(bfe)
    algo = pg.algorithm(nsga2)
    pop = pg.population(prob=prob, size=256, b=bfe)
    iteration = 1
    while True:
        print(f"\033[31mITERATION: {iteration}\033[m")
        plt.title(f'Iteration {iteration}')
        plt.xlabel('Emittance (nm)')
        plt.ylabel('Charge (fC)')
        pg.plot_non_dominated_fronts(pop.get_f())
        plt.savefig(results / f'{iteration}.png', dpi=300)
        plt.clf()
        assert len(pop.get_x()) == len(pop.get_f())
        with open(results / f'iteration_{iteration}.txt', 'w+') as f:
            f.write(
                '[Mot Z Offset (mm), Phase (deg)] -> [Emittance 4D sqrt (nm), Charge (fC)]\n'
            )
            for i in range(len(pop.get_x())):
                f.write('{} -> {}\n'.format(pop.get_x()[i], pop.get_f()[i]))
        pop = algo.evolve(pop)
        iteration += 1
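
This snippet assumes a user-defined problem class MotGunOptimizationProblem. As a hedged sketch (placeholder objectives and bounds, not the author's actual problem), a two-objective pygmo UDP only needs to expose fitness(), get_bounds() and get_nobj():

import pygmo as pg

class TwoObjectiveUDP:
    """Hypothetical stand-in for MotGunOptimizationProblem."""

    def fitness(self, x):
        # Placeholder objectives standing in for emittance and charge.
        return [x[0] ** 2 + x[1] ** 2, (x[0] - 1.0) ** 2 + x[1] ** 2]

    def get_bounds(self):
        # Lower and upper bounds for the two decision variables (illustrative values only).
        return ([-1.0, -180.0], [1.0, 180.0])

    def get_nobj(self):
        return 2
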
Example #2
def nsga2(ppn):
    print('*** NSGA-II ALGORITHM ***')

    # INSTANTIATE A PYGMO PROBLEM, CONSTRUCTING IT FROM A UDP
    inittime = perf_counter()
    prob = pg.problem(PPNOProblem(ppn))
    generations = trials = nochanges = 0
    best_f = best_x = None

    # INSTANTIATE THE PYGMO ALGORITHM NSGA-II
    algo = pg.algorithm(pg.nsga2(gen=GENERATIONS_PER_TRIAL))

    # INSTANTIATE A POPULATION
    pop = pg.population(prob, size=POPULATION_SIZE)
    while True:

        # RUN THE EVOLUTION
        pop = algo.evolve(pop)
        trials += 1
        generations += GENERATIONS_PER_TRIAL

        # EXTRACT RESULTS AND SEARCH THE BEST
        fits, vectors = pop.get_f(), pop.get_x()
        new_f = new_x = None
        for fit, vector in zip(fits, vectors):

            # VALID SOLUTION
            if fit[1] <= 0:
                if new_f is None or fit[0] < new_f[0]:
                    new_f = fit
                    new_x = vector
        if new_f is not None:
            if best_f is None:
                best_f = new_f
                best_x = new_x
            elif new_f[0] < best_f[0]:
                best_f = new_f
                best_x = new_x
                nochanges = 0
            else:
                nochanges += 1
        if best_f is not None:
            print('Generations: %i ' % (generations), end='')
            print('Cost: %.2f Pressure deficit: %0.3f ' %
                  (best_f[0], best_f[1]))
        if (perf_counter() - inittime) >= MAX_TIME:
            print('Maximum evolution time was reached.')
            break
        elif trials >= MAX_TRIALS:
            print('Maximum number of trials was reached.')
            break
        elif nochanges >= MAX_NO_CHANGES:
            print('Objective function value was repeated %i times.' %
                  (nochanges))
            break
    return (best_f, best_x)
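
The loop above picks the best valid individual by hand. A more compact helper doing the same selection (a sketch, not part of the original code) could be:

def best_feasible(fits, vectors):
    """Return the point with the lowest first objective among those whose
    second objective (pressure deficit) is non-positive, or (None, None)."""
    feasible = [i for i in range(len(fits)) if fits[i][1] <= 0]
    if not feasible:
        return None, None
    best = min(feasible, key=lambda i: fits[i][0])
    return fits[best], vectors[best]
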
Example #3
def main(argv):
    help_message = 'test.py <inputfile> <outputfile>'
    if len(argv) < 2:
        print(help_message)
        sys.exit(2)
    else:
        inputfile = argv[0]
        outputfile = argv[1]

    print("Reading data from " + inputfile)
    # Setting up the user defined problem in pygmo
    prob = pg.problem(TestOptimizer(inputfile))
    solution_size = 8
    # Start with an initial population of solution_size candidate solutions
    pop = pg.population(prob, size=solution_size)
    # Set the algorithm to non-dominated sorting GA
    algo = pg.algorithm(pg.nsga2(gen=40))
    # Optimize
    pop = algo.evolve(pop)

    # This returns a set of optimal vectors and corresponding fitness values
    fits, vectors = pop.get_f(), pop.get_x()

    print("Writing output to " + outputfile)
    jsonfile = pygeoj.load(filepath=inputfile)
    num_districts = len(jsonfile)
    counter = 0
    for feature in jsonfile:
        # Collect every solution's value for this district in one dict,
        # otherwise each assignment would overwrite the previous one.
        district_properties = {}
        for sol in range(solution_size):
            district_properties["sol" + str(sol)] = str(vectors[sol][counter])
        feature.properties = district_properties
        counter += 1
    # Save output
    jsonfile.save(outputfile)
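
If only the non-dominated subset of the evolved population should be written out, pygmo's fast non-dominated sorting can filter the solutions first (a sketch building on the fits and vectors extracted above):

ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(fits)
front_vectors = [vectors[i] for i in ndf[0]]
front_fits = [fits[i] for i in ndf[0]]
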
Example #4
    def _optimize(self):
        """
        Optimize the portfolio model's parameters.

        This is the MULTI-OBJECTIVE version that uses the NSGA-2 optimizer.

        :return: None.
        """
        class Problem:
            """
            Wrapper for the Model-class that connects it with
            the optimizer. This is necessary because the optimizer
            creates a deep-copy of the problem-object passed to it,
            so it does not work when passing the Model-object directly.
            """
            def __init__(self, model):
                """
                :param model: Object-instance of the Model-class.
                """
                self.model = model

            def fitness(self, params):
                """Calculate and return the fitness for the given parameters."""
                return self.model.fitness(params=params)

            def get_bounds(self):
                """Get boundaries of the search-space."""
                return self.model.bounds

            def get_nobj(self):
                """Get number of fitness-objectives."""
                return self.model.num_objectives

        # Create a problem-instance.
        problem = Problem(model=self)

        # Create an NSGA-2 Multi-Objective optimizer.
        optimizer = pg.algorithm(pg.nsga2(gen=500))

        # Create a population of candidate solutions.
        population = pg.population(prob=problem, size=200)

        # Optimize the problem.
        population = optimizer.evolve(population)

        # Save the best-found parameters and fitnesses for later use.
        self.best_parameters = population.get_x()
        self.best_fitness = population.get_f()

        # Sorted index for the fitnesses.
        idx_sort = np.argsort(self.best_fitness[:, 0])

        # Sort the best-found parameters and fitnesses.
        self.best_parameters = self.best_parameters[idx_sort]
        self.best_fitness = self.best_fitness[idx_sort]
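
The method above keeps the whole population sorted by the first objective. A small helper (a sketch, not part of the original class) can instead restrict the stored results to the first non-dominated front:

import pygmo as pg

def pareto_subset(parameters, fitnesses):
    """Return only the first non-dominated front of the arrays produced by
    population.get_x() and population.get_f()."""
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(fitnesses)
    return parameters[ndf[0]], fitnesses[ndf[0]]
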
Example #5
    def __init__(
            self, simulator, start_date, end_date,
            generations=100, crossover=0.8, mutation=0.1,
            num_montecarlo_runs=50):


        # create optimization problem
        prob = pygmo.problem(self.ProblemDefinition(
            simulator,
            start_date=start_date,
            end_date=end_date,
            num_montecarlo_runs=num_montecarlo_runs))
        # create population
        self.pop = pygmo.population(prob, size=20 * 4)
        # select algorithm
        self.algo = pygmo.algorithm(pygmo.nsga2(gen=generations, cr=crossover, m=mutation))
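
The constructor only builds the problem, population and algorithm; the method that actually runs the evolution is implied but not shown. A hedged sketch of such a method, reusing the attributes set above:

    def run(self):
        # Evolve the stored population with the stored algorithm and return
        # the resulting objective values and decision vectors.
        self.pop = self.algo.evolve(self.pop)
        return self.pop.get_f(), self.pop.get_x()
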
Example #6
    def run(self):
        """
        Run the optimisation process using PSO algorithm.
        :param converge_info: optional run the optimisation with convergence information
        :param converge_info: optional run the optimisation with population information
        :return:
        """
        print("Start the optimisation process...")

        if self.algorithm_type == 'nsga-2':
            uda = pg.nsga2(gen=self.generation)
        elif self.algorithm_type == 'moea-d':
            uda = pg.moead(gen=self.generation)
        elif self.algorithm_type == 'ihs':
            uda = pg.ihs(gen=self.generation)

        algo = pg.algorithm(uda)
        pop = pg.population(self.problem, self.pop_size)
        pop = algo.evolve(pop)
        self.pop = pop
Example #7
def make_two_obj_networks(
    inputs: dict, thetas: list, n_sensors: list, gen: int, population_size: int
) -> dict:
    """Generate networks optimised for two objectives for a range of theta values
    (coverage distances) and numbers of sensors. Networks are generated with the
    NSGA2 algorithm.

    Parameters
    ----------
    inputs : dict
        Output area weights and locations as generated by get_two_obj_inputs
    thetas : list
        Theta (coverage distance) values to generate networks for
    n_sensors : list
        Generate networks with this many sensors
    gen : int
        Number of generations (iterations) to run the optimisation for
    population_size : int
        Number of candidate networks in each generation

    Returns
    -------
    dict
        Optimised networks and coverage scores
    """
    results = {}
    for t in thetas:
        results[f"theta{t}"] = {}
        for ns in n_sensors:
            print("theta", t, ", n_sensors", ns)
            prob = build_problem(inputs, n_sensors=ns, theta=t)
            pop = run_problem(
                prob,
                uda=pg.nsga2(gen=gen),
                population_size=population_size,
                verbosity=1,
            )
            results[f"theta{t}"][f"{ns}sensors"] = pop

    return results
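
build_problem and run_problem are project helpers assumed by this function. A minimal sketch of what a run_problem-style helper typically does with the UDA passed in (an assumption, not the project's actual implementation):

import pygmo as pg

def run_problem_sketch(prob, uda, population_size, verbosity=1):
    # Wrap the user-defined algorithm, build a population for the problem,
    # and evolve it once.
    algo = pg.algorithm(uda)
    algo.set_verbosity(verbosity)
    pop = pg.population(prob, size=population_size)
    return algo.evolve(pop)
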
Example #8
def get_optimization_algorithm(randseed):
    '''
    Returns an optimisation algorithm
    '''
    opt_alg = None
    if (cfg.MOG_ALG == "nsga2"):
        # cr: crossover probability, m: mutation probability
        # eta_c: distribution index for crossover, eta_m: distribution index for mutation
        opt_alg = pyg.algorithm(
            pyg.nsga2(gen=1,
                      cr=0.925,
                      m=0.05,
                      eta_c=10,
                      eta_m=50,
                      seed=randseed))
    elif (cfg.MOG_ALG == "moead"):
        opt_alg = pyg.algorithm(
            pyg.moead(gen=1,
                      weight_generation="grid",
                      decomposition="tchebycheff",
                      neighbours=5,
                      CR=1,
                      F=0.5,
                      eta_m=20,
                      realb=0.9,
                      limit=2,
                      preserve_diversity=True))
    elif (cfg.MOG_ALG == "nspso"):
        opt_alg = pyg.algorithm(
            pyg.nspso(gen=1,
                      omega=0.6,
                      c1=0.01,
                      c2=0.5,
                      chi=0.5,
                      v_coeff=0.5,
                      leader_selection_range=2,
                      diversity_mechanism="crowding distance",
                      memory=False))
    opt_alg.set_verbosity(1)
    return opt_alg
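
A hedged usage sketch for the factory above, exercised on pygmo's built-in ZDT1 test problem in place of the project-specific one (and assuming cfg.MOG_ALG is set to "nsga2"):

algo = get_optimization_algorithm(randseed=42)
prob = pyg.problem(pyg.zdt(prob_id=1))
# NSGA-II needs a population size that is a multiple of 4
pop = pyg.population(prob, size=40)
pop = algo.evolve(pop)
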
Example #9
    def __init__(self,
                 problem,
                 surrogate=None,
                 size=100,
                 generation=10,
                 cr=0.9,
                 eta_c=10,
                 m=0.1,
                 eta_m=10,
                 seed=None,
                 *args,
                 **kwargs):

        # Set seed
        seed = np.random.randint(1e8) if seed is None else seed

        a = pg.algorithm(
            pg.nsga2(gen=generation,
                     cr=cr,
                     eta_c=eta_c,
                     m=m,
                     eta_m=eta_m,
                     seed=seed))

        if surrogate is not None:
            prob = construct_moead_problem_with_surrogate(problem, surrogate)
        else:
            prob = problem

        pop = pg.population(prob=pg.problem(prob), size=size, seed=seed)

        self.pop_size = size
        self.problem = prob
        self.population = pop
        self.algorithm = a

        return None
Example #10
def main():
    generation = 0
    show = len(sys.argv) > 2 and sys.argv[2] == 'show'
    file_base_name = sys.argv[1]
    plotdata = plotDataOut()
    save = save_data()
    prob = problem(psm.problem_susmicro())
    pop = population(prob,  size = 60, seed = 3453412)
    specific_algo = nsga2(gen = 1, seed = 3453213)
#    pop = population(prob, size = 210, seed = 3453412)
#    algo = algorithm(moead(gen = 20)) # 250
    algo = algorithm(specific_algo)
    initial_inputs = pop.get_x()
    initial_outputs = pop.get_f()
    ndf, dl, dc, ndl = fast_non_dominated_sorting(initial_outputs)
    save.initial_ndf = copy.deepcopy(ndf)
    save.initial_inputs = copy.deepcopy(initial_inputs)
    save.initial_outputs = copy.deepcopy(initial_outputs)

    start = timer()
    for generation in range(0, 50):
        save.pop = copy.deepcopy(pop)
        ################# initial pop ##################
        #get a list of the non-dominated front of the first set (random points)
        ndf, dl, dc, ndl = fast_non_dominated_sorting(pop.get_f())
        ndf_x = []
        for val in ndf[0]:
            ndf_x.append(pop.get_x()[val])
        save.initial_surr_ndf_x = copy.deepcopy(ndf_x)
        print("evaluate the initial ndf in surrogate")
        inputs = pop.get_x()
        outputs = pop.get_f()
        save.inputs = copy.deepcopy(inputs)
        save.outputs = copy.deepcopy(outputs)
        data_file = "./pop_"+str(generation).zfill(4)+".pickle"
        save.pop = copy.deepcopy(pop)
        try:
            with open(data_file, "wb") as pickle_file:
                pickle.dump(save, pickle_file)
        except Exception:
            print("error opening '" + data_file + "' pickle file")
            exit()
        pop = algo.evolve(pop)
    end = timer()


    ndf, dl, dc, ndl = fast_non_dominated_sorting(pop.get_f())
    ndf_x = []
    for val in ndf[0]:
        ndf_x.append(pop.get_x()[val])
    save.final_surr_ndf_x = copy.deepcopy(ndf_x)
    final_inputs = pop.get_x()
    final_outputs = pop.get_f()
    save.inputs = copy.deepcopy(final_inputs)
    save.outputs = copy.deepcopy(final_outputs)

    plotdata.final_inputs = copy.deepcopy(final_inputs)
    plotdata.final_outputs = copy.deepcopy(final_outputs)
    with open(file_base_name+"_plot_data.pickle","wb") as f:
        f.write(pickle.dumps(plotdata))
    # Plot
    fig, ax = plt.subplots()
#    x_vals_f = [(row[0] + row[1]) / 2.0 for row in final_outputs]
    x_vals_f = [row[0] for row in final_outputs]#[dist(row[0], row[1]) for row in final_outputs]
    y_vals_f = [row[1] for row in final_outputs]#[row[2] * 2.0 for row in final_outputs]
    ax.scatter(x_vals_f, y_vals_f, c="purple", alpha=0.6, label='Final ndf surrogate model')
#    x_vals_i = [(row[0] + row[1])/2.0 for row in initial_outputs]
    x_vals_i = [row[0] for row in initial_outputs]#[dist(row[0], row[1]) for row in initial_outputs]
    y_vals_i = [row[1] for row in initial_outputs]#[row[2] * 2.0 for row in initial_outputs]
    ax.scatter(x_vals_i, y_vals_i, c="green", alpha=0.6, label='initial evaluation')
    ax.set_title('Initial to Surrogate population')
    ax.set_ylabel('ppf')
    ax.set_xlabel('1/radius')
    ax.legend(loc=1)
    #fig.savefig('surrogate-wims.png')
    #fig.show()
    # if you are debugging probably just show to screen
    if(show):
        print("\a")
        plt.show()
        fig.savefig(file_base_name+'-graph.svg')
    else:
        # if not debugging save the figure
        fig.savefig(file_base_name+'-graph.png')
        fig.savefig(file_base_name+'-graph.svg')

    # Plot only NDF
    #fig, ax = plt.subplots()
    initials = [[a,b] for a,b in zip(x_vals_i,y_vals_i)]
    finals = [[a,b] for a,b in zip(x_vals_f,y_vals_f)]
    plt.ylim([0,6])
    plt.xlim([0,0.6])
    ax = plot_non_dominated_fronts(initials, marker='o')
    plt.ylim([0,6])
    plt.xlim([0,0.6])
    ax = plot_non_dominated_fronts(finals, marker='x',)

    ax.set_title('Surrogate NDF to Serpent NDF')
    ax.set_ylabel('ppf')
    ax.set_xlabel('1/radius (relative)')
#    ax.legend(loc=1)
    if(show):
        print("\a")
        plt.show()
        fig.savefig(file_base_name+'-ndf_only.svg')
    else:
        # if not debugging save the figure
        fig.savefig(file_base_name+'-ndf_only.png')
        fig.savefig(file_base_name+'-ndf_only.svg')


    ndf_simplified = [] # copy.deepcopy(ndf[0])
    for idx in range(len(pop.get_x())):#ndf[0]:
        x = pop.get_x()[idx]
        new_row = [aw_round(val) for val in x]
        ndf_simplified.append(new_row)
    #ndf_simplified = list(set(ndf_simplified))
    print(str(len(ndf_simplified)))
    ndf_no_repeat = np.unique(ndf_simplified, axis=0)
    print("data from populations:"+str(len(pop))+" "+str(len(ndf_no_repeat)))
    print(str(ndf_no_repeat))

    line = "input_test_list = ["
    # get whole final population and print it out...
    for vals in ndf_no_repeat:
        line +="["
        for v in vals:
            line += str(v)+", "
        line += "],\n"
    line += "]\n"
    print(line)
    ndf, dl, dc, ndl = fast_non_dominated_sorting(pop.get_f())
    # print the fitness values of the final population members
    for idx in range(len(pop.get_f())):
        f = pop.get_f()[idx]
        print("f: " + str(f))
    print("NDF len:" + str(len(ndf_no_repeat)))
    print("Execution time for evolution: " + str(end-start))
Example #11
def make_multi_obj_networks(
    lad20cd: str,
    population_groups: dict,
    objectives: list,
    thetas: list,
    n_sensors: list,
    gen: int,
    population_size: int,
    save_path: Path,
    workplace_name: str = "workplace",
    include_oa_coverage: bool = True,
):
    """Generate networks optimised for multiple objectives (all age groups defined in
    `population_groups` and place of work), for a range of theta values
    (coverage distances) and numbers of sensors. Networks are generated with the
    NSGA2 algorithm.

    Parameters
    ----------
    lad20cd : str
        Local authority code to generate results for
    population_groups : dict
        Parameters for residential population objectives
    objectives : list
        Names of the two objectives to include. Must be length two and names must match
        an entry in `population_groups` or `workplace_name`
    thetas : list
        Theta (coverage distance) values to generate networks for
    n_sensors : list
        Generate networks with this many sensors
    gen : int
        Number of generations (iterations) to run the optimisation for
    population_size : int
        Number of candidate networks in each generation
    save_path : Path
        Where to save networks
    workplace_name : str, optional
        Name of the place of work objective, by default "workplace"
    include_oa_coverage : bool, optional
        Whether to also compute output area coverage for each solution,
        by default True
    """
    inputs = [
        get_multi_obj_inputs(
            lad20cd,
            obj,
            population_groups,
            workplace_name,
        ) for obj in objectives
    ]

    for inp_idx, inp in enumerate(tqdm(inputs, desc="objectives")):
        for t in tqdm(thetas, desc="theta"):
            for ns in tqdm(n_sensors, desc="n_sensors"):
                prob = build_problem(inp, n_sensors=ns, theta=t)
                pop = run_problem(
                    prob,
                    uda=pg.nsga2(gen=gen),
                    population_size=population_size,
                    verbosity=0,
                )
                net_name = f"theta_{t}_nsensors_{ns}_objs_{inp_idx}"
                net_path = Path(save_path, net_name + ".pkl")
                with open(net_path, "wb") as f:
                    pickle.dump(
                        {
                            "lad20cd": lad20cd,
                            "objectives": list(inp["oa_weight"].keys()),
                            "theta": t,
                            "n_sensors": ns,
                            "population": pop,
                        },
                        f,
                    )
                scores, solutions = extract_all(pop)
                scores = -scores
                if include_oa_coverage:
                    oa_coverage = get_pop_oa_coverage(solutions, inp, lad20cd,
                                                      t)
                else:
                    oa_coverage = np.array([])
                net_path = Path(save_path, net_name + ".json")
                with open(net_path, "w") as f:
                    json.dump(
                        {
                            "lad20cd": lad20cd,
                            "objectives": list(inp["oa_weight"].keys()),
                            "theta": t,
                            "n_sensors": ns,
                            "oa11cd": inp["oa11cd"].tolist(),
                            "sensors": solutions.astype(int).tolist(),
                            "obj_coverage": scores.tolist(),
                            "oa_coverage": oa_coverage.tolist(),
                        },
                        f,
                    )
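
extract_all and get_pop_oa_coverage are helpers assumed by this function. A plausible sketch of an extract_all-style helper (an assumption about its behaviour, inferred from how its outputs are used above):

import numpy as np

def extract_all_sketch(pop):
    # Objective values and decision vectors of every individual as arrays.
    return np.asarray(pop.get_f()), np.asarray(pop.get_x())
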
Example #12
    def optimize(self,
                 niter=500,
                 minimizer_kwargs=None,
                 nmin=1000,
                 kforce=100.,
                 gradient=False,
                 print_fun=None,
                 popsize=50,
                 stepsize=0.05,
                 optimizer="evolution",
                 seed=None):

        self.kforce = kforce

        if seed is None:
            seed = np.random.randint(999999)
        else:
            seed = int(seed)
        np.random.seed(seed)
        pygmo.set_global_rng_seed(seed=seed)
        self.set_x0()

        bounds = gist_bounds(self.xmin, self.xmax, Tconst=True)
        min_bounds = bounds.get_bounds_for_minimizer()

        if optimizer == "evolution":
            ### This works , because pygmo makes deepcopies of this object
            ### in order to remain "thread safe" during all following operations
            prob = pygmo.problem(self)
            if self.decomp:
                if (popsize % 4) != 0:
                    popsize = (popsize // 4) * 4
                if popsize < 5:
                    popsize = 8

            pop = pygmo.population(prob=prob, size=popsize)
            if self.decomp:
                ### For NSGA2, popsize must be >4 and also
                ### a multiple of four.
                algo = pygmo.algorithm(pygmo.nsga2(gen=niter))
                #algo = pygmo.algorithm(pygmo.moead(gen=niter))
            else:
                algo = pygmo.algorithm(pygmo.sade(gen=niter))
            if self.verbose:
                algo.set_verbosity(1)
            pop = algo.evolve(pop)

            for x in pop.get_x():
                print_fun(x)
                print_fun.flush()

        elif optimizer == "brute":
            self.anal_grad = False
            self.anal_boundary = False
            N_dim = self._x0.size
            niter_count = np.zeros(self._x0.size, dtype=int)

            for i in range(self._x0.size):
                self._x0[i] = min_bounds[i][0]
                _diff = min_bounds[i][1] - min_bounds[i][0]
                niter_count[i] = int(_diff / stepsize)

            prop = propagator(self._x0, N_dim, stepsize, niter_count)
            stop = False
            _stop = False

            if nmin > 0:
                self.anal_grad = True
                self.anal_boundary = False
                prob = pygmo.problem(self)
                pop = pygmo.population(prob=prob, size=1)
                algo = pygmo.algorithm(pygmo.nlopt("slsqp"))
                algo.maxeval = nmin
                if self.verbose:
                    algo.set_verbosity(1)

            while (not stop):
                if nmin > 0:
                    self.anal_grad = gradient

                    if self.anal_boundary:
                        min_bounds = None
                        bounds = None

                    pop.set_x(0, self._x0)
                    pop = algo.evolve(pop)
                    x = pop.get_x()[0]

                else:
                    x = self._x0

                if print_fun is not None:
                    print_fun(x)
                    print_fun.flush()

                ### propagate self._x0
                prop.add()
                if _stop:
                    stop = True
                _stop = prop.are_we_done()

        elif optimizer == "basinhopping":
            prob = pygmo.problem(self)
            pop = pygmo.population(prob=prob, size=popsize)
            algo = pygmo.algorithm(uda=pygmo.mbh(
                pygmo.nlopt("slsqp"), stop=100, perturb=self.steps * 0.1))
            if self.verbose:
                algo.set_verbosity(1)
            pop = algo.evolve(pop)

            for x in pop.get_x():
                print_fun(x)
                print_fun.flush()

        else:
            raise ValueError("Optimizer %s is not known." % optimizer)
Example #13
import pygmo as po
import numpy as np
import myUDPnodes
import time

generations = 400
sizePop = 36
#pathsave    = '/home/oscar/Documents/PythonProjects/kuramotoAO/optimizationResults/'
pathsave = '/Users/p277634/python/kaoModel/optimResult/'
filenameTXT = 'NSGA2nodes.txt'
filenameNPZ = 'NSGA2nodes.npz'

print('Running: ', filenameNPZ[:-4])

# algorithm
algo = po.algorithm(po.nsga2(gen=generations))
algo.set_verbosity(1)
# problem
prob = po.problem(myUDPnodes.KAOnodesMultiObj())
# population
pop = po.population(prob=prob, size=sizePop)
# evolution
start = time.time()
popE = algo.evolve(pop)
print('time evolution: ', time.time() - start)

# save TXT fie with general description of the optimization
bestFstr = 'ideal found fit: ' + str(po.ideal(
    popE.get_f())) + '; best fit possible: -1'
#bestChamp  = 'champion decission vector'
#bestXstr  = 'velocity: ' + str(popE.champion_x[0]) + ', kL:' + str(popE.champion_x[1]),', kG: ' + str(popE.champion_x[2])
Example #14
# To account for the fact that the zero index array in util functions
# are actually the 1st feval
working_fevals = max_fevals - 1
pop_size = 24
seed = 33

# For each problem in the problem suite
for i in range(problem_number):
    problem_function = getattr(pg.problems, problem_name)
    if (problem_name == "dtlz"):
        problem = pg.problem(problem_function(i + 1, dim=dim, fdim=fdim))
    else:
        problem = pg.problem(problem_function(i + 1, param=dim))
    algo_moead = pg.algorithm(pg.moead(gen=1))
    algo_nsga2 = pg.algorithm(pg.nsga2(gen=1))

    # Hypervolume calculations, mean taken over n number of times
    hv_rbfmopt_plot = calculate_mean_rbf(n, max_fevals, working_fevals, seed,
                                         problem, cycle)
    hv_moead_plot = calculate_mean_pyg(n, algo_moead, working_fevals, pop_size,
                                       seed, problem)
    hv_nsga2_plot = calculate_mean_pyg(n, algo_nsga2, working_fevals, pop_size,
                                       seed, problem)
    fevals_plot = range(0, max_fevals)

    save_values(
        'storedvalues/rbfmopt_hv_' + problem.get_name() + '_fevals' +
        str(max_fevals) + '.txt', hv_rbfmopt_plot.tolist())
    save_values(
        'storedvalues/moead_hv_' + problem.get_name() + '_fevals' +
Example #15
def NSGA2_pygmo(model, fevals, lb, ub, cf=None):
    """Finds the estimated Pareto front of a GPy model using NSGA2 [1]_.

    Parameters
    ----------
    model : GPy.models.gp_regression.GPRegression
        GPy regression model on which to find the Pareto front of its mean
        prediction and standard deviation.
    fevals : int
        Maximum number of times to evaluate a location using the model.
    lb : (D, ) numpy.ndarray
        Lower bound box constraint on D
    ub : (D, ) numpy.ndarray
        Upper bound box constraint on D
    cf : callable, optional
        Constraint function that returns True if it is called with a
        valid decision vector, else False.

    Returns
    -------
    X_front : (F, D) numpy.ndarray
        The F D-dimensional locations on the estimated Pareto front.
    musigma_front : (F, 2) numpy.ndarray
        The corresponding mean response and standard deviation of the locations
        on the front such that a point X_front[i, :] has a mean prediction
        musigma_front[i, 0] and standard deviation musigma_front[i, 1].

    Notes
    -----
    NSGA2 [1]_ discards locations on the Pareto front if the size of the front
    is greater than that of the population size. We counteract this by storing
    every location and its corresponding mean and standard deviation and
    calculate the Pareto front from this - thereby making the most of every
    GP model evaluation.

    References
    ----------
    .. [1] Kalyanmoy Deb, Amrit Pratap, Sameer Agarwal, and T. Meyarivan.
       A fast and elitist multiobjective genetic algorithm: NSGA-II.
       IEEE Transactions on Evolutionary Computation 6, 2 (2002), 182–197.
    """
    # internal class for the pygmo optimiser
    class GPY_WRAPPER(object):
        def __init__(self, model, lb, ub, cf, evals):
            # model = GPy model
            # lb = np.array of lower bounds on X
            # ub = np.array of upper bounds on X
            # cf = callable constraint function
            # evals = total evaluations to be carried out
            self.model = model
            self.lb = lb
            self.ub = ub
            self.nd = lb.size
            self.got_cf = cf is not None
            self.cf = cf
            self.i = 0  # evaluation pointer

        def get_bounds(self):
            return (self.lb, self.ub)

        def get_nobj(self):
            return 2

        def fitness(self, X):
            X = np.atleast_2d(X)
            f = model_fitness(X, self.model, self.cf, self.got_cf,
                              self.i, self.i + X.shape[0])
            self.i += X.shape[0]
            return f

    # fitness function for the optimiser
    def model_fitness(X, model, cf, got_cf, start_slice, end_slice):
        valid = True

        # if we select a location that violates the constraint,
        # ensure it cannot dominate anything by having its fitness values
        # maximally bad (i.e. set to infinity)
        if got_cf:
            if not cf(X):
                f = [np.inf, np.inf]
                valid = False

        if valid:
            mu, sigmaSQR = model.predict(X, full_cov=False)
            # note the negated standard deviation (sqrt of sigmaSQR) here:
            # NSGA2 minimises, so maximising uncertainty means minimising its negative
            f = [mu.flat[0], -np.sqrt(sigmaSQR).flat[0]]

        # store every point ever evaluated
        model_fitness.X[start_slice:end_slice, :] = X
        model_fitness.Y[start_slice:end_slice, :] = f

        return f

    # get the problem dimensionality
    D = lb.size

    # NSGA-II settings
    POPSIZE = D * 100
    N_GENS = int(np.ceil(fevals / POPSIZE))
    TOTAL_EVALUATIONS = POPSIZE * N_GENS

    nsga2 = pg.algorithm(pg.nsga2(gen=1,
                                  cr=0.8,       # cross-over probability.
                                  eta_c=20.0,   # distribution index (cr)
                                  m=1 / D,        # mutation rate
                                  eta_m=20.0))  # distribution index (m)

    # preallocate the storage of every location and fitness to be evaluated
    model_fitness.X = np.zeros((TOTAL_EVALUATIONS, D))
    model_fitness.Y = np.zeros((TOTAL_EVALUATIONS, 2))

    # problem instance
    gpy_problem = GPY_WRAPPER(model, lb, ub, cf, TOTAL_EVALUATIONS)
    problem = pg.problem(gpy_problem)

    # initialise the population
    population = pg.population(problem, size=POPSIZE)

    # evolve the population
    for i in range(N_GENS):
        population = nsga2.evolve(population)

    # indices of non-dominated points across the entire NSGA-II run
    front_inds = pg.non_dominated_front_2d(model_fitness.Y)

    X_front = model_fitness.X[front_inds, :]
    musigma_front = model_fitness.Y[front_inds, :]

    # convert the standard deviations back to positive values; nsga2 minimises
    # the negative standard deviation (i.e. maximises the standard deviation)
    musigma_front[:, 1] *= -1

    return X_front, musigma_front
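
A hedged usage sketch for NSGA2_pygmo on a toy 1-D GPy model (synthetic data and illustrative values only):

import numpy as np
import GPy

X = np.random.uniform(0.0, 1.0, size=(10, 1))
Y = np.sin(6.0 * X) + 0.05 * np.random.randn(10, 1)
model = GPy.models.GPRegression(X, Y)

lb, ub = np.array([0.0]), np.array([1.0])
X_front, musigma_front = NSGA2_pygmo(model, fevals=500, lb=lb, ub=ub)
print(X_front.shape, musigma_front.shape)
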
Example #16
def ecm_nsga2(data,
              k=2,
              popsize=40,
              gen=500,
              cr=0.95,
              eta_c=10,
              m=0.01,
              eta_m=10):
    '''Entropy c-means - NSGA-II Clustering

    Parameters
    ----------

    data : array, shape (n_data_points, n_features)
        The data array.

    k : int, default: 2
        The number of clusters.

    popsize : int, default: 40
        The population size.

    gen : int, default: 500
        The number of generations to be iterated.

    cr : float, default: 0.95
        NSGA-II parameter for crossover probability.

    eta_c : float, default: 10
        NSGA-II parameter for crossover distribution index.

    m : float, default: 0.01
        NSGA-II parameter for mutation probability.

    eta_m : float, default: 10
        NSGA-II parameter for mutation distribution index.

    Returns
    -------

    vectors: array, shape (popsize, k * n_features)
        The resulting cluster centers, flattened to 1 dimensional arrays.

    pareto_front: array, shape (popsize, 2)
        The pareto front of mapped solutions

    '''

    # set up the problem
    spobj = ecm(k * data.shape[1])
    spobj.set_data(data)
    prob = pg.problem(spobj)

    # create population
    pop = pg.population(prob, popsize)
    # select the MO algorithm
    algo = pg.algorithm(
        pg.nsga2(
            gen=gen,
            cr=cr,
            eta_c=eta_c,
            m=m,
            eta_m=eta_m,
        ))
    # run optimization
    pop = algo.evolve(pop)

    # extract results
    pareto_front, vectors = pop.get_f(), pop.get_x()
    # sort the Pareto front and solutions
    sorted_idxs = np.argsort(pareto_front[:, 0])
    pareto_front = pareto_front[sorted_idxs, :]
    vectors = vectors[sorted_idxs, :]

    return vectors, pareto_front
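
A hedged usage sketch for ecm_nsga2 on a small random data set (assumes the ecm UDP used inside the function is importable):

import numpy as np

data = np.random.rand(100, 2)
centers, front = ecm_nsga2(data, k=3, popsize=40, gen=100)
print(centers.shape)  # (40, k * n_features) = (40, 6)
print(front.shape)    # (40, 2)
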
Example #17
 #reader = ProbReader('../../../Nemo/example')
 reader.load()
   
 outputPath='../../result/moea/nsga2/'+ str(NSGA2_triCriteria.AllowPerc)+'/'+para+'/'
 if not os.path.isdir(outputPath):
     os.makedirs(outputPath)
 
 for i in range(0,30):
     time_start=time.time()
     # create UDP
     prob = pg.problem(NSGA2_triCriteria())
     print (prob)
     # create population
     pop = pg.population(prob, size=104)
     # select algorithm
     algo = pg.algorithm(pg.nsga2(gen=2000))
     # run optimization
     pop = algo.evolve(pop)
     # extract results
     fits, vectors = pop.get_f(), pop.get_x()
     # extract and print non-dominated fronts
     ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(fits)
     time_end=time.time()
     exec_time = time_end - time_start
     fits = np.around(fits, 6)
     frontStr=''
     
     isOptimal = False 
     for fit in fits:
         if sum(fit) == 0:
             isOptimal = True
Example #18
# In[14]:

#testing
prob.fitness([758, 698, 50, 50, 52, 52])

# In[ ]:

from datetime import datetime
start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')  #start

# In[15]:

# create population
pop = pg.population(prob, size=60)
# select algorithm
algo = pg.algorithm(pg.nsga2(gen=5))
# run optimization
pop = algo.evolve(pop)
# extract results
fits, vectors = pop.get_f(), pop.get_x()

# In[16]:

ending_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')  #end

# In[10]:

import pickle

# In[11]:
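
The notebook imports pickle but the corresponding cell is cut off; a plausible continuation (a sketch, not the original cell) persists the results of the run:

with open('nsga2_results.pkl', 'wb') as f:
    pickle.dump({'fits': fits, 'vectors': vectors,
                 'start': start_time, 'end': ending_time}, f)
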
Example #19
    def create_variants(self, n, desc, category, constructor):
        def assign_2nd_alg(archipelago, algo):
            if category == 'rings':
                for island in archipelago.topology.every_other_island():
                    island.algorithm = algo
            elif hasattr(archipelago.topology, 'endpoints'):
                for island in archipelago.topology.endpoints:
                    island.algorithm = algo
            elif isinstance(archipelago.topology, FullyConnectedTopology):
                for island in islice(archipelago.topology.islands, None, None, 2):
                    island.algorithm = algo
            return archipelago

        def assign_algs(archipelago, algos):
            '''
            Evenly partitions and assigns algorithms to islands.
            '''
            for island, algo in zip(archipelago.topology.islands, cycle(algos)):
                island.algorithm = algo
            return archipelago

        g = self.generations

        self.new_topology(
          desc='{}, de'.format(desc),
          category=category,
          algorithms=['de'],
          archipelago=Archipelago(constructor(de(gen=g),n)))
        self.new_topology(
          desc='{}, de1220'.format(desc),
          category=category,
          algorithms=['de1220'],
          archipelago=Archipelago(constructor(de1220(gen=g),n)))
        self.new_topology(
          desc='{}, sade'.format(desc),
          category=category,
          algorithms=['sade'],
          archipelago=Archipelago(constructor(sade(gen=g),n)))
        self.new_topology(
          desc='{}, ihs'.format(desc),
          category=category,
          algorithms=['ihs'],
          archipelago=Archipelago(constructor(ihs(gen=g),n)))
        self.new_topology(
          desc='{}, pso'.format(desc),
          category=category,
          algorithms=['pso'],
          archipelago=Archipelago(constructor(pso(gen=g),n)))
        self.new_topology(
          desc='{}, pso_gen'.format(desc),
          category=category,
          algorithms=['pso_gen'],
          archipelago=Archipelago(constructor(pso_gen(gen=g),n)))
        # self.new_topology(
        #   desc='{}, simulated_annealing'.format(desc),
        #   category=category,
        #   algorithms=['simulated_annealing'],
        #   archipelago=Archipelago(constructor(simulated_annealing(),n)))
        self.new_topology(
          desc='{}, bee_colony'.format(desc),
          category=category,
          algorithms=['bee_colony'],
          archipelago=Archipelago(constructor(bee_colony(gen=g),n)))
        self.new_topology(
          desc='{}, cmaes'.format(desc),
          category=category,
          algorithms=['cmaes'],
          archipelago=Archipelago(constructor(cmaes(gen=g),n)))
        self.new_topology(
          desc='{}, nsga2'.format(desc),
          category=category,
          algorithms=['nsga2'],
          archipelago=Archipelago(constructor(nsga2(gen=g),n)))
        self.new_topology(
          desc='{}, xnes'.format(desc),
          category=category,
          algorithms=['xnes'],
          archipelago=Archipelago(constructor(xnes(gen=g),n)))
        # de + nelder mead combo
        self.new_topology(
          desc='{}, de+nelder mead'.format(desc),
          category=category,
          algorithms=['de','neldermead'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), self.make_nelder_mead()))
        # de + praxis combo
        self.new_topology(
          desc='{}, de+praxis'.format(desc),
          category=category,
          algorithms=['de','praxis'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), self.make_praxis()))
        # de + nsga2 combo
        self.new_topology(
          desc='{}, de+nsga2'.format(desc),
          category=category,
          algorithms=['de','nsga2'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), nsga2(gen=g)))
        # de + de1220 combo
        self.new_topology(
          desc='{}, de+de1220'.format(desc),
          category=category,
          algorithms=['de','de1220'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), de1220(gen=g)))
        # de + sade combo
        self.new_topology(
          desc='{}, de+sade'.format(desc),
          category=category,
          algorithms=['de','sade'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), sade(gen=g)))
        # de + pso combo
        self.new_topology(
          desc='{}, de+pso'.format(desc),
          category=category,
          algorithms=['de','pso'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), pso(gen=g)))


        # extra configurations for fully connected topology
        if constructor is self.factory.createFullyConnected:
            self.new_topology(
                desc='{}, de+pso+praxis'.format(desc),
                category=category,
                algorithms=['de','pso','praxis'],
                archipelago=assign_algs(Archipelago(constructor(de(gen=g),n)), (de(gen=g), pso(gen=g), self.make_praxis())))
            self.new_topology(
                desc='{}, de+pso+praxis+nsga2'.format(desc),
                category=category,
                algorithms=['de','pso','praxis','nsga2'],
                archipelago=assign_algs(Archipelago(constructor(de(gen=g),n)), (de(gen=g), pso(gen=g), self.make_praxis(), nsga2(gen=g))))
            self.new_topology(
                desc='{}, de+pso+praxis+cmaes'.format(desc),
                category=category,
                algorithms=['de','pso','praxis','cmaes'],
                archipelago=assign_algs(Archipelago(constructor(de(gen=g),n)), (de(gen=g), pso(gen=g), self.make_praxis(), cmaes(gen=g))))
            self.new_topology(
                desc='{}, de+pso+praxis+xnes'.format(desc),
                category=category,
                algorithms=['de','pso','praxis','xnes'],
                archipelago=assign_algs(Archipelago(constructor(de(gen=g),n)), (de(gen=g), pso(gen=g), self.make_praxis(), xnes(gen=g))))
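
For contrast with the project-specific Archipelago wrapper used above, a minimal sketch of pygmo's built-in archipelago running NSGA-II islands on the ZDT1 test problem:

import pygmo as pg

archi = pg.archipelago(n=4, algo=pg.nsga2(gen=10),
                       prob=pg.zdt(prob_id=1), pop_size=24)
archi.evolve()
archi.wait_check()
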
Example #20
class Schaffer:
    # Schaffer function N.1 UDP. The class head and fitness shown here are a
    # minimal reconstruction of the truncated original: f1 = x^2, f2 = (x - 2)^2.
    def fitness(self, x):
        f1 = x[0] ** 2
        f2 = (x[0] - 2) ** 2
        return [f1, f2]

    # Return number of objectives
    def get_nobj(self):
        return 2

    # Return bounds of decision variables
    def get_bounds(self):
        return ([0] * 1, [2] * 1)

    # Return function name
    def get_name(self):
        return "Schaffer function N.1"


prob = pg.problem(Schaffer())
print(prob)

# create population
pop = pg.population(prob, size=20)
# select algorithm
algo = pg.algorithm(pg.nsga2(gen=40))
# run optimization
pop = algo.evolve(pop)
# extract results
fits, vectors = pop.get_f(), pop.get_x()
# extract and print non-dominated fronts
ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(fits)
print(fits)
print(vectors)
fixed_seed = 112987

# Instantiate orbit problem
orbitProblem = AsteroidOrbitProblem(bodies,
                                    integrator_settings,
                                    propagator_settings,
                                    mission_initial_time,
                                    mission_duration,
                                    design_variable_lb,
                                    design_variable_ub)

# Create pygmo problem using the UDP instantiated above
prob = pg.problem(orbitProblem)

# Select the NSGA-II algorithm from pygmo, with one generation
algo = pg.algorithm(pg.nsga2(gen=1, seed=fixed_seed))


### Initial population
"""
An initial population of 48 individuals is now generated by PyGMO. This means that 48 orbital simulations will be run, and the fitness of each individual will be computed using the UDP.
"""

# Initialize pygmo population with 48 individuals
population_size = 48
pop = pg.population(prob, size=population_size, seed=fixed_seed)


### Evolve population
"""
We now want to make this population evolve, so as to (hopefully) get closer to optimal solutions.
def main():
    """
    The problem describes the orbit design around a small body (asteroid Itokawa).

    DYNAMICAL MODEL
    Itokawa spherical harmonics, cannonball radiation pressure from Sun, point-mass third-body
    from Sun, Jupiter, Saturn, Earth, Mars

    PROPAGATION TIME
    5 days

    INTEGRATOR
    RKF7(8) with tolerances 1E-8

    TERMINATION CONDITIONS
    In addition to the 5-day propagation time, minimum distance from Itokawa's center of mass: 150 m (no crashing),
    maximum distance from center of mass: 5 km (no escaping)

    DESIGN VARIABLES
    Initial values of semi-major axis, eccentricity, inclination, and longitude of node

    OBJECTIVES
    1. good coverage: the mean value of the absolute longitude w.r.t. Itokawa over the full propagation should be
       maximized;
    2. close orbit: the mean value of the distance should be minimized.
    """
    ###########################################################################
    # CREATE SIMULATION SETTINGS ##############################################
    ###########################################################################

    # Load spice kernels
    spice_interface.load_standard_kernels()

    # Define Itokawa radius
    itokawa_radius = 161.915

    # Set simulation start and end epochs
    mission_initial_time = 0.0
    mission_duration = 5.0 * 86400.0

    # Set boundaries on the design variables
    design_variable_lb = (300, 0.0, 0.0, 0.0)
    design_variable_ub = (2000, 0.3, 180, 360)

    # Set termination conditions
    minimum_distance_from_com = 150.0 + itokawa_radius
    maximum_distance_from_com = 5.0E3 + itokawa_radius

    # Create simulation bodies
    bodies = create_simulation_bodies(itokawa_radius)

    ###########################################################################
    # CREATE ACCELERATIONS ####################################################
    ###########################################################################

    bodies_to_propagate = ["Spacecraft"]
    central_bodies = ["Itokawa"]

    # Create acceleration models.
    acceleration_models = get_acceleration_models(bodies_to_propagate,
                                                  central_bodies, bodies)

    # Create numerical integrator settings.
    integrator_settings = propagation_setup.integrator.runge_kutta_variable_step_size(
        mission_initial_time, 1.0,
        propagation_setup.integrator.RKCoefficientSets.rkf_78, 1.0E-6, 86400.0,
        1.0E-8, 1.0E-8)

    ###########################################################################
    # CREATE PROPAGATION SETTINGS #############################################
    ###########################################################################

    # Define list of dependent variables to save
    dependent_variables_to_save = get_dependent_variables_to_save()

    # Create propagation settings
    termination_settings = get_termination_settings(mission_initial_time,
                                                    mission_duration,
                                                    minimum_distance_from_com,
                                                    maximum_distance_from_com)

    # Define (Cowell) propagator settings with mock initial state
    propagator_settings = propagation_setup.propagator.translational(
        central_bodies,
        acceleration_models,
        bodies_to_propagate,
        np.zeros(6),
        termination_settings,
        output_variables=dependent_variables_to_save)

    ###########################################################################
    # OPTIMIZE ORBIT WITH PYGMO ###############################################
    ###########################################################################

    # Fix seed for reproducibility
    fixed_seed = 17031861
    # Instantiate orbit problem
    orbitProblem = AsteroidOrbitProblem(bodies, integrator_settings,
                                        propagator_settings,
                                        mission_initial_time, mission_duration,
                                        design_variable_lb, design_variable_ub)

    # Select the NSGA-II algorithm from pygmo, with one generation
    algo = pg.algorithm(pg.nsga2(gen=1, seed=fixed_seed))
    # Create pygmo problem using the UDP instantiated above
    prob = pg.problem(orbitProblem)
    # Initialize pygmo population with 48 individuals
    population_size = 48
    pop = pg.population(prob, size=population_size, seed=fixed_seed)
    # Set the number of evolutions
    number_of_evolutions = 50
    # Initialize containers
    fitness_list = []
    population_list = []
    # Evolve the population recursively
    for gen in range(number_of_evolutions):
        print('Evolving population; at generation ' + str(gen))
        # Evolve the population
        pop = algo.evolve(pop)
        # Store the fitness values and design variables for all individuals
        fitness_list.append(pop.get_f())
        population_list.append(pop.get_x())

    ###########################################################################
    # ANALYZE FIRST AND LAST GENERATIONS ######################################
    ###########################################################################

    dump_results_to_file = False

    # Get output path
    output_path = os.getcwd() + '/PygmoExampleSimulationOutput/'

    # Retrieve first and last generations for further analysis
    pops_to_analyze = {0: 'initial', number_of_evolutions - 1: 'final'}
    # Initialize containers
    simulation_output = dict()
    # Loop over first and last generations
    for population_index, population_name in pops_to_analyze.items():
        current_population = population_list[population_index]
        # Save fitness and population members
        if dump_results_to_file:
            # Create directory
            if not os.path.isdir(output_path):
                os.mkdir(output_path)
            np.savetxt(output_path + 'Fitness_' + population_name + '.dat',
                       fitness_list[population_index])
            np.savetxt(output_path + 'Population_' + population_name + '.dat',
                       population_list[population_index])
        # Current generation's dictionary
        generation_output = dict()
        # Loop over all individuals of the populations
        for individual in range(population_size):
            # Retrieve orbital parameters
            current_orbit_parameters = current_population[individual]
            # Propagate orbit and compute fitness
            orbitProblem.fitness(current_orbit_parameters)
            # Retrieve state and dependent variable history
            current_states = orbitProblem.get_last_run_dynamics_simulator(
            ).state_history
            current_dependent_variables = orbitProblem.get_last_run_dynamics_simulator(
            ).dependent_variable_history
            # Save results to dict
            generation_output[individual] = [
                current_states, current_dependent_variables
            ]
            # Write data to files
            if dump_results_to_file:
                save2txt(
                    current_dependent_variables, population_name +
                    '_dependent_variables' + str(individual) + '.dat',
                    output_path)
                save2txt(
                    current_states,
                    population_name + '_states' + str(individual) + '.dat',
                    output_path)
        # Append to global dictionary
        simulation_output[population_index] = [
            generation_output, fitness_list[population_index],
            population_list[population_index]
        ]

    ###########################################################################
    # ANALYZE RESULTS #########################################################
    ###########################################################################

    # Set font size for plots
    font = {'size': 12}
    matplotlib.rc('font', **font)

    # Create dictionaries
    decision_variable_names = {
        0: 'Semi-major axis [m]',
        1: 'Eccentricity',
        2: 'Inclination [deg]',
        3: 'Longitude of the node [deg]'
    }
    decision_variable_range = {
        0: [800.0, 1300.0],
        1: [0.10, 0.17],
        2: [90.0, 95.0],
        3: [250.0, 270.0]
    }
    decision_variable_symbols = {
        0: r'$a$',
        1: r'$e$',
        2: r'$i$',
        3: r'$\Omega$'
    }
    decision_variable_units = {0: r' m', 1: r' ', 2: r' deg', 3: r' deg'}
    # Loop over populations
    for population_index in simulation_output.keys():
        # Retrieve current population
        current_generation = simulation_output[population_index]
        # Plot Pareto fronts for all design variables
        fig, axs = plt.subplots(2, 2, figsize=(14, 8))
        fig.suptitle('Generation ' + str(population_index),
                     fontweight='bold',
                     y=0.95)
        current_fitness = current_generation[1]
        current_population = current_generation[2]
        for ax_index, ax in enumerate(axs.flatten()):
            cs = ax.scatter(np.deg2rad(current_fitness[:, 0]),
                            current_fitness[:, 1],
                            40,
                            current_population[:, ax_index],
                            marker='.')
            cbar = fig.colorbar(cs, ax=ax)
            cbar.ax.set_ylabel(decision_variable_names[ax_index])
            ax.grid('major')
            if ax_index > 1:
                ax.set_xlabel(r'Objective 1: coverage [$deg^{-1}$] ')
            if ax_index == 0 or ax_index == 2:
                ax.set_ylabel(r'Objective 2: proximity [$m$]')
        # Save figure
        fig.savefig('pareto_generation_' + str(population_index) + '.png',
                    bbox_inches='tight')

    # Plot histogram for last generation, semi-major axis
    fig, axs = plt.subplots(2, 2, figsize=(12, 8))
    fig.suptitle('Final orbits by decision variable',
                 fontweight='bold',
                 y=0.95)
    last_pop = simulation_output[number_of_evolutions - 1][2]
    for ax_index, ax in enumerate(axs.flatten()):
        ax.hist(last_pop[:, ax_index], bins=30)
        # Prettify
        ax.set_xlabel(decision_variable_names[ax_index])
        if ax_index % 2 == 0:
            ax.set_ylabel('Occurrences in the population')
    # Save figure
    fig.savefig('histograms_final_generation.png', bbox_inches='tight')

    # Plot orbits of initial and final generation
    fig = plt.figure(figsize=(12, 6))
    fig.suptitle('Initial and final orbit bundle', fontweight='bold', y=0.95)
    title = {0: 'Initial orbit bundle', 1: 'Final orbit bundle'}
    # Loop over populations
    for ax_index, population_index in enumerate(simulation_output.keys()):
        current_ax = fig.add_subplot(1, 2, 1 + ax_index, projection='3d')
        # Retrieve current population
        current_generation = simulation_output[population_index]
        current_population = current_generation[2]
        # Loop over individuals
        for ind_index, individual in enumerate(current_population):
            # Plot orbit
            state_history = list(current_generation[0][ind_index][0].values())
            state_history = np.vstack(state_history)
            current_ax.plot(state_history[:, 0],
                            state_history[:, 1],
                            state_history[:, 2],
                            linewidth=0.5)
        # Prettify
        current_ax.set_xlabel('X [m]')
        current_ax.set_ylabel('Y [m]')
        current_ax.set_zlabel('Z [m]')
        current_ax.set_title(title[ax_index], y=1.0, pad=15)
    # Save figure
    fig.savefig('orbit_bundles_initial_final_gen.png', bbox_inches='tight')

    # Plot orbits of final generation divided by parameters
    fig = plt.figure(figsize=(12, 8))
    fig.suptitle('Final orbit bundle by decision variable',
                 fontweight='bold',
                 y=0.95)
    # Retrieve current population
    current_generation = simulation_output[number_of_evolutions - 1]
    # Plot Pareto fronts for all design variables
    current_population = current_generation[2]
    # Loop over decision variables
    for var in range(4):
        # Create axis
        current_ax = fig.add_subplot(2, 2, 1 + var, projection='3d')
        # Loop over individuals
        for ind_index, individual in enumerate(current_population):
            # Set plot color according to boundaries
            if individual[var] < decision_variable_range[var][0]:
                plt_color = 'r'
                label = decision_variable_symbols[var] + ' < ' + str(decision_variable_range[var][0]) + \
                        decision_variable_units[var]
            elif decision_variable_range[var][0] < individual[
                    var] < decision_variable_range[var][1]:
                plt_color = 'b'
                label = str(decision_variable_range[var][0]) + ' < ' + \
                        decision_variable_symbols[var] + \
                        ' < ' + str(decision_variable_range[var][1]) + decision_variable_units[var]
            else:
                plt_color = 'g'
                label = decision_variable_symbols[var] + ' > ' + str(decision_variable_range[var][1]) + \
                        decision_variable_units[var]

            # Plot orbit
            state_history = list(current_generation[0][ind_index][0].values())
            state_history = np.vstack(state_history)
            current_ax.plot(state_history[:, 0],
                            state_history[:, 1],
                            state_history[:, 2],
                            color=plt_color,
                            linewidth=0.5,
                            label=label)
        # Prettify
        current_ax.set_xlabel('X [m]')
        current_ax.set_ylabel('Y [m]')
        current_ax.set_zlabel('Z [m]')
        current_ax.set_title(decision_variable_names[var], y=1.0, pad=10)
        handles, decision_variable_legend = current_ax.get_legend_handles_labels(
        )
        decision_variable_legend, ids = np.unique(decision_variable_legend,
                                                  return_index=True)
        handles = [handles[i] for i in ids]
        current_ax.legend(handles,
                          decision_variable_legend,
                          loc='lower right',
                          bbox_to_anchor=(0.3, 0.6))
    # Save figure
    fig.savefig('orbit_bundle_final_gen_by_variable.png', bbox_inches='tight')

    # Show plot
    plt.show()