Example #1
import pygmo as pg


def test_pygmo_single_generation(two_reservoir_wrapper):
    """ Simple pygmo wrapper test. """
    wrapper = two_reservoir_wrapper
    prob = pg.problem(wrapper)
    algo = pg.algorithm(pg.moead(gen=1))

    pg.mp_island.init_pool(2)
    isl = pg.island(algo=algo, prob=prob, size=50, udi=pg.mp_island())
    isl.evolve(1)
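The test above launches the evolution asynchronously and never inspects the result. A minimal follow-up sketch, assuming the same isl object and the standard pygmo island calls, could look like this:

isl.wait_check()                 # block until evolution finishes; re-raises island errors
evolved_pop = isl.get_population()
print(evolved_pop.get_f()[:5])   # first few objective vectors of the evolved population
pg.mp_island.shutdown_pool()     # optional: shut down the multiprocessing pool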
Example #2
def main(pop_init=None):

    # start timer
    startTime = timeit.default_timer()

    # initialize algorithm with hyperparameters
    alg = pg.moead(gen=generations, neighbours=neighbors, seed=seed)

    # There should be a way to run batch_fitness evaluations; I haven't gotten it to work on NSCL yet
    # (see the batch-evaluation sketch after this example).
    #b = pg.bfe()
    #alg.set_bfe(b)
    #alg.set_verbosity(1)

    # initialize problem
    p_optimizeRes = pg.problem(optimizeRes(magnet_dim, outputFile))

    # relic from old evolutions, which launched all islands internally
    #   this can allow for interconnectivity between islands
    #   now each run of this script calls its own island
    n_islands = 1
    # if more than one island and want to exchange info need to define a topology to connect them
    #top = pg.topology(pg.fully_connected(n_islands,1.0))

    # when running 5 objectives, pop needed to be 70
    pop_n = 70  #84
    if p_optimizeRes.get_nobj() == 4:
        # with 4 objs need pop=84
        pop_n = 84

    # check if we are using an input population or need to initialize one
    pop_new = None
    if pop_init is None:
        # randomly create a new population of size pop_n
        pop_new = pg.population(p_optimizeRes, size=pop_n)
        print("initialize pop")
    else:
        pop_new = pop_init
        print("provided pop")

    # wrap the UDA in a pygmo algorithm (a single run; no archipelago is created here)
    algo = pg.algorithm(alg)
    # evolve the population; algorithm.evolve returns the evolved population, so keep it
    pop_new = algo.evolve(pop_new)

    # check total time
    print('Running time (sec): %f' % (timeit.default_timer() - startTime))

    # when I tried to use multiprocessing I needed this
    #pg.mp_island.shutdown_pool()
    #pg.mp_bfe.shutdown_pool()

    return pop_new
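Regarding the batch_fitness comment above: a hedged sketch of one way to at least evaluate the initial population in batch, assuming the UDP exposes a batch_fitness() method compatible with pygmo's default bfe (none of this is part of the original script):

b = pg.bfe()                                           # default batch fitness evaluator
prob = pg.problem(optimizeRes(magnet_dim, outputFile))
pop_new = pg.population(prob, size=pop_n, b=b)         # initial individuals evaluated through the bfe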
Example #3
    def __init__(self,
                 problem,
                 surrogate=None,
                 size=100,
                 generation=1,
                 weight_generation="grid",
                 decomposition="tchebycheff",
                 neighbours=20,
                 CR=1,
                 F=0.5,
                 eta_m=20,
                 realb=0.9,
                 limit=2,
                 preserve_diversity=True,
                 seed=None,
                 *args,
                 **kwargs):

        # Set seed
        seed = np.random.randint(1e8) if seed is None else seed

        a = pg.algorithm(
            pg.moead(gen=generation,
                     weight_generation=weight_generation,
                     decomposition=decomposition,
                     neighbours=neighbours,
                     CR=CR,
                     F=F,
                     eta_m=eta_m,
                     realb=realb,
                     limit=limit,
                     preserve_diversity=preserve_diversity,
                     seed=seed))

        # Check sanity for weight_generation
        if weight_generation == "grid":
            size = valid_moead_popsize(size=size, n_objs=problem.n_objs)

        if surrogate is not None:
            prob = construct_moead_problem_with_surrogate(problem, surrogate)
        else:
            prob = problem

        pop = pg.population(prob=pg.problem(prob), size=size, seed=seed)

        self.pop_size = size
        self.problem = prob
        self.population = pop
        self.algorithm = a

        return None
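The constructor above only assembles the algorithm and population; a hypothetical evolve helper for the same class (the method name and return values are assumptions, not part of the original) might simply be:

    def evolve(self):
        # Run the configured MOEA/D for the stored number of generations and
        # keep the evolved population (algorithm.evolve returns a new population).
        self.population = self.algorithm.evolve(self.population)
        return self.population.get_x(), self.population.get_f()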
Example #4
    def run(self):
        """
        Run the optimisation process using PSO algorithm.
        :param converge_info: optional run the optimisation with convergence information
        :param converge_info: optional run the optimisation with population information
        :return:
        """
        print("Start the optimisation process...")

        if self.algorithm_type == 'nsga-2':
            uda = pg.nsga2(gen=self.generation)
        elif self.algorithm_type == 'moea-d':
            uda = pg.moead(gen=self.generation)
        elif self.algorithm_type == 'ihs':
            uda = pg.ihs(gen=self.generation)
        else:
            raise ValueError("Unsupported algorithm_type: %s" % self.algorithm_type)

        algo = pg.algorithm(uda)
        pop = pg.population(self.problem, self.pop_size)
        pop = algo.evolve(pop)
        self.pop = pop
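Once run() has stored the evolved population in self.pop, the non-dominated solutions can be extracted with standard pygmo utilities, for example (a sketch, not part of the original class):

fits, vectors = self.pop.get_f(), self.pop.get_x()
ndf, _, _, _ = pg.fast_non_dominated_sorting(fits)   # ndf[0] holds the indices of the best front
pareto_front = fits[ndf[0]]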
Example #5
def get_optimization_algorithm(randseed):
    '''
    Return a pygmo optimisation algorithm configured according to cfg.MOG_ALG.
    '''
    opt_alg = None
    if (cfg.MOG_ALG == "nsga2"):
        # cr: crossover probability, m: mutation probability
        # eta_c: distribution index for crossover, eta_m: distribution index for mutation
        opt_alg = pyg.algorithm(
            pyg.nsga2(gen=1,
                      cr=0.925,
                      m=0.05,
                      eta_c=10,
                      eta_m=50,
                      seed=randseed))
    elif (cfg.MOG_ALG == "moead"):
        opt_alg = pyg.algorithm(
            pyg.moead(gen=1,
                      weight_generation="grid",
                      decomposition="tchebycheff",
                      neighbours=5,
                      CR=1,
                      F=0.5,
                      eta_m=20,
                      realb=0.9,
                      limit=2,
                      preserve_diversity=True))
    elif (cfg.MOG_ALG == "nspso"):
        opt_alg = pyg.algorithm(
            pyg.nspso(gen=1,
                      omega=0.6,
                      c1=0.01,
                      c2=0.5,
                      chi=0.5,
                      v_coeff=0.5,
                      leader_selection_range=2,
                      diversity_mechanism="crowding distance",
                      memory=False))
    if opt_alg is None:
        raise ValueError("Unsupported cfg.MOG_ALG: %s" % cfg.MOG_ALG)
    opt_alg.set_verbosity(1)
    return opt_alg
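A typical usage of the returned algorithm pairs it with a pygmo problem and population, roughly as follows (a hedged sketch; prob is assumed to be an existing pygmo-compatible problem, and a population size of 40 satisfies NSGA-II's multiple-of-4 requirement):

opt_alg = get_optimization_algorithm(randseed=42)
pop = pyg.population(prob, size=40)   # size must suit the chosen algorithm (e.g. a multiple of 4 for NSGA-II)
pop = opt_alg.evolve(pop)
print(pop.get_f())                    # objective values after one generation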
Example #6
def ecm_moead(data,
              k=2,
              popsize=40,
              gen=500,
              weight_generation="grid",
              decomposition="tchebycheff",
              neighbours=20,
              CR=1,
              F=0.5,
              eta_m=20,
              realb=0.9,
              limit=2,
              preserve_diversity=True):
    '''Entropy c-means - MOEA/D Clustering

    Parameters
    ----------

    data : array, shape (n_data_points, n_features)
        The data array.

    k : int, default: 2
        The number of clusters.

    popsize : int, default: 40
        The population size.

    gen : int, default: 500
        The number of generations to be iterated.

    weight_generation : str, default: 'grid'
        Method used for weight generation.

    decomposition : str, default: 'tchebycheff'
        Method for objective decomposition.

    neighbours : int, default: 20
        Size of weight neighbourhood.

    CR : float, default: 1
        Crossover parameter of the Differential Evolution operator used by MOEA/D.

    F : float, default: 0.5
        Scaling parameter (differential weight) of the Differential Evolution operator used by MOEA/D.

    eta_m : float, default: 20
        MOEA/D parameter for polynomial mutation distribution index.

    realb : float, default: 0.9
        Chance that the neighbourhood is considered at each generation,
        rather than the whole population (only if preserve_diversity is true).

    limit : int, default: 2
        Maximum number of copies reinserted in the population
        (only if preserve_diversity is true).

    preserve_diversity : bool, default: True
        When true activates diversity preservation mechanisms.

    Returns
    -------

    vectors : array, shape (popsize, k * n_features)
        The resulting cluster centers, each flattened to a one-dimensional array.

    pareto_front : array, shape (popsize, 2)
        The Pareto front of the mapped solutions.

    '''
    # set up the problem
    spobj = ecm(k * data.shape[1])
    spobj.set_data(data)
    prob = pg.problem(spobj)

    # create population
    pop = pg.population(prob, popsize)
    # select the MO algorithm
    algo = pg.algorithm(
        pg.moead(gen=gen,
                 weight_generation=weight_generation,
                 decomposition=decomposition,
                 neighbours=neighbours,
                 CR=CR,
                 F=F,
                 eta_m=eta_m,
                 realb=realb,
                 limit=limit,
                 preserve_diversity=preserve_diversity))
    # run optimization
    pop = algo.evolve(pop)

    # extract results
    pareto_front, vectors = pop.get_f(), pop.get_x()
    # sort the Pareto front and solutions
    sorted_idxs = np.argsort(pareto_front[:, 0])
    pareto_front = pareto_front[sorted_idxs, :]
    vectors = vectors[sorted_idxs, :]

    return vectors, pareto_front
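A minimal usage sketch for ecm_moead, assuming the ecm UDP and its dependencies are importable; the synthetic data here is purely illustrative:

import numpy as np

data = np.random.rand(200, 2)                              # 200 random 2-D points
centers, front = ecm_moead(data, k=3, popsize=40, gen=100)
print(centers.shape)   # (40, 6): each row is a flattened set of 3 cluster centres
print(front.shape)     # (40, 2): the corresponding Pareto front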
Example #7
    reader.load()

    outputPath = '../../result/moea/moeaD/' + str(
        MOEAD_triCriteria.AllowPerc) + '/' + para + '/'
    if not os.path.isdir(outputPath):
        os.makedirs(outputPath)

    for i in range(0, 30):
        time_start = time.time()
        # create UDP
        prob = pg.problem(MOEAD_triCriteria())
        print(prob)
        # create population
        pop = pg.population(prob, size=105)
        # select algorithm
        algo = pg.algorithm(pg.moead(gen=1981))
        # run optimization
        pop = algo.evolve(pop)
        # extract results
        fits, vectors = pop.get_f(), pop.get_x()
        # extract and print non-dominated fronts
        ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(fits)
        time_end = time.time()
        exec_time = time_end - time_start
        fits = np.around(fits, 6)  # np.around returns a rounded copy; keep it
        frontStr = ''

        isOptimal = False
        for fit in fits:
            if sum(fit) == 0:
                isOptimal = True
Example #8
def pygmo_main(harmonic=False):
    import pygmo as pg
    from pywr.optimisation.pygmo import PygmoWrapper

    def update_archive(pop, archive=None):
        if archive is None:
            combined_f = pop.get_f()
        else:
            combined_f = np.r_[archive, pop.get_f()]
        indices = pg.select_best_N_mo(combined_f, 50)
        new_archive = combined_f[indices, :]
        new_archive = np.unique(new_archive.round(4), axis=0)
        return new_archive

    wrapper = PygmoWrapper(get_model_data(harmonic=harmonic))
    prob = pg.problem(wrapper)
    print(prob)
    algo = pg.algorithm(pg.moead(gen=1))
    # algo = pg.algorithm(pg.nsga2(gen=1))

    pg.mp_island.init_pool(2)
    isl = pg.island(algo=algo, prob=prob, size=50, udi=pg.mp_island())

    ref_point = [216500, 4000]
    pop = isl.get_population()

    print("Evolving!")
    archive = update_archive(pop)
    hv = pg.hypervolume(archive)
    vol = hv.compute(ref_point)
    hvs = [
        vol,
    ]
    print(
        "Gen: {:03d}, Hypervolume: {:6.4g}, Archive size: {:d}".format(
            0, vol, len(archive)
        )
    )

    for gen in range(20):
        isl.evolve(1)
        isl.wait_check()

        pop = isl.get_population()
        archive = update_archive(pop, archive)

        hv = pg.hypervolume(archive)
        vol = hv.compute(ref_point)
        print(
            "Gen: {:03d}, Hypervolume: {:6.4g}, Archive size: {:d}".format(
                gen + 1, vol, len(archive)
            )
        )
        hvs.append(vol)

    hvs = pd.Series(hvs)

    print("Finished!")

    plt.scatter(archive[:, 0], archive[:, 1])

    objectives = wrapper.model_objectives
    plt.xlabel(objectives[0].name)
    plt.ylabel(objectives[1].name)
    plt.grid(True)
    title = "Harmonic Control Curve" if harmonic else "Monthly Control Curve"
    plt.savefig("{} Example ({}).pdf".format("pygmo", title), format="pdf")

    fig, ax = plt.subplots()
    ax.plot(hvs / 1e6, marker="o")
    ax.grid(True)
    ax.set_ylabel("Hypervolume")
    ax.set_xlabel("Generation")
    plt.savefig("{} Example Hypervolume ({}).pdf".format("pygmo", title), format="pdf")

    plt.show()
Example #9
max_fevals = (dim + 1) * 2

# Account for the fact that index 0 in the util functions
# actually corresponds to the 1st feval
working_fevals = max_fevals - 1
pop_size = 24
seed = 33

# For each problem in the problem suite
for i in range(problem_number):
    problem_function = getattr(pg.problems, problem_name)
    if (problem_name == "dtlz"):
        problem = pg.problem(problem_function(i + 1, dim=dim, fdim=fdim))
    else:
        problem = pg.problem(problem_function(i + 1, param=dim))
    algo_moead = pg.algorithm(pg.moead(gen=1))
    algo_nsga2 = pg.algorithm(pg.nsga2(gen=1))

    # Hypervolume calculations, mean taken over n number of times
    hv_rbfmopt_plot = calculate_mean_rbf(n, max_fevals, working_fevals, seed,
                                         problem, cycle)
    hv_moead_plot = calculate_mean_pyg(n, algo_moead, working_fevals, pop_size,
                                       seed, problem)
    hv_nsga2_plot = calculate_mean_pyg(n, algo_nsga2, working_fevals, pop_size,
                                       seed, problem)
    fevals_plot = range(0, max_fevals)

    save_values(
        'storedvalues/rbfmopt_hv_' + problem.get_name() + '_fevals' +
        str(max_fevals) + '.txt', hv_rbfmopt_plot.tolist())
    save_values(
Example #10
    def part_optimization(self, acm_template):
        ad = self.ad
        ad.init_logger(prefix='acmdm')

        # [4.1] Get bounds
        # ad.bounds_denorm = acm_template.bounds_denorm # get_classic_bounds(which_filter=self.fea_config_dict['which_filter'])
        # ad.bound_filter  = acm_template.bound_filter
        # otnb = acm_template.original_template_neighbor_bounds
        # print('---------------------\nBounds: (if there are two bounds within one line, they should be the same)')
        # idx_ad = 0
        # for idx, f in enumerate(ad.bound_filter):
        #     if f == True:
        #         print(idx, f, '[%g,%g]'%tuple(otnb[idx]), '[%g,%g]'%tuple(ad.bounds_denorm[idx_ad]))
        #         idx_ad += 1
        #     else:
        #         print(idx, f, '[%g,%g]'%tuple(otnb[idx]))

        # if self.fea_config_dict['bool_post_processing'] == True: # use the new script file instead: main_post_processing_pm.py
        #     import one_script_pm_post_processing
        #     one_script_pm_post_processing.post_processing(ad, self.fea_config_dict)
        #     quit()

        # [4.3] MOO
        from acm_designer import get_bad_fintess_values
        import logging
        import utility_moo
        import pygmo as pg
        ad.counter_fitness_called = 0
        ad.counter_fitness_return = 0
        __builtins__.ad = ad  # share global variable between modules # https://stackoverflow.com/questions/142545/how-to-make-a-cross-module-variable
        import codes3.Problem_BearinglessSynchronousDesign  # must import this after __builtins__.ad = ad
        # print('[acmop.py]', __builtins__.ad)

        ################################################################
        # MOO Step 1:
        #   Create UserDefinedProblem and create population
        #   The magic method __init__ cannot be defined for the UDP class
        ################################################################
        # [4.3.1] Basic setup
        _, prob, popsize = codes3.Problem_BearinglessSynchronousDesign.get_prob_and_popsize(
        )

        print('[acmop.py]', '-' * 40 + '\n[acmop.py] Pop size is', popsize)

        # [4.3.2] Generate the pop
        if False:
            pop = pg.population(prob, size=popsize)
        # Add Restarting Feature when generating pop
        else:
            from main_utility import get_sorted_swarm_data_from_the_archive
            # def get_sorted_swarm_data_from_the_archive(path_to_archive):
            #     output_dir_backup = ad.solver.output_dir
            #     ad.solver.output_dir = ad.solver.fea_config_dict['dir.parent'] + path_to_archive
            #     number_of_chromosome = ad.solver.read_swarm_data(ad.bound_filter)
            #     ad.solver.output_dir = output_dir_backup

            #     ad.flag_do_not_evaluate_when_init_pop = True
            #     pop = pg.population(prob, size=popsize)
            #     swarm_data_on_pareto_front = utility_moo.learn_about_the_archive(prob, ad.solver.swarm_data, popsize, ad.solver.fea_config_dict, bool_plot_and_show=False)
            #     ad.flag_do_not_evaluate_when_init_pop = False
            #     return swarm_data_on_pareto_front

            # Check swarm_data.txt; if it contains at least one record, the return value is not None.
            print('[acmop.py] Check swarm_data.txt...')
            number_of_chromosome = ad.solver.read_swarm_data(self.select_spec)

            # case 1: swarm_data.txt exists
            if number_of_chromosome is not None:

                number_of_finished_iterations = number_of_chromosome // popsize
                number_of_finished_chromosome_in_current_generation = number_of_chromosome % popsize

                # If it divides evenly, change the remainder 0 to popsize
                if number_of_finished_chromosome_in_current_generation == 0:
                    number_of_finished_chromosome_in_current_generation = popsize
                    print(
                        f'\tThere are {number_of_chromosome} chromosomes found in swarm_data.txt.'
                    )
                    print(
                        '\tWhat are the odds! The script stopped just when the evaluation of the whole pop was finished.'
                    )
                    print(
                        '\tSet number_of_finished_chromosome_in_current_generation to popsize %d'
                        %
                        (number_of_finished_chromosome_in_current_generation))

                print('[acmop.py] This is a restart of ' +
                      self.fea_config_dict['run_folder'][:-1])
                print('\tNumber of finished iterations is %d' %
                      (number_of_finished_iterations))
                # print('This means the initialization of the population class is interrupted. So the pop in swarm_data.txt is used as the survivor.')

                # swarm_survivor feature. Not sure if this is needed anymore...
                if True:
                    # Keep reading data from swarm_survivor.txt; note that the survivors always form a complete generation, unless popsize has been modified.
                    print('\tCheck swarm_survivor.txt...', end='')
                    ad.solver.survivor = ad.solver.read_swarm_survivor(popsize)

                    # If ad.solver.survivor is None, the run was interrupted while initializing the pop; in that case, use swarm_data to generate the pop.
                    if ad.solver.survivor is not None:
                        print(
                            'Found survivor!\nRestart the optimization based on the swarm_survivor.txt.'
                        )

                        if len(ad.solver.survivor) != popsize:
                            print(
                                'popsize is reduced'
                            )  # If popsize was increased, read_swarm_survivor(popsize) would raise an error, because the '-----' separator cannot be split and converted to float
                            raise Exception(
                                'This is a feature not tested. However, you can cheat to change popsize by manually modify swarm_data.txt or swarm_survivor.txt.'
                            )
                    else:
                        print(
                            'Gotta make do with swarm_data to generate survivor.'
                        )

                # These counters always equal the number of chromosomes that have been evaluated.
                ad.counter_fitness_called = ad.counter_fitness_return = number_of_chromosome
                print(
                    '[acmop.py] ad.counter_fitness_called = ad.counter_fitness_return = number_of_chromosome = %d'
                    % (number_of_chromosome))

                # case 1-A: swarm_data.txt exists and this is a re-evaluation run using the existing csv files (e.g., if the copper-loss calculation code was modified, swarm_data.txt must be regenerated from the existing finite element results)
                if self.fea_config_dict['bool_re_evaluate']:
                    ad.counter_fitness_called = ad.counter_fitness_return = 0

                # Do not run finite element analysis while initializing the pop
                ad.flag_do_not_evaluate_when_init_pop = True
                # Initialize the population. If ad.flag_do_not_evaluate_when_init_pop is False this is a new run; otherwise every individual in the generation gets a fitness of [0, 0, 0].
                pop = pg.population(prob, size=popsize)
                if self.fea_config_dict['bool_re_evaluate_wo_csv']:
                    swarm_data_backup = ad.solver.swarm_data[::]  # This is going to be over-written in next line
                    swarm_data_on_pareto_front, _ = get_sorted_swarm_data_from_the_archive(
                        prob, popsize, path_to_archive)
                    ad.flag_do_not_evaluate_when_init_pop = True  # When you call get_sorted_swarm_data_from_the_archive, flag_do_not_evaluate_when_init_pop is set to False at the end. Sometimes we do not want this, for example, when restarting a re-evaluation without csv.
                    ad.solver.swarm_data = swarm_data_backup
                    for i in range(popsize):
                        # print(path_to_archive, ':', swarm_data_on_pareto_front[i][::-1])
                        pop.set_xf(i, swarm_data_on_pareto_front[i][:-3],
                                   swarm_data_on_pareto_front[i][-3:])
                    print('[acmop.py] Old pop:')
                    print(pop)

                # Restarting feature related codes
                # If every individual's fitness is [0, 0, 0], call set_xf to write the data from the txt file into pop. If there are not enough entries, call set_x() to generate data and form the initial individuals.
                if ad.flag_do_not_evaluate_when_init_pop == True:
                    pop_array = pop.get_x()
                    if number_of_chromosome <= popsize:
                        for i in range(popsize):
                            if i < number_of_chromosome:  #number_of_finished_chromosome_in_current_generation:
                                pop.set_xf(i, ad.solver.swarm_data[i][:-3],
                                           ad.solver.swarm_data[i][-3:])
                            else:
                                print(
                                    '[acmop.py] Set "ad.flag_do_not_evaluate_when_init_pop" to False...'
                                )
                                ad.flag_do_not_evaluate_when_init_pop = False
                                print(
                                    '[acmop.py] Calling pop.set_x()---this is a restart for individual#%d during pop initialization.'
                                    % (i))
                                print('[acmop.py]', i, 'get_fevals:',
                                      prob.get_fevals())
                                pop.set_x(i, pop_array[i])  # evaluate this guy

                    else:
                        # New approach: determine the current best individuals directly from swarm_data.txt (which serves as the archive)
                        swarm_data_on_pareto_front = utility_moo.learn_about_the_archive(
                            prob, ad.solver.swarm_data, popsize,
                            fea_config_dict)
                        # print(swarm_data_on_pareto_front)
                        for i in range(popsize):
                            pop.set_xf(i, swarm_data_on_pareto_front[i][:-3],
                                       swarm_data_on_pareto_front[i][-3:])

                    # This must be placed at the end of this if-block, because learn_about_the_archive initializes a pop_archive, which calls the fitness method.
                    ad.flag_do_not_evaluate_when_init_pop = False

            # case 2: swarm_data.txt does not exist
            else:
                number_of_finished_chromosome_in_current_generation = None
                number_of_finished_iterations = 0  # In practice this is not zero but one, because the initialization generation also counts as a generation. Alternatively, define number_of_finished_iterations = number_of_chromosome // popsize

                # case 2-A: swarm_data.txt does not exist and this is a whole new run.
                if not self.fea_config_dict['bool_re_evaluate_wo_csv']:
                    print(
                        '[acmop.py] Nothing exists in swarm_data.txt.\nThis is a whole new run.'
                    )
                    ad.flag_do_not_evaluate_when_init_pop = False
                    pop = pg.population(prob, size=popsize)

                # case 2-B: swarm_data.txt does not exist and this is a re-evaluation run (without csv)
                else:
                    print(
                        '[acmop.py] Nothing exists in swarm_data.txt.\nRe-start from %s'
                        % (path_to_archive))
                    ad.flag_do_not_evaluate_when_init_pop = True
                    pop = pg.population(prob, size=popsize)
                    # read in swarm data from another older run's archive and start from it!
                    swarm_data_on_pareto_front, _ = get_sorted_swarm_data_from_the_archive(
                        prob, popsize, path_to_archive)
                    ad.flag_do_not_evaluate_when_init_pop = False
                    for i in range(popsize):
                        print(path_to_archive, ':',
                              swarm_data_on_pareto_front[i][::-1])
                        pop.set_x(i, swarm_data_on_pareto_front[i]
                                  [:-3])  # re-evaluate this guy

            # this flag must be false to move on
            ad.flag_do_not_evaluate_when_init_pop = False

        print('[acmop.py]', '-' * 40, '\nPop is initialized:\n', pop)
        hv = pg.hypervolume(pop)
        quality_measure = hv.compute(ref_point=get_bad_fintess_values(
            machine_type='PMSM',
            ref=True))  # ref_point must be dominated by the pop's pareto front
        print('[acmop.py] quality_measure: %g' % (quality_measure))
        # raise KeyboardInterrupt

        # After initialization, pop.problem.get_fevals() equals popsize. If it is greater than popsize, then "pop.set_x(i, pop_array[i])  # evaluate this guy" was called and the survivors data has not been written out yet, so write it now.
        if pop.problem.get_fevals() > popsize:
            print('[acmop.py] Write survivors.')
            ad.solver.write_swarm_survivor(pop, ad.counter_fitness_return)

        ################################################################
        # MOO Step 2:
        #   Select algorithm (another option is pg.nsga2())
        ################################################################
        # [4.3.3] Selecting algorithm
        # Remember to keep neighbours below popsize (default is 20); decomposition="bi" is an alternative option
        algo = pg.algorithm(
            pg.moead(gen=1,
                     weight_generation="grid",
                     decomposition="tchebycheff",
                     neighbours=20,
                     CR=1,
                     F=0.5,
                     eta_m=20,
                     realb=0.9,
                     limit=2,
                     preserve_diversity=True)
        )  # https://esa.github.io/pagmo2/docs/python/algorithms/py_algorithms.html#pygmo.moead
        print('[acmop.py]', '-' * 40, '\n', algo)
        # quit()

        ################################################################
        # MOO Step 3:
        #   Begin optimization
        ################################################################
        # [4.3.4] Begin optimization
        number_of_chromosome = ad.solver.read_swarm_data(self.select_spec)
        number_of_finished_iterations = number_of_chromosome // popsize
        number_of_iterations = 20
        logger = logging.getLogger(__name__)
        # try:
        if True:
            for _ in range(number_of_finished_iterations,
                           number_of_iterations):
                ad.number_of
                msg = '[acmop.py] This is iteration #%d. ' % (_)
                print(msg)
                logger.info(msg)
                pop = algo.evolve(pop)

                msg += 'Write survivors to file. '
                ad.solver.write_swarm_survivor(pop, ad.counter_fitness_return)

                hv = pg.hypervolume(pop)
                quality_measure = hv.compute(
                    ref_point=get_bad_fintess_values(machine_type='PMSM',
                                                     ref=True)
                )  # ref_point must be dominated by the pop's pareto front
                msg += 'Quality measure by hyper-volume: %g' % (
                    quality_measure)
                print('[acmop.py]', msg)
                logger.info(msg)

                utility_moo.my_print(ad, pop, _)
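The restart logic above reduces to a simple pattern: temporarily disable fitness evaluation, write the archived chromosomes back into the population with set_xf, then re-enable evaluation. A condensed, hedged sketch (variable names follow the code above; no finite element side effects are shown):

ad.flag_do_not_evaluate_when_init_pop = True       # fitness() returns dummy values during init
pop = pg.population(prob, size=popsize)
for i, chromosome in enumerate(ad.solver.swarm_data[:popsize]):
    pop.set_xf(i, chromosome[:-3], chromosome[-3:])  # last 3 entries are the objective values
ad.flag_do_not_evaluate_when_init_pop = False      # evaluations are real again from here on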
Example #11
    ################################################################
    udp = Problem_BearinglessSynchronousDesign()
    prob = pg.problem(udp)
    print(prob)

    popsize = 78
    print('-'*40 + '\nPop size is', popsize)

    ################################################################
    # MOO Step 2:
    #   Select algorithm (another option is pg.nsga2())
    ################################################################
    # Remember to keep neighbours below popsize (default is 20); decomposition="bi" is an alternative option
    algo = pg.algorithm(pg.moead(gen=1, weight_generation="grid", decomposition="tchebycheff", 
                                 neighbours=20, 
                                 CR=1, F=0.5, eta_m=20, 
                                 realb=0.9, 
                                 limit=2, preserve_diversity=True)) # https://esa.github.io/pagmo2/docs/python/algorithms/py_algorithms.html#pygmo.moead
    print('-'*40, '\n', algo)

    ################################################################
    # MOO Step 3:
    #   Begin optimization
    ################################################################

    # initialization (will call fitness for popsize times to get an initial population)
    pop = pg.population(prob, size=popsize) 

    number_of_finished_iterations = 0
    number_of_iterations = 2
    # logger = logging.getLogger(__name__)
Example #12
import pygmo as po
import numpy as np
import myUDPnodes
import time

generations = 400
sizePop = 35
#pathsave    = '/home/oscar/Documents/PythonProjects/kuramotoAO/optimizationResults/'
pathsave = '/Users/p277634/python/kaoModel/optimResult/'
filenameTXT = 'MOEAD.txt'
filenameNPZ = 'MOEAD.npz'

print('Running: ', filenameNPZ[:-4])

# algorithm
algo = po.algorithm(po.moead(gen=generations))
algo.set_verbosity(1)
# problem
prob = po.problem(myUDPnodes.KAOnodesMultiObj())  # the module is imported above as myUDPnodes
# population
pop = po.population(prob=prob, size=sizePop)
# evolution
start = time.time()
popE = algo.evolve(pop)
print('time evolution: ', time.time() - start)

# save TXT file with a general description of the optimization run
bestFstr = 'ideal found fit: ' + str(po.ideal(
    popE.get_f())) + '; best fit possible: -1'
bestChamp = 'champion decision vector'
#bestXstr  = 'velocity: ' + str(popE.champion_x[0]) + ', kL:' + str(popE.champion_x[1]),', kG: ' + str(popE.champion_x[2])
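The fragment stops before anything is written to pathsave. A hedged sketch of how the evolved population might be persisted to the NPZ file named above (np.savez and the array names are assumptions, not part of the original script):

np.savez(pathsave + filenameNPZ,
         xpop=popE.get_x(),    # decision vectors of the final population
         fpop=popE.get_f())    # corresponding objective values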