def genetic_algorithm(explr_p):
    prob = pg.problem(GeneticAlgoProblem())
    #sade = pg.sade(gen=1, ftol=1e-20, xtol=1e-20)
    population_size = 5

    if obj_fcn == 'griewank' or dim_x == 3:
        total_evals = 500
    elif obj_fcn == 'shekel' and dim_x == 20:
        total_evals = 5000
    else:
        total_evals = 1000
    generations = total_evals // population_size
    optimizer = pg.cmaes(gen=generations, ftol=1e-20, xtol=1e-20)
    algo = pg.algorithm(optimizer)
    algo.set_verbosity(1)
    pop = pg.population(prob, size=population_size)
    pop = algo.evolve(pop)
    print(pop.champion_f)
    champion_x = pop.champion_x
    uda = algo.extract(pg.cmaes)
    log = np.array(uda.get_log())
    n_fcn_evals = log[:, 1]
    pop_best_at_generation = -log[:, 2]
    evaled_x = None
    evaled_y = pop_best_at_generation

    max_y = [pop_best_at_generation[0]]
    for y in pop_best_at_generation[1:]:
        if y > max_y[-1]:
            max_y.append(y)
        else:
            max_y.append(max_y[-1])

    return evaled_x, evaled_y, max_y, 0
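
Side note (not part of the original snippet): the explicit max_y loop above is a running maximum, which NumPy can compute directly; a minimal equivalent sketch, assuming the same pop_best_at_generation array:

# Running maximum of the per-generation bests, equivalent to the max_y loop above.
max_y = np.maximum.accumulate(pop_best_at_generation).tolist()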
Example #2
def cma_es(problem, population_size, params):
    '''
    Execute the Pygmo CMA-ES algorithm on an
    optimisation problem with the population size
    and parameters specified. The supported CMA-ES
    parameters are:
    * nb_generation: number of generations
    * sigma0: initial step size
    * ftol: stopping criterion on the function tolerance
    * xtol: stopping criterion on the step tolerance

    Parameters
    ----------
    - problem: the problem to optimise. It must comply
        with the Pygmo requirements, i.e. be an
        instance of a UDP class
    - population_size: the size of the population
    - params: dictionary of parameters for the
        CMA-ES algorithm

    Return
    ------
    - log: the log of the optimisation run
    - duration: the total duration of the resolution
        of the problem
    - champion_f: the best fitness found
    - champion_x: the decision vector achieving it
    '''
    # Extract algorithm parameters
    nb_generation = params["nb_generation"]
    sigma0 = params["sigma0"]
    ftol = params["ftol"]
    xtol = params["xtol"]

    algo = pg.algorithm(
        pg.cmaes(gen=nb_generation, sigma0=sigma0, ftol=ftol, xtol=xtol))
    algo.set_verbosity(1)
    solution = pg.population(problem, size=population_size, b=None)
    startt = datetime.now()
    solution = algo.evolve(solution)
    duration = (datetime.now() - startt)
    uda = algo.extract(pg.cmaes)
    log = uda.get_log()

    return (log, duration, solution.champion_f, solution.champion_x)
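
A minimal usage sketch for the cma_es() wrapper above (not from the original source; it assumes pygmo's built-in rosenbrock test problem and the imports already used by the wrapper):

import pygmo as pg

# Hypothetical call: 10-dimensional Rosenbrock, 20 individuals, 200 generations.
problem = pg.problem(pg.rosenbrock(dim=10))
params = {"nb_generation": 200, "sigma0": 0.5, "ftol": 1e-8, "xtol": 1e-8}
log, duration, champion_f, champion_x = cma_es(problem, population_size=20, params=params)
print(duration, champion_f)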
Example #3
def run_example1(impulses=4):
    import pykep as pk
    import pygmo as pg
    import numpy as np
    from matplotlib import pyplot as plt
    from pykep.examples import add_gradient, algo_factory

    # problem
    udp = add_gradient(pk.trajopt.pl2pl_N_impulses(
        start=pk.planet.jpl_lp('earth'),
        target=pk.planet.jpl_lp('venus'),
        N_max=impulses,
        tof=[100., 1000.],
        vinf=[0., 4],
        phase_free=False,
        multi_objective=False,
        t0=[pk.epoch(0), pk.epoch(1000)]),
                       with_grad=False)
    prob = pg.problem(udp)

    # algorithm
    uda = pg.cmaes(gen=1000, force_bounds=True)
    algo = pg.algorithm(uda)
    algo.set_verbosity(10)

    # population
    pop = pg.population(prob, 20)

    # solve the problem
    pop = algo.evolve(pop)

    # inspect the solution
    udp.udp_inner.plot(pop.champion_x)
    plt.ion()
    plt.show()

    udp.udp_inner.pretty(pop.champion_x)
Example #4
    def minimize(self,
                 fun,
                 bounds,
                 guess=None,
                 sdevs=0.3,
                 rg=Generator(MT19937()),
                 store=None):
        gen = int(self.max_eval_num(store) / self.popsize + 1)
        algo = pg.algorithm(
            pg.cmaes(gen=gen,
                     force_bounds=True,
                     sigma0=np.mean(sdevs),
                     seed=int(rg.uniform(0, 2**32 - 1))))
        udp = pygmo_udp(fun, bounds)
        prob = pg.problem(udp)
        pop = pg.population(prob, self.popsize)
        if guess is not None:
            # Seed the initial population around the guess, clipped to the bounds.
            scale = np.multiply(0.5 * (bounds.ub - bounds.lb), sdevs)
            for i in range(self.popsize):
                xi = np.random.normal(guess, scale)
                xi = np.maximum(np.minimum(xi, bounds.ub), bounds.lb)
                pop.set_x(i, xi)
        pop = algo.evolve(pop)
        return pop.champion_x, pop.champion_f, pop.problem.get_fevals()
Example #5
def cmaes(objective_function,
          gen=1000,
          cc=-1,
          cs=-1,
          c1=-1,
          cmu=-1,
          sigma0=0.5,
          ftol=1e-06,
          xtol=1e-06,
          memory=False,
          force_bounds=True,
          pop_size=15):
    """
    Covariance Matrix Evolutionary Strategy (CMA-ES)

    Parameters
    - gen (int) – number of generations
    - cc (float) – backward time horizon for the evolution path (by default is automatically assigned)
    - cs (float) – makes partly up for the small variance loss in case the indicator is zero (by default is 
    automatically assigned)
    - c1 (float) – learning rate for the rank-one update of the covariance matrix (by default is automatically assigned)
    - cmu (float) – learning rate for the rank-mu update of the covariance matrix (by default is automatically assigned)
    - sigma0 (float) – initial step-size
    - ftol (float) – stopping criteria on the x tolerance
    - xtol (float) – stopping criteria on the f tolerance
    - memory (bool) – when true the adapted parameters are not reset between successive calls to the evolve method
    - force_bounds (bool) – when true the box bounds are enforced. The fitness will never be called outside the bounds
    but the covariance matrix adaptation mechanism will worsen
    - pop_size (int)  – the number of individuals
    
    """
    logs = []
    problem = pg.problem(objective_function)
    algorithm = pg.algorithm(
        pg.cmaes(gen=gen,
                 cc=cc,
                 cs=cs,
                 c1=c1,
                 cmu=cmu,
                 sigma0=sigma0,
                 ftol=ftol,
                 xtol=xtol,
                 force_bounds=force_bounds,
                 memory=memory))
    algorithm.set_verbosity(50)
    solution = pg.population(prob=problem, size=pop_size, b=None, seed=None)
    solution = algorithm.evolve(solution)
    """
    get_log output is a list of tuples with the following structure:
    - Gen (int), generation number
    - Fevals (int), number of functions evaluation made
    - Best (float), the best fitness function currently in the population
    - dx (float), the norm of the distance to the population mean of the mutant vectors
    - df (float), the population flatness evaluated as the distance between the fitness of the best and of the worst individual
    - sigma (float), the current step-size
    """

    logs = np.array(algorithm.extract(pg.cmaes).get_log())[:, (
        1, 2)]  # taking only function evaluations and best fitness

    algo_ = algorithm.get_name()
    function_ = objective_function.get_name()

    return {
        'champion solution': solution.champion_f,
        'champion coordinates': solution.champion_x,
        'log': logs,
        'algorithm': algo_,
        'problem': function_
    }
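
A minimal usage sketch for the cmaes() wrapper above (illustrative only; SphereUDP and its names are assumptions, not from the original source):

# Hypothetical user-defined problem: a 5-dimensional sphere function exposing
# fitness(), get_bounds() and the get_name() the wrapper calls.
class SphereUDP:
    def fitness(self, x):
        return [float(sum(xi * xi for xi in x))]

    def get_bounds(self):
        return ([-5.0] * 5, [5.0] * 5)

    def get_name(self):
        return "sphere-5d"

result = cmaes(SphereUDP(), gen=200, pop_size=20)
fevals, best = result['log'][:, 0], result['log'][:, 1]  # the two columns kept above
print(result['champion solution'], result['problem'])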
Example #6

import pygmo as po
import myUDPkaos
import time


generations = 500
sizePop     = 25
library     = 1

#pathsave    = '/home/oscar/Documents/PythonProjects/kuramotoAO/optimizationResults/'
pathsave    = '/Users/p277634/python/kaoModel/optimResult/'
filenameTXT = 'CMAES_constrins.txt'
filenameNPZ = 'CMAES_constrins.npz'


# algorithm
algo   = po.algorithm(po.cmaes(gen=generations,
                               force_bounds=True,
                               ftol=1e-4,
                               xtol=1e-4))
algo.set_verbosity(5)

# problem
prob   = po.problem(myUDPkaos.KAOsimpleSimuConstr(lib=library))
# population
pop    = po.population(prob=prob,size=sizePop)

# evolution
start  = time.time()
popE   = algo.evolve(pop)
print('time evolution: ',time.time()-start)


# save TXT file with general description of the optimization
Example #7
"""
import pygmo as po
import numpy as np
import myUDP
import time

generations = 400
sizePop = 15
#pathsave    = '/home/oscar/Documents/PythonProjects/kuramotoAO/optimizationResults/'
pathsave = '/Users/p277634/python/kaoModel/optimResult/'
filenameTXT = 'CMAES_cabral.txt'
filenameNPZ = 'CMAES_cabral.npz'

# algorithm
algo = po.algorithm(
    po.cmaes(gen=generations, force_bounds=True, ftol=1e-4, xtol=1e-4))
algo.set_verbosity(5)

# problem
prob = po.problem(myUDP.kMcabral())
# population
pop = po.population(prob=prob, size=sizePop)

# evolution
start = time.time()
popE = algo.evolve(pop)
print('time evolution: ', time.time() - start)

# save TXT file with general description of the optimization
bestFstr = 'champion fitness: ' + str(
    popE.champion_f[0]) + '; best fit possible: -1'
Example #8
    udp = mga(seq=seq,
              t0=[-1000., 0.],
              tof=[[30, 400], [100, 470], [30, 400], [400, 2000], [1000, 6000]],
              vinf=3.,
              tof_encoding='direct',
              orbit_insertion=True,
              e_target=0.98,
              rp_target=108950000)

    udp = mga(seq=seq,
              t0=[-1000., 0.],
              tof=7000.,
              vinf=3.,
              tof_encoding='eta',
              orbit_insertion=True,
              e_target=0.98,
              rp_target=108950000)

    #udp = mga(seq=seq, t0=[-1000., 0.], tof=[[130,200], [430,470], [30, 70], [900, 1200], [4000, 5000]], vinf=0., alpha_encoding=False)
    prob = pg.problem(udp)
    uda = pg.cmaes(1500, force_bounds=True, sigma0=0.5, ftol=1e-4)
    #uda = pg.sade(4500)
    algo = pg.algorithm(uda)
    algo.set_verbosity(10)
    res = list()
    # for i in range(100):
    pop = pg.population(prob, 100)
    pop = algo.evolve(pop)
    res.append(pop.champion_f)
# In[72]:


pop = pg.population(prob, 1)
ax = prob.plot(pop.champion_x)
plt.show()
print(prob.pretty(pop.champion_x))


# Evolution and inspection of a good solution

# In[73]:


algo = pg.algorithm(pg.cmaes(gen=2500, force_bounds=True))
# without force_bounds=True I was getting errors
l = list()
for i in range(10):
    pop = pg.population(prob, 10)
    pop = algo.evolve(pop)
    print(pop.champion_f)
    l.append((pop.champion_f, pop.champion_x))
l = sorted(l, key=lambda x: x[0])
print(l)
print(l[0])


# In[74]:

Example #10
import pygmo as pg
import numpy as np

prob = pg.problem(pg.cec2014(prob_id=5, dim=10))
algo = pg.algorithm(pg.cmaes(gen=100))
archi = pg.archipelago(8, algo=algo, prob=prob, pop_size=20)
archi.evolve(1000)
archi.wait()
res = [isl.get_population().champion_f for isl in archi]
res = np.array(res)
print(f"Problem {5}: {res.mean()}")
Example #11
    def _setup_algorithm(self, parameters):
        alg = pg.cmaes(**self._alg_attrs)
        return alg
Example #12
    def __call__(self, function):

        scanner_options = {
            'sade':
            dict(gen=self.gen,
                 variant=self.variant,
                 variant_adptv=self.variant_adptv,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 memory=self.memory,
                 seed=self.seed),
            'gaco':
            dict(gen=self.gen,
                 ker=self.ker,
                 q=self.q,
                 oracle=self.oracle,
                 acc=self.acc,
                 threshold=self.threshold,
                 n_gen_mark=self.n_gen_mark,
                 impstop=self.impstop,
                 evalstop=self.evalstop,
                 focus=self.focus,
                 memory=self.memory,
                 seed=self.seed),
            'maco':
            dict(gen=self.gen,
                 ker=self.ker,
                 q=self.q,
                 threshold=self.threshold,
                 n_gen_mark=self.n_gen_mark,
                 evalstop=self.evalstop,
                 focus=self.focus,
                 memory=self.memory,
                 seed=self.seed),
            'gwo':
            dict(gen=self.gen, seed=self.seed),
            'bee_colony':
            dict(gen=self.gen, limit=self.limit, seed=self.seed),
            'de':
            dict(gen=self.gen,
                 F=self.F,
                 CR=self.CR,
                 variant=self.variant,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 seed=self.seed),
            'sea':
            dict(gen=self.gen, seed=self.seed),
            'sga':
            dict(gen=self.gen,
                 cr=self.cr,
                 eta_c=self.eta_c,
                 m=self.m,
                 param_m=self.param_m,
                 param_s=self.param_s,
                 crossover=self.crossover,
                 mutation=self.mutation,
                 selection=self.selection,
                 seed=self.seed),
            'de1220':
            dict(gen=self.gen,
                 allowed_variants=self.allowed_variants,
                 variant_adptv=self.variant_adptv,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 memory=self.memory,
                 seed=self.seed),
            'cmaes':
            dict(gen=self.gen,
                 cc=self.cc,
                 cs=self.cs,
                 c1=self.c1,
                 cmu=self.cmu,
                 sigma0=self.sigma0,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 memory=self.memory,
                 force_bounds=self.force_bounds,
                 seed=self.seed),
            'moead':
            dict(gen=self.gen,
                 weight_generation=self.weight_generation,
                 decomposition=self.decomposition,
                 neighbours=self.neighbours,
                 CR=self.CR,
                 F=self.F,
                 eta_m=self.eta_m,
                 realb=self.realb,
                 limit=self.limit,
                 preserve_diversity=self.preserve_diversity,
                 seed=self.seed),
            'compass_search':
            dict(max_fevals=self.max_fevals,
                 start_range=self.start_range,
                 stop_range=self.stop_range,
                 reduction_coeff=self.reduction_coeff),
            'simulated_annealing':
            dict(Ts=self.Ts,
                 Tf=self.Tf,
                 n_T_adj=self.n_T_adj,
                 n_range_adj=self.n_range_adj,
                 bin_size=self.bin_size,
                 start_range=self.start_range,
                 seed=self.seed),
            'pso':
            dict(gen=self.gen,
                 omega=self.omega,
                 eta1=self.eta1,
                 eta2=self.eta2,
                 max_vel=self.max_vel,
                 variant=self.variant,
                 neighb_type=self.neighb_type,
                 neighb_param=self.neighb_param,
                 memory=self.memory,
                 seed=self.seed),
            'pso_gen':
            dict(gen=self.gen,
                 omega=self.omega,
                 eta1=self.eta1,
                 eta2=self.eta2,
                 max_vel=self.max_vel,
                 variant=self.variant,
                 neighb_type=self.neighb_type,
                 neighb_param=self.neighb_param,
                 memory=self.memory,
                 seed=self.seed),
            'nsga2':
            dict(gen=self.gen,
                 cr=self.cr,
                 eta_c=self.eta_c,
                 m=self.m,
                 eta_m=self.eta_m,
                 seed=self.seed),
            'nspso':
            dict(gen=self.gen,
                 omega=self.omega,
                 c1=self.c1,
                 c2=self.c2,
                 chi=self.chi,
                 v_coeff=self.v_coeff,
                 leader_selection_range=self.leader_selection_range,
                 diversity_mechanism=self.diversity_mechanism,
                 memory=self.memory,
                 seed=self.seed),
            'mbh':
            dict(algo=self.algo,
                 stop=self.stop,
                 perturb=self.perturb,
                 seed=self.seed),
            'cstrs_self_adaptive':
            dict(iters=self.iters, algo=self.algo, seed=self.seed),
            'ihs':
            dict(gen=self.gen,
                 phmcr=self.phmcr,
                 ppar_min=self.ppar_min,
                 ppar_max=self.ppar_max,
                 bw_min=self.bw_min,
                 bw_max=self.bw_max,
                 seed=self.seed),
            'xnes':
            dict(gen=self.gen,
                 eta_mu=self.eta_mu,
                 eta_sigma=self.eta_sigma,
                 eta_b=self.eta_b,
                 sigma0=self.sigma0,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 memory=self.memory,
                 force_bounds=self.force_bounds,
                 seed=self.seed)
        }

        if self.log_data:
            xl = []
            yl = []

        log_data = self.log_data

        #
        class interf_function:
            def __init__(self, dim):
                self.dim = dim

            def fitness(self, x):
                x = np.expand_dims(x, axis=0)
                y = function(x)
                # x = x[0]
                y = y.tolist()
                if log_data:
                    xl.append(x)
                    yl.append(y)
                # print (x, y[0])
                return y[0]

            if function.is_differentiable():

                def gradient(self, x):
                    x = np.expand_dims(x, axis=0)
                    g = function(x)
                    g = g.tolist()
                    return g[0]

            def get_bounds(self):
                lb = []
                ub = []
                bounds = function.get_ranges()
                # warning
                # check for infinities
                for i in range(len(bounds)):
                    lb.append(bounds[i, 0])
                    ub.append(bounds[i, 1])
                r = (np.array(lb), np.array(ub))
                return r

        # I need to call pygmo functions directly
        prob = pg.problem(interf_function(function))

        # print (prob.get_thread_safety())

        if self.scanner == "sade":
            # I need a dictionary with algorithms and options
            algo = pg.algorithm(pg.sade(**scanner_options[self.scanner]))
        elif self.scanner == "gaco":
            algo = pg.algorithm(pg.gaco(**scanner_options[self.scanner]))
        # elif self.scanner == "maco": # is not implemented though in webpage
        #                               looks it is
        # algo = pg.algorithm(pg.maco(**scanner_options[self.scanner]))
        elif self.scanner == "gwo":
            algo = pg.algorithm(pg.gwo(**scanner_options[self.scanner]))
        elif self.scanner == "bee_colony":
            algo = pg.algorithm(pg.bee_colony(**scanner_options[self.scanner]))
        elif self.scanner == "de":
            algo = pg.algorithm(pg.de(**scanner_options[self.scanner]))
        elif self.scanner == "sea":
            algo = pg.algorithm(pg.sea(**scanner_options[self.scanner]))
        elif self.scanner == "sga":
            algo = pg.algorithm(pg.sga(**scanner_options[self.scanner]))
        elif self.scanner == "de1220":
            algo = pg.algorithm(pg.de1220(**scanner_options[self.scanner]))
        elif self.scanner == "cmaes":
            algo = pg.algorithm(pg.cmaes(**scanner_options[self.scanner]))
        # elif self.scanner == "moead": #multiobjective algorithm
        #  algo = pg.algorithm(pg.moead(**scanner_options[self.scanner]))
        elif self.scanner == "compass_search":
            algo = pg.algorithm(
                pg.compass_search(**scanner_options[self.scanner]))
        elif self.scanner == 'simulated_annealing':
            algo = pg.algorithm(
                pg.simulated_annealing(**scanner_options[self.scanner]))
        elif self.scanner == 'pso':
            algo = pg.algorithm(pg.pso(**scanner_options[self.scanner]))
        elif self.scanner == 'pso_gen':
            algo = pg.algorithm(pg.pso_gen(**scanner_options[self.scanner]))
        # elif self.scanner == 'nsga2': #multiobjective algorithm
        #  algo = pg.algorithm(pg.nsga2(**scanner_options[self.scanner]))
        # elif self.scanner == 'nspso':  # not implemented here, though the
        #                                  webpage suggests it is available
        #  algo = pg.algorithm(pg.nspso(**scanner_options[self.scanner]))
        elif self.scanner == 'mbh':
            if scanner_options[self.scanner]['algo'] == 'de':
                algo = pg.algorithm(
                    pg.mbh(pg.algorithm(pg.de(**scanner_options['de']))))
        # elif self.scanner == 'ihs': #does not work
        #  algo = pg.algorithm(ihs(**scanner_options[self.scanner]))
        # elif self.scanner == 'xnes': #does not work
        #  algo = pg.algorithm(xnes(**scanner_options[self.scanner]))
        # uda = algo.extract(xnes)
        else:
            print(
                'The ' + self.scanner + ' algorithm is not implemented. The '
                'list of algorithms available is', algorithms)
            sys.exit()

        # set verbosity flag
        if self.verbose > 1:
            algo.set_verbosity(self.verbose)

        pop = pg.population(prob, self.size)

        if self.verbose > 9:
            print('prob', prob)

        opt = algo.evolve(pop)

        if self.verbose > 9:
            print('algo', algo)

        # best_x = np.expand_dims(opt.champion_x, axis=0)
        # best_fitness = np.expand_dims(opt.get_f()[opt.best_idx()], axis=0)
        best_x = np.expand_dims(opt.champion_x, axis=0)
        best_fitness = np.expand_dims(opt.champion_f, axis=0)

        if self.verbose > 0:
            print('best fit:', best_x, best_fitness)

        if self.log_data:
            x = np.squeeze(xl, axis=(1, ))
            y = np.squeeze(yl, axis=(2, ))

        if self.log_data:
            return (x, y)
        else:
            return (best_x, best_fitness)
Example #13
    def create_variants(self, n, desc, category, constructor):
        def assign_2nd_alg(archipelago, algo):
            if category == 'rings':
                for island in archipelago.topology.every_other_island():
                    island.algorithm = algo
            elif hasattr(archipelago.topology, 'endpoints'):
                for island in archipelago.topology.endpoints:
                    island.algorithm = algo
            elif isinstance(archipelago.topology, FullyConnectedTopology):
                for island in islice(archipelago.topology.islands, None, None, 2):
                    island.algorithm = algo
            return archipelago

        def assign_algs(archipelago, algos):
            '''
            Evenly partitions and assigns algorithms to islands.
            '''
            for island, algo in zip(archipelago.topology.islands, cycle(algos)):
                island.algorithm = algo
            return archipelago

        g = self.generations

        self.new_topology(
          desc='{}, de'.format(desc),
          category=category,
          algorithms=['de'],
          archipelago=Archipelago(constructor(de(gen=g),n)))
        self.new_topology(
          desc='{}, de1220'.format(desc),
          category=category,
          algorithms=['de1220'],
          archipelago=Archipelago(constructor(de1220(gen=g),n)))
        self.new_topology(
          desc='{}, sade'.format(desc),
          category=category,
          algorithms=['sade'],
          archipelago=Archipelago(constructor(sade(gen=g),n)))
        self.new_topology(
          desc='{}, ihs'.format(desc),
          category=category,
          algorithms=['ihs'],
          archipelago=Archipelago(constructor(ihs(gen=g),n)))
        self.new_topology(
          desc='{}, pso'.format(desc),
          category=category,
          algorithms=['pso'],
          archipelago=Archipelago(constructor(pso(gen=g),n)))
        self.new_topology(
          desc='{}, pso_gen'.format(desc),
          category=category,
          algorithms=['pso_gen'],
          archipelago=Archipelago(constructor(pso_gen(gen=g),n)))
        # self.new_topology(
        #   desc='{}, simulated_annealing'.format(desc),
        #   category=category,
        #   algorithms=['simulated_annealing'],
        #   archipelago=Archipelago(constructor(simulated_annealing(),n)))
        self.new_topology(
          desc='{}, bee_colony'.format(desc),
          category=category,
          algorithms=['bee_colony'],
          archipelago=Archipelago(constructor(bee_colony(gen=g),n)))
        self.new_topology(
          desc='{}, cmaes'.format(desc),
          category=category,
          algorithms=['cmaes'],
          archipelago=Archipelago(constructor(cmaes(gen=g),n)))
        self.new_topology(
          desc='{}, nsga2'.format(desc),
          category=category,
          algorithms=['nsga2'],
          archipelago=Archipelago(constructor(nsga2(gen=g),n)))
        self.new_topology(
          desc='{}, xnes'.format(desc),
          category=category,
          algorithms=['xnes'],
          archipelago=Archipelago(constructor(xnes(gen=g),n)))
        # de + nelder mead combo
        self.new_topology(
          desc='{}, de+nelder mead'.format(desc),
          category=category,
          algorithms=['de','neldermead'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), self.make_nelder_mead()))
        # de + praxis combo
        self.new_topology(
          desc='{}, de+praxis'.format(desc),
          category=category,
          algorithms=['de','praxis'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), self.make_praxis()))
        # de + nsga2 combo
        self.new_topology(
          desc='{}, de+nsga2'.format(desc),
          category=category,
          algorithms=['de','nsga2'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), nsga2(gen=g)))
        # de + de1220 combo
        self.new_topology(
          desc='{}, de+de1220'.format(desc),
          category=category,
          algorithms=['de','de1220'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), de1220(gen=g)))
        # de + sade combo
        self.new_topology(
          desc='{}, de+sade'.format(desc),
          category=category,
          algorithms=['de','sade'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), sade(gen=g)))
        # de + pso combo
        self.new_topology(
          desc='{}, de+pso'.format(desc),
          category=category,
          algorithms=['de','pso'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), pso(gen=g)))


        # extra configurations for fully connected topology
        if constructor is self.factory.createFullyConnected:
            self.new_topology(
                desc='{}, de+pso+praxis'.format(desc),
                category=category,
                algorithms=['de','pso','praxis'],
                archipelago=assign_algs(Archipelago(constructor(de(gen=g),n)), (de(gen=g), pso(gen=g), self.make_praxis())))
            self.new_topology(
                desc='{}, de+pso+praxis+nsga2'.format(desc),
                category=category,
                algorithms=['de','pso','praxis','nsga2'],
                archipelago=assign_algs(Archipelago(constructor(de(gen=g),n)), (de(gen=g), pso(gen=g), self.make_praxis(), nsga2(gen=g))))
            self.new_topology(
                desc='{}, de+pso+praxis+cmaes'.format(desc),
                category=category,
                algorithms=['de','pso','praxis','cmaes'],
                archipelago=assign_algs(Archipelago(constructor(de(gen=g),n)), (de(gen=g), pso(gen=g), self.make_praxis(), cmaes(gen=g))))
            self.new_topology(
                desc='{}, de+pso+praxis+xnes'.format(desc),
                category=category,
                algorithms=['de','pso','praxis','xnes'],
                archipelago=assign_algs(Archipelago(constructor(de(gen=g),n)), (de(gen=g), pso(gen=g), self.make_praxis(), xnes(gen=g))))
Example #14
def fit_data(data, init=None, optlib="scipy", algo="L-BFGS-B", grad=True, printfinal=None, printsteps=None):
    if printfinal is None:
        printfinal = True
    if init is None:
        init = data.init

    print(init)
    print(like_model(init, data))

    data.printsteps = printsteps
    data.stepnum = 0

    if optlib == "scipy":
        def neg_like_model(params, pdata):
            return -like_model(params, pdata)

        tinit = time.time()
        result = optimize.minimize(neg_like_model, init, args=(data,), method=algo, bounds=data.bounds,
                                   options={'disp':True})
        paramsbest = result.x

    elif optlib == "pygmo":

        boundslower = [data.bounds[i][0] for i in range(len(data.bounds))]
        boundsupper = [data.bounds[i][1] for i in range(len(data.bounds))]

        class profit_udp:
            def fitness(self, x):
                return [-like_model(x, data=data)]

            def get_bounds(self):
                return boundslower, boundsupper

        class profit_udp_grad:
            def fitness(self, x):
                return [-like_model(x, data=data)]

            def get_bounds(self):
                return boundslower, boundsupper

            def gradient(self, x):
                return pg.estimate_gradient(lambda x: self.fitness(x), x)

        algocmaes = algo == "cmaes"
        algonlopt = not algocmaes
        if algocmaes:
            uda = pg.cmaes()
        elif algonlopt:
            uda = pg.nlopt(algo)

        algo = pg.algorithm(uda)
#        algo.extract(pg.nlopt).ftol_rel = 1e-6
        if algonlopt:
            algo.extract(pg.nlopt).ftol_abs = 1e-3

        algo.set_verbosity(0)

        if grad:
            prob = pg.problem(profit_udp_grad())
        else:
            prob = pg.problem(profit_udp())
        pop = pg.population(prob=prob, size=0)
        if algocmaes:
            npop = 5
            npushed = 0
            while npushed < npop:
                try:
                    pop.push_back(init + np.random.normal(np.zeros(np.sum(data.tofit)),
                                  data.sigmas[data.tofit]))
                    npushed += 1
                except:
                    pass
        else:
            pop.push_back(init)
        tinit = time.time()
        result = algo.evolve(pop)
        paramsbest = result.champion_x
    else:
        raise ValueError("Unknown optimization library " + optlib)

    timerun = time.time() - tinit

    # TODO: These should be methods in the data object
    paramstransformed = paramsbest * data.sigmas[data.tofit]
    paramslinear = copy.copy(paramstransformed)
    paramslinear[data.tolog[data.tofit]] = 10 ** paramslinear[data.tolog[data.tofit]]

    if printfinal:
        print("Elapsed time: {:.1f}".format(timerun))
        verbosity = data.verbose
        data.verbose = True
        print("Final likelihood:")
        like_model(paramsbest, data)
        data.verbose = verbosity
        print("Parameter names: " + ",".join(["{:10s}".format(i) for i in data.names[data.tofit]]))
        print("Scaled parameters: " + ",".join(["{:.4e}".format(i) for i in paramsbest]))
        print("Parameters (logged): " + ",".join(["{:.4e}".format(i) for i in paramstransformed]))
        print("Parameters (unlogged): " + ",".join(["{:.4e}".format(i) for i in paramslinear]))

    return paramsbest, paramstransformed, paramslinear, timerun, data