Example #1
    def create_variants(self, n, desc, category, constructor):
        def assign_2nd_alg(archipelago, algo):
            if category == 'rings':
                for island in archipelago.topology.every_other_island():
                    island.algorithm = algo
            elif hasattr(archipelago.topology, 'endpoints'):
                for island in archipelago.topology.endpoints:
                    island.algorithm = algo
            elif isinstance(archipelago.topology, FullyConnectedTopology):
                for island in islice(archipelago.topology.islands, None, None, 2):
                    island.algorithm = algo
            return archipelago

        def assign_algs(archipelago, algos):
            '''
            Evenly partitions and assigns algorithms to islands.
            '''
            for island,algo in zip(archipelago.topology.islands, cycle(algos)):
                island.algorithm = algo

        g = self.generations

        self.new_topology(
          desc='{}, de'.format(desc),
          category=category,
          algorithms=['de'],
          archipelago=Archipelago(constructor(de(gen=g),n)))
        self.new_topology(
          desc='{}, de1220'.format(desc),
          category=category,
          algorithms=['de1220'],
          archipelago=Archipelago(constructor(de1220(gen=g),n)))
        self.new_topology(
          desc='{}, sade'.format(desc),
          category=category,
          algorithms=['sade'],
          archipelago=Archipelago(constructor(sade(gen=g),n)))
        self.new_topology(
          desc='{}, bee_colony'.format(desc),
          category=category,
          algorithms=['bee_colony'],
          archipelago=Archipelago(constructor(bee_colony(gen=g),n)))
        # de + nelder mead combo
        self.new_topology(
          desc='{}, de+nelder mead'.format(desc),
          category=category,
          algorithms=['de','neldermead'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), self.make_nelder_mead()))
        # de + praxis combo
        self.new_topology(
          desc='{}, de+praxis'.format(desc),
          category=category,
          algorithms=['de','praxis'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), self.make_praxis()))
        # de + sade combo
        self.new_topology(
          desc='{}, de+sade'.format(desc),
          category=category,
          algorithms=['de','sade'],
          archipelago=assign_2nd_alg(Archipelago(constructor(de(gen=g),n)), sade(gen=g)))
Example #2
def test_non_population_based_migration():
    import pygmo as pg
    from pygmo import de, nlopt, rosenbrock
    from sabaody.topology import TopologyFactory
    topology_factory = TopologyFactory(island_size=5, migrant_pool_size=5)
    topology = topology_factory.createBidirChain(de, number_of_islands=2)
    assert len(topology.island_ids) == 2
    assert len(topology.endpoints) == 2
    de_island = pg.island(algo=de(gen=10),
                          prob=rosenbrock(3),
                          size=topology.islands[0].size)
    nm = nlopt('neldermead')
    nm.selection = 'random'
    nm.replacement = 'random'
    nm_island = pg.island(algo=nm,
                          prob=rosenbrock(3),
                          size=topology.islands[1].size)

    from sabaody.migration import BestSPolicy, FairRPolicy
    selection_policy = BestSPolicy(migration_rate=2)
    replacement_policy = FairRPolicy()
    from sabaody.migration import MigrationPolicyEachToAll
    migration_policy = MigrationPolicyEachToAll()

    # get the candidates from the de island
    p_de = de_island.get_population()
    candidates, candidate_f = selection_policy.select(p_de)
    # try migrating to the nelder mead island
    p_nm = nm_island.get_population()
    replacement_policy.replace(p_nm, candidates, candidate_f)
    nm_island.set_population(p_nm)

    # finally, try to evolve it
    new_pop = nm_island.evolve(n=10)
Example #3
def get_equilibrium(nn_controller, random_seed=0, mass=0.38905):
    class controller_equilibrium:
        def __init__(self, nn_controller):
            self.nn_controller = nn_controller

        def fitness(self, state):
            g0 = 9.81
            return [
                np.linalg.norm(
                    self.nn_controller.compute_control(state) -
                    np.array([g0 * mass, 0]))
            ]

        def get_bounds(self):
            return ([-1, 0, -1, 0, 0], [1, 0, 1, 0, 0])

    # optimisation parameters
    # increase if necessary to ensure convergence
    n_generations = 700
    n_individuals = 100

    prob = pygmo.problem(controller_equilibrium(nn_controller))
    algo = pygmo.algorithm(
        pygmo.de(gen=n_generations, xtol=0, ftol=0, seed=random_seed))

    pop = pygmo.population(prob, size=n_individuals, seed=random_seed)
    pop.push_back([0, 0, 0, 0, 0])
    algo.set_verbosity(100)
    pop = algo.evolve(pop)

    return pop.champion_x
Example #4
def run_island(island):
    import pygmo as pg
    from multiprocessing import cpu_count
    from pymemcache.client.base import Client
    mc_client = Client((island.mc_host, island.mc_port))
    #udp = island.problem_factory()

    algorithm = pg.algorithm(pg.de())
    #problem = pg.problem(udp)
    problem = island.problem_factory()
    # TODO: configure pop size
    i = pg.island(algo=algorithm, prob=problem, size=20)

    mc_client.set(island.domain_qualifier('island', str(island.id), 'status'),
                  'Running', 10000)
    mc_client.set(island.domain_qualifier('island', str(island.id), 'n_cores'),
                  str(cpu_count()), 10000)
    #print('Starting island {} with {} cpus'.format(str(i.id), str(cpu_count())))

    i.evolve()
    i.wait()

    import socket
    hostname = socket.gethostname()
    ip = [
        l for l in ([
            ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
            if not ip.startswith("127.")
        ][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())
                 for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
                 ][0][1]]) if l
    ][0][0]
    return (ip, hostname, i.get_population().problem.get_fevals())
Example #5
    def full_index_refine(self,
                          rec_basis,
                          num_ap,
                          n_isl=20,
                          pop_size=50,
                          gen_num=2000,
                          pos_tol=(0.007, 0.014, 0.06),
                          rb_tol=0.12):
        """
        Return refinement problems archipelago

        rec_basis - preliminary reciprocal lattice basis vectors matrix
        num_ap = [num_ap_x, num_ap_y] - convergent beam numerical apertures in x- and y-axes
        n_isl - number of islands of one frame
        pop_size - population size
        gen_num - maximum generations number of the refinement algorithm
        pos_tol - relative sample position tolerance
        rb_tol - lattice basis vectors matrix tolerance
        """
        archi = pygmo.archipelago()
        for frame_idx, frame_strks in enumerate(iter(self)):
            frame_basis = rec_basis.dot(
                self.exp_set.rotation_matrix(frame_idx))
            prob = frame_strks.rot_index_refine(rec_basis=frame_basis,
                                                num_ap=num_ap,
                                                pos_tol=pos_tol,
                                                rb_tol=rb_tol)
            pops = [
                pygmo.population(size=pop_size, prob=prob, b=pygmo.mp_bfe())
                for _ in range(n_isl)
            ]
            for pop in pops:
                archi.push_back(algo=pygmo.de(gen_num), pop=pop)
        return archi
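A short usage sketch (not part of the original class) showing how the returned archipelago is typically driven; the pygmo calls mirror the ones used above, while a stand-in rosenbrock problem replaces the project's rot_index_refine() problems.

import pygmo

prob = pygmo.problem(pygmo.rosenbrock(4))        # stand-in for rot_index_refine()
archi = pygmo.archipelago()
for _ in range(4):                               # stand-in for the n_isl islands per frame
    pop = pygmo.population(prob=prob, size=20)
    archi.push_back(algo=pygmo.de(100), pop=pop)

archi.evolve()
archi.wait_check()                               # re-raises any exception from the islands
champ_f = archi.get_champions_f()
champ_x = archi.get_champions_x()
best = min(range(len(champ_f)), key=lambda k: champ_f[k][0])
print('best fitness over all islands:', champ_f[best][0])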
Example #6
def test_bidir_chain():
    '''
    Tests migration on a bidirectional chain.
    '''
    from sabaody.topology import TopologyFactory
    import pygmo as pg

    domain_qual = partial(getQualifiedName,
                          'com.how2cell.sabaody.test_bidir_chain_migration')
    problem = pg.problem(pg.rosenbrock(3))
    topology_factory = TopologyFactory(island_size=3,
                                       domain_qualifier=domain_qual,
                                       mc_host='localhost',
                                       mc_port=11211)
    topology = topology_factory.createBidirChain(pg.de(gen=10),
                                                 number_of_islands=5)
    assert len(topology.island_ids) == 5
    assert len(topology.endpoints) == 2

    from sabaody.migration_central import CentralMigrator, start_migration_service
    from sabaody.migration import MigrationPolicyEachToAll, BestSPolicy, FairRPolicy, sort_by_fitness
    try:
        process = start_migration_service()
        sleep(2)
        migrator = CentralMigrator(MigrationPolicyEachToAll(),
                                   BestSPolicy(migration_rate=1),
                                   FairRPolicy(), 'http://localhost:10100')

        from collections import OrderedDict
        for k in (1, 2):
            islands = OrderedDict(
                (i.id, pg.island(algo=i.algorithm, prob=problem, size=i.size))
                for i in topology.islands)
            for island_id in islands.keys():
                migrator.defineMigrantPool(island_id, 3)
            if k == 1:
                # test forward migration
                seed_first(islands, topology)
            else:
                # test reverse migration
                seed_last(islands, topology)

            for n in range(1, 5 + 1):
                assert count_hits(islands.values()) == n
                # perform migration
                for island_id, i in islands.items():
                    migrator.sendMigrants(island_id, i, topology)
                for island_id, i in islands.items():
                    deltas, src_ids = migrator.receiveMigrants(
                        island_id, i, topology)
            migrator.purgeAll()
    finally:
        process.terminate()
Example #7
def test1():
    algo = pg.algorithm(pg.de(gen=1000, seed=126))
    prob = pg.problem(toy_problem())
    pop = pg.population(prob=prob, size=10)
    print(pop.champion_f)
    pop = algo.evolve(pop)
    print(pop.champion_f)
    # fine up to this point

    archi = pg.archipelago(n=6, algo=algo, prob=prob, pop_size=70)
    archi.evolve()
    archi.wait_check()
Example #8
def get_island(evaluate, params, hooks):
    # config
    # D = 8  decision space dimension
    # N0 = 100  initial population
    # Ng = 100  total generation
    D, Ng, N0 = itemgetter('D', 'Ng', 'N0')(params)

    algo = pg.algorithm(pg.de(gen=Ng))
    algo.set_verbosity(int(Ng / 10))
    prob = pg.problem(evaluate_wrapper(D, evaluate))
    island = pg.island(algo=algo, prob=prob, size=N0, udi=pg.mp_island())

    return island
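A hedged, self-contained sketch of the same pattern (pg.mp_island() runs the evolution in a separate process); pg.rosenbrock stands in for the project's evaluate_wrapper problem, and the sizes follow the config comments above.

import pygmo as pg

if __name__ == '__main__':                      # mp_island spawns worker processes
    algo = pg.algorithm(pg.de(gen=100))
    algo.set_verbosity(10)
    prob = pg.problem(pg.rosenbrock(8))         # stand-in for evaluate_wrapper(D, evaluate)
    isl = pg.island(algo=algo, prob=prob, size=100, udi=pg.mp_island())

    isl.evolve()                                # asynchronous: runs in the worker process
    isl.wait()                                  # block until the evolution has finished
    print(isl.get_population().champion_f)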
Example #9
def run_island(i):
    class Problem:
        def __init__(self, evaluator, lb, ub):
            # type: (Evaluator, array, array) -> None
            '''
            Inits the problem with an objective evaluator
            (implementing the method evaluate), the parameter
            vector lower bound (a numpy array) and upper bound.
            Both bounds must have the same dimension.
            '''
            from .utils import expect, check_vector
            check_vector(lb)
            check_vector(ub)
            expect(len(lb) == len(ub), 'Bounds mismatch')
            self.evaluator = evaluator
            self.lb = lb
            self.ub = ub

        def fitness(self, x):
            # pygmo expects a fitness vector, hence the single-element tuple
            return (self.evaluator.evaluate(x), )

        def get_bounds(self):
            return (self.lb, self.ub)

        def get_name(self):
            return 'Sabaody udp'

        def get_extra_info(self):
            return 'Sabaody extra info'

    import pygmo as pg
    from multiprocessing import cpu_count
    from b2problem import B2Problem
    from params import getDefaultParamValues, getLowerBound, getUpperBound
    from pymemcache.client.base import Client
    mc_client = Client((i.mc_host,i.mc_port))
    #udp = i.problem_factory()

    algorithm = pg.algorithm(pg.de())
    problem = pg.problem(Problem(B2Problem(i.problem_factory), getLowerBound(), getUpperBound()))
    # TODO: configure pop size
    #a = pg.archipelago(n=cpu_count,algo=algorithm, prob=problem, pop_size=100)

    mc_client.set(i.domain_qualifier('island', str(i.id), 'status'), 'Running', 10000)
    mc_client.set(i.domain_qualifier('island', str(i.id), 'n_cores'), str(cpu_count()), 10000)
    print('Starting island {} with {} cpus'.format(str(i.id), str(cpu_count())))

    #a.evolve(100)

    return 0
Example #10
def benchmark_differential_evolution():
    island = pg_island(algo=de(gen=10), prob=problem(rosenbrock(5)), size=10)

    N = 10
    print('Differential Evolution (pop. size {})'.format(
        island.get_population().get_f().size))
    for k in range(N):
        island.evolve()
        island.wait()
        d = sqrt(
            float(((island.get_population().champion_x -
                    rosenbrock(5).best_known())**2).mean()))
        print('DE {:2}/{}: best fitness {:9.2f}, deviation {:9.2f}, fevals {}'.
              format(k, N, float(island.get_population().champion_f[0]), d,
                     island.get_population().problem.get_fevals()))
Example #11
def run_island(island, topology):
    import pygmo as pg
    from multiprocessing import cpu_count
    from pymemcache.client.base import Client
    from sabaody.migration import BestSPolicy, FairRPolicy
    from sabaody.migration_central import CentralMigrator
    mc_client = Client((island.mc_host, island.mc_port))
    migrator = CentralMigrator('http://luna:10100')

    algorithm = pg.de(gen=10)
    problem = island.problem_constructor()
    # TODO: configure pop size
    i = pg.island(algo=algorithm, prob=problem, size=20)

    mc_client.set(island.domain_qualifier('island', str(island.id), 'status'),
                  'Running', 10000)
    mc_client.set(island.domain_qualifier('island', str(island.id), 'n_cores'),
                  str(cpu_count()), 10000)

    rounds = 10
    migration_log = []
    for x in range(rounds):
        i.evolve()
        i.wait()

        # perform migration
        migrator.sendMigrants(island.id, i, topology)
        deltas, src_ids = migrator.receiveMigrants(island.id, i, topology)
        """
        For Kafka Migration Enable below 
        """
        #migrator.send_migrants(island.id,i,topology,generation=x)
        #deltas,src_ids = migrator.receive_migrants(island.id,i,topology,generation=x)

        migration_log.append((float(pop.champion_f[0]), deltas, src_ids))

    import socket
    hostname = socket.gethostname()
    ip = [
        l for l in ([
            ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
            if not ip.startswith("127.")
        ][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())
                 for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
                 ][0][1]]) if l
    ][0][0]
    return (ip, hostname, island.id, migration_log,
            i.get_population().problem.get_fevals())
Example #12
def solver(dimension, lower_bound, upper_bound, optim, bias, popsize):
    global algo
    global pop
    global niter
    global log
    global curve
    prob = pg.problem(
        rosenbrock_prob(dimension, lower_bound, upper_bound, optim, bias))
    algo = pg.algorithm(
        pg.de(gen=3000, F=0.8, CR=0.9, variant=3, ftol=1e-06, xtol=1e-06))
    algo.set_verbosity(1)
    pop = pg.population(prob, popsize)
    pop = algo.evolve(pop)
    log = algo.extract(pg.de).get_log()  # log entries are (Gen, Fevals, Best, dx, df) tuples
    curve = [x[2] for x in log]          # best fitness value at each logged generation
    niter = log[-1][0]                   # generation number of the last log entry
    return prob, algo, pop, log, niter, curve
Example #13
def benchmark_differential_evolution(generations):
    island = pg_island(
        algo=de(gen=generations),
        prob=B2_UDP(getLowerBound(),getUpperBound(),'../../../../../sbml/b2.xml'),
        size=10)

    N = 50
    import arrow
    time_start = arrow.utcnow()
    print('Differential Evolution (pop. size {})'.format(island.get_population().get_f().size))
    for k in range(N):
        island.evolve()
        island.wait()
        delta_t = arrow.utcnow() - time_start
        print('DE {:2}/{}: best fitness {:9.2f}, fevals {}, duration {}'.format(
            k,N,float(island.get_population().champion_f[0]),
            island.get_population().problem.get_fevals(),
            delta_t))
Example #14
    def fit(self, function, with_global_optimization=True, quiet=False):

        if no_threeml:
            raise RuntimeError('No 3ML!')

        self._ps = PointSource(self._band, 0, 0, spectral_shape=function)
        self._model = Model(self._ps)

        self._xyl = XYLike('band_%s' % (self._band), self._normalized_time,
                           self._flux, self._error)
        self._jl = JointLikelihood(self._model, DataList(self._xyl))

        if with_global_optimization:
            pagmo_minimizer = GlobalMinimization("pagmo")

            my_algorithm = pygmo.algorithm(pygmo.de(gen=25))

            # Create an instance of a local minimizer
            local_minimizer = LocalMinimization("ROOT")

            # Setup the global minimization
            pagmo_minimizer.setup(
                second_minimization=local_minimizer,
                algorithm=my_algorithm,
                islands=10,
                population_size=15,
                evolution_cycles=5)

            self._jl.set_minimizer(pagmo_minimizer)

        else:

            self._jl.set_minimizer('ROOT')

        try:

            _ = self._jl.fit(quiet=quiet)

        except:

            print('Fit FAILED')

            self._jl = None
Example #15
async def optimize(evaluate, params, hooks):
    # config
    # D = 8  decision space dimension
    # N0 = 100  initial population
    # Ng = 100  total generation
    D, Ng, N0 = itemgetter('D', 'Ng', 'N0')(params)
    hook_report = itemgetter('report')(hooks)

    algo = pg.algorithm(pg.de(gen=Ng))
    algo.set_verbosity(int(Ng / 10))
    prob = pg.problem(evaluate_wrapper(D, evaluate))
    pop = pg.population(prob, N0)

    await asyncio.sleep(0)
    pop = algo.evolve(pop)
    gbest = (pop.champion_x, pop.champion_f)

    if hook_report:
        hook_report(gbest)

    return gbest
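A self-contained sketch of the same asyncio pattern with purely illustrative names (only the pygmo and asyncio calls are real); note that algo.evolve(pop) itself is blocking, so the single await asyncio.sleep(0) only yields control once before the optimisation starts.

import asyncio
import pygmo as pg

async def optimize_sketch(gen=100, pop_size=50):
    # illustrative stand-in for optimize(): rosenbrock instead of evaluate_wrapper
    algo = pg.algorithm(pg.de(gen=gen))
    prob = pg.problem(pg.rosenbrock(8))
    pop = pg.population(prob, pop_size)
    await asyncio.sleep(0)          # yield once; the evolve call below still blocks the loop
    pop = algo.evolve(pop)
    return pop.champion_x, pop.champion_f

best_x, best_f = asyncio.run(optimize_sketch())
print('champion fitness:', best_f)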
Example #16
    print(archi)
    archi.evolve()
    archi.wait()
    archi.wait_check()
    print(archi)

import pygmo as pg
# The user-defined problem
udp = pg.schwefel(dim=20)
# The pygmo problem
prob = pg.problem(udp)

# For a number of generation based algorithms we can use a similar script to run and average over 25 runs.
udas = [
    pg.sade(gen=500),
    pg.de(gen=500),
    pg.de1220(gen=500),
    pg.pso(gen=500),
    pg.bee_colony(gen=250, limit=20)
]
for uda in udas:
    logs = []
    for i in range(25):
        algo = pg.algorithm(uda)
        algo.set_verbosity(1)  # regulates both screen and log verbosity
        pop = pg.population(prob, 20)
        pop = algo.evolve(pop)
        logs.append(algo.extract(type(uda)).get_log())
    logs = np.array(logs)
    avg_log = np.average(logs, 0)
    plt.plot(avg_log[:, 1],   # x-axis: fitness evaluations
             avg_log[:, 2],   # y-axis: best fitness found so far
             label=algo.get_name())  # assumed completion of the truncated call
"""
As a second step, we have to create an algorithm to solve the problem. Many different algorithms are available through PyGMO, including heuristic methods and local optimizers.

In this example, we will use Differential Evolution (DE). Similarly to the UDP, it is also possible to create a User-Defined Algorithm (UDA), but in this tutorial we will use an algorithm readily available in PyGMO. [This webpage](https://esa.github.io/pygmo2/overview.html#list-of-algorithms) lists all of the PyGMO optimisation algorithms that are available.

See also [this tutorial](https://esa.github.io/pygmo2/tutorials/using_algorithm.html) on PyGMO algorithms.
"""

# Define number of generations
number_of_generations = 1

# Fix seed
current_seed = 171015

# Create Differential Evolution object by passing the number of generations as input
de_algo = pygmo.de(gen=number_of_generations, seed=current_seed)

# Create pygmo algorithm object
algo = pygmo.algorithm(de_algo)

# Print the algorithm's information
print(algo)


## Initialise population
"""
A population in PyGMO is essentially a container for multiple individuals. Each individual has an associated decision vector which can change (evolution), the resulting fitness vector, and a unique ID to allow their tracking. The population is initialized starting from a specific problem to ensure that all individuals are compatible with the UDP. The default population size is 0.
"""

# Set population size
pop_size = 1000
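The snippet above stops after setting the population size. A minimal continuation, following the same tutorial pattern shown in Example #20 below (with prob, algo and current_seed as defined earlier in this example), would be:

# Create the population from the problem, so all individuals are compatible with the UDP
pop = pygmo.population(prob, size=pop_size, seed=current_seed)

# Evolve the population with the DE algorithm defined above
pop = algo.evolve(pop)
print('Best fitness: ', pop.champion_f)
print('Best decision vector: ', pop.champion_x)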
Example #18
def run_island(id):
    class B2_UDP:
        def __init__(self, lb, ub):
            # type: (Evaluator, array, array) -> None
            '''
            Inits the problem with an objective evaluator
            (implementing the method evaluate), the parameter
            vector lower bound (a numpy array) and upper bound.
            Both bounds must have the same dimension.
            '''
            from sabaody.utils import check_vector, expect
            check_vector(lb)
            check_vector(ub)
            expect(len(lb) == len(ub), 'Bounds mismatch')
            self.lb = lb
            self.ub = ub
            from b2problem import B2Problem
            self.evaluator = B2Problem('b2.xml')

        def fitness(self, x):
            return (self.evaluator.evaluate(x), )

        def get_bounds(self):
            return (self.lb, self.ub)

        def get_name(self):
            return 'Sabaody udp'

        def get_extra_info(self):
            return 'Sabaody extra info'

        def __getstate__(self):
            return {'lb': self.lb, 'ub': self.ub}

        def __setstate__(self, state):
            self.lb = state['lb']
            self.ub = state['ub']
            from b2problem import B2Problem
            self.evaluator = B2Problem('b2.xml')

    import pygmo as pg
    from pymemcache.client.base import Client
    mc_client = Client(('luna', 11211))

    algorithm = pg.algorithm(pg.de(gen=1000))
    from params import getLowerBound, getUpperBound
    problem = pg.problem(B2_UDP(getLowerBound(), getUpperBound()))
    i = pg.island(algo=algorithm, prob=problem, size=20)

    #mc_client.set(id.domain_qualifier('island', str(id), 'status'), 'Running', 10000)

    i.evolve()
    i.wait()

    import socket
    hostname = socket.gethostname()
    ip = [
        l for l in ([
            ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
            if not ip.startswith("127.")
        ][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close())
                 for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
                 ][0][1]]) if l
    ][0][0]
    return (ip, hostname)
Example #19
    def __call__(self, function):

        scanner_options = {
            'sade':
            dict(gen=self.gen,
                 variant=self.variant,
                 variant_adptv=self.variant_adptv,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 memory=self.memory,
                 seed=self.seed),
            'gaco':
            dict(gen=self.gen,
                 ker=self.ker,
                 q=self.q,
                 oracle=self.oracle,
                 acc=self.acc,
                 threshold=self.threshold,
                 n_gen_mark=self.n_gen_mark,
                 impstop=self.impstop,
                 evalstop=self.evalstop,
                 focus=self.focus,
                 memory=self.memory,
                 seed=self.seed),
            'maco':
            dict(gen=self.gen,
                 ker=self.ker,
                 q=self.q,
                 threshold=self.threshold,
                 n_gen_mark=self.n_gen_mark,
                 evalstop=self.evalstop,
                 focus=self.focus,
                 memory=self.memory,
                 seed=self.seed),
            'gwo':
            dict(gen=self.gen, seed=self.seed),
            'bee_colony':
            dict(gen=self.gen, limit=self.limit, seed=self.seed),
            'de':
            dict(gen=self.gen,
                 F=self.F,
                 CR=self.CR,
                 variant=self.variant,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 seed=self.seed),
            'sea':
            dict(gen=self.gen, seed=self.seed),
            'sga':
            dict(gen=self.gen,
                 cr=self.cr,
                 eta_c=self.eta_c,
                 m=self.m,
                 param_m=self.param_m,
                 param_s=self.param_s,
                 crossover=self.crossover,
                 mutation=self.mutation,
                 selection=self.selection,
                 seed=self.seed),
            'de1220':
            dict(gen=self.gen,
                 allowed_variants=self.allowed_variants,
                 variant_adptv=self.variant_adptv,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 memory=self.memory,
                 seed=self.seed),
            'cmaes':
            dict(gen=self.gen,
                 cc=self.cc,
                 cs=self.cs,
                 c1=self.c1,
                 cmu=self.cmu,
                 sigma0=self.sigma0,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 memory=self.memory,
                 force_bounds=self.force_bounds,
                 seed=self.seed),
            'moead':
            dict(gen=self.gen,
                 weight_generation=self.weight_generation,
                 decomposition=self.decomposition,
                 neighbours=self.neighbours,
                 CR=self.CR,
                 F=self.F,
                 eta_m=self.eta_m,
                 realb=self.realb,
                 limit=self.limit,
                 preserve_diversity=self.preserve_diversity,
                 seed=self.seed),
            'compass_search':
            dict(max_fevals=self.max_fevals,
                 start_range=self.start_range,
                 stop_range=self.stop_range,
                 reduction_coeff=self.reduction_coeff),
            'simulated_annealing':
            dict(Ts=self.Ts,
                 Tf=self.Tf,
                 n_T_adj=self.n_T_adj,
                 n_range_adj=self.n_range_adj,
                 bin_size=self.bin_size,
                 start_range=self.start_range,
                 seed=self.seed),
            'pso':
            dict(gen=self.gen,
                 omega=self.omega,
                 eta1=self.eta1,
                 eta2=self.eta2,
                 max_vel=self.max_vel,
                 variant=self.variant,
                 neighb_type=self.neighb_type,
                 neighb_param=self.neighb_param,
                 memory=self.memory,
                 seed=self.seed),
            'pso_gen':
            dict(gen=self.gen,
                 omega=self.omega,
                 eta1=self.eta1,
                 eta2=self.eta2,
                 max_vel=self.max_vel,
                 variant=self.variant,
                 neighb_type=self.neighb_type,
                 neighb_param=self.neighb_param,
                 memory=self.memory,
                 seed=self.seed),
            'nsga2':
            dict(gen=self.gen,
                 cr=self.cr,
                 eta_c=self.eta_c,
                 m=self.m,
                 eta_m=self.eta_m,
                 seed=self.seed),
            'nspso':
            dict(gen=self.gen,
                 omega=self.omega,
                 c1=self.c1,
                 c2=self.c2,
                 chi=self.chi,
                 v_coeff=self.v_coeff,
                 leader_selection_range=self.leader_selection_range,
                 diversity_mechanism=self.diversity_mechanism,
                 memory=self.memory,
                 seed=self.seed),
            'mbh':
            dict(algo=self.algo,
                 stop=self.stop,
                 perturb=self.perturb,
                 seed=self.seed),
            'cstrs_self_adaptive':
            dict(iters=self.iters, algo=self.algo, seed=self.seed),
            'ihs':
            dict(gen=self.gen,
                 phmcr=self.phmcr,
                 ppar_min=self.ppar_min,
                 ppar_max=self.ppar_max,
                 bw_min=self.bw_min,
                 bw_max=self.bw_max,
                 seed=self.seed),
            'xnes':
            dict(gen=self.gen,
                 eta_mu=self.eta_mu,
                 eta_sigma=self.eta_sigma,
                 eta_b=self.eta_b,
                 sigma0=self.sigma0,
                 ftol=self.ftol,
                 xtol=self.xtol,
                 memory=self.memory,
                 force_bounds=self.force_bounds,
                 seed=self.seed)
        }

        if self.log_data:
            xl = []
            yl = []

        log_data = self.log_data

        #
        class interf_function:
            def __init__(self, dim):
                self.dim = dim

            def fitness(self, x):
                x = np.expand_dims(x, axis=0)
                y = function(x)
                # x = x[0]
                y = y.tolist()
                if log_data:
                    xl.append(x)
                    yl.append(y)
                # print (x, y[0])
                return y[0]

            if function.is_differentiable():

                def gradient(self, x):
                    x = np.expand_dims(x, axis=0)
                    g = function(x)
                    g = g.tolist()
                    return g[0]

            def get_bounds(self):
                lb = []
                ub = []
                bounds = function.get_ranges()
                # warning
                # check for infinities
                for i in range(len(bounds)):
                    lb.append(bounds[i, 0])
                    ub.append(bounds[i, 1])
                r = (np.array(lb), np.array(ub))
                return r

        # I need to call pygmo functions directly
        prob = pg.problem(interf_function(function))

        # print (prob.get_thread_safety())

        if self.scanner == "sade":
            # I need a dictionary with algorithms and options
            algo = pg.algorithm(pg.sade(**scanner_options[self.scanner]))
        elif self.scanner == "gaco":
            algo = pg.algorithm(pg.gaco(**scanner_options[self.scanner]))
        # elif self.scanner == "maco": # is not implemented though in webpage
        #                               looks it is
        # algo = pg.algorithm(pg.maco(**scanner_options[self.scanner]))
        elif self.scanner == "gwo":
            algo = pg.algorithm(pg.gwo(**scanner_options[self.scanner]))
        elif self.scanner == "bee_colony":
            algo = pg.algorithm(pg.bee_colony(**scanner_options[self.scanner]))
        elif self.scanner == "de":
            algo = pg.algorithm(pg.de(**scanner_options[self.scanner]))
        elif self.scanner == "sea":
            algo = pg.algorithm(pg.sea(**scanner_options[self.scanner]))
        elif self.scanner == "sga":
            algo = pg.algorithm(pg.sga(**scanner_options[self.scanner]))
        elif self.scanner == "de1220":
            algo = pg.algorithm(pg.de1220(**scanner_options[self.scanner]))
        elif self.scanner == "cmaes":
            algo = pg.algorithm(pg.cmaes(**scanner_options[self.scanner]))
        # elif self.scanner == "moead": #multiobjective algorithm
        #  algo = pg.algorithm(pg.moead(**scanner_options[self.scanner]))
        elif self.scanner == "compass_search":
            algo = pg.algorithm(
                pg.compass_search(**scanner_options[self.scanner]))
        elif self.scanner == 'simulated_annealing':
            algo = pg.algorithm(
                pg.simulated_annealing(**scanner_options[self.scanner]))
        elif self.scanner == 'pso':
            algo = pg.algorithm(pg.pso(**scanner_options[self.scanner]))
        elif self.scanner == 'pso_gen':
            algo = pg.algorithm(pg.pso_gen(**scanner_options[self.scanner]))
        # elif self.scanner == 'nsga2': #multiobjective algorithm
        #  algo = pg.algorithm(pg.nsga2(**scanner_options[self.scanner]))
        # elif self.scanner == 'nspso':  # not implemented here, although the webpage
        #                                  suggests it is available
        #  algo = pg.algorithm(pg.nspso(**scanner_options[self.scanner]))
        elif self.scanner == 'mbh':
            if scanner_options[self.scanner]['algo'] == 'de':
                algo = pg.algorithm(
                    pg.mbh(pg.algorithm(pg.de(**scanner_options['de']))))
        # elif self.scanner == 'ihs': #does not work
        #  algo = pg.algorithm(ihs(**scanner_options[self.scanner]))
        # elif self.scanner == 'xnes': #does not work
        #  algo = pg.algorithm(xnes(**scanner_options[self.scanner]))
        # uda = algo.extract(xnes)
        else:
            print(
                'The ' + self.scanner + ' algorithm is not implemented. The '
                'list of algorithms available is', algorithms)
            sys.exit()

        # set the verbosity flag
        if self.verbose > 1:
            algo.set_verbosity(self.verbose)

        pop = pg.population(prob, self.size)

        if self.verbose > 9:
            print('prob', prob)

        opt = algo.evolve(pop)

        if self.verbose > 9:
            print('algo', algo)

        # best_x = np.expand_dims(opt.champion_x, axis=0)
        # best_fitness = np.expand_dims(opt.get_f()[opt.best_idx()], axis=0)
        best_x = np.expand_dims(opt.champion_x, axis=0)
        best_fitness = np.expand_dims(opt.champion_f, axis=0)

        if self.verbose > 0:
            print('best fit:', best_x, best_fitness)

        if self.log_data:
            x = np.squeeze(xl, axis=(1, ))
            y = np.squeeze(yl, axis=(2, ))

        if self.log_data:
            return (x, y)
        else:
            return (best_x, best_fitness)
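Since scanner_options already maps every scanner name to its keyword arguments, the long if/elif chain above could instead be table-driven. A minimal sketch of that alternative, assuming the same scanner_options dict (the mbh wrapper and the commented-out multi-objective algorithms would still need special-casing):

import pygmo as pg

def build_algorithm(scanner, scanner_options):
    # Look up the UDA constructor by name on the pygmo module (pg.de, pg.sade, ...)
    # and unpack the matching options dict.
    try:
        uda_ctor = getattr(pg, scanner)
    except AttributeError:
        raise ValueError('The {} algorithm is not implemented. Available: {}'.format(
            scanner, ', '.join(sorted(scanner_options))))
    return pg.algorithm(uda_ctor(**scanner_options[scanner]))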
Example #20
def main():
    ##########################################################
    # CREATE PROBLEM #########################################
    ##########################################################
    """
    First, we define the Pygmo problem by using the UDP class defined above. Note that an instantiation of the UDP class
    must be passed as input to pygmo.problem() and NOT the class itself. It is also possible to use a PyGMO UDP, i.e.
    a problem that is already defined in PyGMO, but it will not be shown in this tutorial.
    See also: https://esa.github.io/pygmo2/tutorials/using_problem.html.
    """
    # Instantiation of the UDP problem
    udp = HimmelblauOptimization(-5.0, 5.0, -5.0, 5.0)
    # Creation of the pygmo problem object
    prob = pygmo.problem(udp)
    # Print the problem's information
    print('\n########### PRINTING PROBLEM INFORMATION ###########\n')
    print(prob)

    ##########################################################
    # CREATE ALGORITHM #######################################
    ##########################################################
    """
    As a second step, we have to create an algorithm to solve the problem. Many different algorithms are available
    through PyGMO, including heuristic methods and local optimizers. In this example, we will use the Differential
    Evolution (DE). Similarly to the UDP, it is also possible to create a User-Defined Algorithm (UDA), but in this
    tutorial we will use an algorithm readily available in PyGMO.
    See also: https://esa.github.io/pygmo2/tutorials/using_algorithm.html.
    """
    # Define number of generations
    number_of_generations = 1
    # Fix seed
    current_seed = 171015
    # Create Differential Evolution object by passing the number of generations as input
    # NOTE: specific inputs for different pygmo algorithms may vary, check each algorithm's documentation
    # See also https://esa.github.io/pygmo2/overview.html#list-of-algorithms
    de_algo = pygmo.de(gen=number_of_generations, seed=current_seed)
    # Create pygmo algorithm object
    algo = pygmo.algorithm(de_algo)
    # Print the algorithm's information
    print('\n########### PRINTING ALGORITHM INFORMATION ###########\n')
    print(algo)

    ##########################################################
    # INITIALIZE POPULATION ##################################
    ##########################################################
    """
    A population in PyGMO is essentially a container for multiple individuals. Each individual has an associated 
    decision vector which can change (evolution), the resulting fitness vector, and a unique ID to allow their
    tracking. The population is initialized starting from a specific problem to ensure that all individuals are 
    compatible with the UDP. The default population size is 0.
    """
    # Set population size
    pop_size = 1000
    # Set seed
    current_seed = 171015
    # Create population
    pop = pygmo.population(prob, size=pop_size, seed=current_seed)
    # Inspect population (this is going to be long, uncomment if desired)
    # print('\n########### PRINTING POPULATION INFORMATION ###########\n')
    # print(pop)

    ##########################################################
    # EVOLVE POPULATION ######################################
    ##########################################################

    # Set number of evolutions
    number_of_evolutions = 100
    # Initialize empty containers
    individuals_list = []
    fitness_list = []
    # Evolve population multiple times
    for i in range(number_of_evolutions):
        pop = algo.evolve(pop)
        individuals_list.append(pop.get_x()[pop.best_idx()])
        fitness_list.append(pop.get_f()[pop.best_idx()])

    # At the end of the evolution(s), we extract the best individual
    print('\n########### PRINTING CHAMPION INDIVIDUALS ###########\n')
    # Print its fitness value
    print('Fitness (= function) value: ', pop.champion_f)
    # Print its decision variable vector
    print('Decision variable vector: ', pop.champion_x)
    # Print the number of function evaluations (calls to the fitness function)
    print('Number of function evaluations: ', pop.problem.get_fevals())
    # Print the difference with respect to the minimum location
    print('Difference wrt the minimum: ', pop.champion_x - np.array([3, 2]))

    ##########################################################
    # VISUALIZE OPTIMIZATION #################################
    ##########################################################

    # Set font size for plots
    font = {'size': 18}
    matplotlib.rc('font', **font)
    # Extract best individuals for each generation
    best_x = [ind[0] for ind in individuals_list]
    best_y = [ind[1] for ind in individuals_list]
    # Extract problem bounds
    (x_min, y_min), (x_max, y_max) = udp.get_bounds()

    # Plot fitness over generations
    fig, ax = plt.subplots(figsize=(16, 4))
    ax.plot(np.arange(0, number_of_evolutions),
            fitness_list,
            label='Function value')
    # Plot champion
    champion_n = np.argmin(np.array(fitness_list))
    ax.scatter(champion_n,
               np.min(fitness_list),
               marker='x',
               color='r',
               label='All-time champion')
    # Prettify
    ax.set_xlim((0, number_of_evolutions))
    ax.grid('major')
    ax.set_title('Best individual of each generation', fontweight='bold')
    ax.set_xlabel('Number of generation')
    ax.set_ylabel(r'Himmelblau function value $f(x,y)$')
    ax.legend(loc='upper right')
    plt.savefig('fitness_himmelblau.png', bbox_inches='tight')

    # Plot Himmelblau function
    grid_points = 100
    x_vector = np.linspace(x_min, x_max, grid_points)
    y_vector = np.linspace(y_min, y_max, grid_points)
    x_grid, y_grid = np.meshgrid(x_vector, y_vector)
    z_grid = np.zeros((grid_points, grid_points))
    for i in range(x_grid.shape[1]):
        for j in range(x_grid.shape[0]):
            z_grid[i, j] = himmelblau_function([x_grid[i, j], y_grid[i, j]])
    # Create figure
    fig, ax = plt.subplots(figsize=(16, 10))
    cs = ax.contour(x_grid, y_grid, z_grid, 50)
    # Plot best individuals of each generation
    ax.scatter(best_x, best_y, marker='x', color='r')
    # Prettify
    ax.set_xlim((x_min, x_max))
    ax.set_ylim((y_min, y_max))
    ax.set_title('Himmelblau function', fontweight='bold')
    ax.set_xlabel('X-coordinate')
    ax.set_ylabel('Y-coordinate')
    cbar = fig.colorbar(cs)
    cbar.ax.set_ylabel(r'Himmelblau function value $f(x,y)$')
    plt.savefig('contour_himmelblau.png', bbox_inches='tight')

    # Visualize only one minimum
    eps = 1E-3
    x_min, x_max = (3 - eps, 3 + eps)
    y_min, y_max = (2 - eps, 2 + eps)
    grid_points = 100
    x_vector = np.linspace(x_min, x_max, grid_points)
    y_vector = np.linspace(y_min, y_max, grid_points)
    x_grid, y_grid = np.meshgrid(x_vector, y_vector)
    z_grid = np.zeros((grid_points, grid_points))
    for i in range(x_grid.shape[1]):
        for j in range(x_grid.shape[0]):
            z_grid[i, j] = himmelblau_function([x_grid[i, j], y_grid[i, j]])
    fig, ax = plt.subplots(figsize=(16, 10))
    cs = ax.contour(x_grid, y_grid, z_grid, 50)
    # Plot best individuals of each generation
    ax.scatter(best_x,
               best_y,
               marker='x',
               color='r',
               label='Best individual of each generation')
    ax.scatter(pop.champion_x[0],
               pop.champion_x[1],
               marker='x',
               color='k',
               label='Champion')
    # Prettify
    ax.xaxis.set_major_formatter(mtick.FormatStrFormatter('%1.5f'))
    ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%1.5f'))
    plt.xticks(rotation=45)
    ax.set_xlim((x_min, x_max))
    ax.set_ylim((y_min, y_max))
    ax.set_title('Vicinity of (3,2)', fontweight='bold')
    ax.set_xlabel('X-coordinate')
    ax.set_ylabel('Y-coordinate')
    cbar = fig.colorbar(cs)
    cbar.ax.set_ylabel(r'Himmelblau function value $f(x,y)$')
    ax.legend(loc='lower right')
    ax.grid('major')
    plt.savefig('one_minimum_himmelblau.png', bbox_inches='tight')

    ##########################################################
    # GRID SEARCH ############################################
    ##########################################################

    # Set number of points
    number_of_nodes = 1000
    # Extract problem bounds
    (x_min, y_min), (x_max, y_max) = udp.get_bounds()
    x_vector = np.linspace(x_min, x_max, number_of_nodes)
    y_vector = np.linspace(y_min, y_max, number_of_nodes)
    x_grid, y_grid = np.meshgrid(x_vector, y_vector)
    z_grid = np.zeros((number_of_nodes, number_of_nodes))
    for i in range(x_grid.shape[1]):
        for j in range(x_grid.shape[0]):
            z_grid[i, j] = himmelblau_function([x_grid[i, j], y_grid[i, j]])
    best_f = np.min(z_grid)
    best_ind = np.argmin(z_grid)
    best_x = (x_grid.flatten()[best_ind], y_grid.flatten()[best_ind])
    print('\n########### RESULTS OF GRID SEARCH (' + str(number_of_nodes) +
          ' nodes per variable) ########### ')
    print(
        'Best fitness with grid search (' + str(number_of_nodes) + ' points):',
        best_f)
    print('Decision variable vector: ', best_x)
    print('Number of function evaluations: ', number_of_nodes**2)
    print('Difference wrt the minimum: ', best_x - np.array([3, 2]))
    del number_of_nodes

    ##########################################################
    # MONTE-CARLO SEARCH #####################################
    ##########################################################

    # Fix seed (for reproducibility)
    random.seed(current_seed)
    # Size of random number vector
    number_of_points = 1000
    x_vector = random.random(number_of_points)
    x_vector *= (x_max - x_min)
    x_vector += x_min
    y_vector = random.random(number_of_points)
    y_vector *= (y_max - y_min)
    y_vector += y_min
    x_grid, y_grid = np.meshgrid(x_vector, y_vector)
    z_grid = np.zeros((number_of_points, number_of_points))
    for i in range(x_grid.shape[1]):
        for j in range(x_grid.shape[0]):
            z_grid[i, j] = himmelblau_function([x_grid[i, j], y_grid[i, j]])
    best_f = np.min(z_grid)
    best_ind = np.argmin(z_grid)
    best_x = (x_grid.flatten()[best_ind], y_grid.flatten()[best_ind])
    print('\n########### RESULTS OF MONTE-CARLO SEARCH (' +
          str(number_of_points) + ' points per variable) ' + '########### ')
    print(
        'Best fitness with Monte-Carlo search (' + str(number_of_points) +
        ' points):', best_f)
    print('Decision variable vector: ', best_x)
    print('Number of function evaluations: ', number_of_points**2)
    print('Difference wrt the minimum: ', best_x - np.array([3, 2]))
    del number_of_points

    # Show plot
    plt.show()
Example #21
def chi_optimize(method='pso',
                 parallel=False,
                 N_ind=100,
                 N_gen=30,
                 iset=0,
                 show_results=False,
                 dir='../data/',
                 file_name='chi_fit'):
    # Load experimental data
    N_set = 2
    ca_data = svu.loaddata('../data/herzog_data.pkl')[0]
    po_data = svu.loaddata('../data/fit_po.pkl')[0]
    lr_data = svu.loaddata('../data/fit_lra.pkl')[0]

    # Get boundary dictionary
    bounds_dict = models.model_bounds(model='chi')
    # Save effective bounds and scaling
    bounds, scaling = bounds_for_exploration(bounds_dict, normalized=False)

    # Prepare model parameters
    c0 = 5.0
    # c0 = 15.0
    params = np.asarray(np.r_[po_data[[0, 1, 0, 2, 3]], lr_data[[0, 1]], c0,
                              iset])
    args = (params, ca_data)

    #-----------------------------------------------------------------------------------------------------------
    # Effective evolution
    #-----------------------------------------------------------------------------------------------------------
    # Size parameters
    # N_var = problem_dimension(model='lra')
    N_individuals = N_ind
    N_var = problem_dimension(model='chi')
    N_generations = N_gen * N_var

    # Tolerances
    FTOL = 1e-6
    XTOL = 1e-6

    # prob = pg.problem(fit_problem('lra',args))
    prob = pg.problem(fit_problem('chi', args))

    # Optimization
    # NOTE: We keep each algorithm self-contained here because different parameter settings for the evolution
    # are needed depending on the chosen algorithm
    if method == 'pso':
        algo = pg.algorithm(
            pg.pso(gen=N_generations, variant=5, neighb_type=4, max_vel=0.8))
    elif method == 'bee':
        algo = pg.algorithm(pg.bee_colony(gen=N_generations, limit=2))
    elif method == 'de':
        algo = pg.algorithm(pg.de(gen=N_generations, ftol=FTOL, xtol=XTOL))
        # Single-node optimation to run on local machine / laptop
        # N_individuals = 100
        # N_generations = 40 * N_var
        # verbosity = 20

    if not parallel:
        verbosity = 20
        algo.set_verbosity(verbosity)
        pop = pg.population(prob, size=N_individuals)
        pop = algo.evolve(pop)

        best_fitness = pop.get_f()[pop.best_idx()]
        print(best_fitness)

        x_rescaled = rescale_vector(pop.champion_x,
                                    model='chi',
                                    normalized=True,
                                    bounds=bounds,
                                    scaling=scaling)

        # Show results of fit
        if show_results:
            astro = models.Astrocyte(
                model='chi',
                d1=po_data[0],
                d2=po_data[1],
                d3=po_data[0],
                d5=po_data[2],
                a2=po_data[3],
                c0=c0,
                c1=0.5,
                rl=0.1,
                Ker=0.1,
                rc=lr_data[0],
                ver=lr_data[1],
                vbeta=x_rescaled[0],
                vdelta=x_rescaled[1],
                v3k=x_rescaled[2],
                r5p=x_rescaled[3],
                ICs=np.asarray([x_rescaled[6], x_rescaled[4], x_rescaled[5]]))

            options = su.solver_opts(t0=ca_data['time'][iset][0],
                                     tfin=ca_data['time'][iset][-1],
                                     dt=1e-4,
                                     atol=1e-8,
                                     rtol=1e-6,
                                     method="gsl_msadams")
            astro.integrate(algparams=options, normalized=True)

            ca_trace = ca_data['smoothed'][iset]
            plt.plot(ca_data['time'][0], ca_trace, 'k-', astro.sol['ts'],
                     astro.sol['ca'], 'r-')
            plt.show()

    else:
        # Parallel version to run on cluster
        N_evolutions = 100
        N_islands = 10
        # Initiate optimization
        algo = pg.algorithm(
            pg.pso(gen=N_generations, variant=5, neighb_type=4, max_vel=0.8))
        archi = pg.archipelago(n=N_islands,
                               algo=algo,
                               prob=prob,
                               pop_size=N_individuals)
        archi.evolve(N_evolutions)
        archi.wait()
        imin = np.argmin(archi.get_champions_f())
        x_rescaled = rescale_vector(archi.get_champions_x()[imin],
                                    model='chi',
                                    normalized=True,
                                    bounds=bounds,
                                    scaling=scaling)
        print(archi.get_champions_f()[imin])
        print(x_rescaled)

    svu.savedata([x_rescaled], dir + file_name + '.pkl')
Example #22
import pygmo as pg
from rbsetup import RBRun

from numpy import array, save, savez, mean, std, nan_to_num, vstack

from tempfile import gettempdir
from os.path import join
import json

single_champions = []
N = 100
for i in range(N):
    with RBRun('luna', 11211) as run:
        print('Started run {} of {}'.format(i + 1, N))
        from rbsetup import make_problem
        algorithm = pg.de(gen=10)
        problem = make_problem()
        i = pg.island(algo=algorithm, prob=problem, size=20)

        rounds = 10
        c = []
        for x in range(rounds):
            i.evolve()
            i.wait()
            c.append(float(i.get_population().champion_f[0]))
            print('round {}'.format(x))
            print(i.get_population().champion_f)
            print(i.get_population().get_x()[0, :])
        single_champions.append(array(c))

single_champions_stack = vstack(single_champions)
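The unused mean and std imports above suggest the per-round statistics this script is after; a hedged continuation computing them over the stacked champions (rows are runs, columns are evolution rounds) and saving the raw data to a temporary file might look like:

# Per-round statistics across the N runs
round_mean = mean(single_champions_stack, axis=0)
round_std = std(single_champions_stack, axis=0)
print('mean best fitness per round:', round_mean)
print('std of best fitness per round:', round_std)

# Persist the raw champion trajectories for later analysis (file name is illustrative)
savez(join(gettempdir(), 'rb_single_champions.npz'), champions=single_champions_stack)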
Example #23
 def make_algorithm():
     return pg.de(gen=10)
Example #24
 bounds = {'S_LB':SPACE_L, 'S_UB':SPACE_U}
 obj = {'CLIENTS':clients, 'RADIUS':50.0, 'COSTS':1.0}
 area = 1000*1000
 mp = wap_problem(area,bounds,obj)
 prob = pg.problem(mp)
 print(prob)
 sol_eval = SolutionEvaluer(mp)
 #t = [np.random.rand() for i in range(400)]
 # f = prob.fitness(t)
 # print(str(f))
 # print(prob)
 gen = 10
 F = 0.7
 CR = 0.85
 seed = 7
 algo = pg.algorithm(pg.de(gen,F,CR))
 #algo = pg.algorithm(pg.moead(gen=250))
 #algo = pg.algorithm(pg.sga(gen = gen))
 algo.set_verbosity(50)
 # # # SINGLE EVOLUTION
 # pop = pg.population(prob, size=30, seed=seed)
 # start = time.time()
 # pop = algo.evolve(pop)
 # stop = time.time() - start
 # best = pop.get_x()[pop.best_idx()]
 # print("Champion's Fitness: \t"+str(pop.champion_f)+"\t time: "+str(stop))
 # sol_eval.plot(best)
 # #MULTICORE
 archi = pg.archipelago(4, algo=algo, prob=prob, pop_size=30)
 archi.evolve(4)
 archi.wait()
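 A short continuation (not in the original snippet) that collects the results once the archipelago has finished; the sol_eval.plot call reuses the project's plotting helper from the commented-out single-evolution branch above.
 # Collect the champions from every island and pick the best one
 champions_f = archi.get_champions_f()
 champions_x = archi.get_champions_x()
 best_isl = min(range(len(champions_f)), key=lambda k: champions_f[k][0])
 print("Champion's Fitness: \t" + str(champions_f[best_isl]))
 sol_eval.plot(champions_x[best_isl])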
Example #25
            for (clf_name, lb, ub, fun, args, random_seed) in optimizers:
                print(clf_name, fun, random_seed)
                np.random.seed(random_seed)

                #                init=lhsu(lb,ub,pop_size) # latin hypercube sampling strategy
                #                res = de(func=fun, bounds=tuple(zip(lb,ub)), args=args, maxiter=max_iter,
                #                         init=init, seed=run, disp=True, polish=False,
                #                         strategy='best1bin', tol=1e-6)
                #
                #                xopt, fopt = res['x'], res['fun']
                #                sim = fun(xopt, *(X,y,'run',n_splits,random_seed))
                #                sim['ALGO'] = 'DE'

                #algo = pg.algorithm(pg.de1220(gen = max_iter, seed=random_seed))
                algo = pg.algorithm(
                    pg.de(gen=max_iter, variant=1, seed=random_seed))
                #algo = pg.algorithm(pg.pso(gen = max_iter, seed=random_seed))
                #algo = pg.algorithm(pg.ihs(gen = max_iter*pop_size, seed=random_seed))
                #algo = pg.algorithm(pg.gwo(gen = max_iter, seed=random_seed))
                #algo = pg.algorithm(pg.sea(gen = max_iter, seed=random_seed))
                #algo = pg.algorithm(pg.sade(gen = max_iter, seed=random_seed))
                #algo = pg.algorithm(pg.sga(gen = max_iter, m=0.10, crossover = "sbx", mutation = "gaussian", seed=random_seed))
                #algo = pg.algorithm(pg.cmaes(gen = max_iter, force_bounds = True, seed=random_seed))
                #algo = pg.algorithm(pg.xnes(gen = max_iter, memory=False, force_bounds = True, seed=random_seed))
                #algo = pg.algorithm(pg.simulated_annealing(Ts=100., Tf=1e-5, n_T_adj = 100, seed=random_seed))

                algo.set_verbosity(1)
                prob = pg.problem(evoML(args, fun, lb, ub))
                pop = pg.population(prob, pop_size, seed=random_seed)
                pop = algo.evolve(pop)
                xopt = pop.champion_x
Example #26
def run_DE_optmization_train_ml_methods(datasets, name_opt, \
                                        de_run0 = 0, de_runf = 1, de_pop_size=50, de_max_iter=50, \
                                        kf_n_splits=5, \
                                            save_basename='host_guest_ml___', save_test_size = '', save_file_erro_train = True):
    '''
    # list of all the possible optimizer algorithms for the DE
    list_opt_name = ['EN', 'XGB', 'DTC', 'VC', 'BAG', 'KNN', 'ANN', 'ELM', 'SVM', 'MLP', 'GB', 'KRR', 'CAT']

    de_pop_size: size of the population of individuals
    de_max_iter: maximum number of DE iterations
    kf_n_splits: used for the K-fold cross-validation

    de_run0:
    de_runf:
    '''

    # save_path = './RESULTADOS/MACHINE_LEARNING/PKL/'

    try:
        os.mkdir('./RESULTADOS/MACHINE_LEARNING/')
    except:
        pass

    # try:
    #     os.mkdir(save_path)
    # except:
    #     pass

    for run in range(de_run0, de_runf):

        random_seed = run * 10 + 100
        np.random.seed(random_seed)

        for dataset in datasets:  #[:1]:

            # Definition of the variables associated with the datasets
            target, y_ = dataset['target_names'], dataset['y_train']
            dataset_name, X_ = dataset['name'], dataset['X_train']
            n_samples, n_features = dataset['n_samples'], dataset['n_features']
            task = dataset['task']

            list_results_all = []

            print('=' * 80 + '\n' + dataset_name + ': ' + target + '\n' +
                  '=' * 80 + '\n')

            # defining the target y according to the associated task
            if task == 'classification':
                le = preprocessing.LabelEncoder()
                #le=preprocessing.LabelBinarizer()
                le.fit(y_)
                y = le.transform(y_)
            else:
                y = y_.copy()  #TODO: had to take index 0 to make it work

            X = X_.copy()
            ##scale_X = MinMaxScaler(feature_range=(0.15,0.85)).fit(X_)
            #scale_X = MinMaxScaler(feature_range=(0,1)).fit(X_)
            #X= scale_X.transform(X_)
            ##   scale_y = MinMaxScaler(feature_range=(0.15,0.85)).fit(y_)
            ##   X,y = scale_X.transform(X_), scale_y.transform(y_)

            args = (X, y, 'eval', kf_n_splits, random_seed)

            # list of the algorithm options selected from the list above

            optimizers = [(name, *get_parameters(name), args, random_seed)
                          for name in name_opt]

            for (clf_name, lb, ub, fun, args, random_seed) in optimizers:

                print()
                print(clf_name, '%test_size:', save_test_size)
                print()

                PATH = './RESULTADOS/MACHINE_LEARNING/' + str.upper(
                    clf_name) + '/'

                try:
                    os.mkdir(PATH)
                except:
                    pass

                try:
                    os.mkdir(PATH + 'CSV_ERROR_TRAIN/')
                except:
                    pass

                try:
                    os.mkdir(PATH + 'PKL/')
                except:
                    pass

                list_results = []

                #print(clf_name, random_seed)
                #print(clf_name, fun, random_seed)
                np.random.seed(random_seed)

                t0 = time.time()

                algo = pg.algorithm(
                    pg.de(gen=de_max_iter, variant=1, seed=random_seed))

                algo.set_verbosity(1)
                prob = pg.problem(evoML(args, fun, lb, ub))
                pop = pg.population(prob, de_pop_size, seed=random_seed)
                pop = algo.evolve(pop)

                xopt = pop.champion_x
                sim = fun(xopt, X.to_numpy(), y, 'run', kf_n_splits,
                          random_seed)  #TODO: check whether the to_numpy() conversion is needed

                t1 = time.time()
                sim['ALGO'] = algo.get_name()

                sim['ACTIVE_VAR_NAMES'] = dataset['var_names'][
                    sim['ACTIVE_VAR']]
                if task == 'classification':
                    sim['Y_TRAIN_TRUE'] = le.inverse_transform(sim['Y_TRUE'])
                    sim['Y_TRAIN_PRED'] = le.inverse_transform(sim['Y_PRED'])
                else:  # TODO: why is this duplication needed?
                    sim['Y_TRAIN_TRUE'] = sim['Y_TRUE']
                    sim['Y_TRAIN_PRED'] = sim['Y_PRED']

                sim['RUN'] = run  #sim['Y_NAME']=yc
                sim['DATASET_NAME'] = dataset_name

                sim['time'] = t1 - t0

                pd.Series(sim['ERROR_TRAIN']).to_csv(
                    PATH + "CSV_ERROR_TRAIN/error_train_" + clf_name +
                    "_%test_size_" + save_test_size + ".csv",
                    header=False)

                # Test-set errors #TODO: had to convert to numpy for this to work. Verify.
                # should the test errors be computed in evaluate instead?
                # mach = sim['ESTIMATOR']
                # y_p = mach.predict(dataset['X_test'].to_numpy())
                # y_t = dataset['y_test']

                # r  = RMSE(y_t, y_p)
                # r2 = MAPE(y_t, y_p)
                # r3 = RRMSE(y_t, y_p)

                # sim['ERROR_TEST'] = {'RMSE': r, 'MAPE': r2, 'RRMSE': r3}

                pk = (
                    PATH + 'PKL/' + save_basename + '_run_' +
                    "{:02d}".format(run) + '_' + dataset_name + '_' +
                    os.uname()[1] + '__' + sim['EST_NAME'].lower() + '__' +
                    target + '__%test_size_' + save_test_size +
                    #time.strftime("%Y_%m_%d_") + time.strftime("_%Hh_%Mm_%S")+
                    #'_loo'+
                    '.pkl')

                # pk is a plain string, so indexing it with [0] would keep only its
                # first character; sanitize the whole filename instead
                pk = pk.replace(' ', '_').replace("'", "")

                sim['name_pickle'] = pk

                list_results.append(sim)
                list_results_all.append(sim)

                # data = pd.DataFrame(list_results)
                # data.to_pickle(pk)

                pm = pk.replace('.pkl', '.dat')
                with open(pm, "wb") as f:
                    pickle.dump(sim['ESTIMATOR'], f)

    return list_results_all
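
A minimal usage sketch for the function above, assuming the project-specific helpers it relies on (get_parameters, evoML and the per-model fun evaluators) are importable; the dataset values and the chosen name_opt entries below are hypothetical placeholders.

import numpy as np
import pandas as pd

# Hypothetical regression dataset carrying the keys read inside the training loop
X = pd.DataFrame(np.random.rand(60, 4), columns=['f1', 'f2', 'f3', 'f4'])
y = np.random.rand(60)

datasets = [{
    'name': 'toy_regression',
    'target_names': 'y',
    'X_train': X,
    'y_train': y,
    'n_samples': X.shape[0],
    'n_features': X.shape[1],
    'task': 'regression',
    'var_names': np.array(X.columns),
}]

# Small DE budget for a quick smoke test of two of the supported models
results = run_DE_optmization_train_ml_methods(datasets, ['SVM', 'KNN'],
                                              de_run0=0, de_runf=1,
                                              de_pop_size=10, de_max_iter=5,
                                              kf_n_splits=3,
                                              save_basename='toy',
                                              save_test_size='0.3')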
Example #27
0
    def _setOptimizer(self, optimizer):

        if callable(optimizer):
            # User-defined optimizer function
            pass

        elif isinstance(optimizer, str):
            # Pre-defined optimizer

            if optimizer == 'ga':

                gen = self.optimization_configuration['number_of_generations']
                cr = self.optimization_configuration['crossover_rate']
                mr = self.optimization_configuration['mutation_rate']
                elitism = self.optimization_configuration['elitism']
                ind = self.optimization_configuration['number_of_individuals']

                #=================================================================

                crossover_type = self.optimization_configuration['crossover_type']
                mutation_type = self.optimization_configuration['mutation_type']
                selection_type = self.optimization_configuration['selection_type']
                selection_param = self.optimization_configuration['selection_param']

                #=================================================================

                self._pagmo_selected_algorithm = pg.sga
                self._pagmo_selected_algorithm_log_columns = [
                    'Gen', 'Fevals', 'Best', 'Improvement'
                ]

                algo = pg.algorithm(
                    pg.sga(gen=gen,
                           cr=cr,
                           eta_c=1.,
                           m=mr,
                           param_m=1.,
                           param_s=selection_param,
                           crossover=crossover_type,
                           mutation=mutation_type,
                           selection=selection_type))

                #isl = pg.island(algo, self.optimization_problem, ind)

                self.optimization_mechanism = algo

                return ('ga')

            if optimizer == 'sade':

                gen = self.optimization_configuration['number_of_generations']
                de_ftol = self.optimization_configuration['ftol_de']
                de_xtol = self.optimization_configuration['xtol_de']
                ind = self.optimization_configuration['number_of_individuals']

                #==================================================================

                self._pagmo_selected_algorithm = pg.de1220
                self._pagmo_selected_algorithm_log_columns = [
                    'Gen', 'Fevals', 'Best', 'F', 'CR', 'Variant', 'dx', 'df'
                ]

                algo = pg.algorithm(
                    pg.de1220(gen=gen, ftol=de_ftol, xtol=de_xtol))

                #isl = pg.island(algo, self.optimization_problem, ind)

                self.optimization_mechanism = algo

                return ('sade')

            if optimizer == 'de':

                gen = self.optimization_configuration['number_of_generations']
                cr = self.optimization_configuration['crossover_rate']
                f_w = self.optimization_configuration['scale_factor']
                de_variant = self.optimization_configuration['variant_de']
                de_ftol = self.optimization_configuration['ftol_de']
                de_xtol = self.optimization_configuration['xtol_de']
                ind = self.optimization_configuration['number_of_individuals']

                #==================================================================

                self._pagmo_selected_algorithm = pg.de
                self._pagmo_selected_algorithm_log_columns = [
                    'Gen', 'Fevals', 'Best', 'F', 'CR', 'dx', 'df'
                ]

                algo = pg.algorithm(
                    pg.de(gen=gen,
                          F=f_w,
                          CR=cr,
                          variant=de_variant,
                          ftol=de_ftol,
                          xtol=de_xtol))

                #isl = pg.island(algo, self.optimization_problem, ind)

                self.optimization_mechanism = algo

                return ('de')

            if optimizer == 'pso':

                gen = self.optimization_configuration['number_of_generations']
                omega = self.optimization_configuration['omega_pso']
                eta1 = self.optimization_configuration['eta1_pso']
                eta2 = self.optimization_configuration['eta2_pso']
                vcoeff = self.optimization_configuration['max_v_pso']
                pso_variant = self.optimization_configuration['variant_pso']
                neighb_type = self.optimization_configuration['neighborhood_type_pso']
                neighb_param = self.optimization_configuration['neighborhood_param_pso']
                ind = self.optimization_configuration['number_of_individuals']

                #==================================================================

                self._pagmo_selected_algorithm = pg.pso
                self._pagmo_selected_algorithm_log_columns = [
                    'Gen', 'Fevals', 'gbest', 'Mean Vel.', 'Mean lbest',
                    'Avg. Dist.'
                ]

                algo = pg.algorithm(
                    pg.pso(gen=gen,
                           omega=omega,
                           eta1=eta1,
                           eta2=eta2,
                           max_vel=vcoeff,
                           variant=pso_variant))

                #isl = pg.island(algo, self.optimization_problem, ind)

                self.optimization_mechanism = algo

                return ('pso')

        else:
            raise UnexpectedValueError("(decorated function, str)")
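
The algorithm type and log-column names stored above pair up with pygmo's per-algorithm log. A minimal stand-alone sketch, using the 'sade' branch's pg.de1220 on a toy Rosenbrock problem (the problem, generation count, and seed are arbitrary choices for illustration):

import pandas as pd
import pygmo as pg

# Column names as stored for the 'sade' branch above; de1220 logs these fields per generation
log_columns = ['Gen', 'Fevals', 'Best', 'F', 'CR', 'Variant', 'dx', 'df']

algo = pg.algorithm(pg.de1220(gen=100, ftol=1e-8, xtol=1e-8))
algo.set_verbosity(1)  # record one log entry per generation
pop = pg.population(pg.problem(pg.rosenbrock(5)), 20, seed=42)
pop = algo.evolve(pop)

# extract() returns the wrapped de1220 instance; its get_log() yields tuples in the order above
history = pd.DataFrame(algo.extract(pg.de1220).get_log(), columns=log_columns)
print(history.tail())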
Example #28
0
import pygmo as pg
from time import time

# `prob` is assumed to be a pg.problem defined earlier in the original script
algo = pg.algorithm(pg.pso(gen=20))
pop = pg.population(prob, 50)
t0 = time()
pop = algo.evolve(pop)
t1 = time()
print('Optimization takes %f seconds' % (t1 - t0))
print(pop.champion_f)
print(pop.champion_x)

# Improved PSO (generational PSO with memory)
algo = pg.algorithm(pg.pso_gen(gen=50, memory=True, variant=6))
pop = pg.population(prob, 50)
t0 = time()
pop = algo.evolve(pop)
t1 = time()
print('Optimization takes %f seconds' % (t1 - t0))
print(pop.champion_f)
print(pop.champion_x)

# Study: DE
algo = pg.algorithm(pg.de(gen=50))
pop = pg.population(prob, 50)
t0 = time()
pop = algo.evolve(pop)
t1 = time()
print('Optimization takes %f seconds' % (t1 - t0))
print(pop.champion_f)
print(pop.champion_x)

# Method comparison:
# PSO takes less time than bee_colony and reaches similar results
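
The comparison above mentions bee_colony, but that run is not shown in the snippet; a sketch of what it could look like under the same assumptions (the same `prob`, with hypothetical gen/limit values):

# Artificial bee colony run used as the baseline for the timing comparison
algo = pg.algorithm(pg.bee_colony(gen=50, limit=20))
pop = pg.population(prob, 50)
t0 = time()
pop = algo.evolve(pop)
t1 = time()
print('Optimization takes %f seconds' % (t1 - t0))
print(pop.champion_f)
print(pop.champion_x)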
Example #29
0
import sampler_test_tool
import numpy as np
from scipy import optimize
import vprofile, vmodel
import pygmo as pg
# from pygmo import *

prob = pg.problem(sampler_test_tool.disp_func())
algo = pg.algorithm(pg.de(gen=1000))
algo.set_verbosity(10)
pop = pg.population(prob, 20)
pop = algo.evolve(pop)

# from pygmo import *
# algo = algorithm(de(gen = 500))
# algo.set_verbosity(100)
# prob = problem(rosenbrock(10))
# pop = population(prob, 20)
# pop = algo.evolve(pop)
Example #30
0
File: fitter.py  Project: bek0s/gbkfit
    def _setup_algorithm(self, parameters):
        alg = pg.de(**self._alg_attrs)
        return alg
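
A sketch of the kind of attribute dictionary _setup_algorithm could unpack into pg.de; every key below is a valid pg.de keyword argument, while the values and the surrounding toy problem are hypothetical:

import pygmo as pg

# Hypothetical DE settings, unpacked the same way as self._alg_attrs above
alg_attrs = {'gen': 200, 'F': 0.7, 'CR': 0.9, 'variant': 2, 'ftol': 1e-6, 'xtol': 1e-6, 'seed': 1}
algo = pg.algorithm(pg.de(**alg_attrs))

pop = pg.population(pg.problem(pg.rosenbrock(4)), 30, seed=1)
pop = algo.evolve(pop)
print(pop.champion_f, pop.champion_x)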