Example #1
def test_multiretry(num_retries = min(256, 8*mp.cpu_count()), 
             keep = 0.7, optimizer = de2_cma(1500), logger = logger(), repeat = 10):
    seqs = Tandem(0).seqs
    n = len(seqs)
    problems = [Tandem(i) for i in range(n)]
    ids = [str(seqs[i]) for i in range(n)]
    t0 = time.perf_counter()
    for _ in range(repeat):
        # check all variants
        problem_stats = multiretry.minimize(problems, ids, num_retries, keep, optimizer, logger)
        ps = problem_stats[0]
        
#         for _ in range(10):
#             # improve the best variant using only one node
#             fval = ray.get(ps.retry.remote(optimizer))
#             logger.info("improve best variant " + ray.get(ps.name.remote()) 
#                         + ' ' + str(ray.get(ps.id.remote()))
#                         + ' ' + str(ray.get(ps.value.remote())) 
#                         + ' time = ' + str(dtime(t0)))
#             if fval < -1490:
#                 break           
            
        # optimize best variant starting from scratch using all nodes
        logger.info("improve best variant " + ray.get(ps.name.remote()) 
                    + ' ' + str(ray.get(ps.id.remote()))
                    + ' ' + str(ray.get(ps.value.remote())) 
                    + ' time = ' + str(dtime(t0)))
        problem = problems[ray.get(ps.index.remote())]
        _rayoptimizer(optimizer, problem, 1, max_time = 1200, log = logger)
Example #2
def main():
    # do 'pip install fcmaesray'
    # see https://docs.ray.io/en/master/cluster/index.html
    # call 'ray start --head --num-cpus=1' on the head node; the IP address it logs
    # replaces the one in the command executed on each worker node:
    # 'ray start --address=192.168.0.67:6379 --num-cpus=1'
    # adapt the IP address in the following ray.init call as well
    ray.init(address="192.168.0.67:6379")  #, include_webui=True)
    #ray.init() # for single node tests
    #test_all() # test all problems
    messengerFullLoop(de2_cma(min_evals), 1000)  # test messenger full
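For a quick local check without a cluster, the single-node variant mentioned in the comments can be used directly. A minimal sketch, assuming messengerFullLoop, de2_cma and min_evals are available as in Example #3; the name main_local is made up for illustration:

def main_local():
    # single node: Ray starts a local instance using all available CPUs
    ray.init()
    # short smoke test of the messenger full loop
    messengerFullLoop(de2_cma(min_evals), 10)
    ray.shutdown()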
Example #3
min_evals = 1500
max_nodes = 100

problems = [
    Cassini1(),
    Cassini2(),
    Rosetta(),
    Tandem(5),
    Messenger(),
    Gtoc1(),
    MessFull(),
    Sagas(),
    Cassini1minlp()
]
algos = [
    de2_cma(min_evals),
    de_cma(min_evals),
    da_cma(min_evals),
    Cma_cpp(min_evals),
    De_cpp(min_evals),
    Hh_cpp(min_evals),
    Da_cpp(min_evals),
    Dual_annealing(min_evals),
    Differential_evolution(min_evals)
]


def messengerFullLoop(opt, num, max_time=1200, log=logger()):
    problem = MessFull()
    minimizers = None  # remote actors created by minimize will be reused
    log.info(problem.name + ' ' + opt.name)
Example #4
def minimize(problems, ids=None, num_retries = min(256, 8*mp.cpu_count()), 
             keep = 0.7, optimizer = de2_cma(1500), logger = logger()):
      
    """Minimization of a list of optimization problems by first applying parallel retry
    to filter the best ones and then applying coordinated retry to evaluate these further. 
    Can replace mixed integer optimization if the integer variables are narrowly bound. 
    In this case all combinations of these integer values can be enumerated to generate a
    list of problem instances each representing one combination. See for instance 
    https://www.esa.int/gsp/ACT/projects/gtop/tandem where there is a problem instance for each
    planet sequence.
     
    Parameters
    ----------
    
    problems: list
        list of objects providing name, fun and bounds attributes like fcmaes.astro.Astrofun

    ids:  list, optional
        list of objects corresponding to the list of problems, used in logging to identify 
        the problem variant currently logged. If None, the index of the problem 
        variant is used instead.
    
    num_retries:  int, optional
        number of coordinated retries applied in the problem filter for each problem 
        in each iteration.
 
    keep:  float, optional
        rate of the problems kept after each iteration. 100*(1 - keep) % will be deleted. 
                        
    optimizer: optimizer.Optimizer, optional
        optimizer to use for the problem filter.
        
    logger:  logger, optional
        logger for log output. If None, logging
        is switched off. Default is a logger which logs both to stdout and
        appends to a file ``optimizer.log``.    
     
    Returns
    -------
    list
        the ``problem_stats`` objects of the problem variants, the most promising 
        variant first; each provides the ``name``, ``id``, ``index`` and the best 
        ``value`` found for its variant. """

    solver = multiretry()
    n = len(problems)
        
    for i in range(n):    
        id = str(i+1) if ids is None else ids[i]   
        rps = problem_stats.remote(problems[i], id, i, num_retries, logger)
        solver.add(problems[i], rps)
    
    iter = 1    
    while solver.size() > 1:    
        solver.retry(optimizer)
        to_remove = int(round((1.0 - keep) * solver.size()))
        if to_remove == 0:
            to_remove = 1
        solver.remove_worst(to_remove)
        solver.dump_all(iter)
        iter += 1
    return solver.all_stats
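A usage sketch of this filter for the Tandem planet-sequence variants, mirroring Example #1. The import path fcmaesray.multiretry and the local ray.init() are assumptions; the problem setup and the actor accessors are taken from that example:

import ray
from fcmaes.astro import Tandem
from fcmaes.optimizer import logger, de2_cma
from fcmaesray import multiretry  # assumed import path, called as in Example #1

ray.init()  # local Ray; for a cluster see Example #2

# one Tandem problem instance per planet sequence
seqs = Tandem(0).seqs
problems = [Tandem(i) for i in range(len(seqs))]
ids = [str(seq) for seq in seqs]

# filter the variants; the first entry is the most promising variant
stats = multiretry.minimize(problems, ids, num_retries=256, keep=0.7,
                            optimizer=de2_cma(1500), logger=logger())
best = stats[0]
print(ray.get(best.name.remote()), ray.get(best.value.remote()))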
Example #5
# Examples for fcmaes parallel retry applied to the GTOP problems from https://www.esa.int/gsp/ACT/projects/gtop/
# Used to generate the results in https://github.com/dietmarwo/fast-cma-es/blob/master/Results.adoc

import math
from fcmaes.astro import MessFull, Messenger, Gtoc1, Cassini1, Cassini2, Rosetta, Tandem, Sagas, Cassini1minlp
from fcmaes.optimizer import logger, De_python, De_ask_tell, de_cma, de2_cma, da_cma, Cma_cpp, De_cpp, Da_cpp, Csma_cpp, Bite_cpp, Dual_annealing, Differential_evolution
from fcmaes import retry

import numpy as np
from scipy.optimize import Bounds

problems = [Cassini1(), Cassini2(), Rosetta(), Tandem(5), Messenger(), Gtoc1(), MessFull(), Sagas(), Cassini1minlp()]

max_evals = 50000

algos = [ de_cma(max_evals), de2_cma(max_evals), da_cma(max_evals), Cma_cpp(max_evals), De_cpp(max_evals),
         Da_cpp(max_evals), Bite_cpp(max_evals), Csma_cpp(max_evals), Dual_annealing(max_evals), Differential_evolution(max_evals)]
               
def test_all(num_retries = 10000, num = 1):
    for problem in problems:
        for algo in algos:
            _test_optimizer(algo, problem, num_retries, num) 
      
def _test_optimizer(opt, problem, num_retries = 32, num = 1):
    log = logger()
    log.info(problem.name + ' ' + opt.name)
    for i in range(num):
        name = str(i+1) + ' ' + problem.name if num > 1 else problem.name
        retry.minimize_plot(name, opt, problem.fun, problem.bounds, 
                            math.inf, 10.0, num_retries, logger=log)
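To benchmark a single algorithm/problem pair instead of the full cross product, _test_optimizer can be called directly. A minimal sketch using the names defined above; the retry count is arbitrary:

if __name__ == '__main__':
    # run one Cassini1 benchmark; minimize_plot also records the progress for plotting
    _test_optimizer(de_cma(max_evals), Cassini1(), num_retries=64)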
Example #6
problems = [
    Cassini1(),
    Cassini2(),
    Rosetta(),
    Tandem(5),
    Messenger(),
    Gtoc1(),
    MessFull(),
    Sagas(),
    Cassini1minlp()
]

max_evals = 50000

algos = [
    de_cma(max_evals),
    de2_cma(max_evals),
    da_cma(max_evals),
    Cma_cpp(max_evals),
    De_cpp(max_evals),
    Hh_cpp(max_evals),
    Da_cpp(max_evals),
    Bite_cpp(max_evals),
    Csma_cpp(max_evals),
    Dual_annealing(max_evals),
    Differential_evolution(max_evals)
]


def test_all(num_retries=10000, num=1):
    for problem in problems:
        for algo in algos: