Example #1
def main():
    #numVars, numObjs, numConstr, lower, upper = bounds_re('RE21')
    #rep = re_problem('RE21', weight_bounds = Bounds([0, 10], [0.001, 100]) )
    #rep = re_problem('RE31', weight_bounds = Bounds([0.1, 0.0001, 0.1], [1, 0.001, 1]) )
    #rep = re_problem('RE24', weight_bounds = Bounds([0.1, 0.1], [1, 1]) )
    rep = re_problem('RE42',
                     weight_bounds=Bounds([0.2, 0.2, 0.2, 1000],
                                          [1, 1, 1, 1000]))
    minimize_plot(rep, de_cma(1000), '_decma', num_retries=320, exp=2.0)
Example #2
def messengerFullLoop():
    while True:
        problem = MessFull()
        logger().info(problem.name + ' de + cmaes c++')
        minimize(problem.fun,
                 bounds=problem.bounds,
                 num_retries=50000,
                 value_limit=12.0,
                 logger=logger(),
                 optimizer=de_cma(1500))
Example #3
def main():
    numRuns = 100
    min_evals = 1500
    _test_optimizer(de_cma(min_evals),
                    Gtoc1(),
                    num_retries=10000,
                    num=numRuns,
                    value_limit=-300000.0,
                    stop_val=-1581949)
    _test_optimizer(de_cma(min_evals),
                    Cassini1(),
                    num_retries=4000,
                    num=numRuns,
                    value_limit=20.0,
                    stop_val=4.93075)
    _test_optimizer(de_cma(min_evals),
                    Cassini2(),
                    num_retries=6000,
                    num=numRuns,
                    value_limit=20.0,
                    stop_val=8.38305)
    _test_optimizer(de_cma(min_evals),
                    Messenger(),
                    num_retries=8000,
                    num=numRuns,
                    value_limit=20.0,
                    stop_val=8.62995)
    _test_optimizer(de_cma(min_evals),
                    Rosetta(),
                    num_retries=4000,
                    num=numRuns,
                    value_limit=20.0,
                    stop_val=1.34335)
    _test_optimizer(de_cma(min_evals),
                    Sagas(),
                    num_retries=4000,
                    num=numRuns,
                    value_limit=100.0,
                    stop_val=18.188)
    _test_optimizer(de_cma(min_evals),
                    Tandem(5),
                    num_retries=20000,
                    num=numRuns,
                    value_limit=-300.0,
                    stop_val=-1500)
    _test_optimizer(de_cma(min_evals),
                    MessFull(),
                    num_retries=50000,
                    num=numRuns,
                    value_limit=12.0,
                    stop_val=1.960)
Example #4
def test_multiretry(num_retries=512,
                    keep=0.7,
                    optimizer=de_cma(1500),
                    logger=logger(),
                    repeat=50):
    seqs = Tandem(0).seqs
    n = len(seqs)
    problems = [Tandem(i) for i in range(n)]
    ids = [str(seqs[i]) for i in range(n)]
    for _ in range(100):
        problem_stats = multiretry.minimize(problems, ids, num_retries, keep, optimizer, logger)
        ps = problem_stats[0]
        for _ in range(repeat):
            logger.info("problem " + ps.prob.name + ' ' + str(ps.id))
            ps.retry(optimizer)
Example #5
def test_de_cma_parallel_retry(problem):
    # parallel optimization retry using a DE -> CMA-ES sequence
    t0 = time.perf_counter()
    evals = 0
    for i in range(1000):
        ret = retry.minimize(problem.fun,
                             problem.bounds,
                             logger=logger(),
                             optimizer=de_cma(50000),
                             value_limit=10)

        evals += ret.nfev
        print("{0}: time = {1:.1f} fun = {2:.3f} nfev = {3}".format(
            i + 1, dtime(t0), ret.fun, evals))
    return ret
Example #6
def test_multiretry(num_retries=128,
                    keep=0.7,
                    optimizer=de_cma(1500),
                    logger=logger(),
                    repeat=50):
    problems = []
    ids = []
    for seq in sequences():
        problems.append(Cassini1multi(planets=seq))
        ids.append(str(seq))
    for _ in range(100):
        problem_stats = multiretry.minimize(problems, ids, num_retries, keep,
                                            optimizer, logger)
        ps = problem_stats[0]
        for _ in range(repeat):
            logger.info("problem " + ps.prob.name + ' ' + str(ps.id))
            ps.retry(optimizer)
Example #7
def __init__(self, bounds, workers, value_limit, num_retries, popsize,
             min_evaluations, max_eval_fac, check_interval, capacity,
             stop_fitness, optimizer):
    if optimizer is None:
        # use the parameter; self.min_evaluations is not yet set at this point
        optimizer = de_cma(min_evaluations, popsize, stop_fitness)
    if max_eval_fac is None:
        max_eval_fac = int(min(50, 1 + num_retries // check_interval))
    self.store = Store(bounds, max_eval_fac, check_interval, capacity,
                       None, num_retries)
    self.improved = mp.RawValue(ct.c_bool, False)
    self.bounds = bounds
    self.workers = mp.cpu_count() if workers is None else workers
    self.value_limit = value_limit
    self.num_retries = num_retries
    self.popsize = popsize
    self.min_evaluations = min_evaluations
    self.max_eval_fac = max_eval_fac
    self.check_interval = check_interval
    self.capacity = capacity
    self.stop_fitness = stop_fitness
    self.optimizer = optimizer
    self.procs = []
Example #8
def optimize():
    name = 'tsin3000.60'  # 60 trajectories to choose from
    # name = 'tsin3000.10' # 10 fixed trajectories
    transfers = pd.read_csv('data/' + name + '.xz',
                            sep=' ',
                            usecols=[1, 2, 3, 4, 5, 6, 7],
                            compression='xz',
                            names=[
                                'asteroid', 'station', 'trajectory', 'mass',
                                'dv', 'transfer_start', 'transfer_time'
                            ])
    # uncomment to write a clear text csv
    # transfers.to_csv('data/' + name + '.txt', sep=' ', header=False)

    global TRAJECTORY_NUM, ASTEROID_NUM  # adjust number of asteroids / trajectories
    TRAJECTORY_NUM = int(np.amax(transfers["trajectory"]) + 1)
    ASTEROID_NUM = int(np.amax(transfers["asteroid"]) + 1)

    # bounds for the objective function
    dim = 10 + 2 * STATION_NUM - 1
    lower_bound = np.zeros(dim)
    # lower_bound[10+STATION_NUM:dim] = 0.00001
    upper_bound = np.zeros(dim)
    lower_bound[:] = 0.0000001
    upper_bound[10:] = 0.9999999
    upper_bound[:10] = TRAJECTORY_NUM - 0.00001  # trajectory indices
    bounds = Bounds(lower_bound, upper_bound)

    # smart boundary management (SMB) with DE->CMA
    store = advretry.Store(fitness(transfers),
                           bounds,
                           num_retries=10000,
                           max_eval_fac=5.0,
                           logger=logger())
    advretry.retry(store, de_cma(10000).minimize)

    # smart boundary management (SMB) with CMA-ES
    # store = advretry.Store(fitness(transfers), bounds, num_retries=10000, max_eval_fac=5.0, logger=logger())
    # advretry.retry(store, Cma_cpp(10000).minimize)

    # BiteOpt algorithm multi threaded
    # store = retry.Store(fitness(transfers), bounds, logger=logger())
    # retry.retry(store, Bite_cpp(1000000, M=1).minimize, num_retries=3200)

    # CMA-ES multi threaded
    # store = retry.Store(fitness(transfers), bounds, logger=logger())
    # retry.retry(store, Cma_cpp(1000000).minimize, num_retries=3200)

    # scipy minimize algorithm multi threaded
    # store = retry.Store(fitness(transfers), bounds, logger=logger())
    # retry.retry(store, Minimize(1000000).minimize, num_retries=3200)

    # fcmaes differential evolution multi threaded
    # store = retry.Store(fitness(transfers), bounds, logger=logger())
    # retry.retry(store, De_cpp(1000000).minimize, num_retries=3200)

    # scipy differential evolution multi threaded
    # store = retry.Store(fitness(transfers), bounds, logger=logger())
    # retry.retry(store, Differential_evolution(1000000).minimize, num_retries=3200)

    # scipy dual annealing multi threaded
    # store = retry.Store(fitness(transfers), bounds, logger=logger())
    # retry.retry(store, Dual_annealing(1000000).minimize, num_retries=3200)

    # scipy differential evolution single threaded
    # store = retry.Store(fitness(transfers), bounds, logger=logger())
    # retry.retry(store, Differential_evolution(1000000).minimize, num_retries=320, workers=1)

    return store.get_xs(), store.get_ys()
Example #9
def minimize(
    fun,
    bounds=None,
    value_limit=math.inf,
    num_retries=1000,
    logger=None,
    workers=mp.cpu_count(),
    popsize=31,
    max_evaluations=50000,
    capacity=500,
    stop_fitness=None,
    optimizer=None,
):
    """Minimization of a scalar function of one or more variables using parallel 
     CMA-ES retry.
     
    Parameters
    ----------
    fun : callable
        The objective function to be minimized.
            ``fun(x, *args) -> float``
        where ``x`` is a 1-D array with shape (n,) and ``args``
        is a tuple of the fixed parameters needed to completely
        specify the function.
    bounds : sequence or `Bounds`, optional
        Bounds on variables. There are two ways to specify the bounds:
            1. Instance of the `scipy.Bounds` class.
            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
               is used to specify no bound.
    value_limit : float, optional
        Upper limit for optimized function values to be stored. 
    num_retries : int, optional
        Number of optimization retries.    
    logger : logger, optional
        logger for log output of the retry mechanism. If None, logging
        is switched off. Default is a logger which logs both to stdout and
        appends to a file ``optimizer.log``.
    workers : int, optional
        number of parallel processes used. Default is mp.cpu_count()
    popsize : int, optional
        CMA-ES population size used for all CMA-ES runs. 
        Not used for differential evolution. 
        Ignored if parameter optimizer is defined. 
    max_evaluations : int, optional
        Forced termination of all optimization runs after ``max_evaluations`` 
        function evaluations. Only used if optimizer is undefined, otherwise
        this setting is defined in the optimizer. 
    capacity : int, optional
        capacity of the evaluation store.
    stop_fitness : float, optional
        Limit for the fitness value. Optimization runs terminate if this value is reached.
    optimizer : optimizer.Optimizer, optional
        optimizer to use. Default is a sequence of differential evolution and CMA-ES.
     
    Returns
    -------
    res : scipy.OptimizeResult
        The optimization result is represented as an ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, 
        ``fun`` the best function value, ``nfev`` the number of function evaluations,
        ``success`` a Boolean flag indicating if the optimizer exited successfully. """

    if optimizer is None:
        optimizer = de_cma(max_evaluations, popsize, stop_fitness)
    store = Store(bounds, capacity=capacity, logger=logger)
    return retry(fun, store, optimizer.minimize, num_retries, value_limit,
                 workers)
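
A minimal usage sketch for the ``minimize`` defined above, assuming it is importable as ``fcmaes.retry.minimize``; the sphere objective, the dimension and the bounds are illustrative assumptions, not part of the original example:

import numpy as np
from scipy.optimize import Bounds
from fcmaes.retry import minimize  # assumed import path

def sphere(x):  # toy stand-in objective
    return float(np.sum(x * x))

bounds = Bounds([-5.0] * 4, [5.0] * 4)  # 4-dimensional box constraints
res = minimize(sphere, bounds, num_retries=32, logger=None)
print(res.x, res.fun, res.nfev)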
Example #10
def main():
    test_optimizer(de_cma(1500), Cassini1minlp())
Example #11
# Copyright (c) Dietmar Wolz.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory.

import math
from fcmaes.astro import MessFull, Messenger, Gtoc1, Cassini1, Cassini2, Rosetta, Tandem, Sagas
from fcmaes.optimizer import logger, de_cma, da_cma, Cma_cpp, De_cpp, Da_cpp, Dual_annealing, Differential_evolution
from fcmaes.retry import minimize
            
problems = [Cassini1(), Cassini2(), Rosetta(), Tandem(5), Messenger(), Gtoc1(), MessFull(), Sagas()]

max_evals = 50000

algos = [de_cma(max_evals), da_cma(max_evals), Cma_cpp(max_evals), De_cpp(max_evals), 
         Da_cpp(max_evals), Dual_annealing(max_evals), Differential_evolution(max_evals)]
               
def test_all(num_retries = 10000, num = 10):
    for problem in problems:
        for algo in algos:
            _test_optimizer(algo, problem, num_retries, num) 
      
def _test_optimizer(opt, problem, num_retries = 32, num = 10):
    log = logger()
    log.info(problem.name + ' ' + opt.name)
    for _ in range(num):
        ret = minimize(problem.fun, problem.bounds, math.inf, num_retries, log, optimizer=opt)

def main():
    test_all()
Example #12
def main():
    test_optimizer(de_cma(1500), Tandem_minlp()) 
Example #13
def minimize(problems,
             ids=None,
             num_retries=min(256, 8 * mp.cpu_count()),
             keep=0.7,
             optimizer=de_cma(1500),
             logger=None,
             datafile=None):
    """Minimization of a list of optimization problems by first applying parallel retry
    to filter the best ones and then applying coordinated retry to evaluate these further. 
    Can replace mixed integer optimization if the integer variables are narrowly bound. 
    In this case all combinations of these integer values can be enumerated to generate a
    list of problem instances each representing one combination. See for instance 
    https://www.esa.int/gsp/ACT/projects/gtop/tandem where there is a problem instance for each
    planet sequence.
     
    Parameters
    ----------
    
    problems: list
        list of objects providing name, fun and bounds attributes like fcmaes.astro.Astrofun

    ids:  list, optional
        list of objects corresponding to the list of problems used in logging to identify the 
        problem variant currently logged. If None, the index of the problem 
        variant is used instead.
    
    num_retries:  int, optional
        number of coordinated retries applied in the problem filter for each problem 
        in each iteration.
 
    keep:  float, optional
        rate of the problems kept after each iteration. 100*(1 - keep) % will be deleted. 
                        
    optimizer: optimizer.Optimizer, optional
        optimizer to use for the problem filter.
        
    logger : logger, optional
        logger for log output. If None, logging
        is switched off. Default is a logger which logs both to stdout and
        appends to a file.  
        
    datafile : string, optional
        file to persist / retrieve the internal state of the optimizations. 
     
    Returns
    -------
    list of problem_stats
        The problem variants, sorted by their best achieved function value,
        best first. Each entry provides the ``prob`` and ``id`` attributes
        and a ``retry`` method for further refinement. """

    solver = multiretry(logger)
    n = len(problems)

    for i in range(n):
        id = str(i + 1) if ids is None else ids[i]
        solver.add(problem_stats(problems[i], id, i, num_retries, logger))

    if datafile is not None:
        solver.load(datafile)

    while solver.size() > 1:
        solver.retry(optimizer)
        to_remove = int(round((1.0 - keep) * solver.size()))
        if to_remove == 0 and keep < 1.0:
            to_remove = 1
        solver.remove_worst(to_remove)
        solver.dump()
        if datafile is not None:
            solver.save(datafile)

    idx = solver.values_all().argsort()
    return list(np.asarray(solver.all_stats)[idx])
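
A short usage sketch of this multiretry ``minimize``, assuming it is importable as ``fcmaes.multiretry.minimize`` and reusing the Tandem variants from Example #4; the retry counts are illustrative:

from fcmaes.astro import Tandem
from fcmaes.multiretry import minimize  # assumed import path
from fcmaes.optimizer import de_cma, logger

seqs = Tandem(0).seqs
problems = [Tandem(i) for i in range(len(seqs))]
ids = [str(seq) for seq in seqs]
stats = minimize(problems, ids=ids, num_retries=128, keep=0.7,
                 optimizer=de_cma(1500), logger=logger())
best = stats[0]  # entries come back sorted by best value, best first
print(best.prob.name, best.id)
best.retry(de_cma(1500))  # refine the most promising variant further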
Example #14
problems = [
    Cassini1(),
    Cassini2(),
    Rosetta(),
    Tandem(5),
    Messenger(),
    Gtoc1(),
    MessFull(),
    Sagas(),
    Cassini1minlp()
]

max_evals = 50000

algos = [
    de_cma(max_evals),
    de2_cma(max_evals),
    da_cma(max_evals),
    Cma_cpp(max_evals),
    De_cpp(max_evals),
    Hh_cpp(max_evals),
    Da_cpp(max_evals),
    Bite_cpp(max_evals),
    Csma_cpp(max_evals),
    Dual_annealing(max_evals),
    Differential_evolution(max_evals)
]


def test_all(num_retries=10000, num=1):
    for problem in problems:
        for algo in algos:
            _test_optimizer(algo, problem, num_retries, num)
Example #15
    de_minimize_plot(zdt1(20), '100k64')
    de_minimize_plot(schaffer(20), '100k64')
    de_minimize_plot(poloni(20), '100k64')
    de_minimize_plot(fonseca(20), '100k64')

    nsga_minimize_plot(zdt1(20), '100k64')
    nsga_minimize_plot(schaffer(20), '100k64')
    nsga_minimize_plot(poloni(20), '100k64')
    nsga_minimize_plot(fonseca(20), '100k64')

    minimize_plot(zdt1(20), Bite_cpp(M=16), '50k1k')
    minimize_plot(schaffer(20), Bite_cpp(M=16), '50k1k')
    minimize_plot(poloni(20), Bite_cpp(M=16), '50k1k', exp=1.0)
    minimize_plot(fonseca(20), Bite_cpp(M=16), '50k1k', exp=3.0)

    minimize_plot(zdt1(20), de_cma(), '50k1k')
    minimize_plot(schaffer(20), de_cma(), '50k1k')
    minimize_plot(poloni(20), de_cma(), '50k1k', exp=1.0)
    minimize_plot(fonseca(20), de_cma(), '50k1k', exp=3.0)

    minimize_plot(zdt1(20), random_search(), '50k1k')
    minimize_plot(schaffer(20), random_search(), '50k1k')
    minimize_plot(poloni(20), random_search(), '50k1k', exp=1.0)
    minimize_plot(fonseca(20), random_search(), '50k1k', exp=3.0)

    minimize_plot(cassini1_mo(),
                  de_cma(50000),
                  '50k4k',
                  num_retries=4096,
                  value_limits=[40, 7000])
    minimize_plot(cassini1_mo(),
Example #16
max_nodes = 100

problems = [
    Cassini1(),
    Cassini2(),
    Rosetta(),
    Tandem(5),
    Messenger(),
    Gtoc1(),
    MessFull(),
    Sagas(),
    Cassini1minlp()
]
algos = [
    de2_cma(min_evals),
    de_cma(min_evals),
    da_cma(min_evals),
    Cma_cpp(min_evals),
    De_cpp(min_evals),
    Hh_cpp(min_evals),
    Da_cpp(min_evals),
    Dual_annealing(min_evals),
    Differential_evolution(min_evals)
]


def messengerFullLoop(opt, num, max_time=1200, log=logger()):
    problem = MessFull()
    minimizers = None  # remote actors created by minimize will be reused
    log.info(problem.name + ' ' + opt.name)
    for i in range(num):
Example #17
def minimize(fun,
             bounds,
             value_limit=math.inf,
             num_retries=5000,
             logger=None,
             workers=mp.cpu_count(),
             popsize=31,
             min_evaluations=1500,
             max_eval_fac=None,
             check_interval=100,
             capacity=500,
             stop_fitness=-math.inf,
             optimizer=None,
             statistic_num=0,
             datafile=None):
    """Minimization of a scalar function of one or more variables using 
    smart parallel optimization retry.
     
    Parameters
    ----------
    fun : callable
        The objective function to be minimized.
            ``fun(x, *args) -> float``
        where ``x`` is a 1-D array with shape (n,) and ``args``
        is a tuple of the fixed parameters needed to completely
        specify the function.
    bounds : sequence or `Bounds`, optional
        Bounds on variables. There are two ways to specify the bounds:
            1. Instance of the `scipy.Bounds` class.
            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
               is used to specify no bound.
    value_limit : float, optional
        Upper limit for optimized function values to be stored. 
        This limit needs to be set carefully, to a value seldom reached by an
        optimization retry, so that the store stays free of bad runs: the
        crossover offspring of bad parents can cause the algorithm to get
        stuck at local minima.
    num_retries : int, optional
        Number of optimization retries.    
    logger : logger, optional
        logger for log output of the retry mechanism. If None, logging
        is switched off. Default is a logger which logs both to stdout and
        appends to a file ``optimizer.log``.
    workers : int, optional
        number of parallel processes used. Default is mp.cpu_count()
    popsize : int, optional
        CMA-ES population size used for all CMA-ES runs. 
        Not used for differential evolution. 
        Ignored if parameter optimizer is defined. 
    min_evaluations : int, optional 
        Initial limit of the number of function evaluations. Only used if optimizer is undefined, 
        otherwise this setting is defined in the optimizer. 
    max_eval_fac : int, optional
        Final limit of the number of function evaluations = max_eval_fac*min_evaluations
    check_interval : int, optional
        After ``check_interval`` runs the store is sorted and the evaluation limit
        is incremented by ``evals_step_size``
    capacity : int, optional
        capacity of the evaluation store. Higher value means broader search.
    stop_fitness : float, optional
        Limit for the fitness value. Optimization runs terminate if this value is reached.
    optimizer : optimizer.Optimizer, optional
        optimizer to use. Default is a sequence of differential evolution and CMA-ES.
        Since the advanced retry mechanism sets the initial step size, it works
        best if CMA-ES is used, alone or as part of the optimizer sequence.
    datafile : string, optional
        file to persist / retrieve the internal state of the optimizations. 
    
    Returns
    -------
    res : scipy.OptimizeResult
        The optimization result is represented as an ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, 
        ``fun`` the best function value, ``nfev`` the number of function evaluations,
        ``success`` a Boolean flag indicating if the optimizer exited successfully. """

    if optimizer is None:
        optimizer = de_cma(min_evaluations, popsize, stop_fitness)
    if max_eval_fac is None:
        max_eval_fac = int(min(50, 1 + num_retries // check_interval))
    store = Store(fun, bounds, max_eval_fac, check_interval, capacity, logger,
                  num_retries, statistic_num, datafile)
    if datafile is not None:
        try:
            store.load(datafile)
        except Exception:  # ignore a missing or unreadable datafile
            pass
    return retry(store, optimizer.minimize, value_limit, workers, stop_fitness)
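
A minimal usage sketch for the smart-retry ``minimize`` above, assuming it is importable as ``fcmaes.advretry.minimize``; the Cassini1 problem and the parameter values mirror the calls in Example #3 and are illustrative only:

from fcmaes.astro import Cassini1
from fcmaes.advretry import minimize  # assumed import path
from fcmaes.optimizer import logger

problem = Cassini1()
res = minimize(problem.fun, problem.bounds, value_limit=20.0,
               num_retries=4000, logger=logger())
print(res.x, res.fun, res.nfev)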
Example #18
def minimize(fun,
             bounds,
             weight_bounds,
             ncon=0,
             value_exp=2.0,
             value_limits=None,
             num_retries=1024,
             logger=None,
             workers=mp.cpu_count(),
             popsize=31,
             max_evaluations=50000,
             capacity=None,
             optimizer=None,
             statistic_num=0,
             plot_name=None):
    """Minimization of a multi objective function of one or more variables using parallel 
     optimization retry.
     
    Parameters
    ----------
    fun : callable
        The objective function to be minimized.
            ``fun(x, *args) -> ndarray``
        where ``x`` is a 1-D array with shape (n,) and ``args``
        is a tuple of the fixed parameters needed to completely
        specify the function. The returned 1-D array contains the
        objective values.
    bounds : sequence or `Bounds`, optional
        Bounds on variables. There are two ways to specify the bounds:
            1. Instance of the `scipy.Bounds` class.
            2. Sequence of ``(min, max)`` pairs for each element in `x`. None
               is used to specify no bound.
    weight_bounds : `Bounds`, optional
        Bounds on objective weights.
    ncon : int, optional
        number of constraints
    value_exp : float, optional
        exponent applied to the objective values for the weighted sum. 
    value_limits : sequence of floats, optional
        Upper limit for optimized objective values to be stored. 
    num_retries : int, optional
        Number of optimization retries.    
    logger : logger, optional
        logger for log output of the retry mechanism. If None, logging
        is switched off. Default is a logger which logs both to stdout and
        appends to a file ``optimizer.log``.
    workers : int, optional
        number of parallel processes used. Default is mp.cpu_count()
    popsize : int, optional
        CMA-ES population size used for all CMA-ES runs. 
        Not used for differential evolution. 
        Ignored if parameter optimizer is defined. 
    max_evaluations : int, optional
        Forced termination of all optimization runs after ``max_evaluations`` 
        function evaluations. Only used if optimizer is undefined, otherwise
        this setting is defined in the optimizer. 
    capacity : int, optional
        capacity of the evaluation store.
    optimizer : optimizer.Optimizer, optional
        optimizer to use. Default is a sequence of differential evolution and CMA-ES.
    plot_name : str, optional
        if defined, the Pareto front is plotted during the optimization to monitor progress
     
    Returns
    -------
    xs, ys: list of argument vectors and corresponding value vectors of the optimization results. """

    if optimizer is None:
        optimizer = de_cma(max_evaluations, popsize)
    if capacity is None:
        capacity = num_retries
    store = retry.Store(fun, bounds, capacity=capacity, logger=logger,
                        statistic_num=statistic_num, plot_name=plot_name)
    xs = np.array(mo_retry(fun, weight_bounds, ncon, value_exp,
                           store, optimizer.minimize, num_retries, value_limits, workers))
    ys = np.array([fun(x) for x in xs])
    return xs, ys
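
A hedged usage sketch for this multi-objective ``minimize``, assuming it is importable as ``fcmaes.moretry.minimize``; the bi-objective Schaffer-style function, its bounds and the weight bounds are illustrative assumptions:

import numpy as np
from scipy.optimize import Bounds
from fcmaes.moretry import minimize  # assumed import path

def schaffer(x):  # classic bi-objective test function
    return np.array([x[0] ** 2, (x[0] - 2.0) ** 2])

bounds = Bounds([-10.0], [10.0])                    # decision variable bounds
weight_bounds = Bounds([0.01, 0.01], [1.0, 1.0])    # objective weight bounds
xs, ys = minimize(schaffer, bounds, weight_bounds,
                  num_retries=64, logger=None)
print(len(xs), ys.min(axis=0))  # number of solutions and best value per objective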