Example #1
def main(filename, directory):
    initialFilename = os.path.join(directory, 'initial.json')
    genDir = os.path.join(directory, 'gens')
    finalFilename = os.path.join(directory, 'final.json')
    baseCondor = 'base.condor'

    with open(baseCondor, 'r') as f:
        condorConfig = f.read()

    optMkdir(directory)
    optMkdir(genDir)
    shutil.copyfile(filename, initialFilename)
    with open(filename, 'r') as f:
        conf = json.load(f)
    paramNames = []
    paramVals = []
    flattenParams(conf, paramNames, paramVals, '')
    sigma0 = SIGMA0
    opts = Options()
    opts['popsize'] = POP_SIZE
    #opts.printme()
    cma = CMAEvolutionStrategy(paramVals, sigma0, opts)
    while (cma.countiter < NUM_GENS) and not (cma.stop()):
        thisGenDir = os.path.join(genDir, str(cma.countiter))
        optMkdir(thisGenDir)
        xs = cma.ask()
        xs, fits = runEvals(paramNames, xs, cma.countiter, thisGenDir,
                            condorConfig)
        cma.tell(xs, fits)
    res = cma.result()
    paramsToJsonFile(finalFilename, paramNames, res[0])
Example #3
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        y = problem.evaluate_config(config)
        return y

    from cma import CMAEvolutionStrategy
    from litebo.utils.util_funcs import get_types
    from litebo.utils.config_space import Configuration

    types, bounds = get_types(cs)
    assert all(types == 0)

    # Check Constant Hyperparameter
    const_idx = list()
    for i, bound in enumerate(bounds):
        if np.isnan(bound[1]):
            const_idx.append(i)

    hp_num = len(bounds) - len(const_idx)
    es = CMAEvolutionStrategy(hp_num * [0], 0.99, inopts={'bounds': [0, 1], 'seed': seed})

    global_start_time = time.time()
    global_trial_counter = 0
    config_list = []
    perf_list = []
    time_list = []
    eval_num = 0
    while eval_num < max_runs:
        X = es.ask(number=es.popsize)
        _X = X.copy()
        for i in range(len(_X)):
            for index in const_idx:
                _X[i] = np.insert(_X[i], index, 0)  # np.insert returns a copy
        # _X = np.asarray(_X)
        values = []
        for xi in _X:
            # convert array to Configuration
            config = Configuration(cs, vector=xi)
            perf = objective_function(config)
            global_time = time.time() - global_start_time
            global_trial_counter += 1
            values.append(perf)
            print('=== CMAES Trial %d: %s perf=%f global_time=%f' % (global_trial_counter, config, perf, global_time))

            config_list.append(config)
            perf_list.append(perf)
            time_list.append(global_time)
        values = np.reshape(values, (-1,))
        es.tell(X, values)
        eval_num += es.popsize

    print('===== Total evaluation times=%d. Truncate to max_runs=%d.' % (eval_num, max_runs))
    config_list = config_list[:max_runs]
    perf_list = perf_list[:max_runs]
    time_list = time_list[:max_runs]
    return config_list, perf_list, time_list
Example #4
def evolve_greedy_policies(model_dist: ModelDist,
                           iterations: int = 30,
                           population_size: int = 5):
    """
    Evolves the greedy policy to find the best policies

    :param model_dist: Model distribution
    :param iterations: Number of evolutions
    :param population_size: The population size
    """
    print(f'Evolves the greedy policies for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')

    eval_tasks, eval_servers = model_dist.generate_oneshot()
    lower_bound = greedy_algorithm(eval_tasks, eval_servers, ValuePriority(),
                                   ProductResources(),
                                   SumSpeed()).social_welfare
    print(f'Lower bound is {lower_bound}')
    reset_model(eval_tasks, eval_servers)

    evolution_strategy = CMAEvolutionStrategy(
        11 * [1], 0.2, {'population size': population_size})
    for iteration in range(iterations):
        suggestions = evolution_strategy.ask()
        tasks, servers = model_dist.generate_oneshot()

        solutions = []
        for i, suggestion in enumerate(suggestions):
            solutions.append(
                greedy_algorithm(
                    tasks, servers,
                    TaskPriorityEvoStrategy(i, *suggestion[:5]),
                    ServerSelectionEvoStrategy(i, *suggestion[5:8]),
                    ResourceAllocationEvoStrategy(
                        i, *suggestion[8:11])).social_welfare)
            reset_model(tasks, servers)

        evolution_strategy.tell(suggestions, solutions)
        evolution_strategy.disp()

        if iteration % 2 == 0:
            evaluation = greedy_algorithm(
                eval_tasks, eval_servers,
                TaskPriorityEvoStrategy(0, *suggestions[0][:5]),
                ServerSelectionEvoStrategy(0, *suggestions[0][5:8]),
                ResourceAllocationEvoStrategy(0, *suggestions[0][8:11]))
            print(f'Iter: {iteration} - {evaluation.social_welfare}')

    pprint.pprint(evolution_strategy.result())
Example #5
def main():
    # param length
    # 10 fights
    # rating_mu, rating_sig, wins, losses, odds
    input_params_len = 10 * 2 * 4
    es = CMAEvolutionStrategy([0] * input_params_len, 0.5)

    while not es.stop():
        solutions = es.ask()
        func_vals = [get_betting_result(x) for x in solutions]
        es.tell(solutions, func_vals)
        es.logger.add()  # write data to disc to be plotted
        es.disp()

    es.result_pretty()
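
Because the loop above calls es.logger.add() each generation, the logged run can be plotted once it finishes. A minimal follow-up sketch, assuming pycma's default logger output files are present in the working directory:

import cma

# plot the data written to disk by es.logger.add() during the run above
cma.plot()
# equivalently, via the strategy's own logger object:
# es.logger.plot()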
Example #6
class CMAES(Algorithm):
    def initialize(self, **kwargs):
        super().initialize(**kwargs)
        if self.x0 is None:
            self.x0 = self.domain.l + self.domain.range / 2

        # cma operates on normalized scale
        x0 = self.domain.normalize(self.x0)
        self.cma = CMAEvolutionStrategy(x0=x0,
                                        sigma0=self.config.sigma0,
                                        inopts={'bounds': [0, 1]})
        self._X = None
        self._X_i = 0
        self._Y = None

    def _next(self, context=None):
        if self._X is None:
            # get new population
            self._X = self.cma.ask()
            self._Y = np.empty(len(self._X))
            self._X_i = 0

        return self.domain.denormalize(self._X[self._X_i])

    def finalize(self):
        self.cma.result_pretty()

    def best_predicted(self):
        xbest = None
        if self.cma.result.xbest is not None:
            xbest = self.domain.denormalize(self.cma.result.xbest)

        return xbest if xbest is not None else self.x0

    def add_data(self, data):
        self._Y[self._X_i] = data['y']
        self._X_i += 1

        # population complete
        if self._X_i == len(self._X):
            self.cma.tell(self._X, -self._Y)
            self._X = None

        super().add_data(data)
Example #7
class CMAES:
    def __init__(self, x0, s0=0.5, opts={}):
        """ wrapper around cma.CMAEvolutionStrategy """
        self.num_parameters = len(x0)
        print('{} params in controller'.format(self.num_parameters))
        self.solver = CMAEvolutionStrategy(x0, s0, opts)

    def __repr__(self):
        return '<pycma wrapper>'

    def ask(self):
        """ sample parameters """
        samples = self.solver.ask()
        return np.array(samples).reshape(-1, self.num_parameters)

    def tell(self, samples, fitness):
        """ update parameters with total episode reward """
        return self.solver.tell(samples, -1 * fitness)

    @property
    def mean(self):
        return self.solver.mean
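
A minimal usage sketch for the wrapper above, assuming a toy 5-parameter problem with the negative sphere function as the episode reward (the fitness is passed as a NumPy array so that the sign flip inside tell() works):

import numpy as np

# drive the CMAES wrapper defined above on an assumed toy problem
es = CMAES(x0=np.zeros(5), s0=0.5, opts={'popsize': 8, 'verbose': -9})
for generation in range(20):
    samples = es.ask()                                       # shape (popsize, num_parameters)
    rewards = np.array([-np.sum(s ** 2) for s in samples])   # toy episode rewards (to be maximized)
    es.tell(samples, rewards)                                # wrapper negates rewards for minimization
print(es.mean)                                               # current distribution mean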
Example #8
    def update_borders(self):
        for i in range(len(self.clusters)):
            cluster = self.clusters[i]
            len_bounds = np.linalg.norm(cluster.border[1] - cluster.border[0])
            es = CMAEvolutionStrategy(
                cluster.border.ravel().tolist(), len_bounds * 0.1, {
                    'bounds':
                    [self.boundary.min_bounds[0], self.boundary.max_bounds[0]]
                })
            while not es.stop():
                solutions = es.ask()
                #TODO
                es.tell(
                    solutions,
                    [self.evaluate_border(border, i) for border in solutions])
                #es.tell( solutions, [cluster.evaluate_border(border) for border in solutions] )

                x_best = es.result()[0]
                #if x_best is not None and cluster.in_global_border( x_best ):
                if x_best is not None:
                    cluster.border = x_best.reshape(cluster.border.shape)
Example #9
def run():
    train = 0
    
    names = [
        # 'bet_pred_a', 'bet_pred_b', 'bet_odds_a', 'bet_odds_b', 'bet_wnl_a', 'bet_wnl_b',
        'bet_ts_a', 'bet_ts_b', 'bet_tmi_a', 'bet_tmi_b', 'bet_tma_a', 'bet_tma_b',
    ]
    params = [
        0, 0, 0, 0, 0, 0
    ]
    bounds = [[-np.inf],
              [np.inf]]
    assert len(params) == len(names)
    # assert len(params) == len(bounds[0])

    if train:
        sigma = 1
        opts = CMAOptions()
        # opts['tolx'] = 1E-2
        opts['bounds'] = bounds
        es = CMAEvolutionStrategy(params, sigma, inopts=opts)
        while not es.stop():
            solutions = es.ask()
            fitness = [main(x, train=1) for x in solutions]
            es.tell(solutions, fitness)
            es.disp()
            print(list(es.result[0]))
            print(list(es.result[5]))
        es.result_pretty()
        print('')
        print('best')
        print(list(es.result[0]))
        print('')
        print('xfavorite: distribution mean in "phenotype" space, to be considered as current best estimate of the optimum')
        print(list(es.result[5]))

    else:
        main(params)
Example #10
def cma_minimize(acq_function,
                 bounds,
                 return_best_only=True,
                 **kwargs) -> Tuple[torch.Tensor, ...]:
    x0 = 0.5 * np.ones(bounds.shape[-1])
    opts = {
        'bounds': [0, 1],
        "popsize": kwargs.get('popsize', 100),
        "seed": 10,
        "verbose": -1
    }
    if "maxiter" in kwargs:
        opts.update(maxiter=kwargs["maxiter"])
    es = CMAEvolutionStrategy(x0=x0,
                              sigma0=kwargs.get('sigma0', 0.5),
                              inopts=opts)

    xs_list, y_list = [], []
    with torch.no_grad():
        while not es.stop():
            xs = es.ask()
            X = torch.tensor(xs, dtype=torch.float64)
            Y = acq_function(X.unsqueeze(-2))
            y = Y.view(-1).double().numpy()
            es.tell(xs, y)
            xs_list.append(xs)
            y_list.append(y)

        if return_best_only:
            cand = torch.tensor([es.best.x])
            cand_val = torch.tensor([es.best.f])
        else:
            cand = torch.tensor(np.concatenate(xs_list, axis=0))
            cand_val = torch.tensor(np.concatenate(y_list, axis=0))

    return cand, cand_val
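
A hedged usage sketch for cma_minimize above, with an assumed toy quadratic standing in for a real acquisition function (its minimum lies at 0.3 in every coordinate of the unit cube):

import torch

def toy_acq(X):
    # X arrives with shape (n, 1, d) because cma_minimize unsqueezes dim -2
    return ((X - 0.3) ** 2).sum(dim=-1).squeeze(-1)

bounds = torch.zeros(2, 3)  # only bounds.shape[-1] (the dimension d) is used for x0
cand, cand_val = cma_minimize(toy_acq, bounds, maxiter=30, popsize=20)
print(cand, cand_val)       # best point found and its acquisition value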
Example #11
class CMAOptimizationSteppable(SteppableBasePy):
    def __init__(self, _simulator, _frequency=1):
        SteppableBasePy.__init__(self, _simulator, _frequency)

        self.optim = None
        self.sim_length_mcs = self.simulator.getNumSteps() - 1
        self.f_vec = []
        self.X_vec = []
        self.X_vec_check = []
        self.num_fcn_evals = -1

    def minimized_fcn(self, *args, **kwds):
        """
        This function needs to be overloaded in the subclass - it implements the simulation fitness metric
        :return {float}: number describing the "fitness" of the simulation
        """
        return 0.0

    def initial_condition_fcn(self, *args, **kwds):
        """
        This function prepares the initial condition for the simulation. Typically it creates the cell field and initializes
        all cell and field properties
        :param args: first argument is a vector of parameters that are being optimized. The rest are up to the user
        :param kwds: keyword arguments - these are up to the user
        :return: None
        """
        pass

    def init_optimization_strategy(self, *args, **kwds):
        """
        init_optimization_strategy initializes the optimizer object. Its arguments depend on the specific initializer used.
        In the case of the CMA optimizer the options are described here: https://pypi.python.org/pypi/cma
        :param args: see https://pypi.python.org/pypi/cma
        :param kwds: see https://pypi.python.org/pypi/cma
        :return: None
        """

        self.optim = CMAEvolutionStrategy(*args, **kwds)

    def optimization_step(self, mcs):
        """
        This function implements housekeeping associated with running the optimization algorithm in a steppable
        :param mcs {int}: current mcs
        :return: None
        """

        if not mcs % self.sim_length_mcs:

            if self.optim.stop():
                self.stopSimulation()
                print('termination by', self.optim.stop())
                print('best f-value =', self.optim.result()[1])
                print('best solution =', self.optim.result()[0])

            if not len(self.X_vec):
                self.X_vec = self.optim.ask()

                if len(self.f_vec):
                    # print 'self.X_vec_check=', self.X_vec_check
                    # print 'self.f_vec=', self.f_vec
                    self.optim.tell(
                        self.X_vec_check,
                        self.f_vec)  # do all the real "update" work

                    self.optim.disp(20)  # display info every 20th iteration
                    self.optim.logger.add()  # log another "data line"

                    self.f_vec = []

                    self.num_fcn_evals = len(self.X_vec)

                self.X_vec_check = deepcopy(self.X_vec)

            self.X_current = self.X_vec[0]

            if len(self.X_vec_check) != self.num_fcn_evals:
                fcn_target = self.minimized_fcn()
                self.f_vec.append(fcn_target)
                self.X_vec.pop(0)

            self.num_fcn_evals -= 1

            CompuCellSetup.reset_current_step(0)
            self.simulator.setStep(0)
            self.clean_cell_field(reset_inventory=True)
            self.initial_condition_fcn(self.X_current)
Example #12
        if opt.vdisplay:
            from xvfbwrapper import Xvfb
            xvfb = Xvfb()
            xvfb.start()

        if opt.multiproc:
            ray.init()

        #TODO:revisit sigma
        es = CMAEvolutionStrategy(torch.zeros(n_features), sigma0=5.0)

        i = 0
        n_generation = 10000
        while not es.stop() and i < n_generation:
            start_time = time.time()
            solutions = es.ask()

            #calculate fitness
            objectives = []
            bcs = []

            if opt.multiproc:
                futures = [
                    multi_fit_func.remote(torch.from_numpy(solution).float(),
                                          opt.device,
                                          generators,
                                          opt.num_layer,
                                          rand_network,
                                          reals,
                                          noise_amplitudes,
                                          opt,
Example #13
    def run_optimization(self):

        """
        Runs optimization job
        :return:
        """
        simulation_name = self.parse_args.input
        population_size = self.parse_args.population_size

        self.optim_param_mgr = OptimizationParameterManager()
        optim_param_mgr = self.optim_param_mgr

        # optim_param_mgr.parse(args.params_file)
        optim_param_mgr.parse(self.parse_args.params_file)

        starting_params = optim_param_mgr.get_starting_points()
        print('starting_params (mapped to [0,1])=', starting_params)
        print('remapped (true) starting params=', optim_param_mgr.params_from_0_1(starting_params))
        print('dictionary of remapped parameters labeled by parameter name=',
              optim_param_mgr.param_from_0_1_dict(starting_params))

        print('simulation_name=', simulation_name)
        self.workload_dict = self.prepare_optimization_run(simulation_name=simulation_name)
        workload_dict = self.workload_dict

        print(workload_dict)

        std_dev = optim_param_mgr.std_dev
        default_bounds = optim_param_mgr.default_bounds

        optim = CMAEvolutionStrategy(starting_params, std_dev, {'bounds': list(default_bounds)})

        while not optim.stop():  # iterate
            # get candidate solutions
            # param_set_list = optim.ask(number=self.num_workers)
            # param_set_list = optim.ask(number=1)
            param_set_list = optim.ask(number=population_size)

            # set param_set_list for run_task to iterate over
            self.set_param_set_list(param_set_list=param_set_list)

            # #debug
            # return_result_vec = [self.fcn(optim_param_mgr.params_from_0_1(X)) for X in param_set_list]

            # evaluate target function values at the candidate solutions
            return_result_vec = np.array([], dtype=float)
            for param_set in self.param_generator(self.num_workers):
                print('CURRENT PARAM SET=', param_set)
                # distribute param_set to workers - run_task spawns the appropriate number of workers
                # given self.num_workers and the size of the param_set
                partial_return_result_vec = self.run_task(workload_dict, param_set)

                return_result_vec = np.append(return_result_vec, partial_return_result_vec)

                print('FINISHED PARAM_SET=', param_set)


            optim.tell(param_set_list, return_result_vec)  # do all the real "update" work
            optim.disp(20)  # display info every 20th iteration
            optim.logger.add()  # log another "data line"

        optimal_parameters = optim.result()[0]

        print('termination by', optim.stop())
        print('best f-value =', optim.result()[1])
        optimal_parameters_remapped = optim_param_mgr.params_from_0_1(optim.result()[0])
        print('best solution =', optimal_parameters_remapped)

        # print('best solution =', optim_param_mgr.params_from_0_1(optim.result()[0]))

        print(optim_param_mgr.params_names)

        self.save_optimal_parameters(optimal_parameters)
        self.save_optimal_simulation(optimal_parameters)
Example #14
if encoder is not None:
    x_mean = encoder.predict(img[np.newaxis, ...])
fitness_func = Fitness(img, decoder)
best_img = None
best_z = None
best_score = np.inf
for i in range(args.runs):
    print('Runs: %d / %d' % (i + 1, args.runs))
    if encoder is None:
        init = np.random.randn(decoder.input_shape[-1]) * args.std
    else:
        init = x_mean[0]
    es = ES(init, args.sigma)
    for ite in range(args.iterations):
        dnas = np.asarray(es.ask())
        es.tell(dnas, fitness_func(dnas))
        es.disp()
    es.result_pretty()
    z = np.asarray(es.result[0])
    img_reconstruct = decoder.predict(z[np.newaxis, ...])[0]
    mse = np.mean(np.square(img_reconstruct - img))
    print('mse: {:.2f}'.format(mse))
    if mse < best_score:  # lower reconstruction error is better
        best_score = mse
        best_z = z
        best_img = img_reconstruct
    output_img = np.round(
        np.concatenate(
            (np.squeeze(img), np.squeeze(img_reconstruct)), axis=1) * 127.5 +
        127.5).astype(np.uint8)
Example #16
class ALGO:
    def __init__(self, max_evaluations, n_points, dimension, function_id,
                 **kwargs):

        self.function_id = function_id
        self.n_points = n_points
        self.dimension = dimension
        self.function = CEC2005(dimension)[function_id].objective_function
        self.max_evaluations = max_evaluations * dimension
        self.max_bounds = Boundary(dimension, function_id).max_bounds
        self.min_bounds = Boundary(dimension, function_id).min_bounds
        self.optimal_position = CEC2005(
            dimension)[function_id].get_optimal_solutions()[0].phenome
        self.optimal_fitness = self.function(self.optimal_position)

        #self.problem = Problem(self.function.objective_function, max_evaluations=max_evaluations)
        self.boundary = Boundary(dimension, function_id)
        self.verbose = kwargs.get('verbose', False)

        self.population = [
        ]  #self.init_population( self.n_points, self.dimension )
        self.algo_type = kwargs.get('algo_type', 'CMA')
        self.init_algo()

        self.iteration = 0
        self.should_terminate = False
        self.optimal_solution = self.find_optimal_solution()

        self.stats = OrderedDict([
            ('iteration', []),
            ('FEs', []),
            ('error', []),
            ('best_value', []),
            #('best_position',[])
        ])

        self.run()
        self.best_solution = min(self.population,
                                 key=attrgetter('objective_values'))

    def init_population(self, n_points, dim):
        positions = np.zeros((n_points, dim))
        for d in range(dim):
            positions[:, d] = np.random.uniform(self.boundary.min_bounds[d],
                                                self.boundary.max_bounds[d],
                                                self.n_points)

        population = [Individual(position) for position in positions]
        self.problem.batch_evaluate(population)
        population = sorted(population, key=attrgetter('objective_values'))
        population = population[:len(population) // 2]
        ranks = range(1, len(population) + 1)
        return [Cluster(population, ranks)]

    def init_algo(self):
        init_min_bound = self.boundary.init_min_bounds[0]
        init_max_bound = self.boundary.init_max_bounds[0]
        min_bound = self.boundary.min_bounds[0]
        max_bound = self.boundary.max_bounds[0]

        if self.algo_type == 'CMA':
            init_point = [(init_max_bound + init_min_bound) / 2] * self.dimension
            sigma = (init_max_bound - init_min_bound) * 0.2
            #print 'init_point:', init_point
            #print 'sigma:', sigma
            self.algo = CMAEvolutionStrategy(init_point, sigma, {
                'popsize': self.n_points,
                'bounds': [min_bound, max_bound]
            })
        #elif self.algo_type == 'PSO':

    def find_optimal_solution(self):
        dimension = self.dimension
        function_id = self.function_id
        optimal_solutions = CEC2005(
            dimension)[function_id].get_optimal_solutions()
        test_prob = Problem(CEC2005(dimension)[function_id].objective_function)
        test_prob.batch_evaluate(optimal_solutions)
        return min(optimal_solutions, key=attrgetter('objective_values'))

    def run(self):
        self.iteration = self.iteration + 1
        if self.algo_type == 'CMA':

            positions = self.algo.ask()

            solutions = [Individual(position) for position in positions]
            try:
                self.problem.batch_evaluate(solutions)
            except ResourcesExhausted:
                self.should_terminate = True
                return

            self.algo.tell([p.phenome for p in solutions],
                           [p.objective_values for p in solutions])
            self.population = sorted(solutions,
                                     key=attrgetter('objective_values'))

        self.best_solution = min(self.population,
                                 key=attrgetter('objective_values'))
        self.update_statistics()

    def found_optimum(self, delta=1e-8):
        if self.best_solution.objective_values - self.optimal_solution.objective_values < delta:
            return True
        return False

    def stop(self):
        if self.algo.stop():
            if self.verbose: print('Algorithm stops!')
            self.should_terminate = True
        elif self.problem.remaining_evaluations < 1:
            if self.verbose: print('Consumed all evaluations!')
            self.should_terminate = True
        elif self.found_optimum(delta=goal_error):
            if self.verbose: print('Found Optimum!')
            self.should_terminate = True
        return self.should_terminate

    def print_status(self):
        error = self.best_solution.objective_values - self.optimal_solution.objective_values
        print('')
        print('    Iteration %d: error = %e' % (self.iteration, error))
        print('     Evaluations: consumed %d, remain %d' %
              (self.problem.consumed_evaluations,
               self.problem.remaining_evaluations))
        print('    best fitness: %f at %r' %
              (self.best_solution.objective_values,
               self.best_solution.phenome.tolist()))
        print('optimal solution: %f at %r' %
              (self.optimal_solution.objective_values,
               self.optimal_solution.phenome))
        print('')

    def update_statistics(self):
        self.stats['iteration'].append(self.iteration)
        self.stats['FEs'].append(self.problem.consumed_evaluations)
        self.stats['error'].append(self.best_solution.objective_values -
                                   self.optimal_solution.objective_values)
        self.stats['best_value'].append(self.best_solution.objective_values)