Example no. 1
    def run(self, FES):  # main part for your implementation
        population_size = int(4 + (3 * np.log(self.dimension)))
        bounds = np.array([[self.lower, self.upper]] * self.dimension)

        optimizer = CMA(mean=np.zeros(self.dimension),
                        sigma=0.5,
                        bounds=bounds,
                        population_size=population_size)
        while self.eval_times < FES:
            print('=====================FE=====================')
            # Use a separate Function wrapper because CMA-ES asks for many
            # candidate solutions in every generation.
            function = Function(self.target_func)
            solutions = []
            for _ in range(optimizer.population_size):
                solution = optimizer.ask()
                value = self.f.evaluate(self.target_func, solution)
                #print('generate: ', solution, 'loss: ', value)
                solutions.append((solution, value))
                self.eval_times += 1

                if value == "ReachFunctionLimit":
                    print("ReachFunctionLimit")
                    break
                if float(value) < self.optimal_value:
                    self.optimal_solution[:] = solution
                    self.optimal_value = float(value)
            optimizer.tell(solutions)

            print('eval times: ', self.eval_times)
            print("optimal: {}\n".format(self.get_optimal()[1]))
Example no. 2
def main() -> None:
    # Generate solutions from a source task
    source_solutions = []
    for _ in range(1000):
        x = np.random.random(2)
        value = source_task(x[0], x[1])
        source_solutions.append((x, value))

    # Estimate a promising distribution of the source task
    ws_mean, ws_sigma, ws_cov = get_warm_start_mgd(
        source_solutions, gamma=0.1, alpha=0.1
    )
    optimizer = CMA(mean=ws_mean, sigma=ws_sigma, cov=ws_cov)

    # Run WS-CMA-ES
    print(" g    f(x1,x2)     x1      x2  ")
    print("===  ==========  ======  ======")
    while True:
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = target_task(x[0], x[1])
            solutions.append((x, value))
            print(
                f"{optimizer.generation:3d}  {value:10.5f}"
                f"  {x[0]:6.2f}  {x[1]:6.2f}"
            )
        optimizer.tell(solutions)

        if optimizer.should_stop():
            break
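
The source_task and target_task helpers are not shown in this example. A minimal sketch of what they might look like, assuming two shifted 2-D quadratics so that the source distribution is informative for the target; the function bodies are an assumption, not part of the original example, which also presumes "from cmaes import CMA, get_warm_start_mgd" and "import numpy as np".

import numpy as np

from cmaes import CMA, get_warm_start_mgd


def source_task(x1: float, x2: float) -> float:
    # Hypothetical source task: a quadratic whose optimum sits at (0.4, 0.4).
    b = 0.4
    return (x1 - b) ** 2 + (x2 - b) ** 2


def target_task(x1: float, x2: float) -> float:
    # Hypothetical target task: the same quadratic shifted to (0.6, 0.6).
    b = 0.6
    return (x1 - b) ** 2 + (x2 - b) ** 2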
Example no. 3
def main():
    seed = 0
    rng = np.random.RandomState(0)

    bounds = np.array([[-32.768, 32.768], [-32.768, 32.768]])
    lower_bounds, upper_bounds = bounds[:, 0], bounds[:, 1]

    mean = lower_bounds + (rng.rand(2) * (upper_bounds - lower_bounds))
    sigma = 32.768 * 2 / 5  # 1/5 of the domain width
    optimizer = CMA(mean=mean, sigma=sigma, bounds=bounds, seed=0)

    n_restarts = 0  # small restarts do not count toward n_restarts
    small_n_eval, large_n_eval = 0, 0
    popsize0 = optimizer.population_size
    inc_popsize = 2

    # The initial run uses the default population size; it is the "large"
    # population before the first doubling, but its budget is accounted for
    # in the same way as a small population's.
    poptype = "small"

    while n_restarts <= 5:
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = ackley(x[0], x[1])
            solutions.append((x, value))
            # print("{:10.5f}  {:6.2f}  {:6.2f}".format(value, x[0], x[1]))
        optimizer.tell(solutions)

        if optimizer.should_stop():
            seed += 1
            n_eval = optimizer.population_size * optimizer.generation
            if poptype == "small":
                small_n_eval += n_eval
            else:  # poptype == "large"
                large_n_eval += n_eval

            if small_n_eval < large_n_eval:
                poptype = "small"
                popsize_multiplier = inc_popsize**n_restarts
                popsize = math.floor(popsize0 *
                                     popsize_multiplier**(rng.uniform()**2))
            else:
                poptype = "large"
                n_restarts += 1
                popsize = popsize0 * (inc_popsize**n_restarts)
            mean = lower_bounds + (rng.rand(2) * (upper_bounds - lower_bounds))
            optimizer = CMA(
                mean=mean,
                sigma=sigma,
                bounds=bounds,
                seed=seed,
                population_size=popsize,
            )
            print("Restart CMA-ES with popsize={} ({})".format(
                popsize, poptype))
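
The ackley helper used in this restart example (and again in example no. 8) is not shown. A sketch of the usual 2-D Ackley benchmark on [-32.768, 32.768]^2, which is presumably what the example intends; treat the exact definition as an assumption.

import math


def ackley(x1: float, x2: float) -> float:
    # Standard 2-D Ackley function; global minimum f(0, 0) = 0.
    term1 = -20.0 * math.exp(-0.2 * math.sqrt(0.5 * (x1 ** 2 + x2 ** 2)))
    term2 = -math.exp(0.5 * (math.cos(2 * math.pi * x1) + math.cos(2 * math.pi * x2)))
    return term1 + term2 + math.e + 20.0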
Example no. 4
    def test_cma_tell(self, data):
        dim = data.draw(st.integers(min_value=2, max_value=100))
        mean = data.draw(npst.arrays(dtype=float, shape=dim))
        sigma = data.draw(st.floats(min_value=1e-16))
        n_iterations = data.draw(st.integers(min_value=1))
        try:
            optimizer = CMA(mean, sigma)
        except AssertionError:
            return
        popsize = optimizer.population_size
        for _ in range(n_iterations):
            tell_solutions = data.draw(
                st.lists(
                    st.tuples(npst.arrays(dtype=float, shape=dim), st.floats()),
                    min_size=popsize,
                    max_size=popsize,
                ))
            optimizer.ask()
            try:
                optimizer.tell(tell_solutions)
            except AssertionError:
                return
            optimizer.ask()
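
This property-based test draws its inputs from a hypothesis data object, so it only runs with the surrounding test scaffolding. A minimal sketch of the imports and decorator it presumably relies on; the class name is illustrative.

import unittest

from hypothesis import given, strategies as st
import hypothesis.extra.numpy as npst

from cmaes import CMA


class TestCMA(unittest.TestCase):
    @given(data=st.data())
    def test_cma_tell(self, data):
        ...  # body as in the example above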
Example no. 5
def main():
    optimizer = CMA(mean=np.zeros(2), sigma=1.3)
    print(" g    f(x1,x2)     x1      x2  ")
    print("===  ==========  ======  ======")

    while True:
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = quadratic(x[0], x[1])
            solutions.append((x, value))
            print(f"{optimizer.generation:3d}  {value:10.5f}"
                  f"  {x[0]:6.2f}  {x[1]:6.2f}")
        optimizer.tell(solutions)

        if optimizer.should_stop():
            break
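
The quadratic helper used here and in example no. 6 is not shown; a minimal sketch of a convex 2-D quadratic with a known minimum. The exact coefficients are an assumption; any convex quadratic works for this demo.

def quadratic(x1: float, x2: float) -> float:
    # Simple convex quadratic with its minimum at (3, -2).
    return (x1 - 3) ** 2 + (10 * (x2 + 2)) ** 2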
Example no. 6
def main():
    cma_es = CMA(mean=np.zeros(2), sigma=1.3)
    print(" g    f(x1,x2)     x1      x2  ")
    print("===  ==========  ======  ======")

    for generation in range(50):
        solutions = []
        for _ in range(cma_es.population_size):
            x = cma_es.ask()
            value = quadratic(x[0], x[1])
            solutions.append((x, value))

            msg = "{g:3d}  {value:10.5f}  {x1:6.2f}  {x2:6.2f}".format(
                g=generation,
                value=value,
                x1=x[0],
                x2=x[1],
            )
            print(msg)
        cma_es.tell(solutions)
Example no. 7
def main():
    dim = 40
    optimizer = CMA(mean=3 * np.ones(dim), sigma=2.0)
    print(" evals    f(x)")
    print("======  ==========")

    evals = 0
    while True:
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = ellipsoid(x)
            evals += 1
            solutions.append((x, value))
            if evals % 3000 == 0:
                print(f"{evals:5d}  {value:10.5f}")
        optimizer.tell(solutions)

        if optimizer.should_stop():
            break
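
The ellipsoid helper is not shown; a sketch of a standard ill-conditioned ellipsoid benchmark over len(x) dimensions. The scaling factor is an assumption.

def ellipsoid(x):
    # Ill-conditioned ellipsoid: coordinate i is scaled by 1000 ** (i / (n - 1)),
    # so the conditioning grows with the dimension.
    n = len(x)
    if n < 2:
        raise ValueError("dimension must be greater than 1")
    return sum((1000 ** (i / (n - 1)) * x[i]) ** 2 for i in range(n))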
Example no. 8
def main():
    seed = 0
    rng = np.random.RandomState(1)

    bounds = np.array([[-32.768, 32.768], [-32.768, 32.768]])
    lower_bounds, upper_bounds = bounds[:, 0], bounds[:, 1]

    mean = lower_bounds + (rng.rand(2) * (upper_bounds - lower_bounds))
    sigma = 32.768 * 2 / 5  # 1/5 of the domain width
    optimizer = CMA(mean=mean, sigma=sigma, bounds=bounds, seed=0)

    # Multiplier for increasing population size before each restart.
    inc_popsize = 2

    print(" g    f(x1,x2)     x1      x2  ")
    print("===  ==========  ======  ======")
    for generation in range(200):
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = ackley(x[0], x[1])
            solutions.append((x, value))
            print(f"{generation:3d}  {value:10.5f}  {x[0]:6.2f}  {x[1]:6.2f}")
        optimizer.tell(solutions)

        if optimizer.should_stop():
            seed += 1
            popsize = optimizer.population_size * inc_popsize
            mean = lower_bounds + (rng.rand(2) * (upper_bounds - lower_bounds))
            optimizer = CMA(
                mean=mean,
                sigma=sigma,
                bounds=bounds,
                seed=seed,
                population_size=popsize,
            )
            print("Restart CMA-ES with popsize={}".format(popsize))
Example no. 9
class OptimizingEmitter(object):
    """
  This class is a wrapper for the CMA-ES algorithm
  """
    def __init__(self, init_mean, id, mutation_rate, bounds, parameters):
        self.init_mean = init_mean
        self.id = id
        self._mutation_rate = mutation_rate
        self.steps = 0
        self._params = parameters
        self._bounds = bounds
        self.stored = 0

        # List of lists: each inner list holds the values obtained during one step.
        # Tracking these makes it easy to calculate the emitter's improvement.
        self.values = []
        self.archived_values = []

        self._cmaes = CMA(mean=self.init_mean.copy(),
                          sigma=self._mutation_rate,
                          bounds=self._bounds,
                          seed=self._params.seed,
                          population_size=self._params.emitter_population)

    def ask(self):
        return self._cmaes.ask()

    def tell(self, solutions):
        return self._cmaes.tell(solutions)

    def should_stop(self):
        """
    Checks internal stopping criteria
    :return:
    """
        return self._cmaes.should_stop()
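
A minimal sketch of how this emitter wrapper might be driven; params (with seed and emitter_population attributes) and reward are placeholders for whatever the surrounding framework provides, not part of the original class.

import numpy as np

# Hypothetical driver loop; params and reward are placeholders.
emitter = OptimizingEmitter(init_mean=np.zeros(10), id=0, mutation_rate=0.5,
                            bounds=np.array([[-1.0, 1.0]] * 10), parameters=params)
while not emitter.should_stop():
    solutions = []
    for _ in range(params.emitter_population):
        genome = emitter.ask()
        solutions.append((genome, -reward(genome)))  # cmaes minimizes, so negate the reward
    emitter.tell(solutions)
    emitter.steps += 1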
Example no. 10
from cmaes import CMA
import numpy as np

import matplotlib.pyplot as plt

target = np.array((0.75, 0.75))


def evaluate(point):
    return np.sqrt(((point - target) ** 2).sum())


optimizer = CMA(
    mean=np.array([0.5, 0.5]),
    bounds=np.array([[0, 1], [0, 1]]),
    sigma=0.5,
    n_max_resampling=1,
)
generations = 16
sqrt = int(np.sqrt(generations))
fig, axs = plt.subplots(sqrt, sqrt, num="CMA-ES", sharex=True, sharey=True)
points = np.empty((generations, optimizer.population_size, 2))

for g in range(generations):
    solutions = []
    for i in range(optimizer.population_size):
        point = optimizer.ask()
        points[g, i] = point
        score = evaluate(point)
        solutions.append((point, score))
    optimizer.tell(solutions)

for i in range(generations):
    ax = axs[i // sqrt, i % sqrt]
    ax.scatter(*zip(*points[i]), c="b")
    ax.scatter(*target, c="r")
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])

plt.show()
Example no. 11
class CMAES(BaseEvolver):
    """
  This class is a wrapper around the CMA ES implementation.
  """
    def __init__(self, parameters):
        super().__init__(parameters)
        self.update_criteria = 'fitness'
        self.sigma = 0.05

        # Instantiated only to extract genome size
        controller = registered_envs[
            self.params.env_name]['controller']['controller'](
                input_size=registered_envs[
                    self.params.env_name]['controller']['input_size'],
                output_size=registered_envs[
                    self.params.env_name]['controller']['output_size'])

        self.genome_size = controller.genome_size
        self.bounds = self.params.genome_limit * np.ones(
            (self.genome_size, len(self.params.genome_limit)))
        self.values = []

        self.optimizer = CMA(mean=self.mu * np.ones(self.genome_size),
                             sigma=self.sigma,
                             bounds=self.bounds,
                             seed=self.params.seed,
                             population_size=self.params.emitter_population)
        self.restarted_at = 0

    def generate_offspring(self, parents, generation, pool=None):
        """
    This function returns the parents. This way the population is evaluated given that contrary to the other algos
    here the population is given by the CMA-ES library
    :param parents:
    :param pool:
    :return:
    """
        return parents

    def evaluate_performances(self, population, offsprings, pool=None):
        """
    This function evaluates performances of the population. It's what calls the tell function from the optimizer
    The novelty is evaluated according to the given distance metric
    :param population:
    :param offsprings:
    :param pool: Multiprocessing pool
    :return:
    """
        solutions = [(genome, -value) for genome, value in zip(
            population['genome'], population['reward'])]
        self.values += [-val[1] for val in solutions]
        self.optimizer.tell(solutions)

    def check_stopping_criteria(self, generation):
        """
    This function is used to check for when to stop the emitter
    :param emitter_idx:
    :return:
    """
        if self.optimizer.should_stop():
            return True
        elif self._stagnation(generation - self.restarted_at):
            return True
        else:
            return False

    def _stagnation(self, cma_es_step):
        """
    Calculates the stagnation criteria
    :param emitter_idx:
    :param ca_es_step:
    :return:
    """
        bottom = int(20 * self.genome_size / self.params.emitter_population +
                     120 + 0.2 * cma_es_step)
        if cma_es_step > bottom:
            values = self.values[-bottom:]
            if np.median(values[:20]) >= np.median(values[-20:]) or np.max(
                    values[:20]) >= np.max(values[-20:]):
                return True
        return False

    def update_archive(self, population, offsprings, generation):
        """
    Updates the archive. In this case the archive is a copy of the population.
    We do not really have the concept of archive in CMA-ES, so this archive here is just for ease of analysis and
    code compatibility.
    :param population:
    :param offsprings:
    :return:
    """
        # del self.archive
        # self.archive = Archive(self.params)

        for i in range(population.size):
            population[i]['stored'] = generation
            self.archive.store(population[i])

    def update_population(self, population, offsprings, generation):
        """
    This function updates the population according to the given criteria. For CMA-ES we use the ask function of the
    library
    :param population:
    :param offsprings:
    :return:
    """
        # If a stopping criterion has been met, reinitialize the optimizer around the best
        # agent in the archive (i.e. the best solution found so far).
        if self.check_stopping_criteria(generation):
            print("Restarting")
            best = np.argmax(self.archive['reward'])
            self.restarted_at = generation

            self.values = []
            genome_idx = self.params.archive_stored_info.index('genome')
            self.optimizer = CMA(
                mean=self.archive[best][genome_idx],
                sigma=self.sigma,
                bounds=self.bounds,
                seed=self.params.seed,
                population_size=self.params.emitter_population)

        population.empty()
        for idx in range(self.params.emitter_population):
            population.add()
            population[idx]['genome'] = self.optimizer.ask()
            population[idx]['born'] = generation
Example no. 12
    def test_dimension(self):
        optimizer = CMA(mean=np.zeros(10), sigma=1.3)
        source_solutions = [(optimizer.ask(), 0.0) for _ in range(100)]
        ws_mean, ws_sigma, ws_cov = get_warm_start_mgd(source_solutions)

        self.assertEqual(ws_mean.size, 10)
Example no. 13
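# Note: this fragment assumes that file_to_write, C, x0, scale_bounds, loss, data,
# and kwargs are defined earlier in the original script.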
with open(file_to_write, "w", newline='') as csv_file:
    writer = csv.writer(csv_file, delimiter=',')
    writer.writerow((*C[:-2].T.columns, 'loss'))
print('something wrong')
if __name__ == "__main__":
    mean = x0 + 0.001
    sigma = 0.5
    optimizer = CMA(mean=mean,
                    sigma=sigma,
                    bounds=scale_bounds,
                    seed=0,
                    population_size=5)
    for generation in range(5):
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = loss(x, data, kwargs)
            solutions.append((x, value))
            with open(file_to_write, "a", newline='') as csv_file:
                writer = csv.writer(csv_file, delimiter=',')
                writer.writerow((*x, value))
            #print(f"#{generation} {value} ")
        optimizer.tell(solutions)

#res = calculate_full_trace(scale(C.value.values[:-2],*bounds.T), kwargs)
#print(res)
#result = pd.DataFrame(solutions, columns=['x', 'loss'])
#name = 'result_1.csv'
#result.to_csv(f"../../data/results/{name}")
print('OK')