Code Example #1
def main() -> None:
    # Generate solutions from a source task
    source_solutions = []
    for _ in range(1000):
        x = np.random.random(2)
        value = source_task(x[0], x[1])
        source_solutions.append((x, value))

    # Estimate a promising distribution of the source task
    ws_mean, ws_sigma, ws_cov = get_warm_start_mgd(
        source_solutions, gamma=0.1, alpha=0.1
    )
    optimizer = CMA(mean=ws_mean, sigma=ws_sigma, cov=ws_cov)

    # Run WS-CMA-ES
    print(" g    f(x1,x2)     x1      x2  ")
    print("===  ==========  ======  ======")
    while True:
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = target_task(x[0], x[1])
            solutions.append((x, value))
            print(
                f"{optimizer.generation:3d}  {value:10.5f}"
                f"  {x[0]:6.2f}  {x[1]:6.2f}"
            )
        optimizer.tell(solutions)

        if optimizer.should_stop():
            break
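The snippet above relies on numpy, on the CMA class and the get_warm_start_mgd helper from the cmaes package, and on a pair of source_task/target_task benchmark functions that are not shown. A minimal sketch of those missing pieces, assuming the two tasks are similar quadratic bowls with slightly shifted optima (the exact definitions are an assumption, not necessarily the original ones):

import numpy as np
from cmaes import CMA, get_warm_start_mgd


def source_task(x1: float, x2: float) -> float:
    # Hypothetical source task: a quadratic bowl with its optimum at (0.4, 0.4).
    b = 0.4
    return (x1 - b) ** 2 + (x2 - b) ** 2


def target_task(x1: float, x2: float) -> float:
    # Hypothetical target task: the same bowl, shifted slightly to (0.6, 0.6).
    b = 0.6
    return (x1 - b) ** 2 + (x2 - b) ** 2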
Code Example #2
File: bipop_cmaes.py  Project: HiroIshida/cmaes
def main():
    seed = 0
    rng = np.random.RandomState(0)

    bounds = np.array([[-32.768, 32.768], [-32.768, 32.768]])
    lower_bounds, upper_bounds = bounds[:, 0], bounds[:, 1]

    mean = lower_bounds + (rng.rand(2) * (upper_bounds - lower_bounds))
    sigma = 32.768 * 2 / 5  # 1/5 of the domain width
    optimizer = CMA(mean=mean, sigma=sigma, bounds=bounds, seed=0)

    n_restarts = 0  # Small-population restarts do not count toward n_restarts.
    small_n_eval, large_n_eval = 0, 0
    popsize0 = optimizer.population_size
    inc_popsize = 2

    # The initial run uses the "normal" population size; it plays the role of the
    # large population before the first doubling, but its evaluation budget is
    # accounted for in the same way as a small-population run.
    poptype = "small"

    while n_restarts <= 5:
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = ackley(x[0], x[1])
            solutions.append((x, value))
            # print("{:10.5f}  {:6.2f}  {:6.2f}".format(value, x[0], x[1]))
        optimizer.tell(solutions)

        if optimizer.should_stop():
            seed += 1
            n_eval = optimizer.population_size * optimizer.generation
            if poptype == "small":
                small_n_eval += n_eval
            else:  # poptype == "large"
                large_n_eval += n_eval

            if small_n_eval < large_n_eval:
                poptype = "small"
                popsize_multiplier = inc_popsize**n_restarts
                popsize = math.floor(popsize0 *
                                     popsize_multiplier**(rng.uniform()**2))
            else:
                poptype = "large"
                n_restarts += 1
                popsize = popsize0 * (inc_popsize**n_restarts)
            mean = lower_bounds + (rng.rand(2) * (upper_bounds - lower_bounds))
            optimizer = CMA(
                mean=mean,
                sigma=sigma,
                bounds=bounds,
                seed=seed,
                population_size=popsize,
            )
            print("Restart CMA-ES with popsize={} ({})".format(
                popsize, poptype))
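Both this example and Code Example #6 below call an ackley benchmark that is not shown. A sketch of the standard two-variable Ackley function (global minimum 0 at the origin); the constants used in the original file may differ:

import math


def ackley(x1: float, x2: float) -> float:
    # Standard Ackley function with a = 20, b = 0.2, c = 2*pi; f(0, 0) = 0.
    return (
        -20 * math.exp(-0.2 * math.sqrt(0.5 * (x1 ** 2 + x2 ** 2)))
        - math.exp(0.5 * (math.cos(2 * math.pi * x1) + math.cos(2 * math.pi * x2)))
        + math.e
        + 20
    )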
Code Example #3
File: quadratic_function.py  Project: unghee/cmaes
def main():
    optimizer = CMA(mean=np.zeros(2), sigma=1.3)
    print(" g    f(x1,x2)     x1      x2  ")
    print("===  ==========  ======  ======")

    while True:
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = quadratic(x[0], x[1])
            solutions.append((x, value))
            print(f"{optimizer.generation:3d}  {value:10.5f}"
                  f"  {x[0]:6.2f}  {x[1]:6.2f}")
        optimizer.tell(solutions)

        if optimizer.should_stop():
            break
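The quadratic benchmark and the imports are not shown here; any smooth convex bowl works with this loop. A minimal sketch (the particular coefficients are an assumption):

import numpy as np
from cmaes import CMA


def quadratic(x1: float, x2: float) -> float:
    # Simple convex benchmark; this form has its minimum at (3, -2).
    return (x1 - 3) ** 2 + (10 * (x2 + 2)) ** 2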
Code Example #4
File: test_cmaes.py  Project: HideakiImamura/optuna
def test_population_size_is_multiplied_when_enable_ipop(
        popsize: Optional[int]) -> None:
    inc_popsize = 2
    sampler = optuna.samplers.CmaEsSampler(
        x0={"x": 0, "y": 0},
        sigma0=0.1,
        seed=1,
        n_startup_trials=1,
        restart_strategy="ipop",
        popsize=popsize,
        inc_popsize=inc_popsize,
    )
    study = optuna.create_study(sampler=sampler)

    def objective(trial: optuna.Trial) -> float:
        _ = trial.suggest_float("x", -1, 1)
        _ = trial.suggest_float("y", -1, 1)
        return 1.0

    with patch("optuna.samplers._cmaes.CMA") as cma_class_mock, patch(
            "optuna.samplers._cmaes.pickle") as pickle_mock:
        pickle_mock.dump.return_value = b"serialized object"

        should_stop_mock = MagicMock()
        should_stop_mock.return_value = True

        cma_obj = CMA(
            mean=np.array([-1, -1], dtype=float),
            sigma=1.3,
            bounds=np.array([[-1, 1], [-1, 1]], dtype=float),
            population_size=popsize,  # Already tested by test_init_cmaes_opts().
        )
        cma_obj.should_stop = should_stop_mock
        cma_class_mock.return_value = cma_obj

        initial_popsize = cma_obj.population_size
        study.optimize(objective, n_trials=2 + initial_popsize)
        assert cma_obj.should_stop.call_count == 1

        _, actual_kwargs = cma_class_mock.call_args
        assert actual_kwargs["population_size"] == inc_popsize * initial_popsize
Code Example #5
def main():
    dim = 40
    optimizer = CMA(mean=3 * np.ones(dim), sigma=2.0)
    print(" evals    f(x)")
    print("======  ==========")

    evals = 0
    while True:
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = ellipsoid(x)
            evals += 1
            solutions.append((x, value))
            if evals % 3000 == 0:
                print(f"{evals:5d}  {value:10.5f}")
        optimizer.tell(solutions)

        if optimizer.should_stop():
            break
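The ellipsoid benchmark is not shown. A common ill-conditioned definition that works with the 40-dimensional setup above (the exact axis scaling used in the original file is an assumption):

import numpy as np
from cmaes import CMA


def ellipsoid(x: np.ndarray) -> float:
    # Ill-conditioned quadratic: axis i is scaled by 1000 ** (i / (n - 1)).
    n = len(x)
    return sum((1000 ** (i / (n - 1)) * x[i]) ** 2 for i in range(n))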
Code Example #6
def main():
    seed = 0
    rng = np.random.RandomState(1)

    bounds = np.array([[-32.768, 32.768], [-32.768, 32.768]])
    lower_bounds, upper_bounds = bounds[:, 0], bounds[:, 1]

    mean = lower_bounds + (rng.rand(2) * (upper_bounds - lower_bounds))
    sigma = 32.768 * 2 / 5  # 1/5 of the domain width
    optimizer = CMA(mean=mean, sigma=sigma, bounds=bounds, seed=0)

    # Multiplier for increasing population size before each restart.
    inc_popsize = 2

    print(" g    f(x1,x2)     x1      x2  ")
    print("===  ==========  ======  ======")
    for generation in range(200):
        solutions = []
        for _ in range(optimizer.population_size):
            x = optimizer.ask()
            value = ackley(x[0], x[1])
            solutions.append((x, value))
            print(f"{generation:3d}  {value:10.5f}  {x[0]:6.2f}  {x[1]:6.2f}")
        optimizer.tell(solutions)

        if optimizer.should_stop():
            seed += 1
            popsize = optimizer.population_size * inc_popsize
            mean = lower_bounds + (rng.rand(2) * (upper_bounds - lower_bounds))
            optimizer = CMA(
                mean=mean,
                sigma=sigma,
                bounds=bounds,
                seed=seed,
                population_size=popsize,
            )
            print("Restart CMA-ES with popsize={}".format(popsize))
Code Example #7
class OptimizingEmitter(object):
    """
  This class is a wrapper for the CMA-ES algorithm
  """
    def __init__(self, init_mean, id, mutation_rate, bounds, parameters):
        self.init_mean = init_mean
        self.id = id
        self._mutation_rate = mutation_rate
        self.steps = 0
        self._params = parameters
        self._bounds = bounds
        self.stored = 0

        # List of lists. Each inner list corresponds to the values obtained during a step
        # We init with the ancestor reward so it's easier to calculate the improvement
        self.values = []
        self.archived_values = []

        self._cmaes = CMA(mean=self.init_mean.copy(),
                          sigma=self._mutation_rate,
                          bounds=self._bounds,
                          seed=self._params.seed,
                          population_size=self._params.emitter_population)

    def ask(self):
        return self._cmaes.ask()

    def tell(self, solutions):
        return self._cmaes.tell(solutions)

    def should_stop(self):
        """
    Checks internal stopping criteria
    :return:
    """
        return self._cmaes.should_stop()
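A usage sketch for the wrapper above; the parameters object is a hypothetical stand-in that only fills in the seed and emitter_population fields read by the constructor, and the objective is a toy sphere function:

from types import SimpleNamespace

import numpy as np

params = SimpleNamespace(seed=0, emitter_population=8)  # hypothetical stand-in
emitter = OptimizingEmitter(
    init_mean=np.zeros(2),
    id=0,
    mutation_rate=0.5,
    bounds=np.array([[-1.0, 1.0], [-1.0, 1.0]]),
    parameters=params,
)

while not emitter.should_stop():
    solutions = []
    for _ in range(params.emitter_population):
        x = emitter.ask()
        solutions.append((x, float(np.sum(x ** 2))))  # toy objective
    emitter.tell(solutions)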
Code Example #8
class CMAES(BaseEvolver):
    """
  This class is a wrapper around the CMA ES implementation.
  """
    def __init__(self, parameters):
        super().__init__(parameters)
        self.update_criteria = 'fitness'
        self.sigma = 0.05

        # Instantiated only to extract genome size
        env_config = registered_envs[self.params.env_name]['controller']
        controller = env_config['controller'](
            input_size=env_config['input_size'],
            output_size=env_config['output_size'],
        )

        self.genome_size = controller.genome_size
        self.bounds = self.params.genome_limit * np.ones(
            (self.genome_size, len(self.params.genome_limit)))
        self.values = []

        self.optimizer = CMA(mean=self.mu * np.ones(self.genome_size),
                             sigma=self.sigma,
                             bounds=self.bounds,
                             seed=self.params.seed,
                             population_size=self.params.emitter_population)
        self.restarted_at = 0

    def generate_offspring(self, parents, generation, pool=None):
        """
    This function returns the parents. This way the population is evaluated given that contrary to the other algos
    here the population is given by the CMA-ES library
    :param parents:
    :param pool:
    :return:
    """
        return parents

    def evaluate_performances(self, population, offsprings, pool=None):
        """
    This function evaluates performances of the population. It's what calls the tell function from the optimizer
    The novelty is evaluated according to the given distance metric
    :param population:
    :param offsprings:
    :param pool: Multiprocessing pool
    :return:
    """
        solutions = [(genome, -value) for genome, value in zip(
            population['genome'], population['reward'])]
        self.values += [-val[1] for val in solutions]
        self.optimizer.tell(solutions)

    def check_stopping_criteria(self, generation):
        """
    This function is used to check for when to stop the emitter
    :param emitter_idx:
    :return:
    """
        if self.optimizer.should_stop():
            return True
        elif self._stagnation(generation - self.restarted_at):
            return True
        else:
            return False

    def _stagnation(self, cma_es_step):
        """
    Calculates the stagnation criteria
    :param emitter_idx:
    :param ca_es_step:
    :return:
    """
        bottom = int(20 * self.genome_size / self.params.emitter_population +
                     120 + 0.2 * cma_es_step)
        if cma_es_step > bottom:
            values = self.values[-bottom:]
            if np.median(values[:20]) >= np.median(values[-20:]) or np.max(
                    values[:20]) >= np.max(values[-20:]):
                return True
        return False

    def update_archive(self, population, offsprings, generation):
        """
    Updates the archive. In this case the archive is a copy of the population.
    We do not really have the concept of archive in CMA-ES, so this archive here is just for ease of analysis and
    code compatibility.
    :param population:
    :param offsprings:
    :return:
    """
        # del self.archive
        # self.archive = Archive(self.params)

        for i in range(population.size):
            population[i]['stored'] = generation
            self.archive.store(population[i])

    def update_population(self, population, offsprings, generation):
        """
    This function updates the population according to the given criteria. For CMA-ES we use the ask function of the
    library
    :param population:
    :param offsprings:
    :return:
    """
        # If a stopping criterion has been met, reinitialize the optimizer from the
        # best agent in the archive (i.e. the best solution found so far).
        if self.check_stopping_criteria(generation):
            print("Restarting")
            best = np.argmax(self.archive['reward'])
            self.restarted_at = generation

            self.values = []
            genome_idx = self.params.archive_stored_info.index('genome')
            self.optimizer = CMA(
                mean=self.archive[best][genome_idx],
                sigma=self.sigma,
                bounds=self.bounds,
                seed=self.params.seed,
                population_size=self.params.emitter_population)

        population.empty()
        for idx in range(self.params.emitter_population):
            population.add()
            population[idx]['genome'] = self.optimizer.ask()
            population[idx]['born'] = generation
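As a quick numeric illustration of the stagnation window computed in _stagnation (the values below are hypothetical, chosen only to make the arithmetic concrete):

genome_size = 100
emitter_population = 20
cma_es_step = 500
bottom = int(20 * genome_size / emitter_population + 120 + 0.2 * cma_es_step)
print(bottom)  # 100 + 120 + 100 = 320; once cma_es_step exceeds 320, the last
               # 320 recorded values are inspected and their first and last
               # 20 entries are compared.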
Code Example #9

def compute_variance(X):
    # Unbiased (Bessel-corrected) sample covariance matrix of the rows of X.
    mean = np.mean(X, axis=0)
    return sum([np.outer(x - mean, x - mean) for x in X]) / (len(X) - 1)


if __name__ == "__main__":
    # main routine
    cov = np.eye(2) * 0.01
    optimizer = CMA(mean=np.zeros(2),
                    sigma=1.3,
                    cov=cov,
                    cm=0.5,
                    population_size=10000)
    while True:
        solutions = []
        X = optimizer.ask_all(inball=True)
        C_emp1 = compute_variance(X)
        X2 = optimizer.ask_all(inball=False)
        C_emp2 = compute_variance(X2)
        print("=== testing covariance =====")
        print(C_emp1)
        print(optimizer._C * optimizer._sigma**2)
        values = quadratic(X[:, 0], X[:, 1]).tolist()
        optimizer.tell(list(zip(X, values)))
        print(optimizer._mean)

        if optimizer.should_stop():
            break
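If ask_all is not available in the installed version of the cmaes package, the same empirical-covariance check can be sketched with the standard ask() API, drawing the candidates one at a time; compute_variance is the helper defined above:

import numpy as np
from cmaes import CMA

optimizer = CMA(mean=np.zeros(2), sigma=1.3, population_size=10000)
X = np.array([optimizer.ask() for _ in range(optimizer.population_size)])
print(compute_variance(X))                   # empirical covariance of the samples
print(optimizer._C * optimizer._sigma ** 2)  # internal covariance (private attributes)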