Example #1
def __init__(
    self,
    independent_sigma: bool = True,
    mutable_sigma: bool = True,
    multiobjective: bool = False,
    recombination: str = "crossover",
    optimum: tp.Tuple[int, int] = (80, 100),
) -> None:
    assert recombination in ("crossover", "average")
    self._optimum = np.array(optimum, dtype=float)
    parametrization = p.Array(shape=(2,), mutable_sigma=mutable_sigma)
    init = np.array([1.0, 1.0] if independent_sigma else [1.0], dtype=float)
    sigma = p.Array(init=init).set_mutation(exponent=2.0) if mutable_sigma else p.Constant(init)
    parametrization.set_mutation(sigma=sigma)
    parametrization.set_recombination("average" if recombination == "average" else p.mutation.Crossover())
    self._multiobjective = MultiobjectiveFunction(self._multifunc, 2 * self._optimum)
    super().__init__(self._multiobjective if multiobjective else self._monofunc, parametrization.set_name(""))  # type: ignore
    descr = dict(
        independent_sigma=independent_sigma,
        mutable_sigma=mutable_sigma,
        multiobjective=multiobjective,
        optimum=optimum,
        recombination=recombination,
    )
    self._descriptors.update(descr)
    self.register_initialization(**descr)
Example #2
def __init__(
    self,
    independent_sigma: bool = True,
    mutable_sigma: bool = True,
    multiobjective: bool = False,
    recombination: str = "crossover",
    optimum: tp.Tuple[int, int] = (80, 100),
) -> None:
    assert recombination in ("crossover", "average")
    self._optimum = np.array(optimum, dtype=float)
    # 2-dimensional array parameter; sigma (the mutation step) may itself be mutable
    parametrization = p.Array(shape=(2,), mutable_sigma=mutable_sigma)
    # one sigma per dimension if independent, a single shared sigma otherwise
    init = np.array([1.0, 1.0] if independent_sigma else [1.0], dtype=float)
    sigma = p.Array(init=init).set_mutation(exponent=2.0) if mutable_sigma else p.Constant(init)
    parametrization.set_mutation(sigma=sigma)
    parametrization.set_recombination("average" if recombination == "average" else p.mutation.Crossover())
    # reference upper bounds, only needed in the multiobjective case
    self.multiobjective_upper_bounds = np.array(2 * self._optimum) if multiobjective else None
    super().__init__(self._multifunc if multiobjective else self._monofunc, parametrization.set_name(""))  # type: ignore
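
Examples #1 and #2 above build the parametrization by hand: a 2-dimensional p.Array whose mutation step (sigma) can itself be a mutable parameter, plus a recombination rule. The following standalone sketch reproduces that pattern with the public nevergrad API; the objective function, the optimizer choice (OnePlusOne) and the budget are illustrative assumptions, not taken from the snippets above.

import numpy as np
import nevergrad as ng

def monofunc(x: np.ndarray) -> float:
    # hypothetical objective: squared distance to the (80, 100) optimum used above
    return float(np.sum((x - np.array([80.0, 100.0])) ** 2))

# 2-dimensional array parameter with a mutable, exponent-2 mutated step size
param = ng.p.Array(shape=(2,), mutable_sigma=True)
param.set_mutation(sigma=ng.p.Array(init=np.ones(2)).set_mutation(exponent=2.0))
param.set_recombination("average")

optimizer = ng.optimizers.OnePlusOne(parametrization=param, budget=1000)
recommendation = optimizer.minimize(monofunc)
print(recommendation.value)  # should move toward [80., 100.]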
Example #3
    def minimize(
        self,
        objective_function: tp.Callable[..., tp.Loss],
        executor: tp.Optional[tp.ExecutorLike] = None,
        batch_mode: bool = False,
        verbosity: int = 0,
    ) -> p.Parameter:
        """Optimization (minimization) procedure

        Parameters
        ----------
        objective_function: callable
            A callable to optimize (minimize)
        executor: Executor
            An executor object with a method :code:`submit(callable, *args, **kwargs)` returning a Future-like object
            with methods :code:`done() -> bool` and :code:`result() -> float`. The executor's role is to dispatch the
            execution of the jobs locally, on a cluster, or with multithreading, depending on the implementation.
            E.g.: :code:`concurrent.futures.ThreadPoolExecutor`
        batch_mode: bool
            when :code:`num_workers = n > 1`, whether jobs are executed in batches (:code:`n` evaluations are launched,
            then we wait for all results before launching :code:`n` more) or in steady state (a new evaluation is
            launched whenever one finishes)
        verbosity: int
            print information about the optimization (0: None, 1: fitness values, 2: fitness values and recommendation)

        Returns
        -------
        ng.p.Parameter
            The candidate with minimal value. :code:`ng.p.Parameter` objects have fields :code:`args` and :code:`kwargs`,
            which can be used directly on the function (:code:`objective_function(*candidate.args, **candidate.kwargs)`).

        Note
        ----
        For evaluation purposes and with the current implementation, it is better to use :code:`batch_mode=True`.
        """
        # pylint: disable=too-many-branches
        if self.budget is None:
            raise ValueError("Budget must be specified")
        if executor is None:
            executor = utils.SequentialExecutor()  # defaults to running everything locally and sequentially
            if self.num_workers > 1:
                warnings.warn(
                    f"num_workers = {self.num_workers} > 1 is suboptimal when run sequentially",
                    errors.InefficientSettingsWarning,
                )
        assert executor is not None
        tmp_runnings: tp.List[tp.Tuple[p.Parameter, tp.JobLike[tp.Loss]]] = []
        tmp_finished: tp.Deque[tp.Tuple[p.Parameter, tp.JobLike[tp.Loss]]] = deque()
        # go
        sleeper = ngtools.Sleeper()  # manages waiting time depending on execution time of the jobs
        remaining_budget = self.budget - self.num_ask
        first_iteration = True
        #
        while remaining_budget or self._running_jobs or self._finished_jobs:
            # # # # # Update optimizer with finished jobs # # # # #
            # this is the first thing to do when resuming an existing optimization run
            # process finished
            if self._finished_jobs:
                if (remaining_budget or sleeper._start is not None) and not first_iteration:
                    # ignore stop if no more suggestion is sent
                    # this is an ugly hack to avoid warnings at the end of steady mode
                    sleeper.stop_timer()
                while self._finished_jobs:
                    x, job = self._finished_jobs[0]
                    result = job.result()
                    self.tell(x, result)
                    # remove it after the tell to make sure it was indeed "told" (in case of interruption)
                    self._finished_jobs.popleft()
                    if verbosity:
                        print(f"Updating fitness with value {job.result()}")
                if verbosity:
                    print(f"{remaining_budget} remaining budget and {len(self._running_jobs)} running jobs")
                    if verbosity > 1:
                        print("Current pessimistic best is: {}".format(
                            self.current_bests["pessimistic"]))
            elif not first_iteration:
                sleeper.sleep()
            # # # # # Start new jobs # # # # #
            if not batch_mode or not self._running_jobs:
                new_sugg = max(0, min(remaining_budget, self.num_workers - len(self._running_jobs)))
                if verbosity and new_sugg:
                    print(f"Launching {new_sugg} jobs with new suggestions")
                for _ in range(new_sugg):
                    try:
                        args = self.ask()
                    except errors.NevergradEarlyStopping:
                        remaining_budget = 0
                        break
                    self._running_jobs.append((args, executor.submit(objective_function, *args.args, **args.kwargs)))
                if new_sugg:
                    sleeper.start_timer()
            if remaining_budget > 0:  # early stopping sets it to 0
                remaining_budget = self.budget - self.num_ask
            # split (repopulate finished and runnings in only one loop to avoid
            # weird effects if job finishes in between two list comprehensions)
            tmp_runnings, tmp_finished = [], deque()
            for x_job in self._running_jobs:
                (tmp_finished if x_job[1].done() else tmp_runnings).append(x_job)
            self._running_jobs, self._finished_jobs = tmp_runnings, tmp_finished
            first_iteration = False
        return self.provide_recommendation() if self.num_objectives == 1 else p.Constant(None)
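
Example #3 is the ask/tell loop behind Optimizer.minimize. From the caller's side, a minimal usage sketch (assuming the public nevergrad API; the objective function and the optimizer choice are illustrative, not part of the snippet above) looks like this:

from concurrent import futures
import nevergrad as ng

def square(x):
    # illustrative objective; x is a numpy array of shape (2,)
    return float(sum((x - 0.5) ** 2))

# parametrization=2 means a 2-dimensional array; num_workers > 1 allows parallel evaluations
optimizer = ng.optimizers.OnePlusOne(parametrization=2, budget=100, num_workers=4)
with futures.ThreadPoolExecutor(max_workers=optimizer.num_workers) as executor:
    # batch_mode=False: a new evaluation is launched as soon as one finishes (steady state)
    recommendation = optimizer.minimize(square, executor=executor, batch_mode=False)
print(recommendation.value)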