def optimize(self):
    """Construct the nevergrad objects and run the optimization loop.

    Builds a ``MultiobjectiveFunction`` around ``self._score`` with KPI
    upper bounds, seeds the instrumentation for reproducibility, and
    drives the selected nevergrad optimizer via the ask/tell protocol.

    Yields
    ------
    tuple(np.array, np.array, list)
        Point of evaluation, objective value, dummy list of weights
        (one weight of 1 per KPI).
    """
    # Upper bounds for each KPI, required by the hypervolume computation.
    upper_bounds = self._create_kpi_bounds()
    f = MultiobjectiveFunction(
        multiobjective_function=self._score, upper_bounds=upper_bounds
    )
    instrumentation = self._assemble_instrumentation()
    # Fixed seed so repeated runs produce the same sequence of asks.
    instrumentation.random_state.seed(12)
    ng_optimizer = ng.optimizers.registry[self.algorithms](
        instrumentation=instrumentation, budget=self.budget
    )
    for _ in range(ng_optimizer.budget):
        x = ng_optimizer.ask()
        value = f.multiobjective_function(x.args)
        # Aggregate scalar loss fed back to the optimizer; KPI values are
        # sign-swapped first so maximisation targets become minimised losses.
        volume = f.compute_aggregate_loss(
            self._swap_minmax_kpivalues(value), *x.args, **x.kwargs
        )
        ng_optimizer.tell(x, volume)
        if self.verbose_run:
            # Stream every evaluated point as it is produced.
            yield x.args, value, [1] * len(self.kpis)
    if not self.verbose_run:
        # NOTE(review): ``f._points`` is a private nevergrad attribute that
        # accumulates evaluated (point, value) pairs — verify it still
        # exists when upgrading nevergrad.
        for point, value in f._points:
            # Undo the min/max sign swap before reporting.
            value = self._swap_minmax_kpivalues(value)
            yield point[0], value, [1] * len(self.kpis)
def test_doc_multiobjective() -> None:
    """Exercise the multiobjective documentation example end-to-end.

    The region between the ``DOC_MULTIOBJ_0`` / ``DOC_MULTIOBJ_1`` markers
    is presumably extracted verbatim into the documentation — verify the
    extraction tooling before reformatting that region.
    """
    # DOC_MULTIOBJ_0
    import nevergrad as ng
    from nevergrad.functions import MultiobjectiveFunction
    import numpy as np

    f = MultiobjectiveFunction(
        multiobjective_function=lambda x: [np.sum(x**2), np.sum((x - 1)**2)],
        upper_bounds=[2.5, 2.5],
    )
    print(f(np.array([1.0, 2.0])))

    optimizer = ng.optimizers.CMA(parametrization=3, budget=100)  # 3 is the dimension, 100 is the budget.
    recommendation = optimizer.minimize(f)

    # The function embeds its Pareto-front:
    print("My Pareto front:", [x[0][0] for x in f.pareto_front()])

    # It can also provide a subset:
    print("My Pareto front:", [x[0][0] for x in f.pareto_front(2, subset="random")])
    print("My Pareto front:", [x[0][0] for x in f.pareto_front(2, subset="loss-covering")])
    print("My Pareto front:", [x[0][0] for x in f.pareto_front(2, subset="domain-covering")])
    # DOC_MULTIOBJ_1
    # Sanity checks on the front and the three subset strategies.
    assert len(f.pareto_front()) > 1
    assert len(f.pareto_front(2, "loss-covering")) == 2
    assert len(f.pareto_front(2, "domain-covering")) == 2
    assert len(f.pareto_front(2, "random")) == 2
def _calculate_upper_bounds(self, optimizer, function):
    """Estimate upper bounds for output KPIs that do not declare one.

    Uses Nevergrad's ``MultiobjectiveFunction.compute_aggregate_loss``
    protocol to sample KPI scores; only needed when some KPIs declare
    explicit bounds and others do not.

    Parameters
    ----------
    optimizer : nevergrad optimizer
        Source of candidate input points.
    function : callable
        Multiobjective function producing the KPI values.

    Returns
    -------
    list
        One bound per KPI: the declared bound where available, otherwise
        the sampled estimate.
    """
    ob_func = MultiobjectiveFunction(multiobjective_function=function)

    # Start below any reachable value so the running maximum is always
    # raised by the first sample.
    estimates = np.array([-np.inf])

    # Draw a small random sample of output KPI scores and track the
    # element-wise maximum seen so far.
    for _ in range(self.bound_sample):
        _, value = _nevergrad_ask_tell(optimizer, ob_func, no_bias=True)
        estimates = np.maximum(estimates, value)

    # Prefer explicitly declared bounds; fall back to the estimates.
    return [
        bound if bound is not None else estimate
        for estimate, bound in zip(estimates, self.upper_bounds)
    ]
def __init__(self, functions: tp.List[ArtificialFunction], upper_bounds: np.ndarray) -> None:
    """Bundle several artificial functions into one multiobjective function.

    Parameters
    ----------
    functions : list of ArtificialFunction
        The individual objectives; the first one supplies the
        parametrization for the experiment.
    upper_bounds : np.ndarray
        Upper bounds handed to ``MultiobjectiveFunction`` for the
        hypervolume computation.
    """
    self._upper_bounds = upper_bounds
    self._functions = functions
    # Wrap the combined objective callback in nevergrad's helper.
    mo = MultiobjectiveFunction(self._mo, upper_bounds)
    self.multiobjective = mo
    super().__init__(mo, functions[0].parametrization)
def test_readme_example() -> None:
    """Run the README multiobjective example and check the Pareto front.

    Fix: ``pareto_front`` is a method, not an attribute — the other tests
    in this file call ``f.pareto_front()``.  ``len(f.pareto_front)`` would
    raise ``TypeError`` (bound methods have no length), so the front must
    be obtained by calling the method before taking its length.
    """
    f = MultiobjectiveFunction(
        multiobjective_function=lambda x: (x[0]**2, x[1]**2),
        upper_bounds=[2.5, 2.5],
    )
    optimizer = ng.optimizers.CMA(parametrization=3, budget=100)  # 3 is the dimension, 100 is the budget.
    optimizer.minimize(f)
    # The function embeds its Pareto-front:
    assert len(f.pareto_front()) > 1
def get_multiobjective_function(self, ng_func, upper_bounds=None):
    """Wrap *ng_func* in a nevergrad ``MultiobjectiveFunction``.

    Parameters
    ----------
    ng_func : callable
        The multiobjective function to wrap.
    upper_bounds : optional
        Upper bounds forwarded to ``MultiobjectiveFunction``; ``None``
        leaves them unset.

    Returns
    -------
    MultiobjectiveFunction
        The wrapped function.
    """
    mo_function = MultiobjectiveFunction(
        multiobjective_function=ng_func,
        upper_bounds=upper_bounds,
    )
    return mo_function