Example #1
class NevergradMCOModel(BaseMCOModel):
    """ Base NevergradMCO Model class. Contains necessary traits attribute
    data to configure the NevergradOptimizerEngine."""

    #: Algorithms available to work with
    algorithms = Enum(*ALGORITHMS_KEYS)

    #: Defines the allowed number of objective calls
    budget = PositiveInt(100)

    #: Defines the sample size to estimate the KPI upper bounds
    bound_sample = PositiveInt(15)

    #: Display the generated points at runtime
    verbose_run = Bool(True)

    def _algorithms_default(self):
        return "TwoPointsDE"

    def default_traits_view(self):
        return View(
            Item("algorithms"),
            Item("budget", label="Allowed number of objective calls"),
            VFold(
                Group(Item("bound_sample",
                           label="Sample size for upper bound estimation",
                           visible_when='advanced'),
                      Item("verbose_run",
                           label="Report all calculated points?",
                           visible_when='advanced'),
                      label='Advanced Options')))
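A note on the `_algorithms_default` hook above: in Traits, a method named `_<trait>_default` supplies a trait's default value dynamically. A minimal self-contained sketch of the pattern (plain `traits`, no BDSS dependencies; the class and values here are hypothetical):

from traits.api import Enum, HasStrictTraits


class Chooser(HasStrictTraits):
    # Enum restricts the trait to one of the listed values; without a
    # default hook, the first listed value would be the default.
    algorithm = Enum("OnePlusOne", "TwoPointsDE", "CMA")

    # A method named _<trait>_default overrides the static default.
    def _algorithm_default(self):
        return "TwoPointsDE"


assert Chooser().algorithm == "TwoPointsDE"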
Example #2
class WeightedMCOModel(BaseMCOModel):

    #: Algorithms available to work with
    algorithms = Enum(
        *ScipyOptimizer.class_traits()["algorithms"].handler.values)

    #: Search grid resolution per KPI
    num_points = PositiveInt(7)

    #: Display the generated points at runtime
    verbose_run = Bool(True)

    #: Space search distribution for weight points sampling
    space_search_mode = Enum("Uniform", "Dirichlet")

    #: 'Subprocess' mode performs evaluation of a state in the workflow via
    #: calling force_bdss on a new subprocess with SubprocessWorkflowEvaluator
    evaluation_mode = Enum("Internal", "Subprocess")

    def default_traits_view(self):
        return View(
            Item("evaluation_mode"),
            Item("algorithms"),
            Item("num_points", label="Weights grid resolution per KPI"),
            Item("space_search_mode"),
            Item("verbose_run"),
        )

    def __start_event_type_default(self):
        return WeightedMCOStartEvent

    def __progress_event_type_default(self):
        return WeightedMCOProgressEvent
Example #3
class MCOModel(BaseMCOModel):

    num_points = PositiveInt(7)

    evaluation_mode = Enum("Internal", "Subprocess")

    def default_traits_view(self):
        return View(Item("num_points"), Item("evaluation_mode"))
Example #4
class NevergradScalarOptimizer(HasStrictTraits):
    """ Optimization of a scalar function using nevergrad.
    """

    #: Algorithms available to work with
    algorithms = Enum(*ALGORITHMS_KEYS)

    #: Optimization budget defines the allowed number of objective calls
    budget = PositiveInt(500)

    def _algorithms_default(self):
        return "TwoPointsDE"

    def get_optimizer(self, params):

        instrumentation = translate_mco_to_ng(params)

        return ng.optimizers.registry[self.algorithms](
            parametrization=instrumentation, budget=self.budget)

    def optimize_function(self, func, params):
        """ Minimize the passed scalar function.

        Parameters
        ----------
        func: Callable
            The MCO function to optimize
            Takes a list of MCO parameter values.
        params: list of MCOParameter
            The MCO parameter objects corresponding to the parameters.

        Yields
        ------
        list of float or list:
            The list of optimal parameter values.
        """
        # Create optimizer.
        optimizer = self.get_optimizer(params)

        # Create a scalar objective Nevergrad function from
        # the MCO function.
        ng_func = partial(nevergrad_function, function=func, is_scalar=True)

        # Optimize.
        # This returns a nevergrad Instrumentation object.
        optimization_result = optimizer.minimize(ng_func)

        # Convert the optimal point into MCO format
        yield translate_ng_to_mco(list(optimization_result.args))
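For orientation, here is what `get_optimizer` plus `optimizer.minimize` boil down to against the plain nevergrad API, without the `translate_mco_to_ng`/`translate_ng_to_mco` helpers (a sketch assuming a recent nevergrad release with the `ng.p` parametrization namespace):

import nevergrad as ng

# A single bounded scalar parameter stands in for the MCO parameters.
parametrization = ng.p.Scalar(lower=-2.0, upper=2.0)
optimizer = ng.optimizers.registry["TwoPointsDE"](
    parametrization=parametrization, budget=100)

# minimize() runs the ask/tell loop internally and returns the
# recommended candidate; its .value is the optimal parameter value.
recommendation = optimizer.minimize(lambda x: (x - 0.5) ** 2)
print(recommendation.value)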
Example #5
class SpaceSampler(ABCHasStrictTraits):
    """ Base class for search space sampling from various
    distributions.

    Given the dimension of the sample vectors, and the
    sampling resolution along each of the dimensions,
    it provides a public method to generate a number of
    search space samples.
    The space search satisfies the requirement that the
    l1-norm of all samples always equals 1.0.
    """

    #: the dimension of the sample vectors
    dimension = PositiveInt()

    #: the number of (effective) divisions along each dimension
    resolution = PositiveInt()

    def __init__(self, dimension, resolution, **kwargs):
        super().__init__(dimension=dimension, resolution=resolution, **kwargs)

    @abc.abstractmethod
    def _get_sample_point(self):
        pass

    @abc.abstractmethod
    def generate_space_sample(self, *args, **kwargs):
        """ Generates specified number of search space samples

        Yields
        -------
        generator
            random samples of vector satisfying the

        """
        pass
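The l1-norm requirement in the docstring means every sample lies on the unit simplex. The concrete `UniformSpaceSampler`/`DirichletSpaceSampler` implementations are not shown here, but the underlying idea can be sketched in plain numpy:

import numpy as np

def dirichlet_sample(dimension):
    # Dirichlet(1, ..., 1) is the uniform distribution over the unit
    # simplex: components are non-negative and sum to exactly 1.0.
    return np.random.dirichlet(np.ones(dimension))

def normalised_sample(dimension):
    # Rescaling uniform variates also gives an l1-norm of 1.0, though
    # the points are not uniformly distributed over the simplex.
    x = np.random.uniform(size=dimension)
    return x / x.sum()

assert abs(dirichlet_sample(4).sum() - 1.0) < 1e-9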
Example #6
class MonteCarloModel(BaseMCOModel):
    """ Model class for MonteCarloMCO.
    """

    # sample or optimize
    method = Enum(['sample', 'optimize'])

    # number of samples.
    n_sample = PositiveInt(100)

    algorithms = Enum(*SCIPY_ALGORITHMS_KEYS)

    def default_traits_view(self):
        return View(
            Item("method"),
            Item("n_sample", label="No. samples"),
            Item("algorithms"),
        )
Example #7
class MonteCarloEngine(BaseOptimizerEngine):
    """ An engine for random (Monte Carlo) sampling and optimization.
    To get a picture of the overall parameter-space the user might want to
    randomly sample points. If the parameter-space contains many local
    minima (or maxima) the user might want to optimize from multiple random
    initial points, to discover those local optima.

    Notes
    -----
    To find local minima/maxima it only makes sense to use a single-criterion
    optimizer (such as Scipy) or an a priori multi-criterion optimizer:
    a posteriori multi-criterion optimizers usually set random initial points
    themselves.

    Although in this plugin we use the Scipy optimizer, which only accepts
    RangedMCOParameter and RangedVectorMCOParameter, there is no reason why
    it could not be used with a non-gradient based optimizer that accepts
    categorical/level parameterizations.

    Of course at the moment there is no initial value/condition/choice for
    categorical/level parameterization in either BDSS or Nevergrad: the choice
    of initial value/condition/choice would seem to be down to the optimizer
    and therefore may not give the desired result: i.e. if the optimizer
    does not randomize the initial value/condition/choice, then only a single
    local or global minimum might be found along the parameter's axis.
    """

    #: Optimizer name
    name = Str("Monte Carlo")

    # sample or optimize
    method = Enum(['sample', 'optimize'])

    # number of samples/initial-points.
    n_sample = PositiveInt(100)

    #: IOptimizer class, provides library backend for optimizing a callable
    optimizer = Instance(IOptimizer, transient=True)

    def optimize(self, *vargs):
        """ Generates sampling/optimization results.

        Yields
        ------
        optimization result: tuple(np.array, np.array)
            Point of evaluation, objective values
        """

        # Sample
        if self.method == 'sample':

            # loop through sample points
            for _ in range(self.n_sample):

                # get point
                point = self.sample()

                # yield point and KPIs
                kpis = self._score(point)
                yield point, kpis

        # Optimize
        else:

            # loop through initial points
            for _ in range(self.n_sample):

                # set initial point
                self.sample(set_initial=True)

                # yield optimal point and KPIs (should yield once)
                for point in self.optimizer.optimize_function(
                        self._score, self.parameters):
                    kpis = self._score(point)
                    yield point, kpis

    def sample(self, set_initial=False):
        """ Generate a random point in parameter space.

        Parameters
        ----------
        set_initial: bool
            Also set the initial points/conditions of the parametrization.

        Returns
        -------
        list of Any
            The random point in parameter-space.

        Notes
        -----
        There is no initial value/condition/choice for categorical/level/set
        parameterization in either BDSS or Nevergrad. Therefore
        set_initial=True will do nothing for these.
        """

        sample = []
        for param in self.parameters:

            if type(param) is FixedMCOParameter:
                sample.append(param.value)

            elif type(param) is RangedMCOParameter:
                x = random.uniform(param.lower_bound, param.upper_bound)
                sample.append(x)
                if set_initial:
                    param.initial_value = x

            elif type(param) is RangedVectorMCOParameter:
                x = [
                    random.uniform(param.lower_bound[i], param.upper_bound[i])
                    for i in range(len(param.lower_bound))
                ]
                sample.append(x)
                if set_initial:
                    param.initial_value = x

            elif type(param) is ListedMCOParameter:
                x = random.choice(param.levels)
                sample.append(x)

            elif type(param) is CategoricalMCOParameter:
                x = random.choice(param.categories)
                sample.append(x)

        return sample
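Stripped of the BDSS machinery, the engine's two modes reduce to the following pattern: score random points directly, or use each random point as the initial guess for a local optimizer. A toy sketch with scipy and a hypothetical one-parameter objective:

import random
from scipy.optimize import minimize

def score(x):
    # Hypothetical objective with two local minima, at x = -1 and x = 1.
    return (x[0] ** 2 - 1.0) ** 2

bounds = [(-2.0, 2.0)]
method = "optimize"  # or "sample"

for _ in range(10):
    point = [random.uniform(*bounds[0])]
    if method == "sample":
        print(point, score(point))
    else:
        # Local optimisation from random initial points can land in
        # different local minima on different iterations.
        result = minimize(score, point, method="SLSQP", bounds=bounds)
        print(result.x, result.fun)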
Example #8
class WeightedOptimizerEngine(BaseOptimizerEngine):
    """ A priori multi-objective optimization.

    Notes
    -----
    A multi-objective function is optimized by the a priori method of
    weighting/scalarisation. That is:
    1) Create a single objective from the weighted sum of the multiple
    objectives.
    2) The single-objective function is optimized.
    3) By sampling a range of weight combinations, part or all of the
    Pareto-efficient set is found.

    The weights are calculated by:
    1) For each objective calculate a "scale" by Sen's method: Optimize each
    objective individually to find both its minimum and maximum. Use these
    to calculate its "scale".
    2) weight = scale x uniform-random-variate[0, 1), where
    SUM(variates) over objectives = 1.0
    """

    #: Optimizer name
    name = Str("Weighted_Optimizer")

    #: Search grid resolution per KPI
    num_points = PositiveInt(7)

    #: Method to calculate KPIs normalization coefficients
    scaling_method = Str("sen_scaling_method")

    #: Space search distribution for weight points sampling
    space_search_mode = Enum("Uniform", "Dirichlet")

    #: IOptimizer class that provides library backend for optimizing a
    #: callable
    optimizer = Instance(IOptimizer, transient=True)

    def optimize(self, **kwargs):
        """ Generates optimization results.

        Yields
        ------
        optimization result: tuple(np.array, np.array, list)
            Point of evaluation, objective value, weights
        """

        #: Get non-zero weight combinations for each KPI
        scaling_factors = self.get_scaling_factors()

        #: loop through weight combinations
        for weights in self.weights_samples():
            log.info("Doing MCO run with weights: {}".format(weights))

            #: multiply weights by scales
            scaled_weights = [
                weight * scale
                for weight, scale in zip(weights, scaling_factors)
            ]

            #: optimize
            for point, kpis in self._weighted_optimize(scaled_weights,
                                                       **kwargs):
                yield point, kpis, scaled_weights

    def weights_samples(self, **kwargs):
        """ Generates necessary number of search space sample points
        from the `space_search_mode` search strategy."""
        return self._space_search_distribution(
            **kwargs).generate_space_sample()

    def _weighted_optimize(self, weights, **kwargs):
        """ Performs single scipy.minimize operation on the dot product of
        the multiobjective function with `weights`.

        Parameters
        ----------
        weights: List[Float]
            Weights for each KPI objective

        Yields
        ------
        optimization result: tuple(np.array, np.array)
            Point of evaluation, and objective values
        """

        # Clear the KPI cache at the start of the optimization
        self._kpi_cache = {}

        log.info("Running optimisation." +
                 "Initial point: {}".format(self.initial_parameter_value) +
                 "Bounds: {}".format(self.parameter_bounds))

        # partial of objective function.
        weighted_score_func = partial(self._weighted_score, weights=weights)

        # optimize and evaluate
        for point in self.optimizer.optimize_function(weighted_score_func,
                                                      self.parameters,
                                                      **kwargs):

            # retrieve the function at the optimal point
            kpis = self.retrieve_result(point)

            log.info("Optimal point : {}".format(point) +
                     "KPIs at optimal point : {}".format(kpis))

            yield point, kpis

    def _weighted_score(self, input_point, weights):
        """ Calculates the weighted score of the KPI vector at `input_point`,
        by taking dot product with a vector of `weights`."""

        # Calculate the value of the raw objective function
        score = self._score(input_point)

        # Return the score to be minimized
        score = np.dot(weights, score)
        log.info("Weighted score: {}".format(score))
        return score

    def get_scaling_factors(self):
        """ Calculates scaling factors for KPIs, defined in MCO.
        Scaling factors are calculated (as required) by the provided scaling
        method. In general, this provides normalization values for the possible
        range of each KPI.
        Performs scaling for all KPIs that have `auto_scale == True`.
        Otherwise, keeps the default `scale_factor`.
        """
        if self.scaling_method == "sen_scaling_method":
            scaling_method = sen_scaling_method
        else:
            raise NotImplementedError(
                f"Scaling method with name {self.scaling_method} is not found."
            )

        #: Get default scaling weights for each KPI variable
        default_scaling_factors = np.array(
            [kpi.scale_factor for kpi in self.kpis])

        #: Apply a wrapper for the evaluator weights assignment and
        #: call of the .optimize method.
        #: Then, calculate scaling factors defined by the `scaling_method`
        scaling_factors = scaling_method(len(self.kpis),
                                         self._weighted_optimize)

        #: Apply the scaling factors where necessary
        auto_scales = [kpi.auto_scale for kpi in self.kpis]
        default_scaling_factors[auto_scales] = scaling_factors[auto_scales]

        log.info(
            "Using KPI scaling factors: {}".format(default_scaling_factors))

        return default_scaling_factors.tolist()

    def _space_search_distribution(self, **kwargs):
        """ Creates a space search distribution object, based on
        the user settings of the `space_search_mode` attribute."""

        if self.space_search_mode == "Uniform":
            distribution = UniformSpaceSampler
        elif self.space_search_mode == "Dirichlet":
            distribution = DirichletSpaceSampler
        else:
            raise NotImplementedError
        return distribution(len(self.kpis), self.num_points, **kwargs)
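The docstring's description of Sen's method amounts to: optimize each KPI individually to find its extreme values, then scale by the reciprocal of the spanned range so that no KPI dominates the weighted sum. The real `sen_scaling_method` is imported from elsewhere and may differ in detail; a sketch of the idea:

import numpy as np

def sen_style_scaling(kpi_minima, kpi_maxima):
    # One common formulation: the reciprocal of the range each KPI
    # spans between its individual optima.
    minima = np.asarray(kpi_minima, dtype=float)
    maxima = np.asarray(kpi_maxima, dtype=float)
    return 1.0 / (maxima - minima)

# Two KPIs of very different magnitude become comparable:
print(sen_style_scaling([0.0, 10.0], [1.0, 1010.0]))  # [1.    0.001]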
Example #9
class NevergradMultiOptimizer(HasStrictTraits):
    """ Optimization of a multi-objective function using nevergrad.
    """

    #: Algorithms available to work with
    algorithms = Enum(*ALGORITHMS_KEYS)

    #: Optimization budget defines the allowed number of objective calls
    budget = PositiveInt(500)

    #: Defines the sample size to estimate the KPI upper bounds
    bound_sample = PositiveInt(15)

    #: List of upper bounds for KPI values
    upper_bounds = List(Union(None, Float), visible=False, transient=True)

    def _algorithms_default(self):
        return "TwoPointsDE"

    def _valid_upper_bounds(self):
        """Returns whether or not the KPI upper bounds need to be
        estimated prior to running the optimization proceedure.
        """
        # If no upper_bounds have been set, we need to estimate
        if len(self.upper_bounds) == 0:
            return False

        # If any upper_bound values are not defined,
        # we need to estimate
        return all([value is not None for value in self.upper_bounds])

    def _calculate_upper_bounds(self, optimizer, function):
        """Uses Nevergrad's MultiobjectiveFunction.compute_aggregate_loss
        protocol to estimate the upper bounds of each output KPI. This
        is only needed if we have a mixture of KPIs that do and do not
        use bounds.
        """

        ob_func = MultiobjectiveFunction(multiobjective_function=function)

        # Prior estimate of upper_bounds ensures the calculated KPIs
        # are always higher
        upper_bounds = np.array([-np.inf])

        # Calculate a small random sample of output KPI scores
        for _ in range(self.bound_sample):
            # Use the optimizer to generate a new input / output point
            x, value = _nevergrad_ask_tell(optimizer, ob_func, no_bias=True)

            # Keep track of the highest bound
            upper_bounds = np.maximum(upper_bounds, value)

        # And replace those not defined
        return [
            estimate if bound is None else bound
            for estimate, bound in zip(upper_bounds, self.upper_bounds)
        ]

    def get_optimizer(self, params):

        instrumentation = translate_mco_to_ng(params)
        return ng.optimizers.registry[self.algorithms](
            parametrization=instrumentation, budget=self.budget)

    def get_multiobjective_function(self, ng_func, upper_bounds=None):
        return MultiobjectiveFunction(multiobjective_function=ng_func,
                                      upper_bounds=upper_bounds)

    def optimize_function(self, func, params, verbose_run=False):
        """ Minimize the passed multi-objective function.

        Parameters
        ----------
        func: Callable
            The MCO function to optimize
            Takes a list of MCO parameter values.
        params: list of MCOParameter
            The MCO parameter objects corresponding to the parameters.
        verbose_run: Bool, optional
            Whether or not to return all points generated during the
            optimization procedure, or just those on the Pareto front.

        Yields
        ------
        list of float or list:
            The list of parameter values for a single member
            of the Pareto set.
        """

        # Create optimizer.
        optimizer = self.get_optimizer(params)

        # Create a multi-objective nevergrad function from
        # the MCO function.
        ng_func = partial(nevergrad_function, function=func, is_scalar=False)

        # If a complete set of KPI upper bounds are defined, use them.
        # Otherwise use Nevergrad to estimate those not defined
        if self._valid_upper_bounds():
            upper_bounds = self.upper_bounds
        else:
            # Estimate all KPI upper bounds
            upper_bounds = self._calculate_upper_bounds(optimizer, ng_func)

        # Create a MultiobjectiveFunction object with assigned upper bounds
        ob_func = self.get_multiobjective_function(ng_func, upper_bounds)

        # Perform all calculations in the budget
        for index in range(self.budget):
            log.info("Doing  MCO run # {} / {}".format(index, self.budget))

            # Generate and solve a new input point
            x, _ = _nevergrad_ask_tell(optimizer, ob_func)

            # If verbose, report back all points, not just those in
            # Pareto front
            if verbose_run:
                yield translate_ng_to_mco(x.args)

        # If not verbose, yield each member of the Pareto set.
        # x is a tuple - ((<vargs parameters>), {<kwargs parameters>})
        # return the vargs, translated into mco.
        if not verbose_run:
            for x in ob_func.pareto_front():
                yield translate_ng_to_mco(list(x[0]))
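`_nevergrad_ask_tell` is an external helper; the ask/tell protocol it wraps looks roughly like this (a sketch assuming a recent nevergrad release):

import nevergrad as ng

optimizer = ng.optimizers.registry["TwoPointsDE"](
    parametrization=ng.p.Scalar(lower=-5.0, upper=5.0), budget=50)

for _ in range(optimizer.budget):
    candidate = optimizer.ask()           # propose a new input point
    loss = (candidate.value - 1.23) ** 2  # evaluate it externally
    optimizer.tell(candidate, loss)       # feed the result back

print(optimizer.provide_recommendation().value)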
Example #10
class WeightedOptimizer(HasTraits):
    """Performs a scipy optimise with SLSQP method given a set of weights
    for the individual KPIs.
    """

    #: Optimizer name
    name = Unicode("Weighted_Optimizer")

    single_point_evaluator = Instance(IEvaluator)

    #: Algorithms available to work with
    algorithms = Enum("SLSQP", "TNC")

    scaling_method = staticmethod(sen_scaling_method)

    #: Search grid resolution per KPI
    num_points = PositiveInt(7)

    #: Space search distribution for weight points sampling
    space_search_mode = Enum("Uniform", "Dirichlet")

    def default_traits_view(self):
        return View(
            Group(
                Item("name", style="readonly"),
                Item("algorithms"),
                Item("num_points"),
                Item("space_search_mode"),
            )
        )

    def _score(self, point, weights):
        score = np.dot(weights, self.single_point_evaluator.evaluate(point))
        log.info("Weighted score: {}".format(score))
        return score

    def get_scaling_factors(self, scaling_method=None):
        """ Calculates scaling factors for KPIs, defined in MCO.
        Scaling factors are calculated (as required) by the provided scaling
        method. In general, this provides normalization values for the possible
        range of each KPI.
        Performs scaling for all KPIs that have `auto_scale == True`.
        Otherwise, keeps the default scale factor.

        Parameters
        ----------
        scaling_method: callable
            A method to scale KPI weights. Defaults to Sen's
            "Multi-Objective Programming Method"
        """
        if scaling_method is None:
            scaling_method = self.scaling_method

        #: Get default scaling weights for each KPI variable
        default_scaling_factors = np.array(
            [kpi.scale_factor for kpi in self.kpis]
        )

        #: Apply a wrapper for the evaluator weights assignment and
        #: call of the .optimize method.
        #: Then, calculate scaling factors defined by the `scaling_method`
        scaling_factors = scaling_method(
            len(self.kpis), self._weighted_optimize
        )

        #: Apply the scaling factors where necessary
        auto_scales = [kpi.auto_scale for kpi in self.kpis]
        default_scaling_factors[auto_scales] = scaling_factors[auto_scales]

        log.info(
            "Using KPI scaling factors: {}".format(default_scaling_factors)
        )

        return default_scaling_factors.tolist()

    def _space_search_distribution(self, **kwargs):
        """ Generates space search distribution object, based on
        the user settings of the `space_search_strategy` trait."""

        if self.space_search_mode == "Uniform":
            distribution = UniformSpaceSampler
        elif self.space_search_mode == "Dirichlet":
            distribution = DirichletSpaceSampler
        else:
            raise NotImplementedError
        return distribution(len(self.kpis), self.num_points, **kwargs)

    def weights_samples(self, **kwargs):
        """ Generates necessary number of search space sample points
        from the internal search strategy."""
        return self._space_search_distribution(
            **kwargs
        ).generate_space_sample()

    def optimize(self):
        """ Generates optimization results with weighted optimization.

        Yields
        ----------
        optimization result: tuple(np.array, np.array, list)
            Point of evaluation, objective value, dummy list of weights
        """
        #: Get scaling factors and non-zero weight combinations for each KPI
        scaling_factors = self.get_scaling_factors()
        for weights in self.weights_samples():

            log.info("Doing MCO run with weights: {}".format(weights))

            scaled_weights = [
                weight * scale
                for weight, scale in zip(weights, scaling_factors)
            ]

            optimal_point, optimal_kpis = self._weighted_optimize(
                scaled_weights
            )
            yield optimal_point, optimal_kpis, scaled_weights

    def _weighted_optimize(self, weights):
        """ Performs single scipy.minimize operation on the convolution of
        the multiobjective function with `weights`.

        Parameters
        ----------
        weights: List[Float]
            Weights for each KPI objective

        Returns
        -------
        optimization result: tuple(np.array, np.array)
            Point of evaluation, and objective values
        """
        initial_point = [p.initial_value for p in self.parameters]
        bounds = [(p.lower_bound, p.upper_bound) for p in self.parameters]

        log.info(
            "Running optimisation. "
            + "Initial point: {} ".format(initial_point)
            + "Bounds: {}".format(bounds)
        )

        weighted_score_func = partial(self._score, weights=weights)

        optimization_result = scipy_optimize.minimize(
            weighted_score_func, initial_point, method="SLSQP", bounds=bounds
        )
        optimal_point = optimization_result.x
        optimal_kpis = self.single_point_evaluator.evaluate(optimal_point)

        log.info(
            "Optimal point : {} ".format(optimal_point)
            + "KPIs at optimal point : {}".format(optimal_kpis)
        )

        return optimal_point, optimal_kpis

    def __getstate__(self):
        state_data = pop_dunder_recursive(super().__getstate__())
        state_data.pop("kpis")
        state_data.pop("parameters")
        return state_data
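The heart of `_weighted_optimize` is a scalarised objective bound with `functools.partial` and handed to scipy. A stripped-down sketch with a hypothetical two-KPI evaluator:

from functools import partial

import numpy as np
from scipy import optimize as scipy_optimize

def evaluate(point):
    # Hypothetical evaluator returning two KPI values.
    return np.array([point[0] ** 2, (point[0] - 1.0) ** 2])

def weighted_score(point, weights):
    return np.dot(weights, evaluate(point))

result = scipy_optimize.minimize(
    partial(weighted_score, weights=[0.5, 0.5]),
    x0=[0.0], method="SLSQP", bounds=[(-1.0, 2.0)])
print(result.x)  # one Pareto-efficient point for this weight choice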
Example #11
class NevergradOptimizer(HasTraits):
    single_point_evaluator = Instance(IEvaluator)

    #: Optimizer name
    name = Unicode("Nevergrad")

    #: Algorithms available to work with
    algorithms = Enum(*ng.optimizers.registry.keys())

    #: Optimization budget defines the allowed number of objective calls
    budget = PositiveInt(500)

    #: Yield all data points or only the Pareto-optimal
    verbose_run = Bool(False)

    def _algorithms_default(self):
        return "TwoPointsDE"

    def default_traits_view(self):
        return View(
            Item("name", style="readonly"),
            Item("algorithms"),
            Item(
                "budget", label="Allowed number of objective calls"
            ),
            Item("verbose_run", label="Display objective values at runtime"),
        )

    def _create_instrumentation_variable(self, parameter):
        """ Create nevergrad.variable from `MCOParameter`. Different
        MCOParameter subclasses have different signature attributes.
        The mapping between MCOParameters and nevergrad types is bijective.

        Parameters
        ----------
        parameter: BaseMCOParameter
            object to convert to nevergrad type

        Returns
        -------
        nevergrad_parameter: nevergrad.Variable
            nevergrad variable of corresponding type
        """
        if hasattr(parameter, "lower_bound") and hasattr(
            parameter, "upper_bound"
        ):
            # The affine transformation with `slope` before `bounded` can be
            # used to normalize the distribution of points in internal space.
            # This allows better exploration of the boundary regions. This
            # feature is still in research mode, and presumably should be
            # left for the user to play with. Implementation would be:
            # >>> affine_slope = 1.0
            # >>> var = ng.var.Scalar().affined(affine_slope, 0).bounded(...)
            return ng.var.Scalar().bounded(
                parameter.lower_bound, parameter.upper_bound
            )
        elif hasattr(parameter, "value"):
            return ng.var._Constant(value=parameter.value)
        elif hasattr(parameter, "levels"):
            return ng.var.OrderedDiscrete(parameter.sample_values)
        elif hasattr(parameter, "categories"):
            return ng.var.SoftmaxCategorical(
                possibilities=parameter.sample_values, deterministic=True
            )
        else:
            raise NevergradTypeError(
                f"Can not convert {parameter} to any of"
                " supported Nevergrad types"
            )

    def _assemble_instrumentation(self, parameters=None):
        """ Assemble nevergrad.Instrumentation object from `parameters` list.

        Parameters
        ----------
        parameters: List(BaseMCOParameter)
            parameter objects containing lower and upper numerical bounds

        Returns
        -------
        instrumentation: ng.Instrumentation
        """
        if parameters is None:
            parameters = self.parameters

        instrumentation = [
            self._create_instrumentation_variable(p) for p in parameters
        ]
        return ng.Instrumentation(*instrumentation)

    def _create_kpi_bounds(self, kpis=None):
        """ Assemble optimization bounds on KPIs, provided by
        `scale_factor` attributes.
        Note: Ideally, a different kpi attribute should be
        responsible for the bounds.

        Parameters
        ----------
        kpis: List(KPISpecification)
            kpi objects containing upper numerical bounds

        Returns
        -------
        upper_bounds: np.array
            kpis upper bounds
        """
        if kpis is None:
            kpis = self.kpis
        upper_bounds = np.zeros(len(kpis))
        for i, kpi in enumerate(kpis):
            try:
                upper_bounds[i] = kpi.scale_factor
            except AttributeError:
                upper_bounds[i] = 100
        return upper_bounds

    def _swap_minmax_kpivalues(self, values):
        """ Inverts the array of KPI values whenever the corresponding
         KPI is subject to maximization instead of minimization.

        Parameters
        ----------
        values: List[int, float], np.array
            KPI values to invert for minimization mode

        Returns
        -------
        substituted_values: np.array
            New KPI values, with the elements corresponding to
            maximization inverted (a -> -a)
        """
        substituted_values = np.array(values)
        for i in range(len(values)):
            if self.kpis[i].objective == "MAXIMISE":
                substituted_values[i] *= -1.0
        return substituted_values

    def _score(self, point):
        score = self.single_point_evaluator.evaluate(point)
        log.info("Objective score: {}".format(score))
        return score

    def optimize(self):
        """ Constructs objects required by the nevergrad engine to
        perform optimization.

        Yields
        ------
        optimization result: tuple(np.array, np.array, list)
            Point of evaluation, objective value, dummy list of weights
        """
        upper_bounds = self._create_kpi_bounds()
        f = MultiobjectiveFunction(
            multiobjective_function=self._score, upper_bounds=upper_bounds
        )
        instrumentation = self._assemble_instrumentation()
        instrumentation.random_state.seed(12)
        ng_optimizer = ng.optimizers.registry[self.algorithms](
            instrumentation=instrumentation, budget=self.budget
        )
        for _ in range(ng_optimizer.budget):
            x = ng_optimizer.ask()
            value = f.multiobjective_function(x.args)
            volume = f.compute_aggregate_loss(
                self._swap_minmax_kpivalues(value), *x.args, **x.kwargs
            )
            ng_optimizer.tell(x, volume)

            if self.verbose_run:
                yield x.args, value, [1] * len(self.kpis)

        if not self.verbose_run:
            for point, value in f._points:
                value = self._swap_minmax_kpivalues(value)
                yield point[0], value, [1] * len(self.kpis)

    def __getstate__(self):
        state_data = pop_dunder_recursive(super().__getstate__())
        state_data.pop("kpis")
        state_data.pop("parameters")
        return state_data
Example #12
class RandomSamplingMCOModel(BaseMCOModel):
    num_trials = PositiveInt(1800,
                             label='Number of trials',
                             desc='The number of random trials to perform')
    evaluation_mode = Enum("Internal", "Subprocess")
Example #13
class EggboxPESDataSourceModel(BaseDataSourceModel):
    """ This model stores all the data required to compute the
    potential. All randomness must be contained in the model, not at
    the instance-level. Changing any of the parameters used to generate
    the potential will generate an entirely new random potential.

    """

    # traits controlled by user
    dimension = PositiveInt(
        2,
        label='Dimensionality',
        changes_slots=True
    )
    cuba_design_space_type = Unicode(
        changes_slots=True,
        label='Parameter space type/units'
    )
    cuba_potential_type = Unicode(
        changes_slots=True,
        label='Potential type'
    )
    num_cells = PositiveInt(
        5,
        label='Number of cells',
        desc='Number of lattice points in each direction'
    )
    sigma_star = Float(
        0.1,
        label='σ*',
        desc='Variance of basin depths: σ*~0 will lead to identical basins; '
             'σ*~1 will normally lead to a few basins dominating'
    )
    locally_optimize = Bool(
        True,
        label='Locally optimize trials?',
        desc='Whether or not to locally optimize each '
             'trial and return the local minima'
    )

    # traits set by calculation
    basin_depths = List()
    basin_positions = List()

    # these lists can be useful for debugging and plotting, they contain
    # the trial values and results at each step of the MCO (see
    # `scripts/`)
    trials = List()
    results = List()

    traits_view = View([Item('locally_optimize'),
                        Item('sigma_star'),
                        Item('num_cells'),
                        Item('dimension'),
                        Item('cuba_design_space_type'),
                        Item('cuba_potential_type')])

    def __init__(self, *args, **kwargs):
        super(EggboxPESDataSourceModel, self).__init__(*args, **kwargs)
        self._set_basin_positions()
        self._randomise_model_data()

    @on_trait_change('sigma_star,basin_positions')
    def _randomise_model_data(self):
        """ Assign random depths to defined basins, with variance
        controlled by self.sigma_star.

        """
        self.basin_depths = ((self.sigma_star *
                              np.random.rand(len(self.basin_positions)))
                             .tolist())

    @on_trait_change('num_cells,dimension')
    def _set_basin_positions(self):
        """ Construct the array of basin positions for the given lattice.
        The square lattice is the only one implemented in this example.

        """
        self._set_basin_positions_square_lattice()

    def _set_basin_positions_square_lattice(self):
        """ Set the basin positions to a square lattice from 0 -> 1. """
        grids = self.dimension * [np.linspace(0, 1,
                                              num=self.num_cells,
                                              endpoint=False)]

        self.basin_positions = ((np.asarray(np.meshgrid(*grids))
                                 .reshape(self.dimension, -1).T)
                                .tolist())
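`_set_basin_positions_square_lattice` relies on a standard meshgrid-and-reshape idiom; in isolation it behaves as follows:

import numpy as np

dimension, num_cells = 2, 3
grids = dimension * [np.linspace(0, 1, num=num_cells, endpoint=False)]

# meshgrid builds one coordinate array per axis; reshaping to
# (dimension, -1) and transposing gives one row per lattice point.
positions = np.asarray(np.meshgrid(*grids)).reshape(dimension, -1).T
print(positions.shape)   # (9, 2) -> num_cells ** dimension points
print(positions[:3])     # [[0. 0.], [0.333 0.], [0.667 0.]]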
Example #14
class MCOModel(BaseMCOModel):

    num_points = PositiveInt(7)

    def default_traits_view(self):
        return View(Item('num_points'))