Example #1
 def __call__(self, data: D, inplace: bool = False) -> D:
     """Creates a new Data instance with bounds"""
     new = data if inplace else data.copy()
     # if not utils.BoundChecker(*self.bounds)(new.value):
     #     raise errors.NevergradValueError("Current value is not within bounds, please update it first")
     value = new.value
     new.add_layer(self.copy())
     try:
         new.value = value
     except ValueError as e:
         raise errors.NevergradValueError(
             "Current value is not within bounds, please update it first"
         ) from e
     if all(x is not None for x in self.bounds):
         tests = [data.copy() for _ in range(2)]  # TODO make it simpler and more efficient?
         with warnings.catch_warnings():
             warnings.simplefilter("ignore", category=RuntimeWarning)
             for test, bound in zip(tests, self.bounds):
                 val = bound * np.ones(value.shape) if isinstance(value, np.ndarray) else bound[0]  # type: ignore
                 test.value = val
         state = tests[0].get_standardized_data(reference=tests[1])
         min_dist = np.min(np.abs(state))
         if min_dist < 3.0:
             warnings.warn(
                 f"Bounds are {min_dist} sigma away from each other at the closest, "
                 "you should aim for at least 3 for better quality.")
     return new
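
A minimal usage sketch of this behaviour through the public API (assuming nevergrad is installed and imported as ng): bounds whose closest points are less than 3 sigma apart are expected to trigger the warning emitted above.

import warnings
import nevergrad as ng

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Scalar uses sigma=1.0 by default, so bounds only 1.0 apart should trigger the
    # "Bounds are ... sigma away from each other" warning raised in __call__ above.
    param = ng.p.Scalar(init=0.5).set_bounds(lower=0.0, upper=1.0)
print([str(w.message) for w in caught])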
Example #2
    def __init__(
        self,
        lower: tp.BoundValue = None,
        upper: tp.BoundValue = None,
        method: str = "bouncing",
        uniform_sampling: tp.Optional[bool] = None,
    ) -> None:
        """Bounds all real values into [lower, upper] using a provided method

        See Parameter.set_bounds
        """
        super().__init__(lower=lower, upper=upper, uniform_sampling=uniform_sampling)
        # update instance
        transforms = dict(
            clipping=trans.Clipping,
            arctan=trans.ArctanBound,
            tanh=trans.TanhBound,
            gaussian=trans.CumulativeDensity,
        )
        transforms["bouncing"] = functools.partial(trans.Clipping,
                                                   bounce=True)  # type: ignore
        if method not in transforms:
            raise errors.NevergradValueError(
                f"Unknown method {method}, available are: {transforms.keys()}\nSee docstring for more help."
            )
        self._method = method
        self._transform = transforms[method](*self.bounds)
        self.set_name(self._transform.name)
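
A short usage sketch, assuming the public set_bounds API: each transform registered above can be requested by name ("gaussian" maps to the CumulativeDensity transform, "bouncing" to Clipping with bounce=True; "constraint" is handled separately, see example #7 below).

import nevergrad as ng

# Request a few of the registered transforms by name; an unknown name raises
# NevergradValueError with the "Unknown method ..." message from __init__ above.
for method in ("bouncing", "clipping", "arctan", "tanh"):
    param = ng.p.Array(shape=(3,)).set_bounds(lower=-5.0, upper=5.0, method=method)
    print(method, param.value)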
Example #3
 def __init__(self, base: float) -> None:
     super().__init__(base)
     if base <= 0:
         raise errors.NevergradValueError(
             "Exponent must be strictly positive")
     self._base = base
     self._name = f"exp={base}"
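
As an illustrative sketch (assuming the public API), this exponent machinery is what backs log-distributed parameters such as ng.p.Log, where mutations act multiplicatively rather than additively:

import nevergrad as ng

lr = ng.p.Log(lower=1e-4, upper=1.0)  # log-distributed scalar; base and bounds must be positive
print(lr.value)
lr.mutate()
print(lr.value)  # the value moves multiplicatively and stays within the bounds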
Example #4
    def set_mutation(
        self: D,
        sigma: tp.Optional[tp.Union[float, core.Parameter]] = None,
        exponent: tp.Optional[float] = None,
        custom: tp.Optional[tp.Union[str, core.Parameter]] = None,
    ) -> D:
        """Output will be cast to integer(s) through deterministic rounding.

        Parameters
        ----------
        sigma: Array/Log or float
            The standard deviation of the mutation. If a Parameter is provided, it will replace the current
            value. If a float is provided, it will either replace a previous float value, or update the value
            of the Parameter.
        exponent: float
            exponent for the logarithmic mode. With the default sigma=1, using exponent=2 will perform
            x2 or /2 "on average" on the value at each mutation.
        custom: str or Parameter
            custom mutation which can be a string ("gaussian" or "cauchy")
            or a Mutation/Recombination-like object
            or a Parameter whose value is either of those

        Returns
        -------
        self
        """
        if sigma is not None:
            # just replace if an actual Parameter is provided as sigma, else update value (parametrized or not)
            if isinstance(sigma, core.Parameter) or isinstance(self.parameters._content["sigma"], core.Constant):
                self.parameters._content["sigma"] = core.as_parameter(sigma)
            else:
                self.sigma.value = sigma  # type: ignore
        if exponent is not None:
            from . import _datalayers

            if exponent <= 0.0:
                raise ValueError("Only exponents strictly higher than 0.0 are allowed")
            value = self.value
            layer = _datalayers.Exponent(base=exponent)
            layer._LEGACY = True
            self.add_layer(layer)
            _fix_legacy(self)
            try:
                self.value = value
            except ValueError as e:
                raise errors.NevergradValueError(
                    "Cannot convert to logarithmic mode with current non-positive value, please update it firstp."
                ) from e
        if custom is not None:
            self.parameters._content["mutation"] = core.as_parameter(custom)
        return self
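
A minimal usage sketch of the three arguments, assuming nevergrad's public parametrization API:

import nevergrad as ng

arr = ng.p.Array(shape=(2,))
arr.set_mutation(sigma=0.1)         # smaller gaussian steps
arr.set_mutation(custom="cauchy")   # heavier-tailed mutation

scal = ng.p.Scalar(init=1.0)
scal.set_mutation(exponent=2.0)     # logarithmic mode; requires a strictly positive value
scal.mutate()
print(arr.value, scal.value)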
Example #5
    def __init__(
        self,
        lower: tp.BoundValue = None,
        upper: tp.BoundValue = None,
        uniform_sampling: tp.Optional[bool] = None,
    ) -> None:
        """Bounds all real values into [lower, upper]

        Parameters
        ----------
        lower: float or None
            minimum value
        upper: float or None
            maximum value
        uniform_sampling: Optional bool
            Changes the default behavior of the "sample" method (aka creating a child and mutating it from the current instance)
            or the sampling optimizers, to creating a child with a value sampled uniformly (or log-uniformly) within
            the whole range of the bounds. The "sample" method is used by some algorithms to create an initial population.
            This is activated by default if both bounds are provided.
        """  # TODO improve description of methods
        super().__init__(lower, upper, uniform_sampling)
        self.bounds: tp.Tuple[tp.Optional[np.ndarray], tp.Optional[np.ndarray]] = tuple(  # type: ignore
            a if isinstance(a, np.ndarray) or a is None else np.array([a], dtype=float)
            for a in (lower, upper)
        )
        both_bounds = all(b is not None for b in self.bounds)
        self.uniform_sampling: bool = uniform_sampling  # type: ignore
        if uniform_sampling is None:
            self.uniform_sampling = both_bounds
        if self.uniform_sampling and not both_bounds:
            raise errors.NevergradValueError(
                "Cannot use full range sampling if both bounds are not set")
        if not (lower is None or upper is None):
            if (self.bounds[0] >= self.bounds[1]).any():  # type: ignore
                raise errors.NevergradValueError(
                    f"Lower bounds {lower} should be strictly smaller than upper bounds {upper}"
                )
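
A hedged sketch of the sampling behaviour, assuming the public API (catching ValueError here assumes NevergradValueError derives from it):

import nevergrad as ng

# With both bounds set, "sample" draws uniformly within the range by default.
param = ng.p.Scalar().set_bounds(lower=0.0, upper=10.0)
print(param.sample().value)

# Requesting full range sampling with a single bound is rejected as in __init__ above.
try:
    ng.p.Scalar().set_bounds(lower=0.0, full_range_sampling=True)
except ValueError as e:
    print(e)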
Example #6
 def _call_deeper(self, name: str, *args: tp.Any, **kwargs: tp.Any) -> tp.Any:
     if self._layers[self._layer_index] is not self:
         layers = [f"{lay.name}({lay._layer_index})" for lay in self._layers]
         raise errors.NevergradRuntimeError(
             "Layer indexing has changed for an unknown reason. Please open an issue:\n"
             f"Caller at index {self._layer_index}: {self.name}"
             f"Layers: {layers}.\n"
         )
     if not name.startswith("_layered_"):
         raise errors.NevergradValueError("For consistency, only _layered functions can be used.")
     for layer in reversed(self._layers[: self._layer_index]):
         func = getattr(layer, name)
         if func.__func__ is not getattr(Layered, name):  # skip unnecessary stack calls
             return func(*args, **kwargs)
     types = [type(x) for x in self._layers]
     raise errors.NevergradNotImplementedError(f"No implementation for {name} on layers: {types}.")
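
For context, a small inspection sketch of the layer stack this dispatch walks through; it relies on the private _layers and name attributes used in the snippet, purely for illustration:

import nevergrad as ng

param = ng.p.Scalar().set_bounds(lower=0.0, upper=10.0)
# Each parameter is a stack of layers; _layered_* calls are forwarded down this list.
print([layer.name for layer in param._layers])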
Example #7
    def set_bounds(
        self: D,
        lower: tp.BoundValue = None,
        upper: tp.BoundValue = None,
        method: str = "bouncing",
        full_range_sampling: tp.Optional[bool] = None,
    ) -> D:
        """Bounds all real values into [lower, upper] using a provided method

        Parameters
        ----------
        lower: float or None
            minimum value
        upper: float or None
            maximum value
        method: str
            One of the following choices:

            - "bouncing": bounce on border (at most once). This is a variant of clipping,
              avoiding bounds over-sampling (default).
            - "clipping": clips the values inside the bounds. This is efficient but leads
              to over-sampling on the bounds.
            - "constraint": adds a constraint (see register_cheap_constraint) which leads to rejecting mutations
              reaching beyond the bounds. This avoids oversampling the boundaries, but can be inefficient in large
              dimension.
            - "arctan": maps the space [lower, upper] to to all [-inf, inf] using arctan transform. This is efficient
              but it completely reshapes the space (a mutation in the center of the space will be larger than a mutation
              close to the bounds), and reaching the bounds is equivalent to reaching the infinity.
            - "tanh": same as "arctan", but with a "tanh" transform. "tanh" saturating much faster than "arctan", it can lead
              to unexpected behaviors.
        full_range_sampling: Optional bool
            Changes the default behavior of the "sample" method (aka creating a child and mutating it from the current instance)
            or the sampling optimizers, to creating a child with a value sampled uniformly (or log-uniformly) within
            the whole range of the bounds. The "sample" method is used by some algorithms to create an initial population.
            This is activated by default if both bounds are provided.

        Notes
        -----
        - "tanh" reaches the boundaries really quickly, while "arctan" is much softer
        - only "clipping" accepts partial bounds (None values)
        """  # TODO improve description of methods
        from . import _datalayers

        # if method == "constraint":
        #     method = "clipping"
        value = self.value
        if method == "constraint":
            layer = _datalayers.BoundLayer(
                lower=lower, upper=upper, uniform_sampling=full_range_sampling)
            checker = utils.BoundChecker(*layer.bounds)
            self.register_cheap_constraint(checker)
        else:
            layer = _datalayers.Bound(
                lower=lower, upper=upper, method=method, uniform_sampling=full_range_sampling
            )
        layer._LEGACY = True
        layer(self, inplace=True)
        _fix_legacy(self)
        try:
            self.value = value
        except ValueError as e:
            raise errors.NevergradValueError(
                "Current value is not within bounds, please update it first"
            ) from e
        return self
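
A usage sketch of the two paths above, assuming the public API: a transform-based method on one hand, and the error raised when the current value already violates the requested bounds on the other.

import nevergrad as ng

arr = ng.p.Array(init=[5.0, 5.0]).set_bounds(lower=0.0, upper=10.0, method="clipping")
arr.mutate()  # mutations are kept inside [0, 10] by the clipping transform
print(arr.value)

bad = ng.p.Scalar(init=50.0)
try:
    bad.set_bounds(lower=0.0, upper=10.0)  # 50.0 is outside the requested bounds
except ValueError as e:
    print(e)  # "Current value is not within bounds, please update it first"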
Example #8
 def _check(self, value: np.ndarray) -> None:
     if not utils.BoundChecker(*self.bounds)(value):
         raise errors.NevergradValueError(
             "New value does not comply with bounds")