Example #1
 def __init__(
     self,
     independent_sigma: bool = True,
     mutable_sigma: bool = True,
     multiobjective: bool = False,
     recombination: str = "crossover",
     optimum: tp.Tuple[int, int] = (80, 100)
 ) -> None:
     assert recombination in ("crossover", "average")
     self._optimum = np.array(optimum, dtype=float)
     parametrization = p.Array(shape=(2, ), mutable_sigma=mutable_sigma)
     init = np.array([1.0, 1.0] if independent_sigma else [1.0],
                     dtype=float)
      sigma = p.Array(init=init).set_mutation(exponent=2.0) if mutable_sigma else p.Constant(init)
      parametrization.set_mutation(sigma=sigma)
      parametrization.set_recombination("average" if recombination == "average" else p.mutation.Crossover())
      self._multiobjective = MultiobjectiveFunction(self._multifunc, 2 * self._optimum)
     super().__init__(
         self._multiobjective if multiobjective else self._monofunc,
         parametrization.set_name(""))  # type: ignore
     descr = dict(independent_sigma=independent_sigma,
                  mutable_sigma=mutable_sigma,
                  multiobjective=multiobjective,
                  optimum=optimum,
                  recombination=recombination)
     self._descriptors.update(descr)
     self.register_initialization(**descr)
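A minimal standalone sketch of the mutable-sigma pattern used above, assuming nevergrad is installed and imported as ng (the variable names are illustrative, not from the example):

import numpy as np
import nevergrad as ng

# an array whose mutation step-size is itself a mutable parameter
param = ng.p.Array(shape=(2,), mutable_sigma=True)
sigma = ng.p.Array(init=np.array([1.0, 1.0])).set_mutation(exponent=2.0)
param.set_mutation(sigma=sigma)
child = param.spawn_child()
child.mutate()  # mutates the values, and mutates the attached sigma as well since mutable_sigma=True
print(child.value)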
Example #2
def test_bound_scaler() -> None:
    ref = p.Instrumentation(
        p.Array(shape=(1, 2)).set_bounds(-12, 12, method="arctan"),
        p.Array(shape=(2, )).set_bounds(-12, 12, full_range_sampling=False),
        lr=p.Log(lower=0.001, upper=1000),
        stuff=p.Scalar(lower=-1, upper=2),
        unbounded=p.Scalar(lower=-1, init=0.0),
        value=p.Scalar(),
        letter=p.Choice("abc"),
    )
    param = ref.spawn_child()
    scaler = utils.BoundScaler(param)
    output = scaler.transform([1.0] * param.dimension, lambda x: x)
    param.set_standardized_data(output)
    (array1, array2), values = param.value
    np.testing.assert_array_almost_equal(array1, [[12, 12]])
    np.testing.assert_array_almost_equal(array2, [1, 1])
    assert values["stuff"] == 2
    assert values["unbounded"] == 1
    assert values["value"] == 1
    np.testing.assert_almost_equal(values["lr"], 1000)
    # again, on the middle point
    output = scaler.transform([0] * param.dimension, lambda x: x)
    param.set_standardized_data(output)
    np.testing.assert_almost_equal(param.value[1]["lr"], 1.0)
    np.testing.assert_almost_equal(param.value[1]["stuff"], 0.5)
Example #3
def test_bound_scaler() -> None:
    ref = p.Instrumentation(
        p.Array(shape=(1, 2)).set_bounds(-12, 12, method="arctan"),
        p.Array(shape=(2, )).set_bounds(-12, 12, full_range_sampling=False),
        lr=p.Log(lower=0.001, upper=1000),
        stuff=p.Scalar(lower=-1, upper=2),
        unbounded=p.Scalar(lower=-1, init=0.0),
        value=p.Scalar(),
        letter=p.Choice("abc"),
    )
    # make sure the order is preserved using legacy split method
    expected = [x[1] for x in split_as_data_parameters(ref)]
    assert p.helpers.list_data(ref) == expected
    # check the bounds
    param = ref.spawn_child()
    scaler = utils.BoundScaler(param)
    output = scaler.transform([1.0] * param.dimension, lambda x: x)
    param.set_standardized_data(output)
    (array1, array2), values = param.value
    np.testing.assert_array_almost_equal(array1, [[12, 12]])
    np.testing.assert_array_almost_equal(array2, [1, 1])
    assert values["stuff"] == 2
    assert values["unbounded"] == 1
    assert values["value"] == 1
    assert values["lr"] == pytest.approx(1000)
    # again, on the middle point
    output = scaler.transform([0] * param.dimension, lambda x: x)
    param.set_standardized_data(output)
    assert param.value[1]["lr"] == pytest.approx(1.0)
    assert param.value[1]["stuff"] == pytest.approx(0.5)
Example #4
def _make_parametrization(name: str,
                          dimension: int,
                          bounding_method: str = "bouncing",
                          rolling: bool = False) -> p.Array:
    """Creates appropriate parametrization for a Photonics problem

    Parameters
    ----------
    name: str
        problem name, among bragg, chirped and morpho
    dimension: int
        size of the problem among 16, 40 and 60 (morpho) or 80 (bragg and chirped)
    bounding_method: str
        transform type for the bounding ("arctan", "tanh", "bouncing" or "clipping", see `Array.bounded`)
    rolling: bool
        whether to add a rolling (translation) mutation to the array

    Returns
    -------
    Array
        the parametrization for the problem
    """
    if name == "bragg":
        shape = (2, dimension // 2)
        bounds = [(2, 3), (30, 180)]
    elif name == "chirped":
        shape = (1, dimension)
        bounds = [(30, 180)]
    elif name == "morpho":
        shape = (4, dimension // 4)
        bounds = [(0, 300), (0, 600), (30, 600), (0, 300)]
    else:
        raise NotImplementedError(f"Transform for {name} is not implemented")
    divisor = max(2, len(bounds))
    assert not dimension % divisor, f"points length should be a multiple of {divisor}, got {dimension}"
    assert shape[0] * shape[1] == dimension, f"Cannot work with dimension {dimension} for {name}: not divisible by {shape[0]}."
    b_array = np.array(bounds)
    assert b_array.shape[0] == shape[0]  # pylint: disable=unsubscriptable-object
    init = np.sum(b_array, axis=1, keepdims=True).dot(np.ones((1, shape[1]))) / 2
    array = p.Array(init=init)
    if bounding_method not in ("arctan", "tanh"):
        # sigma must be adapted for clipping and constraint methods
        sigma = p.Array(
            init=[[10.0]] if name != "bragg" else [[0.03], [10.0]]
        ).set_mutation(exponent=2.0)  # type: ignore
        array.set_mutation(sigma=sigma)
    if rolling:
        array.set_mutation(custom=p.Choice(["gaussian", "cauchy", p.mutation.Translation(axis=1)]))
    array.set_bounds(b_array[:, [0]],
                     b_array[:, [1]],
                     method=bounding_method,
                     full_range_sampling=True)
    array.set_recombination(p.mutation.Crossover(axis=1)).set_name("")
    assert array.dimension == dimension, f"Unexpected {array} for dimension {dimension}"
    return array
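An illustrative call of the helper above (it is module-private, so this sketch only runs next to its definition); the expectations follow from the bragg branch and the final assert:

array = _make_parametrization("bragg", 80)  # shape (2, 40), rows bounded by (2, 3) and (30, 180)
assert array.dimension == 80                # guaranteed by the assert at the end of the helper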
Example #5
def test_experiment_function() -> None:
    param = p.Instrumentation(
        p.Choice([1, 12]),
        "constant",
        p.Array(shape=(2, 2)),
        constkwarg="blublu",
        plop=p.Choice([3, 4]),
    )
    with pytest.raises(RuntimeError):
        base.ExperimentFunction(_arg_return, param)
    param.set_name("myparam")
    ifunc = base.ExperimentFunction(_arg_return, param)
    np.testing.assert_equal(ifunc.dimension, 8)
    data = [-100.0, 100, 1, 2, 3, 4, 100, -100]
    args0, kwargs0 = ifunc.parametrization.spawn_child().set_standardized_data(
        data).value
    output: tp.Any = ifunc(*args0, **kwargs0)
    args: tp.Any = output[0]
    kwargs: tp.Any = output[1]
    testing.printed_assert_equal(args, [12, "constant", [[1, 2], [3, 4]]])
    testing.printed_assert_equal(kwargs, {"constkwarg": "blublu", "plop": 3})
    testing.printed_assert_equal(
        ifunc.descriptors,
        {
            "dimension": 8,
            "name": "_arg_return",
            "function_class": "ExperimentFunction",
            "parametrization": "myparam"
        },
    )
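The dimension of 8 asserted above decomposes component by component; a quick sketch of the accounting, assuming nevergrad is imported as ng:

import nevergrad as ng

param = ng.p.Instrumentation(
    ng.p.Choice([1, 12]),      # 2 dimensions: one softmax weight per choice
    "constant",                # 0 dimensions: plain constants add nothing
    ng.p.Array(shape=(2, 2)),  # 4 dimensions
    constkwarg="blublu",       # 0 dimensions
    plop=ng.p.Choice([3, 4]),  # 2 dimensions
)
print(param.dimension)  # 8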
Example #6
 def __init__(
         self,
         names: tp.Tuple[str, ...] = ("sphere", "cigar", "ellipsoid"),
         dimensions: tp.Tuple[int, ...] = (7, 7, 7),
         num_workers: int = 10,
 ):
     for name in names:
         if name not in corefuncs.registry:
             available = ", ".join(sorted(corefuncs.registry))
             raise ValueError(
                 f'Unknown core function "{name}" in PBT. Available names are:\n-----\n{available}'
             )
     self._funcs = [corefuncs.registry[name] for name in names]
     self._optima = [np.random.normal(size=d) for d in dimensions]
     assert len(names) == len(dimensions)
     self._hyperparameter_dimension = len(names)
     self._dimensions = dimensions
     self._total_dimension = sum(dimensions)
      parametrization = p.Array(shape=(self._hyperparameter_dimension,)).set_name("")
     # Population of checkpoints (that are optimized by the underlying optimization method)
     # and parameters (that we do optimize).
      self._population_checkpoints: tp.List[np.ndarray] = [np.zeros(self._total_dimension)] * num_workers
      self._population_parameters: tp.List[np.ndarray] = [np.zeros(self._hyperparameter_dimension)] * num_workers
     self._population_fitness: tp.List[float] = [float("inf")] * num_workers
     super().__init__(self._func, parametrization)
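The registry lookup above is how core function names are resolved; a small sketch, assuming the usual layout where corefuncs sits under nevergrad.functions:

from nevergrad.functions import corefuncs

print(sorted(corefuncs.registry))      # available names, including "sphere", "cigar" and "ellipsoid"
sphere = corefuncs.registry["sphere"]  # plain callables taking a np.ndarray and returning a float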
Example #7
def test_experiment_function() -> None:
    ifunc = base.ExperimentFunction(
        _arg_return,
        p.Instrumentation(  # type: ignore
            p.Choice([1, 12]),
            "constant",
            p.Array(shape=(2, 2)),
            constkwarg="blublu",
            plop=p.Choice([3, 4]),
        ))
    np.testing.assert_equal(ifunc.dimension, 8)
    data = [-100.0, 100, 1, 2, 3, 4, 100, -100]
    args0, kwargs0 = ifunc.parametrization.spawn_child().set_standardized_data(
        data).value
    output = ifunc(*args0, **kwargs0)  # this is very stupid and should be removed when Parameter is in use
    args: tp.Any = output[0]  # type: ignore
    kwargs: tp.Any = output[1]  # type: ignore
    testing.printed_assert_equal(args, [12, "constant", [[1, 2], [3, 4]]])
    testing.printed_assert_equal(kwargs, {"constkwarg": "blublu", "plop": 3})
    instru_str = ("Instrumentation(Tuple(Choice(choices=Tuple(1,12),"
                  "weights=Array{(1,2)}),constant,"
                  "Array{(2,2)}),"
                  "Dict(constkwarg=blublu,plop=Choice(choices=Tuple(3,4),"
                  "weights=Array{(1,2)})))")
    testing.printed_assert_equal(
        ifunc.descriptors,
        {
            "dimension": 8,
            "name": "_arg_return",
            "function_class": "ExperimentFunction",
            "parametrization": instru_str,
        },
    )
Example #8
 def __init__(self, proximity_array: np.ndarray) -> None:
     self._proximity = proximity_array
     self._proximity_2 = self._proximity ** 2
      self._proximity_2[self._proximity_2 == 0] = 1  # avoid ZeroDivision (for diagonal terms, or identical points)
     super().__init__(self._compute_distance, p.Array(shape=(self._proximity.shape[0], 2)))
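The in-place masking that guards the division is worth isolating; a self-contained numpy sketch with a toy proximity matrix:

import numpy as np

proximity = np.array([[0.0, 2.0], [2.0, 0.0]])
proximity_2 = proximity ** 2
proximity_2[proximity_2 == 0] = 1  # diagonal terms (and identical points) would otherwise divide by zero
print(1.0 / proximity_2)           # now safe to use as a divisor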
Example #9
 def __init__(self, points: np.ndarray, num_clusters: int, rescale: bool = True) -> None:
     self.num_clusters = num_clusters
     self._points = np.array(points, copy=True)
     if rescale:
         self._points -= np.mean(self._points, axis=0, keepdims=True)
         self._points /= np.std(self._points, axis=0, keepdims=True)
     super().__init__(self._compute_distance, p.Array(shape=(num_clusters, points.shape[1])))
Example #10
 def __init__(self, x: np.ndarray, y: np.ndarray) -> None:
     assert x.ndim == 1
     assert y.ndim == 1
     self._x = x
     self._y = y
     super().__init__(self._compute_loss, p.Array(shape=(10,)))
     self.register_initialization(x=x, y=y)
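Example #11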
 def __init__(
     self,
     independent_sigma: bool = True,
     mutable_sigma: bool = True,
     multiobjective: bool = False,
     recombination: str = "crossover",
     optimum: tp.Tuple[int, int] = (80, 100),
 ) -> None:
     assert recombination in ("crossover", "average")
     self._optimum = np.array(optimum, dtype=float)
     parametrization = p.Array(shape=(2,), mutable_sigma=mutable_sigma)
     init = np.array([1.0, 1.0] if independent_sigma else [1.0], dtype=float)
     sigma = p.Array(init=init).set_mutation(exponent=2.0) if mutable_sigma else p.Constant(init)
     parametrization.set_mutation(sigma=sigma)
     parametrization.set_recombination("average" if recombination == "average" else p.mutation.Crossover())
     self.multiobjective_upper_bounds = np.array(2 * self._optimum) if multiobjective else None
     super().__init__(self._multifunc if multiobjective else self._monofunc, parametrization.set_name(""))  # type: ignore
Example #12
 def __init__(self,
              dimension: int = 500,
              complex_tsp: bool = False) -> None:
     super().__init__(self._simulate_stsp, p.Array(shape=(dimension, )))
     self.order = np.arange(0, self.dimension)
     self.complex = complex_tsp
     self.x = self.parametrization.random_state.normal(size=self.dimension)
     self.y = self.parametrization.random_state.normal(size=self.dimension)
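Drawing the instance data from parametrization.random_state, as above, keeps the problem reproducible under external seeding; a minimal sketch assuming nevergrad as ng:

import nevergrad as ng

param = ng.p.Array(shape=(4,))
param.random_state.seed(12)            # the attached numpy RandomState can be seeded externally
x = param.random_state.normal(size=4)  # deterministic given the seed above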
Example #13
    def __init__(self,
                 parametrization: IntOrParameter,
                 budget: tp.Optional[int] = None,
                 num_workers: int = 1) -> None:
        if self.no_parallelization and num_workers > 1:
            raise ValueError(
                f"{self.__class__.__name__} does not support parallelization")
        # "seedable" random state: externally setting the seed will provide deterministic behavior
        # you can also replace or reinitialize this random state
        self.num_workers = int(num_workers)
        self.budget = budget

        # How do we deal with cheap constraints, i.e. constraints which are fast, low-resource, and easy to evaluate?
        # True ==> we penalize them (infinite values for candidates which violate the constraint).
        # False ==> we repeat the ask until we solve the problem.
        self._constraints_manager = utils.ConstraintManager()
        self._penalize_cheap_violations = False

        self.parametrization = (
            parametrization if not isinstance(parametrization, (int, np.int))
            else p.Array(shape=(parametrization,))
        )
        self.parametrization.freeze()  # avoids issues!
        if not self.dimension:
            raise ValueError(
                "No variable to optimize in this parametrization.")
        self.name = self.__class__.__name__  # printed name in repr
        # keep a record of evaluations, and current bests which are updated at each new evaluation
        # dict-like structure taking np.ndarray as keys and Value as values
        self.archive: utils.Archive[utils.MultiValue] = utils.Archive()
        self.current_bests = {
            x: utils.MultiValue(self.parametrization,
                                np.inf,
                                reference=self.parametrization)
            for x in ["optimistic", "pessimistic", "average", "minimum"]
        }
        # pruning function, called at each "tell"
        # this can be deactivated or modified by each implementation
        self.pruning: tp.Optional[_PruningCallable] = utils.Pruning.sensible_default(
            num_workers=num_workers, dimension=self.parametrization.dimension
        )
        # multiobjective
        self._MULTIOBJECTIVE_AUTO_BOUND = mobj.AUTO_BOUND
        self._hypervolume_pareto: tp.Optional[mobj.HypervolumePareto] = None
        # instance state
        self._asked: tp.Set[str] = set()
        self._num_objectives = 0
        self._suggestions: tp.Deque[p.Parameter] = deque()
        self._num_ask = 0
        self._num_tell = 0  # increases after each successful tell
        self._num_tell_not_asked = 0
        self._callbacks: tp.Dict[str, tp.List[tp.Any]] = {}
        # to make the optimize function stoppable halfway through
        self._running_jobs: tp.List[tp.Tuple[p.Parameter, tp.JobLike[tp.Loss]]] = []
        self._finished_jobs: tp.Deque[tp.Tuple[p.Parameter, tp.JobLike[tp.Loss]]] = deque()
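The isinstance branch above is what lets callers pass a plain int as the parametrization; a hedged usage sketch with one registered optimizer (OnePlusOne), assuming the standard nevergrad entry point:

import nevergrad as ng

opt = ng.optimizers.OnePlusOne(parametrization=2, budget=100)  # the int 2 is promoted to p.Array(shape=(2,))
recommendation = opt.minimize(lambda x: float(((x - 0.5) ** 2).sum()))
print(recommendation.value)  # a np.ndarray of shape (2,)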
Example #14
 def __init__(self,
              num_rollouts: int,
              random_state: tp.Optional[int] = None) -> None:
     super().__init__(self._simulate, p.Array(shape=self.policy_dim))
     self.num_rollouts = num_rollouts
     self.random_state = random_state
      self._descriptors.pop("random_state", None)  # remove it from automatically added descriptors
Example #15
def test_instrumented_function_kwarg_order() -> None:
    ifunc = base.ExperimentFunction(_arg_return, p.Instrumentation(  # type: ignore
        kw4=p.Choice([1, 0]), kw2="constant", kw3=p.Array(shape=(2, 2)), kw1=p.Scalar(2.0).set_mutation(sigma=2.0)
    ))
    np.testing.assert_equal(ifunc.dimension, 7)
    data = np.array([-1, 1, 2, 3, 4, 100, -100])
    args0, kwargs0 = ifunc.parametrization.spawn_child().set_standardized_data(data).value
    # this is very stupid and should be removed when Parameter is in use
    kwargs: tp.Any = ifunc(*args0, **kwargs0)[1]   # type: ignore
    testing.printed_assert_equal(kwargs, {"kw1": 0, "kw2": "constant", "kw3": [[1, 2], [3, 4]], "kw4": 1})
Example #16
 def __init__(self, game: str = "war") -> None:
     self.game = game
     self.game_object = _Game()
      dimension = self.game_object.play_game(self.game) * 2  # times 2 because we consider both players separately
      super().__init__(self._simulate_game, p.Array(shape=(dimension,)))
      self.parametrization.descriptors.deterministic_function = False
      self.parametrization.descriptors.metrizable = game not in ["war", "batawaf"]
Example #17
 def __init__(self, module: nn.Module,
              deterministic: bool = True,
              instrumentation_std: float = 0.1) -> None:
     super().__init__()
     self.deterministic = deterministic
     self.module = module
     kwargs = {
         name: p.Array(shape=value.shape).set_mutation(sigma=instrumentation_std).set_bounds(-10, 10, method="arctan")
         for name, value in module.state_dict().items()  # type: ignore
     }  # bounded to avoid overflows
     self.instrumentation = p.Instrumentation(**kwargs)
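Example #18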
def test_oracle() -> None:
    func = functionlib.ArtificialFunction("sphere", 5, noise_level=0.1)
    x = np.array([1, 2, 1, 0, 0.5])
    y1 = func(x)  # returns a float
    y2 = func(x)  # returns a different float since the function is noisy
    np.testing.assert_raises(AssertionError,
                             np.testing.assert_array_almost_equal, y1, y2)
    reco = p.Array(init=x)
    y3 = func.evaluation_function(reco)  # returns a float
    # returns the same float (no noise for oracles + sphere function is deterministic)
    y4 = func.evaluation_function(reco)
    np.testing.assert_array_almost_equal(y3, y4)  # should be equal
Example #19
def _make_instrumentation(name: str,
                          dimension: int,
                          transform: str = "tanh") -> p.Instrumentation:
    """Creates appropriate instrumentation for a Photonics problem

    Parameters
    ----------
    name: str
        problem name, among bragg, chirped and morpho
    dimension: int
        size of the problem among 16, 40 and 60 (morpho) or 80 (bragg and chirped)
    transform: str
        transform type for the bounding ("arctan", "tanh" or "clipping", see `Array.bounded`)

    Returns
    -------
    Instrumentation
        the instrumentation for the problem
    """
    assert not dimension % 4, f"points length should be a multiple of 4, got {dimension}"
    n = dimension // 4
    arrays: tp.List[p.Array] = []
    ones = np.ones((n, ), dtype=float)
    if name == "bragg":
        # n multiple of 2, from 16 to 80
        # main (n=60): [2,3]^30 x [0,300]^30
        arrays.extend([
            p.Array(init=2.5 * ones).set_bounds(2, 3, method=transform)
            for _ in range(2)
        ])
        arrays.extend([
            p.Array(init=150 * ones).set_bounds(0, 300, method=transform)
            for _ in range(2)
        ])
    elif name == "chirped":
        # n multiple of 2, from 10 to 80
        # domain (n=60): [0,300]^60
        arrays = [
            p.Array(init=150 * ones).set_bounds(0, 300, method=transform)
            for _ in range(4)
        ]
    elif name == "morpho":
        # n multiple of 4, from 16 to 60
        # domain (n=60): [0,300]^15 x [0,600]^15 x [30,600]^15 x [0,300]^15
        arrays.extend([
            p.Array(init=150 * ones).set_bounds(0, 300, method=transform),
            p.Array(init=300 * ones).set_bounds(0, 600, method=transform),
            p.Array(init=315 * ones).set_bounds(30, 600, method=transform),
            p.Array(init=150 * ones).set_bounds(0, 300, method=transform)
        ])
    else:
        raise NotImplementedError(f"Transform for {name} is not implemented")
    instrumentation = p.Instrumentation(*arrays)
    assert instrumentation.dimension == dimension
    return instrumentation
Example #20
    def __init__(
        self,
        num_dams: int = 13,
        depth: int = 3,
        width: int = 3,
        year_to_day_ratio: float = 2.,
        constant_to_year_ratio: float = 1.,
        back_to_normal: float = 0.5,
        consumption_noise: float = 0.1,
        num_thermal_plants: int = 7,
        num_years: int = 1,
        failure_cost: float = 500.,
    ) -> None:
        params = {x: y for x, y in locals().items() if x not in ["self", "__class__"]}  # for copying
        self.num_dams = num_dams
        self.losses: tp.List[float] = []
        self.marginal_costs: tp.List[float] = []
        # Parameters describing the problem.
        self.year_to_day_ratio = year_to_day_ratio
        self.constant_to_year_ratio = constant_to_year_ratio
        self.back_to_normal = back_to_normal
        self.consumption_noise = consumption_noise
        self.num_thermal_plants = num_thermal_plants
        self.number_of_years = num_years
        self.failure_cost = failure_cost
        self.hydro_prod_per_time_step: tp.List[tp.Any] = []  # TODO @oteytaud initial values?
        self.consumption_per_time_step: tp.List[tp.Any] = []

        self.average_consumption = self.constant_to_year_ratio * self.year_to_day_ratio
        self.thermal_power_capacity = self.average_consumption * np.random.rand(self.num_thermal_plants)
        self.thermal_power_prices = np.random.rand(num_thermal_plants)
        dam_agents: tp.List[tp.Any] = []
        for _ in range(num_dams):
            dam_agents += [Agent(10 + num_dams + 2 * self.num_thermal_plants, depth, width)]
        # dimension = int(sum([a.dimension for a in dam_agents]))
        parameter = p.Instrumentation(
            *[p.Array(shape=(int(a.dimension), ))
              for a in dam_agents]).set_name("")
        super().__init__(self._simulate_power_system, parameter)
        self.parametrization.descriptors.deterministic_function = False
        self.register_initialization(**params)
        self.dam_agents = dam_agents
        self._descriptors.update(num_dams=num_dams, depth=depth, width=width)
Example #21
 def __init__(self,
              instrumentation: IntOrParameter,
              budget: Optional[int] = None,
              num_workers: int = 1) -> None:
     if self.no_parallelization and num_workers > 1:
         raise ValueError(
             f"{self.__class__.__name__} does not support parallelization")
     # "seedable" random state: externally setting the seed will provide deterministic behavior
     # you can also replace or reinitialize this random state
     self.num_workers = int(num_workers)
     self.budget = budget
      # How do we deal with cheap constraints, i.e. constraints which are fast, low-resource, and easy to evaluate?
      # True ==> we penalize them (infinite values for candidates which violate the constraint).
      # False ==> we repeat the ask until we solve the problem.
     self._penalize_cheap_violations = False
      self.instrumentation = (
          instrumentation if not isinstance(instrumentation, (int, np.int))
          else p.Array(shape=(instrumentation,))
      )
     self.instrumentation.freeze()  # avoids issues!
     if not self.dimension:
         raise ValueError(
             "No variable to optimize in this instrumentation.")
     self.create_candidate = CandidateMaker()
     self.name = self.__class__.__name__  # printed name in repr
     # keep a record of evaluations, and current bests which are updated at each new evaluation
      # dict-like structure taking np.ndarray as keys and Value as values
      self.archive: utils.Archive[utils.Value] = utils.Archive()
     self.current_bests = {
         x: utils.Point(np.zeros(self.dimension, dtype=np.float),
                        utils.Value(np.inf))
         for x in ["optimistic", "pessimistic", "average"]
     }
      # pruning function, called at each "tell"
      # this can be deactivated or modified by each implementation
      self.pruning: Optional[Callable[[utils.Archive[utils.Value]], utils.Archive[utils.Value]]] = (
          utils.Pruning.sensible_default(num_workers=num_workers, dimension=self.instrumentation.dimension)
      )
     # instance state
     self._asked: Set[str] = set()
     self._suggestions: Deque[p.Parameter] = deque()
     self._num_ask = 0
     self._num_tell = 0
     self._num_tell_not_asked = 0
     self._callbacks: Dict[str, List[Any]] = {}
      # to make the optimize function stoppable halfway through
      self._running_jobs: List[Tuple[p.Parameter, JobLike[float]]] = []
      self._finished_jobs: Deque[Tuple[p.Parameter, JobLike[float]]] = deque()
Example #22
 def __init__(
     self,
     num_rollouts: int,
     activation: str = "tanh",
     intermediate_layer_dim: tp.Optional[tuple] = None,
     deterministic_sim: bool = True,
     noise_level: float = 0.0,
     states_normalization: bool = True,
     layer_rescaling_coef: tp.Optional[tuple] = None,
     random_state: tp.Optional[int] = None,
 ) -> None:
      if intermediate_layer_dim is not None:
          self.policy_dim = (self.policy_dim[0],) + intermediate_layer_dim + (self.policy_dim[1],)  # type: ignore
      list_parametrizations = [
          p.Array(shape=(a, b)).set_name(f"layer_{a}_{b}")
          for a, b in zip(self.policy_dim[:-1], self.policy_dim[1:])
      ]
      parametrization: p.Tuple = p.Tuple(*list_parametrizations).set_name(self.env_name)
     super().__init__(self._simulate, parametrization)
     self.num_rollouts = num_rollouts
     self.random_state = random_state
     self.activation = activation
     self.states_normalization = states_normalization
     self.noise_level = noise_level
     self.deterministic_sim = deterministic_sim
     self.layer_rescaling_coef = layer_rescaling_coef
      if layer_rescaling_coef is None:
          self.layer_rescaling_coef = np.ones(len(self.policy_dim) - 1)  # type: ignore
     self.add_descriptors(
         num_rollouts=num_rollouts,
         intermediate_layer_dim=intermediate_layer_dim,
         activation=activation,
         states_normalization=states_normalization,
         noise_level=self.noise_level,
         deterministic_sim=deterministic_sim,
     )
     if self.noise_level > 0.0 or not deterministic_sim:
         self.parametrization.descriptors.deterministic_function = False
      self._descriptors.pop("random_state", None)  # remove it from automatically added descriptors
Example #23
 def __init__(self, name: str, block_dimension: int, num_blocks: int = 1,  # pylint: disable=too-many-arguments
              useless_variables: int = 0, noise_level: float = 0, noise_dissymmetry: bool = False,
              rotation: bool = False, translation_factor: float = 1., hashing: bool = False,
              aggregator: str = "max") -> None:
     # pylint: disable=too-many-locals
     self.name = name
     self._parameters = {x: y for x, y in locals().items() if x not in ["__class__", "self"]}
     # basic checks
     assert noise_level >= 0, "Noise level must be greater or equal to 0"
     if not all(isinstance(x, bool) for x in [noise_dissymmetry, hashing, rotation]):
         raise TypeError("hashing and rotation should be bools")
     for param, mini in [("block_dimension", 1), ("num_blocks", 1), ("useless_variables", 0)]:
         value = self._parameters[param]
         if not isinstance(value, int):
             raise TypeError(f'"{param}" must be an int')
         if value < mini:
             raise ValueError(f'"{param}" must be greater or equal to {mini}')
     if not isinstance(translation_factor, (float, int)):
         raise TypeError(f"Got non-float value {translation_factor}")
     if name not in corefuncs.registry:
         available = ", ".join(self.list_sorted_function_names())
         raise ValueError(f'Unknown core function "{name}". Available names are:\n-----\n{available}')
     # record necessary info and prepare transforms
     self._dimension = block_dimension * num_blocks + useless_variables
     self._func = corefuncs.registry[name]
     # special case
     info = corefuncs.registry.get_info(self._parameters["name"])
     only_index_transform = info.get("no_transform", False)
     # variable
     self.transform_var = ArtificialVariable(dimension=self._dimension, num_blocks=num_blocks, block_dimension=block_dimension,
                                             translation_factor=translation_factor, rotation=rotation, hashing=hashing,
                                             only_index_transform=only_index_transform)
     parametrization = p.Array(shape=(1,) if hashing else (self._dimension,)).set_name("")
     if noise_level > 0:
         parametrization.descriptors.deterministic_function = False
     super().__init__(self.noisy_function, parametrization)
     self.register_initialization(**self._parameters)
     self._aggregator = {"max": np.max, "mean": np.mean, "sum": np.sum}[aggregator]
     info = corefuncs.registry.get_info(self._parameters["name"])
     # add descriptors
     self._descriptors.update(**self._parameters, useful_dimensions=block_dimension * num_blocks,
                              discrete=any(x in name for x in ["onemax", "leadingones", "jump"]))
     # transforms are initialized at runtime to avoid slow init
     if hasattr(self._func, "get_postponing_delay"):
          raise RuntimeError('"get_postponing_delay" has been replaced by "compute_pseudotime" and has been aggressively deprecated')
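A usage sketch mirroring test_oracle in Example #18 above, assuming ArtificialFunction is re-exported at nevergrad.functions as in recent layouts:

from nevergrad.functions import ArtificialFunction

func = ArtificialFunction("sphere", block_dimension=5, noise_level=0.1)
print(func.dimension)  # 5 = block_dimension * num_blocks + useless_variables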
Example #24
    def __init__(
        self,
        num_dams: int = 13,
        depth: int = 3,
        width: int = 3,
        year_to_day_ratio: float = 2.0,
        constant_to_year_ratio: float = 1.0,
        back_to_normal: float = 0.5,
        consumption_noise: float = 0.1,
        num_thermal_plants: int = 7,
        num_years: float = 1.0,
        failure_cost: float = 500.0,
    ) -> None:
        self.num_dams = num_dams
        self.losses: tp.List[float] = []
        self.marginal_costs: tp.List[float] = []
        # Parameters describing the problem.
        self.year_to_day_ratio = year_to_day_ratio
        self.constant_to_year_ratio = constant_to_year_ratio
        self.back_to_normal = back_to_normal
        self.consumption_noise = consumption_noise
        self.num_thermal_plants = num_thermal_plants
        self.number_of_years = num_years
        self.failure_cost = failure_cost
        self.hydro_prod_per_time_step: tp.List[tp.Any] = []  # TODO @oteytaud initial values?
        self.consumption_per_time_step: tp.List[tp.Any] = []

        self.average_consumption = self.constant_to_year_ratio * self.year_to_day_ratio
        self.thermal_power_capacity = self.average_consumption * np.random.rand(self.num_thermal_plants)
        self.thermal_power_prices = np.random.rand(num_thermal_plants)
        self.dam_agents = [
            Agent(10 + num_dams + 2 * self.num_thermal_plants, depth, width)
            for _ in range(num_dams)
        ]
        parameter = p.Instrumentation(
            *[p.Array(shape=(int(a.dimension), ))
              for a in self.dam_agents]).set_name("")
        super().__init__(self._simulate_power_system, parameter)
        self.parametrization.descriptors.deterministic_function = False
Example #25
 def __init__(self, x: np.ndarray, y: np.ndarray) -> None:
     assert x.ndim == 1
     assert y.ndim == 1
     self._x = x
     self._y = y
     super().__init__(self._compute_loss, p.Array(shape=(10,)))
Example #26
def test_deterministic_data_setter() -> None:
    instru = p.Instrumentation(p.Choice([0, 1, 2, 3]), y=p.Choice([0, 1, 2, 3]))
    ifunc = base.ExperimentFunction(_Callable(), instru)
    data = [0.01, 0, 0, 0, 0.01, 0, 0, 0]
    for _ in range(20):
        args, kwargs = ifunc.parametrization.spawn_child(
        ).set_standardized_data(data, deterministic=True).value
        testing.printed_assert_equal(args, [0])
        testing.printed_assert_equal(kwargs, {"y": 0})
    arg_sum, kwarg_sum = 0, 0
    for _ in range(24):
        args, kwargs = ifunc.parametrization.spawn_child(
        ).set_standardized_data(data, deterministic=False).value
        arg_sum += args[0]
        kwarg_sum += kwargs["y"]
    assert arg_sum != 0
    assert kwarg_sum != 0


@testing.parametrized(
    floats=((p.Scalar(), p.Scalar(init=12.0)), True, False),
    array_int=((p.Scalar(), p.Array(shape=(1,)).set_integer_casting()), False, False),
    softmax_noisy=((p.Choice(["blue", "red"]), p.Array(shape=(1,))), True, True),
    softmax_deterministic=((p.Choice(["blue", "red"], deterministic=True), p.Array(shape=(1,))), False, False),
    ordered_discrete=((p.TransitionChoice([True, False]), p.Array(shape=(1,))), False, False),
)
def test_parametrization_continuous_noisy(variables: tp.Tuple[p.Parameter, ...], continuous: bool, noisy: bool) -> None:
    instru = p.Instrumentation(*variables)
    assert instru.descriptors.continuous == continuous
    assert instru.descriptors.deterministic != noisy
Example #27
 def __init__(self, dimension: int = 500) -> None:
     super().__init__(self._simulate_stsp, p.Array(shape=(dimension,)))
     self.register_initialization(dimension=dimension)
     self.order = np.arange(0, self.dimension)
     self.x = self.parametrization.random_state.normal(size=self.dimension)
     self.y = self.parametrization.random_state.normal(size=self.dimension)
Example #28
 def __init__(self, symmetry: int = 0) -> None:
     super().__init__(rocket, parametrization=parameter.Array(shape=(24,)), symmetry=symmetry)
Example #29

def test_deterministic_data_setter() -> None:
    instru = p.Instrumentation(p.Choice([0, 1, 2, 3]), y=p.Choice([0, 1, 2, 3]))
    ifunc = base.ExperimentFunction(_Callable(), instru)
    data = [0.01, 0, 0, 0, 0.01, 0, 0, 0]
    for _ in range(20):
        args, kwargs = ifunc.parametrization.spawn_child().set_standardized_data(data, deterministic=True).value
        testing.printed_assert_equal(args, [0])
        testing.printed_assert_equal(kwargs, {"y": 0})
    arg_sum, kwarg_sum = 0, 0
    for _ in range(24):
        args, kwargs = ifunc.parametrization.spawn_child().set_standardized_data(data, deterministic=False).value
        arg_sum += args[0]
        kwarg_sum += kwargs["y"]
    assert arg_sum != 0
    assert kwarg_sum != 0


@testing.parametrized(
    floats=((p.Scalar(), p.Scalar(init=12.0)), True, False),
    array_int=((p.Scalar(), p.Array(shape=(1,)).set_integer_casting()), False, False),
    softmax_noisy=((p.Choice(["blue", "red"]), p.Array(shape=(1,))), True, True),
    softmax_deterministic=((p.Choice(["blue", "red"], deterministic=True), p.Array(shape=(1,))), False, False),
    ordered_discrete=((p.TransitionChoice([True, False]), p.Array(shape=(1,))), False, False),
)
def test_parametrization_continuous_noisy(variables: tp.Tuple[p.Parameter, ...], continuous: bool, noisy: bool) -> None:
    instru = p.Instrumentation(*variables)
    assert instru.descriptors.continuous == continuous
    assert instru.descriptors.deterministic != noisy
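Example #30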
    def __init__(  # pylint: disable=too-many-arguments
        self,
        name: str,
        block_dimension: int,
        num_blocks: int = 1,
        useless_variables: int = 0,
        noise_level: float = 0,
        noise_dissymmetry: bool = False,
        rotation: bool = False,
        translation_factor: float = 1.0,
        hashing: bool = False,
        aggregator: str = "max",
        split: bool = False,
    ) -> None:
        # pylint: disable=too-many-locals
        self.name = name
        self._parameters = {x: y for x, y in locals().items() if x not in ["__class__", "self"]}
        # basic checks
        assert noise_level >= 0, "Noise level must be greater or equal to 0"
        if not all(isinstance(x, bool) for x in [noise_dissymmetry, hashing, rotation]):
            raise TypeError("hashing and rotation should be bools")
        for param, mini in [("block_dimension", 1), ("num_blocks", 1), ("useless_variables", 0)]:
            value = self._parameters[param]
            if not isinstance(value, int):
                raise TypeError(f'"{param}" must be an int')
            if value < mini:
                raise ValueError(f'"{param}" must be greater or equal to {mini}')
        if not isinstance(translation_factor, (float, int)):
            raise TypeError(f"Got non-float value {translation_factor}")
        if name not in corefuncs.registry:
            available = ", ".join(self.list_sorted_function_names())
            raise ValueError(f'Unknown core function "{name}". Available names are:\n-----\n{available}')
        # record necessary info and prepare transforms
        self._dimension = block_dimension * num_blocks + useless_variables
        self._func = corefuncs.registry[name]
        # special case
        info = corefuncs.registry.get_info(self._parameters["name"])
        only_index_transform = info.get("no_transform", False)

        assert not (split and hashing)
        assert not (split and useless_variables > 0)
        parametrization = (
            p.Array(shape=(1,) if hashing else (self._dimension,)).set_name("")
            if not split
            else (
                p.Instrumentation(*[p.Array(shape=(block_dimension,)) for _ in range(num_blocks)]).set_name(
                    "split"
                )
            )
        )
        if noise_level > 0:
            parametrization.descriptors.deterministic_function = False
        super().__init__(self.noisy_function, parametrization)
        # variable, must come after super().__init__(...) to bind the random_state
        # (a local random_state could be used instead, but it would be less reproducible)
        self.transform_var = ArtificialVariable(
            dimension=self._dimension,
            num_blocks=num_blocks,
            block_dimension=block_dimension,
            translation_factor=translation_factor,
            rotation=rotation,
            hashing=hashing,
            only_index_transform=only_index_transform,
            random_state=self._parametrization.random_state,
        )
        self._aggregator = {"max": np.max, "mean": np.mean, "sum": np.sum}[aggregator]
        info = corefuncs.registry.get_info(self._parameters["name"])
        # add descriptors
        self.add_descriptors(
            useful_dimensions=block_dimension * num_blocks,
            discrete=any(x in name for x in ["onemax", "leadingones", "jump"]),
        )
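The split branch above builds one Array per block; a quick sketch of the resulting dimension, assuming nevergrad as ng:

import nevergrad as ng

# mirrors the split branch with num_blocks=3 and block_dimension=4
param = ng.p.Instrumentation(*[ng.p.Array(shape=(4,)) for _ in range(3)]).set_name("split")
print(param.dimension)  # 12: an Instrumentation's dimension sums its components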