# Example #1
# 0
def test_trust_region_raises_for_missing_datasets_key(
        datasets: dict[str, Dataset],
        models: dict[str, ProbabilisticModel]) -> None:
    """TrustRegion.acquire raises KeyError when the datasets mapping lacks a key it needs."""
    space = one_dimensional_range(-1, 1)
    with pytest.raises(KeyError):
        TrustRegion().acquire(space, datasets, models, None)
def test_bayesian_optimizer_optimize_for_uncopyable_model() -> None:
    """If deep-copying the model fails partway through, the result wraps the error
    and the history keeps only the records gathered before the failure."""
    class _UncopyableModel(_PseudoTrainableQuadratic):
        _optimize_count = 0

        def optimize(self, dataset: Dataset) -> None:
            self._optimize_count += 1

        def __deepcopy__(self, memo: Dict[int, object]) -> _UncopyableModel:
            # Copying succeeds for the first three optimization steps, then blows up.
            if self._optimize_count >= 3:
                raise _Whoops
            return self

    optimizer = BayesianOptimizer(_quadratic_observer, one_dimensional_range(0, 1))
    result, history = optimizer.optimize(
        10,
        {"": zero_dataset()},
        {"": _UncopyableModel()},
        FixedAcquisitionRule(tf.constant([[0.0]])),
    ).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 4
# Example #3
# 0
def test_thompson_sampling_raises_for_invalid_models_keys(
        datasets: dict[str, Dataset],
        models: dict[str, ProbabilisticModel]) -> None:
    """ThompsonSampling.acquire raises ValueError for mismatched model keys."""
    space = one_dimensional_range(-1, 1)
    with pytest.raises(ValueError):
        ThompsonSampling(100, 10).acquire(space, datasets, models)
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys_and_default_acquisition(
) -> None:
    """With the default acquisition rule, optimize rejects unexpected mapping keys."""
    optimizer = BayesianOptimizer(lambda x: x[:1],
                                  one_dimensional_range(-1, 1))
    data = {"foo": zero_dataset()}
    model_specs = {"foo": _PseudoTrainableQuadratic()}
    with pytest.raises(ValueError):
        optimizer.optimize(3, data, model_specs)
def test_bayesian_optimizer_optimize_raises_for_negative_steps(
        num_steps: int) -> None:
    """optimize raises ValueError mentioning "num_steps" for invalid step counts."""
    optimizer = BayesianOptimizer(_quadratic_observer,
                                  one_dimensional_range(-1, 1))
    with pytest.raises(ValueError, match="num_steps"):
        optimizer.optimize(
            num_steps, {"": zero_dataset()}, {"": _PseudoTrainableQuadratic()})
# Example #6
# 0
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys(
        datasets: Dict[str, Dataset],
        model_specs: Dict[str, TrainableProbabilisticModel]) -> None:
    """optimize raises ValueError when the rule's keys don't match the data/models."""
    optimizer = BayesianOptimizer(lambda x: {"foo": Dataset(x, x[:1])},
                                  one_dimensional_range(-1, 1))
    fixed_rule = FixedAcquisitionRule(tf.constant([[0.0]]))
    with pytest.raises(ValueError):
        optimizer.optimize(10, datasets, model_specs, fixed_rule)
def test_bayesian_optimizer_optimize_tracked_state() -> None:
    """The history records per-step acquisition state, datasets, and model snapshots."""
    class _CountingRule(AcquisitionRule[int, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[TensorType, int]:
            # State starts at 0 on the first call and increments on each subsequent one.
            new_state = 0 if state is None else state + 1
            return tf.constant([[10.0]]) + new_state, new_state

    class _DecreasingVarianceModel(QuadraticMeanAndRBFKernel,
                                   TrainableProbabilisticModel):
        def __init__(self, data: Dataset):
            super().__init__()
            self._data = data

        def predict(self,
                    query_points: TensorType) -> Tuple[TensorType, TensorType]:
            # Variance shrinks as the stored dataset grows, so each history snapshot
            # is distinguishable by its predictive variance.
            mean, var = super().predict(query_points)
            return mean, var / len(self._data)

        def update(self, dataset: Dataset) -> None:
            self._data = dataset

        def optimize(self, dataset: Dataset) -> None:
            pass

    optimizer = BayesianOptimizer(_quadratic_observer, one_dimensional_range(0, 1))
    _, history = optimizer.optimize(
        3,
        {"": zero_dataset()},
        {"": _DecreasingVarianceModel(zero_dataset())},
        _CountingRule(),
    ).astuple()

    assert [record.acquisition_state for record in history] == [None, 0, 1]

    expected_datasets = [
        zero_dataset(),
        Dataset(tf.constant([[0.0], [10.0]]), tf.constant([[0.0], [100.0]])),
        Dataset(tf.constant([[0.0], [10.0], [11.0]]),
                tf.constant([[0.0], [100.0], [121.0]])),
    ]
    for step, expected in enumerate(expected_datasets):
        assert_datasets_allclose(history[step].datasets[""], expected)
        # The snapshot at each step must have been taken at that step, not later.
        _, saved_variance = history[step].models[""].predict(tf.constant([[0.0]]))
        npt.assert_allclose(saved_variance, 1.0 / (step + 1))
# Example #8
# 0
def test_bayesian_optimizer_optimize_returns_default_acquisition_state_of_correct_type(
) -> None:
    """With the default rule, every recorded acquisition state is None."""
    optimizer = BayesianOptimizer(lambda x: {OBJECTIVE: Dataset(x, x[:1])},
                                  one_dimensional_range(-1, 1))
    res: OptimizationResult[None] = optimizer.optimize(
        3, {OBJECTIVE: zero_dataset()},
        {OBJECTIVE: _PseudoTrainableQuadratic()})

    if res.error is not None:
        raise res.error

    for logging_state in res.history:
        assert logging_state.acquisition_state is None
def test_bayesian_optimizer_optimize_for_failed_step(
        observer: Observer, model: TrainableProbabilisticModel,
        rule: AcquisitionRule) -> None:
    """A failing step yields a wrapped error and a history truncated to one record."""
    optimizer = BayesianOptimizer(observer, one_dimensional_range(0, 1))
    result, history = optimizer.optimize(
        3, {"": zero_dataset()}, {"": model}, rule).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 1
def test_bayesian_optimizer_optimize_doesnt_track_state_if_told_not_to(
) -> None:
    """With track_state=False, the model is never deep-copied and history stays empty."""
    class _UncopyableModel(_PseudoTrainableQuadratic):
        def __deepcopy__(self, memo: Dict[int, object]) -> NoReturn:
            assert False

    optimizer = BayesianOptimizer(_quadratic_observer,
                                  one_dimensional_range(-1, 1))
    res = optimizer.optimize(
        5,
        {OBJECTIVE: zero_dataset()},
        {OBJECTIVE: _UncopyableModel()},
        track_state=False,
    )
    assert len(res.history) == 0
def test_bayesian_optimizer_optimize_is_noop_for_zero_steps() -> None:
    """Zero steps touches neither the observer, the model, nor the rule, and
    returns the initial data unchanged with an empty history."""
    class _UnusableModel(TrainableProbabilisticModel):
        def predict(self, query_points: TensorType) -> NoReturn:
            assert False

        def predict_joint(self, query_points: TensorType) -> NoReturn:
            assert False

        def sample(self, query_points: TensorType,
                   num_samples: int) -> NoReturn:
            assert False

        def update(self, dataset: Dataset) -> NoReturn:
            assert False

        def optimize(self, dataset: Dataset) -> NoReturn:
            assert False

    class _UnusableRule(AcquisitionRule[None, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: None,
        ) -> NoReturn:
            assert False

    def _unusable_observer(x: tf.Tensor) -> NoReturn:
        assert False

    data = {"": zero_dataset()}
    optimizer = BayesianOptimizer(_unusable_observer,
                                  one_dimensional_range(-1, 1))
    result, history = optimizer.optimize(
        0, data, {"": _UnusableModel()}, _UnusableRule()).astuple()

    assert history == []
    final_data = result.unwrap().datasets
    assert len(final_data) == 1
    assert_datasets_allclose(final_data[""], data[""])
def test_bayesian_optimizer_calls_observer_once_per_iteration(
        steps: int) -> None:
    """The observer is invoked exactly once per optimization step."""
    class _CountingObserver:
        call_count = 0

        def __call__(self, x: tf.Tensor) -> Dict[str, Dataset]:
            self.call_count += 1
            observations = tf.reduce_sum(x**2, axis=-1, keepdims=True)
            return {OBJECTIVE: Dataset(x, observations)}

    observer = _CountingObserver()
    optimizer = BayesianOptimizer(observer, one_dimensional_range(-1, 1))
    data = Dataset(tf.constant([[0.5]]), tf.constant([[0.25]]))

    # unwrap() re-raises any failure so a broken run can't mask a wrong count
    optimizer.optimize(
        steps, {OBJECTIVE: data},
        {OBJECTIVE: _PseudoTrainableQuadratic()}).final_result.unwrap()

    assert observer.call_count == steps
def test_bayesian_optimizer_uses_specified_acquisition_state(
    starting_state: Optional[int],
    expected_states_received: List[Optional[int]],
    final_acquisition_state: Optional[int],
) -> None:
    """The starting state is threaded through acquire calls and recorded in history."""
    class Rule(AcquisitionRule[int, Box]):
        def __init__(self):
            self.states_received = []

        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[TensorType, int]:
            self.states_received.append(state)
            # Treat a missing state as 0, then advance it by one.
            return tf.constant([[0.0]]), (0 if state is None else state) + 1

    rule = Rule()
    optimizer = BayesianOptimizer(lambda x: {"": Dataset(x, x**2)},
                                  one_dimensional_range(-1, 1))
    final_state, history = optimizer.optimize(
        3, {"": zero_dataset()}, {"": _PseudoTrainableQuadratic()},
        rule, starting_state).astuple()

    assert rule.states_received == expected_states_received
    assert final_state.unwrap().acquisition_state == final_acquisition_state
    assert [record.acquisition_state
            for record in history] == expected_states_received
# Example #14
# 0
def test_bayesian_optimizer_uses_specified_acquisition_state(
        starting_state: Optional[int],
        expected_states: List[Optional[int]]) -> None:
    """The starting state is passed to the first acquire call and each per-step
    state is recorded in the optimization history."""
    class Rule(AcquisitionRule[int, Box]):
        def __init__(self):
            self.states_received = []

        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[QueryPoints, int]:
            self.states_received.append(state)
            # Treat a missing state as 0, then advance it by one.
            return tf.constant([[0.0]]), (0 if state is None else state) + 1

    rule = Rule()
    optimizer = BayesianOptimizer(lambda x: {"": Dataset(x, x**2)},
                                  one_dimensional_range(-1, 1))
    res = optimizer.optimize(3, {"": zero_dataset()},
                             {"": _PseudoTrainableQuadratic()},
                             rule, starting_state)

    if res.error is not None:
        raise res.error

    assert rule.states_received == expected_states
    assert [state.acquisition_state
            for state in res.history] == expected_states