def test_bayesian_optimizer_optimize_tracked_state() -> None:
    class _CountingRule(AcquisitionRule[int, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[TensorType, int]:
            new_state = 0 if state is None else state + 1
            return tf.constant([[10.0]]) + new_state, new_state

    class _DecreasingVarianceModel(QuadraticMeanAndRBFKernel,
                                   TrainableProbabilisticModel):
        def __init__(self, data: Dataset):
            super().__init__()
            self._data = data

        def predict(self,
                    query_points: TensorType) -> Tuple[TensorType, TensorType]:
            mean, var = super().predict(query_points)
            return mean, var / len(self._data)

        def update(self, dataset: Dataset) -> None:
            self._data = dataset

        def optimize(self, dataset: Dataset) -> None:
            pass

    _, history = (
        BayesianOptimizer(_quadratic_observer, one_dimensional_range(0, 1))
        .optimize(
            3,
            {"": zero_dataset()},
            {"": _DecreasingVarianceModel(zero_dataset())},
            _CountingRule(),
        )
        .astuple()
    )

    assert [record.acquisition_state for record in history] == [None, 0, 1]

    assert_datasets_allclose(history[0].datasets[""], zero_dataset())
    assert_datasets_allclose(
        history[1].datasets[""],
        Dataset(tf.constant([[0.0], [10.0]]), tf.constant([[0.0], [100.0]])),
    )
    assert_datasets_allclose(
        history[2].datasets[""],
        Dataset(tf.constant([[0.0], [10.0], [11.0]]),
                tf.constant([[0.0], [100.0], [121.0]])),
    )

    for step in range(3):
        _, variance_from_saved_model = history[step].models[""].predict(
            tf.constant([[0.0]]))
        npt.assert_allclose(variance_from_saved_model, 1.0 / (step + 1))
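

# The snippets in this collection use a handful of shared fixtures from
# trieste's test utilities without showing their definitions or imports. The
# sketches below are plausible reconstructions inferred from how the tests
# call them -- read them as assumptions for illustration, not as trieste's
# actual test code.


class _Whoops(Exception):
    """Sentinel error raised by deliberately failing observers and models."""


def zero_dataset() -> Dataset:
    # A single observation of 0.0 at the origin.
    return Dataset(tf.constant([[0.0]]), tf.constant([[0.0]]))


def one_dimensional_range(lower: float, upper: float) -> Box:
    # A one-dimensional continuous search space [lower, upper].
    return Box(tf.constant([lower]), tf.constant([upper]))


def _quadratic_observer(x: tf.Tensor) -> Dict[str, Dataset]:
    # Observes f(x) = x ** 2 under the empty-string tag, matching the
    # datasets asserted in the tracked-state test above.
    return {"": Dataset(x, x ** 2)}


class _PseudoTrainableQuadratic(QuadraticMeanAndRBFKernel, TrainableProbabilisticModel):
    # A quadratic-mean model whose training hooks are no-ops.
    def update(self, dataset: Dataset) -> None:
        pass

    def optimize(self, dataset: Dataset) -> None:
        pass


class _IdentitySingleBuilder(SingleModelAcquisitionBuilder):
    # Builds the identity acquisition function.
    def prepare_acquisition_function(
        self, dataset: Dataset, model: ProbabilisticModel
    ) -> AcquisitionFunction:
        return lambda at: at


class FixedAcquisitionRule(AcquisitionRule[None, Box]):
    # Always proposes the same query points, with no acquisition state.
    def __init__(self, query_points: TensorType):
        self._query_points = query_points

    def acquire(
        self,
        search_space: Box,
        datasets: Mapping[str, Dataset],
        models: Mapping[str, ProbabilisticModel],
        state: None = None,
    ) -> Tuple[TensorType, None]:
        return self._query_points, None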
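

# ``SingleModelAcquisitionBuilder.using(tag)`` adapts a builder written for a
# single dataset/model pair to the mapping-based builder interface, selecting
# the entries registered under ``tag``. The ``using`` tests below check both
# directions: matching entries are passed straight through, and a missing tag
# fails fast with KeyError.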
def test_single_builder_using_passes_on_correct_dataset_and_model() -> None:
    class _Mock(SingleModelAcquisitionBuilder):
        def prepare_acquisition_function(
            self, dataset: Dataset, model: ProbabilisticModel
        ) -> AcquisitionFunction:
            assert dataset is data["foo"]
            assert model is models["foo"]
            return lambda at: at

    builder = _Mock().using("foo")

    data = {"foo": zero_dataset(), "bar": zero_dataset()}
    models = {"foo": QuadraticWithUnitVariance(), "bar": QuadraticWithUnitVariance()}
    builder.prepare_acquisition_function(data, models)
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys_and_default_acquisition(
) -> None:
    optimizer = BayesianOptimizer(lambda x: x[:1],
                                  one_dimensional_range(-1, 1))
    with pytest.raises(ValueError):
        optimizer.optimize(3, {"foo": zero_dataset()},
                           {"foo": _PseudoTrainableQuadratic()})
def test_single_builder_raises_immediately_for_wrong_key() -> None:
    builder = _IdentitySingleBuilder().using("foo")

    with pytest.raises(KeyError):
        builder.prepare_acquisition_function(
            {"bar": zero_dataset()}, {"bar": QuadraticWithUnitVariance()}
        )
def test_bayesian_optimizer_optimize_for_uncopyable_model() -> None:
    class _UncopyableModel(_PseudoTrainableQuadratic):
        _optimize_count = 0

        def optimize(self, dataset: Dataset) -> None:
            self._optimize_count += 1

        def __deepcopy__(self, memo: Dict[int, object]) -> "_UncopyableModel":
            if self._optimize_count >= 3:
                raise _Whoops

            return self

    rule = FixedAcquisitionRule(tf.constant([[0.0]]))
    result, history = (
        BayesianOptimizer(_quadratic_observer, one_dimensional_range(0, 1))
        .optimize(10, {"": zero_dataset()}, {"": _UncopyableModel()}, rule)
        .astuple()
    )
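
    # The failing deepcopy (the model's fourth copy) does not escape
    # ``optimize``: the error is wrapped in the returned result and re-raised
    # only on ``unwrap``, while the history keeps the records captured before
    # the failure.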

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 4
def test_probability_of_feasibility_builder_builds_pof(threshold: float,
                                                       at: tf.Tensor) -> None:
    builder = ProbabilityOfFeasibility(threshold)
    acq = builder.prepare_acquisition_function(zero_dataset(),
                                               QuadraticWithUnitVariance())
    expected = probability_of_feasibility(QuadraticWithUnitVariance(),
                                          threshold, at)
    npt.assert_allclose(acq(at), expected)
def test_bayesian_optimizer_optimize_raises_for_negative_steps(
        num_steps: int) -> None:
    optimizer = BayesianOptimizer(_quadratic_observer,
                                  one_dimensional_range(-1, 1))

    with pytest.raises(ValueError, match="num_steps"):
        optimizer.optimize(num_steps, {"": zero_dataset()},
                           {"": _PseudoTrainableQuadratic()})
def test_single_builder_raises_immediately_for_wrong_key(
    single_builder: Union[SingleModelAcquisitionBuilder, SingleModelBatchAcquisitionBuilder]
) -> None:
    builder = single_builder.using("foo")

    with pytest.raises(KeyError):
        builder.prepare_acquisition_function(
            {"bar": zero_dataset()}, {"bar": QuadraticMeanAndRBFKernel()}
        )
def test_bayesian_optimizer_optimize_returns_default_acquisition_state_of_correct_type(
) -> None:
    optimizer = BayesianOptimizer(lambda x: {OBJECTIVE: Dataset(x, x[:1])},
                                  one_dimensional_range(-1, 1))
    res: OptimizationResult[None] = optimizer.optimize(
        3, {OBJECTIVE: zero_dataset()},
        {OBJECTIVE: _PseudoTrainableQuadratic()})

    if res.error is not None:
        raise res.error

    assert all(logging_state.acquisition_state is None
               for logging_state in res.history)
def test_bayesian_optimizer_optimize_for_failed_step(
        observer: Observer, model: TrainableProbabilisticModel,
        rule: AcquisitionRule) -> None:
    optimizer = BayesianOptimizer(observer, one_dimensional_range(0, 1))
    result, history = optimizer.optimize(
        3, {"": zero_dataset()}, {"": model}, rule
    ).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 1
def test_bayesian_optimizer_optimize_doesnt_track_state_if_told_not_to(
) -> None:
    class _UncopyableModel(_PseudoTrainableQuadratic):
        def __deepcopy__(self, memo: Dict[int, object]) -> NoReturn:
            assert False

    history = (
        BayesianOptimizer(_quadratic_observer, one_dimensional_range(-1, 1))
        .optimize(
            5,
            {OBJECTIVE: zero_dataset()},
            {OBJECTIVE: _UncopyableModel()},
            track_state=False,
        )
        .history
    )
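    # ``track_state=False`` skips history recording entirely, so the models
    # are never deep-copied and the ``assert False`` in ``__deepcopy__`` is
    # never hit.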
    assert len(history) == 0
def test_bayesian_optimizer_optimize_is_noop_for_zero_steps() -> None:
    class _UnusableModel(TrainableProbabilisticModel):
        def predict(self, query_points: TensorType) -> NoReturn:
            assert False

        def predict_joint(self, query_points: TensorType) -> NoReturn:
            assert False

        def sample(self, query_points: TensorType,
                   num_samples: int) -> NoReturn:
            assert False

        def update(self, dataset: Dataset) -> NoReturn:
            assert False

        def optimize(self, dataset: Dataset) -> NoReturn:
            assert False

    class _UnusableRule(AcquisitionRule[None, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: None,
        ) -> NoReturn:
            assert False

    def _unusable_observer(x: tf.Tensor) -> NoReturn:
        assert False

    data = {"": zero_dataset()}
    result, history = (
        BayesianOptimizer(_unusable_observer, one_dimensional_range(-1, 1))
        .optimize(0, data, {"": _UnusableModel()}, _UnusableRule())
        .astuple()
    )
    assert history == []
    final_data = result.unwrap().datasets
    assert len(final_data) == 1
    assert_datasets_allclose(final_data[""], data[""])
def test_bayesian_optimizer_uses_specified_acquisition_state(
    starting_state: Optional[int],
    expected_states_received: List[Optional[int]],
    final_acquisition_state: Optional[int],
) -> None:
    class Rule(AcquisitionRule[int, Box]):
        def __init__(self):
            self.states_received = []

        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[TensorType, int]:
            self.states_received.append(state)

            if state is None:
                state = 0

            return tf.constant([[0.0]]), state + 1

    rule = Rule()

    final_state, history = (
        BayesianOptimizer(
            lambda x: {"": Dataset(x, x ** 2)}, one_dimensional_range(-1, 1)
        )
        .optimize(
            3, {"": zero_dataset()}, {"": _PseudoTrainableQuadratic()}, rule, starting_state
        )
        .astuple()
    )

    assert rule.states_received == expected_states_received
    assert final_state.unwrap().acquisition_state == final_acquisition_state
    assert [record.acquisition_state
            for record in history] == expected_states_received
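

# State is threaded through the optimization loop: each step's ``acquire``
# receives the state returned by the previous step (or the caller-supplied
# starting state on the first step), and each history record stores the state
# as it was when that step began. The variant below checks the same behaviour
# via ``res.history``.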
def test_bayesian_optimizer_uses_specified_acquisition_state(
        starting_state: Optional[int],
        expected_states: List[Optional[int]]) -> None:
    class Rule(AcquisitionRule[int, Box]):
        def __init__(self):
            self.states_received = []

        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[QueryPoints, int]:
            self.states_received.append(state)

            if state is None:
                state = 0

            return tf.constant([[0.0]]), state + 1

    rule = Rule()

    res = BayesianOptimizer(
        lambda x: {"": Dataset(x, x ** 2)}, one_dimensional_range(-1, 1)
    ).optimize(
        3, {"": zero_dataset()}, {"": _PseudoTrainableQuadratic()}, rule, starting_state
    )

    if res.error is not None:
        raise res.error

    assert rule.states_received == expected_states
    assert [state.acquisition_state
            for state in res.history] == expected_states
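

# ThompsonSampling's ``acquire`` evidently requires the models mapping to hold
# exactly the OBJECTIVE key: an empty mapping, a wrong key, and a superset
# that merely contains OBJECTIVE are all rejected with ValueError.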

@pytest.mark.parametrize(
    "models",
    [
        {},
        {"foo": QuadraticMeanAndRBFKernel()},
        {"foo": QuadraticMeanAndRBFKernel(), OBJECTIVE: QuadraticMeanAndRBFKernel()},
    ],
)
@pytest.mark.parametrize("datasets", [{}, {OBJECTIVE: zero_dataset()}])
def test_thompson_sampling_raises_for_invalid_models_keys(
        datasets: Dict[str, Dataset],
        models: Dict[str, ProbabilisticModel]) -> None:
    search_space = one_dimensional_range(-1, 1)
    rule = ThompsonSampling(100, 10)
    with pytest.raises(ValueError):
        rule.acquire(search_space, datasets, models)


@pytest.mark.parametrize("datasets", [{}, {"foo": zero_dataset()}])
@pytest.mark.parametrize("models", [{}, {
    "foo": QuadraticMeanAndRBFKernel()
}, {
    OBJECTIVE: QuadraticMeanAndRBFKernel()
}])
def test_optimization_result_try_get_final_models_for_successful_optimization(
) -> None:
    models = {"foo": _PseudoTrainableQuadratic()}
    result: OptimizationResult[None] = OptimizationResult(
        Ok(Record({"foo": zero_dataset()}, models, None)), [])
    assert result.try_get_final_models() is models


# NOTE: the header of the following test is missing from this snippet. The
# name, parametrization, and setup below are reconstructed assumptions,
# inferred from the surviving body (an observer whose calls are counted).
@pytest.mark.parametrize("steps", [0, 1, 2, 5])
def test_bayesian_optimizer_calls_observer_once_per_iteration(steps: int) -> None:
    class _CountingObserver:
        call_count = 0

        def __call__(self, x: tf.Tensor) -> Dict[str, Dataset]:
            self.call_count += 1
            return {OBJECTIVE: Dataset(x, x ** 2)}

    observer = _CountingObserver()
    optimizer = BayesianOptimizer(observer, one_dimensional_range(-1, 1))
    data = zero_dataset()

    optimizer.optimize(
        steps, {OBJECTIVE: data}, {OBJECTIVE: _PseudoTrainableQuadratic()}
    ).final_result.unwrap()

    assert observer.call_count == steps


@pytest.mark.parametrize(
    "datasets, models",
    [
        ({}, {}),
        ({"foo": zero_dataset()}, {}),
        ({"foo": zero_dataset()}, {"bar": _PseudoTrainableQuadratic()}),
        (
            {"foo": zero_dataset()},
            {"foo": _PseudoTrainableQuadratic(), "bar": _PseudoTrainableQuadratic()},
        ),