Exemplo n.º 1
0
def test_single_builder_using_passes_on_correct_dataset_and_model(
    single_builder: Union[SingleModelAcquisitionBuilder, SingleModelBatchAcquisitionBuilder]
) -> None:
    """``using("foo")`` should route the "foo"-tagged dataset and model to the builder."""
    tagged_builder = single_builder.using("foo")
    datasets = {
        "foo": mk_dataset([[0.0]], [[0.0]]),
        "bar": mk_dataset([[1.0]], [[1.0]]),
    }
    models = {
        "foo": QuadraticMeanAndRBFKernel(0.0),
        "bar": QuadraticMeanAndRBFKernel(1.0),
    }
    tagged_builder.prepare_acquisition_function(datasets, models)
Exemplo n.º 2
0
def test_bayesian_optimizer_optimize_tracked_state() -> None:
    """Run three BO steps and check the recorded per-step state, data and models."""

    class _CountingRule(AcquisitionRule[State[Optional[int], TensorType], Box]):
        # Stateful rule: counts the steps taken and offsets the query point by the count.
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
        ) -> State[int | None, TensorType]:
            def advance(count: int | None) -> tuple[int | None, TensorType]:
                next_count = 0 if count is None else count + 1
                return next_count, tf.constant([[10.0]], tf.float64) + next_count

            return advance

    class _DecreasingVarianceModel(QuadraticMeanAndRBFKernel, TrainableProbabilisticModel):
        # Predictive variance shrinks as more data points are observed.
        def __init__(self, data: Dataset):
            super().__init__()
            self._data = data

        def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
            mean, var = super().predict(query_points)
            return mean, var / len(self._data)

        def update(self, dataset: Dataset) -> None:
            self._data = dataset

        def optimize(self, dataset: Dataset) -> None:
            pass

    initial_data = mk_dataset([[0.0]], [[0.0]])
    model = _DecreasingVarianceModel(initial_data)
    optimizer = BayesianOptimizer(_quadratic_observer, Box([0], [1]))
    _, history = optimizer.optimize(
        3, {"": initial_data}, {"": model}, _CountingRule()
    ).astuple()

    # Each record stores the acquisition state as it was *before* that step ran.
    assert [record.acquisition_state for record in history] == [None, 0, 1]

    assert_datasets_allclose(history[0].datasets[""], initial_data)
    assert_datasets_allclose(
        history[1].datasets[""], mk_dataset([[0.0], [10.0]], [[0.0], [100.0]])
    )
    assert_datasets_allclose(
        history[2].datasets[""],
        mk_dataset([[0.0], [10.0], [11.0]], [[0.0], [100.0], [121.0]]),
    )

    for step in range(3):
        assert history[step].model == history[step].models[""]
        assert history[step].dataset == history[step].datasets[""]

        # Saved model copies must reflect the data size at the time of the snapshot.
        _, variance_from_saved_model = history[step].models[""].predict(
            tf.constant([[0.0]], tf.float64)
        )
        npt.assert_allclose(variance_from_saved_model, 1.0 / (step + 1))
Exemplo n.º 3
0
def test_bayesian_optimizer_optimize_for_uncopyable_model() -> None:
    """A model whose deepcopy fails mid-run should produce a failed result but keep the history."""

    class _UncopyableModel(_PseudoTrainableQuadratic):
        _optimize_count = 0

        def optimize(self, dataset: Dataset) -> None:
            self._optimize_count += 1

        def __deepcopy__(self, memo: dict[int, object]) -> _UncopyableModel:
            # Copying starts to fail once the model has been optimized three times.
            if self._optimize_count >= 3:
                raise _Whoops

            return self

    rule = FixedAcquisitionRule([[0.0]])
    optimizer = BayesianOptimizer(_quadratic_observer, Box([0], [1]))
    result, history = optimizer.optimize(
        10, {"": mk_dataset([[0.0]], [[0.0]])}, {"": _UncopyableModel()}, rule
    ).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 4
Exemplo n.º 4
0
def test_bayesian_optimizer_optimizes_initial_model(
        fit_initial_model: bool) -> None:
    """The model is optimized one extra time when ``fit_initial_model`` is set."""

    class _CountingOptimizerModel(_PseudoTrainableQuadratic):
        _optimize_count = 0

        def optimize(self, dataset: Dataset) -> None:
            self._optimize_count += 1

    rule = FixedAcquisitionRule([[0.0]])
    model = _CountingOptimizerModel()

    optimizer = BayesianOptimizer(_quadratic_observer, Box([0], [1]))
    final_opt_state, _ = optimizer.optimize(
        1,
        {"": mk_dataset([[0.0]], [[0.0]])},
        {"": model},
        rule,
        fit_initial_model=fit_initial_model,
    ).astuple()
    final_model = final_opt_state.unwrap().model

    # With an initial fit the model is optimized both before and after the single BO step;
    # otherwise only once, at the end of the step.
    expected_count = 2 if fit_initial_model else 1
    assert final_model._optimize_count == expected_count  # type: ignore
Exemplo n.º 5
0
def test_batch_monte_carlo_expected_improvement_raises_for_model_with_wrong_event_shape(
) -> None:
    """Batch MC EI must reject a model whose event shape is two-dimensional."""
    builder = BatchMonteCarloExpectedImprovement(100)
    dataset = mk_dataset([[0.0, 0.0]], [[0.0, 0.0]])
    two_output_model = _dim_two_gp()
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        builder.prepare_acquisition_function(dataset, two_output_model)
Exemplo n.º 6
0
def test_batch_monte_carlo_expected_improvement_raises_for_model_with_wrong_event_shape() -> None:
    """Preparing batch MC EI against a two-output Gaussian process must fail."""
    builder = BatchMonteCarloExpectedImprovement(100)
    dataset = mk_dataset([(0.0, 0.0)], [(0.0, 0.0)])
    matern52 = tfp.math.psd_kernels.MaternFiveHalves(
        amplitude=tf.cast(2.3, tf.float64),
        length_scale=tf.cast(0.5, tf.float64),
    )
    # Two mean functions + two kernels => two-dimensional event shape.
    two_output_model = GaussianProcess(
        [lambda x: branin(x), lambda x: quadratic(x)], [matern52, rbf()]
    )
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        builder.prepare_acquisition_function(dataset, two_output_model)
Exemplo n.º 7
0
def test_bayesian_optimizer_optimize_for_failed_step(
        observer: Observer, model: TrainableProbabilisticModel,
        rule: AcquisitionRule) -> None:
    """A failure during an optimization step yields an error result and a truncated history."""
    optimizer = BayesianOptimizer(observer, Box([0], [1]))
    datasets = {"": mk_dataset([[0.0]], [[0.0]])}
    models = {"": model}
    result, history = optimizer.optimize(3, datasets, models, rule).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 1
Exemplo n.º 8
0
def test_ask_tell_optimizer_updates_state_with_new_data(
    search_space: Box,
    init_dataset: Dataset,
    model: TrainableProbabilisticModel,
    acquisition_rule: AcquisitionRule[TensorType, Box],
) -> None:
    """Telling new observations should append them to the stored dataset."""
    ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
    extra_data = mk_dataset([[1.0]], [[1.0]])

    ask_tell.tell(extra_data)
    record: Record[None] = ask_tell.to_record()

    assert_datasets_allclose(record.dataset, init_dataset + extra_data)
def test_ask_tell_optimizer_tell_validates_keys(
    search_space: Box,
    init_dataset: Dataset,
    model: TrainableProbabilisticModel,
    acquisition_rule: AcquisitionRule[TensorType, Box],
) -> None:
    """``tell`` must reject data whose tags don't match those given at construction."""
    initial_datasets = {"1": init_dataset}
    initial_models = {"1": model}
    mismatched_data = {"2": mk_dataset([[1.0]], [[1.0]])}

    ask_tell = AskTellOptimizer(
        search_space, initial_datasets, initial_models, acquisition_rule
    )
    with pytest.raises(ValueError):
        ask_tell.tell(mismatched_data)
Exemplo n.º 10
0
def test_ask_tell_optimizer_trains_model(
    search_space: Box,
    init_dataset: Dataset,
    model: TrainableProbabilisticModel,
    acquisition_rule: AcquisitionRule[TensorType, Box],
) -> None:
    """Even with ``fit_model=False`` at construction, ``tell`` optimizes the model once."""
    ask_tell = AskTellOptimizer(
        search_space, init_dataset, model, acquisition_rule, fit_model=False
    )
    fresh_data = mk_dataset([[1.0]], [[1.0]])

    ask_tell.tell(fresh_data)
    record: Record[None] = ask_tell.to_record()

    assert record.model.optimize_count == 1  # type: ignore
Exemplo n.º 11
0
def test_bayesian_optimizer_calls_observer_once_per_iteration(steps: int) -> None:
    """The observer must be invoked exactly once for each optimization step."""

    class _CountingObserver:
        call_count = 0

        def __call__(self, x: tf.Tensor) -> Dataset:
            self.call_count += 1
            return Dataset(x, tf.reduce_sum(x ** 2, axis=-1, keepdims=True))

    observer = _CountingObserver()
    optimizer = BayesianOptimizer(observer, Box([-1], [1]))
    initial_data = mk_dataset([[0.5]], [[0.25]])

    result = optimizer.optimize(steps, initial_data, _PseudoTrainableQuadratic())
    result.final_result.unwrap()

    assert observer.call_count == steps
Exemplo n.º 12
0
def test_batch_monte_carlo_expected_improvement() -> None:
    """Batch MC EI should agree with a direct Monte Carlo estimate of batch improvement."""
    xs = tf.random.uniform([3, 5, 7, 2], dtype=tf.float64)
    model = QuadraticMeanAndRBFKernel()

    mean, cov = model.predict_joint(xs)
    mvn = tfp.distributions.MultivariateNormalFullCovariance(
        tf.linalg.matrix_transpose(mean), cov
    )
    mvn_samples = mvn.sample(10_000)
    min_predictive_mean_at_known_points = 0.09
    # Expected value: mean over samples of the best (max) improvement within each batch.
    improvement = tf.maximum(min_predictive_mean_at_known_points - mvn_samples, 0.0)
    expected = tf.reduce_mean(tf.reduce_max(improvement, axis=-1), axis=0)

    builder = BatchMonteCarloExpectedImprovement(10_000)
    acq = builder.prepare_acquisition_function(
        mk_dataset([[0.3], [0.5]], [[0.09], [0.25]]), model
    )

    npt.assert_allclose(acq(xs), expected, rtol=0.05)
Exemplo n.º 13
0
def test_bayesian_optimizer_optimize_is_noop_for_zero_steps() -> None:
    """With zero steps, neither the model, the rule nor the observer may be touched."""

    class _UnusableModel(TrainableProbabilisticModel):
        # Every method fails loudly: none of them should ever be reached.
        def predict(self, query_points: TensorType) -> NoReturn:
            assert False

        def predict_joint(self, query_points: TensorType) -> NoReturn:
            assert False

        def sample(self, query_points: TensorType, num_samples: int) -> NoReturn:
            assert False

        def update(self, dataset: Dataset) -> NoReturn:
            assert False

        def optimize(self, dataset: Dataset) -> NoReturn:
            assert False

    class _UnusableRule(AcquisitionRule[None, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: None = None,
        ) -> NoReturn:
            assert False

    def _unusable_observer(x: tf.Tensor) -> NoReturn:
        assert False

    data = {"": mk_dataset([[0.0]], [[0.0]])}
    optimizer = BayesianOptimizer(_unusable_observer, Box([-1], [1]))
    result, history = optimizer.optimize(
        0, data, {"": _UnusableModel()}, _UnusableRule()
    ).astuple()

    assert history == []
    final_data = result.unwrap().datasets
    assert len(final_data) == 1
    assert_datasets_allclose(final_data[""], data[""])
Exemplo n.º 14
0
def test_bayesian_optimizer_uses_specified_acquisition_state(
    starting_state: int | None,
    expected_states_received: list[int | None],
    final_acquisition_state: int | None,
) -> None:
    """The optimizer threads the acquisition state through successive rule calls."""

    class Rule(AcquisitionRule[State[Optional[int], TensorType], Box]):
        def __init__(self) -> None:
            self.states_received: list[int | None] = []

        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
        ) -> State[int | None, TensorType]:
            def go(state: int | None) -> tuple[int | None, TensorType]:
                # Record what was passed in, then advance the counter
                # (None counts as zero).
                self.states_received.append(state)
                next_state = 1 if state is None else state + 1
                return next_state, tf.constant([[0.0]], tf.float64)

            return go

    rule = Rule()

    data = {"": mk_dataset([[0.0]], [[0.0]])}
    models = {"": _PseudoTrainableQuadratic()}
    optimizer = BayesianOptimizer(lambda x: {"": Dataset(x, x**2)}, Box([-1], [1]))
    final_state, history = optimizer.optimize(
        3, data, models, rule, starting_state
    ).astuple()

    assert rule.states_received == expected_states_received
    assert final_state.unwrap().acquisition_state == final_acquisition_state
    assert [record.acquisition_state for record in history] == expected_states_received
Exemplo n.º 15
0
def init_dataset() -> Dataset:
    # Fixture: a single observation of 0.0 at query point 0.0.
    return mk_dataset([[0.0]], [[0.0]])