Example 1
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys_and_default_acquisition(
) -> None:
    optimizer = BayesianOptimizer(lambda x: x[:1],
                                  one_dimensional_range(-1, 1))
    with pytest.raises(ValueError):
        optimizer.optimize(3, {"foo": zero_dataset()},
                           {"foo": _PseudoTrainableQuadratic()})
Example 2
def test_bayesian_optimizer_optimize_raises_for_invalid_keys(
    datasets: dict[str, Dataset], models: dict[str, TrainableProbabilisticModel]
) -> None:
    search_space = Box([-1], [1])
    optimizer = BayesianOptimizer(lambda x: {"foo": Dataset(x, x)}, search_space)
    rule = FixedAcquisitionRule([[0.0]])
    with pytest.raises(ValueError):
        optimizer.optimize(10, datasets, models, rule)
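The `datasets` and `models` arguments in Example 2 come from a pytest parametrization that the snippet omits. A plausible sketch is given below, assuming the intent is to pass tag sets that disagree with each other or with the observer's "foo" key; the concrete cases are an assumption, and the helpers referenced are the test-module ones mentioned above.

import pytest

# Assumed parametrization of mismatched tags that optimize() should reject;
# zero_dataset and _PseudoTrainableQuadratic are the test helpers used throughout.
@pytest.mark.parametrize(
    "datasets, models",
    [
        ({"foo": zero_dataset()}, {"bar": _PseudoTrainableQuadratic()}),
        ({"bar": zero_dataset()}, {"foo": _PseudoTrainableQuadratic()}),
    ],
)
def test_bayesian_optimizer_optimize_raises_for_invalid_keys(
    datasets: dict, models: dict
) -> None:
    ...  # body as in Example 2 above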
Example 3
def test_bayesian_optimizer_optimize_raises_for_negative_steps(
        num_steps: int) -> None:
    optimizer = BayesianOptimizer(_quadratic_observer,
                                  one_dimensional_range(-1, 1))

    with pytest.raises(ValueError, match="num_steps"):
        optimizer.optimize(num_steps, {"": zero_dataset()},
                           {"": _PseudoTrainableQuadratic()})
Example 4
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys(
        datasets: Dict[str, Dataset],
        model_specs: Dict[str, TrainableProbabilisticModel]) -> None:
    optimizer = BayesianOptimizer(lambda x: {"foo": Dataset(x, x[:1])},
                                  one_dimensional_range(-1, 1))
    rule = FixedAcquisitionRule(tf.constant([[0.0]]))
    with pytest.raises(ValueError):
        optimizer.optimize(10, datasets, model_specs, rule)
Example 5
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys_and_default_acquisition(
) -> None:
    optimizer = BayesianOptimizer(lambda x: x[:1], Box([-1], [1]))
    data, models = {"foo": empty_dataset([1], [1])}, {"foo": _PseudoTrainableQuadratic()}
    with pytest.raises(ValueError):
        optimizer.optimize(3, data, models)
Example 6
def test_bayesian_optimizer_optimize_for_failed_step(
        observer: Observer, model: TrainableProbabilisticModel,
        rule: AcquisitionRule) -> None:
    optimizer = BayesianOptimizer(observer, Box([0], [1]))
    data, models = {"": mk_dataset([[0.0]], [[0.0]])}, {"": model}
    result, history = optimizer.optimize(3, data, models, rule).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 1
Example 7
def test_bayesian_optimizer_optimize_raises_for_negative_steps(
        num_steps: int) -> None:
    optimizer = BayesianOptimizer(_quadratic_observer, Box([-1], [1]))

    data, models = {"": empty_dataset([1], [1])}, {"": _PseudoTrainableQuadratic()}
    with pytest.raises(ValueError, match="num_steps"):
        optimizer.optimize(num_steps, data, models)
Example 8
def test_bayesian_optimizer_optimize_returns_default_acquisition_state_of_correct_type(
) -> None:
    optimizer = BayesianOptimizer(lambda x: {OBJECTIVE: Dataset(x, x[:1])},
                                  one_dimensional_range(-1, 1))
    res: OptimizationResult[None] = optimizer.optimize(
        3, {OBJECTIVE: zero_dataset()},
        {OBJECTIVE: _PseudoTrainableQuadratic()})

    if res.error is not None:
        raise res.error

    assert all(logging_state.acquisition_state is None
               for logging_state in res.history)
Example 9
def test_bayesian_optimizer_optimize_for_failed_step(
        observer: Observer, model: TrainableProbabilisticModel,
        rule: AcquisitionRule) -> None:
    optimizer = BayesianOptimizer(observer, one_dimensional_range(0, 1))
    result, history = optimizer.optimize(3, {"": zero_dataset()}, {"": model}, rule).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 1
Example 10
def test_bayesian_optimizer_calls_observer_once_per_iteration(steps: int) -> None:
    class _CountingObserver:
        call_count = 0

        def __call__(self, x: tf.Tensor) -> Dataset:
            self.call_count += 1
            return Dataset(x, tf.reduce_sum(x ** 2, axis=-1, keepdims=True))

    observer = _CountingObserver()
    optimizer = BayesianOptimizer(observer, Box([-1], [1]))
    data = mk_dataset([[0.5]], [[0.25]])

    optimizer.optimize(steps, data, _PseudoTrainableQuadratic()).final_result.unwrap()

    assert observer.call_count == steps
Example 11
def test_bayesian_optimizer_optimize_for_uncopyable_model() -> None:
    class _UncopyableModel(_PseudoTrainableQuadratic):
        _optimize_count = 0

        def optimize(self, dataset: Dataset) -> None:
            self._optimize_count += 1

        def __deepcopy__(self, memo: dict[int, object]) -> _UncopyableModel:
            if self._optimize_count >= 3:
                raise _Whoops

            return self

    rule = FixedAcquisitionRule([[0.0]])
    result, history = (
        BayesianOptimizer(_quadratic_observer, Box([0], [1]))
        .optimize(10, {"": mk_dataset([[0.0]], [[0.0]])}, {"": _UncopyableModel()}, rule)
        .astuple()
    )

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 4
Example 12
def test_optimizer_finds_minima_of_the_branin_function(
    num_steps: int, acquisition_rule: AcquisitionRule
) -> None:
    search_space = Box([0, 0], [1, 1])

    def build_model(data: Dataset) -> GaussianProcessRegression:
        variance = tf.math.reduce_variance(data.observations)
        kernel = gpflow.kernels.Matern52(variance, tf.constant([0.2, 0.2], tf.float64))
        gpr = gpflow.models.GPR((data.query_points, data.observations), kernel, noise_variance=1e-5)
        gpflow.utilities.set_trainable(gpr.likelihood, False)
        return GaussianProcessRegression(gpr)

    initial_query_points = search_space.sample(5)
    observer = mk_observer(branin, OBJECTIVE)
    initial_data = observer(initial_query_points)
    model = build_model(initial_data[OBJECTIVE])

    dataset = (
        BayesianOptimizer(observer, search_space)
        .optimize(num_steps, initial_data, {OBJECTIVE: model}, acquisition_rule)
        .try_get_final_datasets()[OBJECTIVE]
    )

    arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))

    best_y = dataset.observations[arg_min_idx]
    best_x = dataset.query_points[arg_min_idx]

    relative_minimizer_err = tf.abs((best_x - BRANIN_MINIMIZERS) / BRANIN_MINIMIZERS)
    # these accuracies are the current best for the given number of optimization steps,
    # which makes this a regression test
    assert tf.reduce_any(tf.reduce_all(relative_minimizer_err < 0.03, axis=-1), axis=0)
    npt.assert_allclose(best_y, BRANIN_MINIMUM, rtol=0.03)
Example 13
def test_bayesian_optimizer_optimizes_initial_model(
        fit_initial_model: bool) -> None:
    class _CountingOptimizerModel(_PseudoTrainableQuadratic):
        _optimize_count = 0

        def optimize(self, dataset: Dataset) -> None:
            self._optimize_count += 1

    rule = FixedAcquisitionRule([[0.0]])
    model = _CountingOptimizerModel()

    final_opt_state, _ = (
        BayesianOptimizer(_quadratic_observer, Box([0], [1]))
        .optimize(
            1,
            {"": mk_dataset([[0.0]], [[0.0]])},
            {"": model},
            rule,
            fit_initial_model=fit_initial_model,
        )
        .astuple()
    )
    final_model = final_opt_state.unwrap().model

    if fit_initial_model:  # optimized at start and end of first BO step
        assert final_model._optimize_count == 2  # type: ignore
    else:  # optimized just at end of first BO step
        assert final_model._optimize_count == 1  # type: ignore
Example 14
def test_bayesian_optimizer_optimize_tracked_state() -> None:
    class _CountingRule(AcquisitionRule[State[Optional[int], TensorType],
                                        Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
        ) -> State[int | None, TensorType]:
            def go(state: int | None) -> tuple[int | None, TensorType]:
                new_state = 0 if state is None else state + 1
                return new_state, tf.constant([[10.0]], tf.float64) + new_state

            return go

    class _DecreasingVarianceModel(QuadraticMeanAndRBFKernel,
                                   TrainableProbabilisticModel):
        def __init__(self, data: Dataset):
            super().__init__()
            self._data = data

        def predict(self,
                    query_points: TensorType) -> tuple[TensorType, TensorType]:
            mean, var = super().predict(query_points)
            return mean, var / len(self._data)

        def update(self, dataset: Dataset) -> None:
            self._data = dataset

        def optimize(self, dataset: Dataset) -> None:
            pass

    initial_data = mk_dataset([[0.0]], [[0.0]])
    model = _DecreasingVarianceModel(initial_data)
    _, history = (
        BayesianOptimizer(_quadratic_observer, Box([0], [1]))
        .optimize(3, {"": initial_data}, {"": model}, _CountingRule())
        .astuple()
    )

    assert [record.acquisition_state for record in history] == [None, 0, 1]

    assert_datasets_allclose(history[0].datasets[""], initial_data)
    assert_datasets_allclose(history[1].datasets[""],
                             mk_dataset([[0.0], [10.0]], [[0.0], [100.0]]))
    assert_datasets_allclose(
        history[2].datasets[""],
        mk_dataset([[0.0], [10.0], [11.0]], [[0.0], [100.0], [121.0]]))

    for step in range(3):
        assert history[step].model == history[step].models[""]
        assert history[step].dataset == history[step].datasets[""]

        _, variance_from_saved_model = (history[step].models[""].predict(
            tf.constant([[0.0]], tf.float64)))
        npt.assert_allclose(variance_from_saved_model, 1.0 / (step + 1))
Example 15
def test_bayesian_optimizer_optimize_tracked_state() -> None:
    class _CountingRule(AcquisitionRule[int, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[TensorType, int]:
            new_state = 0 if state is None else state + 1
            return tf.constant([[10.0]]) + new_state, new_state

    class _DecreasingVarianceModel(QuadraticMeanAndRBFKernel,
                                   TrainableProbabilisticModel):
        def __init__(self, data: Dataset):
            super().__init__()
            self._data = data

        def predict(self,
                    query_points: TensorType) -> Tuple[TensorType, TensorType]:
            mean, var = super().predict(query_points)
            return mean, var / len(self._data)

        def update(self, dataset: Dataset) -> None:
            self._data = dataset

        def optimize(self, dataset: Dataset) -> None:
            pass

    _, history = (
        BayesianOptimizer(_quadratic_observer, one_dimensional_range(0, 1))
        .optimize(
            3, {"": zero_dataset()}, {"": _DecreasingVarianceModel(zero_dataset())}, _CountingRule()
        )
        .astuple()
    )

    assert [record.acquisition_state for record in history] == [None, 0, 1]

    assert_datasets_allclose(history[0].datasets[""], zero_dataset())
    assert_datasets_allclose(
        history[1].datasets[""],
        Dataset(tf.constant([[0.0], [10.0]]), tf.constant([[0.0], [100.0]])),
    )
    assert_datasets_allclose(
        history[2].datasets[""],
        Dataset(tf.constant([[0.0], [10.0], [11.0]]),
                tf.constant([[0.0], [100.0], [121.0]])),
    )

    for step in range(3):
        _, variance_from_saved_model = history[step].models[""].predict(
            tf.constant([[0.0]]))
        npt.assert_allclose(variance_from_saved_model, 1.0 / (step + 1))
Example 16
def test_bayesian_optimizer_optimize_doesnt_track_state_if_told_not_to() -> None:
    class _UncopyableModel(_PseudoTrainableQuadratic):
        def __deepcopy__(self, memo: dict[int, object]) -> NoReturn:
            assert False

    data, models = {OBJECTIVE: empty_dataset([1], [1])}, {OBJECTIVE: _UncopyableModel()}
    history = (
        BayesianOptimizer(_quadratic_observer, Box([-1], [1]))
        .optimize(5, data, models, track_state=False)
        .history
    )
    assert len(history) == 0
Example 17
def test_bayesian_optimizer_calls_observer_once_per_iteration(
        steps: int) -> None:
    class _CountingObserver:
        call_count = 0

        def __call__(self, x: tf.Tensor) -> Dict[str, Dataset]:
            self.call_count += 1
            return {
                OBJECTIVE: Dataset(x,
                                   tf.reduce_sum(x**2, axis=-1, keepdims=True))
            }

    observer = _CountingObserver()
    optimizer = BayesianOptimizer(observer, one_dimensional_range(-1, 1))
    data = Dataset(tf.constant([[0.5]]), tf.constant([[0.25]]))

    optimizer.optimize(
        steps, {OBJECTIVE: data}, {OBJECTIVE: _PseudoTrainableQuadratic()}
    ).final_result.unwrap()

    assert observer.call_count == steps
Example 18
def test_bayesian_optimizer_optimize_doesnt_track_state_if_told_not_to(
) -> None:
    class _UncopyableModel(_PseudoTrainableQuadratic):
        def __deepcopy__(self, memo: Dict[int, object]) -> NoReturn:
            assert False

    history = (
        BayesianOptimizer(_quadratic_observer, one_dimensional_range(-1, 1))
        .optimize(
            5, {OBJECTIVE: zero_dataset()}, {OBJECTIVE: _UncopyableModel()}, track_state=False
        )
        .history
    )
    assert len(history) == 0
Example 19
def test_optimizer_finds_minima_of_the_scaled_branin_function(
    num_steps: int,
    acquisition_rule: AcquisitionRule[TensorType, SearchSpace]
    | AcquisitionRule[State[TensorType, AsynchronousGreedy.State | TrustRegion.State], Box],
) -> None:
    search_space = BRANIN_SEARCH_SPACE

    def build_model(data: Dataset) -> GaussianProcessRegression:
        variance = tf.math.reduce_variance(data.observations)
        kernel = gpflow.kernels.Matern52(variance, tf.constant([0.2, 0.2], tf.float64))
        scale = tf.constant(1.0, dtype=tf.float64)
        kernel.variance.prior = tfp.distributions.LogNormal(
            tf.constant(-2.0, dtype=tf.float64), scale
        )
        kernel.lengthscales.prior = tfp.distributions.LogNormal(
            tf.math.log(kernel.lengthscales), scale
        )
        gpr = gpflow.models.GPR((data.query_points, data.observations), kernel, noise_variance=1e-5)
        gpflow.utilities.set_trainable(gpr.likelihood, False)
        return GaussianProcessRegression(gpr)

    initial_query_points = search_space.sample(5)
    observer = mk_observer(scaled_branin)
    initial_data = observer(initial_query_points)
    model = build_model(initial_data)

    dataset = (
        BayesianOptimizer(observer, search_space)
        .optimize(num_steps, initial_data, model, acquisition_rule)
        .try_get_final_dataset()
    )

    arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))

    best_y = dataset.observations[arg_min_idx]
    best_x = dataset.query_points[arg_min_idx]

    relative_minimizer_err = tf.abs((best_x - BRANIN_MINIMIZERS) / BRANIN_MINIMIZERS)
    # these accuracies are the current best for the given number of optimization steps,
    # which makes this a regression test
    assert tf.reduce_any(tf.reduce_all(relative_minimizer_err < 0.05, axis=-1), axis=0)
    npt.assert_allclose(best_y, SCALED_BRANIN_MINIMUM, rtol=0.005)

    # check that acquisition functions defined as classes aren't being retraced unnecessarily
    if isinstance(acquisition_rule, EfficientGlobalOptimization):
        acquisition_function = acquisition_rule._acquisition_function
        if isinstance(acquisition_function, AcquisitionFunctionClass):
            assert acquisition_function.__call__._get_tracing_count() == 3  # type: ignore
def test_two_layer_dgp_optimizer_finds_minima_of_michalewicz_function(
    num_steps: int, acquisition_rule: AcquisitionRule[TensorType, SearchSpace], keras_float: None
) -> None:

    # this unit test sometimes fails on the full Michalewicz search space,
    # so for stability we reduce its size here
    search_space = Box(MICHALEWICZ_2_MINIMIZER[0] - 0.5, MICHALEWICZ_2_MINIMIZER[0] + 0.5)

    def build_model(data: Dataset) -> DeepGaussianProcess:
        epochs = int(2e3)
        batch_size = 100

        dgp = two_layer_dgp_model(data.query_points)

        def scheduler(epoch: int, lr: float) -> float:
            if epoch == epochs // 2:
                return lr * 0.1
            else:
                return lr

        optimizer = tf.optimizers.Adam(0.01)
        fit_args = {
            "batch_size": batch_size,
            "epochs": epochs,
            "verbose": 0,
            "callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler),
        }

        return DeepGaussianProcess(model=dgp, optimizer=optimizer, fit_args=fit_args)

    initial_query_points = search_space.sample(50)
    observer = mk_observer(michalewicz, OBJECTIVE)
    initial_data = observer(initial_query_points)
    model = build_model(initial_data[OBJECTIVE])
    dataset = (
        BayesianOptimizer(observer, search_space)
        .optimize(num_steps, initial_data, {OBJECTIVE: model}, acquisition_rule, track_state=False)
        .try_get_final_dataset()
    )
    arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))

    best_y = dataset.observations[arg_min_idx]
    best_x = dataset.query_points[arg_min_idx]
    relative_minimizer_err = tf.abs((best_x - MICHALEWICZ_2_MINIMIZER) / MICHALEWICZ_2_MINIMIZER)

    assert tf.reduce_all(relative_minimizer_err < 0.03, axis=-1)
    npt.assert_allclose(best_y, MICHALEWICZ_2_MINIMUM, rtol=0.03)
Example 21
def test_multi_objective_optimizer_finds_pareto_front_of_the_VLMOP2_function(
        num_steps: int, acquisition_rule: AcquisitionRule[TensorType, Box],
        convergence_threshold: float) -> None:
    search_space = Box([-2, -2], [2, 2])

    def build_stacked_independent_objectives_model(
            data: Dataset) -> ModelStack:
        gprs = []
        for idx in range(2):
            single_obj_data = Dataset(
                data.query_points, tf.gather(data.observations, [idx], axis=1))
            variance = tf.math.reduce_variance(single_obj_data.observations)
            kernel = gpflow.kernels.Matern52(
                variance, tf.constant([0.2, 0.2], tf.float64))
            gpr = gpflow.models.GPR(single_obj_data.astuple(),
                                    kernel,
                                    noise_variance=1e-5)
            gpflow.utilities.set_trainable(gpr.likelihood, False)
            gprs.append((GaussianProcessRegression(gpr), 1))

        return ModelStack(*gprs)

    observer = mk_observer(VLMOP2().objective(), OBJECTIVE)

    initial_query_points = search_space.sample(10)
    initial_data = observer(initial_query_points)

    model = build_stacked_independent_objectives_model(initial_data[OBJECTIVE])

    dataset = (
        BayesianOptimizer(observer, search_space)
        .optimize(num_steps, initial_data, {OBJECTIVE: model}, acquisition_rule)
        .try_get_final_datasets()[OBJECTIVE]
    )

    # A small log hypervolume difference corresponds to a successful optimization.
    ref_point = get_reference_point(dataset.observations)

    obs_hv = Pareto(dataset.observations).hypervolume_indicator(ref_point)
    ideal_pf = tf.cast(VLMOP2().gen_pareto_optimal_points(100),
                       dtype=tf.float64)
    ideal_hv = Pareto(ideal_pf).hypervolume_indicator(ref_point)

    assert tf.math.log(ideal_hv - obs_hv) < convergence_threshold
Example 22
def test_bayesian_optimizer_optimize_is_noop_for_zero_steps() -> None:
    class _UnusableModel(TrainableProbabilisticModel):
        def predict(self, query_points: TensorType) -> NoReturn:
            assert False

        def predict_joint(self, query_points: TensorType) -> NoReturn:
            assert False

        def sample(self, query_points: TensorType,
                   num_samples: int) -> NoReturn:
            assert False

        def update(self, dataset: Dataset) -> NoReturn:
            assert False

        def optimize(self, dataset: Dataset) -> NoReturn:
            assert False

    class _UnusableRule(AcquisitionRule[None, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: None = None,
        ) -> NoReturn:
            assert False

    def _unusable_observer(x: tf.Tensor) -> NoReturn:
        assert False

    data = {"": mk_dataset([[0.0]], [[0.0]])}
    result, history = (
        BayesianOptimizer(_unusable_observer, Box([-1], [1]))
        .optimize(0, data, {"": _UnusableModel()}, _UnusableRule())
        .astuple()
    )
    assert history == []
    final_data = result.unwrap().datasets
    assert len(final_data) == 1
    assert_datasets_allclose(final_data[""], data[""])
Example 23
def test_bayesian_optimizer_uses_specified_acquisition_state(
    starting_state: int | None,
    expected_states_received: list[int | None],
    final_acquisition_state: int | None,
) -> None:
    class Rule(AcquisitionRule[State[Optional[int], TensorType], Box]):
        def __init__(self) -> None:
            self.states_received: list[int | None] = []

        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
        ) -> State[int | None, TensorType]:
            def go(state: int | None) -> tuple[int | None, TensorType]:
                self.states_received.append(state)

                if state is None:
                    state = 0

                return state + 1, tf.constant([[0.0]], tf.float64)

            return go

    rule = Rule()

    data, models = {"": mk_dataset([[0.0]], [[0.0]])}, {"": _PseudoTrainableQuadratic()}
    final_state, history = (
        BayesianOptimizer(lambda x: {"": Dataset(x, x**2)}, Box([-1], [1]))
        .optimize(3, data, models, rule, starting_state)
        .astuple()
    )

    assert rule.states_received == expected_states_received
    assert final_state.unwrap().acquisition_state == final_acquisition_state
    assert [record.acquisition_state
            for record in history] == expected_states_received
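This test is also driven by a parametrization that the snippet omits. Given how the rule above increments its state, one consistent set of cases would look like the sketch below; the concrete tuples are an assumption, derived only from the rule's behaviour over three optimization steps.

import pytest

# Starting from no state the rule receives [None, 1, 2] and ends at 3;
# starting from 3 it receives [3, 4, 5] and ends at 6.
@pytest.mark.parametrize(
    "starting_state, expected_states_received, final_acquisition_state",
    [(None, [None, 1, 2], 3), (3, [3, 4, 5], 6)],
)
def test_bayesian_optimizer_uses_specified_acquisition_state(
    starting_state, expected_states_received, final_acquisition_state
) -> None:
    ...  # body as in Example 23 above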
Example 24
def test_bayesian_optimizer_uses_specified_acquisition_state(
    starting_state: Optional[int],
    expected_states_received: List[Optional[int]],
    final_acquisition_state: Optional[int],
) -> None:
    class Rule(AcquisitionRule[int, Box]):
        def __init__(self):
            self.states_received = []

        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[TensorType, int]:
            self.states_received.append(state)

            if state is None:
                state = 0

            return tf.constant([[0.0]]), state + 1

    rule = Rule()

    final_state, history = (
        BayesianOptimizer(lambda x: {"": Dataset(x, x**2)}, one_dimensional_range(-1, 1))
        .optimize(3, {"": zero_dataset()}, {"": _PseudoTrainableQuadratic()}, rule, starting_state)
        .astuple()
    )

    assert rule.states_received == expected_states_received
    assert final_state.unwrap().acquisition_state == final_acquisition_state
    assert [record.acquisition_state
            for record in history] == expected_states_received
Example 25
def test_bayesian_optimizer_uses_specified_acquisition_state(
        starting_state: Optional[int],
        expected_states: List[Optional[int]]) -> None:
    class Rule(AcquisitionRule[int, Box]):
        def __init__(self):
            self.states_received = []

        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[QueryPoints, int]:
            self.states_received.append(state)

            if state is None:
                state = 0

            return tf.constant([[0.0]]), state + 1

    rule = Rule()

    res = BayesianOptimizer(
        lambda x: {"": Dataset(x, x**2)}, one_dimensional_range(-1, 1)
    ).optimize(3, {"": zero_dataset()}, {"": _PseudoTrainableQuadratic()}, rule, starting_state)

    if res.error is not None:
        raise res.error

    assert rule.states_received == expected_states
    assert [state.acquisition_state
            for state in res.history] == expected_states
Example 26
def test_bayesian_optimizer_can_use_two_gprs_for_objective_defined_by_two_dimensions(
) -> None:
    class ExponentialWithUnitVariance(GaussianProcess,
                                      PseudoTrainableProbModel):
        def __init__(self):
            super().__init__([lambda x: tf.exp(-x)], [rbf()])

    class LinearWithUnitVariance(GaussianProcess, PseudoTrainableProbModel):
        def __init__(self):
            super().__init__([lambda x: 2 * x], [rbf()])

    LINEAR = "linear"
    EXPONENTIAL = "exponential"

    class AdditionRule(AcquisitionRule[int, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            previous_state: int | None = None,
        ) -> tuple[TensorType, int]:
            if previous_state is None:
                previous_state = 1

            candidate_query_points = search_space.sample(previous_state)
            linear_predictions, _ = models[LINEAR].predict(
                candidate_query_points)
            exponential_predictions, _ = models[EXPONENTIAL].predict(
                candidate_query_points)

            target = linear_predictions + exponential_predictions

            optimum_idx = tf.argmin(target, axis=0)[0]
            next_query_points = tf.expand_dims(
                candidate_query_points[optimum_idx, ...], axis=0)

            return next_query_points, previous_state * 2

    def linear_and_exponential(query_points: tf.Tensor) -> dict[str, Dataset]:
        return {
            LINEAR: Dataset(query_points, 2 * query_points),
            EXPONENTIAL: Dataset(query_points, tf.exp(-query_points)),
        }

    data: Mapping[str, Dataset] = {
        LINEAR: Dataset(tf.constant([[0.0]]), tf.constant([[0.0]])),
        EXPONENTIAL: Dataset(tf.constant([[0.0]]), tf.constant([[1.0]])),
    }

    models: Mapping[str, TrainableProbabilisticModel] = {
        LINEAR: LinearWithUnitVariance(),
        EXPONENTIAL: ExponentialWithUnitVariance(),
    }

    data = (
        BayesianOptimizer(linear_and_exponential, Box(tf.constant([-2.0]), tf.constant([2.0])))
        .optimize(20, data, models, AdditionRule())
        .try_get_final_datasets()
    )

    objective_values = data[LINEAR].observations + data[
        EXPONENTIAL].observations
    min_idx = tf.argmin(objective_values, axis=0)[0]
    npt.assert_allclose(data[LINEAR].query_points[min_idx],
                        -tf.math.log(2.0),
                        rtol=0.01)
Example 27
def test_bayesian_optimizer_can_use_two_gprs_for_objective_defined_by_two_dimensions(
) -> None:
    class ExponentialWithUnitVariance(GaussianMarginal,
                                      PseudoTrainableProbModel):
        def predict(
            self, query_points: QueryPoints
        ) -> Tuple[ObserverEvaluations, TensorType]:
            return tf.exp(-query_points), tf.ones_like(query_points)

    class LinearWithUnitVariance(GaussianMarginal, PseudoTrainableProbModel):
        def predict(
            self, query_points: QueryPoints
        ) -> Tuple[ObserverEvaluations, TensorType]:
            return 2 * query_points, tf.ones_like(query_points)

    LINEAR = "linear"
    EXPONENTIAL = "exponential"

    class AdditionRule(AcquisitionRule[int, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            previous_state: Optional[int],
        ) -> Tuple[QueryPoints, int]:
            if previous_state is None:
                previous_state = 1

            candidate_query_points = search_space.sample(previous_state)
            linear_predictions, _ = models[LINEAR].predict(
                candidate_query_points)
            exponential_predictions, _ = models[EXPONENTIAL].predict(
                candidate_query_points)

            target = linear_predictions + exponential_predictions

            optimum_idx = tf.argmin(target, axis=0)[0]
            next_query_points = tf.expand_dims(
                candidate_query_points[optimum_idx, ...], axis=0)

            return next_query_points, previous_state * 2

    def linear_and_exponential(query_points: tf.Tensor) -> Dict[str, Dataset]:
        return {
            LINEAR: Dataset(query_points, 2 * query_points),
            EXPONENTIAL: Dataset(query_points, tf.exp(-query_points)),
        }

    data = {
        LINEAR: Dataset(tf.constant([[0.0]]), tf.constant([[0.0]])),
        EXPONENTIAL: Dataset(tf.constant([[0.0]]), tf.constant([[1.0]])),
    }

    models: Dict[str, TrainableProbabilisticModel] = {  # mypy can't infer this type
        LINEAR: LinearWithUnitVariance(),
        EXPONENTIAL: ExponentialWithUnitVariance(),
    }

    res = BayesianOptimizer(
        linear_and_exponential, Box(tf.constant([-2.0]), tf.constant([2.0]))
    ).optimize(20, data, models, AdditionRule())

    if res.error is not None:
        raise res.error

    objective_values = res.datasets[LINEAR].observations + res.datasets[
        EXPONENTIAL].observations
    min_idx = tf.argmin(objective_values, axis=0)[0]
    npt.assert_allclose(res.datasets[LINEAR].query_points[min_idx],
                        -tf.math.log(2.0),
                        rtol=0.01)
def test_optimizer_finds_minima_of_Gardners_Simulation_1(
    num_steps: int, acquisition_function_builder
) -> None:
    """
    Test the convergence of constrained BO algorithms on the synthetic
    "simulation 1" experiment of :cite:`gardner14`.
    """
    search_space = Box([0, 0], [6, 6])

    def objective(input_data):
        x, y = input_data[..., -2], input_data[..., -1]
        z = tf.cos(2.0 * x) * tf.cos(y) + tf.sin(x)
        return z[:, None]

    def constraint(input_data):
        x, y = input_data[:, -2], input_data[:, -1]
        z = tf.cos(x) * tf.cos(y) - tf.sin(x) * tf.sin(y)
        return z[:, None]

    MINIMUM = -2.0
    MINIMIZER = [math.pi * 1.5, 0.0]

    OBJECTIVE = "OBJECTIVE"
    CONSTRAINT = "CONSTRAINT"

    def observer(query_points):  # observe both objective and constraint data
        return {
            OBJECTIVE: Dataset(query_points, objective(query_points)),
            CONSTRAINT: Dataset(query_points, constraint(query_points)),
        }

    num_initial_points = 5
    initial_data = observer(search_space.sample(num_initial_points))

    def build_model(data):
        variance = tf.math.reduce_variance(data.observations)
        kernel = gpflow.kernels.Matern52(variance, tf.constant([0.2, 0.2], tf.float64))
        gpr = gpflow.models.GPR((data.query_points, data.observations), kernel, noise_variance=1e-5)
        gpflow.utilities.set_trainable(gpr.likelihood, False)
        return GaussianProcessRegression(gpr)

    models = map_values(build_model, initial_data)

    pof = ProbabilityOfFeasibility(threshold=0.5)
    acq = acquisition_function_builder(OBJECTIVE, pof.using(CONSTRAINT))
    rule: EfficientGlobalOptimization[Box] = EfficientGlobalOptimization(acq)

    dataset = (
        BayesianOptimizer(observer, search_space)
        .optimize(num_steps, initial_data, models, rule)
        .try_get_final_datasets()[OBJECTIVE]
    )

    arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))

    best_y = dataset.observations[arg_min_idx]
    best_x = dataset.query_points[arg_min_idx]

    relative_minimizer_err = tf.abs(best_x - MINIMIZER)
    # these accuracies are the current best for the given number of optimization steps,
    # which makes this a regression test
    assert tf.reduce_all(relative_minimizer_err < 0.03, axis=-1)
    npt.assert_allclose(best_y, MINIMUM, rtol=0.03)
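The `acquisition_function_builder` argument above is supplied by a parametrization that isn't shown. In trieste, `ExpectedConstrainedImprovement` is a natural candidate, since it is constructed from an objective tag and a constraint builder exactly as on the `acq = ...` line in the test. The sketch below is an assumed parametrization, not the project's actual one, and the import path may differ between trieste versions.

import pytest
from trieste.acquisition import ExpectedConstrainedImprovement

# Assumed parametrization; the original test may exercise other constrained
# acquisition builders or a different number of steps.
@pytest.mark.parametrize(
    "num_steps, acquisition_function_builder",
    [(12, ExpectedConstrainedImprovement)],
)
def test_optimizer_finds_minima_of_Gardners_Simulation_1(
    num_steps: int, acquisition_function_builder
) -> None:
    ...  # body as above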