def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys_and_default_acquisition(
) -> None:
    optimizer = BayesianOptimizer(lambda x: x[:1],
                                  one_dimensional_range(-1, 1))
    with pytest.raises(ValueError):
        optimizer.optimize(3, {"foo": zero_dataset()},
                           {"foo": _PseudoTrainableQuadratic()})
def test_bayesian_optimizer_optimize_raises_for_negative_steps(
        num_steps: int) -> None:
    optimizer = BayesianOptimizer(_quadratic_observer,
                                  one_dimensional_range(-1, 1))

    with pytest.raises(ValueError, match="num_steps"):
        optimizer.optimize(num_steps, {"": zero_dataset()},
                           {"": _PseudoTrainableQuadratic()})
def test_bayesian_optimizer_optimize_raises_for_invalid_keys(
    datasets: dict[str, Dataset], models: dict[str, TrainableProbabilisticModel]
) -> None:
    search_space = Box([-1], [1])
    optimizer = BayesianOptimizer(lambda x: {"foo": Dataset(x, x)}, search_space)
    rule = FixedAcquisitionRule([[0.0]])
    with pytest.raises(ValueError):
        optimizer.optimize(10, datasets, models, rule)
Example #4
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys(
        datasets: Dict[str, Dataset],
        model_specs: Dict[str, TrainableProbabilisticModel]) -> None:
    optimizer = BayesianOptimizer(lambda x: {"foo": Dataset(x, x[:1])},
                                  one_dimensional_range(-1, 1))
    rule = FixedAcquisitionRule(tf.constant([[0.0]]))
    with pytest.raises(ValueError):
        optimizer.optimize(10, datasets, model_specs, rule)
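FixedAcquisitionRule is another omitted test helper. A minimal sketch of the behaviour its name suggests; the real helper subclasses Trieste's AcquisitionRule, whose acquire signature differs between versions:

import tensorflow as tf


class FixedAcquisitionRule:
    # Assumed helper: an acquisition rule that ignores the data and models and
    # always proposes the same query points.
    def __init__(self, query_points) -> None:
        self._query_points = tf.convert_to_tensor(query_points)

    def acquire(self, search_space, datasets, models):
        # Signature assumed; some versions also thread acquisition state through here.
        return self._query_points
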
Example #5
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys_and_default_acquisition(
) -> None:
    optimizer = BayesianOptimizer(lambda x: x[:1], Box([-1], [1]))
    data, models = {
        "foo": empty_dataset([1], [1])
    }, {
        "foo": _PseudoTrainableQuadratic()
    }
    with pytest.raises(ValueError):
        optimizer.optimize(3, data, models)
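empty_dataset (used above) and mk_dataset (used in the later examples) are further omitted helpers. Plausible definitions, assumed rather than taken from the Trieste tests:

import tensorflow as tf

from trieste.data import Dataset


def empty_dataset(query_point_shape, observation_shape) -> Dataset:
    # Assumed helper: a Dataset with zero rows and the given event shapes.
    return Dataset(
        tf.zeros([0] + list(query_point_shape), tf.float64),
        tf.zeros([0] + list(observation_shape), tf.float64),
    )


def mk_dataset(query_points, observations) -> Dataset:
    # Assumed helper: build a Dataset directly from nested lists.
    return Dataset(tf.constant(query_points, tf.float64),
                   tf.constant(observations, tf.float64))
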
Example #6
def test_bayesian_optimizer_optimize_raises_for_negative_steps(
        num_steps: int) -> None:
    optimizer = BayesianOptimizer(_quadratic_observer, Box([-1], [1]))

    data, models = {
        "": empty_dataset([1], [1])
    }, {
        "": _PseudoTrainableQuadratic()
    }
    with pytest.raises(ValueError, match="num_steps"):
        optimizer.optimize(num_steps, data, models)


def test_bayesian_optimizer_calls_observer_once_per_iteration(steps: int) -> None:
    class _CountingObserver:
        call_count = 0

        def __call__(self, x: tf.Tensor) -> Dataset:
            self.call_count += 1
            return Dataset(x, tf.reduce_sum(x ** 2, axis=-1, keepdims=True))

    observer = _CountingObserver()
    optimizer = BayesianOptimizer(observer, Box([-1], [1]))
    data = mk_dataset([[0.5]], [[0.25]])

    optimizer.optimize(steps, data, _PseudoTrainableQuadratic()).final_result.unwrap()

    assert observer.call_count == steps
Example #8
def test_bayesian_optimizer_optimize_for_failed_step(
        observer: Observer, model: TrainableProbabilisticModel,
        rule: AcquisitionRule) -> None:
    optimizer = BayesianOptimizer(observer, Box([0], [1]))
    data, models = {"": mk_dataset([[0.0]], [[0.0]])}, {"": model}
    result, history = optimizer.optimize(3, data, models, rule).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 1
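In the failed-step example above, _Whoops and the failing observer, model and rule come from parametrized fixtures the listing omits. One plausible way a step could be made to fail, assuming the _PseudoTrainableQuadratic sketch given earlier; the real fixtures may differ:

class _Whoops(Exception):
    # Assumed helper: a marker exception used to simulate a failing component.
    pass


class _BrokenModel(_PseudoTrainableQuadratic):
    # Assumed helper: a model whose training step raises, so the optimization
    # step fails and the error is captured in the returned result rather than
    # propagating out of optimize().
    def optimize(self, dataset: Dataset) -> None:
        raise _Whoops
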
Example #9
def test_bayesian_optimizer_optimize_returns_default_acquisition_state_of_correct_type(
) -> None:
    optimizer = BayesianOptimizer(lambda x: {OBJECTIVE: Dataset(x, x[:1])},
                                  one_dimensional_range(-1, 1))
    res: OptimizationResult[None] = optimizer.optimize(
        3, {OBJECTIVE: zero_dataset()},
        {OBJECTIVE: _PseudoTrainableQuadratic()})

    if res.error is not None:
        raise res.error

    assert all(logging_state.acquisition_state is None
               for logging_state in res.history)


def test_bayesian_optimizer_calls_observer_once_per_iteration(
        steps: int) -> None:
    class _CountingObserver:
        call_count = 0

        def __call__(self, x: tf.Tensor) -> Dict[str, Dataset]:
            self.call_count += 1
            return {
                OBJECTIVE: Dataset(x,
                                   tf.reduce_sum(x**2, axis=-1, keepdims=True))
            }

    observer = _CountingObserver()
    optimizer = BayesianOptimizer(observer, one_dimensional_range(-1, 1))
    data = Dataset(tf.constant([[0.5]]), tf.constant([[0.25]]))

    optimizer.optimize(steps, {
        OBJECTIVE: data
    }, {
        OBJECTIVE: _PseudoTrainableQuadratic()
    }).final_result.unwrap()

    assert observer.call_count == steps


def test_bayesian_optimizer_optimize_for_failed_step(
        observer: Observer, model: TrainableProbabilisticModel,
        rule: AcquisitionRule) -> None:
    optimizer = BayesianOptimizer(observer, one_dimensional_range(0, 1))
    result, history = optimizer.optimize(3, {
        "": zero_dataset()
    }, {
        "": model
    }, rule).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 1