Пример #1
0
def test_bayesian_optimizer_optimize_for_uncopyable_model() -> None:
    """A model whose deep copy fails mid-run should surface the error via the
    result, while the history keeps the steps recorded before the failure."""

    class _UncopyableModel(_PseudoTrainableQuadratic):
        # Deep-copying succeeds until the model has been optimized 3 times.
        _optimize_count = 0

        def optimize(self, dataset: Dataset) -> None:
            self._optimize_count += 1

        def __deepcopy__(self, memo: dict[int, object]) -> _UncopyableModel:
            if self._optimize_count < 3:
                return self
            raise _Whoops

    rule = FixedAcquisitionRule([[0.0]])
    optimizer = BayesianOptimizer(_quadratic_observer, Box([0], [1]))
    result, history = optimizer.optimize(
        10,
        {"": mk_dataset([[0.0]], [[0.0]])},
        {"": _UncopyableModel()},
        rule,
    ).astuple()

    # The copy failure is captured rather than raised, until unwrapped.
    with pytest.raises(_Whoops):
        result.unwrap()

    # Only the records produced before the copy failed are kept.
    assert len(history) == 4
Пример #2
0
def test_bayesian_optimizer_optimizes_initial_model(fit_initial_model: bool) -> None:
    """With ``fit_initial_model=True`` the model is optimized once before the
    first BO step and once at its end; otherwise only at the end."""

    class _CountingOptimizerModel(_PseudoTrainableQuadratic):
        # Counts how many times optimize() was invoked on this model.
        _optimize_count = 0

        def optimize(self, dataset: Dataset) -> None:
            self._optimize_count += 1

    model = _CountingOptimizerModel()
    optimizer = BayesianOptimizer(_quadratic_observer, Box([0], [1]))
    final_opt_state, _ = optimizer.optimize(
        1,
        {"": mk_dataset([[0.0]], [[0.0]])},
        {"": model},
        FixedAcquisitionRule([[0.0]]),
        fit_initial_model=fit_initial_model,
    ).astuple()
    final_model = final_opt_state.unwrap().model

    # The initial fit adds exactly one extra optimize() call.
    expected_count = 2 if fit_initial_model else 1
    assert final_model._optimize_count == expected_count  # type: ignore
Пример #3
0
def test_bayesian_optimizer_optimize_raises_for_invalid_keys(
    datasets: dict[str, Dataset], models: dict[str, TrainableProbabilisticModel]
) -> None:
    """Dataset/model tags that don't match the observer's must be rejected."""
    rule = FixedAcquisitionRule([[0.0]])
    optimizer = BayesianOptimizer(lambda x: {"foo": Dataset(x, x)}, Box([-1], [1]))
    with pytest.raises(ValueError):
        optimizer.optimize(10, datasets, models, rule)
Пример #4
0
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys(
    datasets: Dict[str, Dataset],
    model_specs: Dict[str, TrainableProbabilisticModel],
) -> None:
    """Tags that don't match the observer's output must raise ValueError."""
    search_space = one_dimensional_range(-1, 1)
    observer = lambda x: {"foo": Dataset(x, x[:1])}
    optimizer = BayesianOptimizer(observer, search_space)
    rule = FixedAcquisitionRule(tf.constant([[0.0]]))
    with pytest.raises(ValueError):
        optimizer.optimize(10, datasets, model_specs, rule)
Пример #5
0
class _BrokenRule(AcquisitionRule[None, SearchSpace]):
    """An acquisition rule that always fails, used to exercise error paths."""

    def acquire(
        self,
        search_space: SearchSpace,
        datasets: Mapping[str, Dataset],
        models: Mapping[str, ProbabilisticModel],
        state: None = None,
    ) -> NoReturn:
        # Unconditionally fail; callers should capture this in the result.
        raise _Whoops


@pytest.mark.parametrize(
    "observer, model, rule",
    [
        # One broken component per case: observer, model, then rule.
        (_broken_observer, _PseudoTrainableQuadratic(), FixedAcquisitionRule([[0.0]])),
        (_quadratic_observer, _BrokenModel(), FixedAcquisitionRule([[0.0]])),
        (_quadratic_observer, _PseudoTrainableQuadratic(), _BrokenRule()),
    ],
)
def test_bayesian_optimizer_optimize_for_failed_step(
    observer: Observer, model: TrainableProbabilisticModel, rule: AcquisitionRule
) -> None:
    """Whichever component fails, the error is captured in the result and the
    history holds just the single record made before the failing step."""
    optimizer = BayesianOptimizer(observer, Box([0], [1]))
    datasets = {"": mk_dataset([[0.0]], [[0.0]])}
    models = {"": model}
    result, history = optimizer.optimize(3, datasets, models, rule).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 1
Пример #6
0
def acquisition_rule() -> AcquisitionRule[TensorType, Box]:
    """Return a rule that always proposes the fixed query point [[0.0]]."""
    fixed_point = [[0.0]]
    return FixedAcquisitionRule(fixed_point)
Пример #7
0
class _BrokenRule(AcquisitionRule[None, SearchSpace]):
    """An acquisition rule that always fails, used to exercise error paths."""

    def acquire(
        self,
        search_space: SearchSpace,
        datasets: Mapping[str, Dataset],
        models: Mapping[str, ProbabilisticModel],
        state: None,
    ) -> NoReturn:
        # Unconditionally fail; callers should capture this in the result.
        raise _Whoops


@pytest.mark.parametrize(
    "observer, model, rule",
    [
        # One broken component per case: observer, model, then rule.
        (
            _broken_observer,
            _PseudoTrainableQuadratic(),
            FixedAcquisitionRule(tf.constant([[0.0]])),
        ),
        (
            _quadratic_observer,
            _BrokenModel(),
            FixedAcquisitionRule(tf.constant([[0.0]])),
        ),
        (_quadratic_observer, _PseudoTrainableQuadratic(), _BrokenRule()),
    ],
)
def test_bayesian_optimizer_optimize_for_failed_step(
    observer: Observer, model: TrainableProbabilisticModel, rule: AcquisitionRule
) -> None:
    """Run three steps with one broken component and unpack the outcome."""
    optimizer = BayesianOptimizer(observer, one_dimensional_range(0, 1))
    datasets = {"": zero_dataset()}
    models = {"": model}
    result, history = optimizer.optimize(3, datasets, models, rule).astuple()
Пример #8
0

class _BrokenRule(AcquisitionRule[NoReturn, SearchSpace]):
    """An acquisition rule that always fails, used to exercise error paths."""

    def acquire(
        self,
        search_space: SearchSpace,
        models: Mapping[str, ProbabilisticModel],
        datasets: Optional[Mapping[str, Dataset]] = None,
    ) -> NoReturn:
        # Unconditionally fail; callers should capture this in the result.
        raise _Whoops


@pytest.mark.parametrize(
    "observer, model, rule",
    [
        # One broken component per case: observer, model, then rule.
        (_broken_observer, _PseudoTrainableQuadratic(), FixedAcquisitionRule([[0.0]])),
        (_quadratic_observer, _BrokenModel(), FixedAcquisitionRule([[0.0]])),
        (_quadratic_observer, _PseudoTrainableQuadratic(), _BrokenRule()),
    ],
)
def test_bayesian_optimizer_optimize_for_failed_step(
    observer: Observer, model: TrainableProbabilisticModel, rule: AcquisitionRule[None, Box]
) -> None:
    """Whichever component fails, the error is captured in the result and the
    history holds just the single record made before the failing step."""
    optimizer = BayesianOptimizer(observer, Box([0], [1]))
    datasets = {"": mk_dataset([[0.0]], [[0.0]])}
    models = {"": model}
    result, history = optimizer.optimize(3, datasets, models, rule).astuple()

    with pytest.raises(_Whoops):
        result.unwrap()

    assert len(history) == 1