예제 #1
0
def test_async_greedy_raises_for_incorrect_query_points() -> None:
    """AsynchronousGreedy rejects non-positive ``num_query_points`` at construction."""
    for invalid_size in (0, -5):
        with pytest.raises(ValueError):
            AsynchronousGreedy(
                builder=_GreedyBatchModelMinusMeanMaximumSingleBuilder(),
                num_query_points=invalid_size,
            )
예제 #2
0
def test_async_ego_keeps_track_of_pending_points() -> None:
    """The rule accumulates pending points across calls and drops observed ones."""
    space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
    builder = _GreedyBatchModelMinusMeanMaximumSingleBuilder()
    rule: AsynchronousGreedy[Box] = AsynchronousGreedy(builder)
    empty_data = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))

    acquire = rule.acquire_single(space, empty_data, QuadraticMeanAndRBFKernel())
    state, first_point = acquire(None)
    state, second_point = acquire(state)

    assert state is not None
    assert len(state.pending_points) == 2

    # pretend the first point has now been observed
    observed = Dataset(
        query_points=first_point,
        observations=tf.constant([[1]], dtype=tf.float32),
    )
    acquire = rule.acquire_single(
        space, empty_data + observed, QuadraticMeanAndRBFKernel()
    )
    state, third_point = acquire(state)

    assert state is not None
    assert len(state.pending_points) == 2
    # the first point was observed, so only the second and third remain pending
    npt.assert_allclose(state.pending_points, tf.concat([second_point, third_point], axis=0))
예제 #3
0
            (15, False, lambda: TrustRegion()),
            (15, True, lambda: TrustRegion()),
            (
                10,
                False,
                lambda: EfficientGlobalOptimization(
                    LocalPenalizationAcquisitionFunction(
                        BRANIN_SEARCH_SPACE, ).using(OBJECTIVE),
                    num_query_points=3,
                ),
            ),
            (
                30,
                False,
                lambda: AsynchronousGreedy(
                    LocalPenalizationAcquisitionFunction(
                        BRANIN_SEARCH_SPACE, ).using(OBJECTIVE), ),
            ),
        ],
    ),
)
def test_ask_tell_optimization_finds_minima_of_the_scaled_branin_function(
    num_steps: int,
    reload_state: bool,
    acquisition_rule_fn: Callable[[], AcquisitionRule[TensorType, SearchSpace]]
    | Callable[[], AcquisitionRule[State[TensorType, AsynchronousGreedy.State
                                         | TrustRegion.State], Box], ],
) -> None:
    # For the case when optimization state is saved and reload on each iteration
    # we need to use new acquisition function object to imitate real life usage
    # hence acquisition rule factory method is passed in, instead of a rule object itself
예제 #4
0
def test_async_greedy_raises_for_non_greedy_function() -> None:
    """AsynchronousGreedy refuses a builder that is not a greedy batch builder."""
    with pytest.raises(NotImplementedError):
        # deliberately passing in the wrong object type, hence type ignore
        AsynchronousGreedy(NegativeLowerConfidenceBound())  # type: ignore
예제 #5
0
        dataset: Optional[Dataset] = None,
        pending_points: Optional[TensorType] = None,
        new_optimization_step: bool = True,
    ) -> AcquisitionFunction:
        self._update_count += 1
        return self.prepare_acquisition_function(
            model, dataset=dataset, pending_points=pending_points
        )


@random_seed
@pytest.mark.parametrize(
    "rule_fn",
    [
        lambda acq, batch_size: EfficientGlobalOptimization(acq, num_query_points=batch_size),
        lambda acq, batch_size: AsynchronousGreedy(acq, num_query_points=batch_size),
    ],
)
# As a side effect, this test ensures and EGO and AsynchronousGreedy
# behave similarly in sync mode
def test_greedy_batch_acquisition_rule_acquire(
    rule_fn: Callable[
        # callable input type(s)
        [_GreedyBatchModelMinusMeanMaximumSingleBuilder, int],
        # callable output type
        AcquisitionRule[TensorType, Box]
        | AcquisitionRule[State[TensorType, AsynchronousRuleState], Box],
    ]
) -> None:
    search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
    num_query_points = 4
예제 #6
0
        function: Optional[AcquisitionFunction],
        dataset: Dataset,
        model: ProbabilisticModel,
        pending_points: Optional[TensorType] = None,
        new_optimization_step: bool = True,
    ) -> AcquisitionFunction:
        self._update_count += 1
        return self.prepare_acquisition_function(dataset, model, pending_points)


@random_seed
@pytest.mark.parametrize(
    "rule_fn, num_query_points",
    [
        (lambda acq: EfficientGlobalOptimization(acq, num_query_points=4), 4),
        (lambda acq: AsynchronousGreedy(acq), 1),
    ],
)
# As a side effect, this test ensures and EGO and AsynchronousGreedy
# behave similarly in sync mode
def test_greedy_batch_acquisition_rule_acquire(
    rule_fn: Callable[
        # callable input type(s)
        [_GreedyBatchModelMinusMeanMaximumSingleBuilder],
        # callable output type
        AcquisitionRule[TensorType, Box]
        | AcquisitionRule[State[TensorType, AsynchronousGreedy.State], Box],
    ],
    num_query_points: int,
) -> None:
    search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
 ),
 (12, AsynchronousOptimization(num_query_points=3)),
 (
     10,
     EfficientGlobalOptimization(
         LocalPenalizationAcquisitionFunction(
             BRANIN_SEARCH_SPACE,
         ).using(OBJECTIVE),
         num_query_points=3,
     ),
 ),
 (
     10,
     AsynchronousGreedy(
         LocalPenalizationAcquisitionFunction(
             BRANIN_SEARCH_SPACE,
         ).using(OBJECTIVE),
         num_query_points=3,
     ),
 ),
 (
     10,
     EfficientGlobalOptimization(
         GIBBON(
             BRANIN_SEARCH_SPACE,
         ).using(OBJECTIVE),
         num_query_points=2,
     ),
 ),
 (15, TrustRegion()),
 (
     15,