Example #1
def test_async_ego_keeps_track_of_pending_points() -> None:
    search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
    acq = _GreedyBatchModelMinusMeanMaximumSingleBuilder()
    async_rule: AsynchronousGreedy[Box] = AsynchronousGreedy(acq)
    dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))

    state_fn = async_rule.acquire_single(search_space, dataset, QuadraticMeanAndRBFKernel())
    state, point1 = state_fn(None)
    state, point2 = state_fn(state)

    assert state is not None
    assert len(state.pending_points) == 2

    # let's pretend we saw an observation for the first point
    new_observations = Dataset(
        query_points=point1,
        observations=tf.constant([[1]], dtype=tf.float32),
    )
    state_fn = async_rule.acquire_single(
        search_space, dataset + new_observations, QuadraticMeanAndRBFKernel()
    )
    state, point3 = state_fn(state)

    assert state is not None
    assert len(state.pending_points) == 2
    # point1 has now been observed, so it is dropped from pending; point2 remains, plus the new point3
    npt.assert_allclose(state.pending_points, tf.concat([point2, point3], axis=0))
Example #2
def test_async_keeps_track_of_pending_points(
    async_rule: AcquisitionRule[State[TensorType, AsynchronousRuleState], Box]
) -> None:
    search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
    dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))

    state_fn = async_rule.acquire_single(search_space, QuadraticMeanAndRBFKernel(), dataset=dataset)
    state, point1 = state_fn(None)
    state, point2 = state_fn(state)

    assert state is not None
    assert len(state.pending_points) == 2

    # pretend we saw an observation for the first point
    new_observations = Dataset(
        query_points=point1,
        observations=tf.constant([[1]], dtype=tf.float32),
    )
    state_fn = async_rule.acquire_single(
        search_space,
        QuadraticMeanAndRBFKernel(),
        dataset=dataset + new_observations,
    )
    state, point3 = state_fn(state)

    assert state is not None
    assert len(state.pending_points) == 2

    # we observed the first point, so the pending points are
    # the second point and the new third point
    npt.assert_allclose(state.pending_points, tf.concat([point2, point3], axis=0))
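The async_rule argument above is supplied by a pytest fixture that is not shown. A minimal sketch of such a fixture, assuming the AsynchronousGreedy rule and the greedy builder from Example #1 (the real suite may well parametrize over several asynchronous rules):

@pytest.fixture
def async_rule() -> AcquisitionRule[State[TensorType, AsynchronousRuleState], Box]:
    # Hypothetical fixture: reuses the greedy single-point builder from Example #1.
    return AsynchronousGreedy(_GreedyBatchModelMinusMeanMaximumSingleBuilder())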
Example #3
def test_sparse_variational_update_updates_num_data() -> None:
    model = SparseVariational(
        _svgp(tf.zeros([1, 4])),
        Dataset(tf.zeros([3, 4]), tf.zeros([3, 1])),
    )
    model.update(Dataset(tf.zeros([5, 4]), tf.zeros([5, 1])))
    assert model.model.num_data == 5
Example #4
def test_bayesian_optimizer_optimize_tracked_state() -> None:
    class _CountingRule(AcquisitionRule[int, Box]):
        def acquire(
            self,
            search_space: Box,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            state: Optional[int],
        ) -> Tuple[TensorType, int]:
            new_state = 0 if state is None else state + 1
            return tf.constant([[10.0]]) + new_state, new_state

    class _DecreasingVarianceModel(QuadraticMeanAndRBFKernel,
                                   TrainableProbabilisticModel):
        def __init__(self, data: Dataset):
            super().__init__()
            self._data = data

        def predict(self,
                    query_points: TensorType) -> Tuple[TensorType, TensorType]:
            mean, var = super().predict(query_points)
            return mean, var / len(self._data)

        def update(self, dataset: Dataset) -> None:
            self._data = dataset

        def optimize(self, dataset: Dataset) -> None:
            pass

    _, history = (
        BayesianOptimizer(_quadratic_observer, one_dimensional_range(0, 1))
        .optimize(
            3,
            {"": zero_dataset()},
            {"": _DecreasingVarianceModel(zero_dataset())},
            _CountingRule(),
        )
        .astuple()
    )

    assert [record.acquisition_state for record in history] == [None, 0, 1]

    assert_datasets_allclose(history[0].datasets[""], zero_dataset())
    assert_datasets_allclose(
        history[1].datasets[""],
        Dataset(tf.constant([[0.0], [10.0]]), tf.constant([[0.0], [100.0]])),
    )
    assert_datasets_allclose(
        history[2].datasets[""],
        Dataset(tf.constant([[0.0], [10.0], [11.0]]),
                tf.constant([[0.0], [100.0], [121.0]])),
    )

    for step in range(3):
        _, variance_from_saved_model = history[step].models[""].predict(
            tf.constant([[0.0]]))
        npt.assert_allclose(variance_from_saved_model, 1.0 / (step + 1))
Example #5
def test_gaussian_process_deep_copyable(
        gpr_interface_factory: ModelFactoryType) -> None:
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    model, _ = gpr_interface_factory(x, fnc_2sin_x_over_3(x))
    model_copy = copy.deepcopy(model)
    x_predict = tf.constant([[50.5]], gpflow.default_float())

    # check deepcopy predicts same values as original
    mean_f, variance_f = model.predict(x_predict)
    mean_f_copy, variance_f_copy = model_copy.predict(x_predict)
    npt.assert_equal(mean_f, mean_f_copy)
    npt.assert_equal(variance_f, variance_f_copy)

    # check that updating the original doesn't break or change the deepcopy
    x_new = tf.concat(
        [x, tf.constant([[10.0], [11.0]], dtype=gpflow.default_float())], 0)
    new_data = Dataset(x_new, fnc_2sin_x_over_3(x_new))
    model.update(new_data)
    model.optimize(new_data)

    mean_f_updated, variance_f_updated = model.predict(x_predict)
    mean_f_copy_updated, variance_f_copy_updated = model_copy.predict(
        x_predict)
    npt.assert_equal(mean_f_copy_updated, mean_f_copy)
    npt.assert_equal(variance_f_copy_updated, variance_f_copy)
    npt.assert_array_compare(operator.__ne__, mean_f_updated, mean_f)
    npt.assert_array_compare(operator.__ne__, variance_f_updated, variance_f)
Example #6
@dataclass
class ReducerTestData:
    type_class: Type[Union[Sum, Product]]
    raw_reduce_op: Callable[[Sequence], float]
    dataset: Dataset = Dataset(
        np.arange(5, dtype=np.float64).reshape(-1, 1),
        np.zeros(5).reshape(-1, 1))
    query_point: tf.Tensor = tf.convert_to_tensor(np.array([[0.1], [0.2]]))
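A plausible way to feed ReducerTestData into tests, assuming Sum and Product combine acquisition functions by addition and multiplication respectively (the fixture name and parameter values are illustrative):

@pytest.fixture(params=[ReducerTestData(Sum, sum), ReducerTestData(Product, np.prod)])
def reducer_test_data(request) -> ReducerTestData:
    # Hypothetical fixture pairing each reducer class with its raw reduction op.
    return request.param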
Example #7
def test_expected_constrained_improvement_is_constraint_when_no_feasible_points(
) -> None:
    class _Constraint(AcquisitionFunctionBuilder):
        def prepare_acquisition_function(
                self, datasets: Mapping[str, Dataset],
                models: Mapping[str,
                                ProbabilisticModel]) -> AcquisitionFunction:
            def acquisition(x: TensorType) -> TensorType:
                x_ = tf.squeeze(x, -2)
                return tf.cast(tf.logical_and(0.0 <= x_, x_ < 1.0), x.dtype)

            return acquisition

    data = {"foo": Dataset(tf.constant([[-2.0], [1.0]]), tf.constant([[4.0], [1.0]]))}
    models_ = {"foo": QuadraticMeanAndRBFKernel()}
    eci = ExpectedConstrainedImprovement(
        "foo", _Constraint()).prepare_acquisition_function(data, models_)

    constraint_fn = _Constraint().prepare_acquisition_function(data, models_)

    xs = tf.linspace([[-10.0]], [[10.0]], 100)
    npt.assert_allclose(eci(xs), constraint_fn(xs))
Example #8
def test_sparse_variational_update_raises_for_invalid_shapes(new_data: Dataset) -> None:
    model = SparseVariational(
        _svgp(tf.zeros([1, 4])),
        Dataset(tf.zeros([3, 4]), tf.zeros([3, 1])),
    )
    with pytest.raises(ValueError):
        model.update(new_data)
Example #9
    def build_stacked_independent_objectives_model(
            data: Dataset) -> ModelStack:
        gprs = []
        for idx in range(2):
            single_obj_data = Dataset(
                data.query_points, tf.gather(data.observations, [idx], axis=1))
            variance = tf.math.reduce_variance(single_obj_data.observations)
            kernel = gpflow.kernels.Matern52(
                variance, tf.constant([0.2, 0.2], tf.float64))
            gpr = gpflow.models.GPR(single_obj_data.astuple(),
                                    kernel,
                                    noise_variance=1e-5)
            gpflow.utilities.set_trainable(gpr.likelihood, False)
            gprs.append((GaussianProcessRegression(gpr), 1))

        return ModelStack(*gprs)
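A usage sketch for the helper above, assuming it and the quadratic function used elsewhere in these examples are in scope (the data itself is illustrative): stack two independent single-output GPRs over a two-output dataset.

xs = tf.random.uniform([10, 2], minval=-1.0, maxval=1.0, dtype=tf.float64)
ys = tf.concat([quadratic(xs), -quadratic(xs)], axis=-1)  # two objectives
stacked = build_stacked_independent_objectives_model(Dataset(xs, ys))
mean, var = stacked.predict(xs)  # each shaped [10, 2], one column per objective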
Example #10
def test_rff_sampler_raises_for_a_non_gpflow_kernel() -> None:
    model = QuadraticMeanAndRBFKernel()
    dataset = Dataset(tf.constant([[-2.0]]), tf.constant([[4.1]]))
    sampler = RandomFourierFeatureThompsonSampler(dataset, model, 100)

    with pytest.raises(AssertionError):
        sampler.get_trajectory()
Example #11
def test_vgp_optimize_natgrads_only_updates_variational_params(compile: bool) -> None:
    x_observed = np.linspace(0, 100, 10).reshape((-1, 1))
    y_observed = _3x_plus_gaussian_noise(x_observed)
    data = x_observed, y_observed
    dataset = Dataset(*data)

    class DummyBatchOptimizer(BatchOptimizer):
        def optimize(self, model: tf.Module, dataset: Dataset) -> None:
            pass

    optimizer = DummyBatchOptimizer(tf.optimizers.Adam(), compile=compile, max_iter=10)

    model = VariationalGaussianProcess(
        vgp_matern_model(x_observed[:10], y_observed[:10]), optimizer=optimizer, use_natgrads=True
    )

    old_num_trainable_params = len(model.trainable_variables)
    old_kernel_params = model.get_kernel().parameters[0].numpy()
    old_q_mu = model.model.q_mu.numpy()
    old_q_sqrt = model.model.q_sqrt.numpy()

    model.optimize(dataset)

    new_num_trainable_params = len(model.trainable_variables)
    new_kernel_params = model.get_kernel().parameters[0].numpy()
    new_q_mu = model.model.q_mu.numpy()
    new_q_sqrt = model.model.q_sqrt.numpy()

    npt.assert_allclose(old_kernel_params, new_kernel_params, atol=1e-3)
    npt.assert_equal(old_num_trainable_params, new_num_trainable_params)
    npt.assert_raises(AssertionError, npt.assert_allclose, old_q_mu, new_q_mu)
    npt.assert_raises(AssertionError, npt.assert_allclose, old_q_sqrt, new_q_sqrt)
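The compile flag is presumably supplied by pytest parametrization, along the lines of:

@pytest.mark.parametrize("compile", [True, False])
def test_vgp_optimize_natgrads_only_updates_variational_params(compile: bool) -> None:
    ...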
Example #12
def test_model_stack_training() -> None:
    class Model(GaussianProcess, TrainableProbabilisticModel):
        def __init__(
            self,
            mean_functions: Sequence[Callable[[TensorType], TensorType]],
            kernels: Sequence[tfp.math.psd_kernels.PositiveSemidefiniteKernel],
            output_dims: slice,
        ):
            super().__init__(mean_functions, kernels)
            self._output_dims = output_dims

        def _assert_data(self, dataset: Dataset) -> None:
            qp, obs = dataset.astuple()
            expected_obs = data.observations[..., self._output_dims]
            assert_datasets_allclose(dataset, Dataset(qp, expected_obs))

        optimize = _assert_data
        update = _assert_data

    rbf = tfp.math.psd_kernels.ExponentiatedQuadratic()
    model01 = Model([quadratic, quadratic], [rbf, rbf], slice(0, 2))
    model2 = Model([quadratic], [rbf], slice(2, 3))
    model3 = Model([quadratic], [rbf], slice(3, 4))

    stack = ModelStack((model01, 2), (model2, 1), (model3, 1))
    data = Dataset(tf.random.uniform([5, 7, 3]), tf.random.uniform([5, 7, 4]))
    stack.update(data)
    stack.optimize(data)
Example #13
def test_greedy_batch_acquisition_rule_acquire(
    rule_fn: Callable[
        # callable input type(s)
        [_GreedyBatchModelMinusMeanMaximumSingleBuilder],
        # callable output type
        AcquisitionRule[TensorType, Box]
        | AcquisitionRule[State[TensorType, AsynchronousGreedy.State], Box],
    ],
    num_query_points: int,
) -> None:
    search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
    acq = _GreedyBatchModelMinusMeanMaximumSingleBuilder()
    assert acq._update_count == 0
    acq_rule: AcquisitionRule[TensorType, Box] | AcquisitionRule[
        State[TensorType, AsynchronousGreedy.State], Box
    ] = rule_fn(acq)
    dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
    points_or_stateful = acq_rule.acquire_single(search_space, dataset, QuadraticMeanAndRBFKernel())
    if callable(points_or_stateful):
        _, query_points = points_or_stateful(None)
    else:
        query_points = points_or_stateful
    assert acq._update_count == num_query_points - 1
    npt.assert_allclose(query_points, [[0.0, 0.0]] * num_query_points, atol=1e-3)

    points_or_stateful = acq_rule.acquire_single(search_space, dataset, QuadraticMeanAndRBFKernel())
    if callable(points_or_stateful):
        _, query_points = points_or_stateful(None)
    else:
        query_points = points_or_stateful
    npt.assert_allclose(query_points, [[0.0, 0.0]] * num_query_points, atol=1e-3)
    assert acq._update_count == 2 * num_query_points - 1
Example #14
def test_rff_sampler_returns_correctly_shaped_samples(
    sample_min_value: bool, sample_size: int
) -> None:
    search_space = Box([0.0, 0.0], [1.0, 1.0])
    model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1.0, dtype=tf.float64))
    model.kernel = (
        gpflow.kernels.RBF()
    )  # need a gpflow kernel object for random feature decompositions

    x_range = tf.linspace(0.0, 1.0, 5)
    x_range = tf.cast(x_range, dtype=tf.float64)
    xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
    ys = quadratic(xs)
    dataset = Dataset(xs, ys)

    sampler = RandomFourierFeatureThompsonSampler(
        sample_size, model, dataset, num_features=100, sample_min_value=sample_min_value
    )

    query_points = search_space.sample(100)
    thompson_samples = sampler.sample(query_points)
    if sample_min_value:
        tf.debugging.assert_shapes([(thompson_samples, [sample_size, 1])])
    else:
        tf.debugging.assert_shapes([(thompson_samples, [sample_size, 2])])
Example #15
def test_rff_sampler_raises_for_invalid_sample_size(
    sample_size: int,
) -> None:
    model = QuadraticMeanAndRBFKernel()
    dataset = Dataset(tf.constant([[-2.0]]), tf.constant([[4.1]]))
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        RandomFourierFeatureThompsonSampler(sample_size, model, dataset)
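sample_size is presumably parametrized with invalid values, for instance (the exact values are illustrative):

@pytest.mark.parametrize("sample_size", [0, -2])
def test_rff_sampler_raises_for_invalid_sample_size(sample_size: int) -> None:
    ...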
Example #16
def test_batch_monte_carlo_expected_improvement_raises_for_empty_data(
) -> None:
    builder = BatchMonteCarloExpectedImprovement(100)
    data = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
    model = QuadraticMeanAndRBFKernel()
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        builder.prepare_acquisition_function(data, model)
Example #17
def test_expected_constrained_improvement_min_feasibility_probability_bound_is_inclusive(
) -> None:
    pof = tfp.bijectors.Sigmoid().forward

    class _Constraint(AcquisitionFunctionBuilder):
        def prepare_acquisition_function(
                self, datasets: Mapping[str, Dataset],
                models: Mapping[str,
                                ProbabilisticModel]) -> AcquisitionFunction:
            return pof

    models_ = {"foo": QuadraticMeanAndRBFKernel()}

    data = {"foo": Dataset(tf.constant([[1.1], [2.0]]), tf.constant([[1.21], [4.0]]))}
    eci = ExpectedConstrainedImprovement(
        "foo", _Constraint(),
        min_feasibility_probability=pof(1.0)).prepare_acquisition_function(
            data, models_)

    ei = ExpectedImprovement().using("foo").prepare_acquisition_function(
        data, models_)

    x = tf.constant([[1.5]])
    npt.assert_allclose(eci(x), ei(x) * pof(x))
Example #18
def test_locally_penalized_acquisitions_match_base_acquisition(
    base_builder,
) -> None:
    data = Dataset(tf.zeros([3, 2], dtype=tf.float64),
                   tf.ones([3, 2], dtype=tf.float64))
    search_space = Box([0, 0], [1, 1])
    model = QuadraticMeanAndRBFKernel()

    lp_acq_builder = LocalPenalizationAcquisitionFunction(
        search_space, base_acquisition_function_builder=base_builder)
    lp_acq = lp_acq_builder.prepare_acquisition_function(data, model, None)

    base_acq = base_builder.prepare_acquisition_function(data, model)

    x_range = tf.linspace(0.0, 1.0, 11)
    x_range = tf.cast(x_range, dtype=tf.float64)
    xs = tf.reshape(
        tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1),
        (-1, 2))
    lp_acq_values = lp_acq(xs[..., None, :])
    base_acq_values = base_acq(xs[..., None, :])

    if isinstance(base_builder, ExpectedImprovement):
        npt.assert_array_equal(lp_acq_values, base_acq_values)
    else:  # check sampling-based acquisition functions are close
        npt.assert_allclose(lp_acq_values, base_acq_values, atol=0.001)
Example #19
def test_ego(search_space: SearchSpace, expected_minimum: tf.Tensor) -> None:
    ego = EfficientGlobalOptimization(
        NegativeLowerConfidenceBound(0).using(OBJECTIVE))
    dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
    query_point, _ = ego.acquire(search_space, {OBJECTIVE: dataset},
                                 {OBJECTIVE: QuadraticMeanAndRBFKernel()})
    npt.assert_array_almost_equal(query_point, expected_minimum, decimal=5)
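search_space and expected_minimum are presumably parametrized. Given the quadratic mean used throughout these examples, one plausible case is the box from the other tests, whose interior contains the minimiser at the origin:

@pytest.mark.parametrize(
    "search_space, expected_minimum",
    [(Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3])), tf.constant([[0.0, 0.0]]))],
)
def test_ego(search_space: SearchSpace, expected_minimum: tf.Tensor) -> None:
    ...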
Example #20
def build_stacked_independent_objectives_model(
    data: Dataset, num_output: int
) -> ModelStack:
    gprs = []
    for idx in range(num_output):
        single_obj_data = Dataset(data.query_points,
                                  tf.gather(data.observations, [idx], axis=1))
        variance = tf.math.reduce_variance(single_obj_data.observations)
        kernel = gpflow.kernels.Matern52(variance)
        gpr = gpflow.models.GPR(
            (single_obj_data.query_points, single_obj_data.observations),
            kernel,
            noise_variance=1e-5)
        gpflow.utilities.set_trainable(gpr.likelihood, False)
        gprs.append(
            (
                create_model(
                    GPflowModelConfig(
                        **{
                            "model": gpr,
                            "optimizer": gpflow.optimizers.Scipy(),
                            "optimizer_args": {"minimize_args": {"options": dict(maxiter=100)}},
                        }
                    )
                ),
                1,
            )
        )

    return ModelStack(*gprs)
Example #21
def test_trust_region_for_unsuccessful_local_to_global_trust_region_reduced(
) -> None:
    tr = TrustRegion(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
    dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]),
                      tf.constant([[0.4], [0.5]]))
    lower_bound = tf.constant([-2.2, -1.0])
    upper_bound = tf.constant([1.3, 3.3])
    search_space = Box(lower_bound, upper_bound)

    eps = 0.5 * (search_space.upper - search_space.lower) / 10
    previous_y_min = dataset.observations[0]
    is_global = False
    acquisition_space = Box(dataset.query_points[0] - eps,
                            dataset.query_points[0] + eps)
    previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min,
                                       is_global)

    _, current_state = tr.acquire(search_space, {OBJECTIVE: dataset},
                                  {OBJECTIVE: QuadraticMeanAndRBFKernel()},
                                  previous_state)

    npt.assert_array_less(
        current_state.eps,
        previous_state.eps)  # current TR smaller than previous
    assert current_state.is_global
    npt.assert_array_almost_equal(current_state.acquisition_space.lower,
                                  lower_bound)
Example #22
def test_joint_batch_acquisition_rule_acquire(
    rule_fn: Callable[
        # callable input type(s)
        [_JointBatchModelMinusMeanMaximumSingleBuilder, int],
        # callable output type
        AcquisitionRule[TensorType, Box]
        | AcquisitionRule[State[TensorType, AsynchronousRuleState], Box],
    ]
) -> None:
    search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
    num_query_points = 4
    acq = _JointBatchModelMinusMeanMaximumSingleBuilder()
    acq_rule: AcquisitionRule[TensorType, Box] | AcquisitionRule[
        State[TensorType, AsynchronousRuleState], Box
    ] = rule_fn(acq, num_query_points)

    dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
    points_or_stateful = acq_rule.acquire_single(
        search_space, QuadraticMeanAndRBFKernel(), dataset=dataset
    )
    if callable(points_or_stateful):
        _, query_point = points_or_stateful(None)
    else:
        query_point = points_or_stateful

    npt.assert_allclose(query_point, [[0.0, 0.0]] * num_query_points, atol=1e-3)
Example #23
def test_dataset_raises_for_invalid_ranks(
        query_points_shape: ShapeLike, observations_shape: ShapeLike) -> None:
    query_points = tf.zeros(query_points_shape)
    observations = tf.ones(observations_shape)

    with pytest.raises(ValueError):
        Dataset(query_points, observations)
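query_points_shape and observations_shape are presumably parametrized with rank-mismatched shapes; the cases below are merely plausible examples:

@pytest.mark.parametrize(
    "query_points_shape, observations_shape",
    [([10], [10, 1]), ([10, 2], [10]), ([], [])],  # illustrative invalid ranks
)
def test_dataset_raises_for_invalid_ranks(
    query_points_shape: ShapeLike, observations_shape: ShapeLike
) -> None:
    ...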
Example #24
def test_trust_region_successful_global_to_global_trust_region_unchanged(
    rule: AcquisitionRule[TensorType, Box], expected_query_point: TensorType
) -> None:
    tr = TrustRegion(rule)
    dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.3]]))
    lower_bound = tf.constant([-2.2, -1.0])
    upper_bound = tf.constant([1.3, 3.3])
    search_space = Box(lower_bound, upper_bound)

    eps = 0.5 * (search_space.upper - search_space.lower) / 10
    previous_y_min = dataset.observations[0]
    is_global = True
    previous_state = TrustRegion.State(search_space, eps, previous_y_min, is_global)

    current_state, query_point = tr.acquire(
        search_space,
        {OBJECTIVE: QuadraticMeanAndRBFKernel()},
        datasets={OBJECTIVE: dataset},
    )(previous_state)

    assert current_state is not None
    npt.assert_array_almost_equal(current_state.eps, previous_state.eps)
    assert current_state.is_global
    npt.assert_array_almost_equal(query_point, expected_query_point, 5)
    npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
    npt.assert_array_almost_equal(current_state.acquisition_space.upper, upper_bound)
Example #25
def test_rff_sampler_returns_same_posterior_from_each_calculation_method(
) -> None:
    model = QuadraticMeanAndRBFKernel(
        noise_variance=tf.constant(1.0, dtype=tf.float64))
    model.kernel = (
        gpflow.kernels.RBF()
    )  # need a gpflow kernel object for random feature decompositions
    x_range = tf.linspace(0.0, 1.0, 5)
    x_range = tf.cast(x_range, dtype=tf.float64)
    xs = tf.reshape(
        tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1),
        (-1, 2))
    ys = quadratic(xs)
    dataset = Dataset(xs, ys)

    sampler = RandomFourierFeatureThompsonSampler(dataset, model, 100)
    sampler.get_trajectory()

    posterior_1 = sampler._prepare_theta_posterior_in_design_space()
    posterior_2 = sampler._prepare_theta_posterior_in_gram_space()

    npt.assert_allclose(posterior_1.loc, posterior_2.loc, rtol=0.02)
    npt.assert_allclose(posterior_1.scale_tril,
                        posterior_2.scale_tril,
                        rtol=0.02)
Example #26
def test_trust_region_for_successful_local_to_global_trust_region_increased(
    rule: AcquisitionRule[TensorType, Box]
) -> None:
    tr = TrustRegion(rule)
    dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.3]]))
    lower_bound = tf.constant([-2.2, -1.0])
    upper_bound = tf.constant([1.3, 3.3])
    search_space = Box(lower_bound, upper_bound)

    eps = 0.5 * (search_space.upper - search_space.lower) / 10
    previous_y_min = dataset.observations[0]
    is_global = False
    acquisition_space = Box(dataset.query_points[0] - eps, dataset.query_points[0] + eps)
    previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)

    current_state, _ = tr.acquire(
        search_space,
        {OBJECTIVE: QuadraticMeanAndRBFKernel()},
        datasets={OBJECTIVE: dataset},
    )(previous_state)

    assert current_state is not None
    npt.assert_array_less(previous_state.eps, current_state.eps)  # current TR larger than previous
    assert current_state.is_global
    npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
    npt.assert_array_almost_equal(current_state.acquisition_space.upper, upper_bound)
Example #27
def test_gaussian_process_regression_default_optimize(gpr_interface_factory) -> None:
    data = _mock_data()
    model = gpr_interface_factory(*data)
    internal_model = model.model
    loss = internal_model.training_loss()
    model.optimize(Dataset(*data))
    assert internal_model.training_loss() < loss
Example #28
def test_trust_region_for_unsuccessful_global_to_local_trust_region_unchanged(
) -> None:
    tr = TrustRegion(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
    dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]),
                      tf.constant([[0.4], [0.5]]))
    lower_bound = tf.constant([-2.2, -1.0])
    upper_bound = tf.constant([1.3, 3.3])
    search_space = Box(lower_bound, upper_bound)

    eps = 0.5 * (search_space.upper - search_space.lower) / 10
    previous_y_min = dataset.observations[0]
    is_global = True
    acquisition_space = search_space
    previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min,
                                       is_global)

    query_point, current_state = tr.acquire(
        search_space, {OBJECTIVE: dataset},
        {OBJECTIVE: QuadraticMeanAndRBFKernel()}, previous_state)

    npt.assert_array_almost_equal(current_state.eps, previous_state.eps)
    assert not current_state.is_global
    npt.assert_array_less(lower_bound, current_state.acquisition_space.lower)
    npt.assert_array_less(current_state.acquisition_space.upper, upper_bound)
    assert query_point[0] in current_state.acquisition_space
Example #29
def test_mc_ind_acquisition_function_builder_approximates_model_samples() -> None:
    class _Acq(MCIndAcquisitionFunctionBuilder):
        def _build_with_sampler(
            self,
            datasets: Mapping[str, Dataset],
            models: Mapping[str, ProbabilisticModel],
            samplers: Mapping[str, IndependentReparametrizationSampler],
        ) -> AcquisitionFunction:
            assert samplers.keys() == {"foo", "bar", "baz"}

            x = tf.random.uniform([100, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)

            for key in samplers:
                samples = samplers[key].sample(x)
                mean, var = models[key].predict(x)
                _assert_kolmogorov_smirnov_95(
                    tf.linalg.matrix_transpose(samples),
                    tfp.distributions.Normal(mean[..., None], tf.sqrt(var)[..., None]),
                )

            return raise_

    data = Dataset(tf.zeros([0, 2], dtype=tf.float64), tf.zeros([0, 2], dtype=tf.float64))
    _Acq(20_000).prepare_acquisition_function(
        {"foo": data, "bar": data, "baz": data},
        {
            "foo": _dim_two_gp((0.5, 0.5)),
            "bar": _dim_two_gp((1.3, 1.3)),
            "baz": _dim_two_gp((-0.7, -0.7)),
        },
    )
Example #30
def test_augmented_expected_improvement_builder_raises_for_empty_data(
) -> None:
    data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))

    with pytest.raises(ValueError):
        AugmentedExpectedImprovement().prepare_acquisition_function(
            data, QuadraticMeanAndRBFKernel())