Example #1
def test_rff_sampler_returns_correctly_shaped_samples(
    sample_min_value: bool, sample_size: int
) -> None:
    search_space = Box([0.0, 0.0], [1.0, 1.0])
    model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1.0, dtype=tf.float64))
    model.kernel = (
        gpflow.kernels.RBF()
    )  # need a gpflow kernel object for random feature decompositions

    x_range = tf.linspace(0.0, 1.0, 5)
    x_range = tf.cast(x_range, dtype=tf.float64)
    xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
    ys = quadratic(xs)
    dataset = Dataset(xs, ys)

    sampler = RandomFourierFeatureThompsonSampler(
        sample_size, model, dataset, num_features=100, sample_min_value=sample_min_value
    )

    query_points = search_space.sample(100)
    thompson_samples = sampler.sample(query_points)
    if sample_min_value:
        tf.debugging.assert_shapes([(thompson_samples, [sample_size, 1])])
    else:
        tf.debugging.assert_shapes([(thompson_samples, [sample_size, 2])])
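
Note on context: these snippets appear to be lifted from the Trieste test suite and omit their imports. As a rough sketch, assuming they really are Trieste tests, they would need roughly the following; the exact module paths vary between Trieste versions and should be treated as assumptions, not a definitive listing.

from __future__ import annotations

from typing import Mapping, Tuple

import gpflow
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp

# Assumed Trieste imports (exact module paths vary between Trieste versions):
from trieste.data import Dataset
from trieste.space import Box, DiscreteSearchSpace
from trieste.models import ProbabilisticModel
from trieste.acquisition import AcquisitionFunction
from trieste.acquisition.function import BatchMonteCarloExpectedImprovement, ExpectedImprovement
from trieste.acquisition.sampler import GumbelSampler, RandomFourierFeatureThompsonSampler

# quadratic, branin, mk_dataset, TF_DEBUGGING_ERROR_TYPES, QuadraticMeanAndRBFKernel,
# GaussianProcess and rbf are assumed to come from the suite's tests/util helper modules.
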
Example #2
def test_rff_sampler_returns_same_posterior_from_each_calculation_method() -> None:
    model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1.0, dtype=tf.float64))
    model.kernel = (
        gpflow.kernels.RBF()
    )  # need a gpflow kernel object for random feature decompositions
    x_range = tf.linspace(0.0, 1.0, 5)
    x_range = tf.cast(x_range, dtype=tf.float64)
    xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
    ys = quadratic(xs)
    dataset = Dataset(xs, ys)

    sampler = RandomFourierFeatureThompsonSampler(1, model, dataset, num_features=100)
    sampler.get_trajectory()

    posterior_1 = sampler._prepare_theta_posterior_in_design_space()
    posterior_2 = sampler._prepare_theta_posterior_in_gram_space()

    npt.assert_allclose(posterior_1.loc, posterior_2.loc, rtol=0.02)
    npt.assert_allclose(posterior_1.scale_tril, posterior_2.scale_tril, rtol=0.02)
Example #3
def test_optimize(
    search_space: Box | DiscreteSearchSpace,
    shift: list[float],
    expected_maximizer: list[list[float]],
) -> None:
    maximizer = optimize(search_space, lambda x: 0.5 - quadratic(x - shift))
    npt.assert_allclose(maximizer, expected_maximizer, rtol=2e-4)
Example #4
def test_batch_monte_carlo_expected_improvement_can_reproduce_ei() -> None:
    known_query_points = tf.random.uniform([5, 2], dtype=tf.float64)
    data = Dataset(known_query_points, quadratic(known_query_points))
    model = QuadraticMeanAndRBFKernel()
    batch_ei = BatchMonteCarloExpectedImprovement(10_000).prepare_acquisition_function(data, model)
    ei = ExpectedImprovement().prepare_acquisition_function(data, model)
    xs = tf.random.uniform([3, 5, 1, 2], dtype=tf.float64)
    npt.assert_allclose(batch_ei(xs), ei(tf.squeeze(xs, -2)), rtol=0.03)
Example #5
def _dim_two_gp(mean_shift: Tuple[float, float] = (0.0, 0.0)) -> GaussianProcess:
    matern52 = tfp.math.psd_kernels.MaternFiveHalves(
        amplitude=tf.cast(2.3, tf.float64), length_scale=tf.cast(0.5, tf.float64)
    )
    return GaussianProcess(
        [lambda x: mean_shift[0] + branin(x), lambda x: mean_shift[1] + quadratic(x)],
        [matern52, rbf()],
    )
Example #6
 def __init__(
     self,
     *,
     x_shift: float | SequenceN[float] | TensorType = 0,
     kernel_amplitude: float | TensorType | None = None,
 ):
     kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(kernel_amplitude)
     super().__init__([lambda x: quadratic(x - x_shift)], [kernel])
Example #7
def _example_gaussian_process() -> GaussianProcess:
    return GaussianProcess(
        [quadratic, lambda x: quadratic(x) / 5.0],
        [
            tfp.math.psd_kernels.ExponentiatedQuadratic(amplitude=1.6, length_scale=1.0),
            tfp.math.psd_kernels.ExponentiatedQuadratic(amplitude=1.6, length_scale=2.0),
        ],
    )
Example #8
def test_batch_monte_carlo_expected_improvement_raises_for_model_with_wrong_event_shape() -> None:
    builder = BatchMonteCarloExpectedImprovement(100)
    data = mk_dataset([(0.0, 0.0)], [(0.0, 0.0)])
    matern52 = tfp.math.psd_kernels.MaternFiveHalves(
        amplitude=tf.cast(2.3, tf.float64), length_scale=tf.cast(0.5, tf.float64)
    )
    model = GaussianProcess([lambda x: branin(x), lambda x: quadratic(x)], [matern52, rbf()])
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        builder.prepare_acquisition_function(data, model)
Example #9
 def __init__(self, mean_shifts: list[float], kernel_amplitudes: list[float]):
     super().__init__(
         # the immediately-invoked outer lambda binds each shift value eagerly
         [(lambda y: lambda x: quadratic(x) + y)(shift) for shift in mean_shifts],
         [
             tfp.math.psd_kernels.ExponentiatedQuadratic(amplitude)
             for amplitude in kernel_amplitudes
         ],
     )
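
The doubled lambda in the comprehension above is an immediately-invoked function: it binds each shift value when the list is built. A plain lambda would be late-bound, so every element would see the final value of the loop variable. A minimal standalone illustration (plain Python, not taken from the source):

shifts = [1.0, 2.0, 3.0]

# Late binding: each lambda looks up `shift` at call time, so all see 3.0.
late_bound = [lambda x: x + shift for shift in shifts]
print([f(0.0) for f in late_bound])      # [3.0, 3.0, 3.0]

# Immediately-invoked outer lambda (as in Example #9) captures each value.
eager_bound = [(lambda y: lambda x: x + y)(shift) for shift in shifts]
print([f(0.0) for f in eager_bound])     # [1.0, 2.0, 3.0]
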
Example #10
def test_gumbel_samples_are_minima() -> None:
    search_space = Box([0, 0], [1, 1])

    x_range = tf.linspace(0.0, 1.0, 5)
    x_range = tf.cast(x_range, dtype=tf.float64)
    xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
    ys = quadratic(xs)
    dataset = Dataset(xs, ys)

    model = QuadraticMeanAndRBFKernel()
    gumbel_sampler = GumbelSampler(5, model)

    query_points = search_space.sample(100)
    query_points = tf.concat([dataset.query_points, query_points], 0)
    gumbel_samples = gumbel_sampler.sample(query_points)

    fmean, _ = model.predict(dataset.query_points)
    assert max(gumbel_samples) < min(fmean)
Example #11
def test_rff_sampler_returns_deterministic_trajectory() -> None:
    model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1.0, dtype=tf.float64))
    model.kernel = (
        gpflow.kernels.RBF()
    )  # need a gpflow kernel object for random feature decompositions
    x_range = tf.linspace(0.0, 1.0, 5)
    x_range = tf.cast(x_range, dtype=tf.float64)
    xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
    ys = quadratic(xs)
    dataset = Dataset(xs, ys)

    sampler = RandomFourierFeatureThompsonSampler(1, model, dataset, num_features=100)
    trajectory = sampler.get_trajectory()

    trajectory_eval_1 = trajectory(xs)
    trajectory_eval_2 = trajectory(xs)

    npt.assert_allclose(trajectory_eval_1, trajectory_eval_2)
Example #12
def test_rff_thompson_samples_are_minima() -> None:
    search_space = Box([0.0, 0.0], [1.0, 1.0])
    model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1e-5, dtype=tf.float64))
    model.kernel = (
        gpflow.kernels.RBF()
    )  # need a gpflow kernel object for random feature decompositions

    x_range = tf.linspace(0.0, 1.0, 5)
    x_range = tf.cast(x_range, dtype=tf.float64)
    xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
    ys = quadratic(xs)
    dataset = Dataset(xs, ys)

    sampler = RandomFourierFeatureThompsonSampler(
        1, model, dataset, num_features=100, sample_min_value=True
    )

    query_points = search_space.sample(100)
    query_points = tf.concat([dataset.query_points, query_points], 0)
    thompson_samples = sampler.sample(query_points)

    fmean, _ = model.predict(dataset.query_points)
    assert max(thompson_samples) < min(fmean)
Example #13
def _quadratic_sum(shift: list[float]) -> AcquisitionFunction:
    return lambda x: tf.reduce_sum(0.5 - quadratic(x - shift), axis=-2)
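
_quadratic_sum returns an acquisition function that expects points with a penultimate batch axis and sums over that axis, yielding one value per leading batch element. A small self-contained sketch of the shape behaviour; the quadratic definition below is an assumption about the test helper of the same name:

import tensorflow as tf

def quadratic(x):  # assumed stand-in for the test suite's `quadratic` helper
    return tf.reduce_sum(x ** 2, axis=-1, keepdims=True)

def _quadratic_sum(shift):
    return lambda x: tf.reduce_sum(0.5 - quadratic(x - shift), axis=-2)

acq = _quadratic_sum([1.0, 1.0])
x = tf.constant([[[1.0, 1.0]], [[0.0, 0.0]]])  # shape [2, 1, 2]: two batches of one point each
print(acq(x))  # shape [2, 1]: [[0.5], [-1.5]], largest at the shift point
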
Example #14
def _quadratic_observer(x: tf.Tensor) -> Mapping[str, Dataset]:
    return {"": Dataset(x, quadratic(x))}
Example #15
 def prepare_acquisition_function(
     self, dataset: Dataset, model: ProbabilisticModel
 ) -> AcquisitionFunction:
     return lambda x: -quadratic(tf.squeeze(x, -2) - 1)
Example #16
 def prepare_acquisition_function(
     self, datasets: Mapping[str, Dataset], models: Mapping[str, ProbabilisticModel]
 ) -> AcquisitionFunction:
     return lambda x: -quadratic(tf.squeeze(x, -2) - 1)