def test_single_builder_using_passes_on_correct_dataset_and_model(
    single_builder: Union[SingleModelAcquisitionBuilder, SingleModelBatchAcquisitionBuilder]
) -> None:
    """A tagged builder should prepare without error given mappings containing its tag."""
    tagged_builder = single_builder.using("foo")
    datasets = {"foo": mk_dataset([[0.0]], [[0.0]]), "bar": mk_dataset([[1.0]], [[1.0]])}
    all_models = {"foo": QuadraticMeanAndRBFKernel(0.0), "bar": QuadraticMeanAndRBFKernel(1.0)}
    tagged_builder.prepare_acquisition_function(datasets, all_models)
def test_rff_sampler_returns_same_posterior_from_each_calculation_method() -> None:
    """Design-space and gram-space theta posteriors should approximately agree."""
    model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1.0, dtype=tf.float64))
    # Random feature decompositions require a genuine gpflow kernel object.
    model.kernel = gpflow.kernels.RBF()

    grid = tf.cast(tf.linspace(0.0, 1.0, 5), dtype=tf.float64)
    xs = tf.reshape(tf.stack(tf.meshgrid(grid, grid, indexing="ij"), axis=-1), (-1, 2))
    dataset = Dataset(xs, quadratic(xs))

    sampler = RandomFourierFeatureThompsonSampler(dataset, model, 100)
    sampler.get_trajectory()

    posterior_design = sampler._prepare_theta_posterior_in_design_space()
    posterior_gram = sampler._prepare_theta_posterior_in_gram_space()

    npt.assert_allclose(posterior_design.loc, posterior_gram.loc, rtol=0.02)
    npt.assert_allclose(posterior_design.scale_tril, posterior_gram.scale_tril, rtol=0.02)
def test_probability_of_feasibility_builder_builds_pof(threshold: float, at: tf.Tensor) -> None:
    """The builder's acquisition function should match the bare pof function."""
    acq = ProbabilityOfFeasibility(threshold).prepare_acquisition_function(
        empty_dataset([1], [1]), QuadraticMeanAndRBFKernel()
    )
    expected = probability_of_feasibility(QuadraticMeanAndRBFKernel(), threshold, at)
    npt.assert_allclose(acq(at), expected)
def test_locally_penalized_expected_improvement_builder_raises_for_invalid_pending_points_shape(
    pending_points,
) -> None:
    """Badly shaped pending points should trigger a debugging error on re-preparation."""
    data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
    builder = LocalPenalizationAcquisitionFunction(search_space=Box([0, 0], [1, 1]))
    # The first call initializes the builder's internal state.
    builder.prepare_acquisition_function(data, QuadraticMeanAndRBFKernel(), None)
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        builder.prepare_acquisition_function(data, QuadraticMeanAndRBFKernel(), pending_points)
def test_rff_sampler_does_pre_calc_during_first_trajectory_call() -> None:
    """The sampler's pre-calculation should happen lazily, on the first trajectory request."""
    model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1.0, dtype=tf.float64))
    model.kernel = gpflow.kernels.RBF()
    dataset = Dataset(
        tf.constant([[-2.0]], dtype=tf.float64), tf.constant([[4.1]], dtype=tf.float64)
    )
    sampler = RandomFourierFeatureThompsonSampler(dataset, model, 100)

    assert sampler._pre_calc is False
    sampler.get_trajectory()
    assert sampler._pre_calc is True
def test_single_model_acquisition_builder_using_passes_on_correct_dataset_and_model() -> None:
    """The tagged wrapper should forward exactly the entries stored under its tag."""

    class Builder(SingleModelAcquisitionBuilder):
        def prepare_acquisition_function(
            self, dataset: Dataset, model: ProbabilisticModel
        ) -> AcquisitionFunction:
            # These identity checks are the actual test: only "foo" entries arrive.
            assert dataset is data["foo"]
            assert model is models["foo"]
            return raise_exc

    data = {"foo": empty_dataset([1], [1]), "bar": empty_dataset([1], [1])}
    models = {"foo": QuadraticMeanAndRBFKernel(), "bar": QuadraticMeanAndRBFKernel()}
    Builder().using("foo").prepare_acquisition_function(data, models)
def test_gumbel_samples_are_minima() -> None:
    """Gumbel min-value samples should all lie below the model's predictive mean."""
    dataset = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
    model = QuadraticMeanAndRBFKernel()
    sampler = GumbelSampler(5, model)

    candidates = tf.concat([dataset.query_points, Box([0, 0], [1, 1]).sample(100)], 0)
    samples = sampler.sample(candidates)

    fmean, _ = model.predict(dataset.query_points)
    assert max(samples) < min(fmean)
def test_min_value_entropy_search_builder_gumbel_samples(mocked_mves) -> None:
    """The builder should pass sensible Gumbel min-value samples to the MES function."""
    dataset = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
    builder = MinValueEntropySearch(Box([0, 0], [1, 1]))
    model = QuadraticMeanAndRBFKernel()

    builder.prepare_acquisition_function(dataset, model)
    mocked_mves.assert_called_once()

    # The Gumbel samples (second positional argument of the mocked call) should
    # all lie below the predictive mean over the candidate points.
    gumbel_samples = mocked_mves.call_args[0][1]
    candidates = builder._search_space.sample(num_samples=builder._grid_size)
    candidates = tf.concat([dataset.query_points, candidates], 0)
    fmean, _ = model.predict(candidates)
    assert max(gumbel_samples) < min(fmean)
def test_rff_sampler_returns_trajectory_function_with_correct_shaped_output(
    num_evals: int,
) -> None:
    """A sampled trajectory should map [num_evals, d] inputs to [num_evals, 1] outputs."""
    model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1.0, dtype=tf.float64))
    # Random feature decompositions require a genuine gpflow kernel object.
    model.kernel = gpflow.kernels.RBF()
    dataset = Dataset(
        tf.constant([[-2.0]], dtype=tf.float64), tf.constant([[4.1]], dtype=tf.float64)
    )

    trajectory = RandomFourierFeatureThompsonSampler(dataset, model, 100).get_trajectory()

    xs = tf.linspace([-10.0], [10.0], num_evals)
    tf.debugging.assert_shapes([(trajectory(xs), [num_evals, 1])])
def test_ehvi_builder_raises_for_empty_data() -> None:
    """Expected hypervolume improvement cannot be prepared without observations."""
    num_obj = 3
    empty = empty_dataset([2], [num_obj])
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        ExpectedHypervolumeImprovement().prepare_acquisition_function(
            empty, QuadraticMeanAndRBFKernel()
        )
def test_rff_sampler_raises_for_a_non_gpflow_kernel() -> None:
    """Trajectory sampling should assert when the model lacks a gpflow kernel."""
    dataset = Dataset(tf.constant([[-2.0]]), tf.constant([[4.1]]))
    sampler = RandomFourierFeatureThompsonSampler(dataset, QuadraticMeanAndRBFKernel(), 100)
    with pytest.raises(AssertionError):
        sampler.get_trajectory()
def test_min_value_entropy_search_raises_for_invalid_batch_size(at: TensorType) -> None:
    """Evaluating MES at an invalid batch shape should raise a debugging error."""
    mes = min_value_entropy_search(QuadraticMeanAndRBFKernel(), tf.constant([[1.0], [2.0]]))
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        mes(at)
def test_independent_reparametrization_sampler_sample_raises_for_invalid_at_shape(
    shape: ShapeLike,
) -> None:
    """Sampling at a badly shaped input should raise a debugging error."""
    sampler = IndependentReparametrizationSampler(1, QuadraticMeanAndRBFKernel())
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        sampler.sample(tf.zeros(shape))
def test_batch_reparametrization_sampler_sample_raises_for_inconsistent_batch_size() -> None:
    """After sampling at one batch size, a different batch size must be rejected."""
    sampler = BatchReparametrizationSampler(100, QuadraticMeanAndRBFKernel())
    sampler.sample(tf.constant([[0.0], [1.0], [2.0]]))  # fixes the batch size at 3
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        sampler.sample(tf.constant([[0.0], [1.0]]))
def test_ego(search_space: SearchSpace, expected_minimum: tf.Tensor) -> None:
    """EGO with negative LCB on a quadratic model should locate the expected minimum."""
    rule = EfficientGlobalOptimization(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
    empty_data = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
    query_point, _ = rule.acquire(
        search_space, {OBJECTIVE: empty_data}, {OBJECTIVE: QuadraticMeanAndRBFKernel()}
    )
    npt.assert_array_almost_equal(query_point, expected_minimum, decimal=5)
def test_augmented_expected_improvement_raises_for_invalid_batch_size(at: TensorType) -> None:
    """Evaluating augmented EI at an invalid batch shape should raise a debugging error."""
    aei = augmented_expected_improvement(QuadraticMeanAndRBFKernel(), tf.constant([1.0]))
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        aei(at)
def test_expected_constrained_improvement_is_constraint_when_no_feasible_points() -> None:
    """With no feasible observations, ECI should reduce to the constraint function."""

    class _Constraint(AcquisitionFunctionBuilder):
        def prepare_acquisition_function(
            self, datasets: Mapping[str, Dataset], models: Mapping[str, ProbabilisticModel]
        ) -> AcquisitionFunction:
            def acquisition(x: TensorType) -> TensorType:
                x_ = tf.squeeze(x, -2)
                # Feasible (1.0) exactly when 0 <= x < 1, infeasible (0.0) otherwise.
                return tf.cast(tf.logical_and(0.0 <= x_, x_ < 1.0), x.dtype)

            return acquisition

    # Both observed query points lie outside [0, 1), so nothing is feasible.
    data = {"foo": Dataset(tf.constant([[-2.0], [1.0]]), tf.constant([[4.0], [1.0]]))}
    models_ = {"foo": QuadraticMeanAndRBFKernel()}

    eci = ExpectedConstrainedImprovement("foo", _Constraint()).prepare_acquisition_function(
        data, models_
    )
    constraint_fn = _Constraint().prepare_acquisition_function(data, models_)

    xs = tf.linspace([[-10.0]], [[10.0]], 100)
    npt.assert_allclose(eci(xs), constraint_fn(xs))
def test_single_model_acquisition_builder_raises_immediately_for_wrong_key() -> None:
    """Preparing with mappings that lack the builder's tag should raise KeyError."""
    tagged = _ArbitrarySingleBuilder().using("foo")
    with pytest.raises(KeyError):
        tagged.prepare_acquisition_function(
            {"bar": empty_dataset([1], [1])}, {"bar": QuadraticMeanAndRBFKernel()}
        )
def test_locally_penalized_acquisitions_match_base_acquisition(
    base_builder,
) -> None:
    """With no pending points, local penalization should equal its base acquisition."""
    data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
    model = QuadraticMeanAndRBFKernel()

    lp_builder = LocalPenalizationAcquisitionFunction(
        Box([0, 0], [1, 1]), base_acquisition_function_builder=base_builder
    )
    lp_acq = lp_builder.prepare_acquisition_function(data, model, None)
    base_acq = base_builder.prepare_acquisition_function(data, model)

    grid = tf.cast(tf.linspace(0.0, 1.0, 11), dtype=tf.float64)
    xs = tf.reshape(tf.stack(tf.meshgrid(grid, grid, indexing="ij"), axis=-1), (-1, 2))
    lp_values = lp_acq(xs[..., None, :])
    base_values = base_acq(xs[..., None, :])

    if isinstance(base_builder, ExpectedImprovement):
        npt.assert_array_equal(lp_values, base_values)
    else:
        # Sampling-based acquisition functions are only approximately equal.
        npt.assert_allclose(lp_values, base_values, atol=0.001)
def test_expected_constrained_improvement_min_feasibility_probability_bound_is_inclusive() -> None:
    """A point whose feasibility probability equals the bound should count as feasible."""
    pof = tfp.bijectors.Sigmoid().forward

    class _Constraint(AcquisitionFunctionBuilder):
        def prepare_acquisition_function(
            self, datasets: Mapping[str, Dataset], models: Mapping[str, ProbabilisticModel]
        ) -> AcquisitionFunction:
            return pof

    models_ = {"foo": QuadraticMeanAndRBFKernel()}
    data = {"foo": Dataset(tf.constant([[1.1], [2.0]]), tf.constant([[1.21], [4.0]]))}

    # Bound set exactly to pof(1.0) — the boundary case under test.
    eci = ExpectedConstrainedImprovement(
        "foo", _Constraint(), min_feasibility_probability=pof(1.0)
    ).prepare_acquisition_function(data, models_)
    ei = ExpectedImprovement().using("foo").prepare_acquisition_function(data, models_)

    x = tf.constant([[1.5]])
    npt.assert_allclose(eci(x), ei(x) * pof(x))
def test_batch_monte_carlo_expected_improvement_raises_for_empty_data() -> None:
    """Batch Monte Carlo EI cannot be prepared without any observations."""
    builder = BatchMonteCarloExpectedImprovement(100)
    empty = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
    model = QuadraticMeanAndRBFKernel()
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        builder.prepare_acquisition_function(empty, model)
def test_gumbel_sampler_returns_correctly_shaped_samples(sample_size: int) -> None:
    """Gumbel sampling should return samples of shape [sample_size, 1]."""
    sampler = GumbelSampler(sample_size, QuadraticMeanAndRBFKernel())
    samples = sampler.sample(Box([0, 0], [1, 1]).sample(5))
    tf.debugging.assert_shapes([(samples, [sample_size, 1])])
def test_trust_region_for_unsuccessful_local_to_global_trust_region_reduced() -> None:
    """An unsuccessful local step should shrink the trust region and go back to global search."""
    tr = TrustRegion(NegativeLowerConfidenceBound(0).using(OBJECTIVE))
    dataset = Dataset(
        tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.5]])
    )
    lower_bound = tf.constant([-2.2, -1.0])
    upper_bound = tf.constant([1.3, 3.3])
    search_space = Box(lower_bound, upper_bound)

    eps = 0.5 * (search_space.upper - search_space.lower) / 10
    # Previous best equals the current best observation (0.4), so no improvement was made.
    previous_y_min = dataset.observations[0]
    is_global = False
    acquisition_space = Box(dataset.query_points[0] - eps, dataset.query_points[0] + eps)
    previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)

    _, current_state = tr.acquire(
        search_space,
        {OBJECTIVE: dataset},
        {OBJECTIVE: QuadraticMeanAndRBFKernel()},
        previous_state,
    )

    # Current trust region is smaller than the previous one...
    npt.assert_array_less(current_state.eps, previous_state.eps)
    # ...and the next acquisition searches the whole space.
    assert current_state.is_global
    npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
def test_augmented_expected_improvement_builder_raises_for_empty_data() -> None:
    """Augmented EI cannot be prepared without any observations."""
    empty = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
    with pytest.raises(ValueError):
        AugmentedExpectedImprovement().prepare_acquisition_function(
            empty, QuadraticMeanAndRBFKernel()
        )
def test_batch_monte_carlo_expected_improvement_can_reproduce_ei() -> None:
    """For singleton batches, batch MC EI should approximate analytic EI."""
    known_query_points = tf.random.uniform([5, 2], dtype=tf.float64)
    data = Dataset(known_query_points, quadratic(known_query_points))
    model = QuadraticMeanAndRBFKernel()

    batch_ei = BatchMonteCarloExpectedImprovement(10_000).prepare_acquisition_function(data, model)
    ei = ExpectedImprovement().prepare_acquisition_function(data, model)

    # Batch dimension of 1, so the two acquisitions should agree to MC accuracy.
    xs = tf.random.uniform([3, 5, 1, 2], dtype=tf.float64)
    npt.assert_allclose(batch_ei(xs), ei(tf.squeeze(xs, -2)), rtol=0.03)
def test_locally_penalized_expected_improvement_builder_raises_for_empty_data() -> None:
    """Local penalization cannot be prepared without any observations."""
    empty = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
    with pytest.raises(ValueError):
        LocalPenalizationAcquisitionFunction(
            search_space=Box([0, 0], [1, 1])
        ).prepare_acquisition_function(empty, QuadraticMeanAndRBFKernel())
def test_discrete_thompson_sampler_returns_correctly_shaped_samples(sample_size: int) -> None:
    """Discrete Thompson sampling should return points with the candidates' dimensionality."""
    sampler = DiscreteThompsonSampler(sample_size, QuadraticMeanAndRBFKernel())
    samples = sampler.sample(Box([0, 0], [1, 1]).sample(100))
    tf.debugging.assert_shapes([(samples, ["N", 2])])
def test_single_builder_raises_immediately_for_wrong_key(
    single_builder: Union[SingleModelAcquisitionBuilder, SingleModelBatchAcquisitionBuilder]
) -> None:
    """Preparing with mappings that lack the builder's tag should raise KeyError."""
    tagged = single_builder.using("foo")
    with pytest.raises(KeyError):
        tagged.prepare_acquisition_function(
            {"bar": zero_dataset()}, {"bar": QuadraticMeanAndRBFKernel()}
        )
def test_batch_monte_carlo_expected_improvement() -> None:
    """Batch MC EI should match a direct Monte Carlo estimate from the joint posterior."""
    xs = tf.random.uniform([3, 5, 7, 2], dtype=tf.float64)
    model = QuadraticMeanAndRBFKernel()

    # Reference estimate: sample the joint posterior and average the batch improvement.
    mean, cov = model.predict_joint(xs)
    mvn = tfp.distributions.MultivariateNormalFullCovariance(
        tf.linalg.matrix_transpose(mean), cov
    )
    mvn_samples = mvn.sample(10_000)
    min_predictive_mean_at_known_points = 0.09
    expected = tf.reduce_mean(
        tf.reduce_max(
            tf.maximum(min_predictive_mean_at_known_points - mvn_samples, 0.0), axis=-1
        ),
        axis=0,
    )

    builder = BatchMonteCarloExpectedImprovement(10_000)
    acq = builder.prepare_acquisition_function(mk_dataset([[0.3], [0.5]], [[0.09], [0.25]]), model)
    npt.assert_allclose(acq(xs), expected, rtol=0.05)
def test_negative_lower_confidence_bound_builder_builds_negative_lower_confidence_bound() -> None:
    """The builder's acquisition should equal the negated lower confidence bound."""
    model = QuadraticMeanAndRBFKernel()
    beta = 1.96
    acq_fn = NegativeLowerConfidenceBound(beta).prepare_acquisition_function(
        Dataset(tf.zeros([0, 1]), tf.zeros([0, 1])), model
    )
    query_at = tf.linspace([-10], [10], 100)
    expected = -lower_confidence_bound(model, beta, query_at)
    npt.assert_array_almost_equal(acq_fn(query_at), expected)