Example 1
def test_create_loop_state_wrong_size_input_fails():
    x = np.array([[1], [2], [3], [4]])
    y = np.array([[4], [5], [6], [7]])
    c = np.array([[4], [5], [6], [7], [8]])

    with pytest.raises(ValueError):
        create_loop_state(x, y, cost=c)
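These snippets are shown without their import preambles. A minimal preamble that would make the loop-state tests runnable, assuming the emukit package layout these tests were written against:

import numpy as np
import pytest
from numpy.testing import assert_array_equal

# Assumed import paths; both names are also re-exported from emukit.core.loop.
from emukit.core.loop.loop_state import create_loop_state
from emukit.core.loop.user_function_result import UserFunctionResult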
Example 2
    def __init__(self, model: VanillaBayesianQuadrature, acquisition: Acquisition = None,
                 model_updater: ModelUpdater = None):
        """
        The loop for vanilla Bayesian Quadrature

        :param model: the vanilla Bayesian quadrature model
        :param acquisition: The acquisition function that is used to collect new points.
                            Defaults to IntegralVarianceReduction.
        :param model_updater: Defines how and when the quadrature model is updated if new data arrives.
                              Defaults to updating hyper-parameters every iteration.
        """

        if acquisition is None:
            acquisition = IntegralVarianceReduction(model)

        if model_updater is None:
            model_updater = FixedIntervalUpdater(model, 1)

        space = ParameterSpace(model.integral_bounds.convert_to_list_of_continuous_parameters())
        acquisition_optimizer = AcquisitionOptimizer(space)
        candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
        loop_state = create_loop_state(model.X, model.Y)

        super().__init__(candidate_point_calculator, model_updater, loop_state)

        self.model = model
Example 3
    def __init__(self,
                 model: VanillaBayesianQuadrature,
                 acquisition: Acquisition = None,
                 model_updater: ModelUpdater = None):
        """
        The loop for vanilla Bayesian Quadrature

        :param model: the vanilla Bayesian quadrature model
        :param acquisition: The acquisition function that is used to collect new points.
                            Defaults to IntegralVarianceReduction.
        :param model_updater: Defines how and when the quadrature model is updated if new data arrives.
                              Defaults to updating hyper-parameters every iteration.
        """

        self.model = model

        if acquisition is None:
            acquisition = IntegralVarianceReduction(self.model)

        if model_updater is None:
            model_updater = FixedIntervalUpdater(self.model, 1)

        space = ParameterSpace(
            self.model.integral_bounds.convert_to_list_of_continuous_parameters())
        acquisition_optimizer = AcquisitionOptimizer(space)
        candidate_point_calculator = SequentialPointCalculator(
            acquisition, acquisition_optimizer)
        loop_state = create_loop_state(self.model.X, self.model.Y)

        super().__init__(candidate_point_calculator, model_updater, loop_state)
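The class this constructor belongs to is not named in the snippet; in emukit it corresponds to VanillaBayesianQuadratureLoop. A minimal usage sketch under that assumption, with `bq_model` a hypothetical VanillaBayesianQuadrature instance and `integrand` a hypothetical user function returning arrays of shape (n, 1):

loop = VanillaBayesianQuadratureLoop(model=bq_model)
loop.run_loop(integrand, 10)  # run ten further evaluations of the integrand
# integrate() is assumed to return the estimated integral mean and variance
mean_estimate, variance_estimate = loop.model.integrate()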
Example 4
def test_iteration_end_event():
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    def user_function(x):
        return x

    x_test = np.linspace(0, 1)[:, None]
    y_test = user_function(x_test)

    x_init = np.linspace(0, 1, 5)[:, None]
    y_init = user_function(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    mse = []

    def compute_mse(loop, loop_state):
        mse.append(np.mean(np.square(model.predict(x_test)[0] - y_test)))

    loop_state = create_loop_state(x_init, y_init)

    acquisition = ModelVariance(model)
    acquisition_optimizer = AcquisitionOptimizer(space)
    candidate_point_calculator = SequentialPointCalculator(
        acquisition, acquisition_optimizer)
    model_updater = FixedIntervalUpdater(model)

    loop = OuterLoop(candidate_point_calculator, model_updater, loop_state)
    loop.iteration_end_event.append(compute_mse)
    loop.run_loop(user_function, 5)

    assert len(mse) == 5
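The compute_mse handler above relies on emukit calling each subscriber of iteration_end_event with the loop instance and the current loop state. Under that same assumption about the callback signature, any callable taking (loop, loop_state) can be attached:

def log_iteration(loop, loop_state):
    # Called once per iteration, after the model update.
    print("iteration %d: %d points observed" % (loop_state.iteration, loop_state.X.shape[0]))

loop.iteration_end_event.append(log_iteration)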
Example 5
def test_cost_returns_none():
    x = np.array([[1], [2], [3], [4]])
    y = np.array([[4], [5], [6], [7]])

    loop_state = create_loop_state(x[:3, :], y[:3, :])

    assert np.array_equiv(loop_state.cost, np.array([None, None, None]))
Example 6
def test_create_loop_state():
    x_init = np.array([[1], [2], [3]])
    y_init = np.array([[4], [5], [6]])

    loop_state = create_loop_state(x_init, y_init)

    assert_array_equal(loop_state.X, x_init)
    assert_array_equal(loop_state.Y, y_init)
    assert loop_state.iteration == 0
Example 7
def test_loop_state_update_error():
    x = np.array([[1], [2], [3], [4]])
    y = np.array([[4], [5], [6], [7]])

    loop_state = create_loop_state(x[:3, :], y[:3, :])
    with pytest.raises(ValueError):
        loop_state.update(None)

    with pytest.raises(ValueError):
        loop_state.update([])
Example 8
def test_loop_state_update():
    x = np.array([[1], [2], [3], [4]])
    y = np.array([[4], [5], [6], [7]])

    loop_state = create_loop_state(x[:3, :], y[:3, :])
    step_result = UserFunctionResult(x[3, :], y[3, :])
    loop_state.update([step_result])

    assert_array_equal(loop_state.X, x)
    assert_array_equal(loop_state.Y, y)
    assert loop_state.iteration == 1
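The same update mechanism plausibly extends to extra outputs such as evaluation cost. A hedged sketch, assuming UserFunctionResult accepts the same cost= keyword that create_loop_state does:

c = np.array([[0.1], [0.2], [0.3], [0.4]])
loop_state = create_loop_state(x[:3, :], y[:3, :], cost=c[:3, :])
loop_state.update([UserFunctionResult(x[3, :], y[3, :], cost=c[3, :])])  # cost= is an assumption
assert loop_state.iteration == 1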
Example 9
    def __init__(self, space: ParameterSpace,
                 X_init: np.ndarray, Y_init: np.ndarray, cost_init: np.ndarray,
                 s_min: float, s_max: float,
                 update_interval: int = 1,
                 num_eval_points: int = 2000,
                 marginalize_hypers: bool = True):
        """
        Implements FAst Bayesian Optimization for LArge DataSets as described in:

        Fast Bayesian hyperparameter optimization on large datasets
        A. Klein and S. Falkner and S. Bartels and P. Hennig and F. Hutter
        Electronic Journal of Statistics (2017)

        :param space: input space where the optimization is carried out.
        :param X_init: initial data points
        :param Y_init: initial function values
        :param cost_init: initial costs
        :param s_min: smallest possible dataset size
        :param s_max: highest possible dataset size
        :param update_interval: number of iterations between optimization of model hyper-parameters. Defaults to 1.
        :param num_eval_points: number of points at which to evaluate the acquisition function
        :param marginalize_hypers: if true, marginalize over the GP hyperparameters
        """

        # Append the dataset-size fidelity "s" as an extra continuous dimension.
        parameters = space.parameters
        parameters.append(ContinuousParameter("s", s_min, s_max))
        extended_space = ParameterSpace(parameters)

        model_objective = FabolasModel(X_init=X_init, Y_init=Y_init, s_min=s_min, s_max=s_max)
        model_cost = FabolasModel(X_init=X_init, Y_init=cost_init[:, None], s_min=s_min, s_max=s_max)

        if marginalize_hypers:
            acquisition_generator = lambda model: ContinuousFidelityEntropySearch(
                model_objective, space=extended_space,
                target_fidelity_index=len(extended_space.parameters) - 1)
            entropy_search = IntegratedHyperParameterAcquisition(model_objective, acquisition_generator)
        else:
            entropy_search = ContinuousFidelityEntropySearch(model_objective, space=extended_space,
                                                             target_fidelity_index=len(extended_space.parameters) - 1)

        acquisition = acquisition_per_expected_cost(entropy_search, model_cost)

        model_updater_objective = FixedIntervalUpdater(model_objective, update_interval)
        model_updater_cost = FixedIntervalUpdater(model_cost, update_interval, lambda state: state.cost)

        acquisition_optimizer = RandomSearchAcquisitionOptimizer(extended_space, num_eval_points=num_eval_points)
        candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)

        loop_state = create_loop_state(model_objective.X, model_objective.Y, model_cost.Y)

        super(CostSensitiveBayesianOptimizationLoop, self).__init__(candidate_point_calculator,
                                                                    [model_updater_objective, model_updater_cost],
                                                                    loop_state)
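The pivotal step in the constructor above is appending the dataset-size fidelity "s" to the search space, so acquisition optimization runs jointly over (x, s). That step in isolation, using only classes already seen in these examples (the parameter name "lr" and the bounds are illustrative):

space = ParameterSpace([ContinuousParameter("lr", 1e-6, 1e-1)])
parameters = space.parameters
parameters.append(ContinuousParameter("s", 100, 50000))  # s_min=100, s_max=50000
extended_space = ParameterSpace(parameters)
assert extended_space.parameters[-1].name == "s"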
Example 10
def test_batch_point_calculator(mock_model):

    acquisition = mock.create_autospec(Acquisition)
    acquisition_optimizer = mock.create_autospec(GradientAcquisitionOptimizer)
    acquisition_optimizer.optimize.return_value = (np.zeros((1, 1)), 0)
    batch_size = 10

    calculator = GreedyBatchPointCalculator(mock_model, acquisition,
                                            acquisition_optimizer, batch_size)

    loop_state = create_loop_state(np.zeros((1, 1)), np.zeros((1, 1)))
    next_points = calculator.compute_next_points(loop_state)
    assert next_points.shape[0] == batch_size
Example 11
def test_local_penalization():
    parameter_space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    acquisition_optimizer = AcquisitionOptimizer(parameter_space)
    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)
    acquisition = ExpectedImprovement(model)
    batch_size = 5
    lp_calc = LocalPenalizationPointCalculator(acquisition, acquisition_optimizer, model, parameter_space, batch_size)

    loop_state = create_loop_state(x_init, y_init)
    new_points = lp_calc.compute_next_points(loop_state)
    assert new_points.shape == (batch_size, 1)
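A sketch of closing the loop on the batch above: evaluate a user function at the proposed points and fold the results back into the loop state (np.sin is a stand-in objective, not part of the original test):

batch_ys = np.sin(new_points)  # stand-in for the real user function
results = [UserFunctionResult(x_row, y_row) for x_row, y_row in zip(new_points, batch_ys)]
loop_state.update(results)
assert loop_state.X.shape[0] == x_init.shape[0] + batch_size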
Example 12
def test_local_penalization():
    parameter_space = ParameterSpace([ContinuousParameter("x", 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(parameter_space)
    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)
    acquisition = ExpectedImprovement(model)
    batch_size = 5
    lp_calc = LocalPenalizationPointCalculator(acquisition,
                                               acquisition_optimizer, model,
                                               parameter_space, batch_size)

    loop_state = create_loop_state(x_init, y_init)
    new_points = lp_calc.compute_next_points(loop_state)
    assert new_points.shape == (batch_size, 1)
Example 13
def test_minimum_observed_value_metric():
    x_observations = np.random.rand(50, 2)
    y_observations = np.random.rand(50, 2)

    mock_model = mock.create_autospec(IModel)

    model_updater_mock = mock.create_autospec(ModelUpdater)
    model_updater_mock.model = mock_model
    mock_loop = mock.create_autospec(OuterLoop)
    mock_loop.model_updaters = [model_updater_mock]

    loop_state = create_loop_state(x_observations, y_observations)
    loop_state.metrics = dict()

    metric = MinimumObservedValueMetric()
    metric_value = metric.evaluate(mock_loop, loop_state)

    assert metric_value.shape == (2, )
Example 14
def test_time_metric():
    x_observations = np.random.rand(50, 2)
    y_observations = np.random.rand(50, 2)

    mock_model = mock.create_autospec(IModel)

    model_updater_mock = mock.create_autospec(ModelUpdater)
    model_updater_mock.model = mock_model
    mock_loop = mock.create_autospec(OuterLoop)
    mock_loop.model_updater = model_updater_mock

    loop_state = create_loop_state(x_observations, y_observations)
    loop_state.metrics = dict()

    name = 'time'
    metric = TimeMetric(name)
    metric.reset()
    metric_value = metric.evaluate(mock_loop, loop_state)

    assert metric_value.shape == (1, )
Example 15
def test_cumulative_costs():
    x_observations = np.random.rand(50, 2)
    y_observations = np.random.rand(50, 2)
    c_observations = np.random.rand(50, 1)
    mock_model = mock.create_autospec(IModel)

    model_updater_mock = mock.create_autospec(ModelUpdater)
    model_updater_mock.model = mock_model
    mock_loop = mock.create_autospec(OuterLoop)
    mock_loop.model_updater = model_updater_mock

    loop_state = create_loop_state(x_observations,
                                   y_observations,
                                   cost=c_observations)
    loop_state.metrics = dict()

    name = 'cost'
    metric = CumulativeCostMetric(name)
    metric.reset()
    metric_value = metric.evaluate(mock_loop, loop_state)

    assert metric_value == np.cumsum(c_observations)[-1]
    assert metric_value.shape == (1, )
Example 16
def test_create_loop_error():
    x_init = np.array([[1], [2], [3]])
    y_init = np.array([[4], [5]])

    with pytest.raises(ValueError):
        create_loop_state(x_init, y_init)