Example #1
def test_sequential_with_all_parameters_fixed():
    # With both parameters fixed by the context there is nothing left to
    # optimize, so the calculator should return the context values directly
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
    acquisition_optimizer = AcquisitionOptimizer(space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25, 'y': 0.25})
    assert np.array_equiv(next_points, np.array([0.25, 0.25]))
Example #2
def test_sequential_evaluator():
    # SequentialPointCalculator should just return the result of the acquisition optimizer
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition_optimizer = mock.create_autospec(AcquisitionOptimizer)
    mock_acquisition_optimizer.optimize.return_value = (np.array([[0.]]), None)
    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, mock_acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock)

    # "SequentialPointCalculator" should only ever return 1 value
    assert(len(next_points) == 1)
    # Value should be result of acquisition optimization
    assert(np.equal(np.array([[0.]]), next_points[0]))
Example #3
def test_sequential_with_context():
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
    acquisition_optimizer = AcquisitionOptimizer(space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25})

    # "SequentialPointCalculator" should only ever return 1 value
    assert(len(next_points) == 1)
    # Context value should be what we set
    assert np.isclose(next_points[0, 0], 0.25)
Example #4
def test_iteration_end_event():
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    def user_function(x):
        return x

    x_test = np.linspace(0, 1)[:, None]
    y_test = user_function(x_test)

    x_init = np.linspace(0, 1, 5)[:, None]
    y_init = user_function(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    mse = []

    def compute_mse(loop, loop_state):
        # iteration_end_event handlers are invoked with (loop, loop_state)
        mse.append(np.mean(np.square(model.predict(x_test)[0] - y_test)))

    loop_state = create_loop_state(x_init, y_init)

    acquisition = ModelVariance(model)
    acquisition_optimizer = AcquisitionOptimizer(space)
    candidate_point_calculator = SequentialPointCalculator(
        acquisition, acquisition_optimizer)
    model_updater = FixedIntervalUpdater(model)

    loop = OuterLoop(candidate_point_calculator, model_updater, loop_state)
    loop.iteration_end_event.append(compute_mse)
    loop.run_loop(user_function, 5)

    assert len(mse) == 5
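
Any callable with the same (loop, loop_state) signature can be attached to iteration_end_event alongside compute_mse. A minimal sketch of a progress logger, reusing the loop above and assuming LoopState's iteration attribute (it would have to be appended before run_loop is called):

def log_progress(loop, loop_state):
    # loop_state.X grows by one row per evaluated point
    print("iteration %d: %d points evaluated" % (loop_state.iteration, loop_state.X.shape[0]))

loop.iteration_end_event.append(log_progress)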
Example #5
    def __init__(self,
                 model: VanillaBayesianQuadrature,
                 acquisition: Acquisition = None,
                 model_updater: ModelUpdater = None):
        """
        The loop for vanilla Bayesian Quadrature

        :param model: the vanilla Bayesian quadrature method
        :param acquisition: The acquisition function that is used to collect new points.
                            Defaults to IntegralVarianceReduction.
        :param model_updater: Defines how and when the quadrature model is updated if new data arrives.
                              Defaults to updating hyper-parameters every iteration.
        """

        self.model = model

        if acquisition is None:
            acquisition = IntegralVarianceReduction(self.model)

        if model_updater is None:
            model_updater = FixedIntervalUpdater(self.model, 1)

        space = ParameterSpace(
            self.model.integral_bounds.convert_to_list_of_continuous_parameters())
        acquisition_optimizer = AcquisitionOptimizer(space)
        candidate_point_calculator = SequentialPointCalculator(
            acquisition, acquisition_optimizer)
        loop_state = create_loop_state(self.model.X, self.model.Y)

        super().__init__(candidate_point_calculator, model_updater, loop_state)
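
For reference, a typical use of this constructor is short (a minimal sketch; in emukit the enclosing class is VanillaBayesianQuadratureLoop, bq_model stands in for an already-built VanillaBayesianQuadrature, and the integrate() call is assumed from emukit's quadrature interface):

# Minimal usage sketch, assuming bq_model is a VanillaBayesianQuadrature instance
loop = VanillaBayesianQuadratureLoop(model=bq_model)
loop.run_loop(user_function, 10)  # an int is accepted as a fixed-iteration stopping condition
integral_mean, integral_variance = loop.model.integrate()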
Example #6
def test_multi_source_sequential_with_source_context():
    # Check that we can fix the information source parameter with context
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace(
        [ContinuousParameter("x", 0, 1), ContinuousParameter("y", 0, 1), InformationSourceParameter(2)]
    )
    acquisition_optimizer = GradientAcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(acquisition_optimizer, space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, multi_source_acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={"source": 1.0})

    # "SequentialPointCalculator" should only ever return 1 value
    assert len(next_points) == 1
    # The source context value should be what we set (source is the last parameter)
    assert np.isclose(next_points[0, 2], 1.0)
Example #7
def test_multi_source_sequential_with_source_context():
    # Check that we can fix the information source parameter with context
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1),
                            ContinuousParameter('y', 0, 1),
                            InformationSourceParameter(2)])
    acquisition_optimizer = AcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(acquisition_optimizer, space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, multi_source_acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'source': 1.0})

    # "SequentialPointCalculator" should only ever return 1 value
    assert(len(next_points) == 1)
    # Context value should be what we set
    assert np.isclose(next_points[0, 1], 1.)
Example #8
    def __init__(self, space: ParameterSpace,
                 X_init: np.ndarray, Y_init: np.ndarray, cost_init: np.ndarray,
                 s_min: float, s_max: float,
                 update_interval: int = 1,
                 num_eval_points: int = 2000,
                 marginalize_hypers: bool = True):
        """
        Implements FAst Bayesian Optimization for LArge DataSets as described in:

        Fast Bayesian hyperparameter optimization on large datasets
        A. Klein and S. Falkner and S. Bartels and P. Hennig and F. Hutter
        Electronic Journal of Statistics (2017)

        :param space: input space where the optimization is carried out.
        :param X_init: initial data points
        :param Y_init: initial function values
        :param cost_init: initial costs
        :param s_min: smallest possible dataset size
        :param s_max: largest possible dataset size
        :param update_interval: number of iterations between optimization of model hyper-parameters. Defaults to 1.
        :param num_eval_points: number of points to evaluate the acquisition function
        :param marginalize_hypers: if true, marginalize over the GP hyperparameters
        """

        # Extend the input space with the dataset-size parameter "s"
        parameters = space.parameters
        parameters.append(ContinuousParameter("s", s_min, s_max))
        extended_space = ParameterSpace(parameters)

        model_objective = FabolasModel(X_init=X_init, Y_init=Y_init, s_min=s_min, s_max=s_max)
        model_cost = FabolasModel(X_init=X_init, Y_init=cost_init[:, None], s_min=s_min, s_max=s_max)

        if marginalize_hypers:
            acquisition_generator = lambda model: ContinuousFidelityEntropySearch(
                model_objective, space=extended_space,
                target_fidelity_index=len(extended_space.parameters) - 1)
            entropy_search = IntegratedHyperParameterAcquisition(model_objective, acquisition_generator)
        else:
            entropy_search = ContinuousFidelityEntropySearch(model_objective, space=extended_space,
                                                             target_fidelity_index=len(extended_space.parameters) - 1)

        acquisition = acquisition_per_expected_cost(entropy_search, model_cost)

        model_updater_objective = FixedIntervalUpdater(model_objective, update_interval)
        model_updater_cost = FixedIntervalUpdater(model_cost, update_interval, lambda state: state.cost)

        acquisition_optimizer = RandomSearchAcquisitionOptimizer(extended_space, num_eval_points=num_eval_points)
        candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)

        loop_state = create_loop_state(model_objective.X, model_objective.Y, model_cost.Y)

        super(CostSensitiveBayesianOptimizationLoop, self).__init__(candidate_point_calculator,
                                                                    [model_updater_objective, model_updater_cost],
                                                                    loop_state)
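
From user code this constructor is typically all that is needed (a minimal sketch; in emukit the enclosing class is FabolasLoop, and the initial design arrays, dataset-size bounds and objective below are placeholders):

# Minimal usage sketch with placeholder initial data and dataset-size bounds
loop = FabolasLoop(space=space, X_init=X_init, Y_init=Y_init, cost_init=cost_init,
                   s_min=100, s_max=50000)
# user_function is assumed to evaluate the objective at (x, s) and report its cost
loop.run_loop(user_function, 10)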
Example #9
def run_optimisation(current_range, freq_range, power_range):
    parameter_space = ParameterSpace([
        ContinuousParameter('current', current_range[0], current_range[1]),
        ContinuousParameter('freq', freq_range[0], freq_range[1]),
        ContinuousParameter('power', power_range[0], power_range[1])
    ])

    def function(X):
        current = X[:, 0]
        freq = X[:, 1]
        power = X[:, 2]
        out = np.zeros((len(current), 1))
        for g in range(len(current)):
            # Set JPA current, frequency & power here (instrument control code elided)
            out[g, 0] = -get_SNR(plot=False)[-1]  # negate since we want to maximise the SNR
        return out

    num_data_points = 10

    design = RandomDesign(parameter_space)
    X = design.get_samples(num_data_points)
    Y = function(X)

    model_gpy = GPRegression(X, Y)
    model_gpy.optimize()
    model_emukit = GPyModelWrapper(model_gpy)

    exp_imprv = ExpectedImprovement(model=model_emukit)
    optimizer = GradientAcquisitionOptimizer(space=parameter_space)
    point_calc = SequentialPointCalculator(exp_imprv, optimizer)

    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=exp_imprv,
                                             batch_size=1)

    stopping_condition = FixedIterationsStoppingCondition(i_max=100)

    bayesopt_loop.run_loop(function, stopping_condition)

    coord_results = bayesopt_loop.get_results().minimum_location
    min_value = bayesopt_loop.get_results().minimum_value
    step_results = bayesopt_loop.get_results().best_found_value_per_iteration
    print(coord_results)
    print(min_value)

    return coord_results, abs(min_value)
Example #10
# Set up random seeding of parameter space
num_data_points = no_random_seeds
design = RandomDesign(parameter_space)
X = design.get_samples(num_data_points)
Y = q(X)

# Set up emukit model
model_gpy = GPRegression(X, Y)
model_gpy.optimize()
model_emukit = GPyModelWrapper(model_gpy)

# Set up Bayesian optimisation routine
exp_imprv = ExpectedImprovement(model=model_emukit)
optimizer = GradientAcquisitionOptimizer(space=parameter_space)
point_calc = SequentialPointCalculator(exp_imprv, optimizer)

# Bayesian optimisation routine
bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                         space=parameter_space,
                                         acquisition=exp_imprv,
                                         batch_size=1)

stopping_condition = FixedIterationsStoppingCondition(i_max=no_BO_sims)
bayesopt_loop.run_loop(q, stopping_condition)


# Results of Bayesian optimisation
coord_results = bayesopt_loop.get_results().minimum_location
min_value = bayesopt_loop.get_results().minimum_value
step_results = bayesopt_loop.get_results().best_found_value_per_iteration
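
Note that in the last two examples point_calc is created but never passed on: BayesianOptimizationLoop builds its own SequentialPointCalculator internally. To drive a loop with the custom calculator one can assemble an OuterLoop directly; a minimal sketch reusing the names above, assuming OuterLoop, FixedIntervalUpdater and create_loop_state are imported from emukit.core.loop:

# Minimal sketch: plug point_calc into an OuterLoop instead of BayesianOptimizationLoop
model_updater = FixedIntervalUpdater(model_emukit)  # re-fit hyperparameters every iteration
loop_state = create_loop_state(X, Y)
custom_loop = OuterLoop(point_calc, model_updater, loop_state)
custom_loop.run_loop(q, FixedIterationsStoppingCondition(i_max=no_BO_sims))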