Example #1
def test_gradient_acquisition_optimizer_categorical(simple_square_acquisition):
    space = ParameterSpace([
        ContinuousParameter("x", 0, 1),
        CategoricalParameter("y", OneHotEncoding(["A", "B"]))
    ])
    optimizer = GradientAcquisitionOptimizer(space)
    context = {"y": "B"}
    opt_x, opt_val = optimizer.optimize(simple_square_acquisition, context)
    assert_array_equal(opt_x, np.array([[0.0, 0.0, 1.0]]))
    assert_array_equal(opt_val, np.array([[2.0]]))
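
The simple_square_acquisition fixture that Examples #1, #4, #9 and #13 receive is not shown in this listing. A minimal sketch that is consistent with the asserted optima (an assumption about the fixture, not necessarily the suite's exact definition) could be:

import numpy as np
import pytest

from emukit.core.acquisition import Acquisition


@pytest.fixture
def simple_square_acquisition():
    class SimpleSquareAcquisition(Acquisition):
        def evaluate(self, x):
            # Value 1 at x[:, 0] == 0; one-hot category and information-source
            # columns simply add their sum, so "later" choices score higher
            y = -x[:, 0] ** 2 + np.sum(x[:, 1:], axis=1) + 1
            return y[:, None]

        @property
        def has_gradients(self):
            return False

    return SimpleSquareAcquisition()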
Example #2
def test_multi_source_batch_experimental_design():
    objective, space = multi_fidelity_forrester_function()

    # Create initial data
    random_design = RandomDesign(space)
    x_init = random_design.get_samples(10)
    initial_results = objective.evaluate(x_init)
    y_init = np.array([res.Y for res in initial_results])

    # Create multi source acquisition optimizer
    acquisition_optimizer = GradientAcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(
        acquisition_optimizer, space)

    # Create GP model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    # Create acquisition
    acquisition = ModelVariance(model)

    # Create batch candidate point calculator
    batch_candidate_point_calculator = GreedyBatchPointCalculator(
        model, acquisition, multi_source_acquisition_optimizer, batch_size=5)

    initial_loop_state = LoopState(initial_results)
    loop = OuterLoop(batch_candidate_point_calculator,
                     FixedIntervalUpdater(model, 1), initial_loop_state)

    loop.run_loop(objective, 10)
    # 10 initial points + 10 iterations x batch size 5 = 60 evaluated points
    assert loop.loop_state.X.shape[0] == 60
Example #3
def test_iteration_end_event():
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    def user_function(x):
        return x

    x_test = np.linspace(0, 1)[:, None]
    y_test = user_function(x_test)

    x_init = np.linspace(0, 1, 5)[:, None]
    y_init = user_function(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    mse = []

    # Handlers appended to iteration_end_event are called as handler(loop, loop_state)
    def compute_mse(loop, loop_state):
        mse.append(np.mean(np.square(model.predict(x_test)[0] - y_test)))

    loop_state = create_loop_state(x_init, y_init)

    acquisition = ModelVariance(model)
    acquisition_optimizer = GradientAcquisitionOptimizer(space)
    candidate_point_calculator = SequentialPointCalculator(
        acquisition, acquisition_optimizer)
    model_updater = FixedIntervalUpdater(model)

    loop = OuterLoop(candidate_point_calculator, model_updater, loop_state)
    loop.iteration_end_event.append(compute_mse)
    loop.run_loop(user_function, 5)

    assert len(mse) == 5
Example #4
def test_multi_source_acquisition_optimizer(simple_square_acquisition):
    space = ParameterSpace(
        [ContinuousParameter("x", 0, 1),
         InformationSourceParameter(2)])
    single_optimizer = GradientAcquisitionOptimizer(space)
    optimizer = MultiSourceAcquisitionOptimizer(single_optimizer, space)

    opt_x, opt_val = optimizer.optimize(simple_square_acquisition)
    assert_array_equal(opt_x, np.array([[0.0, 1.0]]))
    assert_array_equal(opt_val, np.array([[2.0]]))
Example #5
def test_sequential_with_all_parameters_fixed():
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25, 'y': 0.25})
    # With every parameter fixed by the context there is nothing left to
    # optimize, so the returned point is exactly the context values
    assert np.array_equiv(next_points, np.array([0.25, 0.25]))
Example #6
def test_local_penalization_requires_gradients():
    parameter_space = ParameterSpace([ContinuousParameter("x", 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(parameter_space)

    # A bare IModel mock does not implement IDifferentiable, so the
    # acquisition below reports has_gradients == False
    model = mock.create_autospec(IModel)

    acquisition = ExpectedImprovement(model)
    batch_size = 5

    with pytest.raises(ValueError):
        LocalPenalizationPointCalculator(acquisition, acquisition_optimizer,
                                         model, parameter_space, batch_size)
Example #7
def run_optimisation(current_range, freq_range, power_range):
    parameter_space = ParameterSpace([
        ContinuousParameter('current', current_range[0], current_range[1]),
        ContinuousParameter('freq', freq_range[0], freq_range[1]),
        ContinuousParameter('power', power_range[0], power_range[1])
    ])

    def function(X):
        current = X[:, 0]
        freq = X[:, 1]
        power = X[:, 2]
        out = np.zeros((len(current), 1))
        for g in range(len(current)):
            # Set JPA current, frequency & power for this sample, then measure
            out[g, 0] = -get_SNR(plot=False)[-1]  # negative because we want to maximise SNR
        return out

    num_data_points = 10

    design = RandomDesign(parameter_space)
    X = design.get_samples(num_data_points)
    Y = function(X)

    model_gpy = GPRegression(X, Y)
    model_gpy.optimize()
    model_emukit = GPyModelWrapper(model_gpy)

    exp_imprv = ExpectedImprovement(model=model_emukit)
    optimizer = GradientAcquisitionOptimizer(space=parameter_space)
    point_calc = SequentialPointCalculator(exp_imprv, optimizer)

    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=exp_imprv,
                                             batch_size=1)

    stopping_condition = FixedIterationsStoppingCondition(i_max=100)

    bayesopt_loop.run_loop(function, stopping_condition)

    coord_results = bayesopt_loop.get_results().minimum_location
    min_value = bayesopt_loop.get_results().minimum_value
    step_results = bayesopt_loop.get_results().best_found_value_per_iteration
    print(coord_results)
    print(min_value)

    return coord_results, abs(min_value)
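
run_optimisation calls an external get_SNR routine that this listing does not define. To exercise the loop without the instrument, a hypothetical stand-in such as the one below can be used (the name and keyword come from the call site above; the return shape is an assumption based on the [-1] indexing):

import numpy as np

def get_SNR(plot=False):
    # Hypothetical stub: a real implementation would set the JPA current,
    # frequency and power, run the measurement, and return a trace whose
    # last element is the signal-to-noise ratio
    return np.random.rand(100)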
Example #8
def test_sequential_with_context():
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25})

    # "SequentialPointCalculator" should only ever return 1 value
    assert len(next_points) == 1
    # Context value should be what we set
    assert np.isclose(next_points[0, 0], 0.25)
Example #9
def test_gradient_acquisition_optimizer(simple_square_acquisition):
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    with pytest.raises(ValueError):
        # an unsupported optimizer name is rejected
        GradientAcquisitionOptimizer(space, optimizer='CMA')
    optimizer = GradientAcquisitionOptimizer(space)

    with pytest.raises(ValueError):
        # 'y' is not a parameter of the space, so the context is invalid
        optimizer.optimize(simple_square_acquisition, {'y': 3})
    opt_x, opt_val = optimizer.optimize(simple_square_acquisition)
    assert_array_equal(opt_x, np.array([[0.]]))
    assert_array_equal(opt_val, np.array([[1.]]))
Example #10
def test_local_penalization():
    parameter_space = ParameterSpace([ContinuousParameter("x", 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(parameter_space)
    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    # A real GPy model implements IDifferentiable, so expected improvement
    # exposes gradients and local penalization is allowed
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)
    acquisition = ExpectedImprovement(model)
    batch_size = 5
    lp_calc = LocalPenalizationPointCalculator(acquisition,
                                               acquisition_optimizer, model,
                                               parameter_space, batch_size)

    loop_state = create_loop_state(x_init, y_init)
    new_points = lp_calc.compute_next_points(loop_state)
    assert new_points.shape == (batch_size, 1)
Example #11
def create_bayesian_optimization_loop(
        gpy_model: ComparisonGP, lims: np.ndarray, batch_size: int,
        acquisition: AcquisitionFunction) -> BayesianOptimizationLoop:
    """
    Creates Bayesian optimization loop for Bayesian neural network or random forest models.
    :param gpy_model: the GPy model used in optimization
    :param lims: Optimization limits for the inputs
    :param batch_size: number of observations used in batch
    :param acquisition: acquisition function used in the Bayesian optimization
    :return: emukit BO loop
    """

    # Create model
    model = ComparisonGPEmukitWrapper(gpy_model, batch_size)

    # Create acquisition
    emukit_acquisition = EmukitAcquisitionFunctionWrapper(model, acquisition)

    if type(emukit_acquisition.acquisitionFunction) is ThompsonSampling:
        parameter_space = []
        for j in range(len(lims)):
            parameter_space += [
                ContinuousParameter("x{}".format(j), lims[j][0], lims[j][1])
            ]
        parameter_space = ParameterSpace(parameter_space)
        acquisition_optimizer = SequentialGradientAcquisitionOptimizer(
            parameter_space, batch_size)
    else:
        parameter_space = []
        for k in range(batch_size):
            for j in range(len(lims)):
                parameter_space += [
                    ContinuousParameter("x{}{}".format(k, j), lims[j][0],
                                        lims[j][1])
                ]
        parameter_space = ParameterSpace(parameter_space)
        acquisition_optimizer = GradientAcquisitionOptimizer(parameter_space)

    return BayesianOptimizationLoop(
        model=model,
        space=parameter_space,
        acquisition=emukit_acquisition,
        acquisition_optimizer=acquisition_optimizer)
Example #12
def test_multi_source_sequential_with_source_context():
    # Check that we can fix a non-information source parameter with context
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace(
        [ContinuousParameter("x", 0, 1), ContinuousParameter("y", 0, 1), InformationSourceParameter(2)]
    )
    acquisition_optimizer = GradientAcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(acquisition_optimizer, space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, multi_source_acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={"source": 1.0})

    # "SequentialPointCalculator" should only ever return 1 value
    assert len(next_points) == 1
    # Context value should be what we set
    assert np.isclose(next_points[0, 1], 1.0)
Example #13
def test_gradient_acquisition_optimizer(simple_square_acquisition):
    space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    optimizer = GradientAcquisitionOptimizer(space)
    opt_x, opt_val = optimizer.optimize(simple_square_acquisition)
    assert_array_equal(opt_x, np.array([[0.]]))
    assert_array_equal(opt_val, np.array([[1.]]))
Example #14
    return out

# Set up random seeding of parameter space
num_data_points = no_random_seeds
design = RandomDesign(parameter_space)
X = design.get_samples(num_data_points)
Y = q(X)

# Set up emukit model
model_gpy = GPRegression(X, Y)
model_gpy.optimize()
model_emukit = GPyModelWrapper(model_gpy)

# Set up Bayesian optimisation routine
exp_imprv = ExpectedImprovement(model=model_emukit)
optimizer = GradientAcquisitionOptimizer(space=parameter_space)
point_calc = SequentialPointCalculator(exp_imprv, optimizer)

# Bayesian optimisation routine
bayesopt_loop = BayesianOptimizationLoop(model = model_emukit,
                                         space = parameter_space,
                                         acquisition=exp_imprv,
                                         batch_size=1)

stopping_condition = FixedIterationsStoppingCondition(i_max=no_BO_sims)
bayesopt_loop.run_loop(q, stopping_condition)


# Results of Bayesian optimisation
coord_results = bayesopt_loop.get_results().minimum_location
min_value = bayesopt_loop.get_results().minimum_value
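
This snippet is truncated at the top: parameter_space, q, no_random_seeds and no_BO_sims are defined in the part that was cut off. A hypothetical preamble along these lines (names taken from the snippet; values and objective are assumptions) makes it self-contained:

import numpy as np
from emukit.core import ParameterSpace, ContinuousParameter

no_random_seeds = 10  # assumed number of random initial samples
no_BO_sims = 25       # assumed number of BO iterations

parameter_space = ParameterSpace([ContinuousParameter('x', 0, 1)])

def q(X):
    # Assumed stand-in for the objective whose truncated body ends in the
    # 'return out' line above
    out = np.sum(np.square(X), axis=1, keepdims=True)
    return out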
Example #15
def NonCausal_BO(num_trials, graph, dict_ranges, interventional_data_x, interventional_data_y, costs, 
			observational_samples, functions, min_intervention_value, min_y, intervention_variables, Causal_prior=False):

	## Get do function corresponding to the specified intervention_variables
	function_name = get_do_function_name(intervention_variables)
	do_function = graph.get_all_do()[function_name]

	## Compute input space dimension
	input_space = len(intervention_variables)

	## Initialise arrays for storing results
	current_best_x = np.zeros((num_trials + 1, input_space))
	current_best_y = np.zeros((num_trials + 1, 1))
	current_cost = np.zeros((num_trials + 1, 1))

	## Get functions for mean do and var do
	mean_function_do, var_function_do = mean_var_do_functions(do_function, observational_samples, functions)

	## Get interventional data
	data_x = interventional_data_x.copy()
	data_y = interventional_data_y.copy()

	
	## Assign the initial values 
	current_cost[0] = 0.
	current_best_y[0] = min_y
	current_best_x[0] = min_intervention_value
	cumulative_cost = 0.


	## Compute target function and space parameters
	target_function, space_parameters = Intervention_function(get_interventional_dict(intervention_variables),
																model = graph.define_SEM(), target_variable = 'Y', 
																min_intervention = list_interventional_ranges(graph.get_interventional_ranges(), intervention_variables)[0],
																max_intervention = list_interventional_ranges(graph.get_interventional_ranges(), intervention_variables)[1])


	if not Causal_prior:
		#### Define the model without Causal prior
		gpy_model = GPy.models.GPRegression(data_x, data_y, GPy.kern.RBF(input_space, lengthscale=1., variance=1.), noise_var=1e-10)
		emukit_model = GPyModelWrapper(gpy_model)
	else:
		#### Define the model with Causal prior
		mf = GPy.core.Mapping(input_space, 1)
		mf.f = lambda x: mean_function_do(x)
		mf.update_gradients = lambda a, b: None
		kernel = CausalRBF(input_space, variance_adjustment=var_function_do, lengthscale=1., variance=1., rescale_variance=1., ARD=False)
		gpy_model = GPy.models.GPRegression(data_x, data_y, kernel, noise_var=1e-10, mean_function=mf)
		emukit_model = GPyModelWrapper(gpy_model)


	## BO loop
	start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
	for j in range(num_trials):
		print('Iteration', j)
		## Optimize model and get new evaluation point
		emukit_model.optimize()
		acquisition = ExpectedImprovement(emukit_model)
		optimizer = GradientAcquisitionOptimizer(space_parameters)
		x_new, _ = optimizer.optimize(acquisition)
		y_new = target_function(x_new)

		## Append the data
		data_x = np.append(data_x, x_new, axis=0)
		data_y = np.append(data_y, y_new, axis=0)
		emukit_model.set_data(data_x, data_y)

		## Compute cost
		x_new_dict = get_new_dict_x(x_new, intervention_variables)
		cumulative_cost += total_cost(intervention_variables, costs, x_new_dict)
		current_cost[j + 1] = cumulative_cost

		## Get current optimum
		results = np.concatenate((emukit_model.X, emukit_model.Y), axis=1)
		current_best_y[j + 1] = np.min(results[:, input_space])
		if results[results[:, input_space] == np.min(results[:, input_space]), :input_space].shape[0] > 1:
			best_x = results[results[:, input_space] == np.min(results[:, input_space]), :input_space][0]
		else:
			best_x = results[results[:, input_space] == np.min(results[:, input_space]), :input_space]
		current_best_x[j + 1] = best_x  # store the incumbent input alongside its value
		print('Current best Y', np.min(results[:, input_space]))

	total_time = time.perf_counter() - start_time

	return (current_cost, current_best_x, current_best_y, total_time)
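
All of the examples above follow the same basic pattern: build a ParameterSpace, wrap a model for emukit, choose an acquisition, and let GradientAcquisitionOptimizer maximise it. A minimal end-to-end sketch of that pattern, assuming a standard emukit + GPy installation (the import paths follow the emukit package layout):

import numpy as np
import GPy

from emukit.bayesian_optimization.acquisitions import ExpectedImprovement
from emukit.core import ContinuousParameter, ParameterSpace
from emukit.core.optimization import GradientAcquisitionOptimizer
from emukit.model_wrappers import GPyModelWrapper

# Toy data on a one-dimensional space
space = ParameterSpace([ContinuousParameter('x', 0, 1)])
x_init = np.random.rand(5, 1)
y_init = np.random.rand(5, 1)

# GP model wrapped for emukit
model = GPyModelWrapper(GPy.models.GPRegression(x_init, y_init))

# Find the maximiser of expected improvement over the space
acquisition = ExpectedImprovement(model)
optimizer = GradientAcquisitionOptimizer(space)
x_next, acq_value = optimizer.optimize(acquisition)
print(x_next, acq_value)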