def bayesian_opt():
    # 1. Ranges of the eight synth parameters.
    syn1 = syn2 = syn3 = syn4 = syn5 = np.arange(158)
    syn6 = np.arange(6000)
    syn7 = np.arange(1000)
    syn8 = np.arange(700)

    # 2. Map the synth parameter ranges onto an 8-D parameter space.
    # DiscreteParameter versions over syn1..syn8 would also work here;
    # continuous parameters are used so the acquisition can be optimized
    # with gradients.
    parameter_space = ParameterSpace(
        [ContinuousParameter('x1', 0., 157.),
         ContinuousParameter('x2', 0., 157.),
         ContinuousParameter('x3', 0., 157.),
         ContinuousParameter('x4', 0., 157.),
         ContinuousParameter('x5', 0., 157.),
         ContinuousParameter('x6', 0., 5999.),
         ContinuousParameter('x7', 0., 999.),
         ContinuousParameter('x8', 0., 699.)])

    # 3. Collect random initial points.
    design = RandomDesign(parameter_space)
    X = design.get_samples(num_data_points)  # X is a numpy array
    print("X=", X)

    # 4. Evaluate the objective at the initial design. Calling
    # UserFunction.evaluate or create_loop_state by hand is not needed:
    # run_loop wraps a plain callable itself.
    Y = training_function(X)

    # 5. Train a GP model and wrap it for Emukit.
    model_gpy = GPRegression(X, Y, normalizer=True)
    model_emukit = GPyModelWrapper(model_gpy)

    expected_improvement = ExpectedImprovement(model=model_emukit)
    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=expected_improvement,
                                             batch_size=5)
    max_iterations = 15
    bayesopt_loop.run_loop(training_function, max_iterations)

    model_gpy.plot()
    plt.show()

    results = bayesopt_loop.get_results()
    print("X: ", bayesopt_loop.loop_state.X)
    print("Y: ", bayesopt_loop.loop_state.Y)
    print("cost: ", bayesopt_loop.loop_state.cost)
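The snippet above assumes module-level `training_function` and `num_data_points`. Emukit user functions take a 2-D array with one row per configuration and must return a 2-D `(n, 1)` array of objective values; a minimal hypothetical stand-in:

import numpy as np

num_data_points = 5  # assumed module-level constant

def training_function(X: np.ndarray) -> np.ndarray:
    # Hypothetical placeholder objective: squared distance of the eight
    # synth parameters from an arbitrary reference point. (n, 8) in,
    # (n, 1) out, as Emukit expects.
    return np.sum((X - 50.0) ** 2, axis=1, keepdims=True)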
def test_categorical_variables():
    np.random.seed(123)

    def objective(x):
        return np.array(np.sum(x, axis=1).reshape(-1, 1))

    carol_spirits = ['past', 'present', 'yet to come']
    encoding = OneHotEncoding(carol_spirits)
    parameter_space = ParameterSpace([
        ContinuousParameter('real_param', 0.0, 1.0),
        CategoricalParameter('categorical_param', encoding)
    ])

    random_design = RandomDesign(parameter_space)
    x_init = random_design.get_samples(10)

    assert x_init.shape == (10, 4)
    assert np.all(np.logical_or(x_init[:, 1:3] == 0.0, x_init[:, 1:3] == 1.0))

    y_init = objective(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    acquisition = ExpectedImprovement(model)

    loop = BayesianOptimizationLoop(parameter_space, model, acquisition)
    loop.run_loop(objective, 5)

    assert len(loop.loop_state.Y) == 15
    assert np.all(np.logical_or(loop.loop_state.X[:, 1:3] == 0.0,
                                loop.loop_state.X[:, 1:3] == 1.0))
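With one continuous parameter and a three-category one-hot encoding, each sample row has four columns, which is what the shape assert checks. A sketch (assuming the column order above) of decoding a row back to its category, e.g. inside the test:

x_row = x_init[0]
real_value = x_row[0]                               # 'real_param'
spirit = carol_spirits[int(np.argmax(x_row[1:4]))]  # one-hot columns 1-3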
def test_loop():
    n_iterations = 5

    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    # Make GPy model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    acquisition = ExpectedImprovement(model)

    # Make loop and collect points
    bo = BayesianOptimizationLoop(model=model, space=space, acquisition=acquisition)
    bo.run_loop(UserFunctionWrapper(f), FixedIterationsStoppingCondition(n_iterations))

    # Check we got the correct number of points
    assert bo.loop_state.X.shape[0] == n_iterations + 5

    # Check the obtained results
    results = bo.get_results()

    assert results.minimum_location.shape[0] == 1
    assert results.best_found_value_per_iteration.shape[0] == n_iterations + 5
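The test references a module-level `f` that is not shown in the excerpt. Any one-dimensional objective with Emukit's array-in, array-out contract works as a stand-in, for example:

def f(x):
    # Hypothetical 1-D objective on [0, 1]: (n, 1) array in, (n, 1) out.
    return np.sin(6.0 * x) + x ** 2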
def create_bayesian_optimization_loop(
        gpy_model: ComparisonGP, lims: np.array, batch_size: int,
        acquisition: AcquisitionFunction) -> BayesianOptimizationLoop:
    """
    Creates a Bayesian optimization loop for Bayesian neural network or
    random forest models.

    :param gpy_model: the GPy model used in optimization
    :param lims: optimization limits for the inputs
    :param batch_size: number of observations used in a batch
    :param acquisition: acquisition function used in the Bayesian optimization
    :return: Emukit BO loop
    """
    # Create model
    model = ComparisonGPEmukitWrapper(gpy_model, batch_size)

    # Create acquisition
    emukit_acquisition = EmukitAcquisitionFunctionWrapper(model, acquisition)

    if type(emukit_acquisition.acquisitionFunction) is ThompsonSampling:
        # Thompson sampling picks batch points sequentially in the raw space.
        parameter_space = []
        for j in range(len(lims)):
            parameter_space += [
                ContinuousParameter("x{}".format(j), lims[j][0], lims[j][1])
            ]
        parameter_space = ParameterSpace(parameter_space)
        acquisition_optimizer = SequentialGradientAcquisitionOptimizer(
            parameter_space, batch_size)
    else:
        # Other acquisitions optimize over a space replicated once per
        # batch element, i.e. batch_size * len(lims) parameters.
        parameter_space = []
        for k in range(batch_size):
            for j in range(len(lims)):
                parameter_space += [
                    ContinuousParameter("x{}{}".format(k, j), lims[j][0],
                                        lims[j][1])
                ]
        parameter_space = ParameterSpace(parameter_space)
        acquisition_optimizer = GradientAcquisitionOptimizer(parameter_space)

    return BayesianOptimizationLoop(
        model=model,
        space=parameter_space,
        acquisition=emukit_acquisition,
        acquisition_optimizer=acquisition_optimizer)
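In the non-Thompson branch the space is replicated once per batch element, so each parameter name encodes a (batch index, input index) pair. A pure-Python illustration of the resulting names for a hypothetical `batch_size=2` and two input dimensions:

lims = [[0.0, 1.0], [-1.0, 1.0]]  # illustrative limits
names = ["x{}{}".format(k, j) for k in range(2) for j in range(len(lims))]
print(names)  # ['x00', 'x01', 'x10', 'x11']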
def test_optimization_with_linear_constraint():
    branin_fcn, parameter_space = branin_function()

    x_init = parameter_space.sample_uniform(10)
    y_init = branin_fcn(x_init)

    # Constrain the inputs to b_lower <= A x <= b_upper.
    A = np.array([[1.0, 1.0]])
    b_lower = np.array([-5])
    b_upper = np.array([5])
    parameter_space.constraints = [LinearInequalityConstraint(A, b_lower, b_upper)]

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    lp = BayesianOptimizationLoop(parameter_space, model)
    lp.run_loop(branin_fcn, 5)

    # Smoke test: the constrained loop should run without raising.
    assert True
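If one wanted the test to verify the constraint on the points the loop proposed, a sketch to append inside the test. The 10 initial samples were drawn before the constraint was attached, so they are excluded:

Ax = lp.loop_state.X[10:] @ A.T
assert np.all((Ax >= b_lower) & (Ax <= b_upper))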
def test_local_penalization():
    np.random.seed(123)
    branin_fcn, parameter_space = branin_function()

    random_design = RandomDesign(parameter_space)
    x_init = random_design.get_samples(10)
    y_init = branin_fcn(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    base_acquisition = ExpectedImprovement(model)

    batch_size = 10
    update_interval = 1

    lp = BayesianOptimizationLoop(parameter_space, model, base_acquisition,
                                  update_interval, batch_size)
    lp.run_loop(branin_fcn, 5)

    # 10 initial points + 5 iterations x batch of 10 = 60 evaluations.
    assert len(lp.loop_state.Y) == 60
def test_batch_loop_fails_without_gradients_implemented():
    parameter_space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    model = mock.create_autospec(IModel)

    base_acquisition = ExpectedImprovement(model)

    batch_size = 10

    # Batch collection uses local penalization, which needs predictive
    # gradients; a bare IModel does not provide them, so this must raise.
    with pytest.raises(ValueError):
        BayesianOptimizationLoop(parameter_space, model, base_acquisition,
                                 batch_size=batch_size)
def test_local_penalization():
    np.random.seed(123)
    branin_fcn, parameter_space = branin_function()

    x_init = parameter_space.sample_uniform(10)
    y_init = branin_fcn(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    base_acquisition = ExpectedImprovement(model)

    batch_size = 10
    update_interval = 1

    lp = BayesianOptimizationLoop(parameter_space, model, base_acquisition,
                                  update_interval, batch_size)
    lp.run_loop(branin_fcn, 5)

    assert len(lp.loop_state.Y) == 60
def run_optimisation(current_range, freq_range, power_range):
    parameter_space = ParameterSpace([
        ContinuousParameter('current', current_range[0], current_range[1]),
        ContinuousParameter('freq', freq_range[0], freq_range[1]),
        ContinuousParameter('power', power_range[0], power_range[1])
    ])

    def function(X):
        current = X[:, 0]
        freq = X[:, 1]
        power = X[:, 2]
        out = np.zeros((len(current), 1))
        for g in range(len(current)):
            # Set JPA current, frequency & power here, then measure.
            out[g, 0] = -get_SNR(plot=False)[-1]  # negative as we want to maximise SNR
        return out

    num_data_points = 10
    design = RandomDesign(parameter_space)
    X = design.get_samples(num_data_points)
    Y = function(X)

    model_gpy = GPRegression(X, Y)
    model_gpy.optimize()
    model_emukit = GPyModelWrapper(model_gpy)

    exp_imprv = ExpectedImprovement(model=model_emukit)
    optimizer = GradientAcquisitionOptimizer(space=parameter_space)
    # Note: the loop below builds its own sequential point calculator from
    # exp_imprv, so point_calc is not passed to it.
    point_calc = SequentialPointCalculator(exp_imprv, optimizer)

    bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                             space=parameter_space,
                                             acquisition=exp_imprv,
                                             batch_size=1)
    stopping_condition = FixedIterationsStoppingCondition(i_max=100)
    bayesopt_loop.run_loop(function, stopping_condition)

    coord_results = bayesopt_loop.get_results().minimum_location
    min_value = bayesopt_loop.get_results().minimum_value
    step_results = bayesopt_loop.get_results().best_found_value_per_iteration
    print(coord_results)
    print(min_value)
    return coord_results, abs(min_value)
X = design.get_samples(num_data_points)
Y = q(X)

# Set up Emukit model
model_gpy = GPRegression(X, Y)
model_gpy.optimize()
model_emukit = GPyModelWrapper(model_gpy)

# Set up Bayesian optimisation routine
exp_imprv = ExpectedImprovement(model=model_emukit)
optimizer = GradientAcquisitionOptimizer(space=parameter_space)
point_calc = SequentialPointCalculator(exp_imprv, optimizer)

# Bayesian optimisation routine
bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                         space=parameter_space,
                                         acquisition=exp_imprv,
                                         batch_size=1)
stopping_condition = FixedIterationsStoppingCondition(i_max=no_BO_sims)
bayesopt_loop.run_loop(q, stopping_condition)

# Results of Bayesian optimisation
coord_results = bayesopt_loop.get_results().minimum_location
min_value = bayesopt_loop.get_results().minimum_value
step_results = bayesopt_loop.get_results().best_found_value_per_iteration
print(coord_results)
print(min_value)

# Save the parameters of the best resonator
results = [coord_results, min_value]
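This fragment assumes `q`, `design`, `parameter_space`, and `no_BO_sims` are defined earlier in the script. Hypothetical stand-ins for the two that never appear in the excerpt:

no_BO_sims = 50  # assumed iteration budget

def q(X):
    # Hypothetical resonator objective: (n, d) array in, (n, 1) out.
    return np.sum(np.sin(X), axis=1, keepdims=True)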
latin_design = LatinDesign(parameter_space=parameter_space)
X0 = latin_design.get_samples(n_samples)
Y0 = training_function(X0)
# D0 = ((Y0 - target)**2).sum(axis=1)
# plotter = BayesOptPlotter(h_noiseless, target, xmin, xmax, X0=X0, Y0=Y0)

model = GPRegression(X0, Y0)
model_wrapped = GPyModelWrapper(model)

target = user_sample_vector
acq = L2_LCB(model=model_wrapped, target=target)

# Re-fit the GP hyperparameters at the end of every iteration.
fit_update = lambda a, b: model.optimize_restarts(verbose=False)

bayesopt_loop = BayesianOptimizationLoop(model=model_wrapped,
                                         space=parameter_space,
                                         acquisition=acq)
bayesopt_loop.iteration_end_event.append(fit_update)
bayesopt_loop.run_loop(training_function, 5)
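`L2_LCB` is not a standard Emukit acquisition; it appears to be a custom target-matching criterion. A minimal sketch of what such an acquisition could look like against Emukit's `Acquisition` interface (assumed behaviour, not the original implementation):

from emukit.core.acquisition import Acquisition

class L2_LCB(Acquisition):
    """Sketch: lower confidence bound on squared L2 distance to a target."""

    def __init__(self, model, target, beta=np.float64(1.)):
        self.model = model
        self.target = target
        self.beta = beta  # exploration constant

    def evaluate(self, x):
        mean, variance = self.model.predict(x)
        sq_dist = np.sum((mean - self.target) ** 2, axis=1, keepdims=True)
        std = np.sum(np.sqrt(variance), axis=1, keepdims=True)
        # Emukit maximizes acquisitions, so negate the optimistic distance.
        return -(sq_dist - self.beta * std)

    @property
    def has_gradients(self):
        return False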
def bo_loop(config, image_path, ai_model=None):
    # The benchmark function and its parameter space are looked up by name.
    target_function, space = eval(config.name)()
    data_dim = config.data_dim
    num_mix = config.num_mix
    init_num_data = config.init_num_data
    interval_std = config.interval_std

    interval = np.zeros((1, data_dim))
    std = np.zeros((1, data_dim))
    mean = np.zeros((1, data_dim))

    # Set up data scaling: shift each input to zero mean and rescale it.
    for ii in range(data_dim):
        interval[0, ii] = space.parameters[ii].max - space.parameters[ii].min
        std[0, ii] = interval[0, ii] / interval_std
        mean[0, ii] = (space.parameters[ii].max + space.parameters[ii].min) / 2
        space.parameters[ii].min = (space.parameters[ii].min - mean[0, ii]) / std[0, ii]
        space.parameters[ii].max = (space.parameters[ii].max - mean[0, ii]) / std[0, ii]

    results_list = [None] * config.repeated_runs
    best_value_per_iter = np.zeros((config.repeated_runs, config.bo_iter))
    npr = np.random.RandomState(123)

    for ii in tqdm(range(config.repeated_runs)):
        # Initialize data points.
        X_init = (npr.rand(init_num_data, data_dim) - 0.5) * interval + mean
        X_init_norm = (X_init - mean) / std
        Y_init = target_function(X_init)
        Y_init_norm, mean_Y, std_Y = standardize(Y_init)

        # The function seen by the loop works on normalized inputs/outputs.
        function_norm = lambda x: (target_function(x * std + mean) - mean_Y) / std_Y

        if config.is_GPY:
            kernel = GPy.kern.RBF(input_dim=data_dim,
                                  variance=npr.rand(1),
                                  lengthscale=npr.rand(data_dim),
                                  ARD=True)
            for jj in range(num_mix - 1):
                rbf_new = GPy.kern.RBF(input_dim=data_dim,
                                       variance=npr.rand(1),
                                       lengthscale=npr.rand(data_dim),
                                       ARD=True)
                kernel = kernel + rbf_new
            if config.is_sparseGP:
                # Use the seeded RandomState here too, for reproducibility.
                z = (npr.rand(config.num_inducing_pts, data_dim) - 0.5) * interval_std
                model_gp = GPy.models.SparseGPRegression(X_init_norm, Y_init_norm,
                                                         kernel, Z=z)
            else:
                model_gp = GPy.models.GPRegression(X_init_norm, Y_init_norm, kernel)
            model_gp.Gaussian_noise.variance = config.epsilon
            model_gp.Gaussian_noise.variance.fix()
            model_emukit = GPyModelWrapperTime(model_gp)
            model_emukit.optimize()
        else:
            # Set up the Emukit_BO_BQ_GP_Model.
            model_emukit = Emukit_BO_BQ_GP_Model(X_init_norm, Y_init_norm,
                                                 config, ai_model)
            model_emukit.optimize()
            model_emukit.set_kernel()

        expected_improvement = ExpectedImprovement(model=model_emukit)
        bayesopt_loop = BayesianOptimizationLoop(model=model_emukit,
                                                 space=space,
                                                 acquisition=expected_improvement,
                                                 batch_size=1)
        max_iterations = config.bo_iter
        bayesopt_loop.run_loop(function_norm, max_iterations)
        results = bayesopt_loop.get_results()

        # Scale x and y back to the original units.
        results_save = edict()
        results_save.best_found_value_per_iteration = (
            results.best_found_value_per_iteration[init_num_data:] * std_Y.item()
            + mean_Y.item())
        best_value_per_iter[ii, :] = results_save.best_found_value_per_iteration
        results_save.minimum_value = results.minimum_value * std_Y.item() + mean_Y.item()
        results_save.minimum_location = results.minimum_location * std.squeeze(0) + mean.squeeze(0)
        results_save.time_elapsed = model_emukit.time_count
        results_list[ii] = results_save

    best_value_mean = np.mean(best_value_per_iter, 0)
    best_value_std = np.std(best_value_per_iter, 0)

    plt.figure(figsize=(12, 8))
    plt.fill_between(np.arange(max_iterations) + 1,
                     best_value_mean - 0.2 * best_value_std,
                     best_value_mean + 0.2 * best_value_std,
                     color='red', alpha=0.15)
    plt.plot(np.arange(max_iterations) + 1, best_value_mean, 'or-',
             lw=2, label='Best found function value')
    plt.legend(loc=2, prop={'size': LEGEND_SIZE})
    plt.xlabel(r"iteration")
    plt.ylabel(r"$f(x)$")
    plt.grid(True)
    plt.savefig(image_path, format='pdf')

    return results_list
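`standardize` is used above but not defined in the excerpt. A hypothetical stand-in, consistent with how `mean_Y.item()` and `std_Y.item()` are used later:

def standardize(y):
    # Zero-mean, unit-variance scaling; returns the statistics so the
    # results can be mapped back to the original units.
    mean_y = np.mean(y, axis=0, keepdims=True)
    std_y = np.std(y, axis=0, keepdims=True)
    return (y - mean_y) / std_y, mean_y, std_y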
# Use random forests as the surrogate model.
from emukit.examples.models.random_forest import RandomForest
from emukit.experimental_design import RandomDesign

random_design = RandomDesign(space)

# Start finding the optimum.
start = time.time()
initial_points_count = 1500
X_init = random_design.get_samples(initial_points_count)
Y_init = emukit_friendly_objective_function(X_init)
rf_model = RandomForest(X_init, Y_init)

loop = BayesianOptimizationLoop(space, rf_model)
noSamples = 1500
loop.run_loop(emukit_friendly_objective_function, noSamples)
end = time.time()
print('time needed: ' + str(end - start))

# Get results.
bestIteration = np.argmin(loop.loop_state.Y)
bestPointEncoded = loop.loop_state.X[bestIteration]
bestAction = encodingToAction(bestPointEncoded, encodingList)
model.printAllAboutAction(bestAction)
model.savePerformance('emukit_opt', end - start, noSamples + initial_points_count, bestAction)
print("end of optimization")
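The incumbent lookup via `np.argmin` over `loop_state.Y` can equivalently use the loop's results helper, which reports the same point and value:

results = loop.get_results()
best_point_encoded = results.minimum_location  # same as bestPointEncoded above
best_value = results.minimum_value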
fun = env.reset(upper_bound=1, lower_bound=0)
ppo.ppoMax = 0
ppo.ppoMin = float('inf')
ppo.ep_r = 0
ppo.funCurMax = env.maxVal
ppo.curFun = env.getCurFun()

design = RandomDesign(parameter_space)  # Collect random points
X = design.get_samples(num_data_points)
Y = fun(X)
model_gpy = GPRegression(X, Y)  # Train and wrap the model in Emukit
model_emukit = GPyModelWrapper(model_gpy)
ppo.model = model_emukit

bo = BayesianOptimizationLoop(model=model_emukit,
                              space=parameter_space,
                              acquisition=ppo,
                              batch_size=1)

mu = np.array([np.mean(bo.loop_state.X)])[np.newaxis]
var = np.array([np.var(bo.loop_state.X)])[np.newaxis]
s = np.concatenate((mu, var), axis=1)
boPPOep_r = []

model_gpyEI = GPRegression(X, Y)  # Train and wrap the model in Emukit
model_emukitEI = GPyModelWrapper(model_gpyEI)
boEI = BayesianOptimizationLoop(model=model_emukitEI,
                                space=parameter_space,
                                acquisition=ExpectedImprovement(
                                    # EI uses this loop's own model wrapper
                                    model=model_emukitEI,
                                    jitter=Exploration_parameter),
                                batch_size=1)
boEIep_r = []
# (Earlier branches of this chain are cut off in the excerpt; "lcb" is an
# inferred condition for the NegativeLowerConfidenceBound branch.)
if args.acquisition_type == "lcb":
    acquisition = NegativeLowerConfidenceBound(model)
elif args.acquisition_type == "logei":
    acquisition = LogExpectedImprovement(model)
elif args.acquisition_type == "entropy_search":
    model = BOGP(X_init=X_init, Y_init=Y_init)
    acquisition = EntropySearch(model, space=space)

# if with_gradients:
#     acquisition_optimizer = AcquisitionOptimizer(space)
# else:
acquisition_optimizer = DirectOptimizer(space)
candidate_point_calculator = Sequential(acquisition, acquisition_optimizer)

bo = BayesianOptimizationLoop(model=model,
                              space=space,
                              X_init=X_init,
                              Y_init=Y_init,
                              acquisition=acquisition,
                              candidate_point_calculator=candidate_point_calculator)
bo.run_loop(user_function=obj,
            stopping_condition=FixedIterationsStoppingCondition(args.num_iterations))

# Track the incumbent trajectory and simple regret against the optimum.
curr_inc = np.inf
traj = []
regret = []
for yi in bo.loop_state.Y:
    if curr_inc > yi:
        curr_inc = yi[0]
    traj.append(curr_inc)
    regret.append(curr_inc - f_opt)

data = dict()
data["regret"] = regret

path = os.path.join(args.output_path, args.benchmark)
# ppo.ppoMax = 0
ppo.ppoMin = env.maxVal
ppo.ep_r = 0
boPPOep_r = []
# ppo.funCurMax = env.maxVal
ppo.funCurMin = env.minVal
ppo.curFun = env.getCurFun()

design = RandomDesign(parameter_space)  # Collect random points
X = design.get_samples(num_data_points)
Y = fun(X)
model_gpy = GPRegression(X, Y)  # Train and wrap the model in Emukit
model_emukit = GPyModelWrapper(model_gpy)
ppo.model = model_emukit

bo = BayesianOptimizationLoop(model=model_emukit,
                              space=parameter_space,
                              acquisition=ppo,
                              batch_size=1)

mu_, var_ = bo.model.predict(bo.loop_state.X[-1].reshape(-1, 1))
ppo.s_ = np.concatenate((mu_, var_), axis=1)

for t in range(EP_LEN):  # in one episode
    bo.run_loop(fun, 1)
    # ppo.ppoMax = max(ppo.ppoMax, env.curFun(bo.loop_state.X[-1]))
    ppo.ppoMin = min(ppo.ppoMin, env.curFun(bo.loop_state.X[-1]))
    # boPPOep_r.append(ppo.ppoMax - env.maxVal)
    boPPOep_r.append(env.minVal - ppo.ppoMin)
    if (t + 1) % BATCH == 0 or t == EP_LEN - 1:
        print("updating.......")
        v_s_ = ppo.get_v(ppo.s_)
def make_loop(loop_state):
    gpy_model = GPy.models.GPRegression(loop_state.X, loop_state.Y)
    model = GPyModelWrapper(gpy_model)
    return BayesianOptimizationLoop(space, model)
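A hypothetical use of `make_loop`, rebuilding a fresh loop from previously collected data via Emukit's `create_loop_state` (here `x_init`, `y_init`, and `objective` are assumed):

from emukit.core.loop.loop_state import create_loop_state

initial_state = create_loop_state(x_init, y_init)
loop = make_loop(initial_state)
loop.run_loop(objective, 10)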
def main():
    print("######################")
    global target, X0, Y0, values, frac_M, frac_X, bo_flag

    # target_params = np.array([[0.14, 0.4], [1.4, 0.03]])
    # target = LiX_wrapper(True, 'LiF', 'Rocksalt', 'JC',
    #                      target_params, False, False, eng)
    target = np.array([[-764.5, 6.012 * 0.99, 6.012 * 0.99, 6.012 * 0.99]])

    # Pick the components of the target that the chosen focus compares against.
    if focus == 'energy':
        target_comp = target[0, 0].reshape(1, -1)
    elif focus == 'constant':
        target_comp = target[0, 1].reshape(1, -1)
    else:
        target_comp = target[0, :4].reshape(1, -1)
    print('Target initialized!')

    latin_design = LatinDesign(parameter_space=parameter_space)
    X0 = latin_design.get_samples(INIT_POINTS)
    Y0 = np.array([])
    for x in X0:
        x = np.array([x])
        Y0 = np.append(Y0, f.evaluate(x))

    values = []
    for y in Y0:
        values.append(y.Y)
    values = np.asarray(values, dtype=float)

    # Redundancy check
    if (values[:, 7:-1] == values[0, 7]).all():
        values = values[:, :7]
        frac_X = False
        if (values[:, 4:7] == values[0, 4]).all():
            values = values[:, :4]
            frac_M = False

    values = values.reshape(-1, np.max(np.shape(target)))
    bo_flag = True
    if focus == 'energy':
        values = values[:, 0].reshape(-1, 1)
    if focus == 'constant':
        values = values[:, 1:4].reshape(-1, 3)

    # BO loop
    kern = Matern52(X0.shape[1], variance=1)
    model = GPRegression(X0, values, kernel=kern, normalizer=True,
                         noise_var=NOISE)  # kernel=None would give the RBF default
    model.optimize(optimizer='lbfgsb')
    model.optimize_restarts(num_restarts=50, verbose=False)
    model_wrapped = GPyModelWrapper(model)

    acq = L2_LCB(model=model_wrapped, target=target_comp,
                 beta=np.float64(1.))  # beta is the exploration constant

    bayesopt_loop = BayesianOptimizationLoop(model=model_wrapped,
                                             space=parameter_space,
                                             acquisition=acq)
    bayesopt_loop.run_loop(f, BO_ITER)

    return save(bayesopt_loop)