def __init__(self, config_space, burnin=3000, n_iters=10000):
    super(Bohamiann, self).__init__(sacred_space_to_configspace(config_space))
    self.rng = np.random.RandomState(np.random.seed())
    self.n_dims = len(self.config_space.get_hyperparameters())

    # All inputs are mapped to be in [0, 1]^D
    self.lower = np.zeros([self.n_dims])
    self.upper = np.ones([self.n_dims])
    self.incumbents = []
    self.X = None
    self.y = None

    self.model = BayesianNeuralNetwork(sampling_method="sghmc",
                                       l_rate=np.sqrt(1e-4),
                                       mdecay=0.05,
                                       burn_in=burnin,
                                       n_iters=n_iters,
                                       precondition=True,
                                       normalize_input=True,
                                       normalize_output=True)

    self.acquisition_func = LogEI(self.model)

    self.maximizer = Direct(self.acquisition_func, self.lower, self.upper,
                            verbose=False)
def test_direct(self):
    maximizer = Direct(self.acquisition_func, self.X_lower, self.X_upper)
    x = maximizer.maximize()

    assert x.shape[0] == 1
    assert x.shape[1] == self.dims
    assert np.all(x[:, 0] >= self.X_lower[0])
    assert np.all(x[:, 0] <= self.X_upper[0])
def test_direct(self):
    maximizer = Direct(self.objective_function, self.lower, self.upper)
    x = maximizer.maximize()

    assert x.shape[0] == 2
    assert len(x.shape) == 1
    assert np.all(x >= self.lower)
    assert np.all(x <= self.upper)
def test_direct(self):
    maximizer = Direct(self.objective_function, self.lower, self.upper)
    x = maximizer.maximize()

    assert x.shape[0] == 1
    assert len(x.shape) == 1
    assert np.all(x >= self.lower)
    assert np.all(x <= self.upper)
def suggest_configuration(self):
    if self.X is None and self.Y is None:
        new_x = init_random_uniform(self.X_lower, self.X_upper,
                                    N=1, rng=self.rng)

    elif self.X.shape[0] == 1:
        # We need at least 2 data points to train a GP
        new_x = init_random_uniform(self.X_lower, self.X_upper,
                                    N=1, rng=self.rng)

    else:
        prior = DNGOPrior()
        model = DNGO(batch_size=100, num_epochs=20000,
                     learning_rate=0.1, momentum=0.9,
                     l2=1e-16, adapt_epoch=5000,
                     n_hypers=20, prior=prior,
                     do_optimize=True, do_mcmc=True)

        # acquisition_func = EI(model, task.X_lower, task.X_upper)
        lo = np.ones([model.n_units_3]) * -1
        up = np.ones([model.n_units_3])
        ei = LogEI(model, lo, up)

        acquisition_func = IntegratedAcquisition(model, ei,
                                                 self.X_lower, self.X_upper)

        maximizer = Direct(acquisition_func, self.X_lower, self.X_upper)

        model.train(self.X, self.Y)
        acquisition_func.update(model)

        new_x = maximizer.maximize()

    # Map from [0, 1]^D space back to original space
    next_config = Configuration(self.config_space, vector=new_x[0, :])

    # Transform to sacred configuration
    result = configspace_config_to_sacred(next_config)

    return result
def suggest_configuration(self):
    if self.X is None and self.y is None:
        new_x = init_random_uniform(self.lower, self.upper,
                                    n_points=1, rng=self.rng)[0, :]

    elif self.X.shape[0] == 1:
        # We need at least 2 data points to train a GP
        new_x = init_random_uniform(self.lower, self.upper,
                                    n_points=1, rng=self.rng)[0, :]

    else:
        cov_amp = 1
        n_dims = self.lower.shape[0]

        initial_ls = np.ones([n_dims])
        exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
        kernel = cov_amp * exp_kernel

        prior = DefaultPrior(len(kernel) + 1)

        model = GaussianProcessMCMC(kernel, prior=prior,
                                    n_hypers=self.n_hypers,
                                    chain_length=self.chain_length,
                                    burnin_steps=self.burnin,
                                    normalize_input=False,
                                    normalize_output=True,
                                    rng=self.rng,
                                    lower=self.lower,
                                    upper=self.upper)

        a = LogEI(model)

        acquisition_func = MarginalizationGPMCMC(a)

        max_func = Direct(acquisition_func, self.lower, self.upper,
                          verbose=False)

        model.train(self.X, self.y)

        acquisition_func.update(model)

        new_x = max_func.maximize()

    next_config = Configuration(self.config_space, vector=new_x)

    # Transform to sacred configuration
    result = configspace_config_to_sacred(next_config)

    return result
def setUp(self):
    lower = np.zeros([1])
    upper = np.ones([1])

    kernel = george.kernels.Matern52Kernel(np.array([1]), dim=1, ndim=1)
    model = GaussianProcess(kernel)
    lcb = LCB(model)
    maximizer = Direct(lcb, lower, upper, n_func_evals=10)
    self.solver = BayesianOptimization(objective_func, lower, upper,
                                       lcb, model, maximizer)
class Bohamiann(Optimizer):

    def __init__(self, config_space, burnin=3000, n_iters=10000):
        super(Bohamiann, self).__init__(sacred_space_to_configspace(config_space))
        self.rng = np.random.RandomState(np.random.seed())
        self.n_dims = len(self.config_space.get_hyperparameters())

        # All inputs are mapped to be in [0, 1]^D
        self.lower = np.zeros([self.n_dims])
        self.upper = np.ones([self.n_dims])
        self.incumbents = []
        self.X = None
        self.y = None

        self.model = BayesianNeuralNetwork(sampling_method="sghmc",
                                           l_rate=np.sqrt(1e-4),
                                           mdecay=0.05,
                                           burn_in=burnin,
                                           n_iters=n_iters,
                                           precondition=True,
                                           normalize_input=True,
                                           normalize_output=True)

        self.acquisition_func = LogEI(self.model)

        self.maximizer = Direct(self.acquisition_func, self.lower, self.upper,
                                verbose=False)

    def suggest_configuration(self):
        if self.X is None and self.y is None:
            # No data points yet to train a model, just return a random configuration instead
            new_x = init_random_uniform(self.lower, self.upper,
                                        n_points=1, rng=self.rng)[0, :]

        else:
            # Train the model on all finished runs
            self.model.train(self.X, self.y)
            self.acquisition_func.update(self.model)

            # Maximize the acquisition function
            new_x = self.maximizer.maximize()

        # Map from [0, 1]^D space back to original space
        next_config = Configuration(self.config_space, vector=new_x)

        # Transform to sacred configuration
        result = configspace_config_to_sacred(next_config)

        return result
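# Rough usage sketch for the Bohamiann optimizer defined above (an assumption, not
# part of the original code): it plugs into a sacred-based experiment loop, where
# `my_config_space` and `run_experiment` are hypothetical placeholders supplied by
# the surrounding application.
#
#     opt = Bohamiann(my_config_space)
#     for _ in range(20):
#         config = opt.suggest_configuration()  # sacred config in the original space
#         loss = run_experiment(config)         # user-provided evaluation
#         # append the normalized config and observed loss to opt.X / opt.y before
#         # asking for the next suggestion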
def test_json_base_solver(self):
    task = Levy()
    kernel = george.kernels.Matern52Kernel([1.0], ndim=1)
    model = GaussianProcess(kernel)
    ei = EI(model, task.X_lower, task.X_upper)
    maximizer = Direct(ei, task.X_lower, task.X_upper)
    solver = BayesianOptimization(acquisition_func=ei,
                                  model=model,
                                  maximize_func=maximizer,
                                  task=task)
    solver.run(1, X=None, Y=None)

    iteration = 0
    data = solver.get_json_data(it=iteration)
    assert data['iteration'] == iteration
def fabolas(objective_function, lower, upper, s_min, s_max,
            n_init=40, num_iterations=100, subsets=[256, 128, 64],
            inc_estimation="mean", burnin=100, chain_length=100,
            n_hypers=12, output_path=None, rng=None):
    """
    Fast Bayesian Optimization of Machine Learning Hyperparameters
    on Large Datasets

    Fast Bayesian Optimization of Machine Learning Hyperparameters
    on Large Datasets
    A. Klein and S. Falkner and S. Bartels and P. Hennig and F. Hutter
    http://arxiv.org/abs/1605.07079

    Parameters
    ----------
    objective_function: function
        Objective function that will be optimized
    lower: np.array(D,)
        Lower bound of the input space
    upper: np.array(D,)
        Upper bound of the input space
    s_min: int
        Minimum number of data points for the training data set
    s_max: int
        Maximum number of data points for the training data set
    n_init: int
        Number of initial design points
    n_hypers: int
        Number of hyperparameter samples for the GP
    subsets: list
        Determines the subset sizes of the initial design: if
        subsets=[256, 128, 64], the first random point of the initial design
        is evaluated on s_max/256 of the data, the second point on s_max/128,
        and so on.
    num_iterations: int
        Number of iterations
    chain_length : int
        The length of the MCMC chain for each walker.
    burnin : int
        The number of burnin steps before the actual MCMC sampling starts.
    output_path: string
        Specifies the path where the intermediate output after each iteration
        will be saved. If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
    dict
    """
    assert n_init <= num_iterations, \
        "Number of initial design points has to be <= the number of iterations"
    assert lower.shape[0] == upper.shape[0], \
        "Dimension mismatch between upper and lower bound"

    time_start = time.time()
    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    n_dims = lower.shape[0]

    # Bookkeeping
    time_func_eval = []
    time_overhead = []
    incumbents = []
    runtime = []

    X = []
    y = []
    c = []

    # Define model for the objective function
    cov_amp = 1  # Covariance amplitude
    kernel = cov_amp

    # ARD Kernel for the configuration space
    for d in range(n_dims):
        kernel *= george.kernels.Matern52Kernel(np.ones([1]) * 0.01,
                                                ndim=n_dims + 1, dim=d)

    # Kernel for the environmental variable
    # We use (1-s)**2 as basis function for the Bayesian linear kernel
    degree = 1
    env_kernel = george.kernels.BayesianLinearRegressionKernel(n_dims + 1,
                                                               dim=n_dims,
                                                               degree=degree)
    env_kernel[:] = np.ones([degree + 1]) * 0.1

    kernel *= env_kernel

    # Take 3 times more samples than we have hyperparameters
    if n_hypers < 2 * len(kernel):
        n_hypers = 3 * len(kernel)
        if n_hypers % 2 == 1:
            n_hypers += 1

    prior = EnvPrior(len(kernel) + 1,
                     n_ls=n_dims,
                     n_lr=(degree + 1),
                     rng=rng)

    quadratic_bf = lambda x: (1 - x) ** 2
    linear_bf = lambda x: x

    model_objective = FabolasGPMCMC(kernel,
                                    prior=prior,
                                    burnin_steps=burnin,
                                    chain_length=chain_length,
                                    n_hypers=n_hypers,
                                    normalize_output=False,
                                    basis_func=quadratic_bf,
                                    lower=lower,
                                    upper=upper,
                                    rng=rng)

    # Define model for the cost function
    cost_cov_amp = 1
    cost_kernel = cost_cov_amp

    # ARD Kernel for the configuration space
    for d in range(n_dims):
        cost_kernel *= george.kernels.Matern52Kernel(np.ones([1]) * 0.01,
                                                     ndim=n_dims + 1, dim=d)

    cost_degree = 1
    cost_env_kernel = george.kernels.BayesianLinearRegressionKernel(n_dims + 1,
                                                                    dim=n_dims,
                                                                    degree=cost_degree)
    cost_env_kernel[:] = np.ones([cost_degree + 1]) * 0.1

    cost_kernel *= cost_env_kernel

    cost_prior = EnvPrior(len(cost_kernel) + 1,
                          n_ls=n_dims,
                          n_lr=(cost_degree + 1),
                          rng=rng)

    model_cost = FabolasGPMCMC(cost_kernel,
                               prior=cost_prior,
                               burnin_steps=burnin,
                               chain_length=chain_length,
                               n_hypers=n_hypers,
                               basis_func=linear_bf,
                               normalize_output=False,
                               lower=lower,
                               upper=upper,
                               rng=rng)

    # Extend input space by task variable
    extend_lower = np.append(lower, 0)
    extend_upper = np.append(upper, 1)
    is_env = np.zeros(extend_lower.shape[0])
    is_env[-1] = 1

    # Define acquisition function and maximizer
    ig = InformationGainPerUnitCost(model_objective,
                                    model_cost,
                                    extend_lower,
                                    extend_upper,
                                    sampling_acquisition=EI,
                                    is_env_variable=is_env,
                                    n_representer=50)
    acquisition_func = MarginalizationGPMCMC(ig)
    maximizer = Direct(acquisition_func, extend_lower, extend_upper,
                       verbose=True, n_func_evals=200)

    # Initial design
    logger.info("Initial Design")
    for it in range(n_init):
        start_time_overhead = time.time()
        # Draw random configuration
        s = int(s_max / float(subsets[it]))
        x = init_random_uniform(lower, upper, 1, rng)[0]
        logger.info("Evaluate %s on subset size %d", str(x), s)
        st = time.time()
        func_val, cost = objective_function(x, s)
        time_func_eval.append(time.time() - st)

        logger.info("Configuration achieved a performance of %f with cost %f",
                    func_val, cost)
        logger.info("Evaluation of this configuration took %f seconds",
                    time_func_eval[-1])

        # Bookkeeping
        config = np.append(x, transform(s, s_min, s_max))
        X.append(config)
        y.append(np.log(func_val))  # Model the target function on a logarithmic scale
        c.append(np.log(cost))  # Model the cost on a logarithmic scale

        # Estimate incumbent as the best observed value so far
        best_idx = np.argmin(y)
        incumbents.append(X[best_idx][:-1])  # Incumbent is always on s=s_max

        time_overhead.append(time.time() - start_time_overhead)
        runtime.append(time.time() - time_start)

        if output_path is not None:
            data = dict()
            data["optimization_overhead"] = time_overhead[it]
            data["runtime"] = runtime[it]
            data["incumbent"] = incumbents[it].tolist()
            data["time_func_eval"] = time_func_eval[it]
            data["iteration"] = it

            json.dump(data, open(os.path.join(output_path,
                                              "fabolas_iter_%d.json" % it), "w"))

    X = np.array(X)
    y = np.array(y)
    c = np.array(c)

    for it in range(n_init, num_iterations):
        logger.info("Start iteration %d ... ", it)

        start_time = time.time()

        # Train models
        model_objective.train(X, y, do_optimize=True)
        model_cost.train(X, c, do_optimize=True)

        if inc_estimation == "last_seen":
            # Estimate incumbent as the best observed value so far
            best_idx = np.argmin(y)
            incumbent = X[best_idx][:-1]
            incumbent = np.append(incumbent, 1)
            incumbent_value = y[best_idx]
        else:
            # Estimate incumbent by projecting all observed points to the task
            # of interest and picking the point with the lowest mean prediction
            incumbent, incumbent_value = projected_incumbent_estimation(model_objective,
                                                                        X[:, :-1],
                                                                        proj_value=1)
        incumbents.append(incumbent[:-1])
        logger.info("Current incumbent %s with estimated performance %f",
                    str(incumbent), np.exp(incumbent_value))

        # Maximize acquisition function
        acquisition_func.update(model_objective, model_cost)

        new_x = maximizer.maximize()
        s = retransform(new_x[-1], s_min, s_max)  # Map s from log space back to the original linear space

        time_overhead.append(time.time() - start_time)
        logger.info("Optimization overhead was %f seconds", time_overhead[-1])

        # Evaluate the chosen configuration
        logger.info("Evaluate candidate %s on subset size %f", str(new_x[:-1]), s)
        start_time = time.time()
        new_y, new_c = objective_function(new_x[:-1], s)
        time_func_eval.append(time.time() - start_time)

        logger.info("Configuration achieved a performance of %f with cost %f",
                    new_y, new_c)
        logger.info("Evaluation of this configuration took %f seconds",
                    time_func_eval[-1])

        # Add new observation to the data
        X = np.concatenate((X, new_x[None, :]), axis=0)
        y = np.concatenate((y, np.log(np.array([new_y]))), axis=0)  # Model the target function on a logarithmic scale
        c = np.concatenate((c, np.log(np.array([new_c]))), axis=0)  # Model the cost function on a logarithmic scale

        runtime.append(time.time() - time_start)

        if output_path is not None:
            data = dict()
            data["optimization_overhead"] = time_overhead[it]
            data["runtime"] = runtime[it]
            data["incumbent"] = incumbents[it].tolist()
            data["time_func_eval"] = time_func_eval[it]
            data["iteration"] = it

            json.dump(data, open(os.path.join(output_path,
                                              "fabolas_iter_%d.json" % it), "w"))

    # Estimate the final incumbent
    model_objective.train(X, y, do_optimize=True)
    incumbent, incumbent_value = projected_incumbent_estimation(model_objective,
                                                                X[:, :-1],
                                                                proj_value=1)
    logger.info("Final incumbent %s with estimated performance %f",
                str(incumbent), incumbent_value)

    results = dict()
    results["x_opt"] = incumbent[:-1].tolist()
    results["incumbents"] = [inc.tolist() for inc in incumbents]
    results["runtime"] = runtime
    results["overhead"] = time_overhead
    results["time_func_eval"] = time_func_eval
    results["X"] = X
    results["y"] = y
    results["c"] = c

    return results
def bohamiann(objective_function, lower, upper, num_iterations=30,
              acquisition_func="log_ei", n_init=3, rng=None):
    """
    General interface for Bayesian optimization for global black box
    optimization problems.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy
        array (D,) as input and returns the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    n_init: int
        Number of points for the initial design. Make sure that it
        is <= num_iterations.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
    dict with all results
    """
    assert upper.shape[0] == lower.shape[0]
    assert n_init <= num_iterations, \
        "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    model = BayesianNeuralNetwork(sampling_method="sghmc",
                                  l_rate=np.sqrt(1e-4), mdecay=0.05,
                                  burn_in=3000, n_iters=50000,
                                  precondition=True, normalize_input=True,
                                  normalize_output=True)

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        print("ERROR: %s is not a valid acquisition function!" % acquisition_func)
        return

    max_func = Direct(a, lower, upper, verbose=False)

    bo = BayesianOptimization(objective_function, lower, upper, a, model, max_func,
                              initial_points=n_init, rng=rng)

    x_best, f_min = bo.run(num_iterations)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead

    return results
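# Hedged usage sketch for bohamiann() above, assuming the function and the RoBO /
# Bayesian neural network imports it relies on are available; the quadratic
# objective is a stand-in.
import numpy as np

def objective(x):
    return float(np.sum(x ** 2))

res = bohamiann(objective,
                lower=np.array([-5.0, -5.0]),
                upper=np.array([5.0, 5.0]),
                num_iterations=10,
                acquisition_func="log_ei",
                n_init=3)
print(res["x_opt"], res["f_opt"])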
'''
Created on Mar 16, 2016

@author: Aaron Klein
'''
import george

from robo.maximizers.direct import Direct
from robo.models.gaussian_process import GaussianProcess
from robo.task.synthetic_functions.levy import Levy
from robo.acquisition.ei import EI
from robo.solver.bayesian_optimization import BayesianOptimization

task = Levy()

kernel = george.kernels.Matern52Kernel([1.0], ndim=1)
model = GaussianProcess(kernel)

ei = EI(model, task.X_lower, task.X_upper)
maximizer = Direct(ei, task.X_lower, task.X_upper)

bo = BayesianOptimization(acquisition_func=ei,
                          model=model,
                          maximize_func=maximizer,
                          task=task)

print(bo.run(10))
def entropy_search(objective_function, lower, upper, num_iterations=30,
                   maximizer="direct", model="gp_mcmc",
                   n_init=3, output_path=None, rng=None):
    """
    Entropy search for global black box optimization problems. This is a
    reimplementation of the entropy search algorithm by Hennig and Schuler[1].

    [1] Entropy search for information-efficient global optimization.
        P. Hennig and C. Schuler.
        JMLR, (1), 2012.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy
        array (D,) as input and returns the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    maximizer: {"direct", "cmaes"}
        Defines how the acquisition function is maximized. NOTE: "cmaes" only
        works in D > 1 dimensions
    model: {"gp", "gp_mcmc"}
        The model for the objective function.
    n_init: int
        Number of points for the initial design. Make sure that it
        is <= num_iterations.
    output_path: string
        Specifies the path where the intermediate output after each iteration
        will be saved. If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
    dict with all results
    """
    assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
    assert np.all(lower < upper), "Lower bound >= upper bound"
    assert n_init <= num_iterations, \
        "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    cov_amp = 2
    n_dims = lower.shape[0]

    initial_ls = np.ones([n_dims])
    exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    kernel = cov_amp * exp_kernel

    prior = DefaultPrior(len(kernel) + 1)

    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1

    if model == "gp":
        gp = GaussianProcess(kernel, prior=prior, rng=rng,
                             normalize_output=False, normalize_input=True,
                             lower=lower, upper=upper)
    elif model == "gp_mcmc":
        gp = GaussianProcessMCMC(kernel, prior=prior,
                                 n_hypers=n_hypers,
                                 chain_length=200,
                                 burnin_steps=100,
                                 normalize_input=True,
                                 normalize_output=False,
                                 rng=rng, lower=lower, upper=upper)
    else:
        print("ERROR: %s is not a valid model!" % model)
        return

    a = InformationGain(gp, lower=lower, upper=upper, sampling_acquisition=EI)

    if model == "gp":
        acquisition_func = a
    elif model == "gp_mcmc":
        acquisition_func = MarginalizationGPMCMC(a)

    if maximizer == "cmaes":
        max_func = CMAES(acquisition_func, lower, upper, verbose=False, rng=rng)
    elif maximizer == "direct":
        max_func = Direct(acquisition_func, lower, upper)
    else:
        print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer)
        return

    bo = BayesianOptimization(objective_function, lower, upper,
                              acquisition_func, gp, max_func,
                              initial_points=n_init, rng=rng,
                              output_path=output_path)

    x_best, f_min = bo.run(num_iterations)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    results["X"] = [x.tolist() for x in bo.X]
    results["y"] = [y for y in bo.y]

    return results
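# Hedged usage sketch for entropy_search() above, assuming the function and its
# imports are in scope; the two-dimensional objective and bounds are illustrative.
import numpy as np

def objective(x):
    return float(np.sin(3 * x[0]) * 4 * (x[0] - 1) * (x[0] + 2) + x[1] ** 2)

res = entropy_search(objective,
                     lower=np.array([-2.0, -2.0]),
                     upper=np.array([2.0, 2.0]),
                     num_iterations=15,
                     maximizer="direct",
                     model="gp_mcmc")
print(res["x_opt"], res["f_opt"])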
def mtbo(objective_function, lower, upper, n_tasks=2, n_init=2,
         num_iterations=30, burnin=100, chain_length=200, n_hypers=20,
         output_path=None, rng=None):
    """
    Interface to MTBO[1] which uses an auxiliary cheaper task to speed up
    the optimization of a more expensive but similar task.

    [1] Multi-Task Bayesian Optimization
        K. Swersky and J. Snoek and R. Adams
        Proceedings of the 27th International Conference on Advances in
        Neural Information Processing Systems (NIPS'13)

    Parameters
    ----------
    objective_function: function
        Objective function that will be optimized
    lower: np.array(D,)
        Lower bound of the input space
    upper: np.array(D,)
        Upper bound of the input space
    n_tasks: int
        Number of tasks
    n_init: int
        Number of initial design points
    num_iterations: int
        Number of iterations
    chain_length : int
        The length of the MCMC chain for each walker.
    burnin : int
        The number of burnin steps before the actual MCMC sampling starts.
    output_path: string
        Specifies the path where the intermediate output after each iteration
        will be saved. If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
    dict with all results
    """
    assert n_init <= num_iterations, \
        "Number of initial design points has to be <= the number of iterations"
    assert lower.shape[0] == upper.shape[0], \
        "Dimension mismatch between upper and lower bound"

    time_start = time.time()
    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    n_dims = lower.shape[0]

    # Bookkeeping
    time_func_eval = []
    time_overhead = []
    incumbents = []
    runtime = []

    X = []
    y = []
    c = []

    # Define model for the objective function
    cov_amp = 1  # Covariance amplitude
    kernel = cov_amp

    # ARD Kernel for the configuration space
    for d in range(n_dims):
        kernel *= george.kernels.Matern52Kernel(np.ones([1]) * 0.01,
                                                ndim=n_dims + 1, dim=d)

    task_kernel = george.kernels.TaskKernel(n_dims + 1, n_dims, n_tasks)
    kernel *= task_kernel

    # Take 3 times more samples than we have hyperparameters
    if n_hypers < 2 * len(kernel):
        n_hypers = 3 * len(kernel)
        if n_hypers % 2 == 1:
            n_hypers += 1

    prior = MTBOPrior(len(kernel) + 1,
                      n_ls=n_dims,
                      n_kt=len(task_kernel),
                      rng=rng)

    model_objective = MTBOGPMCMC(kernel,
                                 prior=prior,
                                 burnin_steps=burnin,
                                 chain_length=chain_length,
                                 n_hypers=n_hypers,
                                 lower=lower,
                                 upper=upper,
                                 rng=rng)

    # Define model for the cost function
    cost_cov_amp = 1
    cost_kernel = cost_cov_amp

    # ARD Kernel for the configuration space
    for d in range(n_dims):
        cost_kernel *= george.kernels.Matern52Kernel(np.ones([1]) * 0.01,
                                                     ndim=n_dims + 1, dim=d)

    cost_task_kernel = george.kernels.TaskKernel(n_dims + 1, n_dims, n_tasks)
    cost_kernel *= cost_task_kernel

    cost_prior = MTBOPrior(len(cost_kernel) + 1,
                           n_ls=n_dims,
                           n_kt=len(task_kernel),
                           rng=rng)

    model_cost = MTBOGPMCMC(cost_kernel,
                            prior=cost_prior,
                            burnin_steps=burnin,
                            chain_length=chain_length,
                            n_hypers=n_hypers,
                            lower=lower,
                            upper=upper,
                            rng=rng)

    # Extend input space by task variable
    extend_lower = np.append(lower, 0)
    extend_upper = np.append(upper, n_tasks - 1)
    is_env = np.zeros(extend_lower.shape[0])
    is_env[-1] = 1

    # Define acquisition function and maximizer
    ig = InformationGainPerUnitCost(model_objective,
                                    model_cost,
                                    extend_lower,
                                    extend_upper,
                                    sampling_acquisition=EI,
                                    is_env_variable=is_env,
                                    n_representer=50)
    acquisition_func = MarginalizationGPMCMC(ig)
    maximizer = Direct(acquisition_func, extend_lower, extend_upper,
                       n_func_evals=200)

    # Initial design
    logger.info("Initial Design")
    for it in range(n_init):
        start_time_overhead = time.time()
        # Draw a random configuration and evaluate it just on the auxiliary task
        task = 0
        x = init_random_uniform(lower, upper, 1, rng)[0]
        logger.info("Evaluate candidate %s", str(x))
        st = time.time()
        func_val, cost = objective_function(x, task)
        time_func_eval.append(time.time() - st)

        logger.info("Configuration achieved a performance of %f with cost %f",
                    func_val, cost)
        logger.info("Evaluation of this configuration took %f seconds",
                    time_func_eval[-1])

        # Bookkeeping
        config = np.append(x, task)
        X.append(config)
        y.append(np.log(func_val))  # Model the target function on a logarithmic scale
        c.append(np.log(cost))  # Model the cost on a logarithmic scale

        # Estimate incumbent as the best observed value so far
        best_idx = np.argmin(y)
        incumbents.append(X[best_idx][:-1])

        time_overhead.append(time.time() - start_time_overhead)
        runtime.append(time.time() - time_start)

        if output_path is not None:
            data = dict()
            data["optimization_overhead"] = time_overhead[it]
            data["runtime"] = runtime[it]
            data["incumbent"] = incumbents[it].tolist()
            data["time_func_eval"] = time_func_eval[it]
            data["iteration"] = it

            json.dump(data, open(os.path.join(output_path,
                                              "mtbo_iter_%d.json" % it), "w"))

    X = np.array(X)
    y = np.array(y)
    c = np.array(c)

    for it in range(n_init, num_iterations):
        logger.info("Start iteration %d ... ", it)

        start_time = time.time()

        # Train models
        model_objective.train(X, y, do_optimize=True)
        model_cost.train(X, c, do_optimize=True)

        # Estimate incumbent as the best observed value so far
        best_idx = np.argmin(y)
        incumbent = X[best_idx][:-1]
        incumbent = np.append(incumbent, 1)
        incumbent_value = y[best_idx]

        incumbents.append(incumbent[:-1])
        logger.info("Current incumbent %s with estimated performance %f",
                    str(incumbent), incumbent_value)

        # Maximize acquisition function
        acquisition_func.update(model_objective, model_cost)

        new_x = maximizer.maximize()
        new_x[-1] = np.rint(new_x[-1])  # Map float value to discrete task variable

        time_overhead.append(time.time() - start_time)
        logger.info("Optimization overhead was %f seconds", time_overhead[-1])

        # Evaluate the chosen configuration
        logger.info("Evaluate candidate %s", str(new_x))
        start_time = time.time()
        new_y, new_c = objective_function(new_x[:-1], new_x[-1])
        time_func_eval.append(time.time() - start_time)

        logger.info("Configuration achieved a performance of %f with cost %f",
                    new_y, new_c)
        logger.info("Evaluation of this configuration took %f seconds",
                    time_func_eval[-1])

        # Add new observation to the data
        X = np.concatenate((X, new_x[None, :]), axis=0)
        y = np.concatenate((y, np.log(np.array([new_y]))), axis=0)  # Model the target function on a logarithmic scale
        c = np.concatenate((c, np.log(np.array([new_c]))), axis=0)  # Model the cost function on a logarithmic scale

        runtime.append(time.time() - time_start)

        if output_path is not None:
            data = dict()
            data["optimization_overhead"] = time_overhead[it]
            data["runtime"] = runtime[it]
            data["incumbent"] = incumbents[it].tolist()
            data["time_func_eval"] = time_func_eval[it]
            data["iteration"] = it

            json.dump(data, open(os.path.join(output_path,
                                              "mtbo_iter_%d.json" % it), "w"))

    # Estimate the final incumbent
    model_objective.train(X, y)
    incumbent, incumbent_value = projected_incumbent_estimation(model_objective,
                                                                X[:, :-1],
                                                                proj_value=n_tasks - 1)
    logger.info("Final incumbent %s with estimated performance %f",
                str(incumbent), incumbent_value)

    results = dict()
    results["x_opt"] = incumbent[:-1].tolist()
    results["incumbents"] = [inc.tolist() for inc in incumbents]
    results["runtime"] = runtime
    results["overhead"] = time_overhead
    results["time_func_eval"] = time_func_eval
    results["X"] = X
    results["y"] = y
    results["c"] = c

    return results
cov_amp = 1.0
config_kernel = george.kernels.Matern52Kernel(np.ones([task.n_dims]),
                                              ndim=task.n_dims)

kernel = cov_amp * config_kernel

prior = MyPrior(len(kernel) + 1)

model = GaussianProcessMCMC(kernel, prior=prior,
                            burnin=burnin,
                            chain_length=chain_length,
                            n_hypers=n_hypers)

ei = EI(model, X_upper=task.X_upper, X_lower=task.X_lower)

acquisition_func = IntegratedAcquisition(model, ei,
                                         task.X_lower, task.X_upper)

maximizer = Direct(acquisition_func, task.X_lower, task.X_upper)

bo = BayesianOptimization(acquisition_func=acquisition_func,
                          model=model,
                          maximize_func=maximizer,
                          task=task)

bo.run(20)
def warmstart_mtbo(objective_function, lower, upper, observed_X, observed_y,
                   n_tasks=2, num_iterations=30, target_task_id=1,
                   burnin=100, chain_length=200, n_hypers=20,
                   output_path=None, rng=None):
    """
    Interface to MTBO[1] which uses an auxiliary cheaper task to warm start
    the optimization on a new but similar task. Note that this only warm
    starts the optimization process; if you want to speed up Bayesian
    optimization by evaluating the auxiliary task during the optimization,
    check out mtbo() or fabolas().

    [1] Multi-Task Bayesian Optimization
        K. Swersky and J. Snoek and R. Adams
        Proceedings of the 27th International Conference on Advances in
        Neural Information Processing Systems (NIPS'13)

    Parameters
    ----------
    objective_function: function
        Objective function that will be optimized
    lower: np.array(D,)
        Lower bound of the input space
    upper: np.array(D,)
        Upper bound of the input space
    observed_X: np.array(N, D + 1)
        Observed points from the auxiliary task. Make sure that the last
        dimension identifies the auxiliary task (default=0). We assume the
        main task to have the task id = 1
    observed_y: np.array(N,)
        Corresponding target values
    n_tasks: int
        Number of tasks
    target_task_id: int
        The id of the target task
    num_iterations: int
        Number of iterations
    chain_length : int
        The length of the MCMC chain for each walker.
    burnin : int
        The number of burnin steps before the actual MCMC sampling starts.
    output_path: string
        Specifies the path where the intermediate output after each iteration
        will be saved. If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
    dict with all results
    """
    assert lower.shape[0] == upper.shape[0], \
        "Dimension mismatch between upper and lower bound"

    time_start = time.time()
    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    n_dims = lower.shape[0]

    # Bookkeeping
    time_func_eval = []
    time_overhead = []
    incumbents = []
    incumbent_values = []
    runtime = []

    X = deepcopy(observed_X)
    y = deepcopy(observed_y)

    # Define model for the objective function
    cov_amp = 1  # Covariance amplitude
    kernel = cov_amp

    # ARD Kernel for the configuration space
    for d in range(n_dims):
        kernel *= george.kernels.Matern52Kernel(np.ones([1]) * 0.01,
                                                ndim=n_dims + 1, dim=d)

    task_kernel = george.kernels.TaskKernel(n_dims + 1, n_dims, n_tasks)
    kernel *= task_kernel

    # Take 3 times more samples than we have hyperparameters
    if n_hypers < 2 * len(kernel):
        n_hypers = 3 * len(kernel)
        if n_hypers % 2 == 1:
            n_hypers += 1

    prior = MTBOPrior(len(kernel) + 1,
                      n_ls=n_dims,
                      n_kt=len(task_kernel),
                      rng=rng)

    model_objective = MTBOGPMCMC(kernel,
                                 prior=prior,
                                 burnin_steps=burnin,
                                 chain_length=chain_length,
                                 n_hypers=n_hypers,
                                 lower=lower,
                                 upper=upper,
                                 rng=rng)

    acquisition_func = LogEI(model_objective)

    # Optimize acquisition function only on the main task
    def wrapper(x):
        x_ = np.append(x, np.ones([x.shape[0], 1]) * target_task_id, axis=1)

        if y.shape[0] == init_points:
            eta = 0
        else:
            eta = np.min(y[init_points:])
        a = acquisition_func(x_, eta=eta)
        return a

    maximizer = Direct(wrapper, lower, upper, n_func_evals=200)

    X = np.array(X)
    y = np.array(y)

    init_points = y.shape[0]

    for it in range(num_iterations):
        logger.info("Start iteration %d ... ", it)

        start_time = time.time()

        # Train models
        model_objective.train(X, y, do_optimize=True)

        # Maximize acquisition function
        acquisition_func.update(model_objective)

        new_x = maximizer.maximize()
        new_x = np.append(new_x, np.array([target_task_id]))

        time_overhead.append(time.time() - start_time)
        logger.info("Optimization overhead was %f seconds", time_overhead[-1])

        # Evaluate the chosen configuration
        logger.info("Evaluate candidate %s", str(new_x))
        start_time = time.time()
        new_y = objective_function(new_x[:-1], int(new_x[-1]))
        time_func_eval.append(time.time() - start_time)

        logger.info("Configuration achieved a performance of %f", new_y)
        logger.info("Evaluation of this configuration took %f seconds",
                    time_func_eval[-1])

        # Add new observation to the data
        X = np.concatenate((X, new_x[None, :]), axis=0)
        y = np.concatenate((y, np.array([new_y])), axis=0)

        # Estimate incumbent as the best observed value so far
        best_idx = np.argmin(y[init_points:]) + init_points
        incumbent = X[best_idx][:-1]
        incumbent_value = y[best_idx]

        incumbents.append(incumbent)
        incumbent_values.append(incumbent_value)
        logger.info("Current incumbent %s with estimated performance %f",
                    str(incumbent), incumbent_value)

        runtime.append(time.time() - time_start)

        if output_path is not None:
            data = dict()
            data["optimization_overhead"] = time_overhead[it]
            data["runtime"] = runtime[it]
            data["incumbent"] = incumbents[it].tolist()
            data["time_func_eval"] = time_func_eval[it]
            data["iteration"] = it

            json.dump(data, open(os.path.join(output_path,
                                              "mtbo_iter_%d.json" % it), "w"))

    logger.info("Final incumbent %s with estimated performance %f",
                str(incumbent), incumbent_value)

    results = dict()
    results["x_opt"] = incumbent.tolist()
    results["incumbents"] = [inc.tolist() for inc in incumbents]
    results["runtime"] = runtime
    results["overhead"] = time_overhead
    results["time_func_eval"] = time_func_eval
    results["incumbent_values"] = incumbent_values
    results["X"] = X
    results["y"] = y

    return results
def bayesian_optimization(objective_function, lower, upper, num_iterations=30,
                          maximizer="random", acquisition_func="log_ei",
                          model_type="gp_mcmc", n_init=3, rng=None,
                          output_path=None):
    """
    General interface for Bayesian optimization for global black box
    optimization problems.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy
        array (D,) as input and returns the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    maximizer: {"direct", "cmaes", "random", "scipy"}
        The optimizer for the acquisition function.
        NOTE: "cmaes" only works in D > 1 dimensions
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    model_type: {"gp", "gp_mcmc", "rf"}
        The model for the objective function.
    n_init: int
        Number of points for the initial design. Make sure that it
        is <= num_iterations.
    output_path: string
        Specifies the path where the intermediate output after each iteration
        will be saved. If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
    dict with all results
    """
    assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
    assert np.all(lower < upper), "Lower bound >= upper bound"
    assert n_init <= num_iterations, \
        "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    cov_amp = 2
    n_dims = lower.shape[0]

    initial_ls = np.ones([n_dims])
    exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    kernel = cov_amp * exp_kernel

    prior = DefaultPrior(len(kernel) + 1)

    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1

    if model_type == "gp":
        model = GaussianProcess(kernel, prior=prior, rng=rng,
                                normalize_output=False, normalize_input=True,
                                lower=lower, upper=upper)
    elif model_type == "gp_mcmc":
        model = GaussianProcessMCMC(kernel, prior=prior,
                                    n_hypers=n_hypers,
                                    chain_length=200,
                                    burnin_steps=100,
                                    normalize_input=True,
                                    normalize_output=True,
                                    rng=rng, lower=lower, upper=upper)
    elif model_type == "rf":
        model = RandomForest(rng=rng)
    else:
        raise ValueError("'{}' is not a valid model".format(model_type))

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(acquisition_func))

    if model_type == "gp_mcmc":
        acquisition_func = MarginalizationGPMCMC(a)
    else:
        acquisition_func = a

    if maximizer == "cmaes":
        max_func = CMAES(acquisition_func, lower, upper, verbose=False, rng=rng)
    elif maximizer == "direct":
        max_func = Direct(acquisition_func, lower, upper, verbose=True)
    elif maximizer == "random":
        max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
    elif maximizer == "scipy":
        max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
    else:
        raise ValueError("'{}' is not a valid function to maximize the "
                         "acquisition function".format(maximizer))

    bo = BayesianOptimization(objective_function, lower, upper,
                              acquisition_func, model, max_func,
                              initial_points=n_init, rng=rng,
                              output_path=output_path)

    x_best, f_min = bo.run(num_iterations)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    results["X"] = [x.tolist() for x in bo.X]
    results["y"] = [y for y in bo.y]

    return results
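# Hedged usage sketch for bayesian_optimization() above, assuming the function and
# its imports are in scope; the quadratic objective and bounds are illustrative.
import numpy as np

def objective(x):
    return float(np.sum(x ** 2))

res = bayesian_optimization(objective,
                            lower=np.array([-5.0, -5.0]),
                            upper=np.array([5.0, 5.0]),
                            num_iterations=20,
                            maximizer="random",
                            acquisition_func="log_ei",
                            model_type="gp_mcmc",
                            n_init=3)
print(res["x_opt"], res["f_opt"])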
def bohamiann(objective_function, lower, upper, num_iterations=30,
              maximizer="random", acquisition_func="log_ei",
              n_init=3, output_path=None, rng=None):
    """
    Bohamiann uses Bayesian neural networks to model the objective function [1]
    inside Bayesian optimization. Bayesian neural networks usually scale better
    with the number of function evaluations and the number of dimensions than
    Gaussian processes.

    [1] Bayesian optimization with robust Bayesian neural networks
        J. T. Springenberg and A. Klein and S. Falkner and F. Hutter
        Advances in Neural Information Processing Systems 29

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy
        array (D,) as input and returns the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    maximizer: {"direct", "cmaes", "random", "scipy"}
        The optimizer for the acquisition function.
        NOTE: "cmaes" only works in D > 1 dimensions
    n_init: int
        Number of points for the initial design. Make sure that it
        is <= num_iterations.
    output_path: string
        Specifies the path where the intermediate output after each iteration
        will be saved. If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
    dict with all results
    """
    assert upper.shape[0] == lower.shape[0]
    assert n_init <= num_iterations, \
        "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    model = BayesianNeuralNetwork(sampling_method="sghmc",
                                  l_rate=np.sqrt(1e-4), mdecay=0.05,
                                  burn_in=3000, n_iters=50000,
                                  precondition=True, normalize_input=True,
                                  normalize_output=True)

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        print("ERROR: %s is not a valid acquisition function!" % acquisition_func)
        return

    if maximizer == "cmaes":
        max_func = CMAES(a, lower, upper, verbose=True, rng=rng)
    elif maximizer == "direct":
        max_func = Direct(a, lower, upper, verbose=True)
    elif maximizer == "random":
        max_func = RandomSampling(a, lower, upper, rng=rng)
    elif maximizer == "scipy":
        max_func = SciPyOptimizer(a, lower, upper, rng=rng)
    else:
        print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer)
        return

    bo = BayesianOptimization(objective_function, lower, upper, a, model, max_func,
                              initial_points=n_init, output_path=output_path,
                              rng=rng)

    x_best, f_min = bo.run(num_iterations)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    results["X"] = [x.tolist() for x in bo.X]
    results["y"] = [y for y in bo.y]

    return results
def bayesian_optimization(objective_function, lower, upper, num_iterations=30,
                          maximizer="direct", acquisition_func="log_ei",
                          model="gp_mcmc", n_init=3, rng=None):
    """
    General interface for Bayesian optimization for global black box
    optimization problems.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy
        array (D,) as input and returns the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    maximizer: {"direct", "cmaes"}
        Defines how the acquisition function is maximized. NOTE: "cmaes" only
        works in D > 1 dimensions
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    model: {"gp", "gp_mcmc"}
        The model for the objective function.
    n_init: int
        Number of points for the initial design. Make sure that it
        is <= num_iterations.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
    dict with all results
    """
    assert upper.shape[0] == lower.shape[0]
    assert n_init <= num_iterations, \
        "Number of initial design points has to be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    cov_amp = 2
    n_dims = lower.shape[0]

    initial_ls = np.ones([n_dims])
    exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    kernel = cov_amp * exp_kernel

    prior = DefaultPrior(len(kernel) + 1)

    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1

    if model == "gp":
        gp = GaussianProcess(kernel, prior=prior, rng=rng,
                             normalize_output=True, normalize_input=True,
                             lower=lower, upper=upper)
    elif model == "gp_mcmc":
        gp = GaussianProcessMCMC(kernel, prior=prior,
                                 n_hypers=n_hypers,
                                 chain_length=200,
                                 burnin_steps=100,
                                 normalize_input=True,
                                 normalize_output=True,
                                 rng=rng, lower=lower, upper=upper)
    else:
        print("ERROR: %s is not a valid model!" % model)
        return

    if acquisition_func == "ei":
        a = EI(gp)
    elif acquisition_func == "log_ei":
        a = LogEI(gp)
    elif acquisition_func == "pi":
        a = PI(gp)
    elif acquisition_func == "lcb":
        a = LCB(gp)
    else:
        print("ERROR: %s is not a valid acquisition function!" % acquisition_func)
        return

    if model == "gp":
        acquisition_func = a
    elif model == "gp_mcmc":
        acquisition_func = MarginalizationGPMCMC(a)

    if maximizer == "cmaes":
        max_func = CMAES(acquisition_func, lower, upper, verbose=False, rng=rng)
    elif maximizer == "direct":
        max_func = Direct(acquisition_func, lower, upper, verbose=False)
    else:
        print("ERROR: %s is not a valid function to maximize the acquisition function!" % maximizer)
        return

    bo = BayesianOptimization(objective_function, lower, upper,
                              acquisition_func, gp, max_func,
                              initial_points=n_init, rng=rng)

    x_best, f_min = bo.run(num_iterations)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead

    return results