def evaluator_creator(self, evaluator_type, acquisition, batch_size, model_type, model, space, acquisition_optimizer):
    """
    Acquisition evaluator chooser from the available options. Guides the optimization
    through sequential or parallel evaluations of the objective.
    """
    acquisition_transformation = self.kwargs.get('acquisition_transformation', 'none')

    if batch_size == 1 or evaluator_type == 'sequential':
        return Sequential(acquisition)

    elif batch_size > 1 and (evaluator_type == 'random' or evaluator_type is None):
        return RandomBatch(acquisition, batch_size)

    elif batch_size > 1 and evaluator_type == 'thompson_sampling':
        return ThompsonBatch(acquisition, batch_size)

    elif evaluator_type == 'local_penalization':
        if model_type not in ['GP', 'sparseGP', 'GP_MCMC', 'warpedGP']:
            raise InvalidConfigError('local_penalization evaluator can only be used with GP models')

        # Wrap the acquisition in a local-penalization acquisition unless it already is one
        if not isinstance(acquisition, AcquisitionLP):
            acquisition_lp = AcquisitionLP(model, space, acquisition_optimizer,
                                           acquisition, acquisition_transformation)
        else:
            acquisition_lp = acquisition
        return LocalPenalization(acquisition_lp, batch_size)
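# A minimal usage sketch (not part of this module) of how the evaluator choice above is
# typically driven from user-facing options in a GPyOpt-style interface. The
# 'BayesianOptimization' wrapper, the toy objective and the domain are illustrative
# assumptions; only 'batch_size' and 'evaluator_type' map directly to this chooser.
#
#   import GPyOpt
#
#   def f(x):
#       return (x[:, 0:1] - 0.3) ** 2            # toy 1D objective, 2D array in/out
#
#   domain = [{'name': 'x', 'type': 'continuous', 'domain': (0, 1)}]
#
#   # batch_size == 1 -> Sequential; batch_size > 1 with
#   # evaluator_type='local_penalization' -> LocalPenalization (GP models only).
#   bo = GPyOpt.methods.BayesianOptimization(f=f, domain=domain, model_type='GP',
#                                            batch_size=4,
#                                            evaluator_type='local_penalization')
#   bo.run_optimization(max_iter=10)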
def one_step_assesment(self, attribute=0, context=None):
    """
    Runs a single step of the optimization loop: fits the model, plots the model, the
    acquisition function and the suggested sample, then evaluates the objective at that
    sample and records the current optimum.
    """
    if self.objective is None:
        raise InvalidConfigError("Cannot run the optimization loop without the objective function")

    #self.model_parameters_iterations = None
    self.context = context

    # --- Initial function evaluation
    if self.X is not None and self.Y is None:
        self.Y, cost_values = self.objective.evaluate(self.X)
        if self.cost.cost_type == 'evaluation_time':
            self.cost.update_cost_model(self.X, cost_values)

    self._update_model()
    self.suggested_sample = self.compute_next_evaluations()

    model_to_plot = deepcopy(self.model)
    integrated_plot(self.acquisition.space.get_bounds(),
                    self.X.shape[1],
                    model_to_plot,
                    self.X,
                    self.Y,
                    self.acquisition.acquisition_function,
                    self.suggested_sample,
                    attribute,
                    None)

    self.X = np.vstack((self.X, self.suggested_sample))
    self.evaluate_objective()
    self._update_model()
    self.historical_optimal_values.append(self._current_max_value())
def convergence_assesment(self, n_iter=10, attribute=0, context=None):
    """
    Runs 'n_iter' optimization steps, plotting the model, the acquisition function and
    the suggested sample at each iteration and saving the figures under './experiments/'.
    """
    if self.objective is None:
        raise InvalidConfigError("Cannot run the optimization loop without the objective function")

    #self.model_parameters_iterations = None
    self.context = context

    # --- Initial function evaluation
    if self.X is not None and self.Y is None:
        self.Y, cost_values = self.objective.evaluate(self.X)
        if self.cost.cost_type == 'evaluation_time':
            self.cost.update_cost_model(self.X, cost_values)

    self._update_model()

    for i in range(n_iter):
        self.suggested_sample = self.compute_next_evaluations()

        filename = './experiments/1d' + str(i) + '.eps'
        model_to_plot = deepcopy(self.model)
        integrated_plot(self.acquisition.space.get_bounds(),
                        self.X.shape[1],
                        model_to_plot,
                        self.X,
                        self.Y,
                        self.acquisition.acquisition_function,
                        self.suggested_sample,
                        attribute,
                        filename)

        self.X = np.vstack((self.X, self.suggested_sample))
        self.evaluate_objective()
        self._update_model()

        #self.model.get_model_parameters_names()
        #self.model.get_model_parameters()
        #print('Acquisition value at previously evaluated points:')
        #print(self.acquisition.acquisition_function(self.X))
        #print('Posterior mean and variance')
        #print(self.model.predict(self.X))
        #print(self.Y)

        self.historical_optimal_values.append(self._current_max_value())
def best_evaluated(self):
    """
    Returns the best evaluated location and the evaluations sorted in decreasing order.
    Only available for single-objective problems.
    """
    if self.n_attributes > 1:
        raise InvalidConfigError("This option is not available with multiple objectives")

    scores = self.Y[0].flatten()
    x_best = self.X[np.argsort(-scores)[0], :]
    fx_best = -np.sort(-scores)
    return x_best, fx_best
def _init_design_chooser(self):
    """
    Initializes the choice of X and Y based on the selected initial design and
    number of points selected.
    """
    # If the objective function was not provided, we require some initial sample data
    if self.f is None and (self.X is None or self.Y is None):
        raise InvalidConfigError("Initial data for both X and Y is required when objective function is not provided")

    # Case 1: X is not provided, sample an initial design and evaluate the objective on it
    if self.X is None:
        self.X = initial_design(self.initial_design_type, self.space, self.initial_design_numdata)
        self.Y, _ = self.objective.evaluate(self.X)

    # Case 2: X is provided but Y is not, evaluate the objective at X
    elif self.X is not None and self.Y is None:
        self.Y, _ = self.objective.evaluate(self.X)
def run_optimization(self, max_iter=1, max_time=np.inf, rep=None, last_step_evaluator=None,
                     eps=1e-8, context=None, verbosity=False, path=None, evaluations_file=None):
    """
    Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)

    :param max_iter: exploration horizon, or number of acquisitions. If nothing is provided optimizes the current acquisition.
    :param max_time: maximum exploration horizon in seconds.
    :param eps: minimum distance between two consecutive x's to keep running the model.
    :param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
    :param verbosity: flag to print the optimization results after each iteration (default, False).
    :param evaluations_file: filename of the file where the evaluated points and corresponding evaluations are saved (default, None).
    """
    self.last_step_evaluator = last_step_evaluator
    if self.objective is None:
        raise InvalidConfigError("Cannot run the optimization loop without the objective function")

    # --- Save the options to print and save the results
    self.verbosity = verbosity
    self.evaluations_file = evaluations_file
    self.context = context
    self.path = path
    self.rep = rep

    # --- Setting up stop conditions
    self.eps = eps
    if (max_iter is None) and (max_time is None):
        self.max_iter = 0
        self.max_time = np.inf
    elif (max_iter is None) and (max_time is not None):
        self.max_iter = np.inf
        self.max_time = max_time
    elif (max_iter is not None) and (max_time is None):
        self.max_iter = max_iter
        self.max_time = np.inf
    else:
        self.max_iter = max_iter
        self.max_time = max_time

    # --- Initial function evaluation and model fitting
    if self.X is not None and self.Y is None:
        self.Y, cost_values = self.objective.evaluate(self.X)
        if self.constraint is not None:
            self.C, cost_values = self.constraint.evaluate(self.X)
        if self.cost.cost_type == 'evaluation_time':
            self.cost.update_cost_model(self.X, cost_values)

    #self.model.updateModel(self.X,self.Y)

    # --- Initialize iterations and running time
    self.time_zero = time.time()
    self.cum_time = 0
    self.num_acquisitions = 0
    self.suggested_sample = self.X
    self.Y_new = self.Y
    self.Opportunity_Cost = {"Hypervolume": np.array([])}
    value_so_far = []

    # --- Initialize time cost of the evaluations
    print("MAIN LOOP STARTS")
    self.true_best_stats = {
        "true_best": [],
        "mean_gp": [],
        "std gp": [],
        "pf": [],
        "mu_pf": [],
        "var_pf": [],
        "residual_noise": []
    }
    self._update_model()

    while self.max_iter > self.num_acquisitions:
        self.optimize_final_evaluation()
        print("maKG optimizer")
        start = time.time()

        self.suggested_sample = self._compute_next_evaluations()
        print("self.suggested_sample", self.suggested_sample)

        if verbosity:
            if self.constraint is not None:
                self.verbosity_plot_2D_constrained()
            else:
                self.verbosity_plot_2D_unconstrained()

        # self.suggested_sample = np.array([[0.1,0.0]])
        finish = time.time()
        print("time optimisation point X", finish - start)

        self.X = np.vstack((self.X, self.suggested_sample))

        # --- Evaluate *f* in X, augment Y and update cost function (if needed)
        self.evaluate_objective()
        self._update_model()

        # --- Update current evaluation time and function evaluations
        self.cum_time = time.time() - self.time_zero
        self.num_acquisitions += 1

        print("optimize_final_evaluation")
        print("num acquired samples Main Alg: ", self.num_acquisitions)

    print("self.X, self.Y, self.C , self.Opportunity_Cost", self.X, self.Y, self.C, self.Opportunity_Cost)
    return self.X, self.Y, self.C, self.Opportunity_Cost
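# Hedged usage sketch for the constrained loop above: 'optimizer' is a hypothetical,
# already-configured instance of this class (objective, constraint, model and acquisition
# set up elsewhere). Only the keyword arguments and the returned tuple come from the
# signature above; the 'path' and 'rep' values are illustrative.
#
#   X, Y, C, opportunity_cost = optimizer.run_optimization(max_iter=50,
#                                                          max_time=np.inf,
#                                                          verbosity=False,
#                                                          path='./results',
#                                                          rep=0)
#   print("hypervolume-based opportunity cost:", opportunity_cost["Hypervolume"])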
def run_optimization(self, max_iter=1, parallel=False, plot=False, results_file=None,
                     max_time=np.inf, eps=1e-8, context=None, verbosity=False):
    """
    Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)

    :param max_iter: exploration horizon, or number of acquisitions. If nothing is provided optimizes the current acquisition.
    :param parallel: flag to estimate the current optimal value in parallel when the full parameter support is not used (default, False).
    :param plot: flag to plot the convergence of the optimization once it finishes (default, False).
    :param results_file: filename of the file where the results of the optimization are saved (default, None).
    :param max_time: maximum exploration horizon in seconds.
    :param eps: minimum distance between two consecutive x's to keep running the model.
    :param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
    :param verbosity: flag to print the optimization results after each iteration (default, False).
    """
    if self.objective is None:
        raise InvalidConfigError("Cannot run the optimization loop without the objective function")

    # --- Save the options to print and save the results
    self.verbosity = verbosity
    self.results_file = results_file
    self.context = context

    # --- Setting up stop conditions
    self.eps = eps
    if (max_iter is None) and (max_time is None):
        self.max_iter = 0
        self.max_time = np.inf
    elif (max_iter is None) and (max_time is not None):
        self.max_iter = np.inf
        self.max_time = max_time
    elif (max_iter is not None) and (max_time is None):
        self.max_iter = max_iter
        self.max_time = np.inf
    else:
        self.max_iter = max_iter
        self.max_time = max_time

    # --- Initial function evaluation
    if self.X is not None and self.Y is None:
        self.Y, cost_values = self.objective.evaluate(self.X)
        if self.cost.cost_type == 'evaluation_time':
            self.cost.update_cost_model(self.X, cost_values)

    # --- Initialize model
    self.model.updateModel(self.X, self.Y)

    # --- Initialize iterations and running time
    self.time_zero = time.time()  # time.clock() was removed in Python 3.8
    self.cum_time = 0
    self.num_acquisitions = 0
    self.suggested_sample = self.X
    self.Y_new = self.Y

    # --- Initialize time cost of the evaluations
    while (self.max_time > self.cum_time) and (self.num_acquisitions < self.max_iter):
        #if not ((self.num_acquisitions < self.max_iter) and (self._distance_last_evaluations() > self.eps)):
        tmp = self.suggested_sample
        self.suggested_sample = self.compute_next_evaluations()
        if np.all(self.suggested_sample == tmp):
            self.suggested_sample = self._perturb(self.suggested_sample)

        try:
            self.acquisition.update_Z_samples()
        except AttributeError:
            # Not every acquisition implements update_Z_samples
            pass

        # --- Augment X
        self.X = np.vstack((self.X, self.suggested_sample))

        # --- Evaluate *f* in X, augment Y and update cost function (if needed)
        print('Acquisition {}'.format(self.num_acquisitions + 1))
        self.evaluate_objective()

        # --- Update model
        if (self.num_acquisitions % self.model_update_interval) == 0:
            self._update_model()
        self.model.get_model_parameters_names()
        self.model.get_model_parameters()

        if parallel and (not self.full_parameter_support):
            current_max_val = self._current_max_value_parallel()
        else:
            current_max_val = self._current_max_value()
        self.historical_optimal_values.append(current_max_val)

        # --- Update current evaluation time and function evaluations
        self.cum_time = time.time() - self.time_zero
        self.historical_time.append(self.cum_time)
        self.num_acquisitions += 1

        if verbosity:
            print("num acquisition: {}, time elapsed: {:.2f}s".format(
                self.num_acquisitions, self.cum_time))

    if results_file is not None:
        self.save_results(results_file)
    if plot:
        self.plot_convergence(confidence_interval=True)
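# Hedged usage sketch for the variant above: 'bo' is a hypothetical, fully configured
# optimizer instance built elsewhere. The keyword arguments mirror the signature;
# 'results.txt' is an illustrative filename.
#
#   bo.run_optimization(max_iter=25,
#                       max_time=3600,            # stop after one hour at the latest
#                       eps=1e-8,                 # minimum distance between consecutive x's
#                       parallel=True,            # parallel estimate of the current optimum
#                       plot=True,                # plot convergence with confidence intervals
#                       results_file='results.txt',
#                       verbosity=True)
#   best_values = bo.historical_optimal_values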
def run_optimization(self, max_iter=1, max_time=np.inf, eps=1e-8, context=None,
                     verbosity=False, evaluations_file=None):
    """
    Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)

    :param max_iter: exploration horizon, or number of acquisitions. If nothing is provided optimizes the current acquisition.
    :param max_time: maximum exploration horizon in seconds.
    :param eps: minimum distance between two consecutive x's to keep running the model.
    :param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
    :param verbosity: flag to print the optimization results after each iteration (default, False).
    :param evaluations_file: filename of the file where the evaluated points and corresponding evaluations are saved (default, None).
    """
    if self.objective is None:
        raise InvalidConfigError("Cannot run the optimization loop without the objective function")

    # --- Save the options to print and save the results
    self.verbosity = verbosity
    self.evaluations_file = evaluations_file
    self.context = context

    # --- Setting up stop conditions
    self.eps = eps
    if (max_iter is None) and (max_time is None):
        self.max_iter = 0
        self.max_time = np.inf
    elif (max_iter is None) and (max_time is not None):
        self.max_iter = np.inf
        self.max_time = max_time
    elif (max_iter is not None) and (max_time is None):
        self.max_iter = max_iter
        self.max_time = np.inf
    else:
        self.max_iter = max_iter
        self.max_time = max_time

    # print("------------------------TRAINING HYPERS----------------------")
    # self._get_hyperparameters()

    # --- Initial function evaluation and model fitting
    if self.X is not None and self.Y is None:
        self.Y, cost_values = self.objective.evaluate(self.X)
        if self.constraint is not None:
            self.C, cost_values = self.constraint.evaluate(self.X)
        if self.cost.cost_type == 'evaluation_time':
            self.cost.update_cost_model(self.X, cost_values)

    #self.model.updateModel(self.X,self.Y)

    # --- Initialize iterations and running time
    self.time_zero = time.time()
    self.cum_time = 0
    self.num_acquisitions = 0
    self.suggested_sample = self.X
    self.Y_new = self.Y
    self.Opportunity_Cost = []
    value_so_far = []

    # --- Initialize time cost of the evaluations
    print("-----------------------MAIN LOOP STARTS----------------------")
    Opportunity_Cost = []
    self.true_best_stats = {
        "true_best": [],
        "mean_gp": [],
        "std gp": [],
        "pf": [],
        "mu_pf": [],
        "var_pf": [],
        "residual_noise": []
    }

    while self.max_iter > self.num_acquisitions:
        self._update_model()
        if self.constraint is None:
            self.Opportunity_Cost_caller_unconstrained()
        else:
            self.Opportunity_Cost_caller_constrained()

        print("maKG optimizer")
        start = time.time()
        self.suggested_sample = self._compute_next_evaluations()
        finish = time.time()
        print("time optimisation point X", finish - start)

        if verbosity:
            # --- Diagnostic plots (NOTE: assume a constrained 2D problem)
            design_plot = initial_design('random', self.space, 1000)
            ac_f = self.expected_improvement(design_plot)
            Y, _ = self.objective.evaluate(design_plot)
            C, _ = self.constraint.evaluate(design_plot)
            pf = self.probability_feasibility_multi_gp(design_plot, self.model_c).reshape(-1, 1)
            mu_f = self.model.predict(design_plot)[0]

            bool_C = np.prod(np.concatenate(C, axis=1) < 0, axis=1)  # 1 where all constraints are satisfied
            func_val = Y * bool_C.reshape(-1, 1)

            print("self.suggested_sample", self.suggested_sample)

            fig, axs = plt.subplots(2, 2)
            axs[0, 0].set_title('True Function')
            axs[0, 0].scatter(design_plot[:, 0], design_plot[:, 1], c=np.array(func_val).reshape(-1))
            axs[0, 0].scatter(self.X[:, 0], self.X[:, 1], color="red", label="sampled")
            axs[0, 0].scatter(self.suggested_sample[:, 0], self.suggested_sample[:, 1],
                              marker="x", color="red", label="suggested")

            axs[0, 1].set_title('approximation Acqu Function')
            axs[0, 1].scatter(design_plot[:, 0], design_plot[:, 1], c=np.array(ac_f).reshape(-1))

            axs[1, 0].set_title("convergence")
            axs[1, 0].plot(range(len(self.Opportunity_Cost)), np.array(self.Opportunity_Cost).reshape(-1))
            axs[1, 0].set_yscale("log")

            axs[1, 1].set_title("mu")
            axs[1, 1].scatter(design_plot[:, 0], design_plot[:, 1],
                              c=np.array(mu_f).reshape(-1) * np.array(pf).reshape(-1))
            plt.show()

        self.X = np.vstack((self.X, self.suggested_sample))

        # --- Evaluate *f* in X, augment Y and update cost function (if needed)
        self.evaluate_objective()
        print("X", self.X, "Y", self.Y, "C", self.C, "OC", self.Opportunity_Cost)

        # --- Update current evaluation time and function evaluations
        self.cum_time = time.time() - self.time_zero
        self.num_acquisitions += 1

    return self.X, self.Y, self.C, self.Opportunity_Cost
def run_optimization(self, max_iter=0, max_time=np.inf, eps=1e-8, context=None,
                     verbosity=False, save_models_parameters=True, report_file=None,
                     evaluations_file=None, models_file=None):
    """
    Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)

    :param max_iter: exploration horizon, or number of acquisitions.
    :param max_time: maximum exploration horizon in seconds.
    :param eps: minimum distance between two consecutive x's to keep running the model.
    :param context: fixes specified variables to a particular context (values) for the optimization run (default, None).
    :param verbosity: flag to print the optimization results after each iteration (default, False).
    :param save_models_parameters: flag to save the model parameters at each iteration (only available for GP and GP_MCMC models).
    :param report_file: filename of the file where a report of the run is saved (default, None).
    :param evaluations_file: filename of the file where the evaluated points and corresponding evaluations are saved (default, None).
    :param models_file: filename of the file where the model parameters are saved (default, None).
    """
    if self.objective is None:
        raise InvalidConfigError("Cannot run the optimization loop without the objective function")

    # --- Save the options to print and save the results
    self.verbosity = verbosity
    self.save_models_parameters = save_models_parameters
    self.report_file = report_file
    self.evaluations_file = evaluations_file
    self.models_file = models_file
    self.model_parameters_iterations = None
    self.context = context

    # --- Check if we can save the model parameters in each iteration
    if self.save_models_parameters:
        if not (isinstance(self.model, GPyOpt.models.GPModel) or
                isinstance(self.model, GPyOpt.models.GPModel_MCMC)):
            print('Models printout after each iteration is only available for GP and GP_MCMC models')
            self.save_models_parameters = False

    # --- Setting up stop conditions
    self.eps = eps
    if (max_iter is None) and (max_time is None):
        self.max_iter = 0
        self.max_time = np.inf
    elif (max_iter is None) and (max_time is not None):
        self.max_iter = np.inf
        self.max_time = max_time
    elif (max_iter is not None) and (max_time is None):
        self.max_iter = max_iter
        self.max_time = np.inf
    else:
        self.max_iter = max_iter
        self.max_time = max_time

    # --- Initial function evaluation and model fitting
    if self.X is not None and self.Y is None:
        self.Y, cost_values = self.objective.evaluate(self.X)
        if self.cost.cost_type == 'evaluation_time':
            self.cost.update_cost_model(self.X, cost_values)

    # --- Initialize iterations and running time
    self.time_zero = time.time()
    self.cum_time = 0
    self.num_acquisitions = 0
    self.suggested_sample = self.X
    self.Y_new = self.Y

    # --- Initialize time cost of the evaluations
    while self.max_time > self.cum_time:
        print('.')

        # --- Update model
        try:
            self._update_model(self.normalization_type)
        except np.linalg.LinAlgError:
            break

        if (self.num_acquisitions >= self.max_iter or
                (len(self.X) > 1 and self._distance_last_evaluations() <= self.eps)):
            break

        self.suggested_sample = self._compute_next_evaluations()

        # --- Augment X
        self.X = np.vstack((self.X, self.suggested_sample))

        # --- Evaluate *f* in X, augment Y and update cost function (if needed)
        self.evaluate_objective()

        # --- Update current evaluation time and function evaluations
        self.cum_time = time.time() - self.time_zero
        self.num_acquisitions += 1

        if verbosity:
            print("num acquisition: {}, time elapsed: {:.2f}s".format(
                self.num_acquisitions, self.cum_time))

    # --- Stop messages and execution time
    self._compute_results()

    # --- Print the desired result in files
    if self.report_file is not None:
        self.save_report(self.report_file)
    if self.evaluations_file is not None:
        self.save_evaluations(self.evaluations_file)
    if self.models_file is not None:
        self.save_models(self.models_file)

    self._save()
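# Hedged usage sketch for the GPyOpt-style loop above: 'bo' is a hypothetical configured
# optimizer instance and the filenames are illustrative. Per the check in the method,
# model printouts are only saved for GP and GP_MCMC models.
#
#   bo.run_optimization(max_iter=30,
#                       verbosity=True,
#                       report_file='report.txt',
#                       evaluations_file='evaluations.txt',
#                       models_file='models.txt')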