utility_param_distribution = UtilityDistribution(
    prior_sample_generator=prior_sample_generator,
    utility_func=utility_func,
    elicitation_strategy=random_preference_elicitation)
utility = Utility(func=utility_func,
                  gradient=utility_gradient,
                  parameter_distribution=utility_param_distribution,
                  affine=True)

# --- Sampling policy
sampling_policy_name = 'uEI'
learn_preferences = False

if sampling_policy_name == 'uEI':
    model = MultiOutputGP(output_dim=m, exact_feval=[True] * m, fixed_hyps=False)  # Model (Multi-output GP)
    acquisition_optimizer = AcquisitionOptimizer(space=space,
                                                 model=model,
                                                 utility=utility,
                                                 optimizer='lbfgs',
                                                 include_baseline_points=True)
    acquisition = uEI_affine(model, space,
                             optimizer=acquisition_optimizer,
                             utility=utility)
    evaluator = GPyOpt.core.evaluators.Sequential(acquisition)
    sampling_policy = AcquisitionFunction(model, space, acquisition, evaluator)

if learn_preferences:
def suggest_sample(self, number_of_samples=1):
    """
    Returns a suggested next point to evaluate.
    """
    # Get current evaluations
    X_evaluated, Y_evaluated = self.model.get_XY()
    self.X_aux = X_evaluated
    self.Y_aux = Y_evaluated

    # Auxiliary Bayesian optimization model to run ParEGO: draw a random weight
    # and scalarize the observed outputs into a single objective
    weight = np.random.dirichlet(np.ones(len(self.Y_aux)))  # self._sample_posterior_weight_for_chebyshev_scalarization()
    self.Y_aux = np.reshape(self.Y_aux, (len(self.Y_aux), self.Y_aux[0].shape[0]))
    scalarized_fX = chebyshev_scalarization(self.Y_aux, weight)
    scalarized_fX = np.reshape(scalarized_fX, (len(scalarized_fX), 1))
    aux_model = MultiOutputGP(output_dim=1, exact_feval=[True], fixed_hyps=False)

    # Linear utility (with unit weight) for the auxiliary single-output problem
    def aux_utility_func(y, parameter):
        return np.dot(parameter, y)

    def aux_utility_gradient(y, parameter):
        return parameter

    aux_utility_parameter_support = np.ones((1, 1))
    aux_utility_parameter_prob_distribution = np.ones((1,))
    aux_utility_param_distribution = UtilityDistribution(
        support=aux_utility_parameter_support,
        prob_dist=aux_utility_parameter_prob_distribution,
        utility_func=aux_utility_func)
    aux_utility = Utility(func=aux_utility_func,
                          gradient=aux_utility_gradient,
                          parameter_distribution=aux_utility_param_distribution,
                          affine=True)
    aux_acquisition_optimizer = AcquisitionOptimizer(space=self.space,
                                                     model=aux_model,
                                                     utility=aux_utility,
                                                     optimizer=self.optimizer,
                                                     include_baseline_points=True)
    aux_acquisition = uEI_affine(aux_model, self.space,
                                 optimizer=aux_acquisition_optimizer,
                                 utility=aux_utility)
    aux_evaluator = GPyOpt.core.evaluators.Sequential(aux_acquisition)
    aux_sampling_policy = AcquisitionFunction(aux_model, self.space,
                                              aux_acquisition, aux_evaluator)

    # Run the auxiliary BO problem on the scalarized data and return its suggestion
    bopl = BOPL(aux_model, self.space,
                sampling_policy=aux_sampling_policy,
                utility=aux_utility,
                X_init=self.X_aux,
                Y_init=[scalarized_fX],
                dynamic_utility_parameter_distribution=False)
    suggested_sample = bopl.suggest_next_points_to_evaluate()
    return suggested_sample
import numpy as np

from models import BasicModel
from models import MultiOutputGP
from sampling_policies import Random
from sampling_policies import uTS
from utility import Utility
from utility import UtilityDistribution
from optimization_services import AcquisitionOptimizer
import aux_software.GPyOpt as GPyOpt

# Space
d = 2
space = GPyOpt.Design_space(space=[{'name': 'var', 'type': 'continuous',
                                    'domain': (0, 1), 'dimensionality': d}])

# Model (Multi-output GP)
m = 1
model = MultiOutputGP(output_dim=m, fixed_hyps=False)


# Utility function
def utility_func(y, parameter):
    return np.squeeze(y)


def utility_gradient(y, parameter):
    return parameter


utility_parameter_support = np.ones((1, 1))
utility_parameter_prob_distribution = np.ones((1,))
utility_param_distribution = UtilityDistribution(
    support=utility_parameter_support,
    prob_dist=utility_parameter_prob_distribution,
    utility_func=utility_func)
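To see how these pieces fit together, here is a minimal sketch of completing the loop. It reuses only calls that appear in the snippets above (the Utility constructor, the BOPL constructor, and suggest_next_points_to_evaluate); the attribute function f and the initial design X_init/Y_init are hypothetical placeholders, BOPL is assumed to be importable from the package (its import is not shown here), and sampling_policy refers to the uEI-based policy constructed in the first snippet.

utility = Utility(func=utility_func,
                  gradient=utility_gradient,
                  parameter_distribution=utility_param_distribution,
                  affine=True)

# Hypothetical attribute function and initial design (illustrative placeholders only)
def f(X):
    return [np.sum(np.square(X - 0.5), axis=1, keepdims=True)]

X_init = np.random.rand(5, d)
Y_init = f(X_init)  # list with one (n, 1) array, matching the Y_init=[...] format used above

# 'sampling_policy' is assumed to be the uEI-based AcquisitionFunction policy built earlier
bopl = BOPL(model, space,
            sampling_policy=sampling_policy,
            utility=utility,
            X_init=X_init,
            Y_init=Y_init,
            dynamic_utility_parameter_distribution=False)
suggested_point = bopl.suggest_next_points_to_evaluate()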