Example #1
        elicitation_strategy=random_preference_elicitation)
    utility = Utility(func=utility_func,
                      gradient=utility_gradient,
                      parameter_distribution=utility_param_distribution,
                      affine=True)

    # --- Sampling policy
    sampling_policy_name = 'uEI'
    learn_preferences = False
    if sampling_policy_name == 'uEI':
        model = MultiOutputGP(output_dim=m,
                              exact_feval=[True] * m,
                              fixed_hyps=False)  # Model (Multi-output GP)
        acquisition_optimizer = AcquisitionOptimizer(
            space=space,
            model=model,
            utility=utility,
            optimizer='lbfgs',
            include_baseline_points=True)
        acquisition = uEI_affine(model,
                                 space,
                                 optimizer=acquisition_optimizer,
                                 utility=utility)
        evaluator = GPyOpt.core.evaluators.Sequential(acquisition)
        sampling_policy = AcquisitionFunction(model, space, acquisition,
                                              evaluator)
        if learn_preferences:
            dynamic_utility_parameter_distribution = True
        else:
            dynamic_utility_parameter_distribution = False
            sampling_policy_name = 'uEI_prior'
    elif sampling_policy_name == 'uTS':
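
A minimal sketch, mirroring the BOPL constructor call in Example #2 below, of how the policy configured above would typically be driven; X_init and Y_init stand for a hypothetical initial design and its evaluations:

bopl = BOPL(model,
            space,
            sampling_policy=sampling_policy,
            utility=utility,
            X_init=X_init,  # hypothetical initial inputs
            Y_init=Y_init,  # hypothetical initial evaluations
            dynamic_utility_parameter_distribution=dynamic_utility_parameter_distribution)
suggested_sample = bopl.suggest_next_points_to_evaluate()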
Example #2
    def suggest_sample(self, number_of_samples=1):
        """
        Returns a suggested next point to evaluate.
        """
        # Get current evaluations
        X_evaluated, Y_evaluated = self.model.get_XY()
        self.X_aux = X_evaluated
        self.Y_aux = Y_evaluated

        # Auxiliary Bayesian optimization model to run ParEGO
        # Dirichlet(1, ..., 1) samples the weight vector uniformly from the
        # probability simplex, as in ParEGO's random scalarizations.
        weight = np.random.dirichlet(
            np.ones(len(self.Y_aux))
        )  # self._sample_posterior_weight_for_chebyshev_scalarization()
        # Stack the m per-objective outputs into an (m, n) array and collapse
        # them into a single objective with the random Chebyshev weights.
        self.Y_aux = np.reshape(self.Y_aux,
                                (len(self.Y_aux), self.Y_aux[0].shape[0]))
        scalarized_fX = chebyshev_scalarization(self.Y_aux, weight)
        scalarized_fX = np.reshape(scalarized_fX, (len(scalarized_fX), 1))

        # Single-output GP to be fitted to the scalarized objective values.
        aux_model = MultiOutputGP(output_dim=1,
                                  exact_feval=[True],
                                  fixed_hyps=False)

        def aux_utility_func(y, parameter):
            return np.dot(parameter, y)

        def aux_utility_gradient(y, parameter):
            return parameter

        # A single support point (parameter = 1) with probability one makes
        # the expected utility equal the scalarized value itself, so uEI
        # reduces to standard EI on the scalarized objective.
        aux_utility_parameter_support = np.ones((1, 1))
        aux_utility_parameter_prob_distribution = np.ones((1,))
        aux_utility_param_distribution = UtilityDistribution(
            support=aux_utility_parameter_support,
            prob_dist=aux_utility_parameter_prob_distribution,
            utility_func=aux_utility_func)
        aux_utility = Utility(
            func=aux_utility_func,
            gradient=aux_utility_gradient,
            parameter_distribution=aux_utility_param_distribution,
            affine=True)

        aux_acquisition_optimizer = AcquisitionOptimizer(
            space=self.space,
            model=aux_model,
            utility=aux_utility,
            optimizer=self.optimizer,
            include_baseline_points=True)

        aux_acquisition = uEI_affine(aux_model,
                                     self.space,
                                     optimizer=aux_acquisition_optimizer,
                                     utility=aux_utility)
        aux_evaluator = GPyOpt.core.evaluators.Sequential(aux_acquisition)
        aux_sampling_policy = AcquisitionFunction(aux_model, self.space,
                                                  aux_acquisition,
                                                  aux_evaluator)
        bopl = BOPL(aux_model,
                    self.space,
                    sampling_policy=aux_sampling_policy,
                    utility=aux_utility,
                    X_init=self.X_aux,
                    Y_init=[scalarized_fX],
                    dynamic_utility_parameter_distribution=False)
        suggested_sample = bopl.suggest_next_points_to_evaluate()
        return suggested_sample
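
The chebyshev_scalarization helper used above is ParEGO's augmented Chebyshev scalarization (Knowles, 2006). A minimal sketch of one common form, assuming fX has shape (m, n) with one row per objective as built above; the actual helper may differ in sign or normalisation conventions:

import numpy as np

def chebyshev_scalarization(fX, weight, rho=0.05):
    # Min-max normalise each objective to [0, 1], then combine the weighted
    # maximum with a small weighted-sum term (the "augmented" part).
    fX = (fX - fX.min(axis=1, keepdims=True)) / \
        (np.ptp(fX, axis=1, keepdims=True) + 1e-12)
    weighted = weight[:, None] * fX  # shape (m, n)
    return weighted.max(axis=0) + rho * weighted.sum(axis=0)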
Example #3
                      parameter_distribution=utility_param_distribution,
                      expectation=expectation_utility)

    # --- Sampling policy
    sampling_policy_name = 'Random'
    if sampling_policy_name == 'uEI':
        # Model (Multi-output GP)
        model = MultiOutputGP(output_dim=m,
                              exact_feval=[True] * m,
                              fixed_hyps=False)

        # Sampling policy
        acquisition_optimizer = AcquisitionOptimizer(
            space=space,
            model=model,
            utility=utility,
            expectation_utility=expectation_utility,
            optimizer='lbfgs',
            inner_optimizer='lbfgs')

        acquisition = uEI(model,
                          space,
                          optimizer=acquisition_optimizer,
                          utility=utility)
        evaluator = GPyOpt.core.evaluators.Sequential(acquisition)
        sampling_policy = AcquisitionFunction(model, space, acquisition,
                                              evaluator)
    elif sampling_policy_name == 'TS':
        model = MultiOutputGP(output_dim=m,
                              exact_feval=[True] * m,
                              fixed_hyps=False)  # Model (Multi-output GP)
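
With sampling_policy_name set to 'Random' above, neither branch shown here runs. A minimal sketch of such a fallback, assuming GPyOpt's standard random experiment design is available (the original script may implement it differently):

from GPyOpt.experiment_design import initial_design

# Draw the next query point uniformly at random from the design space.
suggested_sample = initial_design('random', space, 1)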