Example #1
    def suggest_sample(self, number_of_samples=1):
        """
        Returns a suggested next point to evaluate.
        """
        # Get current evaluations
        X_evaluated, Y_evaluated = self.model.get_XY()
        self.X_aux = X_evaluated
        self.Y_aux = Y_evaluated

        # Auxiliary Bayesian optimization model to run ParEGO: draw a random
        # scalarization weight (one component per objective) from a Dirichlet prior.
        weight = np.random.dirichlet(
            np.ones(len(self.Y_aux))
        )  # alternative: self._sample_posterior_weight_for_chebyshev_scalarization()
        # Stack the outputs into a single array (one row per objective) and
        # collapse them with the Chebyshev scalarization.
        self.Y_aux = np.reshape(self.Y_aux,
                                (len(self.Y_aux), self.Y_aux[0].shape[0]))
        scalarized_fX = chebyshev_scalarization(self.Y_aux, weight)
        scalarized_fX = np.reshape(scalarized_fX, (len(scalarized_fX), 1))

        aux_model = MultiOutputGP(output_dim=1,
                                  exact_feval=[True],
                                  fixed_hyps=False)

        # Linear utility over the (single) scalarized output; its gradient in y
        # is just the parameter vector.
        def aux_utility_func(y, parameter):
            return np.dot(parameter, y)

        def aux_utility_gradient(y, parameter):
            return parameter

        aux_utility_parameter_support = np.ones((1, 1))
        aux_utility_parameter_prob_distribution = np.ones((1, ))
        aux_utility_param_distribution = UtilityDistribution(
            support=aux_utility_parameter_support,
            prob_dist=aux_utility_parameter_prob_distribution,
            utility_func=aux_utility_func)
        aux_utility = Utility(
            func=aux_utility_func,
            gradient=aux_utility_gradient,
            parameter_distribution=aux_utility_param_distribution,
            affine=True)

        aux_acquisition_optimizer = AcquisitionOptimizer(
            space=self.space,
            model=aux_model,
            utility=aux_utility,
            optimizer=self.optimizer,
            include_baseline_points=True)

        aux_acquisition = uEI_affine(aux_model,
                                     self.space,
                                     optimizer=aux_acquisition_optimizer,
                                     utility=aux_utility)
        aux_evaluator = GPyOpt.core.evaluators.Sequential(aux_acquisition)
        aux_sampling_policy = AcquisitionFunction(aux_model, self.space,
                                                  aux_acquisition,
                                                  aux_evaluator)
        bopl = BOPL(aux_model,
                    self.space,
                    sampling_policy=aux_sampling_policy,
                    utility=aux_utility,
                    X_init=self.X_aux,
                    Y_init=[scalarized_fX],
                    dynamic_utility_parameter_distribution=False)
        suggested_sample = bopl.suggest_next_points_to_evaluate()
        return suggested_sample
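
Example #1 implements one ParEGO step: the observed objectives are collapsed with a randomly weighted Chebyshev scalarization, a single-output auxiliary GP is fitted to the scalarized values, and expected improvement on that GP proposes the next point. In case `chebyshev_scalarization` is unfamiliar, here is a minimal sketch of the augmented Chebyshev scalarization ParEGO uses, assuming the objectives are stacked as an (n_objectives, n_points) array as in the reshape above; rho = 0.05 is a hypothetical augmentation constant, and the library's own implementation and sign convention may differ:

import numpy as np

def chebyshev_scalarization_sketch(Y, weight, rho=0.05):
    # Y: (m, n) array, one row per objective; weight: (m,) non-negative,
    # summing to 1; rho: small augmentation constant (assumed default).
    weighted = weight[:, None] * Y                          # (m, n)
    return np.max(weighted, axis=0) + rho * np.sum(weighted, axis=0)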
Example #2
    def utility_func(y, parameter):
        return np.dot(parameter, y)

    def utility_gradient(y, parameter):
        return parameter

    def prior_sample_generator(n_samples=1, seed=None):
        # `m` (the number of objectives) is taken from the enclosing scope.
        if seed is None:
            samples = np.random.dirichlet(np.ones((m, )), n_samples)
        else:
            random_state = np.random.RandomState(seed)
            samples = random_state.dirichlet(np.ones((m, )), n_samples)
        return samples

    utility_param_distribution = UtilityDistribution(
        prior_sample_generator=prior_sample_generator,
        utility_func=utility_func,
        elicitation_strategy=random_preference_elicitation)
    utility = Utility(func=utility_func,
                      gradient=utility_gradient,
                      parameter_distribution=utility_param_distribution,
                      affine=True)

    # --- Sampling policy
    sampling_policy_name = 'uEI'
    learn_preferences = False
    if sampling_policy_name == 'uEI':
        model = MultiOutputGP(output_dim=m,
                              exact_feval=[True] * m,
                              fixed_hyps=False)  # Model (Multi-output GP)
        acquisition_optimizer = AcquisitionOptimizer(
            space=space,
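
Example #2 is truncated mid-call, but prior_sample_generator above is complete and can be exercised on its own: it returns an (n_samples, m) array of Dirichlet weight vectors lying on the probability simplex. A quick shape check, assuming m = 3 objectives for illustration:

import numpy as np

m = 3  # assumed number of objectives for this check
w = prior_sample_generator(n_samples=5, seed=0)
assert w.shape == (5, m)
assert np.allclose(w.sum(axis=1), 1.0)  # each row is a point on the simplex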
Example #3
    m = 1
    model = MultiOutputGP(output_dim=m, fixed_hyps=False)

    # Utility function (identity; in this single-output case the parameter is unused)
    def utility_func(y, parameter):
        return np.squeeze(y)


    def utility_gradient(y, parameter):
        return parameter


    utility_parameter_support = np.ones((1, 1))
    utility_parameter_prob_distribution = np.ones((1,))
    utility_param_distribution = UtilityDistribution(support=utility_parameter_support,
                                                     prob_dist=utility_parameter_prob_distribution,
                                                     utility_func=utility_func)
    dynamic_utility_parameter_distribution = False
    utility = Utility(func=utility_func, gradient=utility_gradient,
                      parameter_distribution=utility_param_distribution, affine=True)

    # --- Sampling policy
    sampling_policy_name = 'Random'
    if sampling_policy_name == 'uTS':
        sampling_policy = uTS(model, space, optimizer='CMA', utility=utility)
    elif sampling_policy_name == 'Random':
        sampling_policy = Random(model, space)

    # Attributes
    def f(X):
        X = np.atleast_2d(X)
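
With the degenerate parameter distribution in Example #3 (a single unit atom with probability one), the utility reduces to the identity and the preference-learning machinery collapses to plain single-objective BO. A small sanity check of that reduction, using the names defined above:

theta = utility_parameter_support[0]  # the single support point, array([1.])
y = np.array([[0.7]])
assert np.allclose(utility_func(y, theta), np.squeeze(y))  # u(y; theta) = y
assert np.allclose(utility_gradient(y, theta), 1.0)        # du/dy = 1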
Example #4
        best_val_found = np.inf
        for x0 in starting_points:
            # fmin_l_bfgs_b returns (x_opt, f_opt, info); keep the best of the
            # multi-start runs.
            res = scipy.optimize.fmin_l_bfgs_b(marginal_func,
                                               x0,
                                               approx_grad=True,
                                               bounds=bounds)
            if best_val_found > res[1]:
                best_val_found = res[1]
                marginal_opt = res[0]
        X[j + m, :] = marginal_opt
    utility_parameter_support = f(X).T
    utility_parameter_prob_distribution = np.ones((2 * m, )) / (2 * m)
    utility_param_distribution = UtilityDistribution(
        support=utility_parameter_support,
        prob_dist=utility_parameter_prob_distribution,
        utility_func=utility_func,
        elicitation_strategy=random_preference_elicitation)

    # Expectation of utility
    def expectation_utility_func(mu, var, parameter):
        aux = (mu.transpose() - parameter).transpose()
        val = -np.sum(np.square(aux), axis=0) - np.sum(var, axis=0)
        return val

    def expectation_utility_gradient(mu, var, parameter):
        mu_aux = np.squeeze(mu)
        var_aux = np.squeeze(var)
        gradient = -np.concatenate(
            (2 * (mu_aux - parameter), np.ones((len(var_aux), ))))
        return gradient
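
These closed forms are consistent with a utility of the form u(y; theta) = -||y - theta||^2 under independent Gaussian marginals y_j ~ N(mu_j, var_j), for which E[u] = -sum_j (mu_j - theta_j)^2 - sum_j var_j and the gradient with respect to (mu, var) is (-2(mu - theta), -1). The utility itself is inferred from these expressions, not shown in the snippet; a Monte Carlo sanity check under that assumption:

import numpy as np

rng = np.random.RandomState(0)
mu = np.array([0.3, -1.2])
var = np.array([0.5, 0.8])
theta = np.array([0.0, 0.5])

samples = rng.normal(mu, np.sqrt(var), size=(200000, 2))
mc_estimate = np.mean(-np.sum((samples - theta) ** 2, axis=1))
closed_form = expectation_utility_func(mu.reshape(2, 1), var.reshape(2, 1), theta)
assert np.allclose(mc_estimate, closed_form, atol=5e-2)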
Example #5
    attributes = Attributes([f1, f2, f3], as_list=True, output_dim=m)
    
    # Utility function
    def utility_func(y, parameter):
        return np.dot(parameter, y)


    def utility_gradient(y, parameter):
        return parameter
    
    # Parameter distribution
    L = 3
    support = np.eye(L)
    prob_dist = np.ones((L,)) / L
    
    utility_parameter_distribution = UtilityDistribution(support=support, prob_dist=prob_dist, utility_func=utility_func, elicitation_strategy=None)
    utility = Utility(func=utility_func, gradient=utility_gradient, parameter_distribution=utility_parameter_distribution, affine=True)
    
    # --- Sampling policy
    sampling_policy_name = 'uTS'
    if sampling_policy_name == 'uEI':
        model = MultiOutputGP(output_dim=m, exact_feval=[True] * m, fixed_hyps=False)  # Model (Multi-output GP)
        acquisition_optimizer = AcquisitionOptimizer(space=space, model=model, utility=utility, optimizer='lbfgs', include_baseline_points=True)
        acquisition = uEI_affine(model, space, optimizer=acquisition_optimizer, utility=utility)
        evaluator = GPyOpt.core.evaluators.Sequential(acquisition)
        sampling_policy = AcquisitionFunction(model, space, acquisition, evaluator)
    elif sampling_policy_name == 'uTS':
        model = MultiOutputGP(output_dim=m, exact_feval=[True] * m, fixed_hyps=False)  # Model (Multi-output GP)
        sampling_policy = uTS(model, space, optimizer='CMA', utility=utility)
    elif sampling_policy_name == 'Random':
        model = BasicModel(output_dim=m)
        sampling_policy = Random(model, space)
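
The final branch is cut off in the source; based on the identical dispatch in Example #3 it presumably ends with sampling_policy = Random(model, space), restored above. Mirroring Example #1, the chosen policy would then be handed to a BOPL loop; a hypothetical continuation (X_init and Y_init are assumed to exist in the surrounding script):

# Hypothetical continuation mirroring the BOPL usage in Example #1:
bopl = BOPL(model, space, sampling_policy=sampling_policy, utility=utility,
            X_init=X_init, Y_init=Y_init,
            dynamic_utility_parameter_distribution=False)
suggested_sample = bopl.suggest_next_points_to_evaluate()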