Example #1
    def __init__(self, model: Union[IModel, IDifferentiable, IEntropySearchModel], cost_model: IModel,
                 space: ParameterSpace, sampler: McmcSampler = None, num_samples: int = 400,
                 num_representer_points: int = 50, proposal_function: Acquisition = None,
                 burn_in_steps: int = 50) -> None:

        """
        Entropy Search acquisition function approximates the distribution of the global
        minimum and tries to decrease its entropy. See this paper for more details:

        P. Hennig and C. J. Schuler
        Entropy search for information-efficient global optimization
        Journal of Machine Learning Research, 13, 2012

        :param model: GP model to compute the distribution of the minimum dubbed pmin.
        :param space: Domain space which we need for the sampling of the representer points
        :param sampler: mcmc sampler for representer points
        :param num_samples: integer determining how many samples to draw for each candidate input
        :param num_representer_points: integer determining how many representer points to sample
        :param proposal_function: Function that defines an unnormalized log proposal measure from which to sample the
        representer points. The default is expected improvement.
        :param burn_in_steps: integer that defines the number of burn-in steps when sampling the representer points
        """
        super().__init__()

        self.cost_model = cost_model
        self.es = EntropySearch(model, space, sampler,
                                num_samples, num_representer_points,
                                proposal_function, burn_in_steps)
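A minimal usage sketch for this constructor, assuming it belongs to a cost-aware entropy search class (named CostAwareEntropySearch here purely for illustration) and that both the objective and its evaluation cost are modelled with emukit's GPyModelWrapper; the toy data below is made up:

import numpy as np
import GPy
from emukit.core import ContinuousParameter, ParameterSpace
from emukit.model_wrappers import GPyModelWrapper

# Toy objective values and evaluation costs (illustrative only)
X = np.random.rand(20, 1)
Y = np.sin(3 * X) + 0.05 * np.random.randn(20, 1)
C = 1.0 + X  # pretend the cost grows with x

space = ParameterSpace([ContinuousParameter("x", 0, 1)])
objective_model = GPyModelWrapper(GPy.models.GPRegression(X, Y))
cost_model = GPyModelWrapper(GPy.models.GPRegression(X, C))

# CostAwareEntropySearch is a hypothetical name for the class whose __init__ is shown above
acquisition = CostAwareEntropySearch(objective_model, cost_model, space,
                                     num_representer_points=25)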
Example #2
def entropy_search_acquisition(gpy_model, continuous_space):
    sampler = AffineInvariantEnsembleSampler(continuous_space)
    return EntropySearch(gpy_model, continuous_space, sampler, num_representer_points=10)
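A brief sketch of how the returned acquisition could be used, assuming gpy_model wraps a GP fitted on one-dimensional data over continuous_space and that the helper above is a plain function (in a test suite it may be a pytest fixture); the evaluation grid is illustrative:

import numpy as np

acquisition = entropy_search_acquisition(gpy_model, continuous_space)

# Evaluate the acquisition on a grid of candidate points, shape (n_points, n_dims)
x_grid = np.linspace(0, 1, 50).reshape(-1, 1)
scores = acquisition.evaluate(x_grid)

# Pick the candidate expected to yield the most information about the minimum
x_next = x_grid[np.argmax(scores)]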
Example #3
    with_gradients = False

elif args.model_type == "gp":
    model = BOGP(X_init=X_init, Y_init=Y_init)

if args.acquisition_type == "ei":
    acquisition = ExpectedImprovement(model)
elif args.acquisition_type == "pi":
    acquisition = ProbabilityOfImprovement(model)
elif args.acquisition_type == "nlcb":
    acquisition = NegativeLowerConfidenceBound(model)
elif args.acquisition_type == "logei":
    acquisition = LogExpectedImprovement(model)
elif args.acquisition_type == "entropy_search":
    model = BOGP(X_init=X_init, Y_init=Y_init)
    acquisition = EntropySearch(model, space=space)


# if with_gradients:
#    acquisition_optimizer = AcquisitionOptimizer(space)
# else:
acquisition_optimizer = DirectOptimizer(space)

candidate_point_calculator = Sequential(acquisition, acquisition_optimizer)

bo = BayesianOptimizationLoop(model=model, space=space, X_init=X_init, Y_init=Y_init, acquisition=acquisition,
                              candidate_point_calculator=candidate_point_calculator)
bo.run_loop(user_function=obj, stopping_condition=FixedIterationsStoppingCondition(args.num_iterations))

curr_inc = np.inf
traj = []
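The trailing curr_inc and traj lines suggest the script goes on to record the incumbent trajectory; a minimal continuation under that assumption, reading the observed objective values back from emukit's loop state:

# Best observed value after each evaluation (assumes minimization)
for y in bo.loop_state.Y.flatten():
    curr_inc = min(curr_inc, float(y))
    traj.append(curr_inc)

print("Incumbent trajectory:", traj)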