Example #1
    def test_gen_candidates(self, gen_candidates=gen_candidates_scipy, options=None):
        options = options or {}
        options = {**options, "maxiter": 5}
        for double in (True, False):
            self._setUp(double=double)
            acqfs = [
                qExpectedImprovement(self.model, best_f=self.f_best),
                qKnowledgeGradient(
                    self.model, num_fantasies=4, current_value=self.f_best
                ),
            ]
            for acqf in acqfs:
                ics = self.initial_conditions
                if isinstance(acqf, qKnowledgeGradient):
                    # one-shot qKG is optimized over q + num_fantasies = 1 + 4 = 5
                    # points jointly, so the initial conditions are repeated
                    ics = ics.repeat(5, 1)

                candidates, _ = gen_candidates(
                    initial_conditions=ics,
                    acquisition_function=acqf,
                    lower_bounds=0,
                    upper_bounds=1,
                    options=options or {},
                )
                if isinstance(acqf, qKnowledgeGradient):
                    candidates = acqf.extract_candidates(candidates)

                self.assertTrue(-EPS <= candidates <= 1 + EPS)
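A note on the two qKnowledgeGradient-specific steps above: with num_fantasies=4 and q=1, the one-shot acquisition function is optimized over 1 + 4 = 5 points jointly (hence ics.repeat(5, 1)), and extract_candidates recovers the actual q candidates from that joint solution. Below is a minimal standalone sketch of this round trip; the toy model and data are assumptions, not the test fixture.

import torch
from botorch.acquisition import qKnowledgeGradient
from botorch.models import SingleTaskGP

# toy 1D model standing in for self.model in the test above
train_x = torch.rand(8, 1, dtype=torch.double)
train_y = torch.sin(6 * train_x)
model = SingleTaskGP(train_x, train_y)

acqf = qKnowledgeGradient(model, num_fantasies=4, current_value=train_y.max())
q_aug = acqf.get_augmented_q_batch_size(q=1)        # 1 + 4 = 5
full_solution = torch.rand(1, q_aug, 1, dtype=torch.double)
candidate = acqf.extract_candidates(full_solution)  # shape (1, 1, 1): the q=1 candidate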
Example #2
import torch
from botorch.acquisition import (
    qExpectedImprovement,
    qKnowledgeGradient,
    qNoisyExpectedImprovement,
    qUpperConfidenceBound,
)
from botorch.acquisition.max_value_entropy_search import qMaxValueEntropy
from botorch.sampling import IIDNormalSampler, SobolQMCNormalSampler


def prepare_acquisition_function(args, model_obj, train_x, train_y, bounds, step):
    if args.num_steps > 500:
        sampler = IIDNormalSampler(num_samples=256)
    else:
        sampler = SobolQMCNormalSampler(num_samples=256)
    if args.acqf == "ei":
        acqf = qExpectedImprovement(
            model=model_obj, best_f=train_y.max(), sampler=sampler,
        )
    elif args.acqf == "ucb":
        acqf = qUpperConfidenceBound(model=model_obj, beta=0.9 ** step)
    elif args.acqf == "nei":
        acqf = qNoisyExpectedImprovement(
            model=model_obj, X_baseline=train_x, sampler=sampler
        )
    elif args.acqf == "kg":
        acqf = qKnowledgeGradient(
            model=model_obj,
            sampler=sampler,
            num_fantasies=None,
            current_value=train_y.max(),
        )
    elif args.acqf == "mves":
        candidate_set = torch.rand(10000, bounds.size(0), device=bounds.device)
        candidate_set = bounds[..., 0] + (bounds[..., 1] - bounds[..., 0]) * candidate_set
        acqf = qMaxValueEntropy(
            model=model_obj, candidate_set=candidate_set, train_inputs=train_x,
        )
    else:
        raise ValueError(f"Unrecognized acquisition function: {args.acqf}")

    return acqf
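A hypothetical call site for prepare_acquisition_function. The argparse.Namespace fields, the per-dimension (low, high) bounds layout, and the untrained SingleTaskGP below are assumptions inferred from the snippet; the sampler constructors above also assume an older BoTorch API (num_samples rather than sample_shape).

import argparse

import torch
from botorch.models import SingleTaskGP

train_x = torch.rand(20, 2, dtype=torch.double)
train_y = -(train_x - 0.5).pow(2).sum(dim=-1, keepdim=True)
model_obj = SingleTaskGP(train_x, train_y)  # hyperparameters left untrained to keep the sketch short

bounds = torch.tensor([[0.0, 1.0], [0.0, 1.0]], dtype=torch.double)  # one (low, high) row per dimension
args = argparse.Namespace(acqf="kg", num_steps=100)

acqf = prepare_acquisition_function(args, model_obj, train_x, train_y, bounds, step=0)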
Example #3
    def _maximize_kg(self) -> None:
        """
        Maximizes the KG acquisition function and stores the resulting
        candidate and its value.
        """
        acq_func = qKnowledgeGradient(
            model=self.model, current_value=self.current_best_val
        )
        # acq_func = qExpectedImprovement(model=self.model, best_f=self.current_best_val)
        # acq_func = ExpectedImprovement(model=self.model, best_f=self.current_best_val)
        # bounds: row 0 holds the lower bounds, row 1 the upper bounds,
        # one column per input dimension (here the unit cube)
        self.next_candidate, self.kg_value = optimize_acqf(
            acq_func,
            Tensor([[0], [1]]).repeat(1, self.dim),
            q=1,
            num_restarts=self.num_restarts,
            raw_samples=self.raw_samples,
        )
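The same step as a standalone sketch, with the class attributes replaced by local stand-ins (the toy model, dim, and restart settings below are assumptions, not from the original class). Note that optimize_acqf extracts the actual q candidates from the one-shot qKG solution itself, so unlike Example #1 no explicit extract_candidates call is needed.

import torch
from botorch.acquisition import qKnowledgeGradient
from botorch.models import SingleTaskGP
from botorch.optim import optimize_acqf

dim = 2
train_x = torch.rand(15, dim, dtype=torch.double)
train_y = -(train_x - 0.5).pow(2).sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_x, train_y)  # hyperparameters left untrained for brevity

acq_func = qKnowledgeGradient(
    model=model,
    num_fantasies=16,  # smaller than the default 64 to keep the sketch cheap
    current_value=train_y.max(),
)
# (2, dim) bounds: row 0 = lower, row 1 = upper, equivalent to
# Tensor([[0], [1]]).repeat(1, dim) in the method above
bounds = torch.stack([torch.zeros(dim), torch.ones(dim)]).to(train_x)
next_candidate, kg_value = optimize_acqf(
    acq_func,
    bounds=bounds,
    q=1,
    num_restarts=4,
    raw_samples=32,
)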