Example #1
0
 def test_probability_of_improvement_batch(self):
     """Check PI on a 2-batch of single-point posteriors against the
     analytically known standard-normal CDF values, and verify that a
     multi-output model is rejected at construction time."""
     for dtype in (torch.float, torch.double):
         tkwargs = {"device": self.device, "dtype": dtype}
         # Means 0.0 and 0.67449 give Phi(0)=0.5 and Phi(0.67449)~=0.75
         # under unit variance with best_f=0.
         posterior_mean = torch.tensor([0.0, 0.67449], **tkwargs).view(2, 1, 1)
         mock_model = MockModel(
             MockPosterior(
                 mean=posterior_mean, variance=torch.ones_like(posterior_mean)
             )
         )
         acqf = ProbabilityOfImprovement(model=mock_model, best_f=0.0)
         pi_values = acqf(torch.zeros(2, 1, 1, **tkwargs))
         expected = torch.tensor([0.5, 0.75], **tkwargs)
         self.assertTrue(torch.allclose(pi_values, expected, atol=1e-4))
         # check for proper error if multi-output model
         multi_mean = torch.rand(3, 1, 2, **tkwargs)
         multi_model = MockModel(
             MockPosterior(mean=multi_mean, variance=torch.ones_like(multi_mean))
         )
         with self.assertRaises(UnsupportedError):
             ProbabilityOfImprovement(model=multi_model, best_f=0.0)
Example #2
0
    def test_acquisition_functions(self):
        """Smoke-test that a NUTS-fitted fully Bayesian model composes with the
        analytic, MC, and multi-objective acquisition functions: each acqf must
        evaluate on batched candidates and return the batch-shaped values."""
        tkwargs = {"device": self.device, "dtype": torch.double}
        train_X, train_Y, train_Yvar, model = self._get_data_and_model(
            infer_noise=True, **tkwargs
        )
        # Tiny MCMC budget: this is a shape/smoke test, not a fit-quality test.
        fit_fully_bayesian_model_nuts(
            model, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
        )
        sampler = IIDNormalSampler(num_samples=2)
        best_f = train_Y.max()
        analytic_acqfs = [
            ExpectedImprovement(model=model, best_f=best_f),
            ProbabilityOfImprovement(model=model, best_f=best_f),
            PosteriorMean(model=model),
            UpperConfidenceBound(model=model, beta=4),
        ]
        mc_acqfs = [
            qExpectedImprovement(model=model, best_f=best_f, sampler=sampler),
            qNoisyExpectedImprovement(
                model=model, X_baseline=train_X, sampler=sampler
            ),
            qProbabilityOfImprovement(model=model, best_f=best_f, sampler=sampler),
            qSimpleRegret(model=model, sampler=sampler),
            qUpperConfidenceBound(model=model, beta=4, sampler=sampler),
        ]
        # Each MOO acqf gets its own ModelListGP / ref_point so no state is
        # shared between them (matching independent construction).
        moo_acqfs = [
            qNoisyExpectedHypervolumeImprovement(
                model=ModelListGP(model, model),
                X_baseline=train_X,
                ref_point=torch.zeros(2, **tkwargs),
                sampler=sampler,
            ),
            qExpectedHypervolumeImprovement(
                model=ModelListGP(model, model),
                ref_point=torch.zeros(2, **tkwargs),
                sampler=sampler,
                partitioning=NondominatedPartitioning(
                    ref_point=torch.zeros(2, **tkwargs), Y=train_Y.repeat([1, 2])
                ),
            ),
        ]

        for acqf in analytic_acqfs + mc_acqfs + moo_acqfs:
            for shape in ([5], [6, 5, 2]):
                X_test = torch.rand(*shape, 1, 4, **tkwargs)
                self.assertEqual(acqf(X_test).shape, torch.Size(shape))