Example #1
    def test_expected_improvement_batch(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([-0.5, 0.0, 0.5], device=device, dtype=dtype).view(
                3, 1, 1
            )
            variance = torch.ones(3, 1, 1, device=device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))
            module = ExpectedImprovement(model=mm, best_f=0.0)
            X = torch.empty(3, 1, 1, device=device, dtype=dtype)  # dummy
            ei = module(X)
            ei_expected = torch.tensor(
                [0.19780, 0.39894, 0.69780], device=device, dtype=dtype
            )
            self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))
            # check for proper error if multi-output model
            mean2 = torch.rand(3, 1, 2, device=device, dtype=dtype)
            variance2 = torch.rand(3, 1, 2, device=device, dtype=dtype)
            mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
            module2 = ExpectedImprovement(model=mm2, best_f=0.0)
            with self.assertRaises(UnsupportedError):
                module2(X)

            # test objective (single-output)
            mean = torch.tensor([[[0.5]], [[0.25]]], device=device, dtype=dtype)
            covar = torch.tensor([[[[0.16]]], [[[0.125]]]], device=device, dtype=dtype)
            mvn = MultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([0.5], device=device, dtype=dtype)
            obj = ScalarizedObjective(weights)
            ei = ExpectedImprovement(model=mm, best_f=0.0, objective=obj)
            X = torch.rand(2, 1, 2, device=device, dtype=dtype)
            ei_expected = torch.tensor([[0.2601], [0.1500]], device=device, dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))

            # test objective (multi-output)
            mean = torch.tensor(
                [[[-0.25, 0.5]], [[0.2, -0.1]]], device=device, dtype=dtype
            )
            covar = torch.tensor(
                [[[0.5, 0.125], [0.125, 0.5]], [[0.25, -0.1], [-0.1, 0.25]]],
                device=device,
                dtype=dtype,
            )
            mvn = MultitaskMultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([2.0, 1.0], device=device, dtype=dtype)
            obj = ScalarizedObjective(weights)
            ei = ExpectedImprovement(model=mm, best_f=0.0, objective=obj)
            X = torch.rand(2, 1, 2, device=device, dtype=dtype)
            ei_expected = torch.tensor([0.6910, 0.5371], device=device, dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))

        # test bad objective class
        with self.assertRaises(UnsupportedError):
            ExpectedImprovement(model=mm, best_f=0.0, objective=IdentityMCObjective())
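All of these tests stub out the model: MockModel and MockPosterior live in botorch.utils.testing and simply replay a canned posterior. A minimal sketch of what such stand-ins might look like (attribute names and behavior here are assumptions for illustration, not the real helpers):

class MockPosterior:
    """Illustrative stand-in: replays fixed mean/variance tensors."""

    def __init__(self, mean=None, variance=None):
        self._mean = mean
        self._variance = variance

    @property
    def mean(self):
        return self._mean

    @property
    def variance(self):
        return self._variance


class MockModel:
    """Illustrative stand-in: returns the canned posterior for any X."""

    def __init__(self, posterior):
        self._posterior = posterior

    def posterior(self, X, **kwargs):
        # ignore X entirely and replay the canned posterior
        return self._posterior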
Example #2
    def test_expected_improvement(self):
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([[-0.5]], device=self.device, dtype=dtype)
            variance = torch.ones(1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))

            # basic test
            module = ExpectedImprovement(model=mm, best_f=0.0)
            X = torch.empty(1, 1, device=self.device, dtype=dtype)  # dummy
            ei = module(X)
            ei_expected = torch.tensor(0.19780,
                                       device=self.device,
                                       dtype=dtype)
            self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))

            # test maximize
            module = ExpectedImprovement(model=mm, best_f=0.0, maximize=False)
            X = torch.empty(1, 1, device=self.device, dtype=dtype)  # dummy
            ei = module(X)
            ei_expected = torch.tensor(0.6978, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))
            with self.assertRaises(UnsupportedError):
                module.set_X_pending(None)

            # test posterior transform (single-output)
            mean = torch.tensor([0.5], device=self.device, dtype=dtype)
            covar = torch.tensor([[0.16]], device=self.device, dtype=dtype)
            mvn = MultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([0.5], device=self.device, dtype=dtype)
            transform = ScalarizedPosteriorTransform(weights)
            ei = ExpectedImprovement(model=mm,
                                     best_f=0.0,
                                     posterior_transform=transform)
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            ei_expected = torch.tensor(0.2601, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))

            # test posterior transform (multi-output)
            mean = torch.tensor([[-0.25, 0.5]],
                                device=self.device,
                                dtype=dtype)
            covar = torch.tensor([[[0.5, 0.125], [0.125, 0.5]]],
                                 device=self.device,
                                 dtype=dtype)
            mvn = MultitaskMultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([2.0, 1.0], device=self.device, dtype=dtype)
            transform = ScalarizedPosteriorTransform(weights)
            ei = ExpectedImprovement(model=mm,
                                     best_f=0.0,
                                     posterior_transform=transform)
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            ei_expected = torch.tensor(0.6910, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))
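The hard-coded expectations in these tests follow from the closed form of analytic EI, EI = sigma * (z * Phi(z) + phi(z)) with z = (mu - best_f) / sigma, with the sign of z flipped when maximize=False. A quick sanity check of the 0.19780 and 0.6978 values used above:

import torch
from torch.distributions import Normal

std_normal = Normal(0.0, 1.0)

def analytic_ei(mu, sigma, best_f, maximize=True):
    # closed-form expected improvement for a Gaussian posterior
    u = (mu - best_f) / sigma if maximize else (best_f - mu) / sigma
    u = torch.tensor(u)
    return sigma * (u * std_normal.cdf(u) + std_normal.log_prob(u).exp())

print(analytic_ei(-0.5, 1.0, 0.0).item())                  # ~0.19780
print(analytic_ei(-0.5, 1.0, 0.0, maximize=False).item())  # ~0.69780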
Example #3
    def untransform_posterior(self, posterior: Posterior) -> Posterior:
        r"""Un-standardize the posterior.

        Args:
            posterior: A posterior in the standardized space.

        Returns:
            The un-standardized posterior. If the input posterior is a MVN,
            the transformed posterior is again an MVN.
        """
        if self._outputs is not None:
            raise NotImplementedError(
                "Standardize does not yet support output selection for "
                "untransform_posterior"
            )
        if self._m != posterior.event_shape[-1]:
            raise RuntimeError(
                "Incompatible output dimensions encountered for transform "
                f"{self._m} and posterior {posterior.event_shape[-1]}"
            )
        if not isinstance(posterior, GPyTorchPosterior):
            # fall back to TransformedPosterior
            return TransformedPosterior(
                posterior=posterior,
                sample_transform=lambda s: self.means + self.stdvs * s,
                mean_transform=lambda m, v: self.means + self.stdvs * m,
                variance_transform=lambda m, v: self._stdvs_sq * v,
            )
        # GPyTorchPosterior (TODO: Should we Lazy-evaluate the mean here as well?)
        mvn = posterior.mvn
        offset = self.means
        scale_fac = self.stdvs
        if not posterior._is_mt:
            mean_tf = offset.squeeze(-1) + scale_fac.squeeze(-1) * mvn.mean
            scale_fac = scale_fac.squeeze(-1).expand_as(mean_tf)
        else:
            mean_tf = offset + scale_fac * mvn.mean
            reps = mean_tf.shape[-2:].numel() // scale_fac.size(-1)
            scale_fac = scale_fac.squeeze(-2)
            if mvn._interleaved:
                scale_fac = scale_fac.repeat(*[1 for _ in scale_fac.shape[:-1]], reps)
            else:
                scale_fac = torch.repeat_interleave(scale_fac, reps, dim=-1)

        if (
            not mvn.islazy
            # TODO: Figure out attribute naming weirdness here
            or mvn._MultivariateNormal__unbroadcasted_scale_tril is not None
        ):
            # if already computed, we can save a lot of time using scale_tril
            covar_tf = CholLazyTensor(mvn.scale_tril * scale_fac.unsqueeze(-1))
        else:
            lcv = mvn.lazy_covariance_matrix
            # allow batch-evaluation of the model
            scale_mat = DiagLazyTensor(scale_fac.expand(lcv.shape[:-1]))
            covar_tf = scale_mat @ lcv @ scale_mat

        kwargs = {"interleaved": mvn._interleaved} if posterior._is_mt else {}
        mvn_tf = mvn.__class__(mean=mean_tf, covariance_matrix=covar_tf, **kwargs)
        return GPyTorchPosterior(mvn_tf)
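A hedged usage sketch for context, assuming the method above is Standardize.untransform_posterior from botorch.models.transforms.outcome (the model-fitting step is elided):

import torch
from botorch.models.transforms.outcome import Standardize

Y = 5.0 + 3.0 * torch.randn(20, 1)
tf = Standardize(m=1)
Y_std, _ = tf(Y)   # standardizes Y and records the means/stdvs used above
tf.eval()          # freeze the transform before un-transforming
# given `posterior_std` from a model fit on Y_std:
# posterior = tf.untransform_posterior(posterior_std)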
Example #4
def _get_test_posterior(device, n=3, dtype=torch.float, batched=False):
    mean = torch.zeros(n, device=device, dtype=dtype)
    cov = torch.eye(n, device=device, dtype=dtype)
    if batched:
        cov = cov.repeat(3, 1, 1)
    mvn = MultivariateNormal(mean, cov)
    return GPyTorchPosterior(mvn)
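A quick usage sketch; the expected sample shape is my assumption, based on botorch appending a singleton output dimension for single-output posteriors:

posterior = _get_test_posterior(torch.device("cpu"), n=3, batched=True)
samples = posterior.rsample(sample_shape=torch.Size([16]))
# assumed shape: torch.Size([16, 3, 3, 1]) -- sample x batch x n x output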
Example #5
 def test_unsupported_dimension(self):
     sampler = SobolQMCNormalSampler(num_samples=2)
     mean = torch.zeros(1112)
     cov = DiagLazyTensor(torch.ones(1112))
     mvn = MultivariateNormal(mean, cov)
     posterior = GPyTorchPosterior(mvn)
     with self.assertRaises(UnsupportedError) as e:
         sampler(posterior)
         self.assertIn("Requested: 1112", str(e.exception))
Example #6
 def posterior(
     self, X: Tensor, observation_noise: bool = False
 ) -> GPyTorchPosterior:
     m_shape = X.shape[:-1]
     r_shape = list(X.shape[:-2]) + [1, 1]
     mvn = MultivariateNormal(
         mean=torch.zeros(m_shape, dtype=X.dtype, device=X.device),
         covariance_matrix=torch.eye(
             m_shape[-1], dtype=X.dtype, device=X.device
         ).repeat(r_shape),
     )
     return GPyTorchPosterior(mvn)
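The shape bookkeeping is the only subtle part of this mock: for a batched input X of shape batch x q x d it produces a mean of shape batch x q and an identity covariance of shape batch x q x q:

X = torch.rand(4, 3, 2)                        # batch=4, q=3, d=2
m_shape = X.shape[:-1]                         # torch.Size([4, 3])
r_shape = list(X.shape[:-2]) + [1, 1]          # [4, 1, 1]
cov = torch.eye(m_shape[-1]).repeat(r_shape)   # torch.Size([4, 3, 3])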
Example #7
 def test_unsupported_dimension(self):
     sampler = SobolQMCNormalSampler(num_samples=2)
     maxdim = torch.quasirandom.SobolEngine.MAXDIM + 1
     mean = torch.zeros(maxdim)
     cov = DiagLazyTensor(torch.ones(maxdim))
     mvn = MultivariateNormal(mean, cov)
     posterior = GPyTorchPosterior(mvn)
     with self.assertRaises(UnsupportedError) as e:
         sampler(posterior)
         self.assertIn(f"Requested: {maxdim}", str(e.exception))
Example #8
def _get_test_posterior(shape, device, dtype, interleaved=True, lazy=False):
    mean = torch.rand(shape, device=device, dtype=dtype)
    n_covar = shape[-2:].numel()
    diag = torch.rand(shape, device=device, dtype=dtype)
    diag = diag.view(*diag.shape[:-2], n_covar)
    a = torch.rand(*shape[:-2], n_covar, n_covar, device=device, dtype=dtype)
    covar = a @ a.transpose(-1, -2) + torch.diag_embed(diag)
    if lazy:
        covar = NonLazyTensor(covar)
    if shape[-1] == 1:
        mvn = MultivariateNormal(mean.squeeze(-1), covar)
    else:
        mvn = MultitaskMultivariateNormal(mean, covar, interleaved=interleaved)
    return GPyTorchPosterior(mvn)
Example #9
 def posterior(
     self,
     X: Tensor,
     observation_noise: bool = False,
     posterior_transform: Optional[PosteriorTransform] = None,
 ) -> Posterior:
     m_shape = X.shape[:-1]
     r_shape = list(X.shape[:-2]) + [1, 1]
     mvn = MultivariateNormal(
         mean=torch.zeros(m_shape, dtype=X.dtype, device=X.device),
         covariance_matrix=torch.eye(m_shape[-1],
                                     dtype=X.dtype,
                                     device=X.device).repeat(r_shape),
     )
     if self.num_outputs > 1:
         mvn = MultitaskMultivariateNormal.from_independent_mvns(
             mvns=[mvn] * self.num_outputs
         )
     posterior = GPyTorchPosterior(mvn)
     if posterior_transform is not None:
         return posterior_transform(posterior)
     return posterior
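For the multi-output branch, from_independent_mvns stacks num_outputs copies of the single-output MVN along a new task dimension; a small shape check (illustrative, assuming two outputs):

mvn = MultivariateNormal(torch.zeros(4, 3), torch.eye(3).repeat(4, 1, 1))
mt = MultitaskMultivariateNormal.from_independent_mvns(mvns=[mvn, mvn])
# mt.mean has shape torch.Size([4, 3, 2]): batch x q x num_outputs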
Example #10
def _get_posterior_batched(cuda=False, dtype=torch.float):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    mean = torch.zeros(3, 2, device=device, dtype=dtype)
    cov = torch.eye(2, device=device, dtype=dtype).repeat(3, 1, 1)
    mvn = MultivariateNormal(mean, cov)
    return GPyTorchPosterior(mvn)
Example #11
    def posterior(self, X, observation_noise=False, **kwargs):
        self.eval()
        X = X.to(self.likelihood.noise.dtype)
        mvn = self(X)

        return GPyTorchPosterior(mvn)
Example #12
def _get_posterior_batched(device, dtype=torch.float):
    mean = torch.zeros(3, 2, device=device, dtype=dtype)
    cov = torch.eye(2, device=device, dtype=dtype).repeat(3, 1, 1)
    mvn = MultivariateNormal(mean, cov)
    return GPyTorchPosterior(mvn)
Example #13
def _get_posterior(device, dtype=torch.float):
    mean = torch.zeros(2, device=device, dtype=dtype)
    cov = torch.eye(2, device=device, dtype=dtype)
    mvn = MultivariateNormal(mean, cov)
    return GPyTorchPosterior(mvn)
Example #14
 def _make_gpytorch_posterior(self, shape, dtype):
     mean = torch.rand(*shape, dtype=dtype, device=self.device)
     variance = 1 + torch.rand(*shape, dtype=dtype, device=self.device)
     covar = torch.diag_embed(variance)
     mvn = MultivariateNormal(mean, lazify(covar))
     return GPyTorchPosterior(mvn=mvn)
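For orientation, the posterior this helper builds is a diagonal Gaussian; a quick check of the returned shapes (the trailing output dimension is an assumption based on botorch's single-output convention):

mean = torch.rand(5, dtype=torch.double)
covar = torch.diag_embed(1 + torch.rand(5, dtype=torch.double))
p = GPyTorchPosterior(mvn=MultivariateNormal(mean, lazify(covar)))
# assumed: p.mean.shape == p.variance.shape == torch.Size([5, 1])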