Example #1
    def test_scalarize_posterior(self):
        for dtype in (torch.float, torch.double):
            offset = torch.rand(1).item()
            for batch_shape in ([], [3]):
                for o in (1, 2):
                    weights = torch.randn(o, device=self.device, dtype=dtype)
                    posterior = _get_test_posterior(
                        batch_shape, self.device, dtype, o=o
                    )
                    mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
                    new_posterior = scalarize_posterior(posterior, weights, offset)
                    exp_size = torch.Size(batch_shape + [1, 1])
                    self.assertEqual(new_posterior.mean.shape, exp_size)
                    new_mean_exp = offset + mean @ weights
                    self.assertTrue(
                        torch.allclose(new_posterior.mean[..., -1], new_mean_exp)
                    )
                    self.assertEqual(new_posterior.variance.shape, exp_size)
                    new_covar_exp = ((covar @ weights) @ weights).unsqueeze(-1)
                    self.assertTrue(
                        torch.allclose(new_posterior.variance[..., -1], new_covar_exp)
                    )
                    # test errors
                    with self.assertRaises(RuntimeError):
                        scalarize_posterior(posterior, weights[:-1], offset)
                    posterior2 = _get_test_posterior(
                        batch_shape, self.device, dtype, q=2, o=o
                    )
                    with self.assertRaises(UnsupportedError):
                        scalarize_posterior(posterior2, weights, offset)
Example #2
    def forward(self, posterior: GPyTorchPosterior) -> GPyTorchPosterior:
        r"""Compute the posterior of the affine transformation.

        Args:
            posterior: A posterior with the same number of outputs as the
                elements in `self.weights`.

        Returns:
            A single-output posterior.
        """
        return scalarize_posterior(posterior=posterior,
                                   weights=self.weights,
                                   offset=self.offset)
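
Example #2 above is the `forward` of an affine posterior transform: it delegates to `scalarize_posterior`, which maps a multi-output Gaussian posterior with mean mu and covariance Sigma to a single-output Gaussian with mean `offset + weights @ mu` and variance `weights @ Sigma @ weights`. These are exactly the quantities the tests compute as `new_mean_exp` and `new_covar_exp`. Below is a minimal torch-only sketch of that identity; the shapes, seed, and Monte Carlo check are illustrative assumptions, not part of the BoTorch API.

import torch

# Minimal sketch of the affine-transform identities checked in the tests above.
# Shapes mirror the q=1 case: a batch of 3 posteriors over m=2 outputs with
# mean `mu` (batch x m) and covariance `Sigma` (batch x m x m).
torch.manual_seed(0)
batch, m = 3, 2
mu = torch.randn(batch, m)
root = torch.randn(batch, m, m)
Sigma = root @ root.transpose(-1, -2) + 0.1 * torch.eye(m)

weights = torch.randn(m)
offset = 0.5

# Scalarized posterior: mean = offset + w^T mu, variance = w^T Sigma w.
scalar_mean = offset + mu @ weights           # shape: (batch,)
scalar_var = (Sigma @ weights) @ weights      # shape: (batch,)

# Monte Carlo sanity check on the first batch element.
mvn = torch.distributions.MultivariateNormal(mu[0], Sigma[0])
samples = offset + mvn.sample(torch.Size([200_000])) @ weights
print(scalar_mean[0].item(), samples.mean().item())  # approximately equal
print(scalar_var[0].item(), samples.var().item())    # approximately equal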
Example #3
    def test_scalarize_posterior(self):
        for batch_shape, m, dtype in itertools.product(
            ([], [3]), (1, 2), (torch.float, torch.double)
        ):
            tkwargs = {"device": self.device, "dtype": dtype}
            offset = torch.rand(1).item()
            weights = torch.randn(m, **tkwargs)
            posterior = _get_test_posterior(batch_shape, m=m, **tkwargs)
            mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
            new_posterior = scalarize_posterior(posterior, weights, offset)
            exp_size = torch.Size(batch_shape + [1, 1])
            self.assertEqual(new_posterior.mean.shape, exp_size)
            new_mean_exp = offset + mean @ weights
            self.assertTrue(torch.allclose(new_posterior.mean[..., -1], new_mean_exp))
            self.assertEqual(new_posterior.variance.shape, exp_size)
            new_covar_exp = ((covar @ weights) @ weights).unsqueeze(-1)
            self.assertTrue(
                torch.allclose(new_posterior.variance[..., -1], new_covar_exp)
            )
            # test errors
            with self.assertRaises(RuntimeError):
                scalarize_posterior(posterior, weights[:-1], offset)
            posterior2 = _get_test_posterior(batch_shape, q=2, m=m, **tkwargs)
            with self.assertRaises(UnsupportedError):
                scalarize_posterior(posterior2, weights, offset)
Example #4
    def test_scalarize_posterior(self):
        for batch_shape, m, lazy, dtype in itertools.product(
            ([], [3]), (1, 2), (False, True), (torch.float, torch.double)):
            tkwargs = {"device": self.device, "dtype": dtype}
            offset = torch.rand(1).item()
            weights = torch.randn(m, **tkwargs)
            # test q=1
            posterior = _get_test_posterior(batch_shape,
                                            m=m,
                                            lazy=lazy,
                                            **tkwargs)
            mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
            new_posterior = scalarize_posterior(posterior, weights, offset)
            exp_size = torch.Size(batch_shape + [1, 1])
            self.assertEqual(new_posterior.mean.shape, exp_size)
            new_mean_exp = offset + (mean @ weights).unsqueeze(-1)
            self.assertTrue(torch.allclose(new_posterior.mean, new_mean_exp))
            self.assertEqual(new_posterior.variance.shape, exp_size)
            new_covar_exp = ((covar @ weights) @ weights).unsqueeze(-1)
            self.assertTrue(
                torch.allclose(new_posterior.variance[..., -1], new_covar_exp))
            # test q=2, interleaved
            q = 2
            posterior = _get_test_posterior(batch_shape,
                                            q=q,
                                            m=m,
                                            lazy=lazy,
                                            interleaved=True,
                                            **tkwargs)
            mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
            new_posterior = scalarize_posterior(posterior, weights, offset)
            exp_size = torch.Size(batch_shape + [q, 1])
            self.assertEqual(new_posterior.mean.shape, exp_size)
            new_mean_exp = offset + (mean @ weights).unsqueeze(-1)
            self.assertTrue(torch.allclose(new_posterior.mean, new_mean_exp))
            self.assertEqual(new_posterior.variance.shape, exp_size)
            new_covar = new_posterior.mvn.covariance_matrix
            if m == 1:
                self.assertTrue(torch.allclose(new_covar, weights**2 * covar))
            else:
                w = weights.unsqueeze(0)
                covar00_exp = (w * covar[..., :m, :m] * w.t()).sum(-1).sum(-1)
                self.assertTrue(
                    torch.allclose(new_covar[..., 0, 0], covar00_exp))
                covarnn_exp = (w * covar[..., -m:, -m:] *
                               w.t()).sum(-1).sum(-1)
                self.assertTrue(
                    torch.allclose(new_covar[..., -1, -1], covarnn_exp))
            # test q=2, non-interleaved
            # test independent special case as well
            for independent in (False, True) if m > 1 else (False, ):
                posterior = _get_test_posterior(batch_shape,
                                                q=q,
                                                m=m,
                                                lazy=lazy,
                                                interleaved=False,
                                                independent=independent,
                                                **tkwargs)
                mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
                new_posterior = scalarize_posterior(posterior, weights, offset)
                exp_size = torch.Size(batch_shape + [q, 1])
                self.assertEqual(new_posterior.mean.shape, exp_size)
                new_mean_exp = offset + (mean @ weights).unsqueeze(-1)
                self.assertTrue(
                    torch.allclose(new_posterior.mean, new_mean_exp))
                self.assertEqual(new_posterior.variance.shape, exp_size)
                new_covar = new_posterior.mvn.covariance_matrix
                if m == 1:
                    self.assertTrue(
                        torch.allclose(new_covar, weights**2 * covar))
                else:
                    # construct the indices manually
                    cs = list(
                        itertools.combinations_with_replacement(range(m), 2))
                    idx_nlzd = torch.tensor(
                        list(set(cs + [tuple(i[::-1]) for i in cs])),
                        dtype=torch.long,
                        device=self.device,
                    )
                    w = weights[idx_nlzd[:, 0]] * weights[idx_nlzd[:, 1]]
                    idx = q * idx_nlzd
                    covar00_exp = (covar[..., idx[:, 0], idx[:, 1]] *
                                   w).sum(-1)
                    self.assertTrue(
                        torch.allclose(new_covar[..., 0, 0], covar00_exp))
                    idx_ = q - 1 + idx
                    covarnn_exp = (covar[..., idx_[:, 0], idx_[:, 1]] *
                                   w).sum(-1)
                    self.assertTrue(
                        torch.allclose(new_covar[..., -1, -1], covarnn_exp))

            # test errors
            with self.assertRaises(RuntimeError):
                scalarize_posterior(posterior, weights[:-1], offset)
            with self.assertRaises(BotorchTensorDimensionError):
                scalarize_posterior(posterior, weights.unsqueeze(0), offset)
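
The non-interleaved branch of Example #4 constructs covariance indices by hand because the joint covariance of a multi-task Gaussian can be laid out in two ways: interleaved, where the row index is point * m + output, or non-interleaved, where the row index is output * q + point. The torch-only sketch below (illustrative names and shapes, not BoTorch API) checks that the scalarized cross-covariance between two points is the same under either layout.

import torch

torch.manual_seed(0)
q, m = 2, 2                      # two data points, two outputs
weights = torch.randn(m)

# Joint covariance in interleaved layout: row index = point * m + output.
root = torch.randn(q * m, q * m)
covar_inter = root @ root.t() + 0.1 * torch.eye(q * m)

# Re-index into non-interleaved layout: row index = output * q + point.
idx = torch.tensor([pt * m + out for out in range(m) for pt in range(q)])
covar_nonint = covar_inter[idx][:, idx]

# Scalarized covariance between points a and b is w^T C_ab w, where C_ab is
# the m x m cross-covariance block for those points in either layout.
a, b = 0, 1
block_inter = covar_inter[a * m:(a + 1) * m, b * m:(b + 1) * m]
block_nonint = covar_nonint[a::q, b::q]
print(torch.allclose(weights @ block_inter @ weights,
                     weights @ block_nonint @ weights))  # True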