Example #1
0
 def test_affine_acquisition_objective(self):
     """Check that ScalarizedObjective applies the expected affine transform
     to a multi-output posterior, validates its weights, and evaluates
     outcomes directly."""
     shapes = ([], [3])
     output_dims = (1, 2)
     dtypes = (torch.float, torch.double)
     for batch_shape, m, dtype in itertools.product(shapes, output_dims, dtypes):
         tkwargs = {"device": self.device, "dtype": dtype}
         offset = torch.rand(1).item()
         weights = torch.randn(m, **tkwargs)
         objective = ScalarizedObjective(weights=weights, offset=offset)
         posterior = _get_test_posterior(batch_shape, m=m, **tkwargs)
         mu = posterior.mvn.mean
         cov = posterior.mvn.covariance_matrix
         transformed = objective(posterior)
         expected_shape = torch.Size(batch_shape + [1, 1])
         # mean of the scalarized posterior is the affine map offset + mu @ w
         self.assertEqual(transformed.mean.shape, expected_shape)
         expected_mean = offset + mu @ weights
         self.assertTrue(
             torch.allclose(transformed.mean[..., -1], expected_mean)
         )
         # variance is the quadratic form w^T C w
         self.assertEqual(transformed.variance.shape, expected_shape)
         expected_var = ((cov @ weights) @ weights).unsqueeze(-1)
         self.assertTrue(
             torch.allclose(transformed.variance[..., -1], expected_var)
         )
         # a 2-d weight tensor must be rejected
         with self.assertRaises(ValueError):
             ScalarizedObjective(weights=torch.rand(2, m))
         # evaluate applies the same affine map directly to outcome tensors
         Y = torch.rand(2, m, **tkwargs)
         self.assertTrue(torch.equal(objective.evaluate(Y), offset + Y @ weights))
Example #2
0
    def test_scalarize_posterior(self):
        """Test `scalarize_posterior` for q=1, interleaved q=2, and
        non-interleaved q=2 posteriors (including the independent-outputs
        special case).

        Scalarizing a posterior with mean `mu` and covariance `C` under
        weights `w` and an `offset` should yield mean `offset + mu @ w` and
        variance `w^T C w`.
        """
        for batch_shape, m, lazy, dtype in itertools.product(
            ([], [3]), (1, 2), (False, True), (torch.float, torch.double)):
            tkwargs = {"device": self.device, "dtype": dtype}
            offset = torch.rand(1).item()
            weights = torch.randn(m, **tkwargs)
            # test q=1
            posterior = _get_test_posterior(batch_shape,
                                            m=m,
                                            lazy=lazy,
                                            **tkwargs)
            mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
            new_posterior = scalarize_posterior(posterior, weights, offset)
            # scalarization collapses the m outputs into a single output
            exp_size = torch.Size(batch_shape + [1, 1])
            self.assertEqual(new_posterior.mean.shape, exp_size)
            new_mean_exp = offset + (mean @ weights).unsqueeze(-1)
            self.assertTrue(torch.allclose(new_posterior.mean, new_mean_exp))
            self.assertEqual(new_posterior.variance.shape, exp_size)
            # expected variance is the quadratic form w^T C w
            new_covar_exp = ((covar @ weights) @ weights).unsqueeze(-1)
            self.assertTrue(
                torch.allclose(new_posterior.variance[..., -1], new_covar_exp))
            # test q=2, interleaved
            q = 2
            posterior = _get_test_posterior(batch_shape,
                                            q=q,
                                            m=m,
                                            lazy=lazy,
                                            interleaved=True,
                                            **tkwargs)
            mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
            new_posterior = scalarize_posterior(posterior, weights, offset)
            exp_size = torch.Size(batch_shape + [q, 1])
            self.assertEqual(new_posterior.mean.shape, exp_size)
            new_mean_exp = offset + (mean @ weights).unsqueeze(-1)
            self.assertTrue(torch.allclose(new_posterior.mean, new_mean_exp))
            self.assertEqual(new_posterior.variance.shape, exp_size)
            new_covar = new_posterior.mvn.covariance_matrix
            if m == 1:
                # single output: scalarization just scales covariance by w^2
                self.assertTrue(torch.allclose(new_covar, weights**2 * covar))
            else:
                # interleaved layout: the m x m output block for q-point t is
                # the contiguous sub-matrix covar[..., t*m:(t+1)*m, t*m:(t+1)*m]
                w = weights.unsqueeze(0)
                covar00_exp = (w * covar[..., :m, :m] * w.t()).sum(-1).sum(-1)
                self.assertTrue(
                    torch.allclose(new_covar[..., 0, 0], covar00_exp))
                covarnn_exp = (w * covar[..., -m:, -m:] *
                               w.t()).sum(-1).sum(-1)
                self.assertTrue(
                    torch.allclose(new_covar[..., -1, -1], covarnn_exp))
            # test q=2, non-interleaved
            # test independent special case as well
            for independent in (False, True) if m > 1 else (False, ):
                posterior = _get_test_posterior(batch_shape,
                                                q=q,
                                                m=m,
                                                lazy=lazy,
                                                interleaved=False,
                                                independent=independent,
                                                **tkwargs)
                mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
                new_posterior = scalarize_posterior(posterior, weights, offset)
                exp_size = torch.Size(batch_shape + [q, 1])
                self.assertEqual(new_posterior.mean.shape, exp_size)
                new_mean_exp = offset + (mean @ weights).unsqueeze(-1)
                self.assertTrue(
                    torch.allclose(new_posterior.mean, new_mean_exp))
                self.assertEqual(new_posterior.variance.shape, exp_size)
                new_covar = new_posterior.mvn.covariance_matrix
                if m == 1:
                    self.assertTrue(
                        torch.allclose(new_covar, weights**2 * covar))
                else:
                    # construct the indices manually
                    # all (i, j) output index pairs, in both orders
                    cs = list(
                        itertools.combinations_with_replacement(range(m), 2))
                    idx_nlzd = torch.tensor(
                        list(set(cs + [tuple(i[::-1]) for i in cs])),
                        dtype=torch.long,
                        device=self.device,
                    )
                    # weight product w_i * w_j for each output index pair
                    w = weights[idx_nlzd[:, 0]] * weights[idx_nlzd[:, 1]]
                    # NOTE(review): assumes the non-interleaved covariance is
                    # output-major, i.e. the entry for output i at q-point t
                    # sits at row/col i * q + t; idx picks t = 0 -- confirm
                    # against _get_test_posterior
                    idx = q * idx_nlzd
                    covar00_exp = (covar[..., idx[:, 0], idx[:, 1]] *
                                   w).sum(-1)
                    self.assertTrue(
                        torch.allclose(new_covar[..., 0, 0], covar00_exp))
                    # shift to the last q-point (t = q - 1)
                    idx_ = q - 1 + idx
                    covarnn_exp = (covar[..., idx_[:, 0], idx_[:, 1]] *
                                   w).sum(-1)
                    self.assertTrue(
                        torch.allclose(new_covar[..., -1, -1], covarnn_exp))

            # test errors
            # mismatched weight length must raise
            with self.assertRaises(RuntimeError):
                scalarize_posterior(posterior, weights[:-1], offset)
            # weights must be one-dimensional
            with self.assertRaises(BotorchTensorDimensionError):
                scalarize_posterior(posterior, weights.unsqueeze(0), offset)