示例#1
0
 def test_lkj_covariance_prior_validate_args(self):
     """Constructor validation: non-integer n and non-positive eta raise."""
     sd_prior = SmoothedBoxPrior(exp(-1), exp(1), validate_args=True)
     # A well-formed prior constructs without error.
     LKJCovariancePrior(2, 1.0, sd_prior)
     # Each invalid (n, eta) pair must be rejected when validating args.
     for bad_n, bad_eta in ((1.5, 1.0), (2, -1.0)):
         with self.assertRaises(ValueError):
             LKJCovariancePrior(bad_n, bad_eta, sd_prior, validate_args=True)
示例#2
0
    def test_lkj_covariance_prior_log_prob_hetsd(self, cuda=False):
        """log_prob with heteroskedastic (per-dimension) sd bounds."""
        device = torch.device("cuda") if cuda else torch.device("cpu")
        lower = torch.tensor([exp(-1), exp(-2)], device=device)
        upper = torch.tensor([exp(1), exp(2)], device=device)
        sd_prior = SmoothedBoxPrior(lower, upper, log_transform=True)
        prior = LKJCovariancePrior(2, torch.tensor(0.5, device=device),
                                   sd_prior)
        self.assertFalse(prior.log_transform)

        # Single covariance matrix against a known reference value.
        cov = torch.eye(2, device=device)
        self.assertAlmostEqual(prior.log_prob(cov).item(), -4.71958, places=4)

        # Batched input yields one log-prob per matrix.
        batch = torch.stack(
            [cov, torch.tensor([[1.0, 0.5], [0.5, 1]], device=cov.device)])
        expected = torch.tensor([-4.71958, -4.57574], device=cov.device)
        self.assertTrue(approx_equal(prior.log_prob(batch), expected))

        # A matrix of the wrong event size is rejected.
        with self.assertRaises(ValueError):
            prior.log_prob(torch.eye(3, device=device))

        # For eta=1.0 log_prob is flat over all covariance matrices
        flat_prior = LKJCovariancePrior(2, torch.tensor(1.0, device=device),
                                        sd_prior)
        marginal_sd = torch.diagonal(batch, dim1=-2, dim2=-1).sqrt()
        expected_flat = (flat_prior.correlation_prior.C
                         + flat_prior.sd_prior.log_prob(marginal_sd))
        self.assertTrue(
            approx_equal(flat_prior.log_prob(batch), expected_flat))
示例#3
0
    def test_lkj_prior_sample(self):
        """Samples are symmetric PSD matrices with the expected batch shape."""
        prior = LKJCovariancePrior(
            2, 0.5, sd_prior=SmoothedBoxPrior(exp(-1), exp(1)))
        samples = prior.sample(torch.Size((6, )))

        # Positive semi-definite: smallest eigenvalue must be non-negative.
        smallest_eigval = torch.linalg.eigh(samples)[0].min()
        self.assertTrue(smallest_eigval >= 0)

        # Symmetric up to numerical tolerance.
        asymmetry = (samples - samples.transpose(-1, -2)).abs().max()
        self.assertLess(asymmetry, 1e-4)

        self.assertEqual(samples.shape, torch.Size((6, 2, 2)))
示例#4
0
 def test_lkj_covariance_prior_to_gpu(self):
     """Moving the prior to CUDA moves its constituent tensors along."""
     if not torch.cuda.is_available():
         return
     sd_prior = SmoothedBoxPrior(exp(-1), exp(1))
     prior = LKJCovariancePrior(2, 1.0, sd_prior).cuda()
     # Every tensor attribute of the composite prior must live on the GPU.
     for tensor in (prior.correlation_prior.eta,
                    prior.correlation_prior.C,
                    prior.sd_prior.a):
         self.assertEqual(tensor.device.type, "cuda")
示例#5
0
    def test_lkj_covariance_prior_batch_log_prob(self, cuda=False):
        """Batched-eta log_prob matches LKJCholesky + sd-prior decomposition."""
        device = torch.device("cuda") if cuda else torch.device("cpu")
        ones = torch.ones(2, 1, device=device)
        sd_prior = SmoothedBoxPrior(exp(-1) * ones, exp(1) * ones)
        prior = LKJCovariancePrior(2, torch.tensor([0.5, 1.5], device=device),
                                   sd_prior)
        corr_dist = LKJCholesky(2, torch.tensor([0.5, 1.5], device=device))

        # Identity covariance: it equals its own Cholesky factor, so it can
        # be fed to the correlation distribution directly.
        cov = torch.eye(2, device=device)
        expected = corr_dist.log_prob(cov) + sd_prior.log_prob(cov.diag())
        self.assertLessEqual((prior.log_prob(cov) - expected).abs().sum(),
                             1e-4)

        # Batched covariances: factor explicitly before the comparison.
        batch = torch.stack(
            [cov, torch.tensor([[1.0, 0.5], [0.5, 1]], device=cov.device)])
        chol = torch.linalg.cholesky(batch)
        expected = corr_dist.log_prob(chol) + sd_prior.log_prob(
            torch.diagonal(batch, dim1=-2, dim2=-1))
        self.assertLessEqual((prior.log_prob(batch) - expected).abs().sum(),
                             1e-4)
示例#6
0
    def test_lkj_covariance_prior_log_prob_hetsd(self, cuda=False):
        """log_prob with heteroskedastic sd bounds matches the manual
        LKJCholesky-correlation + sd-prior decomposition."""
        device = torch.device("cuda") if cuda else torch.device("cpu")
        a = torch.tensor([exp(-1), exp(-2)], device=device)
        b = torch.tensor([exp(1), exp(2)], device=device)
        sd_prior = SmoothedBoxPrior(a, b)
        prior = LKJCovariancePrior(2, torch.tensor(0.5, device=device),
                                   sd_prior)
        corr_dist = LKJCholesky(2, torch.tensor(0.5, device=device))

        # Identity covariance equals its own Cholesky factor, so it can be
        # passed to the correlation distribution directly.
        S = torch.eye(2, device=device)
        dist_log_prob = corr_dist.log_prob(S) + sd_prior.log_prob(
            S.diag()).sum()
        # Compare as Python floats: assertAlmostEqual rounds its operands,
        # which is not well-defined on torch tensors (cf. the .item() usage
        # in the single-matrix reference test).
        self.assertAlmostEqual(prior.log_prob(S).item(),
                               dist_log_prob.item(), places=4)

        # Batched covariances: factor explicitly before comparing.
        S = torch.stack(
            [S, torch.tensor([[1.0, 0.5], [0.5, 1]], device=S.device)])
        S_chol = torch.linalg.cholesky(S)
        dist_log_prob = corr_dist.log_prob(S_chol) + sd_prior.log_prob(
            torch.diagonal(S, dim1=-2, dim2=-1))
        self.assertTrue(approx_equal(prior.log_prob(S), dist_log_prob))
 def __init__(self, train_x, train_y, likelihood):
     """Set up the mean, data kernel, and LKJ-regularized task kernel."""
     super(HadamardMultitaskGPModel, self).__init__(train_x, train_y,
                                                    likelihood)
     # Constant mean; default bounds on the mean are (-1e10, 1e10).
     self.mean_module = ConstantMean()
     # Standard RBF kernel over the inputs.
     self.covar_module = RBFKernel()
     # LKJ prior over the 2x2 inter-task covariance; marginal sds are
     # smoothed-box bounded in log space.
     task_prior = LKJCovariancePrior(
         n=2,
         eta=1,
         sd_prior=SmoothedBoxPrior(exp(-4), exp(4), log_transform=True),
     )
     # Rank-1 IndexKernel for 2 tasks
     # (so we'll actually learn 2x2=4 tasks with correlations)
     self.task_covar_module = IndexKernel(num_tasks=2,
                                          rank=1,
                                          prior=task_prior)
示例#8
0
    def test_lkj_covariance_prior_batch_log_prob(self, cuda=False):
        """Batched eta: one log-prob per eta, against precomputed references."""
        device = torch.device("cuda") if cuda else torch.device("cpu")
        ones = torch.ones(2, 1, device=device)
        sd_prior = SmoothedBoxPrior(exp(-1) * ones, exp(1) * ones)
        prior = LKJCovariancePrior(
            2, torch.tensor([0.5, 1.5], device=device), sd_prior)

        # Identity covariance against known reference values.
        cov = torch.eye(2, device=device)
        expected = torch.tensor([-3.59981, -2.21351], device=cov.device)
        self.assertTrue(approx_equal(prior.log_prob(cov), expected))

        # Batch of covariances.
        batch = torch.stack(
            [cov, torch.tensor([[1.0, 0.5], [0.5, 1]], device=cov.device)])
        expected = torch.tensor([-3.59981, -2.35735], device=cov.device)
        self.assertTrue(approx_equal(prior.log_prob(batch), expected))

        # A matrix of the wrong event size is rejected.
        with self.assertRaises(ValueError):
            prior.log_prob(torch.eye(3, device=device))