# Imports assumed from the surrounding test module (paths follow the old
# gpytorch / pre-0.4 PyTorch layout these snippets were written against).
import math

import numpy as np
import torch
from torch.autograd import Variable

from gpytorch.lazy import CholLazyVariable
from gpytorch.random_variables import GaussianRandomVariable
from gpytorch.utils import approx_equal


def test_matmul(self):
    # Forward
    res = CholLazyVariable(self.chol_var).matmul(self.vecs)
    actual = self.actual_mat.matmul(self.vecs_copy)
    self.assertTrue(approx_equal(res, actual))

    # Backward
    grad_output = torch.randn(*self.vecs.size())
    res.backward(gradient=grad_output)
    actual.backward(gradient=grad_output)
    self.assertTrue(approx_equal(self.chol_var.grad.data, self.chol_var_copy.grad.data))
    self.assertTrue(approx_equal(self.vecs.grad.data, self.vecs_copy.grad.data))
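
# All of these tests lean on a fixture that pairs the Cholesky factor
# (self.chol_var) with the dense matrix it reconstructs (self.actual_mat),
# plus independent copies for gradient comparison. A minimal sketch of such
# a setUp -- the sizes and the positive-diagonal construction here are
# assumptions for illustration, not taken from the source:
def setUp(self):
    # Lower-triangular L with a strictly positive diagonal, so that
    # L L^T is a valid positive-definite matrix.
    chol = torch.randn(5, 5).tril()
    chol = chol - chol.diag().diag() + chol.diag().abs().add(1).diag()

    self.chol_var = Variable(chol, requires_grad=True)
    self.chol_var_copy = Variable(chol.clone(), requires_grad=True)
    self.actual_mat = self.chol_var_copy.matmul(self.chol_var_copy.transpose(-1, -2))

    vecs = torch.randn(5, 11)
    self.vecs = Variable(vecs, requires_grad=True)
    self.vecs_copy = Variable(vecs.clone(), requires_grad=True)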

def test_diag(self):
    # Batch variant: self.actual_mat stacks two matrices along dim 0
    res = CholLazyVariable(self.chol_var).diag()
    actual = torch.cat([
        self.actual_mat[0].diag().unsqueeze(0),
        self.actual_mat[1].diag().unsqueeze(0),
    ], 0)
    self.assertTrue(approx_equal(res.data, actual.data))

def test_inv_matmul(self):
    # Forward
    res = CholLazyVariable(self.chol_var).inv_matmul(self.vecs)
    actual = self.actual_mat.inverse().matmul(self.vecs_copy)
    self.assertLess(
        torch.max((res.data - actual.data).abs() / actual.data.norm()), 1e-2
    )
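
# For reference: with K = L L^T, K^{-1} v can be obtained from two
# triangular solves rather than forming K.inverse() explicitly, which is
# the property a Cholesky-based lazy variable can exploit. A sketch in
# plain pre-0.4 torch (torch.trtrs); names here are illustrative only:
L = torch.randn(5, 5).tril() + torch.eye(5) * 5  # nonsingular lower factor
v = torch.randn(5, 3)
y, _ = torch.trtrs(v, L, upper=False)                   # solve L y = v
x, _ = torch.trtrs(y, L, upper=False, transpose=True)   # solve L^T x = y
# x now approximates (L L^T).inverse().matmul(v)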

def test_inv_quad_log_det(self):
    # Forward
    res_inv_quad, res_log_det = CholLazyVariable(self.chol_var).inv_quad_log_det(
        inv_quad_rhs=self.vecs, log_det=True
    )
    res = res_inv_quad + res_log_det
    actual_inv_quad = self.actual_mat.inverse().matmul(self.vecs_copy).mul(self.vecs_copy).sum()
    actual = actual_inv_quad + math.log(np.linalg.det(self.actual_mat.data))
    self.assertLess(((res.data - actual.data) / actual.data).abs()[0], 1e-2)

def test_inv_quad_log_det(self):
    # Forward (batch variant)
    res_inv_quad, res_log_det = CholLazyVariable(self.chol_var).inv_quad_log_det(
        inv_quad_rhs=self.vecs, log_det=True
    )
    res = res_inv_quad + res_log_det
    actual_inv_quad = self.actual_mat_inv.matmul(self.vecs_copy).mul(self.vecs_copy).sum(-1).sum(-1)
    actual_log_det = Variable(torch.Tensor([
        math.log(np.linalg.det(self.actual_mat[0].data)),
        math.log(np.linalg.det(self.actual_mat[1].data)),
    ]))
    actual = actual_inv_quad + actual_log_det
    self.assertLess(torch.max((res.data - actual.data).abs() / actual.data.norm()), 1e-2)
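
# The log-det term these tests check has a closed form under a Cholesky
# factorization: for K = L L^T, log|K| = 2 * sum(log(diag(L))). A quick
# standalone check of that identity (plain torch/numpy, illustrative only):
L = torch.randn(5, 5).tril()
L = L - L.diag().diag() + L.diag().abs().add(1).diag()  # positive diagonal
K = L.matmul(L.t())
log_det_direct = math.log(np.linalg.det(K.numpy()))
log_det_chol = 2 * L.diag().log().sum()
assert abs(log_det_direct - log_det_chol) < 1e-3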

def variational_output(self):
    chol_variational_covar = self.chol_variational_covar

    # Negate each row that has a negative diagonal entry (the Cholesky
    # factor of a matrix must have positive diagonal elements).
    if chol_variational_covar.ndimension() == 2:
        chol_variational_covar = chol_variational_covar.triu()
        inside = chol_variational_covar.diag().sign().unsqueeze(1).expand_as(chol_variational_covar).triu()
    elif chol_variational_covar.ndimension() == 3:
        batch_size, diag_size, _ = chol_variational_covar.size()

        # Batch mode: build an upper-triangular mask shared by every batch entry
        chol_variational_covar_size = list(chol_variational_covar.size())[-2:]
        mask = chol_variational_covar.data.new(*chol_variational_covar_size).fill_(1).triu()
        mask = Variable(mask.unsqueeze(0).expand(*([chol_variational_covar.size(0)] + chol_variational_covar_size)))

        # Gather the diagonal of each batch entry via advanced indexing
        batch_index = chol_variational_covar.data.new(batch_size).long()
        torch.arange(0, batch_size, out=batch_index)
        batch_index = batch_index.unsqueeze(1).repeat(1, diag_size).view(-1)
        diag_index = chol_variational_covar.data.new(diag_size).long()
        torch.arange(0, diag_size, out=diag_index)
        diag_index = diag_index.unsqueeze(1).repeat(batch_size, 1).view(-1)
        diag = chol_variational_covar[batch_index, diag_index, diag_index].view(batch_size, diag_size)

        chol_variational_covar = chol_variational_covar.mul(mask)
        inside = diag.sign().unsqueeze(-1).expand_as(chol_variational_covar).mul(mask)
    else:
        raise RuntimeError("Invalid number of variational covar dimensions")

    chol_variational_covar = inside.mul(chol_variational_covar)
    variational_covar = CholLazyVariable(chol_variational_covar.transpose(-1, -2))
    return GaussianRandomVariable(self.variational_mean, variational_covar)
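
# The sign trick above in isolation: flipping the sign of every row whose
# diagonal entry is negative yields an equivalent triangular factor with a
# nonnegative diagonal. A standalone sketch (illustrative, not library
# code); since the sign matrix squares to the identity, the product
# fixed^T fixed equals raw^T raw:
raw = torch.randn(4, 4).triu()
signs = raw.diag().sign().unsqueeze(1).expand_as(raw)
fixed = signs.mul(raw).triu()
assert (fixed.diag() >= 0).all()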

def test_evaluate(self):
    res = CholLazyVariable(self.chol_var).evaluate()
    actual = self.actual_mat
    self.assertTrue(approx_equal(res.data, actual.data))

def test_getitem(self):
    res = CholLazyVariable(self.chol_var)[2:4, -2]
    actual = self.actual_mat[2:4, -2]
    self.assertTrue(approx_equal(res.data, actual.data))

def test_diag(self):
    # Non-batch variant: self.actual_mat is a single square matrix here
    res = CholLazyVariable(self.chol_var).diag()
    actual = self.actual_mat.diag()
    self.assertTrue(approx_equal(res.data, actual.data))