def test_solve(self):
    """Non-batch Woodbury solve and logdet agree with dense reference computations."""
    n = 100
    train_x = torch.linspace(0, 1, n)
    covar_matrix = RBFKernel()(train_x, train_x).evaluate()
    piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
    woodbury_factor, inv_scale, logdet = woodbury.woodbury_factor(
        piv_chol, piv_chol, torch.ones(100), logdet=True
    )

    # The factorization's logdet should match the dense logdet of L L^T + I.
    dense_logdet = (piv_chol @ piv_chol.transpose(-1, -2) + torch.eye(100)).logdet()
    self.assertTrue(approx_equal(logdet, dense_logdet, 2e-4))

    rhs_vector = torch.randn(100, 50)
    shifted_covar_matrix = covar_matrix + torch.eye(n)
    # Dense reference: (K + I)^{-1} rhs.
    real_solve = shifted_covar_matrix.inverse().matmul(rhs_vector)
    scaled_inv_diag = (inv_scale / torch.ones(100)).unsqueeze(-1)
    approx_solve = woodbury.woodbury_solve(
        rhs_vector,
        piv_chol * scaled_inv_diag,
        woodbury_factor,
        scaled_inv_diag,
        inv_scale,
    )
    self.assertTrue(approx_equal(approx_solve, real_solve, 2e-4))
def test_solve(self):
    """Batch-mode (batch size 2) Woodbury solve matches per-batch dense solves."""
    n = 100
    # Two batches of 1-d inputs covering different ranges.
    train_x = torch.stack(
        [torch.linspace(0, 1, n), torch.linspace(0, 0.5, n)], 0
    ).unsqueeze(-1)
    covar_matrix = RBFKernel()(train_x, train_x).evaluate()
    piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
    woodbury_factor = pivoted_cholesky.woodbury_factor(piv_chol, torch.ones(2, 100))

    rhs_vector = torch.randn(2, 100, 5)
    shifted_covar_matrix = covar_matrix + torch.eye(n)
    # Dense reference: invert each batch entry separately, then re-stack.
    real_solve = torch.stack(
        [shifted_covar_matrix[b].inverse().matmul(rhs_vector[b]) for b in range(2)],
        0,
    )
    approx_solve = pivoted_cholesky.woodbury_solve(
        rhs_vector, piv_chol, woodbury_factor, torch.ones(2, 100)
    )
    self.assertTrue(approx_equal(approx_solve, real_solve, 2e-4))
def test_pivoted_cholesky(self):
    """Rank-10 pivoted Cholesky closely reconstructs a (2, 2, 3)-batched RBF kernel."""
    n = 100
    # Upper endpoints of the twelve linspace inputs (flattened 2 x 2 x 3 batch).
    uppers = [1, 0.5, 0.25, 1.25, 1.5, 1, 0.5, 0.25, 1.25, 1.25, 1.5, 1]
    train_x = torch.stack(
        [torch.linspace(0, u, n) for u in uppers], 0
    ).unsqueeze(-1)
    covar_matrix = RBFKernel()(train_x, train_x).evaluate().view(2, 2, 3, n, n)
    piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
    # The low-rank factor should approximately reproduce the full matrix.
    covar_approx = piv_chol @ piv_chol.transpose(-1, -2)
    self.assertTrue(approx_equal(covar_approx, covar_matrix, 2e-4))
def test_solve(self):
    """Multi-batch (2 x 2 x 3) Woodbury solve and logdet against dense references.

    Builds a batch of RBF kernel matrices from twelve linspace inputs, takes a
    rank-10 pivoted Cholesky factor, and checks that:
      * the logdet returned by ``woodbury.woodbury_factor`` matches the dense
        logdet of ``L L^T + I`` for every batch entry, and
      * ``woodbury.woodbury_solve`` matches an explicit dense inverse solve.
    """
    size = 100
    # Upper endpoints of the twelve linspace inputs (flattened 2 x 2 x 3 batch).
    uppers = [1, 0.5, 0.25, 1.25, 1.5, 1, 0.5, 0.25, 1.25, 1.25, 1.5, 1]
    train_x = torch.stack(
        [torch.linspace(0, u, size) for u in uppers], 0
    ).unsqueeze(-1)
    covar_matrix = RBFKernel()(train_x, train_x).evaluate().view(2, 2, 3, size, size)
    piv_chol = pivoted_cholesky.pivoted_cholesky(covar_matrix, 10)
    woodbury_factor, inv_scale, logdet = woodbury.woodbury_factor(
        piv_chol, piv_chol, torch.ones(2, 2, 3, 100), logdet=True
    )

    # Dense reference logdet, computed matrix-by-matrix over the flattened batch.
    actual_logdet = torch.stack(
        [
            mat.logdet()
            for mat in (
                piv_chol @ piv_chol.transpose(-1, -2) + torch.eye(100)
            ).view(-1, 100, 100)
        ],
        0,
    ).view(2, 2, 3)
    self.assertTrue(approx_equal(logdet, actual_logdet, 2e-4))

    rhs_vector = torch.randn(2, 2, 3, 100, 5)
    shifted_covar_matrix = covar_matrix + torch.eye(size)
    # Dense reference solve, one batch entry at a time over the flattened batch
    # (replaces twelve hand-unrolled inverse().matmul() calls).
    real_solve = torch.stack(
        [
            mat.inverse().matmul(rhs)
            for mat, rhs in zip(
                shifted_covar_matrix.view(-1, size, size),
                rhs_vector.view(-1, 100, 5),
            )
        ],
        0,
    ).view_as(rhs_vector)

    # Fix: the diagonal must match the (2, 2, 3, 100) batch shape passed to
    # woodbury_factor above. The original used torch.ones(2, 3, 100), which
    # only produced the right result through incidental broadcasting.
    scaled_inv_diag = (inv_scale / torch.ones(2, 2, 3, 100)).unsqueeze(-1)
    approx_solve = woodbury.woodbury_solve(
        rhs_vector,
        piv_chol * scaled_inv_diag,
        woodbury_factor,
        scaled_inv_diag,
        inv_scale,
    )
    self.assertTrue(approx_equal(approx_solve, real_solve, 2e-4))
# Smoke-test script: build an updatable GP on a toy 2-d Fourier problem, then
# exercise the lazy covariance tensor (getitem, pivoted Cholesky, CG solve).
lambda0 = 0.5
n_cells_1d = 50
forward_cutoff = 400 # Only make 200 observations (Fourier and pointwise).
my_problem = ToyFourier2d.build_problem(n_cells_1d, forward_cutoff)

# NOTE(review): `kernel`, `sigma0` and `m0` must be defined earlier in the
# file — they are not visible in this chunk.
updatable_gp = UpdatableGP(kernel, lambda0, sigma0, m0,
        torch.tensor(my_problem.grid.cells).float(),
        n_chunks=200)
lazy_cov = UpdatableCovLazyTensor(updatable_gp.covariance)

# Test getitem.
lazy_cov[0:10, 0:10].evaluate()

# Test pivoted Cholesky decomposition.
from gpytorch.utils.pivoted_cholesky import pivoted_cholesky
res = pivoted_cholesky(lazy_cov, max_iter=300, error_tol=0.01)
# Low-rank preconditioner R R^T built from the pivoted Cholesky factor.
preconditioner = MatmulLazyTensor(res, res.t())

# Now test conjugate gradient inversion.
rhs = torch.rand((lazy_cov.n, 1))
ans = linear_cg(lazy_cov.matmul, rhs, tolerance=0.1, max_iter=400,
        preconditioner=preconditioner.matmul)