def test_batch_matmul(self):
    """Check that GMaternKernel agrees with MaternKernel under batched matmul.

    Builds a (3, 2, 100, 3) batched input on CUDA, evaluates both kernels'
    lazy covariance against the same right-hand side, and asserts the results
    agree to within 1e-4 in Frobenius norm.
    """
    if not torch.cuda.is_available():
        # skipTest (instead of a bare return) so the runner reports this as
        # skipped rather than silently passing without exercising anything
        self.skipTest("CUDA is not available")
    # Keep tensor creation order identical so RNG draws are unchanged
    x1 = torch.randn(3, 2, 100, 3).cuda()
    kern1 = MaternKernel(nu=2.5).cuda()
    kern2 = GMaternKernel(nu=2.5).cuda()
    rhs = torch.randn(3, 2, 100, 1).cuda()
    res1 = kern1(x1, x1).matmul(rhs)
    res2 = kern2(x1, x1).matmul(rhs)
    self.assertLess(torch.norm(res1 - res2), 1e-4)
def forward_x1_eq_x2(self, nu):
    """Check MaternKernel vs GMaternKernel on identical inputs for a given nu.

    Helper shared by per-nu test methods: evaluates both kernels on the same
    (100, 3) CUDA input with x1 == x2 and asserts the dense covariance
    matrices agree to within 1e-4 in Frobenius norm.

    :param nu: smoothness parameter passed to both kernels.
    """
    if not torch.cuda.is_available():
        # skipTest (instead of a bare return) so the runner reports this as
        # skipped rather than silently passing without exercising anything
        self.skipTest("CUDA is not available")
    x1 = torch.randn(100, 3).cuda()
    kern1 = MaternKernel(nu=nu).cuda()
    kern2 = GMaternKernel(nu=nu).cuda()
    k1 = kern1(x1, x1).evaluate()
    k2 = kern2(x1, x1).evaluate()
    self.assertLess(torch.norm(k1 - k2), 1e-4)
def create_kernel_ard(self, num_dims, **kwargs):
    """Construct a nu=2.5 Matern kernel with ARD over ``num_dims`` dimensions.

    :param num_dims: number of input dimensions (one lengthscale each).
    :param kwargs: forwarded verbatim to the ``MaternKernel`` constructor.
    :return: the configured ``MaternKernel`` instance.
    """
    return MaternKernel(nu=2.5, ard_num_dims=num_dims, **kwargs)
def create_kernel_no_ard(self, **kwargs):
    """Construct a nu=2.5 Matern kernel with a single shared lengthscale.

    :param kwargs: forwarded verbatim to the ``MaternKernel`` constructor.
    :return: the configured ``MaternKernel`` instance.
    """
    return MaternKernel(nu=2.5, **kwargs)