# Example 1
    def test_inv_quad_many_vectors(self):
        """Compare stochastic inv_quad on a batch of matrices against the exact
        value computed via explicit inverses, for both forward and backward passes.
        """
        # Forward pass: invert every matrix in the batch explicitly, then form
        # the quadratic term v^T A^{-1} v and reduce over the last two dims.
        batch_shape = self.mats_clone.shape
        flat_mats = self.mats_clone.view(-1, *batch_shape[-2:])
        explicit_invs = torch.stack([m.inverse() for m in flat_mats]).view(batch_shape)
        actual_inv_quad = (explicit_invs @ self.vecs_clone * self.vecs_clone).sum(dim=(-2, -1))

        # Stochastic estimate: many trace samples keep the variance small enough
        # for the loose 1e-1 tolerance below.
        with gpytorch.settings.num_trace_samples(2000):
            res_inv_quad = NonLazyTensor(self.mats).inv_quad(self.vecs)

        self.assertEqual(res_inv_quad.shape, actual_inv_quad.shape)
        forward_err = (res_inv_quad - actual_inv_quad).abs().max().item()
        self.assertLess(forward_err, 1e-1)

        # Backward pass: push the same random gradient through both computations
        # and compare the leaf gradients.
        grad_output = torch.randn(2, 3, dtype=torch.float)
        actual_inv_quad.backward(gradient=grad_output)
        res_inv_quad.backward(gradient=grad_output, retain_graph=True)

        mats_grad_err = (self.mats_clone.grad - self.mats.grad).abs().max().item()
        vecs_grad_err = (self.vecs_clone.grad - self.vecs.grad).abs().max().item()
        self.assertLess(mats_grad_err, 1e-1)
        self.assertLess(vecs_grad_err, 1e-1)
# Example 2
    def test_inv_quad_many_vectors(self):
        """Compare stochastic inv_quad on a single matrix against the exact value
        computed with an explicit inverse, for both forward and backward passes.
        """
        # Forward pass: exact value is sum over all vectors of v^T M^{-1} v.
        exact_value = (self.mat_clone.inverse() @ self.vecs_clone * self.vecs_clone).sum()
        with gpytorch.settings.num_trace_samples(1000):
            estimate = NonLazyTensor(self.mat).inv_quad(self.vecs)
        # Loose tolerance (1 decimal place) since the estimate is stochastic.
        self.assertAlmostEqual(estimate.item(), exact_value.item(), places=1)

        # Backward pass through both computations; compare the leaf gradients.
        exact_value.backward()
        estimate.backward(retain_graph=True)

        mat_grad_err = (self.mat_clone.grad - self.mat.grad).abs().max().item()
        vecs_grad_err = (self.vecs_clone.grad - self.vecs.grad).abs().max().item()
        self.assertLess(mat_grad_err, 1e-1)
        self.assertLess(vecs_grad_err, 1e-1)