Example #1
import torch
from torch.autograd import Variable  # pre-0.4 PyTorch API, matching this snippet's era

# NonLazyVariable lives here in the pre-1.0 gpytorch these tests target
from gpytorch.lazy import NonLazyVariable


def test_root_decomposition_forward():
    # Build a random symmetric positive semi-definite matrix A = B B^T.
    a = torch.randn(5, 5)
    a = torch.matmul(a, a.t())

    a_lv = NonLazyVariable(Variable(a, requires_grad=True))
    a_root = a_lv.root_decomposition()

    # The root R should reconstruct the original matrix: R R^T ~= A.
    assert torch.max(
        (a_root.matmul(a_root.transpose(-1, -2)).data - a).abs()) < 1e-2
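
For reference, the property being asserted (R R^T reconstructing A) can be checked without gpytorch at all. The sketch below is a minimal stand-in that uses torch.linalg.cholesky from current PyTorch as an exact root; the diagonal shift is an assumption added here to keep the random matrix strictly positive definite:

import torch

a = torch.randn(5, 5)
a = a @ a.t() + 5 * torch.eye(5)  # shift keeps a strictly positive definite

# Cholesky gives an exact lower-triangular root: root @ root.t() == a
root = torch.linalg.cholesky(a)
assert (root @ root.t() - a).abs().max() < 1e-4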
Example #2
    # Assumes the same imports as above, plus gpytorch's test helper:
    # from gpytorch.utils import approx_equal
    def test_root_decomposition_backward(self):
        a = torch.Tensor([
            [5.0212, 0.5504, -0.1810, 1.5414, 2.9611],
            [0.5504, 2.8000, 1.9944, 0.6208, -0.8902],
            [-0.1810, 1.9944, 3.0505, 1.0790, -1.1774],
            [1.5414, 0.6208, 1.0790, 2.9430, 0.4170],
            [2.9611, -0.8902, -1.1774, 0.4170, 3.3208],
        ])

        # Backpropagate through the root decomposition: trace(R R^T).
        a_var = Variable(a, requires_grad=True)
        a_lv = NonLazyVariable(a_var)
        a_root = a_lv.root_decomposition()
        res = a_root.matmul(a_root.transpose(-1, -2))
        res.trace().backward()

        # Reference path: trace(A) directly, whose gradient is the identity.
        a_var_copy = Variable(a, requires_grad=True)
        a_var_copy.trace().backward()

        # Since R R^T reconstructs A, both paths must yield the same gradient.
        self.assertTrue(approx_equal(a_var.grad.data, a_var_copy.grad.data))
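
Why the two gradients agree: since R R^T reconstructs A, trace(R R^T) equals trace(A), whose gradient with respect to A is the identity matrix, so backpropagating through the root decomposition must land on the same gradient as the direct path. Below is a minimal sketch of the same check using torch.linalg.cholesky in place of gpytorch's root_decomposition (an assumed stand-in, not the library's code path):

import torch

a = torch.randn(5, 5)
a = a @ a.t() + 5 * torch.eye(5)  # strictly positive definite input

a_direct = a.clone().requires_grad_(True)
a_via_root = a.clone().requires_grad_(True)

# Direct path: d trace(A) / dA is the identity matrix.
a_direct.trace().backward()

# Root path: trace(L L^T) == trace(A), differentiated through the factorization.
root = torch.linalg.cholesky(a_via_root)
(root @ root.t()).trace().backward()

assert torch.allclose(a_direct.grad, a_via_root.grad, atol=1e-4)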