Example #1
    def create_lazy_tensor(self):
        # Elementwise (Hadamard) product of five batched low-rank PSD
        # matrices, with 0.5 added to the diagonal.
        mat1 = make_random_mat(40, rank=5, batch_size=2)
        mat2 = make_random_mat(40, rank=5, batch_size=2)
        mat3 = make_random_mat(40, rank=5, batch_size=2)
        mat4 = make_random_mat(40, rank=5, batch_size=2)
        mat5 = make_random_mat(40, rank=5, batch_size=2)
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2),
                            RootLazyTensor(mat3), RootLazyTensor(mat4),
                            RootLazyTensor(mat5))
        return res.add_diag(torch.tensor(0.5))
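All of these examples exercise gpytorch's MulLazyTensor, which lazily represents the elementwise (Hadamard) product of its constituent lazy tensors; each RootLazyTensor(mat) stands for the low-rank PSD matrix mat @ mat.T. The snippets share test scaffolding that is not shown here. Below is a minimal sketch of the assumed imports and the make_random_mat helper, reconstructed from its call sites (the exact random distribution is an assumption):

    import torch
    from gpytorch.lazy import MulLazyTensor, RootLazyTensor

    def make_random_mat(size, rank, batch_size=None):
        # Hypothetical reconstruction: a random (optionally batched)
        # size x rank matrix with gradients enabled, so that
        # RootLazyTensor(mat) is a differentiable low-rank PSD matrix.
        if batch_size is None:
            return torch.randn(size, rank, requires_grad=True)
        return torch.randn(batch_size, size, rank, requires_grad=True)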
Example #2
    def create_lazy_tensor(self):
        mat1 = make_random_mat(30, 3)
        mat2 = make_random_mat(30, 3)
        mat3 = make_random_mat(30, 3)
        mat4 = make_random_mat(30, 3)
        mat5 = make_random_mat(30, 3)
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2),
                            RootLazyTensor(mat3), RootLazyTensor(mat4),
                            RootLazyTensor(mat5))
        return res.add_diag(torch.tensor(1.0))
Example #3
    def test_mul_adding_another_variable(self):
        mat1 = make_random_mat(20, rank=4, batch_size=5)
        mat2 = make_random_mat(20, rank=4, batch_size=5)
        mat3 = make_random_mat(20, rank=4, batch_size=5)

        # Independent copies of the root matrices, used to compute the
        # dense reference result
        mat1_copy = mat1.clone().detach().requires_grad_(True)
        mat2_copy = mat2.clone().detach().requires_grad_(True)
        mat3_copy = mat3.clone().detach().requires_grad_(True)

        # Forward
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2))
        res = res * RootLazyTensor(mat3)
        actual = prod(
            [
                mat1_copy.matmul(mat1_copy.transpose(-1, -2)),
                mat2_copy.matmul(mat2_copy.transpose(-1, -2)),
                mat3_copy.matmul(mat3_copy.transpose(-1, -2)),
            ]
        )
        self.assertLess(torch.max(((res.evaluate() - actual) / actual).abs()), 0.01)
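The prod helper used to build the dense reference result here and in the later examples is also not shown in the source. Given how it is called, it is presumably an elementwise-product fold over a list of same-shaped tensors, mirroring MulLazyTensor's semantics; a minimal sketch:

    from functools import reduce

    def prod(tensors):
        # Elementwise (Hadamard) product of a list of same-shaped tensors.
        return reduce(lambda a, b: a * b, tensors)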
Example #4
    def test_matmul_mat_with_two_matrices(self):
        mat1 = make_random_mat(20, 5)
        mat2 = make_random_mat(20, 5)
        vec = torch.randn(20, 7, requires_grad=True)

        mat1_copy = mat1.clone().detach().requires_grad_(True)
        mat2_copy = mat2.clone().detach().requires_grad_(True)
        vec_copy = vec.clone().detach().requires_grad_(True)

        # Forward
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2)).matmul(vec)
        actual = prod(
            [mat1_copy.matmul(mat1_copy.transpose(-1, -2)), mat2_copy.matmul(mat2_copy.transpose(-1, -2))]
        ).matmul(vec_copy)
        self.assertLess(torch.max(((res - actual) / actual).abs()), 0.01)

        # Backward
        res.sum().backward()
        actual.sum().backward()
        self.assertLess(torch.max(((mat1.grad - mat1_copy.grad) / mat1_copy.grad).abs()), 0.01)
        self.assertLess(torch.max(((mat2.grad - mat2_copy.grad) / mat2_copy.grad).abs()), 0.01)
        self.assertLess(torch.max(((vec.grad - vec_copy.grad) / vec_copy.grad).abs()), 0.01)
Example #5
    def test_batch_matmul_mat_with_five_matrices(self):
        mat1 = make_random_mat(20, rank=4, batch_size=5)
        mat2 = make_random_mat(20, rank=4, batch_size=5)
        mat3 = make_random_mat(20, rank=4, batch_size=5)
        mat4 = make_random_mat(20, rank=4, batch_size=5)
        mat5 = make_random_mat(20, rank=4, batch_size=5)
        vec = torch.randn(5, 20, 7, requires_grad=True)

        mat1_copy = mat1.clone().detach().requires_grad_(True)
        mat2_copy = mat2.clone().detach().requires_grad_(True)
        mat3_copy = mat3.clone().detach().requires_grad_(True)
        mat4_copy = mat4.clone().detach().requires_grad_(True)
        mat5_copy = mat5.clone().detach().requires_grad_(True)
        vec_copy = vec.clone().detach().requires_grad_(True)

        # Forward
        res = MulLazyTensor(
            RootLazyTensor(mat1), RootLazyTensor(mat2), RootLazyTensor(mat3), RootLazyTensor(mat4), RootLazyTensor(mat5)
        ).matmul(vec)
        actual = prod(
            [
                mat1_copy.matmul(mat1_copy.transpose(-1, -2)),
                mat2_copy.matmul(mat2_copy.transpose(-1, -2)),
                mat3_copy.matmul(mat3_copy.transpose(-1, -2)),
                mat4_copy.matmul(mat4_copy.transpose(-1, -2)),
                mat5_copy.matmul(mat5_copy.transpose(-1, -2)),
            ]
        ).matmul(vec_copy)
        self.assertLess(torch.max(((res - actual) / actual).abs()), 0.01)

        # Backward
        res.sum().backward()
        actual.sum().backward()
        self.assertLess(torch.max(((mat1.grad - mat1_copy.grad) / mat1_copy.grad).abs()), 0.01)
        self.assertLess(torch.max(((mat2.grad - mat2_copy.grad) / mat2_copy.grad).abs()), 0.01)
        self.assertLess(torch.max(((mat3.grad - mat3_copy.grad) / mat3_copy.grad).abs()), 0.01)
        self.assertLess(torch.max(((mat4.grad - mat4_copy.grad) / mat4_copy.grad).abs()), 0.01)
        self.assertLess(torch.max(((mat5.grad - mat5_copy.grad) / mat5_copy.grad).abs()), 0.01)
        self.assertLess(torch.max(((vec.grad - vec_copy.grad) / vec_copy.grad).abs()), 0.01)
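As a shape reference for this batch test (an illustrative check built on the helper sketches above, not part of the original suite): each root matrix is (5, 20, 4), each dense covariance is (5, 20, 20), and the batched matmul broadcasts over the leading batch dimension:

    mats = [make_random_mat(20, rank=4, batch_size=5) for _ in range(5)]
    covs = [m @ m.transpose(-1, -2) for m in mats]  # each (5, 20, 20)
    rhs = torch.randn(5, 20, 7)
    out = prod(covs) @ rhs  # batched matmul -> (5, 20, 7)
    assert out.shape == torch.Size([5, 20, 7])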
Example #6
    def test_diag(self):
        mat1 = make_random_mat(20, rank=4)
        mat2 = make_random_mat(20, rank=4)
        mat3 = make_random_mat(20, rank=4)

        mat1_copy = mat1.clone().detach().requires_grad_(True)
        mat2_copy = mat2.clone().detach().requires_grad_(True)
        mat3_copy = mat3.clone().detach().requires_grad_(True)

        # Forward
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2), RootLazyTensor(mat3)).diag()
        actual = prod(
            [
                mat1_copy.matmul(mat1_copy.transpose(-1, -2)),
                mat2_copy.matmul(mat2_copy.transpose(-1, -2)),
                mat3_copy.matmul(mat3_copy.transpose(-1, -2)),
            ]
        ).diag()
        self.assertLess(torch.max(((res - actual) / actual).abs()), 0.01)
Example #7
    def test_batch_diag(self):
        mat1 = make_random_mat(20, rank=4, batch_size=5)
        mat2 = make_random_mat(20, rank=4, batch_size=5)
        mat3 = make_random_mat(20, rank=4, batch_size=5)

        mat1_copy = mat1.clone().detach().requires_grad_(True)
        mat2_copy = mat2.clone().detach().requires_grad_(True)
        mat3_copy = mat3.clone().detach().requires_grad_(True)

        # Forward
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2), RootLazyTensor(mat3)).diag()
        actual = prod(
            [
                mat1_copy.matmul(mat1_copy.transpose(-1, -2)),
                mat2_copy.matmul(mat2_copy.transpose(-1, -2)),
                mat3_copy.matmul(mat3_copy.transpose(-1, -2)),
            ]
        )
        # Extract the diagonal of each batch element: (5, 20, 20) -> (5, 20)
        actual = torch.cat([actual[i].diag().unsqueeze(0) for i in range(5)])
        self.assertLess(torch.max(((res - actual) / actual).abs()), 0.01)
Example #8
    def test_getitem(self):
        mat1 = make_random_mat(20, rank=4)
        mat2 = make_random_mat(20, rank=4)
        mat3 = make_random_mat(20, rank=4)

        mat1_copy = mat1.clone().detach().requires_grad_(True)
        mat2_copy = mat2.clone().detach().requires_grad_(True)
        mat3_copy = mat3.clone().detach().requires_grad_(True)

        # Forward
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2), RootLazyTensor(mat3))
        actual = prod(
            [
                mat1_copy.matmul(mat1_copy.transpose(-1, -2)),
                mat2_copy.matmul(mat2_copy.transpose(-1, -2)),
                mat3_copy.matmul(mat3_copy.transpose(-1, -2)),
            ]
        )

        self.assertLess(torch.max(((res[5, 3:5] - actual[5, 3:5]) / actual[5, 3:5]).abs()), 0.01)
        self.assertLess(torch.max(((res[3:5, 2:].evaluate() - actual[3:5, 2:]) / actual[3:5, 2:]).abs()), 0.01)
        self.assertLess(torch.max(((res[2:, 3:5].evaluate() - actual[2:, 3:5]) / actual[2:, 3:5]).abs()), 0.01)
Example #9
    def test_mul_adding_constant_mul(self):
        mat1 = make_random_mat(20, rank=4, batch_size=5)
        mat2 = make_random_mat(20, rank=4, batch_size=5)
        mat3 = make_random_mat(20, rank=4, batch_size=5)
        const = torch.ones(1, requires_grad=True)

        mat1_copy = mat1.clone().detach().requires_grad_(True)
        mat2_copy = mat2.clone().detach().requires_grad_(True)
        mat3_copy = mat3.clone().detach().requires_grad_(True)
        const_copy = const.clone().detach().requires_grad_(True)

        # Forward
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2), RootLazyTensor(mat3))
        res = res * const
        actual = (
            prod(
                [
                    mat1_copy.matmul(mat1_copy.transpose(-1, -2)),
                    mat2_copy.matmul(mat2_copy.transpose(-1, -2)),
                    mat3_copy.matmul(mat3_copy.transpose(-1, -2)),
                ]
            )
            * const_copy
        )
        self.assertLess(torch.max(((res.evaluate() - actual) / actual).abs()), 0.01)

        # Forward (multiplying by a plain Python float)
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2), RootLazyTensor(mat3))
        res = res * 2.5
        actual = (
            prod(
                [
                    mat1_copy.matmul(mat1_copy.transpose(-1, -2)),
                    mat2_copy.matmul(mat2_copy.transpose(-1, -2)),
                    mat3_copy.matmul(mat3_copy.transpose(-1, -2)),
                ]
            )
            * 2.5
        )
        self.assertLess(torch.max(((res.evaluate() - actual) / actual).abs()), 0.01)
Example #10
    def create_lazy_tensor(self):
        mat1 = make_random_mat(6, rank=5, batch_size=2)
        mat2 = make_random_mat(6, rank=5, batch_size=2)
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2))
        return res.add_diag(torch.tensor(2.0))
Example #11
    def create_lazy_tensor(self):
        mat1 = make_random_mat(20, rank=5, batch_size=2)
        mat2 = make_random_mat(20, rank=5, batch_size=2)
        constant = torch.tensor(4.0)
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2))
        return res.mul(constant).add_diag(torch.tensor(2.0))
Example #12
    def create_lazy_tensor(self):
        mat1 = make_random_mat(6, 3)
        mat2 = make_random_mat(6, 3)
        res = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2))
        return res.add_diag(torch.tensor(2.0))
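All of the create_lazy_tensor fixtures above finish with add_diag, which in gpytorch adds the given value to the diagonal of the represented matrix. A hedged sanity check of that expectation for this last fixture, assuming the helper sketches above (evaluate() densifies the lazy tensor, and the suite's 1% tolerance is reused):

    mat1 = make_random_mat(6, 3)
    mat2 = make_random_mat(6, 3)
    lt = MulLazyTensor(RootLazyTensor(mat1), RootLazyTensor(mat2)).add_diag(torch.tensor(2.0))

    # Dense reference: Hadamard product of the two low-rank PSD matrices, plus 2 * I.
    dense = prod([mat1 @ mat1.t(), mat2 @ mat2.t()]) + 2.0 * torch.eye(6)
    assert torch.allclose(lt.evaluate(), dense, rtol=0.01, atol=0.01)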