Example No. 1

# Shared imports assumed by the snippets below (gpytorch's pre-1.9 lazy-tensor
# API; `utils` in later examples is the test suite's approx_equal helper):
import torch
from gpytorch.lazy import BatchRepeatLazyTensor, PsdSumLazyTensor, ToeplitzLazyTensor

def create_lazy_tensor(self):
    toeplitz_column = torch.tensor([[4, 0, 0, 1], [3, 0, -0.5, -1]],
                                   dtype=torch.float)
    toeplitz_column.detach_()
    return BatchRepeatLazyTensor(ToeplitzLazyTensor(toeplitz_column),
                                 torch.Size((3,)))
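For reference, a symmetric Toeplitz matrix is determined entirely by its first column: entry (i, j) equals column[|i - j|]. Below is a minimal dense sketch of what ToeplitzLazyTensor represents implicitly; dense_sym_toeplitz is a hypothetical illustration helper, not a gpytorch function.

import torch

def dense_sym_toeplitz(column):
    # entry (..., i, j) = column[..., |i - j|]; also handles batched columns
    idx = torch.arange(column.size(-1))
    return column[..., (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()]

col = torch.tensor([4., 0., 0., 1.])
dense = dense_sym_toeplitz(col)  # (4, 4) symmetric Toeplitz matrix
# BatchRepeatLazyTensor(ToeplitzLazyTensor(col), torch.Size((3,))) evaluates to
# the same values as dense.repeat(3, 1, 1), without materializing the copies.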
Example No. 2
def create_lazy_tensor(self):
    c1 = torch.tensor([[2, 0.5, 0, 0], [5, 1, 2, 0]],
                      dtype=torch.float,
                      requires_grad=True)
    t1 = ToeplitzLazyTensor(c1)
    c2 = torch.tensor([[2, 0.5, 0, 0], [6, 0, 1, -1]],
                      dtype=torch.float,
                      requires_grad=True)
    t2 = ToeplitzLazyTensor(c2)
    return PsdSumLazyTensor(t1, t2)
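Because the Toeplitz structure is linear in the column, the sum of two symmetric Toeplitz matrices is the symmetric Toeplitz matrix of the summed columns. A quick dense check, reusing the hypothetical dense_sym_toeplitz sketch from Example No. 1:

import torch

def dense_sym_toeplitz(column):
    idx = torch.arange(column.size(-1))
    return column[..., (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()]

c1 = torch.tensor([[2, 0.5, 0, 0], [5, 1, 2, 0]])
c2 = torch.tensor([[2, 0.5, 0, 0], [6, 0, 1, -1]])
# sum of the dense matrices == dense matrix of the summed columns
assert torch.equal(dense_sym_toeplitz(c1) + dense_sym_toeplitz(c2),
                   dense_sym_toeplitz(c1 + c2))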
Example No. 3
    def test_getitem(self):
        c1_var = torch.tensor([5, 1, 2, 0],
                              dtype=torch.float,
                              requires_grad=True)
        c2_var = torch.tensor([12.5, 2.5, 5, 0],
                              dtype=torch.float,
                              requires_grad=True)
        # c2_var equals 2.5 * c1_var, so scaling the lazy operator by 2.5
        # must reproduce the Toeplitz matrix built from c2_var directly.
        toeplitz_lazy_var = ToeplitzLazyTensor(c1_var) * 2.5
        actual = ToeplitzLazyTensor(c2_var)

        diff = torch.norm(actual[2:, 2:].evaluate() -
                          toeplitz_lazy_var[2:, 2:].evaluate())
        self.assertLess(diff, 1e-3)
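The identity this test exercises: scaling a symmetric Toeplitz matrix by a constant is the same as building the matrix from the scaled column. A one-line dense check with the hypothetical helper from the earlier sketch:

import torch

def dense_sym_toeplitz(column):
    idx = torch.arange(column.size(-1))
    return column[..., (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()]

c1 = torch.tensor([5., 1., 2., 0.])
# scaling the matrix == scaling its defining column
assert torch.equal(2.5 * dense_sym_toeplitz(c1), dense_sym_toeplitz(2.5 * c1))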
Example No. 4
    def test_batch_inv_matmul(self):
        labels_var = torch.randn(2, 4, 1, requires_grad=True)
        labels_var_copy = labels_var.clone().detach().requires_grad_(True)
        grad_output = torch.randn(2, 4, 1)

        # Build matching batch Toeplitz operators: one lazy, one dense
        c1_var = torch.tensor([[5, 1, 2, 0]], dtype=torch.float).repeat(2, 1)
        c2_var = torch.tensor([[5, 1, 2, 0]], dtype=torch.float).repeat(2, 1)
        c1_var.requires_grad = True
        c2_var.requires_grad = True
        toeplitz_lazy_var = ToeplitzLazyTensor(c1_var) * torch.tensor(
            [2.5, 1.])
        actual = ToeplitzLazyTensor(c2_var).evaluate() * torch.tensor(
            [2.5, 1.]).view(2, 1, 1)

        # Test forward
        with gpytorch.settings.max_cg_iterations(1000):
            res = toeplitz_lazy_var.inv_matmul(labels_var)
            actual = gpytorch.inv_matmul(actual, labels_var_copy)

        # Test backwards
        res.backward(grad_output)
        actual.backward(grad_output)

        for i in range(c1_var.size(0)):
            for j in range(c1_var.size(1)):
                self.assertLess(
                    math.fabs(res[i, j].item() - actual[i, j].item()), 1e-2)
                self.assertLess(
                    math.fabs(c1_var.grad[i, j].item() -
                              c2_var.grad[i, j].item()), 1e-2)
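inv_matmul solves A x = b for x; gpytorch does this with conjugate gradients, which is why the test raises max_cg_iterations. For a matrix this small one can sanity-check against a dense direct solve; a minimal sketch with the hypothetical helper from the earlier examples:

import torch

def dense_sym_toeplitz(column):
    idx = torch.arange(column.size(-1))
    return column[..., (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()]

A = dense_sym_toeplitz(torch.tensor([5., 1., 2., 0.]))  # diagonally dominant, hence PD
b = torch.randn(4, 1)
x = torch.linalg.solve(A, b)  # direct reference solution to A x = b
assert torch.allclose(A @ x, b, atol=1e-4)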
Example No. 5
    def test_evaluate(self):
        lazy_toeplitz_var = ToeplitzLazyTensor(self.toeplitz_column)
        res = lazy_toeplitz_var.evaluate()
        actual = torch.tensor([[2, 0, 4, 1], [0, 2, 0, 4], [4, 0, 2, 0], [1, 4, 0, 2]], dtype=torch.float)
        self.assertTrue(utils.approx_equal(res, actual))

        lazy_toeplitz_var = ToeplitzLazyTensor(self.batch_toeplitz_column)
        res = lazy_toeplitz_var.evaluate()
        actual = torch.tensor(
            [
                [[2, 0, 4, 1], [0, 2, 0, 4], [4, 0, 2, 0], [1, 4, 0, 2]],
                [[1, 1, -1, 3], [1, 1, 1, -1], [-1, 1, 1, 1], [3, -1, 1, 1]],
            ],
            dtype=torch.float,
        )
        self.assertTrue(utils.approx_equal(res, actual))
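From the expected matrices one can read the fixture back off: self.batch_toeplitz_column is presumably the batch of first columns [[2, 0, 4, 1], [1, 1, -1, 3]]. The same dense result via the hypothetical helper from the earlier sketches:

import torch

def dense_sym_toeplitz(column):
    idx = torch.arange(column.size(-1))
    return column[..., (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()]

batch_column = torch.tensor([[2., 0., 4., 1.], [1., 1., -1., 3.]])
dense = dense_sym_toeplitz(batch_column)  # shape (2, 4, 4), matches `actual` above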
Example No. 6
def create_lazy_tensor(self):
    c1 = torch.tensor([5, 1, 2, 0], dtype=torch.float, requires_grad=True)
    t1 = ToeplitzLazyTensor(c1)
    c2 = torch.tensor([6, 0, 1, -1], dtype=torch.float, requires_grad=True)
    t2 = ToeplitzLazyTensor(c2)
    return t1 + t2
Example No. 7
def test_get_item_scalar_on_batch(self):
    toeplitz_var = ToeplitzLazyTensor(torch.tensor([[1, 2, 3, 4]], dtype=torch.float))
    evaluated = toeplitz_var.evaluate()
    self.assertTrue(utils.approx_equal(toeplitz_var[0].evaluate(), evaluated[0]))
Example No. 8
def test_get_item_on_batch(self):
    toeplitz_var = ToeplitzLazyTensor(self.batch_toeplitz_column)
    evaluated = toeplitz_var.evaluate()
    self.assertTrue(utils.approx_equal(toeplitz_var[0, 1:3].evaluate(), evaluated[0, 1:3]))
Example No. 9
    def test_get_item_square_on_tensor(self):
        toeplitz_var = ToeplitzLazyTensor(torch.tensor([1, 2, 3, 4], dtype=torch.float))
        evaluated = toeplitz_var.evaluate()

        self.assertTrue(utils.approx_equal(toeplitz_var[2:4, 2:4].evaluate(), evaluated[2:4, 2:4]))
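A structural fact behind the square-slice test: a diagonal-aligned square slice toeplitz[a:b, a:b] of a symmetric Toeplitz matrix is itself symmetric Toeplitz, built from the column prefix column[:b - a]. A minimal check with the hypothetical helper from the earlier sketches:

import torch

def dense_sym_toeplitz(column):
    idx = torch.arange(column.size(-1))
    return column[..., (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()]

col = torch.tensor([1., 2., 3., 4.])
dense = dense_sym_toeplitz(col)
# the diagonal-aligned square slice is again symmetric Toeplitz
assert torch.equal(dense[2:4, 2:4], dense_sym_toeplitz(col[:2]))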
Example No. 10

def create_lazy_tensor(self):
    column = torch.tensor([[5, 1, 2, 0]],
                          dtype=torch.float).repeat(3, 2, 1)
    column.requires_grad_(True)
    constant = torch.randn(2, 1, 1).abs()
    return ToeplitzLazyTensor(column) * constant
Example No. 11

def create_lazy_tensor(self):
    column = torch.tensor([5, 1, 2, 0],
                          dtype=torch.float,
                          requires_grad=True)
    constant = 2.5
    return ToeplitzLazyTensor(column) * constant
Example No. 12

def create_lazy_tensor(self):
    column = torch.tensor([[5, 1, 2, 0]], dtype=torch.float).repeat(2, 1)
    column.requires_grad_(True)
    constant = torch.tensor([2.5, 1.0]).view(2, 1, 1)
    return ToeplitzLazyTensor(column) * constant
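How the batch constant broadcasts: the (2, 1, 1) view pairs each scalar with its own (4, 4) slice of the batch. A dense sketch with the hypothetical helper from the earlier examples:

import torch

def dense_sym_toeplitz(column):
    idx = torch.arange(column.size(-1))
    return column[..., (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()]

column = torch.tensor([[5., 1., 2., 0.]]).repeat(2, 1)
constant = torch.tensor([2.5, 1.0]).view(2, 1, 1)
scaled = constant * dense_sym_toeplitz(column)  # (2, 1, 1) * (2, 4, 4) -> (2, 4, 4)
assert torch.equal(scaled[0], 2.5 * dense_sym_toeplitz(column[0]))
assert torch.equal(scaled[1], dense_sym_toeplitz(column[1]))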
Example No. 13

def create_lazy_tensor(self):
    toeplitz_column = torch.tensor([4, 0.1, 0.05, 0.01, 0.0], dtype=torch.float)
    toeplitz_column.detach_()
    return BatchRepeatLazyTensor(ToeplitzLazyTensor(toeplitz_column), torch.Size((3,)))
Example No. 14
def create_lazy_tensor(self):
    toeplitz_column = torch.tensor([[2, -1, 0.5, 0.25], [4, 0, 0, 1]], dtype=torch.float, requires_grad=True)
    return ToeplitzLazyTensor(toeplitz_column)
Example No. 15
def make_sum_lazy_var():
    c1 = torch.tensor([5, 1, 2, 0], dtype=torch.float, requires_grad=True)
    t1 = ToeplitzLazyTensor(c1)
    c2 = torch.tensor([6, 0, 1, -1], dtype=torch.float, requires_grad=True)
    t2 = ToeplitzLazyTensor(c2)
    return t1 + t2
Example No. 16

def create_lazy_tensor(self):
    toeplitz_column = torch.tensor([[4, 0, 0, 1], [3, 0, -0.5, -1]],
                                   dtype=torch.float,
                                   requires_grad=True)
    return BatchRepeatLazyTensor(ToeplitzLazyTensor(toeplitz_column),
                                 torch.Size((3,)))
Example No. 17

def create_lazy_tensor(self):
    toeplitz_column = torch.tensor([4, 0.1, 0.05, 0.01, 0.0],
                                   dtype=torch.float,
                                   requires_grad=True)
    return BatchRepeatLazyTensor(ToeplitzLazyTensor(toeplitz_column),
                                 torch.Size((3,)))
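Unlike Example No. 1, these last two fixtures keep requires_grad=True on the column, so gradients flowing through any of the three repeated batch entries accumulate into the one shared column. A dense sketch of that sharing (hypothetical helper as before; expand creates views, not copies):

import torch

def dense_sym_toeplitz(column):
    idx = torch.arange(column.size(-1))
    return column[..., (idx.unsqueeze(0) - idx.unsqueeze(1)).abs()]

col = torch.tensor([4., 0.1, 0.05, 0.01, 0.0], requires_grad=True)
batch = dense_sym_toeplitz(col).expand(3, 5, 5)  # three views of one matrix
batch.sum().backward()
print(col.grad)  # three times the gradient of a single matrix's sum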