def test_get_item_on_interpolated_variable_no_diagonal():
    no_diag_toeplitz = ToeplitzLazyVariable(lazy_toeplitz_var.c, lazy_toeplitz_var.J_left, lazy_toeplitz_var.C_left,
                                            lazy_toeplitz_var.J_right, lazy_toeplitz_var.C_right)
    evaluated = no_diag_toeplitz.evaluate().data

    assert utils.approx_equal(no_diag_toeplitz[4:6].evaluate().data, evaluated[4:6])
    assert utils.approx_equal(no_diag_toeplitz[4:6, 2:6].evaluate().data, evaluated[4:6, 2:6])
Example #2
    def test_exact_gp_mll(self):
        labels_var = Variable(torch.randn(4))

        # Test case
        c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
        c2_var = Variable(torch.Tensor([6, 0, 1, -1]), requires_grad=True)
        actual = ToeplitzLazyVariable(c1_var + c2_var)

        # Actual case
        sum_lv = make_sum_lazy_var()
        t1, t2 = sum_lv.lazy_vars

        # Test forward
        mll_res = sum_lv.exact_gp_marginal_log_likelihood(labels_var)
        mll_actual = actual.exact_gp_marginal_log_likelihood(labels_var)
        self.assertLess(
            math.fabs(mll_res.data.squeeze()[0] - mll_actual.data.squeeze()[0]),
            5e-1,
        )

        # Test backwards
        mll_res.backward()
        mll_actual.backward()
        self.assertLess(
            math.fabs(c1_var.grad.data[0] - t1.column.grad.data[0]),
            1e-1,
        )
        self.assertLess(
            math.fabs(c2_var.grad.data[0] - t2.column.grad.data[0]),
            1e-1,
        )
Example #3
    def test_get_item_on_batch(self):
        toeplitz_var = ToeplitzLazyVariable(
            Variable(self.batch_toeplitz_column))
        evaluated = toeplitz_var.evaluate().data
        self.assertTrue(
            utils.approx_equal(toeplitz_var[0, 1:3].evaluate().data,
                               evaluated[0, 1:3]))
Example #4
def test_trace_log_det_quad_form():
    mu_diffs_var = Variable(torch.randn(4))
    chol_covar_1_var = Variable(torch.eye(4))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([6, 0, 1, -1]), requires_grad=True)
    actual = ToeplitzLazyVariable(c1_var + c2_var)

    # Actual case
    sum_lv = make_sum_lazy_var()
    t1, t2 = sum_lv.lazy_vars

    # Test forward
    tldqf_res = sum_lv.trace_log_det_quad_form(mu_diffs_var, chol_covar_1_var)
    tldqf_actual = actual.trace_log_det_quad_form(mu_diffs_var,
                                                  chol_covar_1_var)
    assert (math.fabs(tldqf_res.data.squeeze()[0] -
                      tldqf_actual.data.squeeze()[0]) < 1.5)

    # Test backwards
    tldqf_res.backward()
    tldqf_actual.backward()
    assert (math.fabs(c1_var.grad.data[0] - t1.column.grad.data[0]) < 1e-1)
    assert (math.fabs(c2_var.grad.data[0] - t2.column.grad.data[0]) < 1e-1)
Example #5
def test_exact_posterior():
    train_mean = Variable(torch.randn(4))
    train_y = Variable(torch.randn(4))
    test_mean = Variable(torch.randn(4))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([6, 0, 1, -1]), requires_grad=True)
    indices = Variable(torch.arange(0, 4).long().view(4, 1))
    values = Variable(torch.ones(4).view(4, 1))
    toeplitz_1 = InterpolatedLazyVariable(ToeplitzLazyVariable(c1_var),
                                          indices, values, indices, values)
    toeplitz_2 = InterpolatedLazyVariable(ToeplitzLazyVariable(c2_var),
                                          indices, values, indices, values)
    sum_lv = toeplitz_1 + toeplitz_2

    # Actual case
    actual = sum_lv.evaluate()

    # Test forward
    actual_alpha = gpytorch.posterior_strategy(actual).exact_posterior_alpha(
        train_mean, train_y)
    actual_mean = gpytorch.posterior_strategy(actual).exact_posterior_mean(
        test_mean, actual_alpha)
    sum_lv_alpha = sum_lv.posterior_strategy().exact_posterior_alpha(
        train_mean, train_y)
    sum_lv_mean = sum_lv.posterior_strategy().exact_posterior_mean(
        test_mean, sum_lv_alpha)
    assert (torch.norm(actual_mean.data - sum_lv_mean.data) < 1e-4)
Example #6
def test_getitem():
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([12.5, 2.5, 5, 0]), requires_grad=True)
    toeplitz_lazy_var = ToeplitzLazyVariable(c1_var) * 2.5
    actual = ToeplitzLazyVariable(c2_var)

    assert torch.norm(actual[2:, 2:].evaluate().data -
                      toeplitz_lazy_var[2:, 2:].evaluate().data) < 1e-3
Example #7
    def test_get_item_square_on_variable(self):
        toeplitz_var = ToeplitzLazyVariable(
            Variable(torch.Tensor([1, 2, 3, 4])))
        evaluated = toeplitz_var.evaluate().data

        self.assertTrue(
            utils.approx_equal(toeplitz_var[2:4, 2:4].evaluate().data,
                               evaluated[2:4, 2:4]))
Example #8
def make_mul_lazy_var():
    diag = Variable(torch.Tensor([1]), requires_grad=True)
    c1 = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    t1 = ToeplitzLazyVariable(c1)
    c2 = Variable(torch.Tensor([[6, 0], [1, -1]]), requires_grad=True)
    t2 = KroneckerProductLazyVariable(c2)
    c3 = Variable(torch.Tensor([7, 2, 1, 0]), requires_grad=True)
    t3 = ToeplitzLazyVariable(c3)
    return (t1 * t2 * t3).add_diag(diag), diag
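A minimal usage sketch for the fixture above, mirroring how the mul tests elsewhere in these examples consume it (assumes the same module-level imports as the surrounding snippets):

mul_lv, diag = make_mul_lazy_var()
t1, t2, t3 = mul_lv.lazy_vars  # the Toeplitz, Kronecker, and Toeplitz factors
dense = mul_lv.evaluate()      # dense 4x4 tensor: (T1 * T2 * T3) + diag * I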
Example #9
    def forward(self, x1, x2, **kwargs):
        if not torch.equal(x1.data, self._inducing_points) or \
                not torch.equal(x2.data, self._inducing_points):
            raise RuntimeError(
                'The kernel should only receive the inducing points as input')

        if not self.training and hasattr(self, '_cached_kernel_mat'):
            return self._cached_kernel_mat
        else:
            d = x1.size(1)
            grid_var = Variable(self.grid)
            if d > 1:
                k_UUs = Variable(x1.data.new(d, self.grid_size).zero_())
                for i in range(d):
                    k_UUs[i] = self.base_kernel_module(grid_var[i, 0],
                                                       grid_var[i],
                                                       **kwargs).squeeze()
                K_XX = KroneckerProductLazyVariable(k_UUs)

            else:
                if gpytorch.functions.use_toeplitz:
                    k_UU = self.base_kernel_module(grid_var[0, 0], grid_var[0],
                                                   **kwargs).squeeze()
                    K_XX = ToeplitzLazyVariable(k_UU)
                else:
                    k_UU = self.base_kernel_module(grid_var[0], grid_var[0],
                                                   **kwargs).squeeze()
                    K_XX = NonLazyVariable(k_UU)

            if not self.training:
                self._cached_kernel_mat = K_XX
            return K_XX
Example #10
    def forward(self, x1, x2, **kwargs):
        n, d = x1.size()
        grid_size = self.grid_size

        if self.conditioning:
            J1, C1, J2, C2 = self._compute_grid(x1, x2)
            self.train_J1 = J1
            self.train_C1 = C1
            self.train_J2 = J2
            self.train_C2 = C2
        else:
            train_data = self.train_inputs[0].data if hasattr(
                self, 'train_inputs') else None
            if train_data is not None and torch.equal(
                    x1.data, train_data) and torch.equal(x2.data, train_data):
                J1 = self.train_J1
                C1 = self.train_C1
                J2 = self.train_J2
                C2 = self.train_C2
            else:
                J1, C1, J2, C2 = self._compute_grid(x1, x2)

        grid_var = Variable(self.grid)
        if d > 1:
            k_UUs = Variable(x1.data.new(d, grid_size).zero_())
            for i in range(d):
                k_UUs[i] = self.base_kernel_module(grid_var[i, 0], grid_var[i],
                                                   **kwargs).squeeze()
            K_XX = KroneckerProductLazyVariable(k_UUs, J1, C1, J2, C2)
        else:
            k_UU = self.base_kernel_module(grid_var[0, 0], grid_var[0],
                                           **kwargs).squeeze()
            K_XX = ToeplitzLazyVariable(k_UU, J1, C1, J2, C2)

        return K_XX
Example #11
    def test_inv_matmul(self):
        labels_var = Variable(torch.randn(4))
        grad_output = torch.randn(4)

        # Test case
        c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
        c2_var = Variable(torch.Tensor([12.5, 2.5, 5, 0]), requires_grad=True)
        toeplitz_lazy_var = ToeplitzLazyVariable(c1_var) * 2.5
        actual = ToeplitzLazyVariable(c2_var)

        # Test forward
        with gpytorch.settings.max_cg_iterations(1000):
            res = toeplitz_lazy_var.inv_matmul(labels_var)
            actual = gpytorch.inv_matmul(actual, labels_var)

        # Test backwards
        res.backward(grad_output)
        actual.backward(grad_output)

        self.assertLess(
            math.fabs(res.data.squeeze()[0] - actual.data.squeeze()[0]),
            6e-1,
        )
        self.assertLess(math.fabs(c1_var.grad.data[0] - c2_var.grad.data[0]),
                        1)
Example #12
def test_exact_posterior():
    train_mean = Variable(torch.randn(4))
    train_y = Variable(torch.randn(4))
    test_mean = Variable(torch.randn(4))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([[6, 0], [1, -1]]), requires_grad=True)
    c3_var = Variable(torch.Tensor([7, 2, 1, 0]), requires_grad=True)
    indices_1 = torch.arange(0, 4).long().view(4, 1)
    values_1 = torch.ones(4).view(4, 1)
    indices_2 = torch.arange(0, 2).expand(4, 2).long().view(2, 4, 1)
    values_2 = torch.ones(8).view(2, 4, 1)
    indices_3 = torch.arange(0, 4).long().view(4, 1)
    values_3 = torch.ones(4).view(4, 1)
    toeplitz_1 = InterpolatedLazyVariable(ToeplitzLazyVariable(c1_var),
                                          Variable(indices_1),
                                          Variable(values_1),
                                          Variable(indices_1),
                                          Variable(values_1))
    kronecker_product = KroneckerProductLazyVariable(c2_var, indices_2,
                                                     values_2, indices_2,
                                                     values_2)
    toeplitz_2 = InterpolatedLazyVariable(ToeplitzLazyVariable(c3_var),
                                          Variable(indices_3),
                                          Variable(values_3),
                                          Variable(indices_3),
                                          Variable(values_3))
    mul_lv = toeplitz_1 * kronecker_product * toeplitz_2

    # Actual case
    actual = mul_lv.evaluate()
    # Test forward
    actual_alpha = gpytorch.posterior_strategy(actual).exact_posterior_alpha(
        train_mean, train_y)
    actual_mean = gpytorch.posterior_strategy(actual).exact_posterior_mean(
        test_mean, actual_alpha)
    mul_lv_alpha = mul_lv.posterior_strategy().exact_posterior_alpha(
        train_mean, train_y)
    mul_lv_mean = mul_lv.posterior_strategy().exact_posterior_mean(
        test_mean, mul_lv_alpha)
    assert (torch.norm(actual_mean.data - mul_lv_mean.data) < 1e-3)
Example #13
def test_evaluate():
    lazy_toeplitz_var = ToeplitzLazyVariable(Variable(toeplitz_column))
    res = lazy_toeplitz_var.evaluate()
    actual = torch.Tensor([
        [2, 0, 4, 1],
        [0, 2, 0, 4],
        [4, 0, 2, 0],
        [1, 4, 0, 2],
    ])
    assert utils.approx_equal(res, actual)

    lazy_toeplitz_var = ToeplitzLazyVariable(Variable(batch_toeplitz_column))
    res = lazy_toeplitz_var.evaluate()
    actual = torch.Tensor([
        [
            [2, 0, 4, 1],
            [0, 2, 0, 4],
            [4, 0, 2, 0],
            [1, 4, 0, 2],
        ],
        [
            [1, 1, -1, 3],
            [1, 1, 1, -1],
            [-1, 1, 1, 1],
            [3, -1, 1, 1],
        ],
    ])
    assert utils.approx_equal(res, actual)
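As a cross-check on the first matrix asserted above, here is a plain-torch sketch (an illustration, not the library internals) of expanding a symmetric Toeplitz matrix from its first column:

import torch

def dense_symmetric_toeplitz(column):
    # T[i, j] = column[|i - j|], which is what
    # ToeplitzLazyVariable(column).evaluate() is expected to return
    # for a 1D column.
    n = column.size(0)
    idx = (torch.arange(n).unsqueeze(1) - torch.arange(n).unsqueeze(0)).abs()
    return column[idx.long()]

# Reproduces the 4x4 matrix asserted above for column [2, 0, 4, 1].
print(dense_symmetric_toeplitz(torch.Tensor([2, 0, 4, 1])))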
Example #14
    def test_diag(self):
        c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
        c2_var = Variable(torch.Tensor([12.5, 2.5, 5, 0]), requires_grad=True)
        toeplitz_lazy_var = ToeplitzLazyVariable(c1_var) * 2.5
        actual = ToeplitzLazyVariable(c2_var)

        diff = torch.norm(actual.diag() - toeplitz_lazy_var.diag())
        self.assertLess(diff, 1e-3)
Example #15
def test_trace_log_det_quad_form():
    mu_diffs_var = Variable(torch.arange(1, 5, 1))
    chol_covar_1_var = Variable(torch.eye(4))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([[6, 0], [1, -1]]), requires_grad=True)
    c3_var = Variable(torch.Tensor([7, 2, 1, 0]), requires_grad=True)
    diag_var = Variable(torch.Tensor([1]), requires_grad=True)
    diag_var_expand = diag_var.expand(4)
    toeplitz_1 = ToeplitzLazyVariable(c1_var).evaluate()
    kronecker_product = KroneckerProductLazyVariable(c2_var).evaluate()
    toeplitz_2 = ToeplitzLazyVariable(c3_var).evaluate()
    actual = (toeplitz_1 * kronecker_product * toeplitz_2 +
              diag_var_expand.diag())

    # Actual case
    mul_lv, diag = make_mul_lazy_var()
    t1, t2, t3 = mul_lv.lazy_vars

    # Test forward
    tldqf_res = mul_lv.trace_log_det_quad_form(mu_diffs_var, chol_covar_1_var)
    tldqf_actual = gpytorch._trace_logdet_quad_form_factory_class()(
        mu_diffs_var, chol_covar_1_var, actual)
    assert (math.fabs(tldqf_res.data.squeeze()[0] -
                      tldqf_actual.data.squeeze()[0]) < 1.5)

    # Test backwards
    tldqf_res.backward()
    tldqf_actual.backward()
    assert ((c1_var.grad.data - t1.column.grad.data).abs().norm() /
            c1_var.grad.data.abs().norm() < 1e-1)
    assert ((c2_var.grad.data - t2.columns.grad.data).abs().norm() /
            c2_var.grad.data.abs().norm() < 1e-1)
    assert ((c3_var.grad.data - t3.column.grad.data).abs().norm() /
            c3_var.grad.data.abs().norm() < 1e-1)
    assert ((diag_var.grad.data - diag.grad.data).abs().norm() /
            diag_var.grad.data.abs().norm() < 1e-1)
Example #16
def test_diag():
    left_interp_indices = Variable(
        torch.LongTensor([[2, 3], [3, 4], [4, 5]]).repeat(5, 1, 1))
    left_interp_values = Variable(
        torch.Tensor([[1, 1], [1, 1], [1, 1]]).repeat(5, 1, 1))
    right_interp_indices = Variable(
        torch.LongTensor([[0, 1], [1, 2], [2, 3]]).repeat(5, 1, 1))
    right_interp_values = Variable(
        torch.Tensor([[1, 1], [1, 1], [1, 1]]).repeat(5, 1, 1))

    # Non-lazy variable
    base_lazy_variable_mat = torch.randn(5, 6, 6)
    base_lazy_variable_mat = base_lazy_variable_mat.transpose(
        1, 2).matmul(base_lazy_variable_mat)

    base_lazy_variable = NonLazyVariable(
        Variable(base_lazy_variable_mat, requires_grad=True))
    interp_lazy_var = SumInterpolatedLazyVariable(base_lazy_variable,
                                                  left_interp_indices,
                                                  left_interp_values,
                                                  right_interp_indices,
                                                  right_interp_values)

    res = interp_lazy_var.diag()
    actual = interp_lazy_var.evaluate().diag()
    assert approx_equal(res, actual)

    # Toeplitz
    base_lazy_variable = ToeplitzLazyVariable(Variable(torch.randn(5, 6)))
    interp_lazy_var = SumInterpolatedLazyVariable(base_lazy_variable,
                                                  left_interp_indices,
                                                  left_interp_values,
                                                  right_interp_indices,
                                                  right_interp_values)

    res = interp_lazy_var.diag()
    actual = interp_lazy_var.evaluate().diag()
    assert approx_equal(res, actual)

    # Constant mul
    base_lazy_variable = base_lazy_variable * Variable(torch.ones(1) * 1.3)
    interp_lazy_var = SumInterpolatedLazyVariable(base_lazy_variable,
                                                  left_interp_indices,
                                                  left_interp_values,
                                                  right_interp_indices,
                                                  right_interp_values)

    res = interp_lazy_var.diag()
    actual = interp_lazy_var.evaluate().diag()
    assert approx_equal(res, actual)
Example #17
def test_exact_gp_mll():
    labels_var = Variable(torch.arange(1, 5, 1))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([[6, 0], [1, -1]]), requires_grad=True)
    c3_var = Variable(torch.Tensor([7, 2, 1, 0]), requires_grad=True)
    diag_var = Variable(torch.Tensor([1]), requires_grad=True)
    diag_var_expand = diag_var.expand(4)
    toeplitz_1 = ToeplitzLazyVariable(c1_var).evaluate()
    kronecker_product = KroneckerProductLazyVariable(c2_var).evaluate()
    toeplitz_2 = ToeplitzLazyVariable(c3_var).evaluate()
    actual = (toeplitz_1 * kronecker_product * toeplitz_2 +
              diag_var_expand.diag())

    # Actual case
    mul_lv, diag = make_mul_lazy_var()
    t1, t2, t3 = mul_lv.lazy_vars

    # Test forward
    mll_res = mul_lv.exact_gp_marginal_log_likelihood(labels_var)
    mll_actual = gpytorch.exact_gp_marginal_log_likelihood(actual, labels_var)
    assert (math.fabs(mll_res.data.squeeze()[0] - mll_actual.data.squeeze()[0])
            < 1)
    # Test backwards
    mll_res.backward()
    mll_actual.backward()

    assert ((c1_var.grad.data - t1.column.grad.data).abs().norm() /
            c1_var.grad.data.abs().norm() < 1e-1)
    assert ((c2_var.grad.data - t2.columns.grad.data).abs().norm() /
            c2_var.grad.data.abs().norm() < 1e-1)
    assert ((c3_var.grad.data - t3.column.grad.data).abs().norm() /
            c3_var.grad.data.abs().norm() < 1e-1)
    assert ((diag_var.grad.data - diag.grad.data).abs().norm() /
            diag_var.grad.data.abs().norm() < 1e-1)
Example #18
    def forward(self, x1, x2, **kwargs):
        n, d = x1.size()
        m, _ = x2.size()

        if d > 1:
            raise RuntimeError(
                'The grid interpolation kernel can only be applied to inputs '
                'of a single dimension at this time, until Kronecker '
                'structure is implemented.')

        if self.grid is None:
            raise RuntimeError(
                'This GridInterpolationKernel has no grid. Call '
                'initialize_interpolation_grid on a GPModel first.')

        both_min = torch.min(x1.min(0)[0].data, x2.min(0)[0].data)[0]
        both_max = torch.max(x1.max(0)[0].data, x2.max(0)[0].data)[0]

        if both_min < self.grid_bounds[0] or both_max > self.grid_bounds[1]:
            # Out of bounds data is still ok if we are specifically computing kernel values for grid entries.
            if torch.abs(both_min - self.grid[0].data)[0] > 1e-7 or torch.abs(
                    both_max - self.grid[-1].data)[0] > 1e-7:
                raise RuntimeError(
                    'Received data that was out of bounds for the specified '
                    'grid. Grid bounds were ({}, {}), but min = {}, max = {}'
                    .format(self.grid_bounds[0], self.grid_bounds[1],
                            both_min, both_max))

        J1, C1 = Interpolation().interpolate(self.grid.data, x1.data.squeeze())
        J2, C2 = Interpolation().interpolate(self.grid.data, x2.data.squeeze())

        k_UU = self.base_kernel_module(self.grid[0], self.grid,
                                       **kwargs).squeeze()

        K_XX = ToeplitzLazyVariable(k_UU, J1, C1, J2, C2)

        return K_XX
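For intuition: the ToeplitzLazyVariable(k_UU, J1, C1, J2, C2) returned above implicitly represents the interpolated product W1 @ K_UU @ W2.T. Below is a dense sketch of that product using a hypothetical helper name (assumes plain tensors, a dense K_UU, and distinct interpolation indices within each row):

import torch

def ski_dense(K_UU, J1, C1, J2, C2):
    # Sketch only: row p of W1 holds the interpolation weights C1[p] in
    # columns J1[p], so the interpolated kernel is W1 @ K_UU @ W2.T.
    n, m, u = J1.size(0), J2.size(0), K_UU.size(0)
    W1 = torch.zeros(n, u).scatter_(1, J1, C1)
    W2 = torch.zeros(m, u).scatter_(1, J2, C2)
    return W1 @ K_UU @ W2.t()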
Example #19
def test_exact_gp_mll():
    labels_var = Variable(torch.randn(4))

    # Test case
    c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    c2_var = Variable(torch.Tensor([12.5, 2.5, 5, 0]), requires_grad=True)
    toeplitz_lazy_var = ToeplitzLazyVariable(c1_var) * 2.5
    actual = ToeplitzLazyVariable(c2_var)

    # Test forward
    mll_res = toeplitz_lazy_var.exact_gp_marginal_log_likelihood(labels_var)
    mll_actual = actual.exact_gp_marginal_log_likelihood(labels_var)
    assert (math.fabs(mll_res.data.squeeze()[0] - mll_actual.data.squeeze()[0])
            < 5e-1)

    # Test backwards
    mll_res.backward()
    mll_actual.backward()
    assert (math.fabs(c1_var.grad.data[0] - c2_var.grad.data[0]) < 1)
Example #20
    def test_exact_gp_mll(self):
        labels_var = Variable(torch.randn(4))

        # Test case
        c1_var = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
        c2_var = Variable(torch.Tensor([12.5, 2.5, 5, 0]), requires_grad=True)
        toeplitz_lazy_var = ToeplitzLazyVariable(c1_var) * 2.5
        actual = ToeplitzLazyVariable(c2_var)

        # Test forward
        with gpytorch.settings.num_trace_samples(1000):
            mll_res = toeplitz_lazy_var.exact_gp_marginal_log_likelihood(labels_var)
            mll_actual = actual.exact_gp_marginal_log_likelihood(labels_var)

        # Test backwards
        mll_res.backward()
        mll_actual.backward()

        self.assertLess(
            math.fabs(mll_res.data.squeeze()[0] - mll_actual.data.squeeze()[0]),
            6e-1,
        )
        self.assertLess(math.fabs(c1_var.grad.data[0] - c2_var.grad.data[0]), 1)
Example #21
def make_sum_lazy_var():
    c1 = Variable(torch.Tensor([5, 1, 2, 0]), requires_grad=True)
    t1 = ToeplitzLazyVariable(c1)
    c2 = Variable(torch.Tensor([6, 0, 1, -1]), requires_grad=True)
    t2 = ToeplitzLazyVariable(c2)
    return t1 + t2
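A minimal usage sketch, mirroring how the sum fixture is consumed by the tests above:

sum_lv = make_sum_lazy_var()
t1, t2 = sum_lv.lazy_vars  # the two ToeplitzLazyVariable summands
dense = sum_lv.evaluate()  # dense 4x4 tensor equal to T1 + T2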
Example #22
def test_get_item_scalar_on_batch():
    toeplitz_var = ToeplitzLazyVariable(Variable(torch.Tensor([[1, 2, 3, 4]])))
    evaluated = toeplitz_var.evaluate().data
    assert utils.approx_equal(toeplitz_var[0].evaluate().data, evaluated[0])
Example #23
def test_get_item_square_on_variable():
    toeplitz_var = ToeplitzLazyVariable(Variable(torch.Tensor([1, 2, 3, 4])),
                                        added_diag=Variable(torch.ones(4) * 3))
    evaluated = toeplitz_var.evaluate().data

    assert utils.approx_equal(toeplitz_var[2:4, 2:4].evaluate().data, evaluated[2:4, 2:4])