Example #1
def test_interpolated_toeplitz_gp_marginal_log_likelihood_forward():
    # Builds a dense reference from the explicit Toeplitz matrix and checks the
    # interpolated (lazy) marginal log likelihood against it. math, torch,
    # Variable and the gpytorch helpers used below are assumed to be imported
    # by the surrounding test module.
    x = Variable(torch.linspace(0, 1, 5))
    y = torch.randn(5)
    noise = torch.Tensor([1e-4])
    rbf_covar = RBFKernel()
    rbf_covar.initialize(log_lengthscale=-4)
    covar_module = GridInterpolationKernel(rbf_covar)
    covar_module.initialize_interpolation_grid(10, grid_bounds=(0, 1))
    covar_x = covar_module.forward(x.unsqueeze(1), x.unsqueeze(1))
    c = covar_x.c.data
    T = utils.toeplitz.sym_toeplitz(c)

    W_left = index_coef_to_sparse(covar_x.J_left, covar_x.C_left, len(c))
    W_right = index_coef_to_sparse(covar_x.J_right, covar_x.C_right, len(c))

    W_left_dense = W_left.to_dense()
    W_right_dense = W_right.to_dense()

    WTW = W_left_dense.matmul(T.matmul(W_right_dense.t())) + torch.eye(len(x)) * 1e-4

    quad_form_actual = y.dot(WTW.inverse().matmul(y))
    chol_WTW = torch.potrf(WTW)  # Cholesky factor of WTW
    log_det_actual = chol_WTW.diag().log().sum() * 2  # log|WTW| = 2 * sum(log(diag))

    actual = -0.5 * (log_det_actual + quad_form_actual + math.log(2 * math.pi) * len(y))

    res = InterpolatedToeplitzGPMarginalLogLikelihood(W_left, W_right, num_samples=1000)(Variable(c),
                                                                                         Variable(y),
                                                                                         Variable(noise)).data
    assert all(torch.abs((res - actual) / actual) < 0.05)
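For reference, the quantity the test above reconstructs is the closed-form Gaussian marginal log likelihood -0.5 * (log|K| + y^T K^-1 y + n * log(2*pi)). A minimal dense-only sketch of that formula (dense_gp_mll is a hypothetical helper name, not part of gpytorch):

def dense_gp_mll(K, y):
    # y^T K^{-1} y
    quad_form = y.dot(K.inverse().matmul(y))
    # log|K| = 2 * sum(log(diag(chol(K))))
    log_det = torch.potrf(K).diag().log().sum() * 2
    return -0.5 * (log_det + quad_form + math.log(2 * math.pi) * len(y))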
Example #2
    def evaluate(self):
        """
        Explicitly evaluate and return the matrix represented by this object as a float Tensor;
        that is, compute the dense product W_left * T * W_right^T and return it.

        Warning: as implicitly stored by this LazyVariable, W is very sparse and T requires only
        O(n) storage, whereas the full matrix requires O(n^2) storage. Calling evaluate can
        therefore easily lead to memory issues and should be a last resort.
        """

        if self.J_left is not None:
            n_left = len(self.J_left)
            n_right = len(self.J_right)
            W_left = toeplitz.index_coef_to_sparse(self.J_left, self.C_left,
                                                   len(self.c))
            W_right = toeplitz.index_coef_to_sparse(self.J_right, self.C_right,
                                                    len(self.c))
            if n_left <= n_right:
                W_left_T = self.explicit_interpolate_T(self.J_left,
                                                       self.C_left)
                WTW = gpytorch.dsmm(Variable(W_right), W_left_T.t()).t()
            else:
                W_right_T = self.explicit_interpolate_T(
                    self.J_right, self.C_right)
                WTW = gpytorch.dsmm(Variable(W_left), W_right_T.t())
        else:
            WTW = ToeplitzLazyVariable(self.c).mm(
                Variable(torch.eye(len(self.c))))

        if self.added_diag is not None:
            WTW = WTW + torch.diag(self.added_diag)

        return WTW
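For reference, the O(n) storage claim above comes from the fact that a symmetric Toeplitz matrix is fully determined by its first column c. A minimal sketch of that reconstruction (a hypothetical stand-in for utils.toeplitz.sym_toeplitz, shown only to illustrate the structure):

def sym_toeplitz_sketch(c):
    n = len(c)
    T = torch.zeros(n, n)
    for i in range(n):
        for j in range(n):
            # Every entry depends only on |i - j|
            T[i, j] = c[abs(i - j)]
    return T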
Example #3
def test_interpolated_toeplitz_gp_marginal_log_likelihood_backward():
    # Checks that gradients with respect to c, y and the noise term from the
    # lazy computation match those of the dense reference built below.
    x = Variable(torch.linspace(0, 1, 5))
    y = Variable(torch.randn(5), requires_grad=True)
    noise = Variable(torch.Tensor([1e-4]), requires_grad=True)

    rbf_covar = RBFKernel()
    rbf_covar.initialize(log_lengthscale=-4)
    covar_module = GridInterpolationKernel(rbf_covar)
    covar_module.eval()
    covar_module.initialize_interpolation_grid(10, [(0, 1)])
    covar_x = covar_module.forward(x.unsqueeze(1), x.unsqueeze(1))

    c = Variable(covar_x.c.data, requires_grad=True)

    W_left = index_coef_to_sparse(covar_x.J_left, covar_x.C_left, len(c))
    W_right = index_coef_to_sparse(covar_x.J_right, covar_x.C_right, len(c))

    W_left_dense = Variable(W_left.to_dense())
    W_right_dense = Variable(W_right.to_dense())

    # Build T entry-by-entry so autograd can track its dependence on c
    T = Variable(torch.zeros(len(c), len(c)))
    for i in range(len(c)):
        for j in range(len(c)):
            T[i, j] = utils.toeplitz.sym_toeplitz_getitem(c, i, j)

    WTW = W_left_dense.matmul(T.matmul(
        W_right_dense.t())) + Variable(torch.eye(len(x))) * noise

    quad_form_actual = y.dot(WTW.inverse().matmul(y))
    log_det_actual = _det(WTW).log()

    actual_nll = -0.5 * (log_det_actual + quad_form_actual +
                         math.log(2 * math.pi) * len(y))
    actual_nll.backward()

    actual_c_grad = c.grad.data.clone()
    actual_y_grad = y.grad.data.clone()
    actual_noise_grad = noise.grad.data.clone()

    c.grad.data.fill_(0)
    y.grad.data.fill_(0)
    noise.grad.data.fill_(0)

    covar_x = gpytorch.lazy.ToeplitzLazyVariable(c, covar_x.J_left,
                                                 covar_x.C_left,
                                                 covar_x.J_right,
                                                 covar_x.C_right, noise)
    res = covar_x.exact_gp_marginal_log_likelihood(y)
    res.backward()

    res_c_grad = covar_x.c.grad.data
    res_y_grad = y.grad.data
    res_noise_grad = noise.grad.data

    assert (actual_c_grad - res_c_grad).norm() / res_c_grad.norm() < 0.05
    assert (actual_y_grad - res_y_grad).norm() / res_y_grad.norm() < 1e-3
    assert (actual_noise_grad -
            res_noise_grad).norm() / res_noise_grad.norm() < 1e-3
Example #4
    def exact_gp_marginal_log_likelihood(self, target):
        W_left = Variable(toeplitz.index_coef_to_sparse(
            self.J_left, self.C_left, len(self.c)))
        W_right = Variable(toeplitz.index_coef_to_sparse(
            self.J_right, self.C_right, len(self.c)))
        noise_diag = self.added_diag
        return InterpolatedToeplitzGPMarginalLogLikelihood(
            W_left, W_right)(self.c, target, noise_diag)
Example #5
    def monte_carlo_log_likelihood(self, log_probability_func, train_y,
                                   variational_mean, chol_var_covar,
                                   num_samples):
        # Draw samples from the variational distribution with the
        # reparameterization trick: sample = mean + L * epsilon.
        epsilon = Variable(torch.randn(len(self.c), num_samples))
        samples = chol_var_covar.mm(epsilon)
        samples = samples + variational_mean.unsqueeze(1).expand_as(samples)
        # Interpolate the samples at the grid points onto the training inputs.
        W_left = Variable(
            toeplitz.index_coef_to_sparse(self.J_left, self.C_left,
                                          len(self.c)))
        samples = gpytorch.dsmm(W_left, samples)
        log_likelihood = log_probability_func(samples, train_y)

        return log_likelihood
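The sampling above is the standard reparameterization identity: if epsilon ~ N(0, I) and the variational covariance is Sigma = L L^T, then mean + L * epsilon ~ N(mean, Sigma). A small self-contained check of that identity (illustrative only; the names below are not part of gpytorch):

L = torch.Tensor([[1.0, 0.0, 0.0],
                  [0.5, 1.0, 0.0],
                  [0.2, 0.3, 1.0]])
mean = torch.zeros(3)
eps = torch.randn(3, 10000)
samples = L.mm(eps) + mean.unsqueeze(1).expand(3, 10000)
emp_cov = samples.mm(samples.t()) / 10000  # should be close to L.mm(L.t())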
Example #6
def list_of_indices_and_values_to_sparse(index_matrices, value_matrices, columns):
    index_matrix, value_matrix, m = _merge_index_and_value_matrices(index_matrices, value_matrices, columns)
    return index_coef_to_sparse(index_matrix, value_matrix, m)
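The sparse matrices used throughout these examples are interpolation matrices: row i of the index matrix gives the columns of the nonzero weights for data point i, and the value matrix holds those weights. A minimal sketch of that conversion under this assumed layout (hypothetical helper; the real index_coef_to_sparse lives in gpytorch's Toeplitz utilities):

def index_coef_to_sparse_sketch(J, C, num_columns):
    n, k = J.size()
    # Repeat each row index k times so it pairs with the k column indices in J
    row_indices = torch.arange(0, n).long().unsqueeze(1).expand(n, k)
    indices = torch.stack([row_indices.contiguous().view(-1),
                           J.contiguous().view(-1)])
    values = C.contiguous().view(-1)
    return torch.sparse.FloatTensor(indices, values,
                                    torch.Size([n, num_columns]))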