# Example 1
def test_backward():
    """Check Invmm gradients against hand-derived closed forms.

    The graph is ``loss = 2 * sum(eye * Invmm(a * b, c))`` with ``b`` a
    constant all-twos matrix, so the effective left operand is ``2 * a``
    and its inverse is ``a.inverse() / 2``.
    """
    for n_cols in [2, 3, 4]:
        print('ncols', n_cols)
        # Symmetric positive-definite test matrix.
        mat = torch.Tensor([
            [5, -3, 0],
            [-3, 5, 0],
            [0, 0, 2],
        ])
        scale = torch.ones(3, 3).fill_(2)
        rhs = torch.randn(3, n_cols)
        eye = torch.eye(3, n_cols)

        # Inverse of the scaled operand (2 * mat)^{-1} = mat^{-1} / 2.
        half_inv = mat.inverse() * 0.5

        # d loss / d mat and d loss / d rhs, derived analytically.
        expected_mat_grad = -torch.mm(half_inv.mm(eye),
                                      half_inv.mm(rhs).t()) * 2 * 2
        expected_rhs_grad = half_inv.t().mm(eye) * 2

        mat_var = Variable(mat, requires_grad=True)
        rhs_var = Variable(rhs, requires_grad=True)
        scaled = mat_var.mul(Variable(scale))
        solved = Invmm()(scaled, rhs_var)
        loss = solved.mul(Variable(eye)).sum() * 2
        loss.backward()

        assert torch.norm(expected_mat_grad - mat_var.grad.data) < 1e-4
        assert torch.norm(expected_rhs_grad - rhs_var.grad.data) < 1e-4
# Example 2
    def forward(self, *inputs, **params):
        """Compute the variational predictive distribution at *inputs*.

        When training inputs exist, the GP prior is evaluated jointly over
        the inducing points and the new inputs, and the variational
        posterior (mean ``m`` and covariance ``S = L^T L``) over the
        inducing values is propagated to the new inputs. Otherwise the
        prior itself is returned.

        Returns a GaussianRandomVariable over the test points.
        """
        # Non-zero iff training data has been attached to this module.
        # NOTE(review): treated as a boolean below; the actual count of
        # training points is not used directly — TODO confirm intended.
        has_posterior = len(self.train_xs[0]) if hasattr(self,
                                                         'train_xs') else 0

        # Number of inducing points; the first n rows/cols of the joint
        # mean/covariance correspond to them.
        n = len(self.inducing_points[0])

        if has_posterior:
            inducing_point_vars = [
                Variable(train_x) for train_x in self.train_xs
            ]
            # Stack training inputs above the new inputs so one prior
            # evaluation yields the full joint covariance.
            full_inputs = [
                torch.cat([inducing_point_var,
                           input]) for inducing_point_var, input in zip(
                               inducing_point_vars, inputs)
            ]
        else:
            full_inputs = inputs

        # Joint prior over [inducing points; test points].
        gaussian_rv_output = self.gp_observation_model.forward(
            *full_inputs, **params)
        full_mean, full_covar = gaussian_rv_output.representation()

        if not has_posterior:
            # No data: return the prior unchanged.
            test_mean = full_mean
            test_covar = full_covar
        else:
            # Partition the joint covariance: K_{nn} (train/train),
            # K_{mn} (test/train), K_{nm} (train/test).
            train_train_covar = full_covar[:n, :n]
            test_train_covar = full_covar[n:, :n]
            train_test_covar = full_covar[:n, n:]

            # alpha = K_{nn}^{-1} m (variational mean solve).
            alpha = Invmv()(train_train_covar,
                            self.variational_parameters.variational_mean)
            # Predictive mean: K_{mn} K_{nn}^{-1} m.
            # NOTE(review): the prior test mean full_mean[n:] is not added
            # here — presumably a zero-mean prior is assumed; verify.
            test_mean = torch.mv(test_train_covar, alpha)

            # Variational covariance S = L^T L from its Cholesky factor.
            chol_covar = self.variational_parameters.chol_variational_covar
            variational_covar = chol_covar.t().mm(chol_covar)

            # (S - K_{nn})
            test_covar = variational_covar - train_train_covar

            # test_covar = K_{mn}K_{nn}^{-1}(S - K_{nn})
            test_covar = torch.mm(test_train_covar,
                                  Invmm()(train_train_covar, test_covar))

            # right_factor = K_{nn}^{-1}K_{nm}
            right_factor = Invmm()(train_train_covar, train_test_covar)

            # test_covar = K_{mn}K_{nn}^{-1}(S - K_{nn})K_{nn}^{-1}K_{nm}
            # plus the prior test covariance K_{mm}.
            test_covar = full_covar[n:, n:] + test_covar.mm(right_factor)

        return GaussianRandomVariable(test_mean, test_covar)
# Example 3
    def forward(self, *inputs, **params):
        """Compute the exact GP posterior predictive at *inputs*.

        Evaluates the GP prior jointly over training and test inputs,
        then conditions on the observed training targets using the
        standard Gaussian conditioning formulas (with observation noise
        ``exp(log_noise)`` added to the train/train block).

        Returns ``(GaussianRandomVariable, log_noise)``.
        """
        # n = number of training points (0 if no data attached);
        # m = number of test points in this call.
        n = len(self.train_xs[0]) if hasattr(self, 'train_xs') else 0
        m = len(inputs[0])

        # Compute mean and full data (train/test) covar
        if n:
            train_x_vars = [Variable(train_x) for train_x in self.train_xs]
            # Stack training inputs above test inputs so one prior call
            # produces the full joint mean/covariance.
            full_inputs = [torch.cat([train_x_var, input]) for train_x_var, input in zip(train_x_vars, inputs)]
        else:
            full_inputs = inputs
        gaussian_rv_output, log_noise = self.gp_observation_model.forward(*full_inputs, **params)
        full_mean, full_covar = gaussian_rv_output.representation()

        # Get mean/covar components
        # Prior mean/covariance restricted to the test points (last m).
        test_mean = full_mean[n:]
        test_test_covar = full_covar[n:, n:]

        # If there's data, use it
        if n:
            train_y_var = Variable(self.train_y)
            train_mean = full_mean[:n]
            # K_{nn} + sigma^2 I: observation noise on the training block.
            train_train_covar = AddDiag()(full_covar[:n, :n], log_noise.exp())
            test_train_covar = full_covar[n:, :n]
            train_test_covar = full_covar[:n, n:]

            # Update test mean
            # alpha = (K_{nn} + sigma^2 I)^{-1} (y - mu_train);
            # posterior mean = mu_test + K_{mn} alpha.
            alpha = Invmv()(train_train_covar, train_y_var - train_mean)
            test_mean = test_mean.add(torch.mv(test_train_covar, alpha))

            # Update test-test covar
            # posterior covar = K_{mm} - K_{mn}(K_{nn}+sigma^2 I)^{-1}K_{nm}.
            test_test_covar_correction = torch.mm(test_train_covar, Invmm()(train_train_covar, train_test_covar))
            test_test_covar = test_test_covar.sub(test_test_covar_correction)

        return GaussianRandomVariable(test_mean, test_test_covar), log_noise
# Example 4
def test_forward():
    """Check that Invmm's forward pass matches a direct inverse-multiply."""
    for n_cols in [2, 3, 4]:
        print('ncols', n_cols)
        # Symmetric positive-definite test matrix.
        mat = torch.Tensor([
            [5, -3, 0],
            [-3, 5, 0],
            [0, 0, 2],
        ])
        rhs = torch.randn(3, n_cols)

        # Reference: explicit inverse times right-hand side.
        expected = mat.inverse().mm(rhs)

        # Invmm should produce the same A^{-1} B product.
        result = Invmm()(Variable(mat), Variable(rhs)).data

        assert torch.norm(expected - result) < 1e-4