def prior_output(self):
        res = super(AbstractVariationalGP, self).__call__(self.inducing_points)
        if not isinstance(res, GaussianRandomVariable):
            raise RuntimeError(
                "%s.forward must return a GaussianRandomVariable" %
                self.__class__.__name__)

        res = GaussianRandomVariable(res.mean(), res.covar().evaluate_kernel())
        return res
    def forward(self, x1, x2, **kwargs):
        covar = self._get_covariance(x1, x2)

        if self.training:
            if not torch.equal(x1, x2):
                raise RuntimeError("x1 should equal x2 in training mode")
            zero_mean = torch.zeros_like(x1.select(-1, 0))
            new_variational_strategy = MVNVariationalStrategy(
                GaussianRandomVariable(zero_mean, self._covar_diag(x1)), GaussianRandomVariable(zero_mean, covar)
            )
            self.update_variational_strategy("inducing_point_strategy", new_variational_strategy)

        return covar
Example #3
    def __call__(self, inputs, **kwargs):
        if inputs.ndimension() == 1:
            inputs = inputs.unsqueeze(-1).unsqueeze(-1)
        elif inputs.ndimension() == 2:
            inputs = inputs.unsqueeze(-1)
        elif inputs.ndimension() != 3:
            raise RuntimeError(
                "AdditiveGridInducingVariationalGP expects a 3d tensor.")

        n_data, n_components, n_dimensions = inputs.size()
        if n_dimensions != self.grid.size(0):
            raise RuntimeError(
                "The number of dimensions should match the inducing points "
                " number of dimensions.")
        if n_components != self.n_components:
            raise RuntimeError(
                "The number of components should match the number specified.")
        if n_dimensions != 1:
            raise RuntimeError(
                "At the moment, AdditiveGridInducingVariationalGP only supports "
                "1d (Toeplitz) interpolation.")

        output = super(AdditiveGridInducingVariationalGP,
                       self).__call__(inputs, **kwargs)
        if self.sum_output:
            mean = output.mean().sum(0)
            covar = output.covar().sum_batch()
            return GaussianRandomVariable(mean, covar)
        else:
            return output
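The chain of unsqueeze calls above normalizes any 1d or 2d input into the (n_data, n_components, n_dimensions) layout the additive model expects. A standalone sketch of that reshaping, with illustrative names not taken from the original class:

import torch

def to_3d(inputs):
    # Mirror the reshaping in __call__ above:
    # (n,) -> (n, 1, 1), (n, c) -> (n, c, 1), 3d passes through.
    if inputs.ndimension() == 1:
        return inputs.unsqueeze(-1).unsqueeze(-1)
    elif inputs.ndimension() == 2:
        return inputs.unsqueeze(-1)
    elif inputs.ndimension() == 3:
        return inputs
    raise RuntimeError("expected a 1d, 2d, or 3d tensor")

to_3d(torch.randn(10)).size()     # torch.Size([10, 1, 1])
to_3d(torch.randn(10, 4)).size()  # torch.Size([10, 4, 1])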
Example #4
    def forward(self, *inputs, **params):
        n = len(self.train_xs[0]) if hasattr(self, 'train_xs') else 0
        m = len(inputs[0])

        # Compute mean and full data (train/test) covar
        if n:
            train_x_vars = [Variable(train_x) for train_x in self.train_xs]
            full_inputs = [torch.cat([train_x_var, input]) for train_x_var, input in zip(train_x_vars, inputs)]
        else:
            full_inputs = inputs
        gaussian_rv_output, log_noise = self.gp_observation_model.forward(*full_inputs, **params)
        full_mean, full_covar = gaussian_rv_output.representation()

        # Get mean/covar components
        test_mean = full_mean[n:]
        test_test_covar = full_covar[n:, n:]

        # If there's data, use it
        if n:
            train_y_var = Variable(self.train_y)
            train_mean = full_mean[:n]
            train_train_covar = AddDiag()(full_covar[:n, :n], log_noise.exp())
            test_train_covar = full_covar[n:, :n]
            train_test_covar = full_covar[:n, n:]

            # Update test mean
            alpha = Invmv()(train_train_covar, train_y_var - train_mean)
            test_mean = test_mean.add(torch.mv(test_train_covar, alpha))

            # Update test-test covar
            test_test_covar_correction = torch.mm(test_train_covar, Invmm()(train_train_covar, train_test_covar))
            test_test_covar = test_test_covar.sub(test_test_covar_correction)

        return GaussianRandomVariable(test_mean, test_test_covar), log_noise
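The data branch above is standard exact-GP conditioning: alpha = (K_nn + sigma^2 I)^{-1} (y - mu_n), posterior mean mu_* + K_*n alpha, posterior covariance K_** - K_*n (K_nn + sigma^2 I)^{-1} K_n*. A minimal NumPy restatement of the same algebra (function and variable names here are illustrative):

import numpy as np

def exact_gp_posterior(full_mean, full_covar, y_train, n, noise_var):
    # Split the joint covariance into train/test blocks, as forward() does.
    K_nn = full_covar[:n, :n] + noise_var * np.eye(n)  # train/train + noise
    K_mn = full_covar[n:, :n]                          # test/train
    K_mm = full_covar[n:, n:]                          # test/test
    alpha = np.linalg.solve(K_nn, y_train - full_mean[:n])
    post_mean = full_mean[n:] + K_mn @ alpha
    post_covar = K_mm - K_mn @ np.linalg.solve(K_nn, K_mn.T)
    return post_mean, post_covar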
Example #5
    def forward(self, x):
        mean_x = self.mean_module(x, constant=self.params.constant_mean)
        covar_x = self.covar_module(
            x, log_lengthscale=self.params.log_lengthscale)

        latent_pred = GaussianRandomVariable(mean_x, covar_x)
        return latent_pred, self.params.log_noise
Example #6
 def forward(self, x):
     projected_x = self.feature_extractor(x)
     projected_x = projected_x - projected_x.min(0)[0]
     projected_x = 2 * (projected_x / projected_x.max(0)[0]) - 1
     mean_x = self.mean_module(projected_x)
     covar_x = self.covar_module(projected_x)
     #covar_x = covar_x.mul(self.log_outputscale.exp())
     return GaussianRandomVariable(mean_x, covar_x)
Example #7
 def forward(self, x, i):
     # Get predictive mean
     mean_x = self.mean_module(x)
     # Get all covariances; we'll look up the task-specific ones
     covar_x = self.covar_module(x)
     # Get the covariance for task i
     covar_i = self.task_covar_module(i)
     covar_xi = covar_x.mul(covar_i)
     return GaussianRandomVariable(mean_x, covar_xi)
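The covar_x.mul(covar_i) step realizes the usual product structure of multitask kernels, k((x, i), (x', j)) = k_data(x, x') * k_task(i, j), as an elementwise product of two n x n matrices. A self-contained sketch of the same construction (the RBF kernel and task matrix B are illustrative choices, not the original modules):

import numpy as np

def rbf(x1, x2, lengthscale=1.0):
    d = x1[:, None] - x2[None, :]
    return np.exp(-0.5 * (d / lengthscale) ** 2)

x = np.linspace(0, 1, 5)
i = np.array([0, 0, 1, 1, 1])            # task index of each point
B = np.array([[1.0, 0.3], [0.3, 1.0]])   # 2x2 task covariance
covar_x = rbf(x, x)
covar_i = B[i[:, None], i[None, :]]      # pairwise task covariances
covar_xi = covar_x * covar_i             # the elementwise product used above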
Example #8
 def forward(self, x):
     # Learned mean is near-zero
     mean_x = self.mean_module(x)
     # Get predictive covariance and scale it
     covar_x = self.covar_module(x)
     covar_x = covar_x.mul(self.log_outputscale.exp())
     # Store as Gaussian
     latent_pred = GaussianRandomVariable(mean_x, covar_x)
     return latent_pred
Example #9
    def forward(self, x):
        mean_x = self.mean_module(x, constant=Variable(torch.Tensor([0])))
        covar_x = self.covar_module(
            x,
            log_mixture_weights=self.params.log_mixture_weights,
            log_mixture_means=self.params.log_mixture_means,
            log_mixture_scales=self.params.log_mixture_scales)

        latent_pred = GaussianRandomVariable(mean_x, covar_x)
        return latent_pred, self.params.log_noise
Example #10
    def forward(self, *inputs, **params):
        if not self.training:
            inducing_point_vars = [
                inducing_pt for inducing_pt in self.inducing_points
            ]
            full_inputs = [
                torch.cat([inducing_point_var,
                           input]) for inducing_point_var, input in zip(
                               inducing_point_vars, inputs)
            ]
        else:
            full_inputs = inputs

        gaussian_rv_output = self.prior_model.forward(*full_inputs, **params)
        full_mean, full_covar = gaussian_rv_output.representation()

        if not self.training:
            # Get mean/covar components
            n = self.num_inducing
            test_mean = full_mean[n:]
            induc_induc_covar = full_covar[:n, :n]
            induc_test_covar = full_covar[:n, n:]
            test_induc_covar = full_covar[n:, :n]
            test_test_covar = full_covar[n:, n:]

            # Calculate posterior components
            if not hasattr(self, 'alpha'):
                self.alpha = gpytorch.variational_posterior_alpha(
                    induc_induc_covar, self.variational_mean)
            test_mean = gpytorch.variational_posterior_mean(
                test_induc_covar, self.alpha)
            test_covar = gpytorch.variational_posterior_covar(
                test_induc_covar, induc_test_covar,
                self.chol_variational_covar, test_test_covar,
                induc_induc_covar)
            output = GaussianRandomVariable(test_mean, test_covar)
            return output

        else:
            full_covar = gpytorch.add_jitter(full_covar)
            f_prior = GaussianRandomVariable(full_mean, full_covar)
            return f_prior
Example #11
    def forward(self, x, i):
        mean_x = self.mean_module(x, constant=self.model_params.constant_mean)

        covar_x = self.covar_module(x, log_lengthscale=self.model_params.log_lengthscale)
        covar_i = self.task_covar_module(i,
                                         index_covar_factor=self.task_params.task_matrix,
                                         index_log_var=self.task_params.task_log_vars)

        covar_xi = covar_x.mul(covar_i)

        latent_pred = GaussianRandomVariable(mean_x, covar_xi)
        return latent_pred, self.model_params.log_noise
Example #12
    def forward(self, *inputs, **params):
        has_posterior = len(self.train_xs[0]) if hasattr(self,
                                                         'train_xs') else 0

        n = len(self.inducing_points[0])

        if has_posterior:
            inducing_point_vars = [
                Variable(train_x) for train_x in self.train_xs
            ]
            full_inputs = [
                torch.cat([inducing_point_var,
                           input]) for inducing_point_var, input in zip(
                               inducing_point_vars, inputs)
            ]
        else:
            full_inputs = inputs

        gaussian_rv_output = self.gp_observation_model.forward(
            *full_inputs, **params)
        full_mean, full_covar = gaussian_rv_output.representation()

        if not has_posterior:
            test_mean = full_mean
            test_covar = full_covar
        else:
            train_train_covar = full_covar[:n, :n]
            test_train_covar = full_covar[n:, :n]
            train_test_covar = full_covar[:n, n:]

            alpha = Invmv()(train_train_covar,
                            self.variational_parameters.variational_mean)
            test_mean = torch.mv(test_train_covar, alpha)

            chol_covar = self.variational_parameters.chol_variational_covar
            variational_covar = chol_covar.t().mm(chol_covar)

            test_covar = variational_covar - train_train_covar

            # test_covar = K_{mn}K_{nn}^{-1}(S - K_{nn})
            test_covar = torch.mm(test_train_covar,
                                  Invmm()(train_train_covar, test_covar))

            # right_factor = K_{nn}^{-1}K_{nm}
            right_factor = Invmm()(train_train_covar, train_test_covar)

            # test_covar = K_{mm} + K_{mn}K_{nn}^{-1}(S - K_{nn})K_{nn}^{-1}K_{nm}
            test_covar = full_covar[n:, n:] + test_covar.mm(right_factor)

        return GaussianRandomVariable(test_mean, test_covar)
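Put together, the steps above compute the sparse-variational predictive distribution: mean K_mn K_nn^{-1} m and covariance K_mm + K_mn K_nn^{-1}(S - K_nn) K_nn^{-1} K_nm, where m and S are the variational mean and covariance over the inducing values. A compact NumPy restatement under those assumptions (all names illustrative):

import numpy as np

def variational_predict(full_covar, n, variational_mean, chol_covar):
    # full_covar stacks inducing (first n rows/cols) and test points.
    K_nn = full_covar[:n, :n]
    K_mn = full_covar[n:, :n]
    K_mm = full_covar[n:, n:]
    S = chol_covar.T @ chol_covar                  # variational covariance
    mean = K_mn @ np.linalg.solve(K_nn, variational_mean)
    inner = np.linalg.solve(K_nn, (S - K_nn) @ np.linalg.solve(K_nn, K_mn.T))
    covar = K_mm + K_mn @ inner
    return mean, covar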
Example #13
    def variational_output(self):
        chol_variational_covar = self.chol_variational_covar

        # Negate each row whose diagonal entry is negative (the Cholesky
        # factor of a matrix must have positive diagonal elements).
        if chol_variational_covar.ndimension() == 2:
            chol_variational_covar = chol_variational_covar.triu()
            inside = chol_variational_covar.diag().sign().unsqueeze(
                1).expand_as(chol_variational_covar).triu()
        elif chol_variational_covar.ndimension() == 3:
            batch_size, diag_size, _ = chol_variational_covar.size()

            # Batch mode
            chol_variational_covar_size = list(
                chol_variational_covar.size())[-2:]
            mask = torch.ones(*chol_variational_covar_size,
                              dtype=chol_variational_covar.dtype,
                              device=chol_variational_covar.device).triu_()
            mask = mask.unsqueeze(0).expand(
                *([chol_variational_covar.size(0)] +
                  chol_variational_covar_size))

            batch_index = torch.arange(0,
                                       batch_size,
                                       dtype=torch.long,
                                       device=mask.device)
            batch_index = batch_index.unsqueeze(1).repeat(1,
                                                          diag_size).view(-1)
            diag_index = torch.arange(0,
                                      diag_size,
                                      dtype=torch.long,
                                      device=mask.device)
            diag_index = diag_index.unsqueeze(1).repeat(batch_size, 1).view(-1)
            diag = chol_variational_covar[batch_index, diag_index,
                                          diag_index].view(
                                              batch_size, diag_size)

            chol_variational_covar = chol_variational_covar.mul(mask)
            inside = diag.sign().unsqueeze(-1).expand_as(
                chol_variational_covar).mul(mask)
        else:
            raise RuntimeError(
                "Invalid number of variational covar dimensions")

        chol_variational_covar = inside.mul(chol_variational_covar)
        variational_covar = CholLazyTensor(
            chol_variational_covar.transpose(-1, -2))
        return GaussianRandomVariable(self.variational_mean, variational_covar)
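The masking above only flips the sign of rows of the upper-triangular factor whose diagonal entry is negative, which keeps the diagonal positive without changing the represented covariance chol.t() @ chol. For the 2d (non-batch) case the operation reduces to this sketch (illustrative helper, not part of the original class):

import torch

def positive_diag_triu(chol):
    # Keep the upper triangle, then negate rows with a negative diagonal;
    # (D @ U).t() @ (D @ U) == U.t() @ U for any diagonal D of +/-1 signs.
    chol = chol.triu()
    signs = chol.diag().sign()
    signs[signs == 0] = 1  # leave zero-diagonal rows alone
    return signs.unsqueeze(-1) * chol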
Example #14
    def forward(self, x, i):
        n = len(x)
        mean_x = self.mean_module(x, constant=self.model_params.constant_mean)
        covar_x = self.covar_module(x, log_lengthscale=self.model_params.log_lengthscale)
        covar_i = Variable(torch.zeros(n, n))

        for j in range(self.num_task_samples):
            task_assignments = self.latent_tasks.task_assignments.sample()
            task_assignments = task_assignments.index_select(0, i)
            covar_ji = self.task_covar_module(task_assignments,
                                              index_covar_factor=self.task_params.task_matrix,
                                              index_log_var=self.task_params.task_log_vars)
            covar_i += covar_ji.mul_(1. / self.num_task_samples)

        covar_xi = covar_x.mul(covar_i)
        latent_pred = GaussianRandomVariable(mean_x, covar_xi)
        return latent_pred, self.model_params.log_noise
Example #15
 def forward(self, x):
     mean_x = self.mean_module(x)
     covar_x = self.covar_module(x)
     # Return model output as a GaussianRandomVariable
     return GaussianRandomVariable(mean_x, covar_x)
Example #16
 def forward(self, x):
     mean_x = self.mean_module(x)
     covar_x = self.covar_module(x)
     covar_x = covar_x.mul(self.log_outputscale.exp().expand_as(covar_x))
     latent_pred = GaussianRandomVariable(mean_x, covar_x)
     return latent_pred
Example #17
 def forward(self, x, i):
     mean_x = self.mean_module(x)
     covar_x = self.covar_module(x)
     covar_i = self.task_covar_module(i)
     covar_xi = covar_x.mul(covar_i)
     return GaussianRandomVariable(mean_x, covar_xi)
Example #18
 def forward(self, x):
     mean_x = self.mean_module(x)
     covar_x = self.covar_module(x).add_diag(Variable(torch.Tensor([1e-2])))
     return GaussianRandomVariable(mean_x, covar_x)
Example #19
 def forward(self, input):
     assert isinstance(input, GaussianRandomVariable)
     mean, covar = input.representation()
     noise = gpytorch.add_diag(covar, self.log_noise.exp())
     return GaussianRandomVariable(mean, noise)
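gpytorch.add_diag above adds exp(log_noise) to the diagonal of the latent covariance, turning it into the observed-data covariance K + sigma^2 I. In plain torch the same operation is (sketch assumes log_noise is a scalar tensor):

import torch

def add_noise_diag(covar, log_noise):
    # Observed covariance: K + exp(log_noise) * I
    return covar + log_noise.exp() * torch.eye(covar.size(-1))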
Example #20
 def forward(self, x):
     mean_x = self.mean_module(x)
     covar_x = ConstantMulLazyVariable(self.covar_module(x),
                                       self.log_outputscale.exp())
     latent_pred = GaussianRandomVariable(mean_x, covar_x)
     return latent_pred
Example #21
 def forward(self, input, log_noise):
     assert isinstance(input, GaussianRandomVariable)
     mean, covar = input.representation()
     noise = AddDiag()(covar, log_noise.exp())
     return GaussianRandomVariable(mean, noise)
Example #22
 def forward(self, x):
     mean_x = self.mean_module(x)
     covar_x = self.covar_module(x)
     covar_x = covar_x.mul(self.log_outputscale.exp())
     return GaussianRandomVariable(mean_x, covar_x)
Example #23
 def prior_output(self):
     out = super(AdditiveGridInducingVariationalGP, self).prior_output()
     mean = out.mean()
     covar = out.covar().repeat(self.n_components, 1, 1)
     return GaussianRandomVariable(mean, covar)
Example #24
 def forward(self, x):
     mean_x = self.mean_module(x)
     covar_x = self.covar_module(x)
     return GaussianRandomVariable(mean_x, covar_x)
Example #25
 def forward(self, x):
     features = self.feature_extractor(x)
     features = gpytorch.utils.scale_to_bounds(features, 0, 1)
     mean_x = self.mean_module(features)
     covar_x = self.covar_module(features)
     return GaussianRandomVariable(mean_x, covar_x)
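Examples #6 and #25 both rescale extracted features into a fixed box before the GP layer, which keeps them in a region where the kernel lengthscale (and, for grid methods, the inducing grid) is well calibrated. A plain-torch sketch of such column-wise min-max scaling to [0, 1] (an illustration of the idea, not the library call):

import torch

def scale_to_unit(features, eps=1e-8):
    # Min-max scale each feature column into [0, 1].
    fmin = features.min(0)[0]
    fmax = features.max(0)[0]
    return (features - fmin) / (fmax - fmin + eps)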