Example #1
 def __init__(self):
     super(GPClassificationModel, self).__init__(grid_size=32, grid_bounds=[(0, 1)])
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-5, 5))
     self.covar_module = ScaleKernel(
         RBFKernel(log_lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1, log_transform=True)),
         log_outputscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1, log_transform=True),
     )
 def __init__(self, train_x, train_y, likelihood, num_classes):
     super(DirichletGPModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean(
         batch_shape=torch.Size((num_classes, )))
     self.covar_module = ScaleKernel(
         RBFKernel(batch_shape=torch.Size((num_classes, ))),
         batch_shape=torch.Size((num_classes, )),
     )
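Both snippets above show only the constructor. As a minimal sketch (not part of the original source), the batched Dirichlet model can be completed with a forward method and instantiated through GPyTorch's DirichletClassificationLikelihood, which converts integer class labels into per-class regression targets:

import torch
import gpytorch
from gpytorch.likelihoods import DirichletClassificationLikelihood


class DirichletGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood, num_classes):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean(
            batch_shape=torch.Size((num_classes,)))
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(batch_shape=torch.Size((num_classes,))),
            batch_shape=torch.Size((num_classes,)),
        )

    def forward(self, x):
        # One MVN per class; the leading batch dimension indexes classes.
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)


# The Dirichlet likelihood turns integer labels into per-class regression
# targets (likelihood.transformed_targets has shape [num_classes, n]).
train_x = torch.randn(20, 2)
train_y = torch.randint(0, 3, (20,))
likelihood = DirichletClassificationLikelihood(train_y, learn_additional_noise=True)
model = DirichletGPModel(train_x, likelihood.transformed_targets, likelihood,
                         num_classes=likelihood.num_classes)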
Example #3
 def __init__(self, train_X, train_Y):
     self._validate_tensor_args(train_X, train_Y)
     train_Y = train_Y.squeeze(-1)
     likelihood = GaussianLikelihood()
     super().__init__(train_X, train_Y, likelihood)
     self.mean_module = ConstantMean()
     self.covar_module = ScaleKernel(RBFKernel())
     self.to(train_X)
 def __init__(self, train_inputs, train_targets, likelihood):
     super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))
     self.rbf_covar_module = RBFKernel(
         log_lengthscale_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1, log_transform=True)
     )
     self.noise_covar_module = WhiteNoiseKernel(variances=torch.ones(11) * 0.001)
     self.covar_module = ScaleKernel(self.rbf_covar_module + self.noise_covar_module)
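A porting note on the second snippet: WhiteNoiseKernel was deprecated and later removed from GPyTorch; the documented replacement is to move the fixed noise into the likelihood. A minimal sketch of that substitution (variable names here are assumptions, not from the original example):

import torch
import gpytorch

# Fixed per-observation noise of 0.001, matching variances=torch.ones(11) * 0.001.
noise = torch.ones(11) * 0.001
likelihood = gpytorch.likelihoods.FixedNoiseGaussianLikelihood(
    noise=noise, learn_additional_noise=False)
# The covariance then reduces to the scaled RBF component alone.
covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())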
Example #5
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean()
     self.base_covar_module = ScaleKernel(RBFKernel())
     self.covar_module = InducingPointKernel(
         self.base_covar_module,
         inducing_points=train_x[:500, :],
         likelihood=likelihood)
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
     self.base_covar_module = ScaleKernel(
         RBFKernel(log_lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1, log_transform=True))
     )
     self.covar_module = GridInterpolationKernel(self.base_covar_module, grid_size=50, grid_bounds=[(0, 1)])
     # Note: `feature_extractor` must be defined in the enclosing scope
     # (e.g. a neural network module); this snippet references it as a global.
     self.feature_extractor = feature_extractor
 def __init__(self, train_x, train_y, likelihood, amountinducing):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean()
     self.base_covar_module = ScaleKernel(RBFKernel())
     dimension = train_x.size(-1)
     self.covar_module = ProductStructureKernel(
         GridInterpolationKernel(self.base_covar_module,
                                 grid_size=amountinducing, num_dims=1),
         num_dims=dimension)
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))
     self.base_covar_module = ScaleKernel(
         RBFKernel(log_lengthscale_prior=SmoothedBoxPrior(
             exp(-3), exp(3), sigma=0.1, log_transform=True)))
     self.covar_module = ProductStructureKernel(
         GridInterpolationKernel(self.base_covar_module,
                                 grid_size=100, num_dims=2),
         num_dims=2)
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
     self.base_covar_module = ScaleKernel(
         RBFKernel(log_lengthscale_prior=SmoothedBoxPrior(
             exp(-5), exp(6), sigma=0.1, log_transform=True)))
     self.covar_module = InducingPointKernel(
         self.base_covar_module,
         inducing_points=torch.linspace(0, 1, 32))
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ZeroMean()
     self.base_covar_module = ScaleKernel(
         RBFKernel(log_lengthscale_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1, log_transform=True))
     )
     self.covar_module = AdditiveGridInterpolationKernel(
         self.base_covar_module, grid_size=100, grid_bounds=[(-0.5, 1.5)], n_components=2
     )
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
     self.base_covar_module = ScaleKernel(
         RBFKernel(lengthscale_prior=SmoothedBoxPrior(
             exp(-5), exp(6), sigma=0.1)))
     self.covar_module = GridInterpolationKernel(self.base_covar_module,
                                                 grid_size=50,
                                                 num_dims=1)
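All of the inducing-point and SKI variants in this block plug into the same exact-GP training loop. Below is a minimal end-to-end sketch built around the GridInterpolationKernel variant; the data is synthetic and the loop is the standard GPyTorch pattern, not the original authors' code:

import math
import torch
import gpytorch


class GPRegressionModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        base_covar = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        # SKI: interpolate the base kernel onto a regular grid for fast MVMs.
        self.covar_module = gpytorch.kernels.GridInterpolationKernel(
            base_covar, grid_size=50, num_dims=1)

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x))


train_x = torch.linspace(0, 1, 100)
train_y = torch.sin(train_x * 2 * math.pi) + 0.1 * torch.randn(100)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)

model.train()
likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for _ in range(50):
    optimizer.zero_grad()
    loss = -mll(model(train_x), train_y)
    loss.backward()
    optimizer.step()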
Example #12
    def __init__(self, train_x, train_y, likelihood):
        super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)

        self.mean_module = ConstantMean()
        self.covar_module = ScaleKernel(MaternKernel(nu=5 / 2))
        # `lengthscale` is not a constructor argument in GPyTorch kernels;
        # set the hyperparameter explicitly after construction.
        self.covar_module.base_kernel.lengthscale = 1.
 def __init__(self):
     super(GPClassificationModel, self).__init__(grid_size=8,
                                                 grid_bounds=[(0, 3), (0, 3)])
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
     self.covar_module = ScaleKernel(
         RBFKernel(ard_num_dims=2,
                   log_lengthscale_prior=SmoothedBoxPrior(
                       exp(-2.5), exp(3), sigma=0.1, log_transform=True)))
Example #14
 def __init__(self, train_x, train_y):
     likelihood = gpytorch.likelihoods.GaussianLikelihood()
     super().__init__(train_x, train_y, likelihood)
     self.mean_module = gpytorch.means.ZeroMean()
     self.covar_module = InducingPointKernel(
         ScaleKernel(RBFKernel(ard_num_dims=3)),
         inducing_points=torch.randn(512, 3),
         likelihood=likelihood,
     )
    def test_ard(self):
        base_k = RBFKernel(ard_num_dims=3)
        base_k.initialize(lengthscale=[1., 2., 3.])
        AddK = NewtonGirardAdditiveKernel(base_k, 3, max_degree=1)

        testvals = torch.tensor([[1, 2, 3], [7, 5, 2]], dtype=torch.float)
        add_k_val = AddK(testvals, testvals).evaluate()

        ks = []
        for i in range(3):
            k = RBFKernel(active_dims=i)
            k.initialize(lengthscale=i + 1)
            ks.append(k)
        manual_k = ScaleKernel(AdditiveKernel(*ks))
        manual_k.initialize(outputscale=1.)
        manual_add_k_val = manual_k(testvals, testvals).evaluate()

        # np.testing.assert_allclose(add_k_val.detach().numpy(), manual_add_k_val.detach().numpy(), atol=1e-5)
        self.assertTrue(torch.allclose(add_k_val, manual_add_k_val, atol=1e-5))
Example #16
 def __init__(self, train_inputs, train_targets, likelihood):
     super().__init__(train_inputs, train_targets, likelihood)
     if train_inputs.ndim == 2:
         dims = train_inputs.shape[1]
     else:
         dims = 1
     self.mean_module = gpytorch.means.ZeroMean()
     # self.covar_module = ScaleKernel(RBFKernel(ard_num_dims=dims))
     self.covar_module = ScaleKernel(MaternKernel(ard_num_dims=dims,
                                                  nu=1.5))
Example #17
 def __init__(self,
              train_inputs,
              train_targets,
              likelihood,
              batch_shape=torch.Size([2])):
     super(ExactGPModel, self).__init__(train_inputs, train_targets,
                                        likelihood)
     self.mean_module = ConstantMean(batch_shape=batch_shape)
     self.covar_module = ScaleKernel(RBFKernel(batch_shape=batch_shape),
                                     batch_shape=batch_shape)
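A hedged sketch of how a batched model like this is completed and fed data: with batch_shape=torch.Size([2]), inputs gain an explicit batch dimension and the targets carry one independent GP per batch entry (class and variable names below are illustrative):

import torch
import gpytorch


class BatchedExactGP(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood, batch_shape=torch.Size([2])):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean(batch_shape=batch_shape)
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(batch_shape=batch_shape),
            batch_shape=batch_shape)

    def forward(self, x):
        # Returns a batch of MVNs, one per independent GP.
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x))


# Inputs have shape [2, n, d]; targets have shape [2, n].
train_x = torch.linspace(0, 1, 50).view(1, -1, 1).repeat(2, 1, 1)
train_y = torch.stack([torch.sin(train_x[0, :, 0] * 6),
                       torch.cos(train_x[0, :, 0] * 6)])
likelihood = gpytorch.likelihoods.GaussianLikelihood(batch_shape=torch.Size([2]))
model = BatchedExactGP(train_x, train_y, likelihood)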
Example #18
def create_bayesian_quadrature_iso_gauss():

    x1 = torch.from_numpy(np.array([[-1, 1], [0, 0], [-2, 0.1]]))
    x2 = torch.from_numpy(np.array([[-1, 1], [0, 0], [-2, 0.1], [-3, 3]]))
    M1 = x1.size()[0]
    M2 = x2.size()[0]
    D = x1.size()[1]

    prior_mean = torch.from_numpy(np.arange(D))[None, :]
    prior_variance = 2.

    rbf = RBFKernel()
    rbf.lengthscale = 1.
    kernel = ScaleKernel(rbf)
    kernel.outputscale = 1.

    bqkernel = QuadratureRBFGaussPrior(kernel, prior_mean, prior_variance)

    return bqkernel, x1, x2, M1, M2, D
 def __init__(self, train_x, train_y, likelihood, lengthscale_constraint,
              outputscale_constraint, ard_dims):
     super(GP, self).__init__(train_x, train_y, likelihood)
     self.ard_dims = ard_dims
     self.mean_module = ConstantMeanGrad()
     base_kernel = RBFKernelGrad(
         lengthscale_constraint=lengthscale_constraint,
         ard_num_dims=ard_dims)
     self.covar_module = ScaleKernel(
         base_kernel, outputscale_constraint=outputscale_constraint)
 def __init__(self, dim):
     # squeeze output dim before passing train_Y to ExactGP
     # super().__init__(train_X, train_Y.squeeze(-1), GaussianLikelihood())
     # super().__init__(train_X, train_Y, MultitaskGaussianLikelihood(num_tasks=1+train_X.shape[-1]))
     self.likelihood = MultitaskGaussianLikelihood(num_tasks=1 + dim)
     self.mean_module = ConstantMeanGrad()
     base_kernel = RBFKernelGrad(ard_num_dims=dim)
     self.covar_module = ScaleKernel(base_kernel=base_kernel)
     # self.to(train_X)  # make sure we're on the right device/dtype
     self.dim = dim
Example #21
 def __init__(self, n_features, grid_size=100, grid_bounds=(-10.1, 10.1)):
     #super(GPLayer, self).__init__(grid_size=grid_size, grid_bounds=n_features*[grid_bounds])
     super(GPLayer, self).__init__(grid_size=grid_size,
                                   grid_bounds=[grid_bounds],
                                   n_components=n_features,
                                   mixing_params=False,
                                   sum_output=True)
     self.grid_bounds = grid_bounds
     self.mean_module = ConstantMean()
     self.covar_module = ScaleKernel(RBFKernel())
Example #22
    def __init__(self, train_x, train_y, likelihood, outputscale=1.0):
        super().__init__(train_x, train_y, likelihood)

        self.mean_module = ZeroMean()
        self.kernel = ScaleKernel(
            MaternKernel(nu=2.5,
                         # ard_num_dims=train_x.shape[-1]
                         ))

        self.kernel.outputscale = outputscale
Example #23
 def __init__(self, x_train, y_train, likelihood):
     super().__init__(x_train, y_train, likelihood)
     self.mean = ConstantMean()
     base_kernel = ScaleKernel(RBFKernel())
     # self.covariance = base_kernel
     # here we chose inducing points very randomly just based on the first five
     # samples of training data but it can be much better or smarter
     self.covariance = InducingPointKernel(base_kernel,
                                           inducing_points=x_train[:5],
                                           likelihood=likelihood)
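After training, prediction for a sparse model like this follows the usual GPyTorch pattern. A short sketch, assuming the model and likelihood from the snippet above plus 1-d test inputs:

import torch
import gpytorch

model.eval()
likelihood.eval()
test_x = torch.linspace(0, 1, 200)
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    posterior = likelihood(model(test_x))
    mean = posterior.mean
    lower, upper = posterior.confidence_region()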
Example #24
    def __init__(self, input_dim, feature_dim, label_dim, hidden_width,
                 hidden_depth, n_inducing, batch_size, max_epochs_since_update,
                 **kwargs):
        """
        Args:
            input_dim (int)
            feature_dim (int): dimension of deep kernel features
            label_dim (int)
            hidden_depth (int)
            hidden_width (int or list)
            n_inducing (int): number of inducing points for variational approximation
            batch_size (int)
            max_epochs_since_update (int)
        """
        params = locals()
        del params['self']
        self.__dict__ = params
        super().__init__()

        noise_constraint = GreaterThan(1e-4)
        self.likelihood = GaussianLikelihood(
            batch_shape=torch.Size([label_dim]),
            noise_constraint=noise_constraint)

        self.nn = FCNet(input_dim,
                        output_dim=label_dim,
                        hidden_width=hidden_width,
                        hidden_depth=hidden_depth,
                        batch_norm=True)
        self.batch_norm = torch.nn.BatchNorm1d(feature_dim)

        self.mean_module = ConstantMean(batch_shape=torch.Size([label_dim]))
        base_kernel = RBFKernel(batch_shape=torch.Size([label_dim]),
                                ard_num_dims=feature_dim)
        self.covar_module = ScaleKernel(base_kernel,
                                        batch_shape=torch.Size([label_dim]))

        variational_dist = MeanFieldVariationalDistribution(
            num_inducing_points=n_inducing,
            batch_shape=torch.Size([label_dim]))
        inducing_points = torch.randn(n_inducing, feature_dim)
        self.variational_strategy = VariationalStrategy(
            self,
            inducing_points,
            variational_dist,
            learn_inducing_locations=True)

        # initialize preprocessers
        self.register_buffer("input_mean", torch.zeros(input_dim))
        self.register_buffer("input_std", torch.ones(input_dim))
        self.register_buffer("label_mean", torch.zeros(label_dim))
        self.register_buffer("label_std", torch.ones(label_dim))

        self._train_ckpt = deepcopy(self.state_dict())
        self._eval_ckpt = deepcopy(self.state_dict())
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ZeroMean()
     self.base_covar_module = ScaleKernel(
         RBFKernel(ard_num_dims=2,
                   lengthscale_prior=SmoothedBoxPrior(exp(-3),
                                                      exp(3),
                                                      sigma=0.1)))
     self.covar_module = AdditiveStructureKernel(
         GridInterpolationKernel(self.base_covar_module,
                                 grid_size=100, num_dims=2),
         num_dims=2)
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
     self.base_covar_module = ScaleKernel(
         RBFKernel(log_lengthscale_prior=SmoothedBoxPrior(
             exp(-5), exp(6), sigma=0.1, log_transform=True)))
     self.grid_covar_module = GridInterpolationKernel(
         self.base_covar_module, grid_size=50, num_dims=1)
     self.noise_covar_module = WhiteNoiseKernel(
         variances=torch.ones(100) * 0.001)
     self.covar_module = self.grid_covar_module + self.noise_covar_module
    def __init__(self,
                 train_x,
                 train_y,
                 likelihood,
                 shape,
                 output_device,
                 use_ard=False,
                 use_priors=False,
                 kernel='rq',
                 mean_type='zero',
                 matern_nu=2.5,
                 heuristic_lengthscales=None,
                 lengthscale_prior_std=0.1):

        # Run constructor of superclass
        super(BatchedGP, self).__init__(train_x, train_y, likelihood)

        # Determine if using ARD
        ard_num_dims = None
        if use_ard:
            ard_num_dims = train_x.shape[-1]

        # Get input size
        input_size = train_x.shape[-1]
        self.shape = torch.Size([shape])

        # Get mean function and kernel
        M, K, kwargs = get_mean_and_kernel(kernel,
                                           mean_type,
                                           shape,
                                           input_size,
                                           is_composite=False,
                                           matern_nu=matern_nu)
        # Default priors work if targets standardized and features min-max normalized
        lengthscale_prior = None
        outputscale_prior = None

        # Now construct mean function and kernel
        self.mean_module = M
        self.base_kernel = K(batch_shape=self.shape,
                             ard_num_dims=ard_num_dims,
                             lengthscale_prior=lengthscale_prior,
                             **kwargs)
        self.covar_module = ScaleKernel(self.base_kernel,
                                        batch_shape=self.shape,
                                        output_device=output_device,
                                        outputscale_prior=outputscale_prior)

        # Set priors, if applicable
        # Guard against dereferencing a None prior when use_priors is set.
        if lengthscale_prior is not None:
            self.covar_module.base_kernel.lengthscale = lengthscale_prior.mean
Example #28
 def __init__(self, train_x):
     super(GPClassificationModel, self).__init__(train_x)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
     self.covar_module = ScaleKernel(
         RBFKernel(log_lengthscale_prior=SmoothedBoxPrior(
             exp(-5), exp(6), sigma=0.1, log_transform=True)),
         log_outputscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1,
                                                log_transform=True),
     )
Example #29
 def __init__(self, train_inputs, train_targets, inducing_points,
              likelihood):
     super().__init__(train_inputs, train_targets, likelihood)
     if train_inputs.ndim == 2:
         dims = train_inputs.shape[1]
     else:
         dims = 1
     self.mean_module = gpytorch.means.ZeroMean()
     self.base_cov_module = ScaleKernel(RBFKernel(ard_num_dims=dims))
     self.covar_module = NystromKernel(self.base_cov_module,
                                       inducing_points, likelihood)
Example #30
 def __init__(self, train_X, train_Y):
     self._validate_tensor_args(train_X, train_Y)
     self._set_dimensions(train_X=train_X, train_Y=train_Y)
     train_X, train_Y, _ = self._transform_tensor_args(X=train_X, Y=train_Y)
     likelihood = GaussianLikelihood(batch_shape=self._aug_batch_shape)
     super().__init__(train_X, train_Y, likelihood)
     self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
     self.covar_module = ScaleKernel(
         RBFKernel(batch_shape=self._aug_batch_shape),
         batch_shape=self._aug_batch_shape,
     )
     self.to(train_X)
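Finally, every ExactGP constructor shown on this page needs a matching forward method to be usable; a generic sketch (not taken from any of the sources above):

import gpytorch

# Generic completion for the models above: add this method to the class body.
def forward(self, x):
    mean_x = self.mean_module(x)
    covar_x = self.covar_module(x)
    return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)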