Example #1
 def __init__(
     self,
     num_dimensions,
     variance_prior=None,
     offset_prior=None,
     eps=1e-5,
     active_dims=None,
     variance_bounds=None,
     offset_bounds=None,
 ):
     super(LinearKernel, self).__init__(active_dims=active_dims)
     self.eps = eps
     variance_prior = _bounds_to_prior(prior=variance_prior,
                                       bounds=variance_bounds,
                                       log_transform=False)
     self.register_parameter(name="variance",
                             parameter=torch.nn.Parameter(torch.zeros(1)),
                             prior=variance_prior)
     offset_prior = _bounds_to_prior(prior=offset_prior,
                                     bounds=offset_bounds,
                                     log_transform=False)
     self.register_parameter(name="offset",
                             parameter=torch.nn.Parameter(
                                 torch.zeros(1, 1, num_dimensions)),
                             prior=offset_prior)
Example #2
 def __init__(
     self,
     num_dimensions,
     variance_prior=None,
     offset_prior=None,
     active_dims=None,
     variance_bounds=None,
     offset_bounds=None,
 ):
     """
     Args:
         num_dimensions (int): Number of data dimensions to expect. This is necessary to create the offset parameter.
         variance_prior (:obj:`gpytorch.priors.Prior`): Prior over the variance parameter (default `None`).
         offset_prior (:obj:`gpytorch.priors.Prior`): Prior over the offset parameter (default `None`).
         active_dims (list): List of data dimensions to operate on. `len(active_dims)` should equal `num_dimensions`.
         variance_bounds (tuple, deprecated): Min and max value for the variance parameter. Deprecated, and now
                                              creates a :obj:`gpytorch.priors.SmoothedBoxPrior`.
          offset_bounds (tuple, deprecated): Min and max value for the offset parameter. Deprecated, and now creates a
                                             :obj:`gpytorch.priors.SmoothedBoxPrior`.
     """
     super(LinearKernel, self).__init__(active_dims=active_dims)
     variance_prior = _bounds_to_prior(prior=variance_prior, bounds=variance_bounds, log_transform=False)
     self.register_parameter(name="variance", parameter=torch.nn.Parameter(torch.zeros(1)), prior=variance_prior)
     offset_prior = _bounds_to_prior(prior=offset_prior, bounds=offset_bounds, log_transform=False)
     self.register_parameter(
         name="offset", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)), prior=offset_prior
     )
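A minimal usage sketch for the constructor above. The import paths and the GammaPrior choice are illustrative assumptions; only the keyword arguments come from the signature shown here:

    from gpytorch.kernels import LinearKernel
    from gpytorch.priors import GammaPrior

    # Preferred: pass a prior directly (GammaPrior is an arbitrary illustrative choice).
    kernel = LinearKernel(num_dimensions=3, variance_prior=GammaPrior(1, 1))

    # Deprecated path: bounds are converted to a SmoothedBoxPrior by _bounds_to_prior.
    legacy_kernel = LinearKernel(num_dimensions=3, variance_bounds=(-10, 10))

    # Both register a scalar `variance` and a per-dimension `offset`.
    print(kernel.variance.shape)  # torch.Size([1])
    print(kernel.offset.shape)    # torch.Size([1, 1, 3])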
Example #3
 def __init__(
     self,
     has_lengthscale=False,
     ard_num_dims=None,
     batch_size=1,
     active_dims=None,
     log_lengthscale_bounds=None,
     log_lengthscale_prior=None,
     eps=1e-6,
 ):
     super(Kernel, self).__init__()
     if active_dims is not None and not torch.is_tensor(active_dims):
         active_dims = torch.tensor(active_dims, dtype=torch.long)
     self.active_dims = active_dims
     self.ard_num_dims = ard_num_dims
     self.batch_size = batch_size
     self.__has_lengthscale = has_lengthscale
     if has_lengthscale:
         lengthscale_num_dims = 1 if ard_num_dims is None else ard_num_dims
         log_lengthscale_prior = _bounds_to_prior(
             prior=log_lengthscale_prior, bounds=log_lengthscale_bounds)
         self.register_parameter(
             name="log_lengthscale",
             parameter=torch.nn.Parameter(
                 torch.zeros(batch_size, 1, lengthscale_num_dims)),
             prior=log_lengthscale_prior,
         )
Example #4
 def __init__(self, log_noise_prior=None, log_noise_bounds=None):
     # TODO: Remove deprecated log_noise_bounds kwarg
     log_noise_prior = _bounds_to_prior(prior=log_noise_prior,
                                        bounds=log_noise_bounds)
     super(GaussianLikelihood, self).__init__()
     self.register_parameter(name="log_noise",
                             parameter=torch.nn.Parameter(torch.zeros(1)),
                             prior=log_noise_prior)
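A short construction sketch, assuming the import path gpytorch.likelihoods.GaussianLikelihood for this version of the library:

    from gpytorch.likelihoods import GaussianLikelihood

    # Default: `log_noise` is registered as a scalar parameter with no prior.
    likelihood = GaussianLikelihood()
    print(likelihood.log_noise.shape)  # torch.Size([1])

    # Deprecated: the bounds kwarg is turned into a prior on log_noise by _bounds_to_prior.
    legacy = GaussianLikelihood(log_noise_bounds=(-5, 5))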
Example #5
 def __init__(self, prior=None, batch_size=None, constant_bounds=None):
     # TODO: Remove deprecated bounds kwarg
     prior = _bounds_to_prior(prior=prior, bounds=constant_bounds, batch_size=batch_size, log_transform=False)
     super(ConstantMean, self).__init__()
     self.batch_size = batch_size
     self.register_parameter(
         name="constant", parameter=torch.nn.Parameter(torch.zeros(batch_size or 1, 1)), prior=prior
     )
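A sketch of the batching behavior: `batch_size` sets the leading dimension of the `constant` parameter, as `torch.zeros(batch_size or 1, 1)` above shows (import path assumed to be gpytorch.means.ConstantMean):

    from gpytorch.means import ConstantMean

    mean = ConstantMean()                 # constant has shape (1, 1)
    batched = ConstantMean(batch_size=4)  # one constant per batch entry: shape (4, 1)
    print(mean.constant.shape, batched.constant.shape)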
Example #6
 def __init__(
     self,
     log_lengthscale_prior=None,
     log_period_length_prior=None,
     eps=1e-5,
     active_dims=None,
     log_lengthscale_bounds=None,
     log_period_length_bounds=None,
 ):
     log_period_length_prior = _bounds_to_prior(
         prior=log_period_length_prior, bounds=log_period_length_bounds)
     super(PeriodicKernel, self).__init__(
         has_lengthscale=True,
         active_dims=active_dims,
         log_lengthscale_prior=log_lengthscale_prior,
         log_lengthscale_bounds=log_lengthscale_bounds,
     )
     self.eps = eps
     self.register_parameter(name="log_period_length",
                             parameter=torch.nn.Parameter(
                                 torch.zeros(1, 1, 1)),
                             prior=log_period_length_prior)
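As with the other kernels, deprecated bounds kwargs are funneled through _bounds_to_prior; a brief construction sketch (assuming the import path gpytorch.kernels.PeriodicKernel):

    from gpytorch.kernels import PeriodicKernel

    # Deprecated bounds become priors over the log-scale parameters.
    kernel = PeriodicKernel(
        log_lengthscale_bounds=(-3, 3),
        log_period_length_bounds=(-1, 1),
    )
    print(kernel.log_lengthscale.shape)    # torch.Size([1, 1, 1])
    print(kernel.log_period_length.shape)  # torch.Size([1, 1, 1])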
Example #7
 def __init__(
     self,
     has_lengthscale=False,
     ard_num_dims=None,
     log_lengthscale_prior=None,
     active_dims=None,
     batch_size=1,
     log_lengthscale_bounds=None,
 ):
     """
     The base Kernel class handles both lengthscales and ARD.

     Args:
         has_lengthscale (bool): If True, we will register a :obj:`torch.nn.Parameter` named `log_lengthscale`.
         ard_num_dims (int): If not None, the `log_lengthscale` parameter will have this many entries.
         log_lengthscale_prior (:obj:`gpytorch.priors.Prior`): Prior over the log lengthscale.
         active_dims (list): List of data dimensions to evaluate this Kernel on.
         batch_size (int): If training or testing multiple GPs simultaneously, this is how many lengthscales to
                           register.
         log_lengthscale_bounds (tuple): Deprecated min and max values for the lengthscales. If supplied, this
                                         now registers a :obj:`gpytorch.priors.SmoothedBoxPrior`.
     """
     super(Kernel, self).__init__()
     if active_dims is not None and not torch.is_tensor(active_dims):
         active_dims = torch.tensor(active_dims, dtype=torch.long)
     self.active_dims = active_dims
     self.ard_num_dims = ard_num_dims
     if has_lengthscale:
         lengthscale_num_dims = 1 if ard_num_dims is None else ard_num_dims
         log_lengthscale_prior = _bounds_to_prior(
             prior=log_lengthscale_prior, bounds=log_lengthscale_bounds)
         self.register_parameter(
             name="log_lengthscale",
             parameter=torch.nn.Parameter(
                 torch.zeros(batch_size, 1, lengthscale_num_dims)),
             prior=log_lengthscale_prior,
         )
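A sketch of the resulting parameter shapes, using only the constructor defined above (instantiating the base Kernel directly is just for illustration):

    k1 = Kernel(has_lengthscale=True)                                # shared lengthscale: (1, 1, 1)
    k2 = Kernel(has_lengthscale=True, ard_num_dims=5)                # ARD, one entry per dimension: (1, 1, 5)
    k3 = Kernel(has_lengthscale=True, ard_num_dims=5, batch_size=3)  # batched ARD: (3, 1, 5)
    print(k1.log_lengthscale.shape, k2.log_lengthscale.shape, k3.log_lengthscale.shape)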
Example #8
 def test_bounds_to_prior(self):
     prior = GammaPrior(1, 1)
     self.assertEqual(prior, _bounds_to_prior(prior=prior, bounds=None))
     self.assertIsInstance(_bounds_to_prior(prior=None, bounds=(-10, 10)),
                           SmoothedBoxPrior)
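The test pins down the helper's contract: an explicit prior passes through unchanged, and bounds without a prior produce a SmoothedBoxPrior. A minimal sketch consistent with that contract follows; this is an assumption for illustration, not the library's actual implementation (the real helper also threads through `log_transform` and `batch_size`, as the call sites above show):

    from gpytorch.priors import SmoothedBoxPrior

    def _bounds_to_prior_sketch(prior=None, bounds=None, log_transform=True, batch_size=None):
        """Hypothetical stand-in for gpytorch's _bounds_to_prior helper."""
        if prior is not None:
            # An explicit prior always wins; bounds are ignored.
            return prior
        if bounds is not None:
            # Deprecated bounds become a smoothed uniform prior over the interval.
            lower, upper = bounds
            return SmoothedBoxPrior(lower, upper)
        return None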