Example #1
 def __init__(self, train_x, train_y, likelihood):
     super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = MultitaskMean(ConstantMean(), num_tasks=2)
     self.base_kernel_list = [RBFKernel()]
     self.covar_module = LCMKernel(self.base_kernel_list,
                                   num_tasks=2,
                                   rank=1)
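These snippets show only `__init__`; an exact multitask GP also needs a `forward` method. A minimal sketch of the standard GPyTorch pattern (the module names match Example #1 above):

 def forward(self, x):
     mean_x = self.mean_module(x)    # n x num_tasks matrix of per-task means
     covar_x = self.covar_module(x)  # (n * num_tasks) x (n * num_tasks) Kronecker covariance
     return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)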
Example #2
 def create_mean(self):
     return MultitaskMean([
         ConstantMean(batch_shape=torch.Size([2, 3])),
         ZeroMean(),
         ZeroMean()
     ],
                          num_tasks=3)
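Evaluating a MultitaskMean on an n x d input returns an n x num_tasks matrix of per-task means. A quick self-contained check, as a sketch with plain (non-batch) means:

 import torch
 from gpytorch.means import ConstantMean, MultitaskMean, ZeroMean

 mean = MultitaskMean([ConstantMean(), ZeroMean(), ZeroMean()], num_tasks=3)
 x = torch.randn(10, 1)
 print(mean(x).shape)  # torch.Size([10, 3])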
Example #3
 def __init__(self, train_x, train_y, likelihood):
     super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
     # Renamed n_tasks= (pre-release GPyTorch spelling) to the current num_tasks= keyword.
     self.mean_module = MultitaskMean(ConstantMean(), num_tasks=2)
     self.data_covar_module = RBFKernel()
     self.covar_module = MultitaskKernel(self.data_covar_module,
                                         num_tasks=2,
                                         rank=1)
Example #4
 def __init__(self, train_inputs, train_targets, likelihood, batch_size=1):
     super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
     self.mean_module = MultitaskMean(
         ConstantMean(batch_size=batch_size, prior=gpytorch.priors.SmoothedBoxPrior(-1, 1)), num_tasks=2
     )
     if batch_size > 1:
         self.covar_module = MultitaskKernel(
             RBFKernel(
                 batch_size=batch_size,
                 lengthscale_prior=gpytorch.priors.NormalPrior(
                     loc=torch.zeros(batch_size, 1, 1), scale=torch.ones(batch_size, 1, 1)
                 ),
             ),
             num_tasks=2,
             rank=1,
         )
     else:
         self.covar_module = MultitaskKernel(
             RBFKernel(
                 lengthscale_prior=gpytorch.priors.NormalPrior(
                     loc=torch.zeros(batch_size, 1, 1), scale=torch.ones(batch_size, 1, 1)
                 ),
             ),
             num_tasks=2,
             rank=1,
         )
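Note: the batch_size= keyword passed to ConstantMean and RBFKernel here is the pre-1.0 GPyTorch API; current releases take batch_shape=torch.Size([batch_size]) instead. Example #7 below is the batch_shape-based version of the same pattern.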
Example #5
 def __init__(self, train_x, train_y, likelihood):
     super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = MultitaskMean(ConstantMean(), num_tasks=2)
     self.data_covar_module = GridInterpolationKernel(RBFKernel(),
                                                      grid_size=100,
                                                      num_dims=1)
     self.covar_module = MultitaskKernel(self.data_covar_module,
                                         num_tasks=2,
                                         rank=1)
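GridInterpolationKernel wraps the base kernel in SKI/KISS-GP structured interpolation, so the data covariance is approximated on a fixed grid (here 100 points in one dimension) for scalability before being combined with the task covariance.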
Example #6
 def setUp(self):
     self.mean = MultitaskMean(
         [ConstantMean(),
          ZeroMean(),
          ZeroMean(),
          ConstantMean()],
         num_tasks=4)
     self.mean.base_means[0].constant.data.fill_(5)
     self.mean.base_means[3].constant.data.fill_(7)
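This test passes an explicit list of per-task means and pins the two ConstantMean constants to fixed values via .data.fill_(); compare Example #10, where a single base mean is replicated across all tasks instead.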
Example #7
 def __init__(self, train_inputs, train_targets, likelihood, batch_shape=torch.Size()):
     super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
     self.mean_module = MultitaskMean(
         ConstantMean(batch_shape=batch_shape, prior=gpytorch.priors.SmoothedBoxPrior(-1, 1)), num_tasks=2
     )
     self.covar_module = MultitaskKernel(
         RBFKernel(
             batch_shape=batch_shape,
             lengthscale_prior=gpytorch.priors.NormalPrior(loc=torch.tensor(0.0), scale=torch.tensor(1.0)),
         ),
         num_tasks=2,
         rank=1,
     )
Example #8
    def __init__(self, input_size, target_size, device='cpu'):
        if device == 'gpu' and torch.cuda.is_available():
            self.device = torch.device('cuda:0')
        else:
            self.device = torch.device('cpu')

        self.input_size = input_size
        self.target_size = target_size

        _likelihood = MultitaskGaussianLikelihood(num_tasks=self.target_size)
        super(MultiTaskGPRegressor, self).__init__(train_inputs=None,
                                                   train_targets=None,
                                                   likelihood=_likelihood)

        self.mean_module = MultitaskMean(ZeroMean(), num_tasks=self.target_size)
        self.covar_module = MultitaskKernel(RBFKernel(), num_tasks=self.target_size, rank=1)

        self.input_trans = None
        self.target_trans = None
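Because this model is built with train_inputs=None and train_targets=None, data has to be attached before exact inference. A sketch of one way to do that with GPyTorch's standard ExactGP API (the tensors here are hypothetical):

 model = MultiTaskGPRegressor(input_size=3, target_size=2)
 x, y = torch.randn(100, 3), torch.randn(100, 2)
 model.set_train_data(inputs=x, targets=y, strict=False)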
Example #9
    def __init__(self, x: torch.Tensor, xe: torch.Tensor, y: torch.Tensor,
                 lik: GaussianLikelihood, **conf):
        super().__init__((x, xe), y.squeeze(), lik)
        mean = conf.get('mean', ConstantMean())
        kern = conf.get(
            'kern',
            ScaleKernel(MaternKernel(nu=1.5, ard_num_dims=x.shape[1]),
                        outputscale_prior=GammaPrior(0.5, 0.5)))
        kern_emb = conf.get('kern_emb', MaternKernel(nu=2.5))

        self.multi_task = y.shape[1] > 1
        self.mean = mean if not self.multi_task else MultitaskMean(
            mean, num_tasks=y.shape[1])
        if x.shape[1] > 0:
            self.kern = kern if not self.multi_task else MultitaskKernel(
                kern, num_tasks=y.shape[1])
        if xe.shape[1] > 0:
            assert 'num_uniqs' in conf
            num_uniqs = conf['num_uniqs']
            emb_sizes = conf.get('emb_sizes', None)
            self.emb_trans = EmbTransform(num_uniqs, emb_sizes=emb_sizes)
            self.kern_emb = kern_emb if not self.multi_task else MultitaskKernel(
                kern_emb, num_tasks=y.shape[1])
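This constructor splits the inputs into a continuous block x and a categorical block xe: the categorical columns go through an embedding transform (EmbTransform, configured by num_uniqs and optional emb_sizes) with their own kernel, and each kernel is wrapped in MultitaskKernel only when y has more than one output column.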
Example #10
 def setUp(self):
     self.mean = MultitaskMean(ConstantMean(), num_tasks=4)
     self.mean.base_means[0].constant.data.fill_(0)
     self.mean.base_means[1].constant.data.fill_(1)
     self.mean.base_means[2].constant.data.fill_(2)
     self.mean.base_means[3].constant.data.fill_(3)
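When MultitaskMean receives a single base mean rather than a list, it copies that module once per task, so the entries of base_means are independent modules whose constants can be initialized separately, as the four fill_ calls do here.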
Example #11
    def __init__(
        self,
        train_x,
        train_y,
        likelihood,
        rank,
        num_mixtures,
        X_scaler,
    ):
        super().__init__(train_x, train_y, likelihood)
        num_dims = train_x.shape[1]
        num_tasks = train_y.shape[1]

        self.mean_module = MultitaskMean(Square2DPolynomialMean(),
                                         num_tasks=num_tasks)

        xmax = X_scaler.inverse_transform(np.ones((1, 2)))[0]
        xmin = X_scaler.inverse_transform(np.zeros((1, 2)))[0]
        llcoeff = (xmax - xmin)[0] / (xmax - xmin)[1]

        lower_ll_mean = 0.1
        upper_ll_mean = 1e2
        lower_ll_scale = 0.1
        upper_ll_scale = 1e2
        lower_mean_constraint = np.array(
            [1. / upper_ll_mean, 1. / (upper_ll_mean * llcoeff)])
        upper_mean_constraint = np.array(
            [1. / lower_ll_mean, 1. / (lower_ll_mean * llcoeff)])
        lower_scale_constraint = np.array(
            [1. / upper_ll_scale, 1. / (upper_ll_scale * llcoeff)])
        upper_scale_constraint = np.array(
            [1. / lower_ll_scale, 1. / (lower_ll_scale * llcoeff)])

        lower_mean_constraint = tensor(
            lower_mean_constraint,
            dtype=torch.float32,
            device=DEVICE,
        )
        upper_mean_constraint = tensor(
            upper_mean_constraint,
            dtype=torch.float32,
            device=DEVICE,
        )
        lower_scale_constraint = tensor(
            lower_scale_constraint,
            dtype=torch.float32,
            device=DEVICE,
        )
        upper_scale_constraint = tensor(
            upper_scale_constraint,
            dtype=torch.float32,
            device=DEVICE,
        )

        covar_module = PatchedSpectralMixtureKernel(
            num_mixtures,
            ard_num_dims=num_dims,
            mixture_scales_constraint=Interval(
                lower_scale_constraint,
                upper_scale_constraint,
            ),
            mixture_means_constraint=Interval(
                lower_mean_constraint,
                upper_mean_constraint,
            ),
        )
        if IS_CUDA:
            covar_module = covar_module.cuda(device=DEVICE)

        covar_module.initialize_from_data(train_x, train_y)
        self.covar_module = MultitaskKernel(
            covar_module,
            num_tasks=num_tasks,
            rank=rank,
        )
        self.init_multitask_from_data(Y=train_y, rank=rank)
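Square2DPolynomialMean, PatchedSpectralMixtureKernel, DEVICE, IS_CUDA, and init_multitask_from_data are project-specific helpers rather than GPyTorch built-ins. The interval arithmetic converts lengthscale bounds into bounds on the spectral mixture means and scales (frequencies are inverse lengthscales, hence the 1/... terms), and initialize_from_data is the standard SpectralMixtureKernel initializer.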
Example #12
    def __init__(
        self,
        train_X: Tensor,
        train_Y: Tensor,
        likelihood: Optional[MultitaskGaussianLikelihood] = None,
        data_covar_module: Optional[Module] = None,
        task_covar_prior: Optional[Prior] = None,
        rank: Optional[int] = None,
        input_transform: Optional[InputTransform] = None,
        outcome_transform: Optional[OutcomeTransform] = None,
        **kwargs: Any,
    ) -> None:
        r"""Multi-task GP with Kronecker structure, using a simple ICM kernel.

        Args:
            train_X: A `batch_shape x n x d` tensor of training features.
            train_Y: A `batch_shape x n x m` tensor of training observations.
            likelihood: A `MultitaskGaussianLikelihood`. If omitted, uses a
                `MultitaskGaussianLikelihood` with a `GammaPrior(1.1, 0.05)`
                noise prior.
            data_covar_module: The module computing the covariance (Kernel) matrix
                in data space. If omitted, use a `MaternKernel`.
            task_covar_prior: A Prior on the task covariance matrix. Must operate
                on p.s.d. matrices. A common prior for this is the `LKJ` prior. If
                omitted, uses `LKJCovariancePrior` with `eta` parameter as specified
                in the keyword arguments (if not specified, use `eta=1.5`).
            rank: The rank of the ICM kernel. If omitted, use a full rank kernel.
            kwargs: Additional arguments to override default settings of priors,
                including:
                - eta: The eta parameter on the default LKJ task_covar_prior.
                A value of 1.0 is uninformative, values <1.0 favor stronger
                correlations (in magnitude), correlations vanish as eta -> inf.
                - sd_prior: A scalar prior over nonnegative numbers, which is used
                for the default LKJCovariancePrior task_covar_prior.
                - likelihood_rank: The rank of the task covariance matrix to fit.
                Defaults to 0 (which corresponds to a diagonal covariance matrix).

        Example:
            >>> train_X = torch.rand(10, 2)
            >>> train_Y = torch.cat([f_1(X), f_2(X)], dim=-1)
            >>> model = KroneckerMultiTaskGP(train_X, train_Y)
        """
        with torch.no_grad():
            transformed_X = self.transform_inputs(
                X=train_X, input_transform=input_transform)
        if outcome_transform is not None:
            train_Y, _ = outcome_transform(train_Y)

        self._validate_tensor_args(X=transformed_X, Y=train_Y)
        self._num_outputs = train_Y.shape[-1]
        batch_shape, ard_num_dims = train_X.shape[:-2], train_X.shape[-1]
        num_tasks = train_Y.shape[-1]

        if rank is None:
            rank = num_tasks
        if likelihood is None:
            noise_prior = GammaPrior(1.1, 0.05)
            noise_prior_mode = (noise_prior.concentration -
                                1) / noise_prior.rate
            likelihood = MultitaskGaussianLikelihood(
                num_tasks=num_tasks,
                batch_shape=batch_shape,
                noise_prior=noise_prior,
                noise_constraint=GreaterThan(
                    MIN_INFERRED_NOISE_LEVEL,
                    transform=None,
                    initial_value=noise_prior_mode,
                ),
                rank=kwargs.get("likelihood_rank", 0),
            )
        if task_covar_prior is None:
            task_covar_prior = LKJCovariancePrior(
                n=num_tasks,
                eta=torch.tensor(kwargs.get("eta", 1.5)).to(train_X),
                sd_prior=kwargs.get(
                    "sd_prior",
                    SmoothedBoxPrior(math.exp(-6), math.exp(1.25), 0.05),
                ),
            )
        super().__init__(train_X, train_Y, likelihood)
        self.mean_module = MultitaskMean(
            base_means=ConstantMean(batch_shape=batch_shape),
            num_tasks=num_tasks)
        if data_covar_module is None:
            data_covar_module = MaternKernel(
                nu=2.5,
                ard_num_dims=ard_num_dims,
                lengthscale_prior=GammaPrior(3.0, 6.0),
                batch_shape=batch_shape,
            )

        self.covar_module = MultitaskKernel(
            data_covar_module=data_covar_module,
            num_tasks=num_tasks,
            rank=rank,
            batch_shape=batch_shape,
            task_covar_prior=task_covar_prior,
        )

        if outcome_transform is not None:
            self.outcome_transform = outcome_transform
        if input_transform is not None:
            self.input_transform = input_transform
        self.to(train_X)
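This is BoTorch's Kronecker-structured multitask constructor: the transforms, _validate_tensor_args, and MIN_INFERRED_NOISE_LEVEL come from BoTorch, while the mean and covariance modules are the same MultitaskMean/MultitaskKernel pair as in the simpler examples. Its main additions are the LKJ prior on the task covariance and a full-rank ICM kernel by default.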
Example #13
 def __init__(self, train_x, train_y, likelihood):
     super().__init__(train_x, train_y, likelihood)
     self.mean_module = MultitaskMean(ConstantMean(), num_tasks=2)
     self.covar_module = MultitaskKernel(RBFKernel(), num_tasks=2, rank=1)
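Example #13 is the minimal form. A self-contained sketch of how such a model is typically trained; the synthetic data and hyperparameters are illustrative only:

 import torch
 import gpytorch
 from gpytorch.means import ConstantMean, MultitaskMean
 from gpytorch.kernels import RBFKernel, MultitaskKernel

 class MultitaskGPModel(gpytorch.models.ExactGP):
     def __init__(self, train_x, train_y, likelihood):
         super().__init__(train_x, train_y, likelihood)
         self.mean_module = MultitaskMean(ConstantMean(), num_tasks=2)
         self.covar_module = MultitaskKernel(RBFKernel(), num_tasks=2, rank=1)

     def forward(self, x):
         return gpytorch.distributions.MultitaskMultivariateNormal(
             self.mean_module(x), self.covar_module(x))

 # Synthetic two-task data: a sine and a cosine over [0, 1].
 train_x = torch.linspace(0, 1, 50)
 train_y = torch.stack([torch.sin(train_x * 6.28), torch.cos(train_x * 6.28)], dim=-1)

 likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=2)
 model = MultitaskGPModel(train_x, train_y, likelihood)

 # Maximize the exact marginal log likelihood with Adam.
 model.train(); likelihood.train()
 optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
 mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
 for _ in range(50):
     optimizer.zero_grad()
     loss = -mll(model(train_x), train_y)
     loss.backward()
     optimizer.step()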
Example #14
 def create_mean(self):
     return MultitaskMean(
         [ConstantMean(), ZeroMean(), ZeroMean()],
         num_tasks=3)
Example #15
 def __init__(self, train_x, train_y, likelihood, num_tasks=2):
     super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = MultitaskMean(ConstantMean(), num_tasks=num_tasks)
     self.covar_module = MultitaskKernel(MaternKernel(),
                                         num_tasks=num_tasks)
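When rank is omitted, as in this final snippet, MultitaskKernel defaults to rank=1: the learned task covariance is a rank-one factor plus a diagonal term from the underlying IndexKernel.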