Example #1
    def _transform_tensor_args(self,
                               X: Tensor,
                               Y: Tensor,
                               Yvar: Optional[Tensor] = None
                               ) -> Tuple[Tensor, Tensor, Optional[Tensor]]:
        r"""Transforms tensor arguments: for single output models, the output
        dimension is squeezed and for multi-output models, the output dimension is
        transformed into the left-most batch dimension.

        Args:
            X: A `n x d` or `batch_shape x n x d` (batch mode) tensor of training
                features.
            Y: A `n x m` or `batch_shape x n x m` (batch mode) tensor of
                training observations.
            Yvar: A `n x m` or `batch_shape x n x m` (batch mode) tensor of
                observed measurement noise. Note: this will be None when using a model
                that infers the noise level (e.g. a `SingleTaskGP`).

        Returns:
            3-element tuple containing

            - An `input_batch_shape x (m) x n x d` tensor of training features.
            - A `target_batch_shape x (m) x n` tensor of training observations.
            - A `target_batch_shape x (m) x n` tensor of observed measurement
                noise (or None).
        """
        if self._num_outputs > 1:
            return multioutput_to_batch_mode_transform(
                train_X=X,
                train_Y=Y,
                train_Yvar=Yvar,
                num_outputs=self._num_outputs)
        return X, Y.squeeze(-1), None if Yvar is None else Yvar.squeeze(-1)
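
The shape contract documented above is easiest to see on concrete tensors. A minimal sketch, assuming `multioutput_to_batch_mode_transform` is importable from `botorch.models.utils` (the path in recent BoTorch versions):

    # Minimal sketch of the m > 1 shape convention; the import path is an assumption.
    import torch
    from botorch.models.utils import multioutput_to_batch_mode_transform

    X = torch.rand(10, 3)            # n x d
    Y = torch.rand(10, 2)            # n x m, with m = 2 outputs
    Yvar = torch.full_like(Y, 0.1)   # observed noise, same shape as Y

    X_b, Y_b, Yvar_b = multioutput_to_batch_mode_transform(
        train_X=X, train_Y=Y, num_outputs=2, train_Yvar=Yvar
    )
    print(X_b.shape)     # torch.Size([2, 10, 3])  -- m x n x d
    print(Y_b.shape)     # torch.Size([2, 10])     -- m x n
    print(Yvar_b.shape)  # torch.Size([2, 10])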
Example #2
    def condition_on_observations(
        self, X: Tensor, Y: Tensor, **kwargs: Any
    ) -> BatchedMultiOutputGPyTorchModel:
        r"""Condition the model on new observations.

        Args:
            X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension of
                the feature space, `n'` is the number of points per batch, and
                `batch_shape` is the batch shape (must be compatible with the
                batch shape of the model).
            Y: A `batch_shape' x n' x m`-dim Tensor, where `m` is the number of
                model outputs, `n'` is the number of points per batch, and
                `batch_shape'` is the batch shape of the observations.
                `batch_shape'` must be broadcastable to `batch_shape` using
                standard broadcasting semantics. If `Y` has fewer batch dimensions
                than `X`, it is assumed that the missing batch dimensions are
                the same for all `Y`.

        Returns:
            A `BatchedMultiOutputGPyTorchModel` object of the same type with
            `n + n'` training examples, representing the original model
            conditioned on the new observations `(X, Y)` (and possibly noise
            observations passed in via kwargs).

        Example:
            >>> train_X = torch.rand(20, 2)
            >>> train_Y = torch.cat(
            >>>     [torch.sin(train_X[:, :1]), torch.cos(train_X[:, 1:])], -1
            >>> )
            >>> model = SingleTaskGP(train_X, train_Y)
            >>> new_X = torch.rand(5, 2)
            >>> new_Y = torch.cat([torch.sin(new_X[:, :1]), torch.cos(new_X[:, 1:])], -1)
            >>> model = model.condition_on_observations(X=new_X, Y=new_Y)
        """
        noise = kwargs.get("noise")
        if hasattr(self, "outcome_transform"):
            # we need to apply transforms before shifting batch indices around
            Y, noise = self.outcome_transform(Y, noise)
        self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False)
        inputs = X
        if self._num_outputs > 1:
            inputs, targets, noise = multioutput_to_batch_mode_transform(
                train_X=X, train_Y=Y, num_outputs=self._num_outputs, train_Yvar=noise
            )
            # `multioutput_to_batch_mode_transform` removes the output dimension,
            # but `condition_on_observations` expects it, so add it back here
            targets = targets.unsqueeze(-1)
            if noise is not None:
                noise = noise.unsqueeze(-1)
        else:
            targets = Y
        if noise is not None:
            kwargs.update({"noise": noise})
        fantasy_model = super().condition_on_observations(X=inputs, Y=targets, **kwargs)
        fantasy_model._input_batch_shape = fantasy_model.train_targets.shape[
            : (-1 if self._num_outputs == 1 else -2)
        ]
        fantasy_model._aug_batch_shape = fantasy_model.train_targets.shape[:-1]
        return fantasy_model
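
A hedged end-to-end sketch of the two-output case handled above. Note that GPyTorch's `get_fantasy_model`, which backs `condition_on_observations`, requires a prediction pass first so that its caches exist; the sketch therefore computes a posterior before conditioning:

    # Sketch only: conditioning a two-output SingleTaskGP on new observations.
    import torch
    from botorch.models import SingleTaskGP

    train_X = torch.rand(20, 2)
    train_Y = torch.cat(
        [torch.sin(train_X[:, :1]), torch.cos(train_X[:, 1:])], dim=-1
    )  # 20 x 2
    model = SingleTaskGP(train_X, train_Y)

    new_X = torch.rand(5, 2)
    new_Y = torch.cat([torch.sin(new_X[:, :1]), torch.cos(new_X[:, 1:])], dim=-1)
    model.posterior(new_X)  # populate prediction caches before fantasizing
    model = model.condition_on_observations(X=new_X, Y=new_Y)
    print(model.train_targets.shape)  # expected: torch.Size([2, 25]), i.e. m x (n + n')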
Example #3
 def test_multioutput_to_batch_mode_transform(self, cuda=False):
     for double in (False, True):
          tkwargs = {
              "device": torch.device("cuda") if cuda else torch.device("cpu"),
              "dtype": torch.double if double else torch.float,
          }
         n = 3
         num_outputs = 1
         train_X = torch.rand(n, 1, **tkwargs)
         train_Y = torch.rand(n, **tkwargs)
         train_Yvar = torch.rand(n, **tkwargs)
         # num_outputs = 1 and train_Y has shape `n`
         X_out, Y_out, Yvar_out = multioutput_to_batch_mode_transform(
             train_X=train_X,
             train_Y=train_Y,
             num_outputs=num_outputs,
             train_Yvar=train_Yvar,
         )
         self.assertTrue(torch.equal(X_out, train_X))
         self.assertTrue(torch.equal(Y_out, train_Y))
         self.assertTrue(torch.equal(Yvar_out, train_Yvar))
         # num_outputs = 1 and train_Y has shape `n x 1`
         X_out, Y_out, Yvar_out = multioutput_to_batch_mode_transform(
             train_X=train_X,
             train_Y=train_Y.view(-1, 1),
             num_outputs=num_outputs,
             train_Yvar=train_Yvar.view(-1, 1),
         )
         self.assertTrue(torch.equal(X_out, train_X))
         self.assertTrue(torch.equal(Y_out, train_Y))
         self.assertTrue(torch.equal(Yvar_out, train_Yvar))
         # num_outputs > 1
         num_outputs = 2
         train_Y = torch.rand(n, num_outputs, **tkwargs)
         train_Yvar = torch.rand(n, num_outputs, **tkwargs)
         X_out, Y_out, Yvar_out = multioutput_to_batch_mode_transform(
             train_X=train_X,
             train_Y=train_Y,
             num_outputs=num_outputs,
             train_Yvar=train_Yvar,
         )
         expected_X_out = train_X.unsqueeze(0).expand(num_outputs, -1, 1)
         self.assertTrue(torch.equal(X_out, expected_X_out))
         self.assertTrue(torch.equal(Y_out, train_Y.transpose(0, 1)))
         self.assertTrue(torch.equal(Yvar_out, train_Yvar.transpose(0, 1)))
Example #4
 def __init__(self, train_X, train_Y):
     train_X, train_Y, _ = self._set_dimensions(train_X=train_X, train_Y=train_Y)
     train_X, train_Y, _ = multioutput_to_batch_mode_transform(
         train_X=train_X, train_Y=train_Y, num_outputs=self._num_outputs
     )
     likelihood = GaussianLikelihood(batch_shape=self._aug_batch_shape)
     super().__init__(train_X, train_Y, likelihood)
     self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
     self.covar_module = ScaleKernel(
         RBFKernel(batch_shape=self._aug_batch_shape),
         batch_shape=self._aug_batch_shape,
     )
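
The snippet above omits the surrounding class. A hedged reconstruction of the missing pieces, following the pattern BoTorch's own test models use; the class name `SimpleBatchedModel` and the import paths are assumptions, and `_set_dimensions` returning tensors matches the (older) API the snippet itself relies on:

    # Hedged reconstruction; class name and import paths are assumptions.
    import torch
    from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
    from botorch.models.utils import multioutput_to_batch_mode_transform
    from gpytorch.distributions import MultivariateNormal
    from gpytorch.kernels import RBFKernel, ScaleKernel
    from gpytorch.likelihoods import GaussianLikelihood
    from gpytorch.means import ConstantMean
    from gpytorch.models import ExactGP

    class SimpleBatchedModel(BatchedMultiOutputGPyTorchModel, ExactGP):
        def __init__(self, train_X, train_Y):
            # __init__ body as shown in Example #4 above
            train_X, train_Y, _ = self._set_dimensions(train_X=train_X, train_Y=train_Y)
            train_X, train_Y, _ = multioutput_to_batch_mode_transform(
                train_X=train_X, train_Y=train_Y, num_outputs=self._num_outputs
            )
            likelihood = GaussianLikelihood(batch_shape=self._aug_batch_shape)
            super().__init__(train_X, train_Y, likelihood)
            self.mean_module = ConstantMean(batch_shape=self._aug_batch_shape)
            self.covar_module = ScaleKernel(
                RBFKernel(batch_shape=self._aug_batch_shape),
                batch_shape=self._aug_batch_shape,
            )

        def forward(self, x):
            # standard ExactGP forward; the modules carry the per-output batch dim
            return MultivariateNormal(self.mean_module(x), self.covar_module(x))

    model = SimpleBatchedModel(torch.rand(8, 1), torch.rand(8, 2))  # two outputs
    posterior = model.posterior(torch.rand(4, 1))
    print(posterior.mean.shape)  # expected: torch.Size([4, 2])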
Example #5
 def test_multioutput_to_batch_mode_transform(self):
     for dtype in (torch.float, torch.double):
         tkwargs = {"device": self.device, "dtype": dtype}
         n = 3
         num_outputs = 2
         train_X = torch.rand(n, 1, **tkwargs)
         train_Y = torch.rand(n, num_outputs, **tkwargs)
         train_Yvar = torch.rand(n, num_outputs, **tkwargs)
         X_out, Y_out, Yvar_out = multioutput_to_batch_mode_transform(
             train_X=train_X,
             train_Y=train_Y,
             num_outputs=num_outputs,
             train_Yvar=train_Yvar,
         )
         expected_X_out = train_X.unsqueeze(0).expand(num_outputs, -1, 1)
         self.assertTrue(torch.equal(X_out, expected_X_out))
         self.assertTrue(torch.equal(Y_out, train_Y.transpose(0, 1)))
         self.assertTrue(torch.equal(Yvar_out, train_Yvar.transpose(0, 1)))
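
To run a test like this outside the BoTorch repository, the base class supplying `self.device` has to come from somewhere. A hedged harness, assuming `BotorchTestCase` from `botorch.utils.testing` (which sets `self.device` in recent versions), reduced here to the multi-output case:

    # Hedged standalone harness; import paths assume a recent BoTorch layout.
    import unittest
    import torch
    from botorch.models.utils import multioutput_to_batch_mode_transform
    from botorch.utils.testing import BotorchTestCase

    class TestMultiOutputTransform(BotorchTestCase):
        def test_multioutput_to_batch_mode_transform(self):
            # condensed from Example #5 above: the num_outputs > 1 case
            train_X = torch.rand(3, 1, device=self.device)
            train_Y = torch.rand(3, 2, device=self.device)
            X_out, Y_out, _ = multioutput_to_batch_mode_transform(
                train_X=train_X, train_Y=train_Y, num_outputs=2
            )
            self.assertTrue(torch.equal(X_out, train_X.unsqueeze(0).expand(2, -1, 1)))
            self.assertTrue(torch.equal(Y_out, train_Y.transpose(0, 1)))

    if __name__ == "__main__":
        unittest.main()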