Example #1
# Imports required by this excerpt:
from torch import Tensor

from botorch.acquisition.multi_objective.objective import (
    AnalyticMultiOutputObjective,
)
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.models.transforms.outcome import Standardize
from botorch.posteriors.gpytorch import GPyTorchPosterior


class UnstandardizeAnalyticMultiOutputObjective(AnalyticMultiOutputObjective):
    r"""Objective that unstandardizes the posterior.

    TODO: remove this when MultiTask models support outcome transforms.

    Example:
        >>> unstd_objective = UnstandardizeAnalyticMultiOutputObjective(Y_mean, Y_std)
        >>> unstd_posterior = unstd_objective(posterior)
    """
    def __init__(self, Y_mean: Tensor, Y_std: Tensor) -> None:
        r"""Initialize objective.

        Args:
            Y_mean: `m`-dim tensor of outcome means
            Y_std: `m`-dim tensor of outcome standard deviations

        """
        if Y_mean.ndim > 1 or Y_std.ndim > 1:
            raise BotorchTensorDimensionError(
                "Y_mean and Y_std must both be 1-dimensional, but got "
                f"{Y_mean.ndim} and {Y_std.ndim}")
        super().__init__()
        self.outcome_transform = Standardize(m=Y_mean.shape[0]).to(Y_mean)
        Y_std_unsqueezed = Y_std.unsqueeze(0)
        self.outcome_transform.means = Y_mean.unsqueeze(0)
        self.outcome_transform.stdvs = Y_std_unsqueezed
        self.outcome_transform._stdvs_sq = Y_std_unsqueezed.pow(2)
        self.outcome_transform.eval()

    def forward(self, posterior: GPyTorchPosterior) -> GPyTorchPosterior:
        return self.outcome_transform.untransform_posterior(posterior)
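
A minimal usage sketch (hypothetical numbers; in practice `posterior` would come from a fitted multi-output model via model.posterior(test_X), and Y_mean / Y_std from the raw training targets):

import torch
from gpytorch.distributions import MultitaskMultivariateNormal
from botorch.posteriors.gpytorch import GPyTorchPosterior

# Hypothetical per-outcome statistics for m = 2 outcomes.
Y_mean = torch.tensor([1.0, -2.0])
Y_std = torch.tensor([0.5, 3.0])
unstd_objective = UnstandardizeAnalyticMultiOutputObjective(Y_mean, Y_std)

# Stand-in posterior over q = 4 points and m = 2 outcomes; a fitted
# multi-task model would produce this via `model.posterior(test_X)`.
mean = torch.zeros(4, 2)
covar = torch.eye(8)  # (q * m) x (q * m) covariance
posterior = GPyTorchPosterior(MultitaskMultivariateNormal(mean, covar))

# Means shift by Y_mean and variances scale by Y_std ** 2.
unstd_posterior = unstd_objective(posterior)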
Example #2
    def test_unstandardize_mo_objective(self):
        for objective_class in (
                UnstandardizeMCMultiOutputObjective,
                UnstandardizeAnalyticMultiOutputObjective,
        ):
            Y_mean = torch.ones(2)
            Y_std = torch.ones(2)
            with self.assertRaises(BotorchTensorDimensionError):
                objective_class(Y_mean=Y_mean.unsqueeze(0), Y_std=Y_std)
            with self.assertRaises(BotorchTensorDimensionError):
                objective_class(Y_mean=Y_mean, Y_std=Y_std.unsqueeze(0))
            with self.assertRaises(BotorchTensorDimensionError):
                objective_class(Y_mean=Y_mean, Y_std=Y_std, outcomes=[0])
            with self.assertRaises(BotorchTensorDimensionError):
                objective_class(Y_mean=Y_mean, Y_std=Y_std, outcomes=[0, 1, 2])
            objective = objective_class(Y_mean=Y_mean, Y_std=Y_std)
            for batch_shape, m, outcomes, dtype in itertools.product(
                ([], [3]), (2, 3), (None, [-2, -1]),
                (torch.float, torch.double)):
                Y_mean = torch.rand(m, dtype=dtype, device=self.device)
                Y_std = torch.rand(m, dtype=dtype,
                                   device=self.device).clamp_min(1e-3)
                objective = objective_class(Y_mean=Y_mean,
                                            Y_std=Y_std,
                                            outcomes=outcomes)
                if objective_class == UnstandardizeAnalyticMultiOutputObjective:
                    if outcomes is None:
                        # passing outcomes is not currently supported
                        mean = torch.rand(2, m)
                        variance = torch.rand(2, m)
                        mock_posterior = MockPosterior(mean=mean,
                                                       variance=variance)
                        tf_posterior = objective(mock_posterior)
                        tf = Standardize(m=m)
                        tf.means = Y_mean
                        tf.stdvs = Y_std
                        tf._stdvs_sq = Y_std.pow(2)
                        tf.eval()
                        expected_posterior = tf.untransform_posterior(
                            mock_posterior)
                        self.assertTrue(
                            torch.equal(tf_posterior.mean,
                                        expected_posterior.mean))
                        self.assertTrue(
                            torch.equal(tf_posterior.variance,
                                        expected_posterior.variance))
                else:
                    samples = torch.rand(
                        *batch_shape, 2, m, dtype=dtype, device=self.device
                    )
                    obj_expected = samples * Y_std.to(dtype=dtype) + Y_mean.to(
                        dtype=dtype)
                    if outcomes is not None:
                        obj_expected = obj_expected[..., outcomes]
                    self.assertTrue(
                        torch.equal(objective(samples), obj_expected))
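
For context, the MC branch above validates the objective against a direct rescaling of the samples. A minimal standalone sketch of that reference computation (toy shapes, not the library source):

import torch

# Rescale a `sample_shape x batch_shape x q x m` sample tensor outcome-wise,
# then optionally keep a subset of the m outcomes.
samples = torch.rand(3, 2, 4)              # 3 MC samples, q = 2, m = 4
Y_mean = torch.rand(4)
Y_std = torch.rand(4).clamp_min(1e-3)
unstandardized = samples * Y_std + Y_mean  # broadcasts over the last dim
subset = unstandardized[..., [-2, -1]]     # as with outcomes=[-2, -1]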
Example #3
# Imports required by this excerpt:
from typing import List, Optional

from torch import Tensor

from botorch.acquisition.multi_objective.objective import (
    AnalyticMultiOutputObjective,
)
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.models.transforms.outcome import Standardize
from botorch.posteriors.gpytorch import GPyTorchPosterior


class UnstandardizeAnalyticMultiOutputObjective(AnalyticMultiOutputObjective):
    r"""Objective that unstandardizes the posterior.

    TODO: remove this when MultiTask models support outcome transforms.

    Example:
        >>> unstd_objective = UnstandardizeAnalyticMultiOutputObjective(Y_mean, Y_std)
        >>> unstd_posterior = unstd_objective(posterior)
    """
    def __init__(self,
                 Y_mean: Tensor,
                 Y_std: Tensor,
                 outcomes: Optional[List[int]] = None) -> None:
        r"""Initialize objective.

        Args:
            Y_mean: `m`-dim tensor of outcome means
            Y_std: `m`-dim tensor of outcome standard deviations
            outcomes: A list of `m' <= m` indices that specifies which of the `m` model
                outputs should be considered as the outcomes for MOO. If omitted, use
                all model outcomes. Typically used for constrained optimization.

        """
        if Y_mean.ndim > 1 or Y_std.ndim > 1:
            raise BotorchTensorDimensionError(
                "Y_mean and Y_std must both be 1-dimensional, but got "
                f"{Y_mean.ndim} and {Y_std.ndim}")
        if outcomes is not None:
            if len(outcomes) < 2:
                raise BotorchTensorDimensionError(
                    "Must specify at least two outcomes for MOO.")
            elif len(outcomes) > Y_mean.shape[-1]:
                raise BotorchTensorDimensionError(
                    f"Cannot specify more ({len(outcomes)}) outcomes that present in "
                    f"the normalization inputs ({Y_mean.shape[-1]}).")
        super().__init__()
        self.outcome_transform = Standardize(m=Y_mean.shape[0],
                                             outputs=outcomes).to(Y_mean)
        Y_std_unsqueezed = Y_std.unsqueeze(0)
        self.outcome_transform.means = Y_mean.unsqueeze(0)
        self.outcome_transform.stdvs = Y_std_unsqueezed
        self.outcome_transform._stdvs_sq = Y_std_unsqueezed.pow(2)
        self.outcome_transform.eval()

    def forward(self, posterior: GPyTorchPosterior) -> GPyTorchPosterior:
        return self.outcome_transform.untransform_posterior(posterior)
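
A brief sketch of the constructor's validation (hypothetical values). Note that, per the tests, Standardize cannot yet untransform a posterior for a subset of outputs, so calling forward() on an objective built with `outcomes` would currently raise NotImplementedError:

import torch

Y_mean, Y_std = torch.zeros(3), torch.ones(3)
# Valid: pick two of the three standardized outcomes for MOO.
obj = UnstandardizeAnalyticMultiOutputObjective(Y_mean, Y_std, outcomes=[0, 2])
# Raises BotorchTensorDimensionError: MOO needs at least two outcomes.
# UnstandardizeAnalyticMultiOutputObjective(Y_mean, Y_std, outcomes=[0])
# Raises BotorchTensorDimensionError: more outcomes than were standardized.
# UnstandardizeAnalyticMultiOutputObjective(Y_mean, Y_std, outcomes=[0, 1, 2, 3])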
Example #4
    def test_standardize(self):

        # test error on incompatible dim
        tf = Standardize(m=1)
        with self.assertRaises(RuntimeError):
            tf(torch.zeros(3, 2, device=self.device), None)
        # test error on incompatible batch shape
        with self.assertRaises(RuntimeError):
            tf(torch.zeros(2, 3, 1, device=self.device), None)

        ms = (1, 2)
        batch_shapes = (torch.Size(), torch.Size([2]))
        dtypes = (torch.float, torch.double)

        # test transform, untransform, untransform_posterior
        for m, batch_shape, dtype in itertools.product(ms, batch_shapes, dtypes):

            # test init
            tf = Standardize(m=m, batch_shape=batch_shape)
            self.assertTrue(tf.training)
            self.assertEqual(tf._m, m)
            self.assertIsNone(tf._outputs)
            self.assertEqual(tf._batch_shape, batch_shape)
            self.assertEqual(tf._min_stdv, 1e-8)

            # no observation noise
            Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
            Y_tf, Yvar_tf = tf(Y, None)
            self.assertTrue(tf.training)
            self.assertTrue(torch.all(Y_tf.mean(dim=-2).abs() < 1e-4))
            self.assertIsNone(Yvar_tf)
            tf.eval()
            self.assertFalse(tf.training)
            Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
            self.assertTrue(torch.allclose(Y_utf, Y))
            self.assertIsNone(Yvar_utf)

            # subset_output
            tf_subset = tf.subset_output(idcs=[0])
            Y_tf_subset, Yvar_tf_subset = tf_subset(Y[..., [0]])
            self.assertTrue(torch.equal(Y_tf[..., [0]], Y_tf_subset))
            self.assertIsNone(Yvar_tf_subset)
            with self.assertRaises(RuntimeError):
                tf.subset_output(idcs=[0, 1, 2])

            # with observation noise
            tf = Standardize(m=m, batch_shape=batch_shape)
            Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
            Yvar = 1e-8 + torch.rand(
                *batch_shape, 3, m, device=self.device, dtype=dtype
            )
            Y_tf, Yvar_tf = tf(Y, Yvar)
            self.assertTrue(tf.training)
            self.assertTrue(torch.all(Y_tf.mean(dim=-2).abs() < 1e-4))
            Yvar_tf_expected = Yvar / Y.std(dim=-2, keepdim=True) ** 2
            self.assertTrue(torch.allclose(Yvar_tf, Yvar_tf_expected))
            tf.eval()
            self.assertFalse(tf.training)
            Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
            self.assertTrue(torch.allclose(Y_utf, Y))
            self.assertTrue(torch.allclose(Yvar_utf, Yvar))

            # untransform_posterior
            for interleaved, lazy in itertools.product((True, False), (True, False)):
                if m == 1 and interleaved:  # interleave has no meaning for m=1
                    continue
                shape = batch_shape + torch.Size([3, m])
                posterior = _get_test_posterior(
                    shape,
                    device=self.device,
                    dtype=dtype,
                    interleaved=interleaved,
                    lazy=lazy,
                )
                p_utf = tf.untransform_posterior(posterior)
                self.assertEqual(p_utf.device.type, self.device.type)
                self.assertTrue(p_utf.dtype == dtype)
                mean_expected = tf.means + tf.stdvs * posterior.mean
                variance_expected = tf.stdvs ** 2 * posterior.variance
                self.assertTrue(torch.allclose(p_utf.mean, mean_expected))
                self.assertTrue(torch.allclose(p_utf.variance, variance_expected))
                samples = p_utf.rsample()
                self.assertEqual(samples.shape, torch.Size([1]) + shape)
                samples = p_utf.rsample(sample_shape=torch.Size([4]))
                self.assertEqual(samples.shape, torch.Size([4]) + shape)
                samples2 = p_utf.rsample(sample_shape=torch.Size([4, 2]))
                self.assertEqual(samples2.shape, torch.Size([4, 2]) + shape)
                # TODO: Test expected covar (both interleaved and non-interleaved)

            # untransform_posterior for non-GPyTorch posterior
            posterior2 = TransformedPosterior(
                posterior=posterior,
                sample_transform=lambda s: s,
                mean_transform=lambda m, v: m,
                variance_transform=lambda m, v: v,
            )
            p_utf2 = tf.untransform_posterior(posterior2)
            self.assertEqual(p_utf2.device.type, self.device.type)
            self.assertTrue(p_utf2.dtype == dtype)
            mean_expected = tf.means + tf.stdvs * posterior.mean
            variance_expected = tf.stdvs ** 2 * posterior.variance
            self.assertTrue(torch.allclose(p_utf2.mean, mean_expected))
            self.assertTrue(torch.allclose(p_utf2.variance, variance_expected))
            # TODO: Test expected covar (both interleaved and non-interleaved)
            samples = p_utf2.rsample()
            self.assertEqual(samples.shape, torch.Size([1]) + shape)
            samples = p_utf2.rsample(sample_shape=torch.Size([4]))
            self.assertEqual(samples.shape, torch.Size([4]) + shape)
            samples2 = p_utf2.rsample(sample_shape=torch.Size([4, 2]))
            self.assertEqual(samples2.shape, torch.Size([4, 2]) + shape)

            # test error on incompatible output dimension
            tf_big = Standardize(m=4).eval()
            with self.assertRaises(RuntimeError):
                tf_big.untransform_posterior(posterior2)

        # test transforming a subset of outcomes
        for batch_shape, dtype in itertools.product(batch_shapes, dtypes):

            m = 2
            outputs = [-1]

            # test init
            tf = Standardize(m=m, outputs=outputs, batch_shape=batch_shape)
            self.assertTrue(tf.training)
            self.assertEqual(tf._m, m)
            self.assertEqual(tf._outputs, [1])
            self.assertEqual(tf._batch_shape, batch_shape)
            self.assertEqual(tf._min_stdv, 1e-8)

            # no observation noise
            Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
            Y_tf, Yvar_tf = tf(Y, None)
            self.assertTrue(tf.training)
            Y_tf_mean = Y_tf.mean(dim=-2)
            self.assertTrue(torch.all(Y_tf_mean[..., 1].abs() < 1e-4))
            self.assertTrue(torch.allclose(Y_tf_mean[..., 0], Y.mean(dim=-2)[..., 0]))
            self.assertIsNone(Yvar_tf)
            tf.eval()
            self.assertFalse(tf.training)
            Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
            self.assertTrue(torch.allclose(Y_utf, Y))
            self.assertIsNone(Yvar_utf)

            # subset_output
            tf_subset = tf.subset_output(idcs=[0])
            Y_tf_subset, Yvar_tf_subset = tf_subset(Y[..., [0]])
            self.assertTrue(torch.equal(Y_tf[..., [0]], Y_tf_subset))
            self.assertIsNone(Yvar_tf_subset)
            with self.assertRaises(RuntimeError):
                tf.subset_output(idcs=[0, 1, 2])

            # with observation noise
            tf = Standardize(m=m, outputs=outputs, batch_shape=batch_shape)
            Y = torch.rand(*batch_shape, 3, m, device=self.device, dtype=dtype)
            Yvar = 1e-8 + torch.rand(
                *batch_shape, 3, m, device=self.device, dtype=dtype
            )
            Y_tf, Yvar_tf = tf(Y, Yvar)
            self.assertTrue(tf.training)
            Y_tf_mean = Y_tf.mean(dim=-2)
            self.assertTrue(torch.all(Y_tf_mean[..., 1].abs() < 1e-4))
            self.assertTrue(torch.allclose(Y_tf_mean[..., 0], Y.mean(dim=-2)[..., 0]))
            Yvar_tf_expected = Yvar / Y.std(dim=-2, keepdim=True) ** 2
            self.assertTrue(torch.allclose(Yvar_tf[..., 1], Yvar_tf_expected[..., 1]))
            self.assertTrue(torch.allclose(Yvar_tf[..., 0], Yvar[..., 0]))
            tf.eval()
            self.assertFalse(tf.training)
            Y_utf, Yvar_utf = tf.untransform(Y_tf, Yvar_tf)
            self.assertTrue(torch.allclose(Y_utf, Y))
            self.assertTrue(torch.allclose(Yvar_utf, Yvar))

            # error on untransform_posterior
            with self.assertRaises(NotImplementedError):
                tf.untransform_posterior(None)
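
To make the round trip exercised above concrete, a minimal self-contained sketch of Standardize's transform/untransform cycle:

import torch
from botorch.models.transforms.outcome import Standardize

Y = torch.rand(5, 2)                   # 5 observations of m = 2 outcomes
tf = Standardize(m=2)
Y_tf, _ = tf(Y, None)                  # train mode: zero mean, unit std per outcome
tf.eval()                              # freeze the fitted means / stdvs
Y_utf, _ = tf.untransform(Y_tf, None)
assert torch.allclose(Y_utf, Y)        # original outcomes recovered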