Example #1
 def test_scalarized_posterior_transform(self):
     for batch_shape, m, dtype in itertools.product(
         ([], [3]), (1, 2), (torch.float, torch.double)):
         offset = torch.rand(1).item()
         weights = torch.randn(m, device=self.device, dtype=dtype)
         obj = ScalarizedPosteriorTransform(weights=weights, offset=offset)
         posterior = _get_test_posterior(batch_shape,
                                         m=m,
                                         device=self.device,
                                         dtype=dtype)
         mean, covar = posterior.mvn.mean, posterior.mvn.covariance_matrix
         new_posterior = obj(posterior)
         exp_size = torch.Size(batch_shape + [1, 1])
         self.assertEqual(new_posterior.mean.shape, exp_size)
         new_mean_exp = offset + mean @ weights
         self.assertTrue(
             torch.allclose(new_posterior.mean[..., -1], new_mean_exp))
         self.assertEqual(new_posterior.variance.shape, exp_size)
         new_covar_exp = ((covar @ weights) @ weights).unsqueeze(-1)
         self.assertTrue(
             torch.allclose(new_posterior.variance[..., -1], new_covar_exp))
         # test error
         with self.assertRaises(ValueError):
             ScalarizedPosteriorTransform(weights=torch.rand(2, m))
         # test evaluate
         Y = torch.rand(2, m, device=self.device, dtype=dtype)
         val = obj.evaluate(Y)
         val_expected = offset + Y @ weights
         self.assertTrue(torch.equal(val, val_expected))
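The test above exercises the affine rule that ScalarizedPosteriorTransform implements: for a posterior with mean mu and covariance Sigma, the scalarized posterior has mean offset + w^T mu and variance w^T Sigma w. A minimal standalone sketch of that arithmetic in plain torch (not part of the test suite):

import torch

w = torch.tensor([2.0, 1.0])
mu = torch.tensor([-0.25, 0.5])
Sigma = torch.tensor([[0.5, 0.125], [0.125, 0.5]])
offset = 0.1
new_mean = offset + mu @ w  # offset + w^T mu -> 0.1
new_var = (Sigma @ w) @ w   # w^T Sigma w -> 3.0
print(new_mean.item(), new_var.item())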
Example #2
    def test_expected_improvement(self):
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([[-0.5]], device=self.device, dtype=dtype)
            variance = torch.ones(1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))

            # basic test
            module = ExpectedImprovement(model=mm, best_f=0.0)
            X = torch.empty(1, 1, device=self.device, dtype=dtype)  # dummy
            ei = module(X)
            ei_expected = torch.tensor(0.19780,
                                       device=self.device,
                                       dtype=dtype)
            self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))

            # test maximize
            module = ExpectedImprovement(model=mm, best_f=0.0, maximize=False)
            X = torch.empty(1, 1, device=self.device, dtype=dtype)  # dummy
            ei = module(X)
            ei_expected = torch.tensor(0.6978, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))
            with self.assertRaises(UnsupportedError):
                module.set_X_pending(None)

            # test posterior transform (single-output)
            mean = torch.tensor([0.5], device=self.device, dtype=dtype)
            covar = torch.tensor([[0.16]], device=self.device, dtype=dtype)
            mvn = MultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([0.5], device=self.device, dtype=dtype)
            transform = ScalarizedPosteriorTransform(weights)
            ei = ExpectedImprovement(model=mm,
                                     best_f=0.0,
                                     posterior_transform=transform)
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            ei_expected = torch.tensor(0.2601, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))

            # test posterior transform (multi-output)
            mean = torch.tensor([[-0.25, 0.5]],
                                device=self.device,
                                dtype=dtype)
            covar = torch.tensor([[[0.5, 0.125], [0.125, 0.5]]],
                                 device=self.device,
                                 dtype=dtype)
            mvn = MultitaskMultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([2.0, 1.0], device=self.device, dtype=dtype)
            transform = ScalarizedPosteriorTransform(weights)
            ei = ExpectedImprovement(model=mm,
                                     best_f=0.0,
                                     posterior_transform=transform)
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            ei_expected = torch.tensor(0.6910, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))
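The hard-coded expectations above come from the closed-form expected improvement for a Gaussian posterior, EI = sigma * (u * Phi(u) + phi(u)) with u = (mu - best_f) / sigma (the sign of u flips for maximize=False). A quick sketch in plain torch (not from the test file) reproducing the first expected value:

import torch
from torch.distributions import Normal

mu, sigma, best_f = torch.tensor(-0.5), torch.tensor(1.0), 0.0
u = (mu - best_f) / sigma
standard_normal = Normal(0.0, 1.0)
# EI = sigma * (u * Phi(u) + phi(u))
ei = sigma * (u * standard_normal.cdf(u) + standard_normal.log_prob(u).exp())
print(ei.item())  # ~0.19780, matching ei_expected in the basic test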
Example #3
    def _deprecate_acqf_objective(
        cls,
        posterior_transform: Optional[Callable[[Posterior], Posterior]],
        objective: Optional[Module],
    ) -> Optional[Callable[[Posterior], Posterior]]:
        from botorch.acquisition.objective import (
            ScalarizedObjective,
            ScalarizedPosteriorTransform,
        )

        if objective is None:
            return posterior_transform
        warnings.warn(
            f"{cls.__name__} got a non-MC `objective`. The non-MC "
            "AcquisitionObjectives and the `objective` argument to"
            "AnalyticAcquisitionFunctions are DEPRECATED and will be removed in the"
            "next version. Use `posterior_transform` instead.",
            DeprecationWarning,
        )
        if not isinstance(objective, ScalarizedObjective):
            raise UnsupportedError(
                f"{cls.__name__} only supports ScalarizedObjective "
                "(DEPRECATED) type objectives.")
        return ScalarizedPosteriorTransform(weights=objective.weights,
                                            offset=objective.offset)
Example #4
def _deprecate_objective_arg(
    posterior_transform: Optional[PosteriorTransform] = None,
    objective: Optional[AcquisitionObjective] = None,
) -> Optional[PosteriorTransform]:
    if posterior_transform is not None:
        if objective is None:
            return posterior_transform
        else:
            raise RuntimeError(
                "Got both a non-MC objective (DEPRECATED) and a posterior "
                "transform. Use only a posterior transform instead.")
    elif objective is not None:
        warnings.warn(
            "The `objective` argument to AnalyticAcquisitionFunctions is deprecated "
            "and will be removed in the next version. Use `posterior_transform` "
            "instead.",
            DeprecationWarning,
        )
        if not isinstance(objective, ScalarizedObjective):
            raise UnsupportedError(
                "Analytic acquisition functions only support ScalarizedObjective "
                "(DEPRECATED) type objectives.")
        return ScalarizedPosteriorTransform(weights=objective.weights,
                                            offset=objective.offset)
    else:
        return None
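A minimal usage sketch of the shim above (assuming the BoTorch imports shown; not part of the original module): a deprecated ScalarizedObjective is converted into an equivalent ScalarizedPosteriorTransform.

import torch
from botorch.acquisition.objective import (
    ScalarizedObjective,
    ScalarizedPosteriorTransform,
)

obj = ScalarizedObjective(weights=torch.tensor([0.5, 2.0]))
post_tf = _deprecate_objective_arg(objective=obj)  # emits a DeprecationWarning
assert isinstance(post_tf, ScalarizedPosteriorTransform)
assert torch.equal(post_tf.weights, obj.weights)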
Example #5
 def test_get_best_f_analytic(self):
     with self.assertRaises(NotImplementedError):
         get_best_f_analytic(training_data=self.nbd_td)
     best_f = get_best_f_analytic(training_data=self.bd_td)
     best_f_expected = self.bd_td.Y.squeeze().max()
     self.assertEqual(best_f, best_f_expected)
     with self.assertRaises(NotImplementedError):
         get_best_f_analytic(training_data=self.bd_td_mo)
     weights = torch.rand(2)
     obj = ScalarizedObjective(weights=weights)
     best_f_obj = get_best_f_analytic(training_data=self.bd_td_mo,
                                      objective=obj)
     post_tf = ScalarizedPosteriorTransform(weights=weights)
     best_f_tf = get_best_f_analytic(training_data=self.bd_td_mo,
                                     posterior_transform=post_tf)
     best_f_expected = post_tf.evaluate(self.bd_td_mo.Y).max()
     self.assertEqual(best_f_obj, best_f_expected)
     self.assertEqual(best_f_tf, best_f_expected)
Example #6
    def test_posterior_transform(self):
        def f(X):
            return X

        model = GenericDeterministicModel(f)
        test_X = torch.rand(3, 2)
        post_tf = ScalarizedPosteriorTransform(weights=torch.rand(2))
        # expect error due to post_tf expecting an MVN
        with self.assertRaises(AttributeError):
            model.posterior(test_X, posterior_transform=post_tf)
Example #7
 def test_posterior_transform(self):
     tkwargs = {"device": self.device, "dtype": torch.double}
     train_X = torch.rand(5, 2, **tkwargs)
     train_Y = torch.sin(train_X)
     model = SimpleBatchedMultiOutputGPyTorchModel(train_X, train_Y)
     post_tf = ScalarizedPosteriorTransform(
         weights=torch.zeros(2, **tkwargs))
     post = model.posterior(torch.rand(3, 2, **tkwargs),
                            posterior_transform=post_tf)
     self.assertTrue(torch.equal(post.mean, torch.zeros(3, 1, **tkwargs)))
Example #8
File: utils.py Project: Balandat/Ax
def get_botorch_objective_and_transform(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    objective_thresholds: Optional[Tensor] = None,
    X_observed: Optional[Tensor] = None,
) -> Tuple[Optional[MCAcquisitionObjective], Optional[PosteriorTransform]]:
    """Constructs a BoTorch `AcquisitionObjective` object.

    Args:
        model: A BoTorch Model
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        objective_thresholds: A tensor containing thresholds forming a reference point
            from which to calculate pareto frontier hypervolume. Points that do not
            dominate the objective_thresholds contribute nothing to hypervolume.
        X_observed: Observed points that are feasible and appear in the
            objective or the constraints. None if there are no such points.

    Returns:
        A two-tuple containing (optionally) an `MCAcquisitionObjective` and
        (optionally) a `PosteriorTransform`.
    """
    if objective_thresholds is not None:
        # we are doing multi-objective optimization
        nonzero_idcs = torch.nonzero(objective_weights).view(-1)
        objective_weights = objective_weights[nonzero_idcs]
        objective = WeightedMCMultiOutputObjective(
            weights=objective_weights, outcomes=nonzero_idcs.tolist())
        return objective, None
    if X_observed is None:
        raise UnsupportedError(
            "X_observed is required to construct a BoTorch objective.")
    if outcome_constraints:
        # If there are outcome constraints, we use MC acquisition functions
        obj_tf = get_objective_weights_transform(objective_weights)

        def objective(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
            return obj_tf(samples)

        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed,
                                       model=model,
                                       objective=obj_tf)
        objective = ConstrainedMCObjective(objective=objective,
                                           constraints=con_tfs or [],
                                           infeasible_cost=inf_cost)
        return objective, None
    # Case of linear weights - use ScalarizedPosteriorTransform
    transform = ScalarizedPosteriorTransform(weights=objective_weights)
    return None, transform
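A hypothetical call hitting the final branch above (`model` is an assumed fitted BoTorch Model fixture): with no objective_thresholds and no outcome_constraints, the helper returns only a ScalarizedPosteriorTransform built from the weights.

import torch

weights = torch.tensor([1.0, -1.0])
objective, transform = get_botorch_objective_and_transform(
    model=model,  # assumed fixture
    objective_weights=weights,
    X_observed=torch.rand(5, 2),  # assumed feasible observations
)
assert objective is None
assert torch.equal(transform.weights, weights)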
Example #9
 def test_deprecate_objective_arg(self):
     objective = ScalarizedObjective(weights=torch.ones(1))
     post_tf = ScalarizedPosteriorTransform(weights=torch.zeros(1))
     with self.assertRaises(RuntimeError):
         _deprecate_objective_arg(posterior_transform=post_tf,
                                  objective=objective)
     with self.assertWarns(DeprecationWarning):
         new_tf = _deprecate_objective_arg(objective=objective)
     self.assertTrue(torch.equal(new_tf.weights, objective.weights))
     self.assertIsInstance(new_tf, ScalarizedPosteriorTransform)
     new_tf = _deprecate_objective_arg(posterior_transform=post_tf)
     self.assertEqual(id(new_tf), id(post_tf))
     self.assertIsNone(_deprecate_objective_arg())
     with self.assertRaises(UnsupportedError):
         _deprecate_objective_arg(objective=DummyObjective())
Example #10
    def test_sample_max_value_Thompson(self):
        for dtype in (torch.float, torch.double):
            torch.manual_seed(7)
            mm = MESMockModel()
            candidate_set = torch.rand(3, 10, 2, dtype=dtype)
            samples = _sample_max_value_Thompson(mm, candidate_set, 5)
            self.assertEqual(samples.shape, torch.Size([5, 3]))

            # Test with multi-output model w/ transform.
            mm = MESMockModel(num_outputs=2)
            pt = ScalarizedPosteriorTransform(weights=torch.ones(2, dtype=dtype))
            samples = _sample_max_value_Thompson(
                mm, candidate_set, 5, posterior_transform=pt
            )
            self.assertEqual(samples.shape, torch.Size([5, 3]))
Example #11
    def test_optimize_objective(self, mock_optimize_acqf):
        from botorch.acquisition.input_constructors import optimize_objective

        mock_model = MockModel(
            posterior=MockPosterior(mean=None, variance=None))
        bounds = torch.rand(2, len(self.bounds))

        A = torch.rand(1, bounds.shape[-1])
        b = torch.zeros([1, 1])
        idx = A[0].nonzero(as_tuple=False).squeeze()
        inequality_constraints = ((idx, -A[0, idx], -b[0, 0]), )

        with self.subTest("scalarObjective_linearConstraints"):
            post_tf = ScalarizedPosteriorTransform(
                weights=torch.rand(bounds.shape[-1]))
            _ = optimize_objective(
                model=mock_model,
                bounds=bounds,
                q=1,
                posterior_transform=post_tf,
                linear_constraints=(A, b),
                fixed_features=None,
            )

            kwargs = mock_optimize_acqf.call_args[1]
            self.assertIsInstance(kwargs["acq_function"], PosteriorMean)
            self.assertTrue(torch.equal(kwargs["bounds"], bounds))
            self.assertEqual(len(kwargs["inequality_constraints"]), 1)
            for a, b in zip(kwargs["inequality_constraints"][0],
                            inequality_constraints[0]):
                self.assertTrue(torch.equal(a, b))

        with self.subTest("mcObjective_fixedFeatures"):
            _ = optimize_objective(
                model=mock_model,
                bounds=bounds,
                q=1,
                objective=LinearMCObjective(
                    weights=torch.rand(bounds.shape[-1])),
                fixed_features={0: 0.5},
            )

            kwargs = mock_optimize_acqf.call_args[1]
            self.assertIsInstance(kwargs["acq_function"],
                                  FixedFeatureAcquisitionFunction)
            self.assertIsInstance(kwargs["acq_function"].acq_func,
                                  qSimpleRegret)
            self.assertTrue(torch.equal(kwargs["bounds"], bounds[:, 1:]))
Example #12
 def test_posterior_transform(self):
     tkwargs = {"device": self.device, "dtype": torch.double}
     train_X1, train_X2 = (
         torch.rand(5, 1, **tkwargs),
         torch.rand(5, 1, **tkwargs),
     )
     train_Y1 = torch.sin(train_X1)
     train_Y2 = torch.cos(train_X2)
     # test different batch shapes
     m1 = SimpleGPyTorchModel(train_X1, train_Y1)
     m2 = SimpleGPyTorchModel(train_X2, train_Y2)
     model = SimpleModelListGPyTorchModel(m1, m2)
     post_tf = ScalarizedPosteriorTransform(torch.ones(2, **tkwargs))
     post = model.posterior(torch.rand(3, 1, **tkwargs),
                            posterior_transform=post_tf)
     self.assertEqual(post.mean.shape, torch.Size([3, 1]))
Example #13
 def test_get_best_f_mc(self):
     with self.assertRaises(NotImplementedError):
         get_best_f_mc(training_data=self.nbd_td)
     best_f = get_best_f_mc(training_data=self.bd_td)
     best_f_expected = self.bd_td.Y.squeeze().max()
     self.assertEqual(best_f, best_f_expected)
     with self.assertRaises(UnsupportedError):
         get_best_f_mc(training_data=self.bd_td_mo)
     obj = LinearMCObjective(weights=torch.rand(2))
     best_f = get_best_f_mc(training_data=self.bd_td_mo, objective=obj)
     best_f_expected = (self.bd_td_mo.Y @ obj.weights).max()
     self.assertEqual(best_f, best_f_expected)
     post_tf = ScalarizedPosteriorTransform(weights=torch.ones(2))
     best_f = get_best_f_mc(training_data=self.bd_td_mo,
                            posterior_transform=post_tf)
     best_f_expected = (self.bd_td_mo.Y.sum(dim=-1)).max()
     self.assertEqual(best_f, best_f_expected)
Example #14
    def test_q_multi_fidelity_max_value_entropy(self):
        for dtype in (torch.float, torch.double):
            torch.manual_seed(7)
            mm = MESMockModel()
            train_inputs = torch.rand(10, 2, device=self.device, dtype=dtype)
            mm.train_inputs = (train_inputs, )
            candidate_set = torch.rand(10, 2, device=self.device, dtype=dtype)
            qMF_MVE = qMultiFidelityMaxValueEntropy(
                model=mm, candidate_set=candidate_set, num_mv_samples=10)

            # test initialization
            self.assertEqual(qMF_MVE.num_fantasies, 16)
            self.assertEqual(qMF_MVE.num_mv_samples, 10)
            self.assertIsInstance(qMF_MVE.sampler, SobolQMCNormalSampler)
            self.assertIsInstance(qMF_MVE.cost_sampler, SobolQMCNormalSampler)
            self.assertEqual(qMF_MVE.sampler.sample_shape, torch.Size([128]))
            self.assertIsInstance(qMF_MVE.fantasies_sampler,
                                  SobolQMCNormalSampler)
            self.assertEqual(qMF_MVE.fantasies_sampler.sample_shape,
                             torch.Size([16]))
            self.assertIsInstance(qMF_MVE.expand, Callable)
            self.assertIsInstance(qMF_MVE.project, Callable)
            self.assertIsNone(qMF_MVE.X_pending)
            self.assertEqual(qMF_MVE.posterior_max_values.shape,
                             torch.Size([10, 1]))
            self.assertIsInstance(qMF_MVE.cost_aware_utility,
                                  InverseCostWeightedUtility)

            # test evaluation
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            self.assertEqual(qMF_MVE(X).shape, torch.Size([1]))

            # Test with multi-output model w/ transform.
            mm = MESMockModel(num_outputs=2)
            pt = ScalarizedPosteriorTransform(
                weights=torch.ones(2, device=self.device, dtype=dtype))
            qMF_MVE = qMultiFidelityMaxValueEntropy(
                model=mm,
                candidate_set=candidate_set,
                num_mv_samples=10,
                posterior_transform=pt,
            )
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            self.assertEqual(qMF_MVE(X).shape, torch.Size([1]))
Example #15
    def test_MultiTaskGP_single_output(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            model = _get_model_single_output(**tkwargs)
            self.assertIsInstance(model, MultiTaskGP)
            self.assertEqual(model.num_outputs, 1)
            self.assertIsInstance(model.likelihood, GaussianLikelihood)
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            matern_kernel = model.covar_module.base_kernel
            self.assertIsInstance(matern_kernel, MaternKernel)
            self.assertIsInstance(matern_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.task_covar_module, IndexKernel)
            self.assertEqual(model._rank, 2)
            self.assertEqual(model.task_covar_module.covar_factor.shape[-1],
                             model._rank)

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(mll,
                                         options={"maxiter": 1},
                                         max_retries=1)

            # test posterior
            test_x = torch.rand(2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 1, **tkwargs)
            posterior_f = model.posterior(test_x)
            self.assertIsInstance(posterior_f, GPyTorchPosterior)
            self.assertIsInstance(posterior_f.mvn, MultivariateNormal)

            # test posterior transform
            post_tf = ScalarizedPosteriorTransform(
                weights=torch.ones(1, **tkwargs))
            posterior_f_tf = model.posterior(test_x,
                                             posterior_transform=post_tf)
            self.assertTrue(torch.equal(posterior_f.mean, posterior_f_tf.mean))
Example #16
    def __init__(
        self,
        model: Model,
        objective: Optional[MCAcquisitionObjective] = None,
        posterior_transform: Optional[PosteriorTransform] = None,
        replacement: bool = True,
    ) -> None:
        r"""Constructor for the SamplingStrategy base class.

        Args:
            model: A fitted model.
            objective: The MCAcquisitionObjective under which the samples are
                evaluated. Defaults to `IdentityMCObjective()`.
            posterior_transform: An optional PosteriorTransform.
            replacement: If True, sample with replacement.
        """
        super().__init__()
        self.model = model
        if objective is None:
            objective = IdentityMCObjective()
        elif not isinstance(objective, MCAcquisitionObjective):
            # TODO: Clean up once ScalarizedObjective is removed.
            if posterior_transform is not None:
                raise RuntimeError(
                    "A ScalarizedObjective (DEPRECATED) and a posterior transform "
                    "are not supported at the same time. Use only a posterior "
                    "transform instead."
                )
            else:
                posterior_transform = ScalarizedPosteriorTransform(
                    weights=objective.weights, offset=objective.offset
                )
                objective = IdentityMCObjective()
        self.objective = objective
        self.posterior_transform = posterior_transform
        self.replacement = replacement
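A sketch of the deprecation branch above, using the MaxPosteriorSampling subclass seen later in Example #19 (`model` again an assumed fitted Model, and the import paths are assumptions): a ScalarizedObjective is swapped for an equivalent ScalarizedPosteriorTransform at construction time.

import torch
from botorch.acquisition.objective import (
    IdentityMCObjective,
    ScalarizedObjective,
    ScalarizedPosteriorTransform,
)
from botorch.generation.sampling import MaxPosteriorSampling

obj = ScalarizedObjective(weights=torch.ones(2))
strategy = MaxPosteriorSampling(model=model, objective=obj)  # model: assumed fixture
assert isinstance(strategy.posterior_transform, ScalarizedPosteriorTransform)
assert isinstance(strategy.objective, IdentityMCObjective)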
Example #17
    def test_expected_improvement_batch(self):
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([-0.5, 0.0, 0.5],
                                device=self.device,
                                dtype=dtype).view(3, 1, 1)
            variance = torch.ones(3, 1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))
            module = ExpectedImprovement(model=mm, best_f=0.0)
            X = torch.empty(3, 1, 1, device=self.device, dtype=dtype)  # dummy
            ei = module(X)
            ei_expected = torch.tensor([0.19780, 0.39894, 0.69780],
                                       device=self.device,
                                       dtype=dtype)
            self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))
            # check for proper error if multi-output model
            mean2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
            variance2 = torch.rand(3, 1, 2, device=self.device, dtype=dtype)
            mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
            with self.assertRaises(UnsupportedError):
                ExpectedImprovement(model=mm2, best_f=0.0)

            # test posterior transform (single-output)
            mean = torch.tensor([[[0.5]], [[0.25]]],
                                device=self.device,
                                dtype=dtype)
            covar = torch.tensor([[[[0.16]]], [[[0.125]]]],
                                 device=self.device,
                                 dtype=dtype)
            mvn = MultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([0.5], device=self.device, dtype=dtype)
            transform = ScalarizedPosteriorTransform(weights)
            ei = ExpectedImprovement(model=mm,
                                     best_f=0.0,
                                     posterior_transform=transform)
            X = torch.rand(2, 1, 2, device=self.device, dtype=dtype)
            ei_expected = torch.tensor([[0.2601], [0.1500]],
                                       device=self.device,
                                       dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))

            # test posterior transform (multi-output)
            mean = torch.tensor([[[-0.25, 0.5]], [[0.2, -0.1]]],
                                device=self.device,
                                dtype=dtype)
            covar = torch.tensor(
                [[[0.5, 0.125], [0.125, 0.5]], [[0.25, -0.1], [-0.1, 0.25]]],
                device=self.device,
                dtype=dtype,
            )
            mvn = MultitaskMultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([2.0, 1.0], device=self.device, dtype=dtype)
            transform = ScalarizedPosteriorTransform(weights)
            ei = ExpectedImprovement(model=mm,
                                     best_f=0.0,
                                     posterior_transform=transform)
            X = torch.rand(2, 1, 2, device=self.device, dtype=dtype)
            ei_expected = torch.tensor([0.6910, 0.5371],
                                       device=self.device,
                                       dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))

        # test bad posterior transform class
        with self.assertRaises(UnsupportedError):
            ExpectedImprovement(model=mm,
                                best_f=0.0,
                                posterior_transform=IdentityMCObjective())
Example #18
 def test_initialize_q_knowledge_gradient(self):
     for dtype in (torch.float, torch.double):
         mean = torch.zeros(1, 1, device=self.device, dtype=dtype)
         mm = MockModel(MockPosterior(mean=mean))
          # test error when specifying neither sampler nor num_fantasies
         with self.assertRaises(ValueError):
             qKnowledgeGradient(model=mm, num_fantasies=None)
         # test error when sampler and num_fantasies arg are inconsistent
         sampler = IIDNormalSampler(num_samples=16)
         with self.assertRaises(ValueError):
             qKnowledgeGradient(model=mm, num_fantasies=32, sampler=sampler)
         # test default construction
         qKG = qKnowledgeGradient(model=mm, num_fantasies=32)
         self.assertEqual(qKG.num_fantasies, 32)
         self.assertIsInstance(qKG.sampler, SobolQMCNormalSampler)
         self.assertEqual(qKG.sampler.sample_shape, torch.Size([32]))
         self.assertIsNone(qKG.objective)
         self.assertIsNone(qKG.inner_sampler)
         self.assertIsNone(qKG.X_pending)
         self.assertIsNone(qKG.current_value)
         self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 32 + 3)
         # test custom construction
         obj = GenericMCObjective(lambda Y, X: Y.mean(dim=-1))
         sampler = IIDNormalSampler(num_samples=16)
         X_pending = torch.zeros(2, 2, device=self.device, dtype=dtype)
         qKG = qKnowledgeGradient(
             model=mm,
             num_fantasies=16,
             sampler=sampler,
             objective=obj,
             X_pending=X_pending,
         )
         self.assertEqual(qKG.num_fantasies, 16)
         self.assertEqual(qKG.sampler, sampler)
         self.assertEqual(qKG.sampler.sample_shape, torch.Size([16]))
         self.assertEqual(qKG.objective, obj)
         self.assertIsInstance(qKG.inner_sampler, SobolQMCNormalSampler)
         self.assertEqual(qKG.inner_sampler.sample_shape, torch.Size([128]))
         self.assertTrue(torch.equal(qKG.X_pending, X_pending))
         self.assertIsNone(qKG.current_value)
         self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 16 + 3)
         # test assignment of num_fantasies from sampler if not provided
         qKG = qKnowledgeGradient(model=mm,
                                  num_fantasies=None,
                                  sampler=sampler)
         self.assertEqual(qKG.sampler.sample_shape, torch.Size([16]))
         # test custom construction with inner sampler and current value
         inner_sampler = SobolQMCNormalSampler(num_samples=256)
         current_value = torch.zeros(1, device=self.device, dtype=dtype)
         qKG = qKnowledgeGradient(
             model=mm,
             num_fantasies=8,
             objective=obj,
             inner_sampler=inner_sampler,
             current_value=current_value,
         )
         self.assertEqual(qKG.num_fantasies, 8)
         self.assertEqual(qKG.sampler.sample_shape, torch.Size([8]))
         self.assertEqual(qKG.objective, obj)
         self.assertIsInstance(qKG.inner_sampler, SobolQMCNormalSampler)
         self.assertEqual(qKG.inner_sampler, inner_sampler)
         self.assertIsNone(qKG.X_pending)
         self.assertTrue(torch.equal(qKG.current_value, current_value))
         self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 8 + 3)
         # test construction with posterior_transform
         qKG_s = qKnowledgeGradient(
             model=mm,
             num_fantasies=16,
             sampler=sampler,
             posterior_transform=ScalarizedPosteriorTransform(
                 weights=torch.rand(2)),
         )
         self.assertIsNone(qKG_s.inner_sampler)
         self.assertIsInstance(qKG_s.posterior_transform,
                               ScalarizedPosteriorTransform)
         # test error if multi-output model and no objective or posterior transform
         mean2 = torch.zeros(1, 2, device=self.device, dtype=dtype)
         mm2 = MockModel(MockPosterior(mean=mean2))
         with self.assertRaises(UnsupportedError):
             qKnowledgeGradient(model=mm2)
          # test error if multi-output model with no objective and a
          # posterior transform that does not scalarize
         with self.assertRaises(UnsupportedError):
             qKnowledgeGradient(
                 model=mm2,
                 posterior_transform=DummyNonScalarizingPosteriorTransform(
                 ),
             )
         # test handling of scalarized objective
         obj = ScalarizedObjective(weights=torch.rand(2))
         post_tf = ScalarizedPosteriorTransform(weights=torch.rand(2))
         with self.assertRaises(RuntimeError):
             qKnowledgeGradient(model=mm2,
                                objective=obj,
                                posterior_transform=post_tf)
         acqf = qKnowledgeGradient(model=mm2, objective=obj)
         self.assertIsInstance(acqf.posterior_transform,
                               ScalarizedPosteriorTransform)
         self.assertIsNone(acqf.objective)
Example #19
    def test_max_posterior_sampling(self):
        batch_shapes = (torch.Size(), torch.Size([3]), torch.Size([3, 2]))
        dtypes = (torch.float, torch.double)
        for batch_shape, dtype, N, num_samples, d in itertools.product(
                batch_shapes, dtypes, (5, 6), (1, 2), (1, 2)):
            tkwargs = {"device": self.device, "dtype": dtype}
            # X is `batch_shape x N x d`.
            X = torch.randn(*batch_shape, N, d, **tkwargs)
            # the event shape is `num_samples x batch_shape x N x m`
            psamples = torch.zeros(num_samples, *batch_shape, N, 1, **tkwargs)
            psamples[..., 0, :] = 1.0

            # IdentityMCObjective, with replacement
            with mock.patch.object(MockPosterior,
                                   "rsample",
                                   return_value=psamples):
                mp = MockPosterior(None)
                with mock.patch.object(MockModel, "posterior",
                                       return_value=mp):
                    mm = MockModel(None)
                    MPS = MaxPosteriorSampling(mm)
                    s = MPS(X, num_samples=num_samples)
                    self.assertTrue(
                        torch.equal(s, X[..., [0] * num_samples, :]))

            # ScalarizedObjective, with replacement
            with mock.patch.object(MockPosterior,
                                   "rsample",
                                   return_value=psamples):
                mp = MockPosterior(None)
                with mock.patch.object(MockModel, "posterior",
                                       return_value=mp):
                    mm = MockModel(None)
                    with mock.patch.object(ScalarizedObjective,
                                           "forward",
                                           return_value=mp):
                        obj = ScalarizedObjective(torch.rand(2, **tkwargs))
                        MPS = MaxPosteriorSampling(mm, objective=obj)
                        s = MPS(X, num_samples=num_samples)
                        self.assertTrue(
                            torch.equal(s, X[..., [0] * num_samples, :]))

            # ScalarizedPosteriorTransform w/ replacement
            with mock.patch.object(MockPosterior,
                                   "rsample",
                                   return_value=psamples):
                mp = MockPosterior(None)
                with mock.patch.object(MockModel, "posterior",
                                       return_value=mp):
                    mm = MockModel(None)
                    with mock.patch.object(ScalarizedPosteriorTransform,
                                           "forward",
                                           return_value=mp):
                        post_tf = ScalarizedPosteriorTransform(
                            torch.rand(2, **tkwargs))
                        MPS = MaxPosteriorSampling(mm,
                                                   posterior_transform=post_tf)
                        s = MPS(X, num_samples=num_samples)
                        self.assertTrue(
                            torch.equal(s, X[..., [0] * num_samples, :]))

            # ScalarizedPosteriorTransform and Scalarized obj
            mp = MockPosterior(None)
            mm = MockModel(posterior=mp)
            obj = ScalarizedObjective(torch.rand(2, **tkwargs))
            post_tf = ScalarizedPosteriorTransform(torch.rand(2, **tkwargs))
            with self.assertRaises(RuntimeError):
                MaxPosteriorSampling(mm,
                                     posterior_transform=post_tf,
                                     objective=obj)

            # without replacement
            psamples[..., 1, 0] = 1e-6
            with mock.patch.object(MockPosterior,
                                   "rsample",
                                   return_value=psamples):
                mp = MockPosterior(None)
                with mock.patch.object(MockModel, "posterior",
                                       return_value=mp):
                    mm = MockModel(None)
                    MPS = MaxPosteriorSampling(mm, replacement=False)
                    if len(batch_shape) > 1:
                        with self.assertRaises(NotImplementedError):
                            MPS(X, num_samples=num_samples)
                    else:
                        s = MPS(X, num_samples=num_samples)
                        # order is not guaranteed, need to sort
                        self.assertTrue(
                            torch.equal(
                                torch.sort(s, dim=-2).values,
                                torch.sort(X[..., :num_samples, :],
                                           dim=-2).values,
                            ))

            # ScalarizedObjective, without replacement
            with mock.patch.object(MockPosterior,
                                   "rsample",
                                   return_value=psamples):
                mp = MockPosterior(None)
                with mock.patch.object(MockModel, "posterior",
                                       return_value=mp):
                    mm = MockModel(None)
                    with mock.patch.object(ScalarizedObjective,
                                           "forward",
                                           return_value=mp):
                        obj = ScalarizedObjective(torch.rand(2, **tkwargs))
                        MPS = MaxPosteriorSampling(mm,
                                                   objective=obj,
                                                   replacement=False)
                        if len(batch_shape) > 1:
                            with self.assertRaises(NotImplementedError):
                                MPS(X, num_samples=num_samples)
                        else:
                            s = MPS(X, num_samples=num_samples)
                            # order is not guaranteed, need to sort
                            self.assertTrue(
                                torch.equal(
                                    torch.sort(s, dim=-2).values,
                                    torch.sort(X[..., :num_samples, :],
                                               dim=-2).values,
                                ))
Example #20
    def test_qMS_init(self):
        d = 2
        q = 1
        num_data = 3
        q_batch_sizes = [1, 1, 1]
        num_fantasies = [2, 2, 1]
        t_batch_size = [2]
        for dtype in (torch.float, torch.double):
            bounds = torch.tensor([[0], [1]], device=self.device, dtype=dtype)
            bounds = bounds.repeat(1, d)
            train_X = torch.rand(num_data, d, device=self.device, dtype=dtype)
            train_Y = torch.rand(num_data, 1, device=self.device, dtype=dtype)
            model = SingleTaskGP(train_X, train_Y)

            # exactly one of samplers or num_fantasies
            with self.assertRaises(UnsupportedError):
                qMultiStepLookahead(
                    model=model,
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[qExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    inner_mc_samples=[2] * 4,
                )

            # cannot use qMS as its own valfunc_cls
            with self.assertRaises(UnsupportedError):
                qMultiStepLookahead(
                    model=model,
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[qMultiStepLookahead] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                    inner_mc_samples=[2] * 4,
                )

            # construct using samplers
            samplers = [
                SobolQMCNormalSampler(num_samples=nf,
                                      resample=False,
                                      collapse_batch_dims=True)
                for nf in num_fantasies
            ]
            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=q_batch_sizes,
                valfunc_cls=[qExpectedImprovement] * 4,
                valfunc_argfacs=[make_best_f] * 4,
                inner_mc_samples=[2] * 4,
                samplers=samplers,
            )
            self.assertEqual(qMS.num_fantasies, num_fantasies)

            # use default valfunc_cls, valfun_argfacs, inner_mc_samples
            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=q_batch_sizes,
                samplers=samplers,
            )
            self.assertEqual(len(qMS._valfunc_cls), 4)
            self.assertEqual(len(qMS.inner_samplers), 4)
            self.assertEqual(len(qMS._valfunc_argfacs), 4)

            # _construct_inner_samplers error catching tests below
            # AnalyticAcquisitionFunction with MCAcquisitionObjective
            with self.assertRaises(UnsupportedError):
                qMultiStepLookahead(
                    model=model,
                    objective=IdentityMCObjective(),
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[ExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                )
            # AnalyticAcquisitionFunction and q > 1
            with self.assertRaises(UnsupportedError):
                qMultiStepLookahead(
                    model=model,
                    batch_sizes=[2, 2, 2],
                    valfunc_cls=[ExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                    inner_mc_samples=[2] * 4,
                )
            # AnalyticAcquisitionFunction and inner_mc_samples
            with self.assertWarns(Warning):
                qMultiStepLookahead(
                    model=model,
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[ExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                    inner_mc_samples=[2] * 4,
                )
            # AnalyticAcquisitionFunction with scalarized obj (deprecated)
            with self.assertWarns(DeprecationWarning):
                acqf = qMultiStepLookahead(
                    model=model,
                    objective=ScalarizedObjective(weights=torch.ones(1)),
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[ExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                )
            self.assertIsNone(acqf.objective)
            self.assertIsInstance(acqf.posterior_transform,
                                  ScalarizedPosteriorTransform)
            # Both scalarized obj and scalarized post_tf
            with self.assertRaises(RuntimeError):
                qMultiStepLookahead(
                    model=model,
                    objective=ScalarizedObjective(weights=torch.ones(1)),
                    posterior_transform=ScalarizedPosteriorTransform(
                        weights=torch.ones(1)),
                    batch_sizes=q_batch_sizes,
                    valfunc_cls=[ExpectedImprovement] * 4,
                    valfunc_argfacs=[make_best_f] * 4,
                    num_fantasies=num_fantasies,
                )
            # test warmstarting
            qMS = qMultiStepLookahead(
                model=model,
                batch_sizes=q_batch_sizes,
                samplers=samplers,
            )
            q_prime = qMS.get_augmented_q_batch_size(q)
            eval_X = torch.rand(t_batch_size + [q_prime, d],
                                device=self.device,
                                dtype=dtype)
            warmstarted_X = warmstart_multistep(
                acq_function=qMS,
                bounds=bounds,
                num_restarts=5,
                raw_samples=10,
                full_optimizer=eval_X,
            )
            self.assertEqual(warmstarted_X.shape, torch.Size([5, q_prime, d]))
Example #21
    def test_evaluate_q_knowledge_gradient(self):
        # Stop gap measure to avoid test failures on Ampere devices
        # TODO: Find an elegant way of disallowing tf32 for botorch/gpytorch
        # without blanket-disallowing it for all of torch.
        torch.backends.cuda.matmul.allow_tf32 = False

        for dtype in (torch.float, torch.double):
            # basic test
            n_f = 4
            mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
            variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, variance=variance))
            with mock.patch.object(MockModel, "fantasize",
                                   return_value=mfm) as patch_f:
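                # NO: assumed module-level constant holding the dotted path
                # to MockModel.num_outputs, used to mock.patch the property.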
                with mock.patch(
                        NO,
                        new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    mm = MockModel(None)
                    qKG = qKnowledgeGradient(model=mm, num_fantasies=n_f)
                    X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
                    val = qKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
            self.assertTrue(torch.allclose(val, mean.mean(), atol=1e-4))
            self.assertTrue(
                torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :]))
            # batched evaluation
            b = 2
            mean = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
            variance = torch.rand(n_f, b, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, variance=variance))
            X = torch.rand(b, n_f + 1, 1, device=self.device, dtype=dtype)
            with mock.patch.object(MockModel, "fantasize",
                                   return_value=mfm) as patch_f:
                with mock.patch(
                        NO,
                        new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    mm = MockModel(None)
                    qKG = qKnowledgeGradient(model=mm, num_fantasies=n_f)
                    val = qKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([b, 1, 1]))
            self.assertTrue(
                torch.allclose(val, mean.mean(dim=0).squeeze(-1), atol=1e-4))
            self.assertTrue(
                torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :]))
            # pending points and current value
            X_pending = torch.rand(2, 1, device=self.device, dtype=dtype)
            mean = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
            variance = torch.rand(n_f, 1, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(mean=mean, variance=variance))
            current_value = torch.rand(1, device=self.device, dtype=dtype)
            X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
            with mock.patch.object(MockModel, "fantasize",
                                   return_value=mfm) as patch_f:
                with mock.patch(
                        NO,
                        new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    mm = MockModel(None)
                    qKG = qKnowledgeGradient(
                        model=mm,
                        num_fantasies=n_f,
                        X_pending=X_pending,
                        current_value=current_value,
                    )
                    val = qKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([1, 3, 1]))
            self.assertTrue(
                torch.allclose(val, mean.mean() - current_value, atol=1e-4))
            self.assertTrue(
                torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :]))
            # test objective (inner MC sampling)
            objective = GenericMCObjective(
                objective=lambda Y, X: Y.norm(dim=-1))
            samples = torch.randn(3, 1, 1, device=self.device, dtype=dtype)
            mfm = MockModel(MockPosterior(samples=samples))
            X = torch.rand(n_f + 1, 1, device=self.device, dtype=dtype)
            with mock.patch.object(MockModel, "fantasize",
                                   return_value=mfm) as patch_f:
                with mock.patch(
                        NO,
                        new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 1
                    mm = MockModel(None)
                    qKG = qKnowledgeGradient(model=mm,
                                             num_fantasies=n_f,
                                             objective=objective)
                    val = qKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
            self.assertTrue(
                torch.allclose(val, objective(samples).mean(), atol=1e-4))
            self.assertTrue(
                torch.equal(qKG.extract_candidates(X), X[..., :-n_f, :]))
            # test scalarized posterior transform
            weights = torch.rand(2, device=self.device, dtype=dtype)
            post_tf = ScalarizedPosteriorTransform(weights=weights)
            mean = torch.tensor([1.0, 0.5], device=self.device,
                                dtype=dtype).expand(n_f, 1, 2)
            cov = torch.tensor([[1.0, 0.1], [0.1, 0.5]],
                               device=self.device,
                               dtype=dtype).expand(n_f, 2, 2)
            posterior = GPyTorchPosterior(
                MultitaskMultivariateNormal(mean, cov))
            mfm = MockModel(posterior)
            with mock.patch.object(MockModel, "fantasize",
                                   return_value=mfm) as patch_f:
                with mock.patch(
                        NO,
                        new_callable=mock.PropertyMock) as mock_num_outputs:
                    mock_num_outputs.return_value = 2
                    mm = MockModel(None)
                    qKG = qKnowledgeGradient(model=mm,
                                             num_fantasies=n_f,
                                             posterior_transform=post_tf)
                    val = qKG(X)
                    patch_f.assert_called_once()
                    cargs, ckwargs = patch_f.call_args
                    self.assertEqual(ckwargs["X"].shape, torch.Size([1, 1, 1]))
                    val_expected = (mean * weights).sum(-1).mean(0)
                    self.assertTrue(torch.allclose(val, val_expected))
Example #22
 def test_GetQEHVI(self, mock_acqf):
     # make sure ref_point is specified
     with self.assertRaises(ValueError):
         acqf = get_acquisition_function(
             acquisition_function_name="qEHVI",
             model=self.model,
             objective=self.mo_objective,
             X_observed=self.X_observed,
             X_pending=self.X_pending,
             mc_samples=self.mc_samples,
             seed=self.seed,
             Y=self.Y,
         )
     # make sure Y is specified
     with self.assertRaises(ValueError):
         acqf = get_acquisition_function(
             acquisition_function_name="qEHVI",
             model=self.model,
             objective=self.mo_objective,
             X_observed=self.X_observed,
             X_pending=self.X_pending,
             mc_samples=self.mc_samples,
             seed=self.seed,
             ref_point=self.ref_point,
         )
     # posterior transforms are not supported
     with self.assertRaises(NotImplementedError):
         acqf = get_acquisition_function(
             acquisition_function_name="qEHVI",
             model=self.model,
             objective=self.mo_objective,
             posterior_transform=ScalarizedPosteriorTransform(
                 weights=torch.rand(2)),
             X_observed=self.X_observed,
             X_pending=self.X_pending,
             mc_samples=self.mc_samples,
             seed=self.seed,
             ref_point=self.ref_point,
         )
     acqf = get_acquisition_function(
         acquisition_function_name="qEHVI",
         model=self.model,
         objective=self.mo_objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
         ref_point=self.ref_point,
         Y=self.Y,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     mock_acqf.assert_called_once_with(
         constraints=None,
         model=self.model,
         objective=self.mo_objective,
         ref_point=self.ref_point,
         partitioning=mock.ANY,
         sampler=mock.ANY,
         X_pending=self.X_pending,
     )
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, SobolQMCNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 1)
     # test with non-qmc
     acqf = get_acquisition_function(
         acquisition_function_name="qEHVI",
         model=self.model,
         objective=self.mo_objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=2,
         qmc=False,
         ref_point=self.ref_point,
         Y=self.Y,
     )
      self.assertEqual(mock_acqf.call_count, 2)
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     self.assertEqual(kwargs["ref_point"], self.ref_point)
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, IIDNormalSampler)
     self.assertIsInstance(kwargs["objective"], DummyMCMultiOutputObjective)
     partitioning = kwargs["partitioning"]
     self.assertIsInstance(partitioning, FastNondominatedPartitioning)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 2)
     # test that approximate partitioning is used when alpha > 0
     # test with non-qmc
     acqf = get_acquisition_function(
         acquisition_function_name="qEHVI",
         model=self.model,
         objective=self.mo_objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=2,
         qmc=False,
         ref_point=self.ref_point,
         Y=self.Y,
         alpha=0.1,
     )
     _, kwargs = mock_acqf.call_args
     partitioning = kwargs["partitioning"]
     self.assertIsInstance(partitioning, NondominatedPartitioning)
     self.assertEqual(partitioning.alpha, 0.1)
     # test constraints
     acqf = get_acquisition_function(
         acquisition_function_name="qEHVI",
         model=self.model,
         objective=self.mo_objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         constraints=[lambda Y: Y[..., -1]],
         seed=2,
         qmc=False,
         ref_point=self.ref_point,
         Y=self.Y,
     )
     _, kwargs = mock_acqf.call_args
     partitioning = kwargs["partitioning"]
     self.assertEqual(partitioning.pareto_Y.shape[0], 0)
Example #23
    def test_GetQEI(self, mock_acqf):
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2)))
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            marginalize_dim=0,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        best_f = self.objective(self.model.posterior(
            self.X_observed).mean).max().item()
        mock_acqf.assert_called_once_with(
            model=self.model,
            best_f=best_f,
            sampler=mock.ANY,
            objective=self.objective,
            posterior_transform=None,
            X_pending=self.X_pending,
        )
        # test batched model
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
        )
        self.assertIs(acqf, mock_acqf.return_value)
        # test batched model without marginalize dim
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))

        # test w/ posterior transform
        pm = torch.tensor([1.0, 2.0])
        mvn = MultivariateNormal(pm, torch.eye(2))
        self.model._posterior.mvn = mvn
        self.model._posterior._mean = pm.unsqueeze(-1)
        pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            posterior_transform=pt,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            marginalize_dim=0,
        )
        self.assertEqual(mock_acqf.call_args[-1]["best_f"].item(), -1.0)
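
The negative-weight transform above is the standard minimization trick. A minimal unmocked sketch of the same pattern, assuming qExpectedImprovement accepts posterior_transform as in the BoTorch version under test (the model and data below are toy placeholders):

import torch
from botorch.acquisition.monte_carlo import qExpectedImprovement
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.models import SingleTaskGP

train_X = torch.rand(6, 2)
train_Y = torch.rand(6, 1)
model = SingleTaskGP(train_X, train_Y)

# A negative weight flips the sign of the (single) output, so maximizing
# the transformed objective minimizes the original one; best_f is then
# computed on the transformed scale.
pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1.0]))
best_f = model.posterior(train_X, posterior_transform=pt).mean.max().item()

acqf = qExpectedImprovement(model=model, best_f=best_f, posterior_transform=pt)
val = acqf(train_X[:2].unsqueeze(0))  # evaluate one q=2 candidate batch
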
Example #24
    def test_cache_root(self):
        sample_cached_path = (
            "botorch.acquisition.cached_cholesky.sample_cached_cholesky")
        raw_state_dict = {
            "likelihood.noise_covar.raw_noise": torch.tensor(
                [[0.0895], [0.2594]], dtype=torch.float64),
            "mean_module.constant": torch.tensor(
                [[-0.4545], [-0.1285]], dtype=torch.float64),
            "covar_module.raw_outputscale": torch.tensor(
                [1.4876, 1.4897], dtype=torch.float64),
            "covar_module.base_kernel.raw_lengthscale": torch.tensor(
                [[[-0.7202, -0.2868]], [[-0.8794, -1.2877]]],
                dtype=torch.float64),
        }
        # test batched models (e.g. for MCMC)
        for train_batch_shape, m, dtype in product(
            (torch.Size([]), torch.Size([3])), (1, 2),
            (torch.float, torch.double)):
            state_dict = deepcopy(raw_state_dict)
            for k, v in state_dict.items():
                if m == 1:
                    v = v[0]
                if len(train_batch_shape) > 0:
                    v = v.unsqueeze(0).expand(*train_batch_shape, *v.shape)
                state_dict[k] = v
            tkwargs = {"device": self.device, "dtype": dtype}
            if m == 2:
                objective = GenericMCObjective(lambda Y, X: Y.sum(dim=-1))
            else:
                objective = None
            for k, v in state_dict.items():
                state_dict[k] = v.to(**tkwargs)
            all_close_kwargs = (
                {"atol": 1e-1, "rtol": 0.0}
                if dtype == torch.float
                else {"atol": 1e-4, "rtol": 0.0}
            )
            torch.manual_seed(1234)
            train_X = torch.rand(*train_batch_shape, 3, 2, **tkwargs)
            train_Y = (
                torch.sin(train_X * 2 * pi) +
                torch.randn(*train_batch_shape, 3, 2, **tkwargs))[..., :m]
            train_Y = standardize(train_Y)
            model = SingleTaskGP(
                train_X,
                train_Y,
            )
            if len(train_batch_shape) > 0:
                X_baseline = train_X[0]
            else:
                X_baseline = train_X
            model.load_state_dict(state_dict, strict=False)
            # test sampler with collapse_batch_dims=False
            sampler = IIDNormalSampler(5, seed=0, collapse_batch_dims=False)
            with self.assertRaises(UnsupportedError):
                qNoisyExpectedImprovement(
                    model=model,
                    X_baseline=X_baseline,
                    sampler=sampler,
                    objective=objective,
                    prune_baseline=False,
                    cache_root=True,
                )
            sampler = IIDNormalSampler(5, seed=0)
            torch.manual_seed(0)
            acqf = qNoisyExpectedImprovement(
                model=model,
                X_baseline=X_baseline,
                sampler=sampler,
                objective=objective,
                prune_baseline=False,
                cache_root=True,
            )

            orig_base_samples = acqf.base_sampler.base_samples.detach().clone()
            sampler2 = IIDNormalSampler(5, seed=0)
            sampler2.base_samples = orig_base_samples
            torch.manual_seed(0)
            acqf_no_cache = qNoisyExpectedImprovement(
                model=model,
                X_baseline=X_baseline,
                sampler=sampler2,
                objective=objective,
                prune_baseline=False,
                cache_root=False,
            )
            for q, batch_shape in product(
                (1, 3), (torch.Size([]), torch.Size([3]), torch.Size([4, 3]))):
                test_X = (0.3 +
                          0.05 * torch.randn(*batch_shape, q, 2, **tkwargs)
                          ).requires_grad_(True)
                with mock.patch(
                        sample_cached_path,
                        wraps=sample_cached_cholesky) as mock_sample_cached:
                    torch.manual_seed(0)
                    val = acqf(test_X)
                    mock_sample_cached.assert_called_once()
                val.sum().backward()
                base_samples = acqf.sampler.base_samples.detach().clone()
                X_grad = test_X.grad.clone()
                test_X2 = test_X.detach().clone().requires_grad_(True)
                acqf_no_cache.sampler.base_samples = base_samples
                with mock.patch(
                        sample_cached_path,
                        wraps=sample_cached_cholesky) as mock_sample_cached:
                    torch.manual_seed(0)
                    val2 = acqf_no_cache(test_X2)
                mock_sample_cached.assert_not_called()
                self.assertTrue(torch.allclose(val, val2, **all_close_kwargs))
                val2.sum().backward()
                self.assertTrue(
                    torch.allclose(X_grad, test_X2.grad, **all_close_kwargs))
            # test that we fall back to standard sampling for
            # ill-conditioned covariances
            acqf._baseline_L = torch.zeros_like(acqf._baseline_L)
            with warnings.catch_warnings(
                    record=True) as ws, settings.debug(True):
                with torch.no_grad():
                    acqf(test_X)
            self.assertEqual(len(ws), 1)
            self.assertTrue(issubclass(ws[-1].category, BotorchWarning))

        # test w/ posterior transform
        X_baseline = torch.rand(2, 1)
        model = SingleTaskGP(X_baseline, torch.randn(2, 1))
        pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
        with mock.patch.object(
                qNoisyExpectedImprovement,
                "_cache_root_decomposition",
        ) as mock_cache_root:
            acqf = qNoisyExpectedImprovement(
                model=model,
                X_baseline=X_baseline,
                sampler=IIDNormalSampler(1),
                posterior_transform=pt,
                prune_baseline=False,
                cache_root=True,
            )
            tf_post = model.posterior(X_baseline, posterior_transform=pt)
            self.assertTrue(
                torch.allclose(
                    tf_post.mean,
                    mock_cache_root.call_args[-1]["posterior"].mean))
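
For orientation, a minimal sketch of the cache_root mechanism outside the test harness: with cache_root=True, qNoisyExpectedImprovement factorizes the baseline posterior covariance once at construction and reuses the cached Cholesky root on every forward pass. Import paths and the sampler signature follow the older BoTorch API used throughout these tests; sizes are illustrative:

import torch
from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
from botorch.models import SingleTaskGP
from botorch.sampling.samplers import IIDNormalSampler

train_X = torch.rand(10, 2, dtype=torch.double)
train_Y = torch.randn(10, 1, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)

# With cache_root=True the Cholesky root of the baseline posterior
# covariance is computed once here and reused on every call.
acqf = qNoisyExpectedImprovement(
    model=model,
    X_baseline=train_X,
    sampler=IIDNormalSampler(16, seed=0),
    prune_baseline=False,
    cache_root=True,
)
val = acqf(torch.rand(4, 3, 2, dtype=torch.double))  # 4 batches of q=3
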
Example #25
    def test_q_max_value_entropy(self):
        for dtype in (torch.float, torch.double):
            torch.manual_seed(7)
            mm = MESMockModel()
            with self.assertRaises(TypeError):
                qMaxValueEntropy(mm)

            candidate_set = torch.rand(1000,
                                       2,
                                       device=self.device,
                                       dtype=dtype)

            # test error in case of batch GP model
            mm = MESMockModel(batch_shape=torch.Size([2]))
            with self.assertRaises(NotImplementedError):
                qMaxValueEntropy(mm, candidate_set, num_mv_samples=10)
            mm = MESMockModel()
            train_inputs = torch.rand(5,
                                      10,
                                      2,
                                      device=self.device,
                                      dtype=dtype)
            with self.assertRaises(NotImplementedError):
                qMaxValueEntropy(mm,
                                 candidate_set,
                                 num_mv_samples=10,
                                 train_inputs=train_inputs)

            # test that init works if batch_shape is not implemented on the model
            mm = NoBatchShapeMESMockModel()
            qMaxValueEntropy(
                mm,
                candidate_set,
                num_mv_samples=10,
            )

            # test error when number of outputs > 1 and no transform is given.
            mm = MESMockModel()
            mm._num_outputs = 2
            with self.assertRaises(UnsupportedError):
                qMaxValueEntropy(mm, candidate_set, num_mv_samples=10)

            # test with X_pending=None
            mm = MESMockModel()
            train_inputs = torch.rand(10, 2, device=self.device, dtype=dtype)
            mm.train_inputs = (train_inputs,)
            qMVE = qMaxValueEntropy(mm, candidate_set, num_mv_samples=10)

            # test initialization
            self.assertEqual(qMVE.num_fantasies, 16)
            self.assertEqual(qMVE.num_mv_samples, 10)
            self.assertIsInstance(qMVE.sampler, SobolQMCNormalSampler)
            self.assertEqual(qMVE.sampler.sample_shape, torch.Size([128]))
            self.assertIsInstance(qMVE.fantasies_sampler,
                                  SobolQMCNormalSampler)
            self.assertEqual(qMVE.fantasies_sampler.sample_shape,
                             torch.Size([16]))
            self.assertTrue(qMVE.use_gumbel)
            self.assertEqual(qMVE.posterior_max_values.shape,
                             torch.Size([10, 1]))

            # test evaluation
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            self.assertEqual(qMVE(X).shape, torch.Size([1]))

            # test setting X_pending to None when _init_model exists
            qMVE.set_X_pending(None)
            self.assertEqual(qMVE.model, qMVE._init_model)

            # test with use_gumbel = False
            qMVE = qMaxValueEntropy(mm,
                                    candidate_set,
                                    num_mv_samples=10,
                                    use_gumbel=False)
            self.assertEqual(qMVE(X).shape, torch.Size([1]))

            # test with non-None X_pending
            with mock.patch.object(MESMockModel, "fantasize",
                                   return_value=mm) as patch_f:
                qMVE = qMaxValueEntropy(
                    mm,
                    candidate_set,
                    num_mv_samples=10,
                    X_pending=torch.rand(1, 2, device=self.device,
                                         dtype=dtype),
                )
                patch_f.assert_called_once()

            # Test with multi-output model w/ transform.
            mm = MESMockModel(num_outputs=2)
            pt = ScalarizedPosteriorTransform(
                weights=torch.ones(2, device=self.device, dtype=dtype))
            for gumbel in (True, False):
                qMVE = qMaxValueEntropy(
                    mm,
                    candidate_set,
                    num_mv_samples=10,
                    use_gumbel=gumbel,
                    posterior_transform=pt,
                )
                self.assertEqual(qMVE(X).shape, torch.Size([1]))
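
A minimal standalone sketch of the qMaxValueEntropy construction exercised above, assuming a fitted single-output model; the candidate-set size and num_mv_samples are illustrative choices, not requirements:

import torch
from botorch.acquisition.max_value_entropy_search import qMaxValueEntropy
from botorch.models import SingleTaskGP

train_X = torch.rand(10, 2)
train_Y = torch.randn(10, 1)
model = SingleTaskGP(train_X, train_Y)

# The candidate set discretizes the input space for sampling
# approximate posterior maxima.
candidate_set = torch.rand(1000, 2)
mes = qMaxValueEntropy(model, candidate_set, num_mv_samples=10)
value = mes(torch.rand(1, 2))  # a single q=1 candidate
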
Example #26
    def test_pairwise_gp(self):
        for batch_shape, dtype in itertools.product(
            (torch.Size(), torch.Size([2])), (torch.float, torch.double)):
            tkwargs = {"device": self.device, "dtype": dtype}
            X_dim = 2

            model, model_kwargs = self._get_model_and_data(
                batch_shape=batch_shape, X_dim=X_dim, **tkwargs)
            train_X = model_kwargs["datapoints"]
            train_comp = model_kwargs["comparisons"]

            # test training
            # regular training
            mll = PairwiseLaplaceMarginalLogLikelihood(model).to(**tkwargs)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                fit_gpytorch_model(mll, options={"maxiter": 2}, max_retries=1)
            # prior training
            prior_m = PairwiseGP(None, None).to(**tkwargs)
            with self.assertRaises(RuntimeError):
                prior_m(train_X)
            # forward in training mode with non-training data
            custom_m = PairwiseGP(**model_kwargs)
            other_X = torch.rand(batch_shape + torch.Size([3, X_dim]),
                                 **tkwargs)
            other_comp = train_comp.clone()
            with self.assertRaises(RuntimeError):
                custom_m(other_X)
            custom_mll = PairwiseLaplaceMarginalLogLikelihood(custom_m).to(
                **tkwargs)
            post = custom_m(train_X)
            with self.assertRaises(RuntimeError):
                custom_mll(post, other_comp)

            # setting jitter = 0 with a singular covar will raise error
            sing_train_X = torch.ones(batch_shape + torch.Size([10, X_dim]),
                                      **tkwargs)
            with self.assertRaises(RuntimeError):
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=RuntimeWarning)
                    custom_m = PairwiseGP(sing_train_X, train_comp, jitter=0)
                    custom_m.posterior(sing_train_X)

            # test init
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            self.assertIsInstance(model.covar_module.base_kernel, RBFKernel)
            self.assertIsInstance(
                model.covar_module.base_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.covar_module.outputscale_prior,
                                  SmoothedBoxPrior)
            self.assertEqual(model.num_outputs, 1)
            self.assertEqual(model.batch_shape, batch_shape)

            # test custom models
            custom_m = PairwiseGP(**model_kwargs, covar_module=LinearKernel())
            self.assertIsInstance(custom_m.covar_module, LinearKernel)

            # prior prediction
            prior_m = PairwiseGP(None, None).to(**tkwargs)
            prior_m.eval()
            post = prior_m.posterior(train_X)
            self.assertIsInstance(post, GPyTorchPosterior)

            # test adding jitter
            pd_mat = torch.eye(2, 2)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                jittered_pd_mat = model._add_jitter(pd_mat)
            diag_diff = (jittered_pd_mat - pd_mat).diagonal(dim1=-2, dim2=-1)
            self.assertTrue(
                torch.allclose(
                    diag_diff,
                    torch.full_like(diag_diff, model._jitter),
                    atol=model._jitter / 10,
                ))

            # test initial utility val
            util_comp = torch.topk(model.utility, k=2,
                                   dim=-1).indices.unsqueeze(-2)
            self.assertTrue(torch.all(util_comp == train_comp))

            # test posterior
            # test non batch evaluation
            X = torch.rand(batch_shape + torch.Size([3, X_dim]), **tkwargs)
            expected_shape = batch_shape + torch.Size([3, 1])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            self.assertEqual(posterior.variance.shape, expected_shape)

            # test posterior transform
            post_tf = ScalarizedPosteriorTransform(weights=torch.ones(1))
            posterior_tf = model.posterior(X, posterior_transform=post_tf)
            self.assertTrue(torch.equal(posterior.mean, posterior_tf.mean))

            # expect an error when output_indices is not None
            with self.assertRaises(RuntimeError):
                model.posterior(X, output_indices=[0])

            # test re-evaluating utility when it's None
            model.utility = None
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)

            # test batch evaluation
            X = torch.rand(2, *batch_shape, 3, X_dim, **tkwargs)
            expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, 1])

            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)

            # test input_transform
            # the untransformed datapoints should be stored
            normalize_tf = Normalize(d=2,
                                     bounds=torch.tensor([[0, 0], [0.5, 1.5]]))
            model = PairwiseGP(**model_kwargs, input_transform=normalize_tf)
            self.assertTrue(torch.all(model.datapoints == train_X))

            # test set_train_data strict mode
            model = PairwiseGP(**model_kwargs)
            changed_train_X = train_X.unsqueeze(0)
            changed_train_comp = train_comp.unsqueeze(0)
            # expect an error when setting the data to something different
            with self.assertRaises(RuntimeError):
                model.set_train_data(changed_train_X,
                                     changed_train_comp,
                                     strict=True)

            # the same datapoints with changed comparisons should also raise an error
            with self.assertRaises(RuntimeError):
                model.set_train_data(train_X, changed_train_comp, strict=True)
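
For context, a minimal sketch of fitting a PairwiseGP on synthetic comparison data. The convention assumed here is that each comparison row [i, j] records that item i beat item j; constructor signatures match the BoTorch version these tests target:

import torch
from botorch.fit import fit_gpytorch_model
from botorch.models.pairwise_gp import (
    PairwiseGP,
    PairwiseLaplaceMarginalLogLikelihood,
)

datapoints = torch.rand(6, 2, dtype=torch.double)
utility = datapoints.sum(dim=-1)  # synthetic latent utility
# Compare consecutive points; the preferred (higher-utility) item goes first.
pairs = torch.stack([torch.arange(5), torch.arange(1, 6)], dim=-1)
swap = utility[pairs[:, 0]] < utility[pairs[:, 1]]
comparisons = torch.where(swap.unsqueeze(-1), pairs.flip(-1), pairs)

model = PairwiseGP(datapoints, comparisons)
mll = PairwiseLaplaceMarginalLogLikelihood(model)
fit_gpytorch_model(mll, options={"maxiter": 10}, max_retries=1)
posterior = model.posterior(torch.rand(3, 2, dtype=torch.double))
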
Example #27
    def test_KnowledgeGradient_helpers(self):
        model = KnowledgeGradient()
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            search_space_digest=SearchSpaceDigest(
                feature_names=self.feature_names,
                bounds=self.bounds,
            ),
            metric_names=self.metric_names,
        )

        # test _instantiate_KG
        posterior_tf = ScalarizedPosteriorTransform(
            weights=self.objective_weights)

        # test acquisition setting
        acq_function = _instantiate_KG(
            model=model.model,
            posterior_transform=posterior_tf,
            n_fantasies=10,
            qmc=True,
        )
        self.assertIsInstance(acq_function.sampler, SobolQMCNormalSampler)
        self.assertIsInstance(acq_function.posterior_transform,
                              ScalarizedPosteriorTransform)
        self.assertEqual(acq_function.num_fantasies, 10)

        acq_function = _instantiate_KG(
            model=model.model,
            posterior_transform=posterior_tf,
            n_fantasies=10,
            qmc=False,
        )
        self.assertIsInstance(acq_function.sampler, IIDNormalSampler)

        acq_function = _instantiate_KG(model=model.model,
                                       posterior_transform=posterior_tf,
                                       qmc=False)
        self.assertIsNone(acq_function.inner_sampler)

        acq_function = _instantiate_KG(
            model=model.model,
            posterior_transform=posterior_tf,
            qmc=True,
            X_pending=self.X_dummy,
        )
        self.assertIsNone(acq_function.inner_sampler)
        self.assertTrue(torch.equal(acq_function.X_pending, self.X_dummy))

        # test _get_best_point_acqf
        acq_function, non_fixed_idcs = model._get_best_point_acqf(
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            X_observed=self.X_dummy,
        )
        self.assertIsInstance(acq_function, qSimpleRegret)
        self.assertIsInstance(acq_function.sampler, SobolQMCNormalSampler)
        self.assertIsNone(non_fixed_idcs)

        acq_function, non_fixed_idcs = model._get_best_point_acqf(
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            X_observed=self.X_dummy,
            qmc=False,
        )
        self.assertIsInstance(acq_function.sampler, IIDNormalSampler)
        self.assertIsNone(non_fixed_idcs)

        with self.assertRaises(RuntimeError):
            model._get_best_point_acqf(
                objective_weights=self.objective_weights,
                outcome_constraints=self.outcome_constraints,
                X_observed=self.X_dummy,
                target_fidelities={1: 1.0},
            )

        # multi-fidelity tests

        model = KnowledgeGradient()
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            search_space_digest=SearchSpaceDigest(
                feature_names=self.feature_names,
                bounds=self.bounds,
                fidelity_features=[-1],
            ),
            metric_names=self.metric_names,
        )

        acq_function = _instantiate_KG(
            model=model.model,
            posterior_transform=posterior_tf,
            target_fidelities={2: 1.0},
            current_value=0,
        )
        self.assertIsInstance(acq_function, qMultiFidelityKnowledgeGradient)

        acq_function = _instantiate_KG(
            model=model.model,
            objective=LinearMCObjective(weights=self.objective_weights),
        )
        self.assertIsInstance(acq_function.inner_sampler,
                              SobolQMCNormalSampler)

        # test error when target fidelity and fidelity weight indices do not match
        with self.assertRaises(RuntimeError):
            _instantiate_KG(
                model=model.model,
                posterior_transform=posterior_tf,
                target_fidelities={1: 1.0},
                fidelity_weights={2: 1.0},
                current_value=0,
            )

        # test _get_best_point_acqf
        acq_function, non_fixed_idcs = model._get_best_point_acqf(
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            X_observed=self.X_dummy,
            target_fidelities={2: 1.0},
        )
        self.assertIsInstance(acq_function, FixedFeatureAcquisitionFunction)
        self.assertIsInstance(acq_function.acq_func.sampler,
                              SobolQMCNormalSampler)
        self.assertEqual(non_fixed_idcs, [0, 1])

        acq_function, non_fixed_idcs = model._get_best_point_acqf(
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            X_observed=self.X_dummy,
            target_fidelities={2: 1.0},
            qmc=False,
        )
        self.assertIsInstance(acq_function, FixedFeatureAcquisitionFunction)
        self.assertIsInstance(acq_function.acq_func.sampler, IIDNormalSampler)
        self.assertEqual(non_fixed_idcs, [0, 1])

        # test error when no target fidelities are provided for the multi-fidelity model
        with self.assertRaises(RuntimeError):
            model._get_best_point_acqf(
                objective_weights=self.objective_weights,
                outcome_constraints=self.outcome_constraints,
                X_observed=self.X_dummy,
                qmc=False,
            )

        # test error if fixed features are also fidelity features
        with self.assertRaises(RuntimeError):
            model._get_best_point_acqf(
                objective_weights=self.objective_weights,
                outcome_constraints=self.outcome_constraints,
                X_observed=self.X_dummy,
                fixed_features={2: 2.0},
                target_fidelities={2: 1.0},
                qmc=False,
            )
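
_instantiate_KG is an Ax-internal helper around BoTorch's knowledge-gradient classes. A rough direct-BoTorch sketch of the scalarized case it builds, assuming qKnowledgeGradient accepts posterior_transform as in the version under test (the weights and num_fantasies are illustrative):

import torch
from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
from botorch.acquisition.objective import ScalarizedPosteriorTransform
from botorch.models import SingleTaskGP

train_X = torch.rand(8, 3, dtype=torch.double)
train_Y = torch.randn(8, 2, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)

# Scalarize the two outputs so KG fantasizes over a single-valued objective.
pt = ScalarizedPosteriorTransform(
    weights=torch.tensor([1.0, -1.0], dtype=torch.double))
kg = qKnowledgeGradient(model, num_fantasies=10, posterior_transform=pt)
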
Example #28
    def test_KnowledgeGradient(self):
        model = KnowledgeGradient()
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            search_space_digest=SearchSpaceDigest(
                feature_names=self.feature_names,
                bounds=self.bounds,
            ),
            metric_names=self.metric_names,
        )

        n = 2

        X_dummy = torch.rand(1, n, 4, dtype=self.dtype, device=self.device)
        acq_dummy = torch.tensor(0.0, dtype=self.dtype, device=self.device)

        with mock.patch(self.optimize_acqf) as mock_optimize_acqf:
            mock_optimize_acqf.side_effect = [(X_dummy, acq_dummy)]
            Xgen, wgen, _, __ = model.gen(
                n=n,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                outcome_constraints=None,
                linear_constraints=None,
                model_gen_options={
                    "acquisition_function_kwargs": self.acq_options,
                    "optimizer_kwargs": self.optimizer_options,
                },
            )
            self.assertTrue(torch.equal(Xgen, X_dummy.cpu()))
            self.assertTrue(torch.equal(wgen, torch.ones(n, dtype=self.dtype)))

            # called once; the best-point call is not caught by the mock
            mock_optimize_acqf.assert_called_once()

        ini_dummy = torch.rand(10, 32, 3, dtype=self.dtype, device=self.device)
        optimizer_options2 = {
            "num_restarts": 1,
            "raw_samples": 1,
            "maxiter": 5,
            "batch_limit": 1,
            "partial_restarts": 2,
        }
        with mock.patch(
                "ax.models.torch.botorch_kg.gen_one_shot_kg_initial_conditions",
                return_value=ini_dummy,
        ) as mock_warmstart_initialization:
            Xgen, wgen, _, __ = model.gen(
                n=n,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                outcome_constraints=None,
                linear_constraints=None,
                model_gen_options={
                    "acquisition_function_kwargs": self.acq_options,
                    "optimizer_kwargs": optimizer_options2,
                },
            )
            mock_warmstart_initialization.assert_called_once()

        posterior_tf = ScalarizedPosteriorTransform(
            weights=self.objective_weights)
        dummy_acq = PosteriorMean(model=model.model,
                                  posterior_transform=posterior_tf)
        with mock.patch("ax.models.torch.utils.PosteriorMean",
                        return_value=dummy_acq) as mock_posterior_mean:
            Xgen, wgen, _, __ = model.gen(
                n=n,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                outcome_constraints=None,
                linear_constraints=None,
                model_gen_options={
                    "acquisition_function_kwargs": self.acq_options,
                    "optimizer_kwargs": optimizer_options2,
                },
            )
            self.assertEqual(mock_posterior_mean.call_count, 2)

        # Check best point selection within bounds (some numerical tolerance)
        xbest = model.best_point(bounds=self.bounds,
                                 objective_weights=self.objective_weights)
        lb = torch.tensor([b[0] for b in self.bounds]) - 1e-5
        ub = torch.tensor([b[1] for b in self.bounds]) + 1e-5
        self.assertTrue(torch.all(xbest <= ub))
        self.assertTrue(torch.all(xbest >= lb))

        # test error message
        linear_constraints = (
            torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]),
            torch.tensor([[0.5], [1.0]]),
        )
        with self.assertRaises(UnsupportedError):
            Xgen, wgen = model.gen(
                n=n,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                outcome_constraints=None,
                linear_constraints=linear_constraints,
            )

        # test input warping
        self.assertFalse(model.use_input_warping)
        model = KnowledgeGradient(use_input_warping=True)
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            search_space_digest=SearchSpaceDigest(
                feature_names=self.feature_names,
                bounds=self.bounds,
            ),
            metric_names=self.metric_names,
        )
        self.assertTrue(model.use_input_warping)
        self.assertTrue(hasattr(model.model, "input_transform"))
        self.assertIsInstance(model.model.input_transform, Warp)

        # test loocv pseudo likelihood
        self.assertFalse(model.use_loocv_pseudo_likelihood)
        model = KnowledgeGradient(use_loocv_pseudo_likelihood=True)
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            search_space_digest=SearchSpaceDigest(
                feature_names=self.feature_names,
                bounds=self.bounds,
            ),
            metric_names=self.metric_names,
        )
        self.assertTrue(model.use_loocv_pseudo_likelihood)
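
use_input_warping=True above corresponds to attaching BoTorch's Warp input transform to the underlying model. A minimal sketch of doing so directly (warping all input dimensions here is an illustrative choice):

import torch
from botorch.models import SingleTaskGP
from botorch.models.transforms.input import Warp

train_X = torch.rand(8, 3)
train_Y = torch.randn(8, 1)

# Warp applies a learnable Kumaraswamy-CDF warping to each listed input
# dimension; its parameters are learned jointly with the GP's.
warp_tf = Warp(indices=list(range(train_X.shape[-1])))
model = SingleTaskGP(train_X, train_Y, input_transform=warp_tf)
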
Example #29
    def test_KroneckerMultiTaskGP_default(self):
        bounds = torch.tensor([[-1.0, 0.0], [1.0, 1.0]])

        for batch_shape, dtype, use_intf, use_octf in itertools.product(
            (torch.Size(),),  # TODO: fix and test batch mode (torch.Size([3]))
            (torch.float, torch.double),
            (False, True),
            (False, True),
        ):
            tkwargs = {"device": self.device, "dtype": dtype}

            octf = Standardize(m=2) if use_octf else None

            intf = (
                Normalize(d=2, bounds=bounds.to(**tkwargs), transform_on_train=True)
                if use_intf
                else None
            )

            # initialization with default settings
            model, train_X, _ = _get_kronecker_model_and_training_data(
                model_kwargs={"outcome_transform": octf, "input_transform": intf},
                batch_shape=batch_shape,
                **tkwargs,
            )
            self.assertIsInstance(model, KroneckerMultiTaskGP)
            self.assertEqual(model.num_outputs, 2)
            self.assertIsInstance(model.likelihood, MultitaskGaussianLikelihood)
            self.assertEqual(model.likelihood.rank, 0)
            self.assertIsInstance(model.mean_module, MultitaskMean)
            self.assertIsInstance(model.covar_module, MultitaskKernel)
            base_kernel = model.covar_module
            self.assertIsInstance(base_kernel.data_covar_module, MaternKernel)
            self.assertIsInstance(base_kernel.task_covar_module, IndexKernel)
            task_covar_prior = base_kernel.task_covar_module.IndexKernelPrior
            self.assertIsInstance(task_covar_prior, LKJCovariancePrior)
            self.assertEqual(task_covar_prior.correlation_prior.eta, 1.5)
            self.assertIsInstance(task_covar_prior.sd_prior, SmoothedBoxPrior)
            lengthscale_prior = base_kernel.data_covar_module.lengthscale_prior
            self.assertIsInstance(lengthscale_prior, GammaPrior)
            self.assertEqual(lengthscale_prior.concentration, 3.0)
            self.assertEqual(lengthscale_prior.rate, 6.0)
            self.assertEqual(base_kernel.task_covar_module.covar_factor.shape[-1], 2)

            # test model fitting
            mll = ExactMarginalLogLikelihood(model.likelihood, model)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                mll = fit_gpytorch_model(mll, options={"maxiter": 1}, max_retries=1)

            # test posterior
            test_x = torch.rand(2, 2, **tkwargs)
            posterior_f = model.posterior(test_x)
            if not use_octf:
                self.assertIsInstance(posterior_f, GPyTorchPosterior)
                self.assertIsInstance(posterior_f.mvn, MultitaskMultivariateNormal)
            else:
                self.assertIsInstance(posterior_f, TransformedPosterior)
                self.assertIsInstance(
                    posterior_f._posterior.mvn, MultitaskMultivariateNormal
                )

            self.assertEqual(posterior_f.mean.shape, torch.Size([2, 2]))
            self.assertEqual(posterior_f.variance.shape, torch.Size([2, 2]))

            if use_octf:
                # ensure un-transformation is applied
                tmp_tf = model.outcome_transform
                del model.outcome_transform
                p_tf = model.posterior(test_x)
                model.outcome_transform = tmp_tf
                expected_var = tmp_tf.untransform_posterior(p_tf).variance
                self.assertTrue(torch.allclose(posterior_f.variance, expected_var))
            else:
                # test observation noise
                # TODO: outcome transform + likelihood noise?
                posterior_noisy = model.posterior(test_x, observation_noise=True)
                self.assertTrue(
                    torch.allclose(
                        posterior_noisy.variance,
                        model.likelihood(posterior_f.mvn).variance,
                    )
                )

            # test posterior (batch eval)
            test_x = torch.rand(3, 2, 2, **tkwargs)
            posterior_f = model.posterior(test_x)
            if not use_octf:
                self.assertIsInstance(posterior_f, GPyTorchPosterior)
                self.assertIsInstance(posterior_f.mvn, MultitaskMultivariateNormal)
            else:
                self.assertIsInstance(posterior_f, TransformedPosterior)
                self.assertIsInstance(
                    posterior_f._posterior.mvn, MultitaskMultivariateNormal
                )
            self.assertEqual(posterior_f.mean.shape, torch.Size([3, 2, 2]))
            self.assertEqual(posterior_f.variance.shape, torch.Size([3, 2, 2]))

            # test that using a posterior transform raises an error
            post_tf = ScalarizedPosteriorTransform(weights=torch.ones(2, **tkwargs))
            with self.assertRaises(NotImplementedError):
                model.posterior(test_x, posterior_transform=post_tf)
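
A minimal sketch of the block-design setup KroneckerMultiTaskGP expects: every output is observed at every training input, so train_Y has one column per task (sizes illustrative):

import torch
from botorch.models import KroneckerMultiTaskGP

train_X = torch.rand(10, 2, dtype=torch.double)
train_Y = torch.randn(10, 2, dtype=torch.double)  # two outputs per input

model = KroneckerMultiTaskGP(train_X, train_Y)
posterior = model.posterior(torch.rand(4, 2, dtype=torch.double))
print(posterior.mean.shape)  # torch.Size([4, 2]): one column per task
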
Example #30
    def test_q_lower_bound_max_value_entropy(self):
        for dtype in (torch.float, torch.double):
            torch.manual_seed(7)
            mm = MESMockModel()
            with self.assertRaises(TypeError):
                qLowerBoundMaxValueEntropy(mm)

            candidate_set = torch.rand(1000,
                                       2,
                                       device=self.device,
                                       dtype=dtype)

            # test error in case of batch GP model
            mm = MESMockModel(batch_shape=torch.Size([2]))
            with self.assertRaises(NotImplementedError):
                qLowerBoundMaxValueEntropy(mm,
                                           candidate_set,
                                           num_mv_samples=10)

            # test error when number of outputs > 1 and no transform
            mm = MESMockModel()
            mm._num_outputs = 2
            with self.assertRaises(UnsupportedError):
                qLowerBoundMaxValueEntropy(mm,
                                           candidate_set,
                                           num_mv_samples=10)
            mm._num_outputs = 1

            # test with X_pending=None
            mm = MESMockModel()
            train_inputs = torch.rand(10, 2, device=self.device, dtype=dtype)
            mm.train_inputs = (train_inputs,)
            qGIBBON = qLowerBoundMaxValueEntropy(mm,
                                                 candidate_set,
                                                 num_mv_samples=10)

            # test initialization
            self.assertEqual(qGIBBON.num_mv_samples, 10)
            self.assertTrue(qGIBBON.use_gumbel)
            self.assertEqual(qGIBBON.posterior_max_values.shape,
                             torch.Size([10, 1]))

            # test evaluation
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            self.assertEqual(qGIBBON(X).shape, torch.Size([1]))

            # test with use_gumbel = False
            qGIBBON = qLowerBoundMaxValueEntropy(mm,
                                                 candidate_set,
                                                 num_mv_samples=10,
                                                 use_gumbel=False)
            self.assertEqual(qGIBBON(X).shape, torch.Size([1]))

            # test with non-None X_pending
            qGIBBON = qLowerBoundMaxValueEntropy(
                mm,
                candidate_set,
                num_mv_samples=10,
                use_gumbel=False,
                X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
            )
            self.assertEqual(qGIBBON(X).shape, torch.Size([1]))

            # Test with multi-output model w/ transform.
            mm = MESMockModel(num_outputs=2)
            pt = ScalarizedPosteriorTransform(
                weights=torch.ones(2, device=self.device, dtype=dtype))
            qGIBBON = qLowerBoundMaxValueEntropy(
                mm,
                candidate_set,
                num_mv_samples=10,
                use_gumbel=False,
                X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
                posterior_transform=pt,
            )
            with self.assertRaisesRegex(UnsupportedError,
                                        "X_pending is not None"):
                qGIBBON(X)
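
A minimal standalone sketch of the GIBBON acquisition (qLowerBoundMaxValueEntropy) on a single-output model, mirroring the unconstrained path above; the candidate-set size and num_mv_samples are illustrative:

import torch
from botorch.acquisition.max_value_entropy_search import (
    qLowerBoundMaxValueEntropy,
)
from botorch.models import SingleTaskGP

train_X = torch.rand(10, 2)
train_Y = torch.randn(10, 1)
model = SingleTaskGP(train_X, train_Y)

candidate_set = torch.rand(1000, 2)  # discretization for max-value sampling
gibbon = qLowerBoundMaxValueEntropy(model, candidate_set, num_mv_samples=10)
value = gibbon(torch.rand(1, 2))
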