Example #1
    def __init__(
        self,
        model: Model,
        candidate_set: Tensor,
        num_fantasies: int = 16,
        num_mv_samples: int = 10,
        num_y_samples: int = 128,
        use_gumbel: bool = True,
        maximize: bool = True,
        X_pending: Optional[Tensor] = None,
        train_inputs: Optional[Tensor] = None,
        **kwargs: Any,
    ) -> None:
        r"""Single-outcome max-value entropy search acquisition function.

        Args:
            model: A fitted single-outcome model.
            candidate_set: A `n x d` Tensor including `n` candidate points to
                discretize the design space. Max values are sampled from the
                (joint) model posterior over these points.
            num_fantasies: Number of fantasies to generate. The higher this
                number the more accurate the model (at the expense of model
                complexity, wall time and memory). Ignored if `X_pending` is `None`.
            num_mv_samples: Number of max value samples.
            num_y_samples: Number of posterior samples at specific design point `X`.
            use_gumbel: If True, use Gumbel approximation to sample the max values.
            maximize: If True, consider the problem a maximization problem.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been evaluated.
            train_inputs: A `n_train x d` Tensor that the model has been fitted on.
                Not required if the model is an instance of a GPyTorch ExactGP model.
        """
        sampler = SobolQMCNormalSampler(num_y_samples)
        super().__init__(model=model, sampler=sampler)

        # Batch GP models (e.g. fantasized models) are not currently supported
        if train_inputs is None:
            train_inputs = self.model.train_inputs[0]
        if train_inputs.ndim > 2:
            raise NotImplementedError(
                "Batch GP models (e.g. fantasized models) "
                "are not yet supported by qMaxValueEntropy")

        self._init_model = model  # only used for the `fantasize()` in `set_X_pending()`
        train_inputs = match_batch_shape(train_inputs, candidate_set)
        self.candidate_set = torch.cat([candidate_set, train_inputs], dim=0)
        self.fantasies_sampler = SobolQMCNormalSampler(num_fantasies)
        self.num_fantasies = num_fantasies
        self.use_gumbel = use_gumbel
        self.num_mv_samples = num_mv_samples
        self.maximize = maximize
        self.weight = 1.0 if maximize else -1.0

        # If we moved `self._sample_max_values()` into `set_X_pending()`,
        # it would throw errors when the initial `super().__init__()` call runs,
        # since some members required by `_sample_max_values()` are not yet initialized.
        if X_pending is None:
            self._sample_max_values()
        else:
            self.set_X_pending(X_pending)
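A minimal usage sketch for the constructor above, assuming it belongs to BoTorch's `qMaxValueEntropy`; the `SingleTaskGP` setup, shapes, and values here are illustrative, not part of the source:

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition.max_value_entropy_search import qMaxValueEntropy

train_X = torch.rand(10, 2)
train_Y = torch.sin(train_X).sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)  # in practice, fit hyperparameters first
# candidate points discretizing the design space, as the docstring describes
candidate_set = torch.rand(1000, 2)
qMVE = qMaxValueEntropy(model=model, candidate_set=candidate_set)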
Example #2
    def test_q_simple_regret(self):
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)

            # basic test
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # basic test, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
Example #3
 def test_batch_range(self):
     # check batch_range default and can be changed
     sampler = SobolQMCNormalSampler(num_samples=4)
     self.assertEqual(sampler.batch_range, (0, -2))
     sampler.batch_range = (-3, -2)
     self.assertEqual(sampler.batch_range, (-3, -2))
     # check that base samples are cleared after batch_range set
     posterior = _get_test_posterior(self.device)
     _ = sampler(posterior)
     self.assertIsNotNone(sampler.base_samples)
     sampler.batch_range = (0, -2)
     self.assertIsNone(sampler.base_samples)
Example #4
    def test_forward(self):
        for dtype in (torch.float, torch.double):

            # no resample
            sampler = SobolQMCNormalSampler(num_samples=4, seed=1234)
            self.assertFalse(sampler.resample)
            self.assertEqual(sampler.seed, 1234)
            self.assertTrue(sampler.collapse_batch_dims)
            # check samples non-batched
            posterior = _get_posterior(device=self.device, dtype=dtype)
            samples = sampler(posterior)
            self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
            self.assertEqual(sampler.seed, 1235)
            # ensure samples are the same
            samples2 = sampler(posterior)
            self.assertTrue(torch.allclose(samples, samples2))
            self.assertEqual(sampler.seed, 1235)
            # ensure this works with a differently shaped posterior
            posterior_batched = _get_posterior_batched(device=self.device,
                                                       dtype=dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, 1235)
            # ensure this works when changing the dtype
            new_dtype = torch.float if dtype == torch.double else torch.double
            posterior_batched = _get_posterior_batched(device=self.device,
                                                       dtype=new_dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, 1235)

            # resample
            sampler = SobolQMCNormalSampler(num_samples=4,
                                            resample=True,
                                            seed=None)
            self.assertTrue(sampler.resample)
            self.assertTrue(sampler.collapse_batch_dims)
            initial_seed = sampler.seed
            # check samples non-batched
            posterior = _get_posterior(device=self.device, dtype=dtype)
            samples = sampler(posterior)
            self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
            self.assertEqual(sampler.seed, initial_seed + 1)
            # ensure samples are different
            samples2 = sampler(posterior)
            self.assertFalse(torch.allclose(samples, samples2))
            self.assertEqual(sampler.seed, initial_seed + 2)
            # ensure this works with a differently shaped posterior
            posterior_batched = _get_posterior_batched(device=self.device,
                                                       dtype=dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, initial_seed + 3)
Example #5
 def test_get_base_sample_shape_no_collapse(self):
     sampler = SobolQMCNormalSampler(num_samples=4, collapse_batch_dims=False)
     self.assertFalse(sampler.resample)
     self.assertEqual(sampler.sample_shape, torch.Size([4]))
     self.assertFalse(sampler.collapse_batch_dims)
     # check sample shape non-batched
     posterior = _get_posterior()
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 2, 1]))
     # check sample shape batched
     posterior = _get_posterior_batched()
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 3, 2, 1]))
Example #6
    def __init__(
        self,
        model: Model,
        candidate_set: Tensor,
        num_fantasies: int = 16,
        num_mv_samples: int = 10,
        num_y_samples: int = 128,
        posterior_transform: Optional[PosteriorTransform] = None,
        use_gumbel: bool = True,
        maximize: bool = True,
        X_pending: Optional[Tensor] = None,
        train_inputs: Optional[Tensor] = None,
        **kwargs: Any,
    ) -> None:
        r"""Single-outcome max-value entropy search acquisition function.

        Args:
            model: A fitted single-outcome model.
            candidate_set: A `n x d` Tensor including `n` candidate points to
                discretize the design space. Max values are sampled from the
                (joint) model posterior over these points.
            num_fantasies: Number of fantasies to generate. The higher this
                number the more accurate the model (at the expense of model
                complexity, wall time and memory). Ignored if `X_pending` is `None`.
            num_mv_samples: Number of max value samples.
            num_y_samples: Number of posterior samples at specific design point `X`.
            posterior_transform: A PosteriorTransform. If using a multi-output model,
                a PosteriorTransform that transforms the multi-output posterior into a
                single-output posterior is required.
            use_gumbel: If True, use Gumbel approximation to sample the max values.
            maximize: If True, consider the problem a maximization problem.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been evaluated.
            train_inputs: A `n_train x d` Tensor that the model has been fitted on.
                Not required if the model is an instance of a GPyTorch ExactGP model.
        """
        super().__init__(
            model=model,
            candidate_set=candidate_set,
            num_mv_samples=num_mv_samples,
            posterior_transform=posterior_transform,
            use_gumbel=use_gumbel,
            maximize=maximize,
            X_pending=X_pending,
            train_inputs=train_inputs,
        )
        self._init_model = model  # used for `fantasize()` when setting `X_pending`
        self.sampler = SobolQMCNormalSampler(num_y_samples)
        self.fantasies_sampler = SobolQMCNormalSampler(num_fantasies)
        self.num_fantasies = num_fantasies
        self.set_X_pending(X_pending)  # this did not happen in the super constructor
Example #7
    def _torch_optimize_qehvi_and_get_observation(self):
        torch_anti_ideal_point = torch.tensor(
            self._transformed_anti_ideal_point, dtype=torch.double)
        qehvi_partitioning = NondominatedPartitioning(
            ref_point=torch_anti_ideal_point,
            Y=torch.stack(self._torch_model.train_targets, dim=1))
        qehvi_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)
        self._acquisition = qExpectedHypervolumeImprovement(
            model=self._torch_model,
            ref_point=self._transformed_anti_ideal_point,
            partitioning=qehvi_partitioning,
            sampler=qehvi_sampler)

        # these options all come from the tutorial
        # and likely need a serious review
        candidates, _ = optimize_acqf(
            acq_function=self._acquisition,
            bounds=self._botorch_domain,
            q=BATCH_SIZE,
            num_restarts=NUM_RESTARTS,
            raw_samples=RAW_SAMPLES,  # used for initialization heuristic
            options={
                "batch_limit": 5,
                "maxiter": 200,
                "nonnegative": True
            },
            sequential=True,
        )

        # is unnormalize necessary here?
        # we are providing the same bounds here and in optimizer
        new_x = unnormalize(candidates.detach(), bounds=self._botorch_domain)
        transformed_eps, transformed_err = self._optimization_handler(new_x)
        return new_x, transformed_eps, transformed_err
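The helper above references several module-level constants from its project. The values below are an assumed configuration in the spirit of the BoTorch qEHVI tutorial; only the names come from the snippet:

MC_SAMPLES = 128    # MC samples drawn by the qEHVI sampler
BATCH_SIZE = 4      # q: number of candidates generated per round
NUM_RESTARTS = 10   # restarts for multi-start acquisition optimization
RAW_SAMPLES = 512   # raw samples for the initialization heuristic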
Example #8
 def test_batched_multi_output_gpytorch_model(self, cuda=False):
     tkwargs = {"device": torch.device("cuda" if cuda else "cpu")}
     for dtype in (torch.float, torch.double):
         tkwargs["dtype"] = dtype
         train_X = torch.rand(5, 1, **tkwargs)
         train_Y = torch.cat(
             [torch.sin(train_X), torch.cos(train_X)], dim=-1)
         # basic test
         model = SimpleBatchedMultiOutputGPyTorchModel(train_X, train_Y)
         test_X = torch.rand(2, 1, **tkwargs)
         posterior = model.posterior(test_X)
         self.assertIsInstance(posterior, GPyTorchPosterior)
         self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
         # test observation noise
         posterior = model.posterior(test_X, observation_noise=True)
         self.assertIsInstance(posterior, GPyTorchPosterior)
         self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
         # test conditioning on observations
         cm = model.condition_on_observations(torch.rand(2, 1, **tkwargs),
                                              torch.rand(2, 2, **tkwargs))
         self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([2, 7]))
         # test fantasize
         sampler = SobolQMCNormalSampler(num_samples=2)
         cm = model.fantasize(torch.rand(2, 1, **tkwargs), sampler=sampler)
         self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
         cm = model.fantasize(torch.rand(2, 1, **tkwargs),
                              sampler=sampler,
                              observation_noise=True)
         self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
Example #9
    def __init__(
        self,
        model: Model,
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCMultiOutputObjective] = None,
        X_pending: Optional[Tensor] = None,
    ) -> None:
        r"""Constructor for the MCAcquisitionFunction base class.

        Args:
            model: A fitted model.
            sampler: The sampler used to draw base samples. Defaults to
                `SobolQMCNormalSampler(num_samples=128, collapse_batch_dims=True)`.
            objective: The MCMultiOutputObjective under which the samples are
                evaluated. Defaults to `IdentityMCMultiOutputObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been
                evaluated.
        """
        super().__init__(model=model)
        if sampler is None:
            sampler = SobolQMCNormalSampler(num_samples=128,
                                            collapse_batch_dims=True)
        self.add_module("sampler", sampler)
        if objective is None:
            objective = IdentityMCMultiOutputObjective()
        elif not isinstance(objective, MCMultiOutputObjective):
            raise UnsupportedError(
                "Only objectives of type MCMultiOutputObjective are supported for "
                "Multi-Objective MC acquisition functions.")
        self.add_module("objective", objective)
        self.X_pending = None
        if X_pending is not None:
            self.set_X_pending(X_pending)
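A hypothetical minimal subclass sketch, assuming the constructor above is BoTorch's `MultiObjectiveMCAcquisitionFunction` base class: `forward` draws posterior samples with `self.sampler`, maps them through `self.objective`, and reduces the MC, outcome, and q dimensions (the class name and the reduction are illustrative):

class ExampleMultiObjectiveAcqf(MultiObjectiveMCAcquisitionFunction):
    def forward(self, X: Tensor) -> Tensor:
        posterior = self.model.posterior(X)
        samples = self.sampler(posterior)  # `sample_shape x batch_shape x q x m`
        obj = self.objective(samples)      # `sample_shape x batch_shape x q x m'`
        # average over MC samples, sum outcomes, take the best of the q points
        return obj.mean(dim=0).sum(dim=-1).max(dim=-1).values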
Example #10
 def test_gpytorch_model(self):
     for dtype in (torch.float, torch.double):
         tkwargs = {"device": self.device, "dtype": dtype}
         train_X = torch.rand(5, 1, **tkwargs)
         train_Y = torch.sin(train_X)
         # basic test
         model = SimpleGPyTorchModel(train_X, train_Y)
         test_X = torch.rand(2, 1, **tkwargs)
         posterior = model.posterior(test_X)
         self.assertIsInstance(posterior, GPyTorchPosterior)
         self.assertEqual(posterior.mean.shape, torch.Size([2, 1]))
         # test observation noise
         posterior = model.posterior(test_X, observation_noise=True)
         self.assertIsInstance(posterior, GPyTorchPosterior)
         self.assertEqual(posterior.mean.shape, torch.Size([2, 1]))
         # test conditioning on observations
         cm = model.condition_on_observations(torch.rand(2, 1, **tkwargs),
                                              torch.rand(2, **tkwargs))
         self.assertIsInstance(cm, SimpleGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([7]))
         # test fantasize
         sampler = SobolQMCNormalSampler(num_samples=2)
         cm = model.fantasize(torch.rand(2, 1, **tkwargs), sampler=sampler)
         self.assertIsInstance(cm, SimpleGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([2, 7]))
         cm = model.fantasize(torch.rand(2, 1, **tkwargs),
                              sampler=sampler,
                              observation_noise=True)
         self.assertIsInstance(cm, SimpleGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([2, 7]))
Example #11
    def __init__(
        self,
        model: Model,
        objective: Optional[MCAcquisitionObjective] = None,
        sampler: Optional[MCSampler] = None,
    ) -> None:
        r"""Posterior Variance of Link Function

        Args:
            model: A fitted model.
            objective: An MCAcquisitionObjective representing the link function
                (e.g., logistic or probit) applied to the difference between two
                (usually 1-d) samples. Can be implemented via GenericMCObjective.
            sampler: The sampler used for drawing MC samples.
        """
        if sampler is None:
            sampler = SobolQMCNormalSampler(num_samples=512,
                                            collapse_batch_dims=True)
        if objective is None:
            objective = ProbitObjective()
        # Pass `objective=None` to the parent constructor to skip its default
        # objective handling, then attach the link-function objective directly.
        super().__init__(model=model,
                         sampler=sampler,
                         objective=None,
                         X_pending=None)
        self.objective = objective
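As the docstring suggests, the link function can be supplied via `GenericMCObjective`; a sketch using the standard normal CDF as a probit link (the lambda and shapes are illustrative):

import torch
from botorch.acquisition.objective import GenericMCObjective

# probit link: squash the (difference) samples through the standard normal CDF
probit_link = GenericMCObjective(
    lambda samples, X=None: torch.distributions.Normal(0.0, 1.0).cdf(samples.squeeze(-1))
)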
Example #12
 def test_init(self):
     mm = MockModel(MockPosterior(mean=torch.rand(2, 1)))
     # test default init
     acqf = DummyMultiObjectiveMCAcquisitionFunction(model=mm)
     self.assertIsInstance(acqf.objective, IdentityMCMultiOutputObjective)
     self.assertIsInstance(acqf.sampler, SobolQMCNormalSampler)
     self.assertEqual(acqf.sampler._sample_shape, torch.Size([512]))
     self.assertTrue(acqf.sampler.collapse_batch_dims)
     self.assertFalse(acqf.sampler.resample)
     self.assertIsNone(acqf.X_pending)
     # test custom init
     sampler = SobolQMCNormalSampler(num_samples=64,
                                     collapse_batch_dims=False,
                                     resample=True)
     objective = DummyMCMultiOutputObjective()
     X_pending = torch.rand(2, 1)
     acqf = DummyMultiObjectiveMCAcquisitionFunction(model=mm,
                                                     sampler=sampler,
                                                     objective=objective,
                                                     X_pending=X_pending)
     self.assertEqual(acqf.objective, objective)
     self.assertEqual(acqf.sampler, sampler)
     self.assertTrue(torch.equal(acqf.X_pending, X_pending))
     # test unsupported objective
     with self.assertRaises(UnsupportedError):
         acqf = DummyMultiObjectiveMCAcquisitionFunction(
             model=mm, objective=IdentityMCObjective())
Example #13
def _get_sampler(mc_samples: int, qmc: bool) -> MCSampler:
    """Set up MC sampler for q(N)EHVI."""
    # initialize the sampler
    seed = int(torch.randint(1, 10000, (1,)).item())
    if qmc:
        return SobolQMCNormalSampler(num_samples=mc_samples, seed=seed)
    return IIDNormalSampler(num_samples=mc_samples, seed=seed)
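Hypothetical calls to the helper above, drawing 128 base samples with a randomly chosen seed:

qmc_sampler = _get_sampler(mc_samples=128, qmc=True)   # Sobol QMC sampler
iid_sampler = _get_sampler(mc_samples=128, qmc=False)  # plain IID fallback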
Example #14
 def test_batched_multi_output_gpytorch_model(self):
     train_X = torch.rand(5, 1)
     train_Y = torch.cat([torch.sin(train_X), torch.cos(train_X)], dim=-1)
     # basic test
     model = SimpleBatchedMultiOutputGPyTorchModel(train_X, train_Y)
     test_X = torch.rand(2, 1)
     posterior = model.posterior(test_X)
     self.assertIsInstance(posterior, GPyTorchPosterior)
     self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
     # test observation noise
     posterior = model.posterior(test_X, observation_noise=True)
     self.assertIsInstance(posterior, GPyTorchPosterior)
     self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
     # test conditioning on observations
     cm = model.condition_on_observations(torch.rand(2, 1), torch.rand(2, 2))
     self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
     self.assertEqual(cm.train_targets.shape, torch.Size([2, 7]))
     # test fantasize
     sampler = SobolQMCNormalSampler(num_samples=2)
     cm = model.fantasize(torch.rand(2, 1), sampler=sampler)
     self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
     self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
     cm = model.fantasize(torch.rand(2, 1), sampler=sampler, observation_noise=True)
     self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
     self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
Example #15
    def test_forward_no_collapse(self, cuda=False):
        for dtype in (torch.float, torch.double):

            # no resample
            sampler = SobolQMCNormalSampler(
                num_samples=4, seed=1234, collapse_batch_dims=False
            )
            self.assertFalse(sampler.resample)
            self.assertEqual(sampler.seed, 1234)
            self.assertFalse(sampler.collapse_batch_dims)
            # check samples non-batched
            posterior = _get_posterior(cuda=cuda, dtype=dtype)
            samples = sampler(posterior)
            self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
            self.assertEqual(sampler.seed, 1235)
            # ensure samples are the same
            samples2 = sampler(posterior)
            self.assertTrue(torch.allclose(samples, samples2))
            self.assertEqual(sampler.seed, 1235)
            # ensure this works with a differently shaped posterior
            posterior_batched = _get_posterior_batched(cuda=cuda, dtype=dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, 1236)

            # resample
            sampler = SobolQMCNormalSampler(
                num_samples=4, resample=True, collapse_batch_dims=False
            )
            self.assertTrue(sampler.resample)
            self.assertFalse(sampler.collapse_batch_dims)
            initial_seed = sampler.seed
            # check samples non-batched
            posterior = _get_posterior(cuda=cuda, dtype=dtype)
            samples = sampler(posterior=posterior)
            self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
            self.assertEqual(sampler.seed, initial_seed + 1)
            # ensure samples are not the same
            samples2 = sampler(posterior)
            self.assertFalse(torch.allclose(samples, samples2))
            self.assertEqual(sampler.seed, initial_seed + 2)
            # ensure this works with a differently shaped posterior
            posterior_batched = _get_posterior_batched(cuda=cuda, dtype=dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, initial_seed + 3)
Example #16
 def test_unsupported_dimension(self):
     sampler = SobolQMCNormalSampler(num_samples=2)
     mean = torch.zeros(1112)
     cov = DiagLazyTensor(torch.ones(1112))
     mvn = MultivariateNormal(mean, cov)
     posterior = GPyTorchPosterior(mvn)
     with self.assertRaises(UnsupportedError) as e:
         sampler(posterior)
     self.assertIn("Requested: 1112", str(e.exception))
Example #17
 def __init__(
     self,
     num_samples: int,
     resample: bool = False,
     seed: Optional[int] = None,
     collapse_batch_dims: bool = True,
      max_num_comparisons: Optional[int] = None,
 ) -> None:
     PairwiseMCSampler.__init__(self,
                                max_num_comparisons=max_num_comparisons,
                                seed=seed)
     SobolQMCNormalSampler.__init__(
         self,
         num_samples,
         resample=resample,
         seed=seed,
         collapse_batch_dims=collapse_batch_dims,
     )
Example #18
 def test_unsupported_dimension(self):
     sampler = SobolQMCNormalSampler(num_samples=2)
     maxdim = torch.quasirandom.SobolEngine.MAXDIM + 1
     mean = torch.zeros(maxdim)
     cov = DiagLazyTensor(torch.ones(maxdim))
     mvn = MultivariateNormal(mean, cov)
     posterior = GPyTorchPosterior(mvn)
     with self.assertRaises(UnsupportedError) as e:
         sampler(posterior)
     self.assertIn(f"Requested: {maxdim}", str(e.exception))
Example #19
    def __init__(
        self,
        model: Model,
        sample_pareto_frontiers: Callable[[Model], Tensor],
        num_fantasies: int = 16,
        X_pending: Optional[Tensor] = None,
        sampler: Optional[MCSampler] = None,
        **kwargs: Any,
    ) -> None:
        r"""Multi-objective max-value entropy search acquisition function.

        Args:
            model: A fitted multi-output model.
            sample_pareto_frontiers: A callable that takes a model and returns a
                `num_samples x n' x m`-dim tensor of outcomes to use for constructing
                `num_samples` sampled Pareto frontiers.
            num_fantasies: Number of fantasies to generate. The higher this
                number the more accurate the model (at the expense of model
                complexity, wall time and memory). Ignored if `X_pending` is `None`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been evaluated.
            sampler: The sampler used to draw base samples. If omitted, the
                default sampler of the base MC acquisition class is used.
        """
        MultiObjectiveMCAcquisitionFunction.__init__(self, model=model, sampler=sampler)

        # Batch GP models (e.g. fantasized models) are not currently supported
        if isinstance(model, ModelListGP):
            train_X = model.models[0].train_inputs[0]
        else:
            train_X = model.train_inputs[0]
        if train_X.ndim > 3:
            raise NotImplementedError(
                "Batch GP models (e.g. fantasized models) "
                "are not yet supported by qMultiObjectiveMaxValueEntropy"
            )
        # convert to batched MO model
        batched_mo_model = (
            model_list_to_batched(model) if isinstance(model, ModelListGP) else model
        )
        self._init_model = batched_mo_model
        self.mo_model = batched_mo_model
        self.model = batched_multi_output_to_single_output(
            batch_mo_model=batched_mo_model
        )
        self.fantasies_sampler = SobolQMCNormalSampler(num_fantasies)
        self.num_fantasies = num_fantasies
        # weight is used in _compute_information_gain
        self.maximize = True
        self.weight = 1.0
        self.sample_pareto_frontiers = sample_pareto_frontiers

        # this avoids unnecessary model conversion if X_pending is None
        if X_pending is None:
            self._sample_max_values()
        else:
            self.set_X_pending(X_pending)
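A toy stand-in for the `sample_pareto_frontiers` callable (hypothetical; real implementations typically derive Pareto-optimal outcomes from posterior samples, e.g. via random search). It returns a `num_samples x n' x m` tensor of sampled outcomes, as the docstring describes:

def sample_pareto_frontiers(model):
    # joint posterior samples at n' = 10 random points in a 2-d unit cube;
    # returns a `16 x 10 x m` tensor (d = 2 and the sizes are illustrative)
    X = torch.rand(10, 2)
    with torch.no_grad():
        return model.posterior(X).sample(torch.Size([16]))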
Example #20
    def test_learned_preference_objective(self):
        X_dim = 2
        train_X = torch.rand(2, X_dim)
        train_comps = torch.LongTensor([[0, 1]])
        pref_model = PairwiseGP(train_X, train_comps)

        og_sample_shape = 3
        batch_size = 2
        n = 8
        test_X = torch.rand(torch.Size(
            (og_sample_shape, batch_size, n, X_dim)))

        # test default setting where sampler = IIDNormalSampler(num_samples=1)
        pref_obj = LearnedObjective(pref_model=pref_model)
        self.assertEqual(
            pref_obj(test_X).shape,
            torch.Size([og_sample_shape, batch_size, n]))

        # test when sampler has num_samples = 16
        num_samples = 16
        pref_obj = LearnedObjective(
            pref_model=pref_model,
            sampler=SobolQMCNormalSampler(num_samples=num_samples),
        )
        self.assertEqual(
            pref_obj(test_X).shape,
            torch.Size([num_samples * og_sample_shape, batch_size, n]),
        )

        # test posterior mean
        mean_pref_model = PosteriorMeanModel(model=pref_model)
        pref_obj = LearnedObjective(pref_model=mean_pref_model)
        self.assertEqual(
            pref_obj(test_X).shape,
            torch.Size([og_sample_shape, batch_size, n]))

        # cannot use a deterministic model together with a sampler
        with self.assertRaises(AssertionError):
            LearnedObjective(
                pref_model=mean_pref_model,
                sampler=SobolQMCNormalSampler(num_samples=num_samples),
            )
Example #21
    def __init__(
        self,
        model: Model,
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCMultiOutputObjective] = None,
        constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
        X_pending: Optional[Tensor] = None,
    ) -> None:
        r"""Constructor for the MCAcquisitionFunction base class.

        Args:
            model: A fitted model.
            sampler: The sampler used to draw base samples. Defaults to
                `SobolQMCNormalSampler(num_samples=128, collapse_batch_dims=True)`.
            objective: The MCMultiOutputObjective under which the samples are
                evaluated. Defaults to `IdentityMCMultiOutputObjective()`.
            constraints: A list of callables, each mapping a Tensor of dimension
                `sample_shape x batch-shape x q x m` to a Tensor of dimension
                `sample_shape x batch-shape x q`, where negative values imply
                feasibility.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been
                evaluated.
        """
        super().__init__(model=model)
        if sampler is None:
            sampler = SobolQMCNormalSampler(num_samples=128,
                                            collapse_batch_dims=True)
        self.add_module("sampler", sampler)
        if objective is None:
            objective = IdentityMCMultiOutputObjective()
        elif not isinstance(objective, MCMultiOutputObjective):
            raise UnsupportedError(
                "Only objectives of type MCMultiOutputObjective are supported for "
                "Multi-Objective MC acquisition functions.")
        if (hasattr(model, "input_transform")
                and isinstance(model.input_transform, InputPerturbation)
                and constraints is not None):
            raise UnsupportedError(
                "Constraints are not supported with input perturbations, due to "
                "sample q-batch shape being different than that of the inputs. "
                "Use a composite objective that applies feasibility weighting to "
                "samples before calculating the risk measure.")
        self.add_module("objective", objective)
        self.constraints = constraints
        self.X_pending = None
        if X_pending is not None:
            self.set_X_pending(X_pending)
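A sketch of the constraint-callable format the docstring above describes: each callable maps `sample_shape x batch-shape x q x m` samples to `sample_shape x batch-shape x q` values, negative when feasible. Here the second outcome must be non-positive (illustrative):

constraints = [lambda samples: samples[..., 1]]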
Example #22
 def test_fantasize_flag(self):
     train_X = torch.rand(5, 1)
     train_Y = torch.sin(train_X)
     model = SimpleGPyTorchModel(train_X, train_Y)
     model.eval()
     test_X = torch.ones(1, 1)
     model(test_X)
     self.assertFalse(model.last_fantasize_flag)
     model.posterior(test_X)
     self.assertFalse(model.last_fantasize_flag)
     model.fantasize(test_X, SobolQMCNormalSampler(2))
     self.assertTrue(model.last_fantasize_flag)
     model.last_fantasize_flag = False
     with fantasize():
         model.posterior(test_X)
         self.assertTrue(model.last_fantasize_flag)
Example #23
 def test_batched_multi_output_gpytorch_model(self):
     for dtype in (torch.float, torch.double):
         tkwargs = {"device": self.device, "dtype": dtype}
         train_X = torch.rand(5, 1, **tkwargs)
         train_Y = torch.cat(
             [torch.sin(train_X), torch.cos(train_X)], dim=-1)
         # basic test
         model = SimpleBatchedMultiOutputGPyTorchModel(train_X, train_Y)
         self.assertEqual(model.num_outputs, 2)
         test_X = torch.rand(2, 1, **tkwargs)
         posterior = model.posterior(test_X)
         self.assertIsInstance(posterior, GPyTorchPosterior)
         self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
         # test observation noise
         posterior = model.posterior(test_X, observation_noise=True)
         self.assertIsInstance(posterior, GPyTorchPosterior)
         self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
         posterior = model.posterior(test_X,
                                     observation_noise=torch.rand(
                                         2, 2, **tkwargs))
         self.assertIsInstance(posterior, GPyTorchPosterior)
         self.assertEqual(posterior.mean.shape, torch.Size([2, 2]))
         # test subset_output
         with self.assertRaises(NotImplementedError):
             model.subset_output([0])
         # test conditioning on observations
         cm = model.condition_on_observations(torch.rand(2, 1, **tkwargs),
                                              torch.rand(2, 2, **tkwargs))
         self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([2, 7]))
         # test fantasize
         sampler = SobolQMCNormalSampler(num_samples=2)
         cm = model.fantasize(torch.rand(2, 1, **tkwargs), sampler=sampler)
         self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
         cm = model.fantasize(torch.rand(2, 1, **tkwargs),
                              sampler=sampler,
                              observation_noise=True)
         self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
         cm = model.fantasize(
             torch.rand(2, 1, **tkwargs),
             sampler=sampler,
             observation_noise=torch.rand(2, 2, **tkwargs),
         )
         self.assertIsInstance(cm, SimpleBatchedMultiOutputGPyTorchModel)
         self.assertEqual(cm.train_targets.shape, torch.Size([2, 2, 7]))
Example #24
    def __init__(
        self,
        model: Model,
        objective: MCAcquisitionObjective,
        sampler: Optional[MCSampler] = None,
    ) -> None:
        r"""Single Bernoulli mutual information for active learning

        Args:
            model: A fitted model.
            objective: An MCAcquisitionObjective representing the link function
                (e.g., logistic or probit).
            sampler: The sampler used for drawing MC samples.
        """
        if sampler is None:
            sampler = SobolQMCNormalSampler(num_samples=1024, collapse_batch_dims=True)
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=None
        )
Example #25
    def __init__(
        self,
        model: Model,
        mc_points: Tensor,
        sampler: Optional[MCSampler] = None,
        posterior_transform: Optional[PosteriorTransform] = None,
        X_pending: Optional[Tensor] = None,
        **kwargs,
    ) -> None:
        r"""q-Integrated Negative Posterior Variance.

        Args:
            model: A fitted model.
            mc_points: A `batch_shape x N x d` tensor of points to use for
                MC-integrating the posterior variance. Usually, these are qMC
                samples on the whole design space, but biased sampling directly
                allows weighted integration of the posterior variance.
            sampler: The sampler used for drawing fantasy samples. In the basic setting
                of a standard GP (default) this is a dummy, since the variance of the
                model after conditioning does not actually depend on the sampled values.
            posterior_transform: A PosteriorTransform. If using a multi-output model,
                a PosteriorTransform that transforms the multi-output posterior into a
                single-output posterior is required.
            X_pending: A `n' x d`-dim Tensor of `n'` design points that have been
                submitted for function evaluation but have not yet been evaluated.
        """
        super().__init__(model=model,
                         posterior_transform=posterior_transform,
                         **kwargs)
        if sampler is None:
            # If no sampler is provided, we use the following dummy sampler for the
            # fantasize() method in forward. IMPORTANT: This assumes that the posterior
            # variance does not depend on the samples y (only on x), which is true for
            # standard GP models, but not in general (e.g. for other likelihoods or
            # heteroskedastic GPs using a separate noise model fit on data).
            sampler = SobolQMCNormalSampler(num_samples=1,
                                            resample=False,
                                            collapse_batch_dims=True)
        self.sampler = sampler
        self.X_pending = X_pending
        self.register_buffer("mc_points", mc_points)
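A usage sketch, assuming the constructor above is BoTorch's `qNegIntegratedPosteriorVariance` and `model` is a fitted single-output model as in the earlier snippets:

import torch
from botorch.acquisition.active_learning import qNegIntegratedPosteriorVariance
from botorch.utils.sampling import draw_sobol_samples

bounds = torch.stack([torch.zeros(2), torch.ones(2)])  # `2 x d` design bounds
# qMC points covering the design space, for MC-integrating the variance
mc_points = draw_sobol_samples(bounds=bounds, n=256, q=1).squeeze(-2)
qNIPV = qNegIntegratedPosteriorVariance(model=model, mc_points=mc_points)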
Example #26
    def __init__(
        self,
        model: Model,
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        posterior_transform: Optional[PosteriorTransform] = None,
        X_pending: Optional[Tensor] = None,
    ) -> None:
        r"""Constructor for the MCAcquisitionFunction base class.

        Args:
            model: A fitted model.
            sampler: The sampler used to draw base samples. Defaults to
                `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
            objective: The MCAcquisitionObjective under which the samples are
                evaluated. Defaults to `IdentityMCObjective()`.
            posterior_transform: A PosteriorTransform (optional).
            X_pending: A `batch_shape x m x d`-dim Tensor of `m` design points
                that have been submitted for function evaluation but have not
                yet been evaluated.
        """
        super().__init__(model=model)
        if sampler is None:
            sampler = SobolQMCNormalSampler(num_samples=512,
                                            collapse_batch_dims=True)
        self.add_module("sampler", sampler)
        if objective is None and model.num_outputs != 1:
            if posterior_transform is None:
                raise UnsupportedError(
                    "Must specify an objective or a posterior transform when using "
                    "a multi-output model.")
            elif not posterior_transform.scalarize:
                raise UnsupportedError(
                    "If using a multi-output model without an objective, "
                    "posterior_transform must scalarize the output.")
        if objective is None:
            objective = IdentityMCObjective()
        self.posterior_transform = posterior_transform
        self.add_module("objective", objective)
        self.set_X_pending(X_pending)
Example #27
    def __init__(
        self,
        model: GPyTorchModel,
        X_observed: Tensor,
        num_fantasies: int = 20,
        maximize: bool = True,
    ) -> None:
        r"""Single-outcome Noisy Expected Improvement (via fantasies).

        Args:
            model: A fitted single-outcome model.
            X_observed: A `n x d` Tensor of observed points that are likely to
                be the best observed points so far.
            num_fantasies: The number of fantasies to generate. The higher this
                number the more accurate the model (at the expense of model
                complexity and performance).
            maximize: If True, consider the problem a maximization problem.
        """
        if not isinstance(model, FixedNoiseGP):
            raise UnsupportedError(
                "Only FixedNoiseGPs are currently supported for fantasy NEI")
        # sample fantasies
        with torch.no_grad():
            posterior = model.posterior(X=X_observed)
            sampler = SobolQMCNormalSampler(num_fantasies)
            Y_fantasized = sampler(posterior).squeeze(-1)
        batch_X_observed = X_observed.expand(num_fantasies, *X_observed.shape)
        # The fantasy model will operate in batch mode
        fantasy_model = _get_noiseless_fantasy_model(
            model=model,
            batch_X_observed=batch_X_observed,
            Y_fantasized=Y_fantasized)

        if maximize:
            best_f = Y_fantasized.max(dim=-1)[0]
        else:
            best_f = Y_fantasized.min(dim=-1)[0]

        super().__init__(model=fantasy_model, best_f=best_f, maximize=maximize)
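A usage sketch, assuming the constructor above is BoTorch's analytic `NoisyExpectedImprovement`; `fixed_noise_gp`, `train_X`, and `test_X` are illustrative names for a fitted `FixedNoiseGP` and its data:

from botorch.acquisition.analytic import NoisyExpectedImprovement

NEI = NoisyExpectedImprovement(model=fixed_noise_gp, X_observed=train_X, num_fantasies=20)
nei_values = NEI(test_X.unsqueeze(-2))  # analytic acquisition functions expect `b x 1 x d`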
Example #28
 def test_get_base_sample_shape(self):
     sampler = SobolQMCNormalSampler(num_samples=4)
     self.assertFalse(sampler.resample)
     self.assertEqual(sampler.sample_shape, torch.Size([4]))
     self.assertTrue(sampler.collapse_batch_dims)
     # check sample shape non-batched
     posterior = _get_test_posterior(self.device)
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 2, 1]))
     # check sample shape batched
     posterior = _get_test_posterior_batched(self.device)
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 1, 2, 1]))
     # check sample shape with different batch range
     sampler.batch_range = (-3, -1)
     posterior = _get_test_posterior_batched(self.device)
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 1, 1, 1]))
Example #29
    def __init__(
        self,
        model: Model,
        num_fantasies: Optional[int] = 64,
        sampler: Optional[MCSampler] = None,
        objective: Optional[AcquisitionObjective] = None,
        inner_sampler: Optional[MCSampler] = None,
        X_pending: Optional[Tensor] = None,
        current_value: Optional[Tensor] = None,
    ) -> None:
        r"""q-Knowledge Gradient (one-shot optimization).

        Args:
            model: A fitted model. Must support fantasizing.
            num_fantasies: The number of fantasy points to use. More fantasy
                points result in a better approximation, at the expense of
                memory and wall time. Unused if `sampler` is specified.
            sampler: The sampler used to sample fantasy observations. Optional
                if `num_fantasies` is specified.
            objective: The objective under which the samples are evaluated. If
                `None` or a ScalarizedObjective, then the analytic posterior mean
                is used, otherwise the objective is MC-evaluated (using
                inner_sampler).
            inner_sampler: The sampler used for inner sampling. Ignored if the
                objective is `None` or a ScalarizedObjective.
            X_pending: A `m x d`-dim Tensor of `m` design points that have been
                submitted for function evaluation but have not yet been
                evaluated.
            current_value: The current value, i.e. the expected best objective
                given the observed points `D`. If omitted, forward will not
                return the actual KG value, but the expected best objective
                given the data set `D u X`.
        """
        if sampler is None:
            if num_fantasies is None:
                raise ValueError(
                    "Must specify `num_fantasies` if no `sampler` is provided."
                )
            # base samples should be fixed for joint optimization over X, X_fantasies
            sampler = SobolQMCNormalSampler(
                num_samples=num_fantasies, resample=False, collapse_batch_dims=True
            )
        elif num_fantasies is not None:
            if sampler.sample_shape != torch.Size([num_fantasies]):
                raise ValueError(
                    f"The sampler shape must match num_fantasies={num_fantasies}."
                )
        else:
            num_fantasies = sampler.sample_shape[0]
        super(MCAcquisitionFunction, self).__init__(model=model)
        # if not explicitly specified, we use the posterior mean for linear objs
        if isinstance(objective, MCAcquisitionObjective) and inner_sampler is None:
            inner_sampler = SobolQMCNormalSampler(
                num_samples=128, resample=False, collapse_batch_dims=True
            )
        if objective is None and model.num_outputs != 1:
            raise UnsupportedError(
                "Must specify an objective when using a multi-output model."
            )
        self.sampler = sampler
        self.objective = objective
        self.set_X_pending(X_pending)
        self.inner_sampler = inner_sampler
        self.num_fantasies = num_fantasies
        self.current_value = current_value
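A one-shot optimization sketch, assuming the constructor above is BoTorch's `qKnowledgeGradient` (`model` and `bounds` are illustrative, as in the earlier snippets):

from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
from botorch.optim import optimize_acqf

qKG = qKnowledgeGradient(model=model, num_fantasies=64)
# one-shot KG: optimize_acqf jointly optimizes the q candidates and the
# num_fantasies fantasy points appended to them
candidates, value = optimize_acqf(
    acq_function=qKG,
    bounds=bounds,  # `2 x d` tensor of lower and upper bounds
    q=2,
    num_restarts=10,
    raw_samples=512,
)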
Example #30
File: main.py Project: stys/albo
def fit_augmented_objective(
        model, augmented_objective: AugmentedLagrangianMCObjective,
        x_train: Tensor, y_train: Tensor):
    sampler = SobolQMCNormalSampler(num_samples=1500)

    print()
    print("Optimizing augmented lagrangian on GP surrogate")
    print(f"x_1\tx_2\tf_est\tc1_est\tmult1\tmult2")
    for i in range(5):
        acqfn = qSimpleRegret(model=model,
                              sampler=sampler,
                              objective=augmented_objective)

        candidate, _ = optimize_acqf(acq_function=acqfn,
                                     bounds=Tensor([[0.0, 0.0], [6.0, 6.0]]),
                                     q=1,
                                     num_restarts=1,
                                     raw_samples=500)

        x = candidate.detach()
        samples = sampler(model.posterior(x))
        augmented_objective.update_mults(samples)
        augmented_objective.r = 100.0

        x_ = x.numpy()[0]
        acqfn_ = acqfn(x).detach().numpy()[0]
        pred_ = model.posterior(x).mean.detach().numpy()[0]
        mults_ = augmented_objective.mults.detach().numpy()
        print(
            f"{x_[0]:>6.4f}\t{x_[1]:>6.4f}\t{pred_[0]:>6.4f}\t{pred_[1]:>6.4f}\t{mults_[0][0]:>6.4f}"
        )

    f_best = augmented_objective(model.posterior(x).mean)

    ei = qExpectedImprovement(
        model=model,
        best_f=f_best,
        sampler=sampler,
        objective=augmented_objective  # linearized_objective
    )

    for i in range(x_train.shape[0]):
        xx = x_train[i, :]
        print(i, xx, ei(xx.unsqueeze(-1).T), f_best,
              augmented_objective(y_train[i, :].unsqueeze(-1).T),
              y_train[i, :])

    candidate, _ = optimize_acqf(acq_function=ei,
                                 bounds=Tensor([[0.0, 0.0], [6.0, 6.0]]),
                                 q=1,
                                 num_restarts=10,
                                 raw_samples=500)

    x_new = candidate.detach()

    x_new_ = candidate.detach().numpy()[0]
    f_best_ = f_best.detach().numpy()[0]
    f_new_ = augmented_objective(
        model.posterior(x_new).mean).detach().numpy()[0]
    ei_new_ = ei(x_new).detach().numpy()[0]
    print()
    print("Optimizing EI on linearized objective")
    print(f"x_1\tx_2\tf_best\tf_new_\tei")
    print(f"{x_new_[0]:>6.4f}\t", f"{x_new_[1]:>6.4f}\t", f"{f_best_:>6.4f}\t",
          f"{f_new_:>6.4f}\t", f"{ei_new_:>6.4f}")

    return x_new