Example #1
 def test_GetQUCB(self, mock_acqf):
     # make sure beta is specified
     with self.assertRaises(ValueError):
         acqf = get_acquisition_function(
             acquisition_function_name="qUCB",
             model=self.model,
             objective=self.objective,
             X_observed=self.X_observed,
             X_pending=self.X_pending,
             mc_samples=self.mc_samples,
             seed=self.seed,
         )
     acqf = get_acquisition_function(
         acquisition_function_name="qUCB",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
         beta=0.3,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     mock_acqf.assert_called_once_with(
         model=self.model,
         beta=0.3,
         sampler=mock.ANY,
         objective=self.objective,
         posterior_transform=None,
         X_pending=self.X_pending,
     )
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, SobolQMCNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 1)
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
     # test with different beta, non-qmc
     acqf = get_acquisition_function(
         acquisition_function_name="qUCB",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         qmc=False,
         seed=2,
         beta=0.2,
     )
     self.assertEqual(mock_acqf.call_count, 2)
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     self.assertEqual(kwargs["beta"], 0.2)
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, IIDNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 2)
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
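
The tests above receive a mock_acqf argument and assert against mock_acqf.return_value, which implies a mock.patch decorator that this excerpt does not show. A minimal, hypothetical sketch of that wiring follows; the dotted patch target is an assumption and must match wherever the acquisition class is referenced by the module that defines get_acquisition_function.

from unittest import mock

# Hypothetical patch target, for illustration only; the correct path depends on
# the BoTorch version and on where qUpperConfidenceBound is imported.
@mock.patch("botorch.acquisition.monte_carlo.qUpperConfidenceBound")
def test_GetQUCB(self, mock_acqf):
    acqf = get_acquisition_function(
        acquisition_function_name="qUCB",
        model=self.model,
        objective=self.objective,
        X_observed=self.X_observed,
        mc_samples=self.mc_samples,
        seed=self.seed,
        beta=0.3,
    )
    # The factory returns whatever the patched class constructor returns.
    self.assertIs(acqf, mock_acqf.return_value)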
Example #2
 def test_GetUnknownAcquisitionFunction(self):
     with self.assertRaises(NotImplementedError):
         utils.get_acquisition_function(
             acquisition_function_name="foo",
             model=self.model,
             objective=self.objective,
             X_observed=self.X_observed,
             X_pending=self.X_pending,
             mc_samples=self.mc_samples,
             seed=self.seed,
         )
Example #3
 def test_GetQPI(self, mock_acqf):
     # basic test
     acqf = utils.get_acquisition_function(
         acquisition_function_name="qPI",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     best_f = self.objective(self.model.posterior(
         self.X_observed).mean).max().item()
     mock_acqf.assert_called_once_with(
         model=self.model,
         best_f=best_f,
         sampler=mock.ANY,
         objective=self.objective,
         X_pending=self.X_pending,
         tau=1e-3,
     )
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, SobolQMCNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 1)
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
     # test with different tau, non-qmc
     acqf = utils.get_acquisition_function(
         acquisition_function_name="qPI",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         qmc=False,
         seed=2,
         tau=1.0,
     )
     self.assertEqual(mock_acqf.call_count, 2)
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     self.assertEqual(kwargs["tau"], 1.0)
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, IIDNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 2)
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
Example #4
 def test_GetQEI(self, mock_acqf):
     acqf = utils.get_acquisition_function(
         acquisition_function_name="qEI",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     best_f = self.objective(self.model.posterior(
         self.X_observed).mean).max().item()
     mock_acqf.assert_called_once_with(
         model=self.model,
         best_f=best_f,
         sampler=mock.ANY,
         objective=self.objective,
         X_pending=self.X_pending,
     )
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, SobolQMCNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 1)
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
Example #5
def get_NEI(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qNoisyExpectedImprovement acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).
        prune_baseline: If True, prune the baseline points for NEI (default: True).

    Returns:
        qNoisyExpectedImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # Parse random_scalarization params
    objective_weights = _extract_random_scalarization_settings(
        objective_weights, outcome_constraints, **kwargs)
    # construct Objective module
    if outcome_constraints is None:
        objective = LinearMCObjective(weights=objective_weights)
    else:
        obj_tf = get_objective_weights_transform(objective_weights)
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        X_observed = torch.as_tensor(X_observed)
        inf_cost = get_infeasible_cost(X=X_observed,
                                       model=model,
                                       objective=obj_tf)
        objective = ConstrainedMCObjective(objective=obj_tf,
                                           constraints=con_tfs or [],
                                           infeasible_cost=inf_cost)
    return get_acquisition_function(
        acquisition_function_name="qNEI",
        model=model,
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        prune_baseline=kwargs.get("prune_baseline", True),
        mc_samples=kwargs.get("mc_samples", 512),
        qmc=kwargs.get("qmc", True),
        seed=torch.randint(1, 10000, (1, )).item(),
    )
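
A hedged usage sketch (not part of the source): calling get_NEI as defined above with a simple weighted-sum objective over two outcomes. The SingleTaskGP, the weights, and the kwarg values are illustrative assumptions, and model fitting is omitted for brevity.

import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(20, 3, dtype=torch.double)  # 20 observed points in 3 dims
train_Y = torch.rand(20, 2, dtype=torch.double)  # 2 outcomes
model = SingleTaskGP(train_X, train_Y)           # fitting omitted for brevity

# Maximize 1.0 * f_1(x) + 0.5 * f_2(x) via qNoisyExpectedImprovement.
acqf = get_NEI(
    model=model,
    objective_weights=torch.tensor([1.0, 0.5], dtype=torch.double),
    X_observed=train_X,
    mc_samples=256,  # forwarded through **kwargs
    qmc=True,
)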
Example #6
 def test_GetQSR(self, mock_acqf):
     # basic test
     acqf = get_acquisition_function(
         acquisition_function_name="qSR",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     mock_acqf.assert_called_once_with(
         model=self.model,
         sampler=mock.ANY,
         objective=self.objective,
         posterior_transform=None,
         X_pending=self.X_pending,
     )
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, SobolQMCNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 1)
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
     # test with non-qmc
     acqf = get_acquisition_function(
         acquisition_function_name="qSR",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         qmc=False,
         seed=2,
     )
     self.assertEqual(mock_acqf.call_count, 2)
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, IIDNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 2)
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
Example #7
 def test_GetQNEI(self, mock_acqf):
     # basic test
     acqf = get_acquisition_function(
         acquisition_function_name="qNEI",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
         marginalize_dim=0,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     self.assertEqual(mock_acqf.call_count, 1)
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, SobolQMCNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 1)
     self.assertEqual(kwargs["marginalize_dim"], 0)
     # test with non-qmc, no X_pending
     acqf = get_acquisition_function(
         acquisition_function_name="qNEI",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=None,
         mc_samples=self.mc_samples,
         qmc=False,
         seed=2,
     )
     self.assertEqual(mock_acqf.call_count, 2)
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
     self.assertEqual(kwargs["X_pending"], None)
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, IIDNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 2)
     self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
Example #8
 def test_GetQEI(self, mock_acqf):
     self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2)))
     acqf = get_acquisition_function(
         acquisition_function_name="qEI",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
         marginalize_dim=0,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     best_f = self.objective(self.model.posterior(self.X_observed).mean).max().item()
     mock_acqf.assert_called_once_with(
         model=self.model,
         best_f=best_f,
         sampler=mock.ANY,
         objective=self.objective,
         posterior_transform=None,
         X_pending=self.X_pending,
     )
     # test batched model
     self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
     acqf = get_acquisition_function(
         acquisition_function_name="qEI",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     # test batched model without marginalize dim
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, SobolQMCNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 1)
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
Example #9
    def test_GetQEHVI(self, mock_acqf):
        # make sure ref_point is specified
        with self.assertRaises(ValueError):
            acqf = get_acquisition_function(
                acquisition_function_name="qEHVI",
                model=self.model,
                objective=self.mo_objective,
                X_observed=self.X_observed,
                X_pending=self.X_pending,
                mc_samples=self.mc_samples,
                seed=self.seed,
                Y=self.Y,
            )
        # make sure Y is specified
        with self.assertRaises(ValueError):
            acqf = get_acquisition_function(
                acquisition_function_name="qEHVI",
                model=self.model,
                objective=self.mo_objective,
                X_observed=self.X_observed,
                X_pending=self.X_pending,
                mc_samples=self.mc_samples,
                seed=self.seed,
                ref_point=self.ref_point,
            )
        acqf = get_acquisition_function(
            acquisition_function_name="qEHVI",
            model=self.model,
            objective=self.mo_objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            ref_point=self.ref_point,
            Y=self.Y,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        mock_acqf.assert_called_once_with(
            constraints=None,
            model=self.model,
            objective=self.mo_objective,
            ref_point=self.ref_point,
            partitioning=mock.ANY,
            sampler=mock.ANY,
            X_pending=self.X_pending,
        )
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        # test with non-qmc
        acqf = get_acquisition_function(
            acquisition_function_name="qEHVI",
            model=self.model,
            objective=self.mo_objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=2,
            qmc=False,
            ref_point=self.ref_point,
            Y=self.Y,
        )
        self.assertEqual(mock_acqf.call_count, 2)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs["ref_point"], self.ref_point)
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertIsInstance(kwargs["objective"], DummyMCMultiOutputObjective)
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, NondominatedPartitioning)

        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 2)
        # test constraints
        acqf = get_acquisition_function(
            acquisition_function_name="qEHVI",
            model=self.model,
            objective=self.mo_objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            constraints=[lambda Y: Y[..., -1]],
            seed=2,
            qmc=False,
            ref_point=self.ref_point,
            Y=self.Y,
        )
        _, kwargs = mock_acqf.call_args
        partitioning = kwargs["partitioning"]
        self.assertEqual(partitioning.pareto_Y.shape[0], 0)
Example #10
def get_EHVI(
    model: Model,
    objective_weights: Tensor,
    objective_thresholds: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qExpectedHyperVolumeImprovement acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        objective_thresholds: A tensor containing thresholds forming a reference
            point from which to calculate Pareto frontier hypervolume. Points that
            do not dominate the objective_thresholds contribute nothing to
            hypervolume.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).

    Returns:
        qExpectedHypervolumeImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    (
        objective,
        objective_thresholds,
    ) = get_weighted_mc_objective_and_objective_thresholds(
        objective_weights=objective_weights,
        objective_thresholds=objective_thresholds)
    with torch.no_grad():
        Y = model.posterior(X_observed).mean
    # For EHVI acquisition functions we pass the constraint transform directly.
    if outcome_constraints is None:
        cons_tfs = None
    else:
        cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
    num_objectives = objective_thresholds.shape[0]
    return get_acquisition_function(
        acquisition_function_name="qEHVI",
        model=model,
        # TODO (jej): Fix pyre error below by restructuring class hierarchy.
        # pyre-fixme[6]: Expected `botorch.acquisition.objective.
        #  MCAcquisitionObjective` for 3rd parameter `objective` to call
        #  `get_acquisition_function` but got `IdentityMCMultiOutputObjective`.
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        constraints=cons_tfs,
        mc_samples=kwargs.get("mc_samples", DEFAULT_EHVI_MC_SAMPLES),
        qmc=kwargs.get("qmc", True),
        alpha=kwargs.get(
            "alpha",
            get_default_partitioning_alpha(num_objectives=num_objectives)),
        seed=torch.randint(1, 10000, (1, )).item(),
        ref_point=objective_thresholds.tolist(),
        Y=Y,
    )
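
A hedged usage sketch (not part of the source): calling get_EHVI as defined above for a two-objective problem. The weights, thresholds, and model below are illustrative assumptions; fitting is omitted.

import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(16, 4, dtype=torch.double)
train_Y = torch.rand(16, 2, dtype=torch.double)  # two objectives
model = SingleTaskGP(train_X, train_Y)           # fitting omitted

# Both objectives are maximized with unit weights; objective_thresholds supply
# the reference point used for the hypervolume computation.
acqf = get_EHVI(
    model=model,
    objective_weights=torch.tensor([1.0, 1.0], dtype=torch.double),
    objective_thresholds=torch.tensor([0.1, 0.1], dtype=torch.double),
    X_observed=train_X,
    mc_samples=128,  # forwarded through **kwargs
)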
Example #11
def get_NEHVI(
    model: Model,
    objective_weights: Tensor,
    objective_thresholds: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qNoisyExpectedHyperVolumeImprovement acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        objective_thresholds: A tensor containing thresholds forming a reference
            point from which to calculate Pareto frontier hypervolume. Points that
            do not dominate the objective_thresholds contribute nothing to
            hypervolume.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).
        prune_baseline: If True, prune the baseline points for NEI (default: True).
        chebyshev_scalarization: Use augmented Chebyshev scalarization.

    Returns:
        qNoisyExpectedHypervolumeImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    (
        objective,
        objective_thresholds,
    ) = get_weighted_mc_objective_and_objective_thresholds(
        objective_weights=objective_weights,
        objective_thresholds=objective_thresholds)
    # For EHVI acquisition functions we pass the constraint transform directly.
    if outcome_constraints is None:
        cons_tfs = None
    else:
        cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
    num_objectives = objective_thresholds.shape[0]
    return get_acquisition_function(
        acquisition_function_name="qNEHVI",
        model=model,
        objective=objective,  # pyre-ignore [6]
        X_observed=X_observed,
        X_pending=X_pending,
        constraints=cons_tfs,
        prune_baseline=kwargs.get("prune_baseline", True),
        mc_samples=kwargs.get("mc_samples", DEFAULT_EHVI_MC_SAMPLES),
        alpha=kwargs.get(
            "alpha",
            get_default_partitioning_alpha(num_objectives=num_objectives)),
        qmc=kwargs.get("qmc", True),
        # pyre-fixme[6]: Expected `Optional[int]` for 11th param but got
        #  `Union[float, int]`.
        seed=torch.randint(1, 10000, (1, )).item(),
        ref_point=objective_thresholds.tolist(),
        marginalize_dim=kwargs.get("marginalize_dim"),
        match_right_most_batch_dim=kwargs.get("match_right_most_batch_dim",
                                              False),
        cache_root=kwargs.get("cache_root", True),
    )
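
A hedged usage sketch (not part of the source): get_NEHVI takes the same core arguments as get_EHVI but instantiates the noisy variant, so baseline pruning and root caching can be tuned through **kwargs. All values below are illustrative assumptions.

import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(16, 4, dtype=torch.double)
train_Y = torch.rand(16, 2, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)  # fitting omitted

acqf = get_NEHVI(
    model=model,
    objective_weights=torch.tensor([1.0, 1.0], dtype=torch.double),
    objective_thresholds=torch.tensor([0.1, 0.1], dtype=torch.double),
    X_observed=train_X,
    mc_samples=128,        # forwarded through **kwargs
    prune_baseline=True,   # forwarded through **kwargs
    cache_root=False,      # forwarded through **kwargs
)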
Example #12
def get_NEI(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qNoisyExpectedImprovement acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).
        prune_baseline: If True, prune the baseline points for NEI (default: True).
        chebyshev_scalarization: Use augmented Chebyshev scalarization.

    Returns:
        qNoisyExpectedImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    if kwargs.get("chebyshev_scalarization", False):
        if "Ys" not in kwargs:
            raise ValueError("Chebyshev Scalarization requires Ys argument")
        Y_tensor = torch.cat(kwargs.get("Ys"), dim=-1)
        obj_tf = get_chebyshev_scalarization(weights=objective_weights,
                                             Y=Y_tensor)
    else:
        obj_tf = get_objective_weights_transform(objective_weights)
    if outcome_constraints is None:
        objective = GenericMCObjective(objective=obj_tf)
    else:
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed,
                                       model=model,
                                       objective=obj_tf)
        objective = ConstrainedMCObjective(objective=obj_tf,
                                           constraints=con_tfs or [],
                                           infeasible_cost=inf_cost)
    return get_acquisition_function(
        acquisition_function_name="qNEI",
        model=model,
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        prune_baseline=kwargs.get("prune_baseline", True),
        mc_samples=kwargs.get("mc_samples", 512),
        qmc=kwargs.get("qmc", True),
        # pyre-fixme[6]: Expected `Optional[int]` for 9th param but got
        #  `Union[float, int]`.
        seed=torch.randint(1, 10000, (1, )).item(),
    )
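
A hedged usage sketch (not part of the source) of the chebyshev_scalarization path in this variant of get_NEI. Ys is assumed to be a list of per-outcome (n x 1) observation tensors, which the function concatenates along the last dimension before fitting the scalarization; all data below are illustrative.

import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(20, 3, dtype=torch.double)
Y1 = torch.rand(20, 1, dtype=torch.double)  # observations for outcome 1
Y2 = torch.rand(20, 1, dtype=torch.double)  # observations for outcome 2
model = SingleTaskGP(train_X, torch.cat([Y1, Y2], dim=-1))  # fitting omitted

acqf = get_NEI(
    model=model,
    objective_weights=torch.tensor([0.7, 0.3], dtype=torch.double),
    X_observed=train_X,
    chebyshev_scalarization=True,  # use augmented Chebyshev scalarization
    Ys=[Y1, Y2],                   # per-outcome observations
    mc_samples=256,
)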
Example #13
    def test_GetQEI(self, mock_acqf):
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2)))
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            marginalize_dim=0,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        best_f = self.objective(self.model.posterior(
            self.X_observed).mean).max().item()
        mock_acqf.assert_called_once_with(
            model=self.model,
            best_f=best_f,
            sampler=mock.ANY,
            objective=self.objective,
            posterior_transform=None,
            X_pending=self.X_pending,
        )
        # test batched model
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        # test batched model without marginalize dim
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))

        # test w/ posterior transform
        pm = torch.tensor([1.0, 2.0])
        mvn = MultivariateNormal(pm, torch.eye(2))
        self.model._posterior.mvn = mvn
        self.model._posterior._mean = pm.unsqueeze(-1)
        pt = ScalarizedPosteriorTransform(weights=torch.tensor([-1]))
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            posterior_transform=pt,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            marginalize_dim=0,
        )
        self.assertEqual(mock_acqf.call_args[-1]["best_f"].item(), -1.0)
Example #14
    def test_GetQNEHVI(self, mock_acqf):
        # make sure ref_point is specified
        with self.assertRaises(ValueError):
            acqf = get_acquisition_function(
                acquisition_function_name="qNEHVI",
                model=self.model,
                objective=self.objective,
                X_observed=self.X_observed,
                X_pending=self.X_pending,
                mc_samples=self.mc_samples,
                seed=self.seed,
            )
        acqf = get_acquisition_function(
            acquisition_function_name="qNEHVI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            ref_point=self.ref_point,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        mock_acqf.assert_called_once_with(
            constraints=None,
            model=self.model,
            X_baseline=self.X_observed,
            objective=self.objective,
            ref_point=self.ref_point,
            sampler=mock.ANY,
            prune_baseline=True,
            alpha=0.0,
            X_pending=self.X_pending,
            marginalize_dim=None,
            cache_root=True,
        )
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        # test with non-qmc
        acqf = get_acquisition_function(
            acquisition_function_name="qNEHVI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=2,
            qmc=False,
            ref_point=self.ref_point,
        )
        self.assertEqual(mock_acqf.call_count, 2)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs["ref_point"], self.ref_point)
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        ref_point = kwargs["ref_point"]
        self.assertEqual(ref_point, self.ref_point)

        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 2)

        # test passing alpha
        acqf = get_acquisition_function(
            acquisition_function_name="qNEHVI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=2,
            qmc=False,
            ref_point=self.ref_point,
            alpha=0.01,
        )
        self.assertEqual(mock_acqf.call_count, 3)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(kwargs["alpha"], 0.01)
Example #15
def _get_acquisition_func(
    model: Model,
    acquisition_function_name: str,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    mc_objective: Type[GenericMCObjective] = GenericMCObjective,
    constrained_mc_objective: Optional[
        Type[ConstrainedMCObjective]
    ] = ConstrainedMCObjective,
    mc_objective_kwargs: Optional[Dict] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        acquisition_function_name: Name of the acquisition function.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_objective: GenericMCObjective class, used for constructing a
            MC-objective. If constructing a penalized MC-objective, pass in
            PenalizedMCObjective together with mc_objective_kwargs.
        constrained_mc_objective: ConstrainedMCObjective class, used when
            applying constraints on the outcomes.
        mc_objective_kwargs: kwargs for constructing MC-objective.
            For GenericMCObjective, leave it as None. For PenalizedMCObjective,
            it needs to be specified in the format of kwargs.
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).
        prune_baseline: If True, prune the baseline points for NEI (default: True).
        chebyshev_scalarization: Use augmented Chebyshev scalarization.

    Returns:
        The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    if kwargs.get("chebyshev_scalarization", False):
        with torch.no_grad():
            Y = model.posterior(X_observed).mean
        obj_tf = get_chebyshev_scalarization(weights=objective_weights, Y=Y)
    else:
        obj_tf = get_objective_weights_transform(objective_weights)

    def objective(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
        return obj_tf(samples)

    if outcome_constraints is None:
        mc_objective_kwargs = {} if mc_objective_kwargs is None else mc_objective_kwargs
        objective = mc_objective(objective=objective, **mc_objective_kwargs)
    else:
        if constrained_mc_objective is None:
            raise ValueError(
                "constrained_mc_objective cannot be set to None "
                "when applying outcome constraints."
            )
        if issubclass(mc_objective, PenalizedMCObjective):
            raise RuntimeError(
                "Outcome constraints are not supported for PenalizedMCObjective."
            )
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=objective)
        objective = constrained_mc_objective(
            objective=objective, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
    return get_acquisition_function(
        acquisition_function_name=acquisition_function_name,
        model=model,
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        prune_baseline=kwargs.get("prune_baseline", True),
        mc_samples=kwargs.get("mc_samples", 512),
        qmc=kwargs.get("qmc", True),
        # pyre-fixme[6]: Expected `Optional[int]` for 9th param but got
        #  `Union[float, int]`.
        seed=torch.randint(1, 10000, (1,)).item(),
        marginalize_dim=kwargs.get("marginalize_dim"),
    )
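
A hedged usage sketch (not part of the source): calling _get_acquisition_func with the default GenericMCObjective and an explicit acquisition function name. The model, weights, and kwarg values are illustrative assumptions; fitting is omitted.

import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(20, 3, dtype=torch.double)
train_Y = torch.rand(20, 2, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)  # fitting omitted

# qNEI with a weighted-sum objective; the GenericMCObjective is built inside
# _get_acquisition_func from the objective_weights transform.
acqf = _get_acquisition_func(
    model=model,
    acquisition_function_name="qNEI",
    objective_weights=torch.tensor([1.0, 0.5], dtype=torch.double),
    X_observed=train_X,
    mc_samples=256,  # forwarded through **kwargs
    qmc=True,
)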