Example #1
def test_get_default_partitioning_alpha(self):
    for m in range(2, 7):
        expected_val = 0.0 if m < 5 else 10 ** (-8 + m)
        self.assertEqual(
            expected_val, get_default_partitioning_alpha(num_objectives=m)
        )
    # In `BotorchTestCase.setUp` warnings are filtered, so here we
    # remove the filter to ensure a warning is issued as expected.
    warnings.resetwarnings()
    with warnings.catch_warnings(record=True) as ws:
        self.assertEqual(0.1, get_default_partitioning_alpha(num_objectives=7))
    self.assertEqual(len(ws), 1)
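The assertions above imply the following shape for the helper under test. This is only a minimal sketch consistent with Example #1 (the warning message is an assumption; the test only checks that one warning is raised), not the actual BoTorch implementation:

import warnings


def get_default_partitioning_alpha(num_objectives: int) -> float:
    # Exact box decomposition (alpha=0) for up to four objectives; beyond
    # that, use an approximation level that grows with the objective count.
    if num_objectives < 5:
        return 0.0
    if num_objectives > 6:
        # Assumed message; Example #1 only asserts that a warning is issued.
        warnings.warn("EHVI works best for less than 7 objectives.")
    return 10 ** (-8 + num_objectives)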
Example #2
def test_get_default_partitioning_alpha(self):
    self.assertEqual(0.0, get_default_partitioning_alpha(num_objectives=2))
    self.assertEqual(1e-5, get_default_partitioning_alpha(num_objectives=3))
    self.assertEqual(1e-4, get_default_partitioning_alpha(num_objectives=4))
    # In `BotorchTestCase.setUp` warnings are filtered, so here we
    # remove the filter to ensure a warning is issued as expected.
    warnings.resetwarnings()
    with warnings.catch_warnings(record=True) as ws:
        self.assertEqual(0.1, get_default_partitioning_alpha(num_objectives=7))
    self.assertEqual(len(ws), 1)
Example #3
def construct_inputs_EHVI(
    model: Model,
    training_data: TrainingData,
    objective_thresholds: Tensor,
    objective: Optional[AnalyticMultiOutputObjective] = None,
    **kwargs: Any,
) -> Dict[str, Any]:
    r"""Construct kwargs for `ExpectedHypervolumeImprovement` constructor."""
    num_objectives = objective_thresholds.shape[0]
    if kwargs.get("outcome_constraints") is not None:
        raise NotImplementedError(
            "EHVI does not yet support outcome constraints.")

    X_observed = training_data.X
    alpha = kwargs.get(
        "alpha",
        get_default_partitioning_alpha(num_objectives=num_objectives),
    )
    # This selects the objectives (a subset of the outcomes) and sets each
    # objective threshold to have the proper optimization direction.
    if objective is None:
        objective = IdentityAnalyticMultiOutputObjective()
    ref_point = objective(objective_thresholds)

    # Compute the posterior mean (used to construct the Pareto frontier for
    # the partitioning) if it is not provided among the arguments.
    Y_pmean = kwargs.get("Y_pmean")
    if Y_pmean is None:
        with torch.no_grad():
            Y_pmean = model.posterior(X_observed).mean
    if alpha > 0:
        partitioning = NondominatedPartitioning(
            ref_point=ref_point,
            Y=objective(Y_pmean),
            alpha=alpha,
        )
    else:
        partitioning = FastNondominatedPartitioning(
            ref_point=ref_point,
            Y=objective(Y_pmean),
        )

    return {
        "model": model,
        "ref_point": ref_point,
        "partitioning": partitioning,
        "objective": objective,
    }
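A hedged usage sketch for construct_inputs_EHVI: MockModel and MockPosterior come from botorch.utils.testing (also used in the test further below), and the SimpleNamespace merely stands in for the TrainingData container, since only its X attribute is read here. Names and shapes are illustrative, not taken from the source above.

from types import SimpleNamespace

import torch
from botorch.utils.testing import MockModel, MockPosterior

# Two objectives -> alpha defaults to 0.0, so the fast (exact) partitioning is used.
training_data = SimpleNamespace(X=torch.rand(4, 3))
model = MockModel(MockPosterior(mean=torch.rand(4, 2), variance=torch.ones(4, 2)))
ehvi_kwargs = construct_inputs_EHVI(
    model=model,
    training_data=training_data,
    objective_thresholds=torch.zeros(2),
)
# The returned dict is intended to be unpacked into the constructor:
# ExpectedHypervolumeImprovement(**ehvi_kwargs)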
Example #4
def get_NEHVI(
    model: Model,
    objective_weights: Tensor,
    objective_thresholds: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qNoisyExpectedHyperVolumeImprovement acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        objective_thresholds: A tensor containing thresholds forming a reference point
            from which to calculate Pareto frontier hypervolume. Points that do not
            dominate the objective_thresholds contribute nothing to hypervolume.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).
        prune_baseline: If True, prune the baseline points for NEI (default: True).
        chebyshev_scalarization: Use augmented Chebyshev scalarization.

    Returns:
        qNoisyExpectedHypervolumeImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    (
        objective,
        objective_thresholds,
    ) = get_weighted_mc_objective_and_objective_thresholds(
        objective_weights=objective_weights,
        objective_thresholds=objective_thresholds)
    # For EHVI acquisition functions we pass the constraint transform directly.
    if outcome_constraints is None:
        cons_tfs = None
    else:
        cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
    num_objectives = objective_thresholds.shape[0]
    return get_acquisition_function(
        acquisition_function_name="qNEHVI",
        model=model,
        objective=objective,  # pyre-ignore [6]
        X_observed=X_observed,
        X_pending=X_pending,
        constraints=cons_tfs,
        prune_baseline=kwargs.get("prune_baseline", True),
        mc_samples=kwargs.get("mc_samples", DEFAULT_EHVI_MC_SAMPLES),
        alpha=kwargs.get(
            "alpha",
            get_default_partitioning_alpha(num_objectives=num_objectives)),
        qmc=kwargs.get("qmc", True),
        # pyre-fixme[6]: Expected `Optional[int]` for 11th param but got
        #  `Union[float, int]`.
        seed=torch.randint(1, 10000, (1, )).item(),
        ref_point=objective_thresholds.tolist(),
        marginalize_dim=kwargs.get("marginalize_dim"),
        match_right_most_batch_dim=kwargs.get("match_right_most_batch_dim",
                                              False),
        cache_root=kwargs.get("cache_root", True),
    )
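An invocation sketch for get_NEHVI, assuming the surrounding module's imports are available. The SingleTaskGP, its random training data, and the chosen mc_samples value are illustrative assumptions, not part of the source above.

import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(8, 3, dtype=torch.double)
train_Y = torch.rand(8, 2, dtype=torch.double)  # two outcomes, both used as objectives
model = SingleTaskGP(train_X, train_Y)

acqf = get_NEHVI(
    model=model,
    # Unit weights: maximize both outputs as-is.
    objective_weights=torch.ones(2, dtype=torch.double),
    # Thresholds define the reference point for the hypervolume computation.
    objective_thresholds=torch.zeros(2, dtype=torch.double),
    X_observed=train_X,
    mc_samples=128,  # override DEFAULT_EHVI_MC_SAMPLES for a cheaper sketch
)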
Example #5
def get_EHVI(
    model: Model,
    objective_weights: Tensor,
    objective_thresholds: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qExpectedHyperVolumeImprovement acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        objective_thresholds: A tensor containing thresholds forming a reference point
            from which to calculate Pareto frontier hypervolume. Points that do not
            dominate the objective_thresholds contribute nothing to hypervolume.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).

    Returns:
        qExpectedHypervolumeImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    (
        objective,
        objective_thresholds,
    ) = get_weighted_mc_objective_and_objective_thresholds(
        objective_weights=objective_weights,
        objective_thresholds=objective_thresholds)
    with torch.no_grad():
        Y = model.posterior(X_observed).mean
    # For EHVI acquisition functions we pass the constraint transform directly.
    if outcome_constraints is None:
        cons_tfs = None
    else:
        cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
    num_objectives = objective_thresholds.shape[0]
    return get_acquisition_function(
        acquisition_function_name="qEHVI",
        model=model,
        # TODO (jej): Fix pyre error below by restructuring class hierarchy.
        # pyre-fixme[6]: Expected `botorch.acquisition.objective.
        #  MCAcquisitionObjective` for 3rd parameter `objective` to call
        #  `get_acquisition_function` but got `IdentityMCMultiOutputObjective`.
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        constraints=cons_tfs,
        mc_samples=kwargs.get("mc_samples", DEFAULT_EHVI_MC_SAMPLES),
        qmc=kwargs.get("qmc", True),
        alpha=kwargs.get(
            "alpha",
            get_default_partitioning_alpha(num_objectives=num_objectives)),
        # pyre-fixme[6]: Expected `Optional[int]` for 10th param but got
        #  `Union[float, int]`.
        seed=torch.randint(1, 10000, (1, )).item(),
        ref_point=objective_thresholds.tolist(),
        Y=Y,
    )
    def test_construct_inputs_EHVI(self):
        c = get_acqf_input_constructor(ExpectedHypervolumeImprovement)
        mock_model = mock.Mock()
        objective_thresholds = torch.rand(6)

        # test error on unsupported outcome constraints
        with self.assertRaises(NotImplementedError):
            c(
                model=mock_model,
                training_data=self.bd_td,
                objective_thresholds=objective_thresholds,
                outcome_constraints=mock.Mock(),
            )

        # test with Y_pmean supplied explicitly
        Y_pmean = torch.rand(3, 6)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            Y_pmean=Y_pmean,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsInstance(kwargs["objective"],
                              IdentityAnalyticMultiOutputObjective)
        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
        partitioning = kwargs["partitioning"]
        alpha_expected = get_default_partitioning_alpha(6)
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, alpha_expected)
        self.assertTrue(
            torch.equal(partitioning._neg_ref_point, -objective_thresholds))

        Y_pmean = torch.rand(3, 2)
        objective_thresholds = torch.rand(2)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            Y_pmean=Y_pmean,
        )
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(
            torch.equal(partitioning.ref_point, objective_thresholds))

        # test with custom objective
        weights = torch.rand(2)
        obj = WeightedMCMultiOutputObjective(weights=weights)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=obj,
            Y_pmean=Y_pmean,
            alpha=0.05,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsInstance(kwargs["objective"],
                              WeightedMCMultiOutputObjective)
        ref_point_expected = objective_thresholds * weights
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, 0.05)
        self.assertTrue(
            torch.equal(partitioning._neg_ref_point, -ref_point_expected))

        # Test without providing Y_pmean (computed from model)
        mean = torch.rand(1, 2)
        variance = torch.ones(1, 1)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        self.assertIsInstance(kwargs["objective"],
                              IdentityAnalyticMultiOutputObjective)
        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(
            torch.equal(partitioning.ref_point, objective_thresholds))
        self.assertTrue(torch.equal(partitioning._neg_Y, -mean))