Example #1
    def _get_botorch_objective(
        self,
        model: Model,
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        X_observed: Optional[Tensor] = None,
    ) -> AcquisitionObjective:
        return get_botorch_objective(
            model=model,
            objective_weights=objective_weights,
            use_scalarized_objective=issubclass(
                self._botorch_acqf_class, AnalyticAcquisitionFunction
            ),
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
        )
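
The `use_scalarized_objective` flag above is keyed off whether the chosen acquisition class is analytic: analytic acquisition functions require a scalarized (non-MC) objective. A minimal check of that subclass test, assuming the standard BoTorch classes (`ExpectedImprovement` is analytic, `qExpectedImprovement` is Monte Carlo):

from botorch.acquisition.analytic import (
    AnalyticAcquisitionFunction,
    ExpectedImprovement,
)
from botorch.acquisition.monte_carlo import qExpectedImprovement

# Analytic acquisition functions get a scalarized objective...
print(issubclass(ExpectedImprovement, AnalyticAcquisitionFunction))   # True
# ...while MC acquisition functions can consume MC objectives instead.
print(issubclass(qExpectedImprovement, AnalyticAcquisitionFunction))  # False
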
Example #2
    def gen(
        self,
        n: int,
        bounds: List,
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata,
               Optional[List[TCandidateMetadata]]]:
        r"""Generate new candidates.

        Args:
            n: Number of candidates to generate.
            bounds: A list of (lower, upper) tuples for each column of X.
            objective_weights: The objective is to maximize a weighted sum of
                the columns of f(x). These are the weights.
            outcome_constraints: A tuple of (A, b). For k outcome constraints
                and m outputs at f(x), A is (k x m) and b is (k x 1) such that
                A f(x) <= b.
            linear_constraints: A tuple of (A, b). For k linear constraints on
                d-dimensional x, A is (k x d) and b is (k x 1) such that
                A x <= b.
            fixed_features: A map {feature_index: value} for features that
                should be fixed to a particular value during generation.
            pending_observations: A list of m (k_i x d) feature tensors X
                for m outcomes and k_i pending observations for outcome i.
            model_gen_options: A config dictionary that can contain
                model-specific options.
            rounding_func: A function that rounds an optimization result
                appropriately (i.e., according to `round-trip` transformations).
            target_fidelities: A map {feature_index: value} of fidelity feature
                column indices to their respective target fidelities. Used for
                multi-fidelity optimization.

        Returns:
            4-element tuple containing

            - (n x d) tensor of generated points.
            - n-tensor of weights for each point.
            - Dictionary of model-specific metadata for the given
                generation candidates.
            - Optional list of metadata for each candidate.
        """
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        # subset model only to the outcomes we need for the optimization
        model = not_none(self.model)
        if options.get("subset_model", True):
            model, objective_weights, outcome_constraints, _ = subset_model(
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        objective = get_botorch_objective(
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
        )

        inequality_constraints = _to_inequality_constraints(linear_constraints)
        # TODO: update optimizers to handle inequality_constraints
        if inequality_constraints is not None:
            raise UnsupportedError(
                "Inequality constraints are not yet supported for KnowledgeGradient!"
            )

        # extract a few options
        n_fantasies = acf_options.get("num_fantasies", 64)
        qmc = acf_options.get("qmc", True)
        seed_inner = acf_options.get("seed_inner", None)
        num_restarts = optimizer_options.get("num_restarts", 40)
        raw_samples = optimizer_options.get("raw_samples", 1024)

        # get current value
        current_value = self._get_current_value(
            model=model,
            bounds=bounds,
            X_observed=not_none(X_observed),
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            seed_inner=seed_inner,
            fixed_features=fixed_features,
            model_gen_options=model_gen_options,
            target_fidelities=target_fidelities,
            qmc=qmc,
        )

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)

        # get acquisition function
        acq_function = _instantiate_KG(
            model=model,
            objective=objective,
            qmc=qmc,
            n_fantasies=n_fantasies,
            num_trace_observations=options.get("num_trace_observations", 0),
            mc_samples=acf_options.get("mc_samples", 256),
            seed_inner=seed_inner,
            seed_outer=acf_options.get("seed_outer", None),
            X_pending=X_pending,
            target_fidelities=target_fidelities,
            fidelity_weights=options.get("fidelity_weights"),
            current_value=current_value,
            cost_intercept=self.cost_intercept,
        )

        # optimize and get new points
        new_x = _optimize_and_get_candidates(
            acq_function=acq_function,
            bounds_=bounds_,
            n=n,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            optimizer_options=optimizer_options,
            rounding_func=rounding_func,
            inequality_constraints=inequality_constraints,
            fixed_features=fixed_features,
        )

        return new_x, torch.ones(n, dtype=self.dtype), {}, None
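
The (A, b) tensors described in the `outcome_constraints` docstring above are ordinary tensors. A minimal, hypothetical construction for one constraint over two outcomes (shapes follow the docstring; the specific values are illustrative):

import torch

# One constraint (k=1) over two outcomes (m=2): enforce f_2(x) <= 0.5.
A = torch.tensor([[0.0, 1.0]])  # (k x m)
b = torch.tensor([[0.5]])       # (k x 1)
outcome_constraints = (A, b)

# Maximize the first outcome only; the second enters via the constraint.
objective_weights = torch.tensor([1.0, 0.0])
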
Example #3
    def test_KnowledgeGradient_helpers(self):
        model = KnowledgeGradient()
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            bounds=self.bounds,
            feature_names=self.feature_names,
            metric_names=self.metric_names,
            task_features=[],
            fidelity_features=[],
        )

        # test _instantiate_KG
        objective = ScalarizedObjective(weights=self.objective_weights)
        X_dummy = torch.ones(1, 3, dtype=self.dtype, device=self.device)

        # test acquisition setting
        acq_function = _instantiate_KG(model=model.model,
                                       objective=objective,
                                       n_fantasies=10,
                                       qmc=True)
        self.assertIsInstance(acq_function.sampler, SobolQMCNormalSampler)
        self.assertIsInstance(acq_function.objective, ScalarizedObjective)
        self.assertEqual(acq_function.num_fantasies, 10)

        acq_function = _instantiate_KG(model=model.model,
                                       objective=objective,
                                       n_fantasies=10,
                                       qmc=False)
        self.assertIsInstance(acq_function.sampler, IIDNormalSampler)

        acq_function = _instantiate_KG(model=model.model,
                                       objective=objective,
                                       qmc=False)
        self.assertIsNone(acq_function.inner_sampler)

        acq_function = _instantiate_KG(model=model.model,
                                       objective=objective,
                                       qmc=True,
                                       X_pending=X_dummy)
        self.assertIsNone(acq_function.inner_sampler)
        self.assertTrue(torch.equal(acq_function.X_pending, X_dummy))

        # test get_botorch_objective
        outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
        objective_weights = torch.ones(1, dtype=self.dtype, device=self.device)
        # test use_scalarized_objective kwarg
        self.assertIsInstance(
            get_botorch_objective(
                model=model.model,
                outcome_constraints=outcome_constraints,
                objective_weights=objective_weights,
                X_observed=X_dummy,
                use_scalarized_objective=False,
            ),
            ConstrainedMCObjective,
        )
        self.assertIsInstance(
            get_botorch_objective(
                model=model.model,
                outcome_constraints=outcome_constraints,
                objective_weights=objective_weights,
                X_observed=X_dummy,
            ),
            ConstrainedMCObjective,
        )
        self.assertIsInstance(
            get_botorch_objective(
                model=model.model,
                objective_weights=objective_weights,
                X_observed=X_dummy,
                use_scalarized_objective=False,
            ),
            LinearMCObjective,
        )
        self.assertIsInstance(
            get_botorch_objective(
                model=model.model,
                objective_weights=objective_weights,
                X_observed=X_dummy,
            ),
            ScalarizedObjective,
        )

        # test _get_best_point_acqf
        acq_function, non_fixed_idcs = model._get_best_point_acqf(
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_dummy,
        )
        self.assertIsInstance(acq_function, qSimpleRegret)
        self.assertIsInstance(acq_function.sampler, SobolQMCNormalSampler)
        self.assertIsNone(non_fixed_idcs)

        acq_function, non_fixed_idcs = model._get_best_point_acqf(
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_dummy,
            qmc=False,
        )
        self.assertIsInstance(acq_function.sampler, IIDNormalSampler)
        self.assertIsNone(non_fixed_idcs)

        with self.assertRaises(RuntimeError):
            model._get_best_point_acqf(
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                X_observed=X_dummy,
                target_fidelities={1: 1.0},
            )

        # multi-fidelity tests

        model = KnowledgeGradient()
        model.fit(
            Xs=self.Xs,
            Ys=self.Ys,
            Yvars=self.Yvars,
            bounds=self.bounds,
            task_features=[],
            feature_names=self.feature_names,
            metric_names=self.metric_names,
            fidelity_features=[-1],
        )

        acq_function = _instantiate_KG(
            model=model.model,
            objective=objective,
            target_fidelities={2: 1.0},
            current_value=0,
        )
        self.assertIsInstance(acq_function, qMultiFidelityKnowledgeGradient)

        acq_function = _instantiate_KG(
            model=model.model,
            objective=LinearMCObjective(weights=self.objective_weights),
        )
        self.assertIsInstance(acq_function.inner_sampler,
                              SobolQMCNormalSampler)

        # test error that target fidelity and fidelity weight indices must match
        with self.assertRaises(RuntimeError):
            _instantiate_KG(
                model=model.model,
                objective=objective,
                target_fidelities={1: 1.0},
                fidelity_weights={2: 1.0},
                current_value=0,
            )

        # test _get_best_point_acqf
        acq_function, non_fixed_idcs = model._get_best_point_acqf(
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_dummy,
            target_fidelities={2: 1.0},
        )
        self.assertIsInstance(acq_function, FixedFeatureAcquisitionFunction)
        self.assertIsInstance(acq_function.acq_func.sampler,
                              SobolQMCNormalSampler)
        self.assertEqual(non_fixed_idcs, [0, 1])

        acq_function, non_fixed_idcs = model._get_best_point_acqf(
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_dummy,
            target_fidelities={2: 1.0},
            qmc=False,
        )
        self.assertIsInstance(acq_function, FixedFeatureAcquisitionFunction)
        self.assertIsInstance(acq_function.acq_func.sampler, IIDNormalSampler)
        self.assertEqual(non_fixed_idcs, [0, 1])

        # test error when no target fidelities are given for a multi-fidelity model
        with self.assertRaises(RuntimeError):
            model._get_best_point_acqf(
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                X_observed=X_dummy,
                qmc=False,
            )

        # test error if fixed features are also fidelity features
        with self.assertRaises(RuntimeError):
            model._get_best_point_acqf(
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                X_observed=X_dummy,
                fixed_features={2: 2.0},
                target_fidelities={2: 1.0},
                qmc=False,
            )
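
The `qmc` flag exercised above decides between Sobol quasi-MC and IID normal fantasy samplers. A sketch of that mapping, assuming the older `botorch.sampling.samplers` module this code's era uses (newer BoTorch releases moved these classes and changed their constructors):

from typing import Optional

from botorch.sampling.samplers import IIDNormalSampler, SobolQMCNormalSampler

def make_fantasy_sampler(num_fantasies: int, qmc: bool,
                         seed: Optional[int] = None):
    # Sobol quasi-MC sampling is the default; IID normal is the fallback.
    sampler_cls = SobolQMCNormalSampler if qmc else IIDNormalSampler
    return sampler_cls(num_samples=num_fantasies, seed=seed)
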
Example #4
    def test_get_botorch_objective(self, _):
        # Whether `use_scalarized_objective` is specified or not, when there are
        # outcome constraints, `ConstrainedMCObjective` should be picked.
        self.assertIsInstance(
            get_botorch_objective(
                model=self.mock_botorch_model,
                outcome_constraints=self.outcome_constraints,
                objective_weights=self.objective_weights,
                X_observed=self.X_dummy,
                use_scalarized_objective=False,
            ),
            ConstrainedMCObjective,
        )
        self.assertIsInstance(
            get_botorch_objective(
                model=self.mock_botorch_model,
                outcome_constraints=self.outcome_constraints,
                objective_weights=self.objective_weights,
                X_observed=self.X_dummy,
            ),
            ConstrainedMCObjective,
        )

        # In absence of outcome constraints and with not using scalarized objective,
        # `LinearMCObjective` should be picked.
        self.assertIsInstance(
            get_botorch_objective(
                model=self.mock_botorch_model,
                objective_weights=self.objective_weights,
                X_observed=self.X_dummy,
                use_scalarized_objective=False,
            ),
            LinearMCObjective,
        )

        # By default, `ScalarizedObjective` should be picked in absence of outcome
        # constraints.
        self.assertIsInstance(
            get_botorch_objective(
                model=self.mock_botorch_model,
                objective_weights=self.objective_weights,
                X_observed=self.X_dummy,
            ),
            ScalarizedObjective,
        )

        # Test MOO case.
        with self.assertRaises(BotorchTensorDimensionError):
            get_botorch_objective(
                model=self.mock_botorch_model,
                objective_weights=self.objective_weights,  # Only has 1 objective.
                X_observed=self.X_dummy,
                objective_thresholds=self.objective_thresholds,
            )

        self.assertIsInstance(
            get_botorch_objective(
                model=self.mock_botorch_model,
                objective_weights=self.moo_objective_weights,  # Has 2 objectives.
                X_observed=self.X_dummy,
                objective_thresholds=self.objective_thresholds,
            ),
            WeightedMCMultiOutputObjective,
        )
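
Taken together, the assertions above pin down a selection order. The sketch below restates the observed behavior as a plain decision function (a summary of the test expectations, not the library's actual implementation):

def pick_objective_class(outcome_constraints=None,
                         objective_thresholds=None,
                         use_scalarized_objective=True):
    # Thresholds signal multi-objective optimization.
    if objective_thresholds is not None:
        return "WeightedMCMultiOutputObjective"
    # Outcome constraints force the constrained MC objective either way.
    if outcome_constraints is not None:
        return "ConstrainedMCObjective"
    # Otherwise: scalarized by default, linear MC when opted out.
    if use_scalarized_objective:
        return "ScalarizedObjective"
    return "LinearMCObjective"
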
Example #5
    def __init__(
        self,
        surrogate: Surrogate,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        botorch_acqf_class: Optional[Type[AcquisitionFunction]] = None,
        options: Optional[Dict[str, Any]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> None:
        if not botorch_acqf_class and not self.default_botorch_acqf_class:
            raise ValueError(
                f"Acquisition class {self.__class__} does not specify a default "
                "BoTorch `AcquisitionFunction`, so `botorch_acqf_class` "
                "argument must be specified.")
        botorch_acqf_class = not_none(botorch_acqf_class
                                      or self.default_botorch_acqf_class)
        self.surrogate = surrogate
        self.options = options or {}
        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=[self.surrogate.training_data.X],
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        # Subset model only to the outcomes we need for the optimization.
        if self.options.get(Keys.SUBSET_MODEL, True):
            model, objective_weights, outcome_constraints, _ = subset_model(
                self.surrogate.model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )
        else:
            model = self.surrogate.model

        objective = get_botorch_objective(
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
            use_scalarized_objective=issubclass(botorch_acqf_class,
                                                AnalyticAcquisitionFunction),
        )
        # NOTE: Computing model dependencies might be handled entirely on
        # BoTorch side.
        model_deps = self.compute_model_dependencies(
            surrogate=surrogate,
            bounds=bounds,
            objective_weights=objective_weights,
            pending_observations=pending_observations,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
            options=self.options,
        )
        data_deps = self.compute_data_dependencies(
            training_data=self.surrogate.training_data)
        # pyre-ignore[28]: Some kwargs are not expected in base `Model`
        # but are expected in its subclasses.
        self.acqf = botorch_acqf_class(
            model=model,
            objective=objective,
            X_pending=X_pending,
            X_baseline=X_observed,
            **self.options,
            **model_deps,
            **data_deps,
        )
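
The final constructor call splats three dicts (`self.options`, `model_deps`, `data_deps`); any key duplicated across them would fail at call time. A self-contained illustration of that Python behavior (names here are hypothetical):

def construct(**kwargs):
    return kwargs

options = {"mc_samples": 256}
model_deps = {"mc_samples": 512}
try:
    construct(**options, **model_deps)
except TypeError as err:
    # "construct() got multiple values for keyword argument 'mc_samples'"
    print(err)
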
Example #6
    def gen(
        self,
        n: int,
        bounds: List,
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata,
               Optional[List[TCandidateMetadata]]]:
        """
        Generate new candidates.

        Args:
            n: Number of candidates to generate.
            bounds: A list of (lower, upper) tuples for each column of X.
            objective_weights: The objective is to maximize a weighted sum of
                the columns of f(x). These are the weights.
            outcome_constraints: A tuple of (A, b). For k outcome constraints
                and m outputs at f(x), A is (k x m) and b is (k x 1) such that
                A f(x) <= b.
            linear_constraints: A tuple of (A, b). For k linear constraints on
                d-dimensional x, A is (k x d) and b is (k x 1) such that
                A x <= b.
            fixed_features: A map {feature_index: value} for features that
                should be fixed to a particular value during generation.
            pending_observations: A list of m (k_i x d) feature tensors X
                for m outcomes and k_i pending observations for outcome i.
            model_gen_options: A config dictionary that can contain
                model-specific options.
            rounding_func: A function that rounds an optimization result
                appropriately (i.e., according to `round-trip` transformations).
            target_fidelities: A map {feature_index: value} of fidelity feature
                column indices to their respective target fidelities. Used for
                multi-fidelity optimization.

        Returns:
            4-element tuple containing

            - (n x d) tensor of generated points.
            - n-tensor of weights for each point.
            - Dictionary of model-specific metadata for the given
                generation candidates.
            - Optional list of metadata for each candidate.
        """
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = not_none(self.model)

        # subset model only to the outcomes we need for the optimization
        if options.get("subset_model", True):
            model, objective_weights, outcome_constraints = subset_model(
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        objective = get_botorch_objective(
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
        )

        # get the acquisition function
        n_fantasies = acf_options.get("num_fantasies", 64)
        qmc = acf_options.get("qmc", True)
        seed_inner = acf_options.get("seed_inner", None)
        num_restarts = optimizer_options.get("num_restarts", 40)
        raw_samples = optimizer_options.get("raw_samples", 1024)

        inequality_constraints = _to_inequality_constraints(linear_constraints)
        # TODO: update optimizers to handle inequality_constraints
        if inequality_constraints is not None:
            raise UnsupportedError(
                "Inequality constraints are not yet supported for KnowledgeGradient!"
            )

        # get current value
        best_point_acqf, non_fixed_idcs = get_out_of_sample_best_point_acqf(
            model=model,
            Xs=self.Xs,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=not_none(X_observed),
            seed_inner=seed_inner,
            fixed_features=fixed_features,
            fidelity_features=self.fidelity_features,
            target_fidelities=target_fidelities,
            qmc=qmc,
        )

        # solution from previous iteration
        recommended_point = self.best_point(
            bounds=bounds,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            model_gen_options=model_gen_options,
            target_fidelities=target_fidelities,
        )
        recommended_point = recommended_point.detach().unsqueeze(0)
        # Extract acquisition value (TODO: Make this less painful and repetitive)
        if non_fixed_idcs is not None:
            recommended_point = recommended_point[..., non_fixed_idcs]
        current_value = best_point_acqf(recommended_point).max()

        acq_function = _instantiate_KG(
            model=model,
            objective=objective,
            qmc=qmc,
            n_fantasies=n_fantasies,
            num_trace_observations=options.get("num_trace_observations", 0),
            mc_samples=acf_options.get("mc_samples", 256),
            seed_inner=seed_inner,
            seed_outer=acf_options.get("seed_outer", None),
            X_pending=X_pending,
            target_fidelities=target_fidelities,
            fidelity_weights=options.get("fidelity_weights"),
            current_value=current_value,
            cost_intercept=self.cost_intercept,
        )

        # optimize and get new points
        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)

        batch_initial_conditions = gen_one_shot_kg_initial_conditions(
            acq_function=acq_function,
            bounds=bounds_,
            q=n,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options={
                "frac_random": optimizer_options.get("frac_random", 0.1),
                "num_inner_restarts": num_restarts,
                "raw_inner_samples": raw_samples,
            },
        )

        botorch_rounding_func = get_rounding_func(rounding_func)

        candidates, _ = optimize_acqf(
            acq_function=acq_function,
            bounds=bounds_,
            q=n,
            inequality_constraints=inequality_constraints,
            fixed_features=fixed_features,
            post_processing_func=botorch_rounding_func,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options={
                "batch_limit": optimizer_options.get("batch_limit", 8),
                "maxiter": optimizer_options.get("maxiter", 200),
                "method": "L-BFGS-B",
                "nonnegative": optimizer_options.get("nonnegative", False),
            },
            batch_initial_conditions=batch_initial_conditions,
        )
        new_x = candidates.detach().cpu()
        return new_x, torch.ones(n, dtype=self.dtype), {}, None
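
Every tunable read in this method comes from one nested `model_gen_options` dict. A hypothetical, fully spelled-out example of its shape (keys and defaults exactly as read above; the values themselves are illustrative):

model_gen_options = {
    "acquisition_function_kwargs": {
        "num_fantasies": 64,   # fantasy points for the KG approximation
        "qmc": True,           # Sobol quasi-MC vs. IID normal sampling
        "seed_inner": None,
        "seed_outer": None,
        "mc_samples": 256,
    },
    "optimizer_kwargs": {
        "num_restarts": 40,
        "raw_samples": 1024,
        "frac_random": 0.1,    # fraction of random one-shot initial conditions
        "batch_limit": 8,
        "maxiter": 200,
        "nonnegative": False,
    },
    "subset_model": True,
    "num_trace_observations": 0,
    "fidelity_weights": None,
}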