Example #1
def get_MOO_EHVI(
    experiment: Experiment,
    data: Data,
    objective_thresholds: Optional[TRefPoint] = None,
    search_space: Optional[SearchSpace] = None,
    dtype: torch.dtype = torch.double,
    device: Optional[torch.device] = None,
) -> TorchModelBridge:
    """Instantiates a multi-objective model that generates points with EHVI.

    Requires `objective_thresholds`, a list of `ax.core.ObjectiveThreshold`s,
    one for every objective being optimized. An arm only improves the
    hypervolume if it is strictly better than all objective thresholds.

    `objective_thresholds` can be set on the `optimization_config` or
    passed directly here.
    """
    device = device or (torch.device("cuda")
                        if torch.cuda.is_available() else torch.device("cpu"))
    # pyre-ignore: [16] `Optional` has no attribute `objective`.
    if not isinstance(experiment.optimization_config.objective,
                      MultiObjective):
        raise ValueError(
            "Multi-objective optimization requires multiple objectives.")
    if data.df.empty:  # pragma: no cover
        raise ValueError("MultiObjectiveOptimization requires non-empty data.")
    return checked_cast(
        TorchModelBridge,
        Models.MOO(
            experiment=experiment,
            data=data,
            objective_thresholds=objective_thresholds,
            search_space=search_space or experiment.search_space,
            torch_dtype=dtype,
            torch_device=device,
            acqf_constructor=get_EHVI,
            default_model_gen_options={
                "acquisition_function_kwargs": {
                    "sequential": True
                },
                "optimizer_kwargs": {
                    # having a batch limit is very important for avoiding
                    # memory issues in the initialization
                    "batch_limit": DEFAULT_EHVI_BATCH_LIMIT
                },
            },
        ),
    )
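
A minimal usage sketch for the function above, assuming a two-objective experiment already exists; the metric names `metric_a` and `metric_b` are hypothetical stand-ins for the experiment's own objectives:

from ax.core.outcome_constraint import ObjectiveThreshold
from ax.core.types import ComparisonOp

# Hypothetical thresholds; hypervolume only counts improvement beyond them.
thresholds = [
    ObjectiveThreshold(metric=experiment.metrics["metric_a"],
                       op=ComparisonOp.GEQ, bound=0.0),
    ObjectiveThreshold(metric=experiment.metrics["metric_b"],
                       op=ComparisonOp.LEQ, bound=10.0),
]
model = get_MOO_EHVI(
    experiment=experiment,
    data=experiment.fetch_data(),
    objective_thresholds=thresholds,
)
generator_run = model.gen(n=1)  # one new candidate arm chosen by EHVI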
Example #2
def get_MOO_EHVI(
    experiment: Experiment,
    data: Data,
    ref_point: Dict[str, float],
    search_space: Optional[SearchSpace] = None,
    dtype: torch.dtype = torch.double,
    device: torch.device = (torch.device("cuda") if torch.cuda.is_available()
                            else torch.device("cpu")),
) -> MultiObjectiveTorchModelBridge:
    """Instantiates a multi-objective model that generates points with EHVI.

    Requires a `ref_point`, a dictionary mapping each objective metric's name
    to its reference point value. An arm only improves the hypervolume if it
    is strictly better than this point in all metrics.
    """
    # pyre-ignore: [16] `Optional` has no attribute `objective`.
    if not isinstance(experiment.optimization_config.objective,
                      MultiObjective):
        raise ValueError(
            "Multi-objective optimization requires multiple objectives.")
    if data.df.empty:  # pragma: no cover
        raise ValueError("MultiObjectiveOptimization requires non-empty data.")
    return checked_cast(
        MultiObjectiveTorchModelBridge,
        Models.MOO(
            experiment=experiment,
            data=data,
            ref_point=ref_point,
            search_space=search_space or experiment.search_space,
            torch_dtype=dtype,
            torch_device=device,
            default_model_gen_options={
                "acquisition_function_kwargs": {
                    "sequential": True
                },
                "optimizer_kwargs": {
                    # having a batch limit is very important for avoiding
                    # memory issues in the initialization
                    "batch_limit": DEFAULT_EHVI_BATCH_LIMIT
                },
            },
        ),
    )
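
Under this older signature the reference point is a plain dict rather than a list of thresholds. A minimal sketch, again with hypothetical metric names:

# ref_point maps each objective metric's name to its reference value.
model = get_MOO_EHVI(
    experiment=experiment,
    data=experiment.fetch_data(),
    ref_point={"metric_a": 0.0, "metric_b": 10.0},  # hypothetical names
)
generator_run = model.gen(n=1)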
Example #3
def get_MOO_NEHVI(
    experiment: Experiment,
    data: Data,
    objective_thresholds: Optional[TRefPoint] = None,
    search_space: Optional[SearchSpace] = None,
    dtype: torch.dtype = torch.double,
    device: torch.device = (
        torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    ),
    status_quo_features: Optional[ObservationFeatures] = None,
    use_input_warping: bool = False,
    optimization_config: Optional[OptimizationConfig] = None,
) -> MultiObjectiveTorchModelBridge:
    """Instantiates a multi-objective model using qNEHVI."""
    opt_config = optimization_config or experiment.optimization_config
    # pyre-ignore: [16] `Optional` has no attribute `objective`.
    if not isinstance(opt_config.objective, MultiObjective):
        raise ValueError("Multi-objective optimization requires multiple objectives.")
    if data.df.empty:  # pragma: no cover
        raise ValueError("MultiObjectiveOptimization requires non-empty data.")
    return checked_cast(
        MultiObjectiveTorchModelBridge,
        Models.MOO(
            experiment=experiment,
            data=data,
            objective_thresholds=objective_thresholds,
            search_space=search_space or experiment.search_space,
            torch_dtype=dtype,
            torch_device=device,
            status_quo_features=status_quo_features,
            default_model_gen_options={
                "optimizer_kwargs": {
                    # having a batch limit is very important for avoiding
                    # memory issues in the initialization
                    "batch_limit": DEFAULT_EHVI_BATCH_LIMIT,
                    "sequential": True,
                },
            },
            use_input_warping=use_input_warping,
            optimization_config=opt_config,
        ),
    )
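
A minimal usage sketch. `objective_thresholds` is optional in the signature above (when omitted it is presumably taken from the optimization config or inferred), and `use_input_warping` is an optional flag from that same signature:

model = get_MOO_NEHVI(
    experiment=experiment,
    data=experiment.fetch_data(),
    use_input_warping=True,  # optional: warp inputs for non-stationary data
)
generator_run = model.gen(n=1)
experiment.new_trial(generator_run=generator_run)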
Example #4
def get_MOO_RS(
    experiment: Experiment,
    data: Data,
    objective_thresholds: Optional[TRefPoint] = None,
    search_space: Optional[SearchSpace] = None,
    dtype: torch.dtype = torch.double,
    device: torch.device = DEFAULT_TORCH_DEVICE,
) -> TorchModelBridge:
    """Instantiates a Random Scalarization multi-objective model.

    Chooses a different random linear scalarization of the objectives
    for generating each new candidate arm. As a result, it only explores
    the convex hull of the Pareto frontier.
    """
    # pyre-ignore: [16] `Optional` has no attribute `objective`.
    if not isinstance(experiment.optimization_config.objective,
                      MultiObjective):
        raise ValueError(
            "Multi-objective optimization requires multiple objectives.")
    if data.df.empty:
        raise ValueError("MultiObjectiveOptimization requires non-empty data.")
    return checked_cast(
        TorchModelBridge,
        Models.MOO(
            experiment=experiment,
            data=data,
            objective_thresholds=objective_thresholds,
            search_space=search_space or experiment.search_space,
            torch_dtype=dtype,
            torch_device=device,
            acqf_constructor=get_NEI,
            default_model_gen_options={
                "acquisition_function_kwargs": {
                    "random_scalarization": True,
                    "sequential": True,
                }
            },
        ),
    )
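
For intuition only, a standalone sketch of the random linear scalarization idea (not the library's internal code): each candidate is generated under a fresh weight vector drawn uniformly from the probability simplex, so repeated draws sweep linear trade-offs across the convex hull of the frontier.

import numpy as np

rng = np.random.default_rng(seed=0)

def random_linear_scalarization(objectives: np.ndarray) -> float:
    # A flat Dirichlet is uniform over the simplex; the draw fixes the
    # trade-off used for this one candidate.
    weights = rng.dirichlet(np.ones(objectives.shape[-1]))
    return float(weights @ objectives)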
Example #5
def get_MOO_PAREGO(
    experiment: Experiment,
    data: Data,
    objective_thresholds: Optional[TRefPoint] = None,
    search_space: Optional[SearchSpace] = None,
    dtype: torch.dtype = torch.double,
    device: torch.device = DEFAULT_TORCH_DEVICE,
) -> TorchModelBridge:
    """Instantiates a multi-objective model that generates points with ParEGO.

    qParEGO optimizes random augmented Chebyshev scalarizations of the
    multiple objectives, which allows it to explore non-convex Pareto
    frontiers.
    """
    # pyre-ignore: [16] `Optional` has no attribute `objective`.
    if not isinstance(experiment.optimization_config.objective,
                      MultiObjective):
        raise ValueError(
            "Multi-objective optimization requires multiple objectives.")
    if data.df.empty:
        raise ValueError("MultiObjectiveOptimization requires non-empty data.")
    return checked_cast(
        TorchModelBridge,
        Models.MOO(
            experiment=experiment,
            data=data,
            objective_thresholds=objective_thresholds,
            search_space=search_space or experiment.search_space,
            torch_dtype=dtype,
            torch_device=device,
            acqf_constructor=get_NEI,
            default_model_gen_options={
                "acquisition_function_kwargs": {
                    "chebyshev_scalarization": True,
                    "sequential": True,
                }
            },
        ),
    )
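
For intuition only, a standalone sketch of one common form of the augmented Chebyshev scalarization (maximization convention; BoTorch's own implementation also normalizes outcomes first): the min term pushes up the worst weighted objective, and the small linear term breaks ties, which is what lets qParEGO reach non-convex parts of the frontier.

import numpy as np

def augmented_chebyshev(objectives: np.ndarray,
                        weights: np.ndarray,
                        alpha: float = 0.05) -> float:
    # Improve the worst weighted objective; the alpha term augments it
    # with a small linear sum so ties are broken consistently.
    weighted = weights * objectives
    return float(weighted.min() + alpha * weighted.sum())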
Example #6
def get_pareto_optimal_parameters(
    experiment: Experiment,
    generation_strategy: GenerationStrategy,
    use_model_predictions: bool = True,
) -> Optional[Dict[int, Tuple[TParameterization, TModelPredictArm]]]:
    """Identifies the best parameterizations tried in the experiment so far,
    using model predictions if ``use_model_predictions`` is true and using
    observed values from the experiment otherwise. By default, uses model
    predictions to account for observation noise.

    NOTE: The format of this method's output is as follows:
    { trial_index --> (parameterization, (means, covariances)) }, where means
    are a dictionary of form { metric_name --> metric_mean } and covariances
    are a nested dictionary of form
    { one_metric_name --> { another_metric_name: covariance } }.

    Args:
        experiment: Experiment, from which to find Pareto-optimal arms.
        generation_strategy: Generation strategy containing the modelbridge.
        use_model_predictions: Whether to extract the Pareto frontier using
            model predictions or directly observed values. If ``True``,
            the metric means and covariances in this method's output will
            also be based on model predictions and may differ from the
            observed values.

    Returns:
        ``None`` if it was not possible to extract the Pareto frontier,
        otherwise a mapping from trial index to the tuple of:
        - the parameterization of the arm in that trial,
        - two-item tuple of metric means dictionary and covariance matrix
            (model-predicted if ``use_model_predictions=True`` and observed
            otherwise).
    """
    # Validate aspects of the experiment: that it is a MOO experiment and
    # that the current model can be used to produce the Pareto frontier.
    if not not_none(experiment.optimization_config).is_moo_problem:
        raise UnsupportedError(
            "Please use `get_best_parameters` for single-objective problems.")

    moo_optimization_config = checked_cast(MultiObjectiveOptimizationConfig,
                                           experiment.optimization_config)
    if moo_optimization_config.outcome_constraints:
        # TODO[drfreund]: Test this flow and remove error.
        raise NotImplementedError(
            "Support for outcome constraints is currently under development.")

    # Extract or instantiate modelbridge to use for Pareto frontier extraction.
    mb = generation_strategy.model
    if mb is None or not isinstance(mb, MultiObjectiveTorchModelBridge):
        logger.info(
            "Extracting a Pareto frontier requires a multi-objective model "
            f"bridge, but the current model bridge is {mb} of type {type(mb)}. "
            "Will use `Models.MOO` instead to extract the Pareto frontier.")
        mb = checked_cast(
            MultiObjectiveTorchModelBridge,
            Models.MOO(experiment=experiment,
                       data=checked_cast(Data, experiment.lookup_data())),
        )
    else:
        # Make sure the model is up-to-date with the most recent data.
        generation_strategy._set_or_update_current_model(data=None)

    # If objective thresholds are not specified in optimization config, extract
    # the inferred ones if possible or infer them anew if not.
    objective_thresholds_override = None
    if not moo_optimization_config.objective_thresholds:
        lgr = generation_strategy.last_generator_run
        if lgr and lgr.gen_metadata and "objective_thresholds" in lgr.gen_metadata:
            objective_thresholds_override = lgr.gen_metadata[
                "objective_thresholds"]
        else:
            objective_thresholds_override = mb.infer_objective_thresholds(
                search_space=experiment.search_space,
                optimization_config=experiment.optimization_config,
                fixed_features=None,
            )
        logger.info(
            f"Using inferred objective thresholds: {objective_thresholds_override}, "
            "as objective thresholds were not specified as part of the optimization "
            "configuration on the experiment.")

    # Extract the Pareto frontier and format it as follows:
    # { trial_index --> (parameterization, (means, covariances)) }
    pareto_util = predicted_pareto if use_model_predictions else observed_pareto
    pareto_optimal_observations = pareto_util(
        modelbridge=mb, objective_thresholds=objective_thresholds_override)
    return {
        int(not_none(obs.features.trial_index)): (
            obs.features.parameters,
            (obs.data.means_dict, obs.data.covariance_matrix),
        )
        for obs in pareto_optimal_observations
    }
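
A minimal consumption sketch, assuming `experiment` and `generation_strategy` are already populated; the unpacking mirrors the output format documented in the docstring above:

pareto = get_pareto_optimal_parameters(
    experiment=experiment,
    generation_strategy=generation_strategy,
    use_model_predictions=True,
)
if pareto is not None:
    for trial_index, (parameters, (means, covariances)) in pareto.items():
        # means: {metric_name: mean}; covariances: nested
        # {metric_name: {other_metric_name: covariance}}
        print(trial_index, parameters, means)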
Example #7
def compute_pareto_frontier(
    experiment: Experiment,
    primary_objective: Metric,
    secondary_objective: Metric,
    data: Optional[Data] = None,
    outcome_constraints: Optional[List[OutcomeConstraint]] = None,
    absolute_metrics: Optional[List[str]] = None,
    num_points: int = 10,
    trial_index: Optional[int] = None,
    chebyshev: bool = True,
) -> ParetoFrontierResults:
    """Compute the Pareto frontier between two objectives. For experiments
    with batch trials, a trial index or data object must be provided.

    Args:
        experiment: The experiment to compute a Pareto frontier for.
        primary_objective: The primary objective to optimize.
        secondary_objective: The secondary objective against which
            to trade off the primary objective.
        data: Data to use for fitting the model; if not provided, data is
            fetched from the experiment (or from the trial at ``trial_index``).
        outcome_constraints: Outcome constraints to be respected by the
            optimization. Can only contain constraints on metrics that are
            not the primary or secondary objectives.
        absolute_metrics: List of outcome metrics that should NOT be
            relativized w.r.t. the status quo (all other outcomes will be
            in % relative to the status quo).
        num_points: The number of points to compute on the Pareto frontier.
        trial_index: The index of the trial to fetch data from, for
            experiments with batch trials.
        chebyshev: Whether to use augmented Chebyshev scalarization when
            computing Pareto frontier points.

    Returns:
        ParetoFrontierResults: A NamedTuple with the following fields:
            - param_dicts: The parameter dicts of the
                points generated on the Pareto Frontier.
            - means: The posterior mean predictions of
                the model for each metric (same order as the param dicts).
            - sems: The posterior sem predictions of
                the model for each metric (same order as the param dicts).
            - primary_metric: The name of the primary metric.
            - secondary_metric: The name of the secondary metric.
            - absolute_metrics: List of outcome metrics that
                were NOT relativized w.r.t. the status quo (all other metrics
                are in % relative to status_quo).
    """
    # TODO(jej): Implement using MultiObjectiveTorchModelBridge's _pareto_frontier
    model_gen_options = {
        "acquisition_function_kwargs": {
            "chebyshev_scalarization": chebyshev
        }
    }

    if (trial_index is None and data is None and any(
            isinstance(t, BatchTrial) for t in experiment.trials.values())):
        raise UnsupportedError(
            "Must specify a trial index or data for an experiment with "
            "batch trials.")
    absolute_metrics = [] if absolute_metrics is None else absolute_metrics
    for metric in absolute_metrics:
        if metric not in experiment.metrics:
            raise ValueError(f"Model was not fit on metric `{metric}`")

    if outcome_constraints is None:
        outcome_constraints = []
    else:
        # ensure we don't constrain an objective
        _validate_outcome_constraints(
            outcome_constraints=outcome_constraints,
            primary_objective=primary_objective,
            secondary_objective=secondary_objective,
        )

    # Build the posterior-mean model.
    if data is None:
        try:
            data = (experiment.trials[trial_index].fetch_data()
                    if trial_index is not None else experiment.fetch_data())
        except Exception as e:
            logger.info(f"Could not fetch data from experiment or trial: {e}")

    oc = _build_new_optimization_config(
        weights=np.array([0.5, 0.5]),
        primary_objective=primary_objective,
        secondary_objective=secondary_objective,
        outcome_constraints=outcome_constraints,
    )
    model = Models.MOO(
        experiment=experiment,
        data=data,
        acqf_constructor=get_PosteriorMean,
        optimization_config=oc,
    )

    status_quo = experiment.status_quo
    if status_quo:
        try:
            status_quo_prediction = model.predict([
                ObservationFeatures(
                    parameters=status_quo.parameters,
                    # pyre-fixme [6]: Expected `Optional[np.int64]` for trial_index
                    trial_index=trial_index,
                )
            ])
        except ValueError as e:
            logger.warning(f"Could not predict OOD status_quo outcomes: {e}")
            status_quo = None
            status_quo_prediction = None
    else:
        status_quo_prediction = None

    param_dicts: List[TParameterization] = []

    # Construct weightings with linear angular spacing.
    # TODO: Verify whether 0, 1 weights cause problems because of subset_model.
    alpha = np.linspace(0 + 0.01, np.pi / 2 - 0.01, num_points)
    primary_weight = (-1 if primary_objective.lower_is_better else
                      1) * np.cos(alpha)
    secondary_weight = (-1 if secondary_objective.lower_is_better else
                        1) * np.sin(alpha)
    weights_list = np.stack([primary_weight, secondary_weight]).transpose()
    for weights in weights_list:
        oc = _build_new_optimization_config(
            weights=weights,
            primary_objective=primary_objective,
            secondary_objective=secondary_objective,
            outcome_constraints=outcome_constraints,
        )
        # TODO: (jej) T64002590 Let this serve as a starting point for optimization.
        # ex. Add global spacing criterion. Implement on BoTorch side.
        # pyre-fixme [6]: Expected different type for model_gen_options
        run = model.gen(1,
                        model_gen_options=model_gen_options,
                        optimization_config=oc)
        param_dicts.append(run.arms[0].parameters)

    # Call predict on points to get their decomposed metrics.
    means, cov = model.predict(
        [ObservationFeatures(parameters) for parameters in param_dicts])

    return _extract_pareto_frontier_results(
        param_dicts=param_dicts,
        means=means,
        variances=cov,
        primary_metric=primary_objective.name,
        secondary_metric=secondary_objective.name,
        absolute_metrics=absolute_metrics,
        outcome_constraints=outcome_constraints,
        status_quo_prediction=status_quo_prediction,
    )
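
A minimal usage sketch, assuming a batch-trial experiment whose two objectives are the hypothetical metrics `metric_a` and `metric_b`:

frontier = compute_pareto_frontier(
    experiment=experiment,
    primary_objective=experiment.metrics["metric_a"],
    secondary_objective=experiment.metrics["metric_b"],
    trial_index=0,  # required here because the experiment has batch trials
    num_points=20,
)
# Frontier points and their predicted means/sems line up index-by-index.
print(frontier.primary_metric, frontier.secondary_metric)
print(frontier.param_dicts)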
Example #8
    def test_get_standard_plots(self):
        exp = get_branin_experiment()
        self.assertEqual(
            len(
                get_standard_plots(experiment=exp,
                                   model=get_generation_strategy().model)),
            0,
        )
        exp = get_branin_experiment(with_batch=True, minimize=True)
        exp.trials[0].run()
        plots = get_standard_plots(
            experiment=exp,
            model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
        )
        self.assertEqual(len(plots), 6)
        self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
        exp = get_branin_experiment_with_multi_objective(with_batch=True)
        exp.optimization_config.objective.objectives[0].minimize = False
        exp.optimization_config.objective.objectives[1].minimize = True
        exp.optimization_config._objective_thresholds = [
            ObjectiveThreshold(metric=exp.metrics["branin_a"],
                               op=ComparisonOp.GEQ,
                               bound=-100.0),
            ObjectiveThreshold(metric=exp.metrics["branin_b"],
                               op=ComparisonOp.LEQ,
                               bound=100.0),
        ]
        exp.trials[0].run()
        plots = get_standard_plots(experiment=exp,
                                   model=Models.MOO(experiment=exp,
                                                    data=exp.fetch_data()))
        self.assertEqual(len(plots), 7)

        # All plots are successfully created when objective thresholds are absent
        exp.optimization_config._objective_thresholds = []
        plots = get_standard_plots(experiment=exp,
                                   model=Models.MOO(experiment=exp,
                                                    data=exp.fetch_data()))
        self.assertEqual(len(plots), 7)

        exp = get_branin_experiment_with_timestamp_map_metric(
            with_status_quo=True)
        exp.new_trial().add_arm(exp.status_quo)
        exp.trials[0].run()
        exp.new_trial(generator_run=Models.SOBOL(
            search_space=exp.search_space).gen(n=1))
        exp.trials[1].run()
        plots = get_standard_plots(
            experiment=exp,
            model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
            true_objective_metric_name="branin",
        )

        self.assertEqual(len(plots), 9)
        self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
        self.assertIn(
            "Objective branin_map vs. True Objective Metric branin",
            [p.layout.title.text for p in plots],
        )

        with self.assertRaisesRegex(
                ValueError, "Please add a valid true_objective_metric_name"):
            plots = get_standard_plots(
                experiment=exp,
                model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
                true_objective_metric_name="not_present",
            )
Example #9
def get_MTGP_PAREGO(
    experiment: Experiment,
    data: Data,
    trial_index: Optional[int] = None,
    objective_thresholds: Optional[TRefPoint] = None,
    search_space: Optional[SearchSpace] = None,
    dtype: torch.dtype = torch.double,
    device: torch.device = DEFAULT_TORCH_DEVICE,
) -> MultiObjectiveTorchModelBridge:
    """Instantiates a multi-objective, multi-task model that uses qParEGO.

    qParEGO optimizes random augmented Chebyshev scalarizations of the
    multiple objectives, which allows it to explore non-convex Pareto
    frontiers.
    """
    # pyre-ignore: [16] `Optional` has no attribute `objective`.
    if not isinstance(experiment.optimization_config.objective, MultiObjective):
        raise ValueError("Multi-objective optimization requires multiple objectives.")
    elif data.df.empty:  # pragma: no cover
        raise ValueError("MultiObjectiveOptimization requires non-empty data.")

    if isinstance(experiment, MultiTypeExperiment):
        trial_index_to_type = {
            t.index: t.trial_type for t in experiment.trials.values()
        }
        transforms = MT_MTGP_trans
        transform_configs = {
            "ConvertMetricNames": tconfig_from_mt_experiment(experiment),
            "TrialAsTask": {"trial_level_map": {"trial_type": trial_index_to_type}},
        }
    else:
        # Set transforms for a Single-type MTGP model.
        transforms = ST_MTGP_trans
        transform_configs = None

    # Choose the status quo features from the selected trial. If trial_index
    # is None, fall back to the status quo of the last experiment trial.
    if trial_index is None:
        trial_index = len(experiment.trials) - 1
    elif trial_index >= len(experiment.trials):
        raise ValueError("trial_index is bigger than the number of experiment trials")

    # pyre-fixme[16]: `ax.core.base_trial.BaseTrial` has no attribute `status_quo`.
    status_quo = experiment.trials[trial_index].status_quo
    if status_quo is None:
        status_quo_features = None
    else:
        status_quo_features = ObservationFeatures(
            parameters=status_quo.parameters,
            # pyre-fixme[6]: Expected `Optional[numpy.int64]` for 2nd param but got
            #  `int`.
            trial_index=trial_index,
        )
    return checked_cast(
        MultiObjectiveTorchModelBridge,
        Models.MOO(
            experiment=experiment,
            data=data,
            objective_thresholds=objective_thresholds,
            search_space=search_space or experiment.search_space,
            torch_dtype=dtype,
            torch_device=device,
            acqf_constructor=get_NEI,
            status_quo_features=status_quo_features,
            transforms=transforms,
            transform_configs=transform_configs,
            default_model_gen_options={
                "acquisition_function_kwargs": {
                    "chebyshev_scalarization": True,
                    "sequential": True,
                }
            },
        ),
    )
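
A minimal usage sketch; per the logic above, the status quo features are taken from the last trial when `trial_index` is omitted:

model = get_MTGP_PAREGO(
    experiment=experiment,
    data=experiment.fetch_data(),
    trial_index=0,  # optional: pin status quo features to a specific trial
)
generator_run = model.gen(n=1)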
Example #10
def get_MTGP_NEHVI(
    experiment: Experiment,
    data: Data,
    objective_thresholds: Optional[List[ObjectiveThreshold]] = None,
    search_space: Optional[SearchSpace] = None,
    dtype: torch.dtype = torch.double,
    device: torch.device = DEFAULT_TORCH_DEVICE,
    trial_index: Optional[int] = None,
) -> MultiObjectiveTorchModelBridge:
    """Instantiates a Multi-task Gaussian Process (MTGP) model that generates
    points with qNEHVI.

    If the input experiment is a MultiTypeExperiment then a
    Multi-type Multi-task GP model will be instantiated.
    Otherwise, the model will be a Single-type Multi-task GP.
    """
    # pyre-ignore: [16] `Optional` has no attribute `objective`.
    if not isinstance(experiment.optimization_config.objective, MultiObjective):
        raise ValueError("Multi-objective optimization requires multiple objectives.")
    elif data.df.empty:  # pragma: no cover
        raise ValueError("MultiObjectiveOptimization requires non-empty data.")

    if isinstance(experiment, MultiTypeExperiment):
        trial_index_to_type = {
            t.index: t.trial_type for t in experiment.trials.values()
        }
        transforms = MT_MTGP_trans
        transform_configs = {
            "ConvertMetricNames": tconfig_from_mt_experiment(experiment),
            "TrialAsTask": {"trial_level_map": {"trial_type": trial_index_to_type}},
        }
    else:
        # Set transforms for a Single-type MTGP model.
        transforms = ST_MTGP_trans
        transform_configs = None

    # Choose the status quo features from the selected trial. If trial_index
    # is None, fall back to the status quo of the last experiment trial.
    if trial_index is None:
        trial_index = len(experiment.trials) - 1
    elif trial_index >= len(experiment.trials):
        raise ValueError("trial_index is bigger than the number of experiment trials")

    # pyre-fixme[16]: `ax.core.base_trial.BaseTrial` has no attribute `status_quo`.
    status_quo = experiment.trials[trial_index].status_quo
    if status_quo is None:
        status_quo_features = None
    else:
        status_quo_features = ObservationFeatures(
            parameters=status_quo.parameters,
            # pyre-fixme[6]: Expected `Optional[numpy.int64]` for 2nd param but got
            #  `int`.
            trial_index=trial_index,
        )

    return checked_cast(
        MultiObjectiveTorchModelBridge,
        Models.MOO(
            experiment=experiment,
            data=data,
            objective_thresholds=objective_thresholds,
            search_space=search_space or experiment.search_space,
            transforms=transforms,
            transform_configs=transform_configs,
            torch_dtype=dtype,
            torch_device=device,
            status_quo_features=status_quo_features,
            default_model_gen_options={
                "optimizer_kwargs": {
                    # having a batch limit is very important for avoiding
                    # memory issues in the initialization
                    "batch_limit": DEFAULT_EHVI_BATCH_LIMIT,
                    "sequential": True,
                },
            },
        ),
    )
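
A minimal end-to-end sketch, assuming the experiment has a runner attached so new trials can be run:

model = get_MTGP_NEHVI(
    experiment=experiment,
    data=experiment.fetch_data(),
)
generator_run = model.gen(n=1)
trial = experiment.new_trial(generator_run=generator_run)
trial.run()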