Example #1
def _validate_and_maybe_get_default_metric_names(
    metric_names: Optional[Tuple[str, str]],
    optimization_config: Optional[OptimizationConfig],
) -> Tuple[str, str]:
    # Default metric_names is all objective metrics, producing an error if there
    # are not exactly 2.
    if metric_names is None:
        if not_none(optimization_config).is_moo_problem:
            multi_objective = checked_cast(
                MultiObjective,
                not_none(optimization_config).objective)
            metric_names = tuple(obj.metric.name
                                 for obj in multi_objective.objectives)
        else:
            raise UserInputError(
                "Inference of `metric_names` failed. Expected `MultiObjective` but "
                f"got {not_none(optimization_config).objective}. Please specify "
                "`metric_names` of length 2 or provide an experiment whose "
                "`optimization_config` has 2 objective metrics.")
    if metric_names is not None and len(metric_names) == 2:
        return metric_names
    raise UserInputError(
        f"Expected 2 metrics but got {len(metric_names or [])}: {metric_names}. "
        "Please specify `metric_names` of length 2 or provide an experiment whose "
        "`optimization_config` has 2 objective metrics.")
Example #2
def get_pareto_optimal_parameters(
    experiment: Experiment,
    generation_strategy: GenerationStrategy,
    use_model_predictions: bool = True,
) -> Optional[Dict[int, Tuple[TParameterization, TModelPredictArm]]]:
    """Identifies the best parameterizations tried in the experiment so far,
    using model predictions if ``use_model_predictions`` is true and using
    observed values from the experiment otherwise. By default, uses model
    predictions to account for observation noise.

    NOTE: The format of this method's output is as follows:
    { trial_index --> (parameterization, (means, covariances)) }, where means
    are a dictionary of form { metric_name --> metric_mean } and covariances
    are a nested dictionary of form
    { one_metric_name --> { another_metric_name: covariance } }.

    Args:
        experiment: Experiment, from which to find Pareto-optimal arms.
        generation_strategy: Generation strategy containing the modelbridge.
        use_model_predictions: Whether to extract the Pareto frontier using
            model predictions or directly observed values. If ``True``,
            the metric means and covariances in this method's output will
            also be based on model predictions and may differ from the
            observed values.

    Returns:
        ``None`` if it was not possible to extract the Pareto frontier,
        otherwise a mapping from trial index to the tuple of:
        - the parameterization of the arm in that trial,
        - two-item tuple of metric means dictionary and covariance matrix
            (model-predicted if ``use_model_predictions=True`` and observed
            otherwise).
    """
    # Validate aspects of the experiment: that it is a MOO experiment and
    # that the current model can be used to produce the Pareto frontier.
    if not not_none(experiment.optimization_config).is_moo_problem:
        raise UnsupportedError(
            "Please use `get_best_parameters` for single-objective problems.")

    moo_optimization_config = checked_cast(MultiObjectiveOptimizationConfig,
                                           experiment.optimization_config)
    if moo_optimization_config.outcome_constraints:
        # TODO[drfreund]: Test this flow and remove error.
        raise NotImplementedError(
            "Support for outcome constraints is currently under development.")

    # Extract or instantiate modelbridge to use for Pareto frontier extraction.
    mb = generation_strategy.model
    if mb is None or not isinstance(mb, MultiObjectiveTorchModelBridge):
        logger.info(
            "Can only extract a Pareto frontier using a multi-objective model bridge"
            f", but currently used model bridge is: {mb} of type {type(mb)}. Will "
            "use `Models.MOO` instead to extract Pareto frontier.")
        mb = checked_cast(
            MultiObjectiveTorchModelBridge,
            Models.MOO(experiment=experiment,
                       data=checked_cast(Data, experiment.lookup_data())),
        )
    else:
        # Make sure the model is up-to-date with the most recent data.
        generation_strategy._set_or_update_current_model(data=None)

    # If objective thresholds are not specified in optimization config, extract
    # the inferred ones if possible or infer them anew if not.
    objective_thresholds_override = None
    if not moo_optimization_config.objective_thresholds:
        lgr = generation_strategy.last_generator_run
        if lgr and lgr.gen_metadata and "objective_thresholds" in lgr.gen_metadata:
            objective_thresholds_override = lgr.gen_metadata[
                "objective_thresholds"]
        else:
            objective_thresholds_override = mb.infer_objective_thresholds(
                search_space=experiment.search_space,
                optimization_config=experiment.optimization_config,
                fixed_features=None,
            )
        logger.info(
            f"Using inferred objective thresholds: {objective_thresholds_override}, "
            "as objective thresholds were not specified as part of the optimization "
            "configuration on the experiment.")

    # Extract the Pareto frontier and format it as follows:
    # { trial_index --> (parameterization, (means, covariances)) }
    pareto_util = predicted_pareto if use_model_predictions else observed_pareto
    pareto_optimal_observations = pareto_util(
        modelbridge=mb, objective_thresholds=objective_thresholds_override)
    return {
        int(not_none(obs.features.trial_index)): (
            obs.features.parameters,
            (obs.data.means_dict, obs.data.covariance_matrix),
        )
        for obs in pareto_optimal_observations
    }
Example #3
 def f(self, x: np.ndarray) -> float:
     x1, x2, fidelity = x
     fidelity_penalty = random() * math.pow(1.0 - fidelity, 2.0)
     return checked_cast(float, branin(x1=x1, x2=x2)) - fidelity_penalty
Example #4
 def f(self, x: np.ndarray) -> float:
     return checked_cast(float, aug_branin(x))
Example #5
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,  # objective_directions
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        objective_thresholds: Optional[Tensor] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata,
               Optional[List[TCandidateMetadata]]]:
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel")
        if (objective_thresholds is not None and
                objective_weights.shape[0] != objective_thresholds.shape[0]):
            raise AxError(
                "Objective weights and thresholds most both contain an element for"
                " each modeled metric.")

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = not_none(self.model)
        full_objective_thresholds = objective_thresholds
        full_objective_weights = objective_weights
        full_outcome_constraints = outcome_constraints
        # subset model only to the outcomes we need for the optimization
        if options.get(Keys.SUBSET_MODEL, True):
            subset_model_results = subset_model(
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                objective_thresholds=objective_thresholds,
            )
            model = subset_model_results.model
            objective_weights = subset_model_results.objective_weights
            outcome_constraints = subset_model_results.outcome_constraints
            objective_thresholds = subset_model_results.objective_thresholds
            idcs = subset_model_results.indices
        else:
            idcs = None
        if objective_thresholds is None:
            full_objective_thresholds = infer_objective_thresholds(
                model=model,
                X_observed=not_none(X_observed),
                objective_weights=full_objective_weights,
                outcome_constraints=full_outcome_constraints,
                subset_idcs=idcs,
            )
            # subset the objective thresholds
            objective_thresholds = (full_objective_thresholds
                                    if idcs is None else
                                    full_objective_thresholds[idcs].clone())

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)
        botorch_rounding_func = get_rounding_func(rounding_func)
        if acf_options.get("random_scalarization", False) or acf_options.get(
                "chebyshev_scalarization", False):
            # If using a list of acquisition functions, the algorithm to generate
            # that list is configured by acquisition_function_kwargs.
            objective_weights_list = [
                randomize_objective_weights(objective_weights, **acf_options)
                for _ in range(n)
            ]
            acquisition_function_list = [
                self.acqf_constructor(  # pyre-ignore: [28]
                    model=model,
                    objective_weights=objective_weights,
                    outcome_constraints=outcome_constraints,
                    X_observed=X_observed,
                    X_pending=X_pending,
                    **acf_options,
                ) for objective_weights in objective_weights_list
            ]
            acquisition_function_list = [
                checked_cast(AcquisitionFunction, acq_function)
                for acq_function in acquisition_function_list
            ]
            # Multiple acquisition functions require a sequential optimizer, so
            # always use scipy_optimizer_list.
            # TODO(jej): Allow any optimizer.
            candidates, expected_acquisition_value = scipy_optimizer_list(
                acq_function_list=acquisition_function_list,
                bounds=bounds_,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        else:
            acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
                model=model,
                objective_weights=objective_weights,
                objective_thresholds=objective_thresholds,
                outcome_constraints=outcome_constraints,
                X_observed=X_observed,
                X_pending=X_pending,
                **acf_options,
            )
            acquisition_function = checked_cast(AcquisitionFunction,
                                                acquisition_function)
            # pyre-ignore: [28]
            candidates, expected_acquisition_value = self.acqf_optimizer(
                acq_function=checked_cast(AcquisitionFunction,
                                          acquisition_function),
                bounds=bounds_,
                n=n,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        gen_metadata = {
            "expected_acquisition_value": expected_acquisition_value.tolist(),
            "objective_thresholds": not_none(full_objective_thresholds).cpu(),
        }
        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            gen_metadata,
            None,
        )
Example #6
def get_standard_plots(
    experiment: Experiment,
    model: Optional[ModelBridge],
    data: Optional[Union[Data, MapData]] = None,
    model_transitions: Optional[List[int]] = None,
) -> List[go.Figure]:
    """Extract standard plots for single-objective optimization.

    Extracts a list of plots from an ``Experiment`` and ``ModelBridge`` of general
    interest to an Ax user. Currently not supported are
    - TODO: multi-objective optimization
    - TODO: ChoiceParameter plots

    Args:
        - experiment: The ``Experiment`` from which to obtain standard plots.
        - model: The ``ModelBridge`` used to suggest trial parameters.
        - data: If specified, data to which to fit the model before generating plots.
        - model_transitions: The arm numbers at which shifts in generation_strategy
            occur.

    Returns:
        - a plot of objective value vs. trial index, to show experiment progression
        - a plot of objective value vs. range parameter values, only included if the
          model associated with generation_strategy can create predictions. This
          consists of:

            - a plot_slice plot if the search space contains one range parameter
            - an interact_contour plot if the search space contains multiple
              range parameters

    """
    objective = not_none(experiment.optimization_config).objective
    if isinstance(objective, ScalarizedObjective):
        logger.warning(
            "get_standard_plots does not currently support ScalarizedObjective "
            "optimization experiments. Returning an empty list.")
        return []

    if data is None:
        data = experiment.lookup_data()
        if isinstance(data, MapData):
            data = data.deduplicate_data()

    if data.df.empty:
        logger.info(
            f"Experiment {experiment} does not yet have data, nothing to plot."
        )
        return []

    output_plot_list = []
    output_plot_list.append(
        _get_objective_trace_plot(
            experiment=experiment,
            data=checked_cast(Data, data)
            if isinstance(data, Data) else checked_cast(MapData, data),
            model_transitions=model_transitions
            if model_transitions is not None else [],
        ))

    # Objective vs. parameter plot requires a `Model`, so add it only if model
    # is already available. In cases where custom trials are attached initially,
    # model might not yet be set on the generation strategy.
    if model:
        # TODO: Check if model can predict in favor of try/catch.
        try:
            output_plot_list.extend(
                _get_objective_v_param_plots(
                    experiment=experiment,
                    model=model,
                ))
            output_plot_list.extend(_get_cross_validation_plots(model=model))
            feature_importance_plot = plot_feature_importance_by_feature_plotly(
                model=model,
                relative=False,
                caption=feature_importance_caption)
            feature_importance_plot.layout.title = "[ADVANCED] " + str(
                # pyre-fixme[16]: go.Figure has no attribute `layout`
                feature_importance_plot.layout.title.text)
            output_plot_list.append(feature_importance_plot)
        except NotImplementedError:
            # Model does not implement `predict` method.
            pass

    return [plot for plot in output_plot_list if plot is not None]
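A minimal usage sketch, assuming an `AxClient` (Ax Service API) whose generation
strategy already has a fitted model; the attribute accesses mirror the arguments
above:

plots = get_standard_plots(
    experiment=ax_client.experiment,
    model=ax_client.generation_strategy.model,
)
for figure in plots:
    figure.show()  # each entry is a plotly `go.Figure`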
Example #7
def get_factorial(search_space: SearchSpace) -> DiscreteModelBridge:
    """Instantiates a factorial generator."""
    return checked_cast(
        DiscreteModelBridge,
        Models.FACTORIAL(search_space=search_space, fit_out_of_design=True),
    )
Example #8
 def f(self, x: np.ndarray) -> float:
     x1, x2 = x
     return checked_cast(float, branin(x1=x1, x2=x2))
Example #9
def get_MTGP_PAREGO(
    experiment: Experiment,
    data: Data,
    trial_index: Optional[int] = None,
    objective_thresholds: Optional[TRefPoint] = None,
    search_space: Optional[SearchSpace] = None,
    dtype: torch.dtype = torch.double,
    device: torch.device = DEFAULT_TORCH_DEVICE,
) -> MultiObjectiveTorchModelBridge:
    """Instantiates a multi-objective, multi-task model that uses qParEGO.

    qParEGO optimizes random augmented Chebyshev scalarizations of the multiple
    objectives. This allows it to explore non-convex Pareto frontiers.
    """
    # pyre-ignore: [16] `Optional` has no attribute `objective`.
    if not isinstance(experiment.optimization_config.objective, MultiObjective):
        raise ValueError("Multi-objective optimization requires multiple objectives.")
    elif data.df.empty:  # pragma: no cover
        raise ValueError("MultiObjectiveOptimization requires non-empty data.")

    if isinstance(experiment, MultiTypeExperiment):
        trial_index_to_type = {
            t.index: t.trial_type for t in experiment.trials.values()
        }
        transforms = MT_MTGP_trans
        transform_configs = {
            "ConvertMetricNames": tconfig_from_mt_experiment(experiment),
            "TrialAsTask": {"trial_level_map": {"trial_type": trial_index_to_type}},
        }
    else:
        # Set transforms for a Single-type MTGP model.
        transforms = ST_MTGP_trans
        transform_configs = None

    # Choose the status quo features for the experiment from the selected trial.
    # If trial_index is None, we will look for a status quo from the last
    # experiment trial to use as a status quo for the experiment.
    if trial_index is None:
        trial_index = len(experiment.trials) - 1
    elif trial_index >= len(experiment.trials):
        raise ValueError("trial_index is bigger than the number of experiment trials")

    # pyre-fixme[16]: `ax.core.base_trial.BaseTrial` has no attribute `status_quo`.
    status_quo = experiment.trials[trial_index].status_quo
    if status_quo is None:
        status_quo_features = None
    else:
        status_quo_features = ObservationFeatures(
            parameters=status_quo.parameters,
            # pyre-fixme[6]: Expected `Optional[numpy.int64]` for 2nd param but got
            #  `int`.
            trial_index=trial_index,
        )
    return checked_cast(
        MultiObjectiveTorchModelBridge,
        Models.MOO(
            experiment=experiment,
            data=data,
            objective_thresholds=objective_thresholds,
            search_space=search_space or experiment.search_space,
            torch_dtype=dtype,
            torch_device=device,
            acqf_constructor=get_NEI,
            status_quo_features=status_quo_features,
            transforms=transforms,
            transform_configs=transform_configs,
            default_model_gen_options={
                "acquisition_function_kwargs": {
                    "chebyshev_scalarization": True,
                    "sequential": True,
                }
            },
        ),
    )
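The "random augmented Chebyshev scalarization" mentioned in the docstring can be
sketched with BoTorch's helper; the weights and outcomes below are illustrative
stand-ins, not the exact tensors Ax constructs internally:

import torch
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization

Y = torch.rand(10, 2)               # observed outcomes, used to normalize objectives
weights = torch.tensor([0.7, 0.3])  # qParEGO draws a fresh random weight vector per candidate
scalarize = get_chebyshev_scalarization(weights=weights, Y=Y)
scalarized = scalarize(Y)           # shape (10,); roughly min_i(w_i * y_i) + alpha * sum_i(w_i * y_i)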
Example #10
def get_MTGP_NEHVI(
    experiment: Experiment,
    data: Data,
    objective_thresholds: Optional[List[ObjectiveThreshold]] = None,
    search_space: Optional[SearchSpace] = None,
    dtype: torch.dtype = torch.double,
    device: torch.device = DEFAULT_TORCH_DEVICE,
    trial_index: Optional[int] = None,
) -> TorchModelBridge:
    """Instantiates a Multi-task Gaussian Process (MTGP) model that generates
    points with qNEHVI.

    If the input experiment is a MultiTypeExperiment then a
    Multi-type Multi-task GP model will be instantiated.
    Otherwise, the model will be a Single-type Multi-task GP.
    """
    # pyre-ignore: [16] `Optional` has no attribute `objective`.
    if not isinstance(experiment.optimization_config.objective, MultiObjective):
        raise ValueError("Multi-objective optimization requires multiple objectives.")
    elif data.df.empty:  # pragma: no cover
        raise ValueError("MultiObjectiveOptimization requires non-empty data.")

    if isinstance(experiment, MultiTypeExperiment):
        trial_index_to_type = {
            t.index: t.trial_type for t in experiment.trials.values()
        }
        transforms = MT_MTGP_trans
        transform_configs = {
            "ConvertMetricNames": tconfig_from_mt_experiment(experiment),
            "TrialAsTask": {"trial_level_map": {"trial_type": trial_index_to_type}},
        }
    else:
        # Set transforms for a Single-type MTGP model.
        transforms = ST_MTGP_trans
        transform_configs = None

    # Choose the status quo features for the experiment from the selected trial.
    # If trial_index is None, we will look for a status quo from the last
    # experiment trial to use as a status quo for the experiment.
    if trial_index is None:
        trial_index = len(experiment.trials) - 1
    elif trial_index >= len(experiment.trials):
        raise ValueError("trial_index is bigger than the number of experiment trials")

    # pyre-fixme[16]: `ax.core.base_trial.BaseTrial` has no attribute `status_quo`.
    status_quo = experiment.trials[trial_index].status_quo
    if status_quo is None:
        status_quo_features = None
    else:
        status_quo_features = ObservationFeatures(
            parameters=status_quo.parameters,
            # pyre-fixme[6]: Expected `Optional[numpy.int64]` for 2nd param but got
            #  `int`.
            trial_index=trial_index,
        )

    return checked_cast(
        MultiObjectiveTorchModelBridge,
        Models.MOO(
            experiment=experiment,
            data=data,
            objective_thresholds=objective_thresholds,
            search_space=search_space or experiment.search_space,
            transforms=transforms,
            transform_configs=transform_configs,
            torch_dtype=dtype,
            torch_device=device,
            status_quo_features=status_quo_features,
            default_model_gen_options={
                "optimizer_kwargs": {
                    # having a batch limit is very important for avoiding
                    # memory issues in the initialization
                    "batch_limit": DEFAULT_EHVI_BATCH_LIMIT,
                    "sequential": True,
                },
            },
        ),
    )
Example #11
def _get_cutoffs_from_transform_config(
    metric_name: str,
    metric_values: List[float],
    winsorization_config: Union[WinsorizationConfig,
                                Dict[str, WinsorizationConfig]],
    optimization_config: Optional[OptimizationConfig],
) -> Tuple[float, float]:
    # (1) Use the same config for all metrics if one WinsorizationConfig was specified
    if isinstance(winsorization_config, WinsorizationConfig):
        return _quantiles_to_cutoffs(
            metric_name=metric_name,
            metric_values=metric_values,
            metric_config=winsorization_config,
        )

    # (2) If `winsorization_config` is a dict, use it if `metric_name` is a key,
    # and the corresponding value is a WinsorizationConfig.
    if isinstance(winsorization_config,
                  dict) and metric_name in winsorization_config:
        metric_config = winsorization_config[metric_name]
        if not isinstance(metric_config, WinsorizationConfig):
            raise UserInputError(
                "Expected winsorization config of type "
                f"`WinsorizationConfig` but got {metric_config} of type "
                f"{type(metric_config)} for metric {metric_name}.")
        return _quantiles_to_cutoffs(
            metric_name=metric_name,
            metric_values=metric_values,
            metric_config=metric_config,
        )

    # (3) For constraints and objectives that don't have a pre-specified config we
    # choose the cutoffs automatically using the optimization config (if supplied).
    # We ignore ScalarizedOutcomeConstraint and ScalarizedObjective for now. An
    # exception is raised if we encounter relative constraints.
    if optimization_config:
        if metric_name in optimization_config.objective.metric_names:
            if isinstance(optimization_config.objective, ScalarizedObjective):
                warnings.warn(
                    "Automatic winsorization isn't supported for ScalarizedObjective. "
                    "Specify the winsorization settings manually if you want to "
                    f"winsorize metric {metric_name}.")
                return DEFAULT_CUTOFFS  # Don't winsorize a ScalarizedObjective
            elif optimization_config.is_moo_problem:
                # We deal with a multi-objective function the same way as we deal
                # with an output constraint. It may be worth investigating setting
                # the winsorization cutoffs based on the Pareto frontier in the future.
                optimization_config = checked_cast(
                    MultiObjectiveOptimizationConfig, optimization_config)
                objective_threshold = _get_objective_threshold_from_moo_config(
                    optimization_config=optimization_config,
                    metric_name=metric_name)
                if objective_threshold:
                    return _get_auto_winsorization_cutoffs_outcome_constraint(
                        metric_values=metric_values,
                        outcome_constraints=objective_threshold,
                    )
                warnings.warn(
                    "Automatic winsorization isn't supported for an objective in "
                    "`MultiObjective` without objective thresholds. Specify the "
                    "winsorization settings manually if you want to winsorize "
                    f"metric {metric_name}.")
                return DEFAULT_CUTOFFS  # Don't winsorize if there is no threshold
            else:  # Single objective
                return _get_auto_winsorization_cutoffs_single_objective(
                    metric_values=metric_values,
                    minimize=optimization_config.objective.minimize,
                )
        # Get all outcome constraints for metric_name that aren't relative or scalarized
        outcome_constraints = _get_outcome_constraints_from_config(
            optimization_config=optimization_config, metric_name=metric_name)
        if outcome_constraints:
            return _get_auto_winsorization_cutoffs_outcome_constraint(
                metric_values=metric_values,
                outcome_constraints=outcome_constraints,
            )

    # If none of the above, we don't winsorize.
    return DEFAULT_CUTOFFS
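For branches (1) and (2), `_quantiles_to_cutoffs` derives cutoffs from the
quantile margins in a `WinsorizationConfig`. A rough sketch of the idea (an
illustrative helper, not Ax's internal implementation):

import numpy as np

def sketch_quantile_cutoffs(values, lower_quantile_margin=0.0, upper_quantile_margin=0.0):
    # Values outside [lower, upper] would be clamped during winsorization.
    lower = np.quantile(values, lower_quantile_margin) if lower_quantile_margin else -np.inf
    upper = np.quantile(values, 1.0 - upper_quantile_margin) if upper_quantile_margin else np.inf
    return lower, upper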
Example #12
 def get_trial_parameters(self, trial_index: int) -> TParameterization:
     """Retrieve the parameterization of the trial by the given index."""
     if trial_index not in self.experiment.trials:
         raise ValueError(f"Trial {trial_index} does not yet exist.")
     trial = checked_cast(Trial, self.experiment.trials.get(trial_index))
     return not_none(trial.arm).parameters
Example #13
 def test_default_generation_strategy(self) -> None:
     """Test that Sobol+GPEI is used if no GenerationStrategy is provided."""
     ax_client = AxClient()
     ax_client.create_experiment(
         parameters=[  # pyre-fixme[6]: expected union that should include
             {
                 "name": "x1",
                 "type": "range",
                 "bounds": [-5.0, 10.0]
             },
             {
                 "name": "x2",
                 "type": "range",
                 "bounds": [0.0, 15.0]
             },
         ],
         objective_name="branin",
         minimize=True,
     )
     self.assertEqual(
         [s.model for s in not_none(ax_client.generation_strategy)._steps],
         [Models.SOBOL, Models.GPEI],
     )
     with self.assertRaisesRegex(ValueError, ".* no trials."):
         ax_client.get_optimization_trace(objective_optimum=branin.fmin)
     for i in range(6):
         parameterization, trial_index = ax_client.get_next_trial()
         x1, x2 = parameterization.get("x1"), parameterization.get("x2")
         ax_client.complete_trial(
             trial_index,
             raw_data={
                 "branin": (
                     checked_cast(
                         float,
                         branin(checked_cast(float, x1),
                                checked_cast(float, x2)),
                     ),
                     0.0,
                 )
             },
             sample_size=i,
         )
         if i < 5:
             with self.assertRaisesRegex(ValueError,
                                         "Could not obtain contour"):
                 ax_client.get_contour_plot(param_x="x1", param_y="x2")
     ax_client.get_optimization_trace(objective_optimum=branin.fmin)
     ax_client.get_contour_plot()
     self.assertIn("x1", ax_client.get_trials_data_frame())
     self.assertIn("x2", ax_client.get_trials_data_frame())
     self.assertIn("branin", ax_client.get_trials_data_frame())
     self.assertEqual(len(ax_client.get_trials_data_frame()), 6)
     # Test that Sobol is chosen when all parameters are choice.
     ax_client = AxClient()
     ax_client.create_experiment(
         parameters=[  # pyre-fixme[6]: expected union that should include
             {
                 "name": "x1",
                 "type": "choice",
                 "values": [1, 2, 3]
             },
             {
                 "name": "x2",
                 "type": "choice",
                 "values": [1, 2, 3]
             },
         ])
     self.assertEqual(
         [s.model for s in not_none(ax_client.generation_strategy)._steps],
         [Models.SOBOL],
     )
     self.assertEqual(ax_client.get_recommended_max_parallelism(),
                      [(-1, -1)])
     self.assertTrue(ax_client.get_trials_data_frame().empty)
Example #14
    def test_early_stopping_with_unaligned_results(self):
        # test case 1
        exp = get_branin_experiment_with_timestamp_map_metric(rate=0.5)
        for i in range(5):
            trial = exp.new_trial().add_arm(
                arm=get_branin_arms(n=1, seed=i)[0])
            trial.run()

        for _ in range(3):
            # each time we call fetch, we grab another timestamp
            exp.fetch_data()

        for trial in exp.trials.values():
            trial.mark_as(status=TrialStatus.COMPLETED)

        # manually "unalign" timestamps to simulate real-world scenario
        # where each curve reports results at different steps
        data = checked_cast(MapData, exp.fetch_data())

        unaligned_timestamps = [0, 1, 4, 1, 2, 3, 1, 3, 4, 0, 1, 2, 0, 2, 4]
        data.map_df.loc[data.map_df["metric_name"] == "branin_map",
                        "timestamp"] = unaligned_timestamps
        exp.attach_data(data=data)
        """
        Dataframe after interpolation:
                    0           1          2           3          4
        timestamp
        0          146.138620         NaN        NaN  143.375669  65.033535
        1          117.388086  113.057480  44.627226  115.168704  58.636359
        2          111.575393   90.815154  40.237365   98.060315  52.239184
        3          105.762700   77.324501  35.847504         NaN  48.359101
        4           99.950007         NaN  30.522333         NaN  44.479018
        """
        # We consider trials 0, 2, and 4 for early stopping at progression 4,
        #    and choose to stop trial 0.
        # We consider trial 1 for early stopping at progression 3, and
        #    choose to stop it.
        # We consider trial 3 for early stopping at progression 2, and
        #    choose to stop it.
        early_stopping_strategy = PercentileEarlyStoppingStrategy(
            percentile_threshold=50,
            min_curves=3,
        )
        should_stop = _evaluate_early_stopping_with_df(
            early_stopping_strategy=early_stopping_strategy,
            experiment=exp,
            df=data.map_df,
        )
        self.assertEqual(set(should_stop), {0, 1, 3})

        # test case 2, where trial 3 has only 1 data point
        exp = get_branin_experiment_with_timestamp_map_metric(rate=0.5)
        for i in range(5):
            trial = exp.new_trial().add_arm(
                arm=get_branin_arms(n=1, seed=i)[0])
            trial.run()

        for _ in range(3):
            # each time we call fetch, we grab another timestamp
            exp.fetch_data()

        for trial in exp.trials.values():
            trial.mark_as(status=TrialStatus.COMPLETED)

        # manually "unalign" timestamps to simulate real-world scenario
        # where each curve reports results at different steps
        data = checked_cast(MapData, exp.fetch_data())

        unaligned_timestamps = [0, 1, 4, 1, 2, 3, 1, 3, 4, 0, 1, 2, 0, 2, 4]
        data.map_df.loc[data.map_df["metric_name"] == "branin_map",
                        "timestamp"] = unaligned_timestamps
        # manually remove timestamps 1 and 2 for arm 3
        data.map_df.drop(
            [15, 16], inplace=True
        )  # TODO: this won't work once we make map_df immutable (which we should)
        exp.attach_data(data=data)
        """
        Dataframe after interpolation:
                    0           1          2           3          4
        timestamp
        0          146.138620         NaN        NaN  143.375669  65.033535
        1          117.388086  113.057480  44.627226         NaN  58.636359
        2          111.575393   90.815154  40.237365         NaN  52.239184
        3          105.762700   77.324501  35.847504         NaN  48.359101
        4           99.950007         NaN  30.522333         NaN  44.479018
        """

        # We consider trials 0, 2, and 4 for early stopping at progression 4,
        #    and choose to stop trial 0.
        # We consider trial 1 for early stopping at progression 3, and
        #    choose to stop it.
        # We consider trial 3 for early stopping at progression 0, and
        #    choose not to stop it.
        early_stopping_strategy = PercentileEarlyStoppingStrategy(
            percentile_threshold=50,
            min_curves=3,
        )
        should_stop = _evaluate_early_stopping_with_df(
            early_stopping_strategy=early_stopping_strategy,
            experiment=exp,
            df=data.map_df,
        )
        self.assertEqual(set(should_stop), {0, 1})
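The expected sets in both test cases follow the percentile rule under test: at
the latest progression a trial has reached, stop it if its objective value is
worse than the strategy's percentile across all curves at that progression
(Branin is minimized, so larger values are worse). A worked check for test
case 1 using the interpolated dataframe above:

import numpy as np

values_at_4 = [99.950007, 30.522333, 44.479018]  # trials 0, 2, 4 at timestamp 4
threshold = np.percentile(values_at_4, 50)       # median = 44.479018
assert values_at_4[0] > threshold                # trial 0 is stopped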
Example #15
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata, Optional[List[TCandidateMetadata]]]:
        options = model_gen_options or {}
        acf_options = options.get(Keys.ACQF_KWARGS, {})
        optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel"
            )
        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get(Keys.SUBSET_MODEL, True):
            model, objective_weights, outcome_constraints, _ = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)

        botorch_rounding_func = get_rounding_func(rounding_func)
        acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
            X_pending=X_pending,
            **acf_options,
        )
        acquisition_function = checked_cast(AcquisitionFunction, acquisition_function)
        # pyre-ignore: [28]
        candidates, expected_acquisition_value = self.acqf_optimizer(
            acq_function=checked_cast(AcquisitionFunction, acquisition_function),
            bounds=bounds_,
            n=n,
            inequality_constraints=_to_inequality_constraints(
                linear_constraints=linear_constraints
            ),
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )
        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            {"expected_acquisition_value": expected_acquisition_value.tolist()},
            None,
        )
Example #16
 def test_checked_cast(self):
     self.assertEqual(checked_cast(float, 2.0), 2.0)
     with self.assertRaises(ValueError):
         checked_cast(float, 2)
Example #17
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata]:
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel")

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get("subset_model", True):
            model, objective_weights, outcome_constraints = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)
        if linear_constraints is not None:
            A, b = linear_constraints
            inequality_constraints = []
            k, d = A.shape
            for i in range(k):
                indices = A[i, :].nonzero().view(-1)
                coefficients = -A[i, indices]
                rhs = -b[i, 0]
                inequality_constraints.append((indices, coefficients, rhs))
        else:
            inequality_constraints = None

        acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
            X_pending=X_pending,
            **acf_options,
        )

        botorch_rounding_func = get_rounding_func(rounding_func)
        # pyre-ignore: [28]
        candidates, expected_acquisition_value = self.acqf_optimizer(
            acq_function=checked_cast(AcquisitionFunction,
                                      acquisition_function),
            bounds=bounds_,
            n=n,
            inequality_constraints=inequality_constraints,
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )
        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            {
                "expected_acquisition_value":
                expected_acquisition_value.tolist()
            },
        )
Example #18
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,  # objective_directions
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        objective_thresholds: Optional[Tensor] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata,
               Optional[List[TCandidateMetadata]]]:
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel")

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get(Keys.SUBSET_MODEL, True):
            model, objective_weights, outcome_constraints, Ys = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                Ys=self.Ys,
            )
        else:
            Ys = self.Ys

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)
        botorch_rounding_func = get_rounding_func(rounding_func)
        if acf_options.get("random_scalarization", False) or acf_options.get(
                "chebyshev_scalarization", False):
            # If using a list of acquisition functions, the algorithm to generate
            # that list is configured by acquisition_function_kwargs.
            objective_weights_list = [
                randomize_objective_weights(objective_weights, **acf_options)
                for _ in range(n)
            ]
            acquisition_function_list = [
                self.acqf_constructor(  # pyre-ignore: [28]
                    model=model,
                    objective_weights=objective_weights,
                    outcome_constraints=outcome_constraints,
                    X_observed=X_observed,
                    X_pending=X_pending,
                    Ys=Ys,  # Required for Chebyshev scalarization calculations.
                    **acf_options,
                ) for objective_weights in objective_weights_list
            ]
            acquisition_function_list = [
                checked_cast(AcquisitionFunction, acq_function)
                for acq_function in acquisition_function_list
            ]
            # Multiple acquisition functions require a sequential optimizer, so
            # always use scipy_optimizer_list.
            # TODO(jej): Allow any optimizer.
            candidates, expected_acquisition_value = scipy_optimizer_list(
                acq_function_list=acquisition_function_list,
                bounds=bounds_,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        else:
            acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
                model=model,
                objective_weights=objective_weights,
                objective_thresholds=objective_thresholds,
                outcome_constraints=outcome_constraints,
                X_observed=X_observed,
                X_pending=X_pending,
                Ys=Ys,  # Required for qEHVI calculations.
                **acf_options,
            )
            acquisition_function = checked_cast(AcquisitionFunction,
                                                acquisition_function)
            # pyre-ignore: [28]
            candidates, expected_acquisition_value = self.acqf_optimizer(
                acq_function=checked_cast(AcquisitionFunction,
                                          acquisition_function),
                bounds=bounds_,
                n=n,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            {
                "expected_acquisition_value":
                expected_acquisition_value.tolist()
            },
            None,
        )
Example #19
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    ) -> Tuple[Tensor, Tensor]:
        """Generate new candidates.

        An initialized acquisition function can be passed in as
        model_gen_options["acquisition_function"].

        Args:
            n: Number of candidates to generate.
            bounds: A list of (lower, upper) tuples for each column of X.
            objective_weights: The objective is to maximize a weighted sum of
                the columns of f(x). These are the weights.
            outcome_constraints: A tuple of (A, b). For k outcome constraints
                and m outputs at f(x), A is (k x m) and b is (k x 1) such that
                A f(x) <= b. (Not used by single task models)
            linear_constraints: A tuple of (A, b). For k linear constraints on
                d-dimensional x, A is (k x d) and b is (k x 1) such that
                A x <= b.
            fixed_features: A map {feature_index: value} for features that
                should be fixed to a particular value during generation.
            pending_observations:  A list of m (k_i x d) feature tensors X
                for m outcomes and k_i pending observations for outcome i.
            model_gen_options: A config dictionary that can contain
                model-specific options.
            rounding_func: A function that rounds an optimization result
                appropriately (i.e., according to `round-trip` transformations).

        Returns:
            Tensor: `n x d`-dim Tensor of generated points.
            Tensor: `n`-dim Tensor of weights for each point.
        """
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
            model=self.model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
            X_pending=X_pending,
            **acf_options,
        )

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)
        if linear_constraints is not None:
            A, b = linear_constraints
            inequality_constraints = []
            k, d = A.shape
            for i in range(k):
                indices = A[i, :].nonzero().squeeze()
                coefficients = -A[i, indices]
                rhs = -b[i, 0]
                inequality_constraints.append((indices, coefficients, rhs))
        else:
            inequality_constraints = None

        botorch_rounding_func = get_rounding_func(rounding_func)

        candidates = self.acqf_optimizer(  # pyre-ignore: [28]
            acq_function=checked_cast(AcquisitionFunction,
                                      acquisition_function),
            bounds=bounds_,
            n=n,
            inequality_constraints=inequality_constraints,
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )
        return candidates.detach().cpu(), torch.ones(n, dtype=self.dtype)
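A worked sketch of the linear-constraint conversion in the body above: each row
of `A x <= b` has its signs flipped to match the `(indices, coefficients, rhs)`
triples BoTorch's optimizers expect, i.e. sum_j coefficients[j] * x[indices[j]] >= rhs:

import torch

A = torch.tensor([[1.0, -2.0, 0.0]])  # one constraint: x0 - 2*x1 <= 3
b = torch.tensor([[3.0]])
indices = A[0, :].nonzero().view(-1)  # tensor([0, 1])
coefficients = -A[0, indices]         # tensor([-1., 2.])
rhs = -b[0, 0]                        # tensor(-3.)
# -1*x0 + 2*x1 >= -3  is equivalent to  x0 - 2*x1 <= 3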
Example #20
 def run(self) -> "BatchTrial":
     return checked_cast(BatchTrial, super().run())
Example #21
 def f(self, x: np.ndarray) -> float:
     return checked_cast(float, aug_hartmann6(x))