Example #1
    def _make_evaluations_and_data(
        self,
        trial: BaseTrial,
        raw_data: Union[TEvaluationOutcome, Dict[str, TEvaluationOutcome]],
        metadata: Optional[Dict[str, Union[str, int]]],
        sample_sizes: Optional[Dict[str, int]] = None,
    ) -> Tuple[Dict[str, TEvaluationOutcome], Data]:
        """Formats given raw data as Ax evaluations and `Data`.

        Args:
            trial: Trial within the experiment.
            raw_data: Metric outcomes for 1-arm trials, map from arm name to
                metric outcomes for batched trials.
            metadata: Additional metadata to track about this run.
            sample_sizes: Integer sample size for 1-arm trials, dict from arm
                name to sample size for batched trials. Optional.
        """
        if isinstance(trial, BatchTrial):
            assert isinstance(  # pragma: no cover
                raw_data, dict
            ), "Raw data must be a dict for batched trials."
        elif isinstance(trial, Trial):
            arm_name = not_none(trial.arm).name
            raw_data = {arm_name: raw_data}  # pyre-ignore[9]
        else:  # pragma: no cover
            raise ValueError(f"Unexpected trial type: {type(trial)}.")
        assert isinstance(raw_data, dict)
        not_trial_arm_names = set(raw_data.keys()) - set(trial.arms_by_name.keys())
        if not_trial_arm_names:
            raise ValueError(
                f"Arms {not_trial_arm_names} are not part of trial #{trial.index}."
            )
        evaluations = {
            arm_name: raw_data_to_evaluation(
                raw_data=raw_data[arm_name], objective_name=self.objective_name
            )
            for arm_name in raw_data
        }
        data = data_from_evaluations(
            evaluations=evaluations,
            trial_index=trial.index,
            sample_sizes=sample_sizes or {},
            start_time=(
                checked_cast_optional(int, metadata.get("start_time"))
                if metadata is not None
                else None
            ),
            end_time=(
                checked_cast_optional(int, metadata.get("end_time"))
                if metadata is not None
                else None
            ),
        )
        return evaluations, data
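
The normalization above is the crux of this helper: single-arm raw data is wrapped into a per-arm dict, batched raw data must already be keyed by arm name, and unknown arm names are rejected. Below is a minimal, Ax-free sketch of that step; the arm names and metric values are hypothetical.

from typing import Dict, Tuple, Union

RawArmData = Dict[str, Tuple[float, float]]  # metric name -> (mean, SEM)


def normalize_raw_data(
    raw_data: Union[RawArmData, Dict[str, RawArmData]],
    trial_arm_names: Tuple[str, ...],
    is_batched: bool,
) -> Dict[str, RawArmData]:
    """Key raw data by arm name, mirroring the branching above."""
    if is_batched:
        assert isinstance(raw_data, dict), "Raw data must be a dict for batched trials."
        per_arm = raw_data  # already keyed by arm name
    else:
        per_arm = {trial_arm_names[0]: raw_data}  # single arm: wrap in a dict
    unknown = set(per_arm) - set(trial_arm_names)
    if unknown:
        raise ValueError(f"Arms {unknown} are not part of the trial.")
    return per_arm


# 1-arm trial: outcomes are passed directly and get keyed by the only arm.
print(normalize_raw_data({"accuracy": (0.91, 0.02)}, ("0_0",), is_batched=False))
# Batched trial: outcomes are already a map from arm name to metric outcomes.
print(normalize_raw_data({"0_0": {"accuracy": (0.91, 0.02)}}, ("0_0", "0_1"), is_batched=True))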
Example #2
    def complete_trial(
        self,
        trial_index: int,
        raw_data: TEvaluationOutcome,
        metadata: Optional[Dict[str, Union[str, int]]] = None,
        sample_size: Optional[int] = None,
    ) -> None:
        """
        Completes the trial with given metric values and adds optional metadata
        to it.

        Args:
            trial_index: Index of trial within the experiment.
            raw_data: Evaluation data for the trial. Can be a mapping from
                metric name to a tuple of mean and SEM, just a tuple of mean and
                SEM if only one metric in optimization, or just the mean if there
                is no SEM.  Can also be a list of (fidelities, mapping from
                metric name to a tuple of mean and SEM).
            metadata: Additional metadata to track about this run.
            sample_size: Number of samples collected for the underlying arm,
                optional.
        """
        assert isinstance(
            trial_index, int
        ), f"Trial index must be an int, got: {trial_index}."  # pragma: no cover
        trial = self._get_trial(trial_index=trial_index)
        if metadata is not None:
            trial._run_metadata = metadata

        arm_name = not_none(trial.arm).name
        evaluations = {
            arm_name: raw_data_to_evaluation(
                raw_data=raw_data, objective_name=self.objective_name
            )
        }
        sample_sizes = {arm_name: sample_size} if sample_size else {}
        data = data_from_evaluations(
            evaluations=evaluations,
            trial_index=trial.index,
            sample_sizes=sample_sizes,
            start_time=(checked_cast_optional(int, metadata.get("start_time"))
                        if metadata is not None else None),
            end_time=(checked_cast_optional(int, metadata.get("end_time"))
                      if metadata is not None else None),
        )
        # In service API, a trial may be completed multiple times (for multiple
        # metrics, for example).
        trial.mark_completed(allow_repeat_completion=True)
        self.experiment.attach_data(data)
        data_for_logging = _round_floats_for_logging(
            item=evaluations[next(iter(evaluations.keys()))]
        )
        logger.info(
            f"Completed trial {trial_index} with data: {data_for_logging}."
        )
        self._updated_trials.append(trial_index)
        self._save_experiment_and_generation_strategy_to_db_if_possible()
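
For reference, here is a hedged usage sketch of the raw_data formats the docstring above describes, assuming this method belongs to Ax's service-style AxClient; the experiment setup and metric values are purely illustrative.

from ax.service.ax_client import AxClient

ax_client = AxClient()
ax_client.create_experiment(
    name="demo_experiment",
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    objective_name="accuracy",
)
parameters, trial_index = ax_client.get_next_trial()

# Any one of these is a valid `raw_data` for a single-metric experiment:
ax_client.complete_trial(trial_index, raw_data={"accuracy": (0.91, 0.02)})
# ax_client.complete_trial(trial_index, raw_data=(0.91, 0.02))  # (mean, SEM)
# ax_client.complete_trial(trial_index, raw_data=0.91)          # mean only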
Example #3
    def __init__(
        self,
        experiment: Experiment,
        search_space: SearchSpace,
        data: Data,
        model: TorchModel,
        transforms: List[Type[Transform]],
        transform_configs: Optional[Dict[str, TConfig]] = None,
        torch_dtype: Optional[torch.dtype] = None,  # noqa T484
        torch_device: Optional[torch.device] = None,
        status_quo_name: Optional[str] = None,
        status_quo_features: Optional[ObservationFeatures] = None,
        optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
        fit_out_of_design: bool = False,
        objective_thresholds: Optional[TRefPoint] = None,
        default_model_gen_options: Optional[TConfig] = None,
    ) -> None:
        if (isinstance(experiment, MultiTypeExperiment)
                and objective_thresholds is not None):
            raise NotImplementedError(
                "Objective threshold dependent multi-objective optimization algos "
                "like EHVI are not yet supported for MultiTypeExperiments. "
                "Remove the objective threshold arg and use a compatible algorithm "
                "like ParEGO.")
        self._objective_metric_names = None
        # Optimization_config
        mooc = optimization_config or checked_cast_optional(
            MultiObjectiveOptimizationConfig, experiment.optimization_config)
        # Extract objective_thresholds from optimization_config, or inject it.
        if not mooc:
            raise ValueError(
                ("experiment must have an existing optimization_config "
                 "of type MultiObjectiveOptimizationConfig "
                 "or `optimization_config` must be passed as an argument."))
        if not isinstance(mooc, MultiObjectiveOptimizationConfig):
            mooc = not_none(
                MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
        if objective_thresholds:
            mooc = mooc.clone_with_args(
                objective_thresholds=objective_thresholds)

        optimization_config = mooc

        super().__init__(
            experiment=experiment,
            search_space=search_space,
            data=data,
            model=model,
            transforms=transforms,
            transform_configs=transform_configs,
            torch_dtype=torch_dtype,
            torch_device=torch_device,
            status_quo_name=status_quo_name,
            status_quo_features=status_quo_features,
            optimization_config=optimization_config,
            fit_out_of_design=fit_out_of_design,
            default_model_gen_options=default_model_gen_options,
        )
Example #4
    def best_out_of_sample_point(
        self,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        fidelity_features: Optional[List[int]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
        options: Optional[TConfig] = None,
    ) -> Tuple[Tensor, Tensor]:
        """Finds the best predicted point and the corresponding value of the
        appropriate best point acquisition function.
        """
        if fixed_features:
            # When have fixed features, need `FixedFeatureAcquisitionFunction`
            # which has peculiar instantiation (wraps another acquisition fn.),
            # so need to figure out how to handle.
            # TODO (ref: https://fburl.com/diff/uneqb3n9)
            raise NotImplementedError("Fixed features not yet supported.")

        options = options or {}
        acqf_class, acqf_options = pick_best_out_of_sample_point_acqf_class(
            outcome_constraints=outcome_constraints,
            seed_inner=checked_cast_optional(int, options.get(Keys.SEED_INNER, None)),
            qmc=checked_cast(bool, options.get(Keys.QMC, True)),
        )

        # Avoiding circular import between `Surrogate` and `Acquisition`.
        from ax.models.torch.botorch_modular.acquisition import Acquisition

        acqf = Acquisition(  # TODO: For multi-fidelity, might need diff. class.
            surrogate=self,
            botorch_acqf_class=acqf_class,
            bounds=bounds,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
            options=acqf_options,
        )
        candidates, acqf_values = acqf.optimize(
            # pyre-ignore[6]: Exp. Tensor, got List[Tuple[float, float]].
            # TODO: Fix typing of `bounds` in `TorchModel`-s.
            bounds=bounds,
            n=1,
            inequality_constraints=_to_inequality_constraints(
                linear_constraints=linear_constraints
            ),
            fixed_features=fixed_features,
        )
        return candidates[0], acqf_values[0]
Example #5
def _make_choice_param(name: str, representation: TParameterRepresentation,
                       parameter_type: Optional[str]) -> ChoiceParameter:
    values = representation["values"]
    assert isinstance(values, list) and len(values) > 1, (
        f"Cannot parse parameter {name}: for choice parameters, json representation"
        " should include a list of two or more values.")
    return ChoiceParameter(
        name=name,
        parameter_type=_to_parameter_type(values, parameter_type, name,
                                          "values"),
        values=values,
        is_ordered=checked_cast_optional(bool,
                                         representation.get("is_ordered")),
        is_fidelity=checked_cast(bool,
                                 representation.get("is_fidelity", False)),
        is_task=checked_cast(bool, representation.get("is_task", False)),
        target_value=representation.get("target_value",
                                        None),  # pyre-ignore[6]
        dependents=checked_cast_optional(
            dict, representation.get("dependents", None)),
    )
Example #6
    def __init__(
        self,
        experiment: Experiment,
        search_space: SearchSpace,
        data: Data,
        model: TorchModel,
        transforms: List[Type[Transform]],
        transform_configs: Optional[Dict[str, TConfig]] = None,
        torch_dtype: Optional[torch.dtype] = None,  # noqa T484
        torch_device: Optional[torch.device] = None,
        status_quo_name: Optional[str] = None,
        status_quo_features: Optional[ObservationFeatures] = None,
        optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
        fit_out_of_design: bool = False,
        objective_thresholds: Optional[TRefPoint] = None,
        default_model_gen_options: Optional[TConfig] = None,
    ) -> None:
        self._objective_metric_names = None
        # Optimization_config
        mooc = optimization_config or checked_cast_optional(
            MultiObjectiveOptimizationConfig, experiment.optimization_config)
        # Extract objective_thresholds from optimization_config, or inject it.
        if not mooc:
            raise ValueError(
                ("experiment must have an existing optimization_config "
                 "of type MultiObjectiveOptimizationConfig "
                 "or `optimization_config` must be passed as an argument."))
        if not isinstance(mooc, MultiObjectiveOptimizationConfig):
            raise ValueError(
                "optimization_config must be a MultiObjectiveOptimizationConfig."
            )
        if objective_thresholds:
            mooc = mooc.clone_with_args(
                objective_thresholds=objective_thresholds)

        optimization_config = mooc

        super().__init__(
            experiment=experiment,
            search_space=search_space,
            data=data,
            model=model,
            transforms=transforms,
            transform_configs=transform_configs,
            torch_dtype=torch_dtype,
            torch_device=torch_device,
            status_quo_name=status_quo_name,
            status_quo_features=status_quo_features,
            optimization_config=optimization_config,
            fit_out_of_design=fit_out_of_design,
            default_model_gen_options=default_model_gen_options,
        )
Example #7
def _get_multiobjective_optimization_config(
    modelbridge: modelbridge_module.array.ArrayModelBridge,
    optimization_config: Optional[OptimizationConfig] = None,
    objective_thresholds: Optional[TRefPoint] = None,
) -> MultiObjectiveOptimizationConfig:
    # Optimization_config
    mooc = optimization_config or checked_cast_optional(
        MultiObjectiveOptimizationConfig, modelbridge._optimization_config)
    if not mooc:
        raise ValueError(
            ("Experiment must have an existing optimization_config "
             "of type `MultiObjectiveOptimizationConfig` "
             "or `optimization_config` must be passed as an argument."))
    if not isinstance(mooc, MultiObjectiveOptimizationConfig):
        raise ValueError(
            "optimization_config must be a MultiObjectiveOptimizationConfig.")
    if objective_thresholds:
        mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)

    return mooc
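
An Ax-free sketch of the precedence this helper encodes: an explicitly passed config wins, otherwise the modelbridge's stored config is used, and explicitly passed objective thresholds override those on whichever config was chosen. The names below (ToyConfig, resolve_config) are illustrative, not Ax APIs.

from dataclasses import dataclass, replace
from typing import List, Optional


@dataclass
class ToyConfig:
    objective_thresholds: List[float]


def resolve_config(
    passed: Optional[ToyConfig],
    stored: Optional[ToyConfig],
    objective_thresholds: Optional[List[float]] = None,
) -> ToyConfig:
    config = passed or stored
    if config is None:
        raise ValueError(
            "A config must be stored on the modelbridge or passed explicitly."
        )
    if objective_thresholds:
        # Passed thresholds take precedence over those already on the config.
        config = replace(config, objective_thresholds=objective_thresholds)
    return config


print(resolve_config(None, ToyConfig([0.0, 0.0]), objective_thresholds=[1.0, 2.0]))
# -> ToyConfig(objective_thresholds=[1.0, 2.0])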
Example #8
    def _pareto_frontier(
        self,
        objective_thresholds: Optional[TRefPoint] = None,
        observation_features: Optional[List[ObservationFeatures]] = None,
        observation_data: Optional[List[ObservationData]] = None,
        optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
    ) -> List[ObservationData]:
        # TODO(jej): This method should be refactored to move tensor
        # conversions into a separate utility, and eventually should be
        # moved into base.py.
        # The reason this method is currently implemented in array.py is to
        # allow the broadest possible set of models to call frontier and
        # hypervolume evaluation functions given the current API.
        X = (self.transform_observation_features(observation_features)
             if observation_features else None)
        X = self._array_to_tensor(X) if X is not None else None
        Y, Yvar = (None, None)
        if observation_data:
            Y, Yvar = self.transform_observation_data(observation_data)
        if Y is not None and Yvar is not None:
            Y, Yvar = (self._array_to_tensor(Y), self._array_to_tensor(Yvar))

        # Optimization_config
        mooc = optimization_config or checked_cast_optional(
            MultiObjectiveOptimizationConfig, self._optimization_config)
        if not mooc:
            raise ValueError(
                ("experiment must have an existing optimization_config "
                 "of type MultiObjectiveOptimizationConfig "
                 "or `optimization_config` must be passed as an argument."))
        if not isinstance(mooc, MultiObjectiveOptimizationConfig):
            mooc = not_none(
                MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
        if objective_thresholds:
            mooc = mooc.clone_with_args(
                objective_thresholds=objective_thresholds)

        optimization_config = mooc

        # Transform OptimizationConfig.
        optimization_config = self.transform_optimization_config(
            optimization_config=optimization_config,
            fixed_features=ObservationFeatures(parameters={}),
        )
        # Extract weights, constraints, and objective_thresholds
        objective_weights = extract_objective_weights(
            objective=optimization_config.objective, outcomes=self.outcomes)
        outcome_constraints = extract_outcome_constraints(
            outcome_constraints=optimization_config.outcome_constraints,
            outcomes=self.outcomes,
        )
        objective_thresholds_arr = extract_objective_thresholds(
            objective_thresholds=optimization_config.objective_thresholds,
            outcomes=self.outcomes,
        )
        # Transform to tensors.
        obj_w, oc_c, _, _ = validate_and_apply_final_transform(
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=None,
            pending_observations=None,
            final_transform=self._array_to_tensor,
        )
        obj_t = self._array_to_tensor(objective_thresholds_arr)
        frontier_evaluator = self._get_frontier_evaluator()
        # pyre-ignore[28]: Unexpected keyword `model` to anonymous call
        f, cov = frontier_evaluator(
            model=self.model,
            X=X,
            Y=Y,
            Yvar=Yvar,
            objective_thresholds=obj_t,
            objective_weights=obj_w,
            outcome_constraints=oc_c,
        )
        f = f.detach().cpu().clone().numpy()
        cov = cov.detach().cpu().clone().numpy()
        frontier_observation_data = array_to_observation_data(
            f=f, cov=cov, outcomes=not_none(self.outcomes))
        # Untransform observations
        for t in reversed(self.transforms.values()):  # noqa T484
            frontier_observation_data = t.untransform_observation_data(
                frontier_observation_data, [])
        return frontier_observation_data
Example #9
    def test_checked_cast_optional(self):
        self.assertEqual(checked_cast_optional(float, None), None)
        with self.assertRaises(ValueError):
            checked_cast_optional(float, 2)
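
The behavior the test exercises can be summed up in a small sketch (not Ax's actual implementation): None passes through untouched, and any other value must be an instance of the requested type, so the int 2 fails a cast to float.

from typing import Optional, Type, TypeVar

T = TypeVar("T")


def checked_cast(typ: Type[T], val: object) -> T:
    if not isinstance(val, typ):
        raise ValueError(f"Value was not of type {typ}: {val!r}")
    return val


def checked_cast_optional(typ: Type[T], val: object) -> Optional[T]:
    return None if val is None else checked_cast(typ, val)


assert checked_cast_optional(float, None) is None
assert checked_cast_optional(float, 2.0) == 2.0
# checked_cast_optional(float, 2) raises ValueError: 2 is an int, not a float.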
Example #10
def get_pareto_frontier_and_transformed_configs(
    modelbridge: modelbridge_module.array.ArrayModelBridge,
    observation_features: List[ObservationFeatures],
    observation_data: Optional[List[ObservationData]] = None,
    objective_thresholds: Optional[TRefPoint] = None,
    optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
    arm_names: Optional[List[Optional[str]]] = None,
    use_model_predictions: bool = True,
) -> Tuple[List[Observation], Tensor, Tensor, Optional[Tensor]]:
    """Helper that applies transforms and calls frontier_evaluator.

    Returns transformed configs in addition to the Pareto observations.

    Args:
        modelbridge: Modelbridge used to predict metrics outcomes.
        observation_features: Observation features to predict, if provided and
            `use_model_predictions` is True.
        observation_data: Data for computing the Pareto front, unless features
            are provided and `use_model_predictions` is True.
        objective_thresholds: Metric values bounding the region of interest in
            the objective outcome space.
        optimization_config: Optimization config.
        arm_names: Arm names for each observation.
        use_model_predictions: If True, will use model predictions at
            observation_features to compute Pareto front, if provided. If False,
            will use observation_data directly to compute Pareto front, regardless
            of whether observation_features are provided.

    Returns:
        frontier_observations: Observations of points on the Pareto frontier.
        f: An `n x m` tensor representation of the Pareto frontier values, where
            `n` is the length of `frontier_observations` and `m` is the number
            of metrics.
        obj_w: An `m` tensor of objective weights.
        obj_t: An `m` tensor of objective thresholds corresponding to `Y`, or
            None if no objective thresholds are used.
    """

    array_to_tensor = partial(_array_to_tensor, modelbridge=modelbridge)
    X = (modelbridge.transform_observation_features(observation_features)
         if use_model_predictions else None)
    X = array_to_tensor(X) if X is not None else None
    Y, Yvar = (None, None)
    if observation_data is not None:
        Y, Yvar = modelbridge.transform_observation_data(observation_data)
        Y, Yvar = (array_to_tensor(Y), array_to_tensor(Yvar))
    if arm_names is None:
        arm_names = [None] * len(observation_features)

    # Optimization_config
    mooc = optimization_config or checked_cast_optional(
        MultiObjectiveOptimizationConfig, modelbridge._optimization_config)
    if not mooc:
        raise ValueError(
            ("Experiment must have an existing optimization_config "
             "of type `MultiObjectiveOptimizationConfig` "
             "or `optimization_config` must be passed as an argument."))
    if not isinstance(mooc, MultiObjectiveOptimizationConfig):
        mooc = not_none(MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
    if objective_thresholds:
        mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)

    optimization_config = mooc

    # Transform OptimizationConfig.
    optimization_config = modelbridge.transform_optimization_config(
        optimization_config=optimization_config,
        fixed_features=ObservationFeatures(parameters={}),
    )
    # Extract weights, constraints, and objective_thresholds
    objective_weights = extract_objective_weights(
        objective=optimization_config.objective, outcomes=modelbridge.outcomes)
    outcome_constraints = extract_outcome_constraints(
        outcome_constraints=optimization_config.outcome_constraints,
        outcomes=modelbridge.outcomes,
    )
    obj_t = extract_objective_thresholds(
        objective_thresholds=optimization_config.objective_thresholds,
        objective=optimization_config.objective,
        outcomes=modelbridge.outcomes,
    )
    obj_t = array_to_tensor(obj_t)
    # Transform to tensors.
    obj_w, oc_c, _, _, _ = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=None,
        pending_observations=None,
        final_transform=array_to_tensor,
    )
    frontier_evaluator = get_default_frontier_evaluator()
    # pyre-ignore[28]: Unexpected keyword `modelbridge` to anonymous call
    f, cov, indx = frontier_evaluator(
        model=modelbridge.model,
        X=X,
        Y=Y,
        Yvar=Yvar,
        objective_thresholds=obj_t,
        objective_weights=obj_w,
        outcome_constraints=oc_c,
    )
    f, cov = f.detach().cpu().clone(), cov.detach().cpu().clone()
    indx = indx.tolist()
    frontier_observation_data = array_to_observation_data(
        f=f.numpy(), cov=cov.numpy(), outcomes=not_none(modelbridge.outcomes))
    # Untransform observations
    for t in reversed(modelbridge.transforms.values()):  # noqa T484
        frontier_observation_data = t.untransform_observation_data(
            frontier_observation_data, [])
    # Construct observations
    frontier_observations = []
    for i, obsd in enumerate(frontier_observation_data):
        frontier_observations.append(
            Observation(
                features=observation_features[indx[i]],
                data=obsd,
                arm_name=arm_names[indx[i]],
            ))
    return frontier_observations, f, obj_w, obj_t
def pareto_frontier(
    modelbridge: modelbridge_module.array.ArrayModelBridge,
    objective_thresholds: Optional[TRefPoint] = None,
    observation_features: Optional[List[ObservationFeatures]] = None,
    observation_data: Optional[List[ObservationData]] = None,
    optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
) -> List[ObservationData]:
    """Helper that applies transforms and calls frontier_evaluator."""
    array_to_tensor = partial(_array_to_tensor, modelbridge=modelbridge)
    X = (modelbridge.transform_observation_features(observation_features)
         if observation_features else None)
    X = array_to_tensor(X) if X is not None else None
    Y, Yvar = (None, None)
    if observation_data:
        Y, Yvar = modelbridge.transform_observation_data(observation_data)
    if Y is not None and Yvar is not None:
        Y, Yvar = (array_to_tensor(Y), array_to_tensor(Yvar))

    # Optimization_config
    mooc = optimization_config or checked_cast_optional(
        MultiObjectiveOptimizationConfig, modelbridge._optimization_config)
    if not mooc:
        raise ValueError(
            ("Experiment must have an existing optimization_config "
             "of type `MultiObjectiveOptimizationConfig` "
             "or `optimization_config` must be passed as an argument."))
    if not isinstance(mooc, MultiObjectiveOptimizationConfig):
        mooc = not_none(MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
    if objective_thresholds:
        mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)

    optimization_config = mooc

    # Transform OptimizationConfig.
    optimization_config = modelbridge.transform_optimization_config(
        optimization_config=optimization_config,
        fixed_features=ObservationFeatures(parameters={}),
    )
    # Extract weights, constraints, and objective_thresholds
    objective_weights = extract_objective_weights(
        objective=optimization_config.objective, outcomes=modelbridge.outcomes)
    outcome_constraints = extract_outcome_constraints(
        outcome_constraints=optimization_config.outcome_constraints,
        outcomes=modelbridge.outcomes,
    )
    objective_thresholds_arr = extract_objective_thresholds(
        objective_thresholds=optimization_config.objective_thresholds,
        outcomes=modelbridge.outcomes,
    )
    # Transform to tensors.
    obj_w, oc_c, _, _ = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=None,
        pending_observations=None,
        final_transform=array_to_tensor,
    )
    obj_t = array_to_tensor(objective_thresholds_arr)
    frontier_evaluator = get_default_frontier_evaluator()
    # pyre-ignore[28]: Unexpected keyword `modelbridge` to anonymous call
    f, cov = frontier_evaluator(
        model=modelbridge.model,
        X=X,
        Y=Y,
        Yvar=Yvar,
        objective_thresholds=obj_t,
        objective_weights=obj_w,
        outcome_constraints=oc_c,
    )
    f = f.detach().cpu().clone().numpy()
    cov = cov.detach().cpu().clone().numpy()
    frontier_observation_data = array_to_observation_data(
        f=f, cov=cov, outcomes=not_none(modelbridge.outcomes))
    # Untransform observations
    for t in reversed(modelbridge.transforms.values()):  # noqa T484
        frontier_observation_data = t.untransform_observation_data(
            frontier_observation_data, [])
    return frontier_observation_data
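
Both frontier helpers ultimately delegate the domination logic to a frontier evaluator. As a self-contained illustration of what that filtering means (maximization, with optional objective thresholds bounding the region of interest), here is a small NumPy sketch; it is not Ax's frontier_evaluator implementation.

from typing import Optional

import numpy as np


def pareto_mask(Y: np.ndarray, thresholds: Optional[np.ndarray] = None) -> np.ndarray:
    """Boolean mask of rows of Y (n x m, higher is better) on the Pareto frontier."""
    feasible = np.ones(len(Y), dtype=bool)
    if thresholds is not None:
        # Objective thresholds bound the region of interest.
        feasible = (Y >= thresholds).all(axis=1)
    mask = feasible.copy()
    for i in range(len(Y)):
        if not mask[i]:
            continue
        # Point i is dominated if some point is >= in every metric and > in at least one.
        dominates_i = (Y >= Y[i]).all(axis=1) & (Y > Y[i]).any(axis=1)
        if dominates_i.any():
            mask[i] = False
    return mask


Y = np.array([[1.0, 3.0], [3.0, 1.0], [2.0, 2.0]])
print(pareto_mask(Y))                        # [ True  True  True]: no point dominates another
print(pareto_mask(Y, np.array([1.5, 1.5])))  # [False False  True]: thresholds drop the rest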