Example #1
    def testConstraintValidation(self):
        # Cannot build with non-MultiObjective
        with self.assertRaises(ValueError):
            MultiObjectiveOptimizationConfig(objective=self.objective)

        # Two outcome_constraints on the same metric with the same op
        # should raise.
        duplicate_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=self.outcome_constraint.op,
            bound=self.outcome_constraint.bound + 1,
        )
        with self.assertRaises(ValueError):
            MultiObjectiveOptimizationConfig(
                objective=self.multi_objective,
                outcome_constraints=[self.outcome_constraint, duplicate_constraint],
            )

        # Three outcome_constraints on the same metric should raise.
        opposing_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=not self.outcome_constraint.op,
            bound=self.outcome_constraint.bound,
        )
        with self.assertRaises(ValueError):
            MultiObjectiveOptimizationConfig(
                objective=self.multi_objective,
                outcome_constraints=self.outcome_constraints + [opposing_constraint],
            )

        # Two outcome_constraints on the same metric with different ops and
        # incompatible bounds (lower bound above the upper bound) should raise.
        add_bound = 1 if self.outcome_constraint.op == ComparisonOp.LEQ else -1
        opposing_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=not self.outcome_constraint.op,
            bound=self.outcome_constraint.bound + add_bound,
        )
        with self.assertRaises(ValueError):
            MultiObjectiveOptimizationConfig(
                objective=self.multi_objective,
                outcome_constraints=([self.outcome_constraint, opposing_constraint]),
            )

        # Two outcome_constraints on the same metric with different ops and
        # bounds should not raise.
        opposing_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=not self.outcome_constraint.op,
            bound=self.outcome_constraint.bound + 1,
        )
        config = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective,
            outcome_constraints=([self.outcome_constraint, opposing_constraint]),
        )
        self.assertEqual(
            config.outcome_constraints, [self.outcome_constraint, opposing_constraint]
        )
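The test above relies on fixtures (self.objective, self.multi_objective, self.outcome_constraint, self.outcome_constraints) defined in the suite's setUp. A minimal standalone sketch of such fixtures, assuming the standard Ax core import paths and using illustrative metric names, ops, and bounds, might look like:

from ax.core.metric import Metric
from ax.core.objective import MultiObjective, Objective
from ax.core.outcome_constraint import OutcomeConstraint
from ax.core.types import ComparisonOp

# Illustrative metrics; names, ops, and bounds are placeholders.
m1, m2 = Metric(name="m1"), Metric(name="m2")
objective = Objective(metric=m1, minimize=False)  # plain Objective, rejected by the config
multi_objective = MultiObjective(
    objectives=[Objective(metric=m1, minimize=False), Objective(metric=m2, minimize=True)]
)
outcome_constraint = OutcomeConstraint(
    metric=m2, op=ComparisonOp.GEQ, bound=-0.25, relative=True
)
outcome_constraints = [outcome_constraint]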
Example #2
 def testCloneWithArgs(self):
     config1 = MultiObjectiveOptimizationConfig(
         objective=self.multi_objective_just_m2,
         outcome_constraints=[self.m1_constraint],
     )
     config2 = MultiObjectiveOptimizationConfig(
         objective=self.multi_objective_just_m2,
         objective_thresholds=self.objective_thresholds,
         outcome_constraints=[self.m1_constraint],
     )
     self.assertEqual(
         config1.clone_with_args(objective_thresholds=self.objective_thresholds),
         config2,
     )
Example #3
def optimization_config_from_objectives(
    objectives: List[Objective],
    objective_thresholds: List[ObjectiveThreshold],
    outcome_constraints: List[OutcomeConstraint],
) -> OptimizationConfig:
    """Parse objectives and constraints to define optimization config.

    The resulting optimization config will be regular single-objective config
    if `objectives` is a list of one element and a multi-objective config
    otherwise.

    NOTE: If passing in multiple objectives, `objective_thresholds` should be a
    non-empty list defining a constraint for each objective; if it is empty, a
    heuristic will be used to select the thresholds.
    """
    if len(objectives) == 1:
        if objective_thresholds:
            raise ValueError(
                "Single-objective optimizations must not specify objective thresholds."
            )
        return OptimizationConfig(
            objective=objectives[0],
            outcome_constraints=outcome_constraints,
        )

    if not objective_thresholds:
        logger.info(
            "Objective thresholds were not specified; using a heuristic to "
            "select them."
        )

    return MultiObjectiveOptimizationConfig(
        objective=MultiObjective(objectives=objectives),
        outcome_constraints=outcome_constraints,
        objective_thresholds=objective_thresholds,
    )
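A hedged usage sketch of the helper above, assuming the standard Ax core import paths; metric names, ops, and bounds are illustrative:

from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.outcome_constraint import ObjectiveThreshold
from ax.core.types import ComparisonOp

m1, m2 = Metric(name="m1"), Metric(name="m2")

# One objective and no thresholds -> a plain OptimizationConfig.
single = optimization_config_from_objectives(
    objectives=[Objective(metric=m1, minimize=False)],
    objective_thresholds=[],
    outcome_constraints=[],
)

# Two objectives with a threshold each -> a MultiObjectiveOptimizationConfig.
multi = optimization_config_from_objectives(
    objectives=[Objective(metric=m1, minimize=False), Objective(metric=m2, minimize=True)],
    objective_thresholds=[
        ObjectiveThreshold(metric=m1, bound=1.0, op=ComparisonOp.GEQ, relative=False),
        ObjectiveThreshold(metric=m2, bound=2.0, op=ComparisonOp.LEQ, relative=False),
    ],
    outcome_constraints=[],
)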
Example #4
    def opt_config_and_tracking_metrics_from_sqa(
        self, metrics_sqa: List[SQAMetric]
    ) -> Tuple[Optional[OptimizationConfig], List[Metric]]:
        """Convert a list of SQLAlchemy Metrics to a a tuple of Ax OptimizationConfig
        and tracking metrics.
        """
        objective = None
        objective_thresholds = []
        outcome_constraints = []
        tracking_metrics = []
        for metric_sqa in metrics_sqa:
            metric = self.metric_from_sqa(metric_sqa=metric_sqa)
            if isinstance(metric, Objective):
                objective = metric
            elif isinstance(metric, ObjectiveThreshold):
                objective_thresholds.append(metric)
            elif isinstance(metric, OutcomeConstraint):
                outcome_constraints.append(metric)
            else:
                tracking_metrics.append(metric)

        if objective is None:
            return None, tracking_metrics

        if objective_thresholds or type(objective) == MultiObjective:
            optimization_config = MultiObjectiveOptimizationConfig(
                objective=objective,
                outcome_constraints=outcome_constraints,
                objective_thresholds=objective_thresholds,
            )
        else:
            optimization_config = OptimizationConfig(
                objective=objective, outcome_constraints=outcome_constraints)
        return (optimization_config, tracking_metrics)
Example #5
 def testTransformOptimizationConfigMOO(self):
     m1 = Metric(name="m1", lower_is_better=False)
     m2 = Metric(name="m2", lower_is_better=True)
     mo = MultiObjective(
         objectives=[
             Objective(metric=m1, minimize=False),
             Objective(metric=m2, minimize=True),
         ],
     )
     objective_thresholds = [
         ObjectiveThreshold(metric=m1, bound=1.234, relative=False),
         ObjectiveThreshold(metric=m2, bound=3.456, relative=False),
     ]
     oc = MultiObjectiveOptimizationConfig(
         objective=mo,
         objective_thresholds=objective_thresholds,
     )
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     oc.objective_thresholds[0].bound = math.log(1.234)
     self.assertEqual(oc_tf, oc)
Example #6
def get_branin_multi_objective_optimization_config(
    has_objective_thresholds: bool = False,
) -> OptimizationConfig:
    objective_thresholds = (
        [
            ObjectiveThreshold(
                metric=get_branin_metric(name="branin_a"),
                bound=10,
                op=ComparisonOp.GEQ,
                relative=False,
            ),
            ObjectiveThreshold(
                metric=get_branin_metric(name="branin_b"),
                bound=20,
                op=ComparisonOp.GEQ,
                relative=False,
            ),
        ]
        if has_objective_thresholds
        else None
    )
    return MultiObjectiveOptimizationConfig(
        objective=get_branin_multi_objective(),
        objective_thresholds=objective_thresholds,
    )
Example #7
def get_branin_multi_objective_optimization_config(
    has_objective_thresholds: bool = False,
    num_objectives: int = 2,
) -> MultiObjectiveOptimizationConfig:
    _validate_num_objectives(num_objectives=num_objectives)
    if has_objective_thresholds:
        objective_thresholds = [
            ObjectiveThreshold(
                metric=get_branin_metric(name="branin_a"),
                bound=10,
                op=ComparisonOp.GEQ,
                relative=False,
            ),
            ObjectiveThreshold(
                metric=get_branin_metric(name="branin_b"),
                bound=20,
                op=ComparisonOp.GEQ,
                relative=False,
            ),
        ]
        if num_objectives == 3:
            objective_thresholds.append(
                ObjectiveThreshold(
                    metric=get_branin_metric(name="branin_c"),
                    bound=5.0,
                    op=ComparisonOp.GEQ,
                    relative=False,
                ))
    else:
        objective_thresholds = None
    return MultiObjectiveOptimizationConfig(
        objective=get_branin_multi_objective(num_objectives=num_objectives),
        objective_thresholds=objective_thresholds,
    )
Example #8
    def testEq(self):
        config1 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective, outcome_constraints=self.outcome_constraints
        )
        config2 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective, outcome_constraints=self.outcome_constraints
        )
        self.assertEqual(config1, config2)

        new_outcome_constraint = OutcomeConstraint(
            metric=self.metrics["m3"], op=ComparisonOp.LEQ, bound=0.5
        )
        config3 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective,
            outcome_constraints=[self.outcome_constraint, new_outcome_constraint],
        )
        self.assertNotEqual(config1, config3)
Example #9
    def __init__(
        self,
        experiment: Experiment,
        search_space: SearchSpace,
        data: Data,
        model: TorchModel,
        transforms: List[Type[Transform]],
        transform_configs: Optional[Dict[str, TConfig]] = None,
        torch_dtype: Optional[torch.dtype] = None,  # noqa T484
        torch_device: Optional[torch.device] = None,
        status_quo_name: Optional[str] = None,
        status_quo_features: Optional[ObservationFeatures] = None,
        optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
        fit_out_of_design: bool = False,
        objective_thresholds: Optional[TRefPoint] = None,
        default_model_gen_options: Optional[TConfig] = None,
    ) -> None:
        if (isinstance(experiment, MultiTypeExperiment)
                and objective_thresholds is not None):
            raise NotImplementedError(
                "Objective threshold dependent multi-objective optimization algos "
                "like EHVI are not yet supported for MultiTypeExperiments. "
                "Remove the objective threshold arg and use a compatible algorithm "
                "like ParEGO.")
        self._objective_metric_names = None
        # Optimization_config
        mooc = optimization_config or checked_cast_optional(
            MultiObjectiveOptimizationConfig, experiment.optimization_config)
        # Extract objective_thresholds from optimization_config, or inject it.
        if not mooc:
            raise ValueError(
                ("experiment must have an existing optimization_config "
                 "of type MultiObjectiveOptimizationConfig "
                 "or `optimization_config` must be passed as an argument."))
        if not isinstance(mooc, MultiObjectiveOptimizationConfig):
            mooc = not_none(
                MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
        if objective_thresholds:
            mooc = mooc.clone_with_args(
                objective_thresholds=objective_thresholds)

        optimization_config = mooc

        super().__init__(
            experiment=experiment,
            search_space=search_space,
            data=data,
            model=model,
            transforms=transforms,
            transform_configs=transform_configs,
            torch_dtype=torch_dtype,
            torch_device=torch_device,
            status_quo_name=status_quo_name,
            status_quo_features=status_quo_features,
            optimization_config=optimization_config,
            fit_out_of_design=fit_out_of_design,
            default_model_gen_options=default_model_gen_options,
        )
Example #10
def get_multi_objective_optimization_config() -> OptimizationConfig:
    objective = get_multi_objective()
    outcome_constraints = [get_outcome_constraint()]
    objective_thresholds = [get_objective_threshold()]
    return MultiObjectiveOptimizationConfig(
        objective=objective,
        outcome_constraints=outcome_constraints,
        objective_thresholds=objective_thresholds,
    )
Example #11
    def transform_optimization_config(
        self,
        optimization_config: OptimizationConfig,
        modelbridge: Optional[modelbridge_module.base.ModelBridge],
        fixed_features: ObservationFeatures,
    ) -> OptimizationConfig:
        r"""
        Change the relative flag of the given relative optimization configuration
        to False. This is needed in order for the new opt config to pass ModelBridge
        that requires non-relativized opt config.

        Args:
            opt_config: Optimization configuaration relative to status quo.

        Returns:
            Optimization configuration relative to status quo with relative flag
            equal to false.

        """
        # Getting constraints
        constraints = [
            constraint.clone()
            for constraint in optimization_config.outcome_constraints
        ]
        if not all(constraint.relative
                   for constraint in optimization_config.outcome_constraints):
            raise ValueError(
                "All constraints must be relative to use the Relativize transform."
            )
        for constraint in constraints:
            constraint.relative = False

        if isinstance(optimization_config, MultiObjectiveOptimizationConfig):
            # Getting objective thresholds
            obj_thresholds = [
                obj_threshold.clone()
                for obj_threshold in optimization_config.objective_thresholds
            ]
            for obj_threshold in obj_thresholds:
                if not obj_threshold.relative:
                    raise ValueError(
                        "All objective thresholds must be relative to use "
                        "the Relativize transform.")
                obj_threshold.relative = False

            new_optimization_config = MultiObjectiveOptimizationConfig(
                objective=optimization_config.objective,
                outcome_constraints=constraints,
                objective_thresholds=obj_thresholds,
            )
        else:
            new_optimization_config = OptimizationConfig(
                objective=optimization_config.objective,
                outcome_constraints=constraints,
            )

        return new_optimization_config
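The core of the transform above is the clone-and-flip step on relative constraints. A standalone sketch of that step, outside the transform class, assuming the standard Ax core import paths (the config below is illustrative):

from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import OutcomeConstraint
from ax.core.types import ComparisonOp

relative_config = OptimizationConfig(
    objective=Objective(metric=Metric(name="m1"), minimize=True),
    outcome_constraints=[
        OutcomeConstraint(
            metric=Metric(name="m2"), op=ComparisonOp.GEQ, bound=-5.0, relative=True
        )
    ],
)

# Clone the constraints so the original config is untouched, then mark the
# clones as absolute, mirroring the loop in the method above.
constraints = [c.clone() for c in relative_config.outcome_constraints]
for c in constraints:
    c.relative = False
derelativized = OptimizationConfig(
    objective=relative_config.objective, outcome_constraints=constraints
)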
Example #12
 def test_feasible_hypervolume(self):
     ma = Metric(name="a", lower_is_better=False)
     mb = Metric(name="b", lower_is_better=True)
     mc = Metric(name="c", lower_is_better=False)
     optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(metrics=[ma, mb]),
         outcome_constraints=[
             OutcomeConstraint(
                 mc,
                 op=ComparisonOp.GEQ,
                 bound=0,
                 relative=False,
             )
         ],
         objective_thresholds=[
             ObjectiveThreshold(
                 ma,
                 bound=1.0,
             ),
             ObjectiveThreshold(
                 mb,
                 bound=1.0,
             ),
         ],
     )
     feas_hv = feasible_hypervolume(
         optimization_config,
         values={
             "a": np.array(
                 [
                     1.0,
                     3.0,
                     2.0,
                     2.0,
                 ]
             ),
             "b": np.array(
                 [
                     0.0,
                     1.0,
                     0.0,
                     0.0,
                 ]
             ),
             "c": np.array(
                 [
                     0.0,
                     -0.0,
                     1.0,
                     -2.0,
                 ]
             ),
         },
     )
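     # The trace is cumulative: the first point sits on the a-threshold and the
     # second on the b-threshold, so neither adds hypervolume; the third point
     # (a=2, b=0, c=1) is feasible and dominates an area of 1; the fourth
     # violates the c >= 0 constraint, so the total stays at 1.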
     self.assertEqual(list(feas_hv), [0.0, 0.0, 1.0, 1.0])
Example #13
 def test_MOO_with_more_outcomes_than_thresholds(self):
     experiment = get_branin_experiment_with_multi_objective(
         has_optimization_config=False)
     metric_c = Metric(name="c", lower_is_better=False)
     metric_a = Metric(name="a", lower_is_better=False)
     objective_thresholds = [
         ObjectiveThreshold(
             metric=metric_c,
             bound=2.0,
             relative=False,
         ),
         ObjectiveThreshold(
             metric=metric_a,
             bound=1.0,
             relative=False,
         ),
     ]
     experiment.optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(objectives=[
             Objective(metric=metric_a),
             Objective(metric=metric_c),
         ]),
         objective_thresholds=objective_thresholds,
     )
     experiment.add_tracking_metric(Metric(name="b", lower_is_better=False))
     sobol = get_sobol(search_space=experiment.search_space, )
     sobol_run = sobol.gen(1)
     experiment.new_batch_trial().add_generator_run(
         sobol_run).run().mark_completed()
     data = Data(
         pd.DataFrame(
             data={
                 "arm_name": ["0_0", "0_0", "0_0"],
                 "metric_name": ["a", "b", "c"],
                 "mean": [1.0, 2.0, 3.0],
                 "trial_index": [0, 0, 0],
                 "sem": [0, 0, 0],
             }))
     test_names_to_fns = {
         "MOO_NEHVI": get_MOO_NEHVI,
         "MOO_EHVI": get_MOO_NEHVI,
         "MOO_PAREGO": get_MOO_PAREGO,
         "MOO_RS": get_MOO_RS,
     }
     for test_name, factory_fn in test_names_to_fns.items():
         with self.subTest(test_name):
             moo_model = factory_fn(
                 experiment=experiment,
                 data=data,
             )
             moo_gr = moo_model.gen(n=1)
             obj_t = moo_gr.gen_metadata["objective_thresholds"]
             self.assertEqual(obj_t[0], objective_thresholds[1])
             self.assertEqual(obj_t[1], objective_thresholds[0])
             self.assertEqual(len(obj_t), 2)
Example #14
def get_multi_objective_optimization_config() -> OptimizationConfig:
    objective = get_multi_objective()
    outcome_constraints = [get_outcome_constraint()]
    objective_thresholds = [
        get_objective_threshold(metric_name="m1"),
        get_objective_threshold(metric_name="m3", comparison_op=ComparisonOp.LEQ),
    ]
    return MultiObjectiveOptimizationConfig(
        objective=objective,
        outcome_constraints=outcome_constraints,
        objective_thresholds=objective_thresholds,
    )
Example #15
def _build_new_optimization_config(weights,
                                   primary_objective,
                                   secondary_objective,
                                   outcome_constraints=None):
    obj = ScalarizedObjective(
        metrics=[primary_objective, secondary_objective],
        weights=weights,
        minimize=False,
    )
    optimization_config = MultiObjectiveOptimizationConfig(
        objective=obj, outcome_constraints=outcome_constraints)
    return optimization_config
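A hedged usage sketch of the helper above; the metric names and weights are illustrative and assume the standard Ax metric import:

from ax.core.metric import Metric

config = _build_new_optimization_config(
    weights=[0.7, 0.3],
    primary_objective=Metric(name="primary"),
    secondary_objective=Metric(name="secondary"),
)
# config.objective is a ScalarizedObjective combining both metrics with the given weights.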
Example #16
 def test_choose_generation_strategy(self):
     with self.subTest("GPEI"):
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space()
         )
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "GPEI")
     with self.subTest("MOO"):
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space(),
             optimization_config=MultiObjectiveOptimizationConfig(
                 objective=MultiObjective(objectives=[])
             ),
         )
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "MOO")
     with self.subTest("Sobol (we can try every option)"):
         sobol = choose_generation_strategy(
             search_space=get_factorial_search_space(), num_trials=1000
         )
         self.assertEqual(sobol._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol._steps), 1)
     with self.subTest("Sobol (because of too many categories)"):
         sobol_large = choose_generation_strategy(
             search_space=get_large_factorial_search_space()
         )
         self.assertEqual(sobol_large._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol_large._steps), 1)
     with self.subTest("GPEI-Batched"):
         sobol_gpei_batched = choose_generation_strategy(
             search_space=get_branin_search_space(), use_batch_trials=3
         )
         self.assertEqual(sobol_gpei_batched._steps[0].num_trials, 1)
     with self.subTest("BO_MIXED (purely categorical)"):
         bo_mixed = choose_generation_strategy(
             search_space=get_factorial_search_space()
         )
         self.assertEqual(bo_mixed._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed._steps[0].num_trials, 5)
         self.assertEqual(bo_mixed._steps[1].model.value, "BO_MIXED")
     with self.subTest("BO_MIXED (mixed search space)"):
         bo_mixed_2 = choose_generation_strategy(
             search_space=get_branin_search_space(with_choice_parameter=True)
         )
         self.assertEqual(bo_mixed_2._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed_2._steps[0].num_trials, 5)
         self.assertEqual(bo_mixed_2._steps[1].model.value, "BO_MIXED")
Example #17
    def testClone(self):
        config1 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective, outcome_constraints=self.outcome_constraints
        )
        self.assertEqual(config1, config1.clone())

        config2 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective,
            objective_thresholds=self.objective_thresholds,
        )
        self.assertEqual(config2, config2.clone())
Example #18
    def testHasGoodOptConfigModelFit(self):
        # Construct diagnostics
        result = []
        for i, obs in enumerate(self.training_data):
            result.append(
                CVResult(observed=obs, predicted=self.observation_data[i]))
        diag = compute_diagnostics(result=result)
        assess_model_fit_result = assess_model_fit(
            diagnostics=diag,
            significance_level=0.05,
        )

        # Test single objective
        optimization_config = OptimizationConfig(objective=Objective(
            metric=Metric("a")))
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)

        # Test multi objective
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=MultiObjective(
                metrics=[Metric("a"), Metric("b")]))
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)

        # Test constraints
        optimization_config = OptimizationConfig(
            objective=Objective(metric=Metric("a")),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric("b"),
                                  op=ComparisonOp.GEQ,
                                  bound=0.1)
            ],
        )
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)
Example #19
def optimization_config_from_objectives(
    objectives: List[Metric],
    objective_thresholds: List[ObjectiveThreshold],
    outcome_constraints: List[OutcomeConstraint],
) -> OptimizationConfig:
    """Parse objectives and constraints to define optimization config.

    The resulting optimization config will be regular single-objective config
    if `objectives` is a list of one element and a multi-objective config
    otherwise.

    NOTE: If passing in multiple objectives, `objective_thresholds` must be a
    non-empty list defining constraints for each objective.
    """
    if len(objectives) == 1:
        if objective_thresholds:
            raise ValueError(
                "Single-objective optimizations must not specify objective thresholds."
            )
        return OptimizationConfig(
            objective=Objective(
                metric=objectives[0],
            ),
            outcome_constraints=outcome_constraints,
        )
    else:
        objective_names = {m.name for m in objectives}
        threshold_names = {oc.metric.name for oc in objective_thresholds}
        if objective_names != threshold_names:
            diff = objective_names.symmetric_difference(threshold_names)
            raise ValueError(
                "Multi-objective optimization requires one objective threshold "
                f"per objective metric; unmatched names are {diff}"
            )

        return MultiObjectiveOptimizationConfig(
            objective=MultiObjective(metrics=objectives),
            outcome_constraints=outcome_constraints,
            objective_thresholds=objective_thresholds,
        )
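Unlike the earlier variant, this version takes plain metrics and requires a threshold for every objective metric. A hedged sketch of the mismatch case, assuming the standard Ax core import paths and illustrative names:

from ax.core.metric import Metric
from ax.core.outcome_constraint import ObjectiveThreshold
from ax.core.types import ComparisonOp

m1, m2 = Metric(name="m1"), Metric(name="m2")
thresholds = [ObjectiveThreshold(metric=m1, bound=1.0, op=ComparisonOp.GEQ, relative=False)]

# Raises ValueError: m2 has no matching threshold.
optimization_config_from_objectives(
    objectives=[m1, m2],
    objective_thresholds=thresholds,
    outcome_constraints=[],
)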
Example #20
File: array.py Project: dme65/Ax
    def _pareto_frontier(
        self,
        objective_thresholds: Optional[TRefPoint] = None,
        observation_features: Optional[List[ObservationFeatures]] = None,
        observation_data: Optional[List[ObservationData]] = None,
        optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
    ) -> List[ObservationData]:
        # TODO(jej): This method should be refactored to move tensor
        # conversions into a separate utility, and eventually should be
        # moved into base.py.
        # The reason this method is currently implemented in array.py is to
        # allow the broadest possible set of models to call frontier and
        # hypervolume evaluation functions given the current API.
        X = (self.transform_observation_features(observation_features)
             if observation_features else None)
        X = self._array_to_tensor(X) if X is not None else None
        Y, Yvar = (None, None)
        if observation_data:
            Y, Yvar = self.transform_observation_data(observation_data)
        if Y is not None and Yvar is not None:
            Y, Yvar = (self._array_to_tensor(Y), self._array_to_tensor(Yvar))

        # Optimization_config
        mooc = optimization_config or checked_cast_optional(
            MultiObjectiveOptimizationConfig, self._optimization_config)
        if not mooc:
            raise ValueError(
                ("experiment must have an existing optimization_config "
                 "of type MultiObjectiveOptimizationConfig "
                 "or `optimization_config` must be passed as an argument."))
        if not isinstance(mooc, MultiObjectiveOptimizationConfig):
            mooc = not_none(
                MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
        if objective_thresholds:
            mooc = mooc.clone_with_args(
                objective_thresholds=objective_thresholds)

        optimization_config = mooc

        # Transform OptimizationConfig.
        optimization_config = self.transform_optimization_config(
            optimization_config=optimization_config,
            fixed_features=ObservationFeatures(parameters={}),
        )
        # Extract weights, constraints, and objective_thresholds
        objective_weights = extract_objective_weights(
            objective=optimization_config.objective, outcomes=self.outcomes)
        outcome_constraints = extract_outcome_constraints(
            outcome_constraints=optimization_config.outcome_constraints,
            outcomes=self.outcomes,
        )
        objective_thresholds_arr = extract_objective_thresholds(
            objective_thresholds=optimization_config.objective_thresholds,
            outcomes=self.outcomes,
        )
        # Transform to tensors.
        obj_w, oc_c, _, _ = validate_and_apply_final_transform(
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=None,
            pending_observations=None,
            final_transform=self._array_to_tensor,
        )
        obj_t = self._array_to_tensor(objective_thresholds_arr)
        frontier_evaluator = self._get_frontier_evaluator()
        # pyre-ignore[28]: Unexpected keyword `model` to anonymous call
        f, cov = frontier_evaluator(
            model=self.model,
            X=X,
            Y=Y,
            Yvar=Yvar,
            objective_thresholds=obj_t,
            objective_weights=obj_w,
            outcome_constraints=oc_c,
        )
        f, cov = (
            f.detach().cpu().clone().numpy(),
            cov.detach().cpu().clone().numpy(),
        )
        frontier_observation_data = array_to_observation_data(
            f=f, cov=cov, outcomes=not_none(self.outcomes))
        # Untransform observations
        for t in reversed(self.transforms.values()):  # noqa T484
            frontier_observation_data = t.untransform_observation_data(
                frontier_observation_data, [])
        return frontier_observation_data
Example #21
 def test_choose_generation_strategy(self):
     with self.subTest("GPEI"):
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space())
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "GPEI")
         self.assertIsNone(sobol_gpei._steps[1].model_kwargs)
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space(), verbose=True)
         self.assertIsNone(sobol_gpei._steps[1].model_kwargs)
     with self.subTest("MOO"):
         optimization_config = MultiObjectiveOptimizationConfig(
             objective=MultiObjective(objectives=[]))
         sobol_gpei = choose_generation_strategy(
             search_space=get_branin_search_space(),
             optimization_config=optimization_config,
         )
         self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
         self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
         self.assertEqual(sobol_gpei._steps[1].model.value, "MOO")
         model_kwargs = sobol_gpei._steps[1].model_kwargs
         self.assertEqual(list(model_kwargs.keys()),
                          ["transforms", "transform_configs"])
         self.assertGreater(len(model_kwargs["transforms"]), 0)
         transform_config_dict = {
             "Winsorize": {
                 "optimization_config": optimization_config
             }
         }
         self.assertEqual(model_kwargs["transform_configs"],
                          transform_config_dict)
     with self.subTest("Sobol (we can try every option)"):
         sobol = choose_generation_strategy(
             search_space=get_factorial_search_space(), num_trials=1000)
         self.assertEqual(sobol._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol._steps), 1)
     with self.subTest("Sobol (because of too many categories)"):
         ss = get_large_factorial_search_space()
         sobol_large = choose_generation_strategy(
             search_space=get_large_factorial_search_space(), verbose=True)
         self.assertEqual(sobol_large._steps[0].model.value, "Sobol")
         self.assertEqual(len(sobol_large._steps), 1)
     with self.subTest("GPEI-Batched"):
         sobol_gpei_batched = choose_generation_strategy(
             search_space=get_branin_search_space(), use_batch_trials=3)
         self.assertEqual(sobol_gpei_batched._steps[0].num_trials, 1)
     with self.subTest("BO_MIXED (purely categorical)"):
         bo_mixed = choose_generation_strategy(
             search_space=get_factorial_search_space())
         self.assertEqual(bo_mixed._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed._steps[0].num_trials, 6)
         self.assertEqual(bo_mixed._steps[1].model.value, "BO_MIXED")
         self.assertIsNone(bo_mixed._steps[1].model_kwargs)
     with self.subTest("BO_MIXED (mixed search space)"):
         ss = get_branin_search_space(with_choice_parameter=True)
         ss.parameters["x2"]._is_ordered = False
         bo_mixed_2 = choose_generation_strategy(search_space=ss)
         self.assertEqual(bo_mixed_2._steps[0].model.value, "Sobol")
         self.assertEqual(bo_mixed_2._steps[0].num_trials, 5)
         self.assertEqual(bo_mixed_2._steps[1].model.value, "BO_MIXED")
         self.assertIsNone(bo_mixed_2._steps[1].model_kwargs)
     with self.subTest("BO_MIXED (mixed multi-objective optimization)"):
         search_space = get_branin_search_space(with_choice_parameter=True)
         search_space.parameters["x2"]._is_ordered = False
         optimization_config = MultiObjectiveOptimizationConfig(
             objective=MultiObjective(objectives=[]))
         moo_mixed = choose_generation_strategy(
             search_space=search_space,
             optimization_config=optimization_config)
         self.assertEqual(moo_mixed._steps[0].model.value, "Sobol")
         self.assertEqual(moo_mixed._steps[0].num_trials, 5)
         self.assertEqual(moo_mixed._steps[1].model.value, "BO_MIXED")
         model_kwargs = moo_mixed._steps[1].model_kwargs
         self.assertEqual(list(model_kwargs.keys()),
                          ["transforms", "transform_configs"])
         self.assertGreater(len(model_kwargs["transforms"]), 0)
         transform_config_dict = {
             "Winsorize": {
                 "optimization_config": optimization_config
             }
         }
         self.assertEqual(model_kwargs["transform_configs"],
                          transform_config_dict)
     with self.subTest("SAASBO"):
         sobol_fullybayesian = choose_generation_strategy(
             search_space=get_branin_search_space(),
             use_batch_trials=True,
             num_initialization_trials=3,
             use_saasbo=True,
         )
         self.assertEqual(sobol_fullybayesian._steps[0].model.value,
                          "Sobol")
         self.assertEqual(sobol_fullybayesian._steps[0].num_trials, 3)
         self.assertEqual(sobol_fullybayesian._steps[1].model.value,
                          "FullyBayesian")
         self.assertTrue(
             sobol_fullybayesian._steps[1].model_kwargs["verbose"])
     with self.subTest("SAASBO MOO"):
         sobol_fullybayesianmoo = choose_generation_strategy(
             search_space=get_branin_search_space(),
             use_batch_trials=True,
             num_initialization_trials=3,
             use_saasbo=True,
             optimization_config=MultiObjectiveOptimizationConfig(
                 objective=MultiObjective(objectives=[])),
         )
         self.assertEqual(sobol_fullybayesianmoo._steps[0].model.value,
                          "Sobol")
         self.assertEqual(sobol_fullybayesianmoo._steps[0].num_trials, 3)
         self.assertEqual(sobol_fullybayesianmoo._steps[1].model.value,
                          "FullyBayesianMOO")
         self.assertTrue(
             sobol_fullybayesianmoo._steps[1].model_kwargs["verbose"])
     with self.subTest("SAASBO"):
         sobol_fullybayesian_large = choose_generation_strategy(
             search_space=get_large_ordinal_search_space(
                 n_ordinal_choice_parameters=5,
                 n_continuous_range_parameters=10),
             use_saasbo=True,
         )
         self.assertEqual(sobol_fullybayesian_large._steps[0].model.value,
                          "Sobol")
         self.assertEqual(sobol_fullybayesian_large._steps[0].num_trials,
                          30)
         self.assertEqual(sobol_fullybayesian_large._steps[1].model.value,
                          "FullyBayesian")
         self.assertTrue(
             sobol_fullybayesian_large._steps[1].model_kwargs["verbose"])
     with self.subTest("num_initialization_trials"):
         ss = get_large_factorial_search_space()
         for _, param in ss.parameters.items():
             param._is_ordered = True
         # 2 * len(ss.parameters) init trials are performed if num_trials is large
         gs_12_init_trials = choose_generation_strategy(search_space=ss,
                                                        num_trials=100)
         self.assertEqual(gs_12_init_trials._steps[0].model.value, "Sobol")
         self.assertEqual(gs_12_init_trials._steps[0].num_trials, 12)
         self.assertEqual(gs_12_init_trials._steps[1].model.value, "GPEI")
         # at least 5 initialization trials are performed
         gs_5_init_trials = choose_generation_strategy(search_space=ss,
                                                       num_trials=0)
         self.assertEqual(gs_5_init_trials._steps[0].model.value, "Sobol")
         self.assertEqual(gs_5_init_trials._steps[0].num_trials, 5)
         self.assertEqual(gs_5_init_trials._steps[1].model.value, "GPEI")
         # avoid spending >20% of budget on initialization trials if there are
         # more than 5 initialization trials
         gs_6_init_trials = choose_generation_strategy(search_space=ss,
                                                       num_trials=30)
         self.assertEqual(gs_6_init_trials._steps[0].model.value, "Sobol")
         self.assertEqual(gs_6_init_trials._steps[0].num_trials, 6)
         self.assertEqual(gs_6_init_trials._steps[1].model.value, "GPEI")
Example #22
    def testGen(self, mock_init, mock_best_point, mock_gen):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = NumpyModelBridge()
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        ma.transforms = OrderedDict()
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertTrue(
            np.array_equal(
                gen_args["linear_constraints"][0],
                np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
            ))
        self.assertTrue(
            np.array_equal(gen_args["linear_constraints"][1],
                           np.array([[0.0], [-3.5]])))
        self.assertEqual(gen_args["fixed_features"], {2: 3.0})
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][0], np.array([])))
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][1],
                           np.array([[0.6, 1.6, 3.0]])))
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 1.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 3.0,
            "y": 4.0,
            "z": 3.0
        })
        self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))

        # Test with multiple objectives.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="a"), Metric(name="b")], minimize=True))
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=oc2,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[1][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"],
                           np.array([-1.0, -1.0])))

        # Test with MultiObjective (unweighted multiple objectives)
        oc3 = MultiObjectiveOptimizationConfig(objective=MultiObjective(
            metrics=[Metric(name="a"),
                     Metric(name="b", lower_is_better=True)],
            minimize=True,
        ))
        search_space = SearchSpace(self.parameters)  # Unconstrained
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=oc3,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[2][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([1.0,
                                                                    -1.0])))

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(3, search_space, {}, ObservationFeatures({}), None,
                optimization_config)
        gen_args = mock_gen.mock_calls[3][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["linear_constraints"])
        self.assertIsNone(gen_args["fixed_features"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
        optimization_config.objective.minimize = True
        optimization_config.outcome_constraints[0].relative = True
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
Example #23
def get_pareto_frontier_and_transformed_configs(
    modelbridge: modelbridge_module.array.ArrayModelBridge,
    observation_features: List[ObservationFeatures],
    observation_data: Optional[List[ObservationData]] = None,
    objective_thresholds: Optional[TRefPoint] = None,
    optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
    arm_names: Optional[List[Optional[str]]] = None,
    use_model_predictions: bool = True,
) -> Tuple[List[Observation], Tensor, Tensor, Optional[Tensor]]:
    """Helper that applies transforms and calls frontier_evaluator.

    Returns transformed configs in addition to the Pareto observations.

    Args:
        modelbridge: Modelbridge used to predict metrics outcomes.
        observation_features: observation features to predict, if provided and
            use_model_predictions is True.
        observation_data: data for computing the Pareto front, unless features
            are provided and use_model_predictions is True.
        objective_thresholds: metric values bounding the region of interest in
            the objective outcome space.
        optimization_config: Optimization config.
        arm_names: Arm names for each observation.
        use_model_predictions: If True, will use model predictions at
            observation_features to compute Pareto front, if provided. If False,
            will use observation_data directly to compute Pareto front, regardless
            of whether observation_features are provided.

    Returns:
        frontier_observations: Observations of points on the Pareto frontier.
        f: n x m tensor representation of the Pareto frontier values, where n is
            the length of frontier_observations and m is the number of metrics.
        obj_w: m tensor of objective weights.
        obj_t: m tensor of objective thresholds corresponding to Y, or None if no
            objective thresholds are used.
    """

    array_to_tensor = partial(_array_to_tensor, modelbridge=modelbridge)
    X = (modelbridge.transform_observation_features(observation_features)
         if use_model_predictions else None)
    X = array_to_tensor(X) if X is not None else None
    Y, Yvar = (None, None)
    if observation_data is not None:
        Y, Yvar = modelbridge.transform_observation_data(observation_data)
        Y, Yvar = (array_to_tensor(Y), array_to_tensor(Yvar))
    if arm_names is None:
        arm_names = [None] * len(observation_features)

    # Optimization_config
    mooc = optimization_config or checked_cast_optional(
        MultiObjectiveOptimizationConfig, modelbridge._optimization_config)
    if not mooc:
        raise ValueError(
            ("Experiment must have an existing optimization_config "
             "of type `MultiObjectiveOptimizationConfig` "
             "or `optimization_config` must be passed as an argument."))
    if not isinstance(mooc, MultiObjectiveOptimizationConfig):
        mooc = not_none(MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
    if objective_thresholds:
        mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)

    optimization_config = mooc

    # Transform OptimizationConfig.
    optimization_config = modelbridge.transform_optimization_config(
        optimization_config=optimization_config,
        fixed_features=ObservationFeatures(parameters={}),
    )
    # Extract weights, constraints, and objective_thresholds
    objective_weights = extract_objective_weights(
        objective=optimization_config.objective, outcomes=modelbridge.outcomes)
    outcome_constraints = extract_outcome_constraints(
        outcome_constraints=optimization_config.outcome_constraints,
        outcomes=modelbridge.outcomes,
    )
    obj_t = extract_objective_thresholds(
        objective_thresholds=optimization_config.objective_thresholds,
        objective=optimization_config.objective,
        outcomes=modelbridge.outcomes,
    )
    obj_t = array_to_tensor(obj_t)
    # Transform to tensors.
    obj_w, oc_c, _, _, _ = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=None,
        pending_observations=None,
        final_transform=array_to_tensor,
    )
    frontier_evaluator = get_default_frontier_evaluator()
    # pyre-ignore[28]: Unexpected keyword `modelbridge` to anonymous call
    f, cov, indx = frontier_evaluator(
        model=modelbridge.model,
        X=X,
        Y=Y,
        Yvar=Yvar,
        objective_thresholds=obj_t,
        objective_weights=obj_w,
        outcome_constraints=oc_c,
    )
    f, cov = f.detach().cpu().clone(), cov.detach().cpu().clone()
    indx = indx.tolist()
    frontier_observation_data = array_to_observation_data(
        f=f.numpy(), cov=cov.numpy(), outcomes=not_none(modelbridge.outcomes))
    # Untransform observations
    for t in reversed(modelbridge.transforms.values()):  # noqa T484
        frontier_observation_data = t.untransform_observation_data(
            frontier_observation_data, [])
    # Construct observations
    frontier_observations = []
    for i, obsd in enumerate(frontier_observation_data):
        frontier_observations.append(
            Observation(
                features=observation_features[indx[i]],
                data=obsd,
                arm_name=arm_names[indx[i]],
            ))
    return frontier_observations, f, obj_w, obj_t
Example #24
def pareto_frontier(
    modelbridge: modelbridge_module.array.ArrayModelBridge,
    objective_thresholds: Optional[TRefPoint] = None,
    observation_features: Optional[List[ObservationFeatures]] = None,
    observation_data: Optional[List[ObservationData]] = None,
    optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
) -> List[ObservationData]:
    """Helper that applies transforms and calls frontier_evaluator."""
    array_to_tensor = partial(_array_to_tensor, modelbridge=modelbridge)
    X = (modelbridge.transform_observation_features(observation_features)
         if observation_features else None)
    X = array_to_tensor(X) if X is not None else None
    Y, Yvar = (None, None)
    if observation_data:
        Y, Yvar = modelbridge.transform_observation_data(observation_data)
    if Y is not None and Yvar is not None:
        Y, Yvar = (array_to_tensor(Y), array_to_tensor(Yvar))

    # Optimization_config
    mooc = optimization_config or checked_cast_optional(
        MultiObjectiveOptimizationConfig, modelbridge._optimization_config)
    if not mooc:
        raise ValueError(
            ("Experiment must have an existing optimization_config "
             "of type `MultiObjectiveOptimizationConfig` "
             "or `optimization_config` must be passed as an argument."))
    if not isinstance(mooc, MultiObjectiveOptimizationConfig):
        mooc = not_none(MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
    if objective_thresholds:
        mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)

    optimization_config = mooc

    # Transform OptimizationConfig.
    optimization_config = modelbridge.transform_optimization_config(
        optimization_config=optimization_config,
        fixed_features=ObservationFeatures(parameters={}),
    )
    # Extract weights, constraints, and objective_thresholds
    objective_weights = extract_objective_weights(
        objective=optimization_config.objective, outcomes=modelbridge.outcomes)
    outcome_constraints = extract_outcome_constraints(
        outcome_constraints=optimization_config.outcome_constraints,
        outcomes=modelbridge.outcomes,
    )
    objective_thresholds_arr = extract_objective_thresholds(
        objective_thresholds=optimization_config.objective_thresholds,
        outcomes=modelbridge.outcomes,
    )
    # Transform to tensors.
    obj_w, oc_c, _, _ = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=None,
        pending_observations=None,
        final_transform=array_to_tensor,
    )
    obj_t = array_to_tensor(objective_thresholds_arr)
    frontier_evaluator = get_default_frontier_evaluator()
    # pyre-ignore[28]: Unexpected keyword `modelbridge` to anonymous call
    f, cov = frontier_evaluator(
        model=modelbridge.model,
        X=X,
        Y=Y,
        Yvar=Yvar,
        objective_thresholds=obj_t,
        objective_weights=obj_w,
        outcome_constraints=oc_c,
    )
    f, cov = (
        f.detach().cpu().clone().numpy(),
        cov.detach().cpu().clone().numpy(),
    )
    frontier_observation_data = array_to_observation_data(
        f=f, cov=cov, outcomes=not_none(modelbridge.outcomes))
    # Untransform observations
    for t in reversed(modelbridge.transforms.values()):  # noqa T484
        frontier_observation_data = t.untransform_observation_data(
            frontier_observation_data, [])
    return frontier_observation_data
Example #25
    def testInit(self):
        config1 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective,
            outcome_constraints=self.outcome_constraints)
        self.assertEqual(str(config1), MOOC_STR)
        with self.assertRaises(TypeError):
            config1.objective = self.objective  # Wrong objective type
        # updating constraints is fine.
        config1.outcome_constraints = [self.outcome_constraint]
        self.assertEqual(len(config1.metrics), 2)

        # objective without outcome_constraints is also supported
        config2 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective)

        # setting objective is fine too, if it's compatible with constraints.
        config2.objective = self.multi_objective

        # setting constraints on objectives is fine for MultiObjective components.
        config2.outcome_constraints = [self.outcome_constraint]
        self.assertEqual(config2.outcome_constraints,
                         [self.outcome_constraint])

        # construct constraints with objective_thresholds:
        config3 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective,
            objective_thresholds=self.objective_thresholds,
        )
        self.assertEqual(config3.all_constraints, self.objective_thresholds)

        # objective_thresholds and outcome constraints together.
        config4 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective,
            objective_thresholds=self.objective_thresholds,
            outcome_constraints=[self.m3_constraint],
        )
        self.assertEqual(config4.all_constraints,
                         [self.m3_constraint] + self.objective_thresholds)
        self.assertEqual(config4.outcome_constraints, [self.m3_constraint])
        self.assertEqual(config4.objective_thresholds,
                         self.objective_thresholds)

        # verify relative_objective_thresholds works:
        config5 = MultiObjectiveOptimizationConfig(
            objective=self.multi_objective,
            objective_thresholds=[self.outcome_constraint],
        )
        threshold = config5.objective_thresholds[0]
        self.assertTrue(threshold.relative)
        self.assertEqual(threshold.bound, -0.25)

        # ValueError on wrong direction constraints
        with self.assertRaises(ValueError):
            MultiObjectiveOptimizationConfig(
                objective=self.multi_objective,
                objective_thresholds=[self.additional_outcome_constraint],
            )
Example #26
def get_branin_multi_objective_optimization_config() -> OptimizationConfig:
    return MultiObjectiveOptimizationConfig(objective=get_branin_multi_objective())
Example #27
    def testObservedParetoFrontiers(self):
        experiment = get_branin_experiment(
            with_batch=True, has_optimization_config=False, with_status_quo=True
        )

        # Optimization config is not optional
        with self.assertRaises(ValueError):
            get_observed_pareto_frontiers(experiment=experiment, data=Data())

        metrics = [
            BraninMetric(name="m1", param_names=["x1", "x2"], lower_is_better=True),
            NegativeBraninMetric(
                name="m2", param_names=["x1", "x2"], lower_is_better=True
            ),
            BraninMetric(name="m3", param_names=["x1", "x2"], lower_is_better=True),
        ]
        bounds = [0, -100, 0]
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metric,
                bound=bounds[i],
                relative=True,
                op=ComparisonOp.LEQ,
            )
            for i, metric in enumerate(metrics)
        ]
        objective = MultiObjective(metrics=metrics, minimize=True)
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=objective,
            objective_thresholds=objective_thresholds,
        )
        experiment.optimization_config = optimization_config
        experiment.trials[0].run()

        # For the check below, compute which arms are better than SQ
        df = experiment.fetch_data().df
        df["sem"] = np.nan
        data = Data(df)
        sq_val = df[(df["arm_name"] == "status_quo") & (df["metric_name"] == "m1")][
            "mean"
        ].values[0]
        pareto_arms = sorted(
            df[(df["mean"] <= sq_val) & (df["metric_name"] == "m1")]["arm_name"]
            .unique()
            .tolist()
        )

        pfrs = get_observed_pareto_frontiers(experiment=experiment, data=data)
        # We have all pairs of metrics
        self.assertEqual(len(pfrs), 3)
        true_pairs = [("m1", "m2"), ("m1", "m3"), ("m2", "m3")]
        for i, pfr in enumerate(pfrs):
            self.assertEqual(pfr.primary_metric, true_pairs[i][0])
            self.assertEqual(pfr.secondary_metric, true_pairs[i][1])
            self.assertEqual(pfr.absolute_metrics, [])
            self.assertEqual(list(pfr.means.keys()), ["m1", "m2", "m3"])
            self.assertEqual(len(pfr.means["m1"]), len(pareto_arms))
            self.assertTrue(np.isnan(pfr.sems["m1"]).all())
            self.assertEqual(len(pfr.arm_names), len(pareto_arms))
            arm_idx = np.argsort(pfr.arm_names)
            for i, idx in enumerate(arm_idx):
                name = pareto_arms[i]
                self.assertEqual(pfr.arm_names[idx], name)
                self.assertEqual(
                    pfr.param_dicts[idx], experiment.arms_by_name[name].parameters
                )