Example #1
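# Test helper: builds a Branin MultiObjectiveOptimizationConfig, optionally attaching
# absolute GEQ thresholds for the two Branin objective metrics.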
def get_branin_multi_objective_optimization_config(
    has_objective_thresholds: bool = False,
) -> MultiObjectiveOptimizationConfig:
    objective_thresholds = (
        [
            ObjectiveThreshold(
                metric=get_branin_metric(name="branin_a"),
                bound=10,
                op=ComparisonOp.GEQ,
                relative=False,
            ),
            ObjectiveThreshold(
                metric=get_branin_metric(name="branin_b"),
                bound=20,
                op=ComparisonOp.GEQ,
                relative=False,
            ),
        ]
        if has_objective_thresholds
        else None
    )
    return MultiObjectiveOptimizationConfig(
        objective=get_branin_multi_objective(),
        objective_thresholds=objective_thresholds,
    )
Example #2
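 # Checks that the LogY transform log-transforms the threshold bound of the configured
 # metric ("m1") while leaving the other threshold untouched.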
 def testTransformOptimizationConfigMOO(self):
     m1 = Metric(name="m1", lower_is_better=False)
     m2 = Metric(name="m2", lower_is_better=True)
     mo = MultiObjective(
         objectives=[
             Objective(metric=m1, minimize=False),
             Objective(metric=m2, minimize=True),
         ],
     )
     objective_thresholds = [
         ObjectiveThreshold(metric=m1, bound=1.234, relative=False),
         ObjectiveThreshold(metric=m2, bound=3.456, relative=False),
     ]
     oc = MultiObjectiveOptimizationConfig(
         objective=mo,
         objective_thresholds=objective_thresholds,
     )
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     oc.objective_thresholds[0].bound = math.log(1.234)
     self.assertEqual(oc_tf, oc)
Example #3
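    # Exercises observed and predicted Pareto frontiers on a MultiObjectiveTorchModelBridge:
    # the wrapped frontier evaluator must be called, and empty observation_features must raise.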
    def test_pareto_frontier(self, _):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True
        )
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        exp.optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds
        )
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys())
        )
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            objective_thresholds=objective_thresholds,
        )
        with patch(
            PARETO_FRONTIER_EVALUATOR_PATH, wraps=pareto_frontier_evaluator
        ) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            observed_frontier_data = modelbridge.observed_pareto_frontier(
                objective_thresholds=objective_thresholds
            )
            wrapped_frontier_evaluator.assert_called_once()
            self.assertEqual(1, len(observed_frontier_data))

        with self.assertRaises(ValueError):
            modelbridge.predicted_pareto_frontier(
                objective_thresholds=objective_thresholds, observation_features=[]
            )

        observation_features = [
            ObservationFeatures(parameters={"x1": 0.0, "x2": 1.0}),
            ObservationFeatures(parameters={"x1": 1.0, "x2": 0.0}),
        ]
        predicted_frontier_data = modelbridge.predicted_pareto_frontier(
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(len(predicted_frontier_data) <= 2)
Example #4
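    # extract_objective_thresholds should return a bound array aligned with `outcomes`,
    # and raise when thresholds are missing, mismatched with the objective, or relative.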
    def test_extract_objective_thresholds(self):
        outcomes = ["m1", "m2", "m3", "m4"]
        objective = MultiObjective(metrics=[Metric(name) for name in outcomes[:3]])
        objective_thresholds = [
            ObjectiveThreshold(
                metric=Metric(name), op=ComparisonOp.LEQ, bound=float(i + 2)
            )
            for i, name in enumerate(outcomes[:3])
        ]

        # Returns None if no thresholds are provided
        self.assertIsNone(
            extract_objective_thresholds(
                objective_thresholds=[], objective=objective, outcomes=outcomes
            )
        )

        # Working case
        obj_t = extract_objective_thresholds(
            objective_thresholds=objective_thresholds,
            objective=objective,
            outcomes=outcomes,
        )
        self.assertTrue(np.array_equal(obj_t, np.array([2.0, 3.0, 4.0, 0.0])))

        # Fails if threshold not provided for all objective metrics
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds[:2],
                objective=objective,
                outcomes=outcomes,
            )

        # Fails if number of thresholds doesn't equal number of objectives
        objective2 = Objective(Metric("m1"))
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds,
                objective=objective2,
                outcomes=outcomes,
            )

        # Works with a single objective, single threshold
        obj_t = extract_objective_thresholds(
            objective_thresholds=objective_thresholds[:1],
            objective=objective2,
            outcomes=outcomes,
        )
        self.assertTrue(np.array_equal(obj_t, np.array([2.0, 0.0, 0.0, 0.0])))

        # Fails if relative
        objective_thresholds[2] = ObjectiveThreshold(
            metric=Metric("m3"), op=ComparisonOp.LEQ, bound=3, relative=True
        )
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds,
                objective=objective,
                outcomes=outcomes,
            )
Example #5
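# Variant of the helper in Example #1 that supports a third Branin objective
# (and threshold) when num_objectives == 3.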
def get_branin_multi_objective_optimization_config(
    has_objective_thresholds: bool = False,
    num_objectives: int = 2,
) -> MultiObjectiveOptimizationConfig:
    _validate_num_objectives(num_objectives=num_objectives)
    if has_objective_thresholds:
        objective_thresholds = [
            ObjectiveThreshold(
                metric=get_branin_metric(name="branin_a"),
                bound=10,
                op=ComparisonOp.GEQ,
                relative=False,
            ),
            ObjectiveThreshold(
                metric=get_branin_metric(name="branin_b"),
                bound=20,
                op=ComparisonOp.GEQ,
                relative=False,
            ),
        ]
        if num_objectives == 3:
            objective_thresholds.append(
                ObjectiveThreshold(
                    metric=get_branin_metric(name="branin_c"),
                    bound=5.0,
                    op=ComparisonOp.GEQ,
                    relative=False,
                )
            )
    else:
        objective_thresholds = None
    return MultiObjectiveOptimizationConfig(
        objective=get_branin_multi_objective(num_objectives=num_objectives),
        objective_thresholds=objective_thresholds,
    )
Example #6
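 # MOO factory functions should still generate when the experiment tracks an extra
 # metric ("b") with no threshold; the returned thresholds cover only the two objectives.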
 def test_MOO_with_more_outcomes_than_thresholds(self):
     experiment = get_branin_experiment_with_multi_objective(
         has_optimization_config=False)
     metric_c = Metric(name="c", lower_is_better=False)
     metric_a = Metric(name="a", lower_is_better=False)
     objective_thresholds = [
         ObjectiveThreshold(
             metric=metric_c,
             bound=2.0,
             relative=False,
         ),
         ObjectiveThreshold(
             metric=metric_a,
             bound=1.0,
             relative=False,
         ),
     ]
     experiment.optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(objectives=[
             Objective(metric=metric_a),
             Objective(metric=metric_c),
         ]),
         objective_thresholds=objective_thresholds,
     )
     experiment.add_tracking_metric(Metric(name="b", lower_is_better=False))
     sobol = get_sobol(search_space=experiment.search_space)
     sobol_run = sobol.gen(1)
     experiment.new_batch_trial().add_generator_run(
         sobol_run).run().mark_completed()
     data = Data(
         pd.DataFrame(
             data={
                 "arm_name": ["0_0", "0_0", "0_0"],
                 "metric_name": ["a", "b", "c"],
                 "mean": [1.0, 2.0, 3.0],
                 "trial_index": [0, 0, 0],
                 "sem": [0, 0, 0],
             }))
     test_names_to_fns = {
         "MOO_NEHVI": get_MOO_NEHVI,
         "MOO_EHVI": get_MOO_EHVI,
         "MOO_PAREGO": get_MOO_PAREGO,
         "MOO_RS": get_MOO_RS,
     }
     for test_name, factory_fn in test_names_to_fns.items():
         with self.subTest(test_name):
             moo_model = factory_fn(
                 experiment=experiment,
                 data=data,
             )
             moo_gr = moo_model.gen(n=1)
             obj_t = moo_gr.gen_metadata["objective_thresholds"]
             self.assertEqual(obj_t[0], objective_thresholds[1])
             self.assertEqual(obj_t[1], objective_thresholds[0])
             self.assertEqual(len(obj_t), 2)
Example #7
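 # feasible_hypervolume: the expected trace stays at zero until a point appears that
 # satisfies the constraint on "c" and improves on the (1.0, 1.0) thresholds.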
 def test_feasible_hypervolume(self):
     ma = Metric(name="a", lower_is_better=False)
     mb = Metric(name="b", lower_is_better=True)
     mc = Metric(name="c", lower_is_better=False)
     optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(metrics=[ma, mb]),
         outcome_constraints=[
             OutcomeConstraint(
                 mc,
                 op=ComparisonOp.GEQ,
                 bound=0,
                 relative=False,
             )
         ],
         objective_thresholds=[
             ObjectiveThreshold(
                 ma,
                 bound=1.0,
             ),
             ObjectiveThreshold(
                 mb,
                 bound=1.0,
             ),
         ],
     )
     feas_hv = feasible_hypervolume(
         optimization_config,
         values={
             "a": np.array([1.0, 3.0, 2.0, 2.0]),
             "b": np.array([0.0, 1.0, 0.0, 0.0]),
             "c": np.array([0.0, -0.0, 1.0, -2.0]),
         },
     )
     self.assertEqual(list(feas_hv), [0.0, 0.0, 1.0, 1.0])
Example #8
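    # Equality is value-based: thresholds with the same metric and bound compare equal,
    # and a threshold is not equal to a plain OutcomeConstraint.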
    def testEq(self):
        threshold1 = ObjectiveThreshold(metric=self.minimize_metric,
                                        bound=self.bound)
        threshold2 = ObjectiveThreshold(metric=self.minimize_metric,
                                        bound=self.bound)
        self.assertEqual(threshold1, threshold2)

        constraint3 = OutcomeConstraint(metric=self.minimize_metric,
                                        op=ComparisonOp.LEQ,
                                        bound=self.bound)
        self.assertNotEqual(threshold1, constraint3)
Example #9
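 # Shared fixtures: metrics, single and multi objectives, outcome constraints, and
 # absolute/relative objective thresholds.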
 def setUp(self):
     self.metrics = {
         "m1": Metric(name="m1", lower_is_better=True),
         "m2": Metric(name="m2", lower_is_better=False),
         "m3": Metric(name="m3", lower_is_better=False),
     }
     self.objectives = {
         "o1": Objective(metric=self.metrics["m1"]),
         "o2": Objective(metric=self.metrics["m2"], minimize=False),
         "o3": Objective(metric=self.metrics["m3"], minimize=False),
     }
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.multi_objective = MultiObjective(
         objectives=[self.objectives["o1"], self.objectives["o2"]])
     self.multi_objective_just_m2 = MultiObjective(
         objectives=[self.objectives["o2"]])
     self.outcome_constraint = OutcomeConstraint(metric=self.metrics["m2"],
                                                 op=ComparisonOp.GEQ,
                                                 bound=-0.25)
     self.additional_outcome_constraint = OutcomeConstraint(
         metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.25)
     self.outcome_constraints = [
         self.outcome_constraint,
         self.additional_outcome_constraint,
     ]
     self.objective_thresholds = [
         ObjectiveThreshold(metric=self.metrics["m1"],
                            bound=-1.0,
                            relative=False),
         ObjectiveThreshold(metric=self.metrics["m2"],
                            bound=-1.0,
                            relative=False),
     ]
     self.relative_objective_thresholds = [
         ObjectiveThreshold(metric=self.metrics["m1"],
                            bound=-1.0,
                            relative=True),
         ObjectiveThreshold(
             metric=self.metrics["m2"],
             op=ComparisonOp.GEQ,
             bound=-1.0,
             relative=True,
         ),
     ]
     self.m1_constraint = OutcomeConstraint(metric=self.metrics["m1"],
                                            op=ComparisonOp.LEQ,
                                            bound=0.1,
                                            relative=True)
     self.m3_constraint = OutcomeConstraint(metric=self.metrics["m3"],
                                            op=ComparisonOp.GEQ,
                                            bound=0.1,
                                            relative=True)
Example #10
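 # A threshold whose op points the wrong way for the metric's lower_is_better setting
 # should emit a debug-level constraint warning.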
 def testObjectiveThresholdFail(self):
     logger_name = OUTCOME_CONSTRAINT_PATH + ".logger"
     with mock.patch(logger_name) as mock_warning:
         ObjectiveThreshold(metric=self.minimize_metric,
                            op=ComparisonOp.GEQ,
                            bound=self.bound)
         mock_warning.debug.assert_called_once_with(
             CONSTRAINT_WARNING_MESSAGE.format(**LOWER_BOUND_MISMATCH))
     with mock.patch(logger_name) as mock_warning:
         ObjectiveThreshold(metric=self.maximize_metric,
                            op=ComparisonOp.LEQ,
                            bound=self.bound)
         mock_warning.debug.assert_called_once_with(
             CONSTRAINT_WARNING_MESSAGE.format(**UPPER_BOUND_MISMATCH))
Example #11
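# Minimal factory for an ObjectiveThreshold with configurable metric name, bound, and op.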
def get_objective_threshold(
        metric_name: str = "m1",
        bound: float = -0.25,
        comparison_op: ComparisonOp = ComparisonOp.GEQ) -> ObjectiveThreshold:
    return ObjectiveThreshold(metric=Metric(name=metric_name),
                              bound=bound,
                              op=comparison_op)
Example #12
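    # Builds a single-type multi-task GP NEHVI model bridge (Models.ST_MTGP_NEHVI) over two
    # Sobol batches with a status quo, then generates a new batch trial and completes it.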
    def test_ST_MTGP_NEHVI(self):
        """Tests single type MTGP NEHVI instantiation."""
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True,
            with_status_quo=True,
        )
        metrics = multi_obj_exp.optimization_config.objective.metrics
        multi_objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
            ObjectiveThreshold(metric=metrics[1],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
        ]
        sobol = Models.SOBOL(search_space=multi_obj_exp.search_space)
        self.assertIsInstance(sobol, RandomModelBridge)
        for _ in range(2):
            sobol_run = sobol.gen(n=1)
            t = multi_obj_exp.new_batch_trial().add_generator_run(sobol_run)
            t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
            t.run().mark_completed()
        status_quo_features = ObservationFeatures(
            parameters=multi_obj_exp.trials[0].status_quo.parameters,
            trial_index=0,
        )
        mtgp = Models.ST_MTGP_NEHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            status_quo_features=status_quo_features,
            objective_thresholds=multi_objective_thresholds,
        )
        self.assertIsInstance(mtgp, TorchModelBridge)
        self.assertIsInstance(mtgp.model, MultiObjectiveBotorchModel)

        # test it can generate
        mtgp_run = mtgp.gen(n=1,
                            fixed_features=ObservationFeatures(parameters={},
                                                               trial_index=1))
        self.assertEqual(len(mtgp_run.arms), 1)

        # test a generated trial can be completed
        t = multi_obj_exp.new_batch_trial().add_generator_run(mtgp_run)
        t.set_status_quo_with_weight(status_quo=t.arms[0], weight=0.5)
        t.run().mark_completed()
Example #13
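    # get_MOO_EHVI requires a multi-objective experiment and non-empty data; once a trial
    # is completed it returns a MultiObjectiveTorchModelBridge that can generate.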
    def test_MOO_EHVI(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        metrics = single_obj_exp.optimization_config.objective.metrics
        objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ)
        ]
        # ValueError: Multi-objective optimization requires multiple objectives.
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=single_obj_exp,
                data=single_obj_exp.fetch_data(),
                objective_thresholds=objective_thresholds,
            )
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True)
        metrics = multi_obj_exp.optimization_config.objective.metrics
        multi_objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
            ObjectiveThreshold(metric=metrics[1],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ),
        ]
        # ValueError: MultiObjectiveOptimization requires non-empty data.
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
            )

        multi_obj_exp.trials[0].run().mark_completed()
        moo_ehvi = get_MOO_EHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            objective_thresholds=multi_objective_thresholds,
        )
        self.assertIsInstance(moo_ehvi, MultiObjectiveTorchModelBridge)
        moo_ehvi_run = moo_ehvi.gen(n=1)
        self.assertEqual(len(moo_ehvi_run.arms), 1)
Example #14
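 # Fixtures for the threshold tests: minimizing, maximizing, and ambiguous metrics plus
 # a default GEQ threshold.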
 def setUp(self):
     self.minimize_metric = Metric(name="bar", lower_is_better=True)
     self.maximize_metric = Metric(name="baz", lower_is_better=False)
     self.ambiguous_metric = Metric(name="buz")
     self.bound = 0
     self.threshold = ObjectiveThreshold(metric=self.maximize_metric,
                                         op=ComparisonOp.GEQ,
                                         bound=self.bound)
Example #15
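 # Maps model-space objective thresholds back to the original metric scale by applying the
 # stored transforms in reverse, then rebuilds ObjectiveThresholds with ops inferred from
 # the signs of the objective weights.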
 def untransform_objective_thresholds(
     self,
     objective_thresholds: Tensor,
     objective_weights: Tensor,
     bounds: List[Tuple[Union[int, float], Union[int, float]]],
     fixed_features: Optional[Dict[int, float]],
 ) -> List[ObjectiveThreshold]:
     objective_thresholds_np = objective_thresholds.cpu().numpy()
     # pyre-ignore [16]
     objective_indices = objective_weights.nonzero().view(-1).tolist()
     objective_names = [self.outcomes[i] for i in objective_indices]
     # create an ObservationData object for untransforming the objective thresholds
     observation_data = [
         ObservationData(
             metric_names=objective_names,
             means=objective_thresholds_np[objective_indices].copy(),
             covariance=np.zeros(
                 (len(objective_indices), len(objective_indices))),
         )
     ]
     # Untransform objective thresholds. Note: there is one objective threshold
     # for every outcome.
     # Construct dummy observation features
     X = [bound[0] for bound in bounds]
     fixed_features = fixed_features or {}
     for i, val in fixed_features.items():
         X[i] = val
     observation_features = parse_observation_features(
         X=np.array([X]),
         param_names=self.parameters,
     )
     # Apply reverse transforms, in reverse order
     for t in reversed(self.transforms.values()):
         observation_data = t.untransform_observation_data(
             observation_data=observation_data,
             observation_features=observation_features,
         )
         observation_features = t.untransform_observation_features(
              observation_features=observation_features)
     observation_data = observation_data[0]
     oc = not_none(self._optimization_config)
     metrics_names_to_metric = oc.metrics
     obj_thresholds = []
     for idx, (name, bound) in enumerate(
             zip(observation_data.metric_names, observation_data.means)):
         if not np.isnan(bound):
             obj_weight = objective_weights[objective_indices[idx]]
             op = (ComparisonOp.LEQ
                   if torch.sign(obj_weight) == -1.0 else ComparisonOp.GEQ)
             obj_thresholds.append(
                 ObjectiveThreshold(
                     metric=metrics_names_to_metric[name],
                     bound=bound,
                     relative=False,
                     op=op,
                 ))
     return obj_thresholds
Example #16
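 # The `relative` flag defaults to True and is preserved when passed explicitly.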
 def testRelativize(self):
     self.assertTrue(
         ObjectiveThreshold(metric=self.maximize_metric,
                            op=ComparisonOp.LEQ,
                            bound=self.bound).relative)
     self.assertTrue(
         ObjectiveThreshold(
             metric=self.maximize_metric,
             op=ComparisonOp.LEQ,
             bound=self.bound,
             relative=True,
         ).relative)
     self.assertFalse(
         ObjectiveThreshold(
             metric=self.maximize_metric,
             op=ComparisonOp.LEQ,
             bound=self.bound,
             relative=False,
         ).relative)
Example #17
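    # Variant of the get_MOO_EHVI test above that also flips lower_is_better on the metrics
    # and omits explicit ops on the thresholds.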
    def test_MOO_EHVI(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        metrics = single_obj_exp.optimization_config.objective.metrics
        metrics[0].lower_is_better = True
        objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0],
                               bound=0.0,
                               relative=False,
                               op=ComparisonOp.GEQ)
        ]
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=single_obj_exp,
                data=single_obj_exp.fetch_data(),
                objective_thresholds=objective_thresholds,
            )
        multi_obj_exp = get_branin_experiment_with_multi_objective(
            with_batch=True)
        metrics = multi_obj_exp.optimization_config.objective.metrics
        metrics[0].lower_is_better = False
        metrics[1].lower_is_better = True
        multi_objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0], bound=0.0, relative=False),
            ObjectiveThreshold(metric=metrics[1], bound=0.0, relative=False),
        ]
        with self.assertRaises(ValueError):
            get_MOO_EHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
            )

        multi_obj_exp.trials[0].run()
        moo_ehvi = get_MOO_EHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            objective_thresholds=multi_objective_thresholds,
        )
        self.assertIsInstance(moo_ehvi, MultiObjectiveTorchModelBridge)
        moo_ehvi_run = moo_ehvi.gen(n=1)
        self.assertEqual(len(moo_ehvi_run.arms), 1)
Example #18
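    # Decoder dispatch: reconstructs the appropriate Ax object (Metric, Objective,
    # MultiObjective, ScalarizedObjective, OutcomeConstraint, ObjectiveThreshold, ...)
    # from a stored SQAMetric based on its intent.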
    def metric_from_sqa(
            self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""

        metric = self.metric_from_sqa_util(metric_sqa)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            if metric_sqa.scalarized_objective_weight is not None:
                raise SQADecodeError(  # pragma: no cover
                    "The metric corresponding to regular objective does not \
                    have weight attribute")
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif (metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE
              ):  # metric_sqa is a parent whose children are individual
            # metrics in MultiObjective
            try:
                metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            except DetachedInstanceError:
                metrics_sqa_children = _get_scalarized_objective_children_metrics(
                    metric_id=metric_sqa.id, decoder=self)

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            objectives = [
                Objective(
                    metric=self.metric_from_sqa_util(metric_sqa),
                    minimize=metric_sqa.minimize,
                ) for metric_sqa in metrics_sqa_children
            ]

            multi_objective = MultiObjective(objectives=objectives)
            multi_objective.db_id = metric_sqa.id
            return multi_objective
        elif (metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE
              ):  # metric_sqa is a parent whose children are individual
            # metrics in Scalarized Objective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because minimize is None.")

            try:
                metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            except DetachedInstanceError:
                metrics_sqa_children = _get_scalarized_objective_children_metrics(
                    metric_id=metric_sqa.id, decoder=self)

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_objective_weight,
            ) for child in metrics_sqa_children])
            scalarized_objective = ScalarizedObjective(
                metrics=list(metrics),
                weights=list(weights),
                minimize=not_none(metric_sqa.minimize),
            )
            scalarized_objective.db_id = metric_sqa.id
            return scalarized_objective
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None.")
            return OutcomeConstraint(
                metric=metric,
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                    "bound, op, or relative is None.")

            try:
                metrics_sqa_children = (
                    metric_sqa.scalarized_outcome_constraint_children_metrics)
            except DetachedInstanceError:
                metrics_sqa_children = (
                    _get_scalarized_outcome_constraint_children_metrics(
                        metric_id=metric_sqa.id, decoder=self))

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_outcome_constraint_weight,
            ) for child in metrics_sqa_children])
            scalarized_outcome_constraint = ScalarizedOutcomeConstraint(
                metrics=list(metrics),
                weights=list(weights),
                bound=not_none(metric_sqa.bound),
                op=not_none(metric_sqa.op),
                relative=not_none(metric_sqa.relative),
            )
            scalarized_outcome_constraint.db_id = metric_sqa.id
            return scalarized_outcome_constraint
        elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
            if metric_sqa.bound is None or metric_sqa.relative is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ObjectiveThreshold because "
                    "bound or relative is None.")
            ot = ObjectiveThreshold(
                metric=metric,
                bound=metric_sqa.bound,
                relative=metric_sqa.relative,
                op=metric_sqa.op,
            )
            # ObjectiveThreshold constructor clones the passed-in metric, which means
            # the db id gets lost and so we need to reset it
            ot.metric._db_id = metric.db_id
            return ot
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent.")
Example #19
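    # Fuller Pareto-frontier test: also covers predicted_pareto_frontier with default
    # features and pareto_frontier with and without model predictions, checking the
    # tensors passed to the wrapped evaluator.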
    def test_pareto_frontier(self, _):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        for trial in exp.trials.values():
            trial.mark_running(no_runner_required=True).mark_completed()
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        exp.optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials.keys()))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            objective_thresholds=objective_thresholds,
        )
        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            observed_frontier = observed_pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds)
            wrapped_frontier_evaluator.assert_called_once()
            self.assertIsNone(wrapped_frontier_evaluator.call_args.kwargs["X"])
            self.assertEqual(1, len(observed_frontier))
            self.assertEqual(observed_frontier[0].arm_name, "0_0")

        with self.assertRaises(ValueError):
            predicted_pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=[],
            )

        predicted_frontier = predicted_pareto_frontier(
            modelbridge=modelbridge,
            objective_thresholds=objective_thresholds,
            observation_features=None,
        )
        self.assertEqual(predicted_frontier[0].arm_name, "0_0")

        observation_features = [
            ObservationFeatures(parameters={"x1": 0.0, "x2": 1.0}),
            ObservationFeatures(parameters={"x1": 1.0, "x2": 0.0}),
        ]
        observation_data = [
            ObservationData(
                metric_names=["branin_b", "branin_a"],
                means=np.array([1.0, 2.0]),
                covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            ),
            ObservationData(
                metric_names=["branin_a", "branin_b"],
                means=np.array([3.0, 4.0]),
                covariance=np.array([[1.0, 2.0], [3.0, 4.0]]),
            ),
        ]
        predicted_frontier = predicted_pareto_frontier(
            modelbridge=modelbridge,
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(len(predicted_frontier) <= 2)
        self.assertIsNone(predicted_frontier[0].arm_name)

        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            observed_frontier = pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
                observation_data=observation_data,
            )
            wrapped_frontier_evaluator.assert_called_once()
            self.assertTrue(
                torch.equal(
                    wrapped_frontier_evaluator.call_args.kwargs["X"],
                    torch.tensor([[1.0, 4.0], [4.0, 1.0]]),
                ))

        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            observed_frontier = pareto_frontier(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
                observation_data=observation_data,
                use_model_predictions=False,
            )
            wrapped_frontier_evaluator.assert_called_once()
            self.assertIsNone(wrapped_frontier_evaluator.call_args.kwargs["X"])
            self.assertTrue(
                torch.equal(
                    wrapped_frontier_evaluator.call_args.kwargs["Y"],
                    torch.tensor([[9.0, 4.0], [16.0, 25.0]]),
                ))
Example #20
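    # Another variant of the same decoder; this one builds MultiObjective directly from
    # metrics rather than from Objective instances.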
    def metric_from_sqa(
            self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""

        metric = self.metric_from_sqa_util(metric_sqa)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            if metric_sqa.scalarized_objective_weight is not None:
                raise SQADecodeError(  # pragma: no cover
                    "The metric corresponding to regular objective does not \
                    have weight attribute")
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif (metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE
              ):  # metric_sqa is a parent whose children are individual
            # metrics in MultiObjective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because minimize is None.")
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics = [
                self.metric_from_sqa_util(child)
                for child in metrics_sqa_children
            ]

            return MultiObjective(
                metrics=list(metrics),
                # pyre-fixme[6]: Expected `bool` for 2nd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
        elif (metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE
              ):  # metric_sqa is a parent whose children are individual
            # metrics in Scalarized Objective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because minimize is None.")
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_objective_weight,
            ) for child in metrics_sqa_children])
            return ScalarizedObjective(
                metrics=list(metrics),
                weights=list(weights),
                # pyre-fixme[6]: Expected `bool` for 3rd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None.")
            return OutcomeConstraint(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                    "bound, op, or relative is None.")
            metrics_sqa_children = (
                metric_sqa.scalarized_outcome_constraint_children_metrics)
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_outcome_constraint_weight,
            ) for child in metrics_sqa_children])
            return ScalarizedOutcomeConstraint(
                metrics=list(metrics),
                weights=list(weights),
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )

        elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
            if metric_sqa.bound is None or metric_sqa.relative is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ObjectiveThreshold because "
                    "bound or relative is None.")
            return ObjectiveThreshold(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                relative=metric_sqa.relative,
                op=metric_sqa.op,
            )
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent.")
Example #21
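    # Covers observed and predicted hypervolume for 2- and 3-objective Branin experiments,
    # including selected_metrics and the error cases for non-objective metrics and empty
    # observation_features.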
    def test_hypervolume(self, _, cuda=False):
        for num_objectives in (2, 3):
            exp = get_branin_experiment_with_multi_objective(
                has_optimization_config=True,
                with_batch=True,
                num_objectives=num_objectives,
            )
            for trial in exp.trials.values():
                trial.mark_running(no_runner_required=True).mark_completed()
            metrics_dict = exp.optimization_config.metrics
            objective_thresholds = [
                ObjectiveThreshold(
                    metric=metrics_dict["branin_a"],
                    bound=0.0,
                    relative=False,
                    op=ComparisonOp.GEQ,
                ),
                ObjectiveThreshold(
                    metric=metrics_dict["branin_b"],
                    bound=1.0,
                    relative=False,
                    op=ComparisonOp.GEQ,
                ),
            ]
            if num_objectives == 3:
                objective_thresholds.append(
                    ObjectiveThreshold(
                        metric=metrics_dict["branin_c"],
                        bound=2.0,
                        relative=False,
                        op=ComparisonOp.GEQ,
                    )
                )
            optimization_config = exp.optimization_config.clone_with_args(
                objective_thresholds=objective_thresholds
            )
            exp.attach_data(
                get_branin_data_multi_objective(
                    trial_indices=exp.trials.keys(), num_objectives=num_objectives
                )
            )
            modelbridge = TorchModelBridge(
                search_space=exp.search_space,
                model=MultiObjectiveBotorchModel(),
                optimization_config=optimization_config,
                transforms=[],
                experiment=exp,
                data=exp.fetch_data(),
                torch_device=torch.device("cuda" if cuda else "cpu"),
                objective_thresholds=objective_thresholds,
            )
            with patch(
                PARETO_FRONTIER_EVALUATOR_PATH, wraps=pareto_frontier_evaluator
            ) as wrapped_frontier_evaluator:
                modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
                hv = observed_hypervolume(
                    modelbridge=modelbridge, objective_thresholds=objective_thresholds
                )
                expected_hv = 20 if num_objectives == 2 else 60  # 5 * 4 (* 3)
                wrapped_frontier_evaluator.assert_called_once()
                self.assertEqual(expected_hv, hv)
                if num_objectives == 3:
                    # Test selected_metrics
                    hv = observed_hypervolume(
                        modelbridge=modelbridge,
                        objective_thresholds=objective_thresholds,
                        selected_metrics=["branin_a", "branin_c"],
                    )
                    expected_hv = 15  # (5 - 0) * (5 - 2)
                    self.assertEqual(expected_hv, hv)
                    # test that non-objective outcome raises value error
                    with self.assertRaises(ValueError):
                        hv = observed_hypervolume(
                            modelbridge=modelbridge,
                            objective_thresholds=objective_thresholds,
                            selected_metrics=["tracking"],
                        )

            with self.assertRaises(ValueError):
                predicted_hypervolume(
                    modelbridge=modelbridge,
                    objective_thresholds=objective_thresholds,
                    observation_features=[],
                )

            observation_features = [
                ObservationFeatures(parameters={"x1": 1.0, "x2": 2.0}),
                ObservationFeatures(parameters={"x1": 2.0, "x2": 1.0}),
            ]
            predicted_hv = predicted_hypervolume(
                modelbridge=modelbridge,
                objective_thresholds=objective_thresholds,
                observation_features=observation_features,
            )
            self.assertTrue(predicted_hv >= 0)
            if num_objectives == 3:
                # Test selected_metrics
                predicted_hv = predicted_hypervolume(
                    modelbridge=modelbridge,
                    objective_thresholds=objective_thresholds,
                    observation_features=observation_features,
                    selected_metrics=["branin_a", "branin_c"],
                )
                self.assertTrue(predicted_hv >= 0)
Example #22
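    # get_MTGP_NEHVI: verifies error cases (single objective, empty data, bad trial index)
    # and that the multi-task model can generate against a chosen trial.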
    def test_MTGP_NEHVI(self):
        single_obj_exp = get_branin_experiment(with_batch=True)
        metrics = single_obj_exp.optimization_config.objective.metrics
        metrics[0].lower_is_better = True
        objective_thresholds = [
            ObjectiveThreshold(metric=metrics[0], bound=0.0, relative=False)
        ]
        with self.assertRaises(ValueError):
            get_MTGP_NEHVI(
                experiment=single_obj_exp,
                data=single_obj_exp.fetch_data(),
                objective_thresholds=objective_thresholds,
            )

        multi_obj_exp = get_branin_experiment_with_multi_objective(with_batch=True)
        metrics = multi_obj_exp.optimization_config.objective.metrics
        multi_objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics[0], bound=0.0, relative=False, op=ComparisonOp.GEQ
            ),
            ObjectiveThreshold(
                metric=metrics[1], bound=0.0, relative=False, op=ComparisonOp.GEQ
            ),
        ]
        with self.assertRaises(ValueError):
            get_MTGP_NEHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
            )

        multi_obj_exp.trials[0].run()
        sobol_generator = get_sobol(search_space=multi_obj_exp.search_space)
        sobol_run = sobol_generator.gen(n=3)
        multi_obj_exp.new_batch_trial(optimize_for_power=False).add_generator_run(
            sobol_run
        )
        multi_obj_exp.trials[1].run()
        mt_ehvi = get_MTGP_NEHVI(
            experiment=multi_obj_exp,
            data=multi_obj_exp.fetch_data(),
            objective_thresholds=multi_objective_thresholds,
            trial_index=1,
        )
        self.assertIsInstance(mt_ehvi, TorchModelBridge)
        self.assertIsInstance(mt_ehvi.model.model.models[0], MultiTaskGP)
        task_covar_factor = mt_ehvi.model.model.models[0].task_covar_module.covar_factor
        self.assertEqual(task_covar_factor.shape, torch.Size([2, 2]))
        mt_ehvi_run = mt_ehvi.gen(
            n=1, fixed_features=ObservationFeatures(parameters={}, trial_index=1)
        )
        self.assertEqual(len(mt_ehvi_run.arms), 1)

        # Bad index given
        with self.assertRaises(ValueError):
            get_MTGP_NEHVI(
                experiment=multi_obj_exp,
                data=multi_obj_exp.fetch_data(),
                objective_thresholds=multi_objective_thresholds,
                trial_index=999,
            )

        # Multi-type + multi-objective experiment
        multi_type_multi_obj_exp = get_multi_type_experiment_with_multi_objective(
            add_trials=True
        )
        data = multi_type_multi_obj_exp.fetch_data()
        mt_ehvi = get_MTGP_NEHVI(
            experiment=multi_type_multi_obj_exp,
            data=data,
            objective_thresholds=multi_objective_thresholds,
        )
Example #23
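    # get_observed_pareto_frontiers returns one frontier per pair of objective metrics;
    # with relative LEQ thresholds at 0, only arms at least as good as the status quo appear.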
    def testObservedParetoFrontiers(self):
        experiment = get_branin_experiment(
            with_batch=True, has_optimization_config=False, with_status_quo=True
        )

        # Optimization config is not optional
        with self.assertRaises(ValueError):
            get_observed_pareto_frontiers(experiment=experiment, data=Data())

        metrics = [
            BraninMetric(name="m1", param_names=["x1", "x2"], lower_is_better=True),
            NegativeBraninMetric(
                name="m2", param_names=["x1", "x2"], lower_is_better=True
            ),
            BraninMetric(name="m3", param_names=["x1", "x2"], lower_is_better=True),
        ]
        bounds = [0, -100, 0]
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metric,
                bound=bounds[i],
                relative=True,
                op=ComparisonOp.LEQ,
            )
            for i, metric in enumerate(metrics)
        ]
        objective = MultiObjective(metrics=metrics, minimize=True)
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=objective,
            objective_thresholds=objective_thresholds,
        )
        experiment.optimization_config = optimization_config
        experiment.trials[0].run()

        # For the check below, compute which arms are better than SQ
        df = experiment.fetch_data().df
        df["sem"] = np.nan
        data = Data(df)
        sq_val = df[(df["arm_name"] == "status_quo") & (df["metric_name"] == "m1")][
            "mean"
        ].values[0]
        pareto_arms = sorted(
            df[(df["mean"] <= sq_val) & (df["metric_name"] == "m1")]["arm_name"]
            .unique()
            .tolist()
        )

        pfrs = get_observed_pareto_frontiers(experiment=experiment, data=data)
        # We have all pairs of metrics
        self.assertEqual(len(pfrs), 3)
        true_pairs = [("m1", "m2"), ("m1", "m3"), ("m2", "m3")]
        for i, pfr in enumerate(pfrs):
            self.assertEqual(pfr.primary_metric, true_pairs[i][0])
            self.assertEqual(pfr.secondary_metric, true_pairs[i][1])
            self.assertEqual(pfr.absolute_metrics, [])
            self.assertEqual(list(pfr.means.keys()), ["m1", "m2", "m3"])
            self.assertEqual(len(pfr.means["m1"]), len(pareto_arms))
            self.assertTrue(np.isnan(pfr.sems["m1"]).all())
            self.assertEqual(len(pfr.arm_names), len(pareto_arms))
            arm_idx = np.argsort(pfr.arm_names)
            for j, idx in enumerate(arm_idx):
                name = pareto_arms[j]
                self.assertEqual(pfr.arm_names[idx], name)
                self.assertEqual(
                    pfr.param_dicts[idx], experiment.arms_by_name[name].parameters
                )
Example #24
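    # get_standard_plots: checks the number of figures produced for single-objective,
    # multi-objective (with and without thresholds), and map-metric experiments, plus the
    # error for an unknown true_objective_metric_name.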
    def test_get_standard_plots(self):
        exp = get_branin_experiment()
        self.assertEqual(
            len(
                get_standard_plots(experiment=exp,
                                   model=get_generation_strategy().model)),
            0,
        )
        exp = get_branin_experiment(with_batch=True, minimize=True)
        exp.trials[0].run()
        plots = get_standard_plots(
            experiment=exp,
            model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
        )
        self.assertEqual(len(plots), 6)
        self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
        exp = get_branin_experiment_with_multi_objective(with_batch=True)
        exp.optimization_config.objective.objectives[0].minimize = False
        exp.optimization_config.objective.objectives[1].minimize = True
        exp.optimization_config._objective_thresholds = [
            ObjectiveThreshold(metric=exp.metrics["branin_a"],
                               op=ComparisonOp.GEQ,
                               bound=-100.0),
            ObjectiveThreshold(metric=exp.metrics["branin_b"],
                               op=ComparisonOp.LEQ,
                               bound=100.0),
        ]
        exp.trials[0].run()
        plots = get_standard_plots(experiment=exp,
                                   model=Models.MOO(experiment=exp,
                                                    data=exp.fetch_data()))
        self.assertEqual(len(plots), 7)

        # All plots are successfully created when objective thresholds are absent
        exp.optimization_config._objective_thresholds = []
        plots = get_standard_plots(experiment=exp,
                                   model=Models.MOO(experiment=exp,
                                                    data=exp.fetch_data()))
        self.assertEqual(len(plots), 7)

        exp = get_branin_experiment_with_timestamp_map_metric(
            with_status_quo=True)
        exp.new_trial().add_arm(exp.status_quo)
        exp.trials[0].run()
        exp.new_trial(generator_run=Models.SOBOL(
            search_space=exp.search_space).gen(n=1))
        exp.trials[1].run()
        plots = get_standard_plots(
            experiment=exp,
            model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
            true_objective_metric_name="branin",
        )

        self.assertEqual(len(plots), 9)
        self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
        self.assertIn(
            "Objective branin_map vs. True Objective Metric branin",
            [p.layout.title.text for p in plots],
        )

        with self.assertRaisesRegex(
                ValueError, "Please add a valid true_objective_metric_name"):
            plots = get_standard_plots(
                experiment=exp,
                model=Models.BOTORCH(experiment=exp, data=exp.fetch_data()),
                true_objective_metric_name="not_present",
            )
Example #25
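# Two snippets: a small threshold fixture, followed by a hypervolume test that patches the
# frontier evaluator and checks observed and predicted hypervolume.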
def get_objective_threshold() -> ObjectiveThreshold:
    return ObjectiveThreshold(
        metric=Metric(name="m1"), bound=-0.25, op=ComparisonOp.GEQ
    )

    def test_hypervolume(self):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=False)
        metrics_dict = exp.optimization_config.metrics
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metrics_dict["branin_a"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
            ObjectiveThreshold(
                metric=metrics_dict["branin_b"],
                bound=0.0,
                relative=False,
                op=ComparisonOp.GEQ,
            ),
        ]
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        optimization_config = exp.optimization_config.clone_with_args(
            objective_thresholds=objective_thresholds)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            objective_thresholds=objective_thresholds,
        )
        with patch(
                PARETO_FRONTIER_EVALUATOR_PATH,
                wraps=pareto_frontier_evaluator) as wrapped_frontier_evaluator:
            modelbridge.model.frontier_evaluator = wrapped_frontier_evaluator
            hv = modelbridge.observed_hypervolume(
                objective_thresholds=objective_thresholds)
            expected_hv = 25  # (5 - 0) * (5 - 0)
            wrapped_frontier_evaluator.assert_called_once()
            self.assertEqual(expected_hv, hv)

        with self.assertRaises(ValueError):
            modelbridge.predicted_hypervolume(
                objective_thresholds=objective_thresholds,
                observation_features=[])

        observation_features = [
            ObservationFeatures(parameters={"x1": 1.0, "x2": 2.0}),
            ObservationFeatures(parameters={"x1": 2.0, "x2": 1.0}),
        ]
        predicted_hv = modelbridge.predicted_hypervolume(
            objective_thresholds=objective_thresholds,
            observation_features=observation_features,
        )
        self.assertTrue(predicted_hv >= 0)