Example #1
 def setUp(self):
     self.minimize_metric = Metric(name="bar", lower_is_better=True)
     self.maximize_metric = Metric(name="baz", lower_is_better=False)
     self.bound = 0
     simple_metric = Metric(name="foo")
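     # Lower-bound constraint: feasible only when metric "foo" is >= self.bound.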
     self.constraint = OutcomeConstraint(metric=simple_metric,
                                         op=ComparisonOp.GEQ,
                                         bound=self.bound)
Example #2
 def setUp(self):
     self.metrics = {"m1": Metric(name="m1"), "m2": Metric(name="m2")}
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.alt_objective = Objective(metric=self.metrics["m2"],
                                    minimize=False)
     self.multi_objective = MultiObjective(
         metrics=[self.metrics["m1"], self.metrics["m2"]])
     self.m2_objective = ScalarizedObjective(
         metrics=[self.metrics["m1"], self.metrics["m2"]])
     self.outcome_constraint = OutcomeConstraint(metric=self.metrics["m2"],
                                                 op=ComparisonOp.GEQ,
                                                 bound=-0.25)
     self.additional_outcome_constraint = OutcomeConstraint(
         metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.25)
     self.outcome_constraints = [
         self.outcome_constraint,
         self.additional_outcome_constraint,
     ]
Example #3
File: test_utils.py Project: facebook/Ax
 def test_feasible_hypervolume(self):
     ma = Metric(name="a", lower_is_better=False)
     mb = Metric(name="b", lower_is_better=True)
     mc = Metric(name="c", lower_is_better=False)
     optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(metrics=[ma, mb]),
         outcome_constraints=[
             OutcomeConstraint(
                 mc,
                 op=ComparisonOp.GEQ,
                 bound=0,
                 relative=False,
             )
         ],
         objective_thresholds=[
             ObjectiveThreshold(
                 ma,
                 bound=1.0,
             ),
             ObjectiveThreshold(
                 mb,
                 bound=1.0,
             ),
         ],
     )
     feas_hv = feasible_hypervolume(
         optimization_config,
         values={
             "a": np.array(
                 [
                     1.0,
                     3.0,
                     2.0,
                     2.0,
                 ]
             ),
             "b": np.array(
                 [
                     0.0,
                     1.0,
                     0.0,
                     0.0,
                 ]
             ),
             "c": np.array(
                 [
                     0.0,
                     -0.0,
                     1.0,
                     -2.0,
                 ]
             ),
         },
     )
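     # Only points with c >= 0 satisfy the outcome constraint; against the
     # thresholds a >= 1.0 and b <= 1.0, the first two feasible points add no
     # hypervolume, and (a, b) = (2.0, 0.0) contributes 1.0.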
     self.assertEqual(list(feas_hv), [0.0, 0.0, 1.0, 1.0])
Example #4
    def testEq(self):
        threshold1 = ObjectiveThreshold(metric=self.minimize_metric,
                                        bound=self.bound)
        threshold2 = ObjectiveThreshold(metric=self.minimize_metric,
                                        bound=self.bound)
        self.assertEqual(threshold1, threshold2)

        constraint3 = OutcomeConstraint(metric=self.minimize_metric,
                                        op=ComparisonOp.LEQ,
                                        bound=self.bound)
        self.assertNotEqual(threshold1, constraint3)
Example #5
    def test_transform_ref_point(self, _mock_fit, _mock_predict, _mock_unwrap):
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=False)
        metrics = exp.optimization_config.objective.metrics
        ref_point = {metrics[0].name: 0.0, metrics[1].name: 0.0}
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNone(modelbridge._transformed_ref_point)
        exp = get_branin_experiment_with_multi_objective(
            has_optimization_config=True, with_batch=True)
        exp.attach_data(
            get_branin_data_multi_objective(trial_indices=exp.trials))
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=exp.optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point=ref_point,
        )
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(2, len(modelbridge._transformed_ref_point))

        mixed_objective_constraints_optimization_config = OptimizationConfig(
            objective=MultiObjective(
                metrics=[get_branin_metric(name="branin_b")], minimize=False),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric(name="branin_a"),
                                  op=ComparisonOp.LEQ,
                                  bound=1)
            ],
        )
        modelbridge = MultiObjectiveTorchModelBridge(
            search_space=exp.search_space,
            model=MultiObjectiveBotorchModel(),
            optimization_config=mixed_objective_constraints_optimization_config,
            transforms=[t1, t2],
            experiment=exp,
            data=exp.fetch_data(),
            ref_point={"branin_b": 0.0},
        )
        self.assertEqual({"branin_a", "branin_b"}, modelbridge._metric_names)
        self.assertEqual(["branin_b"], modelbridge._objective_metric_names)
        self.assertIsNotNone(modelbridge._transformed_ref_point)
        self.assertEqual(1, len(modelbridge._transformed_ref_point))
Example #6
 def testTransformOptimizationConfig(self):
     # basic test
     m1 = Metric(name="m1")
     objective = Objective(metric=m1, minimize=False)
     oc = OptimizationConfig(objective=objective, outcome_constraints=[])
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(oc, None, None)
     self.assertTrue(oc_tf == oc)
     # test error if transformed metric appears in outcome constraints
     m2 = Metric(name="m2")
     cons = [
         OutcomeConstraint(metric=m2,
                           op=ComparisonOp.GEQ,
                           bound=0.0,
                           relative=False)
     ]
     oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
     oc_tf = tf.transform_optimization_config(oc, None, None)
     self.assertTrue(oc_tf == oc)
     m2 = Metric(name="m2")
     cons = [
         OutcomeConstraint(metric=m2,
                           op=ComparisonOp.GEQ,
                           bound=0.0,
                           relative=False)
     ]
     oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
     tf2 = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m2"]},
     )
     with self.assertRaises(ValueError):
         tf2.transform_optimization_config(oc, None, None)
Example #7
File: instantiation.py Project: emailhy/Ax
def outcome_constraint_from_str(representation: str) -> OutcomeConstraint:
    """Parse string representation of an outcome constraint."""
    tokens = representation.split()
    assert len(tokens) == 3 and tokens[1] in COMPARISON_OPS, (
        "Outcome constraint should be of form `metric_name >= x`, where x is a "
        "float bound and comparison operator is >= or <=."
    )
    op = COMPARISON_OPS[tokens[1]]
    try:
        bound = float(tokens[2])
    except ValueError:
        raise ValueError("Outcome constraint bound should be a float.")
    return OutcomeConstraint(Metric(name=tokens[0]), op=op, bound=bound, relative=False)
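A minimal usage sketch (hypothetical metric name; this assumes COMPARISON_OPS maps ">=" to ComparisonOp.GEQ and "<=" to ComparisonOp.LEQ, as the error message above suggests):

constraint = outcome_constraint_from_str("some_metric >= 3.5")
# Equivalent to:
# OutcomeConstraint(Metric(name="some_metric"), op=ComparisonOp.GEQ, bound=3.5, relative=False)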
Example #8
    def testTransformOptimizationConfig(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        m3 = Metric(name="m3")
        objective = Objective(metric=m3, minimize=False)
        cons = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=2.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=3.5,
                              relative=False),
        ]
        oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
        oc = self.t.transform_optimization_config(oc, None, None)
        cons_t = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=1.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=4.0,
                              relative=False),
        ]
        self.assertTrue(oc.outcome_constraints == cons_t)
        self.assertTrue(oc.objective == objective)

        # Check fail with relative
        con = OutcomeConstraint(metric=m1,
                                op=ComparisonOp.GEQ,
                                bound=2.0,
                                relative=True)
        oc = OptimizationConfig(objective=objective, outcome_constraints=[con])
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, None)
Example #9
 def setUp(self):
     self.metrics = {
         "m1": Metric(name="m1", lower_is_better=True),
         "m2": Metric(name="m2", lower_is_better=False),
         "m3": Metric(name="m3", lower_is_better=False),
     }
     self.objectives = {
         "o1": Objective(metric=self.metrics["m1"]),
         "o2": Objective(metric=self.metrics["m2"], minimize=False),
         "o3": Objective(metric=self.metrics["m3"], minimize=False),
     }
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.multi_objective = MultiObjective(
         objectives=[self.objectives["o1"], self.objectives["o2"]])
     self.multi_objective_just_m2 = MultiObjective(
         objectives=[self.objectives["o2"]])
     self.outcome_constraint = OutcomeConstraint(metric=self.metrics["m2"],
                                                 op=ComparisonOp.GEQ,
                                                 bound=-0.25)
     self.additional_outcome_constraint = OutcomeConstraint(
         metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.25)
     self.outcome_constraints = [
         self.outcome_constraint,
         self.additional_outcome_constraint,
     ]
     self.objective_thresholds = [
         ObjectiveThreshold(metric=self.metrics["m2"],
                            bound=-1.0,
                            relative=False)
     ]
     self.m1_constraint = OutcomeConstraint(metric=self.metrics["m1"],
                                            op=ComparisonOp.LEQ,
                                            bound=0.1,
                                            relative=True)
     self.m3_constraint = OutcomeConstraint(metric=self.metrics["m3"],
                                            op=ComparisonOp.GEQ,
                                            bound=0.1,
                                            relative=True)
Example #10
File: decoder.py Project: HaysS/Ax
    def metric_from_sqa(
        self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""
        metric_class = REVERSE_METRIC_REGISTRY.get(metric_sqa.metric_type)
        if metric_class is None:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.metric_type} "
                f"is an invalid type."
            )

        args = self.get_init_args_from_properties(
            # pyre-fixme[6]: Expected `SQABase` for ...es` but got `SQAMetric`.
            object_sqa=metric_sqa,
            class_=metric_class,
        )
        metric = metric_class(**args)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            # pyre-fixme[6]: Expected `bool` for 2nd param but got `Optional[bool]`.
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (
                metric_sqa.bound is None
                or metric_sqa.op is None
                or metric_sqa.relative is None
            ):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None."
                )
            return OutcomeConstraint(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent."
            )
Example #11
    def test_best_raw_objective_point_unsatisfiable(self):
        exp = get_branin_experiment()
        trial = exp.new_trial(
            generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
        ).run()
        trial.mark_completed()

        opt_conf = exp.optimization_config.clone()
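        # The Branin function is strictly positive (global minimum ~0.398),
        # so this non-relative `branin <= 0` constraint is unsatisfiable.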
        opt_conf.outcome_constraints.append(
            OutcomeConstraint(
                metric=get_branin_metric(), op=ComparisonOp.LEQ, bound=0, relative=False
            )
        )

        with self.assertRaisesRegex(ValueError, "No points satisfied"):
            get_best_raw_objective_point(exp, opt_conf)
Example #12
    def testEq(self):
        config1 = OptimizationConfig(
            objective=self.objective, outcome_constraints=self.outcome_constraints
        )
        config2 = OptimizationConfig(
            objective=self.objective, outcome_constraints=self.outcome_constraints
        )
        self.assertEqual(config1, config2)

        new_outcome_constraint = OutcomeConstraint(
            metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.5
        )
        config3 = OptimizationConfig(
            objective=self.objective,
            outcome_constraints=[self.outcome_constraint, new_outcome_constraint],
        )
        self.assertNotEqual(config1, config3)
Example #13
    def testExperimentObjectiveThresholdUpdates(self):
        experiment = get_experiment_with_batch_trial()
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(),
                         len(experiment.metrics))

        # update objective threshold
        # (should perform update in place)
        optimization_config = get_multi_objective_optimization_config()
        objective_threshold = get_objective_threshold()
        optimization_config.objective_thresholds = [objective_threshold]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(), 6)

        # add outcome constraint
        outcome_constraint2 = OutcomeConstraint(metric=Metric(name="outcome"),
                                                op=ComparisonOp.GEQ,
                                                bound=-0.5)
        optimization_config.outcome_constraints = [
            optimization_config.outcome_constraints[0],
            outcome_constraint2,
        ]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(), 7)

        # remove outcome constraint
        # (old one should become tracking metric)
        optimization_config.outcome_constraints = []
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(), 5)

        loaded_experiment = load_experiment(experiment.name)
        self.assertEqual(experiment, loaded_experiment)

        # Optimization config should correctly reload even with no
        # objective_thresholds
        optimization_config.objective_thresholds = []
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(), 4)

        loaded_experiment = load_experiment(experiment.name)
        self.assertEqual(experiment, loaded_experiment)
Example #14
    def testHasGoodOptConfigModelFit(self):
        # Construct diagnostics
        result = []
        for i, obs in enumerate(self.training_data):
            result.append(
                CVResult(observed=obs, predicted=self.observation_data[i]))
        diag = compute_diagnostics(result=result)
        assess_model_fit_result = assess_model_fit(
            diagnostics=diag,
            significance_level=0.05,
        )

        # Test single objective
        optimization_config = OptimizationConfig(objective=Objective(
            metric=Metric("a")))
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)

        # Test multi objective
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=MultiObjective(
                metrics=[Metric("a"), Metric("b")]))
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)

        # Test constraints
        optimization_config = OptimizationConfig(
            objective=Objective(metric=Metric("a")),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric("b"),
                                  op=ComparisonOp.GEQ,
                                  bound=0.1)
            ],
        )
        has_good_fit = has_good_opt_config_model_fit(
            optimization_config=optimization_config,
            assess_model_fit_result=assess_model_fit_result,
        )
        self.assertFalse(has_good_fit)
Example #15
    def testExperimentOutcomeConstraintUpdates(self):
        experiment = get_experiment_with_batch_trial()
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        # update outcome constraint
        # (should perform update in place)
        optimization_config = get_optimization_config()
        outcome_constraint = get_outcome_constraint()
        outcome_constraint.bound = -1.0
        optimization_config.outcome_constraints = [outcome_constraint]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        # add outcome constraint
        outcome_constraint2 = OutcomeConstraint(
            metric=Metric(name="outcome"), op=ComparisonOp.GEQ, bound=-0.5
        )
        optimization_config.outcome_constraints = [
            outcome_constraint,
            outcome_constraint2,
        ]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        # remove outcome constraint
        # (old one should become tracking metric)
        optimization_config.outcome_constraints = [outcome_constraint]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        loaded_experiment = load_experiment(experiment.name)
        self.assertEqual(experiment, loaded_experiment)
Example #16
 def testErrors(self):
     t = Derelativize(search_space=None,
                      observation_features=None,
                      observation_data=None)
     oc = OptimizationConfig(
         objective=Objective(Metric("c")),
         outcome_constraints=[
             OutcomeConstraint(Metric("a"),
                               ComparisonOp.LEQ,
                               bound=2,
                               relative=True)
         ],
     )
     search_space = SearchSpace(
         parameters=[RangeParameter("x", ParameterType.FLOAT, 0, 20)])
     g = ModelBridge(search_space, None, [])
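     # Both calls should raise: the first passes no modelbridge, and the
     # second passes a modelbridge with no status quo, so the relative bound
     # cannot be converted to an absolute one.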
     with self.assertRaises(ValueError):
         t.transform_optimization_config(oc, None, None)
     with self.assertRaises(ValueError):
         t.transform_optimization_config(oc, g, None)
Example #17
 def test_transform_optimization_config_with_non_relative_constraints(self):
     relativize = Relativize(
         search_space=None,
         observation_features=[],
         observation_data=[],
         modelbridge=self.model,
     )
     optimization_config = get_branin_optimization_config()
     optimization_config.outcome_constraints = [
         OutcomeConstraint(
             metric=BraninMetric("b2", ["x2", "x1"]),
             op=ComparisonOp.GEQ,
             bound=-200.0,
             relative=False,
         )
     ]
     with self.assertRaisesRegex(ValueError, "All constraints must be relative"):
         relativize.transform_optimization_config(
             optimization_config=optimization_config,
             modelbridge=None,
             fixed_features=Mock(),
         )
Example #18
 def testRelativeConstraint(self):
     branin_rel = BenchmarkProblem(
         name="constrained_branin",
         fbest=0.397887,
         optimization_config=OptimizationConfig(
             objective=Objective(
                 metric=BraninMetric(name="branin_objective",
                                     param_names=["x1", "x2"],
                                     noise_sd=5.0),
                 minimize=True,
             ),
             outcome_constraints=[
                 OutcomeConstraint(
                     metric=L2NormMetric(
                         name="branin_constraint",
                         param_names=["x1", "x2"],
                         noise_sd=5.0,
                     ),
                     op=ComparisonOp.LEQ,
                     bound=5.0,
                     relative=True,
                 )
             ],
         ),
         search_space=get_branin_search_space(),
     )
     suite = BOBenchmarkingSuite()
     suite.run(
         num_runs=1,
         total_iterations=5,
         bo_strategies=[
             GenerationStrategy(
                 [GenerationStep(model=Models.SOBOL, num_arms=5)])
         ],
         bo_problems=[branin_rel],
     )
     with self.assertRaises(ValueError):
         suite.generate_report()
Example #19
 def testLowerBound(self):
     branin_lb = BenchmarkProblem(
         name="constrained_branin",
         fbest=0.397887,
         optimization_config=OptimizationConfig(
             objective=Objective(
                 metric=BraninMetric(name="branin_objective",
                                     param_names=["x1", "x2"],
                                     noise_sd=5.0),
                 minimize=True,
             ),
             outcome_constraints=[
                 OutcomeConstraint(
                     metric=L2NormMetric(
                         name="branin_constraint",
                         param_names=["x1", "x2"],
                         noise_sd=5.0,
                     ),
                     op=ComparisonOp.GEQ,
                     bound=5.0,
                     relative=False,
                 )
             ],
         ),
         search_space=get_branin_search_space(),
     )
     suite = BOBenchmarkingSuite()
     suite.run(
         num_runs=1,
         batch_size=2,
         total_iterations=4,
         bo_strategies=[
             GenerationStrategy(
                 [GenerationStep(model=Models.SOBOL, num_arms=5)])
         ],
         bo_problems=[branin_lb],
     )
     suite.generate_report(include_individual=True)
Example #20
    def test_best_raw_objective_point_unsatisfiable_relative(self):
        exp = get_branin_experiment()

        # Optimization config with unsatisfiable constraint
        opt_conf = exp.optimization_config.clone()
        opt_conf.outcome_constraints.append(
            OutcomeConstraint(
                metric=get_branin_metric(),
                op=ComparisonOp.GEQ,
                bound=9999,
                relative=True,
            ))

        trial = exp.new_trial(generator_run=GeneratorRun(
            arms=[Arm(parameters={
                "x1": 5.0,
                "x2": 5.0
            })])).run()
        trial.mark_completed()

        with self.assertLogs(logger="ax.service.utils.best_point",
                             level="WARN") as lg:
            get_best_raw_objective_point(exp, opt_conf)
            self.assertTrue(
                any("No status quo provided" in warning
                    for warning in lg.output),
                msg=lg.output,
            )

        exp.status_quo = Arm(parameters={"x1": 0, "x2": 0}, name="status_quo")
        sq_trial = exp.new_trial(generator_run=GeneratorRun(
            arms=[exp.status_quo])).run()
        sq_trial.mark_completed()

        with self.assertRaisesRegex(ValueError, "No points satisfied"):
            get_best_raw_objective_point(exp, opt_conf)
Example #21
 def test_create_experiment(self) -> None:
     """Test basic experiment creation."""
     ax_client = AxClient(
         GenerationStrategy(
             steps=[GenerationStep(model=Models.SOBOL, num_trials=30)]))
     with self.assertRaisesRegex(ValueError,
                                 "Experiment not set on Ax client"):
         ax_client.experiment
     ax_client.create_experiment(
         name="test_experiment",
         parameters=[
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [0.001, 0.1],
                 "value_type": "float",
                 "log_scale": True,
             },
             {
                 "name": "y",
                 "type": "choice",
                 "values": [1, 2, 3],
                 "value_type": "int",
                 "is_ordered": True,
             },
             {
                 "name": "x3",
                 "type": "fixed",
                 "value": 2,
                 "value_type": "int"
             },
             {
                 "name": "x4",
                 "type": "range",
                 "bounds": [1.0, 3.0],
                 "value_type": "int",
             },
             {
                 "name": "x5",
                 "type": "choice",
                 "values": ["one", "two", "three"],
                 "value_type": "str",
             },
             {
                 "name": "x6",
                 "type": "range",
                 "bounds": [1.0, 3.0],
                 "value_type": "int",
             },
         ],
         objective_name="test_objective",
         minimize=True,
         outcome_constraints=["some_metric >= 3", "some_metric <= 4.0"],
         parameter_constraints=["x4 <= x6"],
     )
     assert ax_client._experiment is not None
     self.assertEqual(ax_client._experiment, ax_client.experiment)
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x"],
         RangeParameter(
             name="x",
             parameter_type=ParameterType.FLOAT,
             lower=0.001,
             upper=0.1,
             log_scale=True,
         ),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["y"],
         ChoiceParameter(
             name="y",
             parameter_type=ParameterType.INT,
             values=[1, 2, 3],
             is_ordered=True,
         ),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x3"],
         FixedParameter(name="x3",
                        parameter_type=ParameterType.INT,
                        value=2),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x4"],
         RangeParameter(name="x4",
                        parameter_type=ParameterType.INT,
                        lower=1.0,
                        upper=3.0),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x5"],
         ChoiceParameter(
             name="x5",
             parameter_type=ParameterType.STRING,
             values=["one", "two", "three"],
         ),
     )
     self.assertEqual(
         ax_client._experiment.optimization_config.outcome_constraints[0],
         OutcomeConstraint(
             metric=Metric(name="some_metric"),
             op=ComparisonOp.GEQ,
             bound=3.0,
             relative=False,
         ),
     )
     self.assertEqual(
         ax_client._experiment.optimization_config.outcome_constraints[1],
         OutcomeConstraint(
             metric=Metric(name="some_metric"),
             op=ComparisonOp.LEQ,
             bound=4.0,
             relative=False,
         ),
     )
     self.assertTrue(
         ax_client._experiment.optimization_config.objective.minimize)
Example #22
File: core_stubs.py Project: tangzhenyu/ax
def get_branin_outcome_constraint() -> OutcomeConstraint:
    return OutcomeConstraint(metric=get_branin_metric(), op=ComparisonOp.LEQ, bound=0)
Example #23
File: core_stubs.py Project: tangzhenyu/ax
def get_outcome_constraint() -> OutcomeConstraint:
    return OutcomeConstraint(metric=Metric(name="m2"), op=ComparisonOp.GEQ, bound=-0.25)
Example #24
    def metric_from_sqa(
            self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""

        metric = self.metric_from_sqa_util(metric_sqa)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            if metric_sqa.scalarized_objective_weight is not None:
                raise SQADecodeError(  # pragma: no cover
                    "The metric corresponding to regular objective does not \
                    have weight attribute")
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE:
            # metric_sqa is a parent whose children are individual
            # metrics in MultiObjective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because minimize is None.")
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics = [
                self.metric_from_sqa_util(child)
                for child in metrics_sqa_children
            ]

            return MultiObjective(
                metrics=list(metrics),
                # pyre-fixme[6]: Expected `bool` for 2nd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE:
            # metric_sqa is a parent whose children are individual
            # metrics in Scalarized Objective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because minimize is None.")
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_objective_weight,
            ) for child in metrics_sqa_children])
            return ScalarizedObjective(
                metrics=list(metrics),
                weights=list(weights),
                # pyre-fixme[6]: Expected `bool` for 3rd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None.")
            return OutcomeConstraint(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                    "bound, op, or relative is None.")
            metrics_sqa_children = (
                metric_sqa.scalarized_outcome_constraint_children_metrics)
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_outcome_constraint_weight,
            ) for child in metrics_sqa_children])
            return ScalarizedOutcomeConstraint(
                metrics=list(metrics),
                weights=list(weights),
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )

        elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
            if metric_sqa.bound is None or metric_sqa.relative is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ObjectiveThreshold because "
                    "bound, op, or relative is None.")
            return ObjectiveThreshold(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                relative=metric_sqa.relative,
                op=metric_sqa.op,
            )
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent.")
Example #25
    def testGen(self, mock_init, mock_best_point, mock_gen):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = NumpyModelBridge()
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        ma.transforms = OrderedDict()
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertTrue(
            np.array_equal(
                gen_args["linear_constraints"][0],
                np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
            ))
        self.assertTrue(
            np.array_equal(gen_args["linear_constraints"][1],
                           np.array([[0.0], [-3.5]])))
        self.assertEqual(gen_args["fixed_features"], {2: 3.0})
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][0], np.array([])))
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][1],
                           np.array([[0.6, 1.6, 3.0]])))
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 1.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 3.0,
            "y": 4.0,
            "z": 3.0
        })
        self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))

        # Test with multiple objectives.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="a"), Metric(name="b")], minimize=True))
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=oc2,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[1][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"],
                           np.array([-1.0, -1.0])))

        # Test with MultiObjective (unweighted multiple objectives)
        oc3 = MultiObjectiveOptimizationConfig(objective=MultiObjective(
            metrics=[Metric(name="a"),
                     Metric(name="b", lower_is_better=True)],
            minimize=True,
        ))
        search_space = SearchSpace(self.parameters)  # Unconstrained
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=oc3,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[2][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([1.0,
                                                                    -1.0])))

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(3, search_space, {}, ObservationFeatures({}), None,
                optimization_config)
        gen_args = mock_gen.mock_calls[3][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["linear_constraints"])
        self.assertIsNone(gen_args["fixed_features"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
        optimization_config.objective.minimize = True
        optimization_config.outcome_constraints[0].relative = True
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
Example #26
    def testConstraintValidation(self):
        # Can't constrain on objective metric.
        objective_constraint = OutcomeConstraint(
            metric=self.objective.metric, op=ComparisonOp.GEQ, bound=0
        )
        with self.assertRaises(ValueError):
            OptimizationConfig(
                objective=self.objective, outcome_constraints=[objective_constraint]
            )

        # Two outcome_constraints on the same metric with the same op
        # should raise.
        duplicate_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=self.outcome_constraint.op,
            bound=self.outcome_constraint.bound + 1,
        )
        with self.assertRaises(ValueError):
            OptimizationConfig(
                objective=self.objective,
                outcome_constraints=[self.outcome_constraint, duplicate_constraint],
            )

        # Three outcome_constraints on the same metric should raise.
        opposing_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=not self.outcome_constraint.op,
            bound=self.outcome_constraint.bound,
        )
        with self.assertRaises(ValueError):
            OptimizationConfig(
                objective=self.objective,
                outcome_constraints=self.outcome_constraints + [opposing_constraint],
            )

        # Two outcome_constraints on the same metric with different ops and
        # flipped bounds (lower bound > upper bound) should raise.
        add_bound = 1 if self.outcome_constraint.op == ComparisonOp.LEQ else -1
        opposing_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=not self.outcome_constraint.op,
            bound=self.outcome_constraint.bound + add_bound,
        )
        with self.assertRaises(ValueError):
            OptimizationConfig(
                objective=self.objective,
                outcome_constraints=([self.outcome_constraint, opposing_constraint]),
            )

        # Two outcome_constraints on the same metric with different ops and
        # bounds should not raise.
        opposing_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=not self.outcome_constraint.op,
            bound=self.outcome_constraint.bound + 1,
        )
        config = OptimizationConfig(
            objective=self.objective,
            outcome_constraints=([self.outcome_constraint, opposing_constraint]),
        )
        self.assertEqual(
            config.outcome_constraints, [self.outcome_constraint, opposing_constraint]
        )
Example #27
    def setUp(self):
        self.df = pd.DataFrame([
            {
                "arm_name": "0_0",
                "mean": 2.0,
                "sem": 0.2,
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_0",
                "mean": 1.8,
                "sem": 0.3,
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_1",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_1",
                "mean": 3.7,
                "sem": 0.5,
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": 0.5,
                "sem": None,
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "c",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
        ])

        self.data = Data(df=self.df)

        self.optimization_config = OptimizationConfig(
            objective=Objective(metric=Metric(name="a")),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric(name="b"),
                                  op=ComparisonOp.GEQ,
                                  bound=0)
            ],
        )
Example #28
    def testGen(self, mock_init):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = DiscreteModelBridge()
        model = mock.MagicMock(DiscreteModel, autospec=True, instance=True)
        model.gen.return_value = (
            [[0.0, 2.0, 3.0], [1.0, 1.0, 3.0]],
            [1.0, 2.0],
        )
        ma.model = model
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        observation_features, weights, best_observation = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = model.gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["parameter_values"],
                         [[0.0, 1.0], ["foo", "bar"], [True]])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertEqual(gen_args["pending_observations"][0], [])
        self.assertEqual(gen_args["pending_observations"][1],
                         [[0, "foo", True]])
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 0.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 1.0,
            "y": 1.0,
            "z": 3.0
        })
        self.assertEqual(weights, [1.0, 2.0])

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=optimization_config,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options={},
        )
        gen_args = model.gen.mock_calls[1][2]
        self.assertEqual(gen_args["parameter_values"],
                         [[0.0, 1.0], ["foo", "bar"]])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, True)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
                model_gen_options={},
            )
Example #29
File: decoder.py Project: Balandat/Ax
    def metric_from_sqa(
            self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""

        metric = self.metric_from_sqa_util(metric_sqa)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            if metric_sqa.scalarized_objective_weight is not None:
                raise SQADecodeError(  # pragma: no cover
                    "The metric corresponding to regular objective does not \
                    have weight attribute")
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE:
            # metric_sqa is a parent whose children are individual
            # metrics in MultiObjective
            try:
                metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            except DetachedInstanceError:
                metrics_sqa_children = _get_scalarized_objective_children_metrics(
                    metric_id=metric_sqa.id, decoder=self)

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            objectives = [
                Objective(
                    metric=self.metric_from_sqa_util(metric_sqa),
                    minimize=metric_sqa.minimize,
                ) for metric_sqa in metrics_sqa_children
            ]

            multi_objective = MultiObjective(objectives=objectives)
            multi_objective.db_id = metric_sqa.id
            return multi_objective
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE:
            # metric_sqa is a parent whose children are individual
            # metrics in Scalarized Objective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because minimize is None.")

            try:
                metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            except DetachedInstanceError:
                metrics_sqa_children = _get_scalarized_objective_children_metrics(
                    metric_id=metric_sqa.id, decoder=self)

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_objective_weight,
            ) for child in metrics_sqa_children])
            scalarized_objective = ScalarizedObjective(
                metrics=list(metrics),
                weights=list(weights),
                minimize=not_none(metric_sqa.minimize),
            )
            scalarized_objective.db_id = metric_sqa.id
            return scalarized_objective
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None.")
            return OutcomeConstraint(
                metric=metric,
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                    "bound, op, or relative is None.")

            try:
                metrics_sqa_children = (
                    metric_sqa.scalarized_outcome_constraint_children_metrics)
            except DetachedInstanceError:
                metrics_sqa_children = (
                    _get_scalarized_outcome_constraint_children_metrics(
                        metric_id=metric_sqa.id, decoder=self))

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_outcome_constraint_weight,
            ) for child in metrics_sqa_children])
            scalarized_outcome_constraint = ScalarizedOutcomeConstraint(
                metrics=list(metrics),
                weights=list(weights),
                bound=not_none(metric_sqa.bound),
                op=not_none(metric_sqa.op),
                relative=not_none(metric_sqa.relative),
            )
            scalarized_outcome_constraint.db_id = metric_sqa.id
            return scalarized_outcome_constraint
        elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
            if metric_sqa.bound is None or metric_sqa.relative is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ObjectiveThreshold because "
                    "bound, op, or relative is None.")
            ot = ObjectiveThreshold(
                metric=metric,
                bound=metric_sqa.bound,
                relative=metric_sqa.relative,
                op=metric_sqa.op,
            )
            # ObjectiveThreshold constructor clones the passed-in metric, which means
            # the db id gets lost and so we need to reset it
            ot.metric._db_id = metric.db_id
            return ot
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent.")
Example #30
    def testDerelativizeTransform(self, mock_predict, mock_fit,
                                  mock_observations_from_data):
        t = Derelativize(search_space=None,
                         observation_features=None,
                         observation_data=None)

        # ModelBridge with in-design status quo
        search_space = SearchSpace(parameters=[
            RangeParameter("x", ParameterType.FLOAT, 0, 20),
            RangeParameter("y", ParameterType.FLOAT, 0, 20),
        ])
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_1",
        )

        # Test with no relative constraints
        objective = Objective(Metric("c"))
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False)
            ],
        )
        oc2 = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc == oc2)

        # Test with relative constraint, in-design status quo
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=4.5, relative=False),
        ])
        obsf = mock_predict.mock_calls[0][1][1][0]
        obsf2 = ObservationFeatures(parameters={"x": 2.0, "y": 10.0})
        self.assertTrue(obsf == obsf2)

        # Test with relative constraint, out-of-design status quo
        mock_predict.side_effect = Exception()
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_2",
        )
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=3.6, relative=False),
        ])
        self.assertEqual(mock_predict.call_count, 2)

        # Raises error if predict fails with in-design status quo
        g = ModelBridge(search_space, None, [], status_quo_name="1_1")
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        with self.assertRaises(Exception):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no status quo
        exp = Experiment(search_space, "name")
        g = ModelBridge(search_space, None, [], exp)
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no modelbridge
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, None, None)