Example #1
def get_augmented_hartmann_objective() -> Objective:
    return Objective(metric=get_augmented_hartmann_metric(), minimize=False)
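
A minimal sketch (not part of the original example) showing how such an objective is typically wrapped in an OptimizationConfig; the plain Metric below is only a stand-in for get_augmented_hartmann_metric():

from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig

# Maximize the (stand-in) augmented Hartmann metric.
objective = Objective(metric=Metric(name="aug_hartmann"), minimize=False)
optimization_config = OptimizationConfig(objective=objective)
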
Example #2
    def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [t1, t2]
        exp = get_experiment()
        modelbridge = ModelBridge(search_space_for_value(), 0, transforms, exp, 0)
        self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(fit_args["search_space"] == search_space_for_value(8.0))
        self.assertTrue(
            fit_args["observation_features"]
            == [observation1trans().features, observation2trans().features]
        )
        self.assertTrue(
            fit_args["observation_data"]
            == [observation1trans().data, observation2trans().data]
        )
        self.assertTrue(mock_observations_from_data.called)

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[observation2trans().data],
        )

        modelbridge.predict([observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([observation1trans().features], [2], None),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
        modelbridge._set_kwargs_to_save(
            model_key="TestModel", model_kwargs={}, bridge_kwargs={}
        )
        gr = modelbridge.gen(
            n=1,
            search_space=search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={}, observation_features=[observation1().features]
        )

        # Gen with no pending observations and no fixed features
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=None
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(
            objective=ScalarizedObjective(
                metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
            )
        )
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=oc2
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[observation1trans().data],
        )
        cv_training_data = [observation2()]
        cv_test_points = [observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points
        )
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[observation2trans().features],
            obs_data=[observation2trans().data],
            cv_test_points=[observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [observation1(), observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(modelbridge.model_space == search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [True, True])

        modelbridge.training_in_design = [True, False]
        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        ood_obs = modelbridge.out_of_design_data()
        self.assertTrue(ood_obs == unwrap_observation_data([observation2().data]))
Example #3
    def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [transform_1, transform_2]
        exp = get_experiment_for_value()
        ss = get_search_space_for_value()
        modelbridge = ModelBridge(
            search_space=ss,
            model=Model(),
            transforms=transforms,
            experiment=exp,
            data=0,
        )
        self.assertFalse(
            modelbridge._experiment_has_immutable_search_space_and_opt_config
        )
        self.assertEqual(
            list(modelbridge.transforms.keys()), ["Cast", "transform_1", "transform_2"]
        )
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(fit_args["search_space"] == get_search_space_for_value(8.0))
        self.assertTrue(fit_args["observation_features"] == [])
        self.assertTrue(fit_args["observation_data"] == [])
        self.assertTrue(mock_observations_from_data.called)

        # Test prediction on out of design features.
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            side_effect=ValueError("Out of Design"),
        )
        # This point is in design, and thus failures in predict are legitimate.
        with mock.patch.object(
            ModelBridge, "model_space", return_value=get_search_space_for_range_values
        ):
            with self.assertRaises(ValueError):
                modelbridge.predict([get_observation2().features])

        # This point is out of design, and not in training data.
        with self.assertRaises(ValueError):
            modelbridge.predict([get_observation_status_quo0().features])

        # Now it's in the training data.
        with mock.patch.object(
            ModelBridge,
            "get_training_data",
            return_value=[get_observation_status_quo0()],
        ):
            # Return raw training value.
            self.assertEqual(
                modelbridge.predict([get_observation_status_quo0().features]),
                unwrap_observation_data([get_observation_status_quo0().data]),
            )

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[get_observation2trans().data],
        )
        modelbridge.predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Check that _single_predict is equivalent here.
        modelbridge._single_predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([get_observation1trans().features], [2], None, {}),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
        modelbridge._set_kwargs_to_save(
            model_key="TestModel", model_kwargs={}, bridge_kwargs={}
        )
        gr = modelbridge.gen(
            n=1,
            search_space=get_search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [get_observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [get_observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={}, observation_features=[get_observation1().features]
        )

        # Gen with no pending observations and no fixed features
        modelbridge.gen(
            n=1, search_space=get_search_space_for_value(), optimization_config=None
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(
            objective=ScalarizedObjective(
                metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
            )
        )
        modelbridge.gen(
            n=1, search_space=get_search_space_for_value(), optimization_config=oc2
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[get_observation1trans().data],
        )
        cv_training_data = [get_observation2()]
        cv_test_points = [get_observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points
        )
        modelbridge._cross_validate.assert_called_with(
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            obs_feats=[get_observation2trans().features],
            obs_data=[get_observation2trans().data],
            cv_test_points=[get_observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [get_observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [get_observation1(), get_observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(modelbridge.model_space == get_search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [False, False])

        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        # Test feature_importances
        with self.assertRaises(NotImplementedError):
            modelbridge.feature_importances("a")

        # Test transform observation features
        with mock.patch(
            "ax.modelbridge.base.ModelBridge._transform_observation_features",
            autospec=True,
        ) as mock_tr:
            modelbridge.transform_observation_features([get_observation2().features])
        mock_tr.assert_called_with(modelbridge, [get_observation2trans().features])
Example #4
 def testTransformOptimizationConfig(self):
     # basic test
     m1 = Metric(name="m1")
     objective_m1 = Objective(metric=m1, minimize=False)
     oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[])
     tf = PowerTransformY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     self.assertEqual(oc_tf, oc)
     # Output constraint on a different metric should not transform the bound
     m2 = Metric(name="m2")
     for bound in [-1.234, 0, 2.345]:
         oc = OptimizationConfig(
             objective=objective_m1,
             outcome_constraints=get_constraint(
                 metric=m2, bound=bound, relative=False
             ),
         )
         oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
         self.assertEqual(oc_tf, oc)
     # Output constraint on the same metric should transform the bound
     objective_m2 = Objective(metric=m2, minimize=False)
     for bound in [-1.234, 0, 2.345]:
         oc = OptimizationConfig(
             objective=objective_m2,
             outcome_constraints=get_constraint(
                 metric=m1, bound=bound, relative=False
             ),
         )
         oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
         oc_true = deepcopy(oc)
         tf_bound = (
             tf.power_transforms["m1"].transform(np.array(bound, ndmin=2)).item()
         )
         oc_true.outcome_constraints[0].bound = tf_bound
         self.assertEqual(oc_tf, oc_true)
     # Relative constraints aren't supported
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=get_constraint(metric=m1, bound=2.345, relative=True),
     )
     with self.assertRaisesRegex(
         ValueError,
         "PowerTransformY cannot be applied to metric m1 since it is "
         "subject to a relative constraint.",
     ):
         tf.transform_optimization_config(oc, None, None)
     # Support for scalarized outcome constraints isn't implemented
     m3 = Metric(name="m3")
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=[
             ScalarizedOutcomeConstraint(
                 metrics=[m1, m3], op=ComparisonOp.GEQ, bound=2.345, relative=False
             )
         ],
     )
     with self.assertRaises(NotImplementedError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "PowerTransformY cannot be used for metric(s) {'m1'} "
         "that are part of a ScalarizedOutcomeConstraint.",
         str(cm.exception),
     )
Example #5
        search_space: search space, on which this problem is defined
    """

    name: str
    fbest: float
    optimization_config: OptimizationConfig
    search_space: SearchSpace


# Branin problems
branin = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmin,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=BraninMetric(
            name="branin_objective", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=True,
    )),
    search_space=get_branin_search_space(),
)

branin_max = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmax,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=NegativeBraninMetric(
            name="neg_branin", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=False,
    )),
    search_space=get_branin_search_space(),
)
Example #6
 def setUp(self):
     self.metrics = {"m1": Metric(name="m1"), "m2": Metric(name="m2")}
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.multi_objective = ScalarizedObjective(
         metrics=[self.metrics["m1"], self.metrics["m2"]]
     )
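
A small sketch (assumed, not from the example) of the same ScalarizedObjective with explicit weights; ScalarizedObjective combines several metrics into a single weighted objective:

from ax.core.metric import Metric
from ax.core.objective import ScalarizedObjective

# Weighted combination of m1 and m2 (the weights are illustrative).
weighted_objective = ScalarizedObjective(
    metrics=[Metric(name="m1"), Metric(name="m2")],
    weights=[0.7, 0.3],
    minimize=False,
)
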
Example #7
def get_multi_objective() -> Objective:
    return MultiObjective(objectives=[
        Objective(metric=Metric(name="m1")),
        Objective(metric=Metric(name="m3", lower_is_better=True),
                  minimize=True),
    ], )
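
A hedged sketch of how a MultiObjective like the one above can be paired with objective thresholds in a MultiObjectiveOptimizationConfig (the bounds are illustrative; the pattern mirrors Example #19 further below):

from ax.core.metric import Metric
from ax.core.objective import MultiObjective, Objective
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
from ax.core.outcome_constraint import ObjectiveThreshold
from ax.core.types import ComparisonOp

multi_objective = MultiObjective(objectives=[
    Objective(metric=Metric(name="m1")),
    Objective(metric=Metric(name="m3", lower_is_better=True), minimize=True),
])
optimization_config = MultiObjectiveOptimizationConfig(
    objective=multi_objective,
    objective_thresholds=[
        # m1 is maximized, so its threshold is a lower bound; m3 is minimized.
        ObjectiveThreshold(metric=Metric(name="m1"), op=ComparisonOp.GEQ, bound=1.0, relative=False),
        ObjectiveThreshold(metric=Metric(name="m3"), op=ComparisonOp.LEQ, bound=2.0, relative=False),
    ],
)
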
Example #8
    def test_extract_objective_thresholds(self):
        outcomes = ["m1", "m2", "m3", "m4"]
        objective = MultiObjective(objectives=[
            Objective(metric=Metric(name)) for name in outcomes[:3]
        ])
        objective_thresholds = [
            ObjectiveThreshold(
                metric=Metric(name),
                op=ComparisonOp.LEQ,
                bound=float(i + 2),
                relative=False,
            ) for i, name in enumerate(outcomes[:3])
        ]

        # None if no thresholds are provided
        self.assertIsNone(
            extract_objective_thresholds(objective_thresholds=[],
                                         objective=objective,
                                         outcomes=outcomes))

        # Working case
        obj_t = extract_objective_thresholds(
            objective_thresholds=objective_thresholds,
            objective=objective,
            outcomes=outcomes,
        )
        expected_obj_t_not_nan = np.array([2.0, 3.0, 4.0])
        self.assertTrue(np.array_equal(obj_t[:3], expected_obj_t_not_nan[:3]))
        self.assertTrue(np.isnan(obj_t[-1]))
        self.assertEqual(obj_t.shape[0], 4)

        # Fails if threshold not provided for all objective metrics
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds[:2],
                objective=objective,
                outcomes=outcomes,
            )

        # Fails if number of thresholds doesn't equal number of objectives
        objective2 = Objective(Metric("m1"))
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds,
                objective=objective2,
                outcomes=outcomes,
            )

        # Works with a single objective, single threshold
        obj_t = extract_objective_thresholds(
            objective_thresholds=objective_thresholds[:1],
            objective=objective2,
            outcomes=outcomes,
        )
        self.assertEqual(obj_t[0], 2.0)
        self.assertTrue(np.all(np.isnan(obj_t[1:])))
        self.assertEqual(obj_t.shape[0], 4)

        # Fails if relative
        objective_thresholds[2] = ObjectiveThreshold(metric=Metric("m3"),
                                                     op=ComparisonOp.LEQ,
                                                     bound=3)
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds,
                objective=objective,
                outcomes=outcomes,
            )
        objective_thresholds[2] = ObjectiveThreshold(metric=Metric("m3"),
                                                     op=ComparisonOp.LEQ,
                                                     bound=3,
                                                     relative=True)
        with self.assertRaises(ValueError):
            extract_objective_thresholds(
                objective_thresholds=objective_thresholds,
                objective=objective,
                outcomes=outcomes,
            )
Example #9
    def test_REMBOStrategy(self, mock_fit_gpytorch_model, mock_optimize_acqf):
        # Construct a high-D test experiment with multiple metrics
        hartmann_search_space = SearchSpace(
            parameters=[
                RangeParameter(
                    name=f"x{i}",
                    parameter_type=ParameterType.FLOAT,
                    lower=0.0,
                    upper=1.0,
                )
                for i in range(20)
            ]
        )

        exp = Experiment(
            name="test",
            search_space=hartmann_search_space,
            optimization_config=OptimizationConfig(
                objective=Objective(
                    metric=Hartmann6Metric(
                        name="hartmann6", param_names=[f"x{i}" for i in range(6)]
                    ),
                    minimize=True,
                ),
                outcome_constraints=[
                    OutcomeConstraint(
                        metric=L2NormMetric(
                            name="l2norm",
                            param_names=[f"x{i}" for i in range(6)],
                            noise_sd=0.2,
                        ),
                        op=ComparisonOp.LEQ,
                        bound=1.25,
                        relative=False,
                    )
                ],
            ),
            runner=SyntheticRunner(),
        )

        # Instantiate the strategy
        gs = REMBOStrategy(D=20, d=6, k=4, init_per_proj=4)

        # Check that arms and data are correctly segmented by projection
        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
        self.assertEqual(len(gs.arms_by_proj[0]), 2)
        self.assertEqual(len(gs.arms_by_proj[1]), 0)

        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()

        self.assertEqual(len(gs.arms_by_proj[0]), 2)
        self.assertEqual(len(gs.arms_by_proj[1]), 2)

        # Iterate until the first projection fits a GP
        for _ in range(4):
            exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
            mock_fit_gpytorch_model.assert_not_called()

        self.assertEqual(len(gs.arms_by_proj[0]), 4)
        self.assertEqual(len(gs.arms_by_proj[1]), 4)
        self.assertEqual(len(gs.arms_by_proj[2]), 2)
        self.assertEqual(len(gs.arms_by_proj[3]), 2)

        # Keep iterating until GP is used for gen
        for i in range(4):
            # First two trials will go towards 3rd and 4th proj. getting enough
            if i < 1:  # data for GP.
                self.assertLess(len(gs.arms_by_proj[2]), 4)
            if i < 2:
                self.assertLess(len(gs.arms_by_proj[3]), 4)

            exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
            if i < 2:
                mock_fit_gpytorch_model.assert_not_called()
            else:
                # After all proj. have > 4 arms' worth of data, GP can be fit.
                self.assertFalse(any(len(x) < 4 for x in gs.arms_by_proj.values()))
                mock_fit_gpytorch_model.assert_called()

        self.assertTrue(len(gs.model_transitions) > 0)
        gs2 = gs.clone_reset()
        self.assertEqual(gs2.D, 20)
        self.assertEqual(gs2.d, 6)
Example #10
    def testDerelativizeTransform(self, mock_predict, mock_fit,
                                  mock_observations_from_data):
        t = Derelativize(search_space=None,
                         observation_features=None,
                         observation_data=None)

        # ModelBridge with in-design status quo
        search_space = SearchSpace(parameters=[
            RangeParameter("x", ParameterType.FLOAT, 0, 20),
            RangeParameter("y", ParameterType.FLOAT, 0, 20),
        ])
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_1",
        )

        # Test with no relative constraints
        objective = Objective(Metric("c"))
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                ScalarizedOutcomeConstraint(
                    metrics=[Metric("a"), Metric("b")],
                    op=ComparisonOp.LEQ,
                    bound=2,
                    weights=[0.5, 0.5],
                    relative=False,
                ),
            ],
        )
        oc2 = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc == oc2)

        # Test with relative constraint, in-design status quo
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
                ScalarizedOutcomeConstraint(
                    metrics=[Metric("a"), Metric("b")],
                    weights=[0.0, 1.0],
                    op=ComparisonOp.LEQ,
                    bound=-10,
                    relative=True,
                ),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=4.5, relative=False),
            ScalarizedOutcomeConstraint(
                metrics=[Metric("a"), Metric("b")],
                weights=[0.0, 1.0],
                op=ComparisonOp.LEQ,
                bound=4.5,
                relative=False,
            ),
        ])
        obsf = mock_predict.mock_calls[0][1][1][0]
        obsf2 = ObservationFeatures(parameters={"x": 2.0, "y": 10.0})
        self.assertTrue(obsf == obsf2)

        # Test with relative constraint, out-of-design status quo
        mock_predict.side_effect = RuntimeError()
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_2",
        )
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
                ScalarizedOutcomeConstraint(
                    metrics=[Metric("a"), Metric("b")],
                    weights=[0.0, 1.0],
                    op=ComparisonOp.LEQ,
                    bound=-10,
                    relative=True,
                ),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=3.6, relative=False),
            ScalarizedOutcomeConstraint(
                metrics=[Metric("a"), Metric("b")],
                weights=[0.0, 1.0],
                op=ComparisonOp.LEQ,
                bound=3.6,
                relative=False,
            ),
        ])
        self.assertEqual(mock_predict.call_count, 2)

        # Raises error if predict fails with in-design status quo
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_1",
        )
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        with self.assertRaises(RuntimeError):
            oc = t.transform_optimization_config(oc, g, None)

        # Bypasses error if use_raw_sq
        t2 = Derelativize(
            search_space=None,
            observation_features=None,
            observation_data=None,
            config={"use_raw_status_quo": True},
        )
        oc2 = t2.transform_optimization_config(deepcopy(oc), g, None)

        # Raises error with relative constraint, no status quo
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
        )
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no modelbridge
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, None, None)
Example #11
    def testGen(self, mock_init):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = DiscreteModelBridge()
        model = mock.MagicMock(DiscreteModel, autospec=True, instance=True)
        model.gen.return_value = ([[0.0, 2.0, 3.0], [1.0, 1.0,
                                                     3.0]], [1.0, 2.0], {})
        ma.model = model
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        observation_features, weights, best_observation, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = model.gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["parameter_values"],
                         [[0.0, 1.0], ["foo", "bar"], [True]])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertEqual(gen_args["pending_observations"][0], [])
        self.assertEqual(gen_args["pending_observations"][1],
                         [[0, "foo", True]])
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 0.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 1.0,
            "y": 1.0,
            "z": 3.0
        })
        self.assertEqual(weights, [1.0, 2.0])

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=optimization_config,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options={},
        )
        gen_args = model.gen.mock_calls[1][2]
        self.assertEqual(gen_args["parameter_values"],
                         [[0.0, 1.0], ["foo", "bar"]])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, True)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
                model_gen_options={},
            )
Example #12
 def testTransformOptimizationConfig(self):
     # basic test
     m1 = Metric(name="m1")
     objective_m1 = Objective(metric=m1, minimize=False)
     oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[])
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     self.assertEqual(oc_tf, oc)
     # output constraint on a different metric should work
     m2 = Metric(name="m2")
     oc = OptimizationConfig(
         objective=objective_m1,
         outcome_constraints=self.get_constraint(
             metric=m2, bound=-1, relative=False
         ),
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     self.assertEqual(oc_tf, oc)
     # output constraint with a negative bound should fail
     objective_m2 = Objective(metric=m2, minimize=False)
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(
             metric=m1, bound=-1.234, relative=False
         ),
     )
     with self.assertRaises(ValueError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "LogY transform cannot be applied to metric m1 since the "
         "bound isn't positive, got: -1.234.",
         str(cm.exception),
     )
     # output constraint with a zero bound should also fail
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(metric=m1, bound=0, relative=False),
     )
     with self.assertRaises(ValueError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "LogY transform cannot be applied to metric m1 since the "
         "bound isn't positive, got: 0.",
         str(cm.exception),
     )
     # output constraint with a positive bound should work
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(
             metric=m1, bound=2.345, relative=False
         ),
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     oc.outcome_constraints[0].bound = math.log(2.345)
     self.assertEqual(oc_tf, oc)
     # output constraint with a relative bound should fail
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(
             metric=m1, bound=2.345, relative=True
         ),
     )
     with self.assertRaises(ValueError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "LogY transform cannot be applied to metric m1 since it is "
         "subject to a relative constraint.",
         str(cm.exception),
     )
Example #13
    def test_best_point(
        self,
        _mock_gen,
        _mock_best_point,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
        exp = Experiment(search_space=get_search_space_for_range_value(), name="test")
        oc = OptimizationConfig(
            objective=Objective(metric=Metric("a"), minimize=False),
            outcome_constraints=[],
        )
        modelbridge = ArrayModelBridge(
            search_space=get_search_space_for_range_value(),
            model=NumpyModel(),
            transforms=[t1, t2],
            experiment=exp,
            data=Data(),
            optimization_config=oc,
        )

        self.assertEqual(list(modelbridge.transforms.keys()), ["Cast", "t1", "t2"])

        # Test that an optimization config is required
        with self.assertRaises(ValueError):
            run = modelbridge.gen(n=1, optimization_config=None)

        # _fit is mocked, which typically sets this.
        modelbridge.outcomes = ["a"]
        run = modelbridge.gen(
            n=1,
            optimization_config=oc,
        )

        arm, predictions = run.best_arm_predictions
        self.assertEqual(arm.parameters, {})
        self.assertEqual(predictions[0], {"m": 1.0})
        self.assertEqual(predictions[1], {"m": {"m": 2.0}})

        model_arm, model_predictions = modelbridge.model_best_point()
        self.assertEqual(model_predictions[0], {"m": 1.0})
        self.assertEqual(model_predictions[1], {"m": {"m": 2.0}})

        # test optimization config validation - raise error when
        # ScalarizedOutcomeConstraint contains a metric that is not in the outcomes
        with self.assertRaises(ValueError):
            run = modelbridge.gen(
                n=1,
                optimization_config=OptimizationConfig(
                    objective=Objective(metric=Metric("a"), minimize=False),
                    outcome_constraints=[
                        ScalarizedOutcomeConstraint(
                            metrics=[Metric("wrong_metric_name")],
                            weights=[1.0],
                            op=ComparisonOp.LEQ,
                            bound=0,
                        )
                    ],
                ),
            )
Example #14
def get_best_trial(
    exp: Experiment,
    additional_metrics: Optional[List[Metric]] = None,
    run_metadata_fields: Optional[List[str]] = None,
    true_objective_metric_name: Optional[str] = None,
    true_objective_minimize: Optional[bool] = None,
    **kwargs: Any,
) -> Optional[pd.DataFrame]:
    """Finds the optimal trial given an experiment, based on raw objective value.

    Returns a 1-row dataframe. Should match the row of ``exp_to_df`` with the best
    raw objective value, given the same arguments.

    Args:
        exp: An Experiment that may have pending trials.
        additional_metrics: List of metrics to return in addition to the objective
            metric. Return all metrics if None.
        run_metadata_fields: fields to extract from trial.run_metadata for trial
            in experiment.trials. If there are multiple arms per trial, these
            fields will be replicated across the arms of a trial.
        true_objective_metric_name: objective by which to choose best point (if
            the objective attached to the experiment is being used as a proxy).
        true_objective_minimize: whether the true objective should be minimized
            instead of maximized. If not present will default to the experiment's
            objective's direction.
        **kwargs: Custom named arguments, useful for passing complex
            objects from call-site to the `fetch_data` callback.

    Returns:
        DataFrame: A dataframe of inputs and metrics of the optimal trial.
    """
    objective = (Objective(
        metric=exp.metrics[true_objective_metric_name],
        minimize=true_objective_minimize if true_objective_minimize is not None
        else not_none(exp.optimization_config).objective.minimize,
    ) if true_objective_metric_name is not None else not_none(
        exp.optimization_config).objective)
    if isinstance(objective, MultiObjective):
        logger.warning(
            "No best trial is available for `MultiObjective` optimization. "
            "Returning None for best trial.")
        return None
    if isinstance(objective, ScalarizedObjective):
        logger.warning(
            "No best trial is available for `ScalarizedObjective` optimization. "
            "Returning None for best trial.")
        return None
    if (additional_metrics is not None) and (objective.metric
                                             not in additional_metrics):
        additional_metrics.append(objective.metric)
    trials_df = exp_to_df(
        exp=exp,
        metrics=additional_metrics,
        run_metadata_fields=run_metadata_fields,
        **kwargs,
    )
    if len(trials_df.index) == 0:
        logger.warning(
            "`exp_to_df` returned 0 trials. Returning None for best trial.")
        return None

    metric_name = objective.metric.name
    minimize = objective.minimize
    if metric_name not in trials_df.columns:
        logger.warning(
            f"`exp_to_df` did not have data for metric {metric_name}. "
            "Returning None for best trial.")
        return None

    metric_optimum = (trials_df[metric_name].min()
                      if minimize else trials_df[metric_name].max())
    return pd.DataFrame(
        trials_df[trials_df[metric_name] == metric_optimum].head(1))
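
A hypothetical call to the helper above; the `experiment` placeholder stands in for a real Ax Experiment with completed trials and attached data, and the metadata field name is illustrative:

from ax.core.experiment import Experiment

experiment: Experiment = ...  # placeholder; supply a real experiment here

best_row = get_best_trial(exp=experiment, run_metadata_fields=["run_id"])
if best_row is None:
    print("No best trial could be determined.")
else:
    print(best_row)
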
Example #15
    def setUp(self):
        self.df = pd.DataFrame([
            {
                "arm_name": "0_0",
                "mean": 2.0,
                "sem": 0.2,
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_0",
                "mean": 1.8,
                "sem": 0.3,
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_1",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_1",
                "mean": 3.7,
                "sem": 0.5,
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": 0.5,
                "sem": None,
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "c",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
        ])

        self.data = Data(df=self.df)

        self.optimization_config = OptimizationConfig(
            objective=Objective(metric=Metric(name="a")),
            outcome_constraints=[
                OutcomeConstraint(
                    metric=Metric(name="b"),
                    op=ComparisonOp.GEQ,
                    bound=0,
                    relative=False,
                )
            ],
        )
Example #16
def get_optimization_config_no_constraints() -> OptimizationConfig:
    return OptimizationConfig(objective=Objective(
        metric=Metric("test_metric")))
Example #17
    def metric_from_sqa(
            self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""

        metric = self.metric_from_sqa_util(metric_sqa)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            if metric_sqa.scalarized_objective_weight is not None:
                raise SQADecodeError(  # pragma: no cover
                    "The metric corresponding to regular objective does not \
                    have weight attribute")
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif (metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE
              ):  # metric_sqa is a parent whose children are individual
            # metrics in MultiObjective
            try:
                metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            except DetachedInstanceError:
                metrics_sqa_children = _get_scalarized_objective_children_metrics(
                    metric_id=metric_sqa.id, decoder=self)

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            objectives = [
                Objective(
                    metric=self.metric_from_sqa_util(metric_sqa),
                    minimize=metric_sqa.minimize,
                ) for metric_sqa in metrics_sqa_children
            ]

            multi_objective = MultiObjective(objectives=objectives)
            multi_objective.db_id = metric_sqa.id
            return multi_objective
        elif (metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE
              ):  # metric_sqa is a parent whose children are individual
            # metrics in Scalarized Objective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because minimize is None.")

            try:
                metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            except DetachedInstanceError:
                metrics_sqa_children = _get_scalarized_objective_children_metrics(
                    metric_id=metric_sqa.id, decoder=self)

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized Objective \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_objective_weight,
            ) for child in metrics_sqa_children])
            scalarized_objective = ScalarizedObjective(
                metrics=list(metrics),
                weights=list(weights),
                minimize=not_none(metric_sqa.minimize),
            )
            scalarized_objective.db_id = metric_sqa.id
            return scalarized_objective
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None.")
            return OutcomeConstraint(
                metric=metric,
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                    "bound, op, or relative is None.")

            try:
                metrics_sqa_children = (
                    metric_sqa.scalarized_outcome_constraint_children_metrics)
            except DetachedInstanceError:
                metrics_sqa_children = (
                    _get_scalarized_outcome_constraint_children_metrics(
                        metric_id=metric_sqa.id, decoder=self))

            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                    because the parent metric has no children metrics.")

            # Extracting metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_outcome_constraint_weight,
            ) for child in metrics_sqa_children])
            scalarized_outcome_constraint = ScalarizedOutcomeConstraint(
                metrics=list(metrics),
                weights=list(weights),
                bound=not_none(metric_sqa.bound),
                op=not_none(metric_sqa.op),
                relative=not_none(metric_sqa.relative),
            )
            scalarized_outcome_constraint.db_id = metric_sqa.id
            return scalarized_outcome_constraint
        elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
            if metric_sqa.bound is None or metric_sqa.relative is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ObjectiveThreshold because "
                    "bound, op, or relative is None.")
            ot = ObjectiveThreshold(
                metric=metric,
                bound=metric_sqa.bound,
                relative=metric_sqa.relative,
                op=metric_sqa.op,
            )
            # ObjectiveThreshold constructor clones the passed-in metric, which means
            # the db id gets lost and so we need to reset it
            ot.metric._db_id = metric.db_id
            return ot
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent.")
Example #18
def get_branin_objective() -> Objective:
    return Objective(metric=get_branin_metric(), minimize=False)
Example #19
    def testObservedParetoFrontiers(self):
        experiment = get_branin_experiment_with_multi_objective(
            with_batch=True,
            has_optimization_config=False,
            with_status_quo=True)

        # Optimization config is not optional
        with self.assertRaises(ValueError):
            get_observed_pareto_frontiers(experiment=experiment, data=Data())

        objectives = [
            Objective(
                metric=BraninMetric(name="m1",
                                    param_names=["x1", "x2"],
                                    lower_is_better=True),
                minimize=True,
            ),
            Objective(
                metric=NegativeBraninMetric(name="m2",
                                            param_names=["x1", "x2"],
                                            lower_is_better=True),
                minimize=True,
            ),
            Objective(
                metric=BraninMetric(name="m3",
                                    param_names=["x1", "x2"],
                                    lower_is_better=True),
                minimize=True,
            ),
        ]
        bounds = [0, -100, 0]
        objective_thresholds = [
            ObjectiveThreshold(
                metric=objective.metric,
                bound=bounds[i],
                relative=True,
                op=ComparisonOp.LEQ,
            ) for i, objective in enumerate(objectives)
        ]
        objective = MultiObjective(objectives=objectives)
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=objective,
            objective_thresholds=objective_thresholds,
        )
        experiment.optimization_config = optimization_config
        experiment.trials[0].run()

        # For the check below, compute which arms are better than SQ
        df = experiment.fetch_data().df
        df["sem"] = np.nan
        data = Data(df)
        sq_val = df[(df["arm_name"] == "status_quo")
                    & (df["metric_name"] == "m1")]["mean"].values[0]
        pareto_arms = sorted(
            df[(df["mean"] <= sq_val)
               & (df["metric_name"] == "m1")]["arm_name"].unique().tolist())

        pfrs = get_observed_pareto_frontiers(experiment=experiment, data=data)
        # We have all pairs of metrics
        self.assertEqual(len(pfrs), 3)
        true_pairs = [("m1", "m2"), ("m1", "m3"), ("m2", "m3")]
        for i, pfr in enumerate(pfrs):
            self.assertEqual(pfr.primary_metric, true_pairs[i][0])
            self.assertEqual(pfr.secondary_metric, true_pairs[i][1])
            self.assertEqual(pfr.absolute_metrics, [])
            self.assertEqual(list(pfr.means.keys()), ["m1", "m2", "m3"])
            self.assertEqual(len(pfr.means["m1"]), len(pareto_arms))
            self.assertTrue(np.isnan(pfr.sems["m1"]).all())
            self.assertEqual(len(pfr.arm_names), len(pareto_arms))
            arm_idx = np.argsort(pfr.arm_names)
            for i, idx in enumerate(arm_idx):
                name = pareto_arms[i]
                self.assertEqual(pfr.arm_names[idx], name)
                self.assertEqual(pfr.param_dicts[idx],
                                 experiment.arms_by_name[name].parameters)
Example #20
    def metric_from_sqa(
            self, metric_sqa: SQAMetric
    ) -> Union[Metric, Objective, OutcomeConstraint]:
        """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint."""

        metric = self.metric_from_sqa_util(metric_sqa)

        if metric_sqa.intent == MetricIntent.TRACKING:
            return metric
        elif metric_sqa.intent == MetricIntent.OBJECTIVE:
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to Objective because minimize is None."
                )
            if metric_sqa.scalarized_objective_weight is not None:
                raise SQADecodeError(  # pragma: no cover
                    "The metric corresponding to regular objective does not \
                    have weight attribute")
            return Objective(metric=metric, minimize=metric_sqa.minimize)
        elif (metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE
              ):  # metric_sqa is a parent whose children are individual
            # metrics in MultiObjective
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because minimize is None.")
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to MultiObjective \
                    because the parent metric has no children metrics.")

            # Extract the metric for each child
            metrics = [
                self.metric_from_sqa_util(child)
                for child in metrics_sqa_children
            ]

            return MultiObjective(
                metrics=list(metrics),
                # pyre-fixme[6]: Expected `bool` for 2nd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
        elif metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE:
            # metric_sqa is a parent whose children are the individual
            # metrics in the ScalarizedObjective.
            if metric_sqa.minimize is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ScalarizedObjective "
                    "because minimize is None."
                )
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
            if metrics_sqa_children is None:
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to ScalarizedObjective "
                    "because the parent metric has no children metrics."
                )

            # Extract the metric and weight for each child
            metrics, weights = zip(*[(
                self.metric_from_sqa_util(child),
                child.scalarized_objective_weight,
            ) for child in metrics_sqa_children])
            return ScalarizedObjective(
                metrics=list(metrics),
                weights=list(weights),
                # pyre-fixme[6]: Expected `bool` for 3rd param but got `Optional[bool]`.
                minimize=metric_sqa.minimize,
            )
        elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
            if (metric_sqa.bound is None or metric_sqa.op is None
                    or metric_sqa.relative is None):
                raise SQADecodeError(  # pragma: no cover
                    "Cannot decode SQAMetric to OutcomeConstraint because "
                    "bound, op, or relative is None.")
            return OutcomeConstraint(
                metric=metric,
                # pyre-fixme[6]: Expected `float` for 2nd param but got
                #  `Optional[float]`.
                bound=metric_sqa.bound,
                op=metric_sqa.op,
                relative=metric_sqa.relative,
            )
        else:
            raise SQADecodeError(
                f"Cannot decode SQAMetric because {metric_sqa.intent} "
                f"is an invalid intent.")
Example #21
### Hartmann6 problem, D=100 and D=1000

# Relevant parameters were chosen randomly using
# x = np.arange(100)
# np.random.seed(10)
# np.random.shuffle(x)
# print(x[:6])  # [19 14 43 37 66  3]

hartmann6_100 = BenchmarkProblem(
    name="Hartmann6, D=100",
    optimal_value=-3.32237,
    optimization_config=OptimizationConfig(
        objective=Objective(
            metric=Hartmann6Metric(
                name="objective",
                param_names=["x19", "x14", "x43", "x37", "x66", "x3"],
                noise_sd=0.0,
            ),
            minimize=True,
        )
    ),
    search_space=SearchSpace(
        parameters=[
            RangeParameter(
                name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0
            )
            for i in range(100)
        ]
    ),
)
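
The heading above also mentions a D=1000 variant, which is not shown here; presumably it follows the same pattern with its own randomly chosen relevant indices. Below is a hedged sketch of a generic constructor for such an embedded problem. The helper name is illustrative (not from the source), it reuses the same classes as the snippet above, and no claim is made about which indices the real D=1000 problem uses.

def make_embedded_hartmann6(d, relevant_indices):
    """Hypothetical helper: embed the 6-d Hartmann function in a d-dimensional
    search space, with only the listed coordinates affecting the objective."""
    return BenchmarkProblem(
        name=f"Hartmann6, D={d}",
        optimal_value=-3.32237,
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=Hartmann6Metric(
                    name="objective",
                    param_names=[f"x{i}" for i in relevant_indices],
                    noise_sd=0.0,
                ),
                minimize=True,
            )
        ),
        search_space=SearchSpace(
            parameters=[
                RangeParameter(
                    name=f"x{i}",
                    parameter_type=ParameterType.FLOAT,
                    lower=0.0,
                    upper=1.0,
                )
                for i in range(d)
            ]
        ),
    )

# The D=100 problem above would then be equivalent to:
# hartmann6_100 = make_embedded_hartmann6(100, [19, 14, 43, 37, 66, 3])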

Example #22
    def testGen(self, mock_init, mock_best_point, mock_gen):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = NumpyModelBridge()
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        ma.transforms = OrderedDict()
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertTrue(
            np.array_equal(
                gen_args["linear_constraints"][0],
                np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
            ))
        self.assertTrue(
            np.array_equal(gen_args["linear_constraints"][1],
                           np.array([[0.0], [-3.5]])))
        self.assertEqual(gen_args["fixed_features"], {2: 3.0})
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][0], np.array([])))
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][1],
                           np.array([[0.6, 1.6, 3.0]])))
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 1.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 3.0,
            "y": 4.0,
            "z": 3.0
        })
        self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))

        # Test with multiple objectives.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="a"), Metric(name="b")], minimize=True))
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=oc2,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[1][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"],
                           np.array([-1.0, -1.0])))

        # Test with MultiObjective (unweighted multiple objectives)
        oc3 = MultiObjectiveOptimizationConfig(objective=MultiObjective(
            metrics=[Metric(name="a"),
                     Metric(name="b", lower_is_better=True)],
            minimize=True,
        ))
        search_space = SearchSpace(self.parameters)  # Unconstrained
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=oc3,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[2][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([1.0,
                                                                    -1.0])))

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(3, search_space, {}, ObservationFeatures({}), None,
                optimization_config)
        gen_args = mock_gen.mock_calls[3][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["linear_constraints"])
        self.assertIsNone(gen_args["fixed_features"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
        optimization_config.objective.minimize = True
        optimization_config.outcome_constraints[0].relative = True
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
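
The assertions in this test encode a sign convention for objective_weights: metrics that are minimized get weight -1.0, maximized metrics get +1.0, and outcomes not in the objective get 0.0. The sketch below mirrors that convention for exposition only; the helper name is made up and this is not the bridge's actual implementation.

import numpy as np

def objective_weights(outcomes, objective_metrics, minimize_flags):
    """Hypothetical helper mirroring the weight convention asserted above."""
    weights = np.zeros(len(outcomes))
    for metric_name, minimize in zip(objective_metrics, minimize_flags):
        weights[outcomes.index(metric_name)] = -1.0 if minimize else 1.0
    return weights

# Single objective "a", minimized, outcomes ["a", "b"]   -> [-1.,  0.]
# Scalarized objective over "a" and "b", both minimized  -> [-1., -1.]
# MultiObjective with "a" maximized and "b" minimized    -> [ 1., -1.]
print(objective_weights(["a", "b"], ["a"], [True]))
print(objective_weights(["a", "b"], ["a", "b"], [True, True]))
print(objective_weights(["a", "b"], ["a", "b"], [False, True]))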
Example #23
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
    tracking_metric_names: Optional[List[str]] = None,
    # Single-objective optimization arguments:
    objective_name: Optional[str] = None,
    minimize: bool = False,
    # Multi-objective optimization arguments:
    objectives: Optional[Dict[str, str]] = None,
    objective_thresholds: Optional[List[str]] = None,
    support_intermediate_data: bool = False,
) -> Experiment:
    """Instantiation wrapper that allows for Ax `Experiment` creation
    without importing or instantiating any Ax classes.

    Args:
        parameters: List of dictionaries representing parameters in the
            experiment search space.
            Required elements in the dictionaries are:
            1. "name" (name of parameter, string),
            2. "type" (type of parameter: "range", "fixed", or "choice", string),
            and one of the following:
            3a. "bounds" for range parameters (list of two values, lower bound
            first),
            3b. "values" for choice parameters (list of values), or
            3c. "value" for fixed parameters (single value).
            Optional elements are:
            1. "log_scale" (for float-valued range parameters, bool),
            2. "value_type" (to specify type that values of this parameter should
            take; expects "float", "int", "bool" or "str"),
            3. "is_fidelity" (bool) and "target_value" (float) for fidelity
            parameters,
            4. "is_ordered" (bool) for choice parameters,
            5. "is_task" (bool) for task parameters, and
            6. "digits" (int) for float-valued range parameters.
        name: Name of the experiment to be created.
        parameter_constraints: List of string representation of parameter
            constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
            the latter constraints, any number of arguments is accepted, and
            acceptable operators are "<=" and ">=".
        outcome_constraints: List of string representation of outcome
            constraints of form "metric_name >= bound", like "m1 <= 3."
        status_quo: Parameterization of the current state of the system.
            If set, this will be added to each trial to be evaluated alongside
            test configurations.
        experiment_type: String indicating type of the experiment (e.g. name of
            a product in which it is used), if any.
        tracking_metric_names: Names of additional tracking metrics not used for
            optimization.
        objective_name: Name of the metric used as objective in this experiment,
            if experiment is single-objective optimization.
        minimize: Whether this experiment represents a minimization problem, if
            experiment is a single-objective optimization.
        objectives: Mapping from an objective name to "minimize" or "maximize"
            representing the direction for that objective. Used only for
            multi-objective optimization experiments.
        objective_thresholds: A list of objective threshold constraints for multi-
            objective optimization, in the same string format as `outcome_constraints`
            argument.
        support_intermediate_data: Whether trials may report metric results for
            incomplete runs.
    """
    if objective_name is not None and (
        objectives is not None or objective_thresholds is not None
    ):
        raise UnsupportedError(
            "Ambiguous objective definition: for single-objective optimization "
            "`objective_name` and `minimize` arguments expected. For multi-objective "
            "optimization `objectives` and `objective_thresholds` arguments expected."
        )

    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)

    # TODO(jej): Needs to be decided per-metric when supporting heterogenous data.
    metric_cls = MapMetric if support_intermediate_data else Metric
    if objectives is None:
        optimization_config = OptimizationConfig(
            objective=Objective(
                metric=metric_cls(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=make_outcome_constraints(
                outcome_constraints or [], status_quo_arm is not None
            ),
        )
    else:
        optimization_config = make_optimization_config(
            objectives,
            objective_thresholds or [],
            outcome_constraints or [],
            status_quo_arm is not None,
        )

    tracking_metrics = (
        None
        if tracking_metric_names is None
        else [Metric(name=metric_name) for metric_name in tracking_metric_names]
    )

    default_data_type = (
        DataType.MAP_DATA if support_intermediate_data else DataType.DATA
    )

    return Experiment(
        name=name,
        search_space=make_search_space(parameters, parameter_constraints or []),
        optimization_config=optimization_config,
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
        tracking_metrics=tracking_metrics,
        default_data_type=default_data_type,
    )
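
As a quick illustration of the docstring above, a single-objective call might look like the following sketch. All parameter, metric, and experiment names are made up for the example.

# A hedged usage sketch of make_experiment; names and bounds are illustrative only.
experiment = make_experiment(
    name="demo_experiment",
    parameters=[
        {"name": "x1", "type": "range", "bounds": [0.0, 1.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 10.0]},
        {"name": "x3", "type": "choice", "values": ["relu", "tanh"], "is_ordered": False},
        {"name": "x4", "type": "fixed", "value": 2.0},
    ],
    parameter_constraints=["x1 <= x2"],
    outcome_constraints=["m2 <= 3"],
    objective_name="m1",
    minimize=True,
    tracking_metric_names=["m3"],
)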
Example #24
def get_objective() -> Objective:
    return Objective(metric=Metric(name="m1"), minimize=False)
Example #25
    def test_MOO_with_more_outcomes_than_thresholds(self):
        experiment = get_branin_experiment_with_multi_objective(
            has_optimization_config=False
        )
        metric_c = Metric(name="c", lower_is_better=False)
        metric_a = Metric(name="a", lower_is_better=False)
        objective_thresholds = [
            ObjectiveThreshold(
                metric=metric_c,
                bound=2.0,
                relative=False,
            ),
            ObjectiveThreshold(
                metric=metric_a,
                bound=1.0,
                relative=False,
            ),
        ]
        experiment.optimization_config = MultiObjectiveOptimizationConfig(
            objective=MultiObjective(
                objectives=[
                    Objective(metric=metric_a),
                    Objective(metric=metric_c),
                ]
            ),
            objective_thresholds=objective_thresholds,
        )
        experiment.add_tracking_metric(Metric(name="b", lower_is_better=False))
        sobol = get_sobol(
            search_space=experiment.search_space,
        )
        sobol_run = sobol.gen(1)
        experiment.new_batch_trial().add_generator_run(sobol_run).run().mark_completed()
        data = Data(
            pd.DataFrame(
                data={
                    "arm_name": ["0_0", "0_0", "0_0"],
                    "metric_name": ["a", "b", "c"],
                    "mean": [1.0, 2.0, 3.0],
                    "trial_index": [0, 0, 0],
                    "sem": [0, 0, 0],
                }
            )
        )
        test_names_to_fns = {
            "MOO_NEHVI": get_MOO_NEHVI,
            "MOO_EHVI": get_MOO_EHVI,
            "MOO_PAREGO": get_MOO_PAREGO,
            "MOO_RS": get_MOO_RS,
        }
        for test_name, factory_fn in test_names_to_fns.items():
            with self.subTest(test_name):
                moo_model = factory_fn(
                    experiment=experiment,
                    data=data,
                )
                moo_gr = moo_model.gen(n=1)
                obj_t = moo_gr.gen_metadata["objective_thresholds"]
                self.assertEqual(obj_t[0], objective_thresholds[1])
                self.assertEqual(obj_t[1], objective_thresholds[0])
                self.assertEqual(len(obj_t), 2)
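
The final assertions check that the generated thresholds come back ordered to match the objectives (metric "a" first, then "c"), not in the order they were supplied. A small illustration of that reordering, purely for exposition and not the model's code:

# Illustrative only: reorder the supplied thresholds to match the objective order.
objective_order = ["a", "c"]  # order of the objectives in the MultiObjective above
reordered = sorted(
    objective_thresholds,
    key=lambda t: objective_order.index(t.metric.name),
)
# reordered[0] is the threshold on "a" and reordered[1] the one on "c", matching
# obj_t[0] == objective_thresholds[1] and obj_t[1] == objective_thresholds[0].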