Example #1
    def testConstraintValidation(self):
        # Can't build OptimizationConfig with MultiObjective
        with self.assertRaises(ValueError):
            OptimizationConfig(objective=self.multi_objective)

        # Can't constrain on objective metric.
        objective_constraint = OutcomeConstraint(metric=self.objective.metric,
                                                 op=ComparisonOp.GEQ,
                                                 bound=0)
        with self.assertRaises(ValueError):
            OptimizationConfig(objective=self.objective,
                               outcome_constraints=[objective_constraint])

        # Two outcome_constraints on the same metric with the same op
        # should raise.
        duplicate_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=self.outcome_constraint.op,
            bound=self.outcome_constraint.bound + 1,
        )
        with self.assertRaises(ValueError):
            OptimizationConfig(
                objective=self.objective,
                outcome_constraints=[
                    self.outcome_constraint, duplicate_constraint
                ],
            )

        # Three outcome_constraints on the same metric should raise.
        opposing_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=not self.outcome_constraint.op,
            bound=self.outcome_constraint.bound,
        )
        with self.assertRaises(ValueError):
            OptimizationConfig(
                objective=self.objective,
                outcome_constraints=self.outcome_constraints +
                [opposing_constraint],
            )

        # Two outcome_constraints on the same metric with different ops and an
        # infeasible bound ordering (lower bound above upper bound) should raise.
        add_bound = 1 if self.outcome_constraint.op == ComparisonOp.LEQ else -1
        opposing_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=not self.outcome_constraint.op,
            bound=self.outcome_constraint.bound + add_bound,
        )
        with self.assertRaises(ValueError):
            OptimizationConfig(
                objective=self.objective,
                outcome_constraints=([
                    self.outcome_constraint, opposing_constraint
                ]),
            )

        # Two outcome_constraints on the same metric with different ops and a
        # feasible bound ordering should not raise.
        opposing_constraint = OutcomeConstraint(
            metric=self.outcome_constraint.metric,
            op=not self.outcome_constraint.op,
            bound=self.outcome_constraint.bound + 1,
        )
        config = OptimizationConfig(
            objective=self.objective,
            outcome_constraints=([
                self.outcome_constraint, opposing_constraint
            ]),
        )
        self.assertEqual(config.outcome_constraints,
                         [self.outcome_constraint, opposing_constraint])
Example #2
 def testClone(self):
     config1 = OptimizationConfig(
         objective=self.objective,
         outcome_constraints=self.outcome_constraints)
     self.assertEqual(config1, config1.clone())
Example #3
    def test_best_point(
        self,
        _mock_gen,
        _mock_best_point,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
        exp = Experiment(search_space=get_search_space_for_range_value(), name="test")
        oc = OptimizationConfig(
            objective=Objective(metric=Metric("a"), minimize=False),
            outcome_constraints=[],
        )
        modelbridge = ArrayModelBridge(
            search_space=get_search_space_for_range_value(),
            model=NumpyModel(),
            transforms=[t1, t2],
            experiment=exp,
            data=Data(),
            optimization_config=oc,
        )

        self.assertEqual(list(modelbridge.transforms.keys()), ["Cast", "t1", "t2"])

        # Check that an optimization config is required.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(n=1, optimization_config=None)

        # _fit is mocked, which typically sets this.
        modelbridge.outcomes = ["a"]
        run = modelbridge.gen(
            n=1,
            optimization_config=oc,
        )

        arm, predictions = run.best_arm_predictions
        self.assertEqual(arm.parameters, {})
        self.assertEqual(predictions[0], {"m": 1.0})
        self.assertEqual(predictions[1], {"m": {"m": 2.0}})

        model_arm, model_predictions = modelbridge.model_best_point()
        self.assertEqual(model_predictions[0], {"m": 1.0})
        self.assertEqual(model_predictions[1], {"m": {"m": 2.0}})

        # Test optimization config validation: raise an error when a
        # ScalarizedOutcomeConstraint contains a metric that is not in the outcomes.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(
                n=1,
                optimization_config=OptimizationConfig(
                    objective=Objective(metric=Metric("a"), minimize=False),
                    outcome_constraints=[
                        ScalarizedOutcomeConstraint(
                            metrics=[Metric("wrong_metric_name")],
                            weights=[1.0],
                            op=ComparisonOp.LEQ,
                            bound=0,
                        )
                    ],
                ),
            )
Example #4
    def testModelBridge(self, mock_fit, mock_gen_arms,
                        mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [transform_1, transform_2]
        exp = get_experiment_for_value()
        ss = get_search_space_for_value()
        modelbridge = ModelBridge(
            search_space=ss,
            model=Model(),
            transforms=transforms,
            experiment=exp,
            data=0,
        )
        self.assertFalse(
            modelbridge._experiment_has_immutable_search_space_and_opt_config)
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["Cast", "transform_1", "transform_2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(
            fit_args["search_space"] == get_search_space_for_value(8.0))
        self.assertTrue(fit_args["observation_features"] == [])
        self.assertTrue(fit_args["observation_data"] == [])
        self.assertTrue(mock_observations_from_data.called)

        # Test prediction on out of design features.
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            side_effect=ValueError("Out of Design"),
        )
        # This point is in design, and thus failures in predict are legitimate.
        with mock.patch.object(ModelBridge,
                               "model_space",
                               return_value=get_search_space_for_range_values):
            with self.assertRaises(ValueError):
                modelbridge.predict([get_observation2().features])

        # This point is out of design, and not in training data.
        with self.assertRaises(ValueError):
            modelbridge.predict([get_observation_status_quo0().features])

        # Now it's in the training data.
        with mock.patch.object(
                ModelBridge,
                "get_training_data",
                return_value=[get_observation_status_quo0()],
        ):
            # Return raw training value.
            self.assertEqual(
                modelbridge.predict([get_observation_status_quo0().features]),
                unwrap_observation_data([get_observation_status_quo0().data]),
            )

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[get_observation2trans().data],
        )
        modelbridge.predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Check that _single_predict is equivalent here.
        modelbridge._single_predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([get_observation1trans().features], [2], None, {}),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(
            name="test_metric")))
        modelbridge._set_kwargs_to_save(model_key="TestModel",
                                        model_kwargs={},
                                        bridge_kwargs={})
        gr = modelbridge.gen(
            n=1,
            search_space=get_search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [get_observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [get_observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={},
            observation_features=[get_observation1().features])

        # Gen with no pending observations and no fixed features
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=None)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"),
                     Metric(name="test_metric_2")]))
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=oc2)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[get_observation1trans().data],
        )
        cv_training_data = [get_observation2()]
        cv_test_points = [get_observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points)
        modelbridge._cross_validate.assert_called_with(
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            obs_feats=[get_observation2trans().features],
            obs_data=[get_observation2trans().data],
            cv_test_points=[get_observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [get_observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [get_observation1(), get_observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(
            modelbridge.model_space == get_search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [False, False])

        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        # Test feature_importances
        with self.assertRaises(NotImplementedError):
            modelbridge.feature_importances("a")

        # Test transform observation features
        with mock.patch(
                "ax.modelbridge.base.ModelBridge._transform_observation_features",
                autospec=True,
        ) as mock_tr:
            modelbridge.transform_observation_features(
                [get_observation2().features])
        mock_tr.assert_called_with(modelbridge,
                                   [get_observation2trans().features])
Example #5
def get_map_optimization_config() -> OptimizationConfig:
    objective = get_map_objective()
    return OptimizationConfig(objective=objective)
Example #6
def get_branin_optimization_config(
        minimize: bool = False) -> OptimizationConfig:
    return OptimizationConfig(objective=get_branin_objective(
        minimize=minimize))
Example #7
def get_branin_multi_objective_optimization_config() -> OptimizationConfig:
    return OptimizationConfig(objective=get_branin_multi_objective())
Example #8
    def testGen(self, mock_init, mock_best_point, mock_gen):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = NumpyModelBridge()
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        ma.transforms = OrderedDict()
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertTrue(
            np.array_equal(
                gen_args["linear_constraints"][0],
                np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
            ))
        self.assertTrue(
            np.array_equal(gen_args["linear_constraints"][1],
                           np.array([[0.0], [-3.5]])))
        self.assertEqual(gen_args["fixed_features"], {2: 3.0})
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][0], np.array([])))
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][1],
                           np.array([[0.6, 1.6, 3.0]])))
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 1.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 3.0,
            "y": 4.0,
            "z": 3.0
        })
        self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))

        # Test with multiple objectives.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="a"), Metric(name="b")], minimize=True))
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=oc2,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[1][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"],
                           np.array([-1.0, -1.0])))

        # Test with MultiObjective (unweighted multiple objectives)
        oc3 = MultiObjectiveOptimizationConfig(objective=MultiObjective(
            metrics=[Metric(name="a"),
                     Metric(name="b", lower_is_better=True)],
            minimize=True,
        ))
        search_space = SearchSpace(self.parameters)  # Unconstrained
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=oc3,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[2][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([1.0,
                                                                    -1.0])))

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(3, search_space, {}, ObservationFeatures({}), None,
                optimization_config)
        gen_args = mock_gen.mock_calls[3][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["linear_constraints"])
        self.assertIsNone(gen_args["fixed_features"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
        optimization_config.objective.minimize = True
        optimization_config.outcome_constraints[0].relative = True
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
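
The arrays asserted in this test follow mechanically from the optimization config: with outcomes ordered as ["a", "b"], the minimized objective on "a" becomes the weight vector [-1, 0], and the constraint b >= 2 is rewritten in <=-form as -b <= -2, giving A = [[0, -1]] and b = [[-2]]. A minimal sketch of that conventional encoding (the helper below is illustrative, not the Ax internals):

import numpy as np

def encode_config(outcomes, objective_metric, minimize, constraint):
    # Illustrative encoding of one objective plus one GEQ/LEQ outcome constraint
    # into the (objective_weights, (A, b)) arrays checked in the test above.
    weights = np.zeros(len(outcomes))
    weights[outcomes.index(objective_metric)] = -1.0 if minimize else 1.0

    metric, op, bound = constraint  # op is "GEQ" or "LEQ"
    row = np.zeros((1, len(outcomes)))
    sign = -1.0 if op == "GEQ" else 1.0  # GEQ constraints are negated into <=-form
    row[0, outcomes.index(metric)] = sign
    b = np.array([[sign * bound]])
    return weights, (row, b)

weights, (A, b) = encode_config(["a", "b"], "a", True, ("b", "GEQ", 2))
assert np.array_equal(weights, np.array([-1.0, 0.0]))
assert np.array_equal(A, np.array([[0.0, -1.0]]))
assert np.array_equal(b, np.array([[-2.0]]))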
Example #9
### Hartmann6 problem, D=100 and D=1000

# Relevant parameters were chosen randomly using
# x = np.arange(100)
# np.random.seed(10)
# np.random.shuffle(x)
# print(x[:6])  # [19 14 43 37 66  3]

hartmann6_100 = BenchmarkProblem(
    name="Hartmann6, D=100",
    optimal_value=-3.32237,
    optimization_config=OptimizationConfig(
        objective=Objective(
            metric=Hartmann6Metric(
                name="objective",
                param_names=["x19", "x14", "x43", "x37", "x66", "x3"],
                noise_sd=0.0,
            ),
            minimize=True,
        )
    ),
    search_space=SearchSpace(
        parameters=[
            RangeParameter(
                name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0
            )
            for i in range(100)
        ]
    ),
)
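
The heading above also mentions a D=1000 variant, which is not shown in this example. Below is a sketch of how the analogous definition would look, reusing the same imports as the D=100 problem; the six relevant-parameter names are placeholders, since the actual indices would come from repeating the shuffling recipe above on np.arange(1000).

hartmann6_1000 = BenchmarkProblem(
    name="Hartmann6, D=1000",
    optimal_value=-3.32237,
    optimization_config=OptimizationConfig(
        objective=Objective(
            metric=Hartmann6Metric(
                name="objective",
                # Placeholder indices -- derive the real ones by shuffling np.arange(1000).
                param_names=["x19", "x14", "x43", "x37", "x66", "x3"],
                noise_sd=0.0,
            ),
            minimize=True,
        )
    ),
    search_space=SearchSpace(
        parameters=[
            RangeParameter(
                name=f"x{i}", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0
            )
            for i in range(1000)
        ]
    ),
)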

Example #10
    def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [t1, t2]
        exp = get_experiment()
        modelbridge = ModelBridge(search_space_for_value(), 0, transforms, exp, 0)
        self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(fit_args["search_space"] == search_space_for_value(8.0))
        self.assertTrue(
            fit_args["observation_features"]
            == [observation1trans().features, observation2trans().features]
        )
        self.assertTrue(
            fit_args["observation_data"]
            == [observation1trans().data, observation2trans().data]
        )
        self.assertTrue(mock_observations_from_data.called)

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[observation2trans().data],
        )

        modelbridge.predict([observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([observation1trans().features], [2], None),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
        modelbridge._set_kwargs_to_save(
            model_key="TestModel", model_kwargs={}, bridge_kwargs={}
        )
        gr = modelbridge.gen(
            n=1,
            search_space=search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={}, observation_features=[observation1().features]
        )

        # Gen with no pending observations and no fixed features
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=None
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(
            objective=ScalarizedObjective(
                metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
            )
        )
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=oc2
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[observation1trans().data],
        )
        cv_training_data = [observation2()]
        cv_test_points = [observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points
        )
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[observation2trans().features],
            obs_data=[observation2trans().data],
            cv_test_points=[observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [observation1(), observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(modelbridge.model_space == search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [True, True])

        modelbridge.training_in_design = [True, False]
        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        ood_obs = modelbridge.out_of_design_data()
        self.assertTrue(ood_obs == unwrap_observation_data([observation2().data]))
Example #11
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
    # Single-objective optimization arguments:
    objective_name: Optional[str] = None,
    minimize: bool = False,
    # Multi-objective optimization arguments:
    objectives: Optional[Dict[str, str]] = None,
    objective_thresholds: Optional[List[str]] = None,
) -> Experiment:
    """Instantiation wrapper that allows for Ax `Experiment` creation
    without importing or instantiating any Ax classes.

    Args:
        parameters: List of dictionaries representing parameters in the
            experiment search space.
            Required elements in the dictionaries are:
            1. "name" (name of parameter, string),
            2. "type" (type of parameter: "range", "fixed", or "choice", string),
            and one of the following:
            3a. "bounds" for range parameters (list of two values, lower bound
            first),
            3b. "values" for choice parameters (list of values), or
            3c. "value" for fixed parameters (single value).
            Optional elements are:
            1. "log_scale" (for float-valued range parameters, bool),
            2. "value_type" (to specify type that values of this parameter should
            take; expects "float", "int", "bool" or "str"),
            3. "is_fidelity" (bool) and "target_value" (float) for fidelity
            parameters,
            4. "is_ordered" (bool) for choice parameters,
            5. "is_task" (bool) for task parameters, and
            6. "digits" (int) for float-valued range parameters.
        name: Name of the experiment to be created.
        parameter_constraints: List of string representation of parameter
            constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
            the latter constraints, any number of arguments is accepted, and
            acceptable operators are "<=" and ">=".
        outcome_constraints: List of string representation of outcome
            constraints of form "metric_name >= bound", like "m1 <= 3."
        status_quo: Parameterization of the current state of the system.
            If set, this will be added to each trial to be evaluated alongside
            test configurations.
        experiment_type: String indicating type of the experiment (e.g. name of
            a product in which it is used), if any.
        objective_name: Name of the metric used as objective in this experiment,
            if experiment is single-objective optimization.
        minimize: Whether this experiment represents a minimization problem, if
            experiment is a single-objective optimization.
        objectives: Mapping from an objective name to "minimize" or "maximize"
            representing the direction for that objective. Used only for
            multi-objective optimization experiments.
        objective_thresholds: A list of objective threshold constraints for multi-
            objective optimization, in the same string format as `outcome_constraints`
            argument.
    """
    if objective_name is not None and (objectives is not None
                                       or objective_thresholds is not None):
        raise UnsupportedError(
            "Ambiguous objective definition: for single-objective optimization "
            "`objective_name` and `minimize` arguments expected. For multi-objective "
            "optimization `objectives` and `objective_thresholds` arguments expected."
        )

    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)

    if objectives is None:
        optimization_config = OptimizationConfig(
            objective=Objective(
                metric=Metric(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=make_outcome_constraints(
                outcome_constraints or [], status_quo_arm is not None),
        )
    else:
        optimization_config = make_optimization_config(
            objectives,
            objective_thresholds or [],
            outcome_constraints or [],
            status_quo_arm is not None,
        )

    return Experiment(
        name=name,
        search_space=make_search_space(parameters, parameter_constraints
                                       or []),
        optimization_config=optimization_config,
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
    )
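
For reference, a minimal single-objective call that matches the parameter-dictionary and constraint-string formats described in the docstring above (all names and values here are illustrative):

exp = make_experiment(
    name="quickstart",
    parameters=[
        {"name": "x1", "type": "range", "bounds": [0.0, 1.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 10.0]},
        {"name": "x3", "type": "choice", "values": ["relu", "tanh"]},
    ],
    parameter_constraints=["x1 <= x2"],
    outcome_constraints=["latency <= 3"],
    objective_name="accuracy",
    minimize=False,
)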
Example #12
    def test_REMBOStrategy(self, mock_fit_gpytorch_model, mock_optimize_acqf):
        # Construct a high-D test experiment with multiple metrics
        hartmann_search_space = SearchSpace(
            parameters=[
                RangeParameter(
                    name=f"x{i}",
                    parameter_type=ParameterType.FLOAT,
                    lower=0.0,
                    upper=1.0,
                )
                for i in range(20)
            ]
        )

        exp = Experiment(
            name="test",
            search_space=hartmann_search_space,
            optimization_config=OptimizationConfig(
                objective=Objective(
                    metric=Hartmann6Metric(
                        name="hartmann6", param_names=[f"x{i}" for i in range(6)]
                    ),
                    minimize=True,
                ),
                outcome_constraints=[
                    OutcomeConstraint(
                        metric=L2NormMetric(
                            name="l2norm",
                            param_names=[f"x{i}" for i in range(6)],
                            noise_sd=0.2,
                        ),
                        op=ComparisonOp.LEQ,
                        bound=1.25,
                        relative=False,
                    )
                ],
            ),
            runner=SyntheticRunner(),
        )

        # Instantiate the strategy
        gs = REMBOStrategy(D=20, d=6, k=4, init_per_proj=4)

        # Check that arms and data are correctly segmented by projection
        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
        self.assertEqual(len(gs.arms_by_proj[0]), 2)
        self.assertEqual(len(gs.arms_by_proj[1]), 0)

        exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()

        self.assertEqual(len(gs.arms_by_proj[0]), 2)
        self.assertEqual(len(gs.arms_by_proj[1]), 2)

        # Iterate until the first projection fits a GP
        for _ in range(4):
            exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
            mock_fit_gpytorch_model.assert_not_called()

        self.assertEqual(len(gs.arms_by_proj[0]), 4)
        self.assertEqual(len(gs.arms_by_proj[1]), 4)
        self.assertEqual(len(gs.arms_by_proj[2]), 2)
        self.assertEqual(len(gs.arms_by_proj[3]), 2)

        # Keep iterating until GP is used for gen
        for i in range(4):
            # First two trials will go towards the 3rd and 4th projections
            # getting enough data for the GP.
            if i < 1:
                self.assertLess(len(gs.arms_by_proj[2]), 4)
            if i < 2:
                self.assertLess(len(gs.arms_by_proj[3]), 4)

            exp.new_batch_trial(generator_run=gs.gen(experiment=exp, n=2)).run()
            if i < 2:
                mock_fit_gpytorch_model.assert_not_called()
            else:
                # After all projections have at least 4 arms' worth of data, the GP can be fit.
                self.assertFalse(any(len(x) < 4 for x in gs.arms_by_proj.values()))
                mock_fit_gpytorch_model.assert_called()

        self.assertTrue(len(gs.model_transitions) > 0)
        gs2 = gs.clone_reset()
        self.assertEqual(gs2.D, 20)
        self.assertEqual(gs2.d, 6)
Example #13
 def testTransformOptimizationConfig(self):
     # basic test
     m1 = Metric(name="m1")
     objective_m1 = Objective(metric=m1, minimize=False)
     oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[])
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     self.assertEqual(oc_tf, oc)
     # output constraint on a different metric should work
     m2 = Metric(name="m2")
     oc = OptimizationConfig(
         objective=objective_m1,
         outcome_constraints=self.get_constraint(
             metric=m2, bound=-1, relative=False
         ),
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     self.assertEqual(oc_tf, oc)
     # output constraint with a negative bound should fail
     objective_m2 = Objective(metric=m2, minimize=False)
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(
             metric=m1, bound=-1.234, relative=False
         ),
     )
     with self.assertRaises(ValueError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "LogY transform cannot be applied to metric m1 since the "
         "bound isn't positive, got: -1.234.",
         str(cm.exception),
     )
     # output constraint with a zero bound should also fail
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(metric=m1, bound=0, relative=False),
     )
     with self.assertRaises(ValueError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "LogY transform cannot be applied to metric m1 since the "
         "bound isn't positive, got: 0.",
         str(cm.exception),
     )
     # output constraint with a positive bound should work
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(
             metric=m1, bound=2.345, relative=False
         ),
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     oc.outcome_constraints[0].bound = math.log(2.345)
     self.assertEqual(oc_tf, oc)
     # output constraint with a relative bound should fail
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(
             metric=m1, bound=2.345, relative=True
         ),
     )
     with self.assertRaises(ValueError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "LogY transform cannot be applied to metric m1 since it is "
         "subject to a relative constraint.",
         str(cm.exception),
     )
Example #14
def get_hartmann_optimization_config() -> OptimizationConfig:
    return OptimizationConfig(objective=get_hartmann_objective())
Example #15
        search_space: search space on which this problem is defined
    """

    name: str
    fbest: float
    optimization_config: OptimizationConfig
    search_space: SearchSpace


# Branin problems
branin = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmin,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=BraninMetric(
            name="branin_objective", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=True,
    )),
    search_space=get_branin_search_space(),
)

branin_max = BenchmarkProblem(
    name=branin_function.name,
    fbest=branin_function.fmax,
    optimization_config=OptimizationConfig(objective=Objective(
        metric=NegativeBraninMetric(
            name="neg_branin", param_names=["x1", "x2"], noise_sd=5.0),
        minimize=False,
    )),
    search_space=get_branin_search_space(),
)
Example #16
    def testGen(self, mock_init):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = DiscreteModelBridge()
        model = mock.MagicMock(DiscreteModel, autospec=True, instance=True)
        model.gen.return_value = ([[0.0, 2.0, 3.0], [1.0, 1.0,
                                                     3.0]], [1.0, 2.0])
        ma.model = model
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        observation_features, weights, best_observation = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = model.gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["parameter_values"],
                         [[0.0, 1.0], ["foo", "bar"], [True]])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertEqual(gen_args["pending_observations"][0], [])
        self.assertEqual(gen_args["pending_observations"][1],
                         [[0, "foo", True]])
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 0.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 1.0,
            "y": 1.0,
            "z": 3.0
        })
        self.assertEqual(weights, [1.0, 2.0])

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=optimization_config,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options={},
        )
        gen_args = model.gen.mock_calls[1][2]
        self.assertEqual(gen_args["parameter_values"],
                         [[0.0, 1.0], ["foo", "bar"]])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, True)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
                model_gen_options={},
            )
Example #17
    def setUp(self):
        self.df = pd.DataFrame([
            {
                "arm_name": "0_0",
                "mean": 2.0,
                "sem": 0.2,
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_0",
                "mean": 1.8,
                "sem": 0.3,
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_1",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_1",
                "mean": 3.7,
                "sem": 0.5,
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": 0.5,
                "sem": None,
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "c",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
        ])

        self.data = Data(df=self.df)

        self.optimization_config = OptimizationConfig(
            objective=Objective(metric=Metric(name="a")),
            outcome_constraints=[
                OutcomeConstraint(metric=Metric(name="b"),
                                  op=ComparisonOp.GEQ,
                                  bound=0)
            ],
        )
Example #18
def get_optimization_config() -> OptimizationConfig:
    objective = get_objective()
    outcome_constraints = [get_outcome_constraint()]
    return OptimizationConfig(objective=objective,
                              outcome_constraints=outcome_constraints)
Example #19
    def testDerelativizeTransform(self, mock_predict, mock_fit,
                                  mock_observations_from_data):
        t = Derelativize(search_space=None,
                         observation_features=None,
                         observation_data=None)

        # ModelBridge with in-design status quo
        search_space = SearchSpace(parameters=[
            RangeParameter("x", ParameterType.FLOAT, 0, 20),
            RangeParameter("y", ParameterType.FLOAT, 0, 20),
        ])
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_1",
        )

        # Test with no relative constraints
        objective = Objective(Metric("c"))
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False)
            ],
        )
        oc2 = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc == oc2)

        # Test with relative constraint, in-design status quo
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=4.5, relative=False),
        ])
        obsf = mock_predict.mock_calls[0][1][1][0]
        obsf2 = ObservationFeatures(parameters={"x": 2.0, "y": 10.0})
        self.assertTrue(obsf == obsf2)

        # Test with relative constraint, out-of-design status quo
        mock_predict.side_effect = Exception()
        g = ModelBridge(
            search_space=search_space,
            model=None,
            transforms=[],
            experiment=Experiment(search_space, "test"),
            data=Data(),
            status_quo_name="1_2",
        )
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        oc = t.transform_optimization_config(oc, g, None)
        self.assertTrue(oc.outcome_constraints == [
            OutcomeConstraint(
                Metric("a"), ComparisonOp.LEQ, bound=2, relative=False),
            OutcomeConstraint(
                Metric("b"), ComparisonOp.LEQ, bound=3.6, relative=False),
        ])
        self.assertEqual(mock_predict.call_count, 2)

        # Raises error if predict fails with in-design status quo
        g = ModelBridge(search_space, None, [], status_quo_name="1_1")
        oc = OptimizationConfig(
            objective=objective,
            outcome_constraints=[
                OutcomeConstraint(Metric("a"),
                                  ComparisonOp.LEQ,
                                  bound=2,
                                  relative=False),
                OutcomeConstraint(Metric("b"),
                                  ComparisonOp.LEQ,
                                  bound=-10,
                                  relative=True),
            ],
        )
        with self.assertRaises(Exception):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no status quo
        exp = Experiment(search_space, "name")
        g = ModelBridge(search_space, None, [], exp)
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, g, None)

        # Raises error with relative constraint, no modelbridge
        with self.assertRaises(ValueError):
            oc = t.transform_optimization_config(oc, None, None)
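
The derelativized bounds asserted above are consistent with a simple percentage interpretation of relative constraints: a relative bound of -10 means 10% below the status quo, so a predicted status-quo value of 5.0 for metric "b" yields 5.0 * (1 - 10/100) = 4.5, and the out-of-design case, which falls back to the raw observed status-quo value (here 4.0), yields 3.6. A quick check of that arithmetic; the status-quo values 5.0 and 4.0 are inferred from the asserted bounds, not stated in the test:

def derelativize_bound(bound_pct, status_quo_value):
    # Relative bound expressed as a percent change from the status-quo value.
    return (1 + bound_pct / 100.0) * status_quo_value

assert derelativize_bound(-10, 5.0) == 4.5  # in-design: model prediction at status quo
assert derelativize_bound(-10, 4.0) == 3.6  # out-of-design: raw observed status-quo value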
Example #20
def get_optimization_config_no_constraints() -> OptimizationConfig:
    return OptimizationConfig(objective=Objective(
        metric=Metric("test_metric")))
Example #21
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
    tracking_metric_names: Optional[List[str]] = None,
    # Single-objective optimization arguments:
    objective_name: Optional[str] = None,
    minimize: bool = False,
    # Multi-objective optimization arguments:
    objectives: Optional[Dict[str, str]] = None,
    objective_thresholds: Optional[List[str]] = None,
    support_intermediate_data: Optional[bool] = False,
    immutable_search_space_and_opt_config: Optional[bool] = True,
) -> Experiment:
    """Instantiation wrapper that allows for Ax `Experiment` creation
    without importing or instantiating any Ax classes.

    Args:
        parameters: List of dictionaries representing parameters in the
            experiment search space.
            Required elements in the dictionaries are:
            1. "name" (name of parameter, string),
            2. "type" (type of parameter: "range", "fixed", or "choice", string),
            and one of the following:
            3a. "bounds" for range parameters (list of two values, lower bound
            first),
            3b. "values" for choice parameters (list of values), or
            3c. "value" for fixed parameters (single value).
            Optional elements are:
            1. "log_scale" (for float-valued range parameters, bool),
            2. "value_type" (to specify type that values of this parameter should
            take; expects "float", "int", "bool" or "str"),
            3. "is_fidelity" (bool) and "target_value" (float) for fidelity
            parameters,
            4. "is_ordered" (bool) for choice parameters,
            5. "is_task" (bool) for task parameters, and
            6. "digits" (int) for float-valued range parameters.
        name: Name of the experiment to be created.
        parameter_constraints: List of string representation of parameter
            constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
            the latter constraints, any number of arguments is accepted, and
            acceptable operators are "<=" and ">=".
        outcome_constraints: List of string representation of outcome
            constraints of form "metric_name >= bound", like "m1 <= 3."
        status_quo: Parameterization of the current state of the system.
            If set, this will be added to each trial to be evaluated alongside
            test configurations.
        experiment_type: String indicating type of the experiment (e.g. name of
            a product in which it is used), if any.
        tracking_metric_names: Names of additional tracking metrics not used for
            optimization.
        objective_name: Name of the metric used as objective in this experiment,
            if experiment is single-objective optimization.
        minimize: Whether this experiment represents a minimization problem, if
            experiment is a single-objective optimization.
        objectives: Mapping from an objective name to "minimize" or "maximize"
            representing the direction for that objective. Used only for
            multi-objective optimization experiments.
        objective_thresholds: A list of objective threshold constraints for multi-
            objective optimization, in the same string format as `outcome_constraints`
            argument.
        support_intermediate_data: Whether trials may report metric results for
            incomplete runs.
        immutable_search_space_and_opt_config: Whether the search space and
            optimization config on this experiment are immutable, i.e. cannot be
            updated after creation. Defaults to True. If set to True, copies of the
            search space and optimization config are not stored or loaded on each
            generator run, which improves storage performance.
    """
    if objective_name is not None and (objectives is not None
                                       or objective_thresholds is not None):
        raise UnsupportedError(
            "Ambiguous objective definition: for single-objective optimization "
            "`objective_name` and `minimize` arguments expected. For multi-objective "
            "optimization `objectives` and `objective_thresholds` arguments expected."
        )

    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)

    # TODO(jej): Needs to be decided per-metric when supporting heterogenous data.
    metric_cls = MapMetric if support_intermediate_data else Metric
    if objectives is None:
        optimization_config = OptimizationConfig(
            objective=Objective(
                metric=metric_cls(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=make_outcome_constraints(
                outcome_constraints or [], status_quo_arm is not None),
        )
    else:
        optimization_config = make_optimization_config(
            objectives,
            objective_thresholds or [],
            outcome_constraints or [],
            status_quo_arm is not None,
        )

    tracking_metrics = (None if tracking_metric_names is None else [
        Metric(name=metric_name) for metric_name in tracking_metric_names
    ])

    default_data_type = (DataType.MAP_DATA
                         if support_intermediate_data else DataType.DATA)

    immutable_ss_and_oc = immutable_search_space_and_opt_config
    properties = ({} if not immutable_search_space_and_opt_config else {
        Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value:
        immutable_ss_and_oc
    })

    return Experiment(
        name=name,
        search_space=make_search_space(parameters, parameter_constraints
                                       or []),
        optimization_config=optimization_config,
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
        tracking_metrics=tracking_metrics,
        default_data_type=default_data_type,
        properties=properties,
    )
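
A corresponding multi-objective call, using the objectives mapping, objective thresholds, and tracking metrics described above (metric names are illustrative):

exp = make_experiment(
    name="quickstart_moo",
    parameters=[
        {"name": "x1", "type": "range", "bounds": [0.0, 1.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 1.0]},
    ],
    objectives={"accuracy": "maximize", "latency": "minimize"},
    objective_thresholds=["accuracy >= 0.8", "latency <= 3.0"],
    tracking_metric_names=["memory"],
    support_intermediate_data=False,
)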
Example #22
def get_augmented_branin_optimization_config() -> OptimizationConfig:
    return OptimizationConfig(objective=get_augmented_branin_objective())
Example #23
 def testTransformOptimizationConfig(self):
     # basic test
     m1 = Metric(name="m1")
     objective_m1 = Objective(metric=m1, minimize=False)
     oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[])
     tf = PowerTransformY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     self.assertEqual(oc_tf, oc)
     # Output constraint on a different metric should not transform the bound
     m2 = Metric(name="m2")
     for bound in [-1.234, 0, 2.345]:
         oc = OptimizationConfig(
             objective=objective_m1,
             outcome_constraints=get_constraint(
                 metric=m2, bound=bound, relative=False
             ),
         )
         oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
         self.assertEqual(oc_tf, oc)
     # Output constraint on the same metric should transform the bound
     objective_m2 = Objective(metric=m2, minimize=False)
     for bound in [-1.234, 0, 2.345]:
         oc = OptimizationConfig(
             objective=objective_m2,
             outcome_constraints=get_constraint(
                 metric=m1, bound=bound, relative=False
             ),
         )
         oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
         oc_true = deepcopy(oc)
         tf_bound = (
             tf.power_transforms["m1"].transform(np.array(bound, ndmin=2)).item()
         )
         oc_true.outcome_constraints[0].bound = tf_bound
         self.assertEqual(oc_tf, oc_true)
     # Relative constraints aren't supported
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=get_constraint(metric=m1, bound=2.345, relative=True),
     )
     with self.assertRaisesRegex(
         ValueError,
         "PowerTransformY cannot be applied to metric m1 since it is "
         "subject to a relative constraint.",
     ):
         tf.transform_optimization_config(oc, None, None)
     # Support for scalarized outcome constraints isn't implemented
     m3 = Metric(name="m3")
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=[
             ScalarizedOutcomeConstraint(
                 metrics=[m1, m3], op=ComparisonOp.GEQ, bound=2.345, relative=False
             )
         ],
     )
     with self.assertRaises(NotImplementedError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "PowerTransformY cannot be used for metric(s) {'m1'} "
         "that are part of a ScalarizedOutcomeConstraint.",
         str(cm.exception),
     )