Example #1
def get_objective_threshold(
    metric_name: str = "m1", bound=-0.25, comparison_op: ComparisonOp = ComparisonOp.GEQ
) -> ObjectiveThreshold:
    return ObjectiveThreshold(
        metric=Metric(name=metric_name), bound=bound, op=comparison_op
    )
Example #2
def get_scalarized_objective() -> Objective:
    return ScalarizedObjective(
        metrics=[Metric(name="m1"), Metric(name="m3")],
        weights=[1.0, 2.0],
        minimize=False,
    )
Example #3
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
    tracking_metric_names: Optional[List[str]] = None,
    # Single-objective optimization arguments:
    objective_name: Optional[str] = None,
    minimize: bool = False,
    # Multi-objective optimization arguments:
    objectives: Optional[Dict[str, str]] = None,
    objective_thresholds: Optional[List[str]] = None,
    support_intermediate_data: bool = False,
    immutable_search_space_and_opt_config: bool = True,
    is_test: bool = False,
) -> Experiment:
    """Instantiation wrapper that allows for Ax `Experiment` creation
    without importing or instantiating any Ax classes.

    Args:
        parameters: List of dictionaries representing parameters in the
            experiment search space.
            Required elements in the dictionaries are:
            1. "name" (name of parameter, string),
            2. "type" (type of parameter: "range", "fixed", or "choice", string),
            and one of the following:
            3a. "bounds" for range parameters (list of two values, lower bound
            first),
            3b. "values" for choice parameters (list of values), or
            3c. "value" for fixed parameters (single value).
            Optional elements are:
            1. "log_scale" (for float-valued range parameters, bool),
            2. "value_type" (to specify type that values of this parameter should
            take; expects "float", "int", "bool" or "str"),
            3. "is_fidelity" (bool) and "target_value" (float) for fidelity
            parameters,
            4. "is_ordered" (bool) for choice parameters,
            5. "is_task" (bool) for task parameters, and
            6. "digits" (int) for float-valued range parameters.
        name: Name of the experiment to be created.
        parameter_constraints: List of string representations of parameter
            constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
            the latter constraints, any number of arguments is accepted, and
            the acceptable operators are "<=" and ">=".
        outcome_constraints: List of string representations of outcome
            constraints of the form "metric_name >= bound", e.g. "m1 <= 3".
        status_quo: Parameterization of the current state of the system.
            If set, this will be added to each trial to be evaluated alongside
            test configurations.
        experiment_type: String indicating type of the experiment (e.g. name of
            a product in which it is used), if any.
        tracking_metric_names: Names of additional tracking metrics not used for
            optimization.
        objective_name: Name of the metric used as the objective in this
            experiment, if the experiment is a single-objective optimization.
        minimize: Whether this experiment represents a minimization problem, if
            the experiment is a single-objective optimization.
        objectives: Mapping from an objective name to "minimize" or "maximize"
            representing the direction for that objective. Used only for
            multi-objective optimization experiments.
        objective_thresholds: A list of objective threshold constraints for
            multi-objective optimization, in the same string format as the
            `outcome_constraints` argument.
        support_intermediate_data: Whether trials may report metric results for
            incomplete runs.
        immutable_search_space_and_opt_config: Whether the search space and
            optimization config on this experiment are immutable, i.e. cannot be
            updated after creation. Defaults to True. If True, copies of the
            search space and optimization config are not stored or loaded on
            each generator run, which improves storage performance.
        is_test: Whether this experiment will be a test experiment (useful for
            marking test experiments in storage, etc.). Defaults to False.
    """
    if objective_name is not None and (objectives is not None
                                       or objective_thresholds is not None):
        raise UnsupportedError(
            "Ambiguous objective definition: for single-objective optimization "
            "`objective_name` and `minimize` arguments expected. For multi-objective "
            "optimization `objectives` and `objective_thresholds` arguments expected."
        )

    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)

    # TODO(jej): Needs to be decided per-metric when supporting heterogeneous data.
    metric_cls = MapMetric if support_intermediate_data else Metric
    if objectives is None:
        optimization_config = OptimizationConfig(
            objective=Objective(
                metric=metric_cls(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=make_outcome_constraints(
                outcome_constraints or [], status_quo_arm is not None),
        )
    else:
        optimization_config = make_optimization_config(
            objectives,
            objective_thresholds or [],
            outcome_constraints or [],
            status_quo_arm is not None,
        )

    tracking_metrics = (None if tracking_metric_names is None else [
        Metric(name=metric_name) for metric_name in tracking_metric_names
    ])

    default_data_type = (DataType.MAP_DATA
                         if support_intermediate_data else DataType.DATA)

    immutable_ss_and_oc = immutable_search_space_and_opt_config
    properties = ({} if not immutable_search_space_and_opt_config else {
        Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value:
        immutable_ss_and_oc
    })

    return Experiment(
        name=name,
        search_space=make_search_space(parameters, parameter_constraints
                                       or []),
        optimization_config=optimization_config,
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
        tracking_metrics=tracking_metrics,
        default_data_type=default_data_type,
        properties=properties,
        is_test=is_test,
    )
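
Since the docstring above describes the expected parameter-dictionary and constraint-string formats, a minimal usage sketch may help. The call below is illustrative only: the metric and parameter names are made up, and the import path is an assumption that may differ between Ax versions.

# Minimal usage sketch for the wrapper above. The import path and all names
# below are assumptions for illustration, not part of the original example.
from ax.service.utils.instantiation import make_experiment

experiment = make_experiment(
    name="quickstart_experiment",
    parameters=[
        # Range parameter: "bounds" lists the lower bound first.
        {"name": "lr", "type": "range", "bounds": [0.001, 0.1], "value_type": "float", "log_scale": True},
        {"name": "x1", "type": "range", "bounds": [0.0, 10.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 10.0]},
        # Choice parameter: "values" enumerates the allowed settings.
        {"name": "activation", "type": "choice", "values": ["relu", "tanh"]},
        # Fixed parameter: "value" pins a single setting.
        {"name": "batch_size", "type": "fixed", "value": 32, "value_type": "int"},
    ],
    # Single-objective arguments (mutually exclusive with `objectives`).
    objective_name="objective_metric",
    minimize=True,
    parameter_constraints=["x1 <= x2"],
    outcome_constraints=["constraint_metric <= 4.0"],
)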
Example #4
def get_metric() -> Metric:
    return Metric(name="m1", properties={"prop": "val"})
Example #5
def get_objective() -> Objective:
    return Objective(metric=Metric(name="m1"), minimize=False)
Example #6
    def testFetchAndStoreData(self):
        n = 10
        exp = self._setupBraninExperiment(n)
        batch = exp.trials[0]
        batch.mark_completed()

        # Test fetch data
        batch_data = batch.fetch_data()
        self.assertEqual(len(batch_data.df), n)

        exp_data = exp.fetch_data()
        exp_data2 = exp.metrics["b"].fetch_experiment_data(exp)
        self.assertEqual(len(exp_data2.df), 4 * n)
        self.assertEqual(len(exp_data.df), 4 * n)
        self.assertEqual(len(exp.arms_by_name), 4 * n)

        # Verify that `metrics` kwarg to `experiment.fetch_data` is respected.
        exp.add_tracking_metric(Metric(name="not_yet_on_experiment"))
        exp.attach_data(
            Data(
                df=pd.DataFrame.from_records(
                    [
                        {
                            "arm_name": "0_0",
                            "metric_name": "not_yet_on_experiment",
                            "mean": 3,
                            "sem": 0,
                            "trial_index": 0,
                        }
                    ]
                )
            )
        )
        self.assertEqual(
            set(
                exp.fetch_data(metrics=[Metric(name="not_yet_on_experiment")])
                .df["metric_name"]
                .values
            ),
            {"not_yet_on_experiment"},
        )

        # Verify that data lookup is empty for a trial that does not yet have data.
        self.assertEqual(len(exp.lookup_data_for_trial(1)[0].df), 0)

        # Test local storage
        t1 = exp.attach_data(batch_data)
        t2 = exp.attach_data(exp_data)

        full_dict = exp.data_by_trial
        self.assertEqual(len(full_dict), 2)  # data for 2 trials
        self.assertEqual(len(full_dict[0]), 3)  # 3 data objs for batch 0

        # Test retrieving original batch 0 data
        self.assertEqual(len(exp.lookup_data_for_ts(t1).df), n)
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), n)

        # Test retrieving full exp data
        self.assertEqual(len(exp.lookup_data_for_ts(t2).df), 4 * n)

        with self.assertRaisesRegex(ValueError, ".* for metric"):
            exp.attach_data(batch_data, combine_with_last_data=True)

        new_data = Data(
            df=pd.DataFrame.from_records(
                [
                    {
                        "arm_name": "0_0",
                        "metric_name": "z",
                        "mean": 3,
                        "sem": 0,
                        "trial_index": 0,
                    }
                ]
            )
        )
        t3 = exp.attach_data(new_data, combine_with_last_data=True)
        self.assertEqual(len(full_dict[0]), 4)  # 4 data objs for batch 0 now
        self.assertIn("z", exp.lookup_data_for_ts(t3).df["metric_name"].tolist())

        # Verify we don't get the data if the trial is abandoned
        batch._status = TrialStatus.ABANDONED
        self.assertEqual(len(batch.fetch_data().df), 0)
        self.assertEqual(len(exp.fetch_data().df), 3 * n)

        # Verify we do get the stored data if there is an unimplemented metric.
        del exp._data_by_trial[0][t3]  # Remove attached data for nonexistent metric.
        # Remove implemented metric that is `available_while_running`
        # (and therefore not pulled from cache).
        exp.remove_tracking_metric(metric_name="b")
        exp.add_tracking_metric(Metric(name="b"))  # Add unimplemented metric.
        batch._status = TrialStatus.COMPLETED
        # Data should be getting looked up now.
        self.assertEqual(batch.fetch_data(), exp.lookup_data_for_ts(t1))
        self.assertEqual(exp.fetch_data(), exp.lookup_data_for_ts(t1))
        metrics_in_data = set(batch.fetch_data().df["metric_name"].values)
        # Data for metric "z" should no longer be present since we removed it.
        self.assertEqual(metrics_in_data, {"b"})

        # Verify that `metrics` kwarg to `experiment.fetch_data` is respected
        # when pulling looked-up data.
        self.assertEqual(
            exp.fetch_data(metrics=[Metric(name="not_on_experiment")]), Data()
        )
Example #7
 def test_create_experiment(self) -> None:
     """Test basic experiment creation."""
     ax_client = AxClient(
         GenerationStrategy(
             steps=[GenerationStep(model=Models.SOBOL, num_trials=30)]))
     with self.assertRaisesRegex(ValueError,
                                 "Experiment not set on Ax client"):
         ax_client.experiment
     ax_client.create_experiment(
         name="test_experiment",
         parameters=[
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [0.001, 0.1],
                 "value_type": "float",
                 "log_scale": True,
             },
             {
                 "name": "y",
                 "type": "choice",
                 "values": [1, 2, 3],
                 "value_type": "int",
                 "is_ordered": True,
             },
             {
                 "name": "x3",
                 "type": "fixed",
                 "value": 2,
                 "value_type": "int"
             },
             {
                 "name": "x4",
                 "type": "range",
                 "bounds": [1.0, 3.0],
                 "value_type": "int",
             },
             {
                 "name": "x5",
                 "type": "choice",
                 "values": ["one", "two", "three"],
                 "value_type": "str",
             },
             {
                 "name": "x6",
                 "type": "range",
                 "bounds": [1.0, 3.0],
                 "value_type": "int",
             },
         ],
         objective_name="test_objective",
         minimize=True,
         outcome_constraints=["some_metric >= 3", "some_metric <= 4.0"],
         parameter_constraints=["x4 <= x6"],
     )
     assert ax_client._experiment is not None
     self.assertEqual(ax_client._experiment, ax_client.experiment)
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x"],
         RangeParameter(
             name="x",
             parameter_type=ParameterType.FLOAT,
             lower=0.001,
             upper=0.1,
             log_scale=True,
         ),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["y"],
         ChoiceParameter(
             name="y",
             parameter_type=ParameterType.INT,
             values=[1, 2, 3],
             is_ordered=True,
         ),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x3"],
         FixedParameter(name="x3",
                        parameter_type=ParameterType.INT,
                        value=2),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x4"],
         RangeParameter(name="x4",
                        parameter_type=ParameterType.INT,
                        lower=1.0,
                        upper=3.0),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x5"],
         ChoiceParameter(
             name="x5",
             parameter_type=ParameterType.STRING,
             values=["one", "two", "three"],
         ),
     )
     self.assertEqual(
         ax_client._experiment.optimization_config.outcome_constraints[0],
         OutcomeConstraint(
             metric=Metric(name="some_metric"),
             op=ComparisonOp.GEQ,
             bound=3.0,
             relative=False,
         ),
     )
     self.assertEqual(
         ax_client._experiment.optimization_config.outcome_constraints[1],
         OutcomeConstraint(
             metric=Metric(name="some_metric"),
             op=ComparisonOp.LEQ,
             bound=4.0,
             relative=False,
         ),
     )
     self.assertTrue(
         ax_client._experiment.optimization_config.objective.minimize)
Example #8
    def testFetchAndStoreData(self):
        n = 10
        exp = self._setupBraninExperiment(n)
        batch = exp.trials[0]

        # Test fetch data
        batch_data = batch.fetch_data()
        self.assertEqual(len(batch_data.df), n)

        exp_data = exp.fetch_data()
        exp_data2 = exp.metrics["b"].fetch_experiment_data(exp)
        self.assertEqual(len(exp_data2.df), 4 * n)
        self.assertEqual(len(exp_data.df), 4 * n)
        self.assertEqual(len(exp.arms_by_name), 4 * n)

        # Verify data lookup is empty
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), 0)

        # Test local storage
        t1 = exp.attach_data(batch_data)
        t2 = exp.attach_data(exp_data)

        full_dict = exp.data_by_trial
        self.assertEqual(len(full_dict), 2)  # data for 2 trials
        self.assertEqual(len(full_dict[0]), 2)  # 2 data objs for batch 0

        # Test retrieving original batch 0 data
        self.assertEqual(len(exp.lookup_data_for_ts(t1).df), n)
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), n)

        # Test retrieving full exp data
        self.assertEqual(len(exp.lookup_data_for_ts(t2).df), 4 * n)

        with self.assertRaisesRegex(ValueError, ".* for metric"):
            exp.attach_data(batch_data, combine_with_last_data=True)

        new_data = Data(
            df=pd.DataFrame.from_records(
                [{"arm_name": "0_0", "metric_name": "z", "mean": 3, "trial_index": 0}]
            )
        )
        t3 = exp.attach_data(new_data, combine_with_last_data=True)
        self.assertEqual(len(full_dict[0]), 3)  # 3 data objs for batch 0 now
        self.assertIn("z", exp.lookup_data_for_ts(t3).df["metric_name"].tolist())

        # Verify we don't get the data if the trial is abandoned
        batch._status = TrialStatus.ABANDONED
        self.assertEqual(len(batch.fetch_data().df), 0)
        self.assertEqual(len(exp.fetch_data().df), 3 * n)

        # For `CANDIDATE` trials, we append attached data to fetched data,
        # so the attached data row with metric name "z" should appear in fetched
        # data.
        batch._status = TrialStatus.CANDIDATE
        self.assertEqual(len(batch.fetch_data().df), n + 1)
        # n arms in trial #0, 3 * n arms in trial #1
        self.assertEqual(len(exp.fetch_data().df), 4 * n + 1)
        metrics_in_data = set(batch.fetch_data().df["metric_name"].values)
        self.assertEqual(metrics_in_data, {"b", "z"})

        # Verify we do get the stored data if there is an unimplemented metric.
        del exp._data_by_trial[0][t3]  # Remove attached data for nonexistent metric.
        exp.remove_tracking_metric(metric_name="b")  # Remove implemented metric.
        exp.add_tracking_metric(Metric(name="dummy"))  # Add unimplemented metric.
        batch._status = TrialStatus.RUNNING
        # Data should be getting looked up now.
        self.assertEqual(batch.fetch_data(), exp.lookup_data_for_ts(t1))
        self.assertEqual(exp.fetch_data(), exp.lookup_data_for_ts(t2))
        metrics_in_data = set(batch.fetch_data().df["metric_name"].values)
        # Data for metric "z" should no longer be present since we removed it.
        self.assertEqual(metrics_in_data, {"b"})

        # Check that an error is raised if dummy and implemented metrics are
        # fetched at once.
        with self.assertRaisesRegex(ValueError, "Unexpected combination"):
            exp.fetch_data(
                [BraninMetric(name="b", param_names=["x1", "x2"]), Metric(name="m")]
            )
Example #9
def get_metric() -> Metric:
    return Metric(name="m1")
Example #10
    def test_best_point(
        self,
        _mock_gen,
        _mock_best_point,
        _mock_fit,
        _mock_predict,
        _mock_gen_arms,
        _mock_unwrap,
        _mock_obs_from_data,
    ):
        exp = Experiment(search_space=get_search_space_for_range_value(), name="test")
        oc = OptimizationConfig(
            objective=Objective(metric=Metric("a"), minimize=False),
            outcome_constraints=[],
        )
        modelbridge = ArrayModelBridge(
            search_space=get_search_space_for_range_value(),
            model=NumpyModel(),
            transforms=[t1, t2],
            experiment=exp,
            data=Data(),
            optimization_config=oc,
        )

        self.assertEqual(list(modelbridge.transforms.keys()), ["Cast", "t1", "t2"])

        # Check that an optimization config is required.
        with self.assertRaises(ValueError):
            run = modelbridge.gen(n=1, optimization_config=None)

        # _fit is mocked, which typically sets this.
        modelbridge.outcomes = ["a"]
        run = modelbridge.gen(
            n=1,
            optimization_config=oc,
        )

        arm, predictions = run.best_arm_predictions
        self.assertEqual(arm.parameters, {})
        self.assertEqual(predictions[0], {"m": 1.0})
        self.assertEqual(predictions[1], {"m": {"m": 2.0}})

        model_arm, model_predictions = modelbridge.model_best_point()
        self.assertEqual(model_predictions[0], {"m": 1.0})
        self.assertEqual(model_predictions[1], {"m": {"m": 2.0}})

        # test optimization config validation - raise error when
        # ScalarizedOutcomeConstraint contains a metric that is not in the outcomes
        with self.assertRaises(ValueError):
            run = modelbridge.gen(
                n=1,
                optimization_config=OptimizationConfig(
                    objective=Objective(metric=Metric("a"), minimize=False),
                    outcome_constraints=[
                        ScalarizedOutcomeConstraint(
                            metrics=[Metric("wrong_metric_name")],
                            weights=[1.0],
                            op=ComparisonOp.LEQ,
                            bound=0,
                        )
                    ],
                ),
            )
Example #11
def get_multi_objective() -> Objective:
    return MultiObjective(metrics=[Metric(name="m1"),
                                   Metric(name="m2")],
                          minimize=False)
Example #12
 def testTransformOptimizationConfig(self):
     # basic test
     m1 = Metric(name="m1")
     objective_m1 = Objective(metric=m1, minimize=False)
     oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[])
     tf = PowerTransformY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     self.assertEqual(oc_tf, oc)
     # Output constraint on a different metric should not transform the bound
     m2 = Metric(name="m2")
     for bound in [-1.234, 0, 2.345]:
         oc = OptimizationConfig(
             objective=objective_m1,
             outcome_constraints=get_constraint(
                 metric=m2, bound=bound, relative=False
             ),
         )
         oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
         self.assertEqual(oc_tf, oc)
     # Output constraint on the same metric should transform the bound
     objective_m2 = Objective(metric=m2, minimize=False)
     for bound in [-1.234, 0, 2.345]:
         oc = OptimizationConfig(
             objective=objective_m2,
             outcome_constraints=get_constraint(
                 metric=m1, bound=bound, relative=False
             ),
         )
         oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
         oc_true = deepcopy(oc)
         tf_bound = (
             tf.power_transforms["m1"].transform(np.array(bound, ndmin=2)).item()
         )
         oc_true.outcome_constraints[0].bound = tf_bound
         self.assertEqual(oc_tf, oc_true)
     # Relative constraints aren't supported
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=get_constraint(metric=m1, bound=2.345, relative=True),
     )
     with self.assertRaisesRegex(
         ValueError,
         "PowerTransformY cannot be applied to metric m1 since it is "
         "subject to a relative constraint.",
     ):
         tf.transform_optimization_config(oc, None, None)
     # Support for scalarized outcome constraints isn't implemented
     m3 = Metric(name="m3")
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=[
             ScalarizedOutcomeConstraint(
                 metrics=[m1, m3], op=ComparisonOp.GEQ, bound=2.345, relative=False
             )
         ],
     )
     with self.assertRaises(NotImplementedError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "PowerTransformY cannot be used for metric(s) {'m1'} "
         "that are part of a ScalarizedOutcomeConstraint.",
         str(cm.exception),
     )
Example #13
 def test_MOO_with_more_outcomes_than_thresholds(self):
     experiment = get_branin_experiment_with_multi_objective(
         has_optimization_config=False
     )
     metric_c = Metric(name="c", lower_is_better=False)
     metric_a = Metric(name="a", lower_is_better=False)
     objective_thresholds = [
         ObjectiveThreshold(
             metric=metric_c,
             bound=2.0,
             relative=False,
         ),
         ObjectiveThreshold(
             metric=metric_a,
             bound=1.0,
             relative=False,
         ),
     ]
     experiment.optimization_config = MultiObjectiveOptimizationConfig(
         objective=MultiObjective(
             objectives=[
                 Objective(metric=metric_a),
                 Objective(metric=metric_c),
             ]
         ),
         objective_thresholds=objective_thresholds,
     )
     experiment.add_tracking_metric(Metric(name="b", lower_is_better=False))
     sobol = get_sobol(
         search_space=experiment.search_space,
     )
     sobol_run = sobol.gen(1)
     experiment.new_batch_trial().add_generator_run(sobol_run).run().mark_completed()
     data = Data(
         pd.DataFrame(
             data={
                 "arm_name": ["0_0", "0_0", "0_0"],
                 "metric_name": ["a", "b", "c"],
                 "mean": [1.0, 2.0, 3.0],
                 "trial_index": [0, 0, 0],
                 "sem": [0, 0, 0],
             }
         )
     )
     test_names_to_fns = {
         "MOO_NEHVI": get_MOO_NEHVI,
         "MOO_EHVI": get_MOO_NEHVI,
         "MOO_PAREGO": get_MOO_PAREGO,
         "MOO_RS": get_MOO_RS,
     }
     for test_name, factory_fn in test_names_to_fns.items():
         with self.subTest(test_name):
             moo_model = factory_fn(
                 experiment=experiment,
                 data=data,
             )
             moo_gr = moo_model.gen(n=1)
             obj_t = moo_gr.gen_metadata["objective_thresholds"]
             self.assertEqual(obj_t[0], objective_thresholds[1])
             self.assertEqual(obj_t[1], objective_thresholds[0])
             self.assertEqual(len(obj_t), 2)
Example #14
    def testGetProperties(self):
        properties = get_object_properties(Metric(name="foo"))
        self.assertEqual(properties, {"name": "foo"})

        properties = get_object_properties(Metric(name="foo", lower_is_better=True))
        self.assertEqual(properties, {"name": "foo", "lower_is_better": True})
Example #15
 def setUp(self):
     self.metrics = {"m1": Metric(name="m1"), "m2": Metric(name="m2")}
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.multi_objective = ScalarizedObjective(
         metrics=[self.metrics["m1"], self.metrics["m2"]])
Example #16
    def testModelBridge(self, mock_fit, mock_gen_arms,
                        mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [t1, t2]
        exp = get_experiment()
        modelbridge = ModelBridge(search_space_for_value(), 0, transforms, exp,
                                  0)
        self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(
            fit_args["search_space"] == search_space_for_value(8.0))
        self.assertTrue(
            fit_args["observation_features"] ==
            [observation1trans().features,
             observation2trans().features])
        self.assertTrue(fit_args["observation_data"] ==
                        [observation1trans().data,
                         observation2trans().data])
        self.assertTrue(mock_observations_from_data.called)

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[observation2trans().data],
        )

        modelbridge.predict([observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([observation1trans().features], [2], None),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(
            name="test_metric")))
        modelbridge.gen(
            n=1,
            search_space=search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={},
            observation_features=[observation1().features])

        # Gen with no pending observations and no fixed features
        modelbridge.gen(n=1,
                        search_space=search_space_for_value(),
                        optimization_config=None)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"),
                     Metric(name="test_metric_2")]))
        modelbridge.gen(n=1,
                        search_space=search_space_for_value(),
                        optimization_config=oc2)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[observation1trans().data],
        )
        cv_training_data = [observation2()]
        cv_test_points = [observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points)
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[observation2trans().features],
            obs_data=[observation2trans().data],
            cv_test_points=[observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [observation1(), observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(modelbridge.model_space == search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [True, True])

        modelbridge.training_in_design = [True, False]
        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        ood_obs = modelbridge.out_of_design_data()
        self.assertTrue(
            ood_obs == unwrap_observation_data([observation2().data]))
Example #17
    def testMetricSetters(self):
        # Establish current metrics size
        self.assertEqual(
            len(get_optimization_config().metrics) + 1, len(self.experiment.metrics)
        )

        # Add optimization config with 1 different metric
        opt_config = get_optimization_config()
        opt_config.outcome_constraints[0].metric = Metric(name="m3")
        self.experiment.optimization_config = opt_config

        # Verify total metrics size is the same.
        self.assertEqual(
            len(get_optimization_config().metrics) + 1, len(self.experiment.metrics)
        )

        # Test adding new tracking metric
        self.experiment.add_tracking_metric(Metric(name="m4"))
        self.assertEqual(
            len(get_optimization_config().metrics) + 2, len(self.experiment.metrics)
        )

        # Test adding new tracking metrics
        self.experiment.add_tracking_metrics([Metric(name="z1")])
        self.assertEqual(
            len(get_optimization_config().metrics) + 3, len(self.experiment.metrics)
        )

        # Verify update_tracking_metric updates the metric definition
        self.assertIsNone(self.experiment.metrics["m4"].lower_is_better)
        self.experiment.update_tracking_metric(Metric(name="m4", lower_is_better=True))
        self.assertTrue(self.experiment.metrics["m4"].lower_is_better)

        # Verify unable to add existing metric
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metric(Metric(name="m4"))

        # Verify unable to add existing metric
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metrics([Metric(name="z1"), Metric(name="m4")])

        # Verify unable to add metric in optimization config
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metric(Metric(name="m1"))

        # Verify unable to add metric in optimization config
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metrics([Metric(name="z2"), Metric(name="m1")])

        # Cannot update metric not already on experiment
        with self.assertRaises(ValueError):
            self.experiment.update_tracking_metric(Metric(name="m5"))

        # Cannot remove metric not already on experiment
        with self.assertRaises(ValueError):
            self.experiment.remove_tracking_metric(metric_name="m5")
Example #18
 def testInit(self):
     metric = Metric(name="m1", lower_is_better=False)
     self.assertEqual(str(metric), METRIC_STRING)
Example #19
    def testGen(self, mock_init, mock_best_point, mock_gen):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = NumpyModelBridge()
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        ma.transforms = OrderedDict()
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0,
                                                                    0.0])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0],
                           np.array([[0.0, -1.0]])))
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1],
                           np.array([[-2]])))
        self.assertTrue(
            np.array_equal(
                gen_args["linear_constraints"][0],
                np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
            ))
        self.assertTrue(
            np.array_equal(gen_args["linear_constraints"][1],
                           np.array([[0.0], [-3.5]])))
        self.assertEqual(gen_args["fixed_features"], {2: 3.0})
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][0], np.array([])))
        self.assertTrue(
            np.array_equal(gen_args["pending_observations"][1],
                           np.array([[0.6, 1.6, 3.0]])))
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(observation_features[0].parameters, {
            "x": 1.0,
            "y": 2.0,
            "z": 3.0
        })
        self.assertEqual(observation_features[1].parameters, {
            "x": 3.0,
            "y": 4.0,
            "z": 3.0
        })
        self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))

        # Test with multiple objectives.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="a"), Metric(name="b")], minimize=True))
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=oc2,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[1][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"],
                           np.array([-1.0, -1.0])))

        # Test with MultiObjective (unweighted multiple objectives)
        oc3 = MultiObjectiveOptimizationConfig(
            objective=MultiObjective(objectives=[
                Objective(metric=Metric(name="a")),
                Objective(metric=Metric(name="b", lower_is_better=True),
                          minimize=True),
            ], ))
        search_space = SearchSpace(self.parameters)  # Unconstrained
        observation_features, weights, best_obsf, _ = ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=oc3,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({"z": 3.0}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = mock_gen.mock_calls[2][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0),
                                              (0.0, 5.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([1.0,
                                                                    -1.0])))

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(3, search_space, {}, ObservationFeatures({}), None,
                optimization_config)
        gen_args = mock_gen.mock_calls[3][2]
        self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["linear_constraints"])
        self.assertIsNone(gen_args["fixed_features"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
        optimization_config.objective.minimize = True
        optimization_config.outcome_constraints[0].relative = True
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=self.search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
            )
Example #20
 def testClone(self):
     metric1 = Metric(name="m1", lower_is_better=False)
     self.assertEqual(metric1, metric1.clone())
Example #21
    def testFetchAndStoreData(self):
        n = 10
        exp = self._setupBraninExperiment(n)
        batch = exp.trials[0]

        # Test fetch data
        batch_data = batch.fetch_data()
        print(batch_data.df)
        self.assertEqual(len(batch_data.df), n)

        exp_data = exp.fetch_data()
        exp_data2 = exp.metrics["b"].fetch_experiment_data(exp)
        self.assertEqual(len(exp_data2.df), 4 * n)
        self.assertEqual(len(exp_data.df), 4 * n)
        self.assertEqual(len(exp.arms_by_name), 4 * n)

        # Verify data lookup is empty
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), 0)

        # Test local storage
        t1 = exp.attach_data(batch_data)

        t2 = exp.attach_data(exp_data)

        full_dict = exp.data_by_trial
        self.assertEqual(len(full_dict), 2)  # data for 2 trials
        self.assertEqual(len(full_dict[0]), 2)  # 2 data objs for batch 0

        # Test retrieving original batch 0 data
        self.assertEqual(len(exp.lookup_data_for_ts(t1).df), n)
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), n)

        # Test retrieving full exp data
        self.assertEqual(len(exp.lookup_data_for_ts(t2).df), 4 * n)

        with self.assertRaisesRegex(ValueError, ".* for metric"):
            exp.attach_data(batch_data, combine_with_last_data=True)

        new_data = Data(df=pd.DataFrame.from_records([{
            "arm_name": "0_0",
            "metric_name": "z",
            "mean": 3,
            "trial_index": 0
        }]))
        t3 = exp.attach_data(new_data, combine_with_last_data=True)
        self.assertEqual(len(full_dict[0]), 3)  # 3 data objs for batch 0 now
        self.assertIn("z",
                      exp.lookup_data_for_ts(t3).df["metric_name"].tolist())
        # Remove the newly added data.
        del exp._data_by_trial[0][t3]

        # Verify we don't get the data if the trial is abandoned
        batch._status = TrialStatus.ABANDONED
        self.assertEqual(len(batch.fetch_data().df), 0)
        self.assertEqual(len(exp.fetch_data().df), 3 * n)

        # Verify we do get the data if the trial is a candidate
        batch._status = TrialStatus.CANDIDATE
        self.assertEqual(len(batch.fetch_data().df), n)
        self.assertEqual(len(exp.fetch_data().df), 4 * n)

        # Verify we do get the stored data if there is an unimplemented metric
        batch._status = TrialStatus.RUNNING
        exp.add_tracking_metric(Metric(name="m"))
        self.assertEqual(len(batch.fetch_data().df), n)
        self.assertEqual(len(exp.fetch_data().df), 4 * n)
Example #22
 def testSortable(self):
     metric1 = Metric(name="m1", lower_is_better=False)
     metric2 = Metric(name="m2", lower_is_better=False)
     self.assertTrue(metric1 < metric2)
Example #23
def get_outcome_constraint() -> OutcomeConstraint:
    return OutcomeConstraint(metric=Metric(name="m2"), op=ComparisonOp.GEQ, bound=-0.25)
Example #24
    def testGen(self, mock_init):
        # Test with constraints
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=True),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
            ],
        )
        ma = DiscreteModelBridge()
        model = mock.MagicMock(DiscreteModel, autospec=True, instance=True)
        model.gen.return_value = ([[0.0, 2.0, 3.0], [1.0, 1.0, 3.0]], [1.0, 2.0], {})
        ma.model = model
        ma.parameters = ["x", "y", "z"]
        ma.outcomes = ["a", "b"]
        observation_features, weights, best_observation, _ = ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations=self.pending_observations,
            fixed_features=ObservationFeatures({}),
            model_gen_options=self.model_gen_options,
        )
        gen_args = model.gen.mock_calls[0][2]
        self.assertEqual(gen_args["n"], 3)
        self.assertEqual(
            gen_args["parameter_values"], [[0.0, 1.0], ["foo", "bar"], [True]]
        )
        self.assertTrue(
            np.array_equal(gen_args["objective_weights"], np.array([-1.0, 0.0]))
        )
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][0], np.array([[0.0, -1.0]]))
        )
        self.assertTrue(
            np.array_equal(gen_args["outcome_constraints"][1], np.array([[-2]]))
        )
        self.assertEqual(gen_args["pending_observations"][0], [])
        self.assertEqual(gen_args["pending_observations"][1], [[0, "foo", True]])
        self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
        self.assertEqual(
            observation_features[0].parameters, {"x": 0.0, "y": 2.0, "z": 3.0}
        )
        self.assertEqual(
            observation_features[1].parameters, {"x": 1.0, "y": 1.0, "z": 3.0}
        )
        self.assertEqual(weights, [1.0, 2.0])

        # Test with no constraints, no fixed feature, no pending observations
        search_space = SearchSpace(self.parameters[:2])
        optimization_config.outcome_constraints = []
        ma.parameters = ["x", "y"]
        ma._gen(
            n=3,
            search_space=search_space,
            optimization_config=optimization_config,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options={},
        )
        gen_args = model.gen.mock_calls[1][2]
        self.assertEqual(gen_args["parameter_values"], [[0.0, 1.0], ["foo", "bar"]])
        self.assertIsNone(gen_args["outcome_constraints"])
        self.assertIsNone(gen_args["pending_observations"])

        # Test validation
        optimization_config = OptimizationConfig(
            objective=Objective(Metric("a"), minimize=False),
            outcome_constraints=[
                OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, True)
            ],
        )
        with self.assertRaises(ValueError):
            ma._gen(
                n=3,
                search_space=search_space,
                optimization_config=optimization_config,
                pending_observations={},
                fixed_features=ObservationFeatures({}),
                model_gen_options={},
            )
Example #25
def get_multi_objective() -> Objective:
    return MultiObjective(
        metrics=[Metric(name="m1"), Metric(name="m3", lower_is_better=True)],
        minimize=False,
    )
Example #26
def get_objective_threshold() -> ObjectiveThreshold:
    return ObjectiveThreshold(metric=Metric(name="m1"),
                              bound=-0.25,
                              op=ComparisonOp.GEQ)
Example #27
def get_optimization_config_no_constraints() -> OptimizationConfig:
    return OptimizationConfig(objective=Objective(metric=Metric("test_metric")))
Example #28
 def testTransformOptimizationConfig(self):
     # basic test
     m1 = Metric(name="m1")
     objective_m1 = Objective(metric=m1, minimize=False)
     oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[])
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     self.assertEqual(oc_tf, oc)
     # output constraint on a different metric should work
     m2 = Metric(name="m2")
     oc = OptimizationConfig(
         objective=objective_m1,
         outcome_constraints=self.get_constraint(
             metric=m2, bound=-1, relative=False
         ),
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     self.assertEqual(oc_tf, oc)
     # output constraint with a negative bound should fail
     objective_m2 = Objective(metric=m2, minimize=False)
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(
             metric=m1, bound=-1.234, relative=False
         ),
     )
     with self.assertRaises(ValueError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "LogY transform cannot be applied to metric m1 since the "
         "bound isn't positive, got: -1.234.",
         str(cm.exception),
     )
     # output constraint with a zero bound should also fail
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(metric=m1, bound=0, relative=False),
     )
     with self.assertRaises(ValueError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "LogY transform cannot be applied to metric m1 since the "
         "bound isn't positive, got: 0.",
         str(cm.exception),
     )
     # output constraint with a positive bound should work
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(
             metric=m1, bound=2.345, relative=False
         ),
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     oc.outcome_constraints[0].bound = math.log(2.345)
     self.assertEqual(oc_tf, oc)
     # output constraint with a relative bound should fail
     oc = OptimizationConfig(
         objective=objective_m2,
         outcome_constraints=self.get_constraint(
             metric=m1, bound=2.345, relative=True
         ),
     )
     with self.assertRaises(ValueError) as cm:
         tf.transform_optimization_config(oc, None, None)
     self.assertEqual(
         "LogY transform cannot be applied to metric m1 since it is "
         "subject to a relative constraint.",
         str(cm.exception),
     )
Example #29
    def setUp(self):
        self.df = pd.DataFrame([
            {
                "arm_name": "0_0",
                "mean": 2.0,
                "sem": 0.2,
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_0",
                "mean": 1.8,
                "sem": 0.3,
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_1",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_1",
                "mean": 3.7,
                "sem": 0.5,
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": 0.5,
                "sem": None,
                "trial_index": 1,
                "metric_name": "a",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "b",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
            {
                "arm_name": "0_2",
                "mean": float("nan"),
                "sem": float("nan"),
                "trial_index": 1,
                "metric_name": "c",
                "start_time": "2018-01-01",
                "end_time": "2018-01-02",
            },
        ])

        self.data = Data(df=self.df)

        self.optimization_config = OptimizationConfig(
            objective=Objective(metric=Metric(name="a")),
            outcome_constraints=[
                OutcomeConstraint(
                    metric=Metric(name="b"),
                    op=ComparisonOp.GEQ,
                    bound=0,
                    relative=False,
                )
            ],
        )
Example #30
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
    # Single-objective optimization arguments:
    objective_name: Optional[str] = None,
    minimize: bool = False,
    # Multi-objective optimization arguments:
    objectives: Optional[Dict[str, str]] = None,
    objective_thresholds: Optional[List[str]] = None,
) -> Experiment:
    """Instantiation wrapper that allows for Ax `Experiment` creation
    without importing or instantiating any Ax classes.

    Args:
        parameters: List of dictionaries representing parameters in the
            experiment search space.
            Required elements in the dictionaries are:
            1. "name" (name of parameter, string),
            2. "type" (type of parameter: "range", "fixed", or "choice", string),
            and one of the following:
            3a. "bounds" for range parameters (list of two values, lower bound
            first),
            3b. "values" for choice parameters (list of values), or
            3c. "value" for fixed parameters (single value).
            Optional elements are:
            1. "log_scale" (for float-valued range parameters, bool),
            2. "value_type" (to specify type that values of this parameter should
            take; expects "float", "int", "bool" or "str"),
            3. "is_fidelity" (bool) and "target_value" (float) for fidelity
            parameters,
            4. "is_ordered" (bool) for choice parameters, and
            5. "is_task" (bool) for task parameters.
        name: Name of the experiment to be created.
        parameter_constraints: List of string representations of parameter
            constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
            the latter constraints, any number of arguments is accepted, and
            the acceptable operators are "<=" and ">=".
        outcome_constraints: List of string representations of outcome
            constraints of the form "metric_name >= bound", e.g. "m1 <= 3".
        status_quo: Parameterization of the current state of the system.
            If set, this will be added to each trial to be evaluated alongside
            test configurations.
        experiment_type: String indicating type of the experiment (e.g. name of
            a product in which it is used), if any.
        objective_name: Name of the metric used as the objective in this
            experiment, if the experiment is a single-objective optimization.
        minimize: Whether this experiment represents a minimization problem, if
            the experiment is a single-objective optimization.
        objectives: Mapping from an objective name to "minimize" or "maximize"
            representing the direction for that objective. Used only for
            multi-objective optimization experiments.
        objective_thresholds: A list of objective threshold constraints for
            multi-objective optimization, in the same string format as the
            `outcome_constraints` argument.
    """
    if objective_name is not None and (objectives is not None
                                       or objective_thresholds is not None):
        raise UnsupportedError(
            "Ambiguous objective definition: for single-objective optimization "
            "`objective_name` and `minimize` arguments expected. For multi-objective "
            "optimization `objectives` and `objective_thresholds` arguments expected."
        )

    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)

    if objectives is None:
        optimization_config = OptimizationConfig(
            objective=Objective(
                metric=Metric(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=make_outcome_constraints(
                outcome_constraints or [], status_quo_arm is not None),
        )
    else:
        optimization_config = make_optimization_config(
            objectives,
            objective_thresholds or [],
            outcome_constraints or [],
            status_quo_arm is not None,
        )

    return Experiment(
        name=name,
        search_space=make_search_space(parameters, parameter_constraints
                                       or []),
        optimization_config=optimization_config,
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
    )
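
For the multi-objective path of this wrapper, a sketch of the alternative argument set may be useful. The metric names, thresholds, and import path below are illustrative assumptions, not taken from the example.

# Multi-objective usage sketch (illustrative names; assumed import path).
from ax.service.utils.instantiation import make_experiment

moo_experiment = make_experiment(
    name="moo_example",
    parameters=[
        {"name": "x1", "type": "range", "bounds": [0.0, 1.0]},
        {"name": "x2", "type": "range", "bounds": [0.0, 1.0]},
    ],
    # Multi-objective arguments; combining these with `objective_name`/`minimize`
    # triggers the UnsupportedError raised above.
    objectives={"m1": "maximize", "m2": "minimize"},
    # Thresholds use the same string format as `outcome_constraints`.
    objective_thresholds=["m1 >= 0.5", "m2 <= 1.0"],
)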