Example #1
File: instantiation.py Project: emailhy/Ax
def outcome_constraint_from_str(representation: str) -> OutcomeConstraint:
    """Parse string representation of an outcome constraint."""
    tokens = representation.split()
    assert len(tokens) == 3 and tokens[1] in COMPARISON_OPS, (
        "Outcome constraint should be of form `metric_name >= x`, where x is a "
        "float bound and comparison operator is >= or <=."
    )
    op = COMPARISON_OPS[tokens[1]]
    try:
        bound = float(tokens[2])
    except ValueError:
        raise ValueError("Outcome constraint bound should be a float.")
    return OutcomeConstraint(Metric(name=tokens[0]), op=op, bound=bound, relative=False)
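
A minimal usage sketch for the parser above (the metric name and bound are hypothetical):

constraint = outcome_constraint_from_str("error_rate <= 0.1")
# constraint.metric.name == "error_rate", constraint.op == COMPARISON_OPS["<="],
# constraint.bound == 0.1, constraint.relative is False.
# A non-numeric bound, e.g. outcome_constraint_from_str("error_rate <= low"), raises ValueError.
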
Example #2
 def testEmptyMetrics(self):
     empty_experiment = Experiment(name="test_experiment",
                                   search_space=get_search_space())
     self.assertEqual(empty_experiment.num_trials, 0)
     with self.assertRaises(ValueError):
         empty_experiment.fetch_data()
     batch = empty_experiment.new_batch_trial()
     self.assertEqual(empty_experiment.num_trials, 1)
     with self.assertRaises(ValueError):
         batch.fetch_data()
     empty_experiment.add_tracking_metric(Metric(name="some_metric"))
     empty_experiment.attach_data(get_data())
     self.assertFalse(empty_experiment.fetch_data().df.empty)
Example #3
 def testInit(self):
     with self.assertRaises(ValueError):
         ScalarizedObjective(
             metrics=[self.metrics["m1"], self.metrics["m2"]],
             weights=[1.0])
     warnings.resetwarnings()
     warnings.simplefilter("always", append=True)
     with warnings.catch_warnings(record=True) as ws:
         Objective(metric=self.metrics["m1"])
         self.assertTrue(
             any(issubclass(w.category, DeprecationWarning) for w in ws))
         self.assertTrue(
             any("Defaulting to `minimize=False`" in str(w.message)
                 for w in ws))
     with warnings.catch_warnings(record=True) as ws:
         Objective(Metric(name="m4", lower_is_better=True), minimize=False)
         self.assertTrue(
             any("Attempting to maximize" in str(w.message) for w in ws))
     with warnings.catch_warnings(record=True) as ws:
         Objective(Metric(name="m4", lower_is_better=False), minimize=True)
         self.assertTrue(
             any("Attempting to minimize" in str(w.message) for w in ws))
Example #4
    def testTransformOptimizationConfig(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        m3 = Metric(name="m3")
        objective = Objective(metric=m3, minimize=False)
        cons = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=2.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=3.5,
                              relative=False),
        ]
        oc = OptimizationConfig(objective=objective, outcome_constraints=cons)
        oc = self.t.transform_optimization_config(oc, None, None)
        cons_t = [
            OutcomeConstraint(metric=m1,
                              op=ComparisonOp.GEQ,
                              bound=1.0,
                              relative=False),
            OutcomeConstraint(metric=m2,
                              op=ComparisonOp.LEQ,
                              bound=4.0,
                              relative=False),
        ]
        self.assertTrue(oc.outcome_constraints == cons_t)
        self.assertTrue(oc.objective == objective)

        # Check fail with relative
        con = OutcomeConstraint(metric=m1,
                                op=ComparisonOp.GEQ,
                                bound=2.0,
                                relative=True)
        oc = OptimizationConfig(objective=objective, outcome_constraints=[con])
        with self.assertRaises(ValueError):
            oc = self.t.transform_optimization_config(oc, None, None)
Example #5
 def setUp(self):
     self.metrics = {
         "m1": Metric(name="m1", lower_is_better=True),
         "m2": Metric(name="m2", lower_is_better=False),
         "m3": Metric(name="m3", lower_is_better=False),
     }
     self.objectives = {
         "o1": Objective(metric=self.metrics["m1"]),
         "o2": Objective(metric=self.metrics["m2"], minimize=False),
         "o3": Objective(metric=self.metrics["m3"], minimize=False),
     }
     self.objective = Objective(metric=self.metrics["m1"], minimize=False)
     self.multi_objective = MultiObjective(
         objectives=[self.objectives["o1"], self.objectives["o2"]])
     self.multi_objective_just_m2 = MultiObjective(
         objectives=[self.objectives["o2"]])
     self.outcome_constraint = OutcomeConstraint(metric=self.metrics["m2"],
                                                 op=ComparisonOp.GEQ,
                                                 bound=-0.25)
     self.additional_outcome_constraint = OutcomeConstraint(
         metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.25)
     self.outcome_constraints = [
         self.outcome_constraint,
         self.additional_outcome_constraint,
     ]
     self.objective_thresholds = [
         ObjectiveThreshold(metric=self.metrics["m2"],
                            bound=-1.0,
                            relative=False)
     ]
     self.m1_constraint = OutcomeConstraint(metric=self.metrics["m1"],
                                            op=ComparisonOp.LEQ,
                                            bound=0.1,
                                            relative=True)
     self.m3_constraint = OutcomeConstraint(metric=self.metrics["m3"],
                                            op=ComparisonOp.GEQ,
                                            bound=0.1,
                                            relative=True)
Example #6
File: core_stubs.py Project: tangzhenyu/ax
def get_experiment_with_multi_objective() -> Experiment:
    optimization_config = get_multi_objective_optimization_config()

    exp = Experiment(
        name="test_experiment_multi_objective",
        search_space=get_branin_search_space(),
        optimization_config=optimization_config,
        description="test experiment with multi objective",
        runner=SyntheticRunner(),
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )

    return exp
Example #7
    def testGetProperties(self):
        # Extract default value.
        properties = serialize_init_args(Metric(name="foo"))
        self.assertEqual(properties, {
            "name": "foo",
            "lower_is_better": None,
            "properties": {}
        })

        # Extract passed value.
        properties = serialize_init_args(
            Metric(name="foo", lower_is_better=True, properties={"foo":
                                                                 "bar"}))
        self.assertEqual(
            properties,
            {
                "name": "foo",
                "lower_is_better": True,
                "properties": {
                    "foo": "bar"
                }
            },
        )
Example #8
def make_objectives(objectives: Dict[str, str]) -> List[Metric]:
    try:
        return [
            Metric(
                name=metric_name,
                lower_is_better=(MetricObjective[min_or_max.upper()] ==
                                 MetricObjective.MINIMIZE),
            ) for metric_name, min_or_max in objectives.items()
        ]
    except KeyError as k:
        raise ValueError(
            f"Objective values should specify '{MetricObjective.MINIMIZE.name.lower()}'"
            f" or '{MetricObjective.MAXIMIZE.name.lower()}', got {k} in"
            f" objectives({objectives})")
Example #9
def get_experiment_with_scalarized_objective() -> Experiment:
    objective = get_scalarized_objective()
    outcome_constraints = [get_outcome_constraint()]
    optimization_config = OptimizationConfig(
        objective=objective, outcome_constraints=outcome_constraints)
    return Experiment(
        name="test_experiment_scalarized_objective",
        search_space=get_search_space(),
        optimization_config=optimization_config,
        status_quo=get_status_quo(),
        description="test experiment with scalarized objective",
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )
Example #10
    def testFetchAndStoreData(self):
        n = 10
        exp = self._setupBraninExperiment(n)
        batch = exp.trials[0]

        # Test fetch data
        batch_data = batch.fetch_data()
        self.assertEqual(len(batch_data.df), n)

        exp_data = exp.fetch_data()
        exp_data2 = exp.metrics["b"].fetch_experiment_data(exp)
        self.assertEqual(len(exp_data2.df), 4 * n)
        self.assertEqual(len(exp_data.df), 4 * n)
        self.assertEqual(len(exp.arms_by_name), 4 * n)

        # Verify data lookup is empty
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), 0)

        # Test local storage
        t1 = exp.attach_data(batch_data)

        t2 = exp.attach_data(exp_data)

        full_dict = exp.data_by_trial
        self.assertEqual(len(full_dict), 2)  # data for 2 trials
        self.assertEqual(len(full_dict[0]), 2)  # 2 data objs for batch 0

        # Test retrieving original batch 0 data
        self.assertEqual(len(exp.lookup_data_for_ts(t1).df), n)
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), n)

        # Test retrieving full exp data
        self.assertEqual(len(exp.lookup_data_for_ts(t2).df), 4 * n)

        # Verify we don't get the data if the trial is abandoned
        batch._status = TrialStatus.ABANDONED
        self.assertEqual(len(batch.fetch_data().df), 0)
        self.assertEqual(len(exp.fetch_data().df), 3 * n)

        # Verify we do get the data if the trial is a candidate
        batch._status = TrialStatus.CANDIDATE
        self.assertEqual(len(batch.fetch_data().df), n)
        self.assertEqual(len(exp.fetch_data().df), 4 * n)

        # Verify we do get the stored data if there is an unimplemented metric
        batch._status = TrialStatus.RUNNING
        exp.add_tracking_metric(Metric(name="m"))
        self.assertEqual(len(batch.fetch_data().df), n)
        self.assertEqual(len(exp.fetch_data().df), 4 * n)
Example #11
 def testTransformOptimizationConfigMOO(self):
     m1 = Metric(name="m1", lower_is_better=False)
     m2 = Metric(name="m2", lower_is_better=True)
     mo = MultiObjective(objectives=[
         Objective(metric=m1, minimize=False),
         Objective(metric=m2, minimize=True),
     ])
     objective_thresholds = [
         ObjectiveThreshold(metric=m1, bound=1.234, relative=False),
         ObjectiveThreshold(metric=m2, bound=3.456, relative=False),
     ]
     oc = MultiObjectiveOptimizationConfig(
         objective=mo,
         objective_thresholds=objective_thresholds,
     )
     tf = LogY(
         search_space=None,
         observation_features=None,
         observation_data=[self.obsd1, self.obsd2],
         config={"metrics": ["m1"]},
     )
     oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None)
     oc.objective_thresholds[0].bound = math.log(1.234)
     self.assertEqual(oc_tf, oc)
Example #12
    def test_add_tracking_metrics(self):
        experiment = make_experiment(
            parameters=[{"name": "x", "type": "range", "bounds": [0, 1]}],
            tracking_metric_names=None,
        )
        self.assertDictEqual(experiment._tracking_metrics, {})

        metrics_names = ["metric_1", "metric_2"]
        experiment = make_experiment(
            parameters=[{"name": "x", "type": "range", "bounds": [0, 1]}],
            tracking_metric_names=metrics_names,
        )
        self.assertDictEqual(
            experiment._tracking_metrics,
            {metric_name: Metric(name=metric_name) for metric_name in metrics_names},
        )
Example #13
 def testEmptyMetrics(self):
     empty_experiment = Experiment(name="test_experiment",
                                   search_space=get_search_space())
     self.assertEqual(empty_experiment.num_trials, 0)
     with self.assertRaises(ValueError):
         empty_experiment.fetch_data()
     batch = empty_experiment.new_batch_trial()
     batch.mark_running(no_runner_required=True)
     self.assertEqual(empty_experiment.num_trials, 1)
     with self.assertRaises(ValueError):
         batch.fetch_data()
     empty_experiment.add_tracking_metric(Metric(name="ax_test_metric"))
     self.assertTrue(empty_experiment.fetch_data().df.empty)
     empty_experiment.attach_data(get_data())
     batch.mark_completed()
     self.assertFalse(empty_experiment.fetch_data().df.empty)
Example #14
def get_experiment_with_multi_objective() -> Experiment:
    objective = get_multi_objective()
    outcome_constraints = [get_outcome_constraint()]
    optimization_config = OptimizationConfig(
        objective=objective, outcome_constraints=outcome_constraints)

    exp = Experiment(
        name="test_experiment_multi_objective",
        search_space=get_branin_search_space(),
        optimization_config=optimization_config,
        description="test experiment with multi objective",
        runner=SyntheticRunner(),
        tracking_metrics=[Metric(name="tracking")],
        is_test=True,
    )

    return exp
Example #15
    def testExperimentObjectiveThresholdUpdates(self):
        experiment = get_experiment_with_batch_trial()
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(),
                         len(experiment.metrics))

        # update objective threshold
        # (should perform update in place)
        optimization_config = get_multi_objective_optimization_config()
        objective_threshold = get_objective_threshold()
        optimization_config.objective_thresholds = [objective_threshold]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(), 6)

        # add outcome constraint
        outcome_constraint2 = OutcomeConstraint(metric=Metric(name="outcome"),
                                                op=ComparisonOp.GEQ,
                                                bound=-0.5)
        optimization_config.outcome_constraints = [
            optimization_config.outcome_constraints[0],
            outcome_constraint2,
        ]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(), 7)

        # remove outcome constraint
        # (old one should become tracking metric)
        optimization_config.outcome_constraints = []
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(), 5)

        loaded_experiment = load_experiment(experiment.name)
        self.assertEqual(experiment, loaded_experiment)

        # Optimization config should correctly reload even with no
        # objective_thresholds
        optimization_config.objective_thresholds = []
        save_experiment(experiment)
        self.assertEqual(get_session().query(SQAMetric).count(), 4)

        loaded_experiment = load_experiment(experiment.name)
        self.assertEqual(experiment, loaded_experiment)
Example #16
 def testGenWithDefaults(self, _, mock_gen):
     exp = get_experiment()
     exp.optimization_config = get_optimization_config()
     ss = search_space_for_range_value()
     modelbridge = ModelBridge(ss, None, [], exp)
     modelbridge.gen(1)
     mock_gen.assert_called_with(
         modelbridge,
         n=1,
         search_space=ss,
         fixed_features=ObservationFeatures(parameters={}),
         model_gen_options=None,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("test_metric"), minimize=False),
             outcome_constraints=[],
         ),
         pending_observations={},
     )
Example #17
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
) -> Experiment:
    """Instantiation wrapper that allows for creation of SimpleExperiment
    without importing or instantiating any Ax classes."""

    exp_parameters: List[Parameter] = [
        parameter_from_json(p) for p in parameters
    ]
    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)
    parameter_map = {p.name: p for p in exp_parameters}
    ocs = [outcome_constraint_from_str(c) for c in (outcome_constraints or [])]
    if status_quo_arm is None and any(oc.relative for oc in ocs):
        raise ValueError(
            "Must set status_quo to have relative outcome constraints.")
    return Experiment(
        name=name,
        search_space=SearchSpace(
            parameters=exp_parameters,
            parameter_constraints=None if parameter_constraints is None else [
                constraint_from_str(c, parameter_map)
                for c in parameter_constraints
            ],
        ),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=Metric(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=ocs,
        ),
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
    )
Example #18
    def testExperimentOutcomeConstraintUpdates(self):
        experiment = get_experiment_with_batch_trial()
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        # update outcome constraint
        # (should perform update in place)
        optimization_config = get_optimization_config()
        outcome_constraint = get_outcome_constraint()
        outcome_constraint.bound = -1.0
        optimization_config.outcome_constraints = [outcome_constraint]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        # add outcome constraint
        outcome_constraint2 = OutcomeConstraint(
            metric=Metric(name="outcome"), op=ComparisonOp.GEQ, bound=-0.5
        )
        optimization_config.outcome_constraints = [
            outcome_constraint,
            outcome_constraint2,
        ]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        # remove outcome constraint
        # (old one should become tracking metric)
        optimization_config.outcome_constraints = [outcome_constraint]
        experiment.optimization_config = optimization_config
        save_experiment(experiment)
        self.assertEqual(
            get_session().query(SQAMetric).count(), len(experiment.metrics)
        )

        loaded_experiment = load_experiment(experiment.name)
        self.assertEqual(experiment, loaded_experiment)
Example #19
    def testFetchTrialsData(self):
        exp = self._setupBraninExperiment(n=5)
        batch_0 = exp.trials[0]
        batch_1 = exp.trials[1]
        batch_0.mark_completed()
        batch_1.mark_completed()
        batch_0_data = exp.fetch_trials_data(trial_indices=[0])
        self.assertEqual(set(batch_0_data.df["trial_index"].values), {0})
        self.assertEqual(set(batch_0_data.df["arm_name"].values),
                         {a.name
                          for a in batch_0.arms})
        batch_1_data = exp.fetch_trials_data(trial_indices=[1])
        self.assertEqual(set(batch_1_data.df["trial_index"].values), {1})
        self.assertEqual(set(batch_1_data.df["arm_name"].values),
                         {a.name
                          for a in batch_1.arms})
        self.assertEqual(
            exp.fetch_trials_data(trial_indices=[0, 1]),
            Data.from_multiple_data([batch_0_data, batch_1_data]),
        )

        # Since NoisyFunction metric has overwrite_existing_data = False,
        # we should have two dfs per trial now
        self.assertEqual(len(exp.data_by_trial[0]), 2)

        with self.assertRaisesRegex(ValueError, ".* not associated .*"):
            exp.fetch_trials_data(trial_indices=[2])
        # Try to fetch data when there are only metrics and no attached data.
        exp.remove_tracking_metric(
            metric_name="b")  # Remove implemented metric.
        exp.add_tracking_metric(Metric(name="b"))  # Add unimplemented metric.
        self.assertEqual(len(exp.fetch_trials_data(trial_indices=[0]).df), 5)
        # Try fetching attached data.
        exp.attach_data(batch_0_data)
        exp.attach_data(batch_1_data)
        self.assertEqual(exp.fetch_trials_data(trial_indices=[0]),
                         batch_0_data)
        self.assertEqual(exp.fetch_trials_data(trial_indices=[1]),
                         batch_1_data)
        self.assertEqual(set(batch_0_data.df["trial_index"].values), {0})
        self.assertEqual(set(batch_0_data.df["arm_name"].values),
                         {a.name
                          for a in batch_0.arms})
Example #20
 def testFetchTrialsData(self):
     exp = self._setupBraninExperiment(n=5)
     batch_0 = exp.trials[0]
     batch_1 = exp.trials[1]
     batch_0.mark_completed()
     batch_1.mark_completed()
     batch_0_data = exp.fetch_trials_data(trial_indices=[0])
     self.assertEqual(set(batch_0_data.df["trial_index"].values), {0})
     self.assertEqual(set(batch_0_data.df["arm_name"].values),
                      {a.name
                       for a in batch_0.arms})
     batch_1_data = exp.fetch_trials_data(trial_indices=[1])
     self.assertEqual(set(batch_1_data.df["trial_index"].values), {1})
     self.assertEqual(set(batch_1_data.df["arm_name"].values),
                      {a.name
                       for a in batch_1.arms})
     self.assertEqual(
         exp.fetch_trials_data(trial_indices=[0, 1]),
         Data.from_multiple_data([batch_0_data, batch_1_data]),
     )
     with self.assertRaisesRegex(ValueError, ".* not associated .*"):
         exp.fetch_trials_data(trial_indices=[2])
     # Try to fetch data when there are only metrics and no attached data.
     exp.remove_tracking_metric(
         metric_name="b")  # Remove implemented metric.
     exp.add_tracking_metric(Metric(name="b"))  # Add unimplemented metric.
     self.assertTrue(exp.fetch_trials_data(trial_indices=[0]).df.empty)
     # Try fetching attached data.
     exp.attach_data(batch_0_data)
     exp.attach_data(batch_1_data)
     self.assertEqual(exp.fetch_trials_data(trial_indices=[0]),
                      batch_0_data)
     self.assertEqual(exp.fetch_trials_data(trial_indices=[1]),
                      batch_1_data)
     self.assertEqual(set(batch_0_data.df["trial_index"].values), {0})
     self.assertEqual(set(batch_0_data.df["arm_name"].values),
                      {a.name
                       for a in batch_0.arms})
Example #21
 def test_best_point(
     self,
     _mock_gen,
     _mock_best_point,
     _mock_fit,
     _mock_predict,
     _mock_gen_arms,
     _mock_unwrap,
     _mock_obs_from_data,
 ):
     exp = Experiment(search_space=get_search_space_for_range_value(),
                      name="test")
     modelbridge = ArrayModelBridge(
         search_space=get_search_space_for_range_value(),
         model=NumpyModel(),
         transforms=[t1, t2],
         experiment=exp,
         data=Data(),
     )
     self.assertEqual(list(modelbridge.transforms.keys()),
                      ["Cast", "t1", "t2"])
     # _fit is mocked, which typically sets this.
     modelbridge.outcomes = ["a"]
     run = modelbridge.gen(
         n=1,
         optimization_config=OptimizationConfig(
             objective=Objective(metric=Metric("a"), minimize=False),
             outcome_constraints=[],
         ),
     )
     arm, predictions = run.best_arm_predictions
     self.assertEqual(arm.parameters, {})
     self.assertEqual(predictions[0], {"m": 1.0})
     self.assertEqual(predictions[1], {"m": {"m": 2.0}})
      # Check that an optimization config is required.
     with self.assertRaises(ValueError):
         run = modelbridge.gen(n=1, optimization_config=None)
Example #22
 def __init__(
     self,
     search_space: SearchSpace,
     name: Optional[str] = None,
     objective_name: Optional[str] = None,
     evaluation_function: TEvaluationFunction = unimplemented_evaluation_function,
     minimize: bool = False,
     outcome_constraints: Optional[List[OutcomeConstraint]] = None,
     status_quo: Optional[Arm] = None,
 ) -> None:
     optimization_config = OptimizationConfig(
         objective=Objective(
             metric=Metric(name=objective_name or DEFAULT_OBJECTIVE_NAME),
             minimize=minimize,
         ),
         outcome_constraints=outcome_constraints,
     )
     super().__init__(
         name=name,
         search_space=search_space,
         optimization_config=optimization_config,
         status_quo=status_quo,
     )
     self._evaluation_function = evaluation_function
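
A hypothetical construction sketch, assuming the constructor above is SimpleExperiment.__init__ (it accepts an evaluation_function, as Ax's SimpleExperiment does); the parameter names, bounds, and evaluation function are illustrative only:

from ax import ParameterType, RangeParameter, SearchSpace, SimpleExperiment

def booth(parameterization, weight=None):
    # Toy evaluation function returning {metric_name: (mean, sem)}.
    x1, x2 = parameterization["x1"], parameterization["x2"]
    return {"booth": ((x1 + 2 * x2 - 7) ** 2 + (2 * x1 + x2 - 5) ** 2, 0.0)}

exp = SimpleExperiment(
    name="booth_experiment",
    search_space=SearchSpace(parameters=[
        RangeParameter("x1", ParameterType.FLOAT, lower=-10.0, upper=10.0),
        RangeParameter("x2", ParameterType.FLOAT, lower=-10.0, upper=10.0),
    ]),
    objective_name="booth",
    evaluation_function=booth,
    minimize=True,
)
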
Example #23
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
) -> Experiment:
    """Instantiation wrapper that allows for creation of SimpleExperiment without
    importing or instantiating any Ax classes."""

    exp_parameters: List[Parameter] = [
        parameter_from_json(p) for p in parameters
    ]
    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)
    parameter_map = {p.name: p for p in exp_parameters}
    return Experiment(
        name=name,
        search_space=SearchSpace(
            parameters=exp_parameters,
            parameter_constraints=None if parameter_constraints is None else [
                constraint_from_str(c, parameter_map)
                for c in parameter_constraints
            ],
        ),
        optimization_config=OptimizationConfig(
            objective=Objective(
                metric=Metric(name=objective_name or DEFAULT_OBJECTIVE_NAME),
                minimize=minimize,
            ),
            outcome_constraints=None if outcome_constraints is None else
            [outcome_constraint_from_str(c) for c in outcome_constraints],
        ),
        status_quo=status_quo_arm,
    )
Example #24
    def testMetricSetters(self):
        # Establish current metrics size
        self.assertEqual(
            len(get_optimization_config().metrics) + 1,
            len(self.experiment.metrics))

        # Add optimization config with 1 different metric
        opt_config = get_optimization_config()
        opt_config.outcome_constraints[0].metric = Metric(name="m3")
        self.experiment.optimization_config = opt_config

        # Verify total metrics size is the same.
        self.assertEqual(
            len(get_optimization_config().metrics) + 1,
            len(self.experiment.metrics))

        # Test adding new tracking metric
        self.experiment.add_tracking_metric(Metric(name="m4"))
        self.assertEqual(
            len(get_optimization_config().metrics) + 2,
            len(self.experiment.metrics))

        # Verify update_tracking_metric updates the metric definition
        self.assertIsNone(self.experiment.metrics["m4"].lower_is_better)
        self.experiment.update_tracking_metric(
            Metric(name="m4", lower_is_better=True))
        self.assertTrue(self.experiment.metrics["m4"].lower_is_better)

        # Verify unable to add existing metric
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metric(Metric(name="m4"))

        # Verify unable to add metric in optimization config
        with self.assertRaises(ValueError):
            self.experiment.add_tracking_metric(Metric(name="m1"))

        # Cannot update metric not already on experiment
        with self.assertRaises(ValueError):
            self.experiment.update_tracking_metric(Metric(name="m5"))

        # Cannot remove metric not already on experiment
        with self.assertRaises(ValueError):
            self.experiment.remove_tracking_metric(metric_name="m5")
Example #25
    def testModelBridge(self, mock_fit, mock_gen_arms,
                        mock_observations_from_data):
        # Test that on init transforms are stored and applied in the correct order
        transforms = [transform_1, transform_2]
        exp = get_experiment_for_value()
        ss = get_search_space_for_value()
        modelbridge = ModelBridge(ss, 0, transforms, exp, 0)
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["transform_1", "transform_2"])
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(
            fit_args["search_space"] == get_search_space_for_value(8.0))
        self.assertTrue(fit_args["observation_features"] == [])
        self.assertTrue(fit_args["observation_data"] == [])
        self.assertTrue(mock_observations_from_data.called)

        # Test prediction on out of design features.
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            side_effect=ValueError("Out of Design"),
        )
        # This point is in design, and thus failures in predict are legitimate.
        with mock.patch.object(ModelBridge,
                               "model_space",
                               return_value=get_search_space_for_range_values):
            with self.assertRaises(ValueError):
                modelbridge.predict([get_observation2().features])

        # This point is out of design, and not in training data.
        with self.assertRaises(ValueError):
            modelbridge.predict([get_observation_status_quo0().features])

        # Now it's in the training data.
        with mock.patch.object(
                ModelBridge,
                "get_training_data",
                return_value=[get_observation_status_quo0()],
        ):
            # Return raw training value.
            self.assertEqual(
                modelbridge.predict([get_observation_status_quo0().features]),
                unwrap_observation_data([get_observation_status_quo0().data]),
            )

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[get_observation2trans().data],
        )
        modelbridge.predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Check that _single_predict is equivalent here.
        modelbridge._single_predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([get_observation1trans().features], [2], None, {}),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(
            name="test_metric")))
        modelbridge._set_kwargs_to_save(model_key="TestModel",
                                        model_kwargs={},
                                        bridge_kwargs={})
        gr = modelbridge.gen(
            n=1,
            search_space=get_search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [get_observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [get_observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={},
            observation_features=[get_observation1().features])

        # Gen with no pending observations and no fixed features
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=None)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"),
                     Metric(name="test_metric_2")]))
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=oc2)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[get_observation1trans().data],
        )
        cv_training_data = [get_observation2()]
        cv_test_points = [get_observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points)
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[get_observation2trans().features],
            obs_data=[get_observation2trans().data],
            cv_test_points=[get_observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [get_observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [get_observation1(), get_observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(
            modelbridge.model_space == get_search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [False, False])

        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        # Test feature_importances
        with self.assertRaises(NotImplementedError):
            modelbridge.feature_importances("a")
Example #26
def make_experiment(
    parameters: List[TParameterRepresentation],
    name: Optional[str] = None,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    experiment_type: Optional[str] = None,
    tracking_metric_names: Optional[List[str]] = None,
    # Single-objective optimization arguments:
    objective_name: Optional[str] = None,
    minimize: bool = False,
    # Multi-objective optimization arguments:
    objectives: Optional[Dict[str, str]] = None,
    objective_thresholds: Optional[List[str]] = None,
    support_intermediate_data: bool = False,
    immutable_search_space_and_opt_config: bool = True,
    is_test: bool = False,
) -> Experiment:
    """Instantiation wrapper that allows for Ax `Experiment` creation
    without importing or instantiating any Ax classes.

    Args:
        parameters: List of dictionaries representing parameters in the
            experiment search space.
            Required elements in the dictionaries are:
            1. "name" (name of parameter, string),
            2. "type" (type of parameter: "range", "fixed", or "choice", string),
            and one of the following:
            3a. "bounds" for range parameters (list of two values, lower bound
            first),
            3b. "values" for choice parameters (list of values), or
            3c. "value" for fixed parameters (single value).
            Optional elements are:
            1. "log_scale" (for float-valued range parameters, bool),
            2. "value_type" (to specify type that values of this parameter should
            take; expects "float", "int", "bool" or "str"),
            3. "is_fidelity" (bool) and "target_value" (float) for fidelity
            parameters,
            4. "is_ordered" (bool) for choice parameters,
            5. "is_task" (bool) for task parameters, and
            6. "digits" (int) for float-valued range parameters.
        name: Name of the experiment to be created.
        parameter_constraints: List of string representations of parameter
            constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2". For
            the latter constraints, any number of arguments is accepted, and
            acceptable operators are "<=" and ">=".
        outcome_constraints: List of string representations of outcome
            constraints of the form "metric_name >= bound", like "m1 <= 3".
        status_quo: Parameterization of the current state of the system.
            If set, this will be added to each trial to be evaluated alongside
            test configurations.
        experiment_type: String indicating type of the experiment (e.g. name of
            a product in which it is used), if any.
        tracking_metric_names: Names of additional tracking metrics not used for
            optimization.
        objective_name: Name of the metric used as objective in this experiment,
            if experiment is single-objective optimization.
        minimize: Whether this experiment represents a minimization problem, if
            experiment is a single-objective optimization.
        objectives: Mapping from an objective name to "minimize" or "maximize"
            representing the direction for that objective. Used only for
            multi-objective optimization experiments.
        objective_thresholds: A list of objective threshold constraints for multi-
            objective optimization, in the same string format as `outcome_constraints`
            argument.
        support_intermediate_data: Whether trials may report metric results for
            incomplete runs.
        immutable_search_space_and_opt_config: Whether the search space and
            optimization config on this experiment are immutable, i.e. cannot be
            updated after creation. Defaults to True. If set to True, we won't
            store or load copies of the search space and optimization config on
            each generator run, which will improve storage performance.
        is_test: Whether this experiment will be a test experiment (useful for
            marking test experiments in storage etc). Defaults to False.
    """
    if objective_name is not None and (
        objectives is not None or objective_thresholds is not None
    ):
        raise UnsupportedError(
            "Ambiguous objective definition: for single-objective optimization "
            "`objective_name` and `minimize` arguments expected. For multi-objective "
            "optimization `objectives` and `objective_thresholds` arguments expected."
        )

    status_quo_arm = None if status_quo is None else Arm(parameters=status_quo)

    # TODO(jej): Needs to be decided per-metric when supporting heterogeneous data.
    metric_cls = MapMetric if support_intermediate_data else Metric
    if objectives is None:
        optimization_config = OptimizationConfig(
            objective=Objective(
                metric=metric_cls(
                    name=objective_name or DEFAULT_OBJECTIVE_NAME,
                    lower_is_better=minimize,
                ),
                minimize=minimize,
            ),
            outcome_constraints=make_outcome_constraints(
                outcome_constraints or [], status_quo_arm is not None
            ),
        )
    else:
        optimization_config = make_optimization_config(
            objectives,
            objective_thresholds or [],
            outcome_constraints or [],
            status_quo_arm is not None,
        )

    tracking_metrics = (
        None
        if tracking_metric_names is None
        else [Metric(name=metric_name) for metric_name in tracking_metric_names]
    )

    default_data_type = (
        DataType.MAP_DATA if support_intermediate_data else DataType.DATA
    )

    immutable_ss_and_oc = immutable_search_space_and_opt_config
    properties = (
        {}
        if not immutable_search_space_and_opt_config
        else {Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value: immutable_ss_and_oc}
    )

    return Experiment(
        name=name,
        search_space=make_search_space(parameters, parameter_constraints or []),
        optimization_config=optimization_config,
        status_quo=status_quo_arm,
        experiment_type=experiment_type,
        tracking_metrics=tracking_metrics,
        default_data_type=default_data_type,
        properties=properties,
        is_test=is_test,
    )
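
A usage sketch for the wrapper above, covering the single-objective and multi-objective paths (all parameter and metric names here are hypothetical):

# Single-objective: `objective_name` and `minimize` build a single-Metric Objective.
exp = make_experiment(
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    objective_name="error_rate",
    minimize=True,
    outcome_constraints=["latency <= 100"],
    tracking_metric_names=["memory"],
)

# Multi-objective: `objectives` maps metric names to directions instead.
moo_exp = make_experiment(
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    objectives={"error_rate": "minimize", "throughput": "maximize"},
    objective_thresholds=["error_rate <= 0.2", "throughput >= 50"],
)

# Supplying `objective_name` together with `objectives` raises UnsupportedError.
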
Example #27
    def testFetchAndStoreData(self):
        n = 10
        exp = self._setupBraninExperiment(n)
        batch = exp.trials[0]
        batch.mark_completed()

        # Test fetch data
        batch_data = batch.fetch_data()
        self.assertEqual(len(batch_data.df), n)

        exp_data = exp.fetch_data()
        exp_data2 = exp.metrics["b"].fetch_experiment_data(exp)
        self.assertEqual(len(exp_data2.df), 4 * n)
        self.assertEqual(len(exp_data.df), 4 * n)
        self.assertEqual(len(exp.arms_by_name), 4 * n)

        # Verify that `metrics` kwarg to `experiment.fetch_data` is respected.
        exp.add_tracking_metric(Metric(name="not_yet_on_experiment"))
        exp.attach_data(
            Data(df=pd.DataFrame.from_records([{
                "arm_name": "0_0",
                "metric_name": "not_yet_on_experiment",
                "mean": 3,
                "sem": 0,
                "trial_index": 0,
            }])))
        self.assertEqual(
            set(
                exp.fetch_data(metrics=[Metric(
                    name="not_yet_on_experiment")]).df["metric_name"].values),
            {"not_yet_on_experiment"},
        )

        # Verify data lookup is empty for trial that does not yet have data.
        self.assertEqual(len(exp.lookup_data_for_trial(1)[0].df), 0)

        # Test local storage
        t1 = exp.attach_data(batch_data)
        t2 = exp.attach_data(exp_data)

        full_dict = exp.data_by_trial
        self.assertEqual(len(full_dict), 2)  # data for 2 trials
        self.assertEqual(len(full_dict[0]), 3)  # 3 data objs for batch 0

        # Test retrieving original batch 0 data
        self.assertEqual(len(exp.lookup_data_for_ts(t1).df), n)
        self.assertEqual(len(exp.lookup_data_for_trial(0)[0].df), n)

        # Test retrieving full exp data
        self.assertEqual(len(exp.lookup_data_for_ts(t2).df), 4 * n)

        # Test merging multiple timestamps of data
        self.assertEqual(
            len(exp.lookup_data_for_trial(0, merge_trial_data=True)), 2)

        with self.assertRaisesRegex(ValueError, ".* for metric"):
            exp.attach_data(batch_data, combine_with_last_data=True)

        new_data = Data(df=pd.DataFrame.from_records([{
            "arm_name": "0_0",
            "metric_name": "z",
            "mean": 3,
            "sem": 0,
            "trial_index": 0,
        }]))
        t3 = exp.attach_data(new_data, combine_with_last_data=True)
        self.assertEqual(len(full_dict[0]), 4)  # 4 data objs for batch 0 now
        self.assertIn("z",
                      exp.lookup_data_for_ts(t3).df["metric_name"].tolist())

        # Verify we don't get the data if the trial is abandoned
        batch._status = TrialStatus.ABANDONED
        self.assertEqual(len(batch.fetch_data().df), 0)
        self.assertEqual(len(exp.fetch_data().df), 3 * n)

        # Verify we do get the stored data if there is an unimplemented metric.
        # Remove attached data for nonexistent metric.
        del exp._data_by_trial[0][t3]
        # Remove implemented metric that is `available_while_running`
        # (and therefore not pulled from cache).
        exp.remove_tracking_metric(metric_name="b")
        exp.add_tracking_metric(Metric(name="b"))  # Add unimplemented metric.
        batch._status = TrialStatus.COMPLETED
        # Data should be getting looked up now.
        self.assertEqual(batch.fetch_data(), exp.lookup_data_for_ts(t1))
        self.assertEqual(exp.fetch_data(), exp.lookup_data_for_ts(t1))
        metrics_in_data = set(batch.fetch_data().df["metric_name"].values)
        # Data for metric "z" should no longer be present since we removed it.
        self.assertEqual(metrics_in_data, {"b"})

        # Verify that `metrics` kwarg to `experiment.fetch_data` is respected
        # when pulling looked-up data.
        self.assertEqual(
            exp.fetch_data(metrics=[Metric(name="not_on_experiment")]), Data())
Example #28
 def test_create_experiment(self) -> None:
     """Test basic experiment creation."""
     ax_client = AxClient(
         GenerationStrategy(
             steps=[GenerationStep(model=Models.SOBOL, num_trials=30)]))
     with self.assertRaisesRegex(ValueError,
                                 "Experiment not set on Ax client"):
         ax_client.experiment
     ax_client.create_experiment(
         name="test_experiment",
         parameters=[
             {
                 "name": "x",
                 "type": "range",
                 "bounds": [0.001, 0.1],
                 "value_type": "float",
                 "log_scale": True,
             },
             {
                 "name": "y",
                 "type": "choice",
                 "values": [1, 2, 3],
                 "value_type": "int",
                 "is_ordered": True,
             },
             {
                 "name": "x3",
                 "type": "fixed",
                 "value": 2,
                 "value_type": "int"
             },
             {
                 "name": "x4",
                 "type": "range",
                 "bounds": [1.0, 3.0],
                 "value_type": "int",
             },
             {
                 "name": "x5",
                 "type": "choice",
                 "values": ["one", "two", "three"],
                 "value_type": "str",
             },
             {
                 "name": "x6",
                 "type": "range",
                 "bounds": [1.0, 3.0],
                 "value_type": "int",
             },
         ],
         objective_name="test_objective",
         minimize=True,
         outcome_constraints=["some_metric >= 3", "some_metric <= 4.0"],
         parameter_constraints=["x4 <= x6"],
     )
     assert ax_client._experiment is not None
     self.assertEqual(ax_client._experiment, ax_client.experiment)
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x"],
         RangeParameter(
             name="x",
             parameter_type=ParameterType.FLOAT,
             lower=0.001,
             upper=0.1,
             log_scale=True,
         ),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["y"],
         ChoiceParameter(
             name="y",
             parameter_type=ParameterType.INT,
             values=[1, 2, 3],
             is_ordered=True,
         ),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x3"],
         FixedParameter(name="x3",
                        parameter_type=ParameterType.INT,
                        value=2),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x4"],
         RangeParameter(name="x4",
                        parameter_type=ParameterType.INT,
                        lower=1.0,
                        upper=3.0),
     )
     self.assertEqual(
         ax_client._experiment.search_space.parameters["x5"],
         ChoiceParameter(
             name="x5",
             parameter_type=ParameterType.STRING,
             values=["one", "two", "three"],
         ),
     )
     self.assertEqual(
         ax_client._experiment.optimization_config.outcome_constraints[0],
         OutcomeConstraint(
             metric=Metric(name="some_metric"),
             op=ComparisonOp.GEQ,
             bound=3.0,
             relative=False,
         ),
     )
     self.assertEqual(
         ax_client._experiment.optimization_config.outcome_constraints[1],
         OutcomeConstraint(
             metric=Metric(name="some_metric"),
             op=ComparisonOp.LEQ,
             bound=4.0,
             relative=False,
         ),
     )
     self.assertTrue(
         ax_client._experiment.optimization_config.objective.minimize)
Example #29
File: core_stubs.py Project: tangzhenyu/ax
def get_optimization_config_no_constraints() -> OptimizationConfig:
    return OptimizationConfig(objective=Objective(metric=Metric("test_metric")))
Example #30
File: core_stubs.py Project: tangzhenyu/ax
def get_scalarized_objective() -> Objective:
    return ScalarizedObjective(
        metrics=[Metric(name="m1"), Metric(name="m3")],
        weights=[1.0, 2.0],
        minimize=False,
    )
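
The weights above pair positionally with the metrics, so this objective roughly means maximizing 1.0*m1 + 2.0*m3; as example #3 shows, mismatched metrics/weights lengths raise a ValueError at construction. Like any objective, it can be dropped into an optimization config (compare example #9):

optimization_config = OptimizationConfig(objective=get_scalarized_objective())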