Example #1
0
    def test_status_quo_for_non_monolithic_data(self, mock_gen):
        """Status quo is resolved even when metric data is non-monolithic.

        Metrics whose rows differ in start/end times should only produce
        warnings, not errors, and the status-quo arm must still be found.
        """
        # Stub candidate generation: five equally-weighted points on trial 1.
        features = [
            ObservationFeatures(
                parameters={"x1": float(idx), "x2": float(idx)},
                trial_index=np.int64(1),
            )
            for idx in range(5)
        ]
        mock_gen.return_value = (features, [1] * 5, None, {})

        exp = get_branin_experiment_with_multi_objective(with_status_quo=True)
        sobol = Models.SOBOL(search_space=exp.search_space)
        trial = exp.new_batch_trial(sobol.gen(5))
        trial.set_status_quo_and_optimize_power(status_quo=exp.status_quo).run()

        # create data where metrics vary in start and end times
        data = get_non_monolithic_branin_moo_data()
        with warnings.catch_warnings(record=True) as caught:
            bridge = ModelBridge(
                experiment=exp,
                data=data,
                model=Model(),
                search_space=exp.search_space,
            )
        # just testing it doesn't error
        bridge.gen(5)
        messages = [str(w.message) for w in caught]
        self.assertTrue(any("start_time" in msg for msg in messages))
        self.assertTrue(any("end_time" in msg for msg in messages))
        self.assertEqual(bridge.status_quo.arm_name, "status_quo")
Example #2
0
 def test_update(self, _mock_update, _mock_gen):
     """update() accepts new data for known metrics and rejects unknown ones."""
     exp = get_experiment_for_value()
     exp.optimization_config = get_optimization_config_no_constraints()
     search_space = get_search_space_for_range_values()
     exp.search_space = search_space
     bridge = ModelBridge(
         search_space=search_space,
         model=Model(),
         transforms=[Log],
         experiment=exp,
     )
     exp.new_trial(generator_run=bridge.gen(1))

     # Seed the bridge with a single observation of metric "m1".
     seed_row = {"arm_name": "0_0", "metric_name": "m1", "mean": 3.0, "sem": 1.0}
     bridge._set_training_data(
         observations_from_data(
             data=Data(pd.DataFrame([seed_row])),
             experiment=exp,
         ),
         search_space,
     )
     exp.new_trial(generator_run=bridge.gen(1))

     # A further observation for a known metric is accepted.
     new_row = {"arm_name": "1_0", "metric_name": "m1", "mean": 5.0, "sem": 0.0}
     bridge.update(
         new_data=Data(pd.DataFrame([new_row])),
         experiment=exp,
     )
     exp.new_trial(generator_run=bridge.gen(1))

     # Trying to update with unrecognised metric should error.
     bad_row = {"arm_name": "1_0", "metric_name": "m2", "mean": 5.0, "sem": 0.0}
     with self.assertRaisesRegex(ValueError, "Unrecognised metric"):
         bridge.update(
             new_data=Data(pd.DataFrame([bad_row])),
             experiment=exp,
         )
Example #3
0
def _produce_generator_run_from_model(
    input_max_gen_draws: int,
    model: ModelBridge,
    n: int,
    pending_observations: Optional[Dict[str, List[ObservationFeatures]]],
    model_gen_kwargs: Any,
    should_deduplicate: bool,
    arms_by_signature: Dict[str, Arm],
) -> GeneratorRun:
    """Generate a ``GeneratorRun`` of ``n`` arms from ``model``.

    When ``should_deduplicate`` is True, candidate runs containing any arm
    whose signature already appears in ``arms_by_signature`` are discarded
    and regenerated (rejection sampling).

    Raises:
        GenerationStrategyRepeatedPoints: If more than ``input_max_gen_draws``
            generation attempts are made while deduplicating.
    """
    # NOTE: Might need to revisit the behavior of deduplication when
    # generating multi-arm generator runs (to be made into batch trials).
    generator_run = None
    draws_so_far = 0
    while True:
        # Abort once rejection sampling has exhausted the draw budget.
        if draws_so_far > input_max_gen_draws:
            raise GenerationStrategyRepeatedPoints(MAX_GEN_DRAWS_EXCEEDED_MESSAGE)
        generator_run = model.gen(
            n=n,
            pending_observations=pending_observations,
            **model_gen_kwargs,
        )
        draws_so_far += 1
        if not should_deduplicate:
            break
        has_duplicate = any(
            arm.signature in arms_by_signature for arm in generator_run.arms
        )
        if not has_duplicate:
            break
    return not_none(generator_run)
Example #4
0
 def testGenWithDefaults(self, _, mock_gen):
     """gen() falls back to the experiment's own config and search space."""
     exp = get_experiment()
     exp.optimization_config = get_optimization_config()
     search_space = search_space_for_range_value()
     bridge = ModelBridge(search_space, None, [], exp)
     bridge.gen(1)
     # The inner _gen call should receive defaults derived from the experiment.
     expected_config = OptimizationConfig(
         objective=Objective(metric=Metric("test_metric"), minimize=False),
         outcome_constraints=[],
     )
     mock_gen.assert_called_with(
         bridge,
         n=1,
         search_space=search_space,
         fixed_features=ObservationFeatures(parameters={}),
         model_gen_options=None,
         optimization_config=expected_config,
         pending_observations={},
     )
Example #5
0
    def test_ood_gen(self, _):
        """``fit_out_of_design=True`` keeps OOD candidates; False clamps them."""
        exp = get_experiment_for_value()
        ss = SearchSpace([RangeParameter("x", ParameterType.FLOAT, 0.0, 1.0)])
        # x=3.0 lies outside the [0, 1] range above.
        ood_features = ObservationFeatures(parameters={"x": 3.0})

        def build_bridge(fit_ood):
            # Bridge whose _gen is stubbed to return the out-of-design point.
            bridge = ModelBridge(
                search_space=ss,
                model=Model(),
                transforms=[],
                experiment=exp,
                data=0,
                fit_out_of_design=fit_ood,
            )
            bridge._gen = mock.MagicMock(
                "ax.modelbridge.base.ModelBridge._gen",
                autospec=True,
                return_value=([ood_features], [2], None, {}),
            )
            return bridge

        # With fit_out_of_design=True the OOD candidate passes through as-is.
        bridge = build_bridge(True)
        gr = bridge.gen(n=1)
        self.assertEqual(gr.arms[0].parameters, ood_features.parameters)

        # With fit_out_of_design=False the candidate is clamped to the space.
        bridge = build_bridge(False)
        gr = bridge.gen(n=1)
        self.assertEqual(gr.arms[0].parameters, {"x": 1.0})
Example #6
0
 def test_gen_on_experiment_with_imm_ss_and_opt_conf(self, _, __):
     """Immutable search-space/opt-config experiments omit both from the GR."""
     exp = get_experiment_for_value()
     exp._properties[Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF] = True
     exp.optimization_config = get_optimization_config_no_constraints()
     search_space = get_search_space_for_range_value()
     bridge = ModelBridge(
         search_space=search_space,
         model=Model(),
         transforms=[],
         experiment=exp,
     )
     self.assertTrue(bridge._experiment_has_immutable_search_space_and_opt_config)
     generator_run = bridge.gen(1)
     # Neither is attached: both can be read back off the experiment itself.
     self.assertIsNone(generator_run.optimization_config)
     self.assertIsNone(generator_run.search_space)
Example #7
0
    def testModelBridge(self, mock_fit, mock_gen_arms,
                        mock_observations_from_data):
        """End-to-end plumbing test for ``ModelBridge``.

        Covers: transform ordering on init, transform application in
        ``predict``/``gen``/``cross_validate`` (checked via mocked internals),
        and the stored-training-data accessors. Statement order matters
        throughout — later assertions inspect mocks set up earlier.
        """
        # Test that on init transforms are stored and applied in the correct order
        transforms = [transform_1, transform_2]
        exp = get_experiment_for_value()
        ss = get_search_space_for_value()
        modelbridge = ModelBridge(ss, 0, transforms, exp, 0)
        self.assertEqual(list(modelbridge.transforms.keys()),
                         ["transform_1", "transform_2"])
        # Kwargs of the first (and only) _fit call made during init.
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(
            fit_args["search_space"] == get_search_space_for_value(8.0))
        self.assertTrue(fit_args["observation_features"] == [])
        self.assertTrue(fit_args["observation_data"] == [])
        self.assertTrue(mock_observations_from_data.called)

        # Test prediction on out of design features.
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            side_effect=ValueError("Out of Design"),
        )
        # This point is in design, and thus failures in predict are legitimate.
        with mock.patch.object(ModelBridge,
                               "model_space",
                               return_value=get_search_space_for_range_values):
            with self.assertRaises(ValueError):
                modelbridge.predict([get_observation2().features])

        # This point is out of design, and not in training data.
        with self.assertRaises(ValueError):
            modelbridge.predict([get_observation_status_quo0().features])

        # Now it's in the training data.
        with mock.patch.object(
                ModelBridge,
                "get_training_data",
                return_value=[get_observation_status_quo0()],
        ):
            # Return raw training value.
            self.assertEqual(
                modelbridge.predict([get_observation_status_quo0().features]),
                unwrap_observation_data([get_observation_status_quo0().data]),
            )

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[get_observation2trans().data],
        )
        modelbridge.predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Check that _single_predict is equivalent here.
        modelbridge._single_predict([get_observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([get_observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([get_observation1trans().features], [2], None, {}),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(
            name="test_metric")))
        modelbridge._set_kwargs_to_save(model_key="TestModel",
                                        model_kwargs={},
                                        bridge_kwargs={})
        gr = modelbridge.gen(
            n=1,
            search_space=get_search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [get_observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        # _gen should see the transformed space/features ({"x": 36}), while the
        # caller passed untransformed ones ({"x": 5}).
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [get_observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={},
            observation_features=[get_observation1().features])

        # Gen with no pending observations and no fixed features
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=None)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"),
                     Metric(name="test_metric_2")]))
        modelbridge.gen(n=1,
                        search_space=get_search_space_for_value(),
                        optimization_config=oc2)
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace(
                [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[get_observation1trans().data],
        )
        cv_training_data = [get_observation2()]
        cv_test_points = [get_observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points)
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[get_observation2trans().features],
            obs_data=[get_observation2trans().data],
            cv_test_points=[get_observation1().features
                            ],  # untransformed after
        )
        self.assertTrue(cv_predictions == [get_observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [get_observation1(), get_observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(
            modelbridge.model_space == get_search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [False, False])

        # Assigning a wrong-length in-design mask must raise.
        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        # NOTE(review): this block duplicates the one above — possibly an
        # unintentional copy; confirm with the original author before removing.
        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        # Test feature_importances
        with self.assertRaises(NotImplementedError):
            modelbridge.feature_importances("a")
Example #8
0
    def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
        """End-to-end plumbing test for ``ModelBridge``.

        Covers: transform ordering on init, transform application in
        ``predict``/``gen``/``cross_validate`` (checked via mocked internals),
        and the stored-training-data accessors. Statement order matters —
        later assertions inspect mocks configured earlier in the method.
        """
        # Test that on init transforms are stored and applied in the correct order
        transforms = [t1, t2]
        exp = get_experiment()
        modelbridge = ModelBridge(search_space_for_value(), 0, transforms, exp, 0)
        self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
        # Kwargs of the first (and only) _fit call made during init.
        fit_args = mock_fit.mock_calls[0][2]
        self.assertTrue(fit_args["search_space"] == search_space_for_value(8.0))
        self.assertTrue(
            fit_args["observation_features"]
            == [observation1trans().features, observation2trans().features]
        )
        self.assertTrue(
            fit_args["observation_data"]
            == [observation1trans().data, observation2trans().data]
        )
        self.assertTrue(mock_observations_from_data.called)

        # Test that transforms are applied correctly on predict
        modelbridge._predict = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._predict",
            autospec=True,
            return_value=[observation2trans().data],
        )

        modelbridge.predict([observation2().features])
        # Observation features sent to _predict are un-transformed afterwards
        modelbridge._predict.assert_called_with([observation2().features])

        # Test transforms applied on gen
        modelbridge._gen = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._gen",
            autospec=True,
            return_value=([observation1trans().features], [2], None),
        )
        oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
        modelbridge._set_kwargs_to_save(
            model_key="TestModel", model_kwargs={}, bridge_kwargs={}
        )
        gr = modelbridge.gen(
            n=1,
            search_space=search_space_for_value(),
            optimization_config=oc,
            pending_observations={"a": [observation2().features]},
            fixed_features=ObservationFeatures({"x": 5}),
        )
        self.assertEqual(gr._model_key, "TestModel")
        # _gen should see the transformed space/features ({"x": 36}), while the
        # caller passed untransformed ones ({"x": 5}).
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc,
            pending_observations={"a": [observation2trans().features]},
            fixed_features=ObservationFeatures({"x": 36}),
            model_gen_options=None,
        )
        mock_gen_arms.assert_called_with(
            arms_by_signature={}, observation_features=[observation1().features]
        )

        # Gen with no pending observations and no fixed features
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=None
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=None,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Gen with multi-objective optimization config.
        oc2 = OptimizationConfig(
            objective=ScalarizedObjective(
                metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
            )
        )
        modelbridge.gen(
            n=1, search_space=search_space_for_value(), optimization_config=oc2
        )
        modelbridge._gen.assert_called_with(
            n=1,
            search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
            optimization_config=oc2,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
            model_gen_options=None,
        )

        # Test transforms applied on cross_validate
        modelbridge._cross_validate = mock.MagicMock(
            "ax.modelbridge.base.ModelBridge._cross_validate",
            autospec=True,
            return_value=[observation1trans().data],
        )
        cv_training_data = [observation2()]
        cv_test_points = [observation1().features]
        cv_predictions = modelbridge.cross_validate(
            cv_training_data=cv_training_data, cv_test_points=cv_test_points
        )
        # Training data is transformed on the way in; test points come back
        # untransformed to the caller.
        modelbridge._cross_validate.assert_called_with(
            obs_feats=[observation2trans().features],
            obs_data=[observation2trans().data],
            cv_test_points=[observation1().features],  # untransformed after
        )
        self.assertTrue(cv_predictions == [observation1().data])

        # Test stored training data
        obs = modelbridge.get_training_data()
        self.assertTrue(obs == [observation1(), observation2()])
        self.assertEqual(modelbridge.metric_names, {"a", "b"})
        self.assertIsNone(modelbridge.status_quo)
        self.assertTrue(modelbridge.model_space == search_space_for_value())
        self.assertEqual(modelbridge.training_in_design, [True, True])

        # Correct-length mask is accepted; wrong-length mask must raise.
        modelbridge.training_in_design = [True, False]
        with self.assertRaises(ValueError):
            modelbridge.training_in_design = [True, True, False]

        ood_obs = modelbridge.out_of_design_data()
        self.assertTrue(ood_obs == unwrap_observation_data([observation2().data]))