def setUp(self):
    """Build the metric and objective fixtures shared by the tests."""
    m1 = Metric(name="m1")
    m2 = Metric(name="m2", lower_is_better=True)
    m3 = Metric(name="m3", lower_is_better=False)
    self.metrics = {"m1": m1, "m2": m2, "m3": m3}
    self.objective = Objective(metric=m1, minimize=False)
    self.multi_objective = MultiObjective(metrics=[m1, m2, m3])
    self.scalarized_objective = ScalarizedObjective(metrics=[m1, m2])
def setUp(self):
    """Assemble the objective and outcome-constraint fixtures for the tests."""
    m1 = Metric(name="m1")
    m2 = Metric(name="m2")
    self.metrics = {"m1": m1, "m2": m2}
    self.objective = Objective(metric=m1, minimize=False)
    self.alt_objective = Objective(metric=m2, minimize=False)
    self.multi_objective = MultiObjective(metrics=[m1, m2])
    self.m2_objective = ScalarizedObjective(metrics=[m1, m2])
    # One lower bound and one upper bound on m2, plus a scalarized constraint.
    self.outcome_constraint = OutcomeConstraint(
        metric=m2, op=ComparisonOp.GEQ, bound=-0.25
    )
    self.additional_outcome_constraint = OutcomeConstraint(
        metric=m2, op=ComparisonOp.LEQ, bound=0.25
    )
    self.scalarized_outcome_constraint = ScalarizedOutcomeConstraint(
        metrics=[m1, m2],
        weights=[0.5, 0.5],
        op=ComparisonOp.GEQ,
        bound=-0.25,
    )
    self.outcome_constraints = [
        self.outcome_constraint,
        self.additional_outcome_constraint,
        self.scalarized_outcome_constraint,
    ]
def test_best_raw_objective_point_scalarized(self):
    """Best-point extraction fails on an empty experiment and succeeds
    once a trial with an arm has been attached and run."""
    exp = get_branin_experiment()
    exp.optimization_config = OptimizationConfig(
        ScalarizedObjective(metrics=[get_branin_metric()], minimize=False)
    )
    # No trials/data yet: identifying a best point must fail.
    with self.assertRaisesRegex(ValueError, "Cannot identify best "):
        get_best_raw_objective_point(exp)
    self.assertEqual(get_best_parameters(exp, Models), None)
    exp.new_trial(
        generator_run=GeneratorRun(arms=[Arm(parameters={"x1": 5.0, "x2": 5.0})])
    ).run()
    # With one run trial, its only arm's parameters are the best point.
    self.assertEqual(get_best_raw_objective_point(exp)[0], {"x1": 5.0, "x2": 5.0})
def _build_new_optimization_config(weights, primary_objective, secondary_objective, outcome_constraints=None):
    """Return a MultiObjectiveOptimizationConfig wrapping a weighted
    scalarization (maximized) of the two given objective metrics.

    Args:
        weights: per-metric weights for the scalarization.
        primary_objective: first metric to scalarize.
        secondary_objective: second metric to scalarize.
        outcome_constraints: optional constraints to attach to the config.
    """
    scalarized = ScalarizedObjective(
        metrics=[primary_objective, secondary_objective],
        weights=weights,
        minimize=False,
    )
    return MultiObjectiveOptimizationConfig(
        objective=scalarized,
        outcome_constraints=outcome_constraints,
    )
class ObjectiveTest(TestCase):
    """Tests for Objective, MultiObjective, and ScalarizedObjective."""

    def setUp(self):
        # Fixtures: one metric with default direction, one lower-is-better,
        # one explicitly higher-is-better.
        self.metrics = {
            "m1": Metric(name="m1"),
            "m2": Metric(name="m2", lower_is_better=True),
            "m3": Metric(name="m3", lower_is_better=False),
        }
        self.objective = Objective(metric=self.metrics["m1"], minimize=False)
        self.multi_objective = MultiObjective(metrics=[
            self.metrics["m1"], self.metrics["m2"], self.metrics["m3"]
        ])
        self.scalarized_objective = ScalarizedObjective(
            metrics=[self.metrics["m1"], self.metrics["m2"]])

    def testBadInit(self):
        # Mismatched metrics/weights lengths must be rejected.
        with self.assertRaises(ValueError):
            self.scalarized_objective_weighted = ScalarizedObjective(
                metrics=[self.metrics["m1"], self.metrics["m2"]], weights=[1.0])

    def testMultiObjective(self):
        # The singular `.metric` accessor is undefined for a multi-objective.
        with self.assertRaises(NotImplementedError):
            return self.multi_objective.metric
        self.assertEqual(self.multi_objective.metrics, list(self.metrics.values()))
        # lower_is_better=True flips the corresponding weight sign to -1.
        weights = [mw[1] for mw in self.multi_objective.metric_weights]
        self.assertEqual(weights, [1.0, -1.0, 1.0])
        self.assertEqual(self.multi_objective.clone(), self.multi_objective)
        self.assertEqual(
            str(self.multi_objective),
            "MultiObjective(metric_names=['m1', 'm2', 'm3'], minimize=False)",
        )

    def testScalarizedObjective(self):
        # The singular `.metric` accessor is undefined for a scalarized objective.
        with self.assertRaises(NotImplementedError):
            return self.scalarized_objective.metric
        self.assertEqual(self.scalarized_objective.metrics,
                         [self.metrics["m1"], self.metrics["m2"]])
        # Weights default to 1.0 per metric when unspecified.
        weights = [mw[1] for mw in self.scalarized_objective.metric_weights]
        self.assertEqual(weights, [1.0, 1.0])
        self.assertEqual(self.scalarized_objective.clone(), self.scalarized_objective)
        self.assertEqual(
            str(self.scalarized_objective),
            ("ScalarizedObjective(metric_names=['m1', 'm2'], weights=[1.0, 1.0], "
             "minimize=False)"),
        )
def testInit(self):
    """Constructor validation plus deprecation/direction-mismatch warnings."""
    # Mismatched metrics/weights lengths must be rejected.
    with self.assertRaises(ValueError):
        ScalarizedObjective(
            metrics=[self.metrics["m1"], self.metrics["m2"]], weights=[1.0])
    # Reset filters so warnings are not suppressed by earlier test runs.
    warnings.resetwarnings()
    warnings.simplefilter("always", append=True)
    with warnings.catch_warnings(record=True) as ws:
        # Omitting `minimize` is deprecated and defaults to False.
        Objective(metric=self.metrics["m1"])
        self.assertTrue(
            any(issubclass(w.category, DeprecationWarning) for w in ws))
        self.assertTrue(
            any("Defaulting to `minimize=False`" in str(w.message) for w in ws))
    with warnings.catch_warnings(record=True) as ws:
        # Maximizing a lower-is-better metric should warn.
        Objective(Metric(name="m4", lower_is_better=True), minimize=False)
        self.assertTrue(
            any("Attempting to maximize" in str(w.message) for w in ws))
    with warnings.catch_warnings(record=True) as ws:
        # Minimizing a higher-is-better metric should warn.
        Objective(Metric(name="m4", lower_is_better=False), minimize=True)
        self.assertTrue(
            any("Attempting to minimize" in str(w.message) for w in ws))
class ObjectiveTest(TestCase):
    """Tests for ScalarizedObjective construction and basic accessors."""

    def setUp(self):
        m1 = Metric(name="m1")
        m2 = Metric(name="m2")
        self.metrics = {"m1": m1, "m2": m2}
        self.objective = Objective(metric=m1, minimize=False)
        self.multi_objective = ScalarizedObjective(metrics=[m1, m2])

    def testBadInit(self):
        # One weight for two metrics is invalid; construction must fail.
        with self.assertRaises(ValueError):
            self.multi_objective_weighted = ScalarizedObjective(
                metrics=[self.metrics["m1"], self.metrics["m2"]],
                weights=[1.0],
            )

    def testScalarizedObjective(self):
        # The singular `.metric` accessor is unsupported on a scalarization.
        with self.assertRaises(NotImplementedError):
            return self.multi_objective.metric
        self.assertEqual(self.multi_objective.metrics, list(self.metrics.values()))
        # Unspecified weights default to 1.0 per metric.
        self.assertEqual(
            [weight for _, weight in self.multi_objective.metric_weights],
            [1.0, 1.0],
        )
        self.assertEqual(self.multi_objective.clone(), self.multi_objective)
def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
    """End-to-end plumbing test for ModelBridge: transform ordering on init,
    out-of-design prediction handling, transform application on predict/gen/
    cross_validate, and stored training-data state."""
    # Test that on init transforms are stored and applied in the correct order
    transforms = [transform_1, transform_2]
    exp = get_experiment_for_value()
    ss = get_search_space_for_value()
    modelbridge = ModelBridge(ss, 0, transforms, exp, 0)
    self.assertEqual(list(modelbridge.transforms.keys()),
                     ["transform_1", "transform_2"])
    # Fit should have received the fully transformed search space and no
    # in-design observations.
    fit_args = mock_fit.mock_calls[0][2]
    self.assertTrue(
        fit_args["search_space"] == get_search_space_for_value(8.0))
    self.assertTrue(fit_args["observation_features"] == [])
    self.assertTrue(fit_args["observation_data"] == [])
    self.assertTrue(mock_observations_from_data.called)
    # Test prediction on out of design features.
    modelbridge._predict = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._predict",
        autospec=True,
        side_effect=ValueError("Out of Design"),
    )
    # This point is in design, and thus failures in predict are legitimate.
    with mock.patch.object(ModelBridge, "model_space",
                           return_value=get_search_space_for_range_values):
        with self.assertRaises(ValueError):
            modelbridge.predict([get_observation2().features])
    # This point is out of design, and not in training data.
    with self.assertRaises(ValueError):
        modelbridge.predict([get_observation_status_quo0().features])
    # Now it's in the training data.
    with mock.patch.object(
        ModelBridge,
        "get_training_data",
        return_value=[get_observation_status_quo0()],
    ):
        # Return raw training value.
        self.assertEqual(
            modelbridge.predict([get_observation_status_quo0().features]),
            unwrap_observation_data([get_observation_status_quo0().data]),
        )
    # Test that transforms are applied correctly on predict
    modelbridge._predict = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._predict",
        autospec=True,
        return_value=[get_observation2trans().data],
    )
    modelbridge.predict([get_observation2().features])
    # Observation features sent to _predict are un-transformed afterwards
    modelbridge._predict.assert_called_with([get_observation2().features])
    # Check that _single_predict is equivalent here.
    modelbridge._single_predict([get_observation2().features])
    # Observation features sent to _predict are un-transformed afterwards
    modelbridge._predict.assert_called_with([get_observation2().features])
    # Test transforms applied on gen
    modelbridge._gen = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._gen",
        autospec=True,
        return_value=([get_observation1trans().features], [2], None, {}),
    )
    oc = OptimizationConfig(objective=Objective(metric=Metric(
        name="test_metric")))
    modelbridge._set_kwargs_to_save(model_key="TestModel",
                                    model_kwargs={},
                                    bridge_kwargs={})
    gr = modelbridge.gen(
        n=1,
        search_space=get_search_space_for_value(),
        optimization_config=oc,
        pending_observations={"a": [get_observation2().features]},
        fixed_features=ObservationFeatures({"x": 5}),
    )
    self.assertEqual(gr._model_key, "TestModel")
    # _gen must receive transformed search space, pendings, and features.
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace(
            [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=oc,
        pending_observations={"a": [get_observation2trans().features]},
        fixed_features=ObservationFeatures({"x": 36}),
        model_gen_options=None,
    )
    mock_gen_arms.assert_called_with(
        arms_by_signature={},
        observation_features=[get_observation1().features])
    # Gen with no pending observations and no fixed features
    modelbridge.gen(n=1,
                    search_space=get_search_space_for_value(),
                    optimization_config=None)
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace(
            [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=None,
        pending_observations={},
        fixed_features=ObservationFeatures({}),
        model_gen_options=None,
    )
    # Gen with multi-objective optimization config.
    oc2 = OptimizationConfig(objective=ScalarizedObjective(
        metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]))
    modelbridge.gen(n=1,
                    search_space=get_search_space_for_value(),
                    optimization_config=oc2)
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace(
            [FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=oc2,
        pending_observations={},
        fixed_features=ObservationFeatures({}),
        model_gen_options=None,
    )
    # Test transforms applied on cross_validate
    modelbridge._cross_validate = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._cross_validate",
        autospec=True,
        return_value=[get_observation1trans().data],
    )
    cv_training_data = [get_observation2()]
    cv_test_points = [get_observation1().features]
    cv_predictions = modelbridge.cross_validate(
        cv_training_data=cv_training_data, cv_test_points=cv_test_points)
    modelbridge._cross_validate.assert_called_with(
        obs_feats=[get_observation2trans().features],
        obs_data=[get_observation2trans().data],
        cv_test_points=[get_observation1().features
                        ],  # untransformed after
    )
    self.assertTrue(cv_predictions == [get_observation1().data])
    # Test stored training data
    obs = modelbridge.get_training_data()
    self.assertTrue(obs == [get_observation1(), get_observation2()])
    self.assertEqual(modelbridge.metric_names, {"a", "b"})
    self.assertIsNone(modelbridge.status_quo)
    self.assertTrue(
        modelbridge.model_space == get_search_space_for_value())
    self.assertEqual(modelbridge.training_in_design, [False, False])
    # Setting a wrong-length training_in_design list is rejected.
    with self.assertRaises(ValueError):
        modelbridge.training_in_design = [True, True, False]
    # NOTE(review): the assertion below duplicates the one above — likely a
    # copy/paste leftover; harmless but worth removing.
    with self.assertRaises(ValueError):
        modelbridge.training_in_design = [True, True, False]
    # Test feature_importances
    with self.assertRaises(NotImplementedError):
        modelbridge.feature_importances("a")
def get_scalarized_objective() -> Objective:
    """Construct a two-metric ScalarizedObjective fixture (weights 1.0, 2.0)."""
    fixture_metrics = [Metric(name="m1"), Metric(name="m3")]
    return ScalarizedObjective(
        metrics=fixture_metrics,
        weights=[1.0, 2.0],
        minimize=False,
    )
def testBadInit(self):
    """A weights list whose length differs from metrics must be rejected."""
    bad_weights = [1.0]  # one weight for two metrics
    with self.assertRaises(ValueError):
        self.multi_objective_weighted = ScalarizedObjective(
            metrics=[self.metrics["m1"], self.metrics["m2"]],
            weights=bad_weights,
        )
def setUp(self):
    """Shared fixtures: two metrics, a single objective, a scalarization."""
    m1 = Metric(name="m1")
    m2 = Metric(name="m2")
    self.metrics = {"m1": m1, "m2": m2}
    self.objective = Objective(metric=m1, minimize=False)
    self.multi_objective = ScalarizedObjective(metrics=[m1, m2])
def metric_from_sqa(
    self, metric_sqa: SQAMetric
) -> Union[Metric, Objective, OutcomeConstraint]:
    """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint.

    The row's `intent` selects which Ax object to reconstruct. Scalarized
    variants are stored as a parent row whose child rows carry the individual
    metrics and their weights.

    Raises:
        SQADecodeError: when required fields for the selected intent are None,
            or the intent itself is unrecognized.
    """
    metric = self.metric_from_sqa_util(metric_sqa)
    if metric_sqa.intent == MetricIntent.TRACKING:
        # Plain tracking metric: nothing further to decode.
        return metric
    elif metric_sqa.intent == MetricIntent.OBJECTIVE:
        if metric_sqa.minimize is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Objective because minimize is None."
            )
        # A regular (non-scalarized) objective must not carry a weight.
        if metric_sqa.scalarized_objective_weight is not None:
            raise SQADecodeError(  # pragma: no cover
                "The metric corresponding to regular objective does not \
                have weight attribute")
        return Objective(metric=metric, minimize=metric_sqa.minimize)
    elif (metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE
          ):  # metric_sqa is a parent whose children are individual
        # metrics in MultiObjective
        if metric_sqa.minimize is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to MultiObjective \
                because minimize is None.")
        metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
        if metrics_sqa_children is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to MultiObjective \
                because the parent metric has no children metrics.")
        # Extracting metric and weight for each child
        metrics = [
            self.metric_from_sqa_util(child) for child in metrics_sqa_children
        ]
        return MultiObjective(
            metrics=list(metrics),
            # pyre-fixme[6]: Expected `bool` for 2nd param but got `Optional[bool]`.
            minimize=metric_sqa.minimize,
        )
    elif (metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE
          ):  # metric_sqa is a parent whose children are individual
        # metrics in Scalarized Objective
        if metric_sqa.minimize is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Scalarized Objective \
                because minimize is None.")
        metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
        if metrics_sqa_children is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Scalarized Objective \
                because the parent metric has no children metrics.")
        # Extracting metric and weight for each child
        metrics, weights = zip(*[(
            self.metric_from_sqa_util(child),
            child.scalarized_objective_weight,
        ) for child in metrics_sqa_children])
        return ScalarizedObjective(
            metrics=list(metrics),
            weights=list(weights),
            # pyre-fixme[6]: Expected `bool` for 3rd param but got `Optional[bool]`.
            minimize=metric_sqa.minimize,
        )
    elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
        if (metric_sqa.bound is None or metric_sqa.op is None
                or metric_sqa.relative is None):
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to OutcomeConstraint because "
                "bound, op, or relative is None.")
        return OutcomeConstraint(
            metric=metric,
            # pyre-fixme[6]: Expected `float` for 2nd param but got
            #  `Optional[float]`.
            bound=metric_sqa.bound,
            op=metric_sqa.op,
            relative=metric_sqa.relative,
        )
    elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
        if (metric_sqa.bound is None or metric_sqa.op is None
                or metric_sqa.relative is None):
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                "bound, op, or relative is None.")
        metrics_sqa_children = (
            metric_sqa.scalarized_outcome_constraint_children_metrics)
        if metrics_sqa_children is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                because the parent metric has no children metrics.")
        # Extracting metric and weight for each child
        metrics, weights = zip(*[(
            self.metric_from_sqa_util(child),
            child.scalarized_outcome_constraint_weight,
        ) for child in metrics_sqa_children])
        return ScalarizedOutcomeConstraint(
            metrics=list(metrics),
            weights=list(weights),
            # pyre-fixme[6]: Expected `float` for 2nd param but got
            #  `Optional[float]`.
            bound=metric_sqa.bound,
            op=metric_sqa.op,
            relative=metric_sqa.relative,
        )
    elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
        # NOTE(review): only bound/relative are checked although the message
        # also mentions op — presumably op may legitimately be None here.
        if metric_sqa.bound is None or metric_sqa.relative is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to ObjectiveThreshold because "
                "bound, op, or relative is None.")
        return ObjectiveThreshold(
            metric=metric,
            # pyre-fixme[6]: Expected `float` for 2nd param but got
            #  `Optional[float]`.
            bound=metric_sqa.bound,
            relative=metric_sqa.relative,
            op=metric_sqa.op,
        )
    else:
        raise SQADecodeError(
            f"Cannot decode SQAMetric because {metric_sqa.intent} "
            f"is an invalid intent.")
def testGen(self, mock_init, mock_best_point, mock_gen):
    """Check that _gen translates the optimization config, constraints,
    pendings, and fixed features into the numeric arrays the model expects."""
    # Test with constraints
    optimization_config = OptimizationConfig(
        objective=Objective(Metric("a"), minimize=True),
        outcome_constraints=[
            OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
        ],
    )
    ma = NumpyModelBridge()
    ma.parameters = ["x", "y", "z"]
    ma.outcomes = ["a", "b"]
    ma.transforms = OrderedDict()
    observation_features, weights, best_obsf, _ = ma._gen(
        n=3,
        search_space=self.search_space,
        optimization_config=optimization_config,
        pending_observations=self.pending_observations,
        fixed_features=ObservationFeatures({"z": 3.0}),
        model_gen_options=self.model_gen_options,
    )
    gen_args = mock_gen.mock_calls[0][2]
    self.assertEqual(gen_args["n"], 3)
    self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
    # minimize=True negates the objective weight; untargeted outcome "b" is 0.
    self.assertTrue(
        np.array_equal(gen_args["objective_weights"], np.array([-1.0, 0.0])))
    # GEQ constraint b >= 2 becomes -b <= -2 in A x <= b form.
    self.assertTrue(
        np.array_equal(gen_args["outcome_constraints"][0],
                       np.array([[0.0, -1.0]])))
    self.assertTrue(
        np.array_equal(gen_args["outcome_constraints"][1], np.array([[-2]])))
    self.assertTrue(
        np.array_equal(
            gen_args["linear_constraints"][0],
            np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
        ))
    self.assertTrue(
        np.array_equal(gen_args["linear_constraints"][1],
                       np.array([[0.0], [-3.5]])))
    # Fixed feature "z" maps to parameter index 2.
    self.assertEqual(gen_args["fixed_features"], {2: 3.0})
    self.assertTrue(
        np.array_equal(gen_args["pending_observations"][0], np.array([])))
    self.assertTrue(
        np.array_equal(gen_args["pending_observations"][1],
                       np.array([[0.6, 1.6, 3.0]])))
    self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
    self.assertEqual(observation_features[0].parameters, {
        "x": 1.0,
        "y": 2.0,
        "z": 3.0
    })
    self.assertEqual(observation_features[1].parameters, {
        "x": 3.0,
        "y": 4.0,
        "z": 3.0
    })
    self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))
    # Test with multiple objectives.
    oc2 = OptimizationConfig(objective=ScalarizedObjective(
        metrics=[Metric(name="a"), Metric(name="b")], minimize=True))
    observation_features, weights, best_obsf, _ = ma._gen(
        n=3,
        search_space=self.search_space,
        optimization_config=oc2,
        pending_observations=self.pending_observations,
        fixed_features=ObservationFeatures({"z": 3.0}),
        model_gen_options=self.model_gen_options,
    )
    gen_args = mock_gen.mock_calls[1][2]
    self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
    self.assertIsNone(gen_args["outcome_constraints"])
    # Scalarization with minimize=True negates both metric weights.
    self.assertTrue(
        np.array_equal(gen_args["objective_weights"], np.array([-1.0, -1.0])))
    # Test with MultiObjective (unweighted multiple objectives)
    oc3 = MultiObjectiveOptimizationConfig(objective=MultiObjective(
        metrics=[Metric(name="a"), Metric(name="b", lower_is_better=True)],
        minimize=True,
    ))
    search_space = SearchSpace(self.parameters)  # Unconstrained
    observation_features, weights, best_obsf, _ = ma._gen(
        n=3,
        search_space=search_space,
        optimization_config=oc3,
        pending_observations=self.pending_observations,
        fixed_features=ObservationFeatures({"z": 3.0}),
        model_gen_options=self.model_gen_options,
    )
    gen_args = mock_gen.mock_calls[2][2]
    self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
    self.assertIsNone(gen_args["outcome_constraints"])
    # lower_is_better=True flips "b"'s weight in the multi-objective.
    self.assertTrue(
        np.array_equal(gen_args["objective_weights"], np.array([1.0, -1.0])))
    # Test with no constraints, no fixed feature, no pending observations
    search_space = SearchSpace(self.parameters[:2])
    optimization_config.outcome_constraints = []
    ma.parameters = ["x", "y"]
    # NOTE(review): positional call — order here appears to be (n, search_space,
    # pending_observations, fixed_features, model_gen_options,
    # optimization_config); confirm against _gen's signature.
    ma._gen(3, search_space, {}, ObservationFeatures({}), None,
            optimization_config)
    gen_args = mock_gen.mock_calls[3][2]
    self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
    self.assertIsNone(gen_args["outcome_constraints"])
    self.assertIsNone(gen_args["linear_constraints"])
    self.assertIsNone(gen_args["fixed_features"])
    self.assertIsNone(gen_args["pending_observations"])
    # Test validation
    optimization_config = OptimizationConfig(
        objective=Objective(Metric("a"), minimize=False),
        outcome_constraints=[
            OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
        ],
    )
    # minimize=False here is expected to be rejected by _gen's validation.
    with self.assertRaises(ValueError):
        ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
        )
    optimization_config.objective.minimize = True
    # Relative constraints are likewise rejected at this layer.
    optimization_config.outcome_constraints[0].relative = True
    with self.assertRaises(ValueError):
        ma._gen(
            n=3,
            search_space=self.search_space,
            optimization_config=optimization_config,
            pending_observations={},
            fixed_features=ObservationFeatures({}),
        )
def testModelBridge(self, mock_fit, mock_gen_arms, mock_observations_from_data):
    """Plumbing test for ModelBridge: transform ordering on init, transform
    application on predict/gen/cross_validate, and stored training state."""
    # Test that on init transforms are stored and applied in the correct order
    transforms = [t1, t2]
    exp = get_experiment()
    modelbridge = ModelBridge(search_space_for_value(), 0, transforms, exp, 0)
    self.assertEqual(list(modelbridge.transforms.keys()), ["t1", "t2"])
    # Fit should receive the fully transformed search space and observations.
    fit_args = mock_fit.mock_calls[0][2]
    self.assertTrue(fit_args["search_space"] == search_space_for_value(8.0))
    self.assertTrue(
        fit_args["observation_features"]
        == [observation1trans().features, observation2trans().features]
    )
    self.assertTrue(
        fit_args["observation_data"]
        == [observation1trans().data, observation2trans().data]
    )
    self.assertTrue(mock_observations_from_data.called)
    # Test that transforms are applied correctly on predict
    modelbridge._predict = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._predict",
        autospec=True,
        return_value=[observation2trans().data],
    )
    modelbridge.predict([observation2().features])
    # Observation features sent to _predict are un-transformed afterwards
    modelbridge._predict.assert_called_with([observation2().features])
    # Test transforms applied on gen
    modelbridge._gen = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._gen",
        autospec=True,
        return_value=([observation1trans().features], [2], None),
    )
    oc = OptimizationConfig(objective=Objective(metric=Metric(name="test_metric")))
    modelbridge._set_kwargs_to_save(
        model_key="TestModel", model_kwargs={}, bridge_kwargs={}
    )
    gr = modelbridge.gen(
        n=1,
        search_space=search_space_for_value(),
        optimization_config=oc,
        pending_observations={"a": [observation2().features]},
        fixed_features=ObservationFeatures({"x": 5}),
    )
    self.assertEqual(gr._model_key, "TestModel")
    # _gen receives the transformed search space, pendings, and features.
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=oc,
        pending_observations={"a": [observation2trans().features]},
        fixed_features=ObservationFeatures({"x": 36}),
        model_gen_options=None,
    )
    mock_gen_arms.assert_called_with(
        arms_by_signature={}, observation_features=[observation1().features]
    )
    # Gen with no pending observations and no fixed features
    modelbridge.gen(
        n=1, search_space=search_space_for_value(), optimization_config=None
    )
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=None,
        pending_observations={},
        fixed_features=ObservationFeatures({}),
        model_gen_options=None,
    )
    # Gen with multi-objective optimization config.
    oc2 = OptimizationConfig(
        objective=ScalarizedObjective(
            metrics=[Metric(name="test_metric"), Metric(name="test_metric_2")]
        )
    )
    modelbridge.gen(
        n=1, search_space=search_space_for_value(), optimization_config=oc2
    )
    modelbridge._gen.assert_called_with(
        n=1,
        search_space=SearchSpace([FixedParameter("x", ParameterType.FLOAT, 8.0)]),
        optimization_config=oc2,
        pending_observations={},
        fixed_features=ObservationFeatures({}),
        model_gen_options=None,
    )
    # Test transforms applied on cross_validate
    modelbridge._cross_validate = mock.MagicMock(
        "ax.modelbridge.base.ModelBridge._cross_validate",
        autospec=True,
        return_value=[observation1trans().data],
    )
    cv_training_data = [observation2()]
    cv_test_points = [observation1().features]
    cv_predictions = modelbridge.cross_validate(
        cv_training_data=cv_training_data, cv_test_points=cv_test_points
    )
    modelbridge._cross_validate.assert_called_with(
        obs_feats=[observation2trans().features],
        obs_data=[observation2trans().data],
        cv_test_points=[observation1().features],  # untransformed after
    )
    self.assertTrue(cv_predictions == [observation1().data])
    # Test stored training data
    obs = modelbridge.get_training_data()
    self.assertTrue(obs == [observation1(), observation2()])
    self.assertEqual(modelbridge.metric_names, {"a", "b"})
    self.assertIsNone(modelbridge.status_quo)
    self.assertTrue(modelbridge.model_space == search_space_for_value())
    self.assertEqual(modelbridge.training_in_design, [True, True])
    modelbridge.training_in_design = [True, False]
    # Wrong-length training_in_design assignments are rejected.
    with self.assertRaises(ValueError):
        modelbridge.training_in_design = [True, True, False]
    ood_obs = modelbridge.out_of_design_data()
    self.assertTrue(ood_obs == unwrap_observation_data([observation2().data]))
class ObjectiveTest(TestCase):
    """Tests for Objective variants: constructor warnings, string forms,
    and `get_unconstrainable_metrics`."""

    def setUp(self):
        # Fixtures: default-direction, lower-is-better, higher-is-better.
        self.metrics = {
            "m1": Metric(name="m1"),
            "m2": Metric(name="m2", lower_is_better=True),
            "m3": Metric(name="m3", lower_is_better=False),
        }
        self.objective = Objective(metric=self.metrics["m1"], minimize=False)
        self.multi_objective = MultiObjective(metrics=[
            self.metrics["m1"], self.metrics["m2"], self.metrics["m3"]
        ])
        self.scalarized_objective = ScalarizedObjective(
            metrics=[self.metrics["m1"], self.metrics["m2"]])

    def testInit(self):
        # Mismatched metrics/weights lengths must be rejected.
        with self.assertRaises(ValueError):
            ScalarizedObjective(
                metrics=[self.metrics["m1"], self.metrics["m2"]], weights=[1.0])
        # Reset filters so warnings are not suppressed by earlier test runs.
        warnings.resetwarnings()
        warnings.simplefilter("always", append=True)
        with warnings.catch_warnings(record=True) as ws:
            # Omitting `minimize` is deprecated and defaults to False.
            Objective(metric=self.metrics["m1"])
            self.assertTrue(
                any(issubclass(w.category, DeprecationWarning) for w in ws))
            self.assertTrue(
                any("Defaulting to `minimize=False`" in str(w.message)
                    for w in ws))
        with warnings.catch_warnings(record=True) as ws:
            # Maximizing a lower-is-better metric should warn.
            Objective(Metric(name="m4", lower_is_better=True), minimize=False)
            self.assertTrue(
                any("Attempting to maximize" in str(w.message) for w in ws))
        with warnings.catch_warnings(record=True) as ws:
            # Minimizing a higher-is-better metric should warn.
            Objective(Metric(name="m4", lower_is_better=False), minimize=True)
            self.assertTrue(
                any("Attempting to minimize" in str(w.message) for w in ws))
        # A single-metric objective exposes its metric as unconstrainable.
        self.assertEqual(self.objective.get_unconstrainable_metrics(),
                         [self.metrics["m1"]])

    def testMultiObjective(self):
        # The singular `.metric` accessor is undefined for a multi-objective.
        with self.assertRaises(NotImplementedError):
            return self.multi_objective.metric
        self.assertEqual(self.multi_objective.metrics, list(self.metrics.values()))
        # lower_is_better=True flips the corresponding weight sign to -1.
        weights = [mw[1] for mw in self.multi_objective.metric_weights]
        self.assertEqual(weights, [1.0, -1.0, 1.0])
        self.assertEqual(self.multi_objective.clone(), self.multi_objective)
        self.assertEqual(
            str(self.multi_objective),
            "MultiObjective(metric_names=['m1', 'm2', 'm3'], minimize=False)",
        )
        self.assertEqual(self.multi_objective.get_unconstrainable_metrics(), [])

    def testScalarizedObjective(self):
        # The singular `.metric` accessor is undefined for a scalarization.
        with self.assertRaises(NotImplementedError):
            return self.scalarized_objective.metric
        self.assertEqual(self.scalarized_objective.metrics,
                         [self.metrics["m1"], self.metrics["m2"]])
        # Unspecified weights default to 1.0 per metric.
        weights = [mw[1] for mw in self.scalarized_objective.metric_weights]
        self.assertEqual(weights, [1.0, 1.0])
        self.assertEqual(self.scalarized_objective.clone(),
                         self.scalarized_objective)
        self.assertEqual(
            str(self.scalarized_objective),
            ("ScalarizedObjective(metric_names=['m1', 'm2'], weights=[1.0, 1.0], "
             "minimize=False)"),
        )
        self.assertEqual(
            self.scalarized_objective.get_unconstrainable_metrics(), [])
def create_new_experiment(input_data, runner, metric, saver_loader):
    """Create, seed, and persist a new Ax experiment from a request payload.

    Args:
        input_data: dict describing the test — search space, metric->weight
            mapping, optional control group, arm count, and module name
            ('bayesian_optimization' or 'bandit').
        runner: runner class attached to the experiment (instantiated here).
        metric: metric class used to build each objective metric.
        saver_loader: storage helper exposing ``save_full_experiment``.

    Returns:
        A JSON-serializable metadata dict for the created experiment, or an
        error string when the configuration is unsupported.

    Raises:
        ValueError: if the requested module is not recognized.
    """
    # Parse the search space and create the experiment shell.
    search_space = parse_search_space(input_data['search_space'])
    experiment = Experiment(name=input_data['test_name'],
                            search_space=search_space,
                            description=input_data['test_description'])

    # Optional control arm (status quo).
    if input_data['control_group']:
        experiment.status_quo = Arm(name="control",
                                    parameters=input_data['control_group'])

    # Build a weighted scalarized (maximized) objective from metric->weight.
    metrics = []
    weights = []
    for name, weight in input_data['metrics_weights'].items():
        metrics.append(metric(name=name, lower_is_better=False))
        weights.append(weight)
    main_objective = ScalarizedObjective(metrics=metrics,
                                         weights=weights,
                                         minimize=False)
    experiment.optimization_config = OptimizationConfig(
        objective=main_objective)

    # First generation step: factorial when exhaustively enumerating arms
    # (arms_to_generate == -1), Sobol otherwise.
    if input_data['arms_to_generate'] == -1:
        generation_step0_model = Models.FACTORIAL
    else:
        generation_step0_model = Models.SOBOL

    # Second generation step depends on the requested module.
    module = input_data['test_description']['module']
    if module == 'bayesian_optimization':
        param_types = [
            spec['type']
            for spec in input_data['search_space']['parameters'].values()
        ]
        if 'choice' in param_types:
            return 'choice param not implemented for bayesian opt'
        generation_step1_model = Models.BOTORCH
    elif module == 'bandit':
        generation_step1_model = Models.THOMPSON
    else:
        # Previously an unknown module fell through and crashed later with an
        # UnboundLocalError; fail fast with a clear message instead.
        raise ValueError(f"Unsupported module: {module!r}")

    generation_strategy = GenerationStrategy(steps=[
        GenerationStep(model=generation_step0_model, num_trials=1),
        GenerationStep(model=generation_step1_model,
                       num_trials=-1,
                       model_kwargs={'min_weight': 0.01}),
    ])

    # Generate the initial arms.
    generation_strategy.gen(experiment=experiment,
                            search_space=search_space,
                            n=input_data['arms_to_generate'])

    # Runners can also be manually added to a trial to override the
    # experiment default.
    experiment.runner = runner()

    # First batch trial; power optimization only applies with a control arm.
    experiment.new_batch_trial(
        generator_run=generation_strategy.last_generator_run,
        optimize_for_power=bool(input_data['control_group']))

    # Persist experiment and generation strategy together.
    saver_loader.save_full_experiment(experiment, generation_strategy)

    # Serialize and assemble the response metadata.
    exp_json = object_to_json(experiment)
    experiment_metadata = {
        'experiment_name': exp_json['name'],
        'experiment_description': exp_json['description'],
        'search_space': exp_json['search_space'],
        'trial0_arms': {
            object_to_json(arm)['name']: {
                'parameters': object_to_json(arm)['parameters'],
                'weight': weight
            }
            for arm, weight in
            experiment.trials[0].normalized_arm_weights().items()
        },
        'optimization_config': exp_json['optimization_config'],
        'control_group': exp_json['status_quo'],
        'runner': exp_json['runner'],
        'time_created': exp_json['time_created']['value']
    }
    return experiment_metadata
def metric_from_sqa(
    self, metric_sqa: SQAMetric
) -> Union[Metric, Objective, OutcomeConstraint]:
    """Convert SQLAlchemy Metric to Ax Metric, Objective, or OutcomeConstraint.

    Scalarized/multi-objective rows are parents whose child rows carry the
    individual metrics (and weights); children are re-fetched by id when the
    SQA instance is detached from its session.
    """
    metric = self.metric_from_sqa_util(metric_sqa)
    if metric_sqa.intent == MetricIntent.TRACKING:
        # Plain tracking metric: nothing further to decode.
        return metric
    elif metric_sqa.intent == MetricIntent.OBJECTIVE:
        if metric_sqa.minimize is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Objective because minimize is None."
            )
        # A regular (non-scalarized) objective must not carry a weight.
        if metric_sqa.scalarized_objective_weight is not None:
            raise SQADecodeError(  # pragma: no cover
                "The metric corresponding to regular objective does not \
                have weight attribute")
        return Objective(metric=metric, minimize=metric_sqa.minimize)
    elif (metric_sqa.intent == MetricIntent.MULTI_OBJECTIVE
          ):  # metric_sqa is a parent whose children are individual
        # metrics in MultiObjective
        # Children are inaccessible on a detached instance; re-query by id.
        try:
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
        except DetachedInstanceError:
            metrics_sqa_children = _get_scalarized_objective_children_metrics(
                metric_id=metric_sqa.id, decoder=self)
        if metrics_sqa_children is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to MultiObjective \
                because the parent metric has no children metrics.")
        # Extracting metric and weight for each child
        # NOTE: the comprehension variable shadows the outer `metric_sqa`, so
        # each child's own `minimize` flag is used for its Objective.
        objectives = [
            Objective(
                metric=self.metric_from_sqa_util(metric_sqa),
                minimize=metric_sqa.minimize,
            ) for metric_sqa in metrics_sqa_children
        ]
        multi_objective = MultiObjective(objectives=objectives)
        # Preserve the stored row's id on the decoded object.
        multi_objective.db_id = metric_sqa.id
        return multi_objective
    elif (metric_sqa.intent == MetricIntent.SCALARIZED_OBJECTIVE
          ):  # metric_sqa is a parent whose children are individual
        # metrics in Scalarized Objective
        if metric_sqa.minimize is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Scalarized Objective \
                because minimize is None.")
        try:
            metrics_sqa_children = metric_sqa.scalarized_objective_children_metrics
        except DetachedInstanceError:
            metrics_sqa_children = _get_scalarized_objective_children_metrics(
                metric_id=metric_sqa.id, decoder=self)
        if metrics_sqa_children is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Scalarized Objective \
                because the parent metric has no children metrics.")
        # Extracting metric and weight for each child
        metrics, weights = zip(*[(
            self.metric_from_sqa_util(child),
            child.scalarized_objective_weight,
        ) for child in metrics_sqa_children])
        scalarized_objective = ScalarizedObjective(
            metrics=list(metrics),
            weights=list(weights),
            minimize=not_none(metric_sqa.minimize),
        )
        scalarized_objective.db_id = metric_sqa.id
        return scalarized_objective
    elif metric_sqa.intent == MetricIntent.OUTCOME_CONSTRAINT:
        if (metric_sqa.bound is None or metric_sqa.op is None
                or metric_sqa.relative is None):
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to OutcomeConstraint because "
                "bound, op, or relative is None.")
        return OutcomeConstraint(
            metric=metric,
            bound=metric_sqa.bound,
            op=metric_sqa.op,
            relative=metric_sqa.relative,
        )
    elif metric_sqa.intent == MetricIntent.SCALARIZED_OUTCOME_CONSTRAINT:
        if (metric_sqa.bound is None or metric_sqa.op is None
                or metric_sqa.relative is None):
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Scalarized OutcomeConstraint because "
                "bound, op, or relative is None.")
        try:
            metrics_sqa_children = (
                metric_sqa.scalarized_outcome_constraint_children_metrics)
        except DetachedInstanceError:
            metrics_sqa_children = (
                _get_scalarized_outcome_constraint_children_metrics(
                    metric_id=metric_sqa.id, decoder=self))
        if metrics_sqa_children is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to Scalarized OutcomeConstraint \
                because the parent metric has no children metrics.")
        # Extracting metric and weight for each child
        metrics, weights = zip(*[(
            self.metric_from_sqa_util(child),
            child.scalarized_outcome_constraint_weight,
        ) for child in metrics_sqa_children])
        scalarized_outcome_constraint = ScalarizedOutcomeConstraint(
            metrics=list(metrics),
            weights=list(weights),
            bound=not_none(metric_sqa.bound),
            op=not_none(metric_sqa.op),
            relative=not_none(metric_sqa.relative),
        )
        scalarized_outcome_constraint.db_id = metric_sqa.id
        return scalarized_outcome_constraint
    elif metric_sqa.intent == MetricIntent.OBJECTIVE_THRESHOLD:
        # NOTE(review): only bound/relative are checked although the message
        # also mentions op — presumably op may legitimately be None here.
        if metric_sqa.bound is None or metric_sqa.relative is None:
            raise SQADecodeError(  # pragma: no cover
                "Cannot decode SQAMetric to ObjectiveThreshold because "
                "bound, op, or relative is None.")
        ot = ObjectiveThreshold(
            metric=metric,
            bound=metric_sqa.bound,
            relative=metric_sqa.relative,
            op=metric_sqa.op,
        )
        # ObjectiveThreshold constructor clones the passed-in metric, which means
        # the db id gets lost and so we need to reset it
        ot.metric._db_id = metric.db_id
        return ot
    else:
        raise SQADecodeError(
            f"Cannot decode SQAMetric because {metric_sqa.intent} "
            f"is an invalid intent.")