def test_immutable_search_space_and_opt_config(self):
    """Verify the immutability flag: a default experiment is mutable, while an
    experiment created with the immutability property rejects reassignment of
    its optimization config and search space. Also checks that the property
    key is accepted as a plain string."""
    mutable_exp = self._setupBraninExperiment(n=5)
    self.assertFalse(mutable_exp.immutable_search_space_and_opt_config)
    immutable_exp = Experiment(
        name="test4",
        search_space=get_branin_search_space(),
        tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
        optimization_config=get_branin_optimization_config(),
        runner=SyntheticRunner(),
        properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},
    )
    self.assertTrue(immutable_exp.immutable_search_space_and_opt_config)
    with self.assertRaises(UnsupportedError):
        immutable_exp.optimization_config = get_branin_optimization_config()
    immutable_exp.new_batch_trial()
    with self.assertRaises(UnsupportedError):
        immutable_exp.search_space = get_branin_search_space()
    # Check that passing the property as just a string is processed correctly.
    immutable_exp_2 = Experiment(
        name="test4",
        search_space=get_branin_search_space(),
        tracking_metrics=[BraninMetric(name="b", param_names=["x1", "x2"])],
        runner=SyntheticRunner(),
        properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value: True},
    )
    self.assertTrue(immutable_exp_2.immutable_search_space_and_opt_config)
def test_minimize_callable(self):
    """Check that benchmark_minimize_callable produces a scipy-compatible
    objective whose evaluations are tracked as trials on the experiment."""
    problem = BenchmarkProblem(
        name="Branin",
        search_space=get_branin_search_space(),
        optimization_config=get_branin_optimization_config(),
    )
    experiment, objective = benchmark_minimize_callable(
        problem=problem,
        num_trials=20,
        method_name="scipy",
        replication_index=2,
    )
    result = minimize(
        fun=objective,
        x0=np.zeros(2),
        bounds=[(-5, 10), (0, 15)],
        options={"maxiter": 3},
        method="Nelder-Mead",
    )
    self.assertTrue(result.fun < 0)  # maximization problem
    # Every objective evaluation should correspond to exactly one trial
    # and one data row on the experiment.
    self.assertEqual(len(experiment.trials), result.nfev)
    self.assertEqual(len(experiment.fetch_data().df), result.nfev)
    self.assertEqual(experiment.name, "scipy_on_Branin__v2")
    # A subsequent unbounded run of minimize is expected to raise.
    with self.assertRaises(ValueError):
        minimize(
            fun=objective,
            x0=np.zeros(2),
            bounds=[(-5, 10), (0, 15)],
            method="Nelder-Mead",
        )
def test_transform_optimization_config_with_relative_constraints(self):
    """Relative outcome constraints pass through the Relativize transform with
    the same bound but the relative flag cleared."""
    transform = Relativize(
        search_space=None,
        observation_features=[],
        observation_data=[],
        modelbridge=self.model,
    )
    opt_config = get_branin_optimization_config()
    constraint = OutcomeConstraint(
        metric=BraninMetric("b2", ["x2", "x1"]),
        op=ComparisonOp.GEQ,
        bound=-200.0,
        relative=True,
    )
    opt_config.outcome_constraints = [constraint]
    transformed = transform.transform_optimization_config(
        optimization_config=opt_config,
        modelbridge=None,
        fixed_features=Mock(),
    )
    self.assertEqual(transformed.objective, opt_config.objective)
    self.assertEqual(
        transformed.outcome_constraints[0].bound,
        opt_config.outcome_constraints[0].bound,
    )
    # After relativization the constraint is no longer marked relative.
    self.assertFalse(transformed.outcome_constraints[0].relative)
def test_basic(self):
    """Run through the benchmarking loop."""
    named_branin_problem = BenchmarkProblem(
        name="Branin",
        search_space=get_branin_search_space(),
        optimization_config=get_branin_optimization_config(),
    )
    unnamed_problem = BenchmarkProblem(
        search_space=get_branin_search_space(),
        optimization_config=get_optimization_config(),
    )
    sobol_strategy = GenerationStrategy(
        steps=[GenerationStep(model=Models.SOBOL, num_trials=-1)]
    )
    results = full_benchmark_run(
        problem_groups={
            self.CATEGORY_NAME: [
                SimpleBenchmarkProblem(branin, noise_sd=0.4),
                named_branin_problem,
                unnamed_problem,
            ]
        },
        method_groups={self.CATEGORY_NAME: [sobol_strategy]},
        num_replications=3,
        num_trials=5,
        # Just to have it be more telling if something is broken.
        raise_all_exceptions=True,
        batch_size=[[1], [3], [1]],
    )
    self.assertEqual(len(results["Branin"]["Sobol"]), 3)
def get_branin_benchmark_problem() -> BenchmarkProblem:
    """Build a Branin benchmark problem with its known optimal value and
    suggested-point evaluation disabled."""
    search_space = get_branin_search_space()
    opt_config = get_branin_optimization_config()
    return BenchmarkProblem(
        search_space=search_space,
        optimization_config=opt_config,
        optimal_value=branin.fmin,
        evaluate_suggested=False,
    )
def test_enum_sobol_GPEI(self):
    """Tests Sobol and GPEI instantiation through the Models enum."""
    exp = get_branin_experiment()
    # The factory should produce a valid Sobol modelbridge.
    sobol = Models.SOBOL(search_space=exp.search_space)
    self.assertIsInstance(sobol, RandomModelBridge)
    for _ in range(5):
        sobol_run = sobol.gen(n=1)
        self.assertEqual(sobol_run._model_key, "Sobol")
        exp.new_batch_trial().add_generator_run(sobol_run).run()
    # The factory should produce a valid GP+EI modelbridge.
    exp.optimization_config = get_branin_optimization_config()
    gpei = Models.GPEI(experiment=exp, data=exp.fetch_data())
    self.assertIsInstance(gpei, TorchModelBridge)
    self.assertEqual(gpei._model_key, "GPEI")
    botorch_defaults = "ax.models.torch.botorch_defaults"
    # Check that the callable kwargs and the torch kwargs were recorded.
    callable_defaults = {
        "acqf_constructor": "get_NEI",
        "acqf_optimizer": "scipy_optimizer",
        "model_constructor": "get_and_fit_model",
        "model_predictor": "predict_from_model",
    }
    expected_model_kwargs = {
        kwarg: {
            "is_callable_as_path": True,
            "value": f"{botorch_defaults}.{fn_name}",
        }
        for kwarg, fn_name in callable_defaults.items()
    }
    expected_model_kwargs.update(
        {
            "refit_on_cv": False,
            "refit_on_update": True,
            "warm_start_refitting": True,
        }
    )
    self.assertEqual(gpei._model_kwargs, expected_model_kwargs)
    self.assertEqual(
        gpei._bridge_kwargs,
        {
            "transform_configs": None,
            "torch_dtype": torch_float64,
            "torch_device": torch_device(type="cpu"),
            "status_quo_name": None,
            "status_quo_features": None,
            "optimization_config": None,
            "transforms": Cont_X_trans + Y_trans,
        },
    )
    # Passing an explicit search space must also produce a valid bridge.
    gpei = Models.GPEI(
        experiment=exp, data=exp.fetch_data(), search_space=exp.search_space
    )
    self.assertIsInstance(gpei, TorchModelBridge)
def test_transform_optimization_config_without_constraints(self):
    """A config with no outcome constraints keeps its objective after the
    Relativize transform."""
    transform = Relativize(
        search_space=None,
        observation_features=[],
        observation_data=[],
        modelbridge=self.model,
    )
    opt_config = get_branin_optimization_config()
    transformed = transform.transform_optimization_config(
        optimization_config=opt_config,
        modelbridge=None,
        fixed_features=Mock(),
    )
    self.assertEqual(transformed.objective, opt_config.objective)
def test_enum_sobol_GPEI(self):
    """Tests Sobol instantiation through the Models enum."""
    exp = get_branin_experiment()
    # The factory should produce a valid Sobol modelbridge.
    sobol = Models.SOBOL(search_space=exp.search_space)
    self.assertIsInstance(sobol, RandomModelBridge)
    for _ in range(5):
        sobol_run = sobol.gen(n=1)
        self.assertEqual(sobol_run._model_key, "Sobol")
        exp.new_batch_trial().add_generator_run(sobol_run).run()
    # The factory should produce a valid GP+EI modelbridge, both with and
    # without an explicitly-provided search space.
    exp.optimization_config = get_branin_optimization_config()
    gpei = Models.GPEI(experiment=exp, data=exp.fetch_data())
    self.assertIsInstance(gpei, TorchModelBridge)
    gpei = Models.GPEI(
        experiment=exp, data=exp.fetch_data(), search_space=exp.search_space
    )
    self.assertIsInstance(gpei, TorchModelBridge)
def test_sobol_GPEI(self):
    """Tests sobol + GPEI instantiation."""
    exp = get_branin_experiment()
    # The factory should produce a valid Sobol modelbridge.
    sobol = get_sobol(search_space=exp.search_space)
    self.assertIsInstance(sobol, RandomModelBridge)
    for _ in range(5):
        exp.new_batch_trial().add_generator_run(sobol.gen(n=1)).run()
    # The factory should produce a valid GP+EI modelbridge, both with and
    # without an explicitly-provided search space.
    exp.optimization_config = get_branin_optimization_config()
    gpei = get_GPEI(experiment=exp, data=exp.fetch_data())
    self.assertIsInstance(gpei, TorchModelBridge)
    gpei = get_GPEI(
        experiment=exp, data=exp.fetch_data(), search_space=exp.search_space
    )
    self.assertIsInstance(gpei, TorchModelBridge)
    # The generic botorch factory should also yield a torch modelbridge.
    botorch = get_botorch(experiment=exp, data=exp.fetch_data())
    self.assertIsInstance(botorch, TorchModelBridge)
def test_transform_optimization_config_with_non_relative_constraints(self):
    """Relativize rejects configs containing non-relative outcome constraints."""
    transform = Relativize(
        search_space=None,
        observation_features=[],
        observation_data=[],
        modelbridge=self.model,
    )
    opt_config = get_branin_optimization_config()
    opt_config.outcome_constraints = [
        OutcomeConstraint(
            metric=BraninMetric("b2", ["x2", "x1"]),
            op=ComparisonOp.GEQ,
            bound=-200.0,
            relative=False,
        )
    ]
    with self.assertRaisesRegex(ValueError, "All constraints must be relative"):
        transform.transform_optimization_config(
            optimization_config=opt_config,
            modelbridge=None,
            fixed_features=Mock(),
        )