def test_choose_generation_strategy(self):
    sobol_gpei = choose_generation_strategy(search_space=get_branin_search_space())
    self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
    self.assertEqual(sobol_gpei._steps[1].model.value, "GPEI")
    sobol = choose_generation_strategy(search_space=get_factorial_search_space())
    self.assertEqual(sobol._steps[0].model.value, "Sobol")
    self.assertEqual(len(sobol._steps), 1)
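# Usage sketch (illustrative, mirroring the test patterns in this file): a
# strategy returned by `choose_generation_strategy` plugs directly into the
# generate/evaluate loop. `get_branin_experiment` is the same test helper used
# elsewhere below.
from ax.modelbridge.dispatch_utils import choose_generation_strategy
from ax.utils.testing.core_stubs import get_branin_experiment

experiment = get_branin_experiment()
gs = choose_generation_strategy(search_space=experiment.search_space)
for _ in range(5):
    gr = gs.gen(experiment=experiment)  # Sobol first; GPEI once data accrues.
    trial = experiment.new_trial(generator_run=gr)
    trial.mark_running(no_runner_required=True)
    # ... run/evaluate the trial and attach data to the experiment, then:
    trial.mark_completed()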
def test_max_parallelism_adjustments(self):
    # No adjustment.
    sobol_gpei = choose_generation_strategy(search_space=get_branin_search_space())
    self.assertIsNone(sobol_gpei._steps[0].max_parallelism)
    self.assertEqual(
        sobol_gpei._steps[1].max_parallelism, DEFAULT_BAYESIAN_PARALLELISM
    )
    # Impose a cap of 1 on max parallelism for all steps.
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(), max_parallelism_cap=1
    )
    self.assertEqual(sobol_gpei._steps[0].max_parallelism, 1)
    self.assertEqual(sobol_gpei._steps[1].max_parallelism, 1)
    # Disable enforcing max parallelism for all steps.
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(), max_parallelism_override=-1
    )
    self.assertIsNone(sobol_gpei._steps[0].max_parallelism)
    self.assertIsNone(sobol_gpei._steps[1].max_parallelism)
    # Override max parallelism for all steps.
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(), max_parallelism_override=10
    )
    self.assertEqual(sobol_gpei._steps[0].max_parallelism, 10)
    self.assertEqual(sobol_gpei._steps[1].max_parallelism, 10)
def test_num_trials(self):
    ss = get_discrete_search_space()
    # With a budget lower than exhaustive, BayesOpt is used.
    sobol_gpei = choose_generation_strategy(search_space=ss, num_trials=11)
    self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
    self.assertEqual(sobol_gpei._steps[1].model.value, "BO_MIXED")
    # With an exhaustive budget, Sobol alone is used.
    sobol = choose_generation_strategy(search_space=ss, num_trials=12)
    self.assertEqual(sobol._steps[0].model.value, "Sobol")
    self.assertEqual(len(sobol._steps), 1)
def test_enforce_sequential_optimization(self):
    sobol_gpei = choose_generation_strategy(search_space=get_branin_search_space())
    self.assertEqual(sobol_gpei._steps[0].num_arms, 5)
    self.assertTrue(sobol_gpei._steps[0].enforce_num_arms)
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(),
        enforce_sequential_optimization=False,
    )
    self.assertEqual(sobol_gpei._steps[0].num_arms, 5)
    self.assertFalse(sobol_gpei._steps[0].enforce_num_arms)
def test_max_parallelism_adjustments(self):
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(), max_parallelism_cap=1
    )
    self.assertEqual(sobol_gpei._steps[0].max_parallelism, 1)
    self.assertEqual(sobol_gpei._steps[1].max_parallelism, 1)
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(), no_max_parallelism=True
    )
    self.assertIsNone(sobol_gpei._steps[0].max_parallelism)
    self.assertIsNone(sobol_gpei._steps[1].max_parallelism)
def test_enforce_sequential_optimization(self):
    sobol_gpei = choose_generation_strategy(search_space=get_branin_search_space())
    self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
    self.assertTrue(sobol_gpei._steps[0].enforce_num_trials)
    self.assertIsNotNone(sobol_gpei._steps[1].max_parallelism)
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(),
        enforce_sequential_optimization=False,
    )
    self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
    self.assertFalse(sobol_gpei._steps[0].enforce_num_trials)
    self.assertIsNone(sobol_gpei._steps[1].max_parallelism)
def __init__(
    self,
    experiment: Experiment,
    total_trials: int = 20,
    arms_per_trial: int = 1,
    random_seed: Optional[int] = None,
    wait_time: int = 0,
    run_async: bool = False,  # TODO[Lena],
    generation_strategy: Optional[GenerationStrategy] = None,
) -> None:
    assert not run_async, "OptimizationLoop does not yet support async."
    self.wait_time = wait_time
    self.total_trials = total_trials
    self.arms_per_trial = arms_per_trial
    self.random_seed = random_seed
    assert len(experiment.trials) == 0, (
        "Optimization Loop should not be initialized with an experiment "
        "that has trials already."
    )
    self.experiment = experiment
    if generation_strategy is None:
        self.generation_strategy = choose_generation_strategy(
            search_space=experiment.search_space,
            use_batch_trials=self.arms_per_trial > 1,
            random_seed=self.random_seed,
        )
    else:
        self.generation_strategy = generation_strategy
    self.current_trial = 0
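# Usage sketch (illustrative; assumes this is Ax's managed-loop
# `OptimizationLoop`, whose `with_evaluation_function`, `full_run`, and
# `get_best_point` entry points construct and drive the loop above; the
# evaluation function and metric name are hypothetical).
from ax.service.managed_loop import OptimizationLoop


def evaluate(parameterization, weight=None):
    # Hypothetical objective: a simple quadratic, reported as (mean, sem).
    x = parameterization["x"]
    return {"objective": ((x - 0.3) ** 2, 0.0)}


loop = OptimizationLoop.with_evaluation_function(
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    evaluation_function=evaluate,
    objective_name="objective",
    minimize=True,
    total_trials=10,
)
loop.full_run()
best_parameters, values = loop.get_best_point()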
def test_set_should_deduplicate(self):
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(),
        use_batch_trials=True,
        num_initialization_trials=3,
    )
    self.assertListEqual(
        [s.should_deduplicate for s in sobol_gpei._steps], [False] * 2
    )
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(),
        use_batch_trials=True,
        num_initialization_trials=3,
        should_deduplicate=True,
    )
    self.assertListEqual(
        [s.should_deduplicate for s in sobol_gpei._steps], [True] * 2
    )
def setUp(self):
    self.branin_experiment = get_branin_experiment()
    self.branin_experiment._properties[
        Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF
    ] = True
    self.branin_experiment_no_impl_metrics = Experiment(
        search_space=get_branin_search_space(),
        optimization_config=OptimizationConfig(
            objective=Objective(metric=Metric(name="branin"))
        ),
    )
    self.sobol_GPEI_GS = choose_generation_strategy(
        search_space=get_branin_search_space()
    )
    # Contrived GS to ensure that `DataRequiredError` is properly handled in
    # the scheduler. This error is raised when not enough trials have been
    # observed to proceed to the next generation step.
    self.two_sobol_steps_GS = GenerationStrategy(
        steps=[
            GenerationStep(
                model=Models.SOBOL,
                num_trials=5,
                min_trials_observed=3,
                max_parallelism=2,
            ),
            GenerationStep(model=Models.SOBOL, num_trials=-1, max_parallelism=3),
        ]
    )
    # GS to force the scheduler to poll completed trials after each ran trial.
    self.sobol_GS_no_parallelism = GenerationStrategy(
        steps=[GenerationStep(model=Models.SOBOL, num_trials=-1, max_parallelism=1)]
    )
def test_setting_random_seed(self):
    sobol = choose_generation_strategy(
        search_space=get_factorial_search_space(), random_seed=9
    )
    sobol.gen(experiment=get_experiment())
    # `sobol.model` is a model bridge; the underlying Sobol engine sits at
    # `sobol.model.model`.
    self.assertEqual(sobol.model.model.seed, 9)
def load_experiment_from_database(self, experiment_name: str) -> None:
    """Load an existing experiment from database using the `DBSettings`
    passed to this `AxClient` on instantiation.

    Args:
        experiment_name: Name of the experiment.
    """
    if not self.db_settings:
        raise ValueError(  # pragma: no cover
            "Cannot load an experiment in the absence of the DB settings. "
            "Please initialize `AxClient` with DBSettings."
        )
    experiment, generation_strategy = load_experiment_and_generation_strategy(
        experiment_name=experiment_name, db_settings=self.db_settings
    )
    self._experiment = experiment
    logger.info(f"Loaded {experiment}.")
    if generation_strategy is None:  # pragma: no cover
        self._generation_strategy = choose_generation_strategy(
            # pyre-fixme[16]: `Optional` has no attribute `search_space`.
            search_space=self._experiment.search_space,
            enforce_sequential_optimization=self._enforce_sequential_optimization,
            random_seed=self._random_seed,
        )
    else:
        self._generation_strategy = generation_strategy
        logger.info(
            f"Using generation strategy associated with the loaded experiment:"
            f" {generation_strategy}."
        )
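# Usage sketch (illustrative; the experiment name and `db_settings` are
# placeholders): resuming a stored experiment, after which trial generation
# continues from the loaded (or re-chosen) generation strategy.
from ax.service.ax_client import AxClient

ax_client = AxClient(db_settings=db_settings)  # a pre-built `DBSettings`
ax_client.load_experiment_from_database(experiment_name="my_experiment")
parameters, trial_index = ax_client.get_next_trial()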
def testUpdateGenerationStrategyIncrementally(self):
    experiment = get_branin_experiment()
    generation_strategy = choose_generation_strategy(experiment.search_space)
    save_experiment(experiment=experiment)
    save_generation_strategy(generation_strategy=generation_strategy)
    # Add generator runs, save, reload.
    generator_runs = []
    for i in range(7):
        data = get_branin_data() if i > 0 else None
        gr = generation_strategy.gen(experiment, data=data)
        generator_runs.append(gr)
        trial = experiment.new_trial(generator_run=gr).mark_running(
            no_runner_required=True
        )
        trial.mark_completed()
    save_experiment(experiment=experiment)
    update_generation_strategy(
        generation_strategy=generation_strategy, generator_runs=generator_runs
    )
    loaded_generation_strategy = load_generation_strategy_by_experiment_name(
        experiment_name=experiment.name
    )
    self.assertEqual(generation_strategy._curr.index, 1)
    self.assertEqual(loaded_generation_strategy._curr.index, 1)
    self.assertEqual(len(loaded_generation_strategy._generator_runs), 7)
def test_fixed_num_initialization_trials(self):
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(),
        use_batch_trials=True,
        num_initialization_trials=3,
    )
    self.assertEqual(sobol_gpei._steps[0].num_trials, 3)
def test_winsorization(self):
    winsorized = choose_generation_strategy(
        search_space=get_branin_search_space(),
        winsorization_config=WinsorizationConfig(upper_quantile_margin=2),
    )
    self.assertIn(
        "Winsorize", winsorized._steps[1].model_kwargs.get("transform_configs")
    )
def test_winsorization(self):
    winsorized = choose_generation_strategy(
        search_space=get_branin_search_space(),
        winsorize_botorch_model=True,
        winsorization_limits=(None, 0.2),
    )
    self.assertIn(
        "Winsorize", winsorized._steps[1].model_kwargs.get("transform_configs")
    )
def get_generation_strategy(with_experiment: bool = False) -> GenerationStrategy:
    gs = choose_generation_strategy(search_space=get_search_space())
    if with_experiment:
        gs._experiment = get_experiment()
    fake_func = get_experiment
    # pyre-ignore[16]: testing hack to test serialization of callable kwargs
    # in generation steps.
    gs._steps[0].model_kwargs["model_constructor"] = fake_func
    return gs
def test_get_standard_plots(self):
    exp = get_branin_experiment()
    self.assertEqual(
        len(
            get_standard_plots(
                experiment=exp, model=get_generation_strategy().model
            )
        ),
        0,
    )
    exp = get_branin_experiment(with_batch=True, minimize=True)
    exp.trials[0].run()
    gs = choose_generation_strategy(search_space=exp.search_space)
    gs._model = Models.BOTORCH(experiment=exp, data=exp.fetch_data())
    plots = get_standard_plots(experiment=exp, model=gs.model)
    self.assertEqual(len(plots), 5)
    self.assertTrue(all(isinstance(plot, go.Figure) for plot in plots))
    exp = get_branin_experiment_with_multi_objective(with_batch=True)
    exp.trials[0].run()
    gs = choose_generation_strategy(
        search_space=exp.search_space, optimization_config=exp.optimization_config
    )
    gs._model = Models.BOTORCH(experiment=exp, data=exp.fetch_data())
    plots = get_standard_plots(experiment=exp, model=gs.model)
    self.assertEqual(len(plots), 6)
def test_choose_generation_strategy(self):
    with self.subTest("GPEI"):
        sobol_gpei = choose_generation_strategy(
            search_space=get_branin_search_space()
        )
        self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
        self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
        self.assertEqual(sobol_gpei._steps[1].model.value, "GPEI")
    with self.subTest("MOO"):
        sobol_gpei = choose_generation_strategy(
            search_space=get_branin_search_space(),
            optimization_config=MultiObjectiveOptimizationConfig(
                objective=MultiObjective(objectives=[])
            ),
        )
        self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
        self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
        self.assertEqual(sobol_gpei._steps[1].model.value, "MOO")
    with self.subTest("Sobol (we can try every option)"):
        sobol = choose_generation_strategy(
            search_space=get_factorial_search_space(), num_trials=1000
        )
        self.assertEqual(sobol._steps[0].model.value, "Sobol")
        self.assertEqual(len(sobol._steps), 1)
    with self.subTest("Sobol (because of too many categories)"):
        sobol_large = choose_generation_strategy(
            search_space=get_large_factorial_search_space()
        )
        self.assertEqual(sobol_large._steps[0].model.value, "Sobol")
        self.assertEqual(len(sobol_large._steps), 1)
    with self.subTest("GPEI-Batched"):
        sobol_gpei_batched = choose_generation_strategy(
            search_space=get_branin_search_space(), use_batch_trials=3
        )
        self.assertEqual(sobol_gpei_batched._steps[0].num_trials, 1)
    with self.subTest("BO_MIXED (purely categorical)"):
        bo_mixed = choose_generation_strategy(
            search_space=get_factorial_search_space()
        )
        self.assertEqual(bo_mixed._steps[0].model.value, "Sobol")
        self.assertEqual(bo_mixed._steps[0].num_trials, 5)
        self.assertEqual(bo_mixed._steps[1].model.value, "BO_MIXED")
    with self.subTest("BO_MIXED (mixed search space)"):
        bo_mixed_2 = choose_generation_strategy(
            search_space=get_branin_search_space(with_choice_parameter=True)
        )
        self.assertEqual(bo_mixed_2._steps[0].model.value, "Sobol")
        self.assertEqual(bo_mixed_2._steps[0].num_trials, 5)
        self.assertEqual(bo_mixed_2._steps[1].model.value, "BO_MIXED")
def _set_generation_strategy(
    self, choose_generation_strategy_kwargs: Optional[Dict[str, Any]] = None
) -> None:
    """Selects the generation strategy and applies specified dispatch kwargs,
    if any.
    """
    choose_generation_strategy_kwargs = choose_generation_strategy_kwargs or {}
    random_seed = choose_generation_strategy_kwargs.pop(
        "random_seed", self._random_seed
    )
    enforce_sequential_optimization = choose_generation_strategy_kwargs.pop(
        "enforce_sequential_optimization", self._enforce_sequential_optimization
    )
    if self._generation_strategy is None:
        self._generation_strategy = choose_generation_strategy(
            search_space=self.experiment.search_space,
            enforce_sequential_optimization=enforce_sequential_optimization,
            random_seed=random_seed,
            **choose_generation_strategy_kwargs,
        )
def __init__(self, serialized_filepath=None):
    # Give ourselves the ability to resume this experiment later.
    self.serialized_filepath = serialized_filepath
    if serialized_filepath is not None and os.path.exists(serialized_filepath):
        with open(serialized_filepath, "r") as f:
            serialized = json.load(f)
        self.initialize_from_json_snapshot(serialized)
    else:
        # Create a CoreAxClient.
        search_space = SearchSpace(
            parameters=[
                RangeParameter("x", ParameterType.FLOAT, lower=12.2, upper=602.2),
            ]
        )
        optimization_config = OptimizationConfig(
            objective=MultiObjective(
                metrics=[
                    # Currently MultiObjective doesn't work with
                    # lower_is_better=True.
                    # https://github.com/facebook/Ax/issues/289
                    Metric(name="neg_distance17", lower_is_better=False),
                    Metric(name="neg_distance33", lower_is_better=False),
                ],
                minimize=False,
            ),
        )
        generation_strategy = choose_generation_strategy(
            search_space,
            enforce_sequential_optimization=False,
            no_max_parallelism=True,
            num_trials=NUM_TRIALS,
            num_initialization_trials=NUM_RANDOM,
        )
        super().__init__(
            experiment=Experiment(
                search_space=search_space,
                optimization_config=optimization_config,
            ),
            generation_strategy=generation_strategy,
            verbose=True,
        )
search_space = SearchSpace(
    parameters=[
        RangeParameter("x", ParameterType.FLOAT, lower=12.2, upper=602.2),
    ]
)
optimization_config = OptimizationConfig(
    objective=MultiObjective(
        metrics=[
            # Currently MultiObjective doesn't work with lower_is_better=True.
            # https://github.com/facebook/Ax/issues/289
            Metric(name="neg_distance17", lower_is_better=False),
            Metric(name="neg_distance33", lower_is_better=False),
        ],
        minimize=False,
    ),
)
generation_strategy = choose_generation_strategy(
    search_space, num_trials=NUM_TRIALS, num_initialization_trials=NUM_RANDOM
)
ax_client = CoreAxClient(
    experiment=Experiment(
        search_space=search_space, optimization_config=optimization_config
    ),
    generation_strategy=generation_strategy,
)
for _ in range(NUM_TRIALS):
    parameters, trial_index = ax_client.get_next_trial(
        model_gen_options={
            "acquisition_function_kwargs": {
                "random_scalarization": True,
            },
        }
    )
    ax_client.complete_trial(
        trial_index=trial_index,
        raw_data={
            "neg_distance17": (-example_f17(parameters["x"]), None),
def get_generation_strategy() -> GenerationStrategy:
    return choose_generation_strategy(search_space=get_search_space())
def test_choose_generation_strategy(self):
    with self.subTest("GPEI"):
        sobol_gpei = choose_generation_strategy(
            search_space=get_branin_search_space()
        )
        self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
        self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
        self.assertEqual(sobol_gpei._steps[1].model.value, "GPEI")
        self.assertIsNone(sobol_gpei._steps[1].model_kwargs)
        sobol_gpei = choose_generation_strategy(
            search_space=get_branin_search_space(), verbose=True
        )
        self.assertIsNone(sobol_gpei._steps[1].model_kwargs)
    with self.subTest("MOO"):
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=MultiObjective(objectives=[])
        )
        sobol_gpei = choose_generation_strategy(
            search_space=get_branin_search_space(),
            optimization_config=optimization_config,
        )
        self.assertEqual(sobol_gpei._steps[0].model.value, "Sobol")
        self.assertEqual(sobol_gpei._steps[0].num_trials, 5)
        self.assertEqual(sobol_gpei._steps[1].model.value, "MOO")
        model_kwargs = sobol_gpei._steps[1].model_kwargs
        self.assertEqual(
            list(model_kwargs.keys()), ["transforms", "transform_configs"]
        )
        self.assertGreater(len(model_kwargs["transforms"]), 0)
        transform_config_dict = {
            "Winsorize": {"optimization_config": optimization_config}
        }
        self.assertEqual(model_kwargs["transform_configs"], transform_config_dict)
    with self.subTest("Sobol (we can try every option)"):
        sobol = choose_generation_strategy(
            search_space=get_factorial_search_space(), num_trials=1000
        )
        self.assertEqual(sobol._steps[0].model.value, "Sobol")
        self.assertEqual(len(sobol._steps), 1)
    with self.subTest("Sobol (because of too many categories)"):
        sobol_large = choose_generation_strategy(
            search_space=get_large_factorial_search_space(), verbose=True
        )
        self.assertEqual(sobol_large._steps[0].model.value, "Sobol")
        self.assertEqual(len(sobol_large._steps), 1)
    with self.subTest("GPEI-Batched"):
        sobol_gpei_batched = choose_generation_strategy(
            search_space=get_branin_search_space(), use_batch_trials=3
        )
        self.assertEqual(sobol_gpei_batched._steps[0].num_trials, 1)
    with self.subTest("BO_MIXED (purely categorical)"):
        bo_mixed = choose_generation_strategy(
            search_space=get_factorial_search_space()
        )
        self.assertEqual(bo_mixed._steps[0].model.value, "Sobol")
        self.assertEqual(bo_mixed._steps[0].num_trials, 6)
        self.assertEqual(bo_mixed._steps[1].model.value, "BO_MIXED")
        self.assertIsNone(bo_mixed._steps[1].model_kwargs)
    with self.subTest("BO_MIXED (mixed search space)"):
        ss = get_branin_search_space(with_choice_parameter=True)
        ss.parameters["x2"]._is_ordered = False
        bo_mixed_2 = choose_generation_strategy(search_space=ss)
        self.assertEqual(bo_mixed_2._steps[0].model.value, "Sobol")
        self.assertEqual(bo_mixed_2._steps[0].num_trials, 5)
        self.assertEqual(bo_mixed_2._steps[1].model.value, "BO_MIXED")
        self.assertIsNone(bo_mixed_2._steps[1].model_kwargs)
    with self.subTest("BO_MIXED (mixed multi-objective optimization)"):
        search_space = get_branin_search_space(with_choice_parameter=True)
        search_space.parameters["x2"]._is_ordered = False
        optimization_config = MultiObjectiveOptimizationConfig(
            objective=MultiObjective(objectives=[])
        )
        moo_mixed = choose_generation_strategy(
            search_space=search_space, optimization_config=optimization_config
        )
        self.assertEqual(moo_mixed._steps[0].model.value, "Sobol")
        self.assertEqual(moo_mixed._steps[0].num_trials, 5)
        self.assertEqual(moo_mixed._steps[1].model.value, "BO_MIXED")
        model_kwargs = moo_mixed._steps[1].model_kwargs
        self.assertEqual(
            list(model_kwargs.keys()), ["transforms", "transform_configs"]
        )
        self.assertGreater(len(model_kwargs["transforms"]), 0)
        transform_config_dict = {
            "Winsorize": {"optimization_config": optimization_config}
        }
        self.assertEqual(model_kwargs["transform_configs"], transform_config_dict)
    with self.subTest("SAASBO"):
        sobol_fullybayesian = choose_generation_strategy(
            search_space=get_branin_search_space(),
            use_batch_trials=True,
            num_initialization_trials=3,
            use_saasbo=True,
        )
        self.assertEqual(sobol_fullybayesian._steps[0].model.value, "Sobol")
        self.assertEqual(sobol_fullybayesian._steps[0].num_trials, 3)
        self.assertEqual(
            sobol_fullybayesian._steps[1].model.value, "FullyBayesian"
        )
        self.assertTrue(sobol_fullybayesian._steps[1].model_kwargs["verbose"])
    with self.subTest("SAASBO MOO"):
        sobol_fullybayesianmoo = choose_generation_strategy(
            search_space=get_branin_search_space(),
            use_batch_trials=True,
            num_initialization_trials=3,
            use_saasbo=True,
            optimization_config=MultiObjectiveOptimizationConfig(
                objective=MultiObjective(objectives=[])
            ),
        )
        self.assertEqual(sobol_fullybayesianmoo._steps[0].model.value, "Sobol")
        self.assertEqual(sobol_fullybayesianmoo._steps[0].num_trials, 3)
        self.assertEqual(
            sobol_fullybayesianmoo._steps[1].model.value, "FullyBayesianMOO"
        )
        self.assertTrue(sobol_fullybayesianmoo._steps[1].model_kwargs["verbose"])
    with self.subTest("SAASBO (large ordinal search space)"):
        sobol_fullybayesian_large = choose_generation_strategy(
            search_space=get_large_ordinal_search_space(
                n_ordinal_choice_parameters=5, n_continuous_range_parameters=10
            ),
            use_saasbo=True,
        )
        self.assertEqual(sobol_fullybayesian_large._steps[0].model.value, "Sobol")
        self.assertEqual(sobol_fullybayesian_large._steps[0].num_trials, 30)
        self.assertEqual(
            sobol_fullybayesian_large._steps[1].model.value, "FullyBayesian"
        )
        self.assertTrue(
            sobol_fullybayesian_large._steps[1].model_kwargs["verbose"]
        )
    with self.subTest("num_initialization_trials"):
        ss = get_large_factorial_search_space()
        for _, param in ss.parameters.items():
            param._is_ordered = True
        # 2 * len(ss.parameters) init trials are performed if num_trials is
        # large.
        gs_12_init_trials = choose_generation_strategy(
            search_space=ss, num_trials=100
        )
        self.assertEqual(gs_12_init_trials._steps[0].model.value, "Sobol")
        self.assertEqual(gs_12_init_trials._steps[0].num_trials, 12)
        self.assertEqual(gs_12_init_trials._steps[1].model.value, "GPEI")
        # At least 5 initialization trials are performed.
        gs_5_init_trials = choose_generation_strategy(search_space=ss, num_trials=0)
        self.assertEqual(gs_5_init_trials._steps[0].model.value, "Sobol")
        self.assertEqual(gs_5_init_trials._steps[0].num_trials, 5)
        self.assertEqual(gs_5_init_trials._steps[1].model.value, "GPEI")
        # Avoid spending >20% of the budget on initialization trials if there
        # are more than 5 of them.
        gs_6_init_trials = choose_generation_strategy(
            search_space=ss, num_trials=30
        )
        self.assertEqual(gs_6_init_trials._steps[0].model.value, "Sobol")
        self.assertEqual(gs_6_init_trials._steps[0].num_trials, 6)
        self.assertEqual(gs_6_init_trials._steps[1].model.value, "GPEI")
def create_experiment(
    self,
    parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],
    name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    overwrite_existing_experiment: bool = False,
    experiment_type: Optional[str] = None,
) -> None:
    """Create a new experiment and save it if DBSettings available.

    Args:
        parameters: List of dictionaries representing parameters in the
            experiment search space. Required elements in the dictionaries
            are: "name" (name of this parameter, string), "type" (type of
            the parameter: "range", "fixed", or "choice", string), and
            "bounds" for range parameters (list of two values, lower bound
            first), "values" for choice parameters (list of values), and
            "value" for fixed parameters (single value).
        name: Name of the experiment to be created.
        objective_name: Name of the metric used as objective in this
            experiment. This metric must be present in `raw_data` argument
            to `complete_trial`.
        minimize: Whether this experiment represents a minimization problem.
        parameter_constraints: List of string representations of parameter
            constraints, such as "x3 >= x4" or "x3 + x4 + x5 >= 2". For sum
            constraints, any number of arguments is accepted, and acceptable
            operators are "<=" and ">=".
        outcome_constraints: List of string representations of outcome
            constraints of form "metric_name >= bound", like "m1 <= 3."
        status_quo: Parameterization of the current state of the system.
            If set, this will be added to each trial to be evaluated alongside
            test configurations.
        overwrite_existing_experiment: If `DBSettings` were provided on
            instantiation and the experiment being created has the same name
            as some experiment already stored, whether to overwrite the
            existing experiment. Defaults to False.
    """
    if self.db_settings and not name:
        raise ValueError(  # pragma: no cover
            "Must give the experiment a name if `db_settings` is not None."
        )
    if self.db_settings:
        existing = None
        try:
            existing, _ = load_experiment_and_generation_strategy(
                experiment_name=not_none(name), db_settings=self.db_settings
            )
        except ValueError:
            # Experiment does not exist, nothing to do.
            pass
        if existing and overwrite_existing_experiment:
            logger.info(f"Overwriting existing experiment {name}.")
        elif existing:
            raise ValueError(
                f"Experiment {name} exists; set the `overwrite_existing_"
                "experiment` to `True` to overwrite with new experiment "
                "or use `ax_client.load_experiment_from_database` to "
                "continue an existing experiment."
            )
    self._experiment = make_experiment(
        name=name,
        parameters=parameters,
        objective_name=objective_name,
        minimize=minimize,
        parameter_constraints=parameter_constraints,
        outcome_constraints=outcome_constraints,
        status_quo=status_quo,
        experiment_type=experiment_type,
    )
    if self._generation_strategy is None:
        self._generation_strategy = choose_generation_strategy(
            search_space=self._experiment.search_space,
            enforce_sequential_optimization=self._enforce_sequential_optimization,
            random_seed=self._random_seed,
        )
    self._save_experiment_and_generation_strategy_to_db_if_possible(
        overwrite_existing_experiment=True
    )
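# Usage sketch (illustrative names): the `parameters` dictionary format the
# docstring above describes, covering "range", "choice", and "fixed" types;
# a generation strategy is then chosen automatically for the search space.
ax_client = AxClient()
ax_client.create_experiment(
    name="tuning_experiment",
    parameters=[
        {"name": "lr", "type": "range", "bounds": [1e-5, 0.3], "log_scale": True},
        {"name": "optimizer", "type": "choice", "values": ["adam", "sgd"]},
        {"name": "momentum", "type": "fixed", "value": 0.9},
    ],
    objective_name="accuracy",
    minimize=False,
)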
def test_setting_experiment_attribute(self):
    exp = get_experiment()
    gs = choose_generation_strategy(search_space=exp.search_space, experiment=exp)
    self.assertEqual(gs._experiment, exp)
def get_generation_strategy(with_experiment: bool = False) -> GenerationStrategy:
    gs = choose_generation_strategy(search_space=get_search_space())
    if with_experiment:
        gs._experiment = get_experiment()
    return gs
def test_use_batch_trials(self):
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(), use_batch_trials=True
    )
    self.assertEqual(sobol_gpei._steps[0].num_trials, 1)
def __init__(self, serialized_filepath=None):
    self.serialized_filepath = serialized_filepath
    if serialized_filepath is not None and os.path.exists(serialized_filepath):
        with open(serialized_filepath, "r") as f:
            serialized = json.load(f)
        other = CoreAxClient.from_json_snapshot(serialized)
        self.__dict__.update(other.__dict__)
    else:
        parameters = [
            RangeParameter("num_epochs", ParameterType.INT, lower=30, upper=200),
            RangeParameter("log2_batch_size", ParameterType.INT, lower=5, upper=8),
            RangeParameter(
                "lr", ParameterType.FLOAT, lower=1e-5, upper=0.3, log_scale=True
            ),
            RangeParameter(
                "gamma_prewarmup", ParameterType.FLOAT, lower=0.5, upper=1.0
            ),
            RangeParameter(
                "gamma_warmup", ParameterType.FLOAT, lower=0.5, upper=1.0
            ),
            RangeParameter(
                "gamma_postwarmup", ParameterType.FLOAT, lower=0.5, upper=0.985
            ),
            RangeParameter(
                "reg_warmup_start_epoch", ParameterType.INT, lower=1, upper=200
            ),
            RangeParameter(
                "reg_warmup_end_epoch", ParameterType.INT, lower=1, upper=200
            ),
            # Parameter constraints are not allowed on log-scale parameters,
            # so implement the log ourselves.
            RangeParameter(
                "log_reg_factor_start",
                ParameterType.FLOAT,
                lower=math.log(1e-4),
                upper=math.log(1.0),
            ),
            RangeParameter(
                "log_reg_factor_end",
                ParameterType.FLOAT,
                lower=math.log(0.1),
                upper=math.log(10.0),
            ),
        ]
        pm = {p.name: p for p in parameters}
        search_space = SearchSpace(
            parameters=parameters,
            parameter_constraints=[
                # reg_warmup_start_epoch <= reg_warmup_end_epoch
                OrderConstraint(
                    pm["reg_warmup_start_epoch"], pm["reg_warmup_end_epoch"]
                ),
                # reg_warmup_end_epoch <= num_epochs
                OrderConstraint(pm["reg_warmup_end_epoch"], pm["num_epochs"]),
                # log_reg_factor_start <= log_reg_factor_end
                OrderConstraint(
                    pm["log_reg_factor_start"], pm["log_reg_factor_end"]
                ),
            ],
        )
        optimization_config = OptimizationConfig(
            objective=MultiObjective(
                metrics=[
                    Metric(name="neg_log_error", lower_is_better=False),
                    Metric(
                        name="neg_log_num_nonzero_weights", lower_is_better=False
                    ),
                ],
                minimize=False,
            ),
        )
        generation_strategy = choose_generation_strategy(
            search_space,
            enforce_sequential_optimization=False,
            no_max_parallelism=True,
            num_trials=NUM_TRIALS,
            num_initialization_trials=NUM_RANDOM,
        )
        super().__init__(
            experiment=Experiment(
                search_space=search_space,
                optimization_config=optimization_config,
            ),
            generation_strategy=generation_strategy,
        )
def test_max_parallelism_override(self):
    sobol_gpei = choose_generation_strategy(
        search_space=get_branin_search_space(), max_parallelism_override=10
    )
    self.assertTrue(all(s.max_parallelism == 10 for s in sobol_gpei._steps))
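# Usage sketch (illustrative, and assuming the service API's
# `AxClient.get_max_parallelism`, which reports a (num_trials, max_parallelism)
# pair per generation step): the same override can be passed through
# `choose_generation_strategy_kwargs` on experiment creation.
ax_client = AxClient()
ax_client.create_experiment(
    name="parallelism_demo",
    parameters=[{"name": "x", "type": "range", "bounds": [0.0, 1.0]}],
    objective_name="y",
    choose_generation_strategy_kwargs={"max_parallelism_override": 10},
)
print(ax_client.get_max_parallelism())  # e.g. [(5, 10), (-1, 10)]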
def create_experiment(
    self,
    parameters: List[Dict[str, Union[TParamValue, List[TParamValue]]]],
    name: Optional[str] = None,
    objective_name: Optional[str] = None,
    minimize: bool = False,
    parameter_constraints: Optional[List[str]] = None,
    outcome_constraints: Optional[List[str]] = None,
    status_quo: Optional[TParameterization] = None,
    overwrite_existing_experiment: bool = False,
    experiment_type: Optional[str] = None,
    choose_generation_strategy_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
    """Create a new experiment and save it if DBSettings available.

    Args:
        parameters: List of dictionaries representing parameters in the
            experiment search space. Required elements in the dictionaries
            are: "name" (name of this parameter, string), "type" (type of
            the parameter: "range", "fixed", or "choice", string), and
            "bounds" for range parameters (list of two values, lower bound
            first), "values" for choice parameters (list of values), and
            "value" for fixed parameters (single value).
        name: Name of the experiment to be created.
        objective_name: Name of the metric used as objective in this
            experiment. This metric must be present in `raw_data` argument
            to `complete_trial`.
        minimize: Whether this experiment represents a minimization problem.
        parameter_constraints: List of string representations of parameter
            constraints, such as "x3 >= x4" or "-x3 + 2*x4 - 3.5*x5 >= 2".
            For the latter constraints, any number of arguments is accepted,
            and acceptable operators are "<=" and ">=".
        outcome_constraints: List of string representations of outcome
            constraints of form "metric_name >= bound", like "m1 <= 3."
        status_quo: Parameterization of the current state of the system.
            If set, this will be added to each trial to be evaluated alongside
            test configurations.
        overwrite_existing_experiment: If an experiment has already been set
            on this `AxClient` instance, whether to reset it to the new one.
            If overwriting the experiment, generation strategy will be
            re-selected for the new experiment and restarted.
        choose_generation_strategy_kwargs: Keyword arguments to pass to
            `choose_generation_strategy` function which determines what
            generation strategy should be used when none was specified
            on init.
    """
    if self.db_settings and not name:
        raise ValueError(  # pragma: no cover
            "Must give the experiment a name if `db_settings` is not None."
        )
    if self.db_settings:
        existing = None
        try:
            existing, _ = load_experiment_and_generation_strategy(
                experiment_name=not_none(name), db_settings=self.db_settings
            )
        except ValueError:
            # Experiment does not exist, nothing to do.
            pass
        if existing and overwrite_existing_experiment:
            logger.info(f"Overwriting existing experiment {name}.")
        elif existing:
            raise ValueError(
                f"Experiment {name} exists; set the `overwrite_existing_"
                "experiment` to `True` to overwrite with new experiment "
                "or use `ax_client.load_experiment_from_database` to "
                "continue an existing experiment."
            )
    if self._experiment is not None:
        if overwrite_existing_experiment:
            exp_name = self.experiment._name or "untitled"
            new_exp_name = name or "untitled"
            logger.info(
                f"Overwriting existing experiment ({exp_name}) on this client "
                f"with new experiment ({new_exp_name}) and restarting the "
                "generation strategy."
            )
            self._generation_strategy = None
        else:
            raise ValueError(
                "Experiment already created for this client instance. "
                "Set the `overwrite_existing_experiment` to `True` to "
                "overwrite with new experiment."
            )
    self._experiment = make_experiment(
        name=name,
        parameters=parameters,
        objective_name=objective_name,
        minimize=minimize,
        parameter_constraints=parameter_constraints,
        outcome_constraints=outcome_constraints,
        status_quo=status_quo,
        experiment_type=experiment_type,
    )
    choose_generation_strategy_kwargs = choose_generation_strategy_kwargs or {}
    random_seed = choose_generation_strategy_kwargs.pop(
        "random_seed", self._random_seed
    )
    enforce_sequential_optimization = choose_generation_strategy_kwargs.pop(
        "enforce_sequential_optimization", self._enforce_sequential_optimization
    )
    if self._generation_strategy is None:
        self._generation_strategy = choose_generation_strategy(
            search_space=not_none(self._experiment).search_space,
            enforce_sequential_optimization=enforce_sequential_optimization,
            random_seed=random_seed,
            **choose_generation_strategy_kwargs,
        )
    self._save_experiment_and_generation_strategy_to_db_if_possible(
        overwrite_existing_experiment=True
    )