def test_write(self):
    """Test whether a reloaded scenario still holds all the necessary information.

    A subset of parameters might change when writing to disk, such as the
    paths to pcs- or instance-files, so those are checked manually.
    """

    def check_scen_eq(scen1, scen2):
        """Customized check for scenario-equality, ignoring file-paths."""
        for name in scen1._arguments:
            dest = scen1._arguments[name]['dest']
            name = dest if dest else name  # if 'dest' is None, use 'name'
            if name in ["pcs_fn", "train_inst_fn", "test_inst_fn",
                        "feature_fn", "output_dir"]:
                # Those values are allowed to change when writing to disk
                continue
            elif name == 'cs':
                # Using repr because of cs-bug
                # (https://github.com/automl/ConfigSpace/issues/25)
                self.assertEqual(repr(scen1.cs), repr(scen2.cs))
            elif name == 'feature_dict':
                self.assertEqual(len(scen1.feature_dict),
                                 len(scen2.feature_dict))
                for key in scen1.feature_dict:
                    self.assertTrue((scen1.feature_dict[key]
                                     == scen2.feature_dict[key]).all())
            else:
                self.assertEqual(getattr(scen1, name), getattr(scen2, name))

    # First check with file-paths defined
    feature_filename = 'test/test_files/scenario_test/features_multiple.txt'
    feature_filename = os.path.abspath(feature_filename)
    self.test_scenario_dict['feature_file'] = feature_filename
    scenario = Scenario(self.test_scenario_dict)
    # This injection would usually happen by the facade object!
    scenario.output_dir_for_this_run = scenario.output_dir
    scenario.write()

    def check_json_is_default_pcs(reloaded):
        """Both pcs-formats are written; json must be the reloaded pcs_fn."""
        self.assertTrue(os.path.exists(
            os.path.join(scenario.output_dir, 'param.pcs')))
        self.assertTrue(os.path.exists(
            os.path.join(scenario.output_dir, 'param.json')))
        self.assertEqual(reloaded.pcs_fn,
                         os.path.join(scenario.output_dir, 'param.json'))

    path = os.path.join(scenario.output_dir, 'scenario.txt')
    scenario_reloaded = Scenario(path)
    check_scen_eq(scenario, scenario_reloaded)
    # Test whether json is the default pcs_fn
    check_json_is_default_pcs(scenario_reloaded)

    # Now create new scenario without filepaths
    self.test_scenario_dict.update({
        'paramfile': None, 'cs': scenario.cs,
        'feature_file': None, 'features': scenario.feature_dict,
        'feature_names': scenario.feature_names,
        'instance_file': None, 'instances': scenario.train_insts,
        'test_instance_file': None, 'test_instances': scenario.test_insts})
    logging.debug(scenario_reloaded)
    scenario_no_fn = Scenario(self.test_scenario_dict)
    scenario_reloaded = Scenario(path)
    check_scen_eq(scenario_no_fn, scenario_reloaded)
    # Test whether json is the default pcs_fn
    check_json_is_default_pcs(scenario_reloaded)
def test_write_except(self, patch_isdir, patch_mkdirs):
    """Scenario.write must propagate the OSError raised while creating the output-dir.

    The mocked ``os.path.isdir`` / ``os.makedirs`` force the directory-creation
    path to fail, so the error must surface to the caller instead of being
    swallowed.
    """
    patch_isdir.return_value = False
    patch_mkdirs.side_effect = OSError()
    scenario = Scenario(self.test_scenario_dict)
    # This injection would usually happen by the facade object!
    scenario.output_dir_for_this_run = scenario.output_dir
    # 'as cm' was captured but never inspected - plain context manager suffices
    with self.assertRaises(OSError):
        scenario.write()
def __init__(
    self,
    scenario: Scenario,
    tae_runner: Optional[Union[Type[BaseRunner], Callable]] = None,
    tae_runner_kwargs: Optional[Dict] = None,
    runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None,
    runhistory_kwargs: Optional[Dict] = None,
    intensifier: Optional[Type[AbstractRacer]] = None,
    intensifier_kwargs: Optional[Dict] = None,
    acquisition_function: Optional[Type[AbstractAcquisitionFunction]] = None,
    acquisition_function_kwargs: Optional[Dict] = None,
    integrate_acquisition_function: bool = False,
    acquisition_function_optimizer: Optional[Type[AcquisitionFunctionMaximizer]] = None,
    acquisition_function_optimizer_kwargs: Optional[Dict] = None,
    model: Optional[Type[AbstractEPM]] = None,
    model_kwargs: Optional[Dict] = None,
    runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None,
    runhistory2epm_kwargs: Optional[Dict] = None,
    multi_objective_algorithm: Optional[Type[AbstractMultiObjectiveAlgorithm]] = None,
    multi_objective_kwargs: Optional[Dict] = None,
    initial_design: Optional[Type[InitialDesign]] = None,
    initial_design_kwargs: Optional[Dict] = None,
    initial_configurations: Optional[List[Configuration]] = None,
    stats: Optional[Stats] = None,
    restore_incumbent: Optional[Configuration] = None,
    rng: Optional[Union[np.random.RandomState, int]] = None,
    smbo_class: Optional[Type[SMBO]] = None,
    run_id: Optional[int] = None,
    random_configuration_chooser: Optional[Type[RandomConfigurationChooser]] = None,
    random_configuration_chooser_kwargs: Optional[Dict] = None,
    dask_client: Optional[dask.distributed.Client] = None,
    n_jobs: Optional[int] = 1,
):
    """Assemble every SMBO component from the scenario and the given overrides.

    Builds, in order: output directory + RNG, stats, runhistory,
    random-configuration chooser, EPM model, acquisition function (optionally
    integrated) and its optimizer, target-algorithm runner (optionally wrapped
    in a Dask parallel runner), intensifier, multi-objective aggregation,
    initial design and runhistory-to-EPM converter, then instantiates the
    SMBO solver into ``self.solver``.

    Each component argument may be ``None`` (a default is built here), a class
    (instantiated here with the merged kwargs), or - for some components - a
    pre-built instance. The matching ``*_kwargs`` dict is merged on top of the
    defaults constructed from ``scenario``.

    Parameters are documented on the class; notable ones:
    restore_incumbent : if set, the CLI restore path is taken and the existing
        output directory is reused instead of creating a new one.
    n_jobs : 1/None = sequential, -1 = all cores, >1 = that many Dask workers.

    Raises TypeError/ValueError when a component argument is neither ``None``,
    a suitable class, nor a suitable instance.
    """
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
    self.scenario = scenario
    self.output_dir = ""
    if not restore_incumbent:
        # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an
        # output directory. This is the default path.
        # initial random number generator
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        self.output_dir = create_output_directory(scenario, run_id)
    elif scenario.output_dir is not None:  # type: ignore[attr-defined] # noqa F821
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        # output-directory is created in CLI when restoring from a
        # folder. calling the function again in the facade results in two
        # folders being created: run_X and run_X.OLD. if we are
        # restoring, the output-folder exists already and we omit creating it,
        # but set the self-output_dir to the dir.
        # necessary because we want to write traj to new output-dir in CLI.
        self.output_dir = cast(str, scenario.output_dir_for_this_run)  # type: ignore[attr-defined] # noqa F821
    rng = cast(np.random.RandomState, rng)

    if (scenario.deterministic is True  # type: ignore[attr-defined] # noqa F821
            and getattr(scenario, "tuner_timeout", None) is None
            and scenario.run_obj == "quality"  # type: ignore[attr-defined] # noqa F821
            ):
        self.logger.info(
            "Optimizing a deterministic scenario for quality without a tuner timeout - will make "
            "SMAC deterministic and only evaluate one configuration per iteration!"
        )
        scenario.intensification_percentage = 1e-10  # type: ignore[attr-defined] # noqa F821
        scenario.min_chall = 1  # type: ignore[attr-defined] # noqa F821
    scenario.write()

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario)

    # runtime objective implies log-transformed cost modelling
    if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
        self.logger.warning(
            "Runtime as objective automatically activates log(y) transformation"
        )
        self.scenario.transform_y = "LOG"  # type: ignore[attr-defined] # noqa F821

    # initialize empty runhistory
    num_obj = len(scenario.multi_objectives)  # type: ignore[attr-defined] # noqa F821
    runhistory_def_kwargs = {}
    if runhistory_kwargs is not None:
        runhistory_def_kwargs.update(runhistory_kwargs)
    if runhistory is None:
        runhistory = RunHistory(**runhistory_def_kwargs)
    elif inspect.isclass(runhistory):
        runhistory = runhistory(**runhistory_def_kwargs)  # type: ignore[operator] # noqa F821
    elif isinstance(runhistory, RunHistory):
        pass
    else:
        raise ValueError(
            "runhistory has to be a class or an object of RunHistory")

    # random-configuration chooser (interleaves random configs into SMBO)
    rand_conf_chooser_kwargs = {"rng": rng}
    if random_configuration_chooser_kwargs is not None:
        rand_conf_chooser_kwargs.update(random_configuration_chooser_kwargs)
    if random_configuration_chooser is None:
        if "prob" not in rand_conf_chooser_kwargs:
            rand_conf_chooser_kwargs["prob"] = scenario.rand_prob  # type: ignore[attr-defined] # noqa F821
        random_configuration_chooser_instance = ChooserProb(
            **rand_conf_chooser_kwargs  # type: ignore[arg-type] # noqa F821 # type: RandomConfigurationChooser
        )
    elif inspect.isclass(random_configuration_chooser):
        random_configuration_chooser_instance = random_configuration_chooser(  # type: ignore # noqa F821
            **rand_conf_chooser_kwargs  # type: ignore[arg-type] # noqa F821
        )
    elif not isinstance(random_configuration_chooser, RandomConfigurationChooser):
        # NOTE(review): when a RandomConfigurationChooser *instance* is passed,
        # no branch assigns random_configuration_chooser_instance - presumably a
        # latent NameError path; verify against upstream.
        raise ValueError(
            "random_configuration_chooser has to be"
            " a class or object of RandomConfigurationChooser")

    # reset random number generator in config space to draw different
    # random configurations with each seed given to SMAC
    scenario.cs.seed(rng.randint(MAXINT))  # type: ignore[attr-defined] # noqa F821

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

    # initial EPM
    types, bounds = get_types(scenario.cs, scenario.feature_array)  # type: ignore[attr-defined] # noqa F821
    model_def_kwargs = {
        "types": types,
        "bounds": bounds,
        "instance_features": scenario.feature_array,
        "seed": rng.randint(MAXINT),
        "pca_components": scenario.PCA_DIM,
    }
    if model_kwargs is not None:
        model_def_kwargs.update(model_kwargs)
    if model is None:
        # random-forest defaults only fill keys the user did not override
        for key, value in {
            "log_y": scenario.transform_y in ["LOG", "LOGS"],  # type: ignore[attr-defined] # noqa F821
            "num_trees": scenario.rf_num_trees,  # type: ignore[attr-defined] # noqa F821
            "do_bootstrapping": scenario.rf_do_bootstrapping,  # type: ignore[attr-defined] # noqa F821
            "ratio_features": scenario.rf_ratio_features,  # type: ignore[attr-defined] # noqa F821
            "min_samples_split": scenario.rf_min_samples_split,  # type: ignore[attr-defined] # noqa F821
            "min_samples_leaf": scenario.rf_min_samples_leaf,  # type: ignore[attr-defined] # noqa F821
            "max_depth": scenario.rf_max_depth,  # type: ignore[attr-defined] # noqa F821
        }.items():
            if key not in model_def_kwargs:
                model_def_kwargs[key] = value
        model_def_kwargs["configspace"] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
        model_instance = RandomForestWithInstances(
            **model_def_kwargs  # type: ignore[arg-type] # noqa F821 # type: AbstractEPM
        )
    elif inspect.isclass(model):
        model_def_kwargs["configspace"] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
        model_instance = model(**model_def_kwargs)  # type: ignore # noqa F821
    else:
        raise TypeError("Model not recognized: %s" % (type(model)))

    # initial acquisition function
    acq_def_kwargs = {"model": model_instance}
    if acquisition_function_kwargs is not None:
        acq_def_kwargs.update(acquisition_function_kwargs)
    acquisition_function_instance = (
        None)  # type: Optional[AbstractAcquisitionFunction]
    if acquisition_function is None:
        # LogEI pairs with log-transformed cost models, plain EI otherwise
        if scenario.transform_y in ["LOG", "LOGS"]:  # type: ignore[attr-defined] # noqa F821
            acquisition_function_instance = LogEI(
                **acq_def_kwargs  # type: ignore[arg-type] # noqa F821
            )
        else:
            acquisition_function_instance = EI(
                **acq_def_kwargs  # type: ignore[arg-type] # noqa F821
            )
    elif inspect.isclass(acquisition_function):
        acquisition_function_instance = acquisition_function(**acq_def_kwargs)
    else:
        raise TypeError(
            "Argument acquisition_function must be None or an object implementing the "
            "AbstractAcquisitionFunction, not %s."
            % type(acquisition_function))
    if integrate_acquisition_function:
        acquisition_function_instance = IntegratedAcquisitionFunction(
            acquisition_function=acquisition_function_instance,  # type: ignore
            **acq_def_kwargs,
        )

    # initialize optimizer on acquisition function
    acq_func_opt_kwargs = {
        "acquisition_function": acquisition_function_instance,
        "config_space": scenario.cs,  # type: ignore[attr-defined] # noqa F821
        "rng": rng,
    }
    if acquisition_function_optimizer_kwargs is not None:
        acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs)
    if acquisition_function_optimizer is None:
        for key, value in {
            "max_steps": scenario.sls_max_steps,  # type: ignore[attr-defined] # noqa F821
            "n_steps_plateau_walk": scenario.sls_n_steps_plateau_walk,  # type: ignore[attr-defined] # noqa F821
        }.items():
            if key not in acq_func_opt_kwargs:
                acq_func_opt_kwargs[key] = value
        acquisition_function_optimizer_instance = LocalAndSortedRandomSearch(
            **acq_func_opt_kwargs  # type: ignore
        )
    elif inspect.isclass(acquisition_function_optimizer):
        acquisition_function_optimizer_instance = acquisition_function_optimizer(  # type: ignore # noqa F821
            **acq_func_opt_kwargs)  # type: ignore # noqa F821
    else:
        raise TypeError(
            "Argument acquisition_function_optimizer must be None or an object implementing the "
            "AcquisitionFunctionMaximizer, but is '%s'"
            % type(acquisition_function_optimizer))

    # initialize tae_runner
    # First case, if tae_runner is None, the target algorithm is a call
    # string in the scenario file
    tae_def_kwargs = {
        "stats": self.stats,
        "run_obj": scenario.run_obj,
        "par_factor": scenario.par_factor,  # type: ignore[attr-defined] # noqa F821
        "cost_for_crash": scenario.cost_for_crash,  # type: ignore[attr-defined] # noqa F821
        "abort_on_first_run_crash": scenario.abort_on_first_run_crash,  # type: ignore[attr-defined] # noqa F821
        "multi_objectives": scenario.multi_objectives,  # type: ignore[attr-defined] # noqa F821
    }
    if tae_runner_kwargs is not None:
        tae_def_kwargs.update(tae_runner_kwargs)
    if "ta" not in tae_def_kwargs:
        tae_def_kwargs["ta"] = scenario.ta  # type: ignore[attr-defined] # noqa F821
    if tae_runner is None:
        # NOTE(review): 'ta' was already defaulted above; this re-assignment is
        # redundant unless tae_runner_kwargs deliberately set 'ta' - confirm.
        tae_def_kwargs["ta"] = scenario.ta  # type: ignore[attr-defined] # noqa F821
        tae_runner_instance = ExecuteTARunOld(
            **tae_def_kwargs)  # type: ignore[arg-type] # noqa F821 # type: BaseRunner
    elif inspect.isclass(tae_runner):
        tae_runner_instance = cast(
            BaseRunner, tae_runner(**tae_def_kwargs))  # type: ignore
    elif callable(tae_runner):
        tae_def_kwargs["ta"] = tae_runner
        tae_def_kwargs["use_pynisher"] = scenario.limit_resources  # type: ignore[attr-defined] # noqa F821
        tae_def_kwargs["memory_limit"] = scenario.memory_limit  # type: ignore[attr-defined] # noqa F821
        tae_runner_instance = ExecuteTAFuncDict(
            **tae_def_kwargs)  # type: ignore
    else:
        raise TypeError(
            "Argument 'tae_runner' is %s, but must be "
            "either None, a callable or an object implementing "
            "BaseRunner. Passing 'None' will result in the "
            "creation of target algorithm runner based on the "
            "call string in the scenario file." % type(tae_runner))

    # In case of a parallel run, wrap the single worker in a parallel
    # runner
    if n_jobs is None or n_jobs == 1:
        _n_jobs = 1
    elif n_jobs == -1:
        _n_jobs = joblib.cpu_count()
    elif n_jobs > 0:
        _n_jobs = n_jobs
    else:
        raise ValueError(
            "Number of tasks must be positive, None or -1, but is %s"
            % str(n_jobs))
    if _n_jobs > 1 or dask_client is not None:
        tae_runner_instance = DaskParallelRunner(  # type: ignore
            tae_runner_instance,
            n_workers=_n_jobs,
            output_directory=self.output_dir,
            dask_client=dask_client,
        )

    # Check that overall objective and tae objective are the same
    # TODO: remove these two ignores once the scenario object knows all its attributes!
    if tae_runner_instance.run_obj != scenario.run_obj:  # type: ignore[union-attr] # noqa F821
        raise ValueError(
            "Objective for the target algorithm runner and "
            "the scenario must be the same, but are '%s' and "
            "'%s'" % (tae_runner_instance.run_obj, scenario.run_obj))  # type: ignore[union-attr] # noqa F821

    if intensifier is None:
        intensifier = Intensifier
    if isinstance(intensifier, AbstractRacer):
        intensifier_instance = intensifier
    elif inspect.isclass(intensifier):
        # initialize intensification
        intensifier_def_kwargs = {
            "stats": self.stats,
            "traj_logger": traj_logger,
            "rng": rng,
            "instances": scenario.train_insts,  # type: ignore[attr-defined] # noqa F821
            "cutoff": scenario.cutoff,  # type: ignore[attr-defined] # noqa F821
            "deterministic": scenario.deterministic,  # type: ignore[attr-defined] # noqa F821
            "run_obj_time": scenario.run_obj == "runtime",  # type: ignore[attr-defined] # noqa F821
            "instance_specifics": scenario.instance_specific,  # type: ignore[attr-defined] # noqa F821
            "adaptive_capping_slackfactor": scenario.intens_adaptive_capping_slackfactor,  # type: ignore[attr-defined] # noqa F821
            "min_chall": scenario.intens_min_chall,  # type: ignore[attr-defined] # noqa F821
        }
        # extra kwargs only the plain Intensifier understands
        if issubclass(intensifier, Intensifier):
            intensifier_def_kwargs["always_race_against"] = scenario.cs.get_default_configuration()  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs["use_ta_time_bound"] = scenario.use_ta_time  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs["minR"] = scenario.minR  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs["maxR"] = scenario.maxR  # type: ignore[attr-defined] # noqa F821
        if intensifier_kwargs is not None:
            intensifier_def_kwargs.update(intensifier_kwargs)
        intensifier_instance = intensifier(
            **intensifier_def_kwargs)  # type: ignore[arg-type] # noqa F821
    else:
        raise TypeError(
            "Argument intensifier must be None or an object implementing the AbstractRacer, but is '%s'"
            % type(intensifier))

    # initialize multi objective
    # the multi_objective_algorithm_instance will be passed to the runhistory2epm object
    multi_objective_algorithm_instance = (
        None)  # type: Optional[AbstractMultiObjectiveAlgorithm]
    if scenario.multi_objectives is not None and num_obj > 1:  # type: ignore[attr-defined] # noqa F821
        # define any defaults here
        _multi_objective_kwargs = {"rng": rng, "num_obj": num_obj}
        if multi_objective_kwargs is not None:
            _multi_objective_kwargs.update(multi_objective_kwargs)
        if multi_objective_algorithm is None:
            multi_objective_algorithm_instance = MeanAggregationStrategy(
                **_multi_objective_kwargs)  # type: ignore[arg-type] # noqa F821
        elif inspect.isclass(multi_objective_algorithm):
            multi_objective_algorithm_instance = multi_objective_algorithm(
                **_multi_objective_kwargs)
        else:
            raise TypeError(
                "Multi-objective algorithm not recognized: %s"
                % (type(multi_objective_algorithm)))

    # initial design
    if initial_design is not None and initial_configurations is not None:
        raise ValueError(
            "Either use initial_design or initial_configurations; but not both"
        )
    init_design_def_kwargs = {
        "cs": scenario.cs,  # type: ignore[attr-defined] # noqa F821
        "traj_logger": traj_logger,
        "rng": rng,
        "ta_run_limit": scenario.ta_run_limit,  # type: ignore[attr-defined] # noqa F821
        "configs": initial_configurations,
        "n_configs_x_params": 0,
        "max_config_fracs": 0.0,
    }
    if initial_design_kwargs is not None:
        init_design_def_kwargs.update(initial_design_kwargs)
    if initial_configurations is not None:
        initial_design_instance = InitialDesign(**init_design_def_kwargs)
    elif initial_design is None:
        if scenario.initial_incumbent == "DEFAULT":  # type: ignore[attr-defined] # noqa F821
            init_design_def_kwargs["max_config_fracs"] = 0.0
            initial_design_instance = DefaultConfiguration(
                **init_design_def_kwargs)
        elif scenario.initial_incumbent == "RANDOM":  # type: ignore[attr-defined] # noqa F821
            init_design_def_kwargs["max_config_fracs"] = 0.0
            initial_design_instance = RandomConfigurations(
                **init_design_def_kwargs)
        elif scenario.initial_incumbent == "LHD":  # type: ignore[attr-defined] # noqa F821
            initial_design_instance = LHDesign(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "FACTORIAL":  # type: ignore[attr-defined] # noqa F821
            initial_design_instance = FactorialInitialDesign(
                **init_design_def_kwargs)
        elif scenario.initial_incumbent == "SOBOL":  # type: ignore[attr-defined] # noqa F821
            initial_design_instance = SobolDesign(**init_design_def_kwargs)
        else:
            raise ValueError("Don't know what kind of initial_incumbent "
                             "'%s' is" % scenario.initial_incumbent  # type: ignore
                             )  # type: ignore[attr-defined] # noqa F821
    elif inspect.isclass(initial_design):
        initial_design_instance = initial_design(**init_design_def_kwargs)
    else:
        raise TypeError(
            "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'"
            % type(initial_design))

    # if we log the performance data,
    # the RFRImputator will already get
    # log transform data from the runhistory
    if scenario.transform_y in ["LOG", "LOGS"]:  # type: ignore[attr-defined] # noqa F821
        cutoff = np.log(np.nanmin([np.inf, np.float_(scenario.cutoff)]))  # type: ignore[attr-defined] # noqa F821
        threshold = cutoff + np.log(scenario.par_factor)  # type: ignore[attr-defined] # noqa F821
    else:
        cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)])  # type: ignore[attr-defined] # noqa F821
        threshold = cutoff * scenario.par_factor  # type: ignore[attr-defined] # noqa F821
    num_params = len(scenario.cs.get_hyperparameters())  # type: ignore[attr-defined] # noqa F821
    imputor = RFRImputator(
        rng=rng,
        cutoff=cutoff,
        threshold=threshold,
        model=model_instance,
        change_threshold=0.01,
        max_iter=2,
    )

    r2e_def_kwargs = {
        "scenario": scenario,
        "num_params": num_params,
        "success_states": [
            StatusType.SUCCESS,
        ],
        "impute_censored_data": True,
        "impute_state": [
            StatusType.CAPPED,
        ],
        "imputor": imputor,
        "scale_perc": 5,
    }
    # TODO: consider other sorts of multi-objective algorithms
    if isinstance(multi_objective_algorithm_instance, AggregationStrategy):
        r2e_def_kwargs.update({
            "multi_objective_algorithm": multi_objective_algorithm_instance
        })
    if scenario.run_obj == "quality":
        # quality runs: crashed/memout count as data, no censored-data imputation
        r2e_def_kwargs.update({
            "success_states": [
                StatusType.SUCCESS,
                StatusType.CRASHED,
                StatusType.MEMOUT,
            ],
            "impute_censored_data": False,
            "impute_state": None,
        })
    if (isinstance(intensifier_instance, (SuccessiveHalving, Hyperband))
            and scenario.run_obj == "quality"):
        r2e_def_kwargs.update({
            "success_states": [
                StatusType.SUCCESS,
                StatusType.CRASHED,
                StatusType.MEMOUT,
                StatusType.DONOTADVANCE,
            ],
            "consider_for_higher_budgets_state": [
                StatusType.DONOTADVANCE,
                StatusType.TIMEOUT,
                StatusType.CRASHED,
                StatusType.MEMOUT,
            ],
        })
    if runhistory2epm_kwargs is not None:
        r2e_def_kwargs.update(runhistory2epm_kwargs)
    if runhistory2epm is None:
        if scenario.run_obj == "runtime":
            rh2epm = RunHistory2EPM4LogCost(
                **r2e_def_kwargs  # type: ignore
            )  # type: ignore[arg-type] # noqa F821 # type: AbstractRunHistory2EPM
        elif scenario.run_obj == "quality":
            # NOTE(review): no final 'else' in this transform_y chain - an
            # unexpected transform_y would leave rh2epm unbound; presumably
            # transform_y is validated upstream in the Scenario - confirm.
            if scenario.transform_y == "NONE":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4Cost(
                    **r2e_def_kwargs)  # type: ignore # noqa F821
            elif scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4LogCost(
                    **r2e_def_kwargs)  # type: ignore # noqa F821
            elif scenario.transform_y == "LOGS":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4LogScaledCost(
                    **r2e_def_kwargs)  # type: ignore # noqa F821
            elif scenario.transform_y == "INVS":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4InvScaledCost(
                    **r2e_def_kwargs)  # type: ignore # noqa F821
        else:
            raise ValueError(
                "Unknown run objective: %s. Should be either "
                "quality or runtime." % self.scenario.run_obj  # type: ignore # noqa F821
            )
    elif inspect.isclass(runhistory2epm):
        rh2epm = runhistory2epm(**r2e_def_kwargs)  # type: ignore # noqa F821
    else:
        raise TypeError(
            "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'"
            % type(runhistory2epm))

    smbo_args = {
        "scenario": scenario,
        "stats": self.stats,
        "initial_design": initial_design_instance,
        "runhistory": runhistory,
        "runhistory2epm": rh2epm,
        "intensifier": intensifier_instance,
        "num_run": run_id,
        "model": model_instance,
        "acq_optimizer": acquisition_function_optimizer_instance,
        "acquisition_func": acquisition_function_instance,
        "rng": rng,
        "restore_incumbent": restore_incumbent,
        "random_configuration_chooser": random_configuration_chooser_instance,
        "tae_runner": tae_runner_instance,
    }  # type: Dict[str, Any]

    if smbo_class is None:
        self.solver = SMBO(**smbo_args)  # type: ignore[arg-type] # noqa F821
    else:
        self.solver = smbo_class(
            **smbo_args)  # type: ignore[arg-type] # noqa F821
def __init__(self,
             scenario: Scenario,
             tae_runner: typing.Union[ExecuteTARun, typing.Callable] = None,
             runhistory: RunHistory = None,
             intensifier: Intensifier = None,
             acquisition_function: AbstractAcquisitionFunction = None,
             acquisition_function_optimizer: AcquisitionFunctionMaximizer = None,
             model: AbstractEPM = None,
             runhistory2epm: AbstractRunHistory2EPM = None,
             initial_design: InitialDesign = None,
             initial_configurations: typing.List[Configuration] = None,
             stats: Stats = None,
             restore_incumbent: Configuration = None,
             rng: typing.Union[np.random.RandomState, int] = None,
             smbo_class: SMBO = None,
             run_id: int = 1):
    """Constructor

    Builds all SMBO components (stats, runhistory, EPM model, acquisition
    function and optimizer, target-algorithm runner, intensifier, initial
    design, runhistory-to-EPM converter) with defaults derived from the
    scenario, injecting cross-dependencies into user-supplied components
    where they are missing, and instantiates the solver.

    Parameters
    ----------
    scenario : ~smac.scenario.scenario.Scenario
        Scenario object
    tae_runner : ~smac.tae.execute_ta_run.ExecuteTARun or callable
        Callable or implementation of
        :class:`~smac.tae.execute_ta_run.ExecuteTARun`. In case a
        callable is passed it will be wrapped by
        :class:`~smac.tae.execute_func.ExecuteTAFuncDict`.
        If not set, it will be initialized with the
        :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`.
    runhistory : RunHistory
        runhistory to store all algorithm runs
    intensifier : Intensifier
        intensification object to issue a racing to decide the current
        incumbent
    acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
        Object that implements the
        :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`.
        Will use :class:`~smac.optimizer.acquisition.EI` if not set.
    acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer
        Object that implements the
        :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`.
        Will use
        :class:`smac.optimizer.ei_optimization.InterleavedLocalAndRandomSearch`
        if not set.
    model : AbstractEPM
        Model that implements train() and predict(). Will use a
        :class:`~smac.epm.rf_with_instances.RandomForestWithInstances`
        if not set.
    runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EPM
        Object that implements the AbstractRunHistory2EPM. If None,
        will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost`
        if objective is cost or
        :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost`
        if objective is runtime.
    initial_design : InitialDesign
        initial sampling design
    initial_configurations : typing.List[Configuration]
        list of initial configurations for initial design --
        cannot be used together with initial_design
    stats : Stats
        optional stats object
    rng : np.random.RandomState
        Random number generator
    restore_incumbent : Configuration
        incumbent used if restoring to previous state
    smbo_class : ~smac.optimizer.smbo.SMBO
        Class implementing the SMBO interface which will be used to
        instantiate the optimizer class.
    run_id: int, (default: 1)
        Run ID will be used as subfolder for output_dir.
    """
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

    aggregate_func = average_cost

    self.output_dir = create_output_directory(scenario, run_id)
    scenario.write()

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario)

    # initialize empty runhistory
    if runhistory is None:
        runhistory = RunHistory(aggregate_func=aggregate_func)
    # inject aggr_func if necessary
    if runhistory.aggregate_func is None:
        runhistory.aggregate_func = aggregate_func

    # initial random number generator
    num_run, rng = self._get_rng(rng=rng)

    # reset random number generator in config space to draw different
    # random configurations with each seed given to SMAC
    scenario.cs.seed(rng.randint(MAXINT))

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

    # initial EPM
    types, bounds = get_types(scenario.cs, scenario.feature_array)
    if model is None:
        model = RandomForestWithInstances(
            types=types,
            bounds=bounds,
            instance_features=scenario.feature_array,
            seed=rng.randint(MAXINT),
            pca_components=scenario.PCA_DIM)

    # initial acquisition function
    if acquisition_function is None:
        # LogEI matches the log-cost model used for runtime objectives
        if scenario.run_obj == "runtime":
            acquisition_function = LogEI(model=model)
        else:
            acquisition_function = EI(model=model)
    # inject model if necessary
    if acquisition_function.model is None:
        acquisition_function.model = model

    # initialize optimizer on acquisition function
    if acquisition_function_optimizer is None:
        acquisition_function_optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function,
            scenario.cs,
            np.random.RandomState(seed=rng.randint(MAXINT)))
    elif not isinstance(
            acquisition_function_optimizer,
            AcquisitionFunctionMaximizer,
    ):
        raise ValueError(
            "Argument 'acquisition_function_optimizer' must be of type"
            "'AcquisitionFunctionMaximizer', but is '%s'" %
            type(acquisition_function_optimizer))

    # initialize tae_runner
    # First case, if tae_runner is None, the target algorithm is a call
    # string in the scenario file
    if tae_runner is None:
        tae_runner = ExecuteTARunOld(
            ta=scenario.ta,
            stats=self.stats,
            run_obj=scenario.run_obj,
            runhistory=runhistory,
            par_factor=scenario.par_factor,
            cost_for_crash=scenario.cost_for_crash)
    # Second case, the tae_runner is a function to be optimized
    elif callable(tae_runner):
        tae_runner = ExecuteTAFuncDict(
            ta=tae_runner,
            stats=self.stats,
            run_obj=scenario.run_obj,
            memory_limit=scenario.memory_limit,
            runhistory=runhistory,
            par_factor=scenario.par_factor,
            cost_for_crash=scenario.cost_for_crash)
    # Third case, if it is an ExecuteTaRun we can simply use the
    # instance. Otherwise, the next check raises an exception
    elif not isinstance(tae_runner, ExecuteTARun):
        raise TypeError("Argument 'tae_runner' is %s, but must be "
                        "either a callable or an instance of "
                        "ExecuteTaRun. Passing 'None' will result in the "
                        "creation of target algorithm runner based on the "
                        "call string in the scenario file."
                        % type(tae_runner))

    # Check that overall objective and tae objective are the same
    if tae_runner.run_obj != scenario.run_obj:
        raise ValueError("Objective for the target algorithm runner and "
                         "the scenario must be the same, but are '%s' and "
                         "'%s'" % (tae_runner.run_obj, scenario.run_obj))

    # inject stats if necessary
    if tae_runner.stats is None:
        tae_runner.stats = self.stats
    # inject runhistory if necessary
    if tae_runner.runhistory is None:
        tae_runner.runhistory = runhistory
    # inject cost_for_crash
    if tae_runner.crash_cost != scenario.cost_for_crash:
        tae_runner.crash_cost = scenario.cost_for_crash

    # initialize intensification
    if intensifier is None:
        intensifier = Intensifier(
            tae_runner=tae_runner,
            stats=self.stats,
            traj_logger=traj_logger,
            rng=rng,
            instances=scenario.train_insts,
            cutoff=scenario.cutoff,
            deterministic=scenario.deterministic,
            run_obj_time=scenario.run_obj == "runtime",
            always_race_against=scenario.cs.get_default_configuration()
            if scenario.always_race_default else None,
            instance_specifics=scenario.instance_specific,
            minR=scenario.minR,
            maxR=scenario.maxR)
    # inject deps if necessary
    if intensifier.tae_runner is None:
        intensifier.tae_runner = tae_runner
    if intensifier.stats is None:
        intensifier.stats = self.stats
    if intensifier.traj_logger is None:
        intensifier.traj_logger = traj_logger

    # initial design
    if initial_design is not None and initial_configurations is not None:
        raise ValueError(
            "Either use initial_design or initial_configurations; but not both"
        )
    if initial_configurations is not None:
        initial_design = MultiConfigInitialDesign(
            tae_runner=tae_runner,
            scenario=scenario,
            stats=self.stats,
            traj_logger=traj_logger,
            runhistory=runhistory,
            rng=rng,
            configs=initial_configurations,
            intensifier=intensifier,
            aggregate_func=aggregate_func)
    elif initial_design is None:
        if scenario.initial_incumbent == "DEFAULT":
            initial_design = DefaultConfiguration(
                tae_runner=tae_runner,
                scenario=scenario,
                stats=self.stats,
                traj_logger=traj_logger,
                rng=rng)
        elif scenario.initial_incumbent == "RANDOM":
            initial_design = RandomConfiguration(
                tae_runner=tae_runner,
                scenario=scenario,
                stats=self.stats,
                traj_logger=traj_logger,
                rng=rng)
        else:
            raise ValueError("Don't know what kind of initial_incumbent "
                             "'%s' is" % scenario.initial_incumbent)
    # inject deps if necessary
    if initial_design.tae_runner is None:
        initial_design.tae_runner = tae_runner
    if initial_design.scenario is None:
        initial_design.scenario = scenario
    if initial_design.stats is None:
        initial_design.stats = self.stats
    if initial_design.traj_logger is None:
        initial_design.traj_logger = traj_logger

    # initial conversion of runhistory into EPM data
    if runhistory2epm is None:
        num_params = len(scenario.cs.get_hyperparameters())
        if scenario.run_obj == "runtime":
            # if we log the performance data,
            # the RFRImputator will already get
            # log transform data from the runhistory
            cutoff = np.log10(scenario.cutoff)
            threshold = np.log10(scenario.cutoff * scenario.par_factor)

            imputor = RFRImputator(rng=rng,
                                   cutoff=cutoff,
                                   threshold=threshold,
                                   model=model,
                                   change_threshold=0.01,
                                   max_iter=2)

            runhistory2epm = RunHistory2EPM4LogCost(
                scenario=scenario,
                num_params=num_params,
                success_states=[StatusType.SUCCESS, ],
                impute_censored_data=True,
                impute_state=[StatusType.CAPPED, ],
                imputor=imputor)
        elif scenario.run_obj == 'quality':
            runhistory2epm = RunHistory2EPM4Cost(
                scenario=scenario,
                num_params=num_params,
                success_states=[StatusType.SUCCESS, StatusType.CRASHED],
                impute_censored_data=False,
                impute_state=None)
        else:
            raise ValueError('Unknown run objective: %s. Should be either '
                             'quality or runtime.' % self.scenario.run_obj)

    # inject scenario if necessary:
    if runhistory2epm.scenario is None:
        runhistory2epm.scenario = scenario

    smbo_args = {
        'scenario': scenario,
        'stats': self.stats,
        'initial_design': initial_design,
        'runhistory': runhistory,
        'runhistory2epm': runhistory2epm,
        'intensifier': intensifier,
        'aggregate_func': aggregate_func,
        'num_run': num_run,
        'model': model,
        'acq_optimizer': acquisition_function_optimizer,
        'acquisition_func': acquisition_function,
        'rng': rng,
        'restore_incumbent': restore_incumbent
    }

    if smbo_class is None:
        self.solver = SMBO(**smbo_args)
    else:
        self.solver = smbo_class(**smbo_args)
def __init__(self,
             scenario: Scenario,
             tae_runner: Optional[Union[Type[ExecuteTARun], Callable]] = None,
             tae_runner_kwargs: Optional[dict] = None,
             runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None,
             runhistory_kwargs: Optional[dict] = None,
             intensifier: Optional[Type[Intensifier]] = None,
             intensifier_kwargs: Optional[dict] = None,
             acquisition_function: Optional[Type[AbstractAcquisitionFunction]] = None,
             acquisition_function_kwargs: Optional[dict] = None,
             integrate_acquisition_function: bool = False,
             acquisition_function_optimizer: Optional[Type[AcquisitionFunctionMaximizer]] = None,
             acquisition_function_optimizer_kwargs: Optional[dict] = None,
             model: Optional[Type[AbstractEPM]] = None,
             model_kwargs: Optional[dict] = None,
             runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None,
             runhistory2epm_kwargs: Optional[dict] = None,
             initial_design: Optional[Type[InitialDesign]] = None,
             initial_design_kwargs: Optional[dict] = None,
             initial_configurations: Optional[List[Configuration]] = None,
             stats: Optional[Stats] = None,
             restore_incumbent: Optional[Configuration] = None,
             rng: Optional[Union[np.random.RandomState, int]] = None,
             smbo_class: Optional[SMBO] = None,
             run_id: Optional[int] = None,
             random_configuration_chooser: Optional[Type[RandomConfigurationChooser]] = None,
             random_configuration_chooser_kwargs: Optional[dict] = None
             ):
    """Construct the SMAC facade: wire scenario, stats, runhistory, model,
    acquisition function, optimizer, TAE runner, intensifier, initial design
    and runhistory2epm together and build the SMBO solver.

    Every component can be overridden either by passing an instance, or by
    passing a class plus a ``*_kwargs`` dict that is merged over the defaults.

    Parameters
    ----------
    scenario : ~smac.scenario.scenario.Scenario
        Scenario object
    tae_runner : ~smac.tae.execute_ta_run.ExecuteTARun or callable
        Callable or implementation of
        :class:`~smac.tae.execute_ta_run.ExecuteTARun`. In case a
        callable is passed it will be wrapped by
        :class:`~smac.tae.execute_func.ExecuteTAFuncDict`.
        If not set, it will be initialized with the
        :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`.
    tae_runner_kwargs : Optional[dict]
        arguments passed to constructor of '~tae_runner'
    runhistory : RunHistory
        runhistory to store all algorithm runs
    runhistory_kwargs : Optional[dict]
        arguments passed to constructor of runhistory.
        We strongly advise against changing the aggregation function,
        since it will break some code assumptions
    intensifier : Intensifier
        intensification object to issue a racing to decide the current incumbent
    intensifier_kwargs : Optional[dict]
        arguments passed to the constructor of '~intensifier'
    acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
        Class or object that implements the
        :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`.
        Will use :class:`~smac.optimizer.acquisition.EI` or
        :class:`~smac.optimizer.acquisition.LogEI` if not set.
        `~acquisition_function_kwargs` is passed to the class constructor.
    acquisition_function_kwargs : Optional[dict]
        dictionary to pass specific arguments to ~acquisition_function
    integrate_acquisition_function : bool, default=False
        Whether to integrate the acquisition function. Works only with models
        which can sample their hyperparameters (i.e. GaussianProcessMCMC).
    acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer
        Object that implements the
        :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`.
        Will use :class:`smac.optimizer.ei_optimization.InterleavedLocalAndRandomSearch`
        if not set.
    acquisition_function_optimizer_kwargs : Optional[dict]
        Arguments passed to constructor of '~acquisition_function_optimizer'
    model : AbstractEPM
        Model that implements train() and predict(). Will use a
        :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not set.
    model_kwargs : Optional[dict]
        Arguments passed to constructor of '~model'
    runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP
        Object that implements the AbstractRunHistory2EPM. If None,
        will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost`
        if objective is cost or
        :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost`
        if objective is runtime.
    runhistory2epm_kwargs : Optional[dict]
        Arguments passed to the constructor of '~runhistory2epm'
    initial_design : InitialDesign
        initial sampling design
    initial_design_kwargs : Optional[dict]
        arguments passed to constructor of `~initial_design'
    initial_configurations : List[Configuration]
        list of initial configurations for initial design --
        cannot be used together with initial_design
    stats : Stats
        optional stats object
    rng : np.random.RandomState
        Random number generator
    restore_incumbent : Configuration
        incumbent used if restoring to previous state
    smbo_class : ~smac.optimizer.smbo.SMBO
        Class implementing the SMBO interface which will be used to
        instantiate the optimizer class.
    run_id : int (optional)
        Run ID will be used as subfolder for output_dir. If no ``run_id`` is
        given, a random ``run_id`` will be chosen.
    random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser
        How often to choose a random configuration during the intensification
        procedure.
    random_configuration_chooser_kwargs : Optional[dict]
        arguments of constructor for '~random_configuration_chooser'
    """
    self.logger = logging.getLogger(
        self.__module__ + "." + self.__class__.__name__)

    aggregate_func = average_cost

    self.scenario = scenario
    self.output_dir = ""

    # BUGFIX: normalize rng up front. The original code only converted rng
    # via get_rng() on the restore path; on the default path rng stayed
    # whatever the caller passed (None or an int per the signature), which
    # crashed later at rng.randint(MAXINT). get_rng accepts None/int/
    # RandomState and always returns a RandomState; the run_id it proposes
    # is discarded so the uuid1-based run_id behavior below is preserved.
    _, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)

    if not restore_incumbent:
        # restore_incumbent is used by the CLI interface which provides a
        # method for restoring a SMAC run given an output directory.
        # This is the default path.
        # run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        # run_id = datetime.now().strftime("%Y%m%d%H%M%S%f")
        run_id = uuid1()
        self.output_dir = create_output_directory(scenario, run_id)  # fixme run_id
    elif scenario.output_dir is not None:
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        # output-directory is created in CLI when restoring from a
        # folder. calling the function again in the facade results in two
        # folders being created: run_X and run_X.OLD. if we are
        # restoring, the output-folder exists already and we omit creating it,
        # but set the self-output_dir to the dir.
        # necessary because we want to write traj to new output-dir in CLI.
        self.output_dir = scenario.output_dir_for_this_run

    if (
        scenario.deterministic is True
        and getattr(scenario, 'tuner_timeout', None) is None
        and scenario.run_obj == 'quality'
    ):
        # Deterministic quality scenarios never benefit from re-evaluating
        # the same configuration, so force one evaluation per iteration.
        self.logger.info('Optimizing a deterministic scenario for quality without a tuner timeout - will make '
                         'SMAC deterministic and only evaluate one configuration per iteration!')
        scenario.intensification_percentage = 1e-10
        scenario.min_chall = 1

    scenario.write()

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario, file_system=scenario.file_system)

    if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":
        self.logger.warning("Runtime as objective automatically activates log(y) transformation")
        self.scenario.transform_y = "LOG"

    # initialize empty runhistory
    runhistory_def_kwargs = {'aggregate_func': aggregate_func}
    if runhistory_kwargs is not None:
        runhistory_def_kwargs.update(runhistory_kwargs)
    if runhistory is None:
        runhistory = RunHistory(**runhistory_def_kwargs, file_system=scenario.file_system)
    elif inspect.isclass(runhistory):
        runhistory = runhistory(**runhistory_def_kwargs)
    else:
        if runhistory.aggregate_func is None:
            runhistory.aggregate_func = aggregate_func

    rand_conf_chooser_kwargs = {
        'rng': rng
    }
    if random_configuration_chooser_kwargs is not None:
        rand_conf_chooser_kwargs.update(random_configuration_chooser_kwargs)
    if random_configuration_chooser is None:
        if 'prob' not in rand_conf_chooser_kwargs:
            rand_conf_chooser_kwargs['prob'] = scenario.rand_prob
        random_configuration_chooser = ChooserProb(**rand_conf_chooser_kwargs)
    elif inspect.isclass(random_configuration_chooser):
        random_configuration_chooser = random_configuration_chooser(**rand_conf_chooser_kwargs)
    elif not isinstance(random_configuration_chooser, RandomConfigurationChooser):
        raise ValueError("random_configuration_chooser has to be"
                         " a class or object of RandomConfigurationChooser")

    # reset random number generator in config space to draw different
    # random configurations with each seed given to SMAC
    scenario.cs.seed(rng.randint(MAXINT))

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats,
                             file_system=scenario.file_system)

    # initial EPM
    types, bounds = get_types(scenario.cs, scenario.feature_array)
    model_def_kwargs = {
        'types': types,
        'bounds': bounds,
        'instance_features': scenario.feature_array,
        'seed': rng.randint(MAXINT),
        'pca_components': scenario.PCA_DIM,
    }
    if model_kwargs is not None:
        model_def_kwargs.update(model_kwargs)
    if model is None:
        # Default model: random forest; user-supplied kwargs win over defaults.
        for key, value in {
            'log_y': scenario.transform_y in ["LOG", "LOGS"],
            'num_trees': scenario.rf_num_trees,
            'do_bootstrapping': scenario.rf_do_bootstrapping,
            'ratio_features': scenario.rf_ratio_features,
            'min_samples_split': scenario.rf_min_samples_split,
            'min_samples_leaf': scenario.rf_min_samples_leaf,
            'max_depth': scenario.rf_max_depth,
        }.items():
            if key not in model_def_kwargs:
                model_def_kwargs[key] = value
        model_def_kwargs['configspace'] = self.scenario.cs
        model = RandomForestWithInstances(**model_def_kwargs)
    elif inspect.isclass(model):
        model_def_kwargs['configspace'] = self.scenario.cs
        model = model(**model_def_kwargs)
    else:
        raise TypeError(
            "Model not recognized: %s" % (type(model)))

    # initial acquisition function
    acq_def_kwargs = {'model': model}
    if acquisition_function_kwargs is not None:
        acq_def_kwargs.update(acquisition_function_kwargs)
    if acquisition_function is None:
        if scenario.transform_y in ["LOG", "LOGS"]:
            acquisition_function = LogEI(**acq_def_kwargs)
        else:
            acquisition_function = EI(**acq_def_kwargs)
    elif inspect.isclass(acquisition_function):
        acquisition_function = acquisition_function(**acq_def_kwargs)
    else:
        raise TypeError(
            "Argument acquisition_function must be None or an object implementing the "
            "AbstractAcquisitionFunction, not %s."
            % type(acquisition_function)
        )
    if integrate_acquisition_function:
        acquisition_function = IntegratedAcquisitionFunction(
            acquisition_function=acquisition_function,
            **acq_def_kwargs
        )

    # initialize optimizer on acquisition function
    acq_func_opt_kwargs = {
        'acquisition_function': acquisition_function,
        'config_space': scenario.cs,
        'rng': rng,
    }
    if acquisition_function_optimizer_kwargs is not None:
        acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs)
    if acquisition_function_optimizer is None:
        for key, value in {
            'max_steps': scenario.sls_max_steps,
            'n_steps_plateau_walk': scenario.sls_n_steps_plateau_walk,
        }.items():
            if key not in acq_func_opt_kwargs:
                acq_func_opt_kwargs[key] = value
        acquisition_function_optimizer = InterleavedLocalAndRandomSearch(**acq_func_opt_kwargs)
    elif inspect.isclass(acquisition_function_optimizer):
        acquisition_function_optimizer = acquisition_function_optimizer(**acq_func_opt_kwargs)
    else:
        raise TypeError(
            "Argument acquisition_function_optimizer must be None or an object implementing the "
            "AcquisitionFunctionMaximizer, but is '%s'"
            % type(acquisition_function_optimizer)
        )

    # initialize tae_runner
    # First case, if tae_runner is None, the target algorithm is a call
    # string in the scenario file
    tae_def_kwargs = {
        'stats': self.stats,
        'run_obj': scenario.run_obj,
        'runhistory': runhistory,
        'par_factor': scenario.par_factor,
        'cost_for_crash': scenario.cost_for_crash,
        'abort_on_first_run_crash': scenario.abort_on_first_run_crash
    }
    if tae_runner_kwargs is not None:
        tae_def_kwargs.update(tae_runner_kwargs)
    if 'ta' not in tae_def_kwargs:
        tae_def_kwargs['ta'] = scenario.ta
    if tae_runner is None:
        tae_def_kwargs['ta'] = scenario.ta
        tae_runner = ExecuteTARunOld(**tae_def_kwargs)
    elif inspect.isclass(tae_runner):
        tae_runner = tae_runner(**tae_def_kwargs)
    elif callable(tae_runner):
        tae_def_kwargs['ta'] = tae_runner
        tae_runner = ExecuteTAFuncDict(**tae_def_kwargs)
    else:
        raise TypeError("Argument 'tae_runner' is %s, but must be "
                        "either None, a callable or an object implementing "
                        "ExecuteTaRun. Passing 'None' will result in the "
                        "creation of target algorithm runner based on the "
                        "call string in the scenario file."
                        % type(tae_runner))

    # Check that overall objective and tae objective are the same
    if tae_runner.run_obj != scenario.run_obj:
        raise ValueError("Objective for the target algorithm runner and "
                         "the scenario must be the same, but are '%s' and "
                         "'%s'" % (tae_runner.run_obj, scenario.run_obj))

    # initialize intensification
    intensifier_def_kwargs = {
        'tae_runner': tae_runner,
        'stats': self.stats,
        'traj_logger': traj_logger,
        'rng': rng,
        'instances': scenario.train_insts,
        'cutoff': scenario.cutoff,
        'deterministic': scenario.deterministic,
        'run_obj_time': scenario.run_obj == "runtime",
        'always_race_against': scenario.cs.get_default_configuration()
        if scenario.always_race_default else None,
        'use_ta_time_bound': scenario.use_ta_time,
        'instance_specifics': scenario.instance_specific,
        'minR': scenario.minR,
        'maxR': scenario.maxR,
        'adaptive_capping_slackfactor': scenario.intens_adaptive_capping_slackfactor,
        'min_chall': scenario.intens_min_chall
    }
    if intensifier_kwargs is not None:
        intensifier_def_kwargs.update(intensifier_kwargs)
    if intensifier is None:
        intensifier = Intensifier(**intensifier_def_kwargs)
    elif inspect.isclass(intensifier):
        intensifier = intensifier(**intensifier_def_kwargs)
    else:
        raise TypeError(
            "Argument intensifier must be None or an object implementing the Intensifier, but is '%s'"
            % type(intensifier)
        )

    # initial design
    if initial_design is not None and initial_configurations is not None:
        raise ValueError(
            "Either use initial_design or initial_configurations; but not both")

    init_design_def_kwargs = {
        'tae_runner': tae_runner,
        'scenario': scenario,
        'stats': self.stats,
        'traj_logger': traj_logger,
        'runhistory': runhistory,
        'rng': rng,
        'configs': initial_configurations,
        'intensifier': intensifier,
        'aggregate_func': aggregate_func,
        'n_configs_x_params': 0,
        'max_config_fracs': 0.0
    }
    if initial_design_kwargs is not None:
        init_design_def_kwargs.update(initial_design_kwargs)
    if initial_configurations is not None:
        initial_design = InitialDesign(**init_design_def_kwargs)
    elif initial_design is None:
        if scenario.initial_incumbent == "DEFAULT":
            init_design_def_kwargs['max_config_fracs'] = 0.0
            initial_design = DefaultConfiguration(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "RANDOM":
            init_design_def_kwargs['max_config_fracs'] = 0.0
            initial_design = RandomConfigurations(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "LHD":
            initial_design = LHDesign(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "FACTORIAL":
            initial_design = FactorialInitialDesign(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "SOBOL":
            initial_design = SobolDesign(**init_design_def_kwargs)
        else:
            raise ValueError("Don't know what kind of initial_incumbent "
                             "'%s' is" % scenario.initial_incumbent)
    elif inspect.isclass(initial_design):
        initial_design = initial_design(**init_design_def_kwargs)
    else:
        raise TypeError(
            "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'"
            % type(initial_design)
        )

    # if we log the performance data,
    # the RFRImputator will already get
    # log transform data from the runhistory
    if scenario.transform_y in ["LOG", "LOGS"]:
        cutoff = np.log(np.nanmin([np.inf, np.float_(scenario.cutoff)]))
        threshold = cutoff + np.log(scenario.par_factor)
    else:
        cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)])
        threshold = cutoff * scenario.par_factor

    num_params = len(scenario.cs.get_hyperparameters())
    imputor = RFRImputator(rng=rng,
                           cutoff=cutoff,
                           threshold=threshold,
                           model=model,
                           change_threshold=0.01,
                           max_iter=2)

    r2e_def_kwargs = {
        'scenario': scenario,
        'num_params': num_params,
        'success_states': [StatusType.SUCCESS, ],
        'impute_censored_data': True,
        'impute_state': [StatusType.CAPPED, ],
        'imputor': imputor,
        'scale_perc': 5
    }
    if scenario.run_obj == 'quality':
        r2e_def_kwargs.update({
            'success_states': [StatusType.SUCCESS, StatusType.CRASHED],
            'impute_censored_data': False,
            'impute_state': None,
        })
    if runhistory2epm_kwargs is not None:
        r2e_def_kwargs.update(runhistory2epm_kwargs)
    if runhistory2epm is None:
        if scenario.run_obj == 'runtime':
            runhistory2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs)
        elif scenario.run_obj == 'quality':
            if scenario.transform_y == "NONE":
                runhistory2epm = RunHistory2EPM4Cost(**r2e_def_kwargs)
            elif scenario.transform_y == "LOG":
                runhistory2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs)
            elif scenario.transform_y == "LOGS":
                runhistory2epm = RunHistory2EPM4LogScaledCost(**r2e_def_kwargs)
            elif scenario.transform_y == "INVS":
                runhistory2epm = RunHistory2EPM4InvScaledCost(**r2e_def_kwargs)
        else:
            raise ValueError('Unknown run objective: %s. Should be either '
                             'quality or runtime.' % self.scenario.run_obj)
    elif inspect.isclass(runhistory2epm):
        runhistory2epm = runhistory2epm(**r2e_def_kwargs)
    else:
        raise TypeError(
            "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'"
            % type(runhistory2epm)
        )

    smbo_args = {
        'scenario': scenario,
        'stats': self.stats,
        'initial_design': initial_design,
        'runhistory': runhistory,
        'runhistory2epm': runhistory2epm,
        'intensifier': intensifier,
        'aggregate_func': aggregate_func,
        'num_run': run_id,
        'model': model,
        'acq_optimizer': acquisition_function_optimizer,
        'acquisition_func': acquisition_function,
        'rng': rng,
        'restore_incumbent': restore_incumbent,
        'random_configuration_chooser': random_configuration_chooser
    }

    if smbo_class is None:
        self.solver = SMBO(**smbo_args)
    else:
        self.solver = smbo_class(**smbo_args)
def hpbandster2smac(self, folder2result, cs_options, output_dir: str):
    """Reading hpbandster-result-object and creating RunHistory and
    trajectory... treats each budget as an individual 'smac'-run, creates an
    output-directory with subdirectories for each budget.

    Parameters
    ----------
    folder2result: Dict(str : hpbandster.core.result.Result)
        folder mapping to bohb's result-objects
    cs_options: list[ConfigurationSpace]
        the configuration spaces. in the best case it's a single element, but
        for pcs-format we need to guess through a list of possible configspaces
    output_dir: str
        the output-dir to save the smac-runs to

    Returns
    -------
    folder2budgets: dict(dict(str) - str)
        maps each folder (from parallel execution) to a dict, which in turn
        maps all budgets of the specific parallel execution to their paths
    """
    folder2budgets = OrderedDict()

    self.logger.debug("Loading with %d configspace alternative options...", len(cs_options))
    self.logger.info(
        "Assuming BOHB treats target algorithms as deterministic (and does not re-evaluate)"
    )
    for folder, result in folder2result.items():
        folder2budgets[folder] = OrderedDict()
        self.logger.debug("Budgets for '%s': %s" % (folder, str(result.HB_config['budgets'])))

        ##########################
        # 1. Create runhistory   #
        ##########################
        id2config_mapping = result.get_id2config_mapping()
        skipped = {'None': 0, 'NaN': 0}
        budget2rh = OrderedDict()
        for run in result.get_all_runs():
            # Choose runhistory to add run to
            if run.budget not in budget2rh:
                budget2rh[run.budget] = RunHistory(average_cost)
            rh = budget2rh[run.budget]

            # Load config; on failure drop the failing configspace candidate
            # and retry with the next one until none are left.
            config = None
            while config is None:
                if len(cs_options) == 0:
                    self.logger.debug("None of the alternatives worked...")
                    raise ValueError(
                        "Your configspace seems to be corrupt. If you use floats (or mix up ints, bools and strings) as categoricals, "
                        "please consider using the .json-format, as the .pcs-format cannot recover the type "
                        "of categoricals. Otherwise please report this to "
                        "https://github.com/automl/CAVE/issues (and attach the debug.log)"
                    )
                try:
                    config = self._get_config(run.config_id, id2config_mapping, cs_options[0])
                except ValueError as err:
                    # BUGFIX: the subtraction must be parenthesized; the
                    # original "... % len(cs_options) - 1" computed
                    # (str % int) - 1 and raised a TypeError here.
                    self.logger.debug(
                        "Loading configuration failed... trying %d alternatives" % (len(cs_options) - 1),
                        exc_info=1)
                    cs_options = cs_options[1:]  # remove the failing cs-version

            # Filter corrupted loss-values (ignore them)
            if run.loss is None:
                skipped['None'] += 1
                continue
            if np.isnan(run.loss):
                skipped['NaN'] += 1
                continue

            rh.add(config=config,
                   cost=run.loss,
                   time=run.time_stamps['finished'] - run.time_stamps['started'],
                   status=StatusType.SUCCESS,
                   seed=0,
                   additional_info={'info': run.info,
                                    'timestamps': run.time_stamps})

        self.logger.debug("Skipped %d None- and %d NaN-loss-values in BOHB-result",
                          skipped['None'], skipped['NaN'])

        ##########################
        # 2. Create all else     #
        ##########################
        # Make budget-names readable [0.021311, 0.031211] to [0.02, 0.03]
        formatted_budgets = format_budgets(budget2rh.keys())
        for b, rh in budget2rh.items():
            output_path = os.path.join(output_dir, folder, formatted_budgets[b])
            folder2budgets[folder][b] = output_path

            scenario = Scenario({
                'run_obj': 'quality',
                'cs': cs_options[0],
                'output_dir': output_dir,
                'deterministic': True,  # At the time of writing, BOHB is always treating ta's as deterministic
            })
            scenario.output_dir_for_this_run = output_path
            scenario.write()

            with open(os.path.join(output_path, 'configspace.json'), 'w') as fh:
                fh.write(pcs_json.write(cs_options[0]))

            rh.save_json(fn=os.path.join(output_path, 'runhistory.json'))

            self.get_trajectory(folder2result[folder], output_path, scenario, rh, budget=b)

    return folder2budgets
def __init__(
        self,
        scenario: Scenario,
        # TODO: once we drop python3.4 add type hint
        # typing.Union[ExecuteTARun, callable]
        tae_runner=None,
        runhistory: RunHistory = None,
        intensifier: Intensifier = None,
        acquisition_function: AbstractAcquisitionFunction = None,
        model: AbstractEPM = None,
        runhistory2epm: AbstractRunHistory2EPM = None,
        initial_design: InitialDesign = None,
        initial_configurations: typing.List[Configuration] = None,
        stats: Stats = None,
        rng: np.random.RandomState = None,
        run_id: int = 1):
    """Construct the EPILS facade: build default components (stats, runhistory,
    model, acquisition function, local search, TAE runner, intensifier,
    initial design, runhistory2epm) for everything not supplied by the caller
    and assemble the EPILS solver.

    Parameters
    ----------
    scenario : Scenario
        scenario object describing the optimization task
    tae_runner : ExecuteTARun or callable
        target algorithm runner; a callable is wrapped in ExecuteTAFuncDict,
        None builds ExecuteTARunOld from the scenario's call string
    runhistory : RunHistory
        runhistory to store all algorithm runs
    intensifier : Intensifier
        intensification object to decide the current incumbent
    acquisition_function : AbstractAcquisitionFunction
        acquisition function; defaults to LogEI for runtime, EI otherwise
    model : AbstractEPM
        surrogate model; defaults to RandomForestWithInstances
    runhistory2epm : AbstractRunHistory2EPM
        converter from runhistory to EPM training data
    initial_design : InitialDesign
        initial sampling design -- cannot be combined with
        initial_configurations
    initial_configurations : typing.List[Configuration]
        list of initial configurations for the initial design
    stats : Stats
        optional stats object
    rng : np.random.RandomState
        random number generator
    run_id : int
        run ID, used as subfolder of the output directory

    Raises
    ------
    TypeError
        if tae_runner is neither None, callable, nor an ExecuteTARun
    ValueError
        on conflicting run objectives, on both initial_design and
        initial_configurations given, on unknown initial_incumbent, or on an
        unknown run objective
    """
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

    aggregate_func = average_cost
    self.runhistory = None
    self.trajectory = None

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario)

    self.output_dir = create_output_directory(scenario, run_id)
    scenario.write()

    # initialize empty runhistory
    if runhistory is None:
        runhistory = RunHistory(aggregate_func=aggregate_func)
    # inject aggr_func if necessary
    if runhistory.aggregate_func is None:
        runhistory.aggregate_func = aggregate_func

    # initial random number generator
    num_run, rng = self._get_rng(rng=rng)

    # reset random number generator in config space to draw different
    # random configurations with each seed given to SMAC
    scenario.cs.seed(rng.randint(MAXINT))

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

    # initial EPM
    types, bounds = get_types(scenario.cs, scenario.feature_array)
    if model is None:
        model = RandomForestWithInstances(
            types=types,
            bounds=bounds,
            instance_features=scenario.feature_array,
            seed=rng.randint(MAXINT),
            pca_components=scenario.PCA_DIM,
            num_trees=scenario.rf_num_trees,
            do_bootstrapping=scenario.rf_do_bootstrapping,
            ratio_features=scenario.rf_ratio_features,
            min_samples_split=scenario.rf_min_samples_split,
            min_samples_leaf=scenario.rf_min_samples_leaf,
            max_depth=scenario.rf_max_depth)

    # initial acquisition function
    if acquisition_function is None:
        if scenario.run_obj == "runtime":
            acquisition_function = LogEI(model=model)
        else:
            acquisition_function = EI(model=model)
    # inject model if necessary
    if acquisition_function.model is None:
        acquisition_function.model = model

    # initialize optimizer on acquisition function
    local_search = LocalSearch(
        acquisition_function,
        scenario.cs,
        max_steps=scenario.sls_max_steps,
        n_steps_plateau_walk=scenario.sls_n_steps_plateau_walk)

    # initialize tae_runner
    # First case, if tae_runner is None, the target algorithm is a call
    # string in the scenario file
    if tae_runner is None:
        tae_runner = ExecuteTARunOld(
            ta=scenario.ta,
            stats=self.stats,
            run_obj=scenario.run_obj,
            runhistory=runhistory,
            par_factor=scenario.par_factor,
            cost_for_crash=scenario.cost_for_crash)
    # Second case, the tae_runner is a function to be optimized
    elif callable(tae_runner):
        tae_runner = ExecuteTAFuncDict(
            ta=tae_runner,
            stats=self.stats,
            run_obj=scenario.run_obj,
            memory_limit=scenario.memory_limit,
            runhistory=runhistory,
            par_factor=scenario.par_factor,
            cost_for_crash=scenario.cost_for_crash)
    # Third case, if it is an ExecuteTaRun we can simply use the
    # instance. Otherwise, the next check raises an exception
    elif not isinstance(tae_runner, ExecuteTARun):
        raise TypeError("Argument 'tae_runner' is %s, but must be "
                        "either a callable or an instance of "
                        "ExecuteTaRun. Passing 'None' will result in the "
                        "creation of target algorithm runner based on the "
                        "call string in the scenario file."
                        % type(tae_runner))

    # Check that overall objective and tae objective are the same
    if tae_runner.run_obj != scenario.run_obj:
        raise ValueError("Objective for the target algorithm runner and "
                         "the scenario must be the same, but are '%s' and "
                         "'%s'" % (tae_runner.run_obj, scenario.run_obj))

    # inject stats if necessary
    if tae_runner.stats is None:
        tae_runner.stats = self.stats
    # inject runhistory if necessary
    if tae_runner.runhistory is None:
        tae_runner.runhistory = runhistory
    # inject cost_for_crash
    if tae_runner.crash_cost != scenario.cost_for_crash:
        tae_runner.crash_cost = scenario.cost_for_crash

    # initialize intensification
    if intensifier is None:
        intensifier = Intensifier(
            tae_runner=tae_runner,
            stats=self.stats,
            traj_logger=traj_logger,
            rng=rng,
            instances=scenario.train_insts,
            cutoff=scenario.cutoff,
            deterministic=scenario.deterministic,
            run_obj_time=scenario.run_obj == "runtime",
            always_race_against=scenario.cs.get_default_configuration()
            if scenario.always_race_default else None,
            instance_specifics=scenario.instance_specific,
            minR=scenario.minR,
            maxR=scenario.maxR,
            adaptive_capping_slackfactor=scenario.intens_adaptive_capping_slackfactor,
            min_chall=scenario.intens_min_chall)
    # inject deps if necessary
    if intensifier.tae_runner is None:
        intensifier.tae_runner = tae_runner
    if intensifier.stats is None:
        intensifier.stats = self.stats
    if intensifier.traj_logger is None:
        intensifier.traj_logger = traj_logger

    # initial design
    if initial_design is not None and initial_configurations is not None:
        raise ValueError(
            "Either use initial_design or initial_configurations; but not both"
        )

    if initial_configurations is not None:
        initial_design = MultiConfigInitialDesign(
            tae_runner=tae_runner,
            scenario=scenario,
            stats=self.stats,
            traj_logger=traj_logger,
            runhistory=runhistory,
            rng=rng,
            configs=initial_configurations,
            intensifier=intensifier,
            aggregate_func=aggregate_func)
    elif initial_design is None:
        if scenario.initial_incumbent == "DEFAULT":
            initial_design = DefaultConfiguration(tae_runner=tae_runner,
                                                  scenario=scenario,
                                                  stats=self.stats,
                                                  traj_logger=traj_logger,
                                                  rng=rng)
        elif scenario.initial_incumbent == "RANDOM":
            initial_design = RandomConfiguration(tae_runner=tae_runner,
                                                 scenario=scenario,
                                                 stats=self.stats,
                                                 traj_logger=traj_logger,
                                                 rng=rng)
        else:
            raise ValueError("Don't know what kind of initial_incumbent "
                             "'%s' is" % scenario.initial_incumbent)
    # inject deps if necessary
    if initial_design.tae_runner is None:
        initial_design.tae_runner = tae_runner
    if initial_design.scenario is None:
        initial_design.scenario = scenario
    if initial_design.stats is None:
        initial_design.stats = self.stats
    if initial_design.traj_logger is None:
        initial_design.traj_logger = traj_logger

    # initial conversion of runhistory into EPM data
    if runhistory2epm is None:
        num_params = len(scenario.cs.get_hyperparameters())
        if scenario.run_obj == "runtime":
            # if we log the performance data,
            # the RFRImputator will already get
            # log transform data from the runhistory
            cutoff = np.log(scenario.cutoff)
            threshold = np.log(scenario.cutoff * scenario.par_factor)

            imputor = RFRImputator(rng=rng,
                                   cutoff=cutoff,
                                   threshold=threshold,
                                   model=model,
                                   change_threshold=0.01,
                                   max_iter=2)
            runhistory2epm = RunHistory2EPM4LogCost(
                scenario=scenario,
                num_params=num_params,
                success_states=[StatusType.SUCCESS, ],
                impute_censored_data=True,
                impute_state=[StatusType.CAPPED, ],
                imputor=imputor)
        elif scenario.run_obj == 'quality':
            runhistory2epm = RunHistory2EPM4Cost(
                scenario=scenario,
                num_params=num_params,
                success_states=[StatusType.SUCCESS, ],
                impute_censored_data=False,
                impute_state=None)
        else:
            # BUGFIX: the original formatted self.scenario.run_obj here, but
            # this constructor never assigns self.scenario -- taking this
            # branch raised AttributeError instead of the intended ValueError.
            raise ValueError('Unknown run objective: %s. Should be either '
                             'quality or runtime.' % scenario.run_obj)

    # inject scenario if necessary:
    if runhistory2epm.scenario is None:
        runhistory2epm.scenario = scenario

    self.solver = EPILS_Solver(
        scenario=scenario,
        stats=self.stats,
        initial_design=initial_design,
        runhistory=runhistory,
        runhistory2epm=runhistory2epm,
        intensifier=intensifier,
        aggregate_func=aggregate_func,
        num_run=num_run,
        model=model,
        acq_optimizer=local_search,
        acquisition_func=acquisition_function,
        rng=rng)
def main_cli(self, commandline_arguments: typing.List[str] = None):
    """Main function of SMAC for CLI interface.

    Parses the command line, configures root logging, optionally restores a
    previous run and/or warmstarts from foreign runhistories/trajectories,
    instantiates the optimizer selected by ``--mode`` and runs it.

    Parameters
    ----------
    commandline_arguments : typing.List[str], optional
        argument list forwarded to the CMDReader; if None, the reader falls
        back to its own default source (presumably sys.argv — confirm in
        CMDReader)
    """
    self.logger.info("SMAC call: %s" % (" ".join(sys.argv)))

    cmd_reader = CMDReader()
    kwargs = {}
    if commandline_arguments:
        kwargs['commandline_arguments'] = commandline_arguments
    main_args_, smac_args_, scen_args_ = cmd_reader.read_cmd(**kwargs)

    # Configure root logging: compact format at INFO and above, verbose
    # timestamped format below (i.e. DEBUG).
    root_logger = logging.getLogger()
    root_logger.setLevel(main_args_.verbose_level)
    logger_handler = logging.StreamHandler(stream=sys.stdout)
    if root_logger.level >= logging.INFO:
        formatter = logging.Formatter("%(levelname)s:\t%(message)s")
    else:
        formatter = logging.Formatter(
            "%(asctime)s:%(levelname)s:%(name)s:%(message)s",
            "%Y-%m-%d %H:%M:%S")
    logger_handler.setFormatter(formatter)
    root_logger.addHandler(logger_handler)
    # remove default handler
    if len(root_logger.handlers) > 1:
        root_logger.removeHandler(root_logger.handlers[0])

    # Create defaults
    rh = None               # runhistory (filled on restore/warmstart)
    initial_configs = None  # seed configurations for the initial design
    stats = None
    incumbent = None

    # Create scenario-object from the merged smac- and scenario-arguments
    scenario = {}
    scenario.update(vars(smac_args_))
    scenario.update(vars(scen_args_))
    scen = Scenario(scenario=scenario)

    # Restore state: reload runhistory, stats and trajectories from a
    # previous run's output directory, then continue into a fresh one.
    if main_args_.restore_state:
        root_logger.debug("Restoring state from %s...", main_args_.restore_state)
        rh, stats, traj_list_aclib, traj_list_old = self.restore_state(
            scen, main_args_)

        scen.output_dir_for_this_run = create_output_directory(
            scen, main_args_.seed, root_logger,
        )
        scen.write()
        incumbent = self.restore_state_after_output_dir(
            scen, stats, traj_list_aclib, traj_list_old)

    # Warmstart: merge runs from foreign scenarios/runhistories into a new
    # runhistory (replaces any rh loaded by restore_state above).
    if main_args_.warmstart_runhistory:
        aggregate_func = average_cost
        rh = RunHistory(aggregate_func=aggregate_func)

        scen, rh = merge_foreign_data_from_file(
            scenario=scen,
            runhistory=rh,
            in_scenario_fn_list=main_args_.warmstart_scenario,
            in_runhistory_fn_list=main_args_.warmstart_runhistory,
            cs=scen.cs,
            aggregate_func=aggregate_func)

    # Warmstart incumbents: default config plus the final incumbent of each
    # given trajectory file.
    if main_args_.warmstart_incumbent:
        initial_configs = [scen.cs.get_default_configuration()]
        for traj_fn in main_args_.warmstart_incumbent:
            trajectory = TrajLogger.read_traj_aclib_format(fn=traj_fn, cs=scen.cs)
            initial_configs.append(trajectory[-1]["incumbent"])

    # Dispatch on the requested optimizer mode.
    # NOTE(review): there is no else-branch — an unrecognized mode would leave
    # 'optimizer' unbound and raise NameError below; presumably CMDReader
    # restricts --mode to these choices — confirm.
    if main_args_.mode == "SMAC":
        optimizer = SMAC(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            stats=stats,
            restore_incumbent=incumbent,
            run_id=main_args_.seed)
    elif main_args_.mode == "BORF":
        optimizer = BORF(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            stats=stats,
            restore_incumbent=incumbent,
            run_id=main_args_.seed)
    elif main_args_.mode == "BOGP":
        optimizer = BOGP(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            stats=stats,
            restore_incumbent=incumbent,
            run_id=main_args_.seed)
    elif main_args_.mode == "ROAR":
        optimizer = ROAR(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            run_id=main_args_.seed)
    elif main_args_.mode == "EPILS":
        optimizer = EPILS(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            run_id=main_args_.seed)
    elif main_args_.mode == "Hydra":
        optimizer = Hydra(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            stats=stats,
            restore_incumbent=incumbent,
            run_id=main_args_.seed,
            random_configuration_chooser=main_args_.random_configuration_chooser,
            n_iterations=main_args_.hydra_iterations,
            val_set=main_args_.hydra_validation,
            incs_per_round=main_args_.hydra_incumbents_per_round,
            n_optimizers=main_args_.hydra_n_optimizers)
    elif main_args_.mode == "PSMAC":
        optimizer = PSMAC(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            run_id=main_args_.seed,
            shared_model=smac_args_.shared_model,
            validate=main_args_.psmac_validate,
            n_optimizers=main_args_.hydra_n_optimizers,
            n_incs=main_args_.hydra_incumbents_per_round,
        )
    try:
        optimizer.optimize()
    except (TAEAbortException, FirstRunCrashedException) as err:
        # Expected abort conditions from the target algorithm executor are
        # logged rather than propagated to the CLI user as a traceback.
        self.logger.error(err)
def hpbandster2smac(self, result, cs: ConfigurationSpace, output_dir: str):
    """Read an hpbandster result-object and convert it to SMAC artifacts.

    Treats each budget as an individual 'smac'-run: creates an
    output-directory with one subdirectory per budget, each containing a
    written scenario, a runhistory.json and trajectory files.

    Parameters
    ----------
    result: hpbandster.core.result.Result
        bohb's result-object
    cs: ConfigurationSpace
        the configuration space
    output_dir: str
        the output-dir to save the smac-runs to

    Returns
    -------
    dict
        mapping from budget to the path of the per-budget output directory
    """
    # Create runhistories (one per budget)
    id2config_mapping = result.get_id2config_mapping()
    budget2rh = {}
    for run in result.get_all_runs():
        if run.budget not in budget2rh:
            budget2rh[run.budget] = RunHistory(average_cost)
        rh = budget2rh[run.budget]
        rh.add(config=Configuration(cs, id2config_mapping[run.config_id]['config']),
               cost=run.loss,
               time=run.time_stamps['finished'] - run.time_stamps['started'],
               status=StatusType.SUCCESS,
               seed=0,
               additional_info={'info': run.info})

    # Write to disk
    budget2path = {}  # paths to individual budgets
    for b, rh in budget2rh.items():
        output_path = os.path.join(output_dir, 'budget_' + str(b))
        budget2path[b] = output_path

        scenario = Scenario({'run_obj': 'quality', 'cs': cs})
        scenario.output_dir_for_this_run = output_path
        scenario.write()
        rh.save_json(fn=os.path.join(output_path, 'runhistory.json'))

        # trajectory: replay bohb's incumbent trajectory into SMAC's formats
        traj_dict = result.get_incumbent_trajectory()
        traj_logger = TrajLogger(output_path, Stats(scenario))
        for config_id, time, budget, loss in zip(traj_dict['config_ids'],
                                                 traj_dict['times_finished'],
                                                 traj_dict['budgets'],
                                                 traj_dict['losses']):
            incumbent = Configuration(cs, id2config_mapping[config_id]['config'])
            try:
                incumbent_id = rh.config_ids[incumbent]
            except KeyError:
                # This config was not evaluated on this budget, just skip it
                continue
            ta_time_used = -1     # not tracked by bohb
            wallclock_time = time
            train_perf = loss
            # add the incumbent in both trajectory formats
            traj_logger._add_in_old_format(train_perf, incumbent_id, incumbent,
                                           ta_time_used, wallclock_time)
            traj_logger._add_in_aclib_format(train_perf, incumbent_id, incumbent,
                                             ta_time_used, wallclock_time)
    return budget2path
def __init__(self,
             scenario: Scenario,
             tae_runner: typing.Union[ExecuteTARun, typing.Callable] = None,
             stats: Stats = None,
             runhistory: RunHistory = None,
             intensifier: Intensifier = None,
             rng: typing.Union[np.random.RandomState, int] = None,
             run_id: int = 1,
             parallel_options: str = None):
    """Construct the facade: create the output directory, wire up stats,
    runhistory, trajectory logging, the target-algorithm executor and the
    intensifier, and finally instantiate the ESOptimizer solver.

    Parameters
    ----------
    scenario : Scenario
        Scenario object; also written to the output directory.
    tae_runner : ExecuteTARun or callable, optional
        Target-algorithm executor. If None, an ExecuteTARunOld is built from
        the call string in the scenario; if a callable is given it is wrapped
        by ExecuteTAFuncDict.
    stats : Stats, optional
        Stats object; a fresh one is created from the scenario if not given.
    runhistory : RunHistory, optional
        Runhistory to store all algorithm runs; created empty if not given.
    intensifier : Intensifier, optional
        Intensification object; created from scenario settings if not given.
    rng : np.random.RandomState or int, optional
        Random number generator or seed.
    run_id : int
        Run ID, used as subfolder of the output directory.
    parallel_options : str, optional
        Parallelization mode; defaults to "CL+LIST" if not given
        (semantics defined by ESOptimizer — TODO confirm).
    """
    self._logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

    aggregate_func = average_cost

    self.output_dir = create_output_directory(scenario, run_id)
    scenario.write()

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario)

    # initialize empty runhistory
    if runhistory is None:
        runhistory = RunHistory(aggregate_func=aggregate_func)
    # inject aggr_func if necessary
    if runhistory.aggregate_func is None:
        runhistory.aggregate_func = aggregate_func

    # initial random number generator
    num_run, rng = self._get_rng(rng=rng)

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

    # initialize tae_runner
    # First case, if tae_runner is None, the target algorithm is a call
    # string in the scenario file
    if tae_runner is None:
        tae_runner = ExecuteTARunOld(ta=scenario.ta,
                                     stats=self.stats,
                                     run_obj=scenario.run_obj,
                                     runhistory=runhistory,
                                     par_factor=scenario.par_factor,
                                     cost_for_crash=scenario.cost_for_crash)
    # Second case, the tae_runner is a function to be optimized
    elif callable(tae_runner):
        tae_runner = ExecuteTAFuncDict(ta=tae_runner,
                                       stats=self.stats,
                                       run_obj=scenario.run_obj,
                                       memory_limit=scenario.memory_limit,
                                       runhistory=runhistory,
                                       par_factor=scenario.par_factor,
                                       cost_for_crash=scenario.cost_for_crash)
    else:
        raise TypeError("Target algorithm not supported. Must be either a call "
                        "string in the scenario file or a callable.")

    # Check that overall objective and tae objective are the same
    if tae_runner.run_obj != scenario.run_obj:
        raise ValueError("Objective for the target algorithm runner and "
                         "the scenario must be the same, but are '%s' and "
                         "'%s'" % (tae_runner.run_obj, scenario.run_obj))

    # inject stats if necessary
    if tae_runner.stats is None:
        tae_runner.stats = self.stats
    # inject runhistory if necessary
    if tae_runner.runhistory is None:
        tae_runner.runhistory = runhistory
    # inject cost_for_crash
    if tae_runner.crash_cost != scenario.cost_for_crash:
        tae_runner.crash_cost = scenario.cost_for_crash

    # initialize intensification
    if intensifier is None:
        intensifier = Intensifier(tae_runner=tae_runner,
                                  stats=self.stats,
                                  traj_logger=traj_logger,
                                  rng=rng,
                                  instances=scenario.train_insts,
                                  cutoff=scenario.cutoff,
                                  deterministic=scenario.deterministic,
                                  run_obj_time=scenario.run_obj == "runtime",
                                  always_race_against=scenario.cs.get_default_configuration() \
                                      if scenario.always_race_default else None,
                                  instance_specifics=scenario.instance_specific,
                                  minR=scenario.minR,
                                  maxR=scenario.maxR)
    # inject deps if necessary
    if intensifier.tae_runner is None:
        intensifier.tae_runner = tae_runner
    if intensifier.stats is None:
        intensifier.stats = self.stats
    if intensifier.traj_logger is None:
        intensifier.traj_logger = traj_logger

    if parallel_options is None:
        parallel_options = "CL+LIST"

    # arguments handed to the ESOptimizer; the commented-out entries mirror
    # the full SMBO argument set but are not used by this optimizer
    es_args = {
        'scenario': scenario,
        'stats': self.stats,
        # 'initial_design': initial_design,
        'runhistory': runhistory,
        # 'runhistory2epm': runhistory2epm,
        'intensifier': intensifier,
        'aggregate_func': aggregate_func,
        # 'num_run': num_run,
        # 'model': model,
        # 'acq_optimizer': acquisition_function_optimizer,
        # 'acquisition_func': acquisition_function,
        'rng': rng,
        'parallel_options': parallel_options
    }

    self.solver = ESOptimizer(**es_args)
def __init__(self,
             scenario: Scenario,
             tae_runner: typing.Optional[typing.Union[ExecuteTARun, typing.Callable]] = None,
             runhistory: typing.Optional[RunHistory] = None,
             intensifier: typing.Optional[Intensifier] = None,
             acquisition_function: typing.Optional[AbstractAcquisitionFunction] = None,
             acquisition_function_optimizer: typing.Optional[AcquisitionFunctionMaximizer] = None,
             model: typing.Optional[AbstractEPM] = None,
             runhistory2epm: typing.Optional[AbstractRunHistory2EPM] = None,
             initial_design: typing.Optional[InitialDesign] = None,
             initial_configurations: typing.Optional[typing.List[Configuration]] = None,
             stats: typing.Optional[Stats] = None,
             restore_incumbent: typing.Optional[Configuration] = None,
             rng: typing.Optional[typing.Union[np.random.RandomState, int]] = None,
             smbo_class: typing.Optional[SMBO] = None,
             run_id: typing.Optional[int] = None,
             random_configuration_chooser: typing.Optional[RandomConfigurationChooser] = None):
    """Constructor

    Parameters
    ----------
    scenario : ~smac.scenario.scenario.Scenario
        Scenario object
    tae_runner : ~smac.tae.execute_ta_run.ExecuteTARun or callable
        Callable or implementation of
        :class:`~smac.tae.execute_ta_run.ExecuteTARun`. In case a
        callable is passed it will be wrapped by
        :class:`~smac.tae.execute_func.ExecuteTAFuncDict`.
        If not set, it will be initialized with the
        :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`.
    runhistory : RunHistory
        runhistory to store all algorithm runs
    intensifier : Intensifier
        intensification object to issue a racing to decide the current
        incumbent
    acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
        Object that implements the
        :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`.
        Will use :class:`~smac.optimizer.acquisition.EI` if not set.
    acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer
        Object that implements the
        :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`.
        Will use
        :class:`smac.optimizer.ei_optimization.InterleavedLocalAndRandomSearch`
        if not set.
    model : AbstractEPM
        Model that implements train() and predict(). Will use a
        :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not
        set.
    runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP
        Object that implements the AbstractRunHistory2EPM. If None, will use
        :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost`
        if objective is cost or
        :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost`
        if objective is runtime.
    initial_design : InitialDesign
        initial sampling design
    initial_configurations : typing.List[Configuration]
        list of initial configurations for initial design --
        cannot be used together with initial_design
    stats : Stats
        optional stats object
    rng : np.random.RandomState
        Random number generator
    restore_incumbent : Configuration
        incumbent used if restoring to previous state
    smbo_class : ~smac.optimizer.smbo.SMBO
        Class implementing the SMBO interface which will be used to
        instantiate the optimizer class.
    run_id : int (optional)
        Run ID will be used as subfolder for output_dir. If no ``run_id`` is
        given, a random ``run_id`` will be chosen.
    random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser
        How often to choose a random configuration during the intensification
        procedure.
    """
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

    aggregate_func = average_cost

    self.scenario = scenario
    self.output_dir = ""
    if not restore_incumbent:
        # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an
        # output directory. This is the default path.
        # initial random number generator
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        self.output_dir = create_output_directory(scenario, run_id)
    elif scenario.output_dir is not None:
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        # output-directory is created in CLI when restoring from a
        # folder. calling the function again in the facade results in two
        # folders being created: run_X and run_X.OLD. if we are
        # restoring, the output-folder exists already and we omit creating it,
        # but set the self-output_dir to the dir.
        # necessary because we want to write traj to new output-dir in CLI.
        self.output_dir = scenario.output_dir_for_this_run

    # deterministic quality-scenarios need no repeated runs per config, so
    # intensification is effectively disabled by a tiny percentage
    if (scenario.deterministic is True
            and getattr(scenario, 'tuner_timeout', None) is None
            and scenario.run_obj == 'quality'):
        self.logger.info('Optimizing a deterministic scenario for '
                         'quality without a tuner timeout - will make '
                         'SMAC deterministic!')
        scenario.intensification_percentage = 1e-10
    scenario.write()

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario)

    # runtime objectives are always modeled in log-space
    if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":
        self.logger.warn(
            "Runtime as objective automatically activates log(y) transformation"
        )
        self.scenario.transform_y = "LOG"

    # initialize empty runhistory
    if runhistory is None:
        runhistory = RunHistory(aggregate_func=aggregate_func)
    # inject aggr_func if necessary
    if runhistory.aggregate_func is None:
        runhistory.aggregate_func = aggregate_func

    if not random_configuration_chooser:
        random_configuration_chooser = ChooserProb(prob=scenario.rand_prob, rng=rng)

    # reset random number generator in config space to draw different
    # random configurations with each seed given to SMAC
    scenario.cs.seed(rng.randint(MAXINT))

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

    # initial EPM
    types, bounds = get_types(scenario.cs, scenario.feature_array)
    if model is None:
        model = RandomForestWithInstances(types=types,
                                          bounds=bounds,
                                          instance_features=scenario.feature_array,
                                          seed=rng.randint(MAXINT),
                                          pca_components=scenario.PCA_DIM,
                                          log_y=scenario.transform_y in ["LOG", "LOGS"],
                                          num_trees=scenario.rf_num_trees,
                                          do_bootstrapping=scenario.rf_do_bootstrapping,
                                          ratio_features=scenario.rf_ratio_features,
                                          min_samples_split=scenario.rf_min_samples_split,
                                          min_samples_leaf=scenario.rf_min_samples_leaf,
                                          max_depth=scenario.rf_max_depth)

    # initial acquisition function; LogEI matches log-transformed objectives
    if acquisition_function is None:
        if scenario.transform_y in ["LOG", "LOGS"]:
            acquisition_function = LogEI(model=model)
        else:
            acquisition_function = EI(model=model)
    # inject model if necessary
    if acquisition_function.model is None:
        acquisition_function.model = model

    # initialize optimizer on acquisition function
    if acquisition_function_optimizer is None:
        acquisition_function_optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function=acquisition_function,
            config_space=scenario.cs,
            rng=np.random.RandomState(seed=rng.randint(MAXINT)),
            max_steps=scenario.sls_max_steps,
            n_steps_plateau_walk=scenario.sls_n_steps_plateau_walk)
    elif not isinstance(
            acquisition_function_optimizer,
            AcquisitionFunctionMaximizer,
    ):
        raise ValueError(
            "Argument 'acquisition_function_optimizer' must be of type"
            "'AcquisitionFunctionMaximizer', but is '%s'" %
            type(acquisition_function_optimizer))

    # initialize tae_runner
    # First case, if tae_runner is None, the target algorithm is a call
    # string in the scenario file
    if tae_runner is None:
        tae_runner = ExecuteTARunOld(ta=scenario.ta,
                                     stats=self.stats,
                                     run_obj=scenario.run_obj,
                                     runhistory=runhistory,
                                     par_factor=scenario.par_factor,
                                     cost_for_crash=scenario.cost_for_crash,
                                     abort_on_first_run_crash=scenario.abort_on_first_run_crash)
    # Second case, the tae_runner is a function to be optimized
    elif callable(tae_runner):
        tae_runner = ExecuteTAFuncDict(ta=tae_runner,
                                       stats=self.stats,
                                       run_obj=scenario.run_obj,
                                       memory_limit=scenario.memory_limit,
                                       runhistory=runhistory,
                                       par_factor=scenario.par_factor,
                                       cost_for_crash=scenario.cost_for_crash,
                                       abort_on_first_run_crash=scenario.abort_on_first_run_crash)
    # Third case, if it is an ExecuteTaRun we can simply use the
    # instance. Otherwise, the next check raises an exception
    elif not isinstance(tae_runner, ExecuteTARun):
        raise TypeError("Argument 'tae_runner' is %s, but must be "
                        "either a callable or an instance of "
                        "ExecuteTaRun. Passing 'None' will result in the "
                        "creation of target algorithm runner based on the "
                        "call string in the scenario file." % type(tae_runner))

    # Check that overall objective and tae objective are the same
    if tae_runner.run_obj != scenario.run_obj:
        raise ValueError("Objective for the target algorithm runner and "
                         "the scenario must be the same, but are '%s' and "
                         "'%s'" % (tae_runner.run_obj, scenario.run_obj))

    # inject stats if necessary
    if tae_runner.stats is None:
        tae_runner.stats = self.stats
    # inject runhistory if necessary
    if tae_runner.runhistory is None:
        tae_runner.runhistory = runhistory
    # inject cost_for_crash
    if tae_runner.crash_cost != scenario.cost_for_crash:
        tae_runner.crash_cost = scenario.cost_for_crash

    # initialize intensification
    if intensifier is None:
        intensifier = Intensifier(
            tae_runner=tae_runner,
            stats=self.stats,
            traj_logger=traj_logger,
            rng=rng,
            instances=scenario.train_insts,
            cutoff=scenario.cutoff,
            deterministic=scenario.deterministic,
            run_obj_time=scenario.run_obj == "runtime",
            always_race_against=scenario.cs.get_default_configuration()
            if scenario.always_race_default else None,
            use_ta_time_bound=scenario.use_ta_time,
            instance_specifics=scenario.instance_specific,
            minR=scenario.minR,
            maxR=scenario.maxR,
            adaptive_capping_slackfactor=scenario.intens_adaptive_capping_slackfactor,
            min_chall=scenario.intens_min_chall)
    # inject deps if necessary
    if intensifier.tae_runner is None:
        intensifier.tae_runner = tae_runner
    if intensifier.stats is None:
        intensifier.stats = self.stats
    if intensifier.traj_logger is None:
        intensifier.traj_logger = traj_logger

    # initial design
    if initial_design is not None and initial_configurations is not None:
        raise ValueError(
            "Either use initial_design or initial_configurations; but not both"
        )

    if initial_configurations is not None:
        initial_design = MultiConfigInitialDesign(tae_runner=tae_runner,
                                                  scenario=scenario,
                                                  stats=self.stats,
                                                  traj_logger=traj_logger,
                                                  runhistory=runhistory,
                                                  rng=rng,
                                                  configs=initial_configurations,
                                                  intensifier=intensifier,
                                                  aggregate_func=aggregate_func)
    elif initial_design is None:
        # pick an initial design matching the scenario's initial_incumbent
        if scenario.initial_incumbent == "DEFAULT":
            initial_design = DefaultConfiguration(tae_runner=tae_runner,
                                                  scenario=scenario,
                                                  stats=self.stats,
                                                  traj_logger=traj_logger,
                                                  rng=rng)
        elif scenario.initial_incumbent == "RANDOM":
            initial_design = RandomConfiguration(tae_runner=tae_runner,
                                                 scenario=scenario,
                                                 stats=self.stats,
                                                 traj_logger=traj_logger,
                                                 rng=rng)
        elif scenario.initial_incumbent == "LHD":
            initial_design = LHDesign(runhistory=runhistory,
                                      intensifier=intensifier,
                                      aggregate_func=aggregate_func,
                                      tae_runner=tae_runner,
                                      scenario=scenario,
                                      stats=self.stats,
                                      traj_logger=traj_logger,
                                      rng=rng)
        elif scenario.initial_incumbent == "FACTORIAL":
            initial_design = FactorialInitialDesign(runhistory=runhistory,
                                                    intensifier=intensifier,
                                                    aggregate_func=aggregate_func,
                                                    tae_runner=tae_runner,
                                                    scenario=scenario,
                                                    stats=self.stats,
                                                    traj_logger=traj_logger,
                                                    rng=rng)
        elif scenario.initial_incumbent == "SOBOL":
            initial_design = SobolDesign(runhistory=runhistory,
                                         intensifier=intensifier,
                                         aggregate_func=aggregate_func,
                                         tae_runner=tae_runner,
                                         scenario=scenario,
                                         stats=self.stats,
                                         traj_logger=traj_logger,
                                         rng=rng)
        else:
            raise ValueError("Don't know what kind of initial_incumbent "
                             "'%s' is" % scenario.initial_incumbent)
    # inject deps if necessary
    if initial_design.tae_runner is None:
        initial_design.tae_runner = tae_runner
    if initial_design.scenario is None:
        initial_design.scenario = scenario
    if initial_design.stats is None:
        initial_design.stats = self.stats
    if initial_design.traj_logger is None:
        initial_design.traj_logger = traj_logger

    # initial conversion of runhistory into EPM data
    if runhistory2epm is None:
        num_params = len(scenario.cs.get_hyperparameters())
        if scenario.run_obj == 'runtime':
            # if we log the performance data,
            # the RFRImputator will already get
            # log transform data from the runhistory
            cutoff = np.log(scenario.cutoff)
            threshold = np.log(scenario.cutoff * scenario.par_factor)

            imputor = RFRImputator(rng=rng,
                                   cutoff=cutoff,
                                   threshold=threshold,
                                   model=model,
                                   change_threshold=0.01,
                                   max_iter=2)

            runhistory2epm = RunHistory2EPM4LogCost(
                scenario=scenario,
                num_params=num_params,
                success_states=[
                    StatusType.SUCCESS,
                ],
                impute_censored_data=True,
                impute_state=[
                    StatusType.CAPPED,
                ],
                imputor=imputor)
        elif scenario.run_obj == 'quality':
            # transform_y selects which cost transformation the EPM trains on
            if scenario.transform_y == "NONE":
                runhistory2epm = RunHistory2EPM4Cost(
                    scenario=scenario,
                    num_params=num_params,
                    success_states=[StatusType.SUCCESS, StatusType.CRASHED],
                    impute_censored_data=False,
                    impute_state=None)
            elif scenario.transform_y == "LOG":
                runhistory2epm = RunHistory2EPM4LogCost(
                    scenario=scenario,
                    num_params=num_params,
                    success_states=[StatusType.SUCCESS, StatusType.CRASHED],
                    impute_censored_data=False,
                    impute_state=None)
            elif scenario.transform_y == "LOGS":
                runhistory2epm = RunHistory2EPM4LogScaledCost(
                    scenario=scenario,
                    num_params=num_params,
                    success_states=[StatusType.SUCCESS, StatusType.CRASHED],
                    impute_censored_data=False,
                    impute_state=None)
            elif scenario.transform_y == "INVS":
                runhistory2epm = RunHistory2EPM4InvScaledCost(
                    scenario=scenario,
                    num_params=num_params,
                    success_states=[StatusType.SUCCESS, StatusType.CRASHED],
                    impute_censored_data=False,
                    impute_state=None)
        else:
            raise ValueError('Unknown run objective: %s. Should be either '
                             'quality or runtime.' % self.scenario.run_obj)

    # inject scenario if necessary:
    if runhistory2epm.scenario is None:
        runhistory2epm.scenario = scenario

    smbo_args = {
        'scenario': scenario,
        'stats': self.stats,
        'initial_design': initial_design,
        'runhistory': runhistory,
        'runhistory2epm': runhistory2epm,
        'intensifier': intensifier,
        'aggregate_func': aggregate_func,
        'num_run': run_id,
        'model': model,
        'acq_optimizer': acquisition_function_optimizer,
        'acquisition_func': acquisition_function,
        'rng': rng,
        'restore_incumbent': restore_incumbent,
        'random_configuration_chooser': random_configuration_chooser
    }

    if smbo_class is None:
        self.solver = SMBO(**smbo_args)
    else:
        self.solver = smbo_class(**smbo_args)
def hpbandster2smac(self, folder, result, cs_options, output_dir: str):
    """Read an hpbandster result-object and create RunHistory and trajectory.

    Parameters
    ----------
    folder: str (path)
        original folder
    result: hpbandster.core.result.Result
        bohb's result-object
    cs_options: list[ConfigurationSpace]
        the configuration spaces. in the best case it's a single element, but
        for pcs-format we need to guess through a list of possible configspaces
    output_dir: str
        the output-dir to save the smac-runs to

    Returns
    -------
    converted: dict{
        'new_path' : path_to_converted_input,
        'hp_bandster_result' : result_in_hpbandster_format,
        'config_space' : config_space,
        'runhistory' : runhistory,
        'validated_runhistory' : validated_runhistory,
        'scenario' : scenario,
        'trajectory' : trajectory,
        }

    Raises
    ------
    ValueError
        if none of the candidate configuration spaces can load the configs
    """
    self.logger.debug("Budgets for '%s': %s" % (folder, str(result.HB_config['budgets'])))

    ##########################
    # 1. Create runhistory   #
    ##########################
    id2config_mapping = result.get_id2config_mapping()
    skipped = {'None': 0, 'NaN': 0}
    rh = RunHistory()
    for run in result.get_all_runs():
        # Load config, falling back through the candidate configspaces until
        # one of them can reconstruct this configuration
        config = None
        while config is None:
            if len(cs_options) == 0:
                self.logger.debug("None of the alternatives worked...")
                raise ValueError(
                    "Your configspace seems to be corrupt. If you use floats (or mix up ints, bools "
                    "and strings) as categoricals, please consider using the .json-format, as the "
                    ".pcs-format cannot recover the type of categoricals. Otherwise please report "
                    "this to https://github.com/automl/CAVE/issues (and attach the debug.log)"
                )
            try:
                config = self._get_config(run.config_id, id2config_mapping, cs_options[0])
            except ValueError:
                # BUGFIX: parenthesize the subtraction -- '%' binds tighter
                # than '-', so the original expression tried to compute
                # (str % int) - 1 and raised a TypeError at runtime.
                self.logger.debug(
                    "Loading config failed. Trying %d alternatives" % (len(cs_options) - 1),
                    exc_info=1)
                cs_options = cs_options[1:]  # remove the failing cs-version

        # Filter corrupted loss-values (ignore them)
        if run.loss is None:
            skipped['None'] += 1
            continue
        if np.isnan(run.loss):
            skipped['NaN'] += 1
            continue

        rh.add(config=config,
               cost=run.loss,
               time=run.time_stamps['finished'] - run.time_stamps['started'],
               status=StatusType.SUCCESS,
               budget=run.budget,
               seed=0,
               additional_info={'info': run.info, 'timestamps': run.time_stamps})

    self.logger.debug("Skipped %d None- and %d NaN-loss-values in BOHB-result",
                      skipped['None'], skipped['NaN'])

    ##########################
    # 2. Create all else     #
    ##########################
    scenario = Scenario({
        'run_obj': 'quality',
        'cs': cs_options[0],
        'output_dir': output_dir,
        'deterministic': True,  # At the time of writing, BOHB is always treating ta's as deterministic
    })
    scenario.output_dir_for_this_run = output_dir
    scenario.write()
    with open(os.path.join(output_dir, 'configspace.json'), 'w') as fh:
        fh.write(pcs_json.write(cs_options[0]))

    rh.save_json(fn=os.path.join(output_dir, 'runhistory.json'))
    trajectory = self.get_trajectory(result, output_dir, scenario, rh)

    return {
        'new_path': output_dir,
        'hpbandster_result': result,
        'config_space': cs_options[0],
        'runhistory': rh,
        'validated_runhistory': None,
        'scenario': scenario,
        'trajectory': trajectory,
    }
def hpbandster2smac(self, folder2result, cs: ConfigurationSpace, backup_cs, output_dir: str):
    """Read hpbandster result-objects and create RunHistory and trajectory.

    Treats each budget as an individual 'smac'-run: creates an
    output-directory with one subdirectory per budget.

    Parameters
    ----------
    folder2result: Dict(str : hpbandster.core.result.Result)
        folder mapping to bohb's result-objects
    cs: ConfigurationSpace
        the configuration space
    backup_cs: List[ConfigurationSpace]
        if loading a configuration fails, try configspaces from this list
        until succeed
    output_dir: str
        the output-dir to save the smac-runs to

    Returns
    -------
    OrderedDict
        mapping from budget to the path of the per-budget output directory

    Raises
    ------
    ValueError
        if neither `cs` nor any backup configspace can load a configuration
    """
    # Create runhistories (one per budget)
    budget2rh = OrderedDict()
    for folder, result in folder2result.items():
        self.logger.debug("Budgets for '%s': %s" % (folder, str(result.HB_config['budgets'])))
        id2config_mapping = result.get_id2config_mapping()
        skipped = {'None': 0, 'NaN': 0}
        for run in result.get_all_runs():
            if run.budget not in budget2rh:
                budget2rh[run.budget] = RunHistory(average_cost)
            rh = budget2rh[run.budget]

            # Load config, falling back to the backup configspaces on failure
            try:
                config = self._get_config(run.config_id, id2config_mapping, cs)
            except ValueError:
                self.logger.debug("Loading configuration failed... trying alternatives",
                                  exc_info=1)
                for bcs in backup_cs:
                    try:
                        config = self._get_config(run.config_id, id2config_mapping, bcs)
                        # remember the working configspace for subsequent runs
                        cs = bcs
                        break
                    except ValueError:
                        self.logger.debug("", exc_info=1)
                else:
                    self.logger.debug("None of the alternatives worked...")
                    raise ValueError(
                        "Your configspace seems to be corrupt. If you use floats (or mix up ints, bools and strings) as categoricals, "
                        "please consider using the .json-format, as the .pcs-format cannot recover the type "
                        "of categoricals. Otherwise please report this to "
                        "https://github.com/automl/CAVE/issues (and attach the debug.log)"
                    )

            # Filter corrupted loss-values (ignore them)
            if run.loss is None:
                skipped['None'] += 1
                continue
            if np.isnan(run.loss):
                skipped['NaN'] += 1
                continue

            rh.add(config=config,
                   cost=run.loss,
                   time=run.time_stamps['finished'] - run.time_stamps['started'],
                   status=StatusType.SUCCESS,
                   seed=0,
                   additional_info={'info': run.info, 'timestamps': run.time_stamps})

        self.logger.debug("Skipped %d None- and %d NaN-loss-values in BOHB-result",
                          skipped['None'], skipped['NaN'])

    # Write to disk
    budget2path = OrderedDict()  # paths to individual budgets
    self.logger.info(
        "Assuming BOHB treats target algorithms as deterministic (and does not re-evaluate)"
    )
    formatted_budgets = format_budgets(budget2rh.keys())
    for b, rh in budget2rh.items():
        output_path = os.path.join(output_dir, formatted_budgets[b])
        budget2path[b] = output_path

        scenario = Scenario({
            'run_obj': 'quality',
            'cs': cs,
            'output_dir': output_dir,
            'deterministic': True,  # At the time of writing, BOHB is always treating ta's as deterministic
        })
        scenario.output_dir_for_this_run = output_path
        scenario.write()
        with open(os.path.join(output_path, 'configspace.json'), 'w') as fh:
            fh.write(pcs_json.write(cs))

        rh.save_json(fn=os.path.join(output_path, 'runhistory.json'))

        self.get_trajectory(folder2result, output_path, scenario, rh, budget=b)

    return budget2path
def __init__( self, scenario: Scenario, tae_runner: Optional[Union[Type[BaseRunner], Callable]] = None, tae_runner_kwargs: Optional[Dict] = None, runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None, runhistory_kwargs: Optional[Dict] = None, intensifier: Optional[Type[AbstractRacer]] = None, intensifier_kwargs: Optional[Dict] = None, acquisition_function: Optional[ Type[AbstractAcquisitionFunction]] = None, acquisition_function_kwargs: Optional[Dict] = None, integrate_acquisition_function: bool = False, acquisition_function_optimizer: Optional[ Type[AcquisitionFunctionMaximizer]] = None, acquisition_function_optimizer_kwargs: Optional[Dict] = None, model: Optional[Type[AbstractEPM]] = None, model_kwargs: Optional[Dict] = None, runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None, runhistory2epm_kwargs: Optional[Dict] = None, initial_design: Optional[Type[InitialDesign]] = None, initial_design_kwargs: Optional[Dict] = None, initial_configurations: Optional[List[Configuration]] = None, stats: Optional[Stats] = None, restore_incumbent: Optional[Configuration] = None, rng: Optional[Union[np.random.RandomState, int]] = None, smbo_class: Optional[Type[SMBO]] = None, run_id: Optional[int] = None, random_configuration_chooser: Optional[ Type[RandomConfigurationChooser]] = None, random_configuration_chooser_kwargs: Optional[Dict] = None, dask_client: Optional[dask.distributed.Client] = None, n_jobs: Optional[int] = 1, ): """ Constructor Parameters ---------- scenario : ~smac.scenario.scenario.Scenario Scenario object tae_runner : ~smac.tae.base.BaseRunner or callable Callable or implementation of :class:`~smac.tae.base.BaseRunner`. In case a callable is passed it will be wrapped by :class:`~smac.tae.execute_func.ExecuteTAFuncDict`. If not set, it will be initialized with the :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`. 
tae_runner_kwargs: Optional[Dict] arguments passed to constructor of '~tae_runner' runhistory : RunHistory runhistory to store all algorithm runs runhistory_kwargs : Optional[Dict] arguments passed to constructor of runhistory. We strongly advise against changing the aggregation function, since it will break some code assumptions intensifier : Intensifier intensification object to issue a racing to decide the current incumbent intensifier_kwargs: Optional[Dict] arguments passed to the constructor of '~intensifier' acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction Class or object that implements the :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`. Will use :class:`~smac.optimizer.acquisition.EI` or :class:`~smac.optimizer.acquisition.LogEI` if not set. `~acquisition_function_kwargs` is passed to the class constructor. acquisition_function_kwargs : Optional[Dict] dictionary to pass specific arguments to ~acquisition_function integrate_acquisition_function : bool, default=False Whether to integrate the acquisition function. Works only with models which can sample their hyperparameters (i.e. GaussianProcessMCMC). acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer Object that implements the :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`. Will use :class:`smac.optimizer.ei_optimization.LocalAndSortedRandomSearch` if not set. acquisition_function_optimizer_kwargs: Optional[Dict] Arguments passed to constructor of '~acquisition_function_optimizer' model : AbstractEPM Model that implements train() and predict(). Will use a :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not set. model_kwargs : Optional[Dict] Arguments passed to constructor of '~model' runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP Object that implements the AbstractRunHistory2EPM. 
If None, will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost` if objective is cost or :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost` if objective is runtime. runhistory2epm_kwargs: Optional[Dict] Arguments passed to the constructor of '~runhistory2epm' initial_design : InitialDesign initial sampling design initial_design_kwargs: Optional[Dict] arguments passed to constructor of `~initial_design' initial_configurations : List[Configuration] list of initial configurations for initial design -- cannot be used together with initial_design stats : Stats optional stats object rng : np.random.RandomState Random number generator restore_incumbent : Configuration incumbent used if restoring to previous state smbo_class : ~smac.optimizer.smbo.SMBO Class implementing the SMBO interface which will be used to instantiate the optimizer class. run_id : int (optional) Run ID will be used as subfolder for output_dir. If no ``run_id`` is given, a random ``run_id`` will be chosen. random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser How often to choose a random configuration during the intensification procedure. random_configuration_chooser_kwargs : Optional[Dict] arguments of constructor for '~random_configuration_chooser' dask_client : dask.distributed.Client User-created dask client, can be used to start a dask cluster and then attach SMAC to it. n_jobs : int, optional Number of jobs. If > 1 or -1, this creates a dask client if ``dask_client`` is ``None``. Will be ignored if ``dask_client`` is not ``None``. If ``None``, this value will be set to 1, if ``-1``, this will be set to the number of cpu cores. """ self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__) self.scenario = scenario self.output_dir = "" if not restore_incumbent: # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an # output directory. 
This is the default path. # initial random number generator run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger) self.output_dir = create_output_directory(scenario, run_id) elif scenario.output_dir is not None: # type: ignore[attr-defined] # noqa F821 run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger) # output-directory is created in CLI when restoring from a # folder. calling the function again in the facade results in two # folders being created: run_X and run_X.OLD. if we are # restoring, the output-folder exists already and we omit creating it, # but set the self-output_dir to the dir. # necessary because we want to write traj to new output-dir in CLI. self.output_dir = cast(str, scenario.output_dir_for_this_run ) # type: ignore[attr-defined] # noqa F821 rng = cast(np.random.RandomState, rng) if (scenario.deterministic is True # type: ignore[attr-defined] # noqa F821 and getattr(scenario, 'tuner_timeout', None) is None and scenario.run_obj == 'quality' # type: ignore[attr-defined] # noqa F821 ): self.logger.info( 'Optimizing a deterministic scenario for quality without a tuner timeout - will make ' 'SMAC deterministic and only evaluate one configuration per iteration!' 
) scenario.intensification_percentage = 1e-10 # type: ignore[attr-defined] # noqa F821 scenario.min_chall = 1 # type: ignore[attr-defined] # noqa F821 scenario.write() # initialize stats object if stats: self.stats = stats else: self.stats = Stats(scenario) if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG": # type: ignore[attr-defined] # noqa F821 self.logger.warning( "Runtime as objective automatically activates log(y) transformation" ) self.scenario.transform_y = "LOG" # type: ignore[attr-defined] # noqa F821 # initialize empty runhistory runhistory_def_kwargs = {} if runhistory_kwargs is not None: runhistory_def_kwargs.update(runhistory_kwargs) if runhistory is None: runhistory = RunHistory(**runhistory_def_kwargs) elif inspect.isclass(runhistory): runhistory = runhistory( **runhistory_def_kwargs) # type: ignore[operator] # noqa F821 elif isinstance(runhistory, RunHistory): pass else: raise ValueError( 'runhistory has to be a class or an object of RunHistory') rand_conf_chooser_kwargs = {'rng': rng} if random_configuration_chooser_kwargs is not None: rand_conf_chooser_kwargs.update( random_configuration_chooser_kwargs) if random_configuration_chooser is None: if 'prob' not in rand_conf_chooser_kwargs: rand_conf_chooser_kwargs[ 'prob'] = scenario.rand_prob # type: ignore[attr-defined] # noqa F821 random_configuration_chooser_instance = ( ChooserProb(**rand_conf_chooser_kwargs ) # type: ignore[arg-type] # noqa F821 ) # type: RandomConfigurationChooser elif inspect.isclass(random_configuration_chooser): random_configuration_chooser_instance = random_configuration_chooser( ** rand_conf_chooser_kwargs) # type: ignore[arg-type] # noqa F821 elif not isinstance(random_configuration_chooser, RandomConfigurationChooser): raise ValueError( "random_configuration_chooser has to be" " a class or object of RandomConfigurationChooser") # reset random number generator in config space to draw different # random configurations with each seed given to 
SMAC scenario.cs.seed( rng.randint(MAXINT)) # type: ignore[attr-defined] # noqa F821 # initial Trajectory Logger traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats) # initial EPM types, bounds = get_types( scenario.cs, scenario.feature_array) # type: ignore[attr-defined] # noqa F821 model_def_kwargs = { 'types': types, 'bounds': bounds, 'instance_features': scenario.feature_array, 'seed': rng.randint(MAXINT), 'pca_components': scenario.PCA_DIM, } if model_kwargs is not None: model_def_kwargs.update(model_kwargs) if model is None: for key, value in { 'log_y': scenario.transform_y in ["LOG", "LOGS"], # type: ignore[attr-defined] # noqa F821 'num_trees': scenario. rf_num_trees, # type: ignore[attr-defined] # noqa F821 'do_bootstrapping': scenario. rf_do_bootstrapping, # type: ignore[attr-defined] # noqa F821 'ratio_features': scenario. rf_ratio_features, # type: ignore[attr-defined] # noqa F821 'min_samples_split': scenario. rf_min_samples_split, # type: ignore[attr-defined] # noqa F821 'min_samples_leaf': scenario. rf_min_samples_leaf, # type: ignore[attr-defined] # noqa F821 'max_depth': scenario. 
rf_max_depth, # type: ignore[attr-defined] # noqa F821 }.items(): if key not in model_def_kwargs: model_def_kwargs[key] = value model_def_kwargs[ 'configspace'] = self.scenario.cs # type: ignore[attr-defined] # noqa F821 model_instance = ( RandomForestWithInstances( **model_def_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: AbstractEPM elif inspect.isclass(model): model_def_kwargs[ 'configspace'] = self.scenario.cs # type: ignore[attr-defined] # noqa F821 model_instance = model( **model_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError("Model not recognized: %s" % (type(model))) # initial acquisition function acq_def_kwargs = {'model': model_instance} if acquisition_function_kwargs is not None: acq_def_kwargs.update(acquisition_function_kwargs) if acquisition_function is None: if scenario.transform_y in [ "LOG", "LOGS" ]: # type: ignore[attr-defined] # noqa F821 acquisition_function_instance = ( LogEI(** acq_def_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: AbstractAcquisitionFunction else: acquisition_function_instance = EI( **acq_def_kwargs) # type: ignore[arg-type] # noqa F821 elif inspect.isclass(acquisition_function): acquisition_function_instance = acquisition_function( **acq_def_kwargs) else: raise TypeError( "Argument acquisition_function must be None or an object implementing the " "AbstractAcquisitionFunction, not %s." 
% type(acquisition_function)) if integrate_acquisition_function: acquisition_function_instance = IntegratedAcquisitionFunction( acquisition_function=acquisition_function_instance, **acq_def_kwargs) # initialize optimizer on acquisition function acq_func_opt_kwargs = { 'acquisition_function': acquisition_function_instance, 'config_space': scenario.cs, # type: ignore[attr-defined] # noqa F821 'rng': rng, } if acquisition_function_optimizer_kwargs is not None: acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs) if acquisition_function_optimizer is None: for key, value in { 'max_steps': scenario. sls_max_steps, # type: ignore[attr-defined] # noqa F821 'n_steps_plateau_walk': scenario. sls_n_steps_plateau_walk, # type: ignore[attr-defined] # noqa F821 }.items(): if key not in acq_func_opt_kwargs: acq_func_opt_kwargs[key] = value acquisition_function_optimizer_instance = ( LocalAndSortedRandomSearch( ** acq_func_opt_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: AcquisitionFunctionMaximizer elif inspect.isclass(acquisition_function_optimizer): acquisition_function_optimizer_instance = acquisition_function_optimizer( **acq_func_opt_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError( "Argument acquisition_function_optimizer must be None or an object implementing the " "AcquisitionFunctionMaximizer, but is '%s'" % type(acquisition_function_optimizer)) # initialize tae_runner # First case, if tae_runner is None, the target algorithm is a call # string in the scenario file tae_def_kwargs = { 'stats': self.stats, 'run_obj': scenario.run_obj, 'par_factor': scenario.par_factor, # type: ignore[attr-defined] # noqa F821 'cost_for_crash': scenario.cost_for_crash, # type: ignore[attr-defined] # noqa F821 'abort_on_first_run_crash': scenario. 
abort_on_first_run_crash # type: ignore[attr-defined] # noqa F821 } if tae_runner_kwargs is not None: tae_def_kwargs.update(tae_runner_kwargs) if 'ta' not in tae_def_kwargs: tae_def_kwargs[ 'ta'] = scenario.ta # type: ignore[attr-defined] # noqa F821 if tae_runner is None: tae_def_kwargs[ 'ta'] = scenario.ta # type: ignore[attr-defined] # noqa F821 tae_runner_instance = ( ExecuteTARunOld( **tae_def_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: BaseRunner elif inspect.isclass(tae_runner): tae_runner_instance = cast(BaseRunner, tae_runner( **tae_def_kwargs)) # type: ignore[arg-type] # noqa F821 elif callable(tae_runner): tae_def_kwargs['ta'] = tae_runner tae_def_kwargs[ 'use_pynisher'] = scenario.limit_resources # type: ignore[attr-defined] # noqa F821 tae_runner_instance = ExecuteTAFuncDict( **tae_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError( "Argument 'tae_runner' is %s, but must be " "either None, a callable or an object implementing " "BaseRunner. Passing 'None' will result in the " "creation of target algorithm runner based on the " "call string in the scenario file." % type(tae_runner)) # In case of a parallel run, wrap the single worker in a parallel # runner if n_jobs is None or n_jobs == 1: _n_jobs = 1 elif n_jobs == -1: _n_jobs = joblib.cpu_count() elif n_jobs > 0: _n_jobs = n_jobs else: raise ValueError( 'Number of tasks must be positive, None or -1, but is %s' % str(n_jobs)) if _n_jobs > 1 or dask_client is not None: tae_runner_instance = DaskParallelRunner( tae_runner_instance, n_workers=_n_jobs, output_directory=self.output_dir, dask_client=dask_client, ) # Check that overall objective and tae objective are the same # TODO: remove these two ignores once the scenario object knows all its attributes! 
if tae_runner_instance.run_obj != scenario.run_obj: # type: ignore[union-attr] # noqa F821 raise ValueError( "Objective for the target algorithm runner and " "the scenario must be the same, but are '%s' and " "'%s'" % (tae_runner_instance.run_obj, scenario.run_obj)) # type: ignore[union-attr] # noqa F821 # initialize intensification intensifier_def_kwargs = { 'stats': self.stats, 'traj_logger': traj_logger, 'rng': rng, 'instances': scenario.train_insts, # type: ignore[attr-defined] # noqa F821 'cutoff': scenario.cutoff, # type: ignore[attr-defined] # noqa F821 'deterministic': scenario.deterministic, # type: ignore[attr-defined] # noqa F821 'run_obj_time': scenario.run_obj == "runtime", # type: ignore[attr-defined] # noqa F821 'instance_specifics': scenario. instance_specific, # type: ignore[attr-defined] # noqa F821 'adaptive_capping_slackfactor': scenario. intens_adaptive_capping_slackfactor, # type: ignore[attr-defined] # noqa F821 'min_chall': scenario.intens_min_chall # type: ignore[attr-defined] # noqa F821 } if isinstance(intensifier, Intensifier) \ or (intensifier is not None and inspect.isclass(intensifier) and issubclass(intensifier, Intensifier)): intensifier_def_kwargs[ 'always_race_against'] = scenario.cs.get_default_configuration( ) # type: ignore[attr-defined] # noqa F821 intensifier_def_kwargs[ 'use_ta_time_bound'] = scenario.use_ta_time # type: ignore[attr-defined] # noqa F821 intensifier_def_kwargs[ 'minR'] = scenario.minR # type: ignore[attr-defined] # noqa F821 intensifier_def_kwargs[ 'maxR'] = scenario.maxR # type: ignore[attr-defined] # noqa F821 if intensifier_kwargs is not None: intensifier_def_kwargs.update(intensifier_kwargs) if intensifier is None: intensifier_instance = ( Intensifier(**intensifier_def_kwargs ) # type: ignore[arg-type] # noqa F821 ) # type: AbstractRacer elif inspect.isclass(intensifier): intensifier_instance = intensifier( **intensifier_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError( "Argument 
intensifier must be None or an object implementing the AbstractRacer, but is '%s'" % type(intensifier)) # initial design if initial_design is not None and initial_configurations is not None: raise ValueError( "Either use initial_design or initial_configurations; but not both" ) init_design_def_kwargs = { 'cs': scenario.cs, # type: ignore[attr-defined] # noqa F821 'traj_logger': traj_logger, 'rng': rng, 'ta_run_limit': scenario.ta_run_limit, # type: ignore[attr-defined] # noqa F821 'configs': initial_configurations, 'n_configs_x_params': 0, 'max_config_fracs': 0.0 } if initial_design_kwargs is not None: init_design_def_kwargs.update(initial_design_kwargs) if initial_configurations is not None: initial_design_instance = InitialDesign(**init_design_def_kwargs) elif initial_design is None: if scenario.initial_incumbent == "DEFAULT": # type: ignore[attr-defined] # noqa F821 init_design_def_kwargs['max_config_fracs'] = 0.0 initial_design_instance = DefaultConfiguration( **init_design_def_kwargs) elif scenario.initial_incumbent == "RANDOM": # type: ignore[attr-defined] # noqa F821 init_design_def_kwargs['max_config_fracs'] = 0.0 initial_design_instance = RandomConfigurations( **init_design_def_kwargs) elif scenario.initial_incumbent == "LHD": # type: ignore[attr-defined] # noqa F821 initial_design_instance = LHDesign(**init_design_def_kwargs) elif scenario.initial_incumbent == "FACTORIAL": # type: ignore[attr-defined] # noqa F821 initial_design_instance = FactorialInitialDesign( **init_design_def_kwargs) elif scenario.initial_incumbent == "SOBOL": # type: ignore[attr-defined] # noqa F821 initial_design_instance = SobolDesign(**init_design_def_kwargs) else: raise ValueError("Don't know what kind of initial_incumbent " "'%s' is" % scenario.initial_incumbent ) # type: ignore[attr-defined] # noqa F821 elif inspect.isclass(initial_design): initial_design_instance = initial_design(**init_design_def_kwargs) else: raise TypeError( "Argument initial_design must be None or an 
object implementing the InitialDesign, but is '%s'" % type(initial_design)) # if we log the performance data, # the RFRImputator will already get # log transform data from the runhistory if scenario.transform_y in [ "LOG", "LOGS" ]: # type: ignore[attr-defined] # noqa F821 cutoff = np.log(np.nanmin([ np.inf, np.float_(scenario.cutoff) ])) # type: ignore[attr-defined] # noqa F821 threshold = cutoff + np.log( scenario.par_factor) # type: ignore[attr-defined] # noqa F821 else: cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff) ]) # type: ignore[attr-defined] # noqa F821 threshold = cutoff * scenario.par_factor # type: ignore[attr-defined] # noqa F821 num_params = len(scenario.cs.get_hyperparameters() ) # type: ignore[attr-defined] # noqa F821 imputor = RFRImputator(rng=rng, cutoff=cutoff, threshold=threshold, model=model_instance, change_threshold=0.01, max_iter=2) r2e_def_kwargs = { 'scenario': scenario, 'num_params': num_params, 'success_states': [ StatusType.SUCCESS, ], 'impute_censored_data': True, 'impute_state': [ StatusType.CAPPED, ], 'imputor': imputor, 'scale_perc': 5 } if scenario.run_obj == 'quality': r2e_def_kwargs.update({ 'success_states': [StatusType.SUCCESS, StatusType.CRASHED, StatusType.MEMOUT], 'impute_censored_data': False, 'impute_state': None, }) if isinstance( intensifier_instance, (SuccessiveHalving, Hyperband)) and scenario.run_obj == "quality": r2e_def_kwargs.update({ 'success_states': [ StatusType.SUCCESS, StatusType.CRASHED, StatusType.MEMOUT, StatusType.DONOTADVANCE, ], 'consider_for_higher_budgets_state': [ StatusType.DONOTADVANCE, StatusType.TIMEOUT, StatusType.CRASHED, StatusType.MEMOUT, ], }) if runhistory2epm_kwargs is not None: r2e_def_kwargs.update(runhistory2epm_kwargs) if runhistory2epm is None: if scenario.run_obj == 'runtime': rh2epm = ( RunHistory2EPM4LogCost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: AbstractRunHistory2EPM elif scenario.run_obj == 'quality': if scenario.transform_y == "NONE": # type: 
ignore[attr-defined] # noqa F821 rh2epm = RunHistory2EPM4Cost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 elif scenario.transform_y == "LOG": # type: ignore[attr-defined] # noqa F821 rh2epm = RunHistory2EPM4LogCost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 elif scenario.transform_y == "LOGS": # type: ignore[attr-defined] # noqa F821 rh2epm = RunHistory2EPM4LogScaledCost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 elif scenario.transform_y == "INVS": # type: ignore[attr-defined] # noqa F821 rh2epm = RunHistory2EPM4InvScaledCost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise ValueError('Unknown run objective: %s. Should be either ' 'quality or runtime.' % self.scenario.run_obj) elif inspect.isclass(runhistory2epm): rh2epm = runhistory2epm( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError( "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'" % type(runhistory2epm)) smbo_args = { 'scenario': scenario, 'stats': self.stats, 'initial_design': initial_design_instance, 'runhistory': runhistory, 'runhistory2epm': rh2epm, 'intensifier': intensifier_instance, 'num_run': run_id, 'model': model_instance, 'acq_optimizer': acquisition_function_optimizer_instance, 'acquisition_func': acquisition_function_instance, 'rng': rng, 'restore_incumbent': restore_incumbent, 'random_configuration_chooser': random_configuration_chooser_instance, 'tae_runner': tae_runner_instance, } # type: Dict[str, Any] if smbo_class is None: self.solver = SMBO(** smbo_args) # type: ignore[arg-type] # noqa F821 else: self.solver = smbo_class( **smbo_args) # type: ignore[arg-type] # noqa F821
def main_cli(self):
    """Main function of SMAC for the CLI interface.

    Parses the command line via ``CMDReader``, configures root logging,
    optionally restores a previous run and/or warm-starts from existing
    runhistories/trajectories, builds the scenario, instantiates the
    requested optimizer (SMAC / ROAR / EPILS) and runs it.

    Raises
    ------
    ValueError
        If ``args_.mode`` is not one of ``"SMAC"``, ``"ROAR"`` or
        ``"EPILS"``.  (Previously an unknown mode left ``optimizer``
        unbound and crashed later with an opaque ``UnboundLocalError``.)
    """
    self.logger.info("SMAC call: %s" % (" ".join(sys.argv)))

    cmd_reader = CMDReader()
    args_, misc_args = cmd_reader.read_cmd()

    # Configure the root logger: terse format at INFO and above,
    # timestamped verbose format below INFO (i.e. DEBUG).
    root_logger = logging.getLogger()
    root_logger.setLevel(args_.verbose_level)
    logger_handler = logging.StreamHandler(stream=sys.stdout)
    if root_logger.level >= logging.INFO:
        formatter = logging.Formatter("%(levelname)s:\t%(message)s")
    else:
        formatter = logging.Formatter(
            "%(asctime)s:%(levelname)s:%(name)s:%(message)s",
            "%Y-%m-%d %H:%M:%S")
    logger_handler.setFormatter(formatter)
    root_logger.addHandler(logger_handler)
    # remove default handler so messages are not emitted twice
    root_logger.removeHandler(root_logger.handlers[0])

    # Create defaults
    rh = None
    initial_configs = None
    stats = None
    incumbent = None

    # Restore state (needs to be before scenario-creation!)
    if args_.restore_state:
        root_logger.debug("Restoring state from %s...", args_.restore_state)
        rh, stats, traj_list_aclib, traj_list_old = self.restore_state_before_scen(
            args_)

    # Create scenario-object
    scen = Scenario(args_.scenario_file, misc_args)

    # Restore state (continued, needs to be after scenario-creation!)
    if args_.restore_state:
        scen.output_dir_for_this_run = create_output_directory(
            scen,
            args_.seed,
            root_logger,
        )
        scen.write()
        stats, incumbent = self.restore_state_after_scen(
            scen, stats, traj_list_aclib, traj_list_old)

    # Warm-start from foreign runhistories, merging their data into a
    # fresh RunHistory tied to this scenario's configuration space.
    if args_.warmstart_runhistory:
        aggregate_func = average_cost
        rh = RunHistory(aggregate_func=aggregate_func)
        scen, rh = merge_foreign_data_from_file(
            scenario=scen,
            runhistory=rh,
            in_scenario_fn_list=args_.warmstart_scenario,
            in_runhistory_fn_list=args_.warmstart_runhistory,
            cs=scen.cs,
            aggregate_func=aggregate_func)

    # Seed the initial design with the default configuration plus the
    # final incumbent of every supplied warm-start trajectory.
    if args_.warmstart_incumbent:
        initial_configs = [scen.cs.get_default_configuration()]
        for traj_fn in args_.warmstart_incumbent:
            trajectory = TrajLogger.read_traj_aclib_format(fn=traj_fn,
                                                           cs=scen.cs)
            initial_configs.append(trajectory[-1]["incumbent"])

    if args_.mode == "SMAC":
        optimizer = SMAC(scenario=scen,
                         rng=np.random.RandomState(args_.seed),
                         runhistory=rh,
                         initial_configurations=initial_configs,
                         stats=stats,
                         restore_incumbent=incumbent,
                         run_id=args_.seed)
    elif args_.mode == "ROAR":
        optimizer = ROAR(scenario=scen,
                         rng=np.random.RandomState(args_.seed),
                         runhistory=rh,
                         initial_configurations=initial_configs,
                         run_id=args_.seed)
    elif args_.mode == "EPILS":
        optimizer = EPILS(scenario=scen,
                          rng=np.random.RandomState(args_.seed),
                          runhistory=rh,
                          initial_configurations=initial_configs,
                          run_id=args_.seed)
    else:
        # Fail loudly here instead of letting `optimizer.optimize()` below
        # raise an opaque UnboundLocalError for an unbound `optimizer`.
        raise ValueError("Unknown mode: %s. Must be one of "
                         "'SMAC', 'ROAR' or 'EPILS'." % args_.mode)
    try:
        optimizer.optimize()
    except (TAEAbortException, FirstRunCrashedException) as err:
        self.logger.error(err)