def __init__(self,
             scenario: typing.Type[Scenario],
             rng: typing.Optional[typing.Union[np.random.RandomState, int]] = None,
             run_id: int = 1,
             tae: typing.Type[ExecuteTARun] = ExecuteTARunOld,
             tae_kwargs: typing.Union[dict, None] = None,
             shared_model: bool = True,
             validate: bool = True,
             n_optimizers: int = 2,
             val_set: typing.Union[typing.List[str], None] = None,
             n_incs: int = 1,
             **kwargs):
    """Set up a parallel SMAC (pSMAC) optimization run.

    Parameters
    ----------
    scenario : ~smac.scenario.scenario.Scenario
        Scenario object
    rng : int / np.random.RandomState
        Seed or random state handed to each SMAC run
    run_id : int
        run_id for this run
    tae : ExecuteTARun
        Target algorithm runner (supports old and aclib format as well as
        AbstractTAFunc)
    tae_kwargs : Optional[dict]
        Arguments passed to the constructor of '~tae'
    shared_model : bool
        Whether information is shared between the SMAC runs
    validate : bool / None
        Whether to validate the found configurations or to use the SMAC
        estimates; None => neither, and the full portfolio is returned
    n_optimizers : int
        Number of optimizers to run in parallel per round
        (values below 2 are raised to 2 with a warning)
    val_set : typing.List[str]
        Instance ids to validate on; defaults to the scenario's training
        instances when not given
    n_incs : int
        Number of incumbents to return, clamped to [1, n_optimizers]
    """
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
    # Seed handling is delegated to the shared helper so run_id and rng
    # stay consistent with the rest of SMAC.
    self.run_id, self.rng = get_rng(rng, run_id, logger=self.logger)

    # Static configuration of this parallel run.
    self.scenario = scenario
    self.kwargs = kwargs
    self.output_dir = None
    self.rh = RunHistory()
    self._tae = tae
    self._tae_kwargs = tae_kwargs

    # Parallel optimization needs at least two workers.
    if n_optimizers <= 1:
        self.logger.warning('Invalid value in %s: %d. Setting to 2', 'n_optimizers', n_optimizers)
    self.n_optimizers = max(n_optimizers, 2)

    self.validate = validate
    self.shared_model = shared_model
    # Never return more incumbents than optimizers, and always at least one.
    self.n_incs = min(max(1, n_incs), self.n_optimizers)
    self.val_set = scenario.train_insts if val_set is None else val_set
def __init__(self, model_type='gp_mcmc', **kwargs):
    """
    Constructor
    see ~smac.facade.smac_facade for documentation

    Installs GP-oriented defaults into ``kwargs`` (Sobol initial design,
    cost-based runhistory transformation) and, when no model is supplied,
    builds a Gaussian-process model with a fixed kernel recipe before
    delegating to the parent facade.

    Parameters
    ----------
    model_type : str
        Which GP flavour to build when no model is given:
        'gp' (point-estimate hyperparameters) or
        'gp_mcmc' (hyperparameters integrated out via MCMC sampling).
    """
    scenario = kwargs['scenario']

    # GP-facade defaults; only applied when the caller did not override them.
    kwargs['initial_design'] = kwargs.get('initial_design', SobolDesign)
    kwargs['runhistory2epm'] = kwargs.get('runhistory2epm', RunHistory2EPM4Cost)

    # Initial design: 8 configurations per parameter, capped at 25% of the budget.
    init_kwargs = kwargs.get('initial_design_kwargs', dict())
    init_kwargs['n_configs_x_params'] = init_kwargs.get(
        'n_configs_x_params', 8)
    init_kwargs['max_config_fracs'] = init_kwargs.get(
        'max_config_fracs', 0.25)
    kwargs['initial_design_kwargs'] = init_kwargs

    if kwargs.get('model') is None:
        from smac.epm.gp_kernels import ConstantKernel, Matern, WhiteKernel, HammingKernel

        model_kwargs = kwargs.get('model_kwargs', dict())

        # Separate rng for kernel priors / model seeding, derived the same
        # way the parent facade derives its rng.
        _, rng = get_rng(rng=kwargs.get("rng", None),
                         run_id=kwargs.get("run_id", None),
                         logger=None)

        types, bounds = get_types(kwargs['scenario'].cs, instance_features=None)

        # Signal-variance (amplitude) kernel with a log-normal hyperprior.
        cov_amp = ConstantKernel(
            2.0,
            constant_value_bounds=(np.exp(-10), np.exp(2)),
            prior=LognormalPrior(mean=0.0, sigma=1.0, rng=rng),
        )

        # type == 0 marks continuous hyperparameters; anything else is
        # treated as categorical.
        cont_dims = np.nonzero(types == 0)[0]
        cat_dims = np.nonzero(types != 0)[0]

        if len(cont_dims) > 0:
            # Matern 5/2 kernel over the continuous dimensions.
            exp_kernel = Matern(
                np.ones([len(cont_dims)]),
                [(np.exp(-6.754111155189306), np.exp(0.0858637988771976))
                 for _ in range(len(cont_dims))],
                nu=2.5,
                operate_on=cont_dims,
            )

        if len(cat_dims) > 0:
            # Hamming kernel over the categorical dimensions.
            ham_kernel = HammingKernel(
                np.ones([len(cat_dims)]),
                [(np.exp(-6.754111155189306), np.exp(0.0858637988771976))
                 for _ in range(len(cat_dims))],
                operate_on=cat_dims,
            )

        # Observation-noise kernel with a horseshoe prior shrinking it
        # towards (near-)noise-free observations.
        noise_kernel = WhiteKernel(
            noise_level=1e-8,
            noise_level_bounds=(np.exp(-25), np.exp(2)),
            prior=HorseshoePrior(scale=0.1, rng=rng),
        )

        # Combine per-dimension-type kernels; exp_kernel/ham_kernel are only
        # bound when the matching dimension set is non-empty, which the
        # branches below mirror exactly.
        if len(cont_dims) > 0 and len(cat_dims) > 0:
            # both
            kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel
        elif len(cont_dims) > 0 and len(cat_dims) == 0:
            # only cont
            kernel = cov_amp * exp_kernel + noise_kernel
        elif len(cont_dims) == 0 and len(cat_dims) > 0:
            # only cat
            kernel = cov_amp * ham_kernel + noise_kernel
        else:
            # A configuration space with no hyperparameters is unsupported.
            raise ValueError()

        if model_type == "gp":
            model_class = GaussianProcess
            kwargs['model'] = model_class
            model_kwargs['kernel'] = kernel
            model_kwargs['normalize_y'] = True
            model_kwargs['seed'] = rng.randint(0, 2**20)
        elif model_type == "gp_mcmc":
            model_class = GaussianProcessMCMC
            kwargs['model'] = model_class
            # MCMC-sampled hyperparameters allow marginalizing the
            # acquisition function over them.
            kwargs['integrate_acquisition_function'] = True

            model_kwargs['kernel'] = kernel

            # emcee requires an even number of walkers.
            n_mcmc_walkers = 3 * len(kernel.theta)
            if n_mcmc_walkers % 2 == 1:
                n_mcmc_walkers += 1
            model_kwargs['n_mcmc_walkers'] = n_mcmc_walkers
            model_kwargs['chain_length'] = 250
            model_kwargs['burnin_steps'] = 250
            model_kwargs['normalize_y'] = True
            model_kwargs['seed'] = rng.randint(0, 2**20)
        else:
            raise ValueError('Unknown model type %s' % model_type)
        kwargs['model_kwargs'] = model_kwargs

    if kwargs.get('random_configuration_chooser') is None:
        random_config_chooser_kwargs = kwargs.get(
            'random_configuration_chooser_kwargs', dict())
        # Default interleaving probability for random configurations.
        random_config_chooser_kwargs[
            'prob'] = random_config_chooser_kwargs.get(
                'prob', 0.08447232371720552)
        kwargs[
            'random_configuration_chooser_kwargs'] = random_config_chooser_kwargs

    if kwargs.get('acquisition_function_optimizer') is None:
        acquisition_function_optimizer_kwargs = kwargs.get(
            'acquisition_function_optimizer_kwargs', dict())
        acquisition_function_optimizer_kwargs['n_sls_iterations'] = 10
        kwargs[
            'acquisition_function_optimizer_kwargs'] = acquisition_function_optimizer_kwargs

    # only 1 configuration per SMBO iteration
    intensifier_kwargs = kwargs.get('intensifier_kwargs', dict())
    intensifier_kwargs['min_chall'] = 1
    kwargs['intensifier_kwargs'] = intensifier_kwargs
    scenario.intensification_percentage = 1e-10

    super().__init__(**kwargs)

    # GP models here are built without instance features, so instances are
    # explicitly rejected.
    if self.solver.scenario.n_features > 0:
        raise NotImplementedError("BOGP cannot handle instances")

    self.logger.info(self.__class__)

    self.solver.scenario.acq_opt_challengers = 1000
    # activate predict incumbent
    self.solver.predict_incumbent = True
def test_check_random_states(self, patch):
    """Exercise every accepted rng/run_id combination of get_rng.

    Checks that seeds map deterministically to random states, that a
    missing seed triggers a debug log and a fresh state, that invalid
    argument types raise TypeError, and that a passed-in RandomState is
    returned unchanged.
    """
    # `patch` is injected by a mock.patch decorator (not visible in this
    # chunk) — presumably it stubs out SMAC4AC's heavy setup so the
    # constructor below is cheap; TODO confirm against the decorator.
    patch.return_value = None
    smac = SMAC4AC()
    smac.logger = unittest.mock.MagicMock()

    # Check some properties
    # Check whether different seeds give different random states
    _, rng_1 = get_rng(1)
    _, rng_2 = get_rng(2)
    # get_state()[1] is the generator's internal key array; differing sums
    # indicate differing states.
    self.assertNotEqual(sum(rng_1.get_state()[1] - rng_2.get_state()[1]), 0)

    # Check whether no seeds gives different random states
    _, rng_1 = get_rng(logger=smac.logger)
    self.assertEqual(smac.logger.debug.call_count, 1)
    _, rng_2 = get_rng(logger=smac.logger)
    self.assertEqual(smac.logger.debug.call_count, 2)
    self.assertNotEqual(sum(rng_1.get_state()[1] - rng_2.get_state()[1]), 0)

    # Check whether the same int seeds give the same random states
    _, rng_1 = get_rng(1)
    _, rng_2 = get_rng(1)
    self.assertEqual(sum(rng_1.get_state()[1] - rng_2.get_state()[1]), 0)

    # Check all execution paths
    self.assertRaisesRegex(
        TypeError,
        "Argument rng accepts only arguments of type None, int or np.random.RandomState, "
        "you provided <class 'str'>.",
        get_rng,
        rng='ABC',
    )
    self.assertRaisesRegex(
        TypeError,
        "Argument run_id accepts only arguments of type None, int, you provided <class 'str'>.",
        get_rng,
        run_id='ABC')

    # rng=None, run_id=None: both are generated, with a debug log entry.
    run_id, rng_1 = get_rng(rng=None, run_id=None, logger=smac.logger)
    self.assertIsInstance(run_id, int)
    self.assertIsInstance(rng_1, np.random.RandomState)
    self.assertEqual(smac.logger.debug.call_count, 3)

    # rng=None, run_id given: run_id doubles as the seed.
    run_id, rng_1 = get_rng(rng=None, run_id=1, logger=smac.logger)
    self.assertEqual(run_id, 1)
    self.assertIsInstance(rng_1, np.random.RandomState)

    # rng given as int, run_id=None: the seed doubles as run_id.
    run_id, rng_1 = get_rng(rng=1, run_id=None, logger=smac.logger)
    self.assertEqual(run_id, 1)
    self.assertIsInstance(rng_1, np.random.RandomState)

    # Both given: run_id is taken verbatim.
    run_id, rng_1 = get_rng(rng=1, run_id=1337, logger=smac.logger)
    self.assertEqual(run_id, 1337)
    self.assertIsInstance(rng_1, np.random.RandomState)

    # A RandomState instance is passed through unchanged (identity check).
    rs = np.random.RandomState(1)
    run_id, rng_1 = get_rng(rng=rs, run_id=None, logger=smac.logger)
    self.assertIsInstance(run_id, int)
    self.assertIs(rng_1, rs)

    run_id, rng_1 = get_rng(rng=rs, run_id=2505, logger=smac.logger)
    self.assertEqual(run_id, 2505)
    self.assertIs(rng_1, rs)
def __init__(self,
             scenario: typing.Type[Scenario],
             n_iterations: int,
             val_set: str = 'train',
             incs_per_round: int = 1,
             n_optimizers: int = 1,
             rng: typing.Optional[typing.Union[np.random.RandomState, int]] = None,
             run_id: int = 1,
             tae: typing.Type[BaseRunner] = ExecuteTARunOld,
             tae_kwargs: typing.Union[dict, None] = None,
             **kwargs):
    """Set up a Hydra run that repeatedly invokes SMAC to build a portfolio.

    Parameters
    ----------
    scenario : ~smac.scenario.scenario.Scenario
        Scenario object
    n_iterations : int
        Number of Hydra iterations
    val_set : str
        Set to validate incumbent(s) on. [train, valX].
        train => whole training set,
        valX => train_set * 100/X where X in (0, 100)
    incs_per_round : int
        Number of incumbents to keep per round
        (values below 1 are raised to 1 with a warning)
    n_optimizers : int
        Number of optimizers to run in parallel per round
        (values below 1 are raised to 1 with a warning)
    rng : int / np.random.RandomState
        Seed or random state handed to each SMAC run
    run_id : int
        run_id for this hydra run
    tae : BaseRunner
        Target algorithm runner (supports old and aclib format as well as
        AbstractTAFunc)
    tae_kwargs : Optional[dict]
        Arguments passed to the constructor of '~tae'
    """
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
    self.run_id, self.rng = get_rng(rng, run_id, self.logger)

    # Static configuration of this Hydra run.
    self.scenario = scenario
    self.n_iterations = n_iterations
    self.kwargs = kwargs
    self._tae = tae
    self._tae_kwargs = tae_kwargs

    # Sanitize the two count parameters: both must be at least 1.
    if incs_per_round <= 0:
        self.logger.warning('Invalid value in %s: %d. Setting to 1', 'incs_per_round', incs_per_round)
    self.incs_per_round = max(incs_per_round, 1)
    if n_optimizers <= 0:
        self.logger.warning('Invalid value in %s: %d. Setting to 1', 'n_optimizers', n_optimizers)
    self.n_optimizers = max(n_optimizers, 1)

    # Mutable state populated while Hydra iterates.
    self.output_dir = None
    self.top_dir = None
    self.solver = None
    self.portfolio = None
    self.rh = RunHistory()
    self.val_set = self._get_validation_set(val_set)
    self.cost_per_inst = {}
    self.optimizer = None
    self.portfolio_cost = None
def __init__(
    self,
    scenario: Scenario,
    tae_runner: Optional[Union[Type[BaseRunner], Callable]] = None,
    tae_runner_kwargs: Optional[Dict] = None,
    runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None,
    runhistory_kwargs: Optional[Dict] = None,
    intensifier: Optional[Type[AbstractRacer]] = None,
    intensifier_kwargs: Optional[Dict] = None,
    acquisition_function: Optional[
        Type[AbstractAcquisitionFunction]] = None,
    acquisition_function_kwargs: Optional[Dict] = None,
    integrate_acquisition_function: bool = False,
    acquisition_function_optimizer: Optional[
        Type[AcquisitionFunctionMaximizer]] = None,
    acquisition_function_optimizer_kwargs: Optional[Dict] = None,
    model: Optional[Type[AbstractEPM]] = None,
    model_kwargs: Optional[Dict] = None,
    runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None,
    runhistory2epm_kwargs: Optional[Dict] = None,
    initial_design: Optional[Type[InitialDesign]] = None,
    initial_design_kwargs: Optional[Dict] = None,
    initial_configurations: Optional[List[Configuration]] = None,
    stats: Optional[Stats] = None,
    restore_incumbent: Optional[Configuration] = None,
    rng: Optional[Union[np.random.RandomState, int]] = None,
    smbo_class: Optional[Type[SMBO]] = None,
    run_id: Optional[int] = None,
    random_configuration_chooser: Optional[
        Type[RandomConfigurationChooser]] = None,
    random_configuration_chooser_kwargs: Optional[Dict] = None,
    dask_client: Optional[dask.distributed.Client] = None,
    n_jobs: Optional[int] = 1,
):
    """
    Constructor

    Wires together every component of a SMAC run (runhistory, random
    configuration chooser, EPM, acquisition function and its optimizer,
    target algorithm runner, intensifier, initial design and
    runhistory-to-EPM transformation), applying scenario-driven defaults
    for any component the caller did not supply, and finally instantiates
    the SMBO solver.

    Parameters
    ----------
    scenario : ~smac.scenario.scenario.Scenario
        Scenario object
    tae_runner : ~smac.tae.base.BaseRunner or callable
        Callable or implementation of
        :class:`~smac.tae.base.BaseRunner`. In case a
        callable is passed it will be wrapped by
        :class:`~smac.tae.execute_func.ExecuteTAFuncDict`.
        If not set, it will be initialized with the
        :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`.
    tae_runner_kwargs: Optional[Dict]
        arguments passed to constructor of '~tae_runner'
    runhistory : RunHistory
        runhistory to store all algorithm runs
    runhistory_kwargs : Optional[Dict]
        arguments passed to constructor of runhistory.
        We strongly advise against changing the aggregation function,
        since it will break some code assumptions
    intensifier : AbstractRacer
        intensification object or class to issue a racing to decide the current
        incumbent. Default: class `Intensifier`
    intensifier_kwargs: Optional[Dict]
        arguments passed to the constructor of '~intensifier'
    acquisition_function : `~smac.optimizer.acquisition.AbstractAcquisitionFunction`
        Class or object that implements the
        :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`.
        Will use :class:`~smac.optimizer.acquisition.EI` or
        :class:`~smac.optimizer.acquisition.LogEI` if not set.
        `~acquisition_function_kwargs` is passed to the class constructor.
    acquisition_function_kwargs : Optional[Dict]
        dictionary to pass specific arguments to ~acquisition_function
    integrate_acquisition_function : bool, default=False
        Whether to integrate the acquisition function. Works only with models
        which can sample their hyperparameters (i.e. GaussianProcessMCMC).
    acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer
        Object that implements the
        :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`.
        Will use :class:`smac.optimizer.ei_optimization.InterleavedLocalAndRandomSearch`
        if not set.
    acquisition_function_optimizer_kwargs: Optional[dict]
        Arguments passed to constructor of `~acquisition_function_optimizer`
    model : AbstractEPM
        Model that implements train() and predict(). Will use a
        :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not set.
    model_kwargs : Optional[dict]
        Arguments passed to constructor of `~model`
    runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP
        Object that implements the AbstractRunHistory2EPM. If None,
        will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost`
        if objective is cost or
        :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost`
        if objective is runtime.
    runhistory2epm_kwargs: Optional[dict]
        Arguments passed to the constructor of `~runhistory2epm`
    initial_design : InitialDesign
        initial sampling design
    initial_design_kwargs: Optional[dict]
        arguments passed to constructor of `~initial_design`
    initial_configurations : List[Configuration]
        list of initial configurations for initial design --
        cannot be used together with initial_design
    stats : Stats
        optional stats object
    rng : np.random.RandomState
        Random number generator
    restore_incumbent : Configuration
        incumbent used if restoring to previous state
    smbo_class : ~smac.optimizer.smbo.SMBO
        Class implementing the SMBO interface which will be used to
        instantiate the optimizer class.
    run_id : int (optional)
        Run ID will be used as subfolder for output_dir. If no ``run_id`` is
        given, a random ``run_id`` will be chosen.
    random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser
        How often to choose a random configuration during the intensification
        procedure.
    random_configuration_chooser_kwargs : Optional[dict]
        arguments of constructor for `~random_configuration_chooser`
    dask_client : dask.distributed.Client
        User-created dask client, can be used to start a dask cluster and
        then attach SMAC to it.
    n_jobs : int, optional
        Number of jobs. If > 1 or -1, this creates a dask client if
        ``dask_client`` is ``None``. Will be ignored if ``dask_client`` is not
        ``None``. If ``None``, this value will be set to 1, if ``-1``, this
        will be set to the number of cpu cores.
    """
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

    self.scenario = scenario
    self.output_dir = ""
    if not restore_incumbent:
        # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an
        # output directory. This is the default path.
        # initial random number generator
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        self.output_dir = create_output_directory(scenario, run_id)
    elif scenario.output_dir is not None:  # type: ignore[attr-defined] # noqa F821
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        # output-directory is created in CLI when restoring from a
        # folder. calling the function again in the facade results in two
        # folders being created: run_X and run_X.OLD. if we are
        # restoring, the output-folder exists already and we omit creating it,
        # but set the self-output_dir to the dir.
        # necessary because we want to write traj to new output-dir in CLI.
        self.output_dir = cast(str, scenario.output_dir_for_this_run)  # type: ignore[attr-defined] # noqa F821
    rng = cast(np.random.RandomState, rng)

    # Deterministic quality scenarios gain nothing from re-evaluating a
    # configuration, so intensification is effectively disabled.
    if (
        scenario.deterministic is True  # type: ignore[attr-defined] # noqa F821
        and getattr(scenario, 'tuner_timeout', None) is None
        and scenario.run_obj == 'quality'  # type: ignore[attr-defined] # noqa F821
    ):
        self.logger.info(
            'Optimizing a deterministic scenario for quality without a tuner timeout - will make '
            'SMAC deterministic and only evaluate one configuration per iteration!'
        )
        scenario.intensification_percentage = 1e-10  # type: ignore[attr-defined] # noqa F821
        scenario.min_chall = 1  # type: ignore[attr-defined] # noqa F821

    scenario.write()

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario)

    if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
        self.logger.warning(
            "Runtime as objective automatically activates log(y) transformation"
        )
        self.scenario.transform_y = "LOG"  # type: ignore[attr-defined] # noqa F821

    # initialize empty runhistory
    runhistory_def_kwargs = {}
    if runhistory_kwargs is not None:
        runhistory_def_kwargs.update(runhistory_kwargs)
    if runhistory is None:
        runhistory = RunHistory(**runhistory_def_kwargs)
    elif inspect.isclass(runhistory):
        runhistory = runhistory(
            **runhistory_def_kwargs)  # type: ignore[operator] # noqa F821
    elif isinstance(runhistory, RunHistory):
        pass
    else:
        raise ValueError(
            'runhistory has to be a class or an object of RunHistory')

    # Random configuration chooser: decides when a random configuration is
    # interleaved with model-based suggestions.
    rand_conf_chooser_kwargs = {'rng': rng}
    if random_configuration_chooser_kwargs is not None:
        rand_conf_chooser_kwargs.update(
            random_configuration_chooser_kwargs)
    if random_configuration_chooser is None:
        if 'prob' not in rand_conf_chooser_kwargs:
            rand_conf_chooser_kwargs[
                'prob'] = scenario.rand_prob  # type: ignore[attr-defined] # noqa F821
        random_configuration_chooser_instance = (
            ChooserProb(**rand_conf_chooser_kwargs
                        )  # type: ignore[arg-type] # noqa F821
        )  # type: RandomConfigurationChooser
    elif inspect.isclass(random_configuration_chooser):
        random_configuration_chooser_instance = random_configuration_chooser(
            **
            rand_conf_chooser_kwargs)  # type: ignore[arg-type] # noqa F821
    elif not isinstance(random_configuration_chooser,
                        RandomConfigurationChooser):
        raise ValueError(
            "random_configuration_chooser has to be"
            " a class or object of RandomConfigurationChooser")

    # reset random number generator in config space to draw different
    # random configurations with each seed given to SMAC
    scenario.cs.seed(
        rng.randint(MAXINT))  # type: ignore[attr-defined] # noqa F821

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

    # initial EPM
    types, bounds = get_types(
        scenario.cs,
        scenario.feature_array)  # type: ignore[attr-defined] # noqa F821
    model_def_kwargs = {
        'types': types,
        'bounds': bounds,
        'instance_features': scenario.feature_array,
        'seed': rng.randint(MAXINT),
        'pca_components': scenario.PCA_DIM,
    }
    if model_kwargs is not None:
        model_def_kwargs.update(model_kwargs)
    if model is None:
        # Default model: random forest, configured from the scenario's
        # rf_* options unless the caller already set a key.
        for key, value in {
            'log_y':
            scenario.transform_y in ["LOG", "LOGS"],  # type: ignore[attr-defined] # noqa F821
            'num_trees':
            scenario.rf_num_trees,  # type: ignore[attr-defined] # noqa F821
            'do_bootstrapping':
            scenario.rf_do_bootstrapping,  # type: ignore[attr-defined] # noqa F821
            'ratio_features':
            scenario.rf_ratio_features,  # type: ignore[attr-defined] # noqa F821
            'min_samples_split':
            scenario.rf_min_samples_split,  # type: ignore[attr-defined] # noqa F821
            'min_samples_leaf':
            scenario.rf_min_samples_leaf,  # type: ignore[attr-defined] # noqa F821
            'max_depth':
            scenario.rf_max_depth,  # type: ignore[attr-defined] # noqa F821
        }.items():
            if key not in model_def_kwargs:
                model_def_kwargs[key] = value
        model_def_kwargs[
            'configspace'] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
        model_instance = (
            RandomForestWithInstances(
                **model_def_kwargs)  # type: ignore[arg-type] # noqa F821
        )  # type: AbstractEPM
    elif inspect.isclass(model):
        model_def_kwargs[
            'configspace'] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
        model_instance = model(
            **model_def_kwargs)  # type: ignore[arg-type] # noqa F821
    else:
        raise TypeError("Model not recognized: %s" % (type(model)))

    # initial acquisition function
    acq_def_kwargs = {'model': model_instance}
    if acquisition_function_kwargs is not None:
        acq_def_kwargs.update(acquisition_function_kwargs)
    if acquisition_function is None:
        # LogEI when the objective is modelled in log space, plain EI otherwise.
        if scenario.transform_y in [
                "LOG", "LOGS"
        ]:  # type: ignore[attr-defined] # noqa F821
            acquisition_function_instance = (
                LogEI(**
                      acq_def_kwargs)  # type: ignore[arg-type] # noqa F821
            )  # type: AbstractAcquisitionFunction
        else:
            acquisition_function_instance = EI(
                **acq_def_kwargs)  # type: ignore[arg-type] # noqa F821
    elif inspect.isclass(acquisition_function):
        acquisition_function_instance = acquisition_function(
            **acq_def_kwargs)
    else:
        raise TypeError(
            "Argument acquisition_function must be None or an object implementing the "
            "AbstractAcquisitionFunction, not %s."
            % type(acquisition_function))
    if integrate_acquisition_function:
        acquisition_function_instance = IntegratedAcquisitionFunction(
            acquisition_function=acquisition_function_instance,
            **acq_def_kwargs)

    # initialize optimizer on acquisition function
    acq_func_opt_kwargs = {
        'acquisition_function': acquisition_function_instance,
        'config_space':
        scenario.cs,  # type: ignore[attr-defined] # noqa F821
        'rng': rng,
    }
    if acquisition_function_optimizer_kwargs is not None:
        acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs)
    if acquisition_function_optimizer is None:
        for key, value in {
            'max_steps':
            scenario.sls_max_steps,  # type: ignore[attr-defined] # noqa F821
            'n_steps_plateau_walk':
            scenario.sls_n_steps_plateau_walk,  # type: ignore[attr-defined] # noqa F821
        }.items():
            if key not in acq_func_opt_kwargs:
                acq_func_opt_kwargs[key] = value
        acquisition_function_optimizer_instance = (
            LocalAndSortedRandomSearch(
                **
                acq_func_opt_kwargs)  # type: ignore[arg-type] # noqa F821
        )  # type: AcquisitionFunctionMaximizer
    elif inspect.isclass(acquisition_function_optimizer):
        acquisition_function_optimizer_instance = acquisition_function_optimizer(
            **acq_func_opt_kwargs)  # type: ignore[arg-type] # noqa F821
    else:
        raise TypeError(
            "Argument acquisition_function_optimizer must be None or an object implementing the "
            "AcquisitionFunctionMaximizer, but is '%s'" %
            type(acquisition_function_optimizer))

    # initialize tae_runner
    # First case, if tae_runner is None, the target algorithm is a call
    # string in the scenario file
    tae_def_kwargs = {
        'stats': self.stats,
        'run_obj': scenario.run_obj,
        'par_factor':
        scenario.par_factor,  # type: ignore[attr-defined] # noqa F821
        'cost_for_crash':
        scenario.cost_for_crash,  # type: ignore[attr-defined] # noqa F821
        'abort_on_first_run_crash':
        scenario.abort_on_first_run_crash,  # type: ignore[attr-defined] # noqa F821
    }
    if tae_runner_kwargs is not None:
        tae_def_kwargs.update(tae_runner_kwargs)
    if 'ta' not in tae_def_kwargs:
        tae_def_kwargs[
            'ta'] = scenario.ta  # type: ignore[attr-defined] # noqa F821
    if tae_runner is None:
        tae_def_kwargs[
            'ta'] = scenario.ta  # type: ignore[attr-defined] # noqa F821
        tae_runner_instance = (
            ExecuteTARunOld(
                **tae_def_kwargs)  # type: ignore[arg-type] # noqa F821
        )  # type: BaseRunner
    elif inspect.isclass(tae_runner):
        tae_runner_instance = cast(BaseRunner, tae_runner(
            **tae_def_kwargs))  # type: ignore[arg-type] # noqa F821
    elif callable(tae_runner):
        # Plain callables are wrapped so they obey the BaseRunner protocol.
        tae_def_kwargs['ta'] = tae_runner
        tae_def_kwargs[
            'use_pynisher'] = scenario.limit_resources  # type: ignore[attr-defined] # noqa F821
        tae_def_kwargs[
            'memory_limit'] = scenario.memory_limit  # type: ignore[attr-defined] # noqa F821
        tae_runner_instance = ExecuteTAFuncDict(
            **tae_def_kwargs)  # type: ignore[arg-type] # noqa F821
    else:
        raise TypeError(
            "Argument 'tae_runner' is %s, but must be "
            "either None, a callable or an object implementing "
            "BaseRunner. Passing 'None' will result in the "
            "creation of target algorithm runner based on the "
            "call string in the scenario file." % type(tae_runner))

    # In case of a parallel run, wrap the single worker in a parallel
    # runner
    if n_jobs is None or n_jobs == 1:
        _n_jobs = 1
    elif n_jobs == -1:
        _n_jobs = joblib.cpu_count()
    elif n_jobs > 0:
        _n_jobs = n_jobs
    else:
        raise ValueError(
            'Number of tasks must be positive, None or -1, but is %s' %
            str(n_jobs))
    if _n_jobs > 1 or dask_client is not None:
        tae_runner_instance = DaskParallelRunner(
            tae_runner_instance,
            n_workers=_n_jobs,
            output_directory=self.output_dir,
            dask_client=dask_client,
        )

    # Check that overall objective and tae objective are the same
    # TODO: remove these two ignores once the scenario object knows all its attributes!
    if tae_runner_instance.run_obj != scenario.run_obj:  # type: ignore[union-attr] # noqa F821
        raise ValueError(
            "Objective for the target algorithm runner and "
            "the scenario must be the same, but are '%s' and "
            "'%s'" % (tae_runner_instance.run_obj, scenario.run_obj))  # type: ignore[union-attr] # noqa F821

    if intensifier is None:
        intensifier = Intensifier

    if isinstance(intensifier, AbstractRacer):
        intensifier_instance = intensifier
    elif inspect.isclass(intensifier):
        # initialize intensification
        intensifier_def_kwargs = {
            'stats': self.stats,
            'traj_logger': traj_logger,
            'rng': rng,
            'instances':
            scenario.train_insts,  # type: ignore[attr-defined] # noqa F821
            'cutoff':
            scenario.cutoff,  # type: ignore[attr-defined] # noqa F821
            'deterministic':
            scenario.deterministic,  # type: ignore[attr-defined] # noqa F821
            'run_obj_time':
            scenario.run_obj == "runtime",  # type: ignore[attr-defined] # noqa F821
            'instance_specifics':
            scenario.instance_specific,  # type: ignore[attr-defined] # noqa F821
            'adaptive_capping_slackfactor':
            scenario.intens_adaptive_capping_slackfactor,  # type: ignore[attr-defined] # noqa F821
            'min_chall':
            scenario.intens_min_chall  # type: ignore[attr-defined] # noqa F821
        }
        # The classic Intensifier accepts additional arguments that other
        # AbstractRacer subclasses (e.g. SuccessiveHalving) do not.
        if issubclass(intensifier, Intensifier):
            intensifier_def_kwargs[
                'always_race_against'] = scenario.cs.get_default_configuration(
                )  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs[
                'use_ta_time_bound'] = scenario.use_ta_time  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs[
                'minR'] = scenario.minR  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs[
                'maxR'] = scenario.maxR  # type: ignore[attr-defined] # noqa F821
        if intensifier_kwargs is not None:
            intensifier_def_kwargs.update(intensifier_kwargs)
        intensifier_instance = intensifier(
            **intensifier_def_kwargs)  # type: ignore[arg-type] # noqa F821
    else:
        raise TypeError(
            "Argument intensifier must be None or an object implementing the AbstractRacer, but is '%s'"
            % type(intensifier))

    # initial design
    if initial_design is not None and initial_configurations is not None:
        raise ValueError(
            "Either use initial_design or initial_configurations; but not both"
        )

    init_design_def_kwargs = {
        'cs': scenario.cs,  # type: ignore[attr-defined] # noqa F821
        'traj_logger': traj_logger,
        'rng': rng,
        'ta_run_limit':
        scenario.ta_run_limit,  # type: ignore[attr-defined] # noqa F821
        'configs': initial_configurations,
        'n_configs_x_params': 0,
        'max_config_fracs': 0.0
    }
    if initial_design_kwargs is not None:
        init_design_def_kwargs.update(initial_design_kwargs)
    if initial_configurations is not None:
        initial_design_instance = InitialDesign(**init_design_def_kwargs)
    elif initial_design is None:
        # Pick the initial design from the scenario's initial_incumbent option.
        if scenario.initial_incumbent == "DEFAULT":  # type: ignore[attr-defined] # noqa F821
            init_design_def_kwargs['max_config_fracs'] = 0.0
            initial_design_instance = DefaultConfiguration(
                **init_design_def_kwargs)
        elif scenario.initial_incumbent == "RANDOM":  # type: ignore[attr-defined] # noqa F821
            init_design_def_kwargs['max_config_fracs'] = 0.0
            initial_design_instance = RandomConfigurations(
                **init_design_def_kwargs)
        elif scenario.initial_incumbent == "LHD":  # type: ignore[attr-defined] # noqa F821
            initial_design_instance = LHDesign(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "FACTORIAL":  # type: ignore[attr-defined] # noqa F821
            initial_design_instance = FactorialInitialDesign(
                **init_design_def_kwargs)
        elif scenario.initial_incumbent == "SOBOL":  # type: ignore[attr-defined] # noqa F821
            initial_design_instance = SobolDesign(**init_design_def_kwargs)
        else:
            raise ValueError("Don't know what kind of initial_incumbent "
                             "'%s' is" % scenario.initial_incumbent
                             )  # type: ignore[attr-defined] # noqa F821
    elif inspect.isclass(initial_design):
        initial_design_instance = initial_design(**init_design_def_kwargs)
    else:
        raise TypeError(
            "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'"
            % type(initial_design))

    # if we log the performance data,
    # the RFRImputator will already get
    # log transform data from the runhistory
    if scenario.transform_y in [
            "LOG", "LOGS"
    ]:  # type: ignore[attr-defined] # noqa F821
        cutoff = np.log(np.nanmin([
            np.inf, np.float_(scenario.cutoff)
        ]))  # type: ignore[attr-defined] # noqa F821
        threshold = cutoff + np.log(
            scenario.par_factor)  # type: ignore[attr-defined] # noqa F821
    else:
        cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)
                            ])  # type: ignore[attr-defined] # noqa F821
        threshold = cutoff * scenario.par_factor  # type: ignore[attr-defined] # noqa F821
    num_params = len(scenario.cs.get_hyperparameters()
                     )  # type: ignore[attr-defined] # noqa F821
    # Imputes censored (capped) runtime data using the EPM.
    imputor = RFRImputator(rng=rng,
                           cutoff=cutoff,
                           threshold=threshold,
                           model=model_instance,
                           change_threshold=0.01,
                           max_iter=2)

    r2e_def_kwargs = {
        'scenario': scenario,
        'num_params': num_params,
        'success_states': [
            StatusType.SUCCESS,
        ],
        'impute_censored_data': True,
        'impute_state': [
            StatusType.CAPPED,
        ],
        'imputor': imputor,
        'scale_perc': 5
    }
    # For quality scenarios there is no censoring, and crashed/memout runs
    # still carry a usable cost.
    if scenario.run_obj == 'quality':
        r2e_def_kwargs.update({
            'success_states':
            [StatusType.SUCCESS, StatusType.CRASHED, StatusType.MEMOUT],
            'impute_censored_data': False,
            'impute_state': None,
        })

    # Multi-fidelity intensifiers additionally track runs that should not
    # advance to higher budgets.
    if isinstance(
            intensifier_instance,
        (SuccessiveHalving, Hyperband)) and scenario.run_obj == "quality":
        r2e_def_kwargs.update({
            'success_states': [
                StatusType.SUCCESS,
                StatusType.CRASHED,
                StatusType.MEMOUT,
                StatusType.DONOTADVANCE,
            ],
            'consider_for_higher_budgets_state': [
                StatusType.DONOTADVANCE,
                StatusType.TIMEOUT,
                StatusType.CRASHED,
                StatusType.MEMOUT,
            ],
        })
    if runhistory2epm_kwargs is not None:
        r2e_def_kwargs.update(runhistory2epm_kwargs)
    if runhistory2epm is None:
        if scenario.run_obj == 'runtime':
            rh2epm = (
                RunHistory2EPM4LogCost(
                    **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
            )  # type: AbstractRunHistory2EPM
        elif scenario.run_obj == 'quality':
            # Choose the cost transformation matching scenario.transform_y.
            if scenario.transform_y == "NONE":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4Cost(
                    **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
            elif scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4LogCost(
                    **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
            elif scenario.transform_y == "LOGS":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4LogScaledCost(
                    **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
            elif scenario.transform_y == "INVS":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4InvScaledCost(
                    **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise ValueError('Unknown run objective: %s. Should be either '
                             'quality or runtime.' % self.scenario.run_obj)
    elif inspect.isclass(runhistory2epm):
        rh2epm = runhistory2epm(
            **r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
    else:
        raise TypeError(
            "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'"
            % type(runhistory2epm))

    # Assemble all resolved components and hand them to the SMBO solver.
    smbo_args = {
        'scenario': scenario,
        'stats': self.stats,
        'initial_design': initial_design_instance,
        'runhistory': runhistory,
        'runhistory2epm': rh2epm,
        'intensifier': intensifier_instance,
        'num_run': run_id,
        'model': model_instance,
        'acq_optimizer': acquisition_function_optimizer_instance,
        'acquisition_func': acquisition_function_instance,
        'rng': rng,
        'restore_incumbent': restore_incumbent,
        'random_configuration_chooser': random_configuration_chooser_instance,
        'tae_runner': tae_runner_instance,
    }  # type: Dict[str, Any]

    if smbo_class is None:
        self.solver = SMBO(**
                           smbo_args)  # type: ignore[arg-type] # noqa F821
    else:
        self.solver = smbo_class(
            **smbo_args)  # type: ignore[arg-type] # noqa F821