    def test_additional_info_crash_msg(self):
        """
        We want to make sure we catch errors as additional info.
        In particular, when doing multiprocessing runs, we want to make sure
        we capture dask exceptions.
        """
        def target_nonpickable(x, seed, instance):
            return x**2, {'key': seed, 'instance': instance}

        runner = ExecuteTAFuncDict(ta=target_nonpickable, stats=self.stats, run_obj='quality')
        runner = DaskParallelRunner(runner, n_workers=2)

        run_info = RunInfo(config=2, instance='test', instance_specific="0",
                           seed=0, cutoff=None, capped=False, budget=0.0)
        runner.submit_run(run_info)
        runner.wait()
        run_info, result = runner.get_finished_runs()[0]

        # Make sure the traceback message is included
        self.assertIn('traceback', result.additional_info)
        self.assertIn(
            # We expect the problem to occur in the run wrapper,
            # so the traceback should show this!
            'target_nonpickable',
            result.additional_info['traceback'])

        # Make sure the error message is included
        self.assertIn('error', result.additional_info)
        self.assertIn(
            "Can't pickle local object",
            result.additional_info['error'])
    def test_file_output(self):
        tmp_dir = tempfile.mkdtemp()
        single_worker_mock = unittest.mock.Mock()
        parallel_runner = DaskParallelRunner(  # noqa F841
            single_worker=single_worker_mock,
            n_workers=1,
            output_directory=tmp_dir,
        )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, '.dask_scheduler_file')))
    def test_do_not_close_external_client(self):
        tmp_dir = tempfile.mkdtemp()
        single_worker_mock = unittest.mock.Mock()
        client = Client()

        parallel_runner = DaskParallelRunner(  # noqa F841
            single_worker=single_worker_mock,
            dask_client=client,
            n_workers=1,
            output_directory=tmp_dir,
        )
        del parallel_runner
        # No scheduler file is written when an external client is provided
        self.assertFalse(os.path.exists(os.path.join(tmp_dir, '.dask_scheduler_file')))
        # The externally managed client must survive deletion of the runner
        self.assertEqual(client.status, 'running')

        parallel_runner = DaskParallelRunner(  # noqa F841
            single_worker=single_worker_mock,
            dask_client=client,
            n_workers=1,
            output_directory=tmp_dir,
        )
        del parallel_runner
        self.assertEqual(client.status, 'running')

        client.shutdown()
    def test_num_workers(self):
        """Make sure we can properly return the number of workers"""
        # We use the funcdict as a mechanism to test Runner
        runner = ExecuteTAFuncDict(ta=target_delayed, stats=self.stats, run_obj='quality')
        runner = DaskParallelRunner(runner, n_workers=2)
        self.assertEqual(runner.num_workers(), 2)

        # Reduce the number of workers
        # have to give time for the worker to be killed
        runner.client.cluster.scale(1)
        time.sleep(2)
        self.assertEqual(runner.num_workers(), 1)
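    # The sketch below is NOT part of the original test suite. It is a hedged
    # illustration of the happy-path submit/wait/collect cycle that the tests
    # above rely on, reusing the module-level (picklable) ``target_delayed``
    # from test_num_workers. The method name is hypothetical.
    def test_successful_run_roundtrip_sketch(self):
        runner = ExecuteTAFuncDict(ta=target_delayed, stats=self.stats, run_obj='quality')
        runner = DaskParallelRunner(runner, n_workers=2)

        run_info = RunInfo(config=2, instance='test', instance_specific="0",
                           seed=0, cutoff=None, capped=False, budget=0.0)
        runner.submit_run(run_info)  # non-blocking: hands the run to dask
        runner.wait()                # blocks until at least one run finished
        run_info, result = runner.get_finished_runs()[0]

        # A picklable, module-level target should run without crashing, so no
        # 'traceback' entry is expected in the additional info.
        self.assertNotIn('traceback', result.additional_info)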
    def __init__(
        self,
        scenario: Scenario,
        tae_runner: Optional[Union[Type[BaseRunner], Callable]] = None,
        tae_runner_kwargs: Optional[Dict] = None,
        runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None,
        runhistory_kwargs: Optional[Dict] = None,
        intensifier: Optional[Type[AbstractRacer]] = None,
        intensifier_kwargs: Optional[Dict] = None,
        acquisition_function: Optional[Type[AbstractAcquisitionFunction]] = None,
        acquisition_function_kwargs: Optional[Dict] = None,
        integrate_acquisition_function: bool = False,
        acquisition_function_optimizer: Optional[Type[AcquisitionFunctionMaximizer]] = None,
        acquisition_function_optimizer_kwargs: Optional[Dict] = None,
        model: Optional[Type[AbstractEPM]] = None,
        model_kwargs: Optional[Dict] = None,
        runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None,
        runhistory2epm_kwargs: Optional[Dict] = None,
        initial_design: Optional[Type[InitialDesign]] = None,
        initial_design_kwargs: Optional[Dict] = None,
        initial_configurations: Optional[List[Configuration]] = None,
        stats: Optional[Stats] = None,
        restore_incumbent: Optional[Configuration] = None,
        rng: Optional[Union[np.random.RandomState, int]] = None,
        smbo_class: Optional[Type[SMBO]] = None,
        run_id: Optional[int] = None,
        random_configuration_chooser: Optional[Type[RandomConfigurationChooser]] = None,
        random_configuration_chooser_kwargs: Optional[Dict] = None,
        dask_client: Optional[dask.distributed.Client] = None,
        n_jobs: Optional[int] = 1,
    ):
        """
        Constructor

        Parameters
        ----------
        scenario : ~smac.scenario.scenario.Scenario
            Scenario object
        tae_runner : ~smac.tae.base.BaseRunner or callable
            Callable or implementation of :class:`~smac.tae.base.BaseRunner`.
            In case a callable is passed, it will be wrapped by
            :class:`~smac.tae.execute_func.ExecuteTAFuncDict`.
            If not set, it will be initialized with
            :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`.
        tae_runner_kwargs : Optional[Dict]
            Arguments passed to the constructor of ``tae_runner``
        runhistory : RunHistory
            Runhistory to store all algorithm runs
        runhistory_kwargs : Optional[Dict]
            Arguments passed to the constructor of the runhistory.
            We strongly advise against changing the aggregation function,
            since it will break some code assumptions.
        intensifier : Intensifier
            Intensification object that races configurations to decide the
            current incumbent
        intensifier_kwargs : Optional[Dict]
            Arguments passed to the constructor of ``intensifier``
        acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction
            Class or object that implements the
            :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`.
            Will use :class:`~smac.optimizer.acquisition.EI` or
            :class:`~smac.optimizer.acquisition.LogEI` if not set.
            ``acquisition_function_kwargs`` is passed to the class constructor.
        acquisition_function_kwargs : Optional[Dict]
            Dictionary to pass specific arguments to ``acquisition_function``
        integrate_acquisition_function : bool, default=False
            Whether to integrate the acquisition function. Works only with models
            which can sample their hyperparameters (i.e. GaussianProcessMCMC).
        acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer
            Object that implements the
            :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`.
            Will use :class:`~smac.optimizer.ei_optimization.LocalAndSortedRandomSearch`
            if not set.
        acquisition_function_optimizer_kwargs : Optional[Dict]
            Arguments passed to the constructor of ``acquisition_function_optimizer``
        model : AbstractEPM
            Model that implements train() and predict().
            Will use a :class:`~smac.epm.rf_with_instances.RandomForestWithInstances`
            if not set.
        model_kwargs : Optional[Dict]
            Arguments passed to the constructor of ``model``
        runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EPM
            Object that implements the AbstractRunHistory2EPM. If None,
            will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost`
            if objective is cost, or
            :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost`
            if objective is runtime.
        runhistory2epm_kwargs : Optional[Dict]
            Arguments passed to the constructor of ``runhistory2epm``
        initial_design : InitialDesign
            Initial sampling design
        initial_design_kwargs : Optional[Dict]
            Arguments passed to the constructor of ``initial_design``
        initial_configurations : List[Configuration]
            List of initial configurations for the initial design --
            cannot be used together with ``initial_design``
        stats : Stats
            Optional stats object
        rng : np.random.RandomState
            Random number generator
        restore_incumbent : Configuration
            Incumbent used if restoring to a previous state
        smbo_class : ~smac.optimizer.smbo.SMBO
            Class implementing the SMBO interface which will be used to
            instantiate the optimizer class.
        run_id : int (optional)
            Run ID will be used as subfolder for output_dir. If no ``run_id``
            is given, a random ``run_id`` will be chosen.
        random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser
            How often to choose a random configuration during the
            intensification procedure.
        random_configuration_chooser_kwargs : Optional[Dict]
            Arguments passed to the constructor of ``random_configuration_chooser``
        dask_client : dask.distributed.Client
            User-created dask client that can be used to start a dask cluster
            and then attach SMAC to it.
        n_jobs : int, optional
            Number of jobs. If > 1 or -1, this creates a dask client if
            ``dask_client`` is ``None``. Will be ignored if ``dask_client``
            is not ``None``. If ``None``, this value will be set to 1;
            if ``-1``, it will be set to the number of cpu cores.
            (See the usage sketch following this constructor.)
        """
        self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

        self.scenario = scenario
        self.output_dir = ""
        if not restore_incumbent:
            # restore_incumbent is used by the CLI interface, which provides a
            # method for restoring a SMAC run given an output directory.
            # This is the default path.
            # Initial random number generator
            run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
            self.output_dir = create_output_directory(scenario, run_id)
        elif scenario.output_dir is not None:  # type: ignore[attr-defined] # noqa F821
            run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
            # The output directory is created in the CLI when restoring from a
            # folder. Calling the function again in the facade results in two
            # folders being created: run_X and run_X.OLD. If we are restoring,
            # the output folder exists already and we omit creating it, but we
            # set self.output_dir to that directory. This is necessary because
            # we want to write the trajectory to the new output dir in the CLI.
            self.output_dir = cast(str, scenario.output_dir_for_this_run)  # type: ignore[attr-defined] # noqa F821
        rng = cast(np.random.RandomState, rng)

        if (
            scenario.deterministic is True  # type: ignore[attr-defined] # noqa F821
            and getattr(scenario, 'tuner_timeout', None) is None
            and scenario.run_obj == 'quality'  # type: ignore[attr-defined] # noqa F821
        ):
            self.logger.info(
                'Optimizing a deterministic scenario for quality without a tuner timeout - will make '
                'SMAC deterministic and only evaluate one configuration per iteration!'
            )
            scenario.intensification_percentage = 1e-10  # type: ignore[attr-defined] # noqa F821
            scenario.min_chall = 1  # type: ignore[attr-defined] # noqa F821
        scenario.write()

        # Initialize stats object
        if stats:
            self.stats = stats
        else:
            self.stats = Stats(scenario)

        if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
            self.logger.warning("Runtime as objective automatically activates log(y) transformation")
            self.scenario.transform_y = "LOG"  # type: ignore[attr-defined] # noqa F821

        # Initialize empty runhistory
        runhistory_def_kwargs = {}
        if runhistory_kwargs is not None:
            runhistory_def_kwargs.update(runhistory_kwargs)
        if runhistory is None:
            runhistory = RunHistory(**runhistory_def_kwargs)
        elif inspect.isclass(runhistory):
            runhistory = runhistory(**runhistory_def_kwargs)  # type: ignore[operator] # noqa F821
        elif isinstance(runhistory, RunHistory):
            pass
        else:
            raise ValueError('runhistory has to be a class or an object of RunHistory')

        rand_conf_chooser_kwargs = {'rng': rng}
        if random_configuration_chooser_kwargs is not None:
            rand_conf_chooser_kwargs.update(random_configuration_chooser_kwargs)
        if random_configuration_chooser is None:
            if 'prob' not in rand_conf_chooser_kwargs:
                rand_conf_chooser_kwargs['prob'] = scenario.rand_prob  # type: ignore[attr-defined] # noqa F821
            random_configuration_chooser_instance = ChooserProb(
                **rand_conf_chooser_kwargs  # type: ignore[arg-type] # noqa F821
            )  # type: RandomConfigurationChooser
        elif inspect.isclass(random_configuration_chooser):
            random_configuration_chooser_instance = random_configuration_chooser(
                **rand_conf_chooser_kwargs)  # type: ignore[arg-type] # noqa F821
        elif not isinstance(random_configuration_chooser, RandomConfigurationChooser):
            raise ValueError("random_configuration_chooser has to be"
                             " a class or object of RandomConfigurationChooser")

        # Reset the random number generator in the config space to draw different
        # random configurations with each seed given to SMAC
        scenario.cs.seed(rng.randint(MAXINT))  # type: ignore[attr-defined] # noqa F821

        # Initial Trajectory Logger
        traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

        # Initial EPM
        types, bounds = get_types(scenario.cs, scenario.feature_array)  # type: ignore[attr-defined] # noqa F821
        model_def_kwargs = {
            'types': types,
            'bounds': bounds,
            'instance_features': scenario.feature_array,
            'seed': rng.randint(MAXINT),
            'pca_components': scenario.PCA_DIM,
        }
        if model_kwargs is not None:
            model_def_kwargs.update(model_kwargs)
        if model is None:
            for key, value in {
                'log_y': scenario.transform_y in ["LOG", "LOGS"],  # type: ignore[attr-defined] # noqa F821
                'num_trees': scenario.rf_num_trees,  # type: ignore[attr-defined] # noqa F821
                'do_bootstrapping': scenario.rf_do_bootstrapping,  # type: ignore[attr-defined] # noqa F821
                'ratio_features': scenario.rf_ratio_features,  # type: ignore[attr-defined] # noqa F821
                'min_samples_split': scenario.rf_min_samples_split,  # type: ignore[attr-defined] # noqa F821
                'min_samples_leaf': scenario.rf_min_samples_leaf,  # type: ignore[attr-defined] # noqa F821
                'max_depth': scenario.rf_max_depth,  # type: ignore[attr-defined] # noqa F821
            }.items():
                if key not in model_def_kwargs:
                    model_def_kwargs[key] = value
            model_def_kwargs['configspace'] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
            model_instance = (
                RandomForestWithInstances(**model_def_kwargs)  # type: ignore[arg-type] # noqa F821
            )  # type: AbstractEPM
        elif inspect.isclass(model):
            model_def_kwargs['configspace'] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
            model_instance = model(**model_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError("Model not recognized: %s" % (type(model)))

        # Initial acquisition function
        acq_def_kwargs = {'model': model_instance}
        if acquisition_function_kwargs is not None:
            acq_def_kwargs.update(acquisition_function_kwargs)
        if acquisition_function is None:
            if scenario.transform_y in ["LOG", "LOGS"]:  # type: ignore[attr-defined] # noqa F821
                acquisition_function_instance = (
                    LogEI(**acq_def_kwargs)  # type: ignore[arg-type] # noqa F821
                )  # type: AbstractAcquisitionFunction
            else:
                acquisition_function_instance = EI(**acq_def_kwargs)  # type: ignore[arg-type] # noqa F821
        elif inspect.isclass(acquisition_function):
            acquisition_function_instance = acquisition_function(**acq_def_kwargs)
        else:
            raise TypeError(
                "Argument acquisition_function must be None or an object implementing the "
                "AbstractAcquisitionFunction, not %s." % type(acquisition_function))
        if integrate_acquisition_function:
            acquisition_function_instance = IntegratedAcquisitionFunction(
                acquisition_function=acquisition_function_instance,
                **acq_def_kwargs)

        # Initialize optimizer on acquisition function
        acq_func_opt_kwargs = {
            'acquisition_function': acquisition_function_instance,
            'config_space': scenario.cs,  # type: ignore[attr-defined] # noqa F821
            'rng': rng,
        }
        if acquisition_function_optimizer_kwargs is not None:
            acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs)
        if acquisition_function_optimizer is None:
            for key, value in {
                'max_steps': scenario.sls_max_steps,  # type: ignore[attr-defined] # noqa F821
                'n_steps_plateau_walk': scenario.sls_n_steps_plateau_walk,  # type: ignore[attr-defined] # noqa F821
            }.items():
                if key not in acq_func_opt_kwargs:
                    acq_func_opt_kwargs[key] = value
            acquisition_function_optimizer_instance = (
                LocalAndSortedRandomSearch(**acq_func_opt_kwargs)  # type: ignore[arg-type] # noqa F821
            )  # type: AcquisitionFunctionMaximizer
        elif inspect.isclass(acquisition_function_optimizer):
            acquisition_function_optimizer_instance = acquisition_function_optimizer(
                **acq_func_opt_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError(
                "Argument acquisition_function_optimizer must be None or an object implementing the "
                "AcquisitionFunctionMaximizer, but is '%s'" %
                type(acquisition_function_optimizer))

        # Initialize tae_runner
        # First case: if tae_runner is None, the target algorithm is a call
        # string in the scenario file
        tae_def_kwargs = {
            'stats': self.stats,
            'run_obj': scenario.run_obj,
            'par_factor': scenario.par_factor,  # type: ignore[attr-defined] # noqa F821
            'cost_for_crash': scenario.cost_for_crash,  # type: ignore[attr-defined] # noqa F821
            'abort_on_first_run_crash': scenario.abort_on_first_run_crash  # type: ignore[attr-defined] # noqa F821
        }
        if tae_runner_kwargs is not None:
            tae_def_kwargs.update(tae_runner_kwargs)

        if 'ta' not in tae_def_kwargs:
            tae_def_kwargs['ta'] = scenario.ta  # type: ignore[attr-defined] # noqa F821
        if tae_runner is None:
            tae_def_kwargs['ta'] = scenario.ta  # type: ignore[attr-defined] # noqa F821
            tae_runner_instance = (
                ExecuteTARunOld(**tae_def_kwargs)  # type: ignore[arg-type] # noqa F821
            )  # type: BaseRunner
        elif inspect.isclass(tae_runner):
            tae_runner_instance = cast(
                BaseRunner, tae_runner(**tae_def_kwargs))  # type: ignore[arg-type] # noqa F821
        elif callable(tae_runner):
            tae_def_kwargs['ta'] = tae_runner
            tae_def_kwargs['use_pynisher'] = scenario.limit_resources  # type: ignore[attr-defined] # noqa F821
            tae_runner_instance = ExecuteTAFuncDict(**tae_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError(
                "Argument 'tae_runner' is %s, but must be "
                "either None, a callable or an object implementing "
                "BaseRunner. Passing 'None' will result in the "
                "creation of target algorithm runner based on the "
                "call string in the scenario file." % type(tae_runner))

        # In case of a parallel run, wrap the single worker in a parallel runner
        if n_jobs is None or n_jobs == 1:
            _n_jobs = 1
        elif n_jobs == -1:
            _n_jobs = joblib.cpu_count()
        elif n_jobs > 0:
            _n_jobs = n_jobs
        else:
            raise ValueError(
                'Number of tasks must be positive, None or -1, but is %s' % str(n_jobs))

        if _n_jobs > 1 or dask_client is not None:
            tae_runner_instance = DaskParallelRunner(
                tae_runner_instance,
                n_workers=_n_jobs,
                output_directory=self.output_dir,
                dask_client=dask_client,
            )

        # Check that overall objective and tae objective are the same
        # TODO: remove these two ignores once the scenario object knows all its attributes!
        if tae_runner_instance.run_obj != scenario.run_obj:  # type: ignore[union-attr] # noqa F821
            raise ValueError(
                "Objective for the target algorithm runner and "
                "the scenario must be the same, but are '%s' and "
                "'%s'" % (tae_runner_instance.run_obj, scenario.run_obj))  # type: ignore[union-attr] # noqa F821

        # Initialize intensification
        intensifier_def_kwargs = {
            'stats': self.stats,
            'traj_logger': traj_logger,
            'rng': rng,
            'instances': scenario.train_insts,  # type: ignore[attr-defined] # noqa F821
            'cutoff': scenario.cutoff,  # type: ignore[attr-defined] # noqa F821
            'deterministic': scenario.deterministic,  # type: ignore[attr-defined] # noqa F821
            'run_obj_time': scenario.run_obj == "runtime",  # type: ignore[attr-defined] # noqa F821
            'instance_specifics': scenario.instance_specific,  # type: ignore[attr-defined] # noqa F821
            'adaptive_capping_slackfactor': scenario.intens_adaptive_capping_slackfactor,  # type: ignore[attr-defined] # noqa F821
            'min_chall': scenario.intens_min_chall  # type: ignore[attr-defined] # noqa F821
        }
        if isinstance(intensifier, Intensifier) \
                or (intensifier is not None
                    and inspect.isclass(intensifier)
                    and issubclass(intensifier, Intensifier)):
            intensifier_def_kwargs['always_race_against'] = \
                scenario.cs.get_default_configuration()  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs['use_ta_time_bound'] = scenario.use_ta_time  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs['minR'] = scenario.minR  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs['maxR'] = scenario.maxR  # type: ignore[attr-defined] # noqa F821

        if intensifier_kwargs is not None:
            intensifier_def_kwargs.update(intensifier_kwargs)

        if intensifier is None:
            intensifier_instance = (
                Intensifier(**intensifier_def_kwargs)  # type: ignore[arg-type] # noqa F821
            )  # type: AbstractRacer
        elif inspect.isclass(intensifier):
            intensifier_instance = intensifier(**intensifier_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError(
                "Argument intensifier must be None or an object implementing the AbstractRacer, but is '%s'" %
                type(intensifier))

        # Initial design
        if initial_design is not None and initial_configurations is not None:
            raise ValueError(
                "Either use initial_design or initial_configurations; but not both")

        init_design_def_kwargs = {
            'cs': scenario.cs,  # type: ignore[attr-defined] # noqa F821
            'traj_logger': traj_logger,
            'rng': rng,
            'ta_run_limit': scenario.ta_run_limit,  # type: ignore[attr-defined] # noqa F821
            'configs': initial_configurations,
            'n_configs_x_params': 0,
            'max_config_fracs': 0.0,
        }
        if initial_design_kwargs is not None:
            init_design_def_kwargs.update(initial_design_kwargs)
        if initial_configurations is not None:
            initial_design_instance = InitialDesign(**init_design_def_kwargs)
        elif initial_design is None:
            if scenario.initial_incumbent == "DEFAULT":  # type: ignore[attr-defined] # noqa F821
                init_design_def_kwargs['max_config_fracs'] = 0.0
                initial_design_instance = DefaultConfiguration(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "RANDOM":  # type: ignore[attr-defined] # noqa F821
                init_design_def_kwargs['max_config_fracs'] = 0.0
                initial_design_instance = RandomConfigurations(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "LHD":  # type: ignore[attr-defined] # noqa F821
                initial_design_instance = LHDesign(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "FACTORIAL":  # type: ignore[attr-defined] # noqa F821
                initial_design_instance = FactorialInitialDesign(**init_design_def_kwargs)
            elif scenario.initial_incumbent == "SOBOL":  # type: ignore[attr-defined] # noqa F821
                initial_design_instance = SobolDesign(**init_design_def_kwargs)
            else:
                raise ValueError("Don't know what kind of initial_incumbent "
                                 "'%s' is" % scenario.initial_incumbent)  # type: ignore[attr-defined] # noqa F821
        elif inspect.isclass(initial_design):
            initial_design_instance = initial_design(**init_design_def_kwargs)
        else:
            raise TypeError(
                "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'" %
                type(initial_design))

        # If we log the performance data, the RFRImputator will already get
        # log transform data from the runhistory
        if scenario.transform_y in ["LOG", "LOGS"]:  # type: ignore[attr-defined] # noqa F821
            cutoff = np.log(np.nanmin([np.inf, np.float_(scenario.cutoff)]))  # type: ignore[attr-defined] # noqa F821
            threshold = cutoff + np.log(scenario.par_factor)  # type: ignore[attr-defined] # noqa F821
        else:
            cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)])  # type: ignore[attr-defined] # noqa F821
            threshold = cutoff * scenario.par_factor  # type: ignore[attr-defined] # noqa F821

        num_params = len(scenario.cs.get_hyperparameters())  # type: ignore[attr-defined] # noqa F821
        imputor = RFRImputator(rng=rng,
                               cutoff=cutoff,
                               threshold=threshold,
                               model=model_instance,
                               change_threshold=0.01,
                               max_iter=2)

        r2e_def_kwargs = {
            'scenario': scenario,
            'num_params': num_params,
            'success_states': [StatusType.SUCCESS, ],
            'impute_censored_data': True,
            'impute_state': [StatusType.CAPPED, ],
            'imputor': imputor,
            'scale_perc': 5,
        }
        if scenario.run_obj == 'quality':
            r2e_def_kwargs.update({
                'success_states': [StatusType.SUCCESS, StatusType.CRASHED, StatusType.MEMOUT],
                'impute_censored_data': False,
                'impute_state': None,
            })

        if isinstance(intensifier_instance, (SuccessiveHalving, Hyperband)) and scenario.run_obj == "quality":
            r2e_def_kwargs.update({
                'success_states': [
                    StatusType.SUCCESS,
                    StatusType.CRASHED,
                    StatusType.MEMOUT,
                    StatusType.DONOTADVANCE,
                ],
                'consider_for_higher_budgets_state': [
                    StatusType.DONOTADVANCE,
                    StatusType.TIMEOUT,
                    StatusType.CRASHED,
                    StatusType.MEMOUT,
                ],
            })

        if runhistory2epm_kwargs is not None:
            r2e_def_kwargs.update(runhistory2epm_kwargs)

        if runhistory2epm is None:
            if scenario.run_obj == 'runtime':
                rh2epm = (
                    RunHistory2EPM4LogCost(**r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
                )  # type: AbstractRunHistory2EPM
            elif scenario.run_obj == 'quality':
                if scenario.transform_y == "NONE":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4Cost(**r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
                elif scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
                elif scenario.transform_y == "LOGS":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4LogScaledCost(**r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
                elif scenario.transform_y == "INVS":  # type: ignore[attr-defined] # noqa F821
                    rh2epm = RunHistory2EPM4InvScaledCost(**r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
            else:
                raise ValueError('Unknown run objective: %s. Should be either '
                                 'quality or runtime.' % self.scenario.run_obj)
        elif inspect.isclass(runhistory2epm):
            rh2epm = runhistory2epm(**r2e_def_kwargs)  # type: ignore[arg-type] # noqa F821
        else:
            raise TypeError(
                "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'" %
                type(runhistory2epm))

        smbo_args = {
            'scenario': scenario,
            'stats': self.stats,
            'initial_design': initial_design_instance,
            'runhistory': runhistory,
            'runhistory2epm': rh2epm,
            'intensifier': intensifier_instance,
            'num_run': run_id,
            'model': model_instance,
            'acq_optimizer': acquisition_function_optimizer_instance,
            'acquisition_func': acquisition_function_instance,
            'rng': rng,
            'restore_incumbent': restore_incumbent,
            'random_configuration_chooser': random_configuration_chooser_instance,
            'tae_runner': tae_runner_instance,
        }  # type: Dict[str, Any]

        if smbo_class is None:
            self.solver = SMBO(**smbo_args)  # type: ignore[arg-type] # noqa F821
        else:
            self.solver = smbo_class(**smbo_args)  # type: ignore[arg-type] # noqa F821
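    # ------------------------------------------------------------------
    # Hedged usage sketch (illustration only; not part of this class).
    # It shows how the ``n_jobs`` / ``dask_client`` arguments documented in
    # the constructor above translate into parallel evaluation. It assumes
    # this facade class is ``SMAC4AC``; ``scenario`` and ``my_target`` are
    # hypothetical placeholders supplied by the user.
    #
    #     # Let SMAC create (and later close) its own local dask cluster:
    #     smac = SMAC4AC(scenario=scenario, tae_runner=my_target, n_jobs=4)
    #
    #     # Or attach to a user-managed cluster, which SMAC will not shut down:
    #     client = dask.distributed.Client(n_workers=4)
    #     smac = SMAC4AC(scenario=scenario, tae_runner=my_target, dask_client=client)
    # ------------------------------------------------------------------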