def restore_state_after_output_dir(self, scen, stats, traj_list_aclib, traj_list_old):
    """Finish processing files for state-restoration. The trajectory is read in,
    but needs to be written to the new output folder. Therefore, the output
    directory is created. This needs to be considered in the SMAC facade."""
    # write trajectory-list
    traj_path_aclib = os.path.join(scen.output_dir, "traj_aclib2.json")
    traj_path_old = os.path.join(scen.output_dir, "traj_old.csv")
    with open(traj_path_aclib, 'w') as traj_fn:
        traj_fn.writelines(traj_list_aclib)
    with open(traj_path_old, 'w') as traj_fn:
        traj_fn.writelines(traj_list_old)
    # read trajectory to retrieve incumbent
    # TODO: replace this with simply traj_path_aclib?
    trajectory = TrajLogger.read_traj_aclib_format(fn=traj_path_aclib, cs=scen.cs)
    incumbent = trajectory[-1]["incumbent"]
    self.logger.debug("Restored incumbent %s from %s", incumbent, traj_path_aclib)
    return incumbent
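# Illustrative sketch (not part of the module): how restore_state_after_output_dir()
# is meant to be used when resuming a run. The trajectory files are first read from
# the old output directory, the new output directory is created, and only then is the
# incumbent restored. ``restore_state`` and ``create_output_directory`` are assumed to
# behave as in the CLI code further below (see main_cli()).
#
#   rh, stats, traj_list_aclib, traj_list_old = self.restore_state(scen, main_args_)
#   scen.output_dir_for_this_run = create_output_directory(scen, main_args_.seed, root_logger)
#   scen.write()
#   incumbent = self.restore_state_after_output_dir(scen, stats, traj_list_aclib, traj_list_old)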
def __init__(
        self,
        scenario: Scenario,
        tae_runner: Optional[Union[Type[ExecuteTARun], Callable]] = None,
        tae_runner_kwargs: Optional[dict] = None,
        runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None,
        runhistory_kwargs: Optional[dict] = None,
        intensifier: Optional[Type[Intensifier]] = None,
        intensifier_kwargs: Optional[dict] = None,
        acquisition_function: Optional[Type[AbstractAcquisitionFunction]] = None,
        acquisition_function_kwargs: Optional[dict] = None,
        integrate_acquisition_function: bool = False,
        acquisition_function_optimizer: Optional[Type[AcquisitionFunctionMaximizer]] = None,
        acquisition_function_optimizer_kwargs: Optional[dict] = None,
        model: Optional[Type[AbstractEPM]] = None,
        model_kwargs: Optional[dict] = None,
        runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None,
        runhistory2epm_kwargs: Optional[dict] = None,
        initial_design: Optional[Type[InitialDesign]] = None,
        initial_design_kwargs: Optional[dict] = None,
        initial_configurations: Optional[List[Configuration]] = None,
        stats: Optional[Stats] = None,
        restore_incumbent: Optional[Configuration] = None,
        rng: Optional[Union[np.random.RandomState, int]] = None,
        smbo_class: Optional[SMBO] = None,
        run_id: Optional[int] = None,
        random_configuration_chooser: Optional[Type[RandomConfigurationChooser]] = None,
        random_configuration_chooser_kwargs: Optional[dict] = None,
):
    """Constructor

    Parameters
    ----------
    scenario : ~dsmac.scenario.scenario.Scenario
        Scenario object
    tae_runner : ~dsmac.tae.execute_ta_run.ExecuteTARun or callable
        Callable or implementation of
        :class:`~dsmac.tae.execute_ta_run.ExecuteTARun`. In case a callable is
        passed, it will be wrapped by
        :class:`~dsmac.tae.execute_func.ExecuteTAFuncDict`. If not set, it will
        be initialized with
        :class:`~dsmac.tae.execute_ta_run_old.ExecuteTARunOld`.
    tae_runner_kwargs : Optional[dict]
        Arguments passed to the constructor of ``tae_runner``.
    runhistory : RunHistory
        Runhistory to store all algorithm runs.
    runhistory_kwargs : Optional[dict]
        Arguments passed to the constructor of ``runhistory``. We strongly
        advise against changing the aggregation function, since it will break
        some code assumptions.
    intensifier : Intensifier
        Intensification object that races configurations to decide the current
        incumbent.
    intensifier_kwargs : Optional[dict]
        Arguments passed to the constructor of ``intensifier``.
    acquisition_function : ~dsmac.optimizer.acquisition.AbstractAcquisitionFunction
        Class or object that implements the
        :class:`~dsmac.optimizer.acquisition.AbstractAcquisitionFunction`.
        Will use :class:`~dsmac.optimizer.acquisition.EI` or
        :class:`~dsmac.optimizer.acquisition.LogEI` if not set.
        ``acquisition_function_kwargs`` is passed to the class constructor.
    acquisition_function_kwargs : Optional[dict]
        Dictionary of specific arguments passed to ``acquisition_function``.
    integrate_acquisition_function : bool, default=False
        Whether to integrate the acquisition function. Works only with models
        which can sample their hyperparameters (i.e. GaussianProcessMCMC).
    acquisition_function_optimizer : ~dsmac.optimizer.ei_optimization.AcquisitionFunctionMaximizer
        Object that implements the
        :class:`~dsmac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`.
        Will use
        :class:`~dsmac.optimizer.ei_optimization.InterleavedLocalAndRandomSearch`
        if not set.
    acquisition_function_optimizer_kwargs : Optional[dict]
        Arguments passed to the constructor of ``acquisition_function_optimizer``.
    model : AbstractEPM
        Model that implements train() and predict(). Will use a
        :class:`~dsmac.epm.rf_with_instances.RandomForestWithInstances` if not
        set.
    model_kwargs : Optional[dict]
        Arguments passed to the constructor of ``model``.
    runhistory2epm : ~dsmac.runhistory.runhistory2epm.RunHistory2EPM
        Object that implements the AbstractRunHistory2EPM. If None, will use
        :class:`~dsmac.runhistory.runhistory2epm.RunHistory2EPM4Cost` if the
        objective is cost, or
        :class:`~dsmac.runhistory.runhistory2epm.RunHistory2EPM4LogCost` if the
        objective is runtime.
    runhistory2epm_kwargs : Optional[dict]
        Arguments passed to the constructor of ``runhistory2epm``.
    initial_design : InitialDesign
        Initial sampling design.
    initial_design_kwargs : Optional[dict]
        Arguments passed to the constructor of ``initial_design``.
    initial_configurations : List[Configuration]
        List of initial configurations for the initial design -- cannot be
        used together with ``initial_design``.
    stats : Stats
        Optional stats object.
    restore_incumbent : Configuration
        Incumbent used if restoring to a previous state.
    rng : np.random.RandomState
        Random number generator.
    smbo_class : ~dsmac.optimizer.smbo.SMBO
        Class implementing the SMBO interface which will be used to
        instantiate the optimizer class.
    run_id : int (optional)
        Run ID will be used as subfolder for output_dir. If no ``run_id`` is
        given, a random ``run_id`` will be chosen.
    random_configuration_chooser : ~dsmac.optimizer.random_configuration_chooser.RandomConfigurationChooser
        How often to choose a random configuration during the intensification
        procedure.
    random_configuration_chooser_kwargs : Optional[dict]
        Arguments passed to the constructor of ``random_configuration_chooser``.
    """
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

    aggregate_func = average_cost
    self.scenario = scenario
    self.output_dir = ""
    if not restore_incumbent:
        # restore_incumbent is used by the CLI interface, which provides a
        # method for restoring a SMAC run given an output directory. This is
        # the default path.
        # initial random number generator
        # run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        # run_id = datetime.now().strftime("%Y%m%d%H%M%S%f")
        run_id = uuid1()
        # NOTE: in this branch ``rng`` is used as-is below; it is expected to
        # already be a np.random.RandomState.
        # self.output_dir = create_output_directory(scenario, run_id)  # FIXME: run_id
        self.output_dir = scenario.output_dir  # create_output_directory(scenario, run_id)  # FIXME: run_id
    elif scenario.output_dir is not None:
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        # The output directory is created in the CLI when restoring from a
        # folder. Calling the function again in the facade results in two
        # folders being created: run_X and run_X.OLD. If we are restoring, the
        # output folder already exists, so we omit creating it but set
        # self.output_dir to that directory. This is necessary because we want
        # to write the trajectory to the new output directory in the CLI.
        self.output_dir = scenario.output_dir_for_this_run

    if (scenario.deterministic is True
            and getattr(scenario, 'tuner_timeout', None) is None
            and scenario.run_obj == 'quality'):
        self.logger.info(
            'Optimizing a deterministic scenario for quality without a tuner timeout - will make '
            'SMAC deterministic and only evaluate one configuration per iteration!'
        )
        scenario.intensification_percentage = 1e-10
        scenario.min_chall = 1

    scenario.write()

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario, file_system=scenario.file_system)

    if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":
        self.logger.warning("Runtime as objective automatically activates log(y) transformation")
        self.scenario.transform_y = "LOG"

    # initialize empty runhistory
    runhistory_def_kwargs = {'aggregate_func': aggregate_func}
    if runhistory_kwargs is not None:
        runhistory_def_kwargs.update(runhistory_kwargs)
    if runhistory is None:
        runhistory = RunHistory(**runhistory_def_kwargs,
                                file_system=scenario.file_system,
                                db_type=scenario.db_type,
                                db_args=scenario.db_args,
                                db_kwargs=scenario.db_kwargs)
    elif inspect.isclass(runhistory):
        runhistory = runhistory(**runhistory_def_kwargs)
    else:
        if runhistory.aggregate_func is None:
            runhistory.aggregate_func = aggregate_func

    rand_conf_chooser_kwargs = {'rng': rng}
    if random_configuration_chooser_kwargs is not None:
        rand_conf_chooser_kwargs.update(random_configuration_chooser_kwargs)
    if random_configuration_chooser is None:
        if 'prob' not in rand_conf_chooser_kwargs:
            rand_conf_chooser_kwargs['prob'] = scenario.rand_prob
        random_configuration_chooser = ChooserProb(**rand_conf_chooser_kwargs)
    elif inspect.isclass(random_configuration_chooser):
        random_configuration_chooser = random_configuration_chooser(**rand_conf_chooser_kwargs)
    elif not isinstance(random_configuration_chooser, RandomConfigurationChooser):
        raise ValueError("random_configuration_chooser has to be"
                         " a class or object of RandomConfigurationChooser")

    # reset random number generator in config space to draw different
    # random configurations with each seed given to SMAC
    scenario.cs.seed(rng.randint(MAXINT))

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats,
                             file_system=scenario.file_system)

    # initial EPM
    types, bounds = get_types(scenario.cs, scenario.feature_array)
    model_def_kwargs = {
        'types': types,
        'bounds': bounds,
        'instance_features': scenario.feature_array,
        'seed': rng.randint(MAXINT),
        'pca_components': scenario.PCA_DIM,
    }
    if model_kwargs is not None:
        model_def_kwargs.update(model_kwargs)
    if model is None:
        for key, value in {
            'log_y': scenario.transform_y in ["LOG", "LOGS"],
            'num_trees': scenario.rf_num_trees,
            'do_bootstrapping': scenario.rf_do_bootstrapping,
            'ratio_features': scenario.rf_ratio_features,
            'min_samples_split': scenario.rf_min_samples_split,
            'min_samples_leaf': scenario.rf_min_samples_leaf,
            'max_depth': scenario.rf_max_depth,
        }.items():
            if key not in model_def_kwargs:
                model_def_kwargs[key] = value
        model_def_kwargs['configspace'] = self.scenario.cs
        model = RandomForestWithInstances(**model_def_kwargs)
    elif inspect.isclass(model):
        model_def_kwargs['configspace'] = self.scenario.cs
        model = model(**model_def_kwargs)
    else:
        raise TypeError("Model not recognized: %s" % (type(model)))

    # initial acquisition function
    acq_def_kwargs = {'model': model}
    if acquisition_function_kwargs is not None:
        acq_def_kwargs.update(acquisition_function_kwargs)
    if acquisition_function is None:
        if scenario.transform_y in ["LOG", "LOGS"]:
            acquisition_function = LogEI(**acq_def_kwargs)
        else:
            acquisition_function = EI(**acq_def_kwargs)
    elif inspect.isclass(acquisition_function):
        acquisition_function = acquisition_function(**acq_def_kwargs)
    else:
        raise TypeError(
            "Argument acquisition_function must be None or an object implementing the "
            "AbstractAcquisitionFunction, not %s."
            % type(acquisition_function))
    if integrate_acquisition_function:
        acquisition_function = IntegratedAcquisitionFunction(
            acquisition_function=acquisition_function, **acq_def_kwargs)

    # initialize optimizer on acquisition function
    acq_func_opt_kwargs = {
        'acquisition_function': acquisition_function,
        'config_space': scenario.cs,
        'rng': rng,
    }
    if acquisition_function_optimizer_kwargs is not None:
        acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs)
    if acquisition_function_optimizer is None:
        for key, value in {
            'max_steps': scenario.sls_max_steps,
            'n_steps_plateau_walk': scenario.sls_n_steps_plateau_walk,
        }.items():
            if key not in acq_func_opt_kwargs:
                acq_func_opt_kwargs[key] = value
        acquisition_function_optimizer = InterleavedLocalAndRandomSearch(**acq_func_opt_kwargs)
    elif inspect.isclass(acquisition_function_optimizer):
        acquisition_function_optimizer = acquisition_function_optimizer(**acq_func_opt_kwargs)
    else:
        raise TypeError(
            "Argument acquisition_function_optimizer must be None or an object implementing the "
            "AcquisitionFunctionMaximizer, but is '%s'"
            % type(acquisition_function_optimizer))

    # initialize tae_runner
    # First case: if tae_runner is None, the target algorithm is a call string
    # in the scenario file
    tae_def_kwargs = {
        'stats': self.stats,
        'run_obj': scenario.run_obj,
        'runhistory': runhistory,
        'par_factor': scenario.par_factor,
        'cost_for_crash': scenario.cost_for_crash,
        'abort_on_first_run_crash': scenario.abort_on_first_run_crash,
    }
    if tae_runner_kwargs is not None:
        tae_def_kwargs.update(tae_runner_kwargs)
    if 'ta' not in tae_def_kwargs:
        tae_def_kwargs['ta'] = scenario.ta
    if tae_runner is None:
        tae_def_kwargs['ta'] = scenario.ta
        tae_runner = ExecuteTARunOld(**tae_def_kwargs)
    elif inspect.isclass(tae_runner):
        tae_runner = tae_runner(**tae_def_kwargs)
    elif callable(tae_runner):
        tae_def_kwargs['ta'] = tae_runner
        tae_runner = ExecuteTAFuncDict(**tae_def_kwargs)
    else:
        raise TypeError(
            "Argument 'tae_runner' is %s, but must be "
            "either None, a callable or an object implementing "
            "ExecuteTARun. Passing 'None' will result in the "
            "creation of a target algorithm runner based on the "
            "call string in the scenario file."
            % type(tae_runner))

    # Check that the overall objective and the tae objective are the same
    if tae_runner.run_obj != scenario.run_obj:
        raise ValueError("Objective for the target algorithm runner and "
                         "the scenario must be the same, but are '%s' and "
                         "'%s'" % (tae_runner.run_obj, scenario.run_obj))

    # initialize intensification
    intensifier_def_kwargs = {
        'tae_runner': tae_runner,
        'stats': self.stats,
        'traj_logger': traj_logger,
        'rng': rng,
        'instances': scenario.train_insts,
        'cutoff': scenario.cutoff,
        'deterministic': scenario.deterministic,
        'run_obj_time': scenario.run_obj == "runtime",
        'always_race_against': scenario.cs.get_default_configuration()
        if scenario.always_race_default else None,
        'use_ta_time_bound': scenario.use_ta_time,
        'instance_specifics': scenario.instance_specific,
        'minR': scenario.minR,
        'maxR': scenario.maxR,
        'adaptive_capping_slackfactor': scenario.intens_adaptive_capping_slackfactor,
        'min_chall': scenario.intens_min_chall,
    }
    if hasattr(scenario, 'filter_callback') and scenario.filter_callback is not None:
        self.logger.debug('Updating intensifier with filter_callback from scenario')
        intensifier_def_kwargs.update({'filter_callback': scenario.filter_callback})
    if intensifier_kwargs is not None:
        intensifier_def_kwargs.update(intensifier_kwargs)
    if intensifier is None:
        intensifier = Intensifier(**intensifier_def_kwargs)
    elif inspect.isclass(intensifier):
        intensifier = intensifier(**intensifier_def_kwargs)
    else:
        raise TypeError(
            "Argument intensifier must be None or an object implementing the Intensifier, but is '%s'"
            % type(intensifier))

    # initial design
    if initial_design is not None and initial_configurations is not None:
        initial_design.initial_configurations = initial_configurations
        initial_configurations = None
    init_design_def_kwargs = {
        'tae_runner': tae_runner,
        'scenario': scenario,
        'stats': self.stats,
        'traj_logger': traj_logger,
        'runhistory': runhistory,
        'rng': rng,
        'configs': initial_configurations,
        'intensifier': intensifier,
        'aggregate_func': aggregate_func,
        'n_configs_x_params': 0,
        'max_config_fracs': 0.0,
        # guard against initial_design being None (it is constructed below in that case)
        'initial_configurations': initial_design.initial_configurations
        if initial_design is not None else initial_configurations,
    }
    if initial_design_kwargs is not None:
        init_design_def_kwargs.update(initial_design_kwargs)
    if initial_configurations is not None:
        initial_design = InitialDesign(**init_design_def_kwargs)
    elif initial_design is None:
        if scenario.initial_incumbent == "DEFAULT":
            init_design_def_kwargs['max_config_fracs'] = 0.0
            initial_design = DefaultConfiguration(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "RANDOM":
            init_design_def_kwargs['max_config_fracs'] = 0.0
            initial_design = RandomConfigurations(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "LHD":
            initial_design = LHDesign(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "FACTORIAL":
            initial_design = FactorialInitialDesign(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "SOBOL":
            initial_design = SobolDesign(**init_design_def_kwargs)
        else:
            raise ValueError("Don't know what kind of initial_incumbent "
                             "'%s' is" % scenario.initial_incumbent)
    elif inspect.isclass(initial_design):
        initial_design = initial_design(**init_design_def_kwargs)
    else:
        raise TypeError(
            "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'"
            % type(initial_design))

    # If we log the performance data, the RFRImputator will already get
    # log-transformed data from the runhistory.
    if scenario.transform_y in ["LOG", "LOGS"]:
        cutoff = np.log(np.nanmin([np.inf, np.float_(scenario.cutoff)]))
        threshold = cutoff + np.log(scenario.par_factor)
    else:
        cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)])
        threshold = cutoff * scenario.par_factor
    num_params = len(scenario.cs.get_hyperparameters())
    imputor = RFRImputator(rng=rng,
                           cutoff=cutoff,
                           threshold=threshold,
                           model=model,
                           change_threshold=0.01,
                           max_iter=2)

    r2e_def_kwargs = {
        'scenario': scenario,
        'num_params': num_params,
        'success_states': [StatusType.SUCCESS, ],
        'impute_censored_data': True,
        'impute_state': [StatusType.CAPPED, ],
        'imputor': imputor,
        'scale_perc': 5,
    }
    if scenario.run_obj == 'quality':
        r2e_def_kwargs.update({
            'success_states': [StatusType.SUCCESS, StatusType.CRASHED],
            'impute_censored_data': False,
            'impute_state': None,
        })
    if runhistory2epm_kwargs is not None:
        r2e_def_kwargs.update(runhistory2epm_kwargs)
    if runhistory2epm is None:
        if scenario.run_obj == 'runtime':
            runhistory2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs)
        elif scenario.run_obj == 'quality':
            if scenario.transform_y == "NONE":
                runhistory2epm = RunHistory2EPM4Cost(**r2e_def_kwargs)
            elif scenario.transform_y == "LOG":
                runhistory2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs)
            elif scenario.transform_y == "LOGS":
                runhistory2epm = RunHistory2EPM4LogScaledCost(**r2e_def_kwargs)
            elif scenario.transform_y == "INVS":
                runhistory2epm = RunHistory2EPM4InvScaledCost(**r2e_def_kwargs)
        else:
            raise ValueError('Unknown run objective: %s. Should be either '
                             'quality or runtime.' % self.scenario.run_obj)
    elif inspect.isclass(runhistory2epm):
        runhistory2epm = runhistory2epm(**r2e_def_kwargs)
    else:
        raise TypeError(
            "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'"
            % type(runhistory2epm))

    smbo_args = {
        'scenario': scenario,
        'stats': self.stats,
        'initial_design': initial_design,
        'runhistory': runhistory,
        'runhistory2epm': runhistory2epm,
        'intensifier': intensifier,
        'aggregate_func': aggregate_func,
        'num_run': run_id,
        'model': model,
        'acq_optimizer': acquisition_function_optimizer,
        'acquisition_func': acquisition_function,
        'rng': rng,
        'restore_incumbent': restore_incumbent,
        'random_configuration_chooser': random_configuration_chooser,
    }
    if smbo_class is None:
        self.solver = SMBO(**smbo_args)
    else:
        self.solver = smbo_class(**smbo_args)
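# Illustrative sketch (not executed here): constructing and running this facade.
# It assumes the facade exposes an ``optimize()`` method that delegates to
# ``self.solver``, as in upstream SMAC; ``my_config_space`` and ``my_func`` are
# hypothetical placeholders, and the scenario keys follow the upstream convention.
#
#   import numpy as np
#   from dsmac.scenario.scenario import Scenario
#
#   scenario = Scenario({"run_obj": "quality",      # optimize solution quality
#                        "cs": my_config_space,     # a ConfigSpace object
#                        "deterministic": "true"})
#   smac = SMAC4AC(scenario=scenario,
#                  tae_runner=my_func,              # callable, wrapped by ExecuteTAFuncDict
#                  rng=np.random.RandomState(42))
#   incumbent = smac.optimize()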
def validate(self, config_mode='inc', instance_mode='train+test',
             repetitions=1, use_epm=False, n_jobs=-1, backend='threading'):
    """Create a validator object and run validation, using the scenario
    information, the runhistory from the SMBO and the tae_runner from the
    intensifier.

    Parameters
    ----------
    config_mode: str or list<Configuration>
        string or directly a list of Configuration;
        str from [def, inc, def+inc, wallclock_time, cpu_time, all];
        time evaluates at cpu- or wallclock-timesteps of:
        [max_time/2^0, max_time/2^1, max_time/2^3, ..., default]
        with max_time being the highest recorded time
    instance_mode: str
        what instances to use for validation, from [train, test, train+test]
    repetitions: int
        number of repetitions for nondeterministic algorithms
        (fixed to 1 for deterministic algorithms)
    use_epm: bool
        whether to use an EPM instead of evaluating all runs with the TAE
    n_jobs: int
        number of parallel processes used by joblib
    backend: str
        parallelization backend passed on to the validator / joblib
        (e.g. 'threading')

    Returns
    -------
    runhistory: RunHistory
        runhistory containing all specified runs
    """
    if isinstance(config_mode, str):
        traj_fn = os.path.join(self.scenario.output_dir_for_this_run, "traj_aclib2.json")
        trajectory = TrajLogger.read_traj_aclib_format(fn=traj_fn, cs=self.scenario.cs)
    else:
        trajectory = None
    if self.scenario.output_dir_for_this_run:
        new_rh_path = os.path.join(self.scenario.output_dir_for_this_run,
                                   "validated_runhistory.json")
    else:
        new_rh_path = None

    validator = Validator(self.scenario, trajectory, self.rng)
    if use_epm:
        new_rh = validator.validate_epm(config_mode=config_mode,
                                        instance_mode=instance_mode,
                                        repetitions=repetitions,
                                        runhistory=self.runhistory,
                                        output_fn=new_rh_path)
    else:
        new_rh = validator.validate(config_mode, instance_mode, repetitions,
                                    n_jobs, backend, self.runhistory,
                                    self.intensifier.tae_runner,
                                    output_fn=new_rh_path)
    return new_rh
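# Illustrative sketch (not executed here): validating the incumbent after an
# optimization run, using the keyword values accepted by validate() above.
# ``smac`` refers to the facade instance from the previous sketch.
#
#   validated_rh = smac.validate(config_mode='inc',
#                                instance_mode='train+test',
#                                repetitions=1,
#                                use_epm=False,
#                                n_jobs=1)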
def __init__(
        self,
        scenario: Scenario,
        # TODO: once we drop python3.4, add type hint
        # typing.Union[ExecuteTARun, callable]
        tae_runner=None,
        runhistory: RunHistory = None,
        intensifier: Intensifier = None,
        acquisition_function: AbstractAcquisitionFunction = None,
        model: AbstractEPM = None,
        runhistory2epm: AbstractRunHistory2EPM = None,
        initial_design: InitialDesign = None,
        initial_configurations: typing.List[Configuration] = None,
        stats: Stats = None,
        rng: np.random.RandomState = None,
        run_id: int = 1):
    """Constructor"""
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

    aggregate_func = average_cost
    self.runhistory = None
    self.trajectory = None

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario, file_system=scenario.file_system)

    self.output_dir = create_output_directory(scenario, run_id)
    scenario.write()

    # initialize empty runhistory
    if runhistory is None:
        runhistory = RunHistory(aggregate_func=aggregate_func,
                                file_system=scenario.file_system)
    # inject aggr_func if necessary
    if runhistory.aggregate_func is None:
        runhistory.aggregate_func = aggregate_func

    # initial random number generator
    num_run, rng = self._get_rng(rng=rng)

    # reset random number generator in config space to draw different
    # random configurations with each seed given to SMAC
    scenario.cs.seed(rng.randint(MAXINT))

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats,
                             file_system=scenario.file_system)

    # initial EPM
    types, bounds = get_types(scenario.cs, scenario.feature_array)
    if model is None:
        model = RandomForestWithInstances(
            configspace=scenario.cs,
            types=types,
            bounds=bounds,
            instance_features=scenario.feature_array,
            seed=rng.randint(MAXINT),
            pca_components=scenario.PCA_DIM,
            num_trees=scenario.rf_num_trees,
            do_bootstrapping=scenario.rf_do_bootstrapping,
            ratio_features=scenario.rf_ratio_features,
            min_samples_split=scenario.rf_min_samples_split,
            min_samples_leaf=scenario.rf_min_samples_leaf,
            max_depth=scenario.rf_max_depth,
        )

    # initial acquisition function
    if acquisition_function is None:
        if scenario.run_obj == "runtime":
            acquisition_function = LogEI(model=model)
        else:
            acquisition_function = EI(model=model)
    # inject model if necessary
    if acquisition_function.model is None:
        acquisition_function.model = model

    # initialize optimizer on acquisition function
    local_search = LocalSearch(
        acquisition_function,
        scenario.cs,
        max_steps=scenario.sls_max_steps,
        n_steps_plateau_walk=scenario.sls_n_steps_plateau_walk)

    # initialize tae_runner
    # First case: if tae_runner is None, the target algorithm is a call
    # string in the scenario file
    if tae_runner is None:
        tae_runner = ExecuteTARunOld(
            ta=scenario.ta,
            stats=self.stats,
            run_obj=scenario.run_obj,
            runhistory=runhistory,
            par_factor=scenario.par_factor,
            cost_for_crash=scenario.cost_for_crash)
    # Second case: the tae_runner is a function to be optimized
    elif callable(tae_runner):
        tae_runner = ExecuteTAFuncDict(
            ta=tae_runner,
            stats=self.stats,
            run_obj=scenario.run_obj,
            memory_limit=scenario.memory_limit,
            runhistory=runhistory,
            par_factor=scenario.par_factor,
            cost_for_crash=scenario.cost_for_crash)
    # Third case: if it is an ExecuteTARun we can simply use the instance.
    # Otherwise, the next check raises an exception
    elif not isinstance(tae_runner, ExecuteTARun):
        raise TypeError("Argument 'tae_runner' is %s, but must be "
                        "either a callable or an instance of "
                        "ExecuteTARun. Passing 'None' will result in the "
                        "creation of a target algorithm runner based on the "
                        "call string in the scenario file."
                        % type(tae_runner))

    # Check that the overall objective and the tae objective are the same
    if tae_runner.run_obj != scenario.run_obj:
        raise ValueError("Objective for the target algorithm runner and "
                         "the scenario must be the same, but are '%s' and "
                         "'%s'" % (tae_runner.run_obj, scenario.run_obj))

    # inject stats if necessary
    if tae_runner.stats is None:
        tae_runner.stats = self.stats
    # inject runhistory if necessary
    if tae_runner.runhistory is None:
        tae_runner.runhistory = runhistory
    # inject cost_for_crash
    if tae_runner.crash_cost != scenario.cost_for_crash:
        tae_runner.crash_cost = scenario.cost_for_crash

    # initialize intensification
    if intensifier is None:
        intensifier = Intensifier(
            tae_runner=tae_runner,
            stats=self.stats,
            traj_logger=traj_logger,
            rng=rng,
            instances=scenario.train_insts,
            cutoff=scenario.cutoff,
            deterministic=scenario.deterministic,
            run_obj_time=scenario.run_obj == "runtime",
            always_race_against=scenario.cs.get_default_configuration()
            if scenario.always_race_default else None,
            instance_specifics=scenario.instance_specific,
            minR=scenario.minR,
            maxR=scenario.maxR,
            adaptive_capping_slackfactor=scenario.intens_adaptive_capping_slackfactor,
            min_chall=scenario.intens_min_chall,
            distributer=scenario.distributer)
    # inject deps if necessary
    if intensifier.tae_runner is None:
        intensifier.tae_runner = tae_runner
    if intensifier.stats is None:
        intensifier.stats = self.stats
    if intensifier.traj_logger is None:
        intensifier.traj_logger = traj_logger

    # initial design
    if initial_design is not None and initial_configurations is not None:
        raise ValueError(
            "Either use initial_design or initial_configurations; but not both")

    if initial_configurations is not None:
        initial_design = InitialDesign(tae_runner=tae_runner,
                                       scenario=scenario,
                                       stats=self.stats,
                                       traj_logger=traj_logger,
                                       runhistory=runhistory,
                                       rng=rng,
                                       configs=initial_configurations,
                                       intensifier=intensifier,
                                       aggregate_func=aggregate_func)
    elif initial_design is None:
        if scenario.initial_incumbent == "DEFAULT":
            initial_design = DefaultConfiguration(
                tae_runner=tae_runner,
                scenario=scenario,
                stats=self.stats,
                traj_logger=traj_logger,
                runhistory=runhistory,
                rng=rng,
                intensifier=intensifier,
                aggregate_func=aggregate_func,
                max_config_fracs=0.0)
        elif scenario.initial_incumbent == "RANDOM":
            initial_design = RandomConfigurations(
                tae_runner=tae_runner,
                scenario=scenario,
                stats=self.stats,
                traj_logger=traj_logger,
                runhistory=runhistory,
                rng=rng,
                intensifier=intensifier,
                aggregate_func=aggregate_func,
                max_config_fracs=0.0)
        else:
            raise ValueError("Don't know what kind of initial_incumbent "
                             "'%s' is" % scenario.initial_incumbent)
    # inject deps if necessary
    if initial_design.tae_runner is None:
        initial_design.tae_runner = tae_runner
    if initial_design.scenario is None:
        initial_design.scenario = scenario
    if initial_design.stats is None:
        initial_design.stats = self.stats
    if initial_design.traj_logger is None:
        initial_design.traj_logger = traj_logger

    # initial conversion of runhistory into EPM data
    if runhistory2epm is None:
        num_params = len(scenario.cs.get_hyperparameters())
        if scenario.run_obj == "runtime":
            # if we log the performance data, the RFRImputator will already
            # get log-transformed data from the runhistory
            cutoff = np.log(scenario.cutoff)
            threshold = np.log(scenario.cutoff * scenario.par_factor)
            imputor = RFRImputator(rng=rng,
                                   cutoff=cutoff,
                                   threshold=threshold,
                                   model=model,
                                   change_threshold=0.01,
                                   max_iter=2)
            runhistory2epm = RunHistory2EPM4LogCost(
                scenario=scenario,
                num_params=num_params,
                success_states=[StatusType.SUCCESS, ],
                impute_censored_data=True,
                impute_state=[StatusType.CAPPED, ],
                imputor=imputor)
        elif scenario.run_obj == 'quality':
            runhistory2epm = RunHistory2EPM4Cost(
                scenario=scenario,
                num_params=num_params,
                success_states=[StatusType.SUCCESS, ],
                impute_censored_data=False,
                impute_state=None)
        else:
            raise ValueError('Unknown run objective: %s. Should be either '
                             'quality or runtime.' % scenario.run_obj)
    # inject scenario if necessary
    if runhistory2epm.scenario is None:
        runhistory2epm.scenario = scenario

    self.solver = EPILS_Solver(scenario=scenario,
                               stats=self.stats,
                               initial_design=initial_design,
                               runhistory=runhistory,
                               runhistory2epm=runhistory2epm,
                               intensifier=intensifier,
                               aggregate_func=aggregate_func,
                               num_run=num_run,
                               model=model,
                               acq_optimizer=local_search,
                               acquisition_func=acquisition_function,
                               rng=rng)
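# Illustrative sketch (not executed here): this facade is constructed the same way
# the CLI does further below (see main_cli()). ``scen``, ``rh``, ``initial_configs``
# and ``seed`` are the objects prepared in that CLI code; ``optimize()`` is assumed
# to be the facade's entry point, as it is for the CLI-created optimizers.
#
#   optimizer = EPILS(scenario=scen,
#                     rng=np.random.RandomState(seed),
#                     runhistory=rh,
#                     initial_configurations=initial_configs,
#                     run_id=seed)
#   optimizer.optimize()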
def main_cli(self, commandline_arguments: typing.List[str] = None):
    """Main function of SMAC for CLI interface"""
    self.logger.info("SMAC call: %s" % (" ".join(sys.argv)))

    cmd_reader = CMDReader()
    kwargs = {}
    if commandline_arguments:
        kwargs['commandline_arguments'] = commandline_arguments
    main_args_, smac_args_, scen_args_ = cmd_reader.read_cmd(**kwargs)

    root_logger = logging.getLogger()
    root_logger.setLevel(main_args_.verbose_level)
    logger_handler = logging.StreamHandler(stream=sys.stdout)
    if root_logger.level >= logging.INFO:
        formatter = logging.Formatter("%(levelname)s:\t%(message)s")
    else:
        formatter = logging.Formatter(
            "%(asctime)s:%(levelname)s:%(name)s:%(message)s",
            "%Y-%m-%d %H:%M:%S")
    logger_handler.setFormatter(formatter)
    root_logger.addHandler(logger_handler)
    # remove default handler
    if len(root_logger.handlers) > 1:
        root_logger.removeHandler(root_logger.handlers[0])

    # Create defaults
    rh = None
    initial_configs = None
    stats = None
    incumbent = None

    # Create scenario-object
    scenario = {}
    scenario.update(vars(smac_args_))
    scenario.update(vars(scen_args_))
    scen = Scenario(scenario=scenario)

    # Restore state
    if main_args_.restore_state:
        root_logger.debug("Restoring state from %s...", main_args_.restore_state)
        rh, stats, traj_list_aclib, traj_list_old = self.restore_state(scen, main_args_)
        scen.output_dir_for_this_run = create_output_directory(
            scen,
            main_args_.seed,
            root_logger,
        )
        scen.write()
        incumbent = self.restore_state_after_output_dir(scen, stats,
                                                        traj_list_aclib,
                                                        traj_list_old)

    if main_args_.warmstart_runhistory:
        aggregate_func = average_cost
        rh = RunHistory(aggregate_func=aggregate_func)
        scen, rh = merge_foreign_data_from_file(
            scenario=scen,
            runhistory=rh,
            in_scenario_fn_list=main_args_.warmstart_scenario,
            in_runhistory_fn_list=main_args_.warmstart_runhistory,
            cs=scen.cs,
            aggregate_func=aggregate_func)

    if main_args_.warmstart_incumbent:
        initial_configs = [scen.cs.get_default_configuration()]
        for traj_fn in main_args_.warmstart_incumbent:
            trajectory = TrajLogger.read_traj_aclib_format(fn=traj_fn, cs=scen.cs)
            initial_configs.append(trajectory[-1]["incumbent"])

    if main_args_.mode == "SMAC4AC":
        optimizer = SMAC4AC(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            stats=stats,
            restore_incumbent=incumbent,
            run_id=main_args_.seed)
    elif main_args_.mode == "SMAC4HPO":
        optimizer = SMAC4HPO(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            stats=stats,
            restore_incumbent=incumbent,
            run_id=main_args_.seed)
    elif main_args_.mode == "SMAC4BO":
        optimizer = SMAC4BO(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            stats=stats,
            restore_incumbent=incumbent,
            run_id=main_args_.seed)
    elif main_args_.mode == "ROAR":
        optimizer = ROAR(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            run_id=main_args_.seed)
    elif main_args_.mode == "EPILS":
        optimizer = EPILS(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            run_id=main_args_.seed)
    elif main_args_.mode == "Hydra":
        optimizer = Hydra(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            runhistory=rh,
            initial_configurations=initial_configs,
            stats=stats,
            restore_incumbent=incumbent,
            run_id=main_args_.seed,
            random_configuration_chooser=main_args_.random_configuration_chooser,
            n_iterations=main_args_.hydra_iterations,
            val_set=main_args_.hydra_validation,
            incs_per_round=main_args_.hydra_incumbents_per_round,
            n_optimizers=main_args_.hydra_n_optimizers)
    elif main_args_.mode == "PSMAC":
        optimizer = PSMAC(
            scenario=scen,
            rng=np.random.RandomState(main_args_.seed),
            run_id=main_args_.seed,
            shared_model=smac_args_.shared_model,
            validate=main_args_.psmac_validate,
            n_optimizers=main_args_.hydra_n_optimizers,
            n_incs=main_args_.hydra_incumbents_per_round,
        )
    try:
        optimizer.optimize()
    except (TAEAbortException, FirstRunCrashedException) as err:
        self.logger.error(err)
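# Illustrative sketch (not executed here): invoking the CLI entry point
# programmatically. ``SMACCLI`` is the assumed name of the class this method belongs
# to, and the flag names are assumptions derived from the attribute names read off
# ``main_args_`` above (scenario file, seed, mode); check the CMDReader for the
# exact spelling supported by this fork.
#
#   SMACCLI().main_cli(commandline_arguments=[
#       "--scenario", "scenario.txt",
#       "--seed", "42",
#       "--mode", "SMAC4AC",
#   ])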