def test_single_default_config_design(self):
    self.stats.start_timing()
    tj = TrajLogger(output_dir=None, stats=self.stats)
    dc = DefaultConfiguration(
        cs=self.cs,
        traj_logger=tj,
        rng=np.random.RandomState(seed=12345),
        ta_run_limit=self.scenario.ta_run_limit,
    )
    # should return only the default config
    configs = dc.select_configurations()
    self.assertEqual(len(configs), 1)
    self.assertEqual(configs[0]['x1'], 1)
def test_single_default_config_design(self):
    stats = Stats(scenario=self.scenario)
    stats.start_timing()
    self.ta.stats = stats
    tj = TrajLogger(output_dir=None, stats=stats)
    rh = RunHistory(aggregate_func=average_cost)
    dc = DefaultConfiguration(
        tae_runner=self.ta,
        scenario=self.scenario,
        stats=stats,
        traj_logger=tj,
        rng=np.random.RandomState(seed=12345),
    )
    inc = dc.run()
    self.assertTrue(stats.ta_runs == 1)
    self.assertTrue(len(rh.data) == 0)
def test_inject_dependencies(self):
    # initialize objects with missing dependencies
    ta = ExecuteTAFuncDict(lambda x: x**2)
    rh = RunHistory(aggregate_func=None)
    acqu_func = EI(model=None)
    intensifier = Intensifier(
        tae_runner=None,
        stats=None,
        traj_logger=None,
        rng=np.random.RandomState(),
        instances=None,
    )
    init_design = DefaultConfiguration(
        tae_runner=None,
        scenario=None,
        stats=None,
        traj_logger=None,
        rng=np.random.RandomState(),
    )
    rh2epm = RunHistory2EPM4Cost(scenario=self.scenario, num_params=0)
    rh2epm.scenario = None

    # assert missing dependencies
    self.assertIsNone(rh.aggregate_func)
    self.assertIsNone(acqu_func.model)
    self.assertIsNone(intensifier.tae_runner)
    self.assertIsNone(intensifier.stats)
    self.assertIsNone(intensifier.traj_logger)
    self.assertIsNone(init_design.tae_runner)
    self.assertIsNone(init_design.scenario)
    self.assertIsNone(init_design.stats)
    self.assertIsNone(init_design.traj_logger)
    self.assertIsNone(rh2epm.scenario)

    # initialize smac-object
    SMAC(scenario=self.scenario, tae_runner=ta, runhistory=rh,
         intensifier=intensifier, acquisition_function=acqu_func,
         runhistory2epm=rh2epm, initial_design=init_design)

    # assert that missing dependencies are injected
    self.assertIsNotNone(rh.aggregate_func)
    self.assertIsInstance(acqu_func.model, AbstractEPM)
    self.assertIsInstance(intensifier.tae_runner, ExecuteTARun)
    self.assertIsInstance(intensifier.stats, Stats)
    self.assertIsInstance(intensifier.traj_logger, TrajLogger)
    self.assertIsInstance(init_design.tae_runner, ExecuteTARun)
    self.assertIsInstance(init_design.scenario, Scenario)
    self.assertIsInstance(init_design.stats, Stats)
    self.assertIsInstance(init_design.traj_logger, TrajLogger)
    self.assertIsInstance(rh2epm.scenario, Scenario)
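# The test above exercises the facade's "inject if missing" convention: components may be
# constructed with None dependencies and the SMAC facade fills them in afterwards. A minimal
# sketch of that pattern under the assumption that a plain attribute check is all that is
# needed; the helper name `_inject_if_none` is hypothetical and not part of SMAC's API.


def _inject_if_none(component, **dependencies):
    """Set each named attribute on `component` only if it is currently None."""
    for name, value in dependencies.items():
        if getattr(component, name, None) is None:
            setattr(component, name, value)
    return component


# Usage sketch, mirroring what the facade does for the intensifier:
#     _inject_if_none(intensifier, tae_runner=tae_runner, stats=stats, traj_logger=traj_logger)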
def __init__(
    self,
    scenario: Scenario,
    tae_runner: Optional[Union[Type[BaseRunner], Callable]] = None,
    tae_runner_kwargs: Optional[Dict] = None,
    runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None,
    runhistory_kwargs: Optional[Dict] = None,
    intensifier: Optional[Type[AbstractRacer]] = None,
    intensifier_kwargs: Optional[Dict] = None,
    acquisition_function: Optional[Type[AbstractAcquisitionFunction]] = None,
    acquisition_function_kwargs: Optional[Dict] = None,
    integrate_acquisition_function: bool = False,
    acquisition_function_optimizer: Optional[Type[AcquisitionFunctionMaximizer]] = None,
    acquisition_function_optimizer_kwargs: Optional[Dict] = None,
    model: Optional[Type[AbstractEPM]] = None,
    model_kwargs: Optional[Dict] = None,
    runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None,
    runhistory2epm_kwargs: Optional[Dict] = None,
    multi_objective_algorithm: Optional[Type[AbstractMultiObjectiveAlgorithm]] = None,
    multi_objective_kwargs: Optional[Dict] = None,
    initial_design: Optional[Type[InitialDesign]] = None,
    initial_design_kwargs: Optional[Dict] = None,
    initial_configurations: Optional[List[Configuration]] = None,
    stats: Optional[Stats] = None,
    restore_incumbent: Optional[Configuration] = None,
    rng: Optional[Union[np.random.RandomState, int]] = None,
    smbo_class: Optional[Type[SMBO]] = None,
    run_id: Optional[int] = None,
    random_configuration_chooser: Optional[Type[RandomConfigurationChooser]] = None,
    random_configuration_chooser_kwargs: Optional[Dict] = None,
    dask_client: Optional[dask.distributed.Client] = None,
    n_jobs: Optional[int] = 1,
):
    self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)

    self.scenario = scenario
    self.output_dir = ""
    if not restore_incumbent:
        # restore_incumbent is used by the CLI interface which provides a method for
        # restoring a SMAC run given an output directory. This is the default path.
        # initial random number generator
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        self.output_dir = create_output_directory(scenario, run_id)
    elif scenario.output_dir is not None:  # type: ignore[attr-defined] # noqa F821
        run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger)
        # output-directory is created in CLI when restoring from a folder. Calling the
        # function again in the facade results in two folders being created: run_X and
        # run_X.OLD. If we are restoring, the output folder exists already and we omit
        # creating it, but set self.output_dir to that directory. This is necessary
        # because we want to write the trajectory to the new output-dir in the CLI.
        self.output_dir = cast(str, scenario.output_dir_for_this_run)  # type: ignore[attr-defined] # noqa F821
    rng = cast(np.random.RandomState, rng)

    if (
        scenario.deterministic is True  # type: ignore[attr-defined] # noqa F821
        and getattr(scenario, "tuner_timeout", None) is None
        and scenario.run_obj == "quality"  # type: ignore[attr-defined] # noqa F821
    ):
        self.logger.info(
            "Optimizing a deterministic scenario for quality without a tuner timeout - will make "
            "SMAC deterministic and only evaluate one configuration per iteration!"
        )
        scenario.intensification_percentage = 1e-10  # type: ignore[attr-defined] # noqa F821
        scenario.min_chall = 1  # type: ignore[attr-defined] # noqa F821

    scenario.write()

    # initialize stats object
    if stats:
        self.stats = stats
    else:
        self.stats = Stats(scenario)

    if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
        self.logger.warning("Runtime as objective automatically activates log(y) transformation")
        self.scenario.transform_y = "LOG"  # type: ignore[attr-defined] # noqa F821

    # initialize empty runhistory
    num_obj = len(scenario.multi_objectives)  # type: ignore[attr-defined] # noqa F821
    runhistory_def_kwargs = {}
    if runhistory_kwargs is not None:
        runhistory_def_kwargs.update(runhistory_kwargs)
    if runhistory is None:
        runhistory = RunHistory(**runhistory_def_kwargs)
    elif inspect.isclass(runhistory):
        runhistory = runhistory(**runhistory_def_kwargs)  # type: ignore[operator] # noqa F821
    elif isinstance(runhistory, RunHistory):
        pass
    else:
        raise ValueError("runhistory has to be a class or an object of RunHistory")

    rand_conf_chooser_kwargs = {"rng": rng}
    if random_configuration_chooser_kwargs is not None:
        rand_conf_chooser_kwargs.update(random_configuration_chooser_kwargs)
    if random_configuration_chooser is None:
        if "prob" not in rand_conf_chooser_kwargs:
            rand_conf_chooser_kwargs["prob"] = scenario.rand_prob  # type: ignore[attr-defined] # noqa F821
        random_configuration_chooser_instance = ChooserProb(
            **rand_conf_chooser_kwargs  # type: ignore[arg-type] # noqa F821
        )  # type: RandomConfigurationChooser
    elif inspect.isclass(random_configuration_chooser):
        random_configuration_chooser_instance = random_configuration_chooser(  # type: ignore # noqa F821
            **rand_conf_chooser_kwargs  # type: ignore[arg-type] # noqa F821
        )
    elif not isinstance(random_configuration_chooser, RandomConfigurationChooser):
        raise ValueError(
            "random_configuration_chooser has to be"
            " a class or object of RandomConfigurationChooser"
        )

    # reset random number generator in config space to draw different
    # random configurations with each seed given to SMAC
    scenario.cs.seed(rng.randint(MAXINT))  # type: ignore[attr-defined] # noqa F821

    # initial Trajectory Logger
    traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats)

    # initial EPM
    types, bounds = get_types(scenario.cs, scenario.feature_array)  # type: ignore[attr-defined] # noqa F821
    model_def_kwargs = {
        "types": types,
        "bounds": bounds,
        "instance_features": scenario.feature_array,
        "seed": rng.randint(MAXINT),
        "pca_components": scenario.PCA_DIM,
    }
    if model_kwargs is not None:
        model_def_kwargs.update(model_kwargs)
    if model is None:
        for key, value in {
            "log_y": scenario.transform_y in ["LOG", "LOGS"],  # type: ignore[attr-defined] # noqa F821
            "num_trees": scenario.rf_num_trees,  # type: ignore[attr-defined] # noqa F821
            "do_bootstrapping": scenario.rf_do_bootstrapping,  # type: ignore[attr-defined] # noqa F821
            "ratio_features": scenario.rf_ratio_features,  # type: ignore[attr-defined] # noqa F821
            "min_samples_split": scenario.rf_min_samples_split,  # type: ignore[attr-defined] # noqa F821
            "min_samples_leaf": scenario.rf_min_samples_leaf,  # type: ignore[attr-defined] # noqa F821
            "max_depth": scenario.rf_max_depth,  # type: ignore[attr-defined] # noqa F821
        }.items():
            if key not in model_def_kwargs:
                model_def_kwargs[key] = value
        model_def_kwargs["configspace"] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
        model_instance = RandomForestWithInstances(
            **model_def_kwargs  # type: ignore[arg-type] # noqa F821
        )  # type: AbstractEPM
    elif inspect.isclass(model):
        model_def_kwargs["configspace"] = self.scenario.cs  # type: ignore[attr-defined] # noqa F821
        model_instance = model(**model_def_kwargs)  # type: ignore # noqa F821
    else:
        raise TypeError("Model not recognized: %s" % (type(model)))

    # initial acquisition function
    acq_def_kwargs = {"model": model_instance}
    if acquisition_function_kwargs is not None:
        acq_def_kwargs.update(acquisition_function_kwargs)
    acquisition_function_instance = None  # type: Optional[AbstractAcquisitionFunction]
    if acquisition_function is None:
        if scenario.transform_y in ["LOG", "LOGS"]:  # type: ignore[attr-defined] # noqa F821
            acquisition_function_instance = LogEI(
                **acq_def_kwargs  # type: ignore[arg-type] # noqa F821
            )
        else:
            acquisition_function_instance = EI(
                **acq_def_kwargs  # type: ignore[arg-type] # noqa F821
            )
    elif inspect.isclass(acquisition_function):
        acquisition_function_instance = acquisition_function(**acq_def_kwargs)
    else:
        raise TypeError(
            "Argument acquisition_function must be None or an object implementing the "
            "AbstractAcquisitionFunction, not %s." % type(acquisition_function)
        )
    if integrate_acquisition_function:
        acquisition_function_instance = IntegratedAcquisitionFunction(
            acquisition_function=acquisition_function_instance,  # type: ignore
            **acq_def_kwargs,
        )

    # initialize optimizer on acquisition function
    acq_func_opt_kwargs = {
        "acquisition_function": acquisition_function_instance,
        "config_space": scenario.cs,  # type: ignore[attr-defined] # noqa F821
        "rng": rng,
    }
    if acquisition_function_optimizer_kwargs is not None:
        acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs)
    if acquisition_function_optimizer is None:
        for key, value in {
            "max_steps": scenario.sls_max_steps,  # type: ignore[attr-defined] # noqa F821
            "n_steps_plateau_walk": scenario.sls_n_steps_plateau_walk,  # type: ignore[attr-defined] # noqa F821
        }.items():
            if key not in acq_func_opt_kwargs:
                acq_func_opt_kwargs[key] = value
        acquisition_function_optimizer_instance = LocalAndSortedRandomSearch(
            **acq_func_opt_kwargs  # type: ignore
        )
    elif inspect.isclass(acquisition_function_optimizer):
        acquisition_function_optimizer_instance = acquisition_function_optimizer(  # type: ignore # noqa F821
            **acq_func_opt_kwargs
        )  # type: ignore # noqa F821
    else:
        raise TypeError(
            "Argument acquisition_function_optimizer must be None or an object implementing the "
            "AcquisitionFunctionMaximizer, but is '%s'" % type(acquisition_function_optimizer)
        )

    # initialize tae_runner
    # First case, if tae_runner is None, the target algorithm is a call
    # string in the scenario file
    tae_def_kwargs = {
        "stats": self.stats,
        "run_obj": scenario.run_obj,
        "par_factor": scenario.par_factor,  # type: ignore[attr-defined] # noqa F821
        "cost_for_crash": scenario.cost_for_crash,  # type: ignore[attr-defined] # noqa F821
        "abort_on_first_run_crash": scenario.abort_on_first_run_crash,  # type: ignore[attr-defined] # noqa F821
        "multi_objectives": scenario.multi_objectives,  # type: ignore[attr-defined] # noqa F821
    }
    if tae_runner_kwargs is not None:
        tae_def_kwargs.update(tae_runner_kwargs)
    if "ta" not in tae_def_kwargs:
        tae_def_kwargs["ta"] = scenario.ta  # type: ignore[attr-defined] # noqa F821
    if tae_runner is None:
        tae_def_kwargs["ta"] = scenario.ta  # type: ignore[attr-defined] # noqa F821
        tae_runner_instance = ExecuteTARunOld(
            **tae_def_kwargs
        )  # type: ignore[arg-type] # noqa F821 # type: BaseRunner
    elif inspect.isclass(tae_runner):
        tae_runner_instance = cast(BaseRunner, tae_runner(**tae_def_kwargs))  # type: ignore
    elif callable(tae_runner):
        tae_def_kwargs["ta"] = tae_runner
        tae_def_kwargs["use_pynisher"] = scenario.limit_resources  # type: ignore[attr-defined] # noqa F821
        tae_def_kwargs["memory_limit"] = scenario.memory_limit  # type: ignore[attr-defined] # noqa F821
        tae_runner_instance = ExecuteTAFuncDict(**tae_def_kwargs)  # type: ignore
    else:
        raise TypeError(
            "Argument 'tae_runner' is %s, but must be "
            "either None, a callable or an object implementing "
            "BaseRunner. Passing 'None' will result in the "
            "creation of target algorithm runner based on the "
            "call string in the scenario file." % type(tae_runner)
        )

    # In case of a parallel run, wrap the single worker in a parallel
    # runner
    if n_jobs is None or n_jobs == 1:
        _n_jobs = 1
    elif n_jobs == -1:
        _n_jobs = joblib.cpu_count()
    elif n_jobs > 0:
        _n_jobs = n_jobs
    else:
        raise ValueError(
            "Number of tasks must be positive, None or -1, but is %s" % str(n_jobs)
        )
    if _n_jobs > 1 or dask_client is not None:
        tae_runner_instance = DaskParallelRunner(  # type: ignore
            tae_runner_instance,
            n_workers=_n_jobs,
            output_directory=self.output_dir,
            dask_client=dask_client,
        )

    # Check that overall objective and tae objective are the same
    # TODO: remove these two ignores once the scenario object knows all its attributes!
    if tae_runner_instance.run_obj != scenario.run_obj:  # type: ignore[union-attr] # noqa F821
        raise ValueError(
            "Objective for the target algorithm runner and "
            "the scenario must be the same, but are '%s' and "
            "'%s'" % (tae_runner_instance.run_obj, scenario.run_obj)
        )  # type: ignore[union-attr] # noqa F821

    if intensifier is None:
        intensifier = Intensifier

    if isinstance(intensifier, AbstractRacer):
        intensifier_instance = intensifier
    elif inspect.isclass(intensifier):
        # initialize intensification
        intensifier_def_kwargs = {
            "stats": self.stats,
            "traj_logger": traj_logger,
            "rng": rng,
            "instances": scenario.train_insts,  # type: ignore[attr-defined] # noqa F821
            "cutoff": scenario.cutoff,  # type: ignore[attr-defined] # noqa F821
            "deterministic": scenario.deterministic,  # type: ignore[attr-defined] # noqa F821
            "run_obj_time": scenario.run_obj == "runtime",  # type: ignore[attr-defined] # noqa F821
            "instance_specifics": scenario.instance_specific,  # type: ignore[attr-defined] # noqa F821
            "adaptive_capping_slackfactor": scenario.intens_adaptive_capping_slackfactor,  # type: ignore[attr-defined] # noqa F821
            "min_chall": scenario.intens_min_chall,  # type: ignore[attr-defined] # noqa F821
        }
        if issubclass(intensifier, Intensifier):
            intensifier_def_kwargs["always_race_against"] = scenario.cs.get_default_configuration()  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs["use_ta_time_bound"] = scenario.use_ta_time  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs["minR"] = scenario.minR  # type: ignore[attr-defined] # noqa F821
            intensifier_def_kwargs["maxR"] = scenario.maxR  # type: ignore[attr-defined] # noqa F821
        if intensifier_kwargs is not None:
            intensifier_def_kwargs.update(intensifier_kwargs)
        intensifier_instance = intensifier(**intensifier_def_kwargs)  # type: ignore[arg-type] # noqa F821
    else:
        raise TypeError(
            "Argument intensifier must be None or an object implementing the AbstractRacer, but is '%s'"
            % type(intensifier)
        )

    # initialize multi objective
    # the multi_objective_algorithm_instance will be passed to the runhistory2epm object
    multi_objective_algorithm_instance = None  # type: Optional[AbstractMultiObjectiveAlgorithm]

    if scenario.multi_objectives is not None and num_obj > 1:  # type: ignore[attr-defined] # noqa F821
        # define any defaults here
        _multi_objective_kwargs = {"rng": rng, "num_obj": num_obj}
        if multi_objective_kwargs is not None:
            _multi_objective_kwargs.update(multi_objective_kwargs)
        if multi_objective_algorithm is None:
            multi_objective_algorithm_instance = MeanAggregationStrategy(
                **_multi_objective_kwargs
            )  # type: ignore[arg-type] # noqa F821
        elif inspect.isclass(multi_objective_algorithm):
            multi_objective_algorithm_instance = multi_objective_algorithm(**_multi_objective_kwargs)
        else:
            raise TypeError(
                "Multi-objective algorithm not recognized: %s" % (type(multi_objective_algorithm))
            )

    # initial design
    if initial_design is not None and initial_configurations is not None:
        raise ValueError("Either use initial_design or initial_configurations; but not both")

    init_design_def_kwargs = {
        "cs": scenario.cs,  # type: ignore[attr-defined] # noqa F821
        "traj_logger": traj_logger,
        "rng": rng,
        "ta_run_limit": scenario.ta_run_limit,  # type: ignore[attr-defined] # noqa F821
        "configs": initial_configurations,
        "n_configs_x_params": 0,
        "max_config_fracs": 0.0,
    }
    if initial_design_kwargs is not None:
        init_design_def_kwargs.update(initial_design_kwargs)
    if initial_configurations is not None:
        initial_design_instance = InitialDesign(**init_design_def_kwargs)
    elif initial_design is None:
        if scenario.initial_incumbent == "DEFAULT":  # type: ignore[attr-defined] # noqa F821
            init_design_def_kwargs["max_config_fracs"] = 0.0
            initial_design_instance = DefaultConfiguration(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "RANDOM":  # type: ignore[attr-defined] # noqa F821
            init_design_def_kwargs["max_config_fracs"] = 0.0
            initial_design_instance = RandomConfigurations(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "LHD":  # type: ignore[attr-defined] # noqa F821
            initial_design_instance = LHDesign(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "FACTORIAL":  # type: ignore[attr-defined] # noqa F821
            initial_design_instance = FactorialInitialDesign(**init_design_def_kwargs)
        elif scenario.initial_incumbent == "SOBOL":  # type: ignore[attr-defined] # noqa F821
            initial_design_instance = SobolDesign(**init_design_def_kwargs)
        else:
            raise ValueError(
                "Don't know what kind of initial_incumbent "
                "'%s' is" % scenario.initial_incumbent  # type: ignore # type: ignore[attr-defined] # noqa F821
            )
    elif inspect.isclass(initial_design):
        initial_design_instance = initial_design(**init_design_def_kwargs)
    else:
        raise TypeError(
            "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'"
            % type(initial_design)
        )

    # if we log the performance data,
    # the RFRImputator will already get
    # log transform data from the runhistory
    if scenario.transform_y in ["LOG", "LOGS"]:  # type: ignore[attr-defined] # noqa F821
        cutoff = np.log(np.nanmin([np.inf, np.float_(scenario.cutoff)]))  # type: ignore[attr-defined] # noqa F821
        threshold = cutoff + np.log(scenario.par_factor)  # type: ignore[attr-defined] # noqa F821
    else:
        cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)])  # type: ignore[attr-defined] # noqa F821
        threshold = cutoff * scenario.par_factor  # type: ignore[attr-defined] # noqa F821
    num_params = len(scenario.cs.get_hyperparameters())  # type: ignore[attr-defined] # noqa F821
    imputor = RFRImputator(
        rng=rng,
        cutoff=cutoff,
        threshold=threshold,
        model=model_instance,
        change_threshold=0.01,
        max_iter=2,
    )

    r2e_def_kwargs = {
        "scenario": scenario,
        "num_params": num_params,
        "success_states": [StatusType.SUCCESS, ],
        "impute_censored_data": True,
        "impute_state": [StatusType.CAPPED, ],
        "imputor": imputor,
        "scale_perc": 5,
    }

    # TODO: consider other sorts of multi-objective algorithms
    if isinstance(multi_objective_algorithm_instance, AggregationStrategy):
        r2e_def_kwargs.update({"multi_objective_algorithm": multi_objective_algorithm_instance})

    if scenario.run_obj == "quality":
        r2e_def_kwargs.update({
            "success_states": [
                StatusType.SUCCESS,
                StatusType.CRASHED,
                StatusType.MEMOUT,
            ],
            "impute_censored_data": False,
            "impute_state": None,
        })

    if (isinstance(intensifier_instance, (SuccessiveHalving, Hyperband))
            and scenario.run_obj == "quality"):
        r2e_def_kwargs.update({
            "success_states": [
                StatusType.SUCCESS,
                StatusType.CRASHED,
                StatusType.MEMOUT,
                StatusType.DONOTADVANCE,
            ],
            "consider_for_higher_budgets_state": [
                StatusType.DONOTADVANCE,
                StatusType.TIMEOUT,
                StatusType.CRASHED,
                StatusType.MEMOUT,
            ],
        })

    if runhistory2epm_kwargs is not None:
        r2e_def_kwargs.update(runhistory2epm_kwargs)
    if runhistory2epm is None:
        if scenario.run_obj == "runtime":
            rh2epm = RunHistory2EPM4LogCost(
                **r2e_def_kwargs  # type: ignore
            )  # type: ignore[arg-type] # noqa F821 # type: AbstractRunHistory2EPM
        elif scenario.run_obj == "quality":
            if scenario.transform_y == "NONE":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4Cost(**r2e_def_kwargs)  # type: ignore # noqa F821
            elif scenario.transform_y == "LOG":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs)  # type: ignore # noqa F821
            elif scenario.transform_y == "LOGS":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4LogScaledCost(**r2e_def_kwargs)  # type: ignore # noqa F821
            elif scenario.transform_y == "INVS":  # type: ignore[attr-defined] # noqa F821
                rh2epm = RunHistory2EPM4InvScaledCost(**r2e_def_kwargs)  # type: ignore # noqa F821
        else:
            raise ValueError(
                "Unknown run objective: %s. Should be either "
                "quality or runtime." % self.scenario.run_obj  # type: ignore # noqa F821
            )
    elif inspect.isclass(runhistory2epm):
        rh2epm = runhistory2epm(**r2e_def_kwargs)  # type: ignore # noqa F821
    else:
        raise TypeError(
            "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'"
            % type(runhistory2epm)
        )

    smbo_args = {
        "scenario": scenario,
        "stats": self.stats,
        "initial_design": initial_design_instance,
        "runhistory": runhistory,
        "runhistory2epm": rh2epm,
        "intensifier": intensifier_instance,
        "num_run": run_id,
        "model": model_instance,
        "acq_optimizer": acquisition_function_optimizer_instance,
        "acquisition_func": acquisition_function_instance,
        "rng": rng,
        "restore_incumbent": restore_incumbent,
        "random_configuration_chooser": random_configuration_chooser_instance,
        "tae_runner": tae_runner_instance,
    }  # type: Dict[str, Any]

    if smbo_class is None:
        self.solver = SMBO(**smbo_args)  # type: ignore[arg-type] # noqa F821
    else:
        self.solver = smbo_class(**smbo_args)  # type: ignore[arg-type] # noqa F821
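# What follows is a hedged usage sketch for the constructor above, wrapped in a helper so it does
# not run on import. The facade class name `SMAC4AC`, the import paths, and `optimize()` are
# assumptions based on the SMAC project layout (they are not shown in this file). The sketch
# illustrates the intended calling pattern: a plain callable as `tae_runner` (wrapped internally
# in ExecuteTAFuncDict) plus per-component `*_kwargs` overrides instead of replacing components.


def _example_usage():
    """Hedged usage sketch for the constructor above; not part of the SMAC API."""
    import numpy as np
    from ConfigSpace import ConfigurationSpace
    from ConfigSpace.hyperparameters import UniformFloatHyperparameter
    from smac.facade.smac_ac_facade import SMAC4AC  # assumed import path
    from smac.scenario.scenario import Scenario  # assumed import path

    cs = ConfigurationSpace()
    cs.add_hyperparameters([
        UniformFloatHyperparameter("x", -5.0, 5.0),
        UniformFloatHyperparameter("y", -5.0, 5.0),
    ])

    def rosenbrock(cfg, seed=0):
        # toy target function; SMAC minimizes the returned cost
        return (1 - cfg["x"]) ** 2 + 100 * (cfg["y"] - cfg["x"] ** 2) ** 2

    scenario = Scenario({
        "run_obj": "quality",   # optimize solution quality, not runtime
        "runcount-limit": 50,   # at most 50 target algorithm calls
        "cs": cs,
        "deterministic": True,
    })
    smac = SMAC4AC(
        scenario=scenario,
        tae_runner=rosenbrock,                # callable, wrapped in ExecuteTAFuncDict
        rng=np.random.RandomState(42),
        intensifier_kwargs={"min_chall": 1},  # merged into intensifier_def_kwargs above
        n_jobs=1,
    )
    return smac.optimize()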
def __init__( self, scenario: Scenario, tae_runner: typing.Union[ExecuteTARun, typing.Callable] = None, runhistory: RunHistory = None, intensifier: Intensifier = None, acquisition_function: AbstractAcquisitionFunction = None, acquisition_function_optimizer: AcquisitionFunctionMaximizer = None, model: AbstractEPM = None, runhistory2epm: AbstractRunHistory2EPM = None, initial_design: InitialDesign = None, initial_configurations: typing.List[Configuration] = None, stats: Stats = None, restore_incumbent: Configuration = None, rng: typing.Union[np.random.RandomState, int] = None, smbo_class: SMBO = None, run_id: int = 1): """Constructor Parameters ---------- scenario : ~smac.scenario.scenario.Scenario Scenario object tae_runner : ~smac.tae.execute_ta_run.ExecuteTARun or callable Callable or implementation of :class:`~smac.tae.execute_ta_run.ExecuteTARun`. In case a callable is passed it will be wrapped by :class:`~smac.tae.execute_func.ExecuteTAFuncDict`. If not set, it will be initialized with the :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`. runhistory : RunHistory runhistory to store all algorithm runs intensifier : Intensifier intensification object to issue a racing to decide the current incumbent acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction Object that implements the :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`. Will use :class:`~smac.optimizer.acquisition.EI` if not set. acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer Object that implements the :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`. Will use :class:`smac.optimizer.ei_optimization.InterleavedLocalAndRandomSearch` if not set. model : AbstractEPM Model that implements train() and predict(). Will use a :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not set. runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP Object that implements the AbstractRunHistory2EPM. If None, will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost` if objective is cost or :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost` if objective is runtime. initial_design : InitialDesign initial sampling design initial_configurations : typing.List[Configuration] list of initial configurations for initial design -- cannot be used together with initial_design stats : Stats optional stats object rng : np.random.RandomState Random number generator restore_incumbent : Configuration incumbent used if restoring to previous state smbo_class : ~smac.optimizer.smbo.SMBO Class implementing the SMBO interface which will be used to instantiate the optimizer class. run_id: int, (default: 1) Run ID will be used as subfolder for output_dir. """ self.logger = logging.getLogger(self.__module__ + "." 
+ self.__class__.__name__) aggregate_func = average_cost self.output_dir = create_output_directory(scenario, run_id) scenario.write() # initialize stats object if stats: self.stats = stats else: self.stats = Stats(scenario) # initialize empty runhistory if runhistory is None: runhistory = RunHistory(aggregate_func=aggregate_func) # inject aggr_func if necessary if runhistory.aggregate_func is None: runhistory.aggregate_func = aggregate_func # initial random number generator num_run, rng = self._get_rng(rng=rng) # reset random number generator in config space to draw different # random configurations with each seed given to SMAC scenario.cs.seed(rng.randint(MAXINT)) # initial Trajectory Logger traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats) # initial EPM types, bounds = get_types(scenario.cs, scenario.feature_array) if model is None: model = RandomForestWithInstances( types=types, bounds=bounds, instance_features=scenario.feature_array, seed=rng.randint(MAXINT), pca_components=scenario.PCA_DIM) # initial acquisition function if acquisition_function is None: if scenario.run_obj == "runtime": acquisition_function = LogEI(model=model) else: acquisition_function = EI(model=model) # inject model if necessary if acquisition_function.model is None: acquisition_function.model = model # initialize optimizer on acquisition function if acquisition_function_optimizer is None: acquisition_function_optimizer = InterleavedLocalAndRandomSearch( acquisition_function, scenario.cs, np.random.RandomState(seed=rng.randint(MAXINT))) elif not isinstance( acquisition_function_optimizer, AcquisitionFunctionMaximizer, ): raise ValueError( "Argument 'acquisition_function_optimizer' must be of type" "'AcquisitionFunctionMaximizer', but is '%s'" % type(acquisition_function_optimizer)) # initialize tae_runner # First case, if tae_runner is None, the target algorithm is a call # string in the scenario file if tae_runner is None: tae_runner = ExecuteTARunOld( ta=scenario.ta, stats=self.stats, run_obj=scenario.run_obj, runhistory=runhistory, par_factor=scenario.par_factor, cost_for_crash=scenario.cost_for_crash) # Second case, the tae_runner is a function to be optimized elif callable(tae_runner): tae_runner = ExecuteTAFuncDict( ta=tae_runner, stats=self.stats, run_obj=scenario.run_obj, memory_limit=scenario.memory_limit, runhistory=runhistory, par_factor=scenario.par_factor, cost_for_crash=scenario.cost_for_crash) # Third case, if it is an ExecuteTaRun we can simply use the # instance. Otherwise, the next check raises an exception elif not isinstance(tae_runner, ExecuteTARun): raise TypeError("Argument 'tae_runner' is %s, but must be " "either a callable or an instance of " "ExecuteTaRun. Passing 'None' will result in the " "creation of target algorithm runner based on the " "call string in the scenario file." 
% type(tae_runner)) # Check that overall objective and tae objective are the same if tae_runner.run_obj != scenario.run_obj: raise ValueError("Objective for the target algorithm runner and " "the scenario must be the same, but are '%s' and " "'%s'" % (tae_runner.run_obj, scenario.run_obj)) # inject stats if necessary if tae_runner.stats is None: tae_runner.stats = self.stats # inject runhistory if necessary if tae_runner.runhistory is None: tae_runner.runhistory = runhistory # inject cost_for_crash if tae_runner.crash_cost != scenario.cost_for_crash: tae_runner.crash_cost = scenario.cost_for_crash # initialize intensification if intensifier is None: intensifier = Intensifier(tae_runner=tae_runner, stats=self.stats, traj_logger=traj_logger, rng=rng, instances=scenario.train_insts, cutoff=scenario.cutoff, deterministic=scenario.deterministic, run_obj_time=scenario.run_obj == "runtime", always_race_against=scenario.cs.get_default_configuration() \ if scenario.always_race_default else None, instance_specifics=scenario.instance_specific, minR=scenario.minR, maxR=scenario.maxR) # inject deps if necessary if intensifier.tae_runner is None: intensifier.tae_runner = tae_runner if intensifier.stats is None: intensifier.stats = self.stats if intensifier.traj_logger is None: intensifier.traj_logger = traj_logger # initial design if initial_design is not None and initial_configurations is not None: raise ValueError( "Either use initial_design or initial_configurations; but not both" ) if initial_configurations is not None: initial_design = MultiConfigInitialDesign( tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, runhistory=runhistory, rng=rng, configs=initial_configurations, intensifier=intensifier, aggregate_func=aggregate_func) elif initial_design is None: if scenario.initial_incumbent == "DEFAULT": initial_design = DefaultConfiguration(tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) elif scenario.initial_incumbent == "RANDOM": initial_design = RandomConfiguration(tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) else: raise ValueError("Don't know what kind of initial_incumbent " "'%s' is" % scenario.initial_incumbent) # inject deps if necessary if initial_design.tae_runner is None: initial_design.tae_runner = tae_runner if initial_design.scenario is None: initial_design.scenario = scenario if initial_design.stats is None: initial_design.stats = self.stats if initial_design.traj_logger is None: initial_design.traj_logger = traj_logger # initial conversion of runhistory into EPM data if runhistory2epm is None: num_params = len(scenario.cs.get_hyperparameters()) if scenario.run_obj == "runtime": # if we log the performance data, # the RFRImputator will already get # log transform data from the runhistory cutoff = np.log10(scenario.cutoff) threshold = np.log10(scenario.cutoff * scenario.par_factor) imputor = RFRImputator(rng=rng, cutoff=cutoff, threshold=threshold, model=model, change_threshold=0.01, max_iter=2) runhistory2epm = RunHistory2EPM4LogCost( scenario=scenario, num_params=num_params, success_states=[ StatusType.SUCCESS, ], impute_censored_data=True, impute_state=[ StatusType.CAPPED, ], imputor=imputor) elif scenario.run_obj == 'quality': runhistory2epm = RunHistory2EPM4Cost( scenario=scenario, num_params=num_params, success_states=[StatusType.SUCCESS, StatusType.CRASHED], impute_censored_data=False, impute_state=None) else: raise ValueError('Unknown run objective: %s. 
Should be either ' 'quality or runtime.' % self.scenario.run_obj) # inject scenario if necessary: if runhistory2epm.scenario is None: runhistory2epm.scenario = scenario smbo_args = { 'scenario': scenario, 'stats': self.stats, 'initial_design': initial_design, 'runhistory': runhistory, 'runhistory2epm': runhistory2epm, 'intensifier': intensifier, 'aggregate_func': aggregate_func, 'num_run': num_run, 'model': model, 'acq_optimizer': acquisition_function_optimizer, 'acquisition_func': acquisition_function, 'rng': rng, 'restore_incumbent': restore_incumbent } if smbo_class is None: self.solver = SMBO(**smbo_args) else: self.solver = smbo_class(**smbo_args)
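# The docstring above notes that a plain callable passed as `tae_runner` is wrapped in
# ExecuteTAFuncDict. A minimal illustration of that convention; the toy target function
# `quadratic` is an example of this editor's, not part of SMAC.


def quadratic(cfg, seed=0):
    """Toy target algorithm: evaluate a single float hyperparameter 'x'."""
    return cfg["x"] ** 2


# Passing `quadratic` as `tae_runner=quadratic` is equivalent to constructing the wrapper
# yourself, which is exactly what the `elif callable(tae_runner)` branch above does:
#     tae = ExecuteTAFuncDict(ta=quadratic, stats=stats, run_obj=scenario.run_obj,
#                             memory_limit=scenario.memory_limit, runhistory=runhistory,
#                             par_factor=scenario.par_factor,
#                             cost_for_crash=scenario.cost_for_crash)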
def __init__(self, scenario: Scenario, tae_runner: Optional[Union[Type[ExecuteTARun], Callable]] = None, tae_runner_kwargs: Optional[dict] = None, runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None, runhistory_kwargs: Optional[dict] = None, intensifier: Optional[Type[Intensifier]] = None, intensifier_kwargs: Optional[dict] = None, acquisition_function: Optional[Type[AbstractAcquisitionFunction]] = None, acquisition_function_kwargs: Optional[dict] = None, integrate_acquisition_function: bool = False, acquisition_function_optimizer: Optional[Type[AcquisitionFunctionMaximizer]] = None, acquisition_function_optimizer_kwargs: Optional[dict] = None, model: Optional[Type[AbstractEPM]] = None, model_kwargs: Optional[dict] = None, runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None, runhistory2epm_kwargs: Optional[dict] = None, initial_design: Optional[Type[InitialDesign]] = None, initial_design_kwargs: Optional[dict] = None, initial_configurations: Optional[List[Configuration]] = None, stats: Optional[Stats] = None, restore_incumbent: Optional[Configuration] = None, rng: Optional[Union[np.random.RandomState, int]] = None, smbo_class: Optional[SMBO] = None, run_id: Optional[int] = None, random_configuration_chooser: Optional[Type[RandomConfigurationChooser]] = None, random_configuration_chooser_kwargs: Optional[dict] = None ): """ Constructor Parameters ---------- scenario : ~smac.scenario.scenario.Scenario Scenario object tae_runner : ~smac.tae.execute_ta_run.ExecuteTARun or callable Callable or implementation of :class:`~smac.tae.execute_ta_run.ExecuteTARun`. In case a callable is passed it will be wrapped by :class:`~smac.tae.execute_func.ExecuteTAFuncDict`. If not set, it will be initialized with the :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`. tae_runner_kwargs: Optional[dict] arguments passed to constructor of '~tae_runner' runhistory : RunHistory runhistory to store all algorithm runs runhistory_kwargs : Optional[dict] arguments passed to constructor of runhistory. We strongly advise against changing the aggregation function, since it will break some code assumptions intensifier : Intensifier intensification object to issue a racing to decide the current incumbent intensifier_kwargs: Optional[dict] arguments passed to the constructor of '~intensifier' acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction Class or object that implements the :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`. Will use :class:`~smac.optimizer.acquisition.EI` or :class:`~smac.optimizer.acquisition.LogEI` if not set. `~acquisition_function_kwargs` is passed to the class constructor. acquisition_function_kwargs : Optional[dict] dictionary to pass specific arguments to ~acquisition_function integrate_acquisition_function : bool, default=False Whether to integrate the acquisition function. Works only with models which can sample their hyperparameters (i.e. GaussianProcessMCMC). acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer Object that implements the :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`. Will use :class:`smac.optimizer.ei_optimization.InterleavedLocalAndRandomSearch` if not set. acquisition_function_optimizer_kwargs: Optional[dict] Arguments passed to constructor of '~acquisition_function_optimizer' model : AbstractEPM Model that implements train() and predict(). Will use a :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not set. 
model_kwargs : Optional[dict] Arguments passed to constructor of '~model' runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP Object that implements the AbstractRunHistory2EPM. If None, will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost` if objective is cost or :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost` if objective is runtime. runhistory2epm_kwargs: Optional[dict] Arguments passed to the constructor of '~runhistory2epm' initial_design : InitialDesign initial sampling design initial_design_kwargs: Optional[dict] arguments passed to constructor of `~initial_design' initial_configurations : List[Configuration] list of initial configurations for initial design -- cannot be used together with initial_design stats : Stats optional stats object rng : np.random.RandomState Random number generator restore_incumbent : Configuration incumbent used if restoring to previous state smbo_class : ~smac.optimizer.smbo.SMBO Class implementing the SMBO interface which will be used to instantiate the optimizer class. run_id : int (optional) Run ID will be used as subfolder for output_dir. If no ``run_id`` is given, a random ``run_id`` will be chosen. random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser How often to choose a random configuration during the intensification procedure. random_configuration_chooser_kwargs : Optional[dict] arguments of constructor for '~random_configuration_chooser' """ self.logger = logging.getLogger( self.__module__ + "." + self.__class__.__name__) aggregate_func = average_cost self.scenario = scenario self.output_dir = "" if not restore_incumbent: # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an # output directory. This is the default path. # initial random number generator # run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger) # run_id=datetime.now().strftime("%Y%m%d%H%M%S%f") run_id=uuid1() self.output_dir = create_output_directory(scenario, run_id) # fixme run_id elif scenario.output_dir is not None: run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger) # output-directory is created in CLI when restoring from a # folder. calling the function again in the facade results in two # folders being created: run_X and run_X.OLD. if we are # restoring, the output-folder exists already and we omit creating it, # but set the self-output_dir to the dir. # necessary because we want to write traj to new output-dir in CLI. 
self.output_dir = scenario.output_dir_for_this_run if ( scenario.deterministic is True and getattr(scenario, 'tuner_timeout', None) is None and scenario.run_obj == 'quality' ): self.logger.info('Optimizing a deterministic scenario for quality without a tuner timeout - will make ' 'SMAC deterministic and only evaluate one configuration per iteration!') scenario.intensification_percentage = 1e-10 scenario.min_chall = 1 scenario.write() # initialize stats object if stats: self.stats = stats else: self.stats = Stats(scenario,file_system=scenario.file_system) if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG": self.logger.warning("Runtime as objective automatically activates log(y) transformation") self.scenario.transform_y = "LOG" # initialize empty runhistory runhistory_def_kwargs = {'aggregate_func': aggregate_func} if runhistory_kwargs is not None: runhistory_def_kwargs.update(runhistory_kwargs) if runhistory is None: runhistory = RunHistory(**runhistory_def_kwargs,file_system=scenario.file_system) elif inspect.isclass(runhistory): runhistory = runhistory(**runhistory_def_kwargs) else: if runhistory.aggregate_func is None: runhistory.aggregate_func = aggregate_func rand_conf_chooser_kwargs = { 'rng': rng } if random_configuration_chooser_kwargs is not None: rand_conf_chooser_kwargs.update(random_configuration_chooser_kwargs) if random_configuration_chooser is None: if 'prob' not in rand_conf_chooser_kwargs: rand_conf_chooser_kwargs['prob'] = scenario.rand_prob random_configuration_chooser = ChooserProb(**rand_conf_chooser_kwargs) elif inspect.isclass(random_configuration_chooser): random_configuration_chooser = random_configuration_chooser(**rand_conf_chooser_kwargs) elif not isinstance(random_configuration_chooser, RandomConfigurationChooser): raise ValueError("random_configuration_chooser has to be" " a class or object of RandomConfigurationChooser") # reset random number generator in config space to draw different # random configurations with each seed given to SMAC scenario.cs.seed(rng.randint(MAXINT)) # initial Trajectory Logger traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats,file_system=scenario.file_system) # initial EPM types, bounds = get_types(scenario.cs, scenario.feature_array) model_def_kwargs = { 'types': types, 'bounds': bounds, 'instance_features': scenario.feature_array, 'seed': rng.randint(MAXINT), 'pca_components': scenario.PCA_DIM, } if model_kwargs is not None: model_def_kwargs.update(model_kwargs) if model is None: for key, value in { 'log_y': scenario.transform_y in ["LOG", "LOGS"], 'num_trees': scenario.rf_num_trees, 'do_bootstrapping': scenario.rf_do_bootstrapping, 'ratio_features': scenario.rf_ratio_features, 'min_samples_split': scenario.rf_min_samples_split, 'min_samples_leaf': scenario.rf_min_samples_leaf, 'max_depth': scenario.rf_max_depth, }.items(): if key not in model_def_kwargs: model_def_kwargs[key] = value model_def_kwargs['configspace'] = self.scenario.cs model = RandomForestWithInstances(**model_def_kwargs) elif inspect.isclass(model): model_def_kwargs['configspace'] = self.scenario.cs model = model(**model_def_kwargs) else: raise TypeError( "Model not recognized: %s" %(type(model))) # initial acquisition function acq_def_kwargs = {'model': model} if acquisition_function_kwargs is not None: acq_def_kwargs.update(acquisition_function_kwargs) if acquisition_function is None: if scenario.transform_y in ["LOG", "LOGS"]: acquisition_function = LogEI(**acq_def_kwargs) else: acquisition_function = 
EI(**acq_def_kwargs) elif inspect.isclass(acquisition_function): acquisition_function = acquisition_function(**acq_def_kwargs) else: raise TypeError( "Argument acquisition_function must be None or an object implementing the " "AbstractAcquisitionFunction, not %s." % type(acquisition_function) ) if integrate_acquisition_function: acquisition_function = IntegratedAcquisitionFunction( acquisition_function=acquisition_function, **acq_def_kwargs ) # initialize optimizer on acquisition function acq_func_opt_kwargs = { 'acquisition_function': acquisition_function, 'config_space': scenario.cs, 'rng': rng, } if acquisition_function_optimizer_kwargs is not None: acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs) if acquisition_function_optimizer is None: for key, value in { 'max_steps': scenario.sls_max_steps, 'n_steps_plateau_walk': scenario.sls_n_steps_plateau_walk, }.items(): if key not in acq_func_opt_kwargs: acq_func_opt_kwargs[key] = value acquisition_function_optimizer = InterleavedLocalAndRandomSearch(**acq_func_opt_kwargs) elif inspect.isclass(acquisition_function_optimizer): acquisition_function_optimizer = acquisition_function_optimizer(**acq_func_opt_kwargs) else: raise TypeError( "Argument acquisition_function_optimizer must be None or an object implementing the " "AcquisitionFunctionMaximizer, but is '%s'" % type(acquisition_function_optimizer) ) # initialize tae_runner # First case, if tae_runner is None, the target algorithm is a call # string in the scenario file tae_def_kwargs = { 'stats': self.stats, 'run_obj': scenario.run_obj, 'runhistory': runhistory, 'par_factor': scenario.par_factor, 'cost_for_crash': scenario.cost_for_crash, 'abort_on_first_run_crash': scenario.abort_on_first_run_crash } if tae_runner_kwargs is not None: tae_def_kwargs.update(tae_runner_kwargs) if 'ta' not in tae_def_kwargs: tae_def_kwargs['ta'] = scenario.ta if tae_runner is None: tae_def_kwargs['ta'] = scenario.ta tae_runner = ExecuteTARunOld(**tae_def_kwargs) elif inspect.isclass(tae_runner): tae_runner = tae_runner(**tae_def_kwargs) elif callable(tae_runner): tae_def_kwargs['ta'] = tae_runner tae_runner = ExecuteTAFuncDict(**tae_def_kwargs) else: raise TypeError("Argument 'tae_runner' is %s, but must be " "either None, a callable or an object implementing " "ExecuteTaRun. Passing 'None' will result in the " "creation of target algorithm runner based on the " "call string in the scenario file." 
% type(tae_runner)) # Check that overall objective and tae objective are the same if tae_runner.run_obj != scenario.run_obj: raise ValueError("Objective for the target algorithm runner and " "the scenario must be the same, but are '%s' and " "'%s'" % (tae_runner.run_obj, scenario.run_obj)) # initialize intensification intensifier_def_kwargs = { 'tae_runner': tae_runner, 'stats': self.stats, 'traj_logger': traj_logger, 'rng': rng, 'instances': scenario.train_insts, 'cutoff': scenario.cutoff, 'deterministic': scenario.deterministic, 'run_obj_time': scenario.run_obj == "runtime", 'always_race_against': scenario.cs.get_default_configuration() if scenario.always_race_default else None, 'use_ta_time_bound': scenario.use_ta_time, 'instance_specifics': scenario.instance_specific, 'minR': scenario.minR, 'maxR': scenario.maxR, 'adaptive_capping_slackfactor': scenario.intens_adaptive_capping_slackfactor, 'min_chall': scenario.intens_min_chall } if intensifier_kwargs is not None: intensifier_def_kwargs.update(intensifier_kwargs) if intensifier is None: intensifier = Intensifier(**intensifier_def_kwargs) elif inspect.isclass(intensifier): intensifier = intensifier(**intensifier_def_kwargs) else: raise TypeError( "Argument intensifier must be None or an object implementing the Intensifier, but is '%s'" % type(intensifier) ) # initial design if initial_design is not None and initial_configurations is not None: raise ValueError( "Either use initial_design or initial_configurations; but not both") init_design_def_kwargs = { 'tae_runner': tae_runner, 'scenario': scenario, 'stats': self.stats, 'traj_logger': traj_logger, 'runhistory': runhistory, 'rng': rng, 'configs': initial_configurations, 'intensifier': intensifier, 'aggregate_func': aggregate_func, 'n_configs_x_params': 0, 'max_config_fracs': 0.0 } if initial_design_kwargs is not None: init_design_def_kwargs.update(initial_design_kwargs) if initial_configurations is not None: initial_design = InitialDesign(**init_design_def_kwargs) elif initial_design is None: if scenario.initial_incumbent == "DEFAULT": init_design_def_kwargs['max_config_fracs'] = 0.0 initial_design = DefaultConfiguration(**init_design_def_kwargs) elif scenario.initial_incumbent == "RANDOM": init_design_def_kwargs['max_config_fracs'] = 0.0 initial_design = RandomConfigurations(**init_design_def_kwargs) elif scenario.initial_incumbent == "LHD": initial_design = LHDesign(**init_design_def_kwargs) elif scenario.initial_incumbent == "FACTORIAL": initial_design = FactorialInitialDesign(**init_design_def_kwargs) elif scenario.initial_incumbent == "SOBOL": initial_design = SobolDesign(**init_design_def_kwargs) else: raise ValueError("Don't know what kind of initial_incumbent " "'%s' is" % scenario.initial_incumbent) elif inspect.isclass(initial_design): initial_design = initial_design(**init_design_def_kwargs) else: raise TypeError( "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'" % type(initial_design) ) # if we log the performance data, # the RFRImputator will already get # log transform data from the runhistory if scenario.transform_y in ["LOG", "LOGS"]: cutoff = np.log(np.nanmin([np.inf, np.float_(scenario.cutoff)])) threshold = cutoff + np.log(scenario.par_factor) else: cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff)]) threshold = cutoff * scenario.par_factor num_params = len(scenario.cs.get_hyperparameters()) imputor = RFRImputator(rng=rng, cutoff=cutoff, threshold=threshold, model=model, change_threshold=0.01, max_iter=2) 
r2e_def_kwargs = { 'scenario': scenario, 'num_params': num_params, 'success_states': [StatusType.SUCCESS, ], 'impute_censored_data': True, 'impute_state': [StatusType.CAPPED, ], 'imputor': imputor, 'scale_perc': 5 } if scenario.run_obj == 'quality': r2e_def_kwargs.update({ 'success_states': [StatusType.SUCCESS, StatusType.CRASHED], 'impute_censored_data': False, 'impute_state': None, }) if runhistory2epm_kwargs is not None: r2e_def_kwargs.update(runhistory2epm_kwargs) if runhistory2epm is None: if scenario.run_obj == 'runtime': runhistory2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs) elif scenario.run_obj == 'quality': if scenario.transform_y == "NONE": runhistory2epm = RunHistory2EPM4Cost(**r2e_def_kwargs) elif scenario.transform_y == "LOG": runhistory2epm = RunHistory2EPM4LogCost(**r2e_def_kwargs) elif scenario.transform_y == "LOGS": runhistory2epm = RunHistory2EPM4LogScaledCost(**r2e_def_kwargs) elif scenario.transform_y == "INVS": runhistory2epm = RunHistory2EPM4InvScaledCost(**r2e_def_kwargs) else: raise ValueError('Unknown run objective: %s. Should be either ' 'quality or runtime.' % self.scenario.run_obj) elif inspect.isclass(runhistory2epm): runhistory2epm = runhistory2epm(**r2e_def_kwargs) else: raise TypeError( "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'" % type(runhistory2epm) ) smbo_args = { 'scenario': scenario, 'stats': self.stats, 'initial_design': initial_design, 'runhistory': runhistory, 'runhistory2epm': runhistory2epm, 'intensifier': intensifier, 'aggregate_func': aggregate_func, 'num_run': run_id, 'model': model, 'acq_optimizer': acquisition_function_optimizer, 'acquisition_func': acquisition_function, 'rng': rng, 'restore_incumbent': restore_incumbent, 'random_configuration_chooser': random_configuration_chooser } if smbo_class is None: self.solver = SMBO(**smbo_args) else: self.solver = smbo_class(**smbo_args)
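# The constructor above builds a default kwargs dict per component, merges the user's `*_kwargs`
# on top, and then fills in scenario-derived values only where nothing is set yet. A simplified
# sketch of that merge order in isolation; `merge_component_kwargs` is a hypothetical helper and
# not part of SMAC's API.


def merge_component_kwargs(defaults, overrides=None, fallback=None):
    """User overrides win over `defaults`; `fallback` values fill only still-missing keys."""
    merged = dict(defaults)
    if overrides is not None:
        merged.update(overrides)
    if fallback is not None:
        for key, value in fallback.items():
            merged.setdefault(key, value)
    return merged


# e.g. model_kwargs={'num_trees': 20} changes only the forest size and keeps every other
# RandomForestWithInstances default taken from the scenario.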
def __init__( self, scenario: Scenario, # TODO: once we drop python3.4 add type hint # typing.Union[ExecuteTARun, callable] tae_runner=None, runhistory: RunHistory = None, intensifier: Intensifier = None, acquisition_function: AbstractAcquisitionFunction = None, model: AbstractEPM = None, runhistory2epm: AbstractRunHistory2EPM = None, initial_design: InitialDesign = None, initial_configurations: typing.List[Configuration] = None, stats: Stats = None, rng: np.random.RandomState = None, run_id: int = 1): """Constructor""" self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__) aggregate_func = average_cost self.runhistory = None self.trajectory = None # initialize stats object if stats: self.stats = stats else: self.stats = Stats(scenario) self.output_dir = create_output_directory(scenario, run_id) scenario.write() # initialize empty runhistory if runhistory is None: runhistory = RunHistory(aggregate_func=aggregate_func) # inject aggr_func if necessary if runhistory.aggregate_func is None: runhistory.aggregate_func = aggregate_func # initial random number generator num_run, rng = self._get_rng(rng=rng) # reset random number generator in config space to draw different # random configurations with each seed given to SMAC scenario.cs.seed(rng.randint(MAXINT)) # initial Trajectory Logger traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats) # initial EPM types, bounds = get_types(scenario.cs, scenario.feature_array) if model is None: model = RandomForestWithInstances( types=types, bounds=bounds, instance_features=scenario.feature_array, seed=rng.randint(MAXINT), pca_components=scenario.PCA_DIM, num_trees=scenario.rf_num_trees, do_bootstrapping=scenario.rf_do_bootstrapping, ratio_features=scenario.rf_ratio_features, min_samples_split=scenario.rf_min_samples_split, min_samples_leaf=scenario.rf_min_samples_leaf, max_depth=scenario.rf_max_depth) # initial acquisition function if acquisition_function is None: if scenario.run_obj == "runtime": acquisition_function = LogEI(model=model) else: acquisition_function = EI(model=model) # inject model if necessary if acquisition_function.model is None: acquisition_function.model = model # initialize optimizer on acquisition function local_search = LocalSearch( acquisition_function, scenario.cs, max_steps=scenario.sls_max_steps, n_steps_plateau_walk=scenario.sls_n_steps_plateau_walk) # initialize tae_runner # First case, if tae_runner is None, the target algorithm is a call # string in the scenario file if tae_runner is None: tae_runner = ExecuteTARunOld( ta=scenario.ta, stats=self.stats, run_obj=scenario.run_obj, runhistory=runhistory, par_factor=scenario.par_factor, cost_for_crash=scenario.cost_for_crash) # Second case, the tae_runner is a function to be optimized elif callable(tae_runner): tae_runner = ExecuteTAFuncDict( ta=tae_runner, stats=self.stats, run_obj=scenario.run_obj, memory_limit=scenario.memory_limit, runhistory=runhistory, par_factor=scenario.par_factor, cost_for_crash=scenario.cost_for_crash) # Third case, if it is an ExecuteTaRun we can simply use the # instance. Otherwise, the next check raises an exception elif not isinstance(tae_runner, ExecuteTARun): raise TypeError("Argument 'tae_runner' is %s, but must be " "either a callable or an instance of " "ExecuteTaRun. Passing 'None' will result in the " "creation of target algorithm runner based on the " "call string in the scenario file." 
% type(tae_runner)) # Check that overall objective and tae objective are the same if tae_runner.run_obj != scenario.run_obj: raise ValueError("Objective for the target algorithm runner and " "the scenario must be the same, but are '%s' and " "'%s'" % (tae_runner.run_obj, scenario.run_obj)) # inject stats if necessary if tae_runner.stats is None: tae_runner.stats = self.stats # inject runhistory if necessary if tae_runner.runhistory is None: tae_runner.runhistory = runhistory # inject cost_for_crash if tae_runner.crash_cost != scenario.cost_for_crash: tae_runner.crash_cost = scenario.cost_for_crash # initialize intensification if intensifier is None: intensifier = Intensifier( tae_runner=tae_runner, stats=self.stats, traj_logger=traj_logger, rng=rng, instances=scenario.train_insts, cutoff=scenario.cutoff, deterministic=scenario.deterministic, run_obj_time=scenario.run_obj == "runtime", always_race_against=scenario.cs.get_default_configuration() if scenario.always_race_default else None, instance_specifics=scenario.instance_specific, minR=scenario.minR, maxR=scenario.maxR, adaptive_capping_slackfactor=scenario. intens_adaptive_capping_slackfactor, min_chall=scenario.intens_min_chall) # inject deps if necessary if intensifier.tae_runner is None: intensifier.tae_runner = tae_runner if intensifier.stats is None: intensifier.stats = self.stats if intensifier.traj_logger is None: intensifier.traj_logger = traj_logger # initial design if initial_design is not None and initial_configurations is not None: raise ValueError( "Either use initial_design or initial_configurations; but not both" ) if initial_configurations is not None: initial_design = MultiConfigInitialDesign( tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, runhistory=runhistory, rng=rng, configs=initial_configurations, intensifier=intensifier, aggregate_func=aggregate_func) elif initial_design is None: if scenario.initial_incumbent == "DEFAULT": initial_design = DefaultConfiguration(tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) elif scenario.initial_incumbent == "RANDOM": initial_design = RandomConfiguration(tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) else: raise ValueError("Don't know what kind of initial_incumbent " "'%s' is" % scenario.initial_incumbent) # inject deps if necessary if initial_design.tae_runner is None: initial_design.tae_runner = tae_runner if initial_design.scenario is None: initial_design.scenario = scenario if initial_design.stats is None: initial_design.stats = self.stats if initial_design.traj_logger is None: initial_design.traj_logger = traj_logger # initial conversion of runhistory into EPM data if runhistory2epm is None: num_params = len(scenario.cs.get_hyperparameters()) if scenario.run_obj == "runtime": # if we log the performance data, # the RFRImputator will already get # log transform data from the runhistory cutoff = np.log(scenario.cutoff) threshold = np.log(scenario.cutoff * scenario.par_factor) imputor = RFRImputator(rng=rng, cutoff=cutoff, threshold=threshold, model=model, change_threshold=0.01, max_iter=2) runhistory2epm = RunHistory2EPM4LogCost( scenario=scenario, num_params=num_params, success_states=[ StatusType.SUCCESS, ], impute_censored_data=True, impute_state=[ StatusType.CAPPED, ], imputor=imputor) elif scenario.run_obj == 'quality': runhistory2epm = RunHistory2EPM4Cost( scenario=scenario, num_params=num_params, success_states=[ StatusType.SUCCESS, ], 
impute_censored_data=False, impute_state=None) else: raise ValueError('Unknown run objective: %s. Should be either ' 'quality or runtime.' % scenario.run_obj) # inject scenario if necessary: if runhistory2epm.scenario is None: runhistory2epm.scenario = scenario self.solver = EPILS_Solver(scenario=scenario, stats=self.stats, initial_design=initial_design, runhistory=runhistory, runhistory2epm=runhistory2epm, intensifier=intensifier, aggregate_func=aggregate_func, num_run=num_run, model=model, acq_optimizer=local_search, acquisition_func=acquisition_function, rng=rng)
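# A minimal usage sketch for the facade assembled by the constructor above.
# The facade class name `EPILS` and its optimize() entry point are assumptions
# for illustration; the constructor arguments and the Scenario keys follow the
# code above and the usual SMAC scenario dictionary format.
import numpy as np
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter
from smac.scenario.scenario import Scenario

def toy_objective(cfg):
    # simple quality objective over two continuous hyperparameters
    return (cfg["x1"] - 1.0) ** 2 + (cfg["x2"] + 2.0) ** 2

cs = ConfigurationSpace()
cs.add_hyperparameters([UniformFloatHyperparameter("x1", -5.0, 5.0),
                        UniformFloatHyperparameter("x2", -5.0, 5.0)])

scenario = Scenario({"run_obj": "quality",     # optimize cost, not runtime
                     "runcount-limit": 50,     # budget of 50 target runs
                     "cs": cs,
                     "deterministic": "true"})

# the constructor wires stats, runhistory, EPM, acquisition function,
# intensifier and initial design before handing them to EPILS_Solver
epils = EPILS(scenario=scenario, tae_runner=toy_objective,
              rng=np.random.RandomState(42))
incumbent = epils.optimize()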
def __init__( self, scenario: Scenario, tae_runner: typing.Optional[typing.Union[ExecuteTARun, typing.Callable]] = None, runhistory: typing.Optional[RunHistory] = None, intensifier: typing.Optional[Intensifier] = None, acquisition_function: typing. Optional[AbstractAcquisitionFunction] = None, acquisition_function_optimizer: typing. Optional[AcquisitionFunctionMaximizer] = None, model: typing.Optional[AbstractEPM] = None, runhistory2epm: typing.Optional[AbstractRunHistory2EPM] = None, initial_design: typing.Optional[InitialDesign] = None, initial_configurations: typing.Optional[ typing.List[Configuration]] = None, stats: typing.Optional[Stats] = None, restore_incumbent: typing.Optional[Configuration] = None, rng: typing.Optional[typing.Union[np.random.RandomState, int]] = None, smbo_class: typing.Optional[SMBO] = None, run_id: typing.Optional[int] = None, random_configuration_chooser: typing. Optional[RandomConfigurationChooser] = None): """ Constructor Parameters ---------- scenario : ~smac.scenario.scenario.Scenario Scenario object tae_runner : ~smac.tae.execute_ta_run.ExecuteTARun or callable Callable or implementation of :class:`~smac.tae.execute_ta_run.ExecuteTARun`. In case a callable is passed it will be wrapped by :class:`~smac.tae.execute_func.ExecuteTAFuncDict`. If not set, it will be initialized with the :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`. runhistory : RunHistory runhistory to store all algorithm runs intensifier : Intensifier intensification object to issue a racing to decide the current incumbent acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction Object that implements the :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`. Will use :class:`~smac.optimizer.acquisition.EI` if not set. acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer Object that implements the :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`. Will use :class:`smac.optimizer.ei_optimization.InterleavedLocalAndRandomSearch` if not set. model : AbstractEPM Model that implements train() and predict(). Will use a :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not set. runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP Object that implements the AbstractRunHistory2EPM. If None, will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost` if objective is cost or :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost` if objective is runtime. initial_design : InitialDesign initial sampling design initial_configurations : typing.List[Configuration] list of initial configurations for initial design -- cannot be used together with initial_design stats : Stats optional stats object rng : np.random.RandomState Random number generator restore_incumbent : Configuration incumbent used if restoring to previous state smbo_class : ~smac.optimizer.smbo.SMBO Class implementing the SMBO interface which will be used to instantiate the optimizer class. run_id : int (optional) Run ID will be used as subfolder for output_dir. If no ``run_id`` is given, a random ``run_id`` will be chosen. random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser How often to choose a random configuration during the intensification procedure. """ self.logger = logging.getLogger(self.__module__ + "." 
+ self.__class__.__name__) aggregate_func = average_cost self.scenario = scenario self.output_dir = "" if not restore_incumbent: # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an # output directory. This is the default path. # initial random number generator run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger) self.output_dir = create_output_directory(scenario, run_id) elif scenario.output_dir is not None: run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger) # output-directory is created in CLI when restoring from a # folder. calling the function again in the facade results in two # folders being created: run_X and run_X.OLD. if we are # restoring, the output-folder exists already and we omit creating it, # but set the self-output_dir to the dir. # necessary because we want to write traj to new output-dir in CLI. self.output_dir = scenario.output_dir_for_this_run if (scenario.deterministic is True and getattr(scenario, 'tuner_timeout', None) is None and scenario.run_obj == 'quality'): self.logger.info('Optimizing a deterministic scenario for ' 'quality without a tuner timeout - will make ' 'SMAC deterministic!') scenario.intensification_percentage = 1e-10 scenario.write() # initialize stats object if stats: self.stats = stats else: self.stats = Stats(scenario) if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG": self.logger.warn( "Runtime as objective automatically activates log(y) transformation" ) self.scenario.transform_y = "LOG" # initialize empty runhistory if runhistory is None: runhistory = RunHistory(aggregate_func=aggregate_func) # inject aggr_func if necessary if runhistory.aggregate_func is None: runhistory.aggregate_func = aggregate_func if not random_configuration_chooser: random_configuration_chooser = ChooserProb(prob=scenario.rand_prob, rng=rng) # reset random number generator in config space to draw different # random configurations with each seed given to SMAC scenario.cs.seed(rng.randint(MAXINT)) # initial Trajectory Logger traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats) # initial EPM types, bounds = get_types(scenario.cs, scenario.feature_array) if model is None: model = RandomForestWithInstances( types=types, bounds=bounds, instance_features=scenario.feature_array, seed=rng.randint(MAXINT), pca_components=scenario.PCA_DIM, log_y=scenario.transform_y in ["LOG", "LOGS"], num_trees=scenario.rf_num_trees, do_bootstrapping=scenario.rf_do_bootstrapping, ratio_features=scenario.rf_ratio_features, min_samples_split=scenario.rf_min_samples_split, min_samples_leaf=scenario.rf_min_samples_leaf, max_depth=scenario.rf_max_depth) # initial acquisition function if acquisition_function is None: if scenario.transform_y in ["LOG", "LOGS"]: acquisition_function = LogEI(model=model) else: acquisition_function = EI(model=model) # inject model if necessary if acquisition_function.model is None: acquisition_function.model = model # initialize optimizer on acquisition function if acquisition_function_optimizer is None: acquisition_function_optimizer = InterleavedLocalAndRandomSearch( acquisition_function=acquisition_function, config_space=scenario.cs, rng=np.random.RandomState(seed=rng.randint(MAXINT)), max_steps=scenario.sls_max_steps, n_steps_plateau_walk=scenario.sls_n_steps_plateau_walk) elif not isinstance( acquisition_function_optimizer, AcquisitionFunctionMaximizer, ): raise ValueError( "Argument 'acquisition_function_optimizer' must be of type" 
"'AcquisitionFunctionMaximizer', but is '%s'" % type(acquisition_function_optimizer)) # initialize tae_runner # First case, if tae_runner is None, the target algorithm is a call # string in the scenario file if tae_runner is None: tae_runner = ExecuteTARunOld( ta=scenario.ta, stats=self.stats, run_obj=scenario.run_obj, runhistory=runhistory, par_factor=scenario.par_factor, cost_for_crash=scenario.cost_for_crash, abort_on_first_run_crash=scenario.abort_on_first_run_crash) # Second case, the tae_runner is a function to be optimized elif callable(tae_runner): tae_runner = ExecuteTAFuncDict( ta=tae_runner, stats=self.stats, run_obj=scenario.run_obj, memory_limit=scenario.memory_limit, runhistory=runhistory, par_factor=scenario.par_factor, cost_for_crash=scenario.cost_for_crash, abort_on_first_run_crash=scenario.abort_on_first_run_crash) # Third case, if it is an ExecuteTaRun we can simply use the # instance. Otherwise, the next check raises an exception elif not isinstance(tae_runner, ExecuteTARun): raise TypeError("Argument 'tae_runner' is %s, but must be " "either a callable or an instance of " "ExecuteTaRun. Passing 'None' will result in the " "creation of target algorithm runner based on the " "call string in the scenario file." % type(tae_runner)) # Check that overall objective and tae objective are the same if tae_runner.run_obj != scenario.run_obj: raise ValueError("Objective for the target algorithm runner and " "the scenario must be the same, but are '%s' and " "'%s'" % (tae_runner.run_obj, scenario.run_obj)) # inject stats if necessary if tae_runner.stats is None: tae_runner.stats = self.stats # inject runhistory if necessary if tae_runner.runhistory is None: tae_runner.runhistory = runhistory # inject cost_for_crash if tae_runner.crash_cost != scenario.cost_for_crash: tae_runner.crash_cost = scenario.cost_for_crash # initialize intensification if intensifier is None: intensifier = Intensifier( tae_runner=tae_runner, stats=self.stats, traj_logger=traj_logger, rng=rng, instances=scenario.train_insts, cutoff=scenario.cutoff, deterministic=scenario.deterministic, run_obj_time=scenario.run_obj == "runtime", always_race_against=scenario.cs.get_default_configuration() if scenario.always_race_default else None, use_ta_time_bound=scenario.use_ta_time, instance_specifics=scenario.instance_specific, minR=scenario.minR, maxR=scenario.maxR, adaptive_capping_slackfactor=scenario. 
intens_adaptive_capping_slackfactor, min_chall=scenario.intens_min_chall) # inject deps if necessary if intensifier.tae_runner is None: intensifier.tae_runner = tae_runner if intensifier.stats is None: intensifier.stats = self.stats if intensifier.traj_logger is None: intensifier.traj_logger = traj_logger # initial design if initial_design is not None and initial_configurations is not None: raise ValueError( "Either use initial_design or initial_configurations; but not both" ) if initial_configurations is not None: initial_design = MultiConfigInitialDesign( tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, runhistory=runhistory, rng=rng, configs=initial_configurations, intensifier=intensifier, aggregate_func=aggregate_func) elif initial_design is None: if scenario.initial_incumbent == "DEFAULT": initial_design = DefaultConfiguration(tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) elif scenario.initial_incumbent == "RANDOM": initial_design = RandomConfiguration(tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) elif scenario.initial_incumbent == "LHD": initial_design = LHDesign(runhistory=runhistory, intensifier=intensifier, aggregate_func=aggregate_func, tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) elif scenario.initial_incumbent == "FACTORIAL": initial_design = FactorialInitialDesign( runhistory=runhistory, intensifier=intensifier, aggregate_func=aggregate_func, tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) elif scenario.initial_incumbent == "SOBOL": initial_design = SobolDesign(runhistory=runhistory, intensifier=intensifier, aggregate_func=aggregate_func, tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) else: raise ValueError("Don't know what kind of initial_incumbent " "'%s' is" % scenario.initial_incumbent) # inject deps if necessary if initial_design.tae_runner is None: initial_design.tae_runner = tae_runner if initial_design.scenario is None: initial_design.scenario = scenario if initial_design.stats is None: initial_design.stats = self.stats if initial_design.traj_logger is None: initial_design.traj_logger = traj_logger # initial conversion of runhistory into EPM data if runhistory2epm is None: num_params = len(scenario.cs.get_hyperparameters()) if scenario.run_obj == 'runtime': # if we log the performance data, # the RFRImputator will already get # log transform data from the runhistory cutoff = np.log(scenario.cutoff) threshold = np.log(scenario.cutoff * scenario.par_factor) imputor = RFRImputator(rng=rng, cutoff=cutoff, threshold=threshold, model=model, change_threshold=0.01, max_iter=2) runhistory2epm = RunHistory2EPM4LogCost( scenario=scenario, num_params=num_params, success_states=[ StatusType.SUCCESS, ], impute_censored_data=True, impute_state=[ StatusType.CAPPED, ], imputor=imputor) elif scenario.run_obj == 'quality': if scenario.transform_y == "NONE": runhistory2epm = RunHistory2EPM4Cost( scenario=scenario, num_params=num_params, success_states=[ StatusType.SUCCESS, StatusType.CRASHED ], impute_censored_data=False, impute_state=None) elif scenario.transform_y == "LOG": runhistory2epm = RunHistory2EPM4LogCost( scenario=scenario, num_params=num_params, success_states=[ StatusType.SUCCESS, StatusType.CRASHED ], impute_censored_data=False, impute_state=None) elif scenario.transform_y == "LOGS": runhistory2epm = 
RunHistory2EPM4LogScaledCost( scenario=scenario, num_params=num_params, success_states=[ StatusType.SUCCESS, StatusType.CRASHED ], impute_censored_data=False, impute_state=None) elif scenario.transform_y == "INVS": runhistory2epm = RunHistory2EPM4InvScaledCost( scenario=scenario, num_params=num_params, success_states=[ StatusType.SUCCESS, StatusType.CRASHED ], impute_censored_data=False, impute_state=None) else: raise ValueError('Unknown run objective: %s. Should be either ' 'quality or runtime.' % self.scenario.run_obj) # inject scenario if necessary: if runhistory2epm.scenario is None: runhistory2epm.scenario = scenario smbo_args = { 'scenario': scenario, 'stats': self.stats, 'initial_design': initial_design, 'runhistory': runhistory, 'runhistory2epm': runhistory2epm, 'intensifier': intensifier, 'aggregate_func': aggregate_func, 'num_run': run_id, 'model': model, 'acq_optimizer': acquisition_function_optimizer, 'acquisition_func': acquisition_function, 'rng': rng, 'restore_incumbent': restore_incumbent, 'random_configuration_chooser': random_configuration_chooser } if smbo_class is None: self.solver = SMBO(**smbo_args) else: self.solver = smbo_class(**smbo_args)
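# Hedged sketch of how the constructor above selects components purely from
# scenario settings: transform_y="LOG" switches to LogEI plus a log-cost
# runhistory transformer, and initial_incumbent="SOBOL" picks SobolDesign.
# The facade name `SMAC`, the placeholders `cs`/`target_func`, and the
# assumption that transform_y / initial_incumbent are accepted as scenario
# input keys are not taken from the source.
from smac.scenario.scenario import Scenario

scenario = Scenario({"run_obj": "quality",
                     "runcount-limit": 100,
                     "cs": cs,
                     "deterministic": "true",
                     "transform_y": "LOG",           # -> LogEI + RunHistory2EPM4LogCost
                     "initial_incumbent": "SOBOL"})  # -> SobolDesign instead of DefaultConfiguration

smac = SMAC(scenario=scenario, tae_runner=target_func, run_id=3)
incumbent = smac.optimize()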
def __init__( self, scenario: Scenario, tae_runner: Optional[Union[Type[BaseRunner], Callable]] = None, tae_runner_kwargs: Optional[Dict] = None, runhistory: Optional[Union[Type[RunHistory], RunHistory]] = None, runhistory_kwargs: Optional[Dict] = None, intensifier: Optional[Type[AbstractRacer]] = None, intensifier_kwargs: Optional[Dict] = None, acquisition_function: Optional[ Type[AbstractAcquisitionFunction]] = None, acquisition_function_kwargs: Optional[Dict] = None, integrate_acquisition_function: bool = False, acquisition_function_optimizer: Optional[ Type[AcquisitionFunctionMaximizer]] = None, acquisition_function_optimizer_kwargs: Optional[Dict] = None, model: Optional[Type[AbstractEPM]] = None, model_kwargs: Optional[Dict] = None, runhistory2epm: Optional[Type[AbstractRunHistory2EPM]] = None, runhistory2epm_kwargs: Optional[Dict] = None, initial_design: Optional[Type[InitialDesign]] = None, initial_design_kwargs: Optional[Dict] = None, initial_configurations: Optional[List[Configuration]] = None, stats: Optional[Stats] = None, restore_incumbent: Optional[Configuration] = None, rng: Optional[Union[np.random.RandomState, int]] = None, smbo_class: Optional[Type[SMBO]] = None, run_id: Optional[int] = None, random_configuration_chooser: Optional[ Type[RandomConfigurationChooser]] = None, random_configuration_chooser_kwargs: Optional[Dict] = None, dask_client: Optional[dask.distributed.Client] = None, n_jobs: Optional[int] = 1, ): """ Constructor Parameters ---------- scenario : ~smac.scenario.scenario.Scenario Scenario object tae_runner : ~smac.tae.base.BaseRunner or callable Callable or implementation of :class:`~smac.tae.base.BaseRunner`. In case a callable is passed it will be wrapped by :class:`~smac.tae.execute_func.ExecuteTAFuncDict`. If not set, it will be initialized with the :class:`~smac.tae.execute_ta_run_old.ExecuteTARunOld`. tae_runner_kwargs: Optional[Dict] arguments passed to constructor of '~tae_runner' runhistory : RunHistory runhistory to store all algorithm runs runhistory_kwargs : Optional[Dict] arguments passed to constructor of runhistory. We strongly advise against changing the aggregation function, since it will break some code assumptions intensifier : Intensifier intensification object to issue a racing to decide the current incumbent intensifier_kwargs: Optional[Dict] arguments passed to the constructor of '~intensifier' acquisition_function : ~smac.optimizer.acquisition.AbstractAcquisitionFunction Class or object that implements the :class:`~smac.optimizer.acquisition.AbstractAcquisitionFunction`. Will use :class:`~smac.optimizer.acquisition.EI` or :class:`~smac.optimizer.acquisition.LogEI` if not set. `~acquisition_function_kwargs` is passed to the class constructor. acquisition_function_kwargs : Optional[Dict] dictionary to pass specific arguments to ~acquisition_function integrate_acquisition_function : bool, default=False Whether to integrate the acquisition function. Works only with models which can sample their hyperparameters (i.e. GaussianProcessMCMC). acquisition_function_optimizer : ~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer Object that implements the :class:`~smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer`. Will use :class:`smac.optimizer.ei_optimization.LocalAndSortedRandomSearch` if not set. acquisition_function_optimizer_kwargs: Optional[Dict] Arguments passed to constructor of '~acquisition_function_optimizer' model : AbstractEPM Model that implements train() and predict(). 
Will use a :class:`~smac.epm.rf_with_instances.RandomForestWithInstances` if not set. model_kwargs : Optional[Dict] Arguments passed to constructor of '~model' runhistory2epm : ~smac.runhistory.runhistory2epm.RunHistory2EMP Object that implements the AbstractRunHistory2EPM. If None, will use :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4Cost` if objective is cost or :class:`~smac.runhistory.runhistory2epm.RunHistory2EPM4LogCost` if objective is runtime. runhistory2epm_kwargs: Optional[Dict] Arguments passed to the constructor of '~runhistory2epm' initial_design : InitialDesign initial sampling design initial_design_kwargs: Optional[Dict] arguments passed to constructor of `~initial_design' initial_configurations : List[Configuration] list of initial configurations for initial design -- cannot be used together with initial_design stats : Stats optional stats object rng : np.random.RandomState Random number generator restore_incumbent : Configuration incumbent used if restoring to previous state smbo_class : ~smac.optimizer.smbo.SMBO Class implementing the SMBO interface which will be used to instantiate the optimizer class. run_id : int (optional) Run ID will be used as subfolder for output_dir. If no ``run_id`` is given, a random ``run_id`` will be chosen. random_configuration_chooser : ~smac.optimizer.random_configuration_chooser.RandomConfigurationChooser How often to choose a random configuration during the intensification procedure. random_configuration_chooser_kwargs : Optional[Dict] arguments of constructor for '~random_configuration_chooser' dask_client : dask.distributed.Client User-created dask client, can be used to start a dask cluster and then attach SMAC to it. n_jobs : int, optional Number of jobs. If > 1 or -1, this creates a dask client if ``dask_client`` is ``None``. Will be ignored if ``dask_client`` is not ``None``. If ``None``, this value will be set to 1, if ``-1``, this will be set to the number of cpu cores. """ self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__) self.scenario = scenario self.output_dir = "" if not restore_incumbent: # restore_incumbent is used by the CLI interface which provides a method for restoring a SMAC run given an # output directory. This is the default path. # initial random number generator run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger) self.output_dir = create_output_directory(scenario, run_id) elif scenario.output_dir is not None: # type: ignore[attr-defined] # noqa F821 run_id, rng = get_rng(rng=rng, run_id=run_id, logger=self.logger) # output-directory is created in CLI when restoring from a # folder. calling the function again in the facade results in two # folders being created: run_X and run_X.OLD. if we are # restoring, the output-folder exists already and we omit creating it, # but set the self-output_dir to the dir. # necessary because we want to write traj to new output-dir in CLI. self.output_dir = cast(str, scenario.output_dir_for_this_run ) # type: ignore[attr-defined] # noqa F821 rng = cast(np.random.RandomState, rng) if (scenario.deterministic is True # type: ignore[attr-defined] # noqa F821 and getattr(scenario, 'tuner_timeout', None) is None and scenario.run_obj == 'quality' # type: ignore[attr-defined] # noqa F821 ): self.logger.info( 'Optimizing a deterministic scenario for quality without a tuner timeout - will make ' 'SMAC deterministic and only evaluate one configuration per iteration!' 
) scenario.intensification_percentage = 1e-10 # type: ignore[attr-defined] # noqa F821 scenario.min_chall = 1 # type: ignore[attr-defined] # noqa F821 scenario.write() # initialize stats object if stats: self.stats = stats else: self.stats = Stats(scenario) if self.scenario.run_obj == "runtime" and not self.scenario.transform_y == "LOG": # type: ignore[attr-defined] # noqa F821 self.logger.warning( "Runtime as objective automatically activates log(y) transformation" ) self.scenario.transform_y = "LOG" # type: ignore[attr-defined] # noqa F821 # initialize empty runhistory runhistory_def_kwargs = {} if runhistory_kwargs is not None: runhistory_def_kwargs.update(runhistory_kwargs) if runhistory is None: runhistory = RunHistory(**runhistory_def_kwargs) elif inspect.isclass(runhistory): runhistory = runhistory( **runhistory_def_kwargs) # type: ignore[operator] # noqa F821 elif isinstance(runhistory, RunHistory): pass else: raise ValueError( 'runhistory has to be a class or an object of RunHistory') rand_conf_chooser_kwargs = {'rng': rng} if random_configuration_chooser_kwargs is not None: rand_conf_chooser_kwargs.update( random_configuration_chooser_kwargs) if random_configuration_chooser is None: if 'prob' not in rand_conf_chooser_kwargs: rand_conf_chooser_kwargs[ 'prob'] = scenario.rand_prob # type: ignore[attr-defined] # noqa F821 random_configuration_chooser_instance = ( ChooserProb(**rand_conf_chooser_kwargs ) # type: ignore[arg-type] # noqa F821 ) # type: RandomConfigurationChooser elif inspect.isclass(random_configuration_chooser): random_configuration_chooser_instance = random_configuration_chooser( ** rand_conf_chooser_kwargs) # type: ignore[arg-type] # noqa F821 elif not isinstance(random_configuration_chooser, RandomConfigurationChooser): raise ValueError( "random_configuration_chooser has to be" " a class or object of RandomConfigurationChooser") # reset random number generator in config space to draw different # random configurations with each seed given to SMAC scenario.cs.seed( rng.randint(MAXINT)) # type: ignore[attr-defined] # noqa F821 # initial Trajectory Logger traj_logger = TrajLogger(output_dir=self.output_dir, stats=self.stats) # initial EPM types, bounds = get_types( scenario.cs, scenario.feature_array) # type: ignore[attr-defined] # noqa F821 model_def_kwargs = { 'types': types, 'bounds': bounds, 'instance_features': scenario.feature_array, 'seed': rng.randint(MAXINT), 'pca_components': scenario.PCA_DIM, } if model_kwargs is not None: model_def_kwargs.update(model_kwargs) if model is None: for key, value in { 'log_y': scenario.transform_y in ["LOG", "LOGS"], # type: ignore[attr-defined] # noqa F821 'num_trees': scenario. rf_num_trees, # type: ignore[attr-defined] # noqa F821 'do_bootstrapping': scenario. rf_do_bootstrapping, # type: ignore[attr-defined] # noqa F821 'ratio_features': scenario. rf_ratio_features, # type: ignore[attr-defined] # noqa F821 'min_samples_split': scenario. rf_min_samples_split, # type: ignore[attr-defined] # noqa F821 'min_samples_leaf': scenario. rf_min_samples_leaf, # type: ignore[attr-defined] # noqa F821 'max_depth': scenario. 
rf_max_depth, # type: ignore[attr-defined] # noqa F821 }.items(): if key not in model_def_kwargs: model_def_kwargs[key] = value model_def_kwargs[ 'configspace'] = self.scenario.cs # type: ignore[attr-defined] # noqa F821 model_instance = ( RandomForestWithInstances( **model_def_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: AbstractEPM elif inspect.isclass(model): model_def_kwargs[ 'configspace'] = self.scenario.cs # type: ignore[attr-defined] # noqa F821 model_instance = model( **model_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError("Model not recognized: %s" % (type(model))) # initial acquisition function acq_def_kwargs = {'model': model_instance} if acquisition_function_kwargs is not None: acq_def_kwargs.update(acquisition_function_kwargs) if acquisition_function is None: if scenario.transform_y in [ "LOG", "LOGS" ]: # type: ignore[attr-defined] # noqa F821 acquisition_function_instance = ( LogEI(** acq_def_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: AbstractAcquisitionFunction else: acquisition_function_instance = EI( **acq_def_kwargs) # type: ignore[arg-type] # noqa F821 elif inspect.isclass(acquisition_function): acquisition_function_instance = acquisition_function( **acq_def_kwargs) else: raise TypeError( "Argument acquisition_function must be None or an object implementing the " "AbstractAcquisitionFunction, not %s." % type(acquisition_function)) if integrate_acquisition_function: acquisition_function_instance = IntegratedAcquisitionFunction( acquisition_function=acquisition_function_instance, **acq_def_kwargs) # initialize optimizer on acquisition function acq_func_opt_kwargs = { 'acquisition_function': acquisition_function_instance, 'config_space': scenario.cs, # type: ignore[attr-defined] # noqa F821 'rng': rng, } if acquisition_function_optimizer_kwargs is not None: acq_func_opt_kwargs.update(acquisition_function_optimizer_kwargs) if acquisition_function_optimizer is None: for key, value in { 'max_steps': scenario. sls_max_steps, # type: ignore[attr-defined] # noqa F821 'n_steps_plateau_walk': scenario. sls_n_steps_plateau_walk, # type: ignore[attr-defined] # noqa F821 }.items(): if key not in acq_func_opt_kwargs: acq_func_opt_kwargs[key] = value acquisition_function_optimizer_instance = ( LocalAndSortedRandomSearch( ** acq_func_opt_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: AcquisitionFunctionMaximizer elif inspect.isclass(acquisition_function_optimizer): acquisition_function_optimizer_instance = acquisition_function_optimizer( **acq_func_opt_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError( "Argument acquisition_function_optimizer must be None or an object implementing the " "AcquisitionFunctionMaximizer, but is '%s'" % type(acquisition_function_optimizer)) # initialize tae_runner # First case, if tae_runner is None, the target algorithm is a call # string in the scenario file tae_def_kwargs = { 'stats': self.stats, 'run_obj': scenario.run_obj, 'par_factor': scenario.par_factor, # type: ignore[attr-defined] # noqa F821 'cost_for_crash': scenario.cost_for_crash, # type: ignore[attr-defined] # noqa F821 'abort_on_first_run_crash': scenario. 
abort_on_first_run_crash # type: ignore[attr-defined] # noqa F821 } if tae_runner_kwargs is not None: tae_def_kwargs.update(tae_runner_kwargs) if 'ta' not in tae_def_kwargs: tae_def_kwargs[ 'ta'] = scenario.ta # type: ignore[attr-defined] # noqa F821 if tae_runner is None: tae_def_kwargs[ 'ta'] = scenario.ta # type: ignore[attr-defined] # noqa F821 tae_runner_instance = ( ExecuteTARunOld( **tae_def_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: BaseRunner elif inspect.isclass(tae_runner): tae_runner_instance = cast(BaseRunner, tae_runner( **tae_def_kwargs)) # type: ignore[arg-type] # noqa F821 elif callable(tae_runner): tae_def_kwargs['ta'] = tae_runner tae_def_kwargs[ 'use_pynisher'] = scenario.limit_resources # type: ignore[attr-defined] # noqa F821 tae_runner_instance = ExecuteTAFuncDict( **tae_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError( "Argument 'tae_runner' is %s, but must be " "either None, a callable or an object implementing " "BaseRunner. Passing 'None' will result in the " "creation of target algorithm runner based on the " "call string in the scenario file." % type(tae_runner)) # In case of a parallel run, wrap the single worker in a parallel # runner if n_jobs is None or n_jobs == 1: _n_jobs = 1 elif n_jobs == -1: _n_jobs = joblib.cpu_count() elif n_jobs > 0: _n_jobs = n_jobs else: raise ValueError( 'Number of tasks must be positive, None or -1, but is %s' % str(n_jobs)) if _n_jobs > 1 or dask_client is not None: tae_runner_instance = DaskParallelRunner( tae_runner_instance, n_workers=_n_jobs, output_directory=self.output_dir, dask_client=dask_client, ) # Check that overall objective and tae objective are the same # TODO: remove these two ignores once the scenario object knows all its attributes! if tae_runner_instance.run_obj != scenario.run_obj: # type: ignore[union-attr] # noqa F821 raise ValueError( "Objective for the target algorithm runner and " "the scenario must be the same, but are '%s' and " "'%s'" % (tae_runner_instance.run_obj, scenario.run_obj)) # type: ignore[union-attr] # noqa F821 # initialize intensification intensifier_def_kwargs = { 'stats': self.stats, 'traj_logger': traj_logger, 'rng': rng, 'instances': scenario.train_insts, # type: ignore[attr-defined] # noqa F821 'cutoff': scenario.cutoff, # type: ignore[attr-defined] # noqa F821 'deterministic': scenario.deterministic, # type: ignore[attr-defined] # noqa F821 'run_obj_time': scenario.run_obj == "runtime", # type: ignore[attr-defined] # noqa F821 'instance_specifics': scenario. instance_specific, # type: ignore[attr-defined] # noqa F821 'adaptive_capping_slackfactor': scenario. 
intens_adaptive_capping_slackfactor, # type: ignore[attr-defined] # noqa F821 'min_chall': scenario.intens_min_chall # type: ignore[attr-defined] # noqa F821 } if isinstance(intensifier, Intensifier) \ or (intensifier is not None and inspect.isclass(intensifier) and issubclass(intensifier, Intensifier)): intensifier_def_kwargs[ 'always_race_against'] = scenario.cs.get_default_configuration( ) # type: ignore[attr-defined] # noqa F821 intensifier_def_kwargs[ 'use_ta_time_bound'] = scenario.use_ta_time # type: ignore[attr-defined] # noqa F821 intensifier_def_kwargs[ 'minR'] = scenario.minR # type: ignore[attr-defined] # noqa F821 intensifier_def_kwargs[ 'maxR'] = scenario.maxR # type: ignore[attr-defined] # noqa F821 if intensifier_kwargs is not None: intensifier_def_kwargs.update(intensifier_kwargs) if intensifier is None: intensifier_instance = ( Intensifier(**intensifier_def_kwargs ) # type: ignore[arg-type] # noqa F821 ) # type: AbstractRacer elif inspect.isclass(intensifier): intensifier_instance = intensifier( **intensifier_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError( "Argument intensifier must be None or an object implementing the AbstractRacer, but is '%s'" % type(intensifier)) # initial design if initial_design is not None and initial_configurations is not None: raise ValueError( "Either use initial_design or initial_configurations; but not both" ) init_design_def_kwargs = { 'cs': scenario.cs, # type: ignore[attr-defined] # noqa F821 'traj_logger': traj_logger, 'rng': rng, 'ta_run_limit': scenario.ta_run_limit, # type: ignore[attr-defined] # noqa F821 'configs': initial_configurations, 'n_configs_x_params': 0, 'max_config_fracs': 0.0 } if initial_design_kwargs is not None: init_design_def_kwargs.update(initial_design_kwargs) if initial_configurations is not None: initial_design_instance = InitialDesign(**init_design_def_kwargs) elif initial_design is None: if scenario.initial_incumbent == "DEFAULT": # type: ignore[attr-defined] # noqa F821 init_design_def_kwargs['max_config_fracs'] = 0.0 initial_design_instance = DefaultConfiguration( **init_design_def_kwargs) elif scenario.initial_incumbent == "RANDOM": # type: ignore[attr-defined] # noqa F821 init_design_def_kwargs['max_config_fracs'] = 0.0 initial_design_instance = RandomConfigurations( **init_design_def_kwargs) elif scenario.initial_incumbent == "LHD": # type: ignore[attr-defined] # noqa F821 initial_design_instance = LHDesign(**init_design_def_kwargs) elif scenario.initial_incumbent == "FACTORIAL": # type: ignore[attr-defined] # noqa F821 initial_design_instance = FactorialInitialDesign( **init_design_def_kwargs) elif scenario.initial_incumbent == "SOBOL": # type: ignore[attr-defined] # noqa F821 initial_design_instance = SobolDesign(**init_design_def_kwargs) else: raise ValueError("Don't know what kind of initial_incumbent " "'%s' is" % scenario.initial_incumbent ) # type: ignore[attr-defined] # noqa F821 elif inspect.isclass(initial_design): initial_design_instance = initial_design(**init_design_def_kwargs) else: raise TypeError( "Argument initial_design must be None or an object implementing the InitialDesign, but is '%s'" % type(initial_design)) # if we log the performance data, # the RFRImputator will already get # log transform data from the runhistory if scenario.transform_y in [ "LOG", "LOGS" ]: # type: ignore[attr-defined] # noqa F821 cutoff = np.log(np.nanmin([ np.inf, np.float_(scenario.cutoff) ])) # type: ignore[attr-defined] # noqa F821 threshold = cutoff + np.log( scenario.par_factor) # 
type: ignore[attr-defined] # noqa F821 else: cutoff = np.nanmin([np.inf, np.float_(scenario.cutoff) ]) # type: ignore[attr-defined] # noqa F821 threshold = cutoff * scenario.par_factor # type: ignore[attr-defined] # noqa F821 num_params = len(scenario.cs.get_hyperparameters() ) # type: ignore[attr-defined] # noqa F821 imputor = RFRImputator(rng=rng, cutoff=cutoff, threshold=threshold, model=model_instance, change_threshold=0.01, max_iter=2) r2e_def_kwargs = { 'scenario': scenario, 'num_params': num_params, 'success_states': [ StatusType.SUCCESS, ], 'impute_censored_data': True, 'impute_state': [ StatusType.CAPPED, ], 'imputor': imputor, 'scale_perc': 5 } if scenario.run_obj == 'quality': r2e_def_kwargs.update({ 'success_states': [StatusType.SUCCESS, StatusType.CRASHED, StatusType.MEMOUT], 'impute_censored_data': False, 'impute_state': None, }) if isinstance( intensifier_instance, (SuccessiveHalving, Hyperband)) and scenario.run_obj == "quality": r2e_def_kwargs.update({ 'success_states': [ StatusType.SUCCESS, StatusType.CRASHED, StatusType.MEMOUT, StatusType.DONOTADVANCE, ], 'consider_for_higher_budgets_state': [ StatusType.DONOTADVANCE, StatusType.TIMEOUT, StatusType.CRASHED, StatusType.MEMOUT, ], }) if runhistory2epm_kwargs is not None: r2e_def_kwargs.update(runhistory2epm_kwargs) if runhistory2epm is None: if scenario.run_obj == 'runtime': rh2epm = ( RunHistory2EPM4LogCost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 ) # type: AbstractRunHistory2EPM elif scenario.run_obj == 'quality': if scenario.transform_y == "NONE": # type: ignore[attr-defined] # noqa F821 rh2epm = RunHistory2EPM4Cost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 elif scenario.transform_y == "LOG": # type: ignore[attr-defined] # noqa F821 rh2epm = RunHistory2EPM4LogCost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 elif scenario.transform_y == "LOGS": # type: ignore[attr-defined] # noqa F821 rh2epm = RunHistory2EPM4LogScaledCost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 elif scenario.transform_y == "INVS": # type: ignore[attr-defined] # noqa F821 rh2epm = RunHistory2EPM4InvScaledCost( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise ValueError('Unknown run objective: %s. Should be either ' 'quality or runtime.' % self.scenario.run_obj) elif inspect.isclass(runhistory2epm): rh2epm = runhistory2epm( **r2e_def_kwargs) # type: ignore[arg-type] # noqa F821 else: raise TypeError( "Argument runhistory2epm must be None or an object implementing the RunHistory2EPM, but is '%s'" % type(runhistory2epm)) smbo_args = { 'scenario': scenario, 'stats': self.stats, 'initial_design': initial_design_instance, 'runhistory': runhistory, 'runhistory2epm': rh2epm, 'intensifier': intensifier_instance, 'num_run': run_id, 'model': model_instance, 'acq_optimizer': acquisition_function_optimizer_instance, 'acquisition_func': acquisition_function_instance, 'rng': rng, 'restore_incumbent': restore_incumbent, 'random_configuration_chooser': random_configuration_chooser_instance, 'tae_runner': tae_runner_instance, } # type: Dict[str, Any] if smbo_class is None: self.solver = SMBO(** smbo_args) # type: ignore[arg-type] # noqa F821 else: self.solver = smbo_class( **smbo_args) # type: ignore[arg-type] # noqa F821
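# Hedged sketch of the class-plus-kwargs pattern implemented by the
# constructor above: components may be passed as classes and are instantiated
# by the facade with merged default/user kwargs. The facade name `SMAC4AC`,
# its optimize() method, and `scenario`/`target_func` are assumptions or
# placeholders; the keyword names come from the signature above.
from smac.optimizer.acquisition import LogEI

smac = SMAC4AC(
    scenario=scenario,
    tae_runner=target_func,                  # a plain callable is wrapped in ExecuteTAFuncDict
    acquisition_function=LogEI,              # passed as a class, instantiated with the injected model
    model_kwargs={"num_trees": 20,           # merged into the RandomForestWithInstances defaults
                  "ratio_features": 1.0},
    n_jobs=4,                                # >1 wraps the runner in a DaskParallelRunner
    run_id=1,
)
incumbent = smac.optimize()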
def runhistory_builder(ta, scenario_dic, rng):
    # NOTE: ExecuteTARun is the abstract base runner; a concrete runner such as
    # ExecuteTARunOld (call-string target) or ExecuteTAFuncDict (Python callable)
    # is normally used here.
    tae_runner = ExecuteTARun(ta=ta)
    scenario = Scenario(scenario_dic)
    stats = Stats(scenario=scenario)
    traj_logger = TrajLogger(stats=stats, output_dir="/home/dfki/Desktop/temp")

    # inject the stats object into the runner, as the SMAC facade does
    tae_runner.stats = stats
    stats.start_timing()

    # start from the default configuration and overwrite its values with a
    # hand-written random-forest configuration
    default_config_builder = DefaultConfiguration(tae_runner=tae_runner,
                                                  scenario=scenario,
                                                  stats=stats,
                                                  traj_logger=traj_logger,
                                                  rng=rng)
    config = default_config_builder._select_configuration()
    config._values = {
        'balancing:strategy': 'none',
        'categorical_encoding:__choice__': 'one_hot_encoding',
        'categorical_encoding:one_hot_encoding:use_minimum_fraction': 'True',
        'categorical_encoding:one_hot_encoding:minimum_fraction': 0.01,
        'classifier:__choice__': 'random_forest',
        'classifier:random_forest:bootstrap': 'True',
        'classifier:random_forest:criterion': 'gini',
        'classifier:random_forest:max_depth': 10,
        'classifier:random_forest:max_features': 0.5,
        'classifier:random_forest:max_leaf_nodes': 'None',
        'classifier:random_forest:min_impurity_decrease': 0.0,
        'classifier:random_forest:min_samples_leaf': 1,
        'classifier:random_forest:min_samples_split': 2,
        'classifier:random_forest:min_weight_fraction_leaf': 0.0,
        'classifier:random_forest:n_estimators': 100,
        'imputation:strategy': 'mean',
        'preprocessor:__choice__': 'no_preprocessing',
        'rescaling:__choice__': 'standardize',
    }

    # evaluate the configuration once and store the result in a fresh runhistory
    status, cost, runtime, additional_info = tae_runner.start(config=config,
                                                              instance=None)
    print(status, cost, runtime, additional_info)

    runhistory = RunHistory(aggregate_func=average_cost)
    runhistory.add(config=config,
                   cost=cost,
                   time=runtime,
                   status=status,
                   instance_id=None,
                   additional_info=additional_info)
    return runhistory
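# Hedged sketch of consuming the RunHistory returned by runhistory_builder
# above; `ta`, `scenario_dic` and `rng` are the caller's objects. In this SMAC
# version, RunHistory.data maps RunKey(config_id, instance_id, seed) entries to
# RunValue(cost, time, status, additional_info) entries.
rh = runhistory_builder(ta, scenario_dic, rng)
for run_key, run_value in rh.data.items():
    print(run_key.config_id, run_value.status, run_value.cost, run_value.time)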
def __init__( self, scenario: Scenario, # TODO: once we drop python3.4 add type hint # typing.Union[ExecuteTARun, callable] tae_runner=None, runhistory: RunHistory = None, intensifier: Intensifier = None, acquisition_function: AbstractAcquisitionFunction = None, model: AbstractEPM = None, runhistory2epm: AbstractRunHistory2EPM = None, initial_design: InitialDesign = None, initial_configurations: typing.List[Configuration] = None, stats: Stats = None, rng: np.random.RandomState = None): ''' Facade to use SMAC default mode Parameters ---------- scenario: smac.scenario.scenario.Scenario Scenario object tae_runner: ExecuteTARun or callable Callable or implementation of :class:`ExecuteTaRun`. In case a callable is passed it will be wrapped by tae.ExecuteTaFunc(). If not set, tae_runner will be initialized with the tae.ExecuteTARunOld() runhistory: RunHistory runhistory to store all algorithm runs intensifier: Intensifier intensification object to issue a racing to decide the current incumbent acquisition_function : AcquisitionFunction Object that implements the AbstractAcquisitionFunction. Will use EI if not set. model : AbstractEPM Model that implements train() and predict(). Will use a RandomForest if not set. runhistory2epm : RunHistory2EMP Object that implements the AbstractRunHistory2EPM. If None, will use RunHistory2EPM4Cost if objective is cost or RunHistory2EPM4LogCost if objective is runtime. initial_design: InitialDesign initial sampling design initial_configurations: typing.List[Configuration] list of initial configurations for initial design -- cannot be used together with initial_design stats: Stats optional stats object rng: np.random.RandomState Random number generator ''' self.logger = logging.getLogger("SMAC") aggregate_func = average_cost # initialize stats object if stats: self.stats = stats else: self.stats = Stats(scenario) # initialize empty runhistory if runhistory is None: runhistory = RunHistory(aggregate_func=aggregate_func) # initial random number generator num_run, rng = self._get_rng(rng=rng) # reset random number generator in config space to draw different # random configurations with each seed given to SMAC scenario.cs.seed(rng.randint(MAXINT)) # initial Trajectory Logger traj_logger = TrajLogger(output_dir=scenario.output_dir, stats=self.stats) # initial EPM types = get_types(scenario.cs, scenario.feature_array) if model is None: model = RandomForestWithInstances( types=types, instance_features=scenario.feature_array, seed=rng.randint(MAXINT)) # initial acquisition function if acquisition_function is None: acquisition_function = EI(model=model) # initialize optimizer on acquisition function local_search = LocalSearch(acquisition_function, scenario.cs) # initialize tae_runner # First case, if tae_runner is None, the target algorithm is a call # string in the scenario file if tae_runner is None: tae_runner = ExecuteTARunOld(ta=scenario.ta, stats=self.stats, run_obj=scenario.run_obj, runhistory=runhistory, par_factor=scenario.par_factor) # Second case, the tae_runner is a function to be optimized elif callable(tae_runner): tae_runner = ExecuteTAFuncDict(ta=tae_runner, stats=self.stats, run_obj=scenario.run_obj, memory_limit=scenario.memory_limit, runhistory=runhistory, par_factor=scenario.par_factor) # Third case, if it is an ExecuteTaRun we can simply use the # instance. 
Otherwise, the next check raises an exception elif not isinstance(tae_runner, ExecuteTARun): raise TypeError("Argument 'tae_runner' is %s, but must be " "either a callable or an instance of " "ExecuteTaRun. Passing 'None' will result in the " "creation of target algorithm runner based on the " "call string in the scenario file." % type(tae_runner)) # Check that overall objective and tae objective are the same if tae_runner.run_obj != scenario.run_obj: raise ValueError("Objective for the target algorithm runner and " "the scenario must be the same, but are '%s' and " "'%s'" % (tae_runner.run_obj, scenario.run_obj)) # inject stats if necessary if tae_runner.stats is None: tae_runner.stats = self.stats # inject runhistory if necessary if tae_runner.runhistory is None: tae_runner.runhistory = runhistory # initial intensification if intensifier is None: intensifier = Intensifier( tae_runner=tae_runner, stats=self.stats, traj_logger=traj_logger, rng=rng, instances=scenario.train_insts, cutoff=scenario.cutoff, deterministic=scenario.deterministic, run_obj_time=scenario.run_obj == "runtime", instance_specifics=scenario.instance_specific, minR=scenario.minR, maxR=scenario.maxR) # initial design if initial_design is not None and initial_configurations is not None: raise ValueError( "Either use initial_design or initial_configurations; but not both" ) if initial_configurations is not None: initial_design = MultiConfigInitialDesign( tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, runhistory=runhistory, rng=rng, configs=initial_configurations, intensifier=intensifier, aggregate_func=aggregate_func) elif initial_design is None: if scenario.initial_incumbent == "DEFAULT": initial_design = DefaultConfiguration(tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) elif scenario.initial_incumbent == "RANDOM": initial_design = RandomConfiguration(tae_runner=tae_runner, scenario=scenario, stats=self.stats, traj_logger=traj_logger, rng=rng) else: raise ValueError("Don't know what kind of initial_incumbent " "'%s' is" % scenario.initial_incumbent) # initial conversion of runhistory into EPM data if runhistory2epm is None: num_params = len(scenario.cs.get_hyperparameters()) if scenario.run_obj == "runtime": # if we log the performance data, # the RFRImputator will already get # log transform data from the runhistory cutoff = np.log10(scenario.cutoff) threshold = np.log10(scenario.cutoff * scenario.par_factor) imputor = RFRImputator(rs=rng, cutoff=cutoff, threshold=threshold, model=model, change_threshold=0.01, max_iter=2) runhistory2epm = RunHistory2EPM4LogCost( scenario=scenario, num_params=num_params, success_states=[ StatusType.SUCCESS, ], impute_censored_data=True, impute_state=[ StatusType.TIMEOUT, ], imputor=imputor) elif scenario.run_obj == 'quality': runhistory2epm = RunHistory2EPM4Cost\ (scenario=scenario, num_params=num_params, success_states=[StatusType.SUCCESS, ], impute_censored_data=False, impute_state=None) else: raise ValueError('Unknown run objective: %s. Should be either ' 'quality or runtime.' % self.scenario.run_obj) self.solver = SMBO(scenario=scenario, stats=self.stats, initial_design=initial_design, runhistory=runhistory, runhistory2epm=runhistory2epm, intensifier=intensifier, aggregate_func=aggregate_func, num_run=num_run, model=model, acq_optimizer=local_search, acquisition_func=acquisition_function, rng=rng)
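# Hedged sketch of warm-starting the facade above with explicit configurations;
# `scenario` and `target_func` are placeholders and the facade class name
# `SMAC` plus its optimize() method are assumptions. Passing initial_design and
# initial_configurations together raises the ValueError shown in the constructor.
warmstart = scenario.cs.sample_configuration(size=3)

smac = SMAC(scenario=scenario,
            tae_runner=target_func,
            initial_configurations=warmstart)   # builds a MultiConfigInitialDesign
incumbent = smac.optimize()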
def create_optimizer(self):
    from smac.epm.rf_with_instances import RandomForestWithInstances
    from smac.initial_design.default_configuration_design import DefaultConfiguration
    from smac.intensification.intensification import Intensifier
    from smac.optimizer.smbo import SMBO
    from smac.optimizer.acquisition import EI
    from smac.optimizer.ei_optimization import InterleavedLocalAndRandomSearch
    from smac.optimizer.objective import average_cost
    from smac.runhistory.runhistory2epm import RunHistory2EPM4Cost
    from smac.tae.execute_ta_run import StatusType
    from smac.utils.constants import MAXINT
    from smac.utils.util_funcs import get_types

    tae_runner = self._priv_evaluator

    # treat crashed runs like successful ones when fitting the model
    runhistory2epm = RunHistory2EPM4Cost(
        scenario=self.scenario,
        num_params=len(self.param_space),
        success_states=[StatusType.SUCCESS, StatusType.CRASHED],
        impute_censored_data=False,
        impute_state=None)

    intensifier = Intensifier(
        tae_runner=tae_runner,
        stats=self.stats,
        traj_logger=self.traj_logger,
        rng=self.rng,
        instances=self.scenario.train_insts,
        cutoff=self.scenario.cutoff,
        deterministic=self.scenario.deterministic,
        run_obj_time=self.scenario.run_obj == "runtime",
        always_race_against=self.scenario.cs.get_default_configuration()
        if self.scenario.always_race_default else None,
        instance_specifics=self.scenario.instance_specific,
        minR=self.scenario.minR,
        maxR=self.scenario.maxR)

    types, bounds = get_types(self.scenario.cs, self.scenario.feature_array)
    model = RandomForestWithInstances(
        types=types,
        bounds=bounds,
        seed=self.rng.randint(MAXINT),
        instance_features=self.scenario.feature_array,
        pca_components=self.scenario.PCA_DIM)
    acq_func = EI(model=model)

    smbo_args = {
        'scenario': self.scenario,
        'stats': self.stats,
        'initial_design': DefaultConfiguration(tae_runner=tae_runner,
                                               scenario=self.scenario,
                                               stats=self.stats,
                                               traj_logger=self.traj_logger,
                                               rng=self.rng),
        'runhistory': self.runhistory,
        'runhistory2epm': runhistory2epm,
        'intensifier': intensifier,
        'aggregate_func': average_cost,
        'num_run': self.seed,
        'model': model,
        'acq_optimizer': InterleavedLocalAndRandomSearch(
            acq_func, self.scenario.cs,
            np.random.RandomState(seed=self.rng.randint(MAXINT))),
        'acquisition_func': acq_func,
        'rng': self.rng,
        'restore_incumbent': None,
    }
    self.smbo = SMBO(**smbo_args)
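# Hedged sketch of driving the hand-assembled optimizer above; assumes `opt` is
# an instance of the surrounding class with scenario, stats, runhistory,
# traj_logger, rng, seed and _priv_evaluator already set, and that SMBO exposes
# the run() loop the SMAC facade normally delegates to.
opt.create_optimizer()
incumbent = opt.smbo.run()      # run the BO loop until the scenario budget is exhausted
opt.stats.print_stats()         # summary of target-algorithm runs and wallclock use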