Example #1
0
 def __init__(self,
              challengers,
              configuration_space,
              random_configuration_chooser=None):
     """Hold a fixed list of challenger configurations.

     Parameters
     ----------
     challengers : list
         Challenger configurations to iterate over.
     configuration_space :
         Configuration space; stored as-is (presumably used elsewhere to
         sample random configurations -- not visible in this method).
     random_configuration_chooser :
         Decides on which iterations a random configuration is used.
         Defaults to a fresh ``ChooserNoCoolDown(2.0)`` per instance.
     """
     # Build the default chooser here rather than in the signature: a
     # default argument is evaluated only once, so every instance would
     # otherwise share (and mutate) the very same stateful chooser object.
     if random_configuration_chooser is None:
         random_configuration_chooser = ChooserNoCoolDown(2.0)
     self.challengers = challengers
     self.configuration_space = configuration_space
     self._index = 0
     self._iteration = 1  # 1-based to prevent from starting with a random configuration
     self.random_configuration_chooser = random_configuration_chooser
Example #2
0
 def __init__(
     self,
     challengers: List[Configuration],
     configuration_space: ConfigurationSpace,
     random_configuration_chooser: Optional[RandomConfigurationChooser] = None,
 ):
     """Hold a fixed list of challenger configurations.

     Parameters
     ----------
     challengers : List[Configuration]
         Challenger configurations to iterate over.
     configuration_space : ConfigurationSpace
         Configuration space; stored as-is (presumably used elsewhere to
         sample random configurations -- not visible in this method).
     random_configuration_chooser : Optional[RandomConfigurationChooser]
         Decides on which iterations a random configuration is used.
         Defaults to a fresh ``ChooserNoCoolDown(2.0)`` per instance.
     """
     # Create the default per instance; a default argument in the signature
     # is evaluated once, so all instances would share one stateful chooser.
     if random_configuration_chooser is None:
         random_configuration_chooser = ChooserNoCoolDown(2.0)
     self.challengers = challengers
     self.configuration_space = configuration_space
     self._index = 0
     self._iteration = 1  # 1-based to prevent from starting with a random configuration
     self.random_configuration_chooser = random_configuration_chooser
Example #3
0
 def __init__(
     self,
     challenger_callback: Callable,
     configuration_space: ConfigurationSpace,
     random_configuration_chooser: Optional[RandomConfigurationChooser] = None,
 ):
     """Obtain challenger configurations lazily from a callback.

     Parameters
     ----------
     challenger_callback : Callable
         Callback producing the challenger list; ``self.challengers`` starts
         as ``None`` and is presumably filled lazily elsewhere.
     configuration_space : ConfigurationSpace
         Configuration space; stored as-is (presumably used elsewhere to
         sample random configurations -- not visible in this method).
     random_configuration_chooser : Optional[RandomConfigurationChooser]
         Decides on which iterations a random configuration is used.
         Defaults to a fresh ``ChooserNoCoolDown(2.0)`` per instance.
     """
     # Avoid the shared-mutable-default pitfall: build the stateful default
     # chooser per instance instead of once at function-definition time.
     if random_configuration_chooser is None:
         random_configuration_chooser = ChooserNoCoolDown(2.0)
     self.challengers_callback = challenger_callback
     self.challengers = None  # type: Optional[List[Configuration]]
     self.configuration_space = configuration_space
     self._index = 0
     self._iteration = 1  # 1-based to prevent from starting with a random configuration
     self.random_configuration_chooser = random_configuration_chooser
Example #4
0
    def __init__(self,
                 scenario: Scenario,
                 stats: Stats,
                 runhistory: RunHistory,
                 runhistory2epm: AbstractRunHistory2EPM,
                 model: RandomForestWithInstances,
                 acq_optimizer: AcquisitionFunctionMaximizer,
                 acquisition_func: AbstractAcquisitionFunction,
                 rng: np.random.RandomState,
                 restore_incumbent: typing.Optional[Configuration] = None,
                 random_configuration_chooser: typing.Optional[
                     RandomConfigurationChooser] = None,
                 predict_x_best: bool = True,
                 min_samples_model: int = 1):
        """Interface to train the EPM and generate next configurations.

        Parameters
        ----------
        scenario : Scenario
            Scenario object; its configuration space ``scenario.cs`` seeds the
            internal random search.
        stats : Stats
            Statistics object with configuration budgets.
        runhistory : RunHistory
            Runhistory with all runs so far.
        runhistory2epm : AbstractRunHistory2EPM
            Converts runhistory data into EPM training data.
        model : RandomForestWithInstances
            Empirical performance model.
        acq_optimizer : AcquisitionFunctionMaximizer
            Optimizer of the acquisition function.
        acquisition_func : AbstractAcquisitionFunction
            Infill criterion used by ``acq_optimizer``.
        rng : np.random.RandomState
            Random number generator.
        restore_incumbent : Configuration, optional
            Incumbent to be used from the start; ONLY used to restore states.
        random_configuration_chooser : RandomConfigurationChooser, optional
            Chooser for random configurations; defaults to a fresh
            ``ChooserNoCoolDown(2.0)`` per instance.
        predict_x_best : bool
            Compute x_best for the acquisition function via the model instead
            of via the observations.
        min_samples_model : int
            Minimum number of samples to build a model.
        """
        # Build the default chooser here rather than in the signature: a
        # default argument is evaluated once at definition time, so every
        # instance would otherwise share one stateful chooser object.
        if random_configuration_chooser is None:
            random_configuration_chooser = ChooserNoCoolDown(2.0)

        self.logger = logging.getLogger(self.__module__ + "." +
                                        self.__class__.__name__)
        self.incumbent = restore_incumbent

        self.scenario = scenario
        self.stats = stats
        self.runhistory = runhistory
        self.rh2EPM = runhistory2epm
        self.model = model
        self.acq_optimizer = acq_optimizer
        self.acquisition_func = acquisition_func
        self.rng = rng
        self.random_configuration_chooser = random_configuration_chooser

        self._random_search = RandomSearch(
            acquisition_func,
            self.scenario.cs,  # type: ignore[attr-defined] # noqa F821
            rng,
        )

        self.initial_design_configs = []  # type: typing.List[Configuration]

        self.predict_x_best = predict_x_best

        self.min_samples_model = min_samples_model
        self.currently_considered_budgets = [
            0.0,
        ]
Example #5
0
 def test_no_cool_down(self):
     """ChooserNoCoolDown fires exactly on multiples of the modulus, and
     next_smbo_iteration() does not change that schedule."""
     chooser = ChooserNoCoolDown(rng=np.random.RandomState(), modulus=3.0)
     expectations = [
         (1, False), (2, False), (3, True),
         (4, False), (5, False), (6, True), (30, True),
     ]
     for iteration, should_fire in expectations:
         if should_fire:
             self.assertTrue(chooser.check(iteration))
         else:
             self.assertFalse(chooser.check(iteration))
     # Advancing the SMBO iteration must leave the schedule unchanged.
     chooser.next_smbo_iteration()
     for iteration, should_fire in expectations:
         if should_fire:
             self.assertTrue(chooser.check(iteration))
         else:
             self.assertFalse(chooser.check(iteration))
     # With modulus 1.0 every iteration fires, before and after advancing.
     chooser = ChooserNoCoolDown(rng=np.random.RandomState(), modulus=1.0)
     self.assertTrue(chooser.check(1))
     self.assertTrue(chooser.check(2))
     chooser.next_smbo_iteration()
     self.assertTrue(chooser.check(1))
     self.assertTrue(chooser.check(2))
Example #6
0
    def __init__(self,
                 scenario: Scenario,
                 stats: Stats,
                 runhistory: RunHistory,
                 runhistory2epm: AbstractRunHistory2EPM,
                 model: RandomForestWithInstances,
                 acq_optimizer: AcquisitionFunctionMaximizer,
                 acquisition_func: AbstractAcquisitionFunction,
                 rng: np.random.RandomState,
                 restore_incumbent: typing.Optional[Configuration] = None,
                 random_configuration_chooser: typing.Optional[
                     RandomConfigurationChooser] = None,
                 predict_x_best: bool = True,
                 min_samples_model: int = 1):
        """
        Interface to train the EPM and generate next configurations

        Parameters
        ----------

        scenario: smac.scenario.scenario.Scenario
            Scenario object
        stats: smac.stats.stats.Stats
            statistics object with configuration budgets
        runhistory: smac.runhistory.runhistory.RunHistory
            runhistory with all runs so far
        runhistory2epm: smac.runhistory.runhistory2epm.AbstractRunHistory2EPM
            converts runhistory data into EPM training data
        model: smac.epm.rf_with_instances.RandomForestWithInstances
            empirical performance model (right now, we support only
            RandomForestWithInstances)
        acq_optimizer: smac.optimizer.ei_optimization.AcquisitionFunctionMaximizer
            Optimizer of acquisition function.
        acquisition_func: smac.optimizer.acquisition.AbstractAcquisitionFunction
            infill criterion used by acq_optimizer
        restore_incumbent: Configuration
            incumbent to be used from the start. ONLY used to restore states.
        rng: np.random.RandomState
            Random number generator
        random_configuration_chooser:
            Chooser for random configuration -- one of

            * ChooserNoCoolDown(modulus)
            * ChooserLinearCoolDown(start_modulus, modulus_increment, end_modulus)

            Defaults to a fresh ChooserNoCoolDown(2.0) per instance.
        predict_x_best: bool
            Choose x_best for computing the acquisition function via the model instead of via the observations.
        min_samples_model: int
            Minimum number of samples to build a model
        """
        # Instantiate the default chooser per instance; a default argument in
        # the signature is evaluated once, so all EPMChooser objects would
        # otherwise share (and mutate) one stateful chooser.
        if random_configuration_chooser is None:
            random_configuration_chooser = ChooserNoCoolDown(2.0)

        self.logger = logging.getLogger(self.__module__ + "." +
                                        self.__class__.__name__)
        self.incumbent = restore_incumbent

        self.scenario = scenario
        self.stats = stats
        self.runhistory = runhistory
        self.rh2EPM = runhistory2epm
        self.model = model
        self.acq_optimizer = acq_optimizer
        self.acquisition_func = acquisition_func
        self.rng = rng
        self.random_configuration_chooser = random_configuration_chooser

        self._random_search = RandomSearch(
            acquisition_func,
            self.scenario.cs,  # type: ignore[attr-defined] # noqa F821
            rng,
        )

        self.initial_design_configs = []  # type: typing.List[Configuration]

        self.predict_x_best = predict_x_best

        self.min_samples_model = min_samples_model
        self.currently_considered_budgets = [
            0.0,
        ]
Example #7
0
    def __init__(
        self,
        scenario: Scenario,
        stats: Stats,
        initial_design: InitialDesign,
        runhistory: RunHistory,
        runhistory2epm: AbstractRunHistory2EPM,
        intensifier: AbstractRacer,
        num_run: int,
        model: AbstractEPM,
        acq_optimizer: AcquisitionFunctionMaximizer,
        acquisition_func: AbstractAcquisitionFunction,
        rng: np.random.RandomState,
        tae_runner: BaseRunner,
        restore_incumbent: typing.Optional[Configuration] = None,
        random_configuration_chooser: typing.Optional[
            RandomConfigurationChooser] = None,
        predict_x_best: bool = True,
        min_samples_model: int = 1,
    ):
        """Interface that contains the main Bayesian optimization loop.

        Parameters
        ----------
        scenario : Scenario
            Scenario object; its configuration space is kept in
            ``self.config_space``.
        stats : Stats
            Statistics object with configuration budgets.
        initial_design : InitialDesign
            Initial sampling design.
        runhistory : RunHistory
            Runhistory with all runs so far.
        runhistory2epm : AbstractRunHistory2EPM
            Converts runhistory data into EPM training data (forwarded to the
            EPMChooser).
        intensifier : AbstractRacer
            Intensification of new challengers against the incumbent.
        num_run : int
            Id of this run.
        model : AbstractEPM
            Empirical performance model (forwarded to the EPMChooser).
        acq_optimizer : AcquisitionFunctionMaximizer
            Optimizer of the acquisition function.
        acquisition_func : AbstractAcquisitionFunction
            Infill criterion for ``acq_optimizer``.
        rng : np.random.RandomState
            Random number generator.
        tae_runner : BaseRunner
            Target algorithm run executor.
        restore_incumbent : Configuration, optional
            Incumbent to be used from the start; ONLY used to restore states.
        random_configuration_chooser : RandomConfigurationChooser, optional
            Chooser for random configurations; defaults to a fresh
            ``ChooserNoCoolDown(2.0)`` per instance.
        predict_x_best : bool
            Choose x_best for the acquisition function via the model instead
            of via the observations.
        min_samples_model : int
            Minimum number of samples to build a model.
        """
        # Create the default chooser per instance: a default argument is
        # evaluated only once, so all SMBO objects would otherwise share
        # one stateful chooser.
        if random_configuration_chooser is None:
            random_configuration_chooser = ChooserNoCoolDown(2.0)

        self.logger = logging.getLogger(self.__module__ + "." +
                                        self.__class__.__name__)
        self.incumbent = restore_incumbent

        self.scenario = scenario
        self.config_space = scenario.cs  # type: ignore[attr-defined] # noqa F821
        self.stats = stats
        self.initial_design = initial_design
        self.runhistory = runhistory
        self.intensifier = intensifier
        self.num_run = num_run
        self.rng = rng
        self._min_time = 10**-5
        self.tae_runner = tae_runner

        self.initial_design_configs = []  # type: typing.List[Configuration]

        # TODO: consider if we need an additional EPMChooser for multi-objective optimization
        self.epm_chooser = EPMChooser(
            scenario=scenario,
            stats=stats,
            runhistory=runhistory,
            runhistory2epm=runhistory2epm,
            model=model,  # type: ignore
            acq_optimizer=acq_optimizer,
            acquisition_func=acquisition_func,
            rng=rng,
            restore_incumbent=restore_incumbent,
            random_configuration_chooser=random_configuration_chooser,
            predict_x_best=predict_x_best,
            min_samples_model=min_samples_model,
        )

        # Internal variable - if this is set to True it will gracefully stop SMAC
        self._stop = False

        # Callbacks. All known callbacks have a key. If something does not have a key here, there is
        # no callback available.
        self._callbacks = {
            "_incorporate_run_results": list()
        }  # type: typing.Dict[str, typing.List[typing.Callable]]
        self._callback_to_key = {
            IncorporateRunResultCallback: "_incorporate_run_results",
        }  # type: typing.Dict[typing.Type, str]
Example #8
0
    def __init__(self,
                 scenario: Scenario,
                 stats: Stats,
                 initial_design: InitialDesign,
                 runhistory: RunHistory,
                 runhistory2epm: AbstractRunHistory2EPM,
                 intensifier: Intensifier,
                 aggregate_func: callable,
                 num_run: int,
                 model: RandomForestWithInstances,
                 acq_optimizer: AcquisitionFunctionMaximizer,
                 acquisition_func: AbstractAcquisitionFunction,
                 rng: np.random.RandomState,
                 restore_incumbent: typing.Optional[Configuration] = None,
                 random_configuration_chooser: typing.Optional[typing.Union[
                     ChooserNoCoolDown,
                     ChooserLinearCoolDown]] = None,
                 predict_incumbent: bool = True):
        """
        Interface that contains the main Bayesian optimization loop

        Parameters
        ----------
        scenario: smac.scenario.scenario.Scenario
            Scenario object
        stats: Stats
            statistics object with configuration budgets
        initial_design: InitialDesign
            initial sampling design
        runhistory: RunHistory
            runhistory with all runs so far
        runhistory2epm : AbstractRunHistory2EPM
            Object that implements the AbstractRunHistory2EPM to convert runhistory
            data into EPM data
        intensifier: Intensifier
            intensification of new challengers against incumbent configuration
            (probably with some kind of racing on the instances)
        aggregate_func: callable
            how to aggregate the runs in the runhistory to get the performance of a
             configuration
        num_run: int
            id of this run (used for pSMAC)
        model: RandomForestWithInstances
            empirical performance model (right now, we support only
            RandomForestWithInstances)
        acq_optimizer: AcquisitionFunctionMaximizer
            Optimizer of acquisition function.
        acquisition_func : AbstractAcquisitionFunction
            Object that implements the AbstractAcquisitionFunction (i.e., infill
            criterion for acq_optimizer)
        restore_incumbent: Configuration
            incumbent to be used from the start. ONLY used to restore states.
        rng: np.random.RandomState
            Random number generator
        random_configuration_chooser
            Chooser for random configuration -- one of
            * ChooserNoCoolDown(modulus)
            * ChooserLinearCoolDown(start_modulus, modulus_increment, end_modulus)
            Defaults to a fresh ChooserNoCoolDown(2.0) per instance.
        predict_incumbent: bool
            Use predicted performance of incumbent instead of observed performance
        """
        # Instantiate the default chooser here: a default argument in the
        # signature is evaluated once, so all instances would otherwise
        # share (and mutate) the very same stateful chooser object.
        if random_configuration_chooser is None:
            random_configuration_chooser = ChooserNoCoolDown(2.0)

        self.logger = logging.getLogger(self.__module__ + "." +
                                        self.__class__.__name__)
        self.incumbent = restore_incumbent

        self.scenario = scenario
        self.config_space = scenario.cs
        self.stats = stats
        self.initial_design = initial_design
        self.runhistory = runhistory
        self.rh2EPM = runhistory2epm
        self.intensifier = intensifier
        self.aggregate_func = aggregate_func
        self.num_run = num_run
        self.model = model
        self.acq_optimizer = acq_optimizer
        self.acquisition_func = acquisition_func
        self.rng = rng
        self.random_configuration_chooser = random_configuration_chooser

        self._random_search = RandomSearch(acquisition_func, self.config_space,
                                           rng)

        self.predict_incumbent = predict_incumbent
Example #9
0
    def __init__(self,
                 scenario: Scenario,
                 stats: Stats,
                 initial_design: InitialDesign,
                 runhistory: RunHistory,
                 runhistory2epm: AbstractRunHistory2EPM,
                 intensifier: AbstractRacer,
                 num_run: int,
                 model: RandomForestWithInstances,
                 acq_optimizer: AcquisitionFunctionMaximizer,
                 acquisition_func: AbstractAcquisitionFunction,
                 rng: np.random.RandomState,
                 tae_runner: BaseRunner,
                 restore_incumbent: typing.Optional[Configuration] = None,
                 random_configuration_chooser: typing.Optional[
                     RandomConfigurationChooser] = None,
                 predict_x_best: bool = True,
                 min_samples_model: int = 1):
        """
        Interface that contains the main Bayesian optimization loop

        Parameters
        ----------
        scenario: smac.scenario.scenario.Scenario
            Scenario object
        stats: Stats
            statistics object with configuration budgets
        initial_design: InitialDesign
            initial sampling design
        runhistory: RunHistory
            runhistory with all runs so far
        runhistory2epm : AbstractRunHistory2EPM
            Object that implements the AbstractRunHistory2EPM to convert runhistory
            data into EPM data
        intensifier: Intensifier
            intensification of new challengers against incumbent configuration
            (probably with some kind of racing on the instances)
        num_run: int
            id of this run (used for pSMAC)
        model: RandomForestWithInstances
            empirical performance model (right now, we support only RandomForestWithInstances)
        acq_optimizer: AcquisitionFunctionMaximizer
            Optimizer of acquisition function.
        acquisition_func : AcquisitionFunction
            Object that implements the AbstractAcquisitionFunction (i.e., infill criterion for acq_optimizer)
        restore_incumbent: Configuration
            incumbent to be used from the start. ONLY used to restore states.
        rng: np.random.RandomState
            Random number generator
        tae_runner : smac.tae.base.BaseRunner Object
            target algorithm run executor
        random_configuration_chooser
            Chooser for random configuration -- one of
            * ChooserNoCoolDown(modulus)
            * ChooserLinearCoolDown(start_modulus, modulus_increment, end_modulus)
            Defaults to a fresh ChooserNoCoolDown(2.0) per instance.
        predict_x_best: bool
            Choose x_best for computing the acquisition function via the model instead of via the observations.
        min_samples_model: int
            Minimum number of samples to build a model.
        """
        # Instantiate the default chooser per call: a default argument is
        # evaluated once at definition time, so every SMBO instance would
        # otherwise share one stateful chooser.
        if random_configuration_chooser is None:
            random_configuration_chooser = ChooserNoCoolDown(2.0)

        self.logger = logging.getLogger(self.__module__ + "." +
                                        self.__class__.__name__)
        self.incumbent = restore_incumbent

        self.scenario = scenario
        self.config_space = scenario.cs  # type: ignore[attr-defined] # noqa F821
        self.stats = stats
        self.initial_design = initial_design
        self.runhistory = runhistory
        self.intensifier = intensifier
        self.num_run = num_run
        self.rng = rng
        self._min_time = 10**-5
        self.tae_runner = tae_runner

        self.initial_design_configs = []  # type: typing.List[Configuration]

        # initialize the chooser to get configurations from the EPM
        self.epm_chooser = EPMChooser(
            scenario=scenario,
            stats=stats,
            runhistory=runhistory,
            runhistory2epm=runhistory2epm,
            model=model,
            acq_optimizer=acq_optimizer,
            acquisition_func=acquisition_func,
            rng=rng,
            restore_incumbent=restore_incumbent,
            random_configuration_chooser=random_configuration_chooser,
            predict_x_best=predict_x_best,
            min_samples_model=min_samples_model)

        # Internal variable - if this is set to True it will gracefully stop SMAC
        self._stop = False

        # Callbacks. All known callbacks have a key. If something does not have a key here, there is
        # no callback available.
        self._callbacks = {
            '_incorporate_run_results': list()
        }  # type: typing.Dict[str, typing.List[typing.Callable]]
        self._callback_to_key = {
            IncorporateRunResultCallback: '_incorporate_run_results',
        }  # type: typing.Dict[typing.Type, str]
Example #10
0
File: smbo.py Project: maxc01/SMAC3
    def __init__(self,
                 scenario: Scenario,
                 stats: Stats,
                 initial_design: InitialDesign,
                 runhistory: RunHistory,
                 runhistory2epm: AbstractRunHistory2EPM,
                 intensifier: AbstractRacer,
                 num_run: int,
                 model: RandomForestWithInstances,
                 acq_optimizer: AcquisitionFunctionMaximizer,
                 acquisition_func: AbstractAcquisitionFunction,
                 rng: np.random.RandomState,
                 restore_incumbent: typing.Optional[Configuration] = None,
                 random_configuration_chooser: typing.Optional[RandomConfigurationChooser] = None,
                 predict_x_best: bool = True,
                 min_samples_model: int = 1):
        """
        Interface that contains the main Bayesian optimization loop

        Parameters
        ----------
        scenario: smac.scenario.scenario.Scenario
            Scenario object
        stats: Stats
            statistics object with configuration budgets
        initial_design: InitialDesign
            initial sampling design
        runhistory: RunHistory
            runhistory with all runs so far
        runhistory2epm : AbstractRunHistory2EPM
            Object that implements the AbstractRunHistory2EPM to convert runhistory
            data into EPM data
        intensifier: Intensifier
            intensification of new challengers against incumbent configuration
            (probably with some kind of racing on the instances)
        num_run: int
            id of this run (used for pSMAC)
        model: RandomForestWithInstances
            empirical performance model (right now, we support only RandomForestWithInstances)
        acq_optimizer: AcquisitionFunctionMaximizer
            Optimizer of acquisition function.
        acquisition_func : AcquisitionFunction
            Object that implements the AbstractAcquisitionFunction (i.e., infill criterion for acq_optimizer)
        restore_incumbent: Configuration
            incumbent to be used from the start. ONLY used to restore states.
        rng: np.random.RandomState
            Random number generator
        random_configuration_chooser
            Chooser for random configuration -- one of
            * ChooserNoCoolDown(modulus)
            * ChooserLinearCoolDown(start_modulus, modulus_increment, end_modulus)
            Defaults to a fresh ChooserNoCoolDown(2.0) per instance.
        predict_x_best: bool
            Choose x_best for computing the acquisition function via the model instead of via the observations.
        min_samples_model: int
            Minimum number of samples to build a model
        """
        # Build the default chooser per call rather than in the signature:
        # a default argument is evaluated only once, so every SMBO instance
        # would otherwise share one stateful chooser.
        if random_configuration_chooser is None:
            random_configuration_chooser = ChooserNoCoolDown(2.0)

        self.logger = logging.getLogger(
            self.__module__ + "." + self.__class__.__name__)
        self.incumbent = restore_incumbent

        self.scenario = scenario
        self.config_space = scenario.cs  # type: ignore[attr-defined] # noqa F821
        self.stats = stats
        self.initial_design = initial_design
        self.runhistory = runhistory
        self.intensifier = intensifier
        self.num_run = num_run
        self.rng = rng

        self.initial_design_configs = []  # type: typing.List[Configuration]

        # initialize the chooser to get configurations from the EPM
        self.epm_chooser = EPMChooser(scenario=scenario,
                                      stats=stats,
                                      runhistory=runhistory,
                                      runhistory2epm=runhistory2epm,
                                      model=model,
                                      acq_optimizer=acq_optimizer,
                                      acquisition_func=acquisition_func,
                                      rng=rng,
                                      restore_incumbent=restore_incumbent,
                                      random_configuration_chooser=random_configuration_chooser,
                                      predict_x_best=predict_x_best,
                                      min_samples_model=min_samples_model)