Example #1
 def __init__(self,
              acquisition_function: AbstractAcquisitionFunction,
              config_space: ConfigurationSpace,
              rng: Union[bool, np.random.RandomState] = None,
              rand_prob=0.0):
     super().__init__(acquisition_function, config_space, rng)
     self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
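
ChooserProb itself is not shown in these examples. A minimal stand-in, inferred from how the class is used elsewhere in this listing (a check(iteration) call that returns True with probability prob, and a per-iteration hook), might look like this; it is a sketch, not litebo's implementation:

import numpy as np

class ProbChooserSketch:
    """Hypothetical stand-in for ChooserProb (assumption, for illustration)."""

    def __init__(self, prob, rng=None):
        self.prob = prob
        self.rng = rng if isinstance(rng, np.random.RandomState) \
            else np.random.RandomState(rng)

    def check(self, iteration):
        # True means: use a random configuration this iteration.
        return self.rng.rand() < self.prob

    def next_smbo_iteration(self):
        pass  # litebo calls this once per iteration; a no-op here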
Example #2
 def __init__(self,
              acquisition_function: AbstractAcquisitionFunction,
              config_space: ConfigurationSpace,
              rng: Union[bool, np.random.RandomState] = None,
              num_mc=1000,
              num_opt=1000,
              rand_prob=0.0):
     super().__init__(acquisition_function, config_space, rng)
     self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
     self.num_mc = num_mc
     self.num_opt = num_opt
     self.minimizer = scipy.optimize.minimize
Example #3
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        types, bounds = get_types(self.config_space)
        assert all(types == 0)
        self.bounds = bounds
Example #4
    def __init__(self,
                 objective_function,
                 config_space,
                 sample_strategy='bo',
                 time_limit_per_trial=180,
                 max_runs=200,
                 logging_dir='logs',
                 initial_configurations=None,
                 initial_runs=3,
                 task_id=None,
                 rng=None):
        super().__init__(config_space, task_id, output_dir=logging_dir)
        self.logger = super()._get_logger(self.__class__.__name__)
        if rng is None:
            run_id, rng = get_rng()

        self.init_num = initial_runs
        self.max_iterations = max_runs
        self.iteration_id = 0
        self.sls_max_steps = None
        self.n_sls_iterations = 5
        self.sls_n_steps_plateau_walk = 10
        self.time_limit_per_trial = time_limit_per_trial
        self.default_obj_value = MAXINT
        self.sample_strategy = sample_strategy

        self.configurations = list()
        self.failed_configurations = list()
        self.perfs = list()

        # Initialize the basic component in BO.
        self.config_space.seed(rng.randint(MAXINT))
        self.objective_function = objective_function
        types, bounds = get_types(config_space)
        # TODO: what is the feature array.
        self.model = RandomForestWithInstances(types=types,
                                               bounds=bounds,
                                               seed=rng.randint(MAXINT))
        self.acquisition_function = EI(self.model)
        self.optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function=self.acquisition_function,
            config_space=self.config_space,
            rng=np.random.RandomState(seed=rng.randint(MAXINT)),
            max_steps=self.sls_max_steps,
            n_steps_plateau_walk=self.sls_n_steps_plateau_walk,
            n_sls_iterations=self.n_sls_iterations)
        self._random_search = RandomSearch(self.acquisition_function,
                                           self.config_space, rng)
        self.random_configuration_chooser = ChooserProb(prob=0.25, rng=rng)
Example #5
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 batch_size=None,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        if batch_size is None:
            types, bounds = get_types(self.config_space)
            dim = np.sum(types == 0)
            self.batch_size = min(5000, max(2000, 200 * dim))
        else:
            self.batch_size = batch_size
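
The default batch size is a clamp heuristic: it grows with the number of continuous dimensions but stays within [2000, 5000]. A quick check of its behavior:

for dim in (1, 10, 25, 50):
    print(dim, min(5000, max(2000, 200 * dim)))
# 1 -> 2000, 10 -> 2000, 25 -> 5000, 50 -> 5000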
Example #6
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        types, bounds = get_types(self.config_space)
        assert all(types == 0)
        self.bounds = bounds

        options = dict(disp=False, maxiter=1000)
        self.scipy_config = dict(tol=None, method='L-BFGS-B', options=options)
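
The stored scipy_config dict maps directly onto keyword arguments of scipy.optimize.minimize, so it can be splatted into the call. A self-contained sketch on a toy quadratic (the objective here is illustrative only):

import numpy as np
import scipy.optimize

options = dict(disp=False, maxiter=1000)
scipy_config = dict(tol=None, method='L-BFGS-B', options=options)

result = scipy.optimize.minimize(lambda x: float(np.sum((x - 0.2) ** 2)),
                                 x0=np.array([0.5, 0.5]),
                                 bounds=[(0.0, 1.0)] * 2,
                                 **scipy_config)
print(result.x)  # close to [0.2, 0.2]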
Example #7
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)

        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        self.random_search = InterleavedLocalAndRandomSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
        self.scipy_optimizer = ScipyOptimizer(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
Example #8
 def __init__(self,
              acquisition_function: AbstractAcquisitionFunction,
              config_space: ConfigurationSpace,
              rng: Union[bool, np.random.RandomState] = None,
              max_steps: Optional[int] = None,
              n_steps_plateau_walk: int = 10,
              n_sls_iterations: int = 10,
              rand_prob=0.25):
     super().__init__(acquisition_function, config_space, rng)
     self.random_search = RandomSearch(
         acquisition_function=acquisition_function,
         config_space=config_space,
         rng=rng)
     self.local_search = LocalSearch(
         acquisition_function=acquisition_function,
         config_space=config_space,
         rng=rng,
         max_steps=max_steps,
         n_steps_plateau_walk=n_steps_plateau_walk)
     self.n_sls_iterations = n_sls_iterations
     self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
Example #9
 def __init__(
     self,
     acquisition_function: AbstractAcquisitionFunction,
     config_space: ConfigurationSpace,
     num_random: int = 1000,
     num_restarts: int = 20,
     raw_samples: int = 1024,
     batch_limit: int = 5,
     scipy_maxiter: int = 200,
     rand_prob: float = 0.0,
     rng: Union[bool, np.random.RandomState] = None,
 ):
     super().__init__(acquisition_function, config_space, rng)
     self.num_random = num_random
     self.num_restarts = num_restarts
     self.raw_samples = raw_samples
     self.batch_limit = batch_limit
     self.scipy_max_iter = scipy_maxiter
     self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
     self.minimizer = scipy.optimize.minimize
     self.method = "L-BFGS-B"
     self.dim = len(self.config_space.get_hyperparameters())
     self.bound = (0.0, 1.0)  # todo only on continuous dims (int, float) now
Example #10
class BayesianOptimization(BaseFacade):
    def __init__(self,
                 objective_function,
                 config_space,
                 sample_strategy='bo',
                 time_limit_per_trial=180,
                 max_runs=200,
                 logging_dir='logs',
                 initial_configurations=None,
                 initial_runs=3,
                 task_id=None,
                 rng=None):
        super().__init__(config_space, task_id, output_dir=logging_dir)
        self.logger = super()._get_logger(self.__class__.__name__)
        if rng is None:
            run_id, rng = get_rng()

        self.init_num = initial_runs
        self.max_iterations = max_runs
        self.iteration_id = 0
        self.sls_max_steps = None
        self.n_sls_iterations = 5
        self.sls_n_steps_plateau_walk = 10
        self.time_limit_per_trial = time_limit_per_trial
        self.default_obj_value = MAXINT
        self.sample_strategy = sample_strategy

        self.configurations = list()
        self.failed_configurations = list()
        self.perfs = list()
        # Objective statistics read by iterate(); initialized here so the
        # first call works (initial values are assumptions inferred from how
        # iterate() uses these attributes).
        self.scale_perc = 5
        self.perc = None
        self.min_y = None
        self.max_y = None

        # Initialize the basic component in BO.
        self.config_space.seed(rng.randint(MAXINT))
        self.objective_function = objective_function
        types, bounds = get_types(config_space)
        # TODO: what is the feature array.
        self.model = RandomForestWithInstances(types=types,
                                               bounds=bounds,
                                               seed=rng.randint(MAXINT))
        self.acquisition_function = EI(self.model)
        self.optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function=self.acquisition_function,
            config_space=self.config_space,
            rng=np.random.RandomState(seed=rng.randint(MAXINT)),
            max_steps=self.sls_max_steps,
            n_steps_plateau_walk=self.sls_n_steps_plateau_walk,
            n_sls_iterations=self.n_sls_iterations)
        self._random_search = RandomSearch(self.acquisition_function,
                                           self.config_space, rng)
        self.random_configuration_chooser = ChooserProb(prob=0.25, rng=rng)

    def run(self):
        while self.iteration_id < self.max_iterations:
            self.iterate()

    def iterate(self):
        if len(self.configurations) == 0:
            X = np.array([])
        else:
            failed_configs = (list() if self.max_y is None
                              else self.failed_configurations.copy())
            X = convert_configurations_to_array(self.configurations +
                                                failed_configs)

        failed_perfs = (list() if self.max_y is None
                        else [self.max_y] * len(self.failed_configurations))
        Y = np.array(self.perfs + failed_perfs, dtype=np.float64)

        config = self.choose_next(X, Y)

        trial_state = SUCCESS
        trial_info = None

        if config not in (self.configurations + self.failed_configurations):
            # Evaluate this configuration.
            try:
                args, kwargs = (config, ), dict()
                timeout_status, _result = time_limit(self.objective_function,
                                                     self.time_limit_per_trial,
                                                     args=args,
                                                     kwargs=kwargs)
                if timeout_status:
                    raise TimeoutException(
                        'Timeout: time limit for this evaluation is %.1fs' %
                        self.time_limit_per_trial)
                else:
                    perf = MAXINT if _result is None else _result
            except Exception as e:
                if isinstance(e, TimeoutException):
                    trial_state = TIMEOUT
                else:
                    traceback.print_exc(file=sys.stdout)
                    trial_state = FAILED
                perf = MAXINT
                trial_info = str(e)
                self.logger.error(trial_info)

            if trial_state == SUCCESS and perf < MAXINT:
                if len(self.configurations) == 0:
                    self.default_obj_value = perf

                self.configurations.append(config)
                self.perfs.append(perf)
                self.history_container.add(config, perf)

                self.perc = np.percentile(self.perfs, self.scale_perc)
                self.min_y = np.min(self.perfs)
                self.max_y = np.max(self.perfs)
            else:
                self.failed_configurations.append(config)
        else:
            self.logger.debug(
                'This configuration has been evaluated! Skip it.')
            if config in self.configurations:
                config_idx = self.configurations.index(config)
                trial_state, perf = SUCCESS, self.perfs[config_idx]
            else:
                trial_state, perf = FAILED, MAXINT

        self.iteration_id += 1
        self.logger.info(
            'Iteration-%d, objective improvement: %.4f' %
            (self.iteration_id, max(0, self.default_obj_value - perf)))
        return config, trial_state, perf, trial_info

    def choose_next(self, X: np.ndarray, Y: np.ndarray):
        _config_num = X.shape[0]
        if _config_num < self.init_num:
            default_config = self.config_space.get_default_configuration()
            if default_config not in (self.configurations +
                                      self.failed_configurations):
                return default_config
            else:
                return self._random_search.maximize(
                    runhistory=self.history_container, num_points=1)[0]

        if self.sample_strategy == 'random':
            return self.sample_config()
        elif self.sample_strategy == 'bo':
            if self.random_configuration_chooser.check(self.iteration_id):
                return self.sample_config()
            else:
                self.model.train(X, Y)

                incumbent_value = self.history_container.get_incumbents()[0][1]

                self.acquisition_function.update(
                    model=self.model,
                    eta=incumbent_value,
                    num_data=len(self.history_container.data))

                challengers = self.optimizer.maximize(
                    runhistory=self.history_container,
                    num_points=5000,
                    random_configuration_chooser=self.random_configuration_chooser)

                return challengers.challengers[0]
        else:
            raise ValueError('Invalid sampling strategy - %s.' %
                             self.sample_strategy)

    def sample_config(self):
        config = None
        _sample_cnt, _sample_limit = 0, 10000
        while True:
            _sample_cnt += 1
            config = self.config_space.sample_configuration()
            if config not in (self.configurations +
                              self.failed_configurations):
                break
            if _sample_cnt >= _sample_limit:
                config = self.config_space.sample_configuration()
                break
        return config
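
A minimal usage sketch of this facade. The BayesianOptimization import path is an assumption (adjust it to the installed package); the configuration space comes from the ConfigSpace library that litebo builds on. The objective returns a scalar to be minimized, matching how iterate() treats _result:

from ConfigSpace import ConfigurationSpace, UniformFloatHyperparameter
from litebo.facade.bo_facade import BayesianOptimization  # assumed path

def objective(config):
    x, y = config['x'], config['y']
    return (x - 0.5) ** 2 + (y + 0.3) ** 2  # scalar, minimized

cs = ConfigurationSpace()
cs.add_hyperparameters([UniformFloatHyperparameter('x', -1.0, 1.0),
                        UniformFloatHyperparameter('y', -1.0, 1.0)])

bo = BayesianOptimization(objective, cs, max_runs=30, task_id='demo')
bo.run()
print(min(bo.perfs))  # best observed objective value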
Example #11
class USeMO_Optimizer(AcquisitionFunctionMaximizer):
    """Implements USeMO optimizer

    Parameters
    ----------
    acquisition_function : ~litebo.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~litebo.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional

    """
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # useless in USeMO
            **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~litebo.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~litebo.ei_optimization.ChallengerList
        """

        acq_vals = np.asarray(self.acquisition_function.uncertainties)
        candidates = np.asarray(self.acquisition_function.candidates)
        assert len(acq_vals.shape) == 1 and len(candidates.shape) == 2 \
               and acq_vals.shape[0] == candidates.shape[0]

        acq_configs = []
        for i in range(acq_vals.shape[0]):
            # convert array to Configuration todo
            config = Configuration(self.config_space, vector=candidates[i])
            acq_configs.append((acq_vals[i], config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
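
The shuffle-before-sort idiom works because Python's sort is stable: shuffling first randomizes the relative order of entries with equal acquisition values, and the stable descending sort then preserves that random order among ties. A toy demonstration:

import numpy as np

rng = np.random.RandomState(1)
acq_configs = [(0.9, 'a'), (0.7, 'b'), (0.9, 'c'), (0.7, 'd')]

rng.shuffle(acq_configs)
acq_configs.sort(reverse=True, key=lambda x: x[0])
print([name for _, name in acq_configs])  # ties a/c and b/d in random order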
Example #12
class batchMCOptimizer(AcquisitionFunctionMaximizer):
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 batch_size=None,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        if batch_size is None:
            types, bounds = get_types(self.config_space)
            dim = np.sum(types == 0)
            self.batch_size = min(5000, max(2000, 200 * dim))
        else:
            self.batch_size = batch_size

    def maximize(self,
                 runhistory: Union[HistoryContainer,
                                   MultiStartHistoryContainer],
                 num_points: int,
                 _sorted: bool = True,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        """Randomly sampled configurations

        Parameters
        ----------
        runhistory: ~litebo.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        _sorted: bool
            whether random configurations are sorted according to acquisition function
        **kwargs
            turbo_state: TurboState
                provide turbo state to use trust region

        Returns
        -------
        iterable
            An iterable consisting of
            tuple(acquisition_value, :class:`litebo.config_space.Configuration`).
        """
        from litebo.utils.samplers import SobolSampler

        cur_idx = 0
        config_acq = list()
        # Use the same weight seed for every batch in this iteration.
        weight_seed = self.rng.randint(0, int(1e8))

        while cur_idx < num_points:
            batch_size = min(self.batch_size, num_points - cur_idx)
            turbo_state = kwargs.get('turbo_state', None)
            if turbo_state is None:
                lower_bounds = None
                upper_bounds = None
            else:
                assert isinstance(runhistory, MultiStartHistoryContainer)
                if runhistory.num_objs > 1:
                    # TODO implement adaptive strategy to choose trust region center for MO
                    raise NotImplementedError()
                else:
                    x_center = random.choice(
                        runhistory.get_incumbents())[0].get_array()
                    lower_bounds = x_center - turbo_state.length / 2.0
                    upper_bounds = x_center + turbo_state.length / 2.0

            sobol_sampler = SobolSampler(self.config_space,
                                         batch_size,
                                         lower_bounds,
                                         upper_bounds,
                                         random_state=self.rng.randint(
                                             0, int(1e8)))
            _configs = sobol_sampler.generate(return_config=True)
            _acq_values = self.acquisition_function(_configs, seed=weight_seed)
            config_acq.extend([(_configs[idx], _acq_values[idx])
                               for idx in range(len(_configs))])

            cur_idx += self.batch_size

        config_acq.sort(reverse=True, key=lambda x: x[1])

        challengers = ChallengerList([_[0] for _ in config_acq],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
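
litebo's SobolSampler is imported inside maximize and is not shown here. A stand-in using scipy.stats.qmc (scipy >= 1.7) illustrates the same idea of quasi-random points inside a possibly shrunken trust-region box:

import numpy as np
from scipy.stats import qmc

dim = 3
lower, upper = np.full(dim, 0.2), np.full(dim, 0.8)  # trust-region box

sampler = qmc.Sobol(d=dim, scramble=True, seed=42)
points = qmc.scale(sampler.random(8), lower, upper)
print(points.shape)  # (8, 3); every row lies inside [0.2, 0.8]^3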
Example #13
class MESMO_Optimizer2(AcquisitionFunctionMaximizer):
    """Implements Scipy optimizer for MESMO. Only on continuous dims

    Parameters
    ----------
    acquisition_function : ~litebo.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~litebo.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional

    """
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 num_mc=1000,
                 num_opt=1000,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
        self.num_mc = num_mc
        self.num_opt = num_opt
        self.minimizer = scipy.optimize.minimize

    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~litebo.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~litebo.ei_optimization.ChallengerList
        """
        def inverse_acquisition(x):
            # x has shape (d,); the acquisition value has shape (1,)
            return -self.acquisition_function(x, convert=False)[0]

        d = len(self.config_space.get_hyperparameters())
        bound = (0.0, 1.0)  # todo only on continuous dims (int, float) now
        bounds = [bound] * d
        acq_configs = []

        # MC
        x_tries = self.rng.uniform(bound[0], bound[1], size=(self.num_mc, d))
        acq_tries = self.acquisition_function(x_tries, convert=False)
        for i in range(x_tries.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=x_tries[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_tries[i], config))

        # L-BFGS-B
        x_seed = self.rng.uniform(low=bound[0],
                                  high=bound[1],
                                  size=(self.num_opt, d))
        for i in range(x_seed.shape[0]):
            x0 = x_seed[i].reshape(1, -1)
            result = self.minimizer(inverse_acquisition,
                                    x0=x0,
                                    method='L-BFGS-B',
                                    bounds=bounds)
            if not result.success:
                continue
            # convert array to Configuration
            config = Configuration(self.config_space, vector=result.x)
            config.origin = 'Scipy'
            acq_val = self.acquisition_function(result.x, convert=False)  # [0]
            acq_configs.append((acq_val, config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
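
The two stages above reduce to a generic pattern: cheap Monte Carlo screening for global coverage, plus L-BFGS-B refinement from random seeds for local precision. A self-contained toy version (the acquisition here is a stand-in):

import numpy as np
import scipy.optimize

rng = np.random.RandomState(0)
d, num_mc, num_opt = 2, 200, 5

def acq(x):  # toy acquisition over the unit square
    return np.exp(-np.sum((np.atleast_2d(x) - 0.7) ** 2, axis=1))

# Stage 1: Monte Carlo screening.
x_tries = rng.uniform(0.0, 1.0, size=(num_mc, d))
scored = [(a, x) for a, x in zip(acq(x_tries), x_tries)]

# Stage 2: L-BFGS-B refinement from fresh random seeds.
for x0 in rng.uniform(0.0, 1.0, size=(num_opt, d)):
    res = scipy.optimize.minimize(lambda x: -acq(x)[0], x0=x0,
                                  method='L-BFGS-B', bounds=[(0.0, 1.0)] * d)
    if res.success:
        scored.append((acq(res.x)[0], res.x))

scored.sort(reverse=True, key=lambda t: t[0])
print(scored[0])  # best point, near (0.7, 0.7)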
Example #14
class StagedBatchScipyOptimizer(AcquisitionFunctionMaximizer):
    """ todo constraints
    Use batch scipy.optimize with start points chosen by specific method. Only on continuous dims.

    Parameters
    ----------
    acquisition_function : ~litebo.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~litebo.config_space.ConfigurationSpace

    num_random : Number of randomly chosen points

    num_restarts : The number of starting points for multistart acquisition
            function optimization

    raw_samples : The number of samples for initialization

    batch_limit : Number of points in a batch optimized jointly by the scipy
            minimizer

    scipy_maxiter : Maximum number of scipy minimizer iterations to perform

    rand_prob : Probability of choosing a random config

    rng : np.random.RandomState or int, optional
    """
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        num_random: int = 1000,
        num_restarts: int = 20,
        raw_samples: int = 1024,
        batch_limit: int = 5,
        scipy_maxiter: int = 200,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.num_random = num_random
        self.num_restarts = num_restarts
        self.raw_samples = raw_samples
        self.batch_limit = batch_limit
        self.scipy_max_iter = scipy_maxiter
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
        self.minimizer = scipy.optimize.minimize
        self.method = "L-BFGS-B"
        self.dim = len(self.config_space.get_hyperparameters())
        self.bound = (0.0, 1.0)  # todo only on continuous dims (int, float) now

    def gen_initial_points(self, num_restarts, raw_samples):
        # todo other strategy
        random_points = self.rng.uniform(self.bound[0],
                                         self.bound[1],
                                         size=(raw_samples, self.dim))
        acq_random = self.acquisition_function(random_points,
                                               convert=False).reshape(-1)
        idx = np.argsort(acq_random)[::-1][:num_restarts]
        return random_points[idx]

    def gen_batch_scipy_points(self, initial_points: np.ndarray):
        def f(X_flattened):
            # Reshape the flat vector back into a (batch, dim) array and sum
            # the acquisition values so scipy refines all points jointly.
            X = X_flattened.reshape(shapeX)
            joint_acq = -self.acquisition_function(X,
                                                   convert=False).sum().item()
            return joint_acq

        shapeX = initial_points.shape
        x0 = initial_points.reshape(-1)
        bounds = [self.bound] * x0.shape[0]

        result = self.minimizer(
            f,
            x0=x0,
            method=self.method,
            bounds=bounds,
            options=dict(maxiter=self.scipy_max_iter),
        )

        # Return result.x even if the minimizer reports failure; a common
        # cause is simply 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'.
        return result.x.reshape(shapeX)

    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> List[Tuple[float, Configuration]]:

        acq_configs = []

        # random points
        random_points = self.rng.uniform(self.bound[0],
                                         self.bound[1],
                                         size=(self.num_random, self.dim))
        acq_random = self.acquisition_function(random_points, convert=False)
        for i in range(random_points.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=random_points[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_random[i], config))

        # scipy points
        initial_points = self.gen_initial_points(
            num_restarts=self.num_restarts, raw_samples=self.raw_samples)

        for start_idx in range(0, self.num_restarts, self.batch_limit):
            end_idx = min(start_idx + self.batch_limit, self.num_restarts)
            # optimize using random restart optimization
            scipy_points = self.gen_batch_scipy_points(
                initial_points[start_idx:end_idx])
            if scipy_points is None:
                continue
            acq_scipy = self.acquisition_function(scipy_points, convert=False)
            for i in range(scipy_points.shape[0]):
                # convert array to Configuration
                config = Configuration(self.config_space,
                                       vector=scipy_points[i])
                config.origin = 'Batch Scipy'
                acq_configs.append((acq_scipy[i], config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()

        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
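
The batching trick in gen_batch_scipy_points flattens a (batch, dim) array of starting points into one vector and sums the per-point acquisition values; since the sum is separable, a single scipy call refines every start at once while amortizing function-call overhead. A toy reduction of the idea:

import numpy as np
import scipy.optimize

rng = np.random.RandomState(0)
b, d = 4, 3  # 4 starting points in 3 dimensions

def acq_batch(X):  # toy acquisition, one value per row
    return np.exp(-np.sum((X - 0.6) ** 2, axis=1))

def joint_negative(x_flat):
    return -acq_batch(x_flat.reshape(b, d)).sum()

x0 = rng.uniform(0.0, 1.0, size=(b, d)).reshape(-1)
res = scipy.optimize.minimize(joint_negative, x0=x0, method='L-BFGS-B',
                              bounds=[(0.0, 1.0)] * (b * d),
                              options=dict(maxiter=200))
print(res.x.reshape(b, d))  # every row driven toward 0.6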
Example #15
class CMAESOptimizer(AcquisitionFunctionMaximizer):
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rng: Union[bool, np.random.RandomState] = None,
        rand_prob=0.25,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()

    def maximize(self, runhistory: HistoryContainer, num_points: int,
                 **kwargs) -> Iterable[Tuple[float, Configuration]]:
        try:
            from cma import CMAEvolutionStrategy
        except ImportError:
            raise ImportError("Package cma is not installed!")

        types, bounds = get_types(self.config_space)
        assert all(types == 0)

        # Check Constant Hyperparameter
        const_idx = list()
        for i, bound in enumerate(bounds):
            if np.isnan(bound[1]):
                const_idx.append(i)

        hp_num = len(bounds) - len(const_idx)
        es = CMAEvolutionStrategy(hp_num * [0],
                                  0.99,
                                  inopts={'bounds': [0, 1]})

        eval_num = 0
        next_configs_by_acq_value = list()
        while eval_num < num_points:
            X = es.ask(number=es.popsize)
            _X = X.copy()
            for i in range(len(_X)):
                for index in const_idx:
                    _X[i] = np.insert(_X[i], index, 0)
            _X = np.asarray(_X)
            values = self.acquisition_function._compute(_X)
            values = np.reshape(values, (-1, ))
            # Note: CMAEvolutionStrategy minimizes the values passed to
            # tell(); to maximize the acquisition one would negate them here.
            es.tell(X, values)
            next_configs_by_acq_value.extend([(values[i], _X[i])
                                              for i in range(es.popsize)])
            eval_num += es.popsize

        next_configs_by_acq_value.sort(reverse=True, key=lambda x: x[0])
        next_configs_by_acq_value = [_[1] for _ in next_configs_by_acq_value]
        next_configs_by_acq_value = [
            Configuration(self.config_space, vector=array)
            for array in next_configs_by_acq_value
        ]

        challengers = ChallengerList(next_configs_by_acq_value,
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
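
For reference, a minimal ask/tell loop with the cma package on a toy function (note again that CMAEvolutionStrategy minimizes whatever values are passed to tell):

import numpy as np
import cma  # pip install cma

es = cma.CMAEvolutionStrategy(3 * [0.5], 0.3, inopts={'bounds': [0, 1]})
while not es.stop():
    X = es.ask()
    values = [float(np.sum((np.asarray(x) - 0.2) ** 2)) for x in X]
    es.tell(X, values)
print(es.result.xbest)  # near [0.2, 0.2, 0.2]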
Example #16
class ScipyGlobalOptimizer(AcquisitionFunctionMaximizer):
    """
    Wraps a scipy global optimizer (differential evolution). Works on
    continuous dimensions only.

    Parameters
    ----------
    acquisition_function : ~litebo.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~litebo.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional
    """
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        types, bounds = get_types(self.config_space)
        assert all(types == 0)
        self.bounds = bounds

    def maximize(self,
                 runhistory: HistoryContainer,
                 initial_config=None,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        def negative_acquisition(x):
            # x has shape (d,); the acquisition value has shape (1,)
            return -self.acquisition_function(x, convert=False)[0]

        acq_configs = []
        result = scipy.optimize.differential_evolution(
            func=negative_acquisition, bounds=self.bounds)
        if not result.success:
            self.logger.debug(
                'Scipy differential evolution optimizer failed. Info:\n%s' %
                (result, ))
        try:
            config = Configuration(self.config_space, vector=result.x)
            acq = self.acquisition_function(result.x, convert=False)
            acq_configs.append((acq, config))
        except Exception:
            pass

        if not acq_configs:  # empty
            self.logger.warning(
                'Scipy differential evolution optimizer failed. Return empty config list. Info:\n%s'
                % (result, ))

        challengers = ChallengerList([config for _, config in acq_configs],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
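
scipy.optimize.differential_evolution only needs a callable and box bounds, which is why the wrapper above is so thin. A standalone toy run:

import numpy as np
import scipy.optimize

def negative_acq(x):  # toy acquisition to maximize over [0, 1]^2
    return -float(np.exp(-np.sum((x - 0.4) ** 2)))

result = scipy.optimize.differential_evolution(func=negative_acq,
                                               bounds=[(0.0, 1.0)] * 2,
                                               seed=3)
print(result.x, -result.fun)  # near (0.4, 0.4)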
Example #17
class InterleavedLocalAndRandomSearch(AcquisitionFunctionMaximizer):
    """Implements litebo's default acquisition function optimization.

    This acq_maximizer performs local search from the previous best points
    according to the acquisition function, uses the acquisition function to
    sort randomly sampled configurations, and interleaves unsorted, randomly
    sampled configurations in between.

    Parameters
    ----------
    acquisition_function : ~litebo.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~litebo.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional

    max_steps: int
        [LocalSearch] Maximum number of steps that the local search will perform

    n_steps_plateau_walk: int
        [LocalSearch] number of steps during a plateau walk before local search terminates

    n_sls_iterations: int
        [LocalSearch] number of local search iterations

    """
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 max_steps: Optional[int] = None,
                 n_steps_plateau_walk: int = 10,
                 n_sls_iterations: int = 10,
                 rand_prob=0.25):
        super().__init__(acquisition_function, config_space, rng)
        self.random_search = RandomSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
        self.local_search = LocalSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng,
            max_steps=max_steps,
            n_steps_plateau_walk=n_steps_plateau_walk)
        self.n_sls_iterations = n_sls_iterations
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        # =======================================================================
        # self.local_search = DiffOpt(
        #     acquisition_function=acquisition_function,
        #     config_space=config_space,
        #     rng=rng
        # )
        # =======================================================================

    def maximize(self,
                 runhistory: HistoryContainer,
                 num_points: int,
                 random_configuration_chooser=None,
                 **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~litebo.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        random_configuration_chooser: ~litebo.acq_maximizer.random_configuration_chooser.RandomConfigurationChooser
            part of the returned ChallengerList such
            that we can interleave random configurations
            by a scheme defined by the random_configuration_chooser;
            random_configuration_chooser.next_smbo_iteration()
            is called at the end of this function
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~litebo.ei_optimization.ChallengerList
        """

        next_configs_by_local_search = self.local_search._maximize(
            runhistory, self.n_sls_iterations, **kwargs)

        # Get configurations sorted by EI
        next_configs_by_random_search_sorted = self.random_search._maximize(
            runhistory,
            num_points - len(next_configs_by_local_search),
            _sorted=True,
        )

        # Having the configurations from random search sorted by their
        # acquisition function value is important for the first few iterations
        # of litebo. As long as the random forest predicts constant value, we
        # want to use only random configurations. Having them at the beginning
        # of the list ensures this (even after adding the configurations from
        # local search and then sorting them).
        next_configs_by_acq_value = (next_configs_by_random_search_sorted +
                                     next_configs_by_local_search)
        next_configs_by_acq_value.sort(reverse=True, key=lambda x: x[0])
        self.logger.debug(
            "First 10 acq func (origin) values of selected configurations: %s",
            str([[_[0], _[1].origin] for _ in next_configs_by_acq_value[:10]]))
        next_configs_by_acq_value = [_[1] for _ in next_configs_by_acq_value]

        challengers = ChallengerList(next_configs_by_acq_value,
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
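
Both _maximize calls return lists of (acquisition_value, Configuration) pairs, so interleaving reduces to concatenation plus one descending sort. In miniature (toy values, hypothetical names):

local = [(0.91, 'local-1'), (0.85, 'local-2')]
random_sorted = [(0.88, 'rand-1'), (0.10, 'rand-2')]

merged = random_sorted + local
merged.sort(reverse=True, key=lambda x: x[0])
print([name for _, name in merged])
# ['local-1', 'rand-1', 'local-2', 'rand-2']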
Example #18
class RandomScipyOptimizer(AcquisitionFunctionMaximizer):
    """
    Uses scipy.optimize with start points chosen by random search. Works on
    continuous dimensions only.

    Parameters
    ----------
    acquisition_function : ~litebo.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~litebo.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional
    """
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)

        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        self.random_search = InterleavedLocalAndRandomSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
        self.scipy_optimizer = ScipyOptimizer(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)

    def maximize(self,
                 runhistory: HistoryContainer,
                 num_points: int,
                 num_trials=10,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        acq_configs = []

        initial_configs = self.random_search.maximize(runhistory, num_points,
                                                      **kwargs).challengers
        initial_acqs = self.acquisition_function(initial_configs)
        acq_configs.extend(zip(initial_acqs, initial_configs))

        success_count = 0
        for config in initial_configs[:num_trials]:
            scipy_configs = self.scipy_optimizer.maximize(
                runhistory, initial_config=config).challengers
            if not scipy_configs:  # empty
                continue
            scipy_acqs = self.acquisition_function(scipy_configs)
            acq_configs.extend(zip(scipy_acqs, scipy_configs))
            success_count += 1
        if success_count == 0:
            self.logger.warning(
                'None of the scipy optimizations in RandomScipyOptimizer '
                'were successful.')

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()