Code Example #1
 def __init__(self,
              acquisition_function: AbstractAcquisitionFunction,
              config_space: ConfigurationSpace,
              rng: Union[bool, np.random.RandomState] = None,
              rand_prob=0.0):
     super().__init__(acquisition_function, config_space, rng)
     self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
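
Note: every optimizer in these examples wires up a ChooserProb with rand_prob; with that probability an iteration returns a purely random configuration instead of an acquisition-optimized one. A minimal self-contained sketch of the idea (illustrative only, not the OpenBox implementation):

import numpy as np

class ProbChooser:
    """Illustrative stand-in for ChooserProb."""
    def __init__(self, prob: float, rng: np.random.RandomState):
        self.prob = prob
        self.rng = rng

    def check(self, iteration: int) -> bool:
        # True means: use a random configuration this iteration.
        return self.rng.rand() < self.prob

chooser = ProbChooser(prob=0.25, rng=np.random.RandomState(0))
random_picks = sum(chooser.check(i) for i in range(1000))
print('%d of 1000 iterations would use a random config' % random_picks)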
Code Example #2
 def __init__(self,
              acquisition_function: AbstractAcquisitionFunction,
              config_space: ConfigurationSpace,
              rng: Union[bool, np.random.RandomState] = None,
              num_mc=1000,
              num_opt=1000,
              rand_prob=0.0):
     super().__init__(acquisition_function, config_space, rng)
     self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
     self.num_mc = num_mc
     self.num_opt = num_opt
     self.minimizer = scipy.optimize.minimize
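
Note: the num_mc / num_opt pair corresponds to a two-stage maximization (shown in full in Code Example #11): num_mc uniform Monte Carlo samples, then num_opt L-BFGS-B restarts on the negated acquisition. A toy, self-contained sketch of that pattern, with a quadratic stand-in for the acquisition function:

import numpy as np
import scipy.optimize

rng = np.random.RandomState(0)
dim, num_mc, num_opt = 3, 1000, 5

def acq(x):
    # Toy acquisition whose maximum sits at x = 0.7 in every dimension.
    return -np.sum((np.asarray(x) - 0.7) ** 2)

# Stage 1: Monte Carlo sampling.
x_tries = rng.uniform(0.0, 1.0, size=(num_mc, dim))
best = max(x_tries, key=acq)

# Stage 2: L-BFGS-B restarts on the negated acquisition.
for x0 in rng.uniform(0.0, 1.0, size=(num_opt, dim)):
    result = scipy.optimize.minimize(lambda x: -acq(x), x0=x0,
                                     method='L-BFGS-B',
                                     bounds=[(0.0, 1.0)] * dim)
    if result.success and acq(result.x) > acq(best):
        best = result.x
print('best point:', best)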
Code Example #3
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        types, bounds = get_types(self.config_space)
        assert all(types == 0)
        self.bounds = bounds
Code Example #4
File: bo_facade.py Project: zhengjian2322/lite-bo
    def __init__(self,
                 objective_function,
                 config_space,
                 sample_strategy='bo',
                 time_limit_per_trial=180,
                 max_runs=200,
                 logging_dir='logs',
                 initial_configurations=None,
                 initial_runs=3,
                 task_id=None,
                 rng=None):
        super().__init__(config_space, task_id, output_dir=logging_dir)
        self.logger = super()._get_logger(self.__class__.__name__)
        if rng is None:
            run_id, rng = get_rng()

        self.init_num = initial_runs
        self.max_iterations = max_runs
        self.iteration_id = 0
        self.sls_max_steps = None
        self.n_sls_iterations = 5
        self.sls_n_steps_plateau_walk = 10
        self.time_limit_per_trial = time_limit_per_trial
        self.default_obj_value = MAXINT
        self.sample_strategy = sample_strategy

        self.configurations = list()
        self.failed_configurations = list()
        self.perfs = list()

        # Initialize the basic component in BO.
        self.config_space.seed(rng.randint(MAXINT))
        self.objective_function = objective_function
        types, bounds = get_types(config_space)
        # TODO: what is the feature array.
        self.model = RandomForestWithInstances(types=types,
                                               bounds=bounds,
                                               seed=rng.randint(MAXINT))
        self.acquisition_function = EI(self.model)
        self.optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function=self.acquisition_function,
            config_space=self.config_space,
            rng=np.random.RandomState(seed=rng.randint(MAXINT)),
            max_steps=self.sls_max_steps,
            n_steps_plateau_walk=self.sls_n_steps_plateau_walk,
            n_sls_iterations=self.n_sls_iterations)
        self._random_search = RandomSearch(self.acquisition_function,
                                           self.config_space, rng)
        self.random_configuration_chooser = ChooserProb(prob=0.25, rng=rng)
Code Example #5
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 batch_size=None,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        if batch_size is None:
            types, bounds = get_types(self.config_space)
            dim = np.sum(types == 0)
            self.batch_size = min(5000, max(2000, 200 * dim))
        else:
            self.batch_size = batch_size
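
Note: the fallback heuristic scales the evaluation batch linearly with the number of continuous dimensions but clamps it to [2000, 5000]. A few worked values:

for dim in (1, 10, 25, 100):
    print(dim, min(5000, max(2000, 200 * dim)))
# 1 -> 2000, 10 -> 2000, 25 -> 5000, 100 -> 5000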
Code Example #6
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        types, bounds = get_types(self.config_space)
        assert all(types == 0)
        self.bounds = bounds

        options = dict(disp=False, maxiter=1000)
        self.scipy_config = dict(tol=None, method='L-BFGS-B', options=options)
Code Example #7
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)

        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        self.random_search = InterleavedLocalAndRandomSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
        self.scipy_optimizer = ScipyOptimizer(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
Code Example #8
 def __init__(self,
              acquisition_function: AbstractAcquisitionFunction,
              config_space: ConfigurationSpace,
              rng: Union[bool, np.random.RandomState] = None,
              max_steps: Optional[int] = None,
              n_steps_plateau_walk: int = 10,
              n_sls_iterations: int = 10,
              rand_prob=0.25):
     super().__init__(acquisition_function, config_space, rng)
     self.random_search = RandomSearch(
         acquisition_function=acquisition_function,
         config_space=config_space,
         rng=rng)
     self.local_search = LocalSearch(
         acquisition_function=acquisition_function,
         config_space=config_space,
         rng=rng,
         max_steps=max_steps,
         n_steps_plateau_walk=n_steps_plateau_walk)
     self.n_sls_iterations = n_sls_iterations
     self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
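
Note: this class interleaves a LocalSearch (with plateau walks) and a RandomSearch, then ranks all candidates by acquisition value. A schematic, self-contained sketch of the interleaving on a toy acquisition (the noise-based local step below is illustrative; the library's local search walks a configuration neighborhood instead):

import numpy as np

rng = np.random.RandomState(0)

def acq(x):
    # Toy acquisition with its peak at 0.6 in every dimension.
    return -np.square(x - 0.6).sum()

def local_search(x, steps=50, step_size=0.05):
    for _ in range(steps):
        cand = np.clip(x + rng.normal(scale=step_size, size=x.shape), 0, 1)
        if acq(cand) > acq(x):
            x = cand
    return x

starts = rng.uniform(size=(10, 3))               # local-search restarts
local = [local_search(x) for x in starts]
random_cands = list(rng.uniform(size=(100, 3)))  # interleaved random candidates
best = max(local + random_cands, key=acq)
print(best)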
Code Example #9
 def __init__(
     self,
     acquisition_function: AbstractAcquisitionFunction,
     config_space: ConfigurationSpace,
     num_random: int = 1000,
     num_restarts: int = 20,
     raw_samples: int = 1024,
     batch_limit: int = 5,
     scipy_maxiter: int = 200,
     rand_prob: float = 0.0,
     rng: Union[bool, np.random.RandomState] = None,
 ):
     super().__init__(acquisition_function, config_space, rng)
     self.num_random = num_random
     self.num_restarts = num_restarts
     self.raw_samples = raw_samples
     self.batch_limit = batch_limit
     self.scipy_max_iter = scipy_maxiter
     self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
     self.minimizer = scipy.optimize.minimize
     self.method = "L-BFGS-B"
     self.dim = len(self.config_space.get_hyperparameters())
      self.bound = (0.0, 1.0)  # todo: only on continuous dims (int, float) now
Code Example #10
    def __init__(self, objective_func,
                 config_space: ConfigurationSpace,
                 R,
                 eta=3,
                 num_iter=10000,
                 rand_prob=0.3,
                 init_weight=None, update_enable=True,
                 weight_method='rank_loss_p_norm', fusion_method='idp',
                 power_num=3,
                 random_state=1,
                 method_id='mqMFES',
                 restart_needed=True,
                 time_limit_per_trial=600,
                 runtime_limit=None,
                 ip='',
                 port=13579,
                 authkey=b'abc',):
        max_queue_len = 3 * R  # conservative design
        super().__init__(objective_func, method_name=method_id,
                         restart_needed=restart_needed, time_limit_per_trial=time_limit_per_trial,
                         runtime_limit=runtime_limit,
                         max_queue_len=max_queue_len, ip=ip, port=port, authkey=authkey)
        self.seed = random_state
        self.config_space = config_space
        self.config_space.seed(self.seed)

        self.R = R
        self.eta = eta
        self.logeta = lambda x: log(x) / log(self.eta)
        self.s_max = int(self.logeta(self.R))
        self.B = (self.s_max + 1) * self.R
        self.num_iter = num_iter

        self.update_enable = update_enable
        self.fusion_method = fusion_method
        # Parameter for weight method `rank_loss_p_norm`.
        self.power_num = power_num
        # Specify the weight learning method.
        self.weight_method = weight_method
        self.weight_update_id = 0
        self.weight_changed_cnt = 0

        if init_weight is None:
            init_weight = [0.]
            init_weight.extend([1. / self.s_max] * self.s_max)
        assert len(init_weight) == (self.s_max + 1)
        self.logger.info('Weight method & flag: %s-%s' % (self.weight_method, str(self.update_enable)))
        self.logger.info("Initial weight is: %s" % init_weight[:self.s_max + 1])
        types, bounds = get_types(config_space)

        self.weighted_surrogate = WeightedRandomForestCluster(
            types, bounds, self.s_max, self.eta, init_weight, self.fusion_method
        )
        self.acquisition_function = EI(model=self.weighted_surrogate)

        self.incumbent_configs = []
        self.incumbent_perfs = []

        self.iterate_id = 0
        self.iterate_r = []
        self.hist_weights = list()

        # Saving evaluation statistics in Hyperband.
        self.target_x = dict()
        self.target_y = dict()
        for index, item in enumerate(np.logspace(0, self.s_max, self.s_max + 1, base=self.eta)):
            r = int(item)
            self.iterate_r.append(r)
            self.target_x[r] = []
            self.target_y[r] = []

        # BO optimizer settings.
        self.configs = list()
        self.history_container = HistoryContainer(task_id=self.method_name)
        self.sls_max_steps = None
        self.n_sls_iterations = 5
        self.sls_n_steps_plateau_walk = 10
        self.rng = np.random.RandomState(seed=self.seed)
        self.acq_optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function=self.acquisition_function,
            config_space=self.config_space,
            rng=self.rng,
            max_steps=self.sls_max_steps,
            n_steps_plateau_walk=self.sls_n_steps_plateau_walk,
            n_sls_iterations=self.n_sls_iterations,
            rand_prob=0.0,
        )
        self.random_configuration_chooser = ChooserProb(prob=rand_prob, rng=self.rng)
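
Note: the Hyperband bookkeeping above (s_max, the budget B, and the geometric resource ladder built via np.logspace), worked through for R=27 and eta=3:

from math import log
import numpy as np

R, eta = 27, 3
s_max = int(log(R) / log(eta))   # 3
B = (s_max + 1) * R              # 108
rungs = [int(r) for r in np.logspace(0, s_max, s_max + 1, base=eta)]
print(s_max, B, rungs)           # 3 108 [1, 3, 9, 27]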
Code Example #11
class MESMO_Optimizer2(AcquisitionFunctionMaximizer):
    """Implements Scipy optimizer for MESMO. Only on continuous dims

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional

    """
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 num_mc=1000,
                 num_opt=1000,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
        self.num_mc = num_mc
        self.num_opt = num_opt
        self.minimizer = scipy.optimize.minimize

    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~openbox.ei_optimization.ChallengerList
        """
        def inverse_acquisition(x):
            # shape of x = (d,)
            return -self.acquisition_function(x,
                                              convert=False)[0]  # shape=(1,)

        d = len(self.config_space.get_hyperparameters())
        bound = (0.0, 1.0)  # todo only on continuous dims (int, float) now
        bounds = [bound] * d
        acq_configs = []

        # MC
        x_tries = self.rng.uniform(bound[0], bound[1], size=(self.num_mc, d))
        acq_tries = self.acquisition_function(x_tries, convert=False)
        for i in range(x_tries.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=x_tries[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_tries[i], config))

        # L-BFGS-B
        x_seed = self.rng.uniform(low=bound[0],
                                  high=bound[1],
                                  size=(self.num_opt, d))
        for i in range(x_seed.shape[0]):
            x0 = x_seed[i].reshape(1, -1)
            result = self.minimizer(inverse_acquisition,
                                    x0=x0,
                                    method='L-BFGS-B',
                                    bounds=bounds)
            if not result.success:
                continue
            # convert array to Configuration
            config = Configuration(self.config_space, vector=result.x)
            config.origin = 'Scipy'
            acq_val = self.acquisition_function(result.x, convert=False)  # [0]
            acq_configs.append((acq_val, config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
Code Example #12
class StagedBatchScipyOptimizer(AcquisitionFunctionMaximizer):
    """ todo constraints
    Use batch scipy.optimize with start points chosen by specific method. Only on continuous dims.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    num_random : Number of randomly chosen points

    num_restarts : The number of starting points for multistart acquisition
            function optimization

    raw_samples : The number of samples for initialization

    batch_limit : Number of points in a batch optimized jointly by scipy minimizer

    scipy_maxiter : Maximum number of scipy minimizer iterations to perform

    rand_prob : Probability of choosing random config

    rng : np.random.RandomState or int, optional
    """
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        num_random: int = 1000,
        num_restarts: int = 20,
        raw_samples: int = 1024,
        batch_limit: int = 5,
        scipy_maxiter: int = 200,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.num_random = num_random
        self.num_restarts = num_restarts
        self.raw_samples = raw_samples
        self.batch_limit = batch_limit
        self.scipy_max_iter = scipy_maxiter
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
        self.minimizer = scipy.optimize.minimize
        self.method = "L-BFGS-B"
        self.dim = len(self.config_space.get_hyperparameters())
        self.bound = (0.0, 1.0)  # todo: only on continuous dims (int, float) now

    def gen_initial_points(self, num_restarts, raw_samples):
        # todo other strategy
        random_points = self.rng.uniform(self.bound[0],
                                         self.bound[1],
                                         size=(raw_samples, self.dim))
        acq_random = self.acquisition_function(random_points,
                                               convert=False).reshape(-1)
        idx = np.argsort(acq_random)[::-1][:num_restarts]
        return random_points[idx]

    def gen_batch_scipy_points(self, initial_points: np.ndarray):
        def f(X_flattened):
            X = X_flattened.reshape(shapeX)
            joint_acq = -self.acquisition_function(X,
                                                   convert=False).sum().item()
            return joint_acq

        shapeX = initial_points.shape
        x0 = initial_points.reshape(-1)
        bounds = [self.bound] * x0.shape[0]

        result = self.minimizer(
            f,
            x0=x0,
            method=self.method,
            bounds=bounds,
            options=dict(maxiter=self.scipy_max_iter),
        )

        # Note: result.x is returned even if the minimizer reports failure,
        # which can happen simply because the iteration limit was reached
        # ('STOP: TOTAL NO. of ITERATIONS REACHED LIMIT').
        return result.x.reshape(shapeX)

    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> List[Tuple[float, Configuration]]:

        acq_configs = []

        # random points
        random_points = self.rng.uniform(self.bound[0],
                                         self.bound[1],
                                         size=(self.num_random, self.dim))
        acq_random = self.acquisition_function(random_points, convert=False)
        for i in range(random_points.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=random_points[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_random[i], config))

        # scipy points
        initial_points = self.gen_initial_points(
            num_restarts=self.num_restarts, raw_samples=self.raw_samples)

        for start_idx in range(0, self.num_restarts, self.batch_limit):
            end_idx = min(start_idx + self.batch_limit, self.num_restarts)
            # optimize using random restart optimization
            scipy_points = self.gen_batch_scipy_points(
                initial_points[start_idx:end_idx])
            if scipy_points is None:
                continue
            acq_scipy = self.acquisition_function(scipy_points, convert=False)
            for i in range(scipy_points.shape[0]):
                # convert array to Configuration
                config = Configuration(self.config_space,
                                       vector=scipy_points[i])
                config.origin = 'Batch Scipy'
                acq_configs.append((acq_scipy[i], config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()

        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
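
Note: the essential trick in gen_batch_scipy_points, in isolation: a batch of points is flattened into one decision vector and their negated acquisition values are summed, so a single scipy call optimizes the whole batch jointly. A self-contained toy version (the quadratic acquisition is a stand-in):

import numpy as np
import scipy.optimize

dim, batch = 2, 4
target = np.array([0.25, 0.75])

def acq(X):
    # Toy acquisition over an (n, dim) batch, peaked at `target`.
    return -np.sum((X - target) ** 2, axis=1)

def f(x_flat):
    X = x_flat.reshape(batch, dim)
    return -acq(X).sum()   # one scalar for the whole batch

x0 = np.random.RandomState(0).uniform(size=batch * dim)
result = scipy.optimize.minimize(f, x0=x0, method='L-BFGS-B',
                                 bounds=[(0.0, 1.0)] * (batch * dim),
                                 options=dict(maxiter=200))
print(result.x.reshape(batch, dim))  # every row converges near `target`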
Code Example #13
class ScipyGlobalOptimizer(AcquisitionFunctionMaximizer):
    """
    Wraps a scipy global optimizer (differential evolution). Continuous dims only.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional
    """
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        types, bounds = get_types(self.config_space)
        assert all(types == 0)
        self.bounds = bounds

    def maximize(self,
                 runhistory: HistoryContainer,
                 initial_config=None,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        def negative_acquisition(x):
            # shape of x = (d,)
            return -self.acquisition_function(x,
                                              convert=False)[0]  # shape=(1,)

        acq_configs = []
        result = scipy.optimize.differential_evolution(
            func=negative_acquisition, bounds=self.bounds)
        if not result.success:
            self.logger.debug(
                'Scipy differential evolution optimizer failed. Info:\n%s' %
                (result, ))
        try:
            config = Configuration(self.config_space, vector=result.x)
            acq = self.acquisition_function(result.x, convert=False)
            acq_configs.append((acq, config))
        except Exception:
            pass

        if not acq_configs:  # empty
            self.logger.warning(
                'Scipy differential evolution optimizer failed. Return empty config list. Info:\n%s'
                % (result, ))

        challengers = ChallengerList([config for _, config in acq_configs],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
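
Note: for contrast with the L-BFGS-B-based optimizers, the same pattern with scipy's global optimizer on a toy negated acquisition (a self-contained sketch, not the class above):

import numpy as np
import scipy.optimize

def negative_acquisition(x):
    # Toy target: the acquisition's maximum sits at x = 0.3 everywhere.
    return float(np.sum((x - 0.3) ** 2))

bounds = [(0.0, 1.0)] * 4
result = scipy.optimize.differential_evolution(func=negative_acquisition,
                                               bounds=bounds)
print(result.x, result.success)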
Code Example #14
File: async_mq_mfes.py Project: PKU-DAIR/open-box
class async_mqMFES(async_mqHyperband):
    """
    The implementation of Asynchronous MFES (combining ASHA and MFES).
    """
    def __init__(self,
                 objective_func,
                 config_space: ConfigurationSpace,
                 R,
                 eta=3,
                 skip_outer_loop=0,
                 rand_prob=0.3,
                 use_bohb=False,
                 init_weight=None,
                 update_enable=True,
                 weight_method='rank_loss_p_norm',
                 fusion_method='idp',
                 power_num=3,
                 random_state=1,
                 method_id='mqAsyncMFES',
                 restart_needed=True,
                 time_limit_per_trial=600,
                 runtime_limit=None,
                 seed=1,
                 ip='',
                 port=13579,
                 authkey=b'abc'):
        super().__init__(objective_func,
                         config_space,
                         R,
                         eta=eta,
                         skip_outer_loop=skip_outer_loop,
                         random_state=random_state,
                         method_id=method_id,
                         restart_needed=restart_needed,
                         time_limit_per_trial=time_limit_per_trial,
                         runtime_limit=runtime_limit,
                         ip=ip,
                         port=port,
                         authkey=authkey)
        self.seed = seed
        self.last_n_iteration = None
        self.use_bohb_strategy = use_bohb
        self.update_enable = update_enable
        self.fusion_method = fusion_method
        # Parameter for weight method `rank_loss_p_norm`.
        self.power_num = power_num
        # Specify the weight learning method.
        self.weight_method = weight_method
        self.weight_update_id = 0
        self.weight_changed_cnt = 0

        if init_weight is None:
            init_weight = [1. / self.s_max] * self.s_max + [0.]
            assert len(init_weight) == (self.s_max + 1)
        self.logger.info("Initialize weight to %s" %
                         init_weight[:self.s_max + 1])
        types, bounds = get_types(config_space)

        if not self.use_bohb_strategy:
            self.surrogate = RandomForestEnsemble(types, bounds, self.s_max,
                                                  self.eta, init_weight,
                                                  self.fusion_method)
        else:
            self.surrogate = RandomForestWithInstances(types,
                                                       bounds,
                                                       seed=self.seed)
        self.acquisition_function = EI(model=self.surrogate)

        self.iterate_id = 0
        self.iterate_r = list()
        self.hist_weights = list()

        # Saving evaluation statistics in Hyperband.
        self.target_x = dict()
        self.target_y = dict()
        for index, item in enumerate(
                np.logspace(0, self.s_max, self.s_max + 1, base=self.eta)):
            r = int(item)
            self.iterate_r.append(r)
            self.target_x[r] = list()
            self.target_y[r] = list()

        # BO optimizer settings.
        self.history_container = HistoryContainer(task_id=self.method_name)
        self.sls_max_steps = None
        self.n_sls_iterations = 5
        self.sls_n_steps_plateau_walk = 10
        self.rng = np.random.RandomState(seed=self.seed)
        self.acq_optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function=self.acquisition_function,
            config_space=self.config_space,
            rng=self.rng,
            max_steps=self.sls_max_steps,
            n_steps_plateau_walk=self.sls_n_steps_plateau_walk,
            n_sls_iterations=self.n_sls_iterations,
            rand_prob=0.0,
        )
        self.random_configuration_chooser = ChooserProb(prob=rand_prob,
                                                        rng=self.rng)
        self.random_check_idx = 0

    def update_observation(self, config, perf, n_iteration):
        rung_id = self.get_rung_id(self.bracket, n_iteration)

        updated = False
        for job in self.bracket[rung_id]['jobs']:
            _job_status, _config, _perf, _extra_conf = job
            if _config == config:
                assert _job_status == RUNNING
                job[0] = COMPLETED
                job[2] = perf
                updated = True
                break
        assert updated
        # print('=== bracket after update_observation:', self.get_bracket_status(self.bracket))

        configs_running = list()
        for _config in self.bracket[rung_id]['configs']:
            if _config not in self.target_x[n_iteration]:
                configs_running.append(_config)
        value_imputed = np.median(self.target_y[n_iteration])

        n_iteration = int(n_iteration)
        self.target_x[n_iteration].append(config)
        self.target_y[n_iteration].append(perf)

        if n_iteration == self.R:
            self.incumbent_configs.append(config)
            self.incumbent_perfs.append(perf)
            # Update history container.
            self.history_container.add(config, perf)

        # Refit the ensemble surrogate model.
        configs_train = self.target_x[n_iteration] + configs_running
        results_train = self.target_y[n_iteration] + [value_imputed] * len(configs_running)
        results_train = np.array(std_normalization(results_train),
                                 dtype=np.float64)
        if not self.use_bohb_strategy:
            self.surrogate.train(
                convert_configurations_to_array(configs_train),
                results_train,
                r=n_iteration)
        else:
            if n_iteration == self.R:
                self.surrogate.train(
                    convert_configurations_to_array(configs_train),
                    results_train)

    def choose_next(self):
        """
        Sample a config according to MFES; assign the number of iterations according to the Hyperband strategy.
        """
        next_config = None
        next_n_iteration = self.get_next_n_iteration()
        next_rung_id = self.get_rung_id(self.bracket, next_n_iteration)

        # Update weight when the inner loop of hyperband is finished
        if self.last_n_iteration != next_n_iteration and not self.use_bohb_strategy:
            if self.update_enable and self.weight_update_id > self.s_max:
                self.update_weight()
            self.weight_update_id += 1
        self.last_n_iteration = next_n_iteration

        # sample config
        excluded_configs = self.bracket[next_rung_id]['configs']
        if len(self.target_y[self.iterate_r[-1]]) == 0:
            next_config = sample_configuration(
                self.config_space, excluded_configs=excluded_configs)
        else:
            # Like BOHB, sample a fixed percentage of random configurations.
            self.random_check_idx += 1
            if self.random_configuration_chooser.check(self.random_check_idx):
                next_config = sample_configuration(
                    self.config_space, excluded_configs=excluded_configs)
            else:
                acq_configs = self.get_bo_candidates()
                for config in acq_configs:
                    if config not in self.bracket[next_rung_id]['configs']:
                        next_config = config
                        break
                if next_config is None:
                    self.logger.warning(
                        'Cannot get a non duplicate configuration from bo candidates. '
                        'Sample a random one.')
                    next_config = sample_configuration(
                        self.config_space, excluded_configs=excluded_configs)

        next_extra_conf = {}
        return next_config, next_n_iteration, next_extra_conf

    def get_bo_candidates(self):
        std_incumbent_value = np.min(
            std_normalization(self.target_y[self.iterate_r[-1]]))
        # Update surrogate model in acquisition function.
        self.acquisition_function.update(model=self.surrogate,
                                         eta=std_incumbent_value,
                                         num_data=len(self.incumbent_configs))

        challengers = self.acq_optimizer.maximize(
            runhistory=self.history_container,
            num_points=5000,
        )
        return challengers.challengers

    @staticmethod
    def calculate_preserving_order_num(y_pred, y_true):
        array_size = len(y_pred)
        assert len(y_true) == array_size

        total_pair_num, order_preserving_num = 0, 0
        for idx in range(array_size):
            for inner_idx in range(idx + 1, array_size):
                if bool(y_true[idx] > y_true[inner_idx]) == bool(
                        y_pred[idx] > y_pred[inner_idx]):
                    order_preserving_num += 1
                total_pair_num += 1
        return order_preserving_num, total_pair_num
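
    # Worked check of calculate_preserving_order_num: three points give
    # C(3, 2) = 3 pairs, and the prediction below inverts exactly one pair,
    # so the method returns (2, 3). For example:
    #     y_true = [1.0, 2.0, 3.0]
    #     y_pred = [1.0, 3.0, 2.0]
    #     async_mqMFES.calculate_preserving_order_num(y_pred, y_true) == (2, 3)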

    def update_weight(self):
        start_time = time.time()

        max_r = self.iterate_r[-1]
        incumbent_configs = self.target_x[max_r]
        test_x = convert_configurations_to_array(incumbent_configs)
        test_y = np.array(self.target_y[max_r], dtype=np.float64)

        r_list = self.surrogate.surrogate_r
        K = len(r_list)

        if len(test_y) >= 3:
            # Get previous weights
            if self.weight_method == 'rank_loss_p_norm':
                preserving_order_p = list()
                preserving_order_nums = list()
                for i, r in enumerate(r_list):
                    fold_num = 5
                    if i != K - 1:
                        mean, var = self.surrogate.surrogate_container[
                            r].predict(test_x)
                        tmp_y = np.reshape(mean, -1)
                        preorder_num, pair_num = self.calculate_preserving_order_num(
                            tmp_y, test_y)
                        preserving_order_p.append(preorder_num / pair_num)
                        preserving_order_nums.append(preorder_num)
                    else:
                        if len(test_y) < 2 * fold_num:
                            preserving_order_p.append(0)
                        else:
                            # 5-fold cross validation.
                            kfold = KFold(n_splits=fold_num)
                            cv_pred = np.zeros(len(test_y))  # float array; an int dtype would truncate predictions
                            for train_idx, valid_idx in kfold.split(test_x):
                                train_configs, train_y = test_x[
                                    train_idx], test_y[train_idx]
                                valid_configs, valid_y = test_x[
                                    valid_idx], test_y[valid_idx]
                                types, bounds = get_types(self.config_space)
                                _surrogate = RandomForestWithInstances(
                                    types=types, bounds=bounds)
                                _surrogate.train(train_configs, train_y)
                                pred, _ = _surrogate.predict(valid_configs)
                                cv_pred[valid_idx] = pred.reshape(-1)
                            preorder_num, pair_num = self.calculate_preserving_order_num(
                                cv_pred, test_y)
                            preserving_order_p.append(preorder_num / pair_num)
                            preserving_order_nums.append(preorder_num)

                trans_order_weight = np.array(preserving_order_p)
                power_sum = np.sum(np.power(trans_order_weight,
                                            self.power_num))
                new_weights = np.power(trans_order_weight,
                                       self.power_num) / power_sum

            elif self.weight_method == 'rank_loss_prob':
                # For basic surrogate i=1:K-1.
                mean_list, var_list = list(), list()
                for i, r in enumerate(r_list[:-1]):
                    mean, var = self.surrogate.surrogate_container[r].predict(
                        test_x)
                    mean_list.append(np.reshape(mean, -1))
                    var_list.append(np.reshape(var, -1))
                sample_num = 100
                min_probability_array = [0] * K
                for _ in range(sample_num):
                    order_preseving_nums = list()

                    # For basic surrogate i=1:K-1.
                    for idx in range(K - 1):
                        sampled_y = self.rng.normal(mean_list[idx],
                                                    var_list[idx])
                        _num, _ = self.calculate_preserving_order_num(
                            sampled_y, test_y)
                        order_preseving_nums.append(_num)

                    fold_num = 5
                    # For basic surrogate i=K. cv
                    if len(test_y) < 2 * fold_num:
                        order_preseving_nums.append(0)
                    else:
                        # 5-fold cross validation.
                        kfold = KFold(n_splits=fold_num)
                        cv_pred = np.zeros(len(test_y))  # float array; an int dtype would truncate predictions
                        for train_idx, valid_idx in kfold.split(test_x):
                            train_configs, train_y = test_x[train_idx], test_y[
                                train_idx]
                            valid_configs, valid_y = test_x[valid_idx], test_y[
                                valid_idx]
                            types, bounds = get_types(self.config_space)
                            _surrogate = RandomForestWithInstances(
                                types=types, bounds=bounds)
                            _surrogate.train(train_configs, train_y)
                            _pred, _var = _surrogate.predict(valid_configs)
                            sampled_pred = self.rng.normal(
                                _pred.reshape(-1), _var.reshape(-1))
                            cv_pred[valid_idx] = sampled_pred
                        _num, _ = self.calculate_preserving_order_num(
                            cv_pred, test_y)
                        order_preseving_nums.append(_num)
                    max_id = np.argmax(order_preseving_nums)
                    min_probability_array[max_id] += 1
                new_weights = np.array(min_probability_array) / sample_num
            else:
                raise ValueError('Invalid weight method: %s!' %
                                 self.weight_method)
        else:
            old_weights = list()
            for i, r in enumerate(r_list):
                _weight = self.surrogate.surrogate_weight[r]
                old_weights.append(_weight)
            new_weights = old_weights.copy()

        self.logger.info(
            '[%s] %d-th Updating weights: %s' %
            (self.weight_method, self.weight_changed_cnt, str(new_weights)))

        # Assign the weight to each basic surrogate.
        for i, r in enumerate(r_list):
            self.surrogate.surrogate_weight[r] = new_weights[i]
        self.weight_changed_cnt += 1
        # Save the weight data.
        self.hist_weights.append(new_weights)
        dir_path = os.path.join(self.data_directory, 'saved_weights')
        file_name = 'mfes_weights_%s.npy' % (self.method_name, )
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        np.save(os.path.join(dir_path, file_name),
                np.asarray(self.hist_weights))
        self.logger.info(
            'update_weight() cost %.2fs. new weights are saved to %s' %
            (time.time() - start_time, os.path.join(dir_path, file_name)))

    def get_weights(self):
        return self.hist_weights
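
Note: the rank_loss_p_norm branch turns each surrogate's order-preservation ratio p_i into the weight p_i**power_num / sum_j(p_j**power_num), so raising to power_num sharpens the contrast between well- and poorly-ranking surrogates. In isolation:

import numpy as np

preserving_order_p = np.array([0.9, 0.6, 0.5])  # example ratios
power_num = 3
weights = preserving_order_p ** power_num
weights /= weights.sum()
print(weights.round(3))  # [0.681 0.202 0.117] -- sharper than the raw ratios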
Code Example #15
File: bo_facade.py Project: PKU-DAIR/open-box
class BayesianOptimization(BaseFacade):
    def __init__(self, objective_function, config_space,
                 sample_strategy='bo',
                 time_limit_per_trial=180,
                 max_runs=200,
                 logging_dir='logs',
                 initial_configurations=None,
                 initial_runs=3,
                 task_id='default_task_id',
                 rng=None):
        super().__init__(config_space, task_id, output_dir=logging_dir)
        self.logger = super()._get_logger(self.__class__.__name__)
        if rng is None:
            run_id, rng = get_rng()

        self.init_num = initial_runs
        self.max_iterations = max_runs
        self.iteration_id = 0
        self.sls_max_steps = None
        self.n_sls_iterations = 5
        self.sls_n_steps_plateau_walk = 10
        self.time_limit_per_trial = time_limit_per_trial
        self.default_obj_value = MAXINT
        self.sample_strategy = sample_strategy

        self.configurations = list()
        self.failed_configurations = list()
        self.perfs = list()

        # Initialize the basic component in BO.
        self.config_space.seed(rng.randint(MAXINT))
        self.objective_function = objective_function
        types, bounds = get_types(config_space)
        # TODO: what is the feature array.
        self.model = RandomForestWithInstances(types=types, bounds=bounds, seed=rng.randint(MAXINT))
        self.acquisition_function = EI(self.model)
        self.optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function=self.acquisition_function,
            config_space=self.config_space,
            rng=np.random.RandomState(seed=rng.randint(MAXINT)),
            max_steps=self.sls_max_steps,
            n_steps_plateau_walk=self.sls_n_steps_plateau_walk,
            n_sls_iterations=self.n_sls_iterations
        )
        self._random_search = RandomSearch(
            self.acquisition_function, self.config_space, rng
        )
        self.random_configuration_chooser = ChooserProb(prob=0.25, rng=rng)

    def run(self):
        while self.iteration_id < self.max_iterations:
            self.iterate()

    def iterate(self):
        if len(self.configurations) == 0:
            X = np.array([])
        else:
            failed_configs = list() if self.max_y is None else self.failed_configurations.copy()
            X = convert_configurations_to_array(self.configurations + failed_configs)

        failed_perfs = list() if self.max_y is None else [self.max_y] * len(self.failed_configurations)
        Y = np.array(self.perfs + failed_perfs, dtype=np.float64)

        config = self.choose_next(X, Y)

        trial_state = SUCCESS
        trial_info = None

        if config not in (self.configurations + self.failed_configurations):
            # Evaluate this configuration.
            try:
                args, kwargs = (config,), dict()
                timeout_status, _result = time_limit(self.objective_function, self.time_limit_per_trial,
                                                     args=args, kwargs=kwargs)
                if timeout_status:
                    raise TimeoutException(
                        'Timeout: time limit for this evaluation is %.1fs' % self.time_limit_per_trial)
                else:
                    perf = MAXINT if _result is None else _result
            except Exception as e:
                if isinstance(e, TimeoutException):
                    trial_state = TIMEOUT
                else:
                    traceback.print_exc(file=sys.stdout)
                    trial_state = FAILED
                perf = MAXINT
                trial_info = str(e)
                self.logger.error(trial_info)

            if trial_state == SUCCESS and perf < MAXINT:
                if len(self.configurations) == 0:
                    self.default_obj_value = perf

                self.configurations.append(config)
                self.perfs.append(perf)
                self.history_container.add(config, perf)

                self.perc = np.percentile(self.perfs, self.scale_perc)
                self.min_y = np.min(self.perfs)
                self.max_y = np.max(self.perfs)
            else:
                self.failed_configurations.append(config)
        else:
            self.logger.debug('This configuration has been evaluated! Skip it.')
            if config in self.configurations:
                config_idx = self.configurations.index(config)
                trial_state, perf = SUCCESS, self.perfs[config_idx]
            else:
                trial_state, perf = FAILED, MAXINT

        self.iteration_id += 1
        self.logger.info(
            'Iteration-%d, objective improvement: %.4f' % (self.iteration_id, max(0, self.default_obj_value - perf)))
        return config, trial_state, perf, trial_info

    def choose_next(self, X: np.ndarray, Y: np.ndarray):
        _config_num = X.shape[0]
        if _config_num < self.init_num:
            default_config = self.config_space.get_default_configuration()
            if default_config not in (self.configurations + self.failed_configurations):
                return default_config
            else:
                return self._random_search.maximize(runhistory=self.history_container, num_points=1)[0]

        if self.sample_strategy == 'random':
            return self.sample_config()
        elif self.sample_strategy == 'bo':
            if self.random_configuration_chooser.check(self.iteration_id):
                return self.sample_config()
            else:
                self.model.train(X, Y)

                incumbent_value = self.history_container.get_incumbents()[0][1]

                self.acquisition_function.update(model=self.model, eta=incumbent_value,
                                                 num_data=len(self.history_container.data))

                challengers = self.optimizer.maximize(
                    runhistory=self.history_container,
                    num_points=5000,
                    random_configuration_chooser=self.random_configuration_chooser
                )

                return challengers.challengers[0]
        else:
            raise ValueError('Invalid sampling strategy - %s.' % self.sample_strategy)

    def sample_config(self):
        config = None
        _sample_cnt, _sample_limit = 0, 10000
        while True:
            _sample_cnt += 1
            config = self.config_space.sample_configuration()
            if config not in (self.configurations + self.failed_configurations):
                break
            if _sample_cnt >= _sample_limit:
                config = self.config_space.sample_configuration()
                break
        return config
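
Note: a hypothetical usage sketch for this facade on the Branin function. The objective and space definitions below are illustrative, and the exact facade API may differ between lite-bo and OpenBox versions:

import numpy as np
from ConfigSpace import ConfigurationSpace, UniformFloatHyperparameter

def branin(config):
    x1, x2 = config['x1'], config['x2']
    return (x2 - 5.1 / (4 * np.pi ** 2) * x1 ** 2 + 5 / np.pi * x1 - 6) ** 2 \
        + 10 * (1 - 1 / (8 * np.pi)) * np.cos(x1) + 10

space = ConfigurationSpace()
space.add_hyperparameters([UniformFloatHyperparameter('x1', -5.0, 10.0),
                           UniformFloatHyperparameter('x2', 0.0, 15.0)])

bo = BayesianOptimization(branin, space, max_runs=50, time_limit_per_trial=30)
bo.run()
print(bo.history_container.get_incumbents())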
Code Example #16
class ScipyOptimizer(AcquisitionFunctionMaximizer):
    """
    Wraps a scipy local optimizer (L-BFGS-B). Continuous dims only.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional
    """
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        types, bounds = get_types(
            self.config_space)  # todo: support constant hp in scipy optimizer
        assert all(
            types == 0
        ), 'Scipy optimizer (L-BFGS-B) only supports Integer and Float parameters.'
        self.bounds = bounds

        options = dict(disp=False, maxiter=1000)
        self.scipy_config = dict(tol=None, method='L-BFGS-B', options=options)

    def maximize(self,
                 runhistory: HistoryContainer,
                 initial_config=None,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        def negative_acquisition(x):
            # shape of x = (d,)
            x = np.clip(x, 0.0, 1.0)  # fix numerical problem in L-BFGS-B
            return -self.acquisition_function(x,
                                              convert=False)[0]  # shape=(1,)

        if initial_config is None:
            initial_config = self.config_space.sample_configuration()
        init_point = initial_config.get_array()

        acq_configs = []
        result = scipy.optimize.minimize(fun=negative_acquisition,
                                         x0=init_point,
                                         bounds=self.bounds,
                                         **self.scipy_config)
        # if result.success:
        #     acq_configs.append((result.fun, Configuration(self.config_space, vector=result.x)))
        if not result.success:
            self.logger.debug('Scipy optimizer failed. Info:\n%s' % (result, ))
        try:
            x = np.clip(result.x, 0.0,
                        1.0)  # fix numerical problem in L-BFGS-B
            config = Configuration(self.config_space, vector=x)
            acq = self.acquisition_function(x, convert=False)
            acq_configs.append((acq, config))
        except Exception:
            pass

        if not acq_configs:  # empty
            self.logger.warning(
                'Scipy optimizer failed. Return empty config list. Info:\n%s' %
                (result, ))

        challengers = ChallengerList([config for _, config in acq_configs],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
Code Example #17
class USeMO_Optimizer(AcquisitionFunctionMaximizer):
    """Implements USeMO optimizer

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional

    """
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # useless in USeMO
            **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~openbox.ei_optimization.ChallengerList
        """

        acq_vals = np.asarray(self.acquisition_function.uncertainties)
        candidates = np.asarray(self.acquisition_function.candidates)
        assert len(acq_vals.shape) == 1 and len(candidates.shape) == 2 \
               and acq_vals.shape[0] == candidates.shape[0]

        acq_configs = []
        for i in range(acq_vals.shape[0]):
            # convert array to Configuration todo
            config = Configuration(self.config_space, vector=candidates[i])
            acq_configs.append((acq_vals[i], config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
Code Example #18
class mqMFES(mqBaseFacade):
    """
    MFES-HB: https://arxiv.org/abs/2012.03011
    """
    def __init__(self, objective_func,
                 config_space: ConfigurationSpace,
                 R,
                 eta=3,
                 num_iter=10000,
                 rand_prob=0.3,
                 init_weight=None, update_enable=True,
                 weight_method='rank_loss_p_norm', fusion_method='idp',
                 power_num=3,
                 random_state=1,
                 method_id='mqMFES',
                 restart_needed=True,
                 time_limit_per_trial=600,
                 runtime_limit=None,
                 ip='',
                 port=13579,
                 authkey=b'abc',):
        max_queue_len = 3 * R  # conservative design
        super().__init__(objective_func, method_name=method_id,
                         restart_needed=restart_needed, time_limit_per_trial=time_limit_per_trial,
                         runtime_limit=runtime_limit,
                         max_queue_len=max_queue_len, ip=ip, port=port, authkey=authkey)
        self.seed = random_state
        self.config_space = config_space
        self.config_space.seed(self.seed)

        self.R = R
        self.eta = eta
        self.logeta = lambda x: log(x) / log(self.eta)
        self.s_max = int(self.logeta(self.R))
        self.B = (self.s_max + 1) * self.R
        self.num_iter = num_iter

        self.update_enable = update_enable
        self.fusion_method = fusion_method
        # Parameter for weight method `rank_loss_p_norm`.
        self.power_num = power_num
        # Specify the weight learning method.
        self.weight_method = weight_method
        self.weight_update_id = 0
        self.weight_changed_cnt = 0

        if init_weight is None:
            init_weight = [0.]
            init_weight.extend([1. / self.s_max] * self.s_max)
        assert len(init_weight) == (self.s_max + 1)
        self.logger.info('Weight method & flag: %s-%s' % (self.weight_method, str(self.update_enable)))
        self.logger.info("Initial weight is: %s" % init_weight[:self.s_max + 1])
        types, bounds = get_types(config_space)

        self.weighted_surrogate = WeightedRandomForestCluster(
            types, bounds, self.s_max, self.eta, init_weight, self.fusion_method
        )
        self.acquisition_function = EI(model=self.weighted_surrogate)

        self.incumbent_configs = []
        self.incumbent_perfs = []

        self.iterate_id = 0
        self.iterate_r = []
        self.hist_weights = list()

        # Saving evaluation statistics in Hyperband.
        self.target_x = dict()
        self.target_y = dict()
        for index, item in enumerate(np.logspace(0, self.s_max, self.s_max + 1, base=self.eta)):
            r = int(item)
            self.iterate_r.append(r)
            self.target_x[r] = []
            self.target_y[r] = []

        # BO optimizer settings.
        self.configs = list()
        self.history_container = HistoryContainer(task_id=self.method_name)
        self.sls_max_steps = None
        self.n_sls_iterations = 5
        self.sls_n_steps_plateau_walk = 10
        self.rng = np.random.RandomState(seed=self.seed)
        self.acq_optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function=self.acquisition_function,
            config_space=self.config_space,
            rng=self.rng,
            max_steps=self.sls_max_steps,
            n_steps_plateau_walk=self.sls_n_steps_plateau_walk,
            n_sls_iterations=self.n_sls_iterations,
            rand_prob=0.0,
        )
        self.random_configuration_chooser = ChooserProb(prob=rand_prob, rng=self.rng)

    def iterate(self, skip_last=0):

        for s in reversed(range(self.s_max + 1)):

            if self.update_enable and self.weight_update_id > self.s_max:
                self.update_weight()
            self.weight_update_id += 1

            # Set initial number of configurations
            n = int(ceil(self.B / self.R / (s + 1) * self.eta ** s))
            # initial number of iterations per config
            r = int(self.R * self.eta ** (-s))

            # Choose a batch of configurations in different mechanisms.
            start_time = time.time()
            T = self.choose_next(n)
            time_elapsed = time.time() - start_time
            self.logger.info("[%s] Choosing next configurations took %.2f sec." % (self.method_name, time_elapsed))

            extra_info = None
            last_run_num = None

            for i in range((s + 1) - int(skip_last)):  # changed from s + 1

                # Run each of the n configs for <iterations>
                # and keep best (n_configs / eta) configurations

                n_configs = n * self.eta ** (-i)
                n_iteration = r * self.eta ** (i)

                n_iter = n_iteration
                if last_run_num is not None and not self.restart_needed:
                    n_iter -= last_run_num
                last_run_num = n_iteration

                self.logger.info("%s: %d configurations x %d iterations each" %
                                 (self.method_name, int(n_configs), int(n_iteration)))

                ret_val, early_stops = self.run_in_parallel(T, n_iter, extra_info)
                val_losses = [item['loss'] for item in ret_val]
                ref_list = [item['ref_id'] for item in ret_val]

                self.target_x[int(n_iteration)].extend(T)
                self.target_y[int(n_iteration)].extend(val_losses)

                if int(n_iteration) == self.R:
                    self.incumbent_configs.extend(T)
                    self.incumbent_perfs.extend(val_losses)
                    # Update history container.
                    for _config, _perf in zip(T, val_losses):
                        self.history_container.add(_config, _perf)

                # Select a number of best configurations for the next loop.
                # Filter out early stops, if any.
                indices = np.argsort(val_losses)
                if len(T) == sum(early_stops):
                    break
                if len(T) >= self.eta:
                    indices = [i for i in indices if not early_stops[i]]
                    T = [T[i] for i in indices]
                    extra_info = [ref_list[i] for i in indices]
                    reduced_num = int(n_configs / self.eta)
                    T = T[0:reduced_num]
                    extra_info = extra_info[0:reduced_num]
                else:
                    T = [T[indices[0]]]     # TODO: confirm whether early-stopped configs should also be filtered here
                    extra_info = [ref_list[indices[0]]]
                val_losses = [val_losses[i] for i in indices][0:len(T)]  # keep losses sorted and aligned with T
                incumbent_loss = val_losses[0]
                self.add_stage_history(self.stage_id, min(self.global_incumbent, incumbent_loss))
                self.stage_id += 1
            # self.remove_immediate_model()

            for item in self.iterate_r[self.iterate_r.index(r):]:
                # Normalize objective values.
                normalized_y = std_normalization(self.target_y[item])
                self.weighted_surrogate.train(convert_configurations_to_array(self.target_x[item]),
                                              np.array(normalized_y, dtype=np.float64), r=item)

    def run(self, skip_last=0):
        try:
            for iter_num in range(1, 1 + self.num_iter):
                self.logger.info('-' * 50)
                self.logger.info("%s algorithm: %d/%d iteration starts" % (self.method_name, iter_num, self.num_iter))
                start_time = time.time()
                self.iterate(skip_last=skip_last)
                time_elapsed = (time.time() - start_time) / 60
                self.logger.info("%d/%d-Iteration took %.2f min." % (iter_num, self.num_iter, time_elapsed))
                self.iterate_id += 1
                self.save_intemediate_statistics()
        except Exception as e:
            self.logger.exception(str(e))
            # Clean the intermediate results.
            # self.remove_immediate_model()

    def get_bo_candidates(self, num_configs):
        # TODO: support parallel candidate generation.
        std_incumbent_value = np.min(std_normalization(self.target_y[self.iterate_r[-1]]))
        # Update surrogate model in acquisition function.
        self.acquisition_function.update(model=self.weighted_surrogate, eta=std_incumbent_value,
                                         num_data=len(self.history_container.data))

        challengers = self.acq_optimizer.maximize(
            runhistory=self.history_container,
            num_points=5000,
        )
        return challengers.challengers[:num_configs]

    def choose_next(self, num_config):
        if len(self.target_y[self.iterate_r[-1]]) == 0:
            configs = sample_configurations(self.config_space, num_config)
            self.configs.extend(configs)
            return configs

        config_candidates = list()
        acq_configs = self.get_bo_candidates(num_configs=2 * num_config)
        acq_idx = 0
        for idx in range(1, 1 + 2 * num_config):
            # Like BOHB, sample a fixed percentage of random configurations.
            if self.random_configuration_chooser.check(idx):
                _config = self.config_space.sample_configuration()
            else:
                _config = acq_configs[acq_idx]
                acq_idx += 1
            if _config not in config_candidates:
                config_candidates.append(_config)
            if len(config_candidates) >= num_config:
                break

        if len(config_candidates) < num_config:
            config_candidates = expand_configurations(config_candidates, self.config_space, num_config)

        _config_candidates = []
        for config in config_candidates:
            if config not in self.configs:  # Check if evaluated
                _config_candidates.append(config)
        self.configs.extend(_config_candidates)
        return _config_candidates

    @staticmethod
    def calculate_ranking_loss(y_pred, y_true):
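        # Pairwise logistic ranking loss: every pair (i, j) with
        # y_true[j] > y_true[i] contributes log(1 + exp(-(y_pred[j] - y_pred[i]))),
        # summed and divided by the number of samples.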
        length = len(y_pred)
        y_pred = np.reshape(y_pred, -1)
        y_pred1 = np.tile(y_pred, (length, 1))
        y_pred2 = np.transpose(y_pred1)
        diff = y_pred1 - y_pred2
        y_true = np.reshape(y_true, -1)
        y_true1 = np.tile(y_true, (length, 1))
        y_true2 = np.transpose(y_true1)
        y_mask = (y_true1 - y_true2 > 0) + 0
        loss = np.sum(np.log(1 + np.exp(-diff)) * y_mask) / length
        return loss

    @staticmethod
    def calculate_preserving_order_num(y_pred, y_true):
        array_size = len(y_pred)
        assert len(y_true) == array_size

        total_pair_num, order_preserving_num = 0, 0
        for idx in range(array_size):
            for inner_idx in range(idx + 1, array_size):
                if bool(y_true[idx] > y_true[inner_idx]) == bool(y_pred[idx] > y_pred[inner_idx]):
                    order_preserving_num += 1
                total_pair_num += 1
        return order_preserving_num, total_pair_num

    def update_weight(self):
        start_time = time.time()

        max_r = self.iterate_r[-1]
        incumbent_configs = self.target_x[max_r]
        test_x = convert_configurations_to_array(incumbent_configs)
        test_y = np.array(self.target_y[max_r], dtype=np.float64)

        r_list = self.weighted_surrogate.surrogate_r
        K = len(r_list)

        if len(test_y) >= 3:
            if self.weight_method == 'rank_loss_p_norm':
                preserving_order_p = list()
                preserving_order_nums = list()
                for i, r in enumerate(r_list):
                    fold_num = 5
                    if i != K - 1:
                        mean, var = self.weighted_surrogate.surrogate_container[r].predict(test_x)
                        tmp_y = np.reshape(mean, -1)
                        preorder_num, pair_num = self.calculate_preserving_order_num(tmp_y, test_y)
                        preserving_order_p.append(preorder_num / pair_num)
                        preserving_order_nums.append(preorder_num)
                    else:
                        if len(test_y) < 2 * fold_num:
                            preserving_order_p.append(0)
                        else:
                            # 5-fold cross validation.
                            kfold = KFold(n_splits=fold_num)
                            cv_pred = np.zeros(len(test_y))  # float array: an int array would truncate predictions
                            for train_idx, valid_idx in kfold.split(test_x):
                                train_configs, train_y = test_x[train_idx], test_y[train_idx]
                                valid_configs, valid_y = test_x[valid_idx], test_y[valid_idx]
                                types, bounds = get_types(self.config_space)
                                _surrogate = RandomForestWithInstances(types=types, bounds=bounds)
                                _surrogate.train(train_configs, train_y)
                                pred, _ = _surrogate.predict(valid_configs)
                                cv_pred[valid_idx] = pred.reshape(-1)
                            preorder_num, pair_num = self.calculate_preserving_order_num(cv_pred, test_y)
                            preserving_order_p.append(preorder_num / pair_num)
                            preserving_order_nums.append(preorder_num)

                trans_order_weight = np.array(preserving_order_p)
                power_sum = np.sum(np.power(trans_order_weight, self.power_num))
                new_weights = np.power(trans_order_weight, self.power_num) / power_sum

            elif self.weight_method == 'rank_loss_prob':
                # For basic surrogate i=1:K-1.
                mean_list, var_list = list(), list()
                for i, r in enumerate(r_list[:-1]):
                    mean, var = self.weighted_surrogate.surrogate_container[r].predict(test_x)
                    mean_list.append(np.reshape(mean, -1))
                    var_list.append(np.reshape(var, -1))
                sample_num = 100
                min_probability_array = [0] * K
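                # Monte Carlo estimate: in each round, sample predictions from every
                # surrogate's posterior; the surrogate that preserves the most pair
                # orders wins the round, and the weights are the empirical win rates.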
                for _ in range(sample_num):
                    order_preserving_nums = list()

                    # For basic surrogates i=1:K-1.
                    for idx in range(K - 1):
                        # rng.normal expects a standard deviation, so take the sqrt of the variance.
                        sampled_y = self.rng.normal(mean_list[idx], np.sqrt(var_list[idx]))
                        _num, _ = self.calculate_preserving_order_num(sampled_y, test_y)
                        order_preserving_nums.append(_num)

                    fold_num = 5
                    # For basic surrogate i=K. cv
                    if len(test_y) < 2 * fold_num:
                        order_preserving_nums.append(0)
                    else:
                        # 5-fold cross validation.
                        kfold = KFold(n_splits=fold_num)
                        cv_pred = np.zeros(len(test_y))  # float array: an int array would truncate predictions
                        for train_idx, valid_idx in kfold.split(test_x):
                            train_configs, train_y = test_x[train_idx], test_y[train_idx]
                            valid_configs, valid_y = test_x[valid_idx], test_y[valid_idx]
                            types, bounds = get_types(self.config_space)
                            _surrogate = RandomForestWithInstances(types=types, bounds=bounds)
                            _surrogate.train(train_configs, train_y)
                            _pred, _var = _surrogate.predict(valid_configs)
                            sampled_pred = self.rng.normal(_pred.reshape(-1), np.sqrt(_var.reshape(-1)))
                            cv_pred[valid_idx] = sampled_pred
                        _num, _ = self.calculate_preserving_order_num(cv_pred, test_y)
                        order_preserving_nums.append(_num)
                    max_id = np.argmax(order_preserving_nums)
                    min_probability_array[max_id] += 1
                new_weights = np.array(min_probability_array) / sample_num
            else:
                raise ValueError('Invalid weight method: %s!' % self.weight_method)
        else:
            # Too few observations: keep the previous weights unchanged.
            old_weights = list()
            for i, r in enumerate(r_list):
                _weight = self.weighted_surrogate.surrogate_weight[r]
                old_weights.append(_weight)
            new_weights = old_weights.copy()

        self.logger.info('[%s] %d-th weight update: %s' % (
            self.weight_method, self.weight_changed_cnt, str(new_weights)))

        # Assign the weight to each basic surrogate.
        for i, r in enumerate(r_list):
            self.weighted_surrogate.surrogate_weight[r] = new_weights[i]
        self.weight_changed_cnt += 1
        # Save the weight data.
        self.hist_weights.append(new_weights)
        dir_path = os.path.join(self.data_directory, 'saved_weights')
        file_name = 'mfes_weights_%s.npy' % (self.method_name,)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        np.save(os.path.join(dir_path, file_name), np.asarray(self.hist_weights))
        self.logger.info('update_weight() took %.2fs. New weights are saved to %s'
                         % (time.time() - start_time, os.path.join(dir_path, file_name)))

    def get_incumbent(self, num_inc=1):
        assert (len(self.incumbent_perfs) == len(self.incumbent_configs))
        indices = np.argsort(self.incumbent_perfs)
        configs = [self.incumbent_configs[i] for i in indices[0:num_inc]]
        perfs = [self.incumbent_perfs[i] for i in indices[0: num_inc]]
        return configs, perfs

    def get_weights(self):
        return self.hist_weights
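
The 'rank_loss_p_norm' weighting used above can be illustrated in isolation. A minimal sketch, assuming two toy surrogates and hand-made predictions (names and numbers below are illustrative, not part of the library):

import numpy as np

def preserving_order_ratio(y_pred, y_true):
    # Fraction of pairs whose ordering the prediction preserves,
    # mirroring calculate_preserving_order_num above.
    ok, total = 0, 0
    for i in range(len(y_pred)):
        for j in range(i + 1, len(y_pred)):
            ok += (y_true[i] > y_true[j]) == (y_pred[i] > y_pred[j])
            total += 1
    return ok / total

y_true = [0.1, 0.5, 0.3, 0.9]
ratios = np.array([
    preserving_order_ratio([0.2, 0.4, 0.5, 0.8], y_true),  # low-fidelity surrogate
    preserving_order_ratio([0.1, 0.5, 0.3, 0.9], y_true),  # full-fidelity surrogate
])
power_num = 3
weights = ratios ** power_num / np.sum(ratios ** power_num)
print(weights)  # the better-ordered surrogate receives the larger weight
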
Code example #19
class InterleavedLocalAndRandomSearch(AcquisitionFunctionMaximizer):
    """Implements openbox's default acquisition function optimization.

    This acq_maximizer performs local search from the previous best points
    according to the acquisition function, uses the acquisition function to
    sort randomly sampled configurations, and interleaves unsorted, randomly
    sampled configurations in between.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional

    max_steps: int
        [LocalSearch] Maximum number of steps that the local search will perform

    n_steps_plateau_walk: int
        [LocalSearch] Number of steps during a plateau walk before the local search terminates

    n_sls_iterations: int
        [LocalSearch] Number of local search iterations

    """
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 max_steps: Optional[int] = None,
                 n_steps_plateau_walk: int = 10,
                 n_sls_iterations: int = 10,
                 rand_prob=0.25):
        super().__init__(acquisition_function, config_space, rng)
        self.random_search = RandomSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
        self.local_search = LocalSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng,
            max_steps=max_steps,
            n_steps_plateau_walk=n_steps_plateau_walk)
        self.n_sls_iterations = n_sls_iterations
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        # =======================================================================
        # self.local_search = DiffOpt(
        #     acquisition_function=acquisition_function,
        #     config_space=config_space,
        #     rng=rng
        # )
        # =======================================================================

    def maximize(self,
                 runhistory: HistoryContainer,
                 num_points: int,
                 random_configuration_chooser=None,
                 **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        random_configuration_chooser: ~openbox.acq_maximizer.random_configuration_chooser.RandomConfigurationChooser
            part of the returned ChallengerList such
            that we can interleave random configurations
            by a scheme defined by the random_configuration_chooser;
            random_configuration_chooser.next_smbo_iteration()
            is called at the end of this function
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~openbox.ei_optimization.ChallengerList
        """

        next_configs_by_local_search = self.local_search._maximize(
            runhistory, self.n_sls_iterations, **kwargs)

        # Get configurations sorted by EI
        next_configs_by_random_search_sorted = self.random_search._maximize(
            runhistory,
            num_points - len(next_configs_by_local_search),
            _sorted=True,
        )

        # Having the configurations from random search sorted by their
        # acquisition function value is important for the first few iterations
        # of openbox: as long as the random forest predicts a constant value,
        # we want to use only random configurations. Putting them at the
        # beginning of the list ensures this (even after adding the
        # configurations found by local search and re-sorting).
        next_configs_by_acq_value = (next_configs_by_random_search_sorted +
                                     next_configs_by_local_search)
        next_configs_by_acq_value.sort(reverse=True, key=lambda x: x[0])
        self.logger.debug(
            "First 10 acq func (origin) values of selected configurations: %s",
            str([[_[0], _[1].origin] for _ in next_configs_by_acq_value[:10]]))
        next_configs_by_acq_value = [_[1] for _ in next_configs_by_acq_value]

        challengers = ChallengerList(next_configs_by_acq_value,
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
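
A minimal sketch of the merge-and-sort step inside maximize(), using plain (acq_value, label) tuples in place of real Configuration objects:

next_by_local = [(0.9, 'local_a'), (0.4, 'local_b')]
next_by_random_sorted = [(0.7, 'rand_a'), (0.1, 'rand_b')]
merged = next_by_random_sorted + next_by_local
merged.sort(reverse=True, key=lambda x: x[0])
print([label for _, label in merged])  # ['local_a', 'rand_a', 'local_b', 'rand_b']
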
Code example #20
class CMAESOptimizer(AcquisitionFunctionMaximizer):
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rng: Union[bool, np.random.RandomState] = None,
        rand_prob=0.25,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()

    def maximize(self, runhistory: HistoryContainer, num_points: int,
                 **kwargs) -> Iterable[Tuple[float, Configuration]]:
        try:
            from cma import CMAEvolutionStrategy
        except ImportError:
            raise ImportError("Package cma is not installed!")

        types, bounds = get_types(self.config_space)
        assert all(types == 0)

        # Identify constant hyperparameters: get_types marks their bounds with NaN.
        const_idx = list()
        for i, bound in enumerate(bounds):
            if np.isnan(bound[1]):
                const_idx.append(i)

        hp_num = len(bounds) - len(const_idx)
        es = CMAEvolutionStrategy(hp_num * [0],
                                  0.99,
                                  inopts={'bounds': [0, 1]})

        eval_num = 0
        next_configs_by_acq_value = list()
        while eval_num < num_points:
            X = es.ask(number=es.popsize)
            _X = X.copy()
            for i in range(len(_X)):
                for index in const_idx:
                    _X[i] = np.insert(_X[i], index, 0)
            _X = np.asarray(_X)
            values = self.acquisition_function._compute(_X)
            values = np.reshape(values, (-1, ))
            es.tell(X, (-values).tolist())  # CMA-ES minimizes, so negate the acquisition values to maximize
            next_configs_by_acq_value.extend([(values[i], _X[i])
                                              for i in range(es.popsize)])
            eval_num += es.popsize

        next_configs_by_acq_value.sort(reverse=True, key=lambda x: x[0])
        next_configs_by_acq_value = [_[1] for _ in next_configs_by_acq_value]
        next_configs_by_acq_value = [
            Configuration(self.config_space, vector=array)
            for array in next_configs_by_acq_value
        ]

        challengers = ChallengerList(next_configs_by_acq_value,
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
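
The class above relies on the standard ask/tell interface of the cma package. A minimal, self-contained sketch (minimizing a 2-D sphere function instead of a negated acquisition value), assuming cma is installed:

import numpy as np
from cma import CMAEvolutionStrategy

es = CMAEvolutionStrategy(2 * [0.5], 0.3, inopts={'bounds': [0, 1], 'verbose': -9})
for _ in range(10):
    X = es.ask()                                            # sample a population
    es.tell(X, [float(np.sum(np.square(x))) for x in X])    # report objective values
print(es.result.xbest)                                      # best point found
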
Code example #21
class batchMCOptimizer(AcquisitionFunctionMaximizer):
    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 batch_size=None,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        if batch_size is None:
            types, bounds = get_types(self.config_space)
            dim = np.sum(types == 0)
            self.batch_size = min(5000, max(2000, 200 * dim))
        else:
            self.batch_size = batch_size

    def maximize(self,
                 runhistory: Union[HistoryContainer,
                                   MultiStartHistoryContainer],
                 num_points: int,
                 _sorted: bool = True,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        """Randomly sampled configurations

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        _sorted: bool
            whether random configurations are sorted according to acquisition function
        **kwargs
            turbo_state: TurboState
                provide turbo state to use trust region

        Returns
        -------
        iterable
            An iterable consisting of
            tuple(acquisition_value, :class:`openbox.config_space.Configuration`).
        """
        from openbox.utils.samplers import SobolSampler

        cur_idx = 0
        config_acq = list()
        weight_seed = self.rng.randint(
            0, int(1e8))  # Use the same weight seed for every batch within this call

        while cur_idx < num_points:
            batch_size = min(self.batch_size, num_points - cur_idx)
            turbo_state = kwargs.get('turbo_state', None)
            if turbo_state is None:
                lower_bounds = None
                upper_bounds = None
            else:
                assert isinstance(runhistory, MultiStartHistoryContainer)
                if runhistory.num_objs > 1:
                    # TODO implement adaptive strategy to choose trust region center for MO
                    raise NotImplementedError()
                else:
                    x_center = random.choice(
                        runhistory.get_incumbents())[0].get_array()
                    lower_bounds = x_center - turbo_state.length / 2.0
                    upper_bounds = x_center + turbo_state.length / 2.0

            sobol_sampler = SobolSampler(self.config_space,
                                         batch_size,
                                         lower_bounds,
                                         upper_bounds,
                                         random_state=self.rng.randint(
                                             0, int(1e8)))
            _configs = sobol_sampler.generate(return_config=True)
            _acq_values = self.acquisition_function(_configs, seed=weight_seed)
            config_acq.extend([(_configs[idx], _acq_values[idx])
                               for idx in range(len(_configs))])

            cur_idx += batch_size

        config_acq.sort(reverse=True, key=lambda x: x[1])

        challengers = ChallengerList([_[0] for _ in config_acq],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
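
A hedged sketch of the batch Monte-Carlo idea above, using scipy's Sobol sampler in place of openbox's internal SobolSampler and a toy acquisition function:

import numpy as np
from scipy.stats import qmc

def toy_acq(x):
    # Stand-in acquisition function: higher is better, peaked at 0.3 in every dimension.
    return -np.sum((x - 0.3) ** 2, axis=1)

sampler = qmc.Sobol(d=2, scramble=True, seed=42)
batch = sampler.random(1024)              # quasi-random points in the unit hypercube
scores = toy_acq(batch)
top5 = batch[np.argsort(-scores)[:5]]     # the five best candidates by acquisition value
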
Code example #22
class RandomScipyOptimizer(AcquisitionFunctionMaximizer):
    """
    Use scipy.optimize with start points chosen by random search. Only continuous dimensions are optimized.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional
    """
    def __init__(
        self,
        acquisition_function: AbstractAcquisitionFunction,
        config_space: ConfigurationSpace,
        rand_prob: float = 0.0,
        rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)

        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        self.random_search = InterleavedLocalAndRandomSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
        self.scipy_optimizer = ScipyOptimizer(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)

    def maximize(self,
                 runhistory: HistoryContainer,
                 num_points: int,
                 num_trials=10,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        acq_configs = []

        initial_configs = self.random_search.maximize(runhistory, num_points,
                                                      **kwargs).challengers
        initial_acqs = self.acquisition_function(initial_configs)
        acq_configs.extend(zip(initial_acqs, initial_configs))

        success_count = 0
        for config in initial_configs[:num_trials]:
            scipy_configs = self.scipy_optimizer.maximize(
                runhistory, initial_config=config).challengers
            if not scipy_configs:  # empty
                continue
            scipy_acqs = self.acquisition_function(scipy_configs)
            acq_configs.extend(zip(scipy_acqs, scipy_configs))
            success_count += 1
        if success_count == 0:
            self.logger.warning(
                'None of the scipy optimizations succeeded in RandomScipyOptimizer.'
            )

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
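
A minimal sketch of the random-start-then-polish pattern implemented above, assuming a toy acquisition function: scipy.optimize.minimize is run on the negated acquisition, starting from the best random sample.

import numpy as np
from scipy.optimize import minimize

rng = np.random.RandomState(0)

def acq(x):
    # Toy acquisition function with its maximum at (0.6, 0.6).
    return np.exp(-np.sum((x - 0.6) ** 2))

starts = rng.uniform(0.0, 1.0, size=(50, 2))          # random-search phase
x0 = starts[np.argmax([acq(x) for x in starts])]      # best starting point
res = minimize(lambda x: -acq(x), x0, bounds=[(0.0, 1.0)] * 2, method='L-BFGS-B')
print(res.x)                                          # polished point, close to (0.6, 0.6)
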
Code example #23
File: async_mq_mfes.py Project: PKU-DAIR/open-box
    def __init__(self,
                 objective_func,
                 config_space: ConfigurationSpace,
                 R,
                 eta=3,
                 skip_outer_loop=0,
                 rand_prob=0.3,
                 use_bohb=False,
                 init_weight=None,
                 update_enable=True,
                 weight_method='rank_loss_p_norm',
                 fusion_method='idp',
                 power_num=3,
                 random_state=1,
                 method_id='mqAsyncMFES',
                 restart_needed=True,
                 time_limit_per_trial=600,
                 runtime_limit=None,
                 seed=1,
                 ip='',
                 port=13579,
                 authkey=b'abc'):
        super().__init__(objective_func,
                         config_space,
                         R,
                         eta=eta,
                         skip_outer_loop=skip_outer_loop,
                         random_state=random_state,
                         method_id=method_id,
                         restart_needed=restart_needed,
                         time_limit_per_trial=time_limit_per_trial,
                         runtime_limit=runtime_limit,
                         ip=ip,
                         port=port,
                         authkey=authkey)
        self.seed = seed
        self.last_n_iteration = None
        self.use_bohb_strategy = use_bohb
        self.update_enable = update_enable
        self.fusion_method = fusion_method
        # Parameter for weight method `rank_loss_p_norm`.
        self.power_num = power_num
        # Specify the weight learning method.
        self.weight_method = weight_method
        self.weight_update_id = 0
        self.weight_changed_cnt = 0

        if init_weight is None:
            init_weight = [1. / self.s_max] * self.s_max + [0.]
        assert len(init_weight) == (self.s_max + 1)
        self.logger.info("Initialize weight to %s" %
                         init_weight[:self.s_max + 1])
        types, bounds = get_types(config_space)

        if not self.use_bohb_strategy:
            self.surrogate = RandomForestEnsemble(types, bounds, self.s_max,
                                                  self.eta, init_weight,
                                                  self.fusion_method)
        else:
            self.surrogate = RandomForestWithInstances(types,
                                                       bounds,
                                                       seed=self.seed)
        self.acquisition_function = EI(model=self.surrogate)

        self.iterate_id = 0
        self.iterate_r = list()
        self.hist_weights = list()

        # Saving evaluation statistics in Hyperband.
        self.target_x = dict()
        self.target_y = dict()
        for item in np.logspace(0, self.s_max, self.s_max + 1, base=self.eta):
            r = int(item)
            self.iterate_r.append(r)
            self.target_x[r] = list()
            self.target_y[r] = list()

        # BO optimizer settings.
        self.history_container = HistoryContainer(task_id=self.method_name)
        self.sls_max_steps = None
        self.n_sls_iterations = 5
        self.sls_n_steps_plateau_walk = 10
        self.rng = np.random.RandomState(seed=self.seed)
        self.acq_optimizer = InterleavedLocalAndRandomSearch(
            acquisition_function=self.acquisition_function,
            config_space=self.config_space,
            rng=self.rng,
            max_steps=self.sls_max_steps,
            n_steps_plateau_walk=self.sls_n_steps_plateau_walk,
            n_sls_iterations=self.n_sls_iterations,
            rand_prob=0.0,
        )
        self.random_configuration_chooser = ChooserProb(prob=rand_prob,
                                                        rng=self.rng)
        self.random_check_idx = 0
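
Note that the default initial weights here differ from the synchronous variant shown earlier: there the zero weight goes to the first entry, here to the last. A small illustration for s_max = 4:

s_max = 4
sync_init = [0.] + [1. / s_max] * s_max     # [0.0, 0.25, 0.25, 0.25, 0.25]
async_init = [1. / s_max] * s_max + [0.]    # [0.25, 0.25, 0.25, 0.25, 0.0]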