Ejemplo n.º 1
0
    def __init__(
        self,
        objective_func,
        config_space: ConfigurationSpace,
        R,
        eta=3,
        num_iter=10000,
        rand_prob=0.3,
        bo_init_num=3,
        random_state=1,
        method_id='mqBOHB',
        restart_needed=True,
        time_limit_per_trial=600,
        runtime_limit=None,
        ip='',
        port=13579,
        authkey=b'abc',
    ):
        """Set up a BOHB-style optimizer on top of the Hyperband parent.

        Args:
            objective_func: Callable evaluated on sampled configurations.
            config_space (ConfigurationSpace): Search space for the advisor.
            R: Maximum resource/budget, forwarded to the Hyperband parent.
            eta: Halving ratio, forwarded to the parent.
            num_iter: Number of Hyperband iterations, forwarded to the parent.
            rand_prob: Fraction of each batch sampled at random rather than
                by BO (consumed here, not forwarded).
            bo_init_num: Number of initial trials for the BO advisor
                (consumed here, not forwarded).
            random_state: Seed forwarded to both parent and advisor.
            method_id: Identifier forwarded to the parent.
            restart_needed, time_limit_per_trial, runtime_limit: Trial
                control options forwarded to the parent.
            ip, port, authkey: Messager connection settings forwarded to
                the parent.
        """
        # Bracket scheduling, messaging and logging come from the parent;
        # this constructor only adds the BO suggestion machinery.
        super().__init__(objective_func,
                         config_space,
                         R,
                         eta=eta,
                         num_iter=num_iter,
                         random_state=random_state,
                         method_id=method_id,
                         restart_needed=restart_needed,
                         time_limit_per_trial=time_limit_per_trial,
                         runtime_limit=runtime_limit,
                         ip=ip,
                         port=port,
                         authkey=authkey)

        self.rand_prob = rand_prob
        self.bo_init_num = bo_init_num
        # Fixed single-objective, unconstrained task description.
        task_info = {'num_constraints': 0, 'num_objs': 1}
        # using median_imputation batch_strategy implemented in OpenBox to generate BO suggestions
        self.config_advisor = SyncBatchAdvisor(
            config_space,
            task_info,
            # NOTE(review): batch_size is presumably assigned per round
            # before get_suggestions() is called — confirm at call sites.
            batch_size=None,
            batch_strategy='median_imputation',
            initial_trials=self.bo_init_num,
            init_strategy='random_explore_first',
            optimization_strategy='bo',
            surrogate_type='prf',
            acq_type='ei',
            acq_optimizer_type='local_random',
            task_id=self.method_name,
            output_dir=self.log_directory,
            random_state=random_state,
        )
        # Random interleaving is governed by self.rand_prob in this class,
        # so the advisor's own internal random sampling is disabled.
        self.config_advisor.optimizer.rand_prob = 0.0
Ejemplo n.º 2
0
    def __init__(
        self,
        objective_function,
        config_space,
        eval_type='holdout',
        parallel_strategy='async',
        batch_size=4,
        batch_strategy='median_imputation',
        num_constraints=0,
        num_objs=1,
        sample_strategy: str = 'bo',
        runtime_limit=600,
        time_limit_per_trial=180,
        surrogate_type=None,
        acq_type=None,
        acq_optimizer_type='local_random',
        initial_runs=3,
        init_strategy='random_explore_first',
        initial_configurations=None,
        ref_point=None,
        history_bo_data: List[OrderedDict] = None,
        logging_dir='logs',
        task_id='default',
        random_state=1,
        ip="",
        port=13579,
        authkey=b'abc',
    ):
        """Master-side parallel BO optimizer.

        Builds a sync or async batch advisor (per ``parallel_strategy``) and
        a master messager for distributing trials to workers.

        Args:
            objective_function: Callable evaluated on configurations.
            config_space: Search space passed to parent and advisor.
            eval_type: Evaluation mode tag stored on the instance.
            parallel_strategy: 'sync' or 'async'; selects the advisor class.
            batch_size / batch_strategy: Batch suggestion settings for the
                advisor; batch_size also sizes the message queues.
            num_constraints / num_objs: Task dimensions collected into
                ``self.task_info`` for the advisor.
            sample_strategy: Optimization strategy forwarded to parent and
                advisor.
            runtime_limit / time_limit_per_trial: Wall-clock budgets
                forwarded to the parent.
            surrogate_type, acq_type, acq_optimizer_type, initial_runs,
                init_strategy, initial_configurations, ref_point,
                history_bo_data: Advisor configuration, forwarded as-is.
            logging_dir, task_id, random_state: Bookkeeping settings.
            ip, port, authkey: MasterMessager connection settings.

        Raises:
            ValueError: If ``parallel_strategy`` is neither 'sync' nor
                'async'.
        """

        self.task_info = {
            'num_constraints': num_constraints,
            'num_objs': num_objs
        }
        # Sentinel performance vector recorded for failed trials.
        self.FAILED_PERF = [MAXINT] * num_objs
        # max_runs is effectively unlimited here; termination is governed
        # by runtime_limit instead.
        super().__init__(objective_function,
                         config_space,
                         task_id=task_id,
                         output_dir=logging_dir,
                         random_state=random_state,
                         initial_runs=initial_runs,
                         max_runs=int(1e10),
                         runtime_limit=runtime_limit,
                         sample_strategy=sample_strategy,
                         time_limit_per_trial=time_limit_per_trial,
                         history_bo_data=history_bo_data)
        if parallel_strategy == 'sync':
            self.config_advisor = SyncBatchAdvisor(
                config_space,
                self.task_info,
                batch_size=batch_size,
                batch_strategy=batch_strategy,
                initial_trials=initial_runs,
                initial_configurations=initial_configurations,
                init_strategy=init_strategy,
                history_bo_data=history_bo_data,
                optimization_strategy=sample_strategy,
                surrogate_type=surrogate_type,
                acq_type=acq_type,
                acq_optimizer_type=acq_optimizer_type,
                ref_point=ref_point,
                task_id=task_id,
                output_dir=logging_dir,
                random_state=random_state)
        elif parallel_strategy == 'async':
            self.config_advisor = AsyncBatchAdvisor(
                config_space,
                self.task_info,
                batch_size=batch_size,
                batch_strategy=batch_strategy,
                initial_trials=initial_runs,
                initial_configurations=initial_configurations,
                init_strategy=init_strategy,
                history_bo_data=history_bo_data,
                optimization_strategy=sample_strategy,
                surrogate_type=surrogate_type,
                acq_type=acq_type,
                acq_optimizer_type=acq_optimizer_type,
                ref_point=ref_point,
                task_id=task_id,
                output_dir=logging_dir,
                random_state=random_state)
        else:
            raise ValueError('Invalid parallel strategy - %s.' %
                             parallel_strategy)

        self.eval_type = eval_type
        self.parallel_strategy = parallel_strategy
        self.batch_size = batch_size
        # Queue capacity scales with batch size but never drops below 100.
        max_queue_len = max(100, 3 * batch_size)
        self.master_messager = MasterMessager(ip, port, authkey, max_queue_len,
                                              max_queue_len)
        self.start_time = time.time()

        # Trial bookkeeping, filled in as results arrive.
        self.configs = list()
        self.perfs = list()
        # NOTE(review): -inf as the initial incumbent implies perf is
        # compared with '>' somewhere; confirm against the update logic,
        # since losses are usually minimized.
        self.incumbent_perf = float("-INF")
        self.incumbent_config = self.config_space.get_default_configuration()
        self.eval_dict = dict()
        self.workers = dict()
Ejemplo n.º 3
0
class mqBOHB(mqHyperband):
    """BOHB master: Hyperband bracket scheduling combined with a Bayesian
    optimization advisor that proposes most of each batch.

    Paper: https://arxiv.org/abs/1807.01774
    """

    def __init__(
        self,
        objective_func,
        config_space: ConfigurationSpace,
        R,
        eta=3,
        num_iter=10000,
        rand_prob=0.3,
        bo_init_num=3,
        random_state=1,
        method_id='mqBOHB',
        restart_needed=True,
        time_limit_per_trial=600,
        runtime_limit=None,
        ip='',
        port=13579,
        authkey=b'abc',
    ):
        # The Hyperband parent provides bracket scheduling and messaging;
        # this subclass only adds the BO suggestion machinery.
        super().__init__(objective_func,
                         config_space,
                         R,
                         eta=eta,
                         num_iter=num_iter,
                         random_state=random_state,
                         method_id=method_id,
                         restart_needed=restart_needed,
                         time_limit_per_trial=time_limit_per_trial,
                         runtime_limit=runtime_limit,
                         ip=ip,
                         port=port,
                         authkey=authkey)

        self.rand_prob = rand_prob
        self.bo_init_num = bo_init_num

        # Fixed single-objective, unconstrained task description.
        single_obj_task = {'num_constraints': 0, 'num_objs': 1}
        # The median_imputation batch strategy from OpenBox produces the
        # batched BO suggestions.
        self.config_advisor = SyncBatchAdvisor(
            config_space,
            single_obj_task,
            batch_size=None,
            batch_strategy='median_imputation',
            initial_trials=self.bo_init_num,
            init_strategy='random_explore_first',
            optimization_strategy='bo',
            surrogate_type='prf',
            acq_type='ei',
            acq_optimizer_type='local_random',
            task_id=self.method_name,
            output_dir=self.log_directory,
            random_state=random_state,
        )
        # Random interleaving is handled by choose_next via rand_prob, so
        # the advisor's own internal random sampling is switched off.
        self.config_advisor.optimizer.rand_prob = 0.0

    def choose_next(self, num_config):
        """Return ``num_config`` configurations: a BO batch topped up with
        random samples according to ``self.rand_prob``."""
        self.logger.info('Sample %d configs in choose_next. rand_prob is %f.' %
                         (num_config, self.rand_prob))

        # The BO share of the batch is recomputed each round; the random
        # ratio itself stays fixed.
        n_random = int(num_config * self.rand_prob)
        self.config_advisor.batch_size = num_config - n_random
        # The advisor's initial random sampling may return more than
        # requested, so truncate to num_config.
        bo_configs = self.config_advisor.get_suggestions()[:num_config]
        self.logger.info('len bo configs = %d.' % len(bo_configs))

        # Fill the remainder of the batch with random configurations.
        configs = expand_configurations(bo_configs, self.config_space,
                                        num_config)
        self.logger.info('len total configs = %d.' % len(configs))
        assert len(configs) == num_config
        return configs

    def update_incumbent_before_reduce(self, T, val_losses, n_iteration):
        """Record full-budget results and feed them back to the BO advisor."""
        # Only observations evaluated at the full budget R train the model.
        if int(n_iteration) < self.R:
            return
        self.incumbent_configs.extend(T)
        self.incumbent_perfs.extend(val_losses)
        for config, perf in zip(T, val_losses):
            # Observation layout: (config, trial_state, constraints, objs).
            observation = (config, SUCCESS, None, [perf])
            self.config_advisor.update_observation(observation)
            self.logger.info('update observation: config=%s, perf=%f' %
                             (str(config), perf))
        self.logger.info(
            '%d observations updated. %d incumbent configs total.' %
            (len(T), len(self.incumbent_configs)))

    def update_incumbent_after_reduce(self, T, incumbent_loss):
        """No-op: incumbents are tracked before the reduce step instead."""
        return
Ejemplo n.º 4
0
    def __init__(
        self,
        objective_function,
        config_space,
        parallel_strategy='async',
        batch_size=4,
        batch_strategy='default',
        num_constraints=0,
        num_objs=1,
        sample_strategy: str = 'bo',
        max_runs=200,
        time_limit_per_trial=180,
        surrogate_type='auto',
        acq_type='auto',
        acq_optimizer_type='auto',
        initial_runs=3,
        init_strategy='random_explore_first',
        initial_configurations=None,
        ref_point=None,
        history_bo_data: List[OrderedDict] = None,
        logging_dir='logs',
        task_id='default_task_id',
        random_state=None,
        advisor_kwargs: dict = None,
        ip="",
        port=13579,
        authkey=b'abc',
    ):
        """Master-side parallel BO optimizer (newer advisor API).

        Unlike the task_info-dict variant, this version passes
        ``num_objs``/``num_constraints`` to the advisor as keyword arguments
        and supports extra advisor options via ``advisor_kwargs``.

        Args:
            objective_function: Callable evaluated on configurations.
            config_space: Search space passed to parent and advisor.
            parallel_strategy: 'sync' or 'async'; selects the advisor class.
            batch_size / batch_strategy: Batch suggestion settings; the
                batch size also scales the message-queue capacity.
            num_constraints / num_objs: Task dimensions for the advisor.
            sample_strategy: Optimization strategy forwarded to parent and
                advisor.
            max_runs / time_limit_per_trial: Budgets forwarded to parent.
            surrogate_type, acq_type, acq_optimizer_type, initial_runs,
                init_strategy, initial_configurations, ref_point,
                history_bo_data: Advisor configuration, forwarded as-is.
            logging_dir, task_id, random_state: Bookkeeping settings.
            advisor_kwargs: Optional extra keyword args expanded into the
                advisor constructor.
            ip, port, authkey: MasterMessager connection settings.

        Raises:
            ValueError: If ``task_id`` is None, or ``parallel_strategy``
                is neither 'sync' nor 'async'.
        """

        # Defensive check: the default is non-None, but an explicit None
        # from the caller is rejected up front.
        if task_id is None:
            raise ValueError(
                'Task id is not SPECIFIED. Please input task id first.')

        self.num_objs = num_objs
        self.num_constraints = num_constraints
        # Sentinel performance vector recorded for failed trials.
        self.FAILED_PERF = [MAXINT] * num_objs
        super().__init__(objective_function,
                         config_space,
                         task_id=task_id,
                         output_dir=logging_dir,
                         random_state=random_state,
                         initial_runs=initial_runs,
                         max_runs=max_runs,
                         sample_strategy=sample_strategy,
                         time_limit_per_trial=time_limit_per_trial,
                         history_bo_data=history_bo_data)

        self.parallel_strategy = parallel_strategy
        self.batch_size = batch_size
        # Queue capacity scales with batch size but never drops below 100.
        max_queue_len = max(100, 3 * batch_size)
        self.master_messager = MasterMessager(ip, port, authkey, max_queue_len,
                                              max_queue_len)

        advisor_kwargs = advisor_kwargs or {}
        if parallel_strategy == 'sync':
            self.config_advisor = SyncBatchAdvisor(
                config_space,
                num_objs=num_objs,
                num_constraints=num_constraints,
                batch_size=batch_size,
                batch_strategy=batch_strategy,
                initial_trials=initial_runs,
                initial_configurations=initial_configurations,
                init_strategy=init_strategy,
                history_bo_data=history_bo_data,
                optimization_strategy=sample_strategy,
                surrogate_type=surrogate_type,
                acq_type=acq_type,
                acq_optimizer_type=acq_optimizer_type,
                ref_point=ref_point,
                task_id=task_id,
                output_dir=logging_dir,
                random_state=random_state,
                **advisor_kwargs)
        elif parallel_strategy == 'async':
            self.config_advisor = AsyncBatchAdvisor(
                config_space,
                num_objs=num_objs,
                num_constraints=num_constraints,
                batch_size=batch_size,
                batch_strategy=batch_strategy,
                initial_trials=initial_runs,
                initial_configurations=initial_configurations,
                init_strategy=init_strategy,
                history_bo_data=history_bo_data,
                optimization_strategy=sample_strategy,
                surrogate_type=surrogate_type,
                acq_type=acq_type,
                acq_optimizer_type=acq_optimizer_type,
                ref_point=ref_point,
                task_id=task_id,
                output_dir=logging_dir,
                random_state=random_state,
                **advisor_kwargs)
        else:
            raise ValueError('Invalid parallel strategy - %s.' %
                             parallel_strategy)
Ejemplo n.º 5
0
 def __init__(
     self,
     task_id,
     config_space,
     parallel_strategy='async',
     batch_size=4,
     batch_strategy='median_imputation',
     num_constraints=0,
     num_objs=1,
     sample_strategy: str = 'bo',
     max_runs=200,
     time_limit_per_trial=180,
     surrogate_type=None,
     acq_type=None,
     acq_optimizer_type='local_random',
     initial_runs=3,
     init_strategy='random_explore_first',
     initial_configurations=None,
     ref_point=None,
     history_bo_data: List[OrderedDict] = None,
     logging_dir='logs',
     random_state=1,
 ):
     """Build a sync/async batch advisor and hand it to the parent.

     Unlike the sibling variants, this constructor has no objective
     function or messager: the advisor and total trial budget are simply
     forwarded to ``super().__init__`` after construction.

     Args:
         task_id: Identifier forwarded to the advisor and the parent.
         config_space: Search space for the advisor.
         parallel_strategy: 'sync' or 'async'; selects the advisor class.
         batch_size / batch_strategy: Batch suggestion settings.
         num_constraints / num_objs: Task dimensions collected into
             ``self.task_info``.
         sample_strategy: Optimization strategy for the advisor.
         max_runs: Total trial budget, stored as ``self.total_trials``.
         time_limit_per_trial: Accepted but unused in this constructor.
         surrogate_type, acq_type, acq_optimizer_type, initial_runs,
             init_strategy, initial_configurations, ref_point,
             history_bo_data: Advisor configuration, forwarded as-is.
         logging_dir, random_state: Bookkeeping settings.

     Raises:
         ValueError: If ``parallel_strategy`` is neither 'sync' nor
             'async'.
     """
     self.task_info = {
         'num_constraints': num_constraints,
         'num_objs': num_objs
     }
     if parallel_strategy == 'sync':
         self.config_advisor = SyncBatchAdvisor(
             config_space,
             self.task_info,
             batch_size=batch_size,
             batch_strategy=batch_strategy,
             initial_trials=initial_runs,
             initial_configurations=initial_configurations,
             init_strategy=init_strategy,
             history_bo_data=history_bo_data,
             optimization_strategy=sample_strategy,
             surrogate_type=surrogate_type,
             acq_type=acq_type,
             acq_optimizer_type=acq_optimizer_type,
             ref_point=ref_point,
             task_id=task_id,
             output_dir=logging_dir,
             random_state=random_state)
     elif parallel_strategy == 'async':
         self.config_advisor = AsyncBatchAdvisor(
             config_space,
             self.task_info,
             batch_size=batch_size,
             batch_strategy=batch_strategy,
             initial_trials=initial_runs,
             initial_configurations=initial_configurations,
             init_strategy=init_strategy,
             history_bo_data=history_bo_data,
             optimization_strategy=sample_strategy,
             surrogate_type=surrogate_type,
             acq_type=acq_type,
             acq_optimizer_type=acq_optimizer_type,
             ref_point=ref_point,
             task_id=task_id,
             output_dir=logging_dir,
             random_state=random_state)
     else:
         raise ValueError('Invalid parallel strategy - %s.' %
                          parallel_strategy)
     self.total_trials = max_runs
     # Parent receives the pre-built advisor and the trial budget.
     super().__init__(task_id, self.config_advisor, self.total_trials)
Ejemplo n.º 6
0
    def __init__(
        self,
        objective_function,
        config_space,
        parallel_strategy='async',
        batch_size=4,
        batch_strategy='median_imputation',
        num_constraints=0,
        num_objs=1,
        sample_strategy: str = 'bo',
        max_runs=200,
        time_limit_per_trial=180,
        surrogate_type=None,
        acq_type=None,
        acq_optimizer_type='local_random',
        initial_runs=3,
        init_strategy='random_explore_first',
        initial_configurations=None,
        ref_point=None,
        history_bo_data: List[OrderedDict] = None,
        logging_dir='logs',
        task_id=None,
        random_state=1,
    ):
        """Parallel BO optimizer using the task_info-dict advisor API.

        Args:
            objective_function: Callable evaluated on configurations.
            config_space: Search space passed to parent and advisor.
            parallel_strategy: 'sync' or 'async'; selects the advisor
                class. The async branch additionally creates a lock.
            batch_size / batch_strategy: Batch suggestion settings.
            num_constraints / num_objs: Task dimensions collected into
                ``self.task_info``.
            sample_strategy: Optimization strategy forwarded to parent and
                advisor.
            max_runs / time_limit_per_trial: Budgets forwarded to parent.
            surrogate_type, acq_type, acq_optimizer_type, initial_runs,
                init_strategy, initial_configurations, ref_point,
                history_bo_data: Advisor configuration, forwarded as-is.
            logging_dir, task_id, random_state: Bookkeeping settings;
                ``task_id`` is mandatory here (default None is rejected).

        Raises:
            ValueError: If ``task_id`` is None, or ``parallel_strategy``
                is neither 'sync' nor 'async'.
        """

        if task_id is None:
            raise ValueError(
                'Task id is not SPECIFIED. Please input task id first.')

        self.task_info = {
            'num_constraints': num_constraints,
            'num_objs': num_objs
        }
        # Sentinel performance vector recorded for failed trials.
        self.FAILED_PERF = [MAXINT] * num_objs
        super().__init__(objective_function,
                         config_space,
                         task_id=task_id,
                         output_dir=logging_dir,
                         random_state=random_state,
                         initial_runs=initial_runs,
                         max_runs=max_runs,
                         sample_strategy=sample_strategy,
                         time_limit_per_trial=time_limit_per_trial,
                         history_bo_data=history_bo_data)

        if parallel_strategy == 'sync':
            self.config_advisor = SyncBatchAdvisor(
                config_space,
                self.task_info,
                batch_size=batch_size,
                batch_strategy=batch_strategy,
                initial_trials=initial_runs,
                initial_configurations=initial_configurations,
                init_strategy=init_strategy,
                history_bo_data=history_bo_data,
                optimization_strategy=sample_strategy,
                surrogate_type=surrogate_type,
                acq_type=acq_type,
                acq_optimizer_type=acq_optimizer_type,
                ref_point=ref_point,
                task_id=task_id,
                output_dir=logging_dir,
                random_state=random_state)
        elif parallel_strategy == 'async':
            self.config_advisor = AsyncBatchAdvisor(
                config_space,
                self.task_info,
                batch_size=batch_size,
                batch_strategy=batch_strategy,
                initial_trials=initial_runs,
                initial_configurations=initial_configurations,
                init_strategy=init_strategy,
                history_bo_data=history_bo_data,
                optimization_strategy=sample_strategy,
                surrogate_type=surrogate_type,
                acq_type=acq_type,
                acq_optimizer_type=acq_optimizer_type,
                ref_point=ref_point,
                task_id=task_id,
                output_dir=logging_dir,
                random_state=random_state)
            # NOTE(review): presumably serializes concurrent advisor access
            # from async worker callbacks — confirm at call sites.
            self.advisor_lock = Lock()
        else:
            raise ValueError('Invalid parallel strategy - %s.' %
                             parallel_strategy)

        self.parallel_strategy = parallel_strategy
        self.batch_size = batch_size
Ejemplo n.º 7
0
    def __init__(
        self,
        objective_function,
        config_space,
        parallel_strategy='async',
        batch_size=4,
        batch_strategy='default',
        num_constraints=0,
        num_objs=1,
        sample_strategy: str = 'bo',
        max_runs=200,
        time_limit_per_trial=180,
        surrogate_type='auto',
        acq_type='auto',
        acq_optimizer_type='auto',
        initial_runs=3,
        init_strategy='random_explore_first',
        initial_configurations=None,
        ref_point=None,
        history_bo_data: List[OrderedDict] = None,
        logging_dir='logs',
        task_id='default_task_id',
        random_state=None,
        advisor_kwargs: dict = None,
    ):
        """Parallel optimizer selecting an advisor by strategy.

        The advisor is chosen on two axes: ``parallel_strategy``
        ('sync'/'async') and ``sample_strategy`` ('random'/'bo' use a
        batch BO advisor; 'ea' uses an evolutionary advisor).

        Args:
            objective_function: Callable evaluated on configurations.
            config_space: Search space passed to parent and advisor.
            parallel_strategy: 'sync' or 'async'; the async branch also
                creates ``self.advisor_lock``.
            batch_size / batch_strategy: Batch suggestion settings.
            num_constraints / num_objs: Task dimensions for the advisor;
                the 'ea' branch requires single-objective, unconstrained.
            sample_strategy: 'random', 'bo', or 'ea'.
            max_runs / time_limit_per_trial: Budgets forwarded to parent.
            surrogate_type, acq_type, acq_optimizer_type, initial_runs,
                init_strategy, initial_configurations, ref_point,
                history_bo_data: BO-advisor configuration, forwarded
                as-is (not used by the EA advisor).
            logging_dir, task_id, random_state: Bookkeeping settings.
            advisor_kwargs: Optional extra keyword args expanded into the
                chosen advisor's constructor.

        Raises:
            ValueError: If ``task_id`` is None, ``sample_strategy`` is
                unknown, or ``parallel_strategy`` is invalid.
        """

        # Defensive check: the default is non-None, but an explicit None
        # from the caller is rejected up front.
        if task_id is None:
            raise ValueError(
                'Task id is not SPECIFIED. Please input task id first.')

        self.num_objs = num_objs
        self.num_constraints = num_constraints
        # Sentinel performance vector recorded for failed trials.
        self.FAILED_PERF = [MAXINT] * num_objs
        super().__init__(objective_function,
                         config_space,
                         task_id=task_id,
                         output_dir=logging_dir,
                         random_state=random_state,
                         initial_runs=initial_runs,
                         max_runs=max_runs,
                         sample_strategy=sample_strategy,
                         time_limit_per_trial=time_limit_per_trial,
                         history_bo_data=history_bo_data)

        self.parallel_strategy = parallel_strategy
        self.batch_size = batch_size

        advisor_kwargs = advisor_kwargs or {}
        if parallel_strategy == 'sync':
            if sample_strategy in ['random', 'bo']:
                self.config_advisor = SyncBatchAdvisor(
                    config_space,
                    num_objs=num_objs,
                    num_constraints=num_constraints,
                    batch_size=batch_size,
                    batch_strategy=batch_strategy,
                    initial_trials=initial_runs,
                    initial_configurations=initial_configurations,
                    init_strategy=init_strategy,
                    history_bo_data=history_bo_data,
                    optimization_strategy=sample_strategy,
                    surrogate_type=surrogate_type,
                    acq_type=acq_type,
                    acq_optimizer_type=acq_optimizer_type,
                    ref_point=ref_point,
                    task_id=task_id,
                    output_dir=logging_dir,
                    random_state=random_state,
                    **advisor_kwargs)
            elif sample_strategy == 'ea':
                # EA advisor only supports single-objective, unconstrained
                # tasks.
                assert num_objs == 1 and num_constraints == 0
                self.config_advisor = EA_Advisor(
                    config_space,
                    num_objs=num_objs,
                    num_constraints=num_constraints,
                    optimization_strategy=sample_strategy,
                    batch_size=batch_size,
                    task_id=task_id,
                    output_dir=logging_dir,
                    random_state=random_state,
                    **advisor_kwargs)
            else:
                raise ValueError('Unknown sample_strategy: %s' %
                                 sample_strategy)
        elif parallel_strategy == 'async':
            # NOTE(review): presumably serializes concurrent advisor access
            # from async worker callbacks — confirm at call sites.
            self.advisor_lock = Lock()
            if sample_strategy in ['random', 'bo']:
                self.config_advisor = AsyncBatchAdvisor(
                    config_space,
                    num_objs=num_objs,
                    num_constraints=num_constraints,
                    batch_size=batch_size,
                    batch_strategy=batch_strategy,
                    initial_trials=initial_runs,
                    initial_configurations=initial_configurations,
                    init_strategy=init_strategy,
                    history_bo_data=history_bo_data,
                    optimization_strategy=sample_strategy,
                    surrogate_type=surrogate_type,
                    acq_type=acq_type,
                    acq_optimizer_type=acq_optimizer_type,
                    ref_point=ref_point,
                    task_id=task_id,
                    output_dir=logging_dir,
                    random_state=random_state,
                    **advisor_kwargs)
            elif sample_strategy == 'ea':
                # EA advisor only supports single-objective, unconstrained
                # tasks.
                assert num_objs == 1 and num_constraints == 0
                self.config_advisor = EA_Advisor(
                    config_space,
                    num_objs=num_objs,
                    num_constraints=num_constraints,
                    optimization_strategy=sample_strategy,
                    batch_size=batch_size,
                    task_id=task_id,
                    output_dir=logging_dir,
                    random_state=random_state,
                    **advisor_kwargs)
            else:
                raise ValueError('Unknown sample_strategy: %s' %
                                 sample_strategy)
        else:
            raise ValueError('Invalid parallel strategy - %s.' %
                             parallel_strategy)