Code Example #1
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        y = problem.evaluate_config(config)
        res = dict()
        res['config'] = config
        res['objs'] = (y,)
        res['constraints'] = None
        return res

    bo = SMBO(objective_function, cs,
              surrogate_type=surrogate_type,            # default: gp
              acq_optimizer_type=acq_optimizer_type,    # default: random_scipy
              initial_runs=initial_runs,                # default: 3
              init_strategy=init_strategy,              # default: random_explore_first
              max_runs=max_runs,
              time_limit_per_trial=time_limit_per_trial, task_id=task_id, random_state=seed)
    # bo.run()
    time_list = []
    global_start_time = time.time()
    for i in range(max_runs):
        config, trial_state, _, objs = bo.iterate()
        global_time = time.time() - global_start_time
        print(seed, i, objs, config, trial_state, 'time=', global_time)
        time_list.append(global_time)
    config_list = bo.get_history().configurations
    perf_list = bo.get_history().perfs

    return config_list, perf_list, time_list
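
The evaluate functions in these examples reference module-level objects (problem, cs, surrogate_type, max_runs, and so on) that are defined elsewhere in the original scripts. Below is a minimal, hypothetical setup sketch showing what such a preamble could look like; the QuadraticProblem class, parameter names, and constant values are illustrative assumptions, not part of the original code.

import time
import numpy as np
from openbox.optimizer.generic_smbo import SMBO   # same import as in Code Example #6
from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter

class QuadraticProblem:
    """Hypothetical stand-in for the `problem` object used by evaluate()."""
    def evaluate_config(self, config):
        x1, x2 = config['x1'], config['x2']
        return (x1 - 1.0) ** 2 + (x2 + 0.5) ** 2

problem = QuadraticProblem()

cs = ConfigurationSpace()
cs.add_hyperparameters([
    UniformFloatHyperparameter('x1', -5.0, 5.0),
    UniformFloatHyperparameter('x2', -5.0, 5.0),
])

# Module-level settings referenced by evaluate(); the values below are placeholders.
surrogate_type = 'gp'
acq_optimizer_type = 'random_scipy'
initial_runs = 3
init_strategy = 'random_explore_first'
max_runs = 50
time_limit_per_trial = 60
task_id = 'example'
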
Code Example #2
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        y = problem.evaluate_config(config)
        res = dict()
        res['config'] = config
        res['objs'] = (y, )
        res['constraints'] = None
        return res

    bo = SMBO(objective_function,
              cs,
              sample_strategy='random',
              init_strategy='random',
              max_runs=max_runs,
              time_limit_per_trial=time_limit_per_trial,
              task_id=task_id,
              random_state=seed)
    # bo.run()
    config_list = []
    perf_list = []
    time_list = []
    global_start_time = time.time()
    for i in range(max_runs):
        config, trial_state, objs, trial_info = bo.iterate()
        global_time = time.time() - global_start_time
        print(seed, i, objs, config, trial_state, trial_info, 'time=',
              global_time)
        config_list.append(config)
        perf_list.append(objs[0])
        time_list.append(global_time)

    return config_list, perf_list, time_list
Code Example #3
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        y = problem.evaluate_config(config)
        return y

    bo = SMBO(objective_function, cs,
              num_constraints=num_constraints,
              surrogate_type=surrogate_type,  # default: gp
              acq_optimizer_type=acq_optimizer_type,  # default: random_scipy
              initial_runs=initial_runs,  # default: 3
              init_strategy=init_strategy,  # default: random_explore_first
              max_runs=max_runs + initial_runs,
              time_limit_per_trial=time_limit_per_trial, task_id=task_id, random_state=seed)
    # bo.run()
    config_list = []
    perf_list = []
    time_list = []
    global_start_time = time.time()
    for i in range(max_runs):
        config, trial_state, constraints, objs = bo.iterate()
        global_time = time.time() - global_start_time
        origin_perf = objs[0]
        if any(c > 0 for c in constraints):
            perf = 9999999.0
        else:
            perf = origin_perf
        print(seed, i, perf, config, constraints, trial_state, 'time=', global_time)
        config_list.append(config)
        perf_list.append(perf)
        time_list.append(global_time)

    return config_list, perf_list, time_list
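
Code Example #3 treats a trial as infeasible whenever any returned constraint value is positive and replaces its objective with a large penalty, i.e. the convention is that a constraint is satisfied when its value is <= 0. A hedged sketch of an objective_function that reports both the objective and one constraint in the dict form used by the other examples is shown below; the problem itself is hypothetical.

def objective_function(config):
    # Hypothetical constrained problem: minimize f(x) subject to g(x) <= 0.
    x1, x2 = config['x1'], config['x2']
    f = (x1 - 1.0) ** 2 + (x2 + 0.5) ** 2
    g = x1 + x2 - 1.0        # feasible when g <= 0; a positive value counts as a violation
    return {
        'config': config,
        'objs': (f,),
        'constraints': (g,),
    }
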
Code Example #4
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        res = problem.evaluate_config(config)
        res['config'] = config
        res['objs'] = np.asarray(res['objs']).tolist()
        res['constraints'] = np.asarray(res['constraints']).tolist()
        return res

    bo = SMBO(
        objective_function,
        cs,
        num_objs=problem.num_objs,
        num_constraints=problem.num_constraints,
        surrogate_type=surrogate_type,  # default: gp
        acq_type=acq_type,  # default: ehvic
        acq_optimizer_type=acq_optimizer_type,  # default: random_scipy
        initial_runs=initial_runs,  # default: 2 * (problem.dim + 1)
        init_strategy=init_strategy,  # default: sobol
        max_runs=max_runs,
        ref_point=problem.ref_point,
        time_limit_per_trial=time_limit_per_trial,
        task_id=task_id,
        random_state=seed)

    # bo.run()
    hv_diffs = []
    config_list = []
    perf_list = []
    time_list = []
    global_start_time = time.time()
    for i in range(max_runs):
        config, trial_state, constraints, origin_objs = bo.iterate()
        global_time = time.time() - global_start_time
        if any(c > 0 for c in constraints):
            objs = [9999999.0] * problem.num_objs
        else:
            objs = origin_objs
        print(seed, i, origin_objs, objs, constraints, config, trial_state,
              'time=', global_time)
        config_list.append(config)
        perf_list.append(objs)
        time_list.append(global_time)
        hv = Hypervolume(problem.ref_point).compute(perf_list)
        hv_diff = problem.max_hv - hv
        hv_diffs.append(hv_diff)
        print(seed, i, 'hypervolume =', hv)
        print(seed, i, 'hv diff =', hv_diff)
    pf = np.asarray(bo.get_history().get_pareto_front())

    # plot for debugging
    if plot_mode == 1:
        Y_init = None
        plot_pf(problem, problem_str, mth, pf, Y_init)

    return hv_diffs, pf, config_list, perf_list, time_list
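
Code Example #4 converts the observed objective vectors into a hypervolume-regret curve at every iteration. The same bookkeeping in isolation might look like the sketch below; the import path for Hypervolume, the reference point, and the max_hv value are assumptions chosen for illustration.

import numpy as np
from openbox.utils.multi_objective import Hypervolume   # assumed import path

ref_point = [10.0, 10.0]   # assumed reference point (an upper bound on each minimized objective)
max_hv = 95.0              # assumed hypervolume of the true Pareto front

perf_list = [[1.0, 8.0], [3.0, 5.0], [6.0, 2.0]]    # observed objective vectors so far
hv = Hypervolume(ref_point).compute(perf_list)      # same call as inside the loop above
hv_diff = max_hv - hv                               # regret w.r.t. the best attainable hypervolume
print('hypervolume =', hv, 'hv diff =', hv_diff)
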
Code Example #5
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        y = problem.evaluate_config(config)
        return y

    bo = SMBO(objective_function,
              cs,
              num_constraints=num_constraints,
              sample_strategy='random',
              init_strategy='random',
              max_runs=max_runs,
              time_limit_per_trial=time_limit_per_trial,
              task_id=task_id,
              random_state=seed)
    # bo.run()
    config_list = []
    perf_list = []
    time_list = []
    global_start_time = time.time()
    for i in range(max_runs):
        config, trial_state, objs, trial_info = bo.iterate()
        global_time = time.time() - global_start_time
        print(seed, i, objs, config, trial_state, trial_info, 'time=',
              global_time)
        config_list.append(config)
        perf_list.append(objs[0])
        time_list.append(global_time)

    _perf_list = []
    for i, c in enumerate(bo.config_advisor.constraint_perfs[0]):
        if c > 0:
            _perf_list.append(9999999)
        else:
            _perf_list.append(perf_list[i])

    return config_list, _perf_list, time_list
Code Example #6
def evaluate(problem, seed):
    def objective_function(config):
        y = problem.evaluate_config(config)
        res = dict()
        res['config'] = config
        res['objs'] = (y, )
        res['constraints'] = None
        return res

    from openbox.optimizer.generic_smbo import SMBO
    bo = SMBO(
        objective_function,
        cs,
        surrogate_type=surrogate_type,  # default: prf
        acq_optimizer_type=acq_optimizer_type,  # default: local_random
        initial_runs=initial_runs,  # default: 3
        init_strategy=init_strategy,  # default: random_explore_first
        max_runs=max_runs,
        time_limit_per_trial=time_limit_per_trial,
        task_id=task_id,
        random_state=seed)
    # bo.run()
    config_list = []
    perf_list = []
    time_list = []
    global_start_time = time.time()
    for i in range(max_runs):
        config, trial_state, _, objs = bo.iterate()
        global_time = time.time() - global_start_time
        print(seed, i, objs, config, trial_state, 'time=', global_time)
        config_list.append(config)
        perf_list.append(objs[0])
        time_list.append(global_time)
        if global_time >= runtime_limit:
            break

    return config_list, perf_list, time_list
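
Each evaluate(mth, run_i, seed) function above is meant to be repeated across seeds and compared between methods. A hypothetical driver loop (not part of the original scripts) could look like this:

rep_num = 10
all_results = []
for run_i in range(rep_num):
    seed = 47 * run_i + 1      # arbitrary but reproducible seed schedule
    config_list, perf_list, time_list = evaluate('openbox', run_i, seed)
    all_results.append((config_list, perf_list, time_list))

best_perfs = [min(perf_list) for _, perf_list, _ in all_results]
print('best perf per run:', best_perfs)
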
Code Example #7
bo = SMBO(  # the opening of this call is assumed; the original snippet begins mid-argument-list
    objective_function,
    cs,
    # ... (additional leading arguments, e.g. num_objs, are elided in the original snippet)
    # surrogate_type='gp_rbf',    # use default
    acq_type=mth,
    # initial_configurations=X_init, initial_runs=10,
    time_limit_per_trial=60,
    task_id='mo',
    random_state=seed)
bo.config_advisor.optimizer.random_chooser.prob = rand_prob  # set rand_prob, default 0
bo.config_advisor.acquisition_function.sample_num = sample_num  # set sample_num
#bo.config_advisor.acquisition_function.random_state = seed      # set random_state
bo.config_advisor.optimizer.num_mc = 1000  # MESMO optimizer only
bo.config_advisor.optimizer.num_opt = 100  # MESMO optimizer only
print(mth, '===== start =====')
# bo.run()
hv_diffs = []
for i in range(max_runs):
    config, trial_state, objs, trial_info = bo.iterate()
    print(i, objs, config)
    hv = Hypervolume(referencePoint).compute(
        bo.get_history().get_pareto_front())
    print(i, 'hypervolume =', hv)
    hv_diff = real_hv - hv
    hv_diffs.append(hv_diff)
    print(i, 'hv diff =', hv_diff)

# Print result
pf = np.asarray(bo.get_history().get_pareto_front())
print(mth, 'pareto num:', pf.shape[0])
print('real hv =', real_hv)
print('hv_diffs:', hv_diffs)

# Evaluate the random search.
Code Example #8
class RandomSearchOptimizer(BaseOptimizer):

    def __init__(self, evaluator, config_space, name, eval_type, time_limit=None, evaluation_limit=None,
                 per_run_time_limit=300, output_dir='./', timestamp=None,
                 inner_iter_num_per_iter=1, seed=1, n_jobs=1):
        super().__init__(evaluator, config_space, name, eval_type=eval_type, timestamp=timestamp, output_dir=output_dir,
                         seed=seed)
        self.time_limit = time_limit
        self.evaluation_num_limit = evaluation_limit
        self.inner_iter_num_per_iter = inner_iter_num_per_iter
        self.per_run_time_limit = per_run_time_limit
        # self.per_run_mem_limit= per_run_mem_limit

        if n_jobs == 1:
            self.optimizer = RandomSearch(objective_function=self.evaluator,
                                          config_space=config_space,
                                          advisor_type='random',
                                          task_id='Default',
                                          time_limit_per_trial=self.per_run_time_limit,
                                          random_state=self.seed)
        else:
            self.optimizer = pRandomSearch(objective_function=self.evaluator,
                                           config_space=config_space,
                                           sample_strategy='random',
                                           batch_size=n_jobs,
                                           task_id='Default',
                                           time_limit_per_trial=self.per_run_time_limit,
                                           random_state=self.seed)

        self.trial_cnt = 0
        self.configs = list()
        self.perfs = list()
        self.exp_output = dict()
        self.incumbent_perf = float("-INF")
        self.incumbent_config = self.config_space.get_default_configuration()

        hp_num = len(self.config_space.get_hyperparameters())
        if hp_num == 0:
            self.config_num_threshold = 0
        else:
            _threshold = int(len(set(self.config_space.sample_configuration(5000))))
            self.config_num_threshold = _threshold

        self.logger.debug("The maximum trial number in HPO is :%d" % self.config_num_threshold)
        self.maximum_config_num = min(1500, self.config_num_threshold)
        self.eval_dict = {}
        self.n_jobs = n_jobs

    def run(self):
        while True:
            evaluation_num = len(self.perfs)
            if self.evaluation_num_limit is not None and evaluation_num > self.evaluation_num_limit:
                break
            if self.time_limit is not None and time.time() - self.start_time > self.time_limit:
                break
            self.iterate()
        return np.max(self.perfs)

    def iterate(self, budget=MAX_INT):
        _start_time = time.time()

        if len(self.configs) == 0 and self.init_hpo_iter_num is not None:
            inner_iter_num = self.init_hpo_iter_num
            print('initial hpo trial num is set to %d' % inner_iter_num)
        else:
            inner_iter_num = self.inner_iter_num_per_iter

        if self.n_jobs == 1:
            for _ in range(inner_iter_num):
                if len(self.configs) >= self.maximum_config_num:
                    self.early_stopped_flag = True
                    self.logger.warning('Already explored 70 percent of the hyperspace '
                                        'or the maximum configuration number has been reached: %d!' % self.maximum_config_num)
                    break
                if time.time() - _start_time > budget:
                    self.logger.warning('Time limit exceeded!')
                    break
                _config, _status, _, _perf = self.optimizer.iterate()
                self.update_saver([_config], [_perf[0]])
                if _status == SUCCESS:
                    self.exp_output[time.time()] = (_config, _perf[0])
                    self.configs.append(_config)
                    self.perfs.append(-_perf[0])
        else:
            if len(self.configs) >= self.maximum_config_num:
                self.early_stopped_flag = True
                self.logger.warning('Already explored 70 percent of the hyperspace '
                                    'or the maximum configuration number has been reached: %d!' % self.maximum_config_num)
            elif time.time() - _start_time > budget:
                self.logger.warning('Time limit exceeded!')
            else:
                _config_list, _status_list, _, _perf_list = self.optimizer.async_iterate(n=inner_iter_num)
                self.update_saver(_config_list, _perf_list)
                for i, _config in enumerate(_config_list):
                    if _status_list[i] == SUCCESS:
                        self.exp_output[time.time()] = (_config, _perf_list[i])
                        self.configs.append(_config)
                        self.perfs.append(-_perf_list[i])

        run_history = self.optimizer.get_history()
        if self.name == 'hpo':
            if hasattr(self.evaluator, 'fe_config'):
                fe_config = self.evaluator.fe_config
            else:
                fe_config = None
            self.eval_dict = {(fe_config, hpo_config): [-run_history.perfs[i], time.time(), run_history.trial_states[i]]
                              for i, hpo_config in enumerate(run_history.configurations)}
        else:
            if hasattr(self.evaluator, 'hpo_config'):
                hpo_config = self.evaluator.hpo_config
            else:
                hpo_config = None
            self.eval_dict = {(fe_config, hpo_config): [-run_history.perfs[i], time.time(), run_history.trial_states[i]]
                              for i, fe_config in enumerate(run_history.configurations)}
        if len(run_history.get_incumbents()) > 0:
            self.incumbent_config, self.incumbent_perf = run_history.get_incumbents()[0]
            self.incumbent_perf = -self.incumbent_perf
        iteration_cost = time.time() - _start_time
        return self.incumbent_perf, iteration_cost, self.incumbent_config
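
A hedged usage sketch for RandomSearchOptimizer, based only on the constructor signature above: the evaluator and config_space objects come from the surrounding AutoML system and are assumed here.

# Hypothetical usage; `clf_evaluator` and `config_space` are provided by the host AutoML pipeline.
optimizer = RandomSearchOptimizer(
    evaluator=clf_evaluator,
    config_space=config_space,
    name='hpo',
    eval_type='holdout',
    time_limit=600,            # overall budget in seconds
    evaluation_limit=200,      # maximum number of evaluated configurations
    per_run_time_limit=300,
    seed=1,
    n_jobs=1,
)
best_perf = optimizer.run()    # iterates until the time or evaluation budget is exhausted
print('best (maximized) perf:', best_perf)
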