Example #1
def wrapper(param):
    objective_function, config, time_limit_per_trial = param
    trial_state = SUCCESS
    start_time = time.time()
    try:
        args, kwargs = (config, ), dict()
        timeout_status, _result = time_limit(objective_function,
                                             time_limit_per_trial,
                                             args=args,
                                             kwargs=kwargs)
        if timeout_status:
            raise TimeoutException(
                'Timeout: time limit for this evaluation is %.1fs' %
                time_limit_per_trial)
        else:
            objs, constraints = get_result(_result)
    except Exception as e:
        if isinstance(e, TimeoutException):
            trial_state = TIMEOUT
        else:
            traceback.print_exc(file=sys.stdout)
            trial_state = FAILED
        objs = None
        constraints = None
    elapsed_time = time.time() - start_time
    return Observation(config, trial_state, constraints, objs, elapsed_time)
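
The wrapper above, and most of the snippets that follow, rely on a time_limit helper that runs the objective function under a wall-clock budget and returns a pair (timeout_status, result). A minimal sketch of that contract, assuming a thread-based implementation rather than the library's actual one, could look like the following; note that the worker thread is not killed on timeout, the sketch only reports that the budget was exceeded.

import concurrent.futures


def time_limit(func, seconds, args=(), kwargs=None):
    # Hypothetical sketch: run func(*args, **kwargs) and report whether it
    # finished within `seconds`. Returns (timeout_status, result).
    kwargs = kwargs or dict()
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    future = executor.submit(func, *args, **kwargs)
    try:
        return False, future.result(timeout=seconds)
    except concurrent.futures.TimeoutError:
        return True, None
    finally:
        executor.shutdown(wait=False)
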
    def async_run(self):
        config_num = 0
        result_num = 0
        while result_num < self.max_iterations:
            # Add jobs to masterQueue.
            while len(self.config_advisor.running_configs
                      ) < self.batch_size and config_num < self.max_iterations:
                config_num += 1
                config = self.config_advisor.get_suggestion()
                msg = [config, self.time_limit_per_trial]
                self.logger.info("Master: Add config %d." % config_num)
                self.master_messager.send_message(msg)

            # Get results from workerQueue.
            while True:
                observation = self.master_messager.receive_message()
                if observation is None:
                    # Wait for workers.
                    # self.logger.info("Master: wait for worker results. sleep 1s.")
                    time.sleep(1)
                    break
                # Report result.
                result_num += 1
                if observation.objs is None:
                    observation = Observation(
                        config=observation.config,
                        objs=self.FAILED_PERF,
                        constraints=observation.constraints,
                        trial_state=observation.trial_state,
                        elapsed_time=observation.elapsed_time,
                    )
                self.config_advisor.update_observation(observation)
                self.logger.info('Master: Get %d observation: %s' %
                                 (result_num, str(observation)))
Example #3
 def sync_run(self):
     with ParallelEvaluation(wrapper, n_worker=self.batch_size) as proc:
         batch_num = (self.max_iterations + self.batch_size - 1) // self.batch_size
         if self.batch_size > self.config_advisor.init_num:
             batch_num += 1  # fix bug
         batch_id = 0
         while batch_id < batch_num:
             configs = self.config_advisor.get_suggestions()
             self.logger.info('Running on %d configs in the %d-th batch.' % (len(configs), batch_id))
             params = [(self.objective_function, config, self.time_limit_per_trial) for config in configs]
             # Wait all workers to complete their corresponding jobs.
             observations = proc.parallel_execute(params)
             # Report their results.
             for idx, observation in enumerate(observations):
                 config, trial_state, constraints, objs, elapsed_time = observation
                 if objs is None:
                     objs = self.FAILED_PERF
                     observation = Observation(config, trial_state, constraints, self.FAILED_PERF, elapsed_time)
                 self.config_advisor.update_observation(observation)
                 if self.task_info['num_constraints'] > 0:
                     self.logger.info('In the %d-th batch [%d/%d], config: %s, objective value: %s, constraints: %s.'
                                      % (batch_id, idx+1, len(configs), config, objs, constraints))
                 else:
                     self.logger.info('In the %d-th batch [%d/%d], config: %s, objective value: %s.'
                                      % (batch_id, idx+1, len(configs), config, objs))
             batch_id += 1
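
sync_run above hands a whole batch of parameter tuples to ParallelEvaluation and blocks until every worker returns an Observation. As an assumption about how such a helper behaves (the real class may differ), a minimal stand-in backed by multiprocessing.Pool:

from multiprocessing import Pool


class ParallelEvaluationSketch:
    # Hypothetical stand-in: a context-managed process pool whose
    # parallel_execute maps a (picklable) function over parameter tuples.
    def __init__(self, func, n_worker=4):
        self.func = func
        self.n_worker = n_worker

    def __enter__(self):
        self.pool = Pool(processes=self.n_worker)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.pool.close()
        self.pool.join()

    def parallel_execute(self, param_list):
        # Blocks until all workers have finished their jobs.
        return self.pool.map(self.func, param_list)
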
Example #4
 def sync_run(self):
     batch_id = 0
     while time.time() - self.start_time < self.runtime_limit:
         configs = self.config_advisor.get_suggestions()
         # Add batch configs to masterQueue.
         for config in configs:
             msg = [config, self.time_limit_per_trial]
             self.master_messager.send_message(msg)
         self.logger.info('Master: %d-th batch. %d configs sent.' %
                          (batch_id, len(configs)))
         # Get batch results from workerQueue.
         result_num = 0
         result_needed = len(configs)
         while True:
             observation = self.master_messager.receive_message()
             if observation is None:
                 # Wait for workers.
                 # self.logger.info("Master: wait for worker results. sleep 1s.")
                 time.sleep(1)
                 continue
             # Report result.
             result_num += 1
             config, trial_state, constraints, objs, elapsed_time, worker_info, extra_info = observation
             if objs is None:
                 observation = Observation(config, trial_state, constraints,
                                           self.FAILED_PERF, elapsed_time,
                                           worker_info, extra_info)
             self.config_advisor.update_observation(observation)
             self.logger.info(
                 'Master: In the %d-th batch [%d], observation is: %s' %
                 (batch_id, result_num, str(observation)))
             if result_num == result_needed:
                 break
         batch_id += 1
Example #5
    def iterate(self, budget_left=None):
        config = self.config_advisor.get_suggestion()

        trial_state = SUCCESS
        _budget_left = int(1e10) if budget_left is None else budget_left
        _time_limit_per_trial = math.ceil(min(self.time_limit_per_trial, _budget_left))

        if config not in self.config_advisor.history_container.configurations:
            start_time = time.time()
            try:
                args, kwargs = (config,), dict()
                timeout_status, _result = time_limit(self.objective_function,
                                                     _time_limit_per_trial,
                                                     args=args, kwargs=kwargs)
                if timeout_status:
                    raise TimeoutException(
                        'Timeout: time limit for this evaluation is %.1fs' % _time_limit_per_trial)
                else:
                    objs, constraints = get_result(_result)
            except Exception as e:
                if isinstance(e, TimeoutException):
                    self.logger.warning(str(e))
                    trial_state = TIMEOUT
                else:
                    self.logger.warning('Exception when calling objective function: %s' % str(e))
                    trial_state = FAILED
                objs = self.FAILED_PERF
                constraints = None

            elapsed_time = time.time() - start_time
            observation = Observation(config, trial_state, constraints, objs, elapsed_time)
            if _time_limit_per_trial != self.time_limit_per_trial and trial_state == TIMEOUT:
                # Timeout in the last iteration.
                pass
            else:
                self.config_advisor.update_observation(observation)
        else:
            self.logger.info('This configuration has been evaluated! Skip it: %s' % config)
            history = self.get_history()
            config_idx = history.configurations.index(config)
            trial_state = history.trial_states[config_idx]
            objs = history.perfs[config_idx]
            constraints = history.constraint_perfs[config_idx] if self.task_info['num_constraints'] > 0 else None
            if self.task_info['num_objs'] == 1:
                objs = (objs,)

        self.iteration_id += 1
        # Logging.
        if self.task_info['num_constraints'] > 0:
            self.logger.info('Iteration %d, objective value: %s. constraints: %s.'
                             % (self.iteration_id, objs, constraints))
        else:
            self.logger.info('Iteration %d, objective value: %s.' % (self.iteration_id, objs))

        # Visualization.
        for idx, obj in enumerate(objs):
            if obj < self.FAILED_PERF[idx]:
                self.writer.add_scalar('data/objective-%d' % (idx + 1), obj, self.iteration_id)
        return config, trial_state, constraints, objs
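
iterate above performs one suggest-evaluate-update step and caps the per-trial time limit by the remaining budget. A hedged sketch of a driver loop for such an optimizer; the names opt, iterate and runtime_limit are assumptions taken from the snippet, not an API guarantee:

import time


def run_with_budget(opt, runtime_limit):
    # Hypothetical driver: keep iterating until the total wall-clock budget
    # (in seconds) is spent, passing the remaining budget to each trial.
    start = time.time()
    while time.time() - start < runtime_limit:
        budget_left = runtime_limit - (time.time() - start)
        config, trial_state, constraints, objs = opt.iterate(budget_left=budget_left)
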
Example #6
    def get_suggestion(self, history_container=None):
        self.logger.info('#Call get_suggestion. len of running configs = %d.' % len(self.running_configs))

        if history_container is None:
            history_container = self.history_container

        num_config_all = len(history_container.configurations) + len(self.running_configs)
        num_config_successful = len(history_container.successful_perfs)

        if (num_config_all < self.init_num) or \
                num_config_successful < self.bo_start_n or \
                self.optimization_strategy == 'random':
            if num_config_all >= len(self.initial_configurations):
                _config = self.sample_random_configs(1, history_container)[0]
            else:
                _config = self.initial_configurations[num_config_all]
            self.running_configs.append(_config)
            return _config

        X = convert_configurations_to_array(history_container.configurations)
        Y = history_container.get_transformed_perfs()
        # cY = history_container.get_transformed_constraint_perfs()

        if self.batch_strategy == 'median_imputation':
            # set bilog_transform=False to get real cY for estimating median
            cY = history_container.get_transformed_constraint_perfs(bilog_transform=False)

            estimated_y = np.median(Y, axis=0).reshape(-1).tolist()
            estimated_c = np.median(cY, axis=0).tolist() if self.num_constraints > 0 else None
            batch_history_container = copy.deepcopy(history_container)
            # imputation
            for config in self.running_configs:
                observation = Observation(config, SUCCESS, estimated_c, estimated_y, None)
                batch_history_container.update_observation(observation)

            # use super class get_suggestion
            _config = super().get_suggestion(batch_history_container)

        elif self.batch_strategy == 'local_penalization':
            # local_penalization only supports single objective with no constraint
            self.surrogate_model.train(X, Y)
            incumbent_value = history_container.get_incumbents()[0][1]
            self.acquisition_function.update(model=self.surrogate_model, eta=incumbent_value,
                                             num_data=len(history_container.data),
                                             batch_configs=self.running_configs)

            challengers = self.optimizer.maximize(
                runhistory=history_container,
                num_points=5000
            )
            _config = challengers.challengers[0]
        else:
            raise ValueError('Invalid sampling strategy - %s.' % self.batch_strategy)

        self.running_configs.append(_config)
        return _config
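
In the 'median_imputation' branch above, every configuration that is still running is temporarily treated as if it had finished with the median of the observed objectives (and constraints), so that pending points shape the surrogate without being rewarded or punished. A small, self-contained numpy illustration of the imputed value:

import numpy as np

# One row per finished trial, one column per objective.
Y = np.array([[0.31], [0.27], [0.35]])

# Value assigned to every still-running configuration before refitting.
estimated_y = np.median(Y, axis=0).reshape(-1).tolist()
print(estimated_y)  # [0.31]
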
Example #7
 def callback(self, observation: Observation):
     config, trial_state, constraints, objs, elapsed_time = observation
     if objs is None:
         observation = Observation(config, trial_state, constraints, self.FAILED_PERF, elapsed_time)
     # Report the result, and remove the config from the running queue.
     with self.advisor_lock:
         self.config_advisor.update_observation(observation)
         self.logger.info('Update observation %d: %s.' % (self.iteration_id, str(observation)))
     # Parent process: collect the result and increment id.
     self.iteration_id += 1
Example #8
    def run(self):
        while True:
            # Get config
            try:
                msg = self.worker_messager.receive_message()
            except Exception as e:
                print("Worker receive message error:", str(e))
                return
            if msg is None:
                # Wait for configs
                time.sleep(1)
                continue
            print("Worker: get config. start working.")
            config, time_limit_per_trial = msg

            # Start working
            trial_state = SUCCESS
            start_time = time.time()
            try:
                args, kwargs = (config, ), dict()
                timeout_status, _result = time_limit(self.objective_function,
                                                     time_limit_per_trial,
                                                     args=args,
                                                     kwargs=kwargs)
                if timeout_status:
                    raise TimeoutException(
                        'Timeout: time limit for this evaluation is %.1fs' %
                        time_limit_per_trial)
                else:
                    objs, constraints = get_result(_result)
            except Exception as e:
                if isinstance(e, TimeoutException):
                    trial_state = TIMEOUT
                else:
                    traceback.print_exc(file=sys.stdout)
                    trial_state = FAILED
                objs = None
                constraints = None

            elapsed_time = time.time() - start_time
            observation = Observation(
                config=config,
                objs=objs,
                constraints=constraints,
                trial_state=trial_state,
                elapsed_time=elapsed_time,
            )

            # Send result
            print("Worker: observation=%s. sending result." % str(observation))
            try:
                self.worker_messager.send_message(observation)
            except Exception as e:
                print("Worker send message error:", str(e))
                return
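
The worker above polls receive_message, sleeping when it returns None, and pushes each finished Observation back with send_message. The messager classes themselves are not shown; a minimal queue-backed sketch with the same interface, assuming a non-blocking receive that returns None when nothing is pending:

import queue
from multiprocessing import Queue


class QueueMessagerSketch:
    # Hypothetical messager: send_message puts on the outgoing queue,
    # receive_message returns None when the incoming queue is empty.
    def __init__(self, inbox: Queue, outbox: Queue):
        self.inbox = inbox
        self.outbox = outbox

    def send_message(self, msg):
        self.outbox.put(msg)

    def receive_message(self):
        try:
            return self.inbox.get(block=False)
        except queue.Empty:
            return None
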
Example #9
    def async_run(self):
        config_num = 0
        cur_num = 0
        while time.time() - self.start_time < self.runtime_limit:
            # Add jobs to masterQueue.
            while len(self.config_advisor.running_configs
                      ) < self.batch_size and config_num < self.max_iterations:
                config_num += 1
                config = self.config_advisor.get_suggestion()
                msg = [config, self.time_limit_per_trial]
                self.logger.info("Master: Add config %d." % config_num)
                self.master_messager.send_message(msg)

            # Get results from workerQueue.
            while True:
                observation = self.master_messager.receive_message()
                if observation is None:
                    # Wait for workers.
                    # self.logger.info("Master: wait for worker results. sleep 1s.")
                    time.sleep(1)
                    break
                # Report result.
                cur_num += 1
                config, trial_state, constraints, objs, elapsed_time, worker_info, extra_info = observation

                stored_info = list(self.workers.values())
                if worker_info not in stored_info:
                    self.workers[len(self.workers)] = worker_info

                _perf = float("INF") if objs is None else objs[0]
                self.configs.append(config)
                self.perfs.append(_perf)
                self.eval_dict[config] = [-_perf, time.time(), trial_state]

                if -_perf > self.incumbent_perf:
                    self.incumbent_perf = -_perf
                    self.incumbent_config = config

                if objs is None:
                    observation = Observation(config,
                                              trial_state,
                                              constraints,
                                              self.FAILED_PERF,
                                              elapsed_time,
                                              worker_info=worker_info,
                                              extra=extra_info)
                self.config_advisor.update_observation(observation)

                self.logger.info('Master: Get %d observation: %s' %
                                 (cur_num, str(observation)))
    def sync_run_with_limit(self, runtime_limit):
        batch_num = (self.max_iterations + self.batch_size -
                     1) // self.batch_size
        if self.batch_size > self.config_advisor.init_num:
            batch_num += 1  # fix bug
        batch_id = 0
        while batch_id < batch_num:
            configs = self.config_advisor.get_suggestions()
            # Add batch configs to masterQueue.
            for config in configs:
                msg = [config, self.time_limit_per_trial]
                self.master_messager.send_message(msg)
            self.logger.info('Master: %d-th batch. %d configs sent.' %
                             (batch_id, len(configs)))
            # Get batch results from workerQueue.
            result_num = 0
            result_needed = len(configs)
            while True:
                # Observation: config, trial_state, constraints, objs, elapsed_time
                observation = self.master_messager.receive_message()
                if observation is None:
                    # Wait for workers.
                    # self.logger.info("Master: wait for worker results. sleep 1s.")
                    time.sleep(self.sleep_time)
                    continue
                # Report result.
                result_num += 1
                config, trial_state, constraints, objs, elapsed_time = observation
                if objs is None:
                    observation = Observation(config, trial_state, constraints,
                                              self.FAILED_PERF, elapsed_time)
                self.config_advisor.update_observation(observation)
                self.logger.info(
                    'Master: In the %d-th batch [%d], observation is: %s' %
                    (batch_id, result_num, str(observation)))

                config, trial_state, constraints, objs, elapsed_time = observation
                global_time = time.time() - self.global_start_time
                self.config_list.append(config)
                self.perf_list.append(objs[0])  # single objective
                self.time_list.append(global_time)

                if result_num == result_needed:
                    break
            batch_id += 1

            global_time = time.time() - self.global_start_time
            if global_time >= runtime_limit:
                return
Example #11
    def iterate(self):
        config_space, hist_list = self.get_configspace()
        # print(self._hp_cnt, config_space)
        # print(self._hp_cnt, hist_list)

        # Set the number of initial number.
        if len(hist_list) > 0:
            init_num = 0
        else:
            init_num = 3

        # Set the number of iterations.
        # eta = 3
        # if self._hp_cnt > 0:
        #     iter_num = eta ** (self._hp_cnt + 1) - eta ** self._hp_cnt
        #     if eta ** (self._hp_cnt + 1) > self.max_run:
        #         iter_num = self.max_run - eta ** self._hp_cnt
        # else:
        #     iter_num = eta
        iter_num = self.step_size

        smbo = SMBO(self.evaluate_wrapper,
                    config_space,
                    advisor_type=self.strategy,
                    max_runs=iter_num,
                    init_num=init_num,
                    task_id='smbo%d' % self._hp_cnt,
                    random_state=self.random_state)

        # Set the history trials.
        for _config_dict, _perf in hist_list:
            config = deactivate_inactive_hyperparameters(
                configuration_space=config_space, configuration=_config_dict)
            _observation = Observation(config, SUCCESS, None, (_perf, ), None)
            smbo.config_advisor.history_container.update_observation(
                _observation)
        smbo.run()

        # Save the runhistory.
        self.history_dict = OrderedDict()
        for _config, perf in zip(
                smbo.config_advisor.history_container.configurations,
                smbo.config_advisor.history_container.perfs):
            self.history_dict[_config] = perf

        self._hp_cnt += self._delta
        if self._hp_cnt > self.hp_size:
            self._hp_cnt = self.hp_size
Example #12
def update_observation(request):
    """
    Update observation in config advisor.

    Parameters
    ----------
    request : a dict

    Returns
    -------
    a readable information string in HttpResponse form
    """
    if request.method == 'POST':
        if request.POST:
            task_id = request.POST.get('task_id')
            config_advisor = advisor_dict[task_id]
            config_dict = json.loads(request.POST.get('config'))
            config = Configuration(config_advisor.config_space, config_dict)
            trial_state = int(request.POST.get('trial_state'))
            constraints = json.loads(request.POST.get('constraints'))
            objs = json.loads(request.POST.get('objs'))
            trial_info = json.loads(request.POST.get('trial_info'))
            item = {
                'task_id': task_id,
                'config': config_dict,
                'result': list(objs),
                'status': trial_state,
                'trial_info': trial_info['trial_info'],
                'worker_id': trial_info['worker_id'],
                'cost': trial_info['cost']
            }
            runhistory_id = Runhistory().insert_one(item)
            observation = Observation(config, trial_state, constraints, objs,
                                      trial_info['cost'])
            config_advisor.update_observation(observation)

            config_advisor.save_history()

            print('-' * 21)
            print('Update observation')
            print(observation)
            print('-' * 21)
            return JsonResponse({'code': 1, 'msg': 'SUCCESS'})
        else:
            return JsonResponse({'code': 0, 'msg': 'Empty post data'})
    else:
        return JsonResponse({'code': 0, 'msg': 'Should be a POST request'})
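
A caller reaches this endpoint with a form-encoded POST whose values are JSON strings, matching the json.loads calls above. A hedged client sketch using requests; the URL, task id and field values are placeholders, not values taken from the service:

import json
import requests

payload = {
    'task_id': 'task_0',                           # placeholder task id
    'config': json.dumps({'x1': 0.5, 'x2': 3}),    # configuration as a dict
    'trial_state': '0',                            # e.g. the SUCCESS state code
    'constraints': json.dumps(None),
    'objs': json.dumps([0.42]),
    'trial_info': json.dumps({'trial_info': '', 'worker_id': 0, 'cost': 1.3}),
}
resp = requests.post('http://127.0.0.1:8000/update_observation/', data=payload)
print(resp.json())  # {'code': 1, 'msg': 'SUCCESS'} on success
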
Example #13
 def sync_run(self):
     batch_num = (self.max_iterations + self.batch_size -
                  1) // self.batch_size
     if self.batch_size > self.config_advisor.init_num:
         batch_num += 1  # fix bug
     batch_id = 0
     while batch_id < batch_num:
         configs = self.config_advisor.get_suggestions()
         # Add batch configs to masterQueue.
         for config in configs:
             msg = [config, self.time_limit_per_trial]
             self.master_messager.send_message(msg)
         self.logger.info('Master: %d-th batch. %d configs sent.' %
                          (batch_id, len(configs)))
         # Get batch results from workerQueue.
         result_num = 0
         result_needed = len(configs)
         while True:
             observation = self.master_messager.receive_message()
             if observation is None:
                 # Wait for workers.
                 # self.logger.info("Master: wait for worker results. sleep 1s.")
                 time.sleep(1)
                 continue
             # Report result.
             result_num += 1
             if observation.objs is None:
                 observation = Observation(
                     config=observation.config,
                     objs=self.FAILED_PERF,
                     constraints=observation.constraints,
                     trial_state=observation.trial_state,
                     elapsed_time=observation.elapsed_time,
                 )
             self.config_advisor.update_observation(observation)
             self.logger.info(
                 'Master: In the %d-th batch [%d], observation is: %s' %
                 (batch_id, result_num, str(observation)))
             if result_num == result_needed:
                 break
         batch_id += 1
Example #14
 def update_incumbent_before_reduce(self, T, val_losses, n_iteration):
     if int(n_iteration) < self.R:
         return
     self.incumbent_configs.extend(T)
     self.incumbent_perfs.extend(val_losses)
     # update config advisor
     for config, perf in zip(T, val_losses):
         objs = [perf]
         observation = Observation(
             config=config,
             objs=objs,
             constraints=None,
             trial_state=SUCCESS,
             elapsed_time=None,
         )
         self.config_advisor.update_observation(observation)
         self.logger.info('update observation: config=%s, perf=%f' %
                          (str(config), perf))
     self.logger.info(
         '%d observations updated. %d incumbent configs total.' %
         (len(T), len(self.incumbent_configs)))
    def async_run_with_limit(self, runtime_limit):
        config_num = 0
        result_num = 0
        while result_num < self.max_iterations:
            # Add jobs to masterQueue.
            while len(self.config_advisor.running_configs
                      ) < self.batch_size and config_num < self.max_iterations:
                config_num += 1
                config = self.config_advisor.get_suggestion()
                msg = [config, self.time_limit_per_trial]
                self.logger.info("Master: Add config %d." % config_num)
                self.master_messager.send_message(msg)

            # Get results from workerQueue.
            while True:
                observation = self.master_messager.receive_message()
                if observation is None:
                    # Wait for workers.
                    # self.logger.info("Master: wait for worker results. sleep 1s.")
                    time.sleep(self.sleep_time)
                    break
                # Report result.
                result_num += 1
                config, trial_state, constraints, objs, elapsed_time = observation
                if objs is None:
                    observation = Observation(config, trial_state, constraints,
                                              self.FAILED_PERF, elapsed_time)
                self.config_advisor.update_observation(observation)
                self.logger.info('Master: Get %d observation: %s' %
                                 (result_num, str(observation)))

                config, trial_state, constraints, objs, elapsed_time = observation
                global_time = time.time() - self.global_start_time
                self.config_list.append(config)
                self.perf_list.append(objs[0])  # single objective
                self.time_list.append(global_time)

            global_time = time.time() - self.global_start_time
            if global_time >= runtime_limit:
                return
Example #16
    def get_suggestions(self, batch_size=None, history_container=None):
        if batch_size is None:
            batch_size = self.batch_size
        assert batch_size >= 1
        if history_container is None:
            history_container = self.history_container

        num_config_evaluated = len(history_container.configurations)
        num_config_successful = len(history_container.successful_perfs)

        if num_config_evaluated < self.init_num:
            if self.initial_configurations is not None:  # self.init_num equals to len(self.initial_configurations)
                next_configs = self.initial_configurations[
                    num_config_evaluated:num_config_evaluated + batch_size]
                if len(next_configs) < batch_size:
                    next_configs.extend(
                        self.sample_random_configs(
                            batch_size - len(next_configs), history_container))
                return next_configs
            else:
                return self.sample_random_configs(batch_size,
                                                  history_container)

        if self.optimization_strategy == 'random':
            return self.sample_random_configs(batch_size, history_container)

        if num_config_successful < max(self.init_num, 1):
            self.logger.warning(
                'No enough successful initial trials! Sample random configurations.'
            )
            return self.sample_random_configs(batch_size, history_container)

        X = convert_configurations_to_array(history_container.configurations)
        Y = history_container.get_transformed_perfs(transform=None)
        # cY = history_container.get_transformed_constraint_perfs(transform='bilog')

        batch_configs_list = list()

        if self.batch_strategy == 'median_imputation':
            # set bilog_transform=False to get real cY for estimating median
            cY = history_container.get_transformed_constraint_perfs(
                transform=None)

            estimated_y = np.median(Y, axis=0).reshape(-1).tolist()
            estimated_c = np.median(
                cY, axis=0).tolist() if self.num_constraints > 0 else None
            batch_history_container = copy.deepcopy(history_container)

            for batch_i in range(batch_size):
                # use super class get_suggestion
                curr_batch_config = super().get_suggestion(
                    batch_history_container)

                # imputation
                observation = Observation(config=curr_batch_config,
                                          objs=estimated_y,
                                          constraints=estimated_c,
                                          trial_state=SUCCESS,
                                          elapsed_time=None)
                batch_history_container.update_observation(observation)
                batch_configs_list.append(curr_batch_config)

        elif self.batch_strategy == 'local_penalization':
            # local_penalization only supports single objective with no constraint
            self.surrogate_model.train(X, Y)
            incumbent_value = history_container.get_incumbents()[0][1]
            # L = self.estimate_L(X)
            for i in range(batch_size):
                if self.rng.random() < self.rand_prob:
                    # sample random configuration proportionally
                    self.logger.info('Sample random config. rand_prob=%f.' %
                                     self.rand_prob)
                    cur_config = self.sample_random_configs(
                        1,
                        history_container,
                        excluded_configs=batch_configs_list)[0]
                else:
                    self.acquisition_function.update(
                        model=self.surrogate_model,
                        eta=incumbent_value,
                        num_data=len(history_container.data),
                        batch_configs=batch_configs_list)

                    challengers = self.optimizer.maximize(
                        runhistory=history_container,
                        num_points=5000,
                    )
                    cur_config = challengers.challengers[0]
                batch_configs_list.append(cur_config)
        elif self.batch_strategy == 'reoptimization':
            surrogate_trained = False
            for i in range(batch_size):
                if self.rng.random() < self.rand_prob:
                    # sample random configuration proportionally
                    self.logger.info('Sample random config. rand_prob=%f.' %
                                     self.rand_prob)
                    cur_config = self.sample_random_configs(
                        1,
                        history_container,
                        excluded_configs=batch_configs_list)[0]
                else:
                    if not surrogate_trained:
                        # set return_list=True to ensure surrogate trained
                        candidates = super().get_suggestion(history_container,
                                                            return_list=True)
                        surrogate_trained = True
                    else:
                        # re-optimize acquisition function
                        challengers = self.optimizer.maximize(
                            runhistory=history_container, num_points=5000)
                        candidates = challengers.challengers
                    cur_config = None
                    for config in candidates:
                        if config not in batch_configs_list and config not in history_container.configurations:
                            cur_config = config
                            break
                    if cur_config is None:
                        self.logger.warning(
                            'Cannot get non duplicate configuration from BO candidates (len=%d). '
                            'Sample random config.' % (len(candidates), ))
                        cur_config = self.sample_random_configs(
                            1,
                            history_container,
                            excluded_configs=batch_configs_list)[0]
                batch_configs_list.append(cur_config)
        elif self.batch_strategy == 'default':
            # select first N candidates
            candidates = super().get_suggestion(history_container,
                                                return_list=True)
            idx = 0
            while len(batch_configs_list) < batch_size:
                if idx >= len(candidates):
                    self.logger.warning(
                        'Cannot get non duplicate configuration from BO candidates (len=%d). '
                        'Sample random config.' % (len(candidates), ))
                    cur_config = self.sample_random_configs(
                        1,
                        history_container,
                        excluded_configs=batch_configs_list)[0]
                elif self.rng.random() < self.rand_prob:
                    # sample random configuration proportionally
                    self.logger.info('Sample random config. rand_prob=%f.' %
                                     self.rand_prob)
                    cur_config = self.sample_random_configs(
                        1,
                        history_container,
                        excluded_configs=batch_configs_list)[0]
                else:
                    cur_config = None
                    while idx < len(candidates):
                        conf = candidates[idx]
                        idx += 1
                        if conf not in batch_configs_list and conf not in history_container.configurations:
                            cur_config = conf
                            break
                if cur_config is not None:
                    batch_configs_list.append(cur_config)

        else:
            raise ValueError('Invalid sampling strategy - %s.' %
                             self.batch_strategy)
        return batch_configs_list
    def _get_suggestion(self, history_container=None):
        if history_container is None:
            history_container = self.history_container

        num_config_all = len(history_container.configurations) + len(
            self.running_configs)
        num_config_successful = len(history_container.successful_perfs)

        if (num_config_all < self.init_num) or \
                num_config_successful < self.bo_start_n or \
                self.optimization_strategy == 'random':
            if num_config_all >= len(self.initial_configurations):
                _config = self.sample_random_configs(1, history_container)[0]
            else:
                _config = self.initial_configurations[num_config_all]
            return _config

        # sample random configuration proportionally
        if self.rng.random() < self.rand_prob:
            self.logger.info('Sample random config. rand_prob=%f.' %
                             self.rand_prob)
            return self.sample_random_configs(
                1, history_container, excluded_configs=self.running_configs)[0]

        X = convert_configurations_to_array(history_container.configurations)
        Y = history_container.get_transformed_perfs(transform=None)
        # cY = history_container.get_transformed_constraint_perfs(transform='bilog')

        if self.batch_strategy == 'median_imputation':
            # set bilog_transform=False to get real cY for estimating median
            cY = history_container.get_transformed_constraint_perfs(
                transform=None)

            estimated_y = np.median(Y, axis=0).reshape(-1).tolist()
            estimated_c = np.median(
                cY, axis=0).tolist() if self.num_constraints > 0 else None
            batch_history_container = copy.deepcopy(history_container)
            # imputation
            for config in self.running_configs:
                observation = Observation(config=config,
                                          objs=estimated_y,
                                          constraints=estimated_c,
                                          trial_state=SUCCESS,
                                          elapsed_time=None)
                batch_history_container.update_observation(observation)

            # use super class get_suggestion
            return super().get_suggestion(batch_history_container)

        elif self.batch_strategy == 'local_penalization':
            # local_penalization only supports single objective with no constraint
            self.surrogate_model.train(X, Y)
            incumbent_value = history_container.get_incumbents()[0][1]
            self.acquisition_function.update(
                model=self.surrogate_model,
                eta=incumbent_value,
                num_data=len(history_container.data),
                batch_configs=self.running_configs)

            challengers = self.optimizer.maximize(runhistory=history_container,
                                                  num_points=5000)
            return challengers.challengers[0]

        elif self.batch_strategy == 'default':
            # select first N candidates
            candidates = super().get_suggestion(history_container,
                                                return_list=True)

            for config in candidates:
                if config not in self.running_configs and config not in history_container.configurations:
                    return config

            self.logger.warning(
                'Cannot get non duplicate configuration from BO candidates (len=%d). '
                'Sample random config.' % (len(candidates), ))
            return self.sample_random_configs(
                1, history_container, excluded_configs=self.running_configs)[0]
        else:
            raise ValueError('Invalid sampling strategy - %s.' %
                             self.batch_strategy)
Example #18
    def run(self):
        while True:
            try:
                msg = self.receiver_messager.receive_message()
            except Exception as e:
                self.logger.error("Worker receive message error: %s." % str(e))
            if msg == 'ready':
                break

            # Get config
            try:
                msg = self.worker_messager.receive_message()
            except Exception as e:
                self.logger.error("Worker receive message error: %s." % str(e))

            if msg is None:
                # Wait for configs
                time.sleep(0.3)
                continue
            self.logger.info("Worker: get config. start working.")
            config, time_limit_per_trial = msg

            # Start working
            trial_state = SUCCESS
            start_time = time.time()
            try:
                args, kwargs = (config, ), dict()
                timeout_status, _result = time_limit(self.evaluator,
                                                     time_limit_per_trial,
                                                     args=args,
                                                     kwargs=kwargs)
                if timeout_status:
                    raise TimeoutException(
                        'Timeout: time limit for this evaluation is %.1fs' %
                        time_limit_per_trial)
                else:
                    objs, constraints = get_result(_result)
            except Exception as e:
                if isinstance(e, TimeoutException):
                    trial_state = TIMEOUT
                else:
                    traceback.print_exc(file=sys.stdout)
                    trial_state = FAILED
                objs = None
                constraints = None

            _perf = float("INF") if objs is None else objs[0]
            self.configs.append(config)
            self.perfs.append(_perf)
            self.eval_dict[config] = [-_perf, time.time(), trial_state]

            if -_perf > self.incumbent_perf:
                self.incumbent_perf = -_perf
                self.incumbent_config = config

            elapsed_time = time.time() - start_time
            observation = Observation(config,
                                      trial_state,
                                      constraints,
                                      objs,
                                      elapsed_time,
                                      worker_info={
                                          'ip': get_host_ip(),
                                          'port': self.worker_port
                                      })

            # Send result
            self.logger.info("Worker: observation=%s. sending result." %
                             str(observation))
            try:
                self.worker_messager.send_message(observation)
            except Exception as e:
                self.logger.error("Worker send message error: %s." % str(e))

        eval_list = self.eval_dict.items()
        sorted_list = sorted(eval_list, key=lambda x: x[1][0], reverse=True)
        if len(sorted_list) > 10:
            ensemble_dict = dict(sorted_list[:int(len(sorted_list) / 10)])
        else:
            ensemble_dict = dict(sorted_list[:1])
        self.best_configs = list(ensemble_dict.keys())
        preds = self.fetch_ensemble_pred()
        self.receiver_messager.send_message(preds)

        while True:
            time.sleep(5)
            try:
                msg = self.receiver_messager.receive_message()
            except Exception as e:
                self.logger.error("Worker receive message error: %s." % str(e))
            if msg == 'over':
                break
            elif msg is not None:
                self.receiver_messager.send_message(msg)
    def get_suggestions(self, batch_size=None, history_container=None):
        if batch_size is None:
            batch_size = self.batch_size
        if history_container is None:
            history_container = self.history_container

        num_config_evaluated = len(history_container.configurations)
        num_config_successful = len(history_container.successful_perfs)

        if num_config_evaluated < self.init_num:
            if self.initial_configurations is not None:  # self.init_num equals to len(self.initial_configurations)
                return self.initial_configurations
            else:
                return self.sample_random_configs(self.init_num,
                                                  history_container)

        if self.optimization_strategy == 'random':
            return self.sample_random_configs(batch_size, history_container)

        if num_config_successful < max(self.init_num, 1):
            self.logger.warning(
                'No enough successful initial trials! Sample random configurations.'
            )
            return self.sample_random_configs(batch_size, history_container)

        X = convert_configurations_to_array(history_container.configurations)
        Y = history_container.get_transformed_perfs()
        # cY = history_container.get_transformed_constraint_perfs()

        batch_configs_list = list()

        if self.batch_strategy == 'median_imputation':
            # set bilog_transform=False to get real cY for estimating median
            cY = history_container.get_transformed_constraint_perfs(
                bilog_transform=False)

            estimated_y = np.median(Y, axis=0).reshape(-1).tolist()
            estimated_c = np.median(
                cY, axis=0).tolist() if self.num_constraints > 0 else None
            batch_history_container = copy.deepcopy(history_container)

            for batch_i in range(batch_size):
                # use super class get_suggestion
                curr_batch_config = super().get_suggestion(
                    batch_history_container)

                # imputation
                observation = Observation(curr_batch_config, SUCCESS,
                                          estimated_c, estimated_y, None)
                batch_history_container.update_observation(observation)
                batch_configs_list.append(curr_batch_config)

        elif self.batch_strategy == 'local_penalization':
            # local_penalization only supports single objective with no constraint
            self.surrogate_model.train(X, Y)
            incumbent_value = history_container.get_incumbents()[0][1]
            # L = self.estimate_L(X)
            for i in range(batch_size):
                self.acquisition_function.update(
                    model=self.surrogate_model,
                    eta=incumbent_value,
                    num_data=len(history_container.data),
                    batch_configs=batch_configs_list)

                challengers = self.optimizer.maximize(
                    runhistory=history_container,
                    num_points=5000,
                )
                batch_configs_list.append(challengers.challengers[0])
        else:
            raise ValueError('Invalid sampling strategy - %s.' %
                             self.batch_strategy)
        return batch_configs_list