Example #1
0
 def process_all_configs_exist(self, info_dict, budget):
     """Fallback used when every previously sampled configuration already exists.

     Re-seeds the configuration space with a fresh random seed, draws one more
     configuration, records the sampling failure (and the seed used) in
     ``info_dict``, and hands the pair on to ``process_config_info_pair``.
     """
     fresh_seed = self.rng.randint(1, 8888)
     self.config_space.seed(fresh_seed)
     config = self.config_space.sample_configuration()
     add_configs_origin(config, "Initial Design")
     # equivalent to info_dict.update({...}): record why re-seeding happened
     info_dict["sampling_different_samples_failed"] = True
     info_dict["seed"] = fresh_seed
     return self.process_config_info_pair(config, info_dict, budget)
Example #2
0
 def tpe_sampling(self, epm, budget):
     """Draw a configuration from the density model ``epm`` for ``budget``.

     Up to ``self.max_try`` rounds of candidates are sampled from the model;
     the first candidate not yet observed is returned as a model-based pick.
     Each failed round widens the bandwidth factor. If every round fails,
     a random configuration is returned instead.
     """
     info_dict = {"model_based_pick": True}
     for attempt in range(self.max_try):
         candidates = epm.sample(
             n_candidates=self.n_candidates,
             sort_by_EI=self.sort_by_EI,
             random_state=self.rng,
             bandwidth_factor=self._bw_factor + self.min_bw_factor)
         for idx, candidate in enumerate(candidates):
             if not self.is_config_exist(budget, candidate):
                 add_configs_origin(candidate, "ETPE sampling")
                 return candidate, info_dict
             self.logger.debug(
                 f"The sample already exists and needs to be resampled. "
                 f"It's the {idx}-th time sampling in thompson sampling. "
             )
         # whole round was duplicates: widen the bandwidth factor and retry
         prev_bw_factor = self._bw_factor
         self._bw_factor = (prev_bw_factor + self.min_bw_factor
                            ) * self.gamma2 - self.min_bw_factor
         self.logger.warning(
             f"After {attempt + 1} times sampling, all samples exist in observations. "
             f"Update bandwidth_factor from {prev_bw_factor:.4f} to {self._bw_factor:.4f} by "
             f"multiply gamma2 ({self.gamma2}).")
     # all model-based attempts exhausted -> fall back to random search
     fallback = self.config_space.sample_configuration()
     add_configs_origin(fallback, "Random Search")
     return fallback, {"model_based_pick": False}
Example #3
0
 def pick_random_initial_config(self, budget, max_sample=1000, origin="Initial Design"):
     """Randomly sample a configuration not yet observed for ``budget``.

     Tries at most ``max_sample`` random draws; the first unseen configuration
     is returned via ``process_config_info_pair``. If every draw is a
     duplicate, delegates to ``process_all_configs_exist``.
     """
     info_dict = {"model_based_pick": False}
     # attempt counts from 1, matching the original 1-based debug message
     for attempt in range(1, max_sample + 1):
         config = self.config_space.sample_configuration()
         add_configs_origin(config, origin)
         if not self.is_config_exist(budget, config):
             return self.process_config_info_pair(config, info_dict, budget)
         self.logger.debug(f"The sample already exists and needs to be resampled. "
                           f"It's the {attempt}-th time sampling in random sampling. ")
     return self.process_all_configs_exist(info_dict, budget)
Example #4
0
    def sample(self,
               n_candidates=20,
               sort_by_EI=False,
               random_state=None,
               bandwidth_factor=3) -> List[Configuration]:
        """Sample candidate configurations from the fitted good-KDEs.

        Parameters
        ----------
        n_candidates : int
            Number of configurations to return.
        sort_by_EI : bool
            If True, best-effort sort candidates by descending predicted EI.
        random_state : int, RandomState or None
            Seed/generator forwarded to the KDEs and the fallback sampler.
        bandwidth_factor : float
            Multiplier temporarily applied to each KDE's bandwidth while
            sampling (wider bandwidth -> more exploration).

        Returns
        -------
        List[Configuration]
            Exactly ``n_candidates`` configurations (shortfalls from failed
            inverse transforms are topped up with random configurations).
        """
        # https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KernelDensity.html#sklearn.neighbors.KernelDensity
        groups = np.array(self.groups)
        rng = check_random_state(random_state)
        if self.good_kdes is None:
            # no model fitted yet -> plain random sampling from the config space
            self.logger.warning("good_kdes is None, random sampling.")
            return self.config_transformer.config_space.sample_configuration(
                n_candidates)
        sampled_matrix = np.zeros([n_candidates, len(self.groups)])
        for group, good_kde in enumerate(self.good_kdes):
            group_mask = groups == group
            if good_kde:
                # KDE sampling with a temporarily widened bandwidth; restore
                # the original bandwidth even if sampling raises.
                prev_bw = good_kde.bandwidth
                good_kde.set_params(bandwidth=prev_bw * bandwidth_factor)
                try:
                    result = good_kde.sample(n_candidates,
                                             random_state=random_state)
                finally:
                    good_kde.set_params(bandwidth=prev_bw)
            else:
                # random sampling in [0, 1)
                result = rng.rand(n_candidates, group_mask.sum())
            sampled_matrix[:, group_mask] = result
        candidates = self.config_transformer.inverse_transform(sampled_matrix)
        # inverse_transform can drop rows it cannot turn into valid configs.
        # BUGFIX: was `len(candidates) - n_candidates`, which is <= 0 after a
        # drop, so the top-up branch requested a negative sample count.
        n_fails = n_candidates - len(candidates)
        add_configs_origin(candidates, "ETPE sampling")
        if n_fails > 0:
            random_candidates = self.config_transformer.config_space.sample_configuration(
                n_fails)
            add_configs_origin(random_candidates, "Random Search")
            candidates.extend(random_candidates)
        if sort_by_EI:
            try:
                X = [candidate.get_array() for candidate in candidates]
                X_trans = self.config_transformer.transform(X)
                EI = self.predict(X_trans)
                # negate EI so argsort yields descending order
                indexes = np.argsort(-EI)
                candidates = [candidates[ix] for ix in indexes]
            except Exception as e:
                # best-effort sorting: fall back to the unsorted candidates
                self.logger.error(f"sort_by_EI failed: {e}")
        return candidates
Example #5
0
 def _get_config(self, budget, max_budget):
     """Return the next configuration for ``budget``.

     Uses the model fitted at ``max_budget`` when available; before a model
     exists, serves the initial-design configurations in order and then
     falls back to random sampling.
     """
     epm = self.budget2epm[max_budget]
     if epm is None:
         # no model yet: initial design first, then random sampling
         info_dict = {"model_based_pick": False}
         ix = self.initial_design_ix
         if ix >= len(self.initial_design_configs):
             return self.pick_random_initial_config(budget)
         config = self.initial_design_configs[ix]
         add_configs_origin(config, "Initial Design")
         self.initial_design_ix = ix + 1
         return self.process_config_info_pair(config, info_dict, budget)
     # model-based pick; shrink the bandwidth factor after each success
     config, info_dict = self.tpe_sampling(epm, budget)
     self._bw_factor *= self.gamma1
     return self.process_config_info_pair(config, info_dict, budget)
 def get_config_(self, budget, max_budget):
     """Pick the next configuration by evaluating random (and optionally
     local-search) candidates with the model fitted at ``max_budget``.

     Candidates are ranked by evaluated loss (ties broken randomly); the
     best candidate not yet observed for ``budget`` is returned. Falls back
     to ``process_all_configs_exist`` when every candidate is a duplicate.
     """
     # choose model from max-budget
     epm = self.budget2epm[max_budget]
     # random sampling
     if epm is None:
         return self.pick_random_initial_config(budget)
     # model based pick
     info_dict = {"model_based_pick": True}
     # using config_evaluator evaluate random samples
     configs = self.config_space.sample_configuration(self.n_samples)
     losses, configs_sorted = self.evaluate(configs,
                                            max_budget,
                                            return_loss_config=True)
     add_configs_origin(configs_sorted, "Random Search (Sorted)")
     if self.use_local_search:
         start_points = self.get_local_search_initial_points(
             max_budget, 10, configs_sorted)
         local_losses, local_configs = self.local_search(
             start_points, max_budget)
         add_configs_origin(local_configs, "Local Search")
         concat_losses = np.hstack(
             [losses.flatten(), local_losses.flatten()])
         # NOTE(review): `losses` is concatenated with the UNSORTED `configs`
         # here while `evaluate` also returns `configs_sorted` — this is only
         # correct if `losses` is aligned with `configs` (not with
         # `configs_sorted`). Confirm against `evaluate`'s contract.
         concat_configs = configs + local_configs
         # random tiebreaker so equal losses are ordered randomly by lexsort
         random_var = self.rng.rand(len(concat_losses))
         indexes = np.lexsort((random_var.flatten(), concat_losses))
         concat_configs_sorted = [concat_configs[i] for i in indexes]
         concat_losses = concat_losses[indexes]
     else:
         concat_losses, concat_configs_sorted = losses, configs_sorted
     # pick the best-ranked configuration that has not been observed yet
     for i, config in enumerate(concat_configs_sorted):
         if self.is_config_exist(budget, config):
             self.logger.debug(
                 f"The sample already exists and needs to be resampled. "
                 f"It's the {i}-th time sampling in bayesian sampling. ")
         else:
             return self.process_config_info_pair(config, info_dict, budget)
     return self.process_all_configs_exist(info_dict, budget)