Example no. 1
0
    def maximize(self, batch_size=1):
        """
        Maximize the acquisition function over local and random candidates.

        Candidates are the one-exchange neighbourhood of the current
        incumbent configuration plus uniformly sampled random configurations;
        all are scored in one batch by the acquisition function.

        Parameters
        ----------
        batch_size : int
            Number of maximizers to return.

        Returns
        -------
        list
            The ``batch_size`` configurations with the highest acquisition
            values, best first.
        """
        # Local candidates: all one-exchange neighbours of the incumbent.
        configs_list = list(
            get_one_exchange_neighbourhood(self.objective_func.eta['config'],
                                           seed=self.rng.randint(int(1e6))))
        rand_incs = convert_configurations_to_array(configs_list)

        # Fill the remaining sample budget with uniform random configurations.
        rand_configs = sample_configurations(
            self.config_space, self.n_samples - rand_incs.shape[0])
        rand = convert_configurations_to_array(rand_configs)
        configs_list.extend(rand_configs)

        # Score all candidates at once; higher acquisition value is better,
        # so sort by the negated values to get a descending order.
        X = np.concatenate((rand_incs, rand), axis=0)
        y = self.objective_func(X).flatten()
        best_idxs = np.argsort(-y)[:batch_size]
        return [configs_list[idx] for idx in best_idxs]
Example no. 2
0
    def _iterate(self, s, budget=MAX_INT, skip_last=0):
        """
        Run one successive-halving bracket.

        Parameters
        ----------
        s : int
            Bracket index; determines the initial number of configurations
            and the initial resource assigned to each one.
        budget : number
            Wall-clock budget in seconds for this bracket, measured from the
            moment the initial batch of configurations is sampled.
        skip_last : int
            Number of final halving rounds to skip.
        """
        # Set initial number of configurations
        n = int(ceil(self.B / self.R / (s + 1) * self.eta**s))
        # initial number of iterations per config
        r = int(self.R * self.eta**(-s))

        # Choose a batch of configurations in different mechanisms.
        start_time = time.time()
        T = sample_configurations(self.config_space, n)
        time_elapsed = time.time() - start_time
        self.logger.info(
            "Choosing next batch of configurations took %.2f sec." %
            time_elapsed)

        with ParallelProcessEvaluator(self.eval_func,
                                      n_worker=self.n_workers) as executor:
            for i in range((s + 1) - int(skip_last)):  # changed from s + 1
                # Stop the bracket once the wall-clock budget is exhausted.
                if time.time() >= budget + start_time:
                    break

                # Run each of the n configs for <iterations>
                # and keep best (n_configs / eta) configurations

                n_configs = n * self.eta**(-i)
                n_resource = r * self.eta**i

                self.logger.info(
                    "MFSE: %d configurations x size %d / %d each" %
                    (int(n_configs), n_resource, self.R))

                # Evaluate all surviving configurations in parallel at the
                # current resource fraction.
                val_losses = executor.parallel_execute(
                    T,
                    resource_ratio=float(n_resource / self.R),
                    eta=self.eta,
                    first_iter=(i == 0))
                # Record only finite losses; NaN/inf are treated as failures
                # and excluded from the per-resource history.
                for _id, _val_loss in enumerate(val_losses):
                    if np.isfinite(_val_loss):
                        self.target_x[int(n_resource)].append(T[_id])
                        self.target_y[int(n_resource)].append(_val_loss)

                self.exp_output[time.time()] = (int(n_resource), T, val_losses)

                # Configurations evaluated at the full budget become
                # incumbent candidates.
                if int(n_resource) == self.R:
                    self.incumbent_configs.extend(T)
                    self.incumbent_perfs.extend(val_losses)

                # Select a number of best configurations for the next loop.
                # Filter out early stops, if any.
                # NOTE(review): losses are sorted ascending (lower is better);
                # despite the comment above, no explicit filtering of
                # early-stopped entries happens here — confirm intent.
                indices = np.argsort(val_losses)
                if len(T) >= self.eta:
                    T = [T[i] for i in indices]
                    reduced_num = int(n_configs / self.eta)
                    T = T[0:reduced_num]
                else:
                    T = [T[indices[0]]]
Example no. 3
0
 def choose_next(self, X: np.ndarray, Y: np.ndarray):
     _config_num = X.shape[0]
     if _config_num < self.init_num:
         if self.initial_configurations is None:
             if _config_num == 0:
                 return self.config_space.get_default_configuration()
             else:
                 return sample_configurations(self.config_space, 1)[0]
         else:
             return self.initial_configurations[_config_num]
     else:
         if self.surrogate_model == 'tpe':
             config, _ = self.model.get_config()
         elif self.surrogate_model == 'random_search':
             config = sample_configurations(self.config_space, 1)[0]
         else:
             raise ValueError('Invalid surrogate model - %s.' %
                              self.surrogate_model)
         return config
Example no. 4
0
    def smac_get_candidate_configurations(self, num_config):
        """
        Propose ``num_config`` configurations via the acquisition optimizer,
        with random configurations interleaved for exploration.

        Falls back to pure random sampling until more than three observations
        at the maximum resource level are available.

        Parameters
        ----------
        num_config : int
            Number of configurations to return.

        Returns
        -------
        list
            The proposed configurations.
        """
        # Too few full-budget observations to fit a useful surrogate yet.
        if len(self.target_y[self.iterate_r[-1]]) <= 3:
            return sample_configurations(self.config_space, num_config)

        # Incumbent: best (lowest) observation at the maximum resource level.
        max_r = self.iterate_r[-1]
        best_index = np.argmin(self.target_y[max_r])
        incumbent = {
            'config': self.target_x[max_r][best_index],
            'obj': self.target_y[max_r][best_index],
        }
        self.acquisition_func.update(model=self.surrogate, eta=incumbent)

        config_candidates = self.acq_optimizer.maximize(batch_size=num_config)

        # Each slot is filled with a random configuration with probability
        # p_threshold, otherwise with the next acquisition candidate.
        p_threshold = 0.3
        candidates = list()
        idx_acq = 0
        for _ in range(num_config):
            # Fix: guard on idx_acq (candidates actually consumed) instead of
            # the loop index, which skipped usable acquisition candidates
            # whenever an earlier slot had been filled randomly.
            if rd.random() < p_threshold or idx_acq >= len(config_candidates):
                config = sample_configurations(self.config_space, 1)[0]
            else:
                config = config_candidates[idx_acq]
                idx_acq += 1
            candidates.append(config)
        return candidates
Example no. 5
0
    def baseline_get_candidate_configurations(self, num_config):
        """
        Draw ``num_config`` distinct configurations from the baseline
        generator, then mix in uniformly random configurations.

        Parameters
        ----------
        num_config : int
            Number of configurations to return.

        Returns
        -------
        list
            The proposed configurations.
        """
        # Collect num_config distinct suggestions from the generator,
        # retrying on duplicates.
        config_candidates = list()
        remaining = num_config
        while remaining:
            suggestion = self.config_gen.get_config()[0]
            if suggestion not in config_candidates:
                config_candidates.append(suggestion)
                remaining -= 1

        # Each slot becomes a random configuration with probability
        # p_threshold, otherwise the next generator suggestion.
        p_threshold = 0.3
        candidates = list()
        next_candidate = 0
        for _ in range(num_config):
            if rd.random() < p_threshold:
                candidates.append(
                    sample_configurations(self.config_space, 1)[0])
            else:
                candidates.append(config_candidates[next_candidate])
                next_candidate += 1
        return candidates
Example no. 6
0
 def sample_configs_for_archs(self, include_architectures, N):
     """Sample ``N`` configurations from each architecture's config space
     and return them concatenated in architecture order."""
     sampled = []
     for arch in include_architectures:
         arch_space = self.get_model_config_space(arch)
         sampled.extend(sample_configurations(arch_space, N))
     return sampled
Example no. 7
0
# CLI options: backbone network and image dataset to evaluate.
# NOTE(review): `parser` is created above this visible region.
parser.add_argument('--network', type=str, default='mobilenet')
parser.add_argument('--dataset', type=str, default='extremely_small')
args = parser.parse_args()

data_dir = 'data/img_datasets/%s/' % args.dataset

# Load the dataset with a train/val split; the same directory is also
# registered as the test path.
image_data = ImageDataset(data_path=data_dir, train_val_split=True)
image_data.set_test_path(data_dir)
evaluator = DLEvaluator(None,
                        IMG_CLS,
                        scorer=get_metric('acc'),
                        dataset=image_data,
                        device='cuda',
                        image_size=32)

from solnml.components.computation.parallel_process import ParallelProcessEvaluator
from solnml.components.models.img_classification import _classifiers
# Build the search space: the chosen classifier's hyperparameters, a fixed
# "estimator" hyperparameter pinning the network, plus the data-augmentation
# hyperparameters and their conditions.
network_id = args.network
config_space = _classifiers[network_id].get_hyperparameter_search_space()
model = UnParametrizedHyperparameter("estimator", network_id)
config_space.add_hyperparameter(model)
aug_space = get_aug_hyperparameter_space()
config_space.add_hyperparameters(aug_space.get_hyperparameters())
config_space.add_conditions(aug_space.get_conditions())

# Evaluate 12 random configurations in parallel at 10% of the full budget.
executor = ParallelProcessEvaluator(evaluator, n_worker=3)
_configs = sample_configurations(config_space, 12)

res = executor.parallel_execute(_configs, resource_ratio=0.1)
print(res)
Example no. 8
0
    def sample_configs_for_archs(self,
                                 include_architectures,
                                 N,
                                 sampling_strategy='uniform'):
        """
        Sample ``N`` configurations for every requested architecture.

        Parameters
        ----------
        include_architectures : iterable
            Architecture identifiers; each has its own configuration space.
        N : int
            Number of configurations to draw per architecture.
        sampling_strategy : str
            'uniform' (default config + random), 'bohb' (TPE with random
            interleaving), anything else selects the MFSE surrogate.

        Returns
        -------
        list
            Configurations for all architectures concatenated in the order
            of ``include_architectures``.

        Fix: the MFSE branch previously returned from inside the loop over
        architectures, discarding configurations already collected for
        earlier architectures and never visiting later ones. Each strategy
        now produces a per-architecture list that is accumulated.
        """
        configs = list()
        for _arch in include_architectures:
            _cs = self.get_model_config_space(_arch)
            if sampling_strategy == 'uniform':
                # Default configuration plus N-1 uniform random samples.
                configs.append(_cs.get_default_configuration())
                configs.extend(sample_configurations(_cs, N - 1))
            elif sampling_strategy == 'bohb':
                configs.extend(self._bohb_samples(_arch, _cs, N))
            else:  # mfse
                configs.extend(self._mfse_samples(_arch, _cs, N))
        return configs

    def _bohb_samples(self, _arch, _cs, N):
        """Return N configs for one architecture via TPE with random
        interleaving (BOHB style)."""
        if _arch not in self.tpe_config_gen:
            self.tpe_config_gen[_arch] = TPE(_cs)

        # Draw N distinct candidates from the TPE generator, retrying
        # on duplicates.
        config_candidates = list()
        config_left = N
        while config_left:
            config = self.tpe_config_gen[_arch].get_config()[0]
            if config in config_candidates:
                continue
            config_candidates.append(config)
            config_left -= 1

        # Replace each slot with a random sample with probability p_threshold.
        p_threshold = 0.3
        result = list()
        idx_acq = 0
        for _id in range(N):
            if rd.random() < p_threshold:
                config = sample_configurations(_cs, 1)[0]
            else:
                config = config_candidates[idx_acq]
                idx_acq += 1
            result.append(config)
        return result

    def _mfse_samples(self, _arch, _cs, N):
        """Return N configs for one architecture via the MFSE surrogate;
        falls back to random sampling until full-budget observations exist."""
        if _arch not in self.mfse_config_gen:
            # Lazily build the weighted-RF surrogate and its EI-based
            # random-sampling acquisition optimizer for this architecture.
            types, bounds = get_types(_cs)
            init_weight = [1. / self.s_max] * self.s_max + [0.]
            surrogate = WeightedRandomForestCluster(
                types, bounds, self.s_max, self.eta, init_weight, 'gpoe')
            acq_func = EI(model=surrogate)
            self.mfse_config_gen[_arch] = {
                'surrogate': surrogate,
                'acq_optimizer': RandomSampling(
                    acq_func, _cs, n_samples=2000,
                    rng=np.random.RandomState(1)),
            }

        # No observations at the full budget yet -> uniform random sampling.
        if self.R not in self.eval_hist_perfs[_arch] or len(
                self.eval_hist_perfs[_arch][self.R]) == 0:
            return sample_configurations(_cs, N)

        # Incumbent: best (lowest) observed performance at the full budget;
        # its objective value is approximated through the surrogate.
        max_r = self.R
        best_index = np.argmin(self.eval_hist_perfs[_arch][max_r])
        incumbent = dict()
        incumbent['config'] = self.eval_hist_configs[_arch][max_r][best_index]
        incumbent['obj'] = self.mfse_config_gen[_arch]['surrogate'].predict(
            convert_configurations_to_array([incumbent['config']]))[0]
        self.mfse_config_gen[_arch]['acq_optimizer'].update(
            model=self.mfse_config_gen[_arch]['surrogate'], eta=incumbent)

        config_candidates = self.mfse_config_gen[_arch][
            'acq_optimizer'].maximize(batch_size=N)
        n_acq = self.eta * self.eta
        if N <= n_acq:
            return config_candidates

        # Keep the top n_acq acquisition candidates, then fill the remaining
        # slots with a mix of random samples and leftover candidates.
        p_threshold = 0.3
        candidates = config_candidates[:n_acq]
        idx_acq = n_acq
        for _id in range(N - n_acq):
            if rd.random() < p_threshold:
                config = sample_configurations(_cs, 1)[0]
            else:
                config = config_candidates[idx_acq]
                idx_acq += 1
            candidates.append(config)
        return candidates