Code Example #1
    def maximize(self,
                 runhistory: HistoryContainer,
                 initial_config=None,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        def negative_acquisition(x):
            # shape of x = (d,)
            return -self.acquisition_function(x,
                                              convert=False)[0]  # shape=(1,)

        acq_configs = []
        result = scipy.optimize.differential_evolution(
            func=negative_acquisition, bounds=self.bounds)
        if not result.success:
            self.logger.debug(
                'Scipy differential evolution optimizer failed. Info:\n%s' %
                (result, ))
        try:
            config = Configuration(self.config_space, vector=result.x)
            acq = self.acquisition_function(result.x, convert=False)
            acq_configs.append((acq, config))
        except Exception:
            # result.x may not form a valid configuration vector; skip it
            pass

        if not acq_configs:  # empty
            self.logger.warning(
                'Scipy differential evolution optimizer failed. Return empty config list. Info:\n%s'
                % (result, ))

        challengers = ChallengerList([config for _, config in acq_configs],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
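This maximizer hands the negated acquisition to scipy's differential evolution, so the global minimizer of the negation is the acquisition maximizer. A minimal standalone sketch of the same pattern (not lite-bo code; the toy acquisition and bounds are illustrative assumptions):

import numpy as np
from scipy import optimize

def toy_acquisition(x):
    # hypothetical acquisition surface, peaked at x = (0.3, 0.7)
    return np.exp(-np.sum((x - np.array([0.3, 0.7])) ** 2))

result = optimize.differential_evolution(
    func=lambda x: -toy_acquisition(x),   # negate to turn maximization into minimization
    bounds=[(0.0, 1.0), (0.0, 1.0)])
print(result.x, -result.fun)              # approximate maximizer and its acquisition value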
Code Example #2
def evaluate(mth, run_i, seed):
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        y = problem.evaluate_config(config)
        return y

    # note: `problem`, `cs` (the ConfigurationSpace) and `max_runs` are assumed to be
    # defined at module level by the enclosing benchmark script
    import time

    import numpy as np
    from cma import CMAEvolutionStrategy
    from litebo.utils.util_funcs import get_types
    from litebo.utils.config_space import Configuration

    types, bounds = get_types(cs)
    assert all(types == 0)

    # Check Constant Hyperparameter
    const_idx = list()
    for i, bound in enumerate(bounds):
        if np.isnan(bound[1]):
            const_idx.append(i)

    hp_num = len(bounds) - len(const_idx)
    es = CMAEvolutionStrategy(hp_num * [0], 0.99, inopts={'bounds': [0, 1], 'seed': seed})

    global_start_time = time.time()
    global_trial_counter = 0
    config_list = []
    perf_list = []
    time_list = []
    eval_num = 0
    while eval_num < max_runs:
        X = es.ask(number=es.popsize)
        _X = X.copy()
        for i in range(len(_X)):
            for index in const_idx:
                _X[i] = np.insert(_X[i], index, 0)  # np.insert returns a copy
        # _X = np.asarray(_X)
        values = []
        for xi in _X:
            # convert array to Configuration
            config = Configuration(cs, vector=xi)
            perf = objective_function(config)
            global_time = time.time() - global_start_time
            global_trial_counter += 1
            values.append(perf)
            print('=== CMAES Trial %d: %s perf=%f global_time=%f' % (global_trial_counter, config, perf, global_time))

            config_list.append(config)
            perf_list.append(perf)
            time_list.append(global_time)
        values = np.reshape(values, (-1,))
        es.tell(X, values)
        eval_num += es.popsize

    print('===== Total evaluation times=%d. Truncate to max_runs=%d.' % (eval_num, max_runs))
    config_list = config_list[:max_runs]
    perf_list = perf_list[:max_runs]
    time_list = time_list[:max_runs]
    return config_list, perf_list, time_list
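The loop above is driven by the ask/tell interface of the cma package: ask samples a population, tell reports its objective values, and the strategy updates its search distribution. A minimal standalone sketch of that loop (not lite-bo code) on a toy sphere objective with illustrative settings:

import numpy as np
from cma import CMAEvolutionStrategy

def sphere(x):
    # toy objective; CMA-ES minimizes, so smaller is better
    return float(np.sum(np.asarray(x) ** 2))

es = CMAEvolutionStrategy(3 * [0.5], 0.3, inopts={'bounds': [0, 1], 'seed': 1})
evaluated = 0
while evaluated < 50:
    X = es.ask()                        # sample a population of candidate vectors
    es.tell(X, [sphere(x) for x in X])  # report objective values to update the distribution
    evaluated += es.popsize
print(es.result.xbest, es.result.fbest)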
Code Example #3
    def maximize(self, runhistory: HistoryContainer, num_points: int,
                 **kwargs) -> Iterable[Tuple[float, Configuration]]:
        try:
            from cma import CMAEvolutionStrategy
        except ImportError:
            raise ImportError("Package cma is not installed!")

        types, bounds = get_types(self.config_space)
        assert all(types == 0)

        # Check Constant Hyperparameter
        const_idx = list()
        for i, bound in enumerate(bounds):
            if np.isnan(bound[1]):
                const_idx.append(i)

        hp_num = len(bounds) - len(const_idx)
        es = CMAEvolutionStrategy(hp_num * [0],
                                  0.99,
                                  inopts={'bounds': [0, 1]})

        eval_num = 0
        next_configs_by_acq_value = list()
        while eval_num < num_points:
            X = es.ask(number=es.popsize)
            _X = X.copy()
            for i in range(len(_X)):
                for index in const_idx:
                    _X[i] = np.insert(_X[i], index, 0)
            _X = np.asarray(_X)
            values = self.acquisition_function._compute(_X)
            values = np.reshape(values, (-1, ))
            es.tell(X, values)
            next_configs_by_acq_value.extend([(values[i], _X[i])
                                              for i in range(es.popsize)])
            eval_num += es.popsize

        next_configs_by_acq_value.sort(reverse=True, key=lambda x: x[0])
        next_configs_by_acq_value = [_[1] for _ in next_configs_by_acq_value]
        next_configs_by_acq_value = [
            Configuration(self.config_space, vector=array)
            for array in next_configs_by_acq_value
        ]

        challengers = ChallengerList(next_configs_by_acq_value,
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
Code Example #4
    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # useless in USeMO
            **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~litebo.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~litebo.ei_optimization.ChallengerList
        """

        acq_vals = np.asarray(self.acquisition_function.uncertainties)
        candidates = np.asarray(self.acquisition_function.candidates)
        assert len(acq_vals.shape) == 1 and len(candidates.shape) == 2 \
               and acq_vals.shape[0] == candidates.shape[0]

        acq_configs = []
        for i in range(acq_vals.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=candidates[i])
            acq_configs.append((acq_vals[i], config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
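The shuffle-then-sort step relies on Python's stable sort: shuffling first randomizes the order among configurations that share the same acquisition value, and the subsequent sort preserves that random order within ties. A tiny illustration with dummy (value, name) pairs:

import random

pairs = [(0.5, 'a'), (0.9, 'b'), (0.5, 'c'), (0.9, 'd')]  # stand-ins for (acq_value, config)
random.shuffle(pairs)                         # randomize order among equal values
pairs.sort(reverse=True, key=lambda p: p[0])  # stable sort keeps the shuffled order within ties
print(pairs)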
Code Example #5
    def generate(self, return_config=True):
        """
        Create samples in the domain specified during construction.

        Returns
        -------
        configs : list
            List of N sampled configurations within the domain (returned when return_config is True).

        X : array, shape (N, D)
            Design matrix X in the specified domain (returned when return_config is False).
        """
        X = self._generate()
        X = self.lower_bounds + (self.upper_bounds - self.lower_bounds) * X

        if return_config:
            configs = [Configuration(self.config_space, vector=x) for x in X]
            return configs
        else:
            return X
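The second line of generate() maps a design in the unit hypercube to the sampler's bounds with a per-dimension affine transform. A minimal numpy illustration (the bounds below are made up for the example):

import numpy as np

rng = np.random.default_rng(0)
X_unit = rng.uniform(size=(5, 2))     # design matrix in [0, 1]^2
lower = np.array([-2.25, -2.5])       # illustrative per-dimension lower bounds
upper = np.array([2.5, 1.75])         # illustrative per-dimension upper bounds
X = lower + (upper - lower) * X_unit  # broadcasted affine rescaling, row by row
print(X.min(axis=0), X.max(axis=0))   # every column now lies within its bounds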
Code Example #6
File: views.py  Project: gitter-badger/lite-bo
def update_observation(request):
    """
    Update observation in config advisor.

    Parameters
    ----------
    request : HttpRequest
        POST request carrying task_id, config, trial_state, constraints and objs

    Returns
    -------
    HttpResponse
        a human-readable status string
    """
    if request.method == 'POST':
        if request.POST:
            task_id = request.POST.get('task_id')
            config_advisor = advisor_dict[task_id]
            config_dict = json.loads(request.POST.get('config'))
            config = Configuration(config_advisor.config_space, config_dict)
            trial_state = int(request.POST.get('trial_state'))
            constraints = json.loads(request.POST.get('constraints'))
            objs = json.loads(request.POST.get('objs'))

            observation = Observation(config, trial_state, constraints, objs)
            config_advisor.update_observation(observation)

            config_advisor.save_history()
            # print('config history', config_advisor.load_history_from_json())

            print('-' * 21)
            print('Update observation')
            print(observation)
            print('-' * 21)
            return HttpResponse('[bo_advice/views.py] update SUCCESS')
        else:
            return HttpResponse('[bo_advice/views.py] error3')
    else:
        return HttpResponse('[bo_advice/views.py] error4')
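A minimal client-side sketch (not lite-bo code) of a POST this view can parse. The endpoint URL, task id, and field values are assumptions for illustration; only the form field names are taken from the request.POST reads above:

import json
import requests

payload = {
    'task_id': 'example-task',                      # hypothetical task id
    'config': json.dumps({'x1': 0.5, 'x2': -1.0}),  # hyperparameter dict as a JSON string
    'trial_state': 0,                               # assumed integer state code
    'constraints': json.dumps([-0.1]),
    'objs': json.dumps([1.23]),
}
resp = requests.post('http://127.0.0.1:8000/bo_advice/update_observation/', data=payload)
print(resp.text)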
Code Example #7
    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~litebo.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~litebo.ei_optimization.ChallengerList
        """
        def inverse_acquisition(x):
            # shape of x = (d,)
            return -self.acquisition_function(x,
                                              convert=False)[0]  # shape=(1,)

        d = len(self.config_space.get_hyperparameters())
        bound = (0.0, 1.0)  # note: only continuous dims (int, float) are handled for now
        bounds = [bound] * d
        acq_configs = []

        # MC
        x_tries = self.rng.uniform(bound[0], bound[1], size=(self.num_mc, d))
        acq_tries = self.acquisition_function(x_tries, convert=False)
        for i in range(x_tries.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=x_tries[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_tries[i], config))

        # L-BFGS-B
        x_seed = self.rng.uniform(low=bound[0],
                                  high=bound[1],
                                  size=(self.num_opt, d))
        for i in range(x_seed.shape[0]):
            x0 = x_seed[i].reshape(1, -1)
            result = self.minimizer(inverse_acquisition,
                                    x0=x0,
                                    method='L-BFGS-B',
                                    bounds=bounds)
            if not result.success:
                continue
            # convert array to Configuration
            config = Configuration(self.config_space, vector=result.x)
            config.origin = 'Scipy'
            acq_val = self.acquisition_function(result.x, convert=False)  # [0]
            acq_configs.append((acq_val, config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
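The L-BFGS-B part above is a random-restart local search: start scipy's bounded quasi-Newton minimizer from several random seeds and keep whatever it converges to. A standalone sketch of that idea (not lite-bo code; the toy acquisition is an illustrative assumption):

import numpy as np
from scipy.optimize import minimize

def toy_acquisition(x):
    # hypothetical acquisition surface, peaked at x = (0.3, ..., 0.3)
    return np.exp(-np.sum((x - 0.3) ** 2))

rng = np.random.default_rng(0)
d, num_starts = 2, 5
best = None
for x0 in rng.uniform(0.0, 1.0, size=(num_starts, d)):
    res = minimize(lambda x: -toy_acquisition(x), x0=x0,
                   method='L-BFGS-B', bounds=[(0.0, 1.0)] * d)
    if res.success and (best is None or res.fun < best.fun):
        best = res
print(best.x, -best.fun)  # best local maximizer found across the restarts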
Code Example #8
    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> List[Tuple[float, Configuration]]:

        acq_configs = []

        # random points
        random_points = self.rng.uniform(self.bound[0],
                                         self.bound[1],
                                         size=(self.num_random, self.dim))
        acq_random = self.acquisition_function(random_points, convert=False)
        for i in range(random_points.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=random_points[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_random[i], config))

        # scipy points
        initial_points = self.gen_initial_points(
            num_restarts=self.num_restarts, raw_samples=self.raw_samples)

        for start_idx in range(0, self.num_restarts, self.batch_limit):
            end_idx = min(start_idx + self.batch_limit, self.num_restarts)
            # optimize using random restart optimization
            scipy_points = self.gen_batch_scipy_points(
                initial_points[start_idx:end_idx])
            if scipy_points is None:
                continue
            acq_scipy = self.acquisition_function(scipy_points, convert=False)
            for i in range(scipy_points.shape[0]):
                # convert array to Configuration
                config = Configuration(self.config_space,
                                       vector=scipy_points[i])
                config.origin = 'Batch Scipy'
                acq_configs.append((acq_scipy[i], config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()

        return challengers
Code Example #9
File: test_web.py  Project: gitter-badger/lite-bo

# Send task id and config space at registration
task_id = time.time()
townsend_params = {'float': {'x1': (-2.25, 2.5, 0), 'x2': (-2.5, 1.75, 0)}}
townsend_cs = ConfigurationSpace()
townsend_cs.add_hyperparameters([
    UniformFloatHyperparameter(e, *townsend_params['float'][e])
    for e in townsend_params['float']
])

# Create remote advisor
config_advisor = RemoteAdvisor(townsend_cs,
                               '127.0.0.1',
                               8000,
                               num_constraints=1,
                               random_state=1)

# Simulate 20 iterations
for _ in range(20):
    config_dict = config_advisor.get_suggestion()
    config = Configuration(config_advisor.config_space, config_dict)
    obs = townsend(config)
    config_advisor.update_observation(config_dict,
                                      obs['objs'],
                                      obs['constraints'],
                                      trial_state=SUCCESS)

incumbents, history = config_advisor.get_result()
print(incumbents)
Code Example #10
seed = 123

# Evaluate mth
X_init = np.array([
    [6.66666667e-01, 3.33333333e-01],
    [3.33333333e-01, 6.66666667e-01],
    [2.22222222e-01, 2.22222222e-01],
    [7.77777778e-01, 7.77777778e-01],
    [5.55555556e-01, 0],
    [0, 5.55555556e-01],
    [1.00000000e+00, 4.44444444e-01],
    [4.44444444e-01, 1.00000000e+00],
    [8.88888889e-01, 1.11111111e-01],
    [1.11111111e-01, 8.88888889e-01],
])  # use latin hypercube
X_init = [Configuration(cs, vector=X_init[i]) for i in range(X_init.shape[0])]

bo = SMBO(
    multi_objective_func,
    cs,
    num_objs=num_objs,
    max_runs=max_runs,
    # surrogate_type='gp_rbf',    # use default
    acq_type=mth,
    # initial_configurations=X_init, initial_runs=10,
    time_limit_per_trial=60,
    task_id='mo',
    random_state=seed)
bo.config_advisor.optimizer.random_chooser.prob = rand_prob  # set rand_prob, default 0
bo.config_advisor.acquisition_function.sample_num = sample_num  # set sample_num
bo.config_advisor.acquisition_function.random_state = seed  # set random_state
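The hard-coded X_init above is a 10-point Latin hypercube design in [0, 1]^2. A sketch of generating such a design with scipy's QMC module (scipy >= 1.7), assuming the same dimension and point count; the rows can then be wrapped in Configuration objects exactly as X_init is above:

import numpy as np
from scipy.stats import qmc

sampler = qmc.LatinHypercube(d=2, seed=123)
X_lhs = sampler.random(n=10)       # 10 points in [0, 1)^2, one per row
# each column hits each of the 10 equal-width strata exactly once
print(np.sort(X_lhs, axis=0))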