Example #1
0
    def maximize(self,
                 runhistory: HistoryContainer,
                 initial_config=None,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        """Maximize the acquisition function with scipy differential evolution.

        Runs a single global optimization over ``self.bounds`` and wraps the
        resulting configuration (if valid) in a ChallengerList.
        """
        def neg_acq(arr):
            # arr has shape (d,); acquisition output has shape (1,)
            return -self.acquisition_function(arr, convert=False)[0]

        result = scipy.optimize.differential_evolution(func=neg_acq,
                                                       bounds=self.bounds)
        if not result.success:
            self.logger.debug(
                'Scipy differential evolution optimizer failed. Info:\n%s' %
                (result, ))

        acq_configs = []
        try:
            best_config = Configuration(self.config_space, vector=result.x)
            best_acq = self.acquisition_function(result.x, convert=False)
            acq_configs.append((best_acq, best_config))
        except Exception:
            # best-effort: an invalid result vector simply yields no challenger
            pass

        if not acq_configs:  # nothing usable came out of the optimizer
            self.logger.warning(
                'Scipy differential evolution optimizer failed. Return empty config list. Info:\n%s'
                % (result, ))

        challengers = ChallengerList([cfg for _, cfg in acq_configs],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
def evaluate(mth, run_i, seed):
    """Run CMA-ES on the module-level ``problem`` for ``max_runs`` evaluations.

    Returns (config_list, perf_list, time_list), each truncated to max_runs.
    """
    print(mth, run_i, seed, '===== start =====', flush=True)

    def objective_function(config):
        # evaluate a single Configuration on the benchmark problem
        return problem.evaluate_config(config)

    from cma import CMAEvolutionStrategy
    from openbox.utils.util_funcs import get_types
    from openbox.utils.config_space import Configuration

    types, bounds = get_types(cs)
    assert all(types == 0)

    # constant hyperparameters are encoded with a NaN upper bound
    const_idx = [i for i, bound in enumerate(bounds) if np.isnan(bound[1])]

    hp_num = len(bounds) - len(const_idx)
    es = CMAEvolutionStrategy(hp_num * [0], 0.99, inopts={'bounds': [0, 1], 'seed': seed})

    global_start_time = time.time()
    global_trial_counter = 0
    config_list, perf_list, time_list = [], [], []
    eval_num = 0
    while eval_num < max_runs:
        X = es.ask(number=es.popsize)
        _X = X.copy()
        # re-insert the constant dimensions so vectors match the config space
        for i in range(len(_X)):
            for index in const_idx:
                _X[i] = np.insert(_X[i], index, 0)  # np.insert returns a copy
        values = []
        for xi in _X:
            # convert array to Configuration and evaluate it
            config = Configuration(cs, vector=xi)
            perf = objective_function(config)
            global_time = time.time() - global_start_time
            global_trial_counter += 1
            values.append(perf)
            print('=== CMAES Trial %d: %s perf=%f global_time=%f' % (global_trial_counter, config, perf, global_time))

            config_list.append(config)
            perf_list.append(perf)
            time_list.append(global_time)
        # feed performance back to CMA-ES (it minimizes)
        es.tell(X, np.reshape(values, (-1,)))
        eval_num += es.popsize

    print('===== Total evaluation times=%d. Truncate to max_runs=%d.' % (eval_num, max_runs))
    return config_list[:max_runs], perf_list[:max_runs], time_list[:max_runs]
Example #3
0
 def get_pareto_set(self):
     """Decode the feasible non-dominated solutions into Configurations."""
     nd_solutions = self.get_solutions(feasible=True,
                                       nondominated=True,
                                       decode=True)
     return [
         Configuration(self.config_space, vector=np.asarray(sol.variables))
         for sol in nd_solutions
     ]
Example #4
0
 def get_incumbent(self):
     """Return (pareto_set, pareto_front) built from feasible non-dominated solutions."""
     nd_solutions = self.get_solutions(feasible=True,
                                       nondominated=True,
                                       decode=True)
     pareto_set = []
     for sol in nd_solutions:
         # decode each solution vector back into a Configuration
         pareto_set.append(
             Configuration(self.config_space, vector=np.asarray(sol.variables)))
     pareto_front = np.array([sol.objectives for sol in nd_solutions])
     return pareto_set, pareto_front
Example #5
0
    def maximize(self, runhistory: HistoryContainer, num_points: int,
                 **kwargs) -> Iterable[Tuple[float, Configuration]]:
        """Optimize the acquisition function with CMA-ES and return challengers.

        Parameters
        ----------
        runhistory : HistoryContainer
            runhistory object (not used by this optimizer).
        num_points : int
            minimum number of acquisition evaluations to perform; the loop
            runs whole CMA-ES populations until this count is reached.

        Returns
        -------
        ChallengerList over configurations sorted by acquisition value
        in decreasing order.
        """
        try:
            from cma import CMAEvolutionStrategy
        except ImportError:
            raise ImportError("Package cma is not installed!")

        types, bounds = get_types(self.config_space)
        # only continuous (float) hyperparameters are supported here
        assert all(types == 0)

        # Check Constant Hyperparameter
        # constant hyperparameters are encoded with a NaN upper bound
        const_idx = list()
        for i, bound in enumerate(bounds):
            if np.isnan(bound[1]):
                const_idx.append(i)

        # CMA-ES only optimizes the non-constant dimensions, in [0, 1]
        hp_num = len(bounds) - len(const_idx)
        es = CMAEvolutionStrategy(hp_num * [0],
                                  0.99,
                                  inopts={'bounds': [0, 1]})

        eval_num = 0
        next_configs_by_acq_value = list()
        while eval_num < num_points:
            X = es.ask(number=es.popsize)
            _X = X.copy()
            # re-insert the constant dimensions so vectors match the space
            for i in range(len(_X)):
                for index in const_idx:
                    _X[i] = np.insert(_X[i], index, 0)
            _X = np.asarray(_X)
            values = self.acquisition_function._compute(_X)
            values = np.reshape(values, (-1, ))
            # NOTE(review): es.tell() receives the raw acquisition values and
            # CMA-ES minimizes them, while sibling scipy-based optimizers in
            # this project negate the acquisition before minimizing — confirm
            # the intended sign here.
            es.tell(X, values)
            next_configs_by_acq_value.extend([(values[i], _X[i])
                                              for i in range(es.popsize)])
            eval_num += es.popsize

        # sort by acquisition value (descending), then keep vectors only
        next_configs_by_acq_value.sort(reverse=True, key=lambda x: x[0])
        next_configs_by_acq_value = [_[1] for _ in next_configs_by_acq_value]
        next_configs_by_acq_value = [
            Configuration(self.config_space, vector=array)
            for array in next_configs_by_acq_value
        ]

        challengers = ChallengerList(next_configs_by_acq_value,
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
    def cls_objective_function(config: Configuration):
        """Train an XGBoost classifier with the given config and return its loss."""
        # convert Configuration to a plain hyperparameter dict
        params = config.get_dictionary()

        # fit model on the training split
        model = XGBClassifier(**params, use_label_encoder=False)
        model.fit(x_train, y_train)

        # predict on the validation split; OpenBox minimizes the objective,
        # so report 1 - balanced accuracy as the loss
        y_pred = model.predict(x_val)
        loss = 1 - balanced_accuracy_score(y_val, y_pred)

        return dict(objs=(loss, ))
Example #7
0
def mishra(config: Configuration):
    """Evaluate Mishra's Bird function (constrained) on a 2-d configuration.

    Returns a dict with one objective value and one constraint value
    (constraint <= 0 means feasible).
    """
    cfg = config.get_dictionary()
    X = np.array([cfg['x%d' % i] for i in range(2)])
    x, y = X
    term_a = np.sin(y) * np.exp((1 - np.cos(x))**2)
    term_b = np.cos(x) * np.exp((1 - np.sin(y))**2)
    term_c = (x - y)**2

    # feasible region: circle of radius 5 centered at (-5, -5)
    return {
        'objs': [
            term_a + term_b + term_c,
        ],
        'constraints': [
            np.sum((X + 5)**2) - 25,
        ],
    }
Example #8
0
def update_observation(request):
    """
    Update observation in config advisor.

    Looks up the advisor for ``task_id``, stores the trial in the run
    history collection, and feeds the observation back to the advisor.

    Parameters
    ----------
    request : a dict
        Django-style request; payload is read from ``request.POST``.

    Returns
    -------
    a readable information string in HttpResponse form
    """
    if request.method == 'POST':
        if request.POST:
            task_id = request.POST.get('task_id')
            # advisor_dict maps task_id -> config advisor instance
            config_advisor = advisor_dict[task_id]
            config_dict = json.loads(request.POST.get('config'))
            config = Configuration(config_advisor.config_space, config_dict)
            trial_state = int(request.POST.get('trial_state'))
            constraints = json.loads(request.POST.get('constraints'))
            objs = json.loads(request.POST.get('objs'))
            trial_info = json.loads(request.POST.get('trial_info'))
            # persist the raw trial record to the run history store
            item = {
                'task_id': task_id,
                'config': config_dict,
                'result': list(objs),
                'status': trial_state,
                'trial_info': trial_info['trial_info'],
                'worker_id': trial_info['worker_id'],
                'cost': trial_info['cost']
            }
            runhistory_id = Runhistory().insert_one(item)
            # feed the observation back into the advisor's model
            observation = Observation(config, trial_state, constraints, objs,
                                      trial_info['cost'])
            config_advisor.update_observation(observation)

            config_advisor.save_history()

            print('-' * 21)
            print('Update observation')
            print(observation)
            print('-' * 21)
            return JsonResponse({'code': 1, 'msg': 'SUCCESS'})
        else:
            return JsonResponse({'code': 0, 'msg': 'Empty post data'})
    else:
        return JsonResponse({'code': 0, 'msg': 'Should be a POST request'})
Example #9
0
    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # useless in USeMO
            **kwargs) -> Iterable[Configuration]:
        """Rank the USeMO candidate set by uncertainty and return challengers.

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~openbox.ei_optimization.ChallengerList
        """
        uncertainties = np.asarray(self.acquisition_function.uncertainties)
        candidates = np.asarray(self.acquisition_function.candidates)
        assert uncertainties.ndim == 1 and candidates.ndim == 2 \
               and uncertainties.shape[0] == candidates.shape[0]

        # pair each candidate vector (as a Configuration) with its acq value
        acq_configs = [
            (uncertainties[i],
             Configuration(self.config_space, vector=candidates[i]))
            for i in range(uncertainties.shape[0])
        ]

        # shuffle first so that equal acq values are broken at random,
        # then sort descending by acq value (Python's sort is stable)
        self.rng.shuffle(acq_configs)
        acq_configs.sort(reverse=True, key=lambda pair: pair[0])

        challengers = ChallengerList([pair[1] for pair in acq_configs],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
Example #10
0
    def generate(self, return_config=True):
        """
        Create samples in the domain specified during construction.

        Returns
        -------
        configs : list
            List of N sampled configurations within domain. (return_config is True)

        X : array, shape (N, D)
            Design matrix X in the specified domain. (return_config is False)
        """
        # scale unit-cube samples into [lower_bounds, upper_bounds]
        span = self.upper_bounds - self.lower_bounds
        X = self.lower_bounds + span * self._generate()

        if not return_config:
            return X
        return [Configuration(self.config_space, vector=row) for row in X]
Example #11
0
    def maximize(self,
                 runhistory: HistoryContainer,
                 initial_config=None,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        """Maximize the acquisition function with one scipy.optimize.minimize run.

        Starts from ``initial_config`` (or a random sample) and wraps the
        resulting configuration (if valid) in a ChallengerList.
        """
        def neg_acq(arr):
            # arr has shape (d,); clip to fix numerical problem in L-BFGS-B
            arr = np.clip(arr, 0.0, 1.0)
            return -self.acquisition_function(arr, convert=False)[0]  # shape=(1,)

        if initial_config is None:
            initial_config = self.config_space.sample_configuration()

        result = scipy.optimize.minimize(fun=neg_acq,
                                         x0=initial_config.get_array(),
                                         bounds=self.bounds,
                                         **self.scipy_config)
        if not result.success:
            self.logger.debug('Scipy optimizer failed. Info:\n%s' % (result, ))

        acq_configs = []
        try:
            best_x = np.clip(result.x, 0.0,
                             1.0)  # fix numerical problem in L-BFGS-B
            best_config = Configuration(self.config_space, vector=best_x)
            best_acq = self.acquisition_function(best_x, convert=False)
            acq_configs.append((best_acq, best_config))
        except Exception:
            # best-effort: an invalid result vector simply yields no challenger
            pass

        if not acq_configs:  # empty
            self.logger.warning(
                'Scipy optimizer failed. Return empty config list. Info:\n%s' %
                (result, ))

        challengers = ChallengerList([cfg for _, cfg in acq_configs],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
Example #12
0
# Create remote advisor
# NOTE: email/password are placeholders — fill in real credentials before use
config_advisor = RemoteAdvisor(config_space=townsend_cs,
                               server_ip='127.0.0.1',
                               port=11425,
                               email='*****@*****.**',
                               password='******',
                               num_constraints=1,
                               max_runs=max_runs,
                               task_name="task_test",
                               task_id=task_id)

# Simulate max_runs iterations
for idx in range(max_runs):

    # pull a suggested configuration from the server and evaluate it locally
    config_dict = config_advisor.get_suggestion()
    config = Configuration(config_advisor.config_space, config_dict)
    print('Get %d config: %s' % (idx + 1, config))
    trial_info = {}
    start_time = datetime.datetime.now()
    obs = townsend(config)

    # report elapsed wall-clock seconds and bookkeeping info back to the server
    trial_info['cost'] = (datetime.datetime.now() - start_time).seconds
    trial_info['worker_id'] = 0
    trial_info['trial_info'] = 'None'
    print('Result %d is %s. Update observation to server.' % (idx + 1, obs))
    config_advisor.update_observation(config_dict,
                                      obs['objs'],
                                      obs['constraints'],
                                      trial_info=trial_info,
                                      trial_state=SUCCESS)
Example #13
0
seed = 123

# Evaluate mth
# fixed 2-d initial design (Latin-hypercube-style points on [0, 1]^2)
X_init = np.array([
    [6.66666667e-01, 3.33333333e-01],
    [3.33333333e-01, 6.66666667e-01],
    [2.22222222e-01, 2.22222222e-01],
    [7.77777778e-01, 7.77777778e-01],
    [5.55555556e-01, 0],
    [0, 5.55555556e-01],
    [1.00000000e+00, 4.44444444e-01],
    [4.44444444e-01, 1.00000000e+00],
    [8.88888889e-01, 1.11111111e-01],
    [1.11111111e-01, 8.88888889e-01],
])  # use latin hypercube
# convert design rows to Configuration objects in config space `cs`
X_init = [Configuration(cs, vector=X_init[i]) for i in range(X_init.shape[0])]

# build the multi-objective SMBO optimizer (X_init is currently unused below)
bo = SMBO(
    multi_objective_func,
    cs,
    num_objs=num_objs,
    max_runs=max_runs,
    # surrogate_type='gp_rbf',    # use default
    acq_type=mth,
    # initial_configurations=X_init, initial_runs=10,
    time_limit_per_trial=60,
    task_id='mo',
    random_state=seed)
bo.config_advisor.optimizer.random_chooser.prob = rand_prob  # set rand_prob, default 0
bo.config_advisor.acquisition_function.sample_num = sample_num  # set sample_num
#bo.config_advisor.acquisition_function.random_state = seed      # set random_state
Example #14
0
    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Combines Monte-Carlo random sampling with L-BFGS-B local refinement,
        then returns all evaluated points sorted by acquisition value.

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~openbox.ei_optimization.ChallengerList
        """
        def inverse_acquisition(x):
            # shape of x = (d,)
            return -self.acquisition_function(x,
                                              convert=False)[0]  # shape=(1,)

        d = len(self.config_space.get_hyperparameters())
        bound = (0.0, 1.0)  # todo only on continuous dims (int, float) now
        bounds = [bound] * d
        acq_configs = []

        # MC: evaluate num_mc uniformly random points in one batch
        x_tries = self.rng.uniform(bound[0], bound[1], size=(self.num_mc, d))
        acq_tries = self.acquisition_function(x_tries, convert=False)
        for i in range(x_tries.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=x_tries[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_tries[i], config))

        # L-BFGS-B: refine num_opt random starting points; failed runs are
        # silently skipped
        x_seed = self.rng.uniform(low=bound[0],
                                  high=bound[1],
                                  size=(self.num_opt, d))
        for i in range(x_seed.shape[0]):
            x0 = x_seed[i].reshape(1, -1)
            result = self.minimizer(inverse_acquisition,
                                    x0=x0,
                                    method='L-BFGS-B',
                                    bounds=bounds)
            if not result.success:
                continue
            # convert array to Configuration
            config = Configuration(self.config_space, vector=result.x)
            config.origin = 'Scipy'
            acq_val = self.acquisition_function(result.x, convert=False)  # [0]
            acq_configs.append((acq_val, config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
Example #15
0
    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> List[Tuple[float, Configuration]]:
        """Maximize the acquisition function via random sampling plus batched
        scipy restarts.

        Parameters
        ----------
        runhistory : HistoryContainer
            runhistory object (unused here).
        num_points : int
            number of points to be sampled (unused here).
        **kwargs
            passed to acquisition function.

        Returns
        -------
        ChallengerList over all evaluated configurations, sorted by
        acquisition value in decreasing order (ties broken at random).
        """
        acq_configs = []

        # random points: one batched acquisition evaluation
        random_points = self.rng.uniform(self.bound[0],
                                         self.bound[1],
                                         size=(self.num_random, self.dim))
        acq_random = self.acquisition_function(random_points, convert=False)
        for i in range(random_points.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=random_points[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_random[i], config))

        # scipy points: multi-start local optimization, processed in chunks
        # of batch_limit starting points
        initial_points = self.gen_initial_points(
            num_restarts=self.num_restarts, raw_samples=self.raw_samples)

        for start_idx in range(0, self.num_restarts, self.batch_limit):
            end_idx = min(start_idx + self.batch_limit, self.num_restarts)
            # optimize using random restart optimization; a failed batch
            # returns None and is skipped
            scipy_points = self.gen_batch_scipy_points(
                initial_points[start_idx:end_idx])
            if scipy_points is None:
                continue
            acq_scipy = self.acquisition_function(scipy_points, convert=False)
            for i in range(scipy_points.shape[0]):
                # convert array to Configuration
                config = Configuration(self.config_space,
                                       vector=scipy_points[i])
                config.origin = 'Batch Scipy'
                acq_configs.append((acq_scipy[i], config))

        # shuffle for random tie-break, then sort descending by acq value
        # (Python's sort is stable, so the shuffle order decides ties)
        self.rng.shuffle(acq_configs)
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers