Example #1
            configuration = Configuration(configuration_space=self.cs,
                                          values=rh_value[0])
            self.runhistory.add(config=configuration,
                                cost=rh_value[1],
                                time=0,
                                status=StatusType.SUCCESS)

    def observe(self, X, y):
        """Feed an observation back.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        for xx, yy in zip(X, y):
            configuration = Configuration(configuration_space=self.cs,
                                          values=xx)
            self.runhistory.add(config=configuration,
                                cost=yy,
                                time=0,
                                status=StatusType.SUCCESS)


if __name__ == "__main__":
    experiment_main(SMAC4EPMOpimizer)
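Every wrapper in this collection implements the same two-method bayesmark interface: the benchmark repeatedly calls suggest() for a batch of candidate configurations and hands the measured objective values back through observe(). A minimal sketch of what that driver loop looks like, with a toy objective; run_loop and all names inside it are illustrative, not part of bayesmark:

def run_loop(opt, objective, n_calls=16, batch_size=8):
    # opt is any optimizer exposing suggest()/observe(), such as the
    # wrappers in these examples; objective maps kwargs to a float.
    for _ in range(n_calls):
        X = opt.suggest(n_suggestions=batch_size)  # list of param dicts
        y = [objective(**x) for x in X]            # one value per dict
        opt.observe(X, y)                          # feed results back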
Example #2
            print("adjusted region length: {}".format(self.turbo.length))

        self.turbo.n_evals += self.batch_size

        self.turbo._X = np.vstack((self.turbo._X, deepcopy(XX)))
        self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(yy)))
        self.turbo.X = np.vstack((self.turbo.X, deepcopy(XX)))
        self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(yy)))

        ind_best = np.argmin(self.turbo.fX)
        f_best, x_best = self.turbo.fX[ind_best], self.turbo.X[ind_best, :]
        print("best f(x): {}, at x: {}".format(round(f_best[0], 2),
                                               np.around(x_best, 2)))
        print("x_best: {}".format(self.space_x.unwarp([x_best])))

        # Check for a restart
        print("turbo.length: {}, turbo.length_min: {}".format(
            self.turbo.length, self.turbo.length_min))
        if self.turbo.length < self.turbo.length_min:
            self.cnt_restart += 1
            self.restart()
            print("original new region length: {}".format(self.turbo.length))
            # The current region is already exploited (length < length_min),
            # so restart in a new but smaller region.
            self.turbo.length = round(self.turbo.length / self.cnt_restart, 1)
            print("reduced new region length: {}".format(self.turbo.length))


if __name__ == "__main__":
    experiment_main(TurboOptimizer)
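The restart branch above shrinks the fresh trust region on every successive restart: after restart() resets TuRBO, the new length is divided by the number of restarts seen so far. A quick illustration of the resulting schedule, assuming an initial length of 0.8 (a common TuRBO default; the value is only for illustration):

initial_length = 0.8
for cnt_restart in range(1, 5):
    length = round(initial_length / cnt_restart, 1)
    print(cnt_restart, length)  # 1 -> 0.8, 2 -> 0.4, 3 -> 0.3, 4 -> 0.2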
Example #3
        # Find the matching proposal and execute its callbacks
        idx = [x == xx for xx in self.history]
        i = np.argwhere(idx)[0].item()  # Pick the first index if there are ties
        proposal = self.proposals[i]
        proposal.record.complete(y)
        self.proposals.pop(i)
        self.history.pop(i)

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)

        for x_, y_ in zip(X, y):
            # Just ignore any inf observations we got; unclear if that is right
            if np.isfinite(y_):
                self._observe(x_, y_)


if __name__ == "__main__":
    experiment_main(PySOTOptimizer)
Example #4
from sk_optimizer import ScikitOptimizer
from hyper_optimizer import HyperoptOptimizer


class SKHyperOptimizer(AbstractOptimizer):
    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.opt1 = HyperoptOptimizer(api_config, **kwargs)
        self.opt2 = ScikitOptimizer(api_config)

    def suggest(self, n_suggestions=1):
        t_out = self.opt1.suggest(n_suggestions)
        s_out = self.opt2.suggest(n_suggestions)
        N = len(t_out) // 2
        return t_out[:N] + s_out[N:]

    def observe(self, X, y):
        self.opt1.observe(X, y)
        self.opt2.observe(X, y)


if __name__ == "__main__":
    experiment_main(SKHyperOptimizer)
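Several other entries in this collection (Examples #5, #12, #14, #18, #23, #30) repeat this same portfolio template: ask both inner optimizers for a full batch, keep the first half of one batch and the second half of the other, and report every observation to both so each optimizer also learns from points it did not propose. A generic sketch of the pattern; HalfHalfOptimizer and its constructor are illustrative names, not from the benchmark:

class HalfHalfOptimizer:
    def __init__(self, opt1, opt2):
        # Any two objects exposing suggest()/observe() will do.
        self.opt1, self.opt2 = opt1, opt2

    def suggest(self, n_suggestions=1):
        a = self.opt1.suggest(n_suggestions)
        b = self.opt2.suggest(n_suggestions)
        n = len(a) // 2
        return a[:n] + b[n:]  # first half from opt1, rest from opt2

    def observe(self, X, y):
        # Both optimizers see every observation, including points
        # proposed by the other one.
        self.opt1.observe(X, y)
        self.opt2.observe(X, y)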
Example #5
from opentuner_optimizer import OpentunerOptimizer
from pysot_optimizer import PySOTOptimizer


class OpenPySOTOptimizer(AbstractOptimizer):
    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.opt1 = PySOTOptimizer(api_config, **kwargs)
        self.opt2 = OpentunerOptimizer(api_config)

    def suggest(self, n_suggestions=1):
        t_out = self.opt1.suggest(n_suggestions)
        s_out = self.opt2.suggest(n_suggestions)
        N = len(t_out) // 2
        return t_out[:N] + s_out[N:]

    def observe(self, X, y):
        self.opt1.observe(X, y)
        self.opt2.observe(X, y)


if __name__ == "__main__":
    experiment_main(OpenPySOTOptimizer)
Example #6
        x_guess_data = [self.optim.ask() for _ in range(n_suggestions)]

        x_guess = [None] * n_suggestions
        for ii, xx in enumerate(x_guess_data):
            x_pos, x_kwarg = self.instrum.data_to_arguments(xx)
            assert x_pos == ()
            x_guess[ii] = self.postwarp(x_kwarg)

        return x_guess

    def observe(self, X, y):
        """Feed an observation back to nevergrad.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        for xx, yy in zip(X, y):
            xx = self.prewarp(xx)
            xx = self.instrum.arguments_to_data(**xx)
            self.optim.tell(xx, yy)


if __name__ == "__main__":
    experiment_main(NevergradOptimizer)
Example #7
        dictionary = (copy.copy(x) if isinstance(x, dict)
                      else x.get_dictionary())
        for k, v in dictionary.items():
            if k in self._par:
                hp = self.original_cs.get_hyperparameter(k)
                _fun = INV_TRANS[self._par[k]]
                dictionary[k] = np.clip(_fun(v), hp.lower, hp.upper)
                # need to check the original configspace since bilog ints
                # are converted to float
                if isinstance(self.original_cs.get_hyperparameter(k),
                              UniformIntegerHyperparameter):
                    dictionary[k] = int(np.rint(dictionary[k]))
        x = Configuration(self.original_cs, values=dictionary)
        return x


if __name__ == "__main__":
    experiment_main(SwitchingOptimizer)

    # A test for the bilog spaces; uncomment to run it.
    # api_config = {'hidden_layer_sizes': {'type': 'int', 'space': 'linear', 'range': (50, 200)},
    #  'learning_rate_init': {'type': 'real', 'space': 'bilog', 'range': (-1, 1)},  # To test negative value for `bilog`
    #  'beta_1': {'type': 'real', 'space': 'logit', 'range': (0.5, 0.99)},
    #  'epsilon': {'type': 'real', 'space': 'logit', 'range': (1e-9, 1e-6)}
    # }
    #
    # opt = SwitchingOptimizer(api_config)
    #
    # for i in range(20):
    #     X = opt.suggest(n_suggestions=8)
    #     opt.observe(X, list(range(8)))
Example #8
                # TODO(xadrianzetx) Support `suggest_categorical` if benchmark is extended.
                raise RuntimeError("CategoricalDistribution is not supported in bayesmark.")

            suggestions[name] = param if config["space"] != "logit" else 1 / (1 + np.exp(-param))

        return suggestions

    def suggest(self, n_suggestions: int) -> List[Suggestion]:

        suggestions: List[Suggestion] = list()
        for _ in range(n_suggestions):
            trial = self.study.ask()
            params = self._suggest(trial)
            sid = hash(frozenset(params.items()))
            self.current_trials[sid] = trial.number
            suggestions.append(params)

        return suggestions

    def observe(self, X: List[Suggestion], y: List[float]) -> None:

        for params, objective_value in zip(X, y):
            sid = hash(frozenset(params.items()))
            trial = self.current_trials.pop(sid)
            self.study.tell(trial, objective_value)


if __name__ == "__main__":
    optuna.logging.disable_default_handler()
    experiment_main(OptunaOptimizer)
Example #9
                        self.population[i]))  # individuals 0-7
            elif self.idxask == 1:
                next_guess.append(
                    self.vector_to_configspace(
                        self.population[i + 8]))  # individuals 8-15
            elif self.idxask == 2:
                next_guess.append(
                    self.vector_to_configspace(
                        self.population[i + 16]))  # individuals 16-23
        return next_guess

    def observe(self, X, y):
        """Feed an observation back.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """

        self.iteration += 1
        self.idxask = (self.idxask + 1) % 3


if __name__ == "__main__":
    experiment_main(InitialDesign)
Example #10
            corresponds to a parameter being optimized.
        """
        if len(self.known_points) < 2:
            return self.random_suggestion(n_suggestions)

        gp = GaussianProcessRegressor(
            kernel=DiscreteKernel(Matern(nu=2.5), self.tr),
            alpha=1e-6,
            normalize_y=True,
            n_restarts_optimizer=5,
            random_state=self._random_state,
        )
        known_points = {
            k: [dic[k] for dic in self.known_points]
            for k in self.known_points[0]
        }
        gp.fit(self.tr.to_real_space(**known_points), self.known_values)

        cost_f = self._cost(gp,
                            self.tr,
                            max_y=max(self.known_values),
                            x=0.01,
                            kappa=2.6)
        meta_minimizer = self._meta_optimizer(self.api_config,
                                              self._random_state, cost_f)
        return meta_minimizer.suggest(n_suggestions, timeout=30)


if __name__ == "__main__":
    experiment_main(MultiGaussianProcess)
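Before gp.fit() is called above, the list of observed dicts is transposed into a dict of per-parameter lists, which is the keyword form that to_real_space(**known_points) expects. The comprehension is compact enough to misread, so here is a standalone sketch with made-up parameters:

points = [{"lr": 0.1, "depth": 3}, {"lr": 0.2, "depth": 5}]
columns = {k: [p[k] for p in points] for k in points[0]}
# columns == {"lr": [0.1, 0.2], "depth": [3, 5]}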
Example #11
            all_points = self.known_points[:]
            all_points += new_points
            all_values = np.concatenate([self.known_values, new_values])
            all_known_points = {
                k: [dic[k] for dic in all_points]
                for k in all_points[0]
            }
            gp.fit(self.tr.to_real_space(**all_known_points), all_values)

            cost_f = self._cost(gp,
                                self.tr,
                                max_y=max(all_values),
                                min_y=min(all_values),
                                x=0.10,
                                kappa=1.6)
            meta_minimizer = self._meta_optimizer(self.api_config,
                                                  self._random_state, cost_f)

            min_point = meta_minimizer.suggest(1, timeout=4.0)
            new_points += min_point

            _p = {k: [dic[k] for dic in min_point] for k in min_point[0]}
            X = self.tr.to_real_space(**_p)
            min_value = gp.predict(X)[0]
            new_values.append(min_value)
        return new_points


if __name__ == "__main__":
    experiment_main(MarkovGaussianProcess)
Example #12
from bayesmark.space import JointSpace
from hyper_optimizer import HyperoptOptimizer
from pysot_optimizer import PySOTOptimizer

class HyperPySOTOptimizer(AbstractOptimizer):

    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.opt1 = HyperoptOptimizer(api_config, **kwargs)
        self.opt2 = PySOTOptimizer(api_config)

    def suggest(self, n_suggestions=1):
        t_out = self.opt1.suggest(n_suggestions)
        s_out = self.opt2.suggest(n_suggestions)
        N = len(t_out) // 2
        return t_out[:N] + s_out[N:]

    def observe(self, X, y):
        self.opt1.observe(X, y)
        self.opt2.observe(X, y)

if __name__ == "__main__":
    experiment_main(HyperPySOTOptimizer)
Example #13
        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated.
        """
        assert len(X) == len(y)

        for x_guess, y_ in zip(X, y):
            x_guess_ = HyperoptOptimizer.hashable_dict(x_guess)
            assert x_guess_ in self.trial_id_lookup, "Appears to be guess that did not originate from suggest"

            trial_id = self.trial_id_lookup.pop(x_guess_)
            trial = self.get_trial(trial_id)
            assert self.cleanup_guess(trial["misc"]["vals"]) == x_guess, "trial ID not consistent with x values stored"

            # Cast to float to ensure native type
            result = {"loss": float(y_), "status": STATUS_OK}
            trial["state"] = JOB_STATE_DONE
            trial["result"] = result
        # hyperopt.fmin.FMinIter.serial_evaluate only does one refresh at end
        # of loop of a bunch of evals, so we will do the same thing here.
        self.trials.refresh()


if __name__ == "__main__":
    experiment_main(HyperoptOptimizer)
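The bookkeeping above keys trials by HyperoptOptimizer.hashable_dict(x_guess), whose definition is not shown in this excerpt. One common way to write such a helper is sketched below; this is an assumption about its behavior, not necessarily the repo's implementation:

def hashable_dict(d):
    # Turn a parameter dict into an order-independent, hashable key.
    # Assumes all values are themselves hashable.
    return frozenset(d.items())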
Example #14
from turbo_optimizer import TurboOptimizer
from hyper_optimizer import HyperoptOptimizer


class TurboHyperOptimizer(AbstractOptimizer):
    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.opt1 = TurboOptimizer(api_config, **kwargs)
        self.opt2 = HyperoptOptimizer(api_config)

    def suggest(self, n_suggestions=1):
        t_out = self.opt1.suggest(n_suggestions)
        s_out = self.opt2.suggest(n_suggestions)
        N = len(t_out) // 2
        return t_out[:N] + s_out[N:]

    def observe(self, X, y):
        self.opt1.observe(X, y)
        self.opt2.observe(X, y)


if __name__ == "__main__":
    experiment_main(TurboHyperOptimizer)
Example #15
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        assert len(X) == len(y)

        for x_, y_ in zip(X, y):
            # Just ignore any inf observations we got; unclear if that is right
            if np.isfinite(y_):
                self._observe(x_, y_)

        XX, yy = self.space_x.warp(X), np.array(y)[:, None]

        if len(self.turbo._fX) >= self.turbo.n_init:
            self.turbo._adjust_length(yy)

        self.turbo.n_evals += self.turbo_batch_size

        self.turbo._X = np.vstack((self.turbo._X, deepcopy(XX)))
        self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(yy)))
        self.turbo.X = np.vstack((self.turbo.X, deepcopy(XX)))
        self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(yy)))

        # Check for a restart
        if self.turbo.length < self.turbo.length_min:
            self.restart()


if __name__ == "__main__":
    experiment_main(tuSOTOptimizer)
Example #16
        return x_guess

    def check_unique(self, rec: pd.DataFrame) -> [bool]:
        return (~pd.concat([self.X, rec], axis=0).duplicated().tail(
            rec.shape[0]).values).tolist()

    def observe(self, X, y):
        """Feed an observation back.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        # Keep only the finite observations, then append them to the archive
        y = np.array(y).reshape(-1)
        valid_id = np.where(np.isfinite(y))[0].tolist()
        XX = [X[idx] for idx in valid_id]
        yy = y[valid_id].reshape(-1, 1)
        # DataFrame.append was removed in pandas 2.0; concat is equivalent
        self.X = pd.concat([self.X, pd.DataFrame(XX)], ignore_index=True)
        self.y = np.vstack([self.y, yy])
        print(yy)


if __name__ == "__main__":
    experiment_main(BO)
Example #17
        # that there is not necessarily a round function for each dimension here.
        for param_name, round_f in self.round_to_values.items():
            for xx in next_guess:
                xx[param_name] = round_f(xx[param_name])
        return next_guess

    def observe(self, X, y):
        """Send an observation of a suggestion back to the optimizer.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        # Supposedly skopt can handle blocks, but not sure about interface for
        # that. Just do loop to be safe for now.
        for xx, yy in zip(X, y):
            # skopt needs lists instead of dicts
            xx = [xx[dim_name] for dim_name in self.dimensions_list]
            # Just ignore any inf observations we got; unclear if that is right
            if np.isfinite(yy):
                self.skopt.tell(xx, yy)


if __name__ == "__main__":
    experiment_main(ScikitOptimizer)
Example #18
from bayesmark.space import JointSpace
from hyper_optimizer import HyperoptOptimizer
from opentuner_optimizer import OpentunerOptimizer

class HyperOpenOptimizer(AbstractOptimizer):

    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.opt1 = HyperoptOptimizer(api_config, **kwargs)
        self.opt2 = OpentunerOptimizer(api_config)

    def suggest(self, n_suggestions=1):
        t_out = self.opt1.suggest(n_suggestions)
        s_out = self.opt2.suggest(n_suggestions)
        N = len(t_out) // 2
        return t_out[:N] + s_out[N:]

    def observe(self, X, y):
        self.opt1.observe(X, y)
        self.opt2.observe(X, y)

if __name__ == "__main__":
    experiment_main(HyperOpenOptimizer)
Example #19
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """
        x_guess = rs.suggest_dict([], [],
                                  self.api_config,
                                  n_suggestions=n_suggestions,
                                  random=self.random)
        return x_guess

    def observe(self, X, y):
        """Feed an observation back.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        # Random search so don't do anything
        pass


if __name__ == "__main__":
    experiment_main(RandomOptimizer)
Example #20
        if self.random.rand() <= 0.5 or self.mode == "normal":
            x_guess = rs.suggest_dict([], [], self.api_config, n_suggestions=n_suggestions, random=self.random)
        elif self.mode == "delay":
            sleep(15 * 60)  # 15 minutes
            x_guess = rs.suggest_dict([], [], self.api_config, n_suggestions=n_suggestions, random=self.random)
        elif self.mode == "crash":
            assert False, "Crashing for testing purposes"
        else:
            assert False, "Crashing, not for testing purposes"

        return x_guess

    def observe(self, X, y):
        """Feed an observation back.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        # Random search so don't do anything
        pass


if __name__ == "__main__":
    experiment_main(FlakyOptimizer)
Example #21
        -------
        best_point : dict
            The point with the best objective value observed. Each key
            corresponds to a parameter being optimized.
        """
        assert isinstance(optimizer, CobBO), "A CobBO optimizer is expected"

        while optimizer.has_budget:
            if not use_real_space:
                x_probe_list = self.suggest(n_suggestions=self.batch)
                target_list = [obj_func(**x) for x in x_probe_list]
                self.observe(x_probe_list, target_list)
            else:
                x_probe_real_list = self.suggest_as_real_values(n_suggestions=self.batch)
                x_probe_list = self.convert_real_to_target_type(copy.deepcopy(x_probe_real_list))
                target_list = [obj_func(**x) for x in x_probe_list]
                self.observe(x_probe_real_list, target_list)

        return self.best_point

    @property
    def has_budget(self):
        return self.space.has_unused_trial_budget()

    @property
    def best_point(self):
        return self.space.max_param


if __name__ == "__main__":
    experiment_main(CobBO)
Example #22
                self.search_space.warp(self.archive)),
                                bounds=self.torch_bounds)
            train_y = standardize(
                torch.from_numpy(
                    np.array(self.arc_fitness).reshape(len(self.arc_fitness),
                                                       1)))
            # Fit the GP based on the actual observed values
            if self.iter == 1:
                self.model, mll = self.make_model(train_x, train_y)
            else:
                self.model, mll = self.make_model(train_x, train_y,
                                                  self.model.state_dict())

            # mll.train()
            fit_gpytorch_model(mll)

            # define the sampler
            sampler = SobolQMCNormalSampler(num_samples=512)

            # define the acquisition function
            self.acquisition = qExpectedImprovement(model=self.model,
                                                    best_f=train_y.max(),
                                                    sampler=sampler)

        except Exception as e:
            print('Error: {} in observe()'.format(e))


if __name__ == "__main__":
    experiment_main(steade)
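standardize() above rescales the observed fitness values to zero mean and unit variance before the GP is fitted, which keeps the kernel hyperparameter optimization well conditioned. A rough numpy equivalent of that call, for intuition only (BoTorch's own implementation operates on tensors and uses the unbiased standard deviation):

import numpy as np

y = np.array([3.0, 5.0, 7.0]).reshape(-1, 1)
y_std = (y - y.mean()) / y.std(ddof=1)  # zero mean, unit sample variance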
Example #23
from sk_optimizer import ScikitOptimizer
from opentuner_optimizer import OpentunerOptimizer


class SKOpenOptimizer(AbstractOptimizer):
    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.opt1 = OpentunerOptimizer(api_config)
        self.opt2 = ScikitOptimizer(api_config)

    def suggest(self, n_suggestions=1):
        t_out = self.opt1.suggest(n_suggestions)
        s_out = self.opt2.suggest(n_suggestions)
        N = len(t_out) // 2
        return t_out[:N] + s_out[N:]

    def observe(self, X, y):
        self.opt1.observe(X, y)
        self.opt2.observe(X, y)


if __name__ == "__main__":
    experiment_main(SKOpenOptimizer)
Example #24
        -------
        next_guess : list of dict
            List of `n_suggestions` suggestions to evaluate the objective
            function. Each suggestion is a dictionary where each key
            corresponds to a parameter being optimized.
        """
        next_guess = []
        for i in range(n_suggestions):
            next_guess.append(self.vector_to_configspace(
                self.population[(self.idxask * n_suggestions) + i]))
        return next_guess

    def observe(self, X, y):
        """Feed an observation back.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """

        self.iteration += 1
        self.idxask = (self.idxask + 1) % self.limit


if __name__ == "__main__":
    experiment_main(PointsMinDisc)
Example #25
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated.
        """
        assert len(X) == len(y)

        for x_guess, y_ in zip(X, y):
            x_guess_ = OpentunerOptimizer.hashable_dict(x_guess)

            # If we can't find the dr object then it must be the dummy guess.
            if x_guess_ not in self.x_to_dr:
                assert x_guess == self.dummy_suggest, "Appears to be guess that did not originate from suggest"
                continue

            # Get the corresponding DesiredResult object.
            dr = self.x_to_dr.pop(x_guess_, None)
            # This will also catch None from opentuner.
            assert isinstance(
                dr,
                DesiredResult), "DesiredResult object not available in x_to_dr"

            # Opentuner's arg names assume we are minimizing execution time.
            # So, if we want to minimize we have to pretend y is a 'time'.
            result = Result(time=y_)
            self.api.report_result(dr, result)


if __name__ == "__main__":
    experiment_main(OpentunerOptimizer)
Example #26
                        traverse += cat_vec_len[index]
                        index += 1
                        index = min(index, len(cat_vec_pos)-1)

            XX = new_XX

        if self.batch_size is None:
            self.X_init = XX
            self.batch_size = len(XX)
            self.Y_init = yy
            # the evaluator is unused here but required for GPyOpt instantiation
            self.evaluator = GPyOpt.core.evaluators.RandomBatch(
                acquisition=self.aquisition, batch_size=self.batch_size)
            self.bo = GPyOpt.methods.ModularBayesianOptimization(
                self.model, self.space, self.objective, self.aquisition,
                self.evaluator, self.X_init, Y_init=self.Y_init)
            self.X = self.X_init
            self.Y = self.Y_init
        else:
            # update the stack of all the evaluated X's and y's
            self.bo.X = np.vstack((self.bo.X, deepcopy(XX)))
            self.bo.Y = np.vstack((self.bo.Y, deepcopy(yy)))

        # update the GP model and count this batch as one more acquisition
        self.bo._update_model('stats')
        self.bo.num_acquisitions += 1

if __name__ == "__main__":
    experiment_main(BoEI)
Example #27
        assert len(X) == len(y), "The length is not the same"

        outputs_candidates = list()

        # collect outputs
        for candidate in self.stored_candidates:
            idx = [candidate == x_guess for x_guess in X]
            id_y = np.argwhere(idx)[0].item()  # pick the first index
            outputs_candidates.append(y[id_y])

        #  print(outputs_candidates)

        # trigger callbacks

        if self.nomad_process.is_alive():
            #  if self.nomad_thread.is_alive():
            self.outputs_queue.put(outputs_candidates)
            # wait for completion
            self.outputs_queue.join()
            self.n_iters += 1
            print("Observe Done!")

        # kill thread if last iteration
        if self.n_iters >= 16:
            self.nomad_process.terminate()
            self.nomad_process.join()


if __name__ == "__main__":
    experiment_main(PyNomadOptimizer)
Example #28
        if self.turbo:
            if len(self.turbo._X) >= self.turbo.n_init:
                self.turbo._adjust_length(Y)
            print('TURBO length:', self.turbo.length)
            self.turbo._X = np.vstack((self.turbo._X, deepcopy(X)))
            self.turbo._fX = np.vstack((self.turbo._fX, deepcopy(Y)))
            self.turbo.X = np.vstack((self.turbo.X, deepcopy(X)))
            self.turbo.fX = np.vstack((self.turbo.fX, deepcopy(Y)))

        N = self.config['reset_no_improvement']
        if len(self.best_values) > N and np.min(
                self.best_values[:-N]) <= np.min(self.best_values[-N:]):
            print('########## RESETTING COMPLETELY! ##########')
            self.X = np.zeros((0, self.dim))
            self.y = np.zeros((0, 1))
            self.best_values = []
            self.X_init = None
            self.node = None
            self.turbo = None
            self.split_used = 0

        if self.split_used >= self.config['reset_split_after']:
            print('########## REBUILDING THE SPLIT! ##########')
            self.node = None
            self.turbo = None
            self.split_used = 0


if __name__ == '__main__':
    experiment_main(SpacePartitioningOptimizer)
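The full-reset trigger above fires when the best value seen in the last N observations fails to beat the best of everything that came before. A quick numeric check of the condition, with N and the values made up for illustration:

import numpy as np

best_values = [5.0, 3.0, 2.0, 2.0, 2.0, 2.0]
N = 3
stalled = np.min(best_values[:-N]) <= np.min(best_values[-N:])
# min([5, 3, 2]) = 2 <= min([2, 2, 2]) = 2, so stalled is True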
Example #29
        return x_guess

    def check_unique(self, rec: pd.DataFrame) -> [bool]:
        return (~pd.concat([self.X, rec], axis=0).duplicated().tail(
            rec.shape[0]).values).tolist()

    def observe(self, X, y):
        """Feed an observation back.

        Parameters
        ----------
        X : list of dict-like
            Places where the objective function has already been evaluated.
            Each suggestion is a dictionary where each key corresponds to a
            parameter being optimized.
        y : array-like, shape (n,)
            Corresponding values where objective has been evaluated
        """
        # Keep only the finite observations, then append them to the archive
        y = np.array(y).reshape(-1)
        valid_id = np.where(np.isfinite(y))[0].tolist()
        XX = [X[idx] for idx in valid_id]
        yy = y[valid_id].reshape(-1, 1)
        # DataFrame.append was removed in pandas 2.0; concat is equivalent
        self.X = pd.concat([self.X, pd.DataFrame(XX)], ignore_index=True)
        self.y = np.vstack([self.y, yy])
        print(yy)


if __name__ == "__main__":
    experiment_main(MACEBO)
Example #30
from turbo_optimizer import TurboOptimizer
from pysot_optimizer import PySOTOptimizer


class TurboPysotOptimizer(AbstractOptimizer):
    def __init__(self, api_config, **kwargs):
        """Build wrapper class to use an optimizer in benchmark.

        Parameters
        ----------
        api_config : dict-like of dict-like
            Configuration of the optimization variables. See API description.
        """
        AbstractOptimizer.__init__(self, api_config)
        self.opt1 = TurboOptimizer(api_config, **kwargs)
        self.opt2 = PySOTOptimizer(api_config)

    def suggest(self, n_suggestions=1):
        t_out = self.opt1.suggest(n_suggestions)
        s_out = self.opt2.suggest(n_suggestions)
        N = len(t_out) // 2
        return t_out[:N] + s_out[N:]

    def observe(self, X, y):
        self.opt1.observe(X, y)
        self.opt2.observe(X, y)


if __name__ == "__main__":
    experiment_main(TurboPysotOptimizer)