예제 #1
0
    def test_add_starting_points_with_too_many_dim(self):
        """Adding a starting point with an undeclared dimension must fail."""
        from deephyper.problem import HpProblem

        problem = HpProblem()
        problem.add_hyperparameter((-10, 10), "dim0")

        # "dim1" was never declared on the problem, so this is rejected.
        with pytest.raises(ValueError):
            problem.add_starting_point(dim0=0, dim1=2)
예제 #2
0
    def test_dim_with_wrong_name(self):
        """A non-string dimension name must raise SpaceDimNameOfWrongType."""
        from deephyper.core.exceptions.problem import SpaceDimNameOfWrongType
        from deephyper.problem import HpProblem

        problem = HpProblem()
        with pytest.raises(SpaceDimNameOfWrongType):
            # 0 is not a valid dimension name: names must be strings.
            problem.add_hyperparameter((-10, 10), 0)
예제 #3
0
    def test_sample_types_no_cat(self):
        """Sampled values keep their declared numeric types (no categoricals)."""
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        problem = HpProblem()
        problem.add_hyperparameter((0, 10), "x_int")
        problem.add_hyperparameter((0.0, 10.0), "x_float")

        def run(config):
            # Every suggested configuration must respect the declared types.
            assert np.issubdtype(type(config["x_int"]), np.integer)
            assert np.issubdtype(type(config["x_float"]), float)
            return 0

        def make_evaluator():
            return Evaluator.create(run, method="serial")

        # Exercise both the random ("DUMMY") and random-forest surrogates.
        for surrogate in ("DUMMY", "RF"):
            CBO(
                problem,
                make_evaluator(),
                random_state=42,
                surrogate_model=surrogate,
            ).search(10)
예제 #4
0
    def test_gp(self):
        """The "GP" surrogate works for float, int and categorical spaces."""
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        def search(problem, run):
            # Run a short CBO search with a Gaussian-process surrogate.
            CBO(
                problem,
                Evaluator.create(run, method="serial"),
                random_state=42,
                surrogate_model="GP",
            ).search(10)

        # Float hyperparameter.
        problem = HpProblem()
        problem.add_hyperparameter((0.0, 10.0), "x")
        search(problem, lambda config: config["x"])

        # Integer hyperparameter.
        problem = HpProblem()
        problem.add_hyperparameter((0, 10), "x")
        search(problem, lambda config: config["x"])

        # Categorical hyperparameter.
        problem = HpProblem()
        problem.add_hyperparameter([f"{i}" for i in range(10)], "x")
        search(problem, lambda config: int(config["x"]))
예제 #5
0
    def test_config_space_hp(self):
        """ConfigSpace hyperparameter objects can be added directly."""
        import ConfigSpace.hyperparameters as csh
        from deephyper.problem import HpProblem

        params = [
            csh.UniformFloatHyperparameter(name="alpha", lower=0, upper=1),
            csh.UniformFloatHyperparameter(name="beta", lower=0, upper=1),
        ]
        problem = HpProblem()
        problem.add_hyperparameters(params)
예제 #6
0
    def __init__(
        self,
        problem,
        run,
        evaluator,
        surrogate_model="RF",
        acq_func="LCB",
        kappa=1.96,
        xi=0.001,
        liar_strategy="cl_min",
        n_jobs=1,
        **kwargs,
    ):
        """Bayesian-optimization search over a joint hyperparameter + architecture space.

        :param problem: problem exposing ``build_search_space()``, ``_hp_space`` and ``seed``.
        :param run: function evaluating one configuration.
        :param evaluator: evaluator executing the runs; its ``num_workers``
            sets the number of initial random points.
        :param surrogate_model: surrogate estimator name (default ``"RF"``).
        :param acq_func: acquisition function name (default ``"LCB"``).
        :param kappa: exploration weight passed to the acquisition function.
        :param xi: improvement margin passed to the acquisition function.
        :param liar_strategy: constant-liar strategy for batched suggestions.
        :param n_jobs: parallelism of the surrogate-model estimator.
        """
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            **kwargs,
        )

        self.n_jobs = int(
            n_jobs)  # parallelism of BO surrogate model estimator
        self.kappa = float(kappa)
        self.xi = float(xi)
        # One random point per worker before the surrogate takes over.
        self.n_initial_points = self.evaluator.num_workers
        self.liar_strategy = liar_strategy

        # Setup
        na_search_space = self.problem.build_search_space()

        self.hp_space = self.problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        # One integer dimension per variable node of the NAS search space,
        # each ranging over that node's operation indices.
        self.na_space = HpProblem(self.problem.seed)
        for i, vnode in enumerate(na_search_space.variable_nodes):
            self.na_space.add_hyperparameter((0, vnode.num_ops - 1),
                                             name=f"vnode_{i:05d}")

        # Join both sub-spaces under distinct prefixes so names cannot collide.
        self.space = CS.ConfigurationSpace(seed=self.problem.seed)
        self.space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space)
        self.space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space)

        # Initialize the optimizer over the joint configuration space.
        self.opt = SkOptimizer(
            dimensions=self.space,
            base_estimator=self.get_surrogate_model(surrogate_model,
                                                    self.n_jobs),
            acq_func=acq_func,
            acq_optimizer="sampling",
            acq_func_kwargs={
                "xi": self.xi,
                "kappa": self.kappa
            },
            n_initial_points=self.n_initial_points,
        )
예제 #7
0
    def test_add_good_dim(self):
        """Every supported dimension spec maps to the right ConfigSpace type."""
        from deephyper.problem import HpProblem

        pb = HpProblem()

        # (int, int) tuple -> uniform integer.
        assert pb.add_hyperparameter((-10, 10), "p0") == csh.UniformIntegerHyperparameter(
            name="p0", lower=-10, upper=10, log=False
        )

        # (int, int, "log-uniform") -> log-scaled uniform integer.
        assert pb.add_hyperparameter((1, 100, "log-uniform"), "p1") == csh.UniformIntegerHyperparameter(
            name="p1", lower=1, upper=100, log=True
        )

        # (float, float) -> uniform float.
        assert pb.add_hyperparameter((-10.0, 10.0), "p2") == csh.UniformFloatHyperparameter(
            name="p2", lower=-10.0, upper=10.0, log=False
        )

        # (float, float, "log-uniform") -> log-scaled uniform float.
        assert pb.add_hyperparameter((1.0, 100.0, "log-uniform"), "p3") == csh.UniformFloatHyperparameter(
            name="p3", lower=1.0, upper=100.0, log=True
        )

        # list of ints -> ordinal.
        assert pb.add_hyperparameter([1, 2, 3, 4], "p4") == csh.OrdinalHyperparameter(
            name="p4", sequence=[1, 2, 3, 4]
        )

        # list of floats -> ordinal.
        assert pb.add_hyperparameter([1.0, 2.0, 3.0, 4.0], "p5") == csh.OrdinalHyperparameter(
            name="p5", sequence=[1.0, 2.0, 3.0, 4.0]
        )

        # list of strings -> categorical.
        assert pb.add_hyperparameter(["cat0", "cat1"], "p6") == csh.CategoricalHyperparameter(
            name="p6", choices=["cat0", "cat1"]
        )

        # dict with integer mu/sigma -> normal integer.
        assert pb.add_hyperparameter({"mu": 0, "sigma": 1}, "p7") == csh.NormalIntegerHyperparameter(
            name="p7", mu=0, sigma=1
        )

        # Truncated normal bounds require a recent ConfigSpace release.
        if cs.__version__ > "0.4.20":
            assert pb.add_hyperparameter(
                {"mu": 0, "sigma": 1, "lower": -5, "upper": 5}, "p8"
            ) == csh.NormalIntegerHyperparameter(
                name="p8", mu=0, sigma=1, lower=-5, upper=5
            )

        # dict with float mu/sigma -> normal float.
        assert pb.add_hyperparameter({"mu": 0.0, "sigma": 1.0}, "p9") == csh.NormalFloatHyperparameter(
            name="p9", mu=0, sigma=1
        )
예제 #8
0
    def test_random_seed(self):
        """Identical random states give reproducible search results."""
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        problem = HpProblem()
        problem.add_hyperparameter((0.0, 10.0), "x")

        def assert_reproducible(run):
            # Two searches seeded identically must sample the same points.
            def new_search():
                return CBO(
                    problem,
                    Evaluator.create(run, method="serial"),
                    random_state=42,
                    surrogate_model="DUMMY",
                )

            first = new_search().search(max_evals=4)[["x"]].to_numpy()
            second = new_search().search(max_evals=4)[["x"]].to_numpy()
            assert np.array_equal(first, second)

        # single objective
        assert_reproducible(lambda config: config["x"])

        # test multi-objective
        assert_reproducible(lambda config: (config["x"], config["x"]))
예제 #9
0
    def test_sample_types_conditional(self):
        """Conditionally-active integer dimensions keep integer sample types."""
        import ConfigSpace as cs
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        problem = HpProblem()

        # Categorical switch deciding which integer dimension is active.
        choice = problem.add_hyperparameter(
            name="choice",
            value=["choice1", "choice2"],
        )

        # Integer dimensions gated by the switch.
        x1_int = problem.add_hyperparameter(name="x1_int", value=(1, 10))
        x2_int = problem.add_hyperparameter(name="x2_int", value=(1, 10))

        # x1_int is only active under "choice1", x2_int under "choice2".
        problem.add_condition(cs.EqualsCondition(x1_int, choice, "choice1"))
        problem.add_condition(cs.EqualsCondition(x2_int, choice, "choice2"))

        def run(config):
            # Whichever dimension is active must be sampled as an integer.
            active = "x1_int" if config["choice"] == "choice1" else "x2_int"
            assert np.issubdtype(type(config[active]), np.integer)
            return 0

        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model="DUMMY",
        ).search(10)
예제 #10
0
    def __init__(
        self,
        problem,
        evaluator,
        random_state: int = None,
        log_dir: str = ".",
        verbose: int = 0,
        population_size: int = 100,
        sample_size: int = 10,
        **kwargs,
    ):
        """Population-based search over a joint hyperparameter + architecture space.

        :param problem: problem exposing ``build_search_space()`` and ``_hp_space``.
        :param evaluator: evaluator executing the candidate configurations.
        :param random_state: seed for reproducible sampling.
        :param log_dir: directory for search logs.
        :param verbose: verbosity level.
        :param population_size: size of the maintained population.
        :param sample_size: number of members sampled per selection step.
        """
        super().__init__(
            problem,
            evaluator,
            random_state,
            log_dir,
            verbose,
            population_size,
            sample_size,
        )

        # Setup
        na_search_space = self._problem.build_search_space()

        self.hp_space = self._problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        # One integer dimension per architecture choice, seeded from the
        # search's random state for reproducibility.
        self.na_space = HpProblem()
        self.na_space._space.seed(self._random_state.get_state()[1][0])
        for i, (low, high) in enumerate(na_search_space.choices()):
            self.na_space.add_hyperparameter((low, high), name=f"vnode_{i:05d}")

        # Join both sub-spaces under distinct prefixes so names cannot collide.
        self._space = CS.ConfigurationSpace(seed=self._random_state.get_state()[1][0])
        self._space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space
        )
        self._space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space
        )
        self._space_size = len(self._space.get_hyperparameter_names())
예제 #11
0
    def test_quickstart(self):
        """End-to-end quickstart: problem + evaluator + CBO search."""
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        # Search space: a single continuous variable.
        problem = HpProblem()
        problem.add_hyperparameter((-10.0, 10.0), "x")

        # Distribute evaluations of the module-level `run` over 2 subprocesses.
        evaluator = Evaluator.create(
            run,
            method="subprocess",
            method_kwargs={"num_workers": 2},
        )

        # Run the search for a fixed evaluation budget.
        search = CBO(problem, evaluator)
        results = search.search(max_evals=15)
예제 #12
0
    def __init__(
        self,
        problem,
        run,
        evaluator,
        population_size=100,
        sample_size=10,
        n_jobs=1,
        **kwargs,
    ):
        """Population-based search over a joint hyperparameter + architecture space.

        :param problem: problem exposing ``build_search_space()``, ``_hp_space`` and ``seed``.
        :param run: function evaluating one configuration.
        :param evaluator: evaluator executing the runs.
        :param population_size: size of the maintained population.
        :param sample_size: number of members sampled per selection step.
        :param n_jobs: parallelism hint stored on the instance.
        """
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            population_size=population_size,
            sample_size=sample_size,
            **kwargs,
        )

        self.n_jobs = int(n_jobs)

        # Setup
        na_search_space = self.problem.build_search_space()

        self.hp_space = self.problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        # One integer dimension per variable node of the NAS search space,
        # each ranging over that node's operation indices.
        self.na_space = HpProblem(self.problem.seed)
        for i, vnode in enumerate(na_search_space.variable_nodes):
            self.na_space.add_hyperparameter((0, vnode.num_ops - 1),
                                             name=f"vnode_{i:05d}")

        # Join both sub-spaces under distinct prefixes so names cannot collide.
        self.space = CS.ConfigurationSpace(seed=self.problem.seed)
        self.space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space)
        self.space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space)
예제 #13
0
    def test_add_starting_points_not_in_space_def(self):
        """Starting points outside the declared space are rejected."""
        from deephyper.problem import HpProblem

        pb = HpProblem()
        pb.add_hyperparameter((-10, 10), "dim0")
        pb.add_hyperparameter((-10.0, 10.0), "dim1")
        pb.add_hyperparameter(["a", "b"], "dim2")

        # Each point below violates exactly one dimension's bounds/choices.
        invalid_points = [
            dict(dim0=-11, dim1=0.0, dim2="a"),  # dim0 below lower bound
            dict(dim0=11, dim1=0.0, dim2="a"),   # dim0 above upper bound
            dict(dim0=0, dim1=-11.0, dim2="a"),  # dim1 below lower bound
            dict(dim0=0, dim1=11.0, dim2="a"),   # dim1 above upper bound
            dict(dim0=0, dim1=0.0, dim2="c"),    # dim2 not a valid choice
        ]
        for point in invalid_points:
            with pytest.raises(ValueError):
                pb.add_starting_point(**point)

        # A point inside the space is accepted.
        pb.add_starting_point(dim0=0, dim1=0.0, dim2="a")
예제 #14
0
"""
Hyperparameter optimization problem to try forbidden clauses by directly using ConfigSpace.

Example command line::

    python -m deephyper.search.hps.ambsv1 --evaluator threadPool --problem deephyper.benchmark.hps.toy.problem_cond_1.Problem --run deephyper.benchmark.hps.toy.problem_cond_1.run --max-evals 100 --kappa 0.001
"""
import ConfigSpace as cs
from deephyper.problem import HpProblem

Problem = HpProblem(seed=45)

func = Problem.add_hyperparameter(name="func", value=["f1", "f2"])

# f1 variables
f1_x = Problem.add_hyperparameter(name="f1_x", value=(0.0, 1.0, "uniform"))
f1_y = Problem.add_hyperparameter(name="f1_y", value=(0.0, 1.0, "uniform"))

# f2 variables
f2_x = Problem.add_hyperparameter(name="f2_x", value=(0.0, 1.0, "uniform"))
f2_y = Problem.add_hyperparameter(name="f2_y", value=(0.0, 1.0, "uniform"))

cond_f1_x = cs.EqualsCondition(f1_x, func, "f1")
cond_f1_y = cs.EqualsCondition(f1_y, func, "f1")
Problem.add_condition(cond_f1_x)
Problem.add_condition(cond_f1_y)

cond_f2_x = cs.EqualsCondition(f2_x, func, "f2")
cond_f2_y = cs.EqualsCondition(f2_y, func, "f2")
Problem.add_condition(cond_f2_x)
Problem.add_condition(cond_f2_y)
예제 #15
0
"""
Hyperparameter optimization problem to try forbidden clauses by directly using ConfigSpace.

Example command line::

    python -m deephyper.search.hps.ambs2 --evaluator threadPool --problem deephyper.benchmark.hps.toy.problem_basic_1.Problem --run deephyper.benchmark.hps.toy.problem_basic_1.run --max-evals 100 --kappa 0.001
"""
import ConfigSpace.hyperparameters as csh
import numpy as np

from deephyper.problem import HpProblem

# Problem definition
Problem = HpProblem()

x_hp = csh.UniformIntegerHyperparameter(name="x", lower=0, upper=10, log=False)
y_hp = csh.UniformIntegerHyperparameter(name="y", lower=0, upper=10, log=False)

Problem.add_hyperparameters([x_hp, y_hp])

Problem.add_starting_point(x=1, y=1)

# Definition of the function which runs the model


def run(param_dict):
    """Toy objective: the sum of the two integer hyperparameters.

    :param param_dict: configuration with integer entries ``"x"`` and ``"y"``.
    :return: ``x + y``, the objective value for the search.
    """
    x = param_dict["x"]
    y = param_dict["y"]

    res = x + y

    # Bug fix: `res` was computed but never returned, so the search would
    # always have received None as the objective.
    return res
예제 #16
0
 def __init__(self, seed=None, **kwargs):
     """Initialize the search-space containers.

     :param seed: optional random seed forwarded to the hyperparameter space.
     """
     self.seed = seed
     # Ordered mapping holding the search-space definition.
     self._space = OrderedDict()
     # Dedicated hyperparameter sub-problem, seeded consistently.
     self._hp_space = HpProblem(seed)
예제 #17
0
    def test_add_good_reference(self):
        """A starting point inside the declared space is accepted."""
        from deephyper.problem import HpProblem

        problem = HpProblem()
        problem.add_hyperparameter((-10, 10), "dim0")
        problem.add_starting_point(dim0=0)
예제 #18
0
    def test_kwargs(self):
        """Hyperparameters can be declared with keyword arguments only."""
        from deephyper.problem import HpProblem

        problem = HpProblem()
        problem.add_hyperparameter(value=(-10, 10), name="dim0")
예제 #19
0
 def __init__(self):
     """Set up the empty search-space containers with default entries."""
     # Ordered mapping holding the search-space definition.
     self._space = OrderedDict()
     # Dedicated hyperparameter sub-problem.
     self._hp_space = HpProblem()
     # Default bookkeeping entries.
     self._space["metrics"] = []
     self._space["hyperparameters"] = {"verbose": 0}
예제 #20
0
    def test_create(self):
        """An HpProblem can be instantiated without arguments."""
        from deephyper.problem import HpProblem

        HpProblem()
예제 #21
0
    def __init__(
        self,
        problem,
        evaluator,
        random_state=None,
        log_dir=".",
        verbose=0,
        surrogate_model: str = "RF",
        acq_func: str = "UCB",
        kappa: float = 1.96,
        xi: float = 0.001,
        n_points: int = 10000,
        liar_strategy: str = "cl_max",
        n_jobs: int = 1,
        **kwargs,
    ):
        """Bayesian-optimization search over a joint hyperparameter + architecture space.

        :param problem: problem exposing ``build_search_space()`` and ``_hp_space``.
        :param evaluator: evaluator executing configurations; its ``num_workers``
            sets the number of initial random points.
        :param random_state: seed for reproducible sampling.
        :param log_dir: directory for search logs.
        :param verbose: verbosity level.
        :param surrogate_model: one of ``"RF"``, ``"ET"``, ``"GBRT"``, ``"DUMMY"``.
        :param acq_func: one of ``"UCB"``, ``"EI"``, ``"PI"``, ``"gp_hedge"``.
        :param kappa: exploration weight of the acquisition function.
        :param xi: improvement margin of the acquisition function.
        :param n_points: points sampled when optimizing the acquisition.
        :param liar_strategy: one of ``"cl_min"``, ``"cl_mean"``, ``"cl_max"``.
        :param n_jobs: parallelism of the surrogate-model estimator.
        :raises ValueError: if any parameter is outside its allowed set or type.
        """
        super().__init__(problem, evaluator, random_state, log_dir, verbose)

        # Setup the search space
        na_search_space = self._problem.build_search_space()

        self.hp_space = self._problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        # One integer dimension per variable node of the NAS search space,
        # seeded from the search's random state for reproducibility.
        self.na_space = HpProblem()
        self.na_space._space.seed(self._random_state.get_state()[1][0])
        for i, vnode in enumerate(na_search_space.variable_nodes):
            self.na_space.add_hyperparameter((0, vnode.num_ops - 1),
                                             name=f"vnode_{i:05d}")

        # Join both sub-spaces under distinct prefixes so names cannot collide.
        self._space = CS.ConfigurationSpace(
            seed=self._random_state.get_state()[1][0])
        self._space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space)
        self._space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space)

        # check input parameters
        surrogate_model_allowed = ["RF", "ET", "GBRT", "DUMMY"]
        if surrogate_model not in surrogate_model_allowed:
            raise ValueError(
                f"Parameter 'surrogate_model={surrogate_model}' should have a value in {surrogate_model_allowed}!"
            )

        acq_func_allowed = ["UCB", "EI", "PI", "gp_hedge"]
        if acq_func not in acq_func_allowed:
            raise ValueError(
                f"Parameter 'acq_func={acq_func}' should have a value in {acq_func_allowed}!"
            )

        if not np.isscalar(kappa):
            raise ValueError("Parameter 'kappa' should be a scalar value!")

        if not np.isscalar(xi):
            raise ValueError("Parameter 'xi' should be a scalar value!")

        # Exact type check (bool is deliberately rejected by `type(...) is int`).
        if type(n_points) is not int:
            # Fixed typo in the user-facing message ("shoud" -> "should").
            raise ValueError("Parameter 'n_points' should be an integer value!")

        liar_strategy_allowed = ["cl_min", "cl_mean", "cl_max"]
        if liar_strategy not in liar_strategy_allowed:
            raise ValueError(
                f"Parameter 'liar_strategy={liar_strategy}' should have a value in {liar_strategy_allowed}!"
            )

        if type(n_jobs) is not int:
            raise ValueError("Parameter 'n_jobs' should be an integer value!")

        # One random point per worker before the surrogate takes over.
        self._n_initial_points = self._evaluator.num_workers
        # Map the public liar-strategy name to the optimizer's internal name.
        self._liar_strategy = MAP_liar_strategy.get(liar_strategy,
                                                    liar_strategy)

        base_estimator = self._get_surrogate_model(
            surrogate_model,
            n_jobs,
            random_state=self._random_state.get_state()[1][0])

        # The optimizer is built lazily from these kwargs on first use.
        self._opt = None
        self._opt_kwargs = dict(
            dimensions=self._space,
            base_estimator=base_estimator,
            acq_func=MAP_acq_func.get(acq_func, acq_func),
            acq_optimizer="sampling",
            acq_func_kwargs={
                "xi": xi,
                "kappa": kappa,
                "n_points": n_points
            },
            n_initial_points=self._n_initial_points,
            random_state=self._random_state,
        )
예제 #22
0
        ("stdscaler", StandardScaler()),
    ])
    return preprocessor


# Candidate scikit-learn-style regressors selectable by the "regressor" dimension.
REGRESSORS = {
    "RandomForest": RandomForestRegressor,
    "Linear": LinearRegression,
    "AdaBoost": AdaBoostRegressor,
    "KNeighbors": KNeighborsRegressor,
    "MLP": MLPRegressor,
    "SVR": SVR,
    "XGBoost": XGBRegressor,
}

problem_autosklearn1 = HpProblem()

# Top-level categorical choice of the regressor family; values match the
# keys of REGRESSORS above.
regressor = problem_autosklearn1.add_hyperparameter(
    name="regressor",
    value=[
        "RandomForest", "Linear", "AdaBoost", "KNeighbors", "MLP", "SVR",
        "XGBoost"
    ],
)

# n_estimators
# Log-uniform prior: the useful range of estimator counts spans magnitudes.
n_estimators = problem_autosklearn1.add_hyperparameter(name="n_estimators",
                                                       value=(1, 2000,
                                                              "log-uniform"))

cond_n_estimators = cs.OrConjunction(
예제 #23
0
def run(config: dict, N: int) -> float:
    """Negative sum of squares of the N variables ``x0 .. x{N-1}``.

    The maximum value 0 is reached when every variable equals 0.

    :param config: configuration containing keys ``"x0"`` through ``"x{N-1}"``.
    :param N: number of variables to read from ``config``.
    :return: the (non-positive) objective value.
    """
    return -sum(config[f"x{i}"] ** 2 for i in range(N))


# Bind the dimensionality so each partial is a single-argument objective.
run_small = functools.partial(run, N=1)
run_large = functools.partial(run, N=2)

# %%
# Then, we can define the hyperparameter problem space based on :math:`n`
from deephyper.problem import HpProblem


# Small problem: a single variable x0 in [-10, 10].
N = 1
problem_small = HpProblem()
for i in range(N):
    problem_small.add_hyperparameter((-10.0, 10.0), f"x{i}")
problem_small

# %%
# Large problem: two variables x0, x1 in [-10, 10].
N = 2
problem_large = HpProblem()
for i in range(N):
    problem_large.add_hyperparameter((-10.0, 10.0), f"x{i}")
problem_large

# %%
# Then, we set up the search and execute it:
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback
예제 #24
0
from deephyper.problem import HpProblem

# Seeded ConfigSpace so sampling is reproducible.
config_space = cs.ConfigurationSpace(seed=42)

# Two integer dimensions on [0, 10].
x_hp = csh.UniformIntegerHyperparameter(name="x", lower=0, upper=10, log=False)
y_hp = csh.UniformIntegerHyperparameter(name="y", lower=0, upper=10, log=False)

config_space.add_hyperparameters([x_hp, y_hp])

# Forbid x == 0 and y == 0 together: the objective below divides by (x + y).
not_zero = cs.ForbiddenAndConjunction(cs.ForbiddenEqualsClause(x_hp, 0),
                                      cs.ForbiddenEqualsClause(y_hp, 0))

config_space.add_forbidden_clause(not_zero)

# Problem definition built from the pre-configured ConfigSpace.
Problem = HpProblem(config_space)

# Seed the search with a known-valid configuration.
Problem.add_starting_point(x=1, y=1)

# Definition of the function which runs the model


def run(param_dict):
    """Toy objective: ``log(1 / (x + y))`` for the sampled integers.

    Undefined when ``x + y == 0`` — that combination is excluded by the
    forbidden clause declared on the configuration space.
    """
    x, y = param_dict["x"], param_dict["y"]

    res = np.log(1 / (y + x))

    return res  # the objective