Example #1
    def test_dim_with_wrong_name(self):
        import pytest

        from deephyper.core.exceptions.problem import SpaceDimNameOfWrongType
        from deephyper.problem import HpProblem

        pb = HpProblem()
        with pytest.raises(SpaceDimNameOfWrongType):
            pb.add_hyperparameter((-10, 10), 0)
Example #2
    def test_config_space_hp(self):
        import ConfigSpace.hyperparameters as csh
        from deephyper.problem import HpProblem

        alpha = csh.UniformFloatHyperparameter(name="alpha", lower=0, upper=1)
        beta = csh.UniformFloatHyperparameter(name="beta", lower=0, upper=1)

        pb = HpProblem()
        pb.add_hyperparameters([alpha, beta])
Example #3
    def __init__(
        self,
        problem,
        run,
        evaluator,
        surrogate_model="RF",
        acq_func="LCB",
        kappa=1.96,
        xi=0.001,
        liar_strategy="cl_min",
        n_jobs=1,
        **kwargs,
    ):
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            **kwargs,
        )

        self.n_jobs = int(n_jobs)  # parallelism of BO surrogate model estimator
        self.kappa = float(kappa)
        self.xi = float(xi)
        self.n_initial_points = self.evaluator.num_workers
        self.liar_strategy = liar_strategy

        # Setup
        na_search_space = self.problem.build_search_space()

        self.hp_space = self.problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        self.na_space = HpProblem(self.problem.seed)
        for i, vnode in enumerate(na_search_space.variable_nodes):
            self.na_space.add_hyperparameter((0, vnode.num_ops - 1),
                                             name=f"vnode_{i:05d}")

        self.space = CS.ConfigurationSpace(seed=self.problem.seed)
        self.space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space)
        self.space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space)

        # Initialize the optimizer of the hyperparameter space
        self.opt = SkOptimizer(
            dimensions=self.space,
            base_estimator=self.get_surrogate_model(surrogate_model,
                                                    self.n_jobs),
            acq_func=acq_func,
            acq_optimizer="sampling",
            acq_func_kwargs={
                "xi": self.xi,
                "kappa": self.kappa
            },
            n_initial_points=self.n_initial_points,
        )
Example #4
    def test_random_seed(self):
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        problem = HpProblem()
        problem.add_hyperparameter((0.0, 10.0), "x")

        def run(config):
            return config["x"]

        create_evaluator = lambda: Evaluator.create(run, method="serial")

        search = CBO(problem,
                     create_evaluator(),
                     random_state=42,
                     surrogate_model="DUMMY")

        res1 = search.search(max_evals=4)
        res1_array = res1[["x"]].to_numpy()

        search = CBO(problem,
                     create_evaluator(),
                     random_state=42,
                     surrogate_model="DUMMY")
        res2 = search.search(max_evals=4)
        res2_array = res2[["x"]].to_numpy()

        assert np.array_equal(res1_array, res2_array)

        # test multi-objective
        def run(config):
            return config["x"], config["x"]

        create_evaluator = lambda: Evaluator.create(run, method="serial")

        search = CBO(problem,
                     create_evaluator(),
                     random_state=42,
                     surrogate_model="DUMMY")

        res1 = search.search(max_evals=4)
        res1_array = res1[["x"]].to_numpy()

        search = CBO(problem,
                     create_evaluator(),
                     random_state=42,
                     surrogate_model="DUMMY")
        res2 = search.search(max_evals=4)
        res2_array = res2[["x"]].to_numpy()

        assert np.array_equal(res1_array, res2_array)
Example #5
from collections import OrderedDict
from pprint import pformat

import ConfigSpace.hyperparameters as csh

from deephyper.problem import HpProblem


class Problem:
    """Representation of a problem."""
    def __init__(self, seed=None, **kwargs):
        self._space = OrderedDict()
        self._hp_space = HpProblem(seed)
        self.seed = seed

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return f"Problem\n{pformat({k:v for k,v in self._space.items()}, indent=2)}"

    def add_dim(self, p_name, p_space):
        """Add a dimension to the search space.

        Args:
            p_name (str): name of the parameter/dimension.
            p_space (Object): space corresponding to the new dimension.
        """
        self._space[p_name] = p_space

    @property
    def space(self):
        dims = list(self._space.keys())
        dims.sort()
        space = OrderedDict(**{d: self._space[d] for d in dims})
        return space

    def add_hyperparameter(self,
                           value,
                           name: str = None,
                           default_value=None) -> csh.Hyperparameter:
        return self._hp_space.add_hyperparameter(value, name, default_value)
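
A brief usage sketch of the container above (hypothetical dimension names and values):

problem = Problem(seed=42)
problem.add_dim("units", (1, 64))
problem.add_dim("lr", (1e-4, 1e-1))
print(problem)  # pretty-prints the registered dimensions via __repr__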
Example #6
    def test_add_starting_points_with_too_many_dim(self):
        import pytest

        from deephyper.problem import HpProblem

        pb = HpProblem()
        pb.add_hyperparameter((-10, 10), "dim0")
        with pytest.raises(ValueError):
            pb.add_starting_point(dim0=0, dim1=2)
Example #7
    def test_sample_types_no_cat(self):
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        problem = HpProblem()
        problem.add_hyperparameter((0, 10), "x_int")
        problem.add_hyperparameter((0.0, 10.0), "x_float")

        def run(config):

            assert np.issubdtype(type(config["x_int"]), np.integer)
            assert np.issubdtype(type(config["x_float"]), float)

            return 0

        create_evaluator = lambda: Evaluator.create(run, method="serial")

        CBO(problem,
            create_evaluator(),
            random_state=42,
            surrogate_model="DUMMY").search(10)

        CBO(problem, create_evaluator(), random_state=42,
            surrogate_model="RF").search(10)
Example #8
    def __init__(
        self,
        problem,
        evaluator,
        random_state: int = None,
        log_dir: str = ".",
        verbose: int = 0,
        population_size: int = 100,
        sample_size: int = 10,
        **kwargs,
    ):
        super().__init__(
            problem,
            evaluator,
            random_state,
            log_dir,
            verbose,
            population_size,
            sample_size,
        )

        # Setup
        na_search_space = self._problem.build_search_space()

        self.hp_space = self._problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        self.na_space = HpProblem()
        self.na_space._space.seed(self._random_state.get_state()[1][0])
        for i, (low, high) in enumerate(na_search_space.choices()):
            self.na_space.add_hyperparameter((low, high), name=f"vnode_{i:05d}")

        self._space = CS.ConfigurationSpace(seed=self._random_state.get_state()[1][0])
        self._space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space
        )
        self._space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space
        )
        self._space_size = len(self._space.get_hyperparameter_names())
Example #9
    def test_quickstart(self):
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO
        from deephyper.evaluator import Evaluator

        # define the variable you want to optimize
        problem = HpProblem()
        problem.add_hyperparameter((-10.0, 10.0), "x")

        # define the evaluator to distribute the computation
        evaluator = Evaluator.create(
            run,
            method="subprocess",
            method_kwargs={
                "num_workers": 2,
            },
        )

        # define your search and execute it
        search = CBO(problem, evaluator)

        results = search.search(max_evals=15)
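
Note that ``run`` is not shown in this snippet: it must be a module-level function so that the "subprocess" evaluator can import it in its worker processes. A minimal stand-in, purely illustrative:

def run(config):
    # toy objective: maximized at x = 0 over the (-10.0, 10.0) range
    return -config["x"] ** 2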
Example #10
    def __init__(
        self,
        problem,
        run,
        evaluator,
        population_size=100,
        sample_size=10,
        n_jobs=1,
        **kwargs,
    ):
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            population_size=population_size,
            sample_size=sample_size,
            **kwargs,
        )

        self.n_jobs = int(n_jobs)

        # Setup
        na_search_space = self.problem.build_search_space()

        self.hp_space = self.problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        self.na_space = HpProblem(self.problem.seed)
        for i, vnode in enumerate(na_search_space.variable_nodes):
            self.na_space.add_hyperparameter((0, vnode.num_ops - 1),
                                             name=f"vnode_{i:05d}")

        self.space = CS.ConfigurationSpace(seed=self.problem.seed)
        self.space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space)
        self.space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space)
Example #11
    def test_sample_types_conditional(self):
        import ConfigSpace as cs
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        problem = HpProblem()

        # choices
        choice = problem.add_hyperparameter(
            name="choice",
            value=["choice1", "choice2"],
        )

        # integers
        x1_int = problem.add_hyperparameter(name="x1_int", value=(1, 10))

        x2_int = problem.add_hyperparameter(name="x2_int", value=(1, 10))

        # conditions
        cond_1 = cs.EqualsCondition(x1_int, choice, "choice1")

        cond_2 = cs.EqualsCondition(x2_int, choice, "choice2")

        problem.add_condition(cond_1)
        problem.add_condition(cond_2)

        def run(config):

            if config["choice"] == "choice1":
                assert np.issubdtype(type(config["x1_int"]), np.integer)
            else:
                assert np.issubdtype(type(config["x2_int"]), np.integer)

            return 0

        create_evaluator = lambda: Evaluator.create(run, method="serial")

        CBO(problem,
            create_evaluator(),
            random_state=42,
            surrogate_model="DUMMY").search(10)
Example #12
    def test_create(self):
        from deephyper.problem import HpProblem

        pb = HpProblem()
Example #13
    def test_add_starting_points_not_in_space_def(self):
        import pytest

        from deephyper.problem import HpProblem

        pb = HpProblem()
        pb.add_hyperparameter((-10, 10), "dim0")
        pb.add_hyperparameter((-10.0, 10.0), "dim1")
        pb.add_hyperparameter(["a", "b"], "dim2")

        with pytest.raises(ValueError):
            pb.add_starting_point(dim0=-11, dim1=0.0, dim2="a")

        with pytest.raises(ValueError):
            pb.add_starting_point(dim0=11, dim1=0.0, dim2="a")

        with pytest.raises(ValueError):
            pb.add_starting_point(dim0=0, dim1=-11.0, dim2="a")

        with pytest.raises(ValueError):
            pb.add_starting_point(dim0=0, dim1=11.0, dim2="a")

        with pytest.raises(ValueError):
            pb.add_starting_point(dim0=0, dim1=0.0, dim2="c")

        pb.add_starting_point(dim0=0, dim1=0.0, dim2="a")
Example #14
"""
deephyper hps ambs --problem nas_big_data.albert.tune_random_forest.Problem --run nas_big_data.albert.tune_random_forest.run --max-evals 30 --evaluator subprocess --n-jobs 3
"""

import json
import os

import pandas as pd
from deephyper.problem import HpProblem
from nas_big_data import RANDOM_STATE
from nas_big_data.albert.load_data import load_data
from sklearn.ensemble import RandomForestClassifier

Problem = HpProblem()

Problem.add_hyperparameter((10, 300), "n_estimators")
Problem.add_hyperparameter(["gini", "entropy"], "criterion")
Problem.add_hyperparameter((1, 50), "max_depth")
Problem.add_hyperparameter((2, 10), "min_samples_split")

# We define a starting point with the default hyperparameters from scikit-learn
# that we consider good on average.
Problem.add_starting_point(n_estimators=100,
                           criterion="gini",
                           max_depth=50,
                           min_samples_split=2)


def test_best():
    """Test data with RandomForest
Example #15
from deephyper.problem import HpProblem

Problem = HpProblem()

Problem.add_dim('units', (1, 100))
Problem.add_dim('activation', ['NA', 'relu', 'sigmoid', 'tanh'])
Problem.add_dim('lr', (0.0001, 1.))

Problem.add_starting_point(
    units=10,
    activation='relu',
    lr=0.01)

if __name__ == '__main__':
    print(Problem)
Example #16
"""Inspired from https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
"""
import ConfigSpace as cs
from deephyper.problem import HpProblem


Problem = HpProblem(seed=45)

classifier = Problem.add_hyperparameter(
    name="classifier",
    value=["RandomForest", "Logistic", "AdaBoost", "KNeighbors", "MLP", "SVC", "XGBoost"],
)

# n_estimators
n_estimators = Problem.add_hyperparameter(
    name="n_estimators", value=(1, 2000, "log-uniform")
)

cond_n_estimators = cs.OrConjunction(
    cs.EqualsCondition(n_estimators, classifier, "RandomForest"),
    cs.EqualsCondition(n_estimators, classifier, "AdaBoost"),
)

Problem.add_condition(cond_n_estimators)

# max_depth
max_depth = Problem.add_hyperparameter(name="max_depth", value=(2, 100, "log-uniform"))

cond_max_depth = cs.EqualsCondition(max_depth, classifier, "RandomForest")

Problem.add_condition(cond_max_depth)
Example #17
    def test_gp(self):
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        # test float hyperparameters
        problem = HpProblem()
        problem.add_hyperparameter((0.0, 10.0), "x")

        def run(config):
            return config["x"]

        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model="GP",
        ).search(10)

        # test int hyperparameters
        problem = HpProblem()
        problem.add_hyperparameter((0, 10), "x")

        def run(config):
            return config["x"]

        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model="GP",
        ).search(10)

        # test categorical hyperparameters
        problem = HpProblem()
        problem.add_hyperparameter([f"{i}" for i in range(10)], "x")

        def run(config):
            return int(config["x"])

        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model="GP",
        ).search(10)
Example #18
    def test_add_good_reference(self):
        from deephyper.problem import HpProblem

        pb = HpProblem()
        pb.add_hyperparameter((-10, 10), "dim0")
        pb.add_starting_point(dim0=0)
Example #19
    def test_add_good_dim(self):
        import ConfigSpace as cs
        import ConfigSpace.hyperparameters as csh

        from deephyper.problem import HpProblem

        pb = HpProblem()

        p0 = pb.add_hyperparameter((-10, 10), "p0")
        p0_csh = csh.UniformIntegerHyperparameter(
            name="p0", lower=-10, upper=10, log=False
        )
        assert p0 == p0_csh

        p1 = pb.add_hyperparameter((1, 100, "log-uniform"), "p1")
        p1_csh = csh.UniformIntegerHyperparameter(name="p1", lower=1, upper=100, log=True)
        assert p1 == p1_csh

        p2 = pb.add_hyperparameter((-10.0, 10.0), "p2")
        p2_csh = csh.UniformFloatHyperparameter(
            name="p2", lower=-10.0, upper=10.0, log=False
        )
        assert p2 == p2_csh

        p3 = pb.add_hyperparameter((1.0, 100.0, "log-uniform"), "p3")
        p3_csh = csh.UniformFloatHyperparameter(
            name="p3", lower=1.0, upper=100.0, log=True
        )
        assert p3 == p3_csh

        p4 = pb.add_hyperparameter([1, 2, 3, 4], "p4")
        p4_csh = csh.OrdinalHyperparameter(name="p4", sequence=[1, 2, 3, 4])
        assert p4 == p4_csh

        p5 = pb.add_hyperparameter([1.0, 2.0, 3.0, 4.0], "p5")
        p5_csh = csh.OrdinalHyperparameter(name="p5", sequence=[1.0, 2.0, 3.0, 4.0])
        assert p5 == p5_csh

        p6 = pb.add_hyperparameter(["cat0", "cat1"], "p6")
        p6_csh = csh.CategoricalHyperparameter(name="p6", choices=["cat0", "cat1"])
        assert p6 == p6_csh

        p7 = pb.add_hyperparameter({"mu": 0, "sigma": 1}, "p7")
        p7_csh = csh.NormalIntegerHyperparameter(name="p7", mu=0, sigma=1)
        assert p7 == p7_csh

        if cs.__version__ > "0.4.20":
            p8 = pb.add_hyperparameter(
                {"mu": 0, "sigma": 1, "lower": -5, "upper": 5}, "p8"
            )
            p8_csh = csh.NormalIntegerHyperparameter(
                name="p8", mu=0, sigma=1, lower=-5, upper=5
            )
            assert p8 == p8_csh

        p9 = pb.add_hyperparameter({"mu": 0.0, "sigma": 1.0}, "p9")
        p9_csh = csh.NormalFloatHyperparameter(name="p9", mu=0, sigma=1)
        assert p9 == p9_csh
Example #20
"""
Basic hyperparameter optimization problem defined by directly using ConfigSpace hyperparameters.

Example command line::

    python -m deephyper.search.hps.ambs2 --evaluator threadPool --problem deephyper.benchmark.hps.toy.problem_basic_1.Problem --run deephyper.benchmark.hps.toy.problem_basic_1.run --max-evals 100 --kappa 0.001
"""
import ConfigSpace.hyperparameters as csh
import numpy as np

from deephyper.problem import HpProblem

# Problem definition
Problem = HpProblem()

x_hp = csh.UniformIntegerHyperparameter(name="x", lower=0, upper=10, log=False)
y_hp = csh.UniformIntegerHyperparameter(name="y", lower=0, upper=10, log=False)

Problem.add_hyperparameters([x_hp, y_hp])

Problem.add_starting_point(x=1, y=1)

# Definition of the function which runs the model


def run(param_dict):

    x = param_dict["x"]
    y = param_dict["y"]

    res = x + y

    return res  # the objective to maximize
Example #21
class AMBSMixed(NeuralArchitectureSearch):
    def __init__(
        self,
        problem,
        run,
        evaluator,
        surrogate_model="RF",
        acq_func="LCB",
        kappa=1.96,
        xi=0.001,
        liar_strategy="cl_min",
        n_jobs=1,
        **kwargs,
    ):
        super().__init__(
            problem=problem,
            run=run,
            evaluator=evaluator,
            **kwargs,
        )

        self.n_jobs = int(n_jobs)  # parallelism of BO surrogate model estimator
        self.kappa = float(kappa)
        self.xi = float(xi)
        self.n_initial_points = self.evaluator.num_workers
        self.liar_strategy = liar_strategy

        # Setup
        na_search_space = self.problem.build_search_space()

        self.hp_space = self.problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        self.na_space = HpProblem(self.problem.seed)
        for i, vnode in enumerate(na_search_space.variable_nodes):
            self.na_space.add_hyperparameter((0, vnode.num_ops - 1),
                                             name=f"vnode_{i:05d}")

        self.space = CS.ConfigurationSpace(seed=self.problem.seed)
        self.space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space)
        self.space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space)

        # Initialize the optimizer of the hyperparameter space
        self.opt = SkOptimizer(
            dimensions=self.space,
            base_estimator=self.get_surrogate_model(surrogate_model,
                                                    self.n_jobs),
            acq_func=acq_func,
            acq_optimizer="sampling",
            acq_func_kwargs={
                "xi": self.xi,
                "kappa": self.kappa
            },
            n_initial_points=self.n_initial_points,
        )

    @staticmethod
    def _extend_parser(parser):
        NeuralArchitectureSearch._extend_parser(parser)
        add_arguments_from_signature(parser, AMBSMixed)
        return parser

    def saved_keys(self, val: dict):
        res = {"arch_seq": str(val["arch_seq"])}
        hp_names = self.hp_space._space.get_hyperparameter_names()

        for hp_name in hp_names:
            if hp_name == "loss":
                res["loss"] = val["loss"]
            else:
                res[hp_name] = val["hyperparameters"][hp_name]

        return res

    def main(self):

        num_evals_done = 0

        # Filling available nodes at start
        dhlogger.info(
            f"Generating {self.evaluator.num_workers} initial points...")
        self.evaluator.add_eval_batch(
            self.get_random_batch(size=self.n_initial_points))

        # Main loop
        while num_evals_done < self.max_evals:

            # Collecting finished evaluations
            new_results = list(self.evaluator.get_finished_evals())

            if len(new_results) > 0:
                stats = {
                    "num_cache_used": self.evaluator.stats["num_cache_used"]
                }
                dhlogger.info(jm(type="env_stats", **stats))
                self.evaluator.dump_evals(saved_keys=self.saved_keys)

                num_received = len(new_results)
                num_evals_done += num_received

                # Transform configurations to list to fit optimizer
                opt_X = []
                opt_y = []
                for cfg, obj in new_results:
                    arch_seq = cfg["arch_seq"]
                    hp_val = self.problem.extract_hp_values(cfg)
                    x = replace_nan(hp_val + arch_seq)
                    opt_X.append(x)
                    opt_y.append(-obj)  #! maximizing

                self.opt.tell(opt_X, opt_y)  #! fit: costly
                new_X = self.opt.ask(n_points=len(new_results),
                                     strategy=self.liar_strategy)

                new_batch = []
                for x in new_X:
                    new_cfg = self.problem.gen_config(x[self.hp_size:],
                                                      x[:self.hp_size])
                    new_batch.append(new_cfg)

                # submit_childs
                if len(new_results) > 0:
                    self.evaluator.add_eval_batch(new_batch)

    def get_surrogate_model(self, name: str, n_jobs: int = None):
        """Get a surrogate model from Scikit-Optimize.

        Args:
            name (str): name of the surrogate model.
            n_jobs (int): number of parallel processes to distribute the computation of the surrogate model.

        Raises:
            ValueError: when the name of the surrogate model is unknown.
        """
        accepted_names = ["RF", "ET", "GBRT", "GP", "DUMMY"]
        if not (name in accepted_names):
            raise ValueError(
                f"Unknown surrogate model {name}, please choose among {accepted_names}."
            )

        if name == "RF":
            surrogate = skopt.learning.RandomForestRegressor(n_jobs=n_jobs)
        elif name == "ET":
            surrogate = skopt.learning.ExtraTreesRegressor(n_jobs=n_jobs)
        elif name == "GBRT":
            surrogate = skopt.learning.GradientBoostingQuantileRegressor(
                n_jobs=n_jobs)
        else:  # for DUMMY and GP
            surrogate = name

        return surrogate

    def get_random_batch(self, size: int) -> list:
        batch = []
        n_points = max(0, size - len(batch))
        if n_points > 0:
            points = self.opt.ask(n_points=n_points)
            for point in points:
                point_as_dict = self.problem.gen_config(
                    point[self.hp_size:], point[:self.hp_size])
                batch.append(point_as_dict)
        return batch

    def to_dict(self, x: list) -> dict:
        hp_x = x[:self.hp_size]
        arch_seq = x[self.hp_size:]
        cfg = self.problem.space.copy()
        cfg["arch_seq"] = arch_seq
        return cfg
Example #22
        ("stdscaler", StandardScaler()),
    ])
    return preprocessor


REGRESSORS = {
    "RandomForest": RandomForestRegressor,
    "Linear": LinearRegression,
    "AdaBoost": AdaBoostRegressor,
    "KNeighbors": KNeighborsRegressor,
    "MLP": MLPRegressor,
    "SVR": SVR,
    "XGBoost": XGBRegressor,
}

problem_autosklearn1 = HpProblem()

regressor = problem_autosklearn1.add_hyperparameter(
    name="regressor",
    value=[
        "RandomForest", "Linear", "AdaBoost", "KNeighbors", "MLP", "SVR",
        "XGBoost"
    ],
)

# n_estimators
n_estimators = problem_autosklearn1.add_hyperparameter(name="n_estimators",
                                                       value=(1, 2000,
                                                              "log-uniform"))

cond_n_estimators = cs.OrConjunction(
    cs.EqualsCondition(n_estimators, regressor, "RandomForest"),
    cs.EqualsCondition(n_estimators, regressor, "AdaBoost"),
)
Example #23
from deephyper.problem import HpProblem

Problem = HpProblem()

Problem.add_dim("lr", [1e-4, 5e-4, .001, .005, .01, .1])
Problem.add_dim("trade_off", [.01, .05, .1, .5, 1])
Problem.add_dim("cycle_length", [2, 4, 5, 8, 10])
Problem.add_dim("weight_decay", [1e-5, 1e-4, 1e-3, 5e-2, .01, .1])

Problem.add_starting_point(lr=1e-4, trade_off=.01, cycle_length=2, weight_decay=1e-5)

if __name__ == "__main__":
    print(Problem)
Example #24
    def __init__(
        self,
        problem,
        evaluator,
        random_state=None,
        log_dir=".",
        verbose=0,
        surrogate_model: str = "RF",
        acq_func: str = "UCB",
        kappa: float = 1.96,
        xi: float = 0.001,
        n_points: int = 10000,
        liar_strategy: str = "cl_max",
        n_jobs: int = 1,
        **kwargs,
    ):
        super().__init__(problem, evaluator, random_state, log_dir, verbose)

        # Setup the search space
        na_search_space = self._problem.build_search_space()

        self.hp_space = self._problem._hp_space  #! hyperparameters
        self.hp_size = len(self.hp_space.space.get_hyperparameter_names())
        self.na_space = HpProblem()
        self.na_space._space.seed(self._random_state.get_state()[1][0])
        for i, vnode in enumerate(na_search_space.variable_nodes):
            self.na_space.add_hyperparameter((0, vnode.num_ops - 1),
                                             name=f"vnode_{i:05d}")

        self._space = CS.ConfigurationSpace(
            seed=self._random_state.get_state()[1][0])
        self._space.add_configuration_space(
            prefix="1", configuration_space=self.hp_space.space)
        self._space.add_configuration_space(
            prefix="2", configuration_space=self.na_space.space)

        # check input parameters
        surrogate_model_allowed = ["RF", "ET", "GBRT", "DUMMY"]
        if not (surrogate_model in surrogate_model_allowed):
            raise ValueError(
                f"Parameter 'surrogate_model={surrogate_model}' should have a value in {surrogate_model_allowed}!"
            )

        acq_func_allowed = ["UCB", "EI", "PI", "gp_hedge"]
        if not (acq_func in acq_func_allowed):
            raise ValueError(
                f"Parameter 'acq_func={acq_func}' should have a value in {acq_func_allowed}!"
            )

        if not (np.isscalar(kappa)):
            raise ValueError(f"Parameter 'kappa' should be a scalar value!")

        if not (np.isscalar(xi)):
            raise ValueError("Parameter 'xi' should be a scalar value!")

        if not (type(n_points) is int):
            raise ValueError("Parameter 'n_points' shoud be an integer value!")

        liar_strategy_allowed = ["cl_min", "cl_mean", "cl_max"]
        if not (liar_strategy in liar_strategy_allowed):
            raise ValueError(
                f"Parameter 'liar_strategy={liar_strategy}' should have a value in {liar_strategy_allowed}!"
            )

        if not (type(n_jobs) is int):
            raise ValueError(f"Parameter 'n_jobs' should be an integer value!")

        self._n_initial_points = self._evaluator.num_workers
        self._liar_strategy = MAP_liar_strategy.get(liar_strategy,
                                                    liar_strategy)

        base_estimator = self._get_surrogate_model(
            surrogate_model,
            n_jobs,
            random_state=self._random_state.get_state()[1][0])

        self._opt = None
        self._opt_kwargs = dict(
            dimensions=self._space,
            base_estimator=base_estimator,
            acq_func=MAP_acq_func.get(acq_func, acq_func),
            acq_optimizer="sampling",
            acq_func_kwargs={
                "xi": xi,
                "kappa": kappa,
                "n_points": n_points
            },
            n_initial_points=self._n_initial_points,
            random_state=self._random_state,
        )
Example #25
    def test_kwargs(self):
        from deephyper.problem import HpProblem

        pb = HpProblem()
        pb.add_hyperparameter(value=(-10, 10), name="dim0")
Example #26
This example demonstrates how to handle objective failures in hyperparameter search. In many cases, such as software auto-tuning (where we minimize the run-time of a software application), some configurations trigger run-time errors and therefore return no scalar objective. A default choice is to return the worst-case objective, if known, directly inside the ``run``-function. Other possibilities are to ignore these configurations or to replace them with the running mean/min of the collected objectives. To illustrate such a use-case, we define an artificial ``run``-function which fails when one of its input parameters is greater than 0.5. A failure is declared by returning a string value prefixed with ``"F"``, such as:
"""


def run(config: dict) -> float:
    if config["y"] > 0.5:
        return "F_postfix"
    else:
        return config["x"]


# %%
# Then, we define the corresponding hyperparameter problem, where ``x`` is the value to maximize and ``y`` is a value that controls the appearance of failures.
from deephyper.problem import HpProblem

problem = HpProblem()
problem.add_hyperparameter([1, 2, 4, 8, 16, 32], "x")
problem.add_hyperparameter((0.0, 1.0), "y")

print(problem)


# %%
# Then, we define a centralized Bayesian optimization (CBO) search (i.e., master-worker architecture) which uses a Random-Forest regressor as its default surrogate model. We compare the ``ignore`` strategy, which filters out failed configurations; the ``mean`` strategy, which replaces a failure with the running mean of the collected objectives; and the ``min`` strategy, which replaces it with the running min.
from deephyper.search.hps import CBO
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback

results = {}
max_evals = 30
for failure_strategy in ["ignore", "mean", "min"]:
Example #27
"""
problem.py
"""
import ConfigSpace as cs

from deephyper.problem import HpProblem

Problem = HpProblem()

# call signature: Problem.add_dim(name, value)
Problem.add_dim('units1', (1, 64))            # int in range 1-64
Problem.add_dim('units2', (1, 64))            # int in range 1-64
Problem.add_dim('dropout1', (0.0, 1.0))       # float in range 0-1
Problem.add_dim('dropout2', (0.0, 1.0))       # float in range 0-1
Problem.add_dim('batch_size', (5, 500))       # int in range 5-500
Problem.add_dim('log10_learning_rate', (-5.0, 0.0))  # float lr range from 10^-5 to 1

# one of ['relu', ..., ]
Problem.add_dim('activation', ['relu', 'elu', 'selu', 'tanh'])

optimizer = Problem.add_dim('optimizer', [
    'Adam', 'RMSprop', 'SGD', 'Nadam', 'Adagrad'
])

# Only vary momentum if optimizer is SGD
momentum = Problem.add_dim("momentum", (0.5, 0.9))
Problem.add_condition(cs.EqualsCondition(momentum, optimizer, "SGD"))

# Add a starting point to try first
Problem.add_starting_point(
    units1=16,
Example #28
import ConfigSpace as cs
import ConfigSpace.hyperparameters as csh
import numpy as np

from deephyper.problem import HpProblem

config_space = cs.ConfigurationSpace(seed=42)

x_hp = csh.UniformIntegerHyperparameter(name="x", lower=0, upper=10, log=False)
y_hp = csh.UniformIntegerHyperparameter(name="y", lower=0, upper=10, log=False)

config_space.add_hyperparameters([x_hp, y_hp])

not_zero = cs.ForbiddenAndConjunction(cs.ForbiddenEqualsClause(x_hp, 0),
                                      cs.ForbiddenEqualsClause(y_hp, 0))

config_space.add_forbidden_clause(not_zero)

# Problem definition
Problem = HpProblem(config_space)

Problem.add_starting_point(x=1, y=1)

# Definition of the function which runs the model


def run(param_dict):

    x = param_dict["x"]
    y = param_dict["y"]

    res = np.log(1 / (y + x))

    return res  # the objective
Example #29
"""
Hyperparameter optimization problem to try conditional hyperparameters by directly using ConfigSpace.

Example command line::

    python -m deephyper.search.hps.ambsv1 --evaluator threadPool --problem deephyper.benchmark.hps.toy.problem_cond_1.Problem --run deephyper.benchmark.hps.toy.problem_cond_1.run --max-evals 100 --kappa 0.001
"""
import ConfigSpace as cs
from deephyper.problem import HpProblem

Problem = HpProblem(seed=45)

func = Problem.add_hyperparameter(name="func", value=["f1", "f2"])

# f1 variables
f1_x = Problem.add_hyperparameter(name="f1_x", value=(0.0, 1.0, "uniform"))
f1_y = Problem.add_hyperparameter(name="f1_y", value=(0.0, 1.0, "uniform"))

# f2 variables
f2_x = Problem.add_hyperparameter(name="f2_x", value=(0.0, 1.0, "uniform"))
f2_y = Problem.add_hyperparameter(name="f2_y", value=(0.0, 1.0, "uniform"))

cond_f1_x = cs.EqualsCondition(f1_x, func, "f1")
cond_f1_y = cs.EqualsCondition(f1_y, func, "f1")
Problem.add_condition(cond_f1_x)
Problem.add_condition(cond_f1_y)

cond_f2_x = cs.EqualsCondition(f2_x, func, "f2")
cond_f2_y = cs.EqualsCondition(f2_y, func, "f2")
Problem.add_condition(cond_f2_x)
Problem.add_condition(cond_f2_y)
Example #30
"""
!! CURRENT OPTIMIZER DOES NOT SUPPORT THIS!!

Hyperparameter optimization problem to try forbidden clauses.

Example command line::

    python -m deephyper.search.hps.ambs2 --evaluator threadPool --problem deephyper.benchmark.hps.toy.problem_forbidden_1.Problem --run deephyper.benchmark.hps.toy.problem_forbidden_1.run --max-evals 100 --kappa 0.001
"""
import numpy as np

from deephyper.problem import HpProblem
from deephyper.problem import config_space as cs

# Problem definition
Problem = HpProblem()

x_hp = Problem.add_hyperparameter(
    name="x", value=(0, 10))  # or Problem.add_dim("x", (0, 10))
y_hp = Problem.add_hyperparameter(
    name="y", value=(0, 10))  # or Problem.add_dim("y", (0, 10))

not_zero = cs.ForbiddenAndConjunction(cs.ForbiddenEqualsClause(x_hp, 0),
                                      cs.ForbiddenEqualsClause(y_hp, 0))

Problem.add_forbidden_clause(not_zero)

Problem.add_starting_point(x=1, y=1)

# Definition of the function which runs the model
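
The definition is cut off here; a minimal sketch mirroring the objective of Example #28 (an assumption, not the original body):

def run(param_dict):
    x = param_dict["x"]
    y = param_dict["y"]
    # the forbidden clause above guarantees x + y > 0, so the log is defined
    return np.log(1 / (y + x))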