Example #1
    def test_random_seed(self):
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        problem = HpProblem()
        problem.add_hyperparameter((0.0, 10.0), "x")

        def run(config):
            return config["x"]

        create_evaluator = lambda: Evaluator.create(run, method="serial")

        search = CBO(problem,
                     create_evaluator(),
                     random_state=42,
                     surrogate_model="DUMMY")

        res1 = search.search(max_evals=4)
        res1_array = res1[["x"]].to_numpy()

        search = CBO(problem,
                     create_evaluator(),
                     random_state=42,
                     surrogate_model="DUMMY")
        res2 = search.search(max_evals=4)
        res2_array = res2[["x"]].to_numpy()

        assert np.array_equal(res1_array, res2_array)

        # test multi-objective
        def run(config):
            return config["x"], config["x"]

        create_evaluator = lambda: Evaluator.create(run, method="serial")

        search = CBO(problem,
                     create_evaluator(),
                     random_state=42,
                     surrogate_model="DUMMY")

        res1 = search.search(max_evals=4)
        res1_array = res1[["x"]].to_numpy()

        search = CBO(problem,
                     create_evaluator(),
                     random_state=42,
                     surrogate_model="DUMMY")
        res2 = search.search(max_evals=4)
        res2_array = res2[["x"]].to_numpy()

        assert np.array_equal(res1_array, res2_array)
Example #2
def test_ambs():

    create_evaluator = lambda: Evaluator.create(
        run, method="process", method_kwargs={"num_workers": 1})

    search = AMBS(
        problem,
        create_evaluator(),
        random_state=42,
    )

    res1 = search.search(max_evals=4)
    res1_array = res1[["x"]].to_numpy()

    search.search(max_evals=100, timeout=1)

    search = AMBS(
        problem,
        create_evaluator(),
        random_state=42,
    )
    res2 = search.search(max_evals=4)
    res2_array = res2[["x"]].to_numpy()

    assert np.array_equal(res1_array, res2_array)
Example #3
    def test_agebo_with_hp(self):
        import numpy as np
        from deephyper.benchmark.nas import linearRegHybrid
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import AgEBO

        create_evaluator = lambda: Evaluator.create(run_debug_arch,
                                                    method="serial")

        search = AgEBO(
            linearRegHybrid.Problem,
            create_evaluator(),
            random_state=42,
        )

        res1 = search.search(max_evals=4)
        res1_array = res1[[
            "arch_seq", "batch_size", "learning_rate", "optimizer"
        ]].to_numpy()

        search = AgEBO(
            linearRegHybrid.Problem,
            create_evaluator(),
            random_state=42,
        )
        res2 = search.search(max_evals=4)
        res2_array = res2[[
            "arch_seq", "batch_size", "learning_rate", "optimizer"
        ]].to_numpy()

        assert np.array_equal(res1_array, res2_array)
Example #4
    def test_ambsmixed_with_hp(self):
        import numpy as np
        from deephyper.benchmark.nas import linearRegHybrid
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import AMBSMixed

        create_evaluator = lambda: Evaluator.create(run_debug_arch,
                                                    method="serial")

        search = AMBSMixed(
            linearRegHybrid.Problem,
            create_evaluator(),
            random_state=42,
        )

        res1 = search.search(max_evals=4)
        res1_array = res1[["arch_seq"]].to_numpy()

        search = AMBSMixed(
            linearRegHybrid.Problem,
            create_evaluator(),
            random_state=42,
        )
        res2 = search.search(max_evals=4)
        res2_array = res2[["arch_seq"]].to_numpy()

        assert np.array_equal(res1_array, res2_array)
Example #5
    def test_sample_types_no_cat(self):
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        problem = HpProblem()
        problem.add_hyperparameter((0, 10), "x_int")
        problem.add_hyperparameter((0.0, 10.0), "x_float")

        def run(config):

            assert np.issubdtype(type(config["x_int"]), np.integer)
            assert np.issubdtype(type(config["x_float"]), float)

            return 0

        create_evaluator = lambda: Evaluator.create(run, method="serial")

        CBO(problem,
            create_evaluator(),
            random_state=42,
            surrogate_model="DUMMY").search(10)

        CBO(problem, create_evaluator(), random_state=42,
            surrogate_model="RF").search(10)
Example #6
    def test_regevo_without_hp(self):
        import numpy as np
        from deephyper.benchmark.nas import linearReg
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import RegularizedEvolution

        create_evaluator = lambda: Evaluator.create(run_debug_arch,
                                                    method="serial")

        search = RegularizedEvolution(
            linearReg.Problem,
            create_evaluator(),
            random_state=42,
        )

        res1 = search.search(max_evals=4)
        res1_array = res1[["arch_seq"]].to_numpy()

        search = RegularizedEvolution(
            linearReg.Problem,
            create_evaluator(),
            random_state=42,
        )
        res2 = search.search(max_evals=4)
        res2_array = res2[["arch_seq"]].to_numpy()

        assert np.array_equal(res1_array, res2_array)
Example #7
    def __init__(self, problem, run, evaluator, **kwargs):
        super().__init__(problem, run, evaluator, **kwargs)
        # set in super : self.problem
        # set in super : self.run_func
        # set in super : self.evaluator
        # `key` is assumed to be a cache-key function defined elsewhere in this module
        self.evaluator = Evaluator.create(self.run_func,
                                          cache_key=key,
                                          method=evaluator)

        self.num_episodes = kwargs.get('num_episodes')
        if self.num_episodes is None:
            self.num_episodes = math.inf

        self.reward_rule = util.load_attr_from(
            'deephyper.search.nas.agent.utils.' + kwargs['reward_rule'])

        self.space = self.problem.space

        logger.debug(f'evaluator: {type(self.evaluator)}')

        self.num_agents = MPI.COMM_WORLD.Get_size() - 1  # one is the parameter server
        self.rank = MPI.COMM_WORLD.Get_rank()

        logger.debug(f'num_agents: {self.num_agents}')
        logger.debug(f'rank: {self.rank}')
Example #8
def test_regevo_without_hp():

    create_evaluator = lambda: Evaluator.create(
        run_debug_arch, method="process", method_kwargs={"num_workers": 1}
    )

    search = RegularizedEvolution(
        linearReg.Problem,
        create_evaluator(),
        random_state=42,
    )

    res1 = search.search(max_evals=4)
    res1_array = res1[["arch_seq"]].to_numpy()

    search.search(max_evals=100, timeout=1)

    search = RegularizedEvolution(
        linearReg.Problem,
        create_evaluator(),
        random_state=42,
    )
    res2 = search.search(max_evals=4)
    res2_array = res2[["arch_seq"]].to_numpy()

    assert np.array_equal(res1_array, res2_array)
Example #9
def test_agebo_with_hp():

    create_evaluator = lambda: Evaluator.create(
        run_debug_arch, method="process", method_kwargs={"num_workers": 1})

    search = AgEBO(
        linearRegHybrid.Problem,
        create_evaluator(),
        random_state=42,
    )

    res1 = search.search(max_evals=4)
    res1_array = res1[["arch_seq", "batch_size", "learning_rate",
                       "optimizer"]].to_numpy()

    search.search(max_evals=100, timeout=1)

    search = AgEBO(
        linearRegHybrid.Problem,
        create_evaluator(),
        random_state=42,
    )
    res2 = search.search(max_evals=4)
    res2_array = res2[["arch_seq", "batch_size", "learning_rate",
                       "optimizer"]].to_numpy()

    assert np.array_equal(res1_array, res2_array)
Example #10
def test_agebo_without_hp():

    create_evaluator = lambda: Evaluator.create(
        run_debug_arch, method="process", method_kwargs={"num_workers": 1})

    with pytest.raises(ValueError):  # no hyperparameter space was defined for this problem
        search = AgEBO(linearReg.Problem, create_evaluator(), random_state=42)
Example #11
def main():
    from deephyper.search.nas.agent.run_func_math import run_func
    evaluator = Evaluator.create(run_func, cache_key=key, method='threadPool')
    train(
        num_iter=500,
        num_episodes_per_iter=10,
        seed=2018,
        evaluator=evaluator)
Example #12
    def test_gp(self):
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        # test float hyperparameters
        problem = HpProblem()
        problem.add_hyperparameter((0.0, 10.0), "x")

        def run(config):
            return config["x"]

        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model="GP",
        ).search(10)

        # test int hyperparameters
        problem = HpProblem()
        problem.add_hyperparameter((0, 10), "x")

        def run(config):
            return config["x"]

        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model="GP",
        ).search(10)

        # test categorical hyperparameters
        problem = HpProblem()
        problem.add_hyperparameter([f"{i}" for i in range(10)], "x")

        def run(config):
            return int(config["x"])

        CBO(
            problem,
            Evaluator.create(run, method="serial"),
            random_state=42,
            surrogate_model="GP",
        ).search(10)
Example #13
def main(**kwargs):
    """
    :meta private:
    """

    sys.path.insert(0, ".")

    if kwargs["verbose"]:
        logging.basicConfig(filename="deephyper.log", level=logging.INFO)

    search_name = sys.argv[2]

    # load search class
    logging.info(f"Loading the search '{search_name}'...")
    search_cls = load_attr(HPS_SEARCHES[search_name])

    # load problem
    logging.info("Loading the problem...")
    problem = load_attr(kwargs.pop("problem"))

    # load run function
    logging.info("Loading the run-function...")
    run_function = load_attr(kwargs.pop("run_function"))

    # filter arguments from evaluator class signature
    logging.info("Loading the evaluator...")
    evaluator_method = kwargs.pop("evaluator")
    base_arguments = ["num_workers", "callbacks"]
    evaluator_kwargs = {k: kwargs.pop(k) for k in base_arguments}

    # remove the arguments of unused evaluators; CLI keys are prefixed with the
    # evaluator method name (e.g., "ray_num_cpus" -> "num_cpus" for method "ray")
    for method in EVALUATORS.keys():
        evaluator_method_kwargs = {
            k[len(evaluator_method) + 1:]: kwargs.pop(k)
            for k in kwargs.copy()
            if method in k
        }
        if method == evaluator_method:
            evaluator_kwargs = {**evaluator_kwargs, **evaluator_method_kwargs}

    # create evaluator
    logging.info(f"Evaluator(method={evaluator_method}, method_kwargs={evaluator_kwargs}")
    evaluator = Evaluator.create(
        run_function, method=evaluator_method, method_kwargs=evaluator_kwargs
    )
    logging.info(f"Evaluator has {evaluator.num_workers} workers available.")

    # filter arguments from search class signature
    # remove keys in evaluator_kwargs
    kwargs = {k: v for k, v in kwargs.items() if k not in evaluator_kwargs}
    max_evals = kwargs.pop("max_evals")
    timeout = kwargs.pop("timeout")

    # TODO: How about checkpointing and transfer learning?

    # execute the search
    # remaining kwargs are for the search
    logging.info(f"Evaluator has {evaluator.num_workers} workers available.")
    search = search_cls(problem, evaluator, **kwargs)

    search.search(max_evals=max_evals, timeout=timeout)
Example #14
def test_regevo_with_hp():

    create_evaluator = lambda: Evaluator.create(
        run_debug_arch, method="process", method_kwargs={"num_workers": 1}
    )

    with pytest.raises(ValueError):  # the problem defines a hyperparameter space, which this search rejects
        search = RegularizedEvolution(
            linearRegHybrid.Problem, create_evaluator(), random_state=42
        )
Example #15
    def test_wrong_evaluator(self):
        from deephyper.evaluator import Evaluator

        with pytest.raises(DeephyperRuntimeError):
            evaluator = Evaluator.create(
                run,
                method="threadPool",
                method_kwargs={
                    "num_workers": 1,
                },
            )
Example #16
    def evaluate_search(self, search_cls, problem):
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch

        # Test "max_evals" stopping criteria
        evaluator = Evaluator.create(run_debug_arch, method="serial")

        search = search_cls(problem, evaluator)

        res = search.search(max_evals=10)
        self.assertEqual(len(res), 10)
Example #17
    def test_regevo_with_hp(self):
        from deephyper.benchmark.nas import linearRegHybrid
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import RegularizedEvolution

        create_evaluator = lambda: Evaluator.create(run_debug_arch,
                                                    method="serial")

        with pytest.raises(ValueError):  # the problem defines a hyperparameter space, which this search rejects
            search = RegularizedEvolution(linearRegHybrid.Problem,
                                          create_evaluator(),
                                          random_state=42)
Example #18
def _test_mpicomm_evaluator():
    """Test the MPICommEvaluator"""

    configs = [{"x": i} for i in range(4)]

    with Evaluator.create(
            run,
            method="mpicomm",
    ) as evaluator:
        if evaluator is not None:
            evaluator.submit(configs)

            results = evaluator.gather("ALL")
            objectives = sorted([job.result for job in results])
            assert objectives == list(range(4))
Example #19
    def evaluate_search(self, search_cls, problem):
        # Test "max_evals" stopping criteria
        evaluator = Evaluator.create(run_debug_arch,
                                     method="subprocess",
                                     method_kwargs={"num_workers": 1})

        search = search_cls(problem, evaluator)

        res = search.search(max_evals=10)
        self.assertEqual(len(res), 10)

        # Test "max_evals" and "timeout" stopping criterias
        evaluator = Evaluator.create(run_debug_slow,
                                     method="subprocess",
                                     method_kwargs={"num_workers": 1})

        search = search_cls(problem, evaluator)

        with pytest.raises(TypeError):  # timeout should be an int
            res = search.search(max_evals=10, timeout=1.0)
        t1 = time.time()
        res = search.search(max_evals=10, timeout=1)
        d = time.time() - t1
        self.assertAlmostEqual(d, 1, delta=0.1)
Example #20
    def test_agebo_without_hp(self):
        from deephyper.benchmark.nas import linearReg
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import AgEBO

        create_evaluator = lambda: Evaluator.create(run_debug_arch,
                                                    method="serial")

        # ValueError: No hyperparameter space was defined for this problem
        with pytest.raises(ValueError):
            search = AgEBO(
                linearReg.Problem,
                create_evaluator(),
                random_state=42,
            )
Example #21
def test_random_search():

    create_evaluator = lambda: Evaluator.create(
        run_debug_arch, method="process", method_kwargs={"num_workers": 1})

    search = Random(linearReg.Problem, create_evaluator(), random_state=42)

    res1 = search.search(max_evals=4)

    search.search(max_evals=100, timeout=1)

    search = Random(linearReg.Problem, create_evaluator(), random_state=42)
    res2 = search.search(max_evals=4)

    assert np.array_equal(res1["arch_seq"].to_numpy(),
                          res2["arch_seq"].to_numpy())
Example #22
def _on_done(job):
    # read the job's stdout file from its working directory
    output = job.read_file_in_workdir(f'{job.name}.out')
    # parse the raw output into an objective value
    output = Evaluator._parse(output)
    return output
Example #23
    def __init__(self, problem, run, evaluator, **kwargs):
        _args = vars(self.parse_args(''))
        _args.update(kwargs)
        _args['problem'] = problem
        _args['run'] = run
        self.args = Namespace(**_args)
        self.problem = util.generic_loader(problem, 'Problem')
        self.run_func = util.generic_loader(run, 'run')
        logger.info('Evaluator will execute the function: ' + run)
        self.evaluator = Evaluator.create(self.run_func, method=evaluator)
        self.num_workers = self.evaluator.num_workers

        logger.info('Options: ' + pformat(self.args.__dict__, indent=4))
        logger.info('Hyperparameter space definition: ' +
                    pformat(self.problem.space, indent=4))
        logger.info(f'Created {self.args.evaluator} evaluator')
        logger.info(f'Evaluator: num_workers is {self.num_workers}')
Example #24
    def test_random_search(self):
        import numpy as np
        from deephyper.benchmark.nas import linearReg
        from deephyper.evaluator import Evaluator
        from deephyper.nas.run import run_debug_arch
        from deephyper.search.nas import Random

        create_evaluator = lambda: Evaluator.create(run_debug_arch,
                                                    method="serial")

        search = Random(linearReg.Problem, create_evaluator(), random_state=42)

        res1 = search.search(max_evals=4)

        search = Random(linearReg.Problem, create_evaluator(), random_state=42)
        res2 = search.search(max_evals=4)

        assert np.array_equal(res1["arch_seq"].to_numpy(),
                              res2["arch_seq"].to_numpy())
Example #25
    def test_sample_types_conditional(self):
        import ConfigSpace as cs
        import numpy as np
        from deephyper.evaluator import Evaluator
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO

        problem = HpProblem()

        # choices
        choice = problem.add_hyperparameter(
            name="choice",
            value=["choice1", "choice2"],
        )

        # integers
        x1_int = problem.add_hyperparameter(name="x1_int", value=(1, 10))

        x2_int = problem.add_hyperparameter(name="x2_int", value=(1, 10))

        # conditions
        cond_1 = cs.EqualsCondition(x1_int, choice, "choice1")

        cond_2 = cs.EqualsCondition(x2_int, choice, "choice2")

        problem.add_condition(cond_1)
        problem.add_condition(cond_2)

        def run(config):

            if config["choice"] == "choice1":
                assert np.issubdtype(type(config["x1_int"]), np.integer)
            else:
                assert np.issubdtype(type(config["x2_int"]), np.integer)

            return 0

        create_evaluator = lambda: Evaluator.create(run, method="serial")

        CBO(problem,
            create_evaluator(),
            random_state=42,
            surrogate_model="DUMMY").search(10)
Example #26
    def test_quickstart(self):
        from deephyper.problem import HpProblem
        from deephyper.search.hps import CBO
        from deephyper.evaluator import Evaluator

        # define the variable you want to optimize
        problem = HpProblem()
        problem.add_hyperparameter((-10.0, 10.0), "x")

        # define the evaluator to distribute the computation
        evaluator = Evaluator.create(
            run,
            method="subprocess",
            method_kwargs={
                "num_workers": 2,
            },
        )

        # define your search and execute it
        search = CBO(problem, evaluator)

        results = search.search(max_evals=15)
Example #27
    def test_thread_process_subprocess(self):
        from deephyper.evaluator import Evaluator

        for method in ["thread", "process", "subprocess"]:
            evaluator = Evaluator.create(
                run,
                method=method,
                method_kwargs={
                    "num_workers": 1,
                },
            )

            configs = [{"x": i} for i in range(10)]
            evaluator.submit(configs)
            jobs = evaluator.gather("ALL")
            jobs.sort(key=lambda j: j.config["x"])
            for config, job in zip(configs, jobs):
                assert config["x"] == job.config["x"]

            evaluator.submit(configs)
            jobs = evaluator.gather("BATCH", size=1)
            assert 1 <= len(jobs) <= len(configs)
Example #28
    def __init__(self,
                 problem,
                 evaluator,
                 random_state=None,
                 log_dir=".",
                 verbose=0,
                 **kwargs):

        # get the __init__ parameters
        self._init_params = locals()
        self._call_args = []

        self._problem = copy.deepcopy(problem)

        # if a callable is passed directly, wrap it in a serial evaluator
        if not isinstance(evaluator, Evaluator) and callable(evaluator):
            self._evaluator = Evaluator.create(
                evaluator,
                method="serial",
                method_kwargs={"callbacks": [TqdmCallback()]},
            )
        else:
            self._evaluator = evaluator
        self._seed = None

        if type(random_state) is int:
            self._seed = random_state
            self._random_state = np.random.RandomState(random_state)
        elif isinstance(random_state, np.random.RandomState):
            self._random_state = random_state
        else:
            self._random_state = np.random.RandomState()

        # Create the logging directory if it does not exist
        self._log_dir = os.path.abspath(log_dir)
        pathlib.Path(log_dir).mkdir(parents=False, exist_ok=True)

        self._verbose = verbose
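A hedged usage sketch of the callable-wrapping branch above: because the constructor wraps a plain callable in a serial evaluator, a search can be built directly from a run-function. The CBO class and the toy problem below are illustrative assumptions, not part of this snippet.

from deephyper.problem import HpProblem
from deephyper.search.hps import CBO

problem = HpProblem()
problem.add_hyperparameter((0.0, 10.0), "x")

def run(config):
    return config["x"]

# no explicit Evaluator.create(...): the constructor wraps the callable
# in a serial evaluator with a TqdmCallback, as shown above
search = CBO(problem, run, random_state=42)
results = search.search(max_evals=10)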
Example #29
    def test_ray(self):
        from deephyper.evaluator import Evaluator

        def run(config):
            return config["x"]

        evaluator = Evaluator.create(
            run,
            method="ray",
            method_kwargs={
                "num_cpus": 1,
            },
        )

        configs = [{"x": i} for i in range(10)]
        evaluator.submit(configs)
        jobs = evaluator.gather("ALL")
        jobs.sort(key=lambda j: j.config["x"])
        for config, job in zip(configs, jobs):
            assert config["x"] == job.config["x"]

        evaluator.submit(configs)
        jobs = evaluator.gather("BATCH", size=1)
        assert 1 <= len(jobs) <= len(configs)
Example #30
print(problem)


# %%
# Then, we define a centralized Bayesian optimization (CBO) search (i.e., a master-worker architecture) that uses a Random-Forest regressor as its default surrogate model. We compare the ``ignore`` strategy, which filters out failed configurations; the ``mean`` strategy, which replaces a failure with the running mean of the collected objectives; and the ``min`` strategy, which replaces it with the running min of the collected objectives.
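# %%
# The ``run`` function used below is defined earlier in this script (not shown
# in this snippet). Purely as an illustrative stand-in, a run-function that
# sometimes fails can return DeepHyper's failure marker, a string starting
# with "F", instead of a numerical objective:


def run_with_failures(config):  # hypothetical example, not the tutorial's run
    y = -config["x"] ** 2
    if y < -50:  # simulate a crashing region of the search space
        return "F_simulated_failure"
    return y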
from deephyper.search.hps import CBO
from deephyper.evaluator import Evaluator
from deephyper.evaluator.callback import TqdmCallback

results = {}
max_evals = 30
for failure_strategy in ["ignore", "mean", "min"]:
    # for failure_strategy in ["min"]:
    print(f"Executing failure strategy: {failure_strategy}")
    evaluator = Evaluator.create(
        run, method="serial", method_kwargs={"callbacks": [TqdmCallback(max_evals)]}
    )
    search = CBO(
        problem,
        evaluator,
        filter_failures=failure_strategy,
        log_dir=f"search_{failure_strategy}",
        random_state=42,
    )
    results[failure_strategy] = search.search(max_evals)

# %%
# Finally, we plot the collected results.
import matplotlib.pyplot as plt
import numpy as np
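# The plotting code is not included in this snippet; an illustrative sketch
# (assuming the results dataframes expose an "objective" column, with failed
# evaluations stored as non-numeric markers) could look like:
import pandas as pd

for strategy, df in results.items():
    # coerce failure markers such as "F_..." to NaN so cummax skips them
    objective = pd.to_numeric(df["objective"], errors="coerce")
    plt.plot(objective.cummax().to_numpy(), label=strategy)
plt.xlabel("Evaluations")
plt.ylabel("Best objective found")
plt.legend()
plt.show()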