Example #1
    def test_create_with_different_configure(self, benchmark_config_py,
                                             caplog):
        """Test creation with same name but different configure"""
        with OrionState():
            config = copy.deepcopy(benchmark_config_py)
            bm1 = get_or_create_benchmark(**config)
            bm1.close()

            config = copy.deepcopy(benchmark_config_py)
            config["targets"][0]["assess"] = [AverageResult(2)]

            with caplog.at_level(logging.WARNING,
                                 logger="orion.benchmark.benchmark_client"):
                bm2 = get_or_create_benchmark(**config)
                bm2.close()

            assert bm2.configuration == bm1.configuration
            assert (
                "Benchmark with same name is found but has different configuration, "
                "which will be used for this creation." in caplog.text)

            caplog.clear()
            config = copy.deepcopy(benchmark_config_py)
            config["targets"][0]["task"] = [
                RosenBrock(26, dim=3), CarromTable(20)
            ]
            with caplog.at_level(logging.WARNING,
                                 logger="orion.benchmark.benchmark_client"):
                bm3 = get_or_create_benchmark(**config)
                bm3.close()

            assert bm3.configuration == bm1.configuration
            assert (
                "Benchmark with same name is found but has different configuration, "
                "which will be used for this creation." in caplog.text)
Example #2
    def test_create_with_invalid_targets(self, benchmark_config_py):
        """Test creation with invalid Task and Assessment"""
        with OrionState():

            with pytest.raises(AttributeError) as exc:
                config = copy.deepcopy(benchmark_config_py)
                config["targets"] = [{
                    "assess": [AverageResult(2)],
                    "task": [DummyTask]
                }]
                get_or_create_benchmark(**config).close()

            assert "type object '{}' has no attribute ".format(
                "DummyTask") in str(exc.value)

            with pytest.raises(AttributeError) as exc:
                config = copy.deepcopy(benchmark_config_py)
                config["targets"] = [{
                    "assess": [DummyAssess],
                    "task": [RosenBrock(25, dim=3)]
                }]
                get_or_create_benchmark(**config).close()

            assert "type object '{}' has no attribute ".format(
                "DummyAssess") in str(exc.value)
Example #3
def create_study_experiments(exp_config,
                             trial_config,
                             algorithms,
                             task_number,
                             max_trial,
                             n_workers=(1, )):
    gen_exps, gen_trials = generate_benchmark_experiments_trials(
        algorithms, exp_config, trial_config, task_number * len(n_workers),
        max_trial)

    from orion.client.experiment import ExperimentClient
    from orion.executor.joblib_backend import Joblib

    workers = []
    for _ in range(task_number):
        for worker in n_workers:
            for _ in range(len(algorithms)):
                workers.append(worker)
    with OrionState(experiments=gen_exps, trials=gen_trials):
        experiments = []
        experiments_info = []
        for i in range(task_number * len(n_workers) * len(algorithms)):
            experiment = experiment_builder.build(
                "experiment-name-{}".format(i))

            executor = Joblib(n_workers=workers[i], backend="threading")
            client = ExperimentClient(experiment, executor=executor)
            experiments.append(client)

        for index, exp in enumerate(experiments):
            experiments_info.append((int(index / task_number), exp))

        yield experiments_info
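
A hedged usage sketch, not from the source: consuming the generator above directly. `exp_config`, `trial_config`, and the algorithm list are illustrative placeholders; in practice the generator is usually wrapped as a pytest fixture.

study = create_study_experiments(exp_config, trial_config,
                                 algorithms=["random", "tpe"],
                                 task_number=2, max_trial=3,
                                 n_workers=(1, 2))
experiments_info = next(study)  # enter OrionState and build the clients
for group_index, experiment_client in experiments_info:
    assert experiment_client.name.startswith("experiment-name-")
study.close()  # exit the OrionState context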
Example #4
    def test_create_race_condition(self, benchmark_config, benchmark_config_py,
                                   monkeypatch, caplog):
        """Test creation in race condition"""
        with OrionState(benchmarks=benchmark_config):

            def insert_race_condition(*args, **kwargs):
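                # Pretend the benchmark is missing on the first fetch so that
                # registration is attempted and collides with the config
                # already in storage; later fetches return the stored config.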
                if insert_race_condition.count == 0:
                    data = {}
                else:
                    data = benchmark_config

                insert_race_condition.count += 1

                return data

            insert_race_condition.count = 0
            monkeypatch.setattr(benchmark_client, "_fetch_benchmark",
                                insert_race_condition)

            with caplog.at_level(logging.INFO,
                                 logger="orion.benchmark.benchmark_client"):
                bm = benchmark_client.get_or_create_benchmark(
                    **benchmark_config_py)
                bm.close()

            assert (
                "Benchmark registration failed. This is likely due to a race condition. "
                "Now rolling back and re-attempting building it."
                in caplog.text)
            assert insert_race_condition.count == 2

            del benchmark_config["_id"]

            assert bm.configuration == benchmark_config
            assert count_benchmarks() == 1
Example #5
    def test_create_with_deterministic_algorithm(self, benchmark_config_py):
        algorithms = [
            {
                "algorithm": {
                    "random": {
                        "seed": 1
                    }
                }
            },
            {
                "algorithm": {
                    "gridsearch": {
                        "n_values": 50
                    }
                },
                "deterministic": True
            },
        ]
        with OrionState():
            config = copy.deepcopy(benchmark_config_py)
            config["algorithms"] = algorithms
            bm = get_or_create_benchmark(**config)
            bm.close()

            for study in bm.studies:
                for status in study.status():
                    algo = status["algorithm"]
                    if algo == "gridsearch":
                        assert status["experiments"] == 1
                    else:
                        assert status[
                            "experiments"] == study.assessment.task_num
Example #6
def test_with_evc(algorithm):
    """Test a scenario where algos are warm-started with EVC."""

    with OrionState(storage={
            "type": "legacy",
            "database": {
                "type": "EphemeralDB"
            }
    }):
        base_exp = create_experiment(
            name="exp",
            space=space_with_fidelity,
            algorithms=algorithm_configs["random"],
        )
        base_exp.workon(rosenbrock, max_trials=10)

        exp = create_experiment(
            name="exp",
            space=space_with_fidelity,
            algorithms=algorithm,
            branching={"branch_from": "exp"},
        )

        assert exp.version == 2

        exp.workon(rosenbrock, max_trials=30)

        assert exp.configuration["algorithms"] == algorithm

        trials = exp.fetch_trials(with_evc_tree=False)
        assert len(trials) >= 30

        trials_with_evc = exp.fetch_trials(with_evc_tree=True)
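        # With the EVC tree enabled, the 10 trials completed in the parent
        # experiment before branching are also returned.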
        assert len(trials_with_evc) >= 40
        assert len(trials_with_evc) - len(trials) == 10

        completed_trials = [
            trial for trial in trials_with_evc if trial.status == "completed"
        ]
        assert len(completed_trials) == 40

        results = [trial.objective.value for trial in completed_trials]
        best_trial = min(completed_trials,
                         key=lambda trial: trial.objective.value)

        assert best_trial.objective.name == "objective"
        assert abs(best_trial.objective.value - 23.4) < 1e-5
        assert len(best_trial.params) == 2
        fidelity = best_trial._params[0]
        assert fidelity.name == "noise"
        assert fidelity.type == "fidelity"
        assert fidelity.value == 10
        param = best_trial._params[1]
        assert param.name == "x"
        assert param.type == "real"
Example #7
    def test_create_with_only_name(self):
        """Test creation with a non-existing benchmark name"""
        with OrionState():
            name = "bm00001"
            with pytest.raises(NoConfigurationError) as exc:
                get_or_create_benchmark(name).close()

            assert "Benchmark {} does not exist in DB".format(name) in str(
                exc.value)
Example #8
    def test_experiments_parallel(self, benchmark_config_py, monkeypatch):
        import multiprocessing

        class FakeFuture:
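            # Minimal stand-in for the future object returned by the
            # executor's submit(): always ready, returns a fixed result.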
            def __init__(self, value):
                self.value = value

            def wait(self, timeout=None):
                return

            def ready(self):
                return True

            def get(self, timeout=None):
                return self.value

            def successful(self):
                return True

        count = multiprocessing.Value("i", 0)
        is_done_value = multiprocessing.Value("i", 0)

        def is_done(self):
            return count.value > 0

        def submit(*args, c=count, **kwargs):
            # Each submit() increments the shared count, which makes is_done()
            # return True; with n_workers == 2 only 2 jobs get submitted before
            # the runner checks again and stops scheduling more.
            c.value += 1
            return FakeFuture([dict(name="v", type="objective", value=1)])

        with OrionState():
            config = copy.deepcopy(benchmark_config_py)

            with Joblib(n_workers=5, backend="threading") as executor:
                monkeypatch.setattr(ExperimentClient, "is_done",
                                    property(is_done))
                monkeypatch.setattr(executor, "submit", submit)

                config["executor"] = executor
                bm1 = get_or_create_benchmark(**config)
                client = bm1.studies[0].experiments_info[0][1]

                count.value = 0
                bm1.process(n_workers=2)
                assert count.value == 2
                assert executor.n_workers == 5
                assert orion.core.config.worker.n_workers != 2

                is_done.done = False
                count.value = 0
                bm1.process(n_workers=3)
                assert count.value == 3
                assert executor.n_workers == 5
                assert orion.core.config.worker.n_workers != 3
Example #9
    def test_create_benchmark_with_storage(self, benchmark_config_py):
        """Test benchmark instance has the storage configurations"""

        config = copy.deepcopy(benchmark_config_py)
        storage = {"type": "legacy", "database": {"type": "EphemeralDB"}}
        with OrionState(storage=storage):
            config["storage"] = storage
            bm = get_or_create_benchmark(**config)
            bm.close()

            assert bm.storage_config == config["storage"]
Example #10
    def test_create_benchmark(self, benchmark_config, benchmark_config_py):
        """Test creation with valid configuration"""
        with OrionState():
            bm1 = get_or_create_benchmark(**benchmark_config_py)
            bm1.close()

            bm2 = get_or_create_benchmark("bm00001")
            bm2.close()

            assert bm1.configuration == benchmark_config

            assert bm1.configuration == bm2.configuration
Example #11
    def test_create_with_not_exist_targets_parameters(self, benchmark_config):
        """Test creation with not existing assessment parameters"""

        benchmark_config["targets"][0]["assess"]["AverageResult"] = {
            "task_num": 2,
            "idontexist": 100,
        }

        with OrionState(benchmarks=benchmark_config):
            with pytest.raises(TypeError) as exc:
                get_or_create_benchmark(benchmark_config["name"])
            assert "__init__() got an unexpected keyword argument 'idontexist'" in str(
                exc.value)
Example #12
    def setup_global_config(self, tmp_path):
        """Set up a temporary yaml file for the global configuration"""
        with OrionState(storage=self.default_storage):
            conf_file = tmp_path / "config.yaml"
            conf_file.write_text(yaml.dump(self.config))
            conf_files = orion.core.DEF_CONFIG_FILES_PATHS
            orion.core.DEF_CONFIG_FILES_PATHS = [conf_file]
            orion.core.config = orion.core.build_config()
            try:
                yield conf_file
            finally:
                orion.core.DEF_CONFIG_FILES_PATHS = conf_files
                orion.core.config = orion.core.build_config()
Example #13
    def test_create_with_not_loaded_targets(self, benchmark_config):
        """Test creation with assessment or task does not exist or not loaded"""

        cfg_invalid_assess = copy.deepcopy(benchmark_config)
        cfg_invalid_assess["targets"][0]["assess"]["idontexist"] = {
            "task_num": 2
        }

        with OrionState(benchmarks=cfg_invalid_assess):
            with pytest.raises(NotImplementedError) as exc:
                get_or_create_benchmark(benchmark_config["name"]).close()
            assert "Could not find implementation of BenchmarkAssessment" in str(
                exc.value)

        cfg_invalid_task = copy.deepcopy(benchmark_config)
        cfg_invalid_task["targets"][0]["task"]["idontexist"] = {
            "max_trials": 2
        }

        with OrionState(benchmarks=cfg_invalid_task):
            with pytest.raises(NotImplementedError) as exc:
                get_or_create_benchmark(benchmark_config["name"])
            assert "Could not find implementation of BenchmarkTask" in str(
                exc.value)
Example #14
    def test_create_with_executor(self, benchmark_config, benchmark_config_py):

        with OrionState():
            config = copy.deepcopy(benchmark_config_py)
            bm1 = get_or_create_benchmark(**config)
            bm1.close()

            assert bm1.configuration == benchmark_config
            assert bm1.executor.n_workers == orion.core.config.worker.n_workers
            with Joblib(n_workers=2, backend="threading") as executor:
                config["executor"] = executor
                bm2 = get_or_create_benchmark(**config)

                assert bm2.configuration == benchmark_config
                assert bm2.executor.n_workers == executor.n_workers
                assert orion.core.config.worker.n_workers != 2
Example #15
def create_study_experiments(exp_config, trial_config, algorithms, task_number,
                             max_trial):
    gen_exps, gen_trials = generate_benchmark_experiments_trials(
        algorithms, exp_config, trial_config, task_number, max_trial)
    with OrionState(experiments=gen_exps, trials=gen_trials):
        experiments = []
        experiments_info = []
        for i in range(task_number * len(algorithms)):
            experiment = experiment_builder.build(
                "experiment-name-{}".format(i))
            experiments.append(experiment)

        for index, exp in enumerate(experiments):
            experiments_info.append((int(index / task_number), exp))

        yield experiments_info
Example #16
    def test_create_with_invalid_algorithms(self, benchmark_config_py):
        """Test creation with a not existed algorithm"""
        with OrionState():

            with pytest.raises(NotImplementedError) as exc:
                benchmark_config_py["algorithms"] = [{
                    "algorithm": {
                        "fake_algorithm": {
                            "seed": 1
                        }
                    }
                }]
                # Pass executor to close it properly
                with Joblib(n_workers=2, backend="threading") as executor:
                    get_or_create_benchmark(**benchmark_config_py,
                                            executor=executor)
            assert "Could not find implementation of BaseAlgorithm" in str(
                exc.value)
Example #17
def test_parallel_workers(algorithm):
    """Test parallel execution with joblib"""
    MAX_TRIALS = 30
    ASHA_UGLY_FIX = 10
    with OrionState() as cfg:  # Using PickledDB

        name = f"{list(algorithm.keys())[0]}_exp"

        exp = create_experiment(
            name=name,
            space=space_with_fidelity,
            algorithms=algorithm,
        )

        exp.workon(rosenbrock, max_trials=MAX_TRIALS, n_workers=2)

        assert exp.configuration["algorithms"] == algorithm

        trials = exp.fetch_trials()
        assert len(trials) >= MAX_TRIALS

        completed_trials = [
            trial for trial in trials if trial.status == "completed"
        ]
        assert MAX_TRIALS <= len(completed_trials) <= MAX_TRIALS + 2

        results = [trial.objective.value for trial in completed_trials]
        assert all(trial.objective is not None for trial in completed_trials)
        best_trial = min(completed_trials,
                         key=lambda trial: trial.objective.value)

        assert best_trial.objective.name == "objective"
        assert abs(best_trial.objective.value - 23.4) < 1e-5 + ASHA_UGLY_FIX
        assert len(best_trial.params) == 2
        fidelity = best_trial._params[0]
        assert fidelity.name == "noise"
        assert fidelity.type == "fidelity"
        assert fidelity.value + ASHA_UGLY_FIX >= 1
        param = best_trial._params[1]
        assert param.name == "x"
        assert param.type == "real"
Example #18
    def test_create_benchmark_no_storage(self, benchmark_config_py):
        """Test creation if storage is not configured"""
        name = "oopsie_forgot_a_storage"
        host = orion.core.config.storage.database.host

        with OrionState(storage=orion.core.config.storage.to_dict()) as cfg:
            # Reset the Storage and drop instances so that get_storage() would fail.
            cfg.cleanup()
            cfg.singletons = update_singletons()

            # Make sure storage must be instantiated during `get_or_create_benchmark()`
            with pytest.raises(SingletonNotInstantiatedError):
                get_storage()

            get_or_create_benchmark(**benchmark_config_py).close()

            storage = get_storage()

            assert isinstance(storage, Legacy)
            assert isinstance(storage._db, PickledDB)
            assert storage._db.host == host
Example #19
File: __init__.py  Project: lebrice/orion
def create_experiment(exp_config=None, trial_config=None, statuses=None):
    """Context manager for the creation of an ExperimentClient and storage init"""
    if exp_config is None:
        raise ValueError("Parameter 'exp_config' is missing")
    if trial_config is None:
        raise ValueError("Parameter 'trial_config' is missing")
    if statuses is None:
        statuses = ["new", "interrupted", "suspended", "reserved", "completed"]

    from orion.client.experiment import ExperimentClient

    with OrionState(
        experiments=[exp_config],
        trials=generate_trials(trial_config, statuses, exp_config),
    ) as cfg:
        experiment = experiment_builder.build(name=exp_config["name"])
        if cfg.trials:
            experiment._id = cfg.trials[0]["experiment"]
        client = ExperimentClient(experiment, Producer(experiment))
        yield cfg, experiment, client

    client.close()
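
A hedged usage sketch, not from the source, assuming the function above is wired up as a context manager (e.g. via contextlib.contextmanager, not shown here); `exp_config` and `trial_config` are placeholder configuration dicts.

def test_client_from_config(exp_config, trial_config):
    with create_experiment(exp_config, trial_config,
                           statuses=["completed"]) as (cfg, experiment, client):
        assert client.name == exp_config["name"]
        assert all(trial.status == "completed"
                   for trial in client.fetch_trials())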
Example #20
    def test_create_from_db_config(self, benchmark_config):
        """Test creation from an existing DB configuration"""
        with OrionState(benchmarks=copy.deepcopy(benchmark_config)):
            bm = get_or_create_benchmark(benchmark_config["name"])
            bm.close()
            assert bm.configuration == benchmark_config
Example #21
def test_with_evc(algorithm):
    """Test a scenario where algos are warm-started with EVC."""

    with OrionState(storage={
            "type": "legacy",
            "database": {
                "type": "PickledDB"
            }
    }):
        base_exp = create_experiment(
            name="exp",
            space=space_with_fidelity,
            algorithms=algorithm_configs["random"],
            max_trials=10,
        )
        base_exp.workon(rosenbrock, max_trials=10)

        exp = create_experiment(
            name="exp",
            space=space_with_fidelity,
            algorithms=algorithm,
            max_trials=30,
            branching={
                "branch_from": "exp",
                "enable": True
            },
        )

        assert exp.version == 2

        exp.workon(rosenbrock, max_trials=30)

        assert exp.configuration["algorithms"] == algorithm

        trials = exp.fetch_trials(with_evc_tree=False)

        # Some algo may not be able to suggest exactly 30 trials (ex: hyperband)
        assert len(trials) >= 20

        trials_with_evc = exp.fetch_trials(with_evc_tree=True)
        assert len(trials_with_evc) >= 30
        assert len(trials_with_evc) - len(trials) == 10

        completed_trials = [
            trial for trial in trials_with_evc if trial.status == "completed"
        ]
        assert len(completed_trials) == 30

        results = [trial.objective.value for trial in completed_trials]
        assert all(trial.objective is not None for trial in completed_trials)
        best_trial = min(completed_trials,
                         key=lambda trial: trial.objective.value)

        assert best_trial.objective.name == "objective"
        assert abs(best_trial.objective.value - 23.4) < 1e-5
        assert len(best_trial.params) == 2
        fidelity = best_trial._params[0]
        assert fidelity.name == "noise"
        assert fidelity.type == "fidelity"
        assert fidelity.value == 10
        param = best_trial._params[1]
        assert param.name == "x"
        assert param.type == "real"