def test_fetch_config_from_db_hit(new_config):
    """Verify that the config stored in the db is returned when the
    experiment exists under the queried name.
    """
    with OrionState(experiments=[new_config], trials=[]):
        db_config = experiment_builder.fetch_config_from_db(name="supernaekei")

    # Each persisted field must match the config used to seed the database.
    # (The original asserted "metadata" twice; the duplicate was removed.)
    for key in ("name", "refers", "metadata", "pool_size", "max_trials",
                "max_broken", "algorithms"):
        assert db_config[key] == new_config[key]
# Beispiel #2
    def test_reserve_trial_fail(self, storage):
        """Reserving yields nothing when no trial is in a reservable state."""
        seeded_trials = generate_trials(status=["completed", "reserved"])
        with OrionState(
                experiments=[base_experiment],
                trials=seeded_trials,
                storage=storage,
        ) as cfg:
            backend = cfg.storage()
            experiment = cfg.get_experiment("default_name", version=None)

            # Every seeded trial is completed or already reserved.
            assert backend.reserve_trial(experiment) is None
# Beispiel #3
    def test_fix_lost_trials(self):
        """Test that a running trial with an old heartbeat is set to interrupted."""
        stale_trial = copy.deepcopy(base_trial)
        stale_trial["status"] = "reserved"
        # Heartbeat 10 minutes in the past -> the trial should count as lost.
        stale_trial["heartbeat"] = (
            datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
        )
        with OrionState(trials=[stale_trial]) as cfg:
            exp = Experiment("supernaekei", mode="x")
            exp._id = cfg.trials[0]["experiment"]

            assert len(exp.fetch_trials_by_status("reserved")) == 1
            exp.fix_lost_trials()
            assert len(exp.fetch_trials_by_status("reserved")) == 0
# Beispiel #4
        def check_status_change(new_status):
            """Assert that setting a stale local status is rejected by storage."""
            with OrionState(
                experiments=[base_experiment], trials=generate_trials(), storage=storage
            ) as cfg:
                trial = get_storage().get_trial(cfg.get_trial(0))
                assert trial is not None, "Was not able to retrieve trial for test"
                assert trial.status != new_status

                # NOTE: the original also had `if trial.status == new_status:
                # return` here; the assert above makes that branch unreachable,
                # so the dead code was removed.

                with pytest.raises(FailedUpdate):
                    trial.status = new_status
                    get_storage().set_trial_status(trial, status=new_status)
# Beispiel #5
    def test_create_experiment_hit_no_config(self):
        """Test creating an existing experiment by specifying the name only."""
        with OrionState(experiments=[config]):
            experiment = create_experiment(config["name"])

            # The experiment is fully reconstructed from the stored config.
            assert experiment.name == config["name"]
            assert experiment.version == 1
            assert experiment.space.configuration == config["space"]
            assert experiment.algorithms.configuration == config["algorithms"]
            assert experiment.max_trials == config["max_trials"]
            assert experiment.max_broken == config["max_broken"]
            assert experiment.working_dir == config["working_dir"]
            strategy_config = experiment.producer["strategy"].configuration
            assert strategy_config == config["producer"]["strategy"]
# Beispiel #6
def test_fetch_non_completed_trials():
    """Fetch a list of the trials that are not completed

    trials.status in ['new', 'interrupted', 'suspended', 'reserved']
    """
    non_completed_stati = ["new", "interrupted", "suspended", "reserved"]
    stati = non_completed_stati + ["completed"]
    with OrionState(trials=generate_trials(stati)) as cfg:
        exp = Experiment("supernaekei", mode="x")
        # Point the experiment at the id shared by the generated trials.
        exp._id = cfg.trials[0]["experiment"]

        trials = exp.fetch_noncompleted_trials()
        # Exactly the four non-completed trials come back, one per status.
        assert len(trials) == 4
        assert set(trial.status for trial in trials) == set(non_completed_stati)
# Beispiel #7
    def test_create_with_not_exist_targets_parameters(self, benchmark_config):
        """Test creation with not existing assessment parameters"""
        # Inject a keyword that the AverageResult assessment does not accept.
        benchmark_config["targets"][0]["assess"]["AverageResult"] = {
            "task_num": 2,
            "idontexist": 100,
        }

        with OrionState(benchmarks=benchmark_config):
            with pytest.raises(TypeError) as exc:
                get_or_create_benchmark(benchmark_config["name"])
            expected = "__init__() got an unexpected keyword argument 'idontexist'"
            assert expected in str(exc.value)
# Beispiel #8
    def test_create_experiment_hit_no_branch(self, user_config):
        """Test creating an existing experiment by specifying all identical attributes."""
        with OrionState(experiments=[config]):
            experiment = create_experiment(**user_config)
            exp_config = experiment.configuration

            # Identical attributes -> no branching, version stays at 1.
            assert experiment.name == config["name"]
            assert experiment.version == 1
            for key in (
                "space", "max_trials", "max_broken", "working_dir", "algorithms"
            ):
                assert exp_config[key] == config[key]
# Beispiel #9
    def test_setup_studies(self, benchmark):
        """Test to setup studies for benchmark"""
        with OrionState():
            benchmark.setup_studies()

            expected = (
                "[Study(assessment=AverageResult, task=RosenBrock, algorithms=[random,tpe]), "
                "Study(assessment=AverageResult, task=CarromTable, algorithms=[random,tpe]), "
                "Study(assessment=AverageRank, task=RosenBrock, algorithms=[random,tpe]), "
                "Study(assessment=AverageRank, task=CarromTable, algorithms=[random,tpe])]"
            )
            # Two assessments x two tasks -> four studies, in definition order.
            assert str(benchmark.studies) == expected

        assert len(benchmark.studies) == 4
def test_get_from_args_hit(config_file, random_dt, new_config):
    """Try building experiment view when in db"""
    cmdargs = dict(name="supernaekei", config=config_file)

    with OrionState(experiments=[new_config], trials=[]):
        exp_view = experiment_builder.get_from_args(cmdargs)

    # The view mirrors the configuration stored in the database.
    assert exp_view._id == new_config["_id"]
    assert exp_view.name == new_config["name"]
    assert exp_view.metadata == new_config["metadata"]
    assert exp_view.max_trials == new_config["max_trials"]
    assert exp_view.max_broken == new_config["max_broken"]
    assert exp_view.configuration["refers"] == new_config["refers"]
    assert exp_view.algorithms.configuration == new_config["algorithms"]
# Beispiel #11
    def test_delete_algorithm_lock(self, storage):
        """Deleting an experiment's algorithm lock removes exactly one lock."""
        if storage and storage["type"] == "track":
            pytest.xfail("Track does not support algorithm lock yet.")

        with OrionState(experiments=generate_experiments(),
                        storage=storage) as cfg:
            backend = cfg.storage()
            uid = backend.fetch_experiments({})[0]["_id"]

            # One lock document deleted, and none left to look up afterwards.
            assert backend.delete_algorithm_lock(uid=uid) == 1
            assert backend.get_algorithm_lock_info(uid=uid) is None
    def test_working_dir_works_when_db_absent(self, database, new_config):
        """Check if working_dir is correctly when absent from the database."""
        with OrionState(experiments=[], trials=[]):
            # First build registers the experiment in the (empty) database.
            experiment_builder.build(**new_config)
            query = {"name": "supernaekei", "metadata.user": "******"}
            found_config = list(get_storage().fetch_experiments(query))[0]

            # Rebuilding from the stored config: working_dir defaults to "".
            exp = experiment_builder.build(**found_config)
            assert exp.working_dir == ""
# Beispiel #13
    def test_count_completed_trials(self, storage):
        """Test count completed trials"""
        with OrionState(
            experiments=[base_experiment], trials=generate_trials(), storage=storage
        ) as cfg:
            # Expected count taken straight from the seeded trial fixtures.
            expected = sum(
                1 for trial in cfg.trials if trial["status"] == "completed"
            )

            backend = cfg.storage()
            experiment = cfg.get_experiment("default_name", version=None)
            assert backend.count_completed_trials(experiment) == expected
# Beispiel #14
def test_experiment_stats():
    """Check that property stats is returning a proper summary of experiment's results."""
    NUM_COMPLETED = 3
    stati = ["completed"] * NUM_COMPLETED + ["reserved"] * 2
    with OrionState(trials=generate_trials(stati)) as cfg:
        exp = Experiment("supernaekei", mode="x")
        exp._id = cfg.trials[0]["experiment"]
        exp.metadata = {"datetime": datetime.datetime.utcnow()}

        stats = exp.stats
        assert stats.trials_completed == NUM_COMPLETED
        assert stats.best_trials_id == cfg.trials[3]["_id"]
        assert stats.best_evaluation == 0
        assert stats.start_time == exp.metadata["datetime"]
        assert stats.finish_time == cfg.trials[0]["end_time"]
        # Duration is simply the span between start and finish times.
        assert stats.duration == stats.finish_time - stats.start_time
# Beispiel #15
        def check_status_change(new_status):
            """Verify a trial's status is updated locally and in storage."""
            with OrionState(experiments=[base_experiment],
                            trials=generate_trials(),
                            storage=storage) as cfg:
                trial = get_storage().get_trial(cfg.get_trial(0))
                assert trial is not None, "was not able to retrieve trial for test"

                get_storage().set_trial_status(trial, status=new_status)
                assert trial.status == new_status, \
                    "Trial status should have been updated locally"

                # Re-fetch to confirm the change was persisted as well.
                refreshed = get_storage().get_trial(trial)
                assert refreshed.status == new_status, \
                    "Trial status should have been updated in the storage"
# Beispiel #16
    def test_strategy_defined_in_global_config(self, caplog, space,
                                               monkeypatch):
        """Verify there is a warning"""
        with monkeypatch.context() as ctx:
            # The deprecated global `strategy` option should be ignored,
            # with a warning emitted during build.
            ctx.setattr(
                orion.core.config.experiment,
                "strategy",
                {"this is deprecated": "and should be ignored"},
            )
            with OrionState():
                with caplog.at_level(logging.WARNING):
                    experiment_builder.build(name="whatever", space=space)
                assert "`strategy` option is not supported anymore." in caplog.text
# Beispiel #17
def test_load_unavailable_algo(algo_unavailable_config, capsys):
    """Loading keeps an unresolvable algorithm config as-is; building fails."""
    with OrionState(experiments=[algo_unavailable_config]):
        expected = algo_unavailable_config["algorithms"]
        # Both read and write mode keep the raw algorithm configuration.
        for mode in ("r", "w"):
            experiment = experiment_builder.load("supernaekei", mode=mode)
            assert experiment.algorithms == expected
            assert experiment.configuration["algorithms"] == expected

        # Building requires instantiating the algorithm, which must fail.
        with pytest.raises(NotImplementedError) as exc:
            experiment_builder.build("supernaekei")
        exc.match("Could not find implementation of BaseAlgorithm")
    def test_working_dir_is_correctly_set(self, new_config):
        """Check if working_dir is correctly changed."""
        with OrionState():
            new_config["working_dir"] = "./"
            # First build registers the experiment with the custom working_dir.
            experiment_builder.build(**new_config)
            query = {"name": "supernaekei", "metadata.user": "******"}
            found_config = list(get_storage().fetch_experiments(query))[0]

            # Rebuilding from the stored config keeps the custom working_dir.
            exp = experiment_builder.build(**found_config)
            assert exp.working_dir == "./"
def test_load_unavailable_strategy(strategy_unavailable_config, capsys):
    """Loading keeps an unresolvable strategy config as-is; building fails."""
    with OrionState(experiments=[strategy_unavailable_config]):
        expected = strategy_unavailable_config["producer"]
        # Both read and write mode keep the raw producer configuration.
        for mode in ("r", "w"):
            experiment = experiment_builder.load("supernaekei", mode=mode)
            assert experiment.producer == expected
            assert experiment.configuration["producer"] == expected

        # Building requires instantiating the strategy, which must fail.
        with pytest.raises(NotImplementedError) as exc:
            experiment_builder.build("supernaekei")
        exc.match("Could not find implementation of BaseParallelStrategy")
# Beispiel #20
    def test_create_with_invalid_algorithms(self, benchmark_config_py):
        """Test creation with a not existed algorithm"""
        with OrionState():
            # Request an algorithm name with no registered implementation.
            benchmark_config_py["algorithms"] = [
                {"algorithm": {"fake_algorithm": {"seed": 1}}}
            ]
            with pytest.raises(NotImplementedError) as exc:
                get_or_create_benchmark(**benchmark_config_py)
            assert "Could not find implementation of BaseAlgorithm" in str(exc.value)
# Beispiel #21
    def test_create_with_not_loaded_targets(self, benchmark_config):
        """Test creation with assessment or task does not exist or not loaded"""

        def expect_not_implemented(bad_config, message):
            # Creating a benchmark from a broken config must fail to resolve
            # the named assessment/task class.
            with OrionState(benchmarks=bad_config):
                with pytest.raises(NotImplementedError) as exc:
                    get_or_create_benchmark(benchmark_config["name"])
                assert message in str(exc.value)

        # Unknown assessment name.
        cfg_invalid_assess = copy.deepcopy(benchmark_config)
        cfg_invalid_assess["targets"][0]["assess"]["idontexist"] = {"task_num": 2}
        expect_not_implemented(
            cfg_invalid_assess, "Could not find implementation of BaseAssess"
        )

        # Unknown task name.
        cfg_invalid_task = copy.deepcopy(benchmark_config)
        cfg_invalid_task["targets"][0]["task"]["idontexist"] = {"max_trials": 2}
        expect_not_implemented(
            cfg_invalid_task, "Could not find implementation of BaseTask"
        )
# Beispiel #22
    def test_create_experiment_hit_branch(self):
        """Test creating a differing experiment that cause branching."""
        with OrionState(experiments=[config]):
            # A differing space triggers a branch: same name, bumped version.
            experiment = create_experiment(
                config["name"], space={"y": "uniform(0, 10)"}
            )

            assert experiment.name == config["name"]
            assert experiment.version == 2

            # Every other attribute is inherited from the parent config.
            assert experiment.algorithms.configuration == config["algorithms"]
            assert experiment.max_trials == config["max_trials"]
            assert experiment.max_broken == config["max_broken"]
            assert experiment.working_dir == config["working_dir"]
            strategy_config = experiment.producer["strategy"].configuration
            assert strategy_config == config["producer"]["strategy"]
# Beispiel #23
    def test_acquire_algorithm_lock_successful(self, storage):
        """State saved under the lock is visible to the next lock holder."""
        if storage and storage["type"] == "track":
            pytest.xfail("Track does not support algorithm lock yet.")

        with OrionState(experiments=[base_experiment], storage=storage) as cfg:
            backend = cfg.storage()
            experiment = cfg.get_experiment("default_name", version=None)

            # First acquisition: no state yet; save some.
            with backend.acquire_algorithm_lock(experiment, timeout=0.1) as locked:
                assert locked.state is None
                locked.set_state("my new state")

            # Second acquisition sees the state saved by the first.
            with backend.acquire_algorithm_lock(experiment) as locked:
                assert locked.state == "my new state"
# Beispiel #24
    def test_acquire_algorithm_lock_timeout(self, storage, mocker):
        """A second acquisition on a held lock must time out."""
        with OrionState(experiments=[base_experiment], storage=storage) as cfg:
            backend = cfg.storage()
            experiment = cfg.get_experiment("default_name", version=None)

            with backend.acquire_algorithm_lock(experiment):
                retry_interval = 0.2
                sleep_mock = mocker.spy(time, "sleep")
                # The lock is already held, so this acquisition must raise
                # after the 0.05s timeout.
                with pytest.raises(LockAcquisitionTimeout):
                    with backend.acquire_algorithm_lock(
                        experiment, timeout=0.05, retry_interval=retry_interval
                    ):
                        pass
# Beispiel #25
    def test_push_trial_results(self, storage=None):
        """Successfully push a completed trial into database."""
        reserved_trial = copy.deepcopy(base_trial)
        reserved_trial["status"] = "reserved"
        with OrionState(
            experiments=[], trials=[reserved_trial], storage=storage
        ) as cfg:
            backend = cfg.storage()
            trial = backend.get_trial(Trial(**reserved_trial))
            results = [Trial.Result(name="loss", type="objective", value=2)]
            trial.results = results

            assert backend.push_trial_results(trial), "should update successfully"
            # Reading the trial back must show the pushed results.
            assert backend.get_trial(trial).results == results
# Beispiel #26
    def test_acquire_algorithm_lock_with_different_config(self, new_config, algorithm):
        """Locking fails when the algorithm config drifted from the stored one."""
        with OrionState(experiments=[new_config]) as cfg:
            exp = Experiment("supernaekei", mode="x")
            exp._id = 0
            original_config = algorithm.configuration
            exp.algorithms = algorithm
            # Setting attribute to algorithm inside the wrapper
            algorithm.algorithm.seed = 10
            assert algorithm.configuration != original_config

            with pytest.raises(
                RuntimeError, match="Algorithm configuration changed since"
            ):
                with exp.acquire_algorithm_lock(timeout=0.2, retry_interval=0.1):
                    pass
# Beispiel #27
    def test_acquire_algorithm_lock_timeout(self, new_config, algorithm, mocker):
        """The lock request is forwarded to storage with the given timings."""
        with OrionState(experiments=[new_config]) as cfg:
            exp = Experiment("supernaekei", mode="x")
            exp._id = 0
            exp.algorithms = algorithm

            # Spy on the storage call made by the experiment-level lock.
            spy = mocker.spy(cfg.storage(), "acquire_algorithm_lock")

            with exp.acquire_algorithm_lock(timeout=0.2, retry_interval=0.1):
                pass

            spy.assert_called_with(experiment=exp, timeout=0.2, retry_interval=0.1)
# Beispiel #28
    def test_create_experiment(self, storage):
        """Test create experiment"""
        with OrionState(experiments=[], storage=storage) as cfg:
            backend = cfg.storage()
            backend.create_experiment(base_experiment)

            experiments = backend.fetch_experiments({})
            assert len(experiments) == 1, "Only one experiment in the database"
            assert base_experiment == experiments[0], \
                "Local experiment and DB should match"

            # Insert it again
            with pytest.raises(DuplicateKeyError):
                backend.create_experiment(base_experiment)
# Beispiel #29
def test_configurable_broken_property():
    """Check if max_broken changes after configuration."""
    MAX_BROKEN = 5

    stati = ["reserved"] * 10 + ["broken"] * MAX_BROKEN
    with OrionState(trials=generate_trials(stati)) as cfg:
        exp = Experiment("supernaekei", mode="x")
        exp._id = cfg.trials[0]["experiment"]

        # At the threshold the experiment reports itself broken.
        exp.max_broken = MAX_BROKEN
        assert exp.is_broken

        # Raising the threshold above the broken-trial count clears the flag.
        exp.max_broken += 1
        assert not exp.is_broken
# Beispiel #30
 def test_empty(self, space):
     """Test panda frame creation when there is no trials"""
     with OrionState():
         exp = Experiment("supernaekei", mode="x")
         exp.space = space
         # With no trials the frame has zero rows but still carries the seven
         # fixed columns plus "/index" — presumably one column per space
         # dimension; TODO confirm against the `space` fixture.
         assert exp.to_pandas().shape == (0, 8)
         assert list(exp.to_pandas().columns) == [
             "id",
             "experiment_id",
             "status",
             "suggested",
             "reserved",
             "completed",
             "objective",
             "/index",
         ]