Example #1
    def test_fetch_trials(self, storage):
        """Test fetch experiment trials"""
        with OrionState(experiments=[base_experiment],
                        trials=generate_trials(),
                        database=storage) as cfg:
            storage = cfg.storage()
            experiment = cfg.get_experiment('default_name',
                                            'default_user',
                                            version=None)

            trials1 = storage.fetch_trials(experiment=experiment)
            trials2 = storage.fetch_trials(uid=experiment._id)

            with pytest.raises(MissingArguments):
                storage.fetch_trials()

            with pytest.raises(AssertionError):
                storage.fetch_trials(experiment=experiment, uid='123')

            assert len(trials1) == len(cfg.trials), 'trial count should match'
            assert len(trials2) == len(cfg.trials), 'trial count should match'
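
Note on the contract exercised above: fetch_trials accepts the experiment selector either as the object (experiment=) or as its id (uid=), and exactly one of the two must be given. A minimal sketch of such a guard, with hypothetical helper names rather than Orion's actual implementation:

    def fetch_trials(experiment=None, uid=None):
        """Fetch all trials of an experiment, selected by object or by id."""
        if experiment is None and uid is None:
            # No selector at all: the caller must pass one of the two.
            raise MissingArguments('pass either `experiment` or `uid`')
        # Passing both is a programming error, hence a plain assertion.
        assert experiment is None or uid is None, 'pass only one of experiment/uid'
        uid = uid if uid is not None else experiment._id
        return _query_trials_by_experiment_id(uid)  # hypothetical storage lookup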
Example #2
    def test_good_set_before_init_no_hit(self, random_dt, new_config):
        """Trying to set, overwrite everything from input."""
        with OrionState(experiments=[], trials=[]):
            exp = experiment_builder.build(**new_config)
            found_config = list(get_storage().fetch_experiments({
                'name': 'supernaekei',
                'metadata.user': '******'
            }))

        new_config['metadata']['datetime'] = exp.metadata['datetime']

        assert len(found_config) == 1
        _id = found_config[0].pop('_id')
        assert _id != 'fasdfasfa'
        assert exp._id == _id
        new_config.pop('_id')
        new_config.pop('something_to_be_ignored')
        new_config['algorithms']['dumbalgo']['done'] = False
        new_config['algorithms']['dumbalgo']['judgement'] = None
        new_config['algorithms']['dumbalgo']['scoring'] = 0
        new_config['algorithms']['dumbalgo']['suspend'] = False
        new_config['algorithms']['dumbalgo']['value'] = 5
        new_config['algorithms']['dumbalgo']['seed'] = None
        new_config['refers'] = {
            'adapter': [],
            'parent_id': None,
            'root_id': _id
        }
        assert found_config[0] == new_config
        assert exp.name == new_config['name']
        assert exp.configuration['refers'] == new_config['refers']
        assert exp.metadata == new_config['metadata']
        assert exp.pool_size == new_config['pool_size']
        assert exp.max_trials == new_config['max_trials']
        assert exp.working_dir == new_config['working_dir']
        assert exp.version == new_config['version']
        assert exp.algorithms.configuration == new_config['algorithms']
Example #3
def test_build_from_args_hit(old_config_file, script_path, new_config):
    """Try building experiment when in db (no branch)"""
    cmdargs = {
        'name': 'supernaekei',
        'config': old_config_file,
        'user_args': [script_path, '--mini-batch~uniform(32, 256, discrete=True)']
    }

    with OrionState(experiments=[new_config], trials=[]):
        # Test that experiment already exists
        experiment_builder.build_view_from_args(cmdargs)

        exp = experiment_builder.build_from_args(cmdargs)

    assert exp._id == new_config['_id']
    assert exp.name == new_config['name']
    assert exp.version == 1
    assert exp.configuration['refers'] == new_config['refers']
    assert exp.metadata == new_config['metadata']
    assert exp.max_trials == new_config['max_trials']
    assert exp.algorithms.configuration == new_config['algorithms']
Example #4
def test_optimizer_actually_optimize(monkeypatch):
    """Check if Bayesian Optimizer has better optimization than random search."""
    monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
    best_random_search = 23.403275057472825

    with OrionState(experiments=[], trials=[]):

        orion.core.cli.main([
            "hunt",
            "--config",
            "./benchmark/meshadaptivedirectsearch.yaml",
            "./benchmark/rosenbrock.py",
            "--max-trials",
            "100",
            "-x~uniform(-50, 50)",
        ])

        exp = create_experiment(name="exp")

        objective = exp.stats["best_evaluation"]

        assert best_random_search > objective
Example #5
    def test_retrieve_result(self, storage, generated_result):
        """Test retrieve result"""
        results_file = tempfile.NamedTemporaryFile(mode='w',
                                                   prefix='results_',
                                                   suffix='.log',
                                                   dir='.',
                                                   delete=True)

        # Generate fake result
        with open(results_file.name, 'w') as file:
            json.dump([generated_result], file)
        # --
        with OrionState(experiments=[], trials=[], database=storage) as cfg:
            storage = cfg.storage()

            trial = Trial(**base_trial)
            trial = storage.retrieve_result(trial, results_file)

            results = trial.results

            assert len(results) == 1
            assert results[0].to_dict() == generated_result
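
The results file written above is the handshake between a worker script and the storage: a JSON list of result dicts that retrieve_result parses back into Trial.Result objects. A plausible single entry, assuming the usual name/type/value fields (the values here are made up):

    # Hypothetical example of one entry in the results log.
    generated_result = {'name': 'loss', 'type': 'objective', 'value': 2.0}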
Example #6
def test_is_done_property_with_pending(algorithm):
    """Check experiment stopping conditions when there is pending trials."""
    completed = ['completed'] * 10
    reserved = ['reserved'] * 5
    with OrionState(trials=generate_trials(completed + reserved)) as cfg:
        exp = Experiment('supernaekei')
        exp._id = cfg.trials[0]['experiment']

        exp.algorithms = algorithm
        exp.max_trials = 10

        assert exp.is_done

        exp.max_trials = 15

        # There are only 10 completed trials
        assert not exp.is_done

        exp.algorithms.algorithm.done = True

        # Algorithm is done but 5 trials are pending
        assert not exp.is_done
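
The stopping condition exercised here (and in Example #8 below) combines two criteria: enough completed trials, or a converged algorithm with nothing left pending. A rough sketch of that predicate, under assumed semantics rather than Orion's actual code:

    def is_done(num_completed, max_trials, algo_done, num_pending):
        # Done when the trial budget is exhausted, or when the algorithm
        # reports convergence and no trial is still reserved/running.
        return num_completed >= max_trials or (algo_done and num_pending == 0)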
Example #7
def test_optimizer_actually_optimize(monkeypatch):
    """Check if Bayesian Optimizer has better optimization than random search."""
    monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
    best_random_search = 23.403275057472825

    with OrionState(experiments=[], trials=[]):

        orion.core.cli.main([
            "hunt", "--name", "exp", "--max-trials", "20", "--config",
            "./benchmark/hyperband.yaml", "./benchmark/rosenbrock.py",
            "-x~uniform(-50, 50)"
        ])

        with open("./benchmark/hyperband.yaml", "r") as f:
            exp = ExperimentBuilder().build_view_from({
                'name': 'exp',
                'config': f
            })

        objective = exp.stats['best_evaluation']

        assert best_random_search > objective
Example #8
def test_view_is_done_property_no_pending(algorithm):
    """Check experiment stopping conditions from view when there is no pending trials."""
    completed = ['completed'] * 10
    broken = ['broken'] * 5
    with OrionState(trials=generate_trials(completed + broken)) as cfg:
        exp = Experiment('supernaekei')
        exp._id = cfg.trials[0]['experiment']
        exp.algorithms = algorithm
        exp.max_trials = 100

        exp_view = ExperimentView(exp)

        exp.max_trials = 15

        # There are only 10 completed trials and the algorithm is not done.
        assert not exp_view.is_done

        exp.algorithms.algorithm.done = True

        # Algorithm is done and no pending trials
        assert exp_view.is_done
Example #9
    def test_fix_lost_trials_configurable_hb(self):
        """Test that heartbeat is correctly being configured."""
        trial = copy.deepcopy(base_trial)
        trial['status'] = 'reserved'
        trial['heartbeat'] = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=180)
        with OrionState(trials=[trial]) as cfg:
            exp = Experiment('supernaekei')
            exp._id = cfg.trials[0]['experiment']

            assert len(exp.fetch_trials_by_status('reserved')) == 1

            orion.core.config.worker.heartbeat = 360

            exp.fix_lost_trials()

            assert len(exp.fetch_trials_by_status('reserved')) == 1

            orion.core.config.worker.heartbeat = 180

            exp.fix_lost_trials()

            assert len(exp.fetch_trials_by_status('reserved')) == 0
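
The knob under test is the staleness threshold: a reserved trial whose heartbeat is older than orion.core.config.worker.heartbeat seconds is considered lost and released. A minimal sketch of that check, under assumed semantics:

    import datetime

    def is_lost(last_heartbeat, threshold_seconds):
        # A reserved trial is "lost" once its worker has not refreshed the
        # heartbeat for longer than the configured threshold.
        age = datetime.datetime.utcnow() - last_heartbeat
        return age.total_seconds() >= threshold_seconds

With the 180-second-old heartbeat above, a 360-second threshold leaves the trial reserved while a 180-second threshold releases it, which is exactly what the assertions check.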
Example #10
    def test_good_set_before_init_hit_no_diffs_exc_max_trials(
            self, new_config):
        """Trying to set, and NO differences were found from the config pulled from db.

        Everything is normal, nothing changes. Experiment is resumed,
        perhaps with more trials to evaluate (an exception is 'max_trials').
        """
        with OrionState(experiments=[new_config], trials=[]):

            new_config['max_trials'] = 5000

            exp = experiment_builder.build(**new_config)

        # Deliver an external configuration to finalize init
        new_config['algorithms']['dumbalgo']['done'] = False
        new_config['algorithms']['dumbalgo']['judgement'] = None
        new_config['algorithms']['dumbalgo']['scoring'] = 0
        new_config['algorithms']['dumbalgo']['suspend'] = False
        new_config['algorithms']['dumbalgo']['value'] = 5
        new_config['algorithms']['dumbalgo']['seed'] = None
        new_config['producer']['strategy'] = "NoParallelStrategy"
        new_config.pop('something_to_be_ignored')
        assert exp.configuration == new_config
Example #11
    def test_fix_only_lost_trials(self):
        """Test that an old trial is set to interrupted but not a recent one."""
        lost_trial, running_trial = generate_trials(['reserved'] * 2)
        lost_trial['heartbeat'] = (datetime.datetime.utcnow()
                                   - datetime.timedelta(seconds=360))
        running_trial['heartbeat'] = datetime.datetime.utcnow()

        with OrionState(trials=[lost_trial, running_trial]) as cfg:
            exp = Experiment('supernaekei')
            exp._id = cfg.trials[0]['experiment']

            assert len(exp.fetch_trials_by_status('reserved')) == 2

            exp.fix_lost_trials()

            reserved_trials = exp.fetch_trials_by_status('reserved')
            assert len(reserved_trials) == 1
            assert reserved_trials[0].to_dict()['params'] == running_trial['params']

            failedover_trials = exp.fetch_trials_by_status('interrupted')
            assert len(failedover_trials) == 1
            assert failedover_trials[0].to_dict()['params'] == lost_trial['params']
Example #12
    def test_try_set_after_race_condition(self, new_config, monkeypatch):
        """Cannot set a configuration after init if it looses a race
        condition.

        The experiment from process which first writes to db is initialized
        properly. The experiment which looses the race condition cannot be
        initialized and needs to be rebuilt.
        """
        with OrionState(experiments=[new_config], trials=[]):
            experiment_count_before = count_experiments()

            def insert_race_condition(*args, **kwargs):
                if insert_race_condition.count == 0:
                    data = {}
                else:
                    data = new_config

                insert_race_condition.count += 1

                return data

            insert_race_condition.count = 0

            monkeypatch.setattr(experiment_builder, 'fetch_config_from_db',
                                insert_race_condition)

            experiment_builder.build(**new_config)

            assert experiment_count_before == count_experiments()

        # Should be called
        # - once in build(),
        #     -> then register fails,
        # - then called once again in build,
        # - then called in build_view to evaluate the conflicts
        assert insert_race_condition.count == 3
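
The call count asserted above reflects a read/register/re-read pattern: build() first reads the config, tries to register a fresh experiment when nothing is found, and on DuplicateKeyError (the race was lost) falls back to reading again and resuming. A schematic of that flow with hypothetical helpers, not Orion's actual code:

    def build_handling_race(name, **config):
        db_config = fetch_config_from_db(name=name)  # first read
        if not db_config:
            try:
                return register_new_experiment(name, **config)
            except DuplicateKeyError:
                # Lost the race: another process registered first, so
                # re-read and resume from the winner's configuration.
                db_config = fetch_config_from_db(name=name)  # second read
        # Resuming may read once more to evaluate conflicts (third read).
        return resume_experiment(db_config, **config)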
Example #13
    def test_race_condition_wout_version(self, monkeypatch):
        """Test that an experiment loosing the race condition during version increment raises
        RaceCondition if version number was not specified.
        """
        name = 'parent'
        space = {'x': 'uniform(0,10)'}

        with OrionState(experiments=[], trials=[]):
            parent = experiment_builder.build(name, space=space)
            child = experiment_builder.build(name=name,
                                             space={'x': 'loguniform(1,10)'})
            assert child.name == parent.name
            assert parent.version == 1
            assert child.version == 2

            # Either
            # 1.
            #     fetch_config_from_db only fetches the parent
            #     test_version finds the other child
            #     -> Detect race condition by looking at conflicts
            # 2.
            #     fetch_config_from_db only fetches the parent
            #     test_version does not find the other child
            #     -> DuplicateKeyError

            def insert_race_condition_1(self, query):
                is_auto_version_query = (query == {
                    'name': name,
                    'refers.parent_id': parent.id
                })
                if is_auto_version_query:
                    data = [child.configuration]
                # First time the query returns no other child
                elif insert_race_condition_1.count < 1:
                    data = [parent.configuration]
                else:
                    data = [parent.configuration, child.configuration]

                insert_race_condition_1.count += int(is_auto_version_query)

                return data

            insert_race_condition_1.count = 0

            monkeypatch.setattr(get_storage().__class__, 'fetch_experiments',
                                insert_race_condition_1)

            with pytest.raises(RaceCondition) as exc_info:
                experiment_builder.build(name=name,
                                         space={'x': 'loguniform(1,10)'})
            assert 'There was likely a race condition during version' in str(
                exc_info.value)

            def insert_race_condition_2(self, query):
                is_auto_version_query = (query == {
                    'name': name,
                    'refers.parent_id': parent.id
                })
                # First time the query returns no other child
                if is_auto_version_query:
                    data = []
                elif insert_race_condition_2.count < 1:
                    data = [parent.configuration]
                else:
                    data = [parent.configuration, child.configuration]

                insert_race_condition_2.count += int(is_auto_version_query)

                return data

            insert_race_condition_2.count = 0

            monkeypatch.setattr(get_storage().__class__, 'fetch_experiments',
                                insert_race_condition_2)

            with pytest.raises(RaceCondition) as exc_info:
                experiment_builder.build(name=name,
                                         space={'x': 'loguniform(1,10)'})
            assert 'There was a race condition during branching.' in str(
                exc_info.value)
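
Both mocked scenarios hinge on the auto-versioning query visible above: to pick a child's version, the builder fetches experiments matching {'name': name, 'refers.parent_id': parent.id} and goes one past the versions it finds. A sketch of that selection, under assumed semantics:

    def next_version(fetch_experiments, name, parent):
        # Hypothetical sketch: look up existing children of the parent and
        # take the next free version number.
        children = fetch_experiments({'name': name,
                                      'refers.parent_id': parent.id})
        taken = [parent.version] + [child['version'] for child in children]
        return max(taken) + 1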
Example #14
    def test_reserve_none(self):
        """Find nothing, return None."""
        with OrionState(experiments=[], trials=[]):
            exp = Experiment('supernaekei')
            trial = exp.reserve_trial()
            assert trial is None
Example #15
    def test_register_lie(self, storage):
        """Test register lie"""
        with OrionState(experiments=[base_experiment], storage=storage) as cfg:
            storage = cfg.storage()
            storage.register_lie(Trial(**base_trial))
Example #16
    def test_race_condition_w_version(self, monkeypatch):
        """Test that an experiment loosing the race condition during version increment cannot
        be resolved automatically if a version number was specified.

        Note that if we would raise RaceCondition, the conflict would still occur since
        the version number fetched will not be the new one from the resolution but the requested
        one. Therefore raising and handling RaceCondition would lead to infinite recursion in
        the experiment builder.
        """
        name = 'parent'
        space = {'x': 'uniform(0,10)'}

        with OrionState(experiments=[], trials=[]):
            parent = experiment_builder.build(name, space=space)
            child = experiment_builder.build(name=name,
                                             space={'x': 'loguniform(1,10)'})
            assert child.name == parent.name
            assert parent.version == 1
            assert child.version == 2

            # Either
            # 1.
            #     fetch_config_from_db only fetches the parent
            #     test_version finds the other child
            #     -> Detect race condition by looking at conflicts
            # 2.
            #     fetch_config_from_db only fetches the parent
            #     test_version does not find the other child
            #     -> DuplicateKeyError

            def insert_race_condition_1(self, query):
                is_auto_version_query = (query == {
                    'name': name,
                    'refers.parent_id': parent.id
                })
                if is_auto_version_query:
                    data = [child.configuration]
                # First time the query returns no other child
                elif insert_race_condition_1.count < 1:
                    data = [parent.configuration]
                else:
                    data = [parent.configuration, child.configuration]

                insert_race_condition_1.count += int(is_auto_version_query)

                return data

            insert_race_condition_1.count = 0

            monkeypatch.setattr(get_storage().__class__, 'fetch_experiments',
                                insert_race_condition_1)

            with pytest.raises(BranchingEvent) as exc_info:
                experiment_builder.build(name=name,
                                         version=1,
                                         space={'x': 'loguniform(1,10)'})
            assert 'Configuration is different and generates' in str(
                exc_info.value)

            def insert_race_condition_2(self, query):
                is_auto_version_query = (query == {
                    'name': name,
                    'refers.parent_id': parent.id
                })
                # First time the query returns no other child
                if is_auto_version_query:
                    data = []
                elif insert_race_condition_2.count < 1:
                    data = [parent.configuration]
                else:
                    data = [parent.configuration, child.configuration]

                insert_race_condition_2.count += int(is_auto_version_query)

                return data

            insert_race_condition_2.count = 0

            monkeypatch.setattr(get_storage().__class__, 'fetch_experiments',
                                insert_race_condition_2)

            with pytest.raises(RaceCondition) as exc_info:
                experiment_builder.build(name=name,
                                         version=1,
                                         space={'x': 'loguniform(1,10)'})
            assert 'There was a race condition during branching.' in str(
                exc_info.value)
Example #17
def test_fetch_config_from_db_no_hit():
    """Verify that fetch_config_from_db returns an empty dict when the experiment is not in db"""
    with OrionState(experiments=[], trials=[]):
        db_config = experiment_builder.fetch_config_from_db(name='supernaekei')

    assert db_config == {}
Example #18
    def test_old_experiment_wout_version(self, parent_version_config):
        """Create an already existing experiment without a version."""
        with OrionState(experiments=[parent_version_config]):
            exp = experiment_builder.build(name=parent_version_config["name"])

        assert exp.version == 1
Example #19
def test_build_no_commandline_config():
    """Try building experiment with no commandline configuration."""
    with OrionState(experiments=[], trials=[]):
        with pytest.raises(NoConfigurationError):
            experiment_builder.build('supernaekei')