Example no. 1
    def build_from_config(self, config):
        """Build a fully configured (and writable) experiment based on full configuration.

        .. seealso::

            `orion.core.io.experiment_builder` for more information on the hierarchy of
            configurations.

            :class:`orion.core.worker.experiment.Experiment` for more information on the experiment
            object.
        """
        log.info(config)

        # Pop out configuration concerning databases and resources
        config.pop('database', None)
        config.pop('resources', None)

        experiment = Experiment(config['name'], config.get('user', None))

        # Finish experiment's configuration and write it to database.
        try:
            experiment.configure(config)
        except AttributeError as ex:
            if 'user_script' not in config['metadata']:
                raise NoConfigurationError from ex

        return experiment
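
A hypothetical sketch of the kind of `config` dict this first variant consumes. Only the keys actually read above are shown, and every value is an illustrative assumption rather than a real Orion configuration:

# Hypothetical config for the variant above; all values are illustrative only.
config = {
    'name': 'supernaekei',                        # passed to Experiment(...)
    'user': 'tsirif',                             # optional, defaults to None
    'metadata': {'user_script': 'black_box.py'},  # a missing 'user_script' turns an
                                                  # AttributeError from configure()
                                                  # into NoConfigurationError
    'database': {},                               # popped out before configure()
    'resources': {},                              # popped out before configure()
}
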
Example no. 2
    def build_from_config(self, config):
        """Build a fully configured (and writable) experiment based on full configuration.

        .. seealso::

            `orion.core.io.experiment_builder` for more information on the hierarchy of
            configurations.

            :class:`orion.core.worker.experiment.Experiment` for more information on the experiment
            object.
        """
        log.info(config)

        # Pop out configuration concerning databases and resources
        config.pop('database', None)
        config.pop('resources', None)

        experiment = Experiment(config['name'], config.get('user', None),
                                config.get('version', None))

        # TODO: Handle both from cmdline and python APIs.
        if 'priors' not in config['metadata'] and 'user_args' not in config['metadata']:
            raise NoConfigurationError

        # Parse to generate priors
        if 'user_args' in config['metadata']:
            parser = OrionCmdlineParser(orion.core.config.user_script_config)
            parser.parse(config['metadata']['user_args'])
            config['metadata']['parser'] = parser.get_state_dict()
            config['metadata']['priors'] = dict(parser.priors)

        # Finish experiment's configuration and write it to database.
        experiment.configure(config)

        return experiment
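
The second variant additionally takes a `version` and derives priors from `user_args`. A minimal sketch of that argument syntax, under the assumption (consistent with the workon test later in this section) that the text after `~` in an argument such as `-x~uniform(-50, 50)` encodes the prior distribution of parameter `x`; the real parsing is done by OrionCmdlineParser, which also stores its state in config['metadata']['parser']:

# Assumed reading of the '-x~uniform(-50, 50)' syntax; illustration only.
user_args = ['-x~uniform(-50, 50)']
name, prior = user_args[0].lstrip('-').split('~')
print(name)   # 'x'
print(prior)  # 'uniform(-50, 50)'
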
Example no. 3
 def test_good_set_before_init_no_hit(self, random_dt, database,
                                      new_config):
     """Trying to set, overwrite everything from input."""
     exp = Experiment(new_config['name'])
     exp.configure(new_config)
     assert exp._init_done is True
     found_config = list(
         database.experiments.find({
             'name': 'supernaekei',
             'metadata.user': '******'
         }))
     assert len(found_config) == 1
     _id = found_config[0].pop('_id')
     assert _id != 'fasdfasfa'
     assert exp._id == _id
     new_config['refers'] = None
     new_config['status'] = 'pending'
     new_config.pop('_id')
     new_config.pop('something_to_be_ignored')
     new_config['algorithms']['dumbalgo']['done'] = False
     new_config['algorithms']['dumbalgo']['judgement'] = None
     new_config['algorithms']['dumbalgo']['scoring'] = 0
     new_config['algorithms']['dumbalgo']['suspend'] = False
     new_config['algorithms']['dumbalgo']['value'] = 5
     assert found_config[0] == new_config
     assert exp.name == new_config['name']
     assert exp.refers is None
     assert exp.metadata == new_config['metadata']
     assert exp.pool_size == new_config['pool_size']
     assert exp.max_trials == new_config['max_trials']
     assert exp.status == new_config['status']
Example no. 4
def test_register_trials(random_dt):
    """Register a list of newly proposed trials/parameters."""
    with OrionState():
        exp = Experiment('supernaekei')
        exp._id = 0

        trials = [
            Trial(params=[{
                'name': 'a',
                'type': 'integer',
                'value': 5
            }]),
            Trial(params=[{
                'name': 'b',
                'type': 'integer',
                'value': 6
            }]),
        ]
        for trial in trials:
            exp.register_trial(trial)

        yo = list(
            map(lambda trial: trial.to_dict(),
                get_storage().fetch_trials(exp)))
        assert len(yo) == len(trials)
        assert yo[0]['params'] == list(
            map(lambda x: x.to_dict(), trials[0]._params))
        assert yo[1]['params'] == list(
            map(lambda x: x.to_dict(), trials[1]._params))
        assert yo[0]['status'] == 'new'
        assert yo[1]['status'] == 'new'
        assert yo[0]['submit_time'] == random_dt
        assert yo[1]['submit_time'] == random_dt
Example no. 5
def hacked_exp(with_user_dendi, random_dt, clean_db, create_db_instance):
    """Return an `Experiment` instance with hacked _id to find trials in
    fake database.
    """
    exp = Experiment('supernaedo2-dendi')
    exp._id = 'supernaedo2-dendi'  # white box hack
    return exp
Example no. 6
def test_register_trials(random_dt):
    """Register a list of newly proposed trials/parameters."""
    with OrionState():
        exp = Experiment("supernaekei", mode="x")
        exp._id = 0

        trials = [
            Trial(params=[{
                "name": "a",
                "type": "integer",
                "value": 5
            }]),
            Trial(params=[{
                "name": "b",
                "type": "integer",
                "value": 6
            }]),
        ]
        for trial in trials:
            exp.register_trial(trial)

        yo = list(
            map(lambda trial: trial.to_dict(),
                get_storage().fetch_trials(exp)))
        assert len(yo) == len(trials)
        assert yo[0]["params"] == list(
            map(lambda x: x.to_dict(), trials[0]._params))
        assert yo[1]["params"] == list(
            map(lambda x: x.to_dict(), trials[1]._params))
        assert yo[0]["status"] == "new"
        assert yo[1]["status"] == "new"
        assert yo[0]["submit_time"] == random_dt
        assert yo[1]["submit_time"] == random_dt
Example no. 7
def test_workon(database):
    """Test scenario having a configured experiment already setup."""
    try:
        Database(of_type='MongoDB',
                 name='orion_test',
                 username='******',
                 password='******')
    except (TypeError, ValueError):
        pass
    experiment = Experiment('voila_voici')
    config = experiment.configuration
    config['algorithms'] = {'gradient_descent': {'learning_rate': 0.1}}
    config['pool_size'] = 1
    config['max_trials'] = 100
    config['metadata']['user_script'] = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "black_box.py"))
    config['metadata']['user_args'] = ["-x~uniform(-50, 50)"]
    experiment.configure(config)

    workon(experiment)

    exp = list(database.experiments.find({'name': 'voila_voici'}))
    assert len(exp) == 1
    exp = exp[0]
    assert '_id' in exp
    exp_id = exp['_id']
    assert exp['name'] == 'voila_voici'
    assert exp['pool_size'] == 1
    assert exp['max_trials'] == 100
    assert exp['algorithms'] == {
        'gradient_descent': {
            'learning_rate': 0.1,
            'dx_tolerance': 1e-7
        }
    }
    assert 'user' in exp['metadata']
    assert 'datetime' in exp['metadata']
    assert 'user_script' in exp['metadata']
    assert os.path.isabs(exp['metadata']['user_script'])
    assert exp['metadata']['user_args'] == ['-x~uniform(-50, 50)']

    trials = list(database.trials.find({'experiment': exp_id}))
    assert len(trials) <= 15
    trials = list(sorted(trials, key=lambda trial: trial['submit_time']))
    assert trials[-1]['status'] == 'completed'
    for result in trials[-1]['results']:
        assert result['type'] != 'constraint'
        if result['type'] == 'objective':
            assert abs(result['value'] - 23.4) < 1e-6
            assert result['name'] == 'example_objective'
        elif result['type'] == 'gradient':
            res = numpy.asarray(result['value'])
            assert 0.1 * numpy.sqrt(res.dot(res)) < 1e-7
            assert result['name'] == 'example_gradient'
    params = trials[-1]['params']
    assert len(params) == 1
    assert params[0]['name'] == '/x'
    assert params[0]['type'] == 'real'
    assert abs(params[0]['value'] - 34.56789) < 1e-5
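
The objective and gradient values asserted above are consistent with a simple quadratic black box. A hypothetical stand-in (not the actual black_box.py used by the test) that would satisfy those assertions:

# Hypothetical black box: minimum value 23.4 reached at x = 34.56789, so gradient
# descent with learning_rate=0.1 stops once 0.1 * |gradient| < dx_tolerance=1e-7.
def example_objective(x):
    return (x - 34.56789) ** 2 + 23.4

def example_gradient(x):
    return [2.0 * (x - 34.56789)]
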
Example no. 8
def test_fetch_all_trials():
    """Fetch a list of all trials"""
    with OrionState(trials=generate_trials(["new", "reserved", "completed"])) as cfg:
        exp = Experiment("supernaekei", mode="x")
        exp._id = cfg.trials[0]["experiment"]

        trials = list(map(lambda trial: trial.to_dict(), exp.fetch_trials({})))
        assert trials == cfg.trials
Example no. 9
 def test_inconsistent_2_set_before_init_no_hit(self, random_dt,
                                                new_config):
     """Test inconsistent configuration because of user."""
     exp = Experiment(new_config['name'])
     new_config['metadata']['user'] = '******'
     with pytest.raises(ValueError) as exc_info:
         exp.configure(new_config)
     assert 'inconsistent' in str(exc_info.value)
Example no. 10
def test_fetch_all_trials():
    """Fetch a list of all trials"""
    with OrionState(
            trials=generate_trials(['new', 'reserved', 'completed'])) as cfg:
        exp = Experiment('supernaekei')
        exp._id = cfg.trials[0]['experiment']

        trials = list(map(lambda trial: trial.to_dict(), exp.fetch_trials({})))
        assert trials == cfg.trials
Example no. 11
 def test_reserve_when_exhausted(self):
     """Return None once all the trials have been allocated"""
     stati = ["new", "reserved", "interrupted", "completed", "broken"]
     with OrionState(trials=generate_trials(stati)) as cfg:
         exp = Experiment("supernaekei", mode="x")
         exp._id = cfg.trials[0]["experiment"]
         assert exp.reserve_trial() is not None
         assert exp.reserve_trial() is not None
         assert exp.reserve_trial() is None
Example no. 12
 def test_reserve_when_exhausted(self):
     """Return None once all the trials have been allocated"""
     stati = ['new', 'reserved', 'interrupted', 'completed', 'broken']
     with OrionState(trials=generate_trials(stati)) as cfg:
         exp = Experiment('supernaekei')
         exp._id = cfg.trials[0]['experiment']
         assert exp.reserve_trial() is not None
         assert exp.reserve_trial() is not None
         assert exp.reserve_trial() is None
Example no. 13
def create_experiment(exp_name, expconfig, cmdconfig, cmdargs):
    """Create an experiment based on configuration.

    Configuration is a combination of command line, experiment configuration
    file, experiment configuration in database and orion configuration files.

    Precedence of configurations is:
    `cmdargs` > `cmdconfig` > `dbconfig` > `expconfig`

    This means `expconfig` values would be overwritten by `dbconfig` and so on.

    Parameters
    ----------
    exp_name: str
        Name of the experiment
    expconfig: dict
        Configuration coming from default configuration files.
    cmdconfig: dict
        Configuration coming from configuration file.
    cmdargs: dict
        Configuration coming from command line arguments.

    """
    # Initialize experiment object.
    # Check for existing name and fetch configuration.
    experiment = Experiment(exp_name)
    dbconfig = experiment.configuration

    log.debug("DB config")
    log.debug(dbconfig)

    expconfig = resolve_config.merge_orion_config(expconfig, dbconfig,
                                                  cmdconfig, cmdargs)
    # Infer the remaining information about the process + versioning
    expconfig['metadata'] = infer_versioning_metadata(expconfig['metadata'])

    # Pop out configuration concerning databases and resources
    expconfig.pop('database', None)
    expconfig.pop('resources', None)
    expconfig.pop('status', None)

    log.info(expconfig)

    # Finish experiment's configuration and write it to database.
    try:
        experiment.configure(expconfig)
    except DuplicateKeyError:
        # Fails if concurrent experiment with identical (name, metadata.user)
        # is written first in the database.
        # Next infer_experiment() should either load experiment from database
        # and run smoothly if identical or trigger an experiment fork.
        # In other words, there should not be more than 1 level of recursion.
        experiment = create_experiment(exp_name, expconfig, cmdconfig, cmdargs)

    return experiment
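
The precedence documented in the docstring (`cmdargs` > `cmdconfig` > `dbconfig` > `expconfig`) amounts to a right-biased merge. A plain sketch of that idea only, not the actual resolve_config.merge_orion_config (which also merges nested sections):

# Illustration only: later dicts win, so the lowest-precedence config goes first.
def merge(*configs):
    merged = {}
    for config in configs:
        merged.update(config)  # shallow merge; the real helper handles nesting
    return merged

expconfig = {'pool_size': 10, 'max_trials': 100}
dbconfig = {'pool_size': 2}
cmdconfig = {'max_trials': 1000}
cmdargs = {'name': 'supernaedo2'}
print(merge(expconfig, dbconfig, cmdconfig, cmdargs))
# {'pool_size': 2, 'max_trials': 1000, 'name': 'supernaedo2'}
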
Example no. 14
 def test_reserve_none(self):
     """Find nothing, return None."""
     try:
         Database(of_type='MongoDB',
                  name='orion_test',
                  username='******',
                  password='******')
     except (TypeError, ValueError):
         pass
     exp = Experiment('supernaekei')
     trial = exp.reserve_trial()
     assert trial is None
Example no. 15
def test_fetch_completed_trials_from_view():
    """Fetch a list of the unseen yet completed trials."""
    non_completed_stati = ['new', 'interrupted', 'suspended', 'reserved']
    stati = non_completed_stati + ['completed']
    with OrionState(trials=generate_trials(stati)) as cfg:
        exp = Experiment('supernaekei')
        exp._id = cfg.trials[0]['experiment']
        exp_view = ExperimentView(exp)

        trials = exp_view.fetch_trials_by_status('completed')
        assert len(trials) == 1
        assert trials[0].status == 'completed'
Example no. 16
def test_fetch_non_completed_trials():
    """Fetch a list of the trials that are not completed

    trials.status in ['new', 'interrupted', 'suspended', 'broken']
    """
    non_completed_stati = ["new", "interrupted", "suspended", "reserved"]
    stati = non_completed_stati + ["completed"]
    with OrionState(trials=generate_trials(stati)) as cfg:
        exp = Experiment("supernaekei", mode="x")
        exp._id = cfg.trials[0]["experiment"]

        trials = exp.fetch_noncompleted_trials()
        assert len(trials) == 4
        assert set(trial.status for trial in trials) == set(non_completed_stati)
Example no. 17
def hacked_exp(with_user_dendi, random_dt, clean_db):
    """Return an `Experiment` instance with hacked _id to find trials in
    fake database.
    """
    try:
        Database(of_type='MongoDB',
                 name='orion_test',
                 username='******',
                 password='******')
    except (TypeError, ValueError):
        pass
    exp = Experiment('supernaedo2')
    exp._id = 'supernaedo2'  # white box hack
    return exp
Example no. 18
    def test_fix_lost_trials_race_condition(self, monkeypatch, caplog):
        """Test that a lost trial fixed by a concurrent process does not cause error."""
        trial = copy.deepcopy(base_trial)
        trial['status'] = 'interrupted'
        trial['heartbeat'] = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=360)
        with OrionState(trials=[trial]) as cfg:
            exp = Experiment('supernaekei')
            exp._id = cfg.trials[0]['experiment']

            assert len(exp.fetch_trials_by_status('interrupted')) == 1

            assert len(exp._storage.fetch_lost_trials(exp)) == 0

            def fetch_lost_trials(self, query):
                trial_object = Trial(**trial)
                trial_object.status = 'reserved'
                return [trial_object]

            # Force the fetch of a trial marked as reserved (and lost) while actually interrupted
            # (as if already failed-over by another process).
            with monkeypatch.context() as m:
                m.setattr(exp._storage.__class__, 'fetch_lost_trials',
                          fetch_lost_trials)

                assert len(exp._storage.fetch_lost_trials(exp)) == 1

                with caplog.at_level(logging.DEBUG):
                    exp.fix_lost_trials()

            assert caplog.records[-1].levelname == 'DEBUG'
            assert caplog.records[-1].msg == 'failed'
            assert len(exp.fetch_trials_by_status('interrupted')) == 1
            assert len(exp.fetch_trials_by_status('reserved')) == 0
Example no. 19
 def test_experiment_with_parent(self, create_db_instance, random_dt,
                                 exp_config):
     """Configure an existing experiment with parent."""
     exp = Experiment('supernaedo2.1')
     exp.algorithms = {'random': {}}
     exp.configure(exp.configuration)
     assert exp._init_done is True
     assert exp._db is create_db_instance
     assert exp._id is not None
     assert exp.name == 'supernaedo2.1'
     assert exp.configuration['refers'] == exp_config[0][4]['refers']
     assert exp.metadata == exp_config[0][4]['metadata']
     assert exp.pool_size == 2
     assert exp.max_trials == 1000
     assert exp.configuration['algorithms'] == {'random': {}}
Example no. 20
 def test_algorithm_config_with_just_a_string(self, exp_config):
     """Test that configuring an algorithm with just a string is OK."""
     new_config = copy.deepcopy(exp_config[0][1])
     new_config['algorithms'] = 'dumbalgo'
     exp = Experiment('supernaedo3')
     exp.configure(new_config)
     new_config['algorithms'] = dict()
     new_config['algorithms']['dumbalgo'] = dict()
     new_config['algorithms']['dumbalgo']['done'] = False
     new_config['algorithms']['dumbalgo']['judgement'] = None
     new_config['algorithms']['dumbalgo']['scoring'] = 0
     new_config['algorithms']['dumbalgo']['suspend'] = False
     new_config['algorithms']['dumbalgo']['value'] = 5
     assert exp._id == new_config.pop('_id')
     assert exp.configuration['algorithms'] == new_config['algorithms']
Example no. 21
def test_experiment_stats():
    """Check that property stats is returning a proper summary of experiment's results."""
    NUM_COMPLETED = 3
    stati = (["completed"] * NUM_COMPLETED) + (["reserved"] * 2)
    with OrionState(trials=generate_trials(stati)) as cfg:
        exp = Experiment("supernaekei", mode="x")
        exp._id = cfg.trials[0]["experiment"]
        exp.metadata = {"datetime": datetime.datetime.utcnow()}
        stats = exp.stats
        assert stats.trials_completed == NUM_COMPLETED
        assert stats.best_trials_id == cfg.trials[3]["_id"]
        assert stats.best_evaluation == 0
        assert stats.start_time == exp.metadata["datetime"]
        assert stats.finish_time == cfg.trials[0]["end_time"]
        assert stats.duration == stats.finish_time - stats.start_time
Example no. 22
    def test_good_set_before_init_hit_with_diffs(self, exp_config):
        """Trying to set, and differences were found from the config pulled from db.

        In this case:
        1. Force renaming of experiment, prompt user for new name.
        2. Fork from the experiment with the previous name. The new experiment refers
           to the old one, if the user wants it to.
        3. Overwrite elements with the ones from input.

        .. warning:: Currently, not implemented.
        """
        new_config = copy.deepcopy(exp_config[0][1])
        new_config['metadata']['user_version'] = 1.2
        exp = Experiment('supernaedo2')
        exp.configure(new_config)
Example no. 23
 def test_existing_experiment(self, create_db_instance, exp_config):
     """Hit exp_name + user's name in the db, fetch most recent entry."""
     exp = Experiment('supernaedo2')
     assert exp._init_done is False
     assert exp._db is create_db_instance
     assert exp._id == exp_config[0][0]['_id']
     assert exp.name == exp_config[0][0]['name']
     assert exp.refers == exp_config[0][0]['refers']
     assert exp.metadata == exp_config[0][0]['metadata']
     assert exp._last_fetched == exp_config[0][0]['metadata']['datetime']
     assert exp.pool_size == exp_config[0][0]['pool_size']
     assert exp.max_trials == exp_config[0][0]['max_trials']
     assert exp.algorithms == exp_config[0][0]['algorithms']
     with pytest.raises(AttributeError):
         exp.this_is_not_in_config = 5
Example no. 24
def test_configurable_broken_property():
    """Check if max_broken changes after configuration."""
    MAX_BROKEN = 3
    orion.core.config.worker.max_broken = MAX_BROKEN

    stati = (['reserved'] * 10) + (['broken'] * (MAX_BROKEN))
    with OrionState(trials=generate_trials(stati)) as cfg:
        exp = Experiment('supernaekei')
        exp._id = cfg.trials[0]['experiment']

        assert exp.is_broken

        orion.core.config.worker.max_broken += 1

        assert not exp.is_broken
Example no. 25
    def test_acquire_algorithm_lock_with_different_config(self, new_config, algorithm):
        with OrionState(experiments=[new_config]) as cfg:
            exp = Experiment("supernaekei", mode="x")
            exp._id = 0
            algorithm_original_config = algorithm.configuration
            exp.algorithms = algorithm
            # Setting attribute to algorithm inside the wrapper
            algorithm.algorithm.seed = 10

            assert algorithm.configuration != algorithm_original_config

            with pytest.raises(
                RuntimeError, match="Algorithm configuration changed since"
            ):
                with exp.acquire_algorithm_lock(timeout=0.2, retry_interval=0.1):
                    pass
Example no. 26
def test_experiment_stats():
    """Check that property stats is returning a proper summary of experiment's results."""
    NUM_COMPLETED = 3
    stati = (['completed'] * NUM_COMPLETED) + (['reserved'] * 2)
    with OrionState(trials=generate_trials(stati)) as cfg:
        exp = Experiment('supernaekei')
        exp._id = cfg.trials[0]['experiment']
        exp.metadata = {'datetime': datetime.datetime.utcnow()}
        stats = exp.stats
        assert stats['trials_completed'] == NUM_COMPLETED
        assert stats['best_trials_id'] == cfg.trials[3]['_id']
        assert stats['best_evaluation'] == 0
        assert stats['start_time'] == exp.metadata['datetime']
        assert stats['finish_time'] == cfg.trials[0]['end_time']
        assert stats['duration'] == stats['finish_time'] - stats['start_time']
        assert len(stats) == 6
Example no. 27
 def test_empty(self, space):
     """Test panda frame creation when there is no trials"""
     with OrionState():
         exp = Experiment("supernaekei", mode="x")
         exp.space = space
         assert exp.to_pandas().shape == (0, 8)
         assert list(exp.to_pandas().columns) == [
             "id",
             "experiment_id",
             "status",
             "suggested",
             "reserved",
             "completed",
             "objective",
             "/index",
         ]
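
The expected shape (0, 8) follows directly from the eight columns asserted above. A self-contained sketch reproducing that empty frame, assuming pandas is installed (the "/index" column presumably maps to a parameter of the space fixture):

import pandas as pd

columns = ["id", "experiment_id", "status", "suggested", "reserved",
           "completed", "objective", "/index"]
empty = pd.DataFrame(columns=columns)
assert empty.shape == (0, 8)
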
Example no. 28
def test_configurable_broken_property():
    """Check if max_broken changes after configuration."""
    MAX_BROKEN = 5

    stati = (["reserved"] * 10) + (["broken"] * (MAX_BROKEN))
    with OrionState(trials=generate_trials(stati)) as cfg:
        exp = Experiment("supernaekei", mode="x")
        exp._id = cfg.trials[0]["experiment"]

        exp.max_broken = MAX_BROKEN

        assert exp.is_broken

        exp.max_broken += 1

        assert not exp.is_broken
Example no. 29
 def test_new_experiment_due_to_name(self, create_db_instance, random_dt):
     """Hit user name, but exp_name does not hit the db, create new entry."""
     exp = Experiment('supernaekei')
     assert exp._init_done is False
     assert exp._db is create_db_instance
     assert exp._id is None
     assert exp.name == 'supernaekei'
     assert exp.refers == {}
     assert exp.metadata['user'] == 'tsirif'
     assert exp._last_fetched == random_dt
     assert len(exp.metadata) == 1
     assert exp.pool_size is None
     assert exp.max_trials is None
     assert exp.algorithms is None
     with pytest.raises(AttributeError):
         exp.this_is_not_in_config = 5
Example no. 30
    def test_acquire_algorithm_lock_timeout(self, new_config, algorithm, mocker):
        with OrionState(experiments=[new_config]) as cfg:
            exp = Experiment("supernaekei", mode="x")
            exp._id = 0
            exp.algorithms = algorithm

            storage_acquisition_mock = mocker.spy(
                cfg.storage(), "acquire_algorithm_lock"
            )

            with exp.acquire_algorithm_lock(timeout=0.2, retry_interval=0.1):
                pass

            storage_acquisition_mock.assert_called_with(
                experiment=exp, timeout=0.2, retry_interval=0.1
            )