def test_try_set_after_init(self, exp_config):
     """Cannot set a configuration after init (currently)."""
     exp = Experiment('supernaedo2')
     # Deliver an external configuration to finalize init
     exp.configure(exp_config[0][0])
     assert exp._init_done is True
     with pytest.raises(RuntimeError) as exc_info:
         exp.configure(exp_config[0][0])
     assert 'cannot reset' in str(exc_info.value)
Example #2
def test_workon(database):
    """Test scenario having a configured experiment already setup."""
    try:
        Database(of_type='MongoDB', name='orion_test',
                 username='******', password='******')
    except (TypeError, ValueError):
        pass
    experiment = Experiment('voila_voici')
    config = experiment.configuration
    config['algorithms'] = {
        'gradient_descent': {
            'learning_rate': 0.1
            }
        }
    config['pool_size'] = 1
    config['max_trials'] = 100
    config['metadata']['user_script'] = os.path.abspath(os.path.join(
        os.path.dirname(__file__), "black_box.py"))
    config['metadata']['user_args'] = ["-x~uniform(-50, 50)"]
    experiment.configure(config)

    workon(experiment)

    exp = list(database.experiments.find({'name': 'voila_voici'}))
    assert len(exp) == 1
    exp = exp[0]
    assert '_id' in exp
    exp_id = exp['_id']
    assert exp['name'] == 'voila_voici'
    assert exp['pool_size'] == 1
    assert exp['max_trials'] == 100
    assert exp['algorithms'] == {'gradient_descent': {'learning_rate': 0.1,
                                                      'dx_tolerance': 1e-7}}
    assert 'user' in exp['metadata']
    assert 'datetime' in exp['metadata']
    assert 'user_script' in exp['metadata']
    assert os.path.isabs(exp['metadata']['user_script'])
    assert exp['metadata']['user_args'] == ['-x~uniform(-50, 50)']

    trials = list(database.trials.find({'experiment': exp_id}))
    assert len(trials) < 15
    assert trials[-1]['status'] == 'completed'
    for result in trials[-1]['results']:
        assert result['type'] != 'constraint'
        if result['type'] == 'objective':
            assert abs(result['value'] - 23.4) < 1e-6
            assert result['name'] == 'example_objective'
        elif result['type'] == 'gradient':
            res = numpy.asarray(result['value'])
            assert 0.1 * numpy.sqrt(res.dot(res)) < 1e-7
            assert result['name'] == 'example_gradient'
    params = trials[-1]['params']
    assert len(params) == 1
    assert params[0]['name'] == '/x'
    assert params[0]['type'] == 'real'
    assert abs(params[0]['value'] - 34.56789) < 1e-5
Example #3
def create_experiment(exp_name, expconfig, cmdconfig, cmdargs):
    """Create an experiment based on configuration.

    Configuration is a combination of command line, experiment configuration
    file, experiment configuration in database and orion configuration files.

    Precedence of configurations is:
    `cmdargs` > `cmdconfig` > `dbconfig` > `expconfig`

    This means `expconfig` values would be overwritten by `dbconfig` and so on.

    Parameters
    ----------
    exp_name: str
        Name of the experiment
    expconfig: dict
        Configuration coming from default configuration files.
    cmdconfig: dict
        Configuration coming from configuration file.
    cmdargs: dict
        Configuration coming from command line arguments.

    """
    # Initialize experiment object.
    # Check for existing name and fetch configuration.
    experiment = Experiment(exp_name)
    dbconfig = experiment.configuration

    log.debug("DB config")
    log.debug(dbconfig)

    expconfig = resolve_config.merge_orion_config(expconfig, dbconfig,
                                                  cmdconfig, cmdargs)
    # Infer the rest of the information about the process + versioning
    expconfig['metadata'] = infer_versioning_metadata(expconfig['metadata'])

    # Pop out configuration concerning databases and resources
    expconfig.pop('database', None)
    expconfig.pop('resources', None)
    expconfig.pop('status', None)

    log.info(expconfig)

    # Finish experiment's configuration and write it to database.
    try:
        experiment.configure(expconfig)
    except DuplicateKeyError:
        # Fails if a concurrent experiment with an identical (name, metadata.user)
        # pair is written first in the database.
        # The next create_experiment() call should either load the experiment from
        # the database and run smoothly if identical, or trigger an experiment fork.
        # In other words, there should not be more than 1 level of recursion.
        experiment = create_experiment(exp_name, expconfig, cmdconfig, cmdargs)

    return experiment
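
The precedence rule described above (`cmdargs` > `cmdconfig` > `dbconfig` > `expconfig`) amounts to a right-biased recursive merge: values from later sources override earlier ones. Below is a minimal sketch of that idea only; it is not the actual resolve_config.merge_orion_config implementation, and the deep_merge helper is hypothetical.

def deep_merge(*configs):
    """Recursively merge dicts; keys from later configs override earlier ones."""
    merged = {}
    for config in configs:
        for key, value in config.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = deep_merge(merged[key], value)
            else:
                merged[key] = value
    return merged


# Later sources win: cmdargs > cmdconfig > dbconfig > expconfig.
expconfig = {'pool_size': 1, 'algorithms': {'random': {'seed': None}}}
dbconfig = {'max_trials': 100}
cmdconfig = {'pool_size': 4}
cmdargs = {'max_trials': 1000}

merged = deep_merge(expconfig, dbconfig, cmdconfig, cmdargs)
assert merged == {'pool_size': 4, 'max_trials': 1000,
                  'algorithms': {'random': {'seed': None}}}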
Example #4
 def test_experiment_non_interactive_branching(self, create_db_instance,
                                               random_dt, exp_config,
                                               monkeypatch):
     """Configure an existing experiment with parent."""
     monkeypatch.setattr('sys.__stdin__.isatty', lambda: True)
     exp = Experiment('supernaedo2.1')
     exp.algorithms = {'dumbalgo': {}}
     with pytest.raises(OSError):
         exp.configure(exp.configuration)
     monkeypatch.undo()
     with pytest.raises(ValueError) as exc_info:
         exp.configure(exp.configuration)
     assert "Configuration is different and generates a branching" in str(
         exc_info.value)
Example #5
 def test_algorithm_config_with_just_a_string(self, exp_config):
     """Test that configuring an algorithm with just a string is OK."""
     new_config = copy.deepcopy(exp_config[0][1])
     new_config['algorithms'] = 'dumbalgo'
     exp = Experiment('supernaedo3')
     exp.configure(new_config)
     new_config['algorithms'] = dict()
     new_config['algorithms']['dumbalgo'] = dict()
     new_config['algorithms']['dumbalgo']['done'] = False
     new_config['algorithms']['dumbalgo']['judgement'] = None
     new_config['algorithms']['dumbalgo']['scoring'] = 0
     new_config['algorithms']['dumbalgo']['suspend'] = False
     new_config['algorithms']['dumbalgo']['value'] = 5
     assert exp._id == new_config.pop('_id')
     assert exp.configuration['algorithms'] == new_config['algorithms']
Example #6
    def test_status_is_pending_when_increase_max_trials(self, exp_config):
        """Attribute exp.algorithms become objects after init."""
        exp = Experiment('supernaedo4')

        # Deliver an external configuration to finalize init
        exp.configure(exp_config[0][2])

        assert exp.is_done

        exp = Experiment('supernaedo4')
        # Deliver an external configuration to finalize init
        exp_config[0][2]['max_trials'] = 1000
        exp.configure(exp_config[0][2])

        assert not exp.is_done
Example #7
 def test_experiment_with_parent(self, create_db_instance, random_dt,
                                 exp_config):
     """Configure an existing experiment with parent."""
     exp = Experiment('supernaedo2.1')
     exp.algorithms = {'random': {}}
     exp.configure(exp.configuration)
     assert exp._init_done is True
     assert exp._db is create_db_instance
     assert exp._id is not None
     assert exp.name == 'supernaedo2.1'
     assert exp.configuration['refers'] == exp_config[0][4]['refers']
     assert exp.metadata == exp_config[0][4]['metadata']
     assert exp.pool_size == 2
     assert exp.max_trials == 1000
     assert exp.configuration['algorithms'] == {'random': {}}
Example #8
    def test_good_set_before_init_hit_with_diffs(self, exp_config):
        """Trying to set, and differences were found from the config pulled from db.

        In this case:
        1. Force renaming of experiment, prompt user for new name.
        2. Fork from the experiment with the previous name. The new experiment
           refers to the old one, if the user wants it to.
        3. Overwrite elements with the ones from input.

        .. warning:: Currently, not implemented.
        """
        new_config = copy.deepcopy(exp_config[0][1])
        new_config['metadata']['user_version'] = 1.2
        exp = Experiment('supernaedo2')
        exp.configure(new_config)
Example #9
    def test_good_set_before_init_hit_no_diffs_exc_pool_size(self, exp_config):
        """Trying to set, and NO differences were found from the config pulled from db.

        Everything is normal, nothing changes. Experiment is resumed,
        perhaps with more workers evaluating in parallel ('pool_size' is allowed to differ).
        """
        exp = Experiment('supernaedo2')
        # Deliver an external configuration to finalize init
        exp_config[0][0]['pool_size'] = 10
        exp.configure(exp_config[0][0])
        exp_config[0][0]['algorithms']['dumbalgo']['done'] = False
        exp_config[0][0]['algorithms']['dumbalgo']['judgement'] = None
        exp_config[0][0]['algorithms']['dumbalgo']['scoring'] = 0
        exp_config[0][0]['algorithms']['dumbalgo']['suspend'] = False
        exp_config[0][0]['algorithms']['dumbalgo']['value'] = 5
        assert exp._id == exp_config[0][0].pop('_id')
        assert exp.configuration == exp_config[0][0]
Example #10
 def test_new_experiment_with_parent(self, create_db_instance, random_dt,
                                     exp_config):
     """Configure a branch experiment."""
     exp = Experiment('supernaedo2.6')
     exp.metadata = exp_config[0][4]['metadata']
     exp.refers = exp_config[0][4]['refers']
     exp.algorithms = exp_config[0][4]['algorithms']
     exp.configure(exp.configuration)
     assert exp._init_done is True
     assert_protocol(exp, create_db_instance)
     assert exp._id is not None
     assert exp.name == 'supernaedo2.6'
     assert exp.configuration['refers'] == exp_config[0][4]['refers']
     exp_config[0][4]['metadata']['datetime'] = random_dt
     assert exp.metadata == exp_config[0][4]['metadata']
     assert exp.pool_size is None
     assert exp.max_trials is None
     assert exp.configuration['algorithms'] == {'random': {'seed': None}}
Example #11
def test_forcing_user(exp_config):
    """Trying to set by forcing user so that NO differences are found."""
    assert getpass.getuser() == 'bouthilx'
    exp = Experiment('supernaedo2')
    assert exp.metadata['user'] == 'bouthilx'
    exp = Experiment('supernaedo2', 'tsirif')
    # Deliver an external configuration to finalize init
    exp_config[0][0]['max_trials'] = 5000
    exp.configure(exp_config[0][0])
    exp_config[0][0]['algorithms']['dumbalgo']['done'] = False
    exp_config[0][0]['algorithms']['dumbalgo']['judgement'] = None
    exp_config[0][0]['algorithms']['dumbalgo']['scoring'] = 0
    exp_config[0][0]['algorithms']['dumbalgo']['suspend'] = False
    exp_config[0][0]['algorithms']['dumbalgo']['value'] = 5
    exp_config[0][0]['algorithms']['dumbalgo']['seed'] = None
    exp_config[0][0]['producer']['strategy'] = "NoParallelStrategy"
    assert exp._id == exp_config[0][0].pop('_id')
    assert exp.configuration == exp_config[0][0]
Example #12
    def test_good_set_before_init_hit_no_diffs_exc_max_trials(
            self, exp_config):
        """Trying to set, and NO differences were found from the config pulled from db.

        Everything is normal, nothing changes. Experiment is resumed,
        perhaps with more trials to evaluate ('max_trials' is allowed to differ).
        """
        exp = Experiment('supernaedo2')
        # Deliver an external configuration to finalize init
        exp_config[0][0]['max_trials'] = 5000
        exp.configure(exp_config[0][0])
        exp_config[0][0]['algorithms']['dumbalgo']['done'] = False
        exp_config[0][0]['algorithms']['dumbalgo']['judgement'] = None
        exp_config[0][0]['algorithms']['dumbalgo']['scoring'] = 0
        exp_config[0][0]['algorithms']['dumbalgo']['suspend'] = False
        exp_config[0][0]['algorithms']['dumbalgo']['value'] = 5
        exp_config[0][0]['producer']['strategy'] = "NoParallelStrategy"
        assert exp._id == exp_config[0][0].pop('_id')
        assert exp.configuration == exp_config[0][0]
Example #13
    def test_get_after_init_plus_hit_no_diffs(self, exp_config):
        """Return a configuration dict according to an experiment object.

        Before initialization is done, it can be the case that the pair (`name`,
        user's name) has not hit the database. Return a YAML-compliant form
        of the current state, to be used with :mod:`orion.core.cli.resolve_config`.
        """
        exp = Experiment('supernaedo2')
        # Deliver an external configuration to finalize init
        experiment_count_before = exp._db.count("experiments")
        exp.configure(exp_config[0][0])
        assert exp._init_done is True
        exp_config[0][0]['algorithms']['dumbalgo']['done'] = False
        exp_config[0][0]['algorithms']['dumbalgo']['judgement'] = None
        exp_config[0][0]['algorithms']['dumbalgo']['scoring'] = 0
        exp_config[0][0]['algorithms']['dumbalgo']['suspend'] = False
        exp_config[0][0]['algorithms']['dumbalgo']['value'] = 5
        assert exp._id == exp_config[0][0].pop('_id')
        assert exp.configuration == exp_config[0][0]
        assert experiment_count_before == exp._db.count("experiments")
Example #14
    def test_try_reset_after_race_condition(self, exp_config, new_config):
        """Cannot set a configuration after init if it looses a race condition,
        but can set it if reloaded.

        The experiment from process which first writes to db is initialized
        properly. The experiment which looses the race condition cannot be
        initialized and needs to be rebuilt.
        """
        exp = Experiment(new_config['name'])
        # Another experiment gets configured first
        experiment_count_before = exp._db.count("experiments")
        naughty_little_exp = Experiment(new_config['name'])
        naughty_little_exp.configure(new_config)
        assert naughty_little_exp._init_done is True
        assert exp._init_done is False
        assert (experiment_count_before + 1) == exp._db.count("experiments")
        # First experiment won't be able to be configured
        with pytest.raises(DuplicateKeyError) as exc_info:
            exp.configure(new_config)
        assert 'duplicate key error' in str(exc_info.value)

        # Still no additional experiment in the DB
        assert (experiment_count_before + 1) == exp._db.count("experiments")

        # Retry configuring the experiment
        new_config['metadata']['datetime'] = naughty_little_exp.metadata[
            'datetime']
        exp = Experiment(new_config['name'])
        exp.configure(new_config)
        assert exp._init_done is True
        assert (experiment_count_before + 1) == exp._db.count("experiments")
        assert exp.configuration == naughty_little_exp.configuration
Example #15
    def test_working_dir_is_correctly_set(self, database, new_config):
        """Check if working_dir is correctly changed."""
        exp = Experiment(new_config['name'])
        exp.configure(new_config)
        assert exp._init_done is True
        database.experiments.update_one(
            {
                'name': 'supernaekei',
                'metadata.user': '******'
            }, {'$set': {
                'working_dir': './'
            }})
        found_config = list(
            database.experiments.find({
                'name': 'supernaekei',
                'metadata.user': '******'
            }))

        found_config = found_config[0]
        exp = Experiment(found_config['name'])
        exp.configure(found_config)
        assert exp.working_dir == './'
Example #16
    def test_working_dir_works_when_db_absent(self, database, new_config):
        """Check if working_dir is correctly when absent from the database."""
        exp = Experiment(new_config['name'])
        exp.configure(new_config)
        assert exp._init_done is True
        database.experiments.update_one(
            {
                'name': 'supernaekei',
                'metadata.user': '******'
            }, {'$unset': {
                'working_dir': ''
            }})
        found_config = list(
            database.experiments.find({
                'name': 'supernaekei',
                'metadata.user': '******'
            }))

        found_config = found_config[0]
        exp = Experiment(found_config['name'])
        exp.configure(found_config)
        assert exp.working_dir is None
Example #17
    def test_good_set_before_init_no_hit(self, random_dt, database,
                                         new_config):
        """Trying to set, overwrite everything from input."""
        exp = Experiment(new_config['name'])
        exp.configure(new_config)
        assert exp._init_done is True
        found_config = list(
            database.experiments.find({
                'name': 'supernaekei',
                'metadata.user': '******'
            }))

        new_config['metadata']['datetime'] = exp.metadata['datetime']

        assert len(found_config) == 1
        _id = found_config[0].pop('_id')
        assert _id != 'fasdfasfa'
        assert exp._id == _id
        new_config['refers'] = {}
        new_config.pop('_id')
        new_config.pop('something_to_be_ignored')
        new_config['algorithms']['dumbalgo']['done'] = False
        new_config['algorithms']['dumbalgo']['judgement'] = None
        new_config['algorithms']['dumbalgo']['scoring'] = 0
        new_config['algorithms']['dumbalgo']['suspend'] = False
        new_config['algorithms']['dumbalgo']['value'] = 5
        new_config['algorithms']['dumbalgo']['seed'] = None
        new_config['refers'] = {
            'adapter': [],
            'parent_id': None,
            'root_id': _id
        }
        assert found_config[0] == new_config
        assert exp.name == new_config['name']
        assert exp.configuration['refers'] == new_config['refers']
        assert exp.metadata == new_config['metadata']
        assert exp.pool_size == new_config['pool_size']
        assert exp.max_trials == new_config['max_trials']
        assert exp.working_dir == new_config['working_dir']
Example #18
    def build_from_config(self, config):
        """Build a fully configured (and writable) experiment based on full configuration.

        .. seealso::

            `orion.core.io.experiment_builder` for more information on the hierarchy of
            configurations.

            :class:`orion.core.worker.experiment.Experiment` for more information on the experiment
            object.
        """
        log.info(config)

        # Pop out configuration concerning databases and resources
        config.pop('database', None)
        config.pop('resources', None)

        experiment = Experiment(config['name'])

        # Finish experiment's configuration and write it to database.
        experiment.configure(config)

        return experiment
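
For reference, a hypothetical usage sketch of build_from_config. The configuration keys mirror those used in the tests above, and the ExperimentBuilder class and its import path are assumed from the docstring's reference to orion.core.io.experiment_builder; adjust to the actual builder API.

# Hypothetical usage sketch; ExperimentBuilder and its location are assumptions.
from orion.core.io.experiment_builder import ExperimentBuilder

full_config = {
    'name': 'voila_voici',
    'pool_size': 1,
    'max_trials': 100,
    'algorithms': {'gradient_descent': {'learning_rate': 0.1}},
    'metadata': {'user_script': 'black_box.py',
                 'user_args': ['-x~uniform(-50, 50)']},
}

# build_from_config finalizes the configuration and writes it to the database.
experiment = ExperimentBuilder().build_from_config(full_config)
assert experiment.name == 'voila_voici'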
Example #19
    def test_try_set_after_race_condition(self, exp_config, new_config):
        """Cannot set a configuration after init if it looses a race
        condition.

        The experiment from process which first writes to db is initialized
        properly. The experiment which looses the race condition cannot be
        initialized and needs to be rebuilt.
        """
        exp = Experiment(new_config['name'])
        assert exp.id is None
        # Another experiment gets configured first
        experiment_count_before = exp._db.count("experiments")
        naughty_little_exp = Experiment(new_config['name'])
        assert naughty_little_exp.id is None
        naughty_little_exp.configure(new_config)
        assert naughty_little_exp._init_done is True
        assert exp._init_done is False
        assert (experiment_count_before + 1) == exp._db.count("experiments")
        # First experiment won't be able to be configured
        with pytest.raises(DuplicateKeyError) as exc_info:
            exp.configure(new_config)
        assert 'duplicate key error' in str(exc_info.value)

        assert (experiment_count_before + 1) == exp._db.count("experiments")
Example #20
    def test_try_set_after_race_condition_with_hit(self, exp_config,
                                                   new_config):
        """Cannot set a configuration after init if config is built
        from no-hit (without up-to-date db info) and new exp is hit

        The experiment from process which first writes to db is initialized
        properly. The experiment which looses the race condition cannot be
        initialized and needs to be rebuilt.
        """
        # Another experiment gets configured first
        naughty_little_exp = Experiment(new_config['name'])
        assert naughty_little_exp.id is None
        experiment_count_before = naughty_little_exp._db.count("experiments")
        naughty_little_exp.configure(copy.deepcopy(new_config))
        assert naughty_little_exp._init_done is True

        exp = Experiment(new_config['name'])
        assert exp._init_done is False
        assert (experiment_count_before + 1) == exp._db.count("experiments")
        # Experiment with a hit cannot be configured with a config lacking db info
        with pytest.raises(DuplicateKeyError) as exc_info:
            exp.configure(new_config)
        assert 'Cannot register an existing experiment with a new config' in str(
            exc_info.value)

        assert (experiment_count_before + 1) == exp._db.count("experiments")

        new_config['metadata']['datetime'] = naughty_little_exp.metadata[
            'datetime']
        exp = Experiment(new_config['name'])
        assert exp._init_done is False
        assert (experiment_count_before + 1) == exp._db.count("experiments")
        # New experiment will be able to be configured
        exp.configure(new_config)

        assert (experiment_count_before + 1) == exp._db.count("experiments")
Example #21
 def test_not_inconsistent_3_set_before_init_no_hit(self, random_dt,
                                                    new_config):
     """Test inconsistent configuration because of datetime."""
     exp = Experiment(new_config['name'])
     new_config['metadata']['datetime'] = 123
     exp.configure(new_config)
Example #22
 def test_after_init_algorithms_are_objects(self, exp_config):
     """Attribute exp.algorithms become objects after init."""
     exp = Experiment('supernaedo2')
     # Deliver an external configuration to finalize init
     exp.configure(exp_config[0][0])
     assert isinstance(exp.algorithms, BaseAlgorithm)