Пример #1
0
def test_setup_storage_default():
    """Calling setup_storage with no argument builds the default backend."""
    update_singletons()
    setup_storage()

    instance = Storage()
    # The default configuration yields the legacy backend over PickledDB.
    assert isinstance(instance, Legacy)
    assert isinstance(instance._db, PickledDB)
Пример #2
0
def test_setup_storage_bad():
    """Setup must raise NotImplementedError for an unknown backend type."""
    update_singletons()

    bad_config = {"type": "idontexist"}
    with pytest.raises(NotImplementedError) as exc:
        setup_storage(bad_config)

    # The offending backend name must appear in the error message.
    assert exc.match("idontexist")
Пример #3
0
def test_setup_storage_stateless():
    """setup_storage must not mutate the configuration dict it receives."""
    update_singletons()

    original = {"database": {"type": "pickleddb", "host": "test.pkl"}}
    argument = copy.deepcopy(original)
    setup_storage(argument)

    # Deep equality catches in-place mutation of the nested dicts too.
    assert original == argument
Пример #4
0
def get_experiment(name, version=None, mode="r", storage=None):
    """
    Retrieve an existing experiment as :class:`orion.client.experiment.ExperimentClient`.

    Parameters
    ----------
    name: str
        The name of the experiment.
    version: int, optional
        Version to select. If None, last version will be selected. If version given is larger than
        largest version available, the largest version will be selected.
    mode: str, optional
        The access rights of the experiment on the database.
        'r': read access only
        'w': can read and write to database
        Default is 'r'
    storage: dict, optional
        Configuration of the storage backend.

    Returns
    -------
    An instance of :class:`orion.client.experiment.ExperimentClient` representing the experiment.

    Raises
    ------
    ValueError
        If ``mode`` is not 'r' or 'w'.
    `orion.core.utils.exceptions.NoConfigurationError`
        The experiment is not in the database provided by the user.
    """
    # Validate the cheap argument first, before setting up the storage
    # backend. The original used ``assert``, which is stripped under ``-O``
    # and therefore unsuitable for input validation.
    if mode not in ("r", "w"):
        raise ValueError("mode must be 'r' or 'w', got {!r}".format(mode))

    setup_storage(storage)
    experiment = experiment_builder.load(name, version, mode)
    return ExperimentClient(experiment)
Пример #5
0
def test_setup_storage_custom_type_missing():
    """A database-only config (no top-level type) still yields legacy storage."""
    update_singletons()

    setup_storage({"database": {"type": "pickleddb", "host": "test.pkl"}})

    backend = Storage()
    assert isinstance(backend, Legacy)
    assert isinstance(backend._db, PickledDB)
    assert backend._db.host == "test.pkl"
Пример #6
0
def test_get_storage():
    """get_storage must hand back the configured singleton instance."""
    update_singletons()
    setup_storage({'database': {'type': 'pickleddb', 'host': 'test.pkl'}})

    singleton = get_storage()
    assert isinstance(singleton, Legacy)
    assert isinstance(singleton._db, PickledDB)
    # Repeated calls return the very same object.
    assert get_storage() == singleton
Пример #7
0
def test_setup_storage_custom_legacy_emtpy():
    """A bare {'type': 'legacy'} config falls back to global database settings."""
    update_singletons()
    setup_storage({"type": "legacy"})

    backend = Storage()
    assert isinstance(backend, Legacy)
    assert isinstance(backend._db, PickledDB)
    # No host was given, so it comes from orion's global configuration.
    assert backend._db.host == orion.core.config.storage.database.host
Пример #8
0
def test_get_storage():
    """The storage returned by get_storage is the configured singleton."""
    update_singletons()

    config = {"database": {"type": "pickleddb", "host": "test.pkl"}}
    setup_storage(config)

    result = get_storage()
    assert isinstance(result, Legacy)
    assert isinstance(result._db, PickledDB)
    # A second lookup yields the same singleton.
    assert get_storage() == result
Пример #9
0
def test_setup_storage_custom_type_missing():
    """When the top-level type is omitted, legacy storage is assumed."""
    update_singletons()

    config = {'database': {'type': 'pickleddb', 'host': 'test.pkl'}}
    setup_storage(config)

    created = Storage()
    assert isinstance(created, Legacy)
    assert isinstance(created._db, PickledDB)
    assert created._db.host == 'test.pkl'
Пример #10
0
def test_setup_storage_bad_config_override():
    """Re-configuring the singleton with a conflicting config must fail."""
    update_singletons()
    setup_storage({"database": {"type": "pickleddb", "host": "test.pkl"}})

    first = Storage()
    assert isinstance(first, Legacy)
    assert isinstance(first._db, PickledDB)

    # The singleton already exists; a different config is rejected.
    with pytest.raises(SingletonAlreadyInstantiatedError):
        setup_storage({"database": {"type": "mongodb"}})
Пример #11
0
def test_setup_storage_bad_config_override():
    """A second setup with a different database config must be refused."""
    update_singletons()

    setup_storage({'database': {'type': 'pickleddb', 'host': 'test.pkl'}})
    existing = Storage()
    assert isinstance(existing, Legacy)
    assert isinstance(existing._db, PickledDB)

    conflicting = {'database': {'type': 'mongodb'}}
    with pytest.raises(SingletonAlreadyInstantiatedError):
        setup_storage(conflicting)
Пример #12
0
    def __init__(self, config=None):
        """Set up the web API: CORS middleware, storage backend and routes.

        Parameters
        ----------
        config: dict, optional
            Server configuration. May provide ``frontends_uri`` (list of
            origins allowed by CORS) and ``storage`` (storage backend config).
        """
        # Fix: with the default ``config=None``, the lookups below
        # (``config["frontends_uri"]`` and ``config.get("storage")``)
        # raised TypeError. Fall back to an empty configuration.
        if config is None:
            config = {}

        # By default, server will reject requests coming from a server
        # with different origin. E.g., if server is hosted at
        # http://myorionserver.com, it won't accept an API call
        # coming from a server not hosted at same address
        # (e.g. a local installation at http://localhost)
        # This is a Cross-Origin Resource Sharing (CORS) security:
        # https://developer.mozilla.org/fr/docs/Web/HTTP/CORS
        # To make server accept CORS requests, we need to use
        # falcon-cors package: https://github.com/lwcolton/falcon-cors
        frontends_uri = config.get("frontends_uri", ["http://localhost:3000"])
        logger.info("allowed frontends: {}".format(
            ", ".join(frontends_uri) if frontends_uri else "(none)"))
        cors = MyCORS(allow_origins_list=frontends_uri)
        super(WebApi, self).__init__(middleware=[cors.middleware])
        self.config = config

        setup_storage(config.get("storage"))

        # Create our resources
        root_resource = RuntimeResource()
        experiments_resource = ExperimentsResource()
        trials_resource = TrialsResource()
        plots_resource = PlotsResource()

        # Build routes
        self.add_route("/", root_resource)
        self.add_route("/experiments", experiments_resource)
        self.add_route("/experiments/{name}",
                       experiments_resource,
                       suffix="experiment")
        self.add_route("/trials/{experiment_name}",
                       trials_resource,
                       suffix="trials_in_experiment")
        self.add_route(
            "/trials/{experiment_name}/{trial_id}",
            trials_resource,
            suffix="trial_in_experiment",
        )
        self.add_route("/plots/lpi/{experiment_name}",
                       plots_resource,
                       suffix="lpi")
        self.add_route(
            "/plots/partial_dependencies/{experiment_name}",
            plots_resource,
            suffix="partial_dependencies",
        )
        self.add_route(
            "/plots/parallel_coordinates/{experiment_name}",
            plots_resource,
            suffix="parallel_coordinates",
        )
        self.add_route("/plots/regret/{experiment_name}",
                       plots_resource,
                       suffix="regret")
Пример #13
0
def workon(function, space, name='loop', algorithms=None, max_trials=None):
    """Optimize a function over a given search space

    This will create a new experiment with an in-memory storage and optimize the given function
    until `max_trials` is reached or the `algorithm` is done
    (some algorithms like random search are never done).

    For information on how to fetch results, see
    :py:class:`orion.client.experiment.ExperimentClient`.

    .. note::

        Each call to this function will create a separate in-memory storage.

    Parameters
    ----------
    function: callable
        Function to optimize; it is passed to
        :meth:`orion.client.experiment.ExperimentClient.workon`.
    space: dict
        Optimization space of the algorithm. Should have the form `dict(name='<prior>(args)')`.
    name: str, optional
        Name of the experiment. Defaults to 'loop'.
    algorithms: str or dict, optional
        Algorithm used for optimization.
    max_trials: int, optional
        Maximum number or trials before the experiment is considered done.

    Returns
    -------
    `orion.client.experiment.ExperimentClient`
        Client for the finished in-memory experiment, from which results can be fetched.

    Raises
    ------
    `NotImplementedError`
        If the algorithm specified is not properly installed.

    """
    # Clear singletons and keep pointers to restore them.
    singletons = update_singletons()

    # Use an ephemeral in-memory database so this run leaves no trace on disk.
    setup_storage(storage={'type': 'legacy', 'database': {'type': 'EphemeralDB'}})

    # Always build version 1: the storage above is fresh, so no prior
    # version of the experiment can exist.
    experiment = experiment_builder.build(
        name, version=1, space=space, algorithms=algorithms,
        strategy='NoParallelStrategy', max_trials=max_trials)

    producer = Producer(experiment)

    experiment_client = ExperimentClient(experiment, producer)
    experiment_client.workon(function, max_trials=max_trials)

    # Restore singletons
    update_singletons(singletons)

    return experiment_client
Пример #14
0
def setup_tmp_storage(host):
    """Reset singletons and configure a legacy pickleddb storage at `host`."""
    # Drop any previously-instantiated singletons first.
    update_singletons()

    config = {
        "type": "legacy",
        "database": {"type": "pickleddb", "host": host},
    }
    setup_storage(storage=config)

    return get_storage()
Пример #15
0
def test_setup_storage_bad_override():
    """Test setup with different type than existing singleton"""
    update_singletons()
    setup_storage(
        {"type": "legacy", "database": {"type": "pickleddb", "host": "test.pkl"}}
    )
    storage = Storage()
    assert isinstance(storage, Legacy)
    assert isinstance(storage._db, PickledDB)
    with pytest.raises(SingletonAlreadyInstantiatedError) as exc:
        setup_storage({"type": "track"})

    # Raw string: the pattern contains regex escapes (``\(`` / ``\)``); a
    # plain literal triggers an invalid-escape-sequence DeprecationWarning.
    assert exc.match(r"A singleton instance of \(type: Storage\)")
Пример #16
0
def test_setup_storage_custom():
    """A full legacy+pickleddb configuration creates the expected backend."""
    update_singletons()

    config = {
        "type": "legacy",
        "database": {"type": "pickleddb", "host": "test.pkl"},
    }
    setup_storage(config)

    backend = storage_factory.create()
    assert isinstance(backend, Legacy)
    assert isinstance(backend._db, PickledDB)
    # The relative host path is resolved to an absolute one.
    assert backend._db.host == os.path.abspath("test.pkl")
Пример #17
0
def test_setup_storage_bad_override():
    """Test setup with different type than existing singleton"""
    update_singletons()
    setup_storage({
        'type': 'legacy',
        'database': {
            'type': 'pickleddb',
            'host': 'test.pkl'
        }
    })
    storage = Storage()
    assert isinstance(storage, Legacy)
    assert isinstance(storage._db, PickledDB)
    with pytest.raises(SingletonAlreadyInstantiatedError) as exc:
        setup_storage({'type': 'track'})

    # Raw string avoids the invalid-escape-sequence warning for \( and \).
    assert exc.match(r'A singleton instance of \(type: Storage\)')
Пример #18
0
    def __init__(self, config=None):
        """Set up the web API: storage backend and routes.

        Parameters
        ----------
        config: dict, optional
            Server configuration. May provide ``storage`` (storage backend
            configuration).
        """
        super(WebApi, self).__init__()
        # Fix: with the default ``config=None``, ``config.get("storage")``
        # below raised TypeError. Fall back to an empty configuration.
        if config is None:
            config = {}
        self.config = config

        setup_storage(config.get("storage"))

        # Create our resources
        root_resource = RuntimeResource()
        experiments_resource = ExperimentsResource()
        trials_resource = TrialsResource()
        plots_resource = PlotsResource()

        # Build routes
        self.add_route("/", root_resource)
        self.add_route("/experiments", experiments_resource)
        self.add_route("/experiments/{name}",
                       experiments_resource,
                       suffix="experiment")
        self.add_route("/trials/{experiment_name}",
                       trials_resource,
                       suffix="trials_in_experiment")
        self.add_route(
            "/trials/{experiment_name}/{trial_id}",
            trials_resource,
            suffix="trial_in_experiment",
        )
        self.add_route("/plots/lpi/{experiment_name}",
                       plots_resource,
                       suffix="lpi")
        self.add_route(
            "/plots/partial_dependencies/{experiment_name}",
            plots_resource,
            suffix="partial_dependencies",
        )
        self.add_route(
            "/plots/parallel_coordinates/{experiment_name}",
            plots_resource,
            suffix="parallel_coordinates",
        )
        self.add_route("/plots/regret/{experiment_name}",
                       plots_resource,
                       suffix="regret")
Пример #19
0
def build_view_from_args(cmdargs):
    """Build an experiment view based on commandline arguments

    .. seealso::

        :func:`orion.core.io.experiment_builder.build_view` for more information on experiment view
        creation.

    """
    cmd_config = get_cmd_config(cmdargs)

    # A name is mandatory to identify which experiment to view.
    if 'name' not in cmd_config:
        raise NoNameError()

    setup_storage(cmd_config['storage'], debug=cmd_config.get('debug'))

    return build_view(cmd_config.get('name'), cmd_config.get('version'))
Пример #20
0
def build_from_args(cmdargs):
    """Build an experiment based on commandline arguments.

    Options provided in commandline and configuration file (--config) will overwrite system's
    default values and configuration from database if experiment already exits.
    Commandline arguments have precedence over configuration file options.

    .. seealso::

        :func:`orion.core.io.experiment_builder.build` for more information on experiment creation.

    """
    cmd_config = get_cmd_config(cmdargs)

    # An experiment cannot be built without a name.
    if 'name' not in cmd_config:
        raise NoNameError()

    storage_config = cmd_config['storage']
    setup_storage(storage_config, debug=cmd_config.get('debug'))

    return build(**cmd_config)
Пример #21
0
def get_from_args(cmdargs, mode="r"):
    """Build an experiment view based on commandline arguments

    .. seealso::

        :func:`orion.core.io.experiment_builder.load` for more information on creation of read-only
        experiments.

    """
    cmd_config = get_cmd_config(cmdargs)

    # Loading an experiment requires its name.
    if "name" not in cmd_config:
        raise NoNameError()

    setup_storage(cmd_config["storage"], debug=cmd_config.get("debug"))

    return load(cmd_config.get("name"), cmd_config.get("version"), mode=mode)
Пример #22
0
def build_experiment(
    name,
    version=None,
    space=None,
    algorithms=None,
    strategy=None,
    max_trials=None,
    max_broken=None,
    storage=None,
    branching=None,
    max_idle_time=None,
    heartbeat=None,
    working_dir=None,
    debug=False,
    executor=None,
):
    """Build an experiment to be executable

    Building the experiment can result in branching if there are any changes in the environment.
    This is required to ensure coherence between execution of trials. For an experiment
    in read/write mode without execution rights, see `get_experiment`.

    There are 2 main scenarios

    1) The experiment is new

    ``name`` and ``space`` arguments are required, otherwise ``NoConfigurationError`` will be
    raised.

    All other arguments (``algorithms``, ``strategy``, ``max_trials``, ``storage``, ``branching``
    and ``working_dir``) will be replaced by system's defaults if omitted. The system's defaults
    can also be overridden in global configuration file as described for the database in
    :ref:`Database Configuration`. We do not recommend overriding the algorithm configuration
    using system's default, but overriding the storage configuration can be very convenient if the
    same storage is used for all your experiments.

    2) The experiment exists in the database.

    We can break down this scenario in two sub-scenarios for clarity.

    2.1) Only experiment name is given.

    The configuration will be fetched from database.

    2.2) Some other arguments than the name are given.

    The configuration will be fetched from database and given arguments will override them.
    ``max_trials`` and ``max_broken`` may be overwritten in DB, but any other changes will lead to
    a branching. Instead of creating the experiment ``(name, version)``, it will create a new
    experiment ``(name, version+1)`` which will have the same configuration as
    ``(name, version)`` except for the differing arguments given by user. This new experiment will
    have access to trials of ``(name, version)``, adapted according to the differences between
    ``version`` and ``version+1``.  A previous version can be accessed by specifying the
    ``version`` argument.

    Causes of experiment branching are:

    - Change of search space

        - New dimension

        - Different prior

        - Missing dimension

    - Change of algorithm

    - Change of strategy (Not implemented yet)

    - Change of code version (Only supported by commandline API for now)

    - Change of orion version

    Parameters
    ----------
    name: str
        Name of the experiment
    version: int, optional
        Version of the experiment. Defaults to last existing version for a given ``name``
        or 1 for new experiment.
    space: dict, optional
        Optimization space of the algorithm. Should have the form ``dict(name='<prior>(args)')``.
    algorithms: str or dict, optional
        Algorithm used for optimization.
    strategy: str or dict, optional
        Deprecated and will be remove in v0.4. It should now be set in algorithm configuration
        directly if it supports it.
    max_trials: int, optional
        Maximum number or trials before the experiment is considered done.
    max_broken: int, optional
        Number of broken trials for the experiment to be considered broken.
    storage: dict, optional
        Configuration of the storage backend.
    working_dir: str, optional
        Working directory created for the experiment inside which a unique folder will be created
        for each trial. Defaults to a temporary directory that is deleted at end of execution.
    max_idle_time: int, optional
        Deprecated and will be removed in v0.3.0.
        Use experiment.workon(reservation_timeout) instead.
    heartbeat: int, optional
        Frequency (seconds) at which the heartbeat of the trial is updated.
        If the heartbeat of a `reserved` trial is larger than twice the configured
        heartbeat, Oríon will reset the status of the trial to `interrupted`.
        This allows restoring lost trials (ex: due to killed worker).
        Defaults to ``orion.core.config.worker.max_idle_time``.
    debug: bool, optional
        If using in debug mode, the storage config is overridden with legacy:EphemeralDB.
        Defaults to False.
    branching: dict, optional
        Arguments to control the branching.

        branch_to: str, optional
            Name of the experiment to branch to. The parent experiment will be the one specified by
            ``(name, version)``, and the child will be ``(branch_to, 1)``.
        branch_from: str, optional
            Name of the experiment to branch from.
            The parent experiment will be the one specified by
            ``(branch_from, last version)``, and the child will be ``(name, 1)``.
        manual_resolution: bool, optional
            Starts the prompt to resolve manually the conflicts. Defaults to False.
        algorithm_change: bool, optional
            Whether to automatically solve the algorithm conflict (change of algo config).
            Defaults to True.
        orion_version_change: bool, optional
            Whether to automatically solve the orion version conflict.
            Defaults to True.
        code_change_type: str, optional
            How to resolve code change automatically. Must be one of 'noeffect', 'unsure' or
            'break'.  Defaults to 'break'.
        cli_change_type: str, optional
            How to resolve cli change automatically. Must be one of 'noeffect', 'unsure' or 'break'.
            Defaults to 'break'.
        config_change_type: str, optional
            How to resolve config change automatically. Must be one of 'noeffect', 'unsure' or
            'break'.  Defaults to 'break'.
    executor: `orion.executor.base.BaseExecutor`, optional
        Executor to run the experiment

    Raises
    ------
    :class:`orion.core.utils.singleton.SingletonAlreadyInstantiatedError`
        If the storage is already instantiated and given configuration is different.
        Storage is a singleton, you may only use one instance per process.
    :class:`orion.core.utils.exceptions.NoConfigurationError`
        The experiment is not in database and no space is provided by the user.
    :class:`orion.core.utils.exceptions.RaceCondition`
        There was a race condition during branching and new version cannot be inferred because of
        that. Single race conditions are normally handled seamlessly. If this error gets raised, it
        means that different modifications occurred during each race condition resolution. This is
        likely due to quick code change during experiment creation. Make sure your script is not
        generating files within your code repository.
    :class:`orion.core.utils.exceptions.BranchingEvent`
        The configuration is different than the corresponding one in DB and the branching cannot be
        solved automatically. This usually happens if the version=x is specified but the experiment
        ``(name, x)`` already has a child ``(name, x+1)``. If you really need to branch from version
        ``x``, give it a new name to branch to with ``branching={'branch_to': <new_name>}``.
    `NotImplementedError`
        If the algorithm or storage specified is not properly installed.

    """
    if max_idle_time:
        log.warning(
            "max_idle_time is deprecated. Use experiment.workon(reservation_timeout) instead."
        )

    setup_storage(storage=storage, debug=debug)

    def _build():
        # Single place for the build call so the retry below cannot drift
        # out of sync with the first attempt.
        return experiment_builder.build(
            name,
            version=version,
            space=space,
            algorithms=algorithms,
            strategy=strategy,
            max_trials=max_trials,
            max_broken=max_broken,
            branching=branching,
            working_dir=working_dir,
        )

    try:
        experiment = _build()
    except RaceCondition:
        # Try again, but if it fails again, raise. Race conditions due to version increment should
        # only occur once in a short window of time unless code version is changing at a crazy pace.
        try:
            experiment = _build()
        except RaceCondition as e:
            raise RaceCondition(
                "There was a race condition during branching and new version cannot be infered "
                "because of that. Single race conditions are normally handled seemlessly. If this "
                "error gets raised, it means that different modifications occured during each race "
                "condition resolution. This is likely due to quick code change during experiment "
                "creation. Make sure your script is not generating files within your code "
                "repository."
            ) from e

    return ExperimentClient(experiment, executor, heartbeat)
Пример #23
0
"""
Script to turn the database ``examples/plotting/database.pkl`` into a clean
version ``examples/base_db.pkl`` for the examples.
"""
import pprint
import shutil

from orion.client import get_experiment
from orion.core.io.orion_cmdline_parser import OrionCmdlineParser
from orion.storage.base import get_storage, setup_storage

# Work on a copy so the original plotting database is left untouched.
shutil.copy("./examples/plotting/database.pkl", "./examples/base_db.pkl")

# Point the storage singleton at the copied pickled database.
setup_storage(
    dict(
        type="legacy",
        database=dict(type="pickleddb", host="./examples/base_db.pkl"),
    ))

# Experiments of interest keyed by (name, version); the values are
# presumably replacement names used later in the script — TODO confirm
# against the rest of the file.
filter_exps = {
    ("lateral-view-pa4", 1): "2-dim-exp",
    ("lateral-view-dualnet2", 1): "2-dim-shape-exp",
    ("lateral-view-multitask2", 1): "4-dim-cat-shape-exp",
    ("lateral-view-multitask3", 1): "3-dim-cat-shape-exp",
}

storage = get_storage()


def update_dropout(experiment_config):
    metadata = experiment_config["metadata"]
Пример #24
0
def storage(setup_pickleddb_database):
    """Set up the default storage and yield the singleton.

    Generator-style helper (a pytest fixture decorator is presumably applied
    outside this chunk — TODO confirm). `setup_pickleddb_database` appears to
    be a fixture preparing the pickleddb database before setup.
    """
    setup_storage()
    yield get_storage()
Пример #25
0
def get_or_create_benchmark(
    name, algorithms=None, targets=None, storage=None, debug=False
):
    """
    Create or get a benchmark object.

    Parameters
    ----------
    name: str
        Name of the benchmark
    algorithms: list, optional
        Algorithms used for benchmark, each algorithm can be a string or dict.
    targets: list, optional
        Targets for the benchmark, each target will be a dict with two keys.

        assess: list
            Assessment objects
        task: list
            Task objects
    storage: dict, optional
        Configuration of the storage backend.
    debug: bool, optional
        If using in debug mode, the storage config is overrided with legacy:EphemeralDB.
        Defaults to False.

    Returns
    -------
    An instance of `orion.benchmark.Benchmark`

    Raises
    ------
    `orion.core.utils.exceptions.NoConfigurationError`
        If the benchmark is not in the database and algorithms/targets were not provided.
    """
    setup_storage(storage=storage, debug=debug)

    # fetch benchmark from db
    db_config = _fetch_benchmark(name)

    benchmark_id = None
    input_configure = None

    if db_config:
        # Keep the caller-supplied configuration around so we can warn if it
        # diverges from what is stored in the database.
        if algorithms or targets:
            input_benchmark = Benchmark(name, algorithms, targets)
            input_configure = input_benchmark.configuration
        benchmark_id, algorithms, targets = _resolve_db_config(db_config)

    if not algorithms or not targets:
        raise NoConfigurationError(
            "Benchmark {} does not exist in DB, "
            "algorithms and targets space was not defined.".format(name)
        )

    benchmark = _create_benchmark(name, algorithms, targets)

    if input_configure and input_benchmark.configuration != benchmark.configuration:
        # ``logger.warn`` is deprecated; use ``logger.warning``.
        logger.warning(
            "Benchmark with same name is found but has different configuration, "
            "which will be used for this creation.\n{}".format(benchmark.configuration)
        )

    if benchmark_id is None:
        logger.debug("Benchmark not found in DB. Now attempting registration in DB.")
        try:
            _register_benchmark(benchmark)
            logger.debug("Benchmark successfully registered in DB.")
        except DuplicateKeyError:
            logger.info(
                "Benchmark registration failed. This is likely due to a race condition. "
                "Now rolling back and re-attempting building it."
            )
            # Fix: the original discarded the result of the retry and fell
            # through to return the local, unregistered benchmark. Return the
            # benchmark rebuilt from the now-existing DB entry instead.
            return get_or_create_benchmark(name, algorithms, targets, storage, debug)

    return benchmark