Example #1
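The snippets on this page are excerpts and omit their module-level imports. A plausible preamble for running them against a pre-3.0 Optuna (where UniformDistribution and the other distribution classes used below still exist) might look like this:

import optuna
from optuna.distributions import (
    CategoricalDistribution,
    DiscreteUniformDistribution,
    IntLogUniformDistribution,
    IntUniformDistribution,
    LogUniformDistribution,
    UniformDistribution,
)
from optuna.trial import FrozenTrial, TrialState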
def test_update_trial_second_write() -> None:

    storage = create_test_storage()
    study_id = storage.create_new_study()
    template = FrozenTrial(
        number=1,
        state=TrialState.RUNNING,
        value=0.1,
        datetime_start=None,
        datetime_complete=None,
        params={
            "paramA": 0.1,
            "paramB": 1.1
        },
        distributions={
            "paramA": UniformDistribution(0, 1),
            "paramB": UniformDistribution(0, 2)
        },
        user_attrs={
            "userA": 2,
            "userB": 3
        },
        system_attrs={
            "sysA": 4,
            "sysB": 5
        },
        intermediate_values={
            3: 1.2,
            5: 9.2
        },
        trial_id=1,
    )
    trial_id = storage.create_new_trial(study_id, template)
    trial_before_update = storage.get_trial(trial_id)
    storage._update_trial(
        trial_id,
        state=None,
        value=1.1,
        intermediate_values={
            3: 2.3,
            7: 3.3
        },
        params={
            "paramA": 0.2,
            "paramC": 2.3
        },
        distributions_={
            "paramA": UniformDistribution(0, 1),
            "paramC": UniformDistribution(0, 4)
        },
        user_attrs={
            "userA": 1,
            "userC": "attr"
        },
        system_attrs={
            "sysA": 6,
            "sysC": 8
        },
    )
    trial_after_update = storage.get_trial(trial_id)
    expected_attrs = {
        "_trial_id": trial_before_update._trial_id,
        "number": trial_before_update.number,
        "state": TrialState.RUNNING,
        "value": 1.1,
        "params": {
            "paramA": 0.2,
            "paramB": 1.1,
            "paramC": 2.3
        },
        "intermediate_values": {
            3: 2.3,
            5: 9.2,
            7: 3.3
        },
        "_distributions": {
            "paramA": UniformDistribution(0, 1),
            "paramB": UniformDistribution(0, 2),
            "paramC": UniformDistribution(0, 4),
        },
        "user_attrs": {
            "userA": 1,
            "userB": 3,
            "userC": "attr"
        },
        "system_attrs": {
            "sysA": 6,
            "sysB": 5,
            "sysC": 8
        },
    }
    for key, value in expected_attrs.items():
        assert getattr(trial_after_update, key) == value
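
The assertions above encode the merge semantics expected of _update_trial: dict-valued fields (params, distributions, intermediate_values, user_attrs, system_attrs) are merged key by key rather than replaced wholesale. The expected values can be reproduced with plain dict merging:

before = {"paramA": 0.1, "paramB": 1.1}
update = {"paramA": 0.2, "paramC": 2.3}
assert {**before, **update} == {"paramA": 0.2, "paramB": 1.1, "paramC": 2.3}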
Example #2

@pytest.mark.parametrize(
    "fields_to_modify, kwargs",
    [
        (
            {"state": TrialState.COMPLETE, "datetime_complete": None},
            {"state": TrialState.COMPLETE},
        ),
        ({"value": 1.1}, {"value": 1.1}),
        ({"intermediate_values": {1: 2.3, 3: 2.5}}, {"intermediate_values": {1: 2.3, 3: 2.5}}),
        (
            {
                "params": {"paramA": 3, "paramB": "bar"},
                "_distributions": {
                    "paramA": UniformDistribution(0, 3),
                    "paramB": CategoricalDistribution(("foo", "bar")),
                },
            },
            {
                "params": {
                    "paramA": UniformDistribution(0, 3).to_internal_repr(3),
                    "paramB": CategoricalDistribution(["foo", "bar"]).to_internal_repr("bar"),
                },
                "distributions_": {
                    "paramA": UniformDistribution(0, 3),
                    "paramB": CategoricalDistribution(["foo", "bar"]),
                },
            },
        ),
        (
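
The to_internal_repr calls in the expected kwargs convert external parameter values into Optuna's internal representation; in the pre-3.0 API, categorical values map to the index of the choice while numeric values pass through unchanged:

CategoricalDistribution(["foo", "bar"]).to_internal_repr("bar")  # -> 1 (index of the choice)
UniformDistribution(0, 3).to_internal_repr(3)                    # -> 3 (unchanged)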
Example #3
def test_create_new_trial_with_template_trial(storage_mode: str) -> None:

    start_time = datetime.now()
    complete_time = datetime.now()
    template_trial = FrozenTrial(
        state=TrialState.COMPLETE,
        value=10000,
        datetime_start=start_time,
        datetime_complete=complete_time,
        params={"x": 0.5},
        distributions={"x": UniformDistribution(0, 1)},
        user_attrs={"foo": "bar"},
        system_attrs={"baz": 123},
        intermediate_values={
            1: 10,
            2: 100,
            3: 1000
        },
        number=55,  # This entry is ignored.
        trial_id=-1,  # dummy value (unused).
    )

    def _check_trials(trials: List[FrozenTrial], idx: int,
                      trial_id: int) -> None:
        assert len(trials) == idx + 1
        assert len({t._trial_id for t in trials}) == idx + 1
        assert trial_id in {t._trial_id for t in trials}
        assert {t.number for t in trials} == set(range(idx + 1))
        assert all(t.state == template_trial.state for t in trials)
        assert all(t.params == template_trial.params for t in trials)
        assert all(t.distributions == template_trial.distributions
                   for t in trials)
        assert all(t.intermediate_values == template_trial.intermediate_values
                   for t in trials)
        assert all(t.user_attrs == template_trial.user_attrs for t in trials)
        assert all(t.system_attrs == template_trial.system_attrs
                   for t in trials)
        assert all(t.datetime_start == template_trial.datetime_start
                   for t in trials)
        assert all(t.datetime_complete == template_trial.datetime_complete
                   for t in trials)
        assert all(t.value == template_trial.value for t in trials)

    with StorageSupplier(storage_mode) as storage:

        study_id = storage.create_new_study()

        n_trial_in_study = 3
        for i in range(n_trial_in_study):
            trial_id = storage.create_new_trial(study_id,
                                                template_trial=template_trial)
            trials = storage.get_all_trials(study_id)
            _check_trials(trials, i, trial_id)

        # Create a trial in a non-existent study.
        with pytest.raises(KeyError):
            storage.create_new_trial(study_id + 1)

        study_id2 = storage.create_new_study()
        for i in range(n_trial_in_study):
            storage.create_new_trial(study_id2, template_trial=template_trial)
            trials = storage.get_all_trials(study_id2)
            assert {t.number for t in trials} == set(range(i + 1))

        trials = storage.get_all_trials(study_id) + storage.get_all_trials(
            study_id2)
        # Check trial_ids are unique across studies.
        assert len({t._trial_id for t in trials}) == 2 * n_trial_in_study
Example #4
def test_set_trial_param(storage_mode: str) -> None:

    with StorageSupplier(storage_mode) as storage:

        # Set up the test across multiple studies and trials.
        study_id = storage.create_new_study()
        trial_id_1 = storage.create_new_trial(study_id)
        trial_id_2 = storage.create_new_trial(study_id)
        trial_id_3 = storage.create_new_trial(storage.create_new_study())

        # Set up distributions.
        distribution_x = UniformDistribution(low=1.0, high=2.0)
        distribution_y_1 = CategoricalDistribution(choices=("Shibuya", "Ebisu",
                                                            "Meguro"))
        distribution_y_2 = CategoricalDistribution(choices=("Shibuya",
                                                            "Shinsen"))
        distribution_z = LogUniformDistribution(low=1.0, high=100.0)

        # Set new params.
        storage.set_trial_param(trial_id_1, "x", 0.5, distribution_x)
        storage.set_trial_param(trial_id_1, "y", 2, distribution_y_1)
        assert storage.get_trial_param(trial_id_1, "x") == 0.5
        assert storage.get_trial_param(trial_id_1, "y") == 2
        # Check set_param breaks neither get_trial nor get_trial_params.
        assert storage.get_trial(trial_id_1).params == {
            "x": 0.5,
            "y": "Meguro"
        }
        assert storage.get_trial_params(trial_id_1) == {
            "x": 0.5,
            "y": "Meguro"
        }
        # Duplicated registration should overwrite.
        storage.set_trial_param(trial_id_1, "x", 0.6, distribution_x)
        assert storage.get_trial_param(trial_id_1, "x") == 0.6
        assert storage.get_trial(trial_id_1).params == {
            "x": 0.6,
            "y": "Meguro"
        }
        assert storage.get_trial_params(trial_id_1) == {
            "x": 0.6,
            "y": "Meguro"
        }

        # Set params to another trial.
        storage.set_trial_param(trial_id_2, "x", 0.3, distribution_x)
        storage.set_trial_param(trial_id_2, "z", 0.1, distribution_z)
        assert storage.get_trial_param(trial_id_2, "x") == 0.3
        assert storage.get_trial_param(trial_id_2, "z") == 0.1
        assert storage.get_trial(trial_id_2).params == {"x": 0.3, "z": 0.1}
        assert storage.get_trial_params(trial_id_2) == {"x": 0.3, "z": 0.1}

        # Set params with distributions that do not match previous ones.
        with pytest.raises(ValueError):
            storage.set_trial_param(trial_id_2, "x", 0.5, distribution_z)
        with pytest.raises(ValueError):
            storage.set_trial_param(trial_id_2, "y", 0.5, distribution_z)
        # Choices in CategoricalDistribution must match, including their order.
        with pytest.raises(ValueError):
            storage.set_trial_param(
                trial_id_2, "y", 2,
                CategoricalDistribution(choices=("Meguro", "Shibuya",
                                                 "Ebisu")))

        storage.set_trial_state(trial_id_2, TrialState.COMPLETE)
        # Cannot assign params to finished trial.
        with pytest.raises(RuntimeError):
            storage.set_trial_param(trial_id_2, "y", 2, distribution_y_1)
        # Check the previous call does not change the params.
        with pytest.raises(KeyError):
            storage.get_trial_param(trial_id_2, "y")
        # State should be checked prior to distribution compatibility.
        with pytest.raises(RuntimeError):
            storage.set_trial_param(trial_id_2, "y", 0.4, distribution_z)

        # Set params of trials in a different study.
        storage.set_trial_param(trial_id_3, "y", 1, distribution_y_2)
        assert storage.get_trial_param(trial_id_3, "y") == 1
        assert storage.get_trial(trial_id_3).params == {"y": "Shinsen"}
        assert storage.get_trial_params(trial_id_3) == {"y": "Shinsen"}

        # Set params of non-existent trial.
        non_existent_trial_id = max([trial_id_1, trial_id_2, trial_id_3]) + 1
        with pytest.raises(KeyError):
            storage.set_trial_param(non_existent_trial_id, "x", 0.1,
                                    distribution_x)
Example #5
    from typing import Any  # NOQA
    from typing import Callable  # NOQA
    from typing import Dict  # NOQA
    from typing import Optional  # NOQA

EXAMPLE_ATTRS = {
    "dataset": "MNIST",
    "none": None,
    "json_serializable": {
        "baseline_score": 0.001,
        "tags": ["image", "classification"]
    },
}

EXAMPLE_DISTRIBUTIONS = {
    "x": UniformDistribution(low=1.0, high=2.0),
    "y": CategoricalDistribution(choices=("Otemachi", "Tokyo", "Ginza")),
}  # type: Dict[str, BaseDistribution]

EXAMPLE_TRIALS = [
    FrozenTrial(
        number=0,  # dummy
        value=1.0,
        state=TrialState.COMPLETE,
        user_attrs={},
        system_attrs={},
        params={
            "x": 0.5,
            "y": "Ginza"
        },
        distributions=EXAMPLE_DISTRIBUTIONS,
Example #6
def prepare_study_with_trials(no_trials: bool = False,
                              less_than_two: bool = False,
                              with_c_d: bool = True) -> Study:
    """Prepare a study for tests.

    Args:
        no_trials: If ``True``, return a study with no trials.
        less_than_two: If ``True``, return a study with only two trials, in which
            'param_a' (and 'param_c') appears only once while 'param_b' (and 'param_d')
            appears twice in `study.trials`.
        with_c_d: If ``True``, the study has four hyperparameters named 'param_a',
            'param_b', 'param_c', and 'param_d'. Otherwise, there are only two
            hyperparameters ('param_a' and 'param_b').

    Returns:
        :class:`~optuna.study.Study`

    """

    study = create_study()
    if no_trials:
        return study
    study.add_trial(
        create_trial(
            value=0.0,
            params={
                "param_a": 1.0,
                "param_b": 2.0,
                "param_c": 3.0,
                "param_d": 4.0
            } if with_c_d else {
                "param_a": 1.0,
                "param_b": 2.0
            },
            distributions={
                "param_a": UniformDistribution(0.0, 3.0),
                "param_b": UniformDistribution(0.0, 3.0),
                "param_c": UniformDistribution(2.0, 5.0),
                "param_d": UniformDistribution(2.0, 5.0),
            } if with_c_d else {
                "param_a": UniformDistribution(0.0, 3.0),
                "param_b": UniformDistribution(0.0, 3.0),
            },
        ))
    study.add_trial(
        create_trial(
            value=2.0,
            params={
                "param_b": 0.0,
                "param_d": 4.0
            } if with_c_d else {"param_b": 0.0},
            distributions={
                "param_b": UniformDistribution(0.0, 3.0),
                "param_d": UniformDistribution(2.0, 5.0),
            } if with_c_d else {"param_b": UniformDistribution(0.0, 3.0)},
        ))
    if less_than_two:
        return study

    study.add_trial(
        create_trial(
            value=1.0,
            params={
                "param_a": 2.5,
                "param_b": 1.0,
                "param_c": 4.5,
                "param_d": 2.0
            } if with_c_d else {
                "param_a": 2.5,
                "param_b": 1.0
            },
            distributions={
                "param_a": UniformDistribution(0.0, 3.0),
                "param_b": UniformDistribution(0.0, 3.0),
                "param_c": UniformDistribution(2.0, 5.0),
                "param_d": UniformDistribution(2.0, 5.0),
            } if with_c_d else {
                "param_a": UniformDistribution(0.0, 3.0),
                "param_b": UniformDistribution(0.0, 3.0),
            },
        ))
    return study
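
A quick usage sketch of this helper:

study = prepare_study_with_trials(with_c_d=False)
assert len(study.trials) == 3
assert set(study.trials[0].params) == {"param_a", "param_b"}
assert set(study.trials[1].params) == {"param_b"}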
Example #7
from optuna.distributions import BaseDistribution
from optuna.distributions import CategoricalDistribution
from optuna.distributions import DiscreteUniformDistribution
from optuna.distributions import IntLogUniformDistribution
from optuna.distributions import IntUniformDistribution
from optuna.distributions import LogUniformDistribution
from optuna.distributions import UniformDistribution


@pytest.mark.parametrize(
    "param,distribution",
    [
        (0, IntUniformDistribution(0, 3)),
        (1, IntLogUniformDistribution(1, 10)),
        (2, IntUniformDistribution(0, 10, step=2)),
        (0.0, UniformDistribution(0, 3)),
        (1.0, LogUniformDistribution(1, 10)),
        (0.2, DiscreteUniformDistribution(0, 1, q=0.2)),
        ("foo", CategoricalDistribution(["foo"])),
        ("bar", CategoricalDistribution(["foo", "bar", "baz"])),
    ],
)
def test_search_space_transform_shapes_dtypes(param: Any, distribution: BaseDistribution) -> None:
    trans = _SearchSpaceTransform({"x0": distribution})
    trans_params = trans.transform({"x0": param})

    if isinstance(distribution, CategoricalDistribution):
        expected_bounds_shape = (len(distribution.choices), 2)
        expected_params_shape = (len(distribution.choices),)
    else:
        expected_bounds_shape = (1, 2)
Example #8
    assert sampler._rng.bytes(10) == restored_sampler._rng.bytes(10)


def test_random_sampler_reseed_rng() -> None:
    sampler = optuna.samplers.RandomSampler()
    original_seed = sampler._rng.seed

    sampler.reseed_rng()
    assert original_seed != sampler._rng.seed


@parametrize_sampler
@pytest.mark.parametrize(
    "distribution",
    [
        UniformDistribution(-1.0, 1.0),
        UniformDistribution(0.0, 1.0),
        UniformDistribution(-1.0, 0.0),
    ],
)
def test_uniform(sampler_class, distribution):
    # type: (typing.Callable[[], BaseSampler], UniformDistribution) -> None

    study = optuna.study.create_study(sampler=sampler_class())
    points = np.array([
        study.sampler.sample_independent(study, _create_new_trial(study), "x",
                                         distribution) for _ in range(100)
    ])
    assert np.all(points >= distribution.low)
    assert np.all(points < distribution.high)
    assert not isinstance(
Example #9
def test_group_decomposed_search_space() -> None:
    search_space = _GroupDecomposedSearchSpace()
    study = create_study()

    # No trial.
    assert search_space.calculate(study).search_spaces == []

    # A single parameter.
    study.optimize(lambda t: t.suggest_int("x", 0, 10), n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)}
    ]

    # Disjoint parameters.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10) + t.suggest_float("z", -3, 3),
        n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
    ]

    # Parameters that include one of the search spaces in the group.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10)
        + t.suggest_float("z", -3, 3)
        + t.suggest_float("u", 1e-2, 1e2, log=True)
        + bool(t.suggest_categorical("v", ["A", "B", "C"])),
        n_trials=1,
    )
    assert search_space.calculate(study).search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "z": UniformDistribution(low=-3, high=3),
            "y": IntUniformDistribution(low=0, high=10),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        },
    ]

    # A parameter that is included in one of the search spaces in the group.
    study.optimize(lambda t: t.suggest_float("u", 1e-2, 1e2, log=True),
                   n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
    ]

    # Parameters whose intersection with one of the search spaces in the group is non-empty.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10) + t.suggest_int("w", 2, 8, log=True),
        n_trials=1,
    )
    assert search_space.calculate(study).search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10)
        },
        {
            "z": UniformDistribution(low=-3, high=3)
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
        {
            "w": IntLogUniformDistribution(low=2, high=8)
        },
    ]

    search_space = _GroupDecomposedSearchSpace()
    study = create_study()

    # Failed or pruned trials are not considered in the calculation of
    # an intersection search space.
    def objective(trial: Trial, exception: Exception) -> float:

        trial.suggest_float("a", 0, 1)
        raise exception

    study.optimize(lambda t: objective(t, RuntimeError()),
                   n_trials=1,
                   catch=(RuntimeError, ))
    study.optimize(lambda t: objective(t, TrialPruned()), n_trials=1)
    assert search_space.calculate(study).search_spaces == []

    # If two parameters have the same name but different distributions,
    # the first one takes priority.
    study.optimize(lambda t: t.suggest_float("a", -1, 1), n_trials=1)
    study.optimize(lambda t: t.suggest_float("a", 0, 1), n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {"a": UniformDistribution(low=-1, high=1)}
    ]
Example #10
    from optuna.distributions import BaseDistribution  # NOQA
    from optuna.structs import FrozenTrial  # NOQA
    from optuna.study import Study  # NOQA
    from optuna.trial import T  # NOQA
    from optuna.trial import Trial  # NOQA

parametrize_sampler = pytest.mark.parametrize('sampler_class', [
    optuna.samplers.RandomSampler,
    lambda: optuna.samplers.TPESampler(n_startup_trials=0),
    lambda: optuna.integration.SkoptSampler(skopt_kwargs={'n_initial_points': 1}),
])


@parametrize_sampler
@pytest.mark.parametrize('distribution', [
    UniformDistribution(-1., 1.),
    UniformDistribution(0., 1.),
    UniformDistribution(-1., 0.)
])
def test_uniform(sampler_class, distribution):
    # type: (typing.Callable[[], BaseSampler], UniformDistribution) -> None

    study = optuna.study.create_study(sampler=sampler_class())
    in_trial_study = InTrialStudy(study)
    points = np.array([
        study.sampler.sample_independent(in_trial_study,
                                         _create_new_trial(study), 'x',
                                         distribution) for _ in range(100)
    ])
    assert np.all(points >= distribution.low)
    assert np.all(points < distribution.high)
Example #11
def test_search_space_group() -> None:
    search_space_group = _SearchSpaceGroup()

    # No search space.
    assert search_space_group.search_spaces == []

    # No distributions.
    search_space_group.add_distributions({})
    assert search_space_group.search_spaces == []

    # Add a single distribution.
    search_space_group.add_distributions({"x": IntUniformDistribution(low=0, high=10)})
    assert search_space_group.search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)}
    ]

    # Add the same distribution again.
    search_space_group.add_distributions({"x": IntUniformDistribution(low=0, high=10)})
    assert search_space_group.search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)}
    ]

    # Add disjoint distributions.
    search_space_group.add_distributions({
        "y": IntUniformDistribution(low=0, high=10),
        "z": UniformDistribution(low=-3, high=3),
    })
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
    ]

    # Add distributions that include one of the search spaces in the group.
    search_space_group.add_distributions({
        "y": IntUniformDistribution(low=0, high=10),
        "z": UniformDistribution(low=-3, high=3),
        "u": LogUniformDistribution(low=1e-2, high=1e2),
        "v": CategoricalDistribution(choices=["A", "B", "C"]),
    })
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        },
    ]

    # Add a distribution that is included in one of the search spaces in the group.
    search_space_group.add_distributions(
        {"u": LogUniformDistribution(low=1e-2, high=1e2)})
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
    ]

    # Add distributions whose intersection with one of the search spaces in the group is non-empty.
    search_space_group.add_distributions({
        "y": IntUniformDistribution(low=0, high=10),
        "w": IntLogUniformDistribution(low=2, high=8),
    })
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10)
        },
        {
            "z": UniformDistribution(low=-3, high=3)
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
        {
            "w": IntLogUniformDistribution(low=2, high=8)
        },
    ]

    # Add distributions that include some of the search spaces in the group.
    search_space_group.add_distributions({
        "y": IntUniformDistribution(low=0, high=10),
        "w": IntLogUniformDistribution(low=2, high=8),
        "t": UniformDistribution(low=10, high=100),
    })
    assert search_space_group.search_spaces == [
        {
            "x": IntUniformDistribution(low=0, high=10)
        },
        {
            "y": IntUniformDistribution(low=0, high=10)
        },
        {
            "z": UniformDistribution(low=-3, high=3)
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2)
        },
        {
            "v": CategoricalDistribution(choices=["A", "B", "C"])
        },
        {
            "w": IntLogUniformDistribution(low=2, high=8)
        },
        {
            "t": UniformDistribution(low=10, high=100)
        },
    ]
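
The assertions above encode a splitting rule: each new batch of distributions carves every existing search space into its overlap with the batch and its remainder, and any leftover parameters form a new space. A simplified, order-agnostic sketch of that rule (keyed by parameter name only; the real _SearchSpaceGroup also compares the distributions themselves and may order the resulting spaces differently):

def split_spaces(spaces, new):
    # `spaces` is a list of pairwise-disjoint dicts; `new` maps names to distributions.
    result = []
    remaining = dict(new)
    for space in spaces:
        overlap = {k: v for k, v in space.items() if k in remaining}
        rest = {k: v for k, v in space.items() if k not in remaining}
        if rest:
            result.append(rest)     # part of an old space not covered by `new`
        if overlap:
            result.append(overlap)  # part shared with `new`
            for k in overlap:
                del remaining[k]
    if remaining:
        result.append(remaining)    # parameters seen for the first time
    return result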
Example #12
def test_get_contour_plot():
    # type: () -> None

    # Test with no trial.
    study = create_study()
    figure = _get_contour_plot(study)
    assert len(figure.data) == 0

    study._append_trial(value=0.0,
                        params={
                            'param_a': 1.0,
                            'param_b': 2.0,
                        },
                        distributions={
                            'param_a': UniformDistribution(0.0, 3.0),
                            'param_b': UniformDistribution(0.0, 3.0),
                        })
    study._append_trial(value=2.0,
                        params={
                            'param_b': 0.0,
                        },
                        distributions={
                            'param_b': UniformDistribution(0.0, 3.0),
                        })
    study._append_trial(value=1.0,
                        params={
                            'param_a': 2.5,
                            'param_b': 1.0,
                        },
                        distributions={
                            'param_a': UniformDistribution(0.0, 3.0),
                            'param_b': UniformDistribution(0.0, 3.0),
                        })

    # Test with trials.
    figure = _get_contour_plot(study)
    assert figure.data[0]['x'] == (1.0, 2.5)
    assert figure.data[0]['y'] == (0.0, 1.0, 2.0)
    assert figure.data[1]['x'] == (1.0, 2.5)
    assert figure.data[1]['y'] == (2.0, 1.0)
    assert figure.layout['xaxis']['range'] == (1.0, 2.5)
    assert figure.layout['yaxis']['range'] == (0.0, 2.0)

    # Test ValueError due to wrong params.
    with pytest.raises(ValueError):
        _get_contour_plot(study, ['optuna', 'Optuna'])

    # Test with selected parameters.
    figure = _get_contour_plot(study, params=['param_a', 'param_b'])
    assert figure.data[0]['x'] == (1.0, 2.5)
    assert figure.data[0]['y'] == (0.0, 1.0, 2.0)
    assert figure.data[1]['x'] == (1.0, 2.5)
    assert figure.data[1]['y'] == (2.0, 1.0)
    assert figure.layout['xaxis']['range'] == (1.0, 2.5)
    assert figure.layout['yaxis']['range'] == (0.0, 2.0)

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = _get_contour_plot(study)
    assert len(figure.data) == 0
Example #13
def test_get_parallel_coordinate_plot():
    # type: () -> None

    # Test with no trial.
    study = create_study()
    figure = _get_parallel_coordinate_plot(study)
    assert len(figure.data) == 0

    study._append_trial(value=0.0,
                        params={
                            'param_a': 1.0,
                            'param_b': 2.0,
                        },
                        distributions={
                            'param_a': UniformDistribution(0.0, 3.0),
                            'param_b': UniformDistribution(0.0, 3.0),
                        })
    study._append_trial(value=2.0,
                        params={
                            'param_b': 0.0,
                        },
                        distributions={
                            'param_b': UniformDistribution(0.0, 3.0),
                        })
    study._append_trial(value=1.0,
                        params={
                            'param_a': 2.5,
                            'param_b': 1.0,
                        },
                        distributions={
                            'param_a': UniformDistribution(0.0, 3.0),
                            'param_b': UniformDistribution(0.0, 3.0),
                        })

    # Test with trials.
    figure = _get_parallel_coordinate_plot(study)
    assert len(figure.data[0]['dimensions']) == 3
    assert figure.data[0]['dimensions'][0]['label'] == 'Objective Value'
    assert figure.data[0]['dimensions'][0]['range'] == (0.0, 2.0)
    assert figure.data[0]['dimensions'][0]['values'] == (0.0, 2.0, 1.0)
    assert figure.data[0]['dimensions'][1]['label'] == 'param_a'
    assert figure.data[0]['dimensions'][1]['range'] == (1.0, 2.5)
    assert figure.data[0]['dimensions'][1]['values'] == (1.0, 2.5)
    assert figure.data[0]['dimensions'][2]['label'] == 'param_b'
    assert figure.data[0]['dimensions'][2]['range'] == (0.0, 2.0)
    assert figure.data[0]['dimensions'][2]['values'] == (2.0, 0.0, 1.0)

    # Test with a selected parameter.
    figure = _get_parallel_coordinate_plot(study, params=['param_a'])
    assert len(figure.data[0]['dimensions']) == 2
    assert figure.data[0]['dimensions'][0]['label'] == 'Objective Value'
    assert figure.data[0]['dimensions'][0]['range'] == (0.0, 2.0)
    assert figure.data[0]['dimensions'][0]['values'] == (0.0, 2.0, 1.0)
    assert figure.data[0]['dimensions'][1]['label'] == 'param_a'
    assert figure.data[0]['dimensions'][1]['range'] == (1.0, 2.5)
    assert figure.data[0]['dimensions'][1]['values'] == (1.0, 2.5)

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = _get_parallel_coordinate_plot(study)
    assert len(figure.data) == 0
Example #14
    def to_optuna(self):
        """Return an equivalent Optuna distribution."""
        if self.prior != 'log':
            return UniformDistribution(low=self.low, high=self.high)
        else:
            return LogUniformDistribution(low=self.low, high=self.high)
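
Assuming the enclosing class is a search-space wrapper exposing low, high, and prior attributes (the class itself is not shown in this excerpt), usage would look roughly like:

space = Real(low=1e-4, high=1e-1, prior='log')  # hypothetical wrapper class
dist = space.to_optuna()                        # -> LogUniformDistribution(low=0.0001, high=0.1)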
Example #15
    def suggest_uniform(self, name: str, low: float, high: float) -> float:

        return self._suggest(name, UniformDistribution(low=low, high=high))
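
A caller then receives a float drawn from the half-open interval [low, high), as the sampler tests in Example #8 assert:

dropout = trial.suggest_uniform("dropout", 0.0, 0.5)  # float in [0.0, 0.5); `trial` is an active Trial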
Example #16
    def suggest_uniform(self, name, low, high):
        # type: (str, float, float) -> float

        return self._suggest(name, UniformDistribution(low=low, high=high))
Example #17
def test_frozen_trial_validate() -> None:

    # Valid.
    valid_trial = _create_frozen_trial()
    valid_trial._validate()

    # Invalid: `datetime_start` is not set.
    invalid_trial = copy.copy(valid_trial)
    invalid_trial.datetime_start = None
    with pytest.raises(ValueError):
        invalid_trial._validate()

    # Invalid: `state` is `RUNNING` and `datetime_complete` is set.
    invalid_trial = copy.copy(valid_trial)
    invalid_trial.state = TrialState.RUNNING
    with pytest.raises(ValueError):
        invalid_trial._validate()

    # Invalid: `state` is not `RUNNING` and `datetime_complete` is not set.
    for state in [TrialState.COMPLETE, TrialState.PRUNED, TrialState.FAIL]:
        invalid_trial = copy.copy(valid_trial)
        invalid_trial.state = state
        invalid_trial.datetime_complete = None
        with pytest.raises(ValueError):
            invalid_trial._validate()

    # Invalid: `state` is `COMPLETE` and `value` is not set.
    invalid_trial = copy.copy(valid_trial)
    invalid_trial.value = None
    with pytest.raises(ValueError):
        invalid_trial._validate()

    # Invalid: Inconsistent `params` and `distributions`.
    inconsistent_pairs = [
        # `params` has an extra element.
        ({"x": 0.1, "y": 0.5}, {"x": UniformDistribution(0, 1)}),
        # `distributions` has an extra element.
        ({"x": 0.1}, {"x": UniformDistribution(0, 1), "y": LogUniformDistribution(0.1, 1.0)}),
        # The value of `x` isn't contained in the distribution.
        ({"x": -0.5}, {"x": UniformDistribution(0, 1)}),
    ]  # type: List[Tuple[Dict[str, Any], Dict[str, BaseDistribution]]]

    for params, distributions in inconsistent_pairs:
        invalid_trial = copy.copy(valid_trial)
        invalid_trial.params = params
        invalid_trial.distributions = distributions
        with pytest.raises(ValueError):
            invalid_trial._validate()
Example #18
def test_intersection_search_space() -> None:
    search_space = optuna.samplers.IntersectionSearchSpace()
    study = optuna.create_study()

    # No trial.
    assert search_space.calculate(study) == {}
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # First trial.
    study.optimize(
        lambda t: t.suggest_uniform("y", -3, 3) + t.suggest_int("x", 0, 10),
        n_trials=1)
    assert search_space.calculate(study) == {
        "x": IntUniformDistribution(low=0, high=10),
        "y": UniformDistribution(low=-3, high=3),
    }
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # A sorted `OrderedDict` can be returned instead of a `dict`.
    assert search_space.calculate(study, ordered_dict=True) == OrderedDict([
        ("x", IntUniformDistribution(low=0, high=10)),
        ("y", UniformDistribution(low=-3, high=3)),
    ])
    assert search_space.calculate(
        study, ordered_dict=True) == optuna.samplers.intersection_search_space(
            study, ordered_dict=True)

    # Second trial (only 'y' parameter is suggested in this trial).
    study.optimize(lambda t: t.suggest_uniform("y", -3, 3), n_trials=1)
    assert search_space.calculate(study) == {
        "y": UniformDistribution(low=-3, high=3)
    }
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # Failed or pruned trials are not considered in the calculation of
    # an intersection search space.
    def objective(trial, exception):
        # type: (optuna.trial.Trial, Exception) -> float

        trial.suggest_uniform("z", 0, 1)
        raise exception

    study.optimize(lambda t: objective(t, RuntimeError()),
                   n_trials=1,
                   catch=(RuntimeError, ))
    study.optimize(lambda t: objective(t, optuna.TrialPruned()), n_trials=1)
    assert search_space.calculate(study) == {
        "y": UniformDistribution(low=-3, high=3)
    }
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # If two parameters have the same name but different distributions,
    # those are regarded as different parameters.
    study.optimize(lambda t: t.suggest_uniform("y", -1, 1), n_trials=1)
    assert search_space.calculate(study) == {}
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)

    # Once the search space becomes empty, it stays empty.
    study.optimize(
        lambda t: t.suggest_uniform("y", -3, 3) + t.suggest_int("x", 0, 10),
        n_trials=1)
    assert search_space.calculate(study) == {}
    assert search_space.calculate(
        study) == optuna.samplers.intersection_search_space(study)
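
For intuition, the intersection these assertions describe can be computed by hand from the completed trials (a simplified sketch; the real IntersectionSearchSpace additionally caches results across calls):

def manual_intersection(study):
    completed = [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]
    if not completed:
        return {}
    result = dict(completed[0].distributions)
    for t in completed[1:]:
        # Keep a parameter only if this trial saw the same name *and* distribution.
        result = {name: dist for name, dist in result.items()
                  if t.distributions.get(name) == dist}
    return result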
Example #19
     "high": 5
 }, IntUniformDistribution(0, 5)),
 (
     {
         "type": "int",
         "low": 1,
         "high": 100,
         "log": True
     },
     IntLogUniformDistribution(1, 100),
 ),
 ({
     "type": "float",
     "low": 0,
     "high": 1
 }, UniformDistribution(0, 1)),
 (
     {
         "type": "float",
         "low": 0,
         "high": 10,
         "step": 2
     },
     DiscreteUniformDistribution(0, 10, 2),
 ),
 (
     {
         "type": "float",
         "low": 1,
         "high": 100,
         "log": True
Example #20
    from typing import Optional  # NOQA
    from typing import Tuple  # NOQA

# TODO(Yanase): Remove _number from system_attrs after adding TrialModel.number.
EXAMPLE_ATTRS = {
    'dataset': 'MNIST',
    'none': None,
    'json_serializable': {
        'baseline_score': 0.001,
        'tags': ['image', 'classification']
    },
    '_number': 0,
}

EXAMPLE_DISTRIBUTIONS = {
    'x': UniformDistribution(low=1., high=2.),
    'y': CategoricalDistribution(choices=('Otemachi', 'Tokyo', 'Ginza'))
}  # type: Dict[str, BaseDistribution]

# TODO(Yanase): Remove _number from system_attrs after adding TrialModel.number.
EXAMPLE_TRIALS = [
    FrozenTrial(
        number=0,  # dummy
        value=1.,
        state=TrialState.COMPLETE,
        user_attrs={},
        system_attrs={'_number': 0},
        params={
            'x': 0.5,
            'y': 'Ginza'
        },
Example #21
def prepare_study_with_trials(no_trials=False,
                              less_than_two=False,
                              with_c_d=True):
    # type: (bool, bool, bool) -> Study
    """Prepare a study for tests.

    Args:
        no_trials: If ``True``, return a study with no trials.
        less_than_two: If ``True``, return a study with only two trials, in which
            'param_a' (and 'param_c') appears only once while 'param_b' (and 'param_d')
            appears twice in `study.trials`.
        with_c_d: If ``True``, the study has four hyperparameters named 'param_a',
            'param_b', 'param_c', and 'param_d'. Otherwise, there are only two
            hyperparameters ('param_a' and 'param_b').

    Returns:
        :class:`~optuna.study.Study`

    """

    study = create_study()
    if no_trials:
        return study
    study._append_trial(value=0.0,
                        params={
                            'param_a': 1.0,
                            'param_b': 2.0,
                            'param_c': 3.0,
                            'param_d': 4.0,
                        } if with_c_d else {
                            'param_a': 1.0,
                            'param_b': 2.0,
                        },
                        distributions={
                            'param_a': UniformDistribution(0.0, 3.0),
                            'param_b': UniformDistribution(0.0, 3.0),
                            'param_c': UniformDistribution(2.0, 5.0),
                            'param_d': UniformDistribution(2.0, 5.0),
                        } if with_c_d else {
                            'param_a': UniformDistribution(0.0, 3.0),
                            'param_b': UniformDistribution(0.0, 3.0),
                        })
    study._append_trial(value=2.0,
                        params={
                            'param_b': 0.0,
                            'param_d': 4.0,
                        } if with_c_d else {
                            'param_b': 0.0,
                        },
                        distributions={
                            'param_b': UniformDistribution(0.0, 3.0),
                            'param_d': UniformDistribution(2.0, 5.0),
                        } if with_c_d else {
                            'param_b': UniformDistribution(0.0, 3.0),
                        })
    if less_than_two:
        return study

    study._append_trial(value=1.0,
                        params={
                            'param_a': 2.5,
                            'param_b': 1.0,
                            'param_c': 4.5,
                            'param_d': 2.0,
                        } if with_c_d else {
                            'param_a': 2.5,
                            'param_b': 1.0,
                        },
                        distributions={
                            'param_a': UniformDistribution(0.0, 3.0),
                            'param_b': UniformDistribution(0.0, 3.0),
                            'param_c': UniformDistribution(2.0, 5.0),
                            'param_d': UniformDistribution(2.0, 5.0),
                        } if with_c_d else {
                            'param_a': UniformDistribution(0.0, 3.0),
                            'param_b': UniformDistribution(0.0, 3.0),
                        })
    return study