Example #1
def test_optuna_search_convert_deprecated_distribution() -> None:

    param_dist = {
        "ud": distributions.UniformDistribution(low=0, high=10),
        "dud": distributions.DiscreteUniformDistribution(low=0, high=10, q=2),
        "lud": distributions.LogUniformDistribution(low=1, high=10),
        "id": distributions.IntUniformDistribution(low=0, high=10),
        "idd": distributions.IntUniformDistribution(low=0, high=10, step=2),
        "ild": distributions.IntLogUniformDistribution(low=1, high=10),
    }

    expected_param_dist = {
        "ud": distributions.FloatDistribution(low=0,
                                              high=10,
                                              log=False,
                                              step=None),
        "dud": distributions.FloatDistribution(low=0,
                                               high=10,
                                               log=False,
                                               step=2),
        "lud": distributions.FloatDistribution(low=1,
                                               high=10,
                                               log=True,
                                               step=None),
        "id": distributions.IntDistribution(low=0, high=10, log=False, step=1),
        "idd": distributions.IntDistribution(low=0, high=10, log=False,
                                             step=2),
        "ild": distributions.IntDistribution(low=1, high=10, log=True, step=1),
    }

    optuna_search = integration.OptunaSearchCV(
        KernelDensity(),
        param_dist,
    )

    assert optuna_search.param_distributions == expected_param_dist

    # It confirms that OptunaSearchCV doesn't convert non-deprecated distributions.
    optuna_search = integration.OptunaSearchCV(
        KernelDensity(),
        expected_param_dist,
    )

    assert optuna_search.param_distributions == expected_param_dist
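For context, a hedged usage sketch of OptunaSearchCV with the new-style distributions that the test above expects after conversion. The estimator, dataset, and parameter name below are illustrative assumptions, not part of the test; it also assumes an Optuna version whose OptunaSearchCV accepts FloatDistribution (in recent releases the class may live in the separate optuna-integration package).

import optuna
from optuna import distributions
from sklearn.datasets import load_iris
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

# New-style distributions are passed through unchanged, so no FutureWarning
# is emitted for this search space.
param_dist = {
    "C": distributions.FloatDistribution(low=1e-3, high=1e3, log=True),
}

search = optuna.integration.OptunaSearchCV(SVC(), param_dist, n_trials=10, cv=3)
search.fit(X, y)
print(search.best_params_)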
Example #2
def test_check_distribution_compatibility():
    # type: () -> None

    # test the same distribution
    for key in EXAMPLE_JSONS:
        distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS[key], EXAMPLE_DISTRIBUTIONS[key])

    # test different distribution classes
    pytest.raises(
        ValueError,
        lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["u"], EXAMPLE_DISTRIBUTIONS["l"]),
    )

    # test dynamic value range (CategoricalDistribution)
    pytest.raises(
        ValueError,
        lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["c2"],
            distributions.CategoricalDistribution(choices=("Roppongi",
                                                           "Akasaka")),
        ),
    )

    # test dynamic value range (except CategoricalDistribution)
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["u"],
        distributions.UniformDistribution(low=-3.0, high=-2.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["l"],
        distributions.LogUniformDistribution(low=0.1, high=1.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["du"],
        distributions.DiscreteUniformDistribution(low=-1.0, high=11.0, q=3.0),
    )
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["iu"],
        distributions.IntUniformDistribution(low=-1, high=1))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["ilu"],
        distributions.IntLogUniformDistribution(low=1, high=13),
    )
Example #3
def test_empty_distribution():
    # type: () -> None

    # Empty distributions cannot be instantiated.
    with pytest.raises(ValueError):
        distributions.UniformDistribution(low=0.0, high=-100.0)

    with pytest.raises(ValueError):
        distributions.LogUniformDistribution(low=7.3, high=7.2)

    with pytest.raises(ValueError):
        distributions.DiscreteUniformDistribution(low=-30, high=-40, q=3)

    with pytest.raises(ValueError):
        distributions.IntUniformDistribution(low=123, high=100)

    with pytest.raises(ValueError):
        distributions.IntUniformDistribution(low=123, high=100, step=2)

    with pytest.raises(ValueError):
        distributions.CategoricalDistribution(choices=())
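As a hedged aside, the new-style distribution classes are assumed (Optuna >= 3.0) to apply the same range validation as the deprecated classes checked above:

import pytest
from optuna import distributions

# Empty ranges are rejected by the new-style classes as well (assumption).
with pytest.raises(ValueError):
    distributions.FloatDistribution(low=0.0, high=-100.0)

with pytest.raises(ValueError):
    distributions.IntDistribution(low=123, high=100)

with pytest.raises(ValueError):
    distributions.CategoricalDistribution(choices=())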
Example #4
def test_ask_distribution_conversion() -> None:
    fixed_distributions = {
        "ud": distributions.UniformDistribution(low=0, high=10),
        "dud": distributions.DiscreteUniformDistribution(low=0, high=10, q=2),
        "lud": distributions.LogUniformDistribution(low=1, high=10),
        "id": distributions.IntUniformDistribution(low=0, high=10),
        "idd": distributions.IntUniformDistribution(low=0, high=10, step=2),
        "ild": distributions.IntLogUniformDistribution(low=1, high=10),
    }

    study = create_study()

    with pytest.warns(
            FutureWarning,
            match="See https://github.com/optuna/optuna/issues/2941",
    ) as record:

        trial = study.ask(fixed_distributions=fixed_distributions)
        assert len(record) == 6

    expected_distributions = {
        "ud": distributions.FloatDistribution(low=0,
                                              high=10,
                                              log=False,
                                              step=None),
        "dud": distributions.FloatDistribution(low=0,
                                               high=10,
                                               log=False,
                                               step=2),
        "lud": distributions.FloatDistribution(low=1,
                                               high=10,
                                               log=True,
                                               step=None),
        "id": distributions.IntDistribution(low=0, high=10, log=False, step=1),
        "idd": distributions.IntDistribution(low=0, high=10, log=False,
                                             step=2),
        "ild": distributions.IntDistribution(low=1, high=10, log=True, step=1),
    }

    assert trial.distributions == expected_distributions
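A minimal sketch of the non-deprecated path, assuming Optuna >= 3.0: passing FloatDistribution/IntDistribution to study.ask directly avoids the FutureWarning asserted above. The parameter names are illustrative.

import optuna
from optuna import distributions

study = optuna.create_study()

# New-style distributions need no conversion, so no FutureWarning is raised.
trial = study.ask(
    fixed_distributions={
        "x": distributions.FloatDistribution(low=0, high=10),
        "n": distributions.IntDistribution(low=0, high=10),
    }
)
study.tell(trial, trial.params["x"] + trial.params["n"])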
Example #5
def test_empty_range_contains():
    # type: () -> None

    u = distributions.UniformDistribution(low=1.0, high=1.0)
    assert not u._contains(0.9)
    assert u._contains(1.0)
    assert not u._contains(1.1)

    lu = distributions.LogUniformDistribution(low=1.0, high=1.0)
    assert not lu._contains(0.9)
    assert lu._contains(1.0)
    assert not lu._contains(1.1)

    du = distributions.DiscreteUniformDistribution(low=1.0, high=1.0, q=2.0)
    assert not du._contains(0.9)
    assert du._contains(1.0)
    assert not du._contains(1.1)

    iu = distributions.IntUniformDistribution(low=1, high=1)
    assert not iu._contains(0)
    assert iu._contains(1)
    assert not iu._contains(2)
Example #6
def test_suggest_discrete_uniform(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    mock = Mock()
    mock.side_effect = [1., 2., 3.]
    sampler = samplers.RandomSampler()

    with patch.object(sampler, 'sample', mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study.storage.create_new_trial_id(study.study_id))
        distribution = distributions.DiscreteUniformDistribution(low=0.,
                                                                 high=3.,
                                                                 q=1.)

        assert trial._suggest('x',
                              distribution) == 1.  # Test suggesting a param.
        assert trial._suggest(
            'x', distribution) == 1.  # Test suggesting the same param.
        assert trial._suggest(
            'y', distribution) == 3.  # Test suggesting a different param.
        assert trial.params == {'x': 1., 'y': 3.}
        assert mock_object.call_count == 3
Example #7
    def suggest_discrete_uniform(self, name, low, high, q):
        # type: (str, float, float, float) -> float
        """Suggest a value for the discrete parameter.

        The value is sampled from the range ``[low, high]``, and the step of discretization is
        ``q``.

        Example:

            Suggest a fraction of samples used for fitting the individual learners of
            `GradientBoostingClassifier <https://scikit-learn.org/stable/modules/generated/
            sklearn.ensemble.GradientBoostingClassifier.html>`_.

            .. code::

                >>> def objective(trial):
                >>>     ...
                >>>     subsample = trial.suggest_discrete_uniform('subsample', 0.1, 1.0, 0.1)
                >>>     clf = sklearn.ensemble.GradientBoostingClassifier(subsample=subsample)
                >>>     ...

        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is included in the range.
            q:
                A step of discretization.

        Returns:
            A suggested float value.
        """

        discrete = distributions.DiscreteUniformDistribution(low=low,
                                                             high=high,
                                                             q=q)
        return self._suggest(name, discrete)
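A self-contained usage sketch of the method documented above. Hedged note: suggest_discrete_uniform is deprecated in recent Optuna releases in favor of suggest_float(..., step=...), but the call below matches the documented signature.

import optuna


def objective(trial):
    # Draws from {0.1, 0.2, ..., 1.0}; both low and high are included.
    subsample = trial.suggest_discrete_uniform("subsample", 0.1, 1.0, 0.1)
    return (subsample - 0.5) ** 2


study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=5)
print(study.best_params)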
Example #8
def test_suggest_discrete_uniform(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    mock = Mock()
    mock.side_effect = [1.0, 2.0, 3.0]
    sampler = samplers.RandomSampler()

    with patch.object(sampler, "sample_independent", mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = distributions.DiscreteUniformDistribution(low=0.0,
                                                                 high=3.0,
                                                                 q=1.0)

        assert trial._suggest("x",
                              distribution) == 1.0  # Test suggesting a param.
        assert trial._suggest(
            "x", distribution) == 1.0  # Test suggesting the same param.
        assert trial._suggest(
            "y", distribution) == 3.0  # Test suggesting a different param.
        assert trial.params == {"x": 1.0, "y": 3.0}
        assert mock_object.call_count == 3
Example #9
def test_contains():
    # type: () -> None

    u = distributions.UniformDistribution(low=1., high=2.)
    assert not u._contains(0.9)
    assert u._contains(1)
    assert u._contains(1.5)
    assert not u._contains(2)

    lu = distributions.LogUniformDistribution(low=0.001, high=100)
    assert not lu._contains(0.0)
    assert lu._contains(0.001)
    assert lu._contains(12.3)
    assert not lu._contains(100)

    du = distributions.DiscreteUniformDistribution(low=1., high=10., q=2.)
    assert not du._contains(0.9)
    assert du._contains(1.0)
    assert du._contains(3.5)
    assert du._contains(6)
    assert du._contains(10)
    assert not du._contains(10.1)

    iu = distributions.IntUniformDistribution(low=1, high=10)
    assert not iu._contains(0.9)
    assert iu._contains(1)
    assert iu._contains(3.5)
    assert iu._contains(6)
    assert iu._contains(10)
    assert iu._contains(10.1)
    assert not iu._contains(11)

    c = distributions.CategoricalDistribution(choices=('Roppongi', 'Azabu'))
    assert not c._contains(-1)
    assert c._contains(0)
    assert c._contains(1)
    assert c._contains(1.5)
    assert not c._contains(3)
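One point worth spelling out: _contains works on the internal representation, which for CategoricalDistribution is the index of a choice; that is why c._contains(1.5) holds above even though 1.5 is not a choice. A short sketch of the public conversion helpers:

from optuna import distributions

c = distributions.CategoricalDistribution(choices=("Roppongi", "Azabu"))

# External <-> internal conversion: the internal value is the choice index.
assert c.to_internal_repr("Azabu") == 1
assert c.to_external_repr(0) == "Roppongi"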
Example #10
def test_infer_relative_search_space() -> None:
    sampler = TPESampler()
    search_space = {
        "a": distributions.UniformDistribution(1.0, 100.0),
        "b": distributions.LogUniformDistribution(1.0, 100.0),
        "c": distributions.DiscreteUniformDistribution(1.0, 100.0, 3.0),
        "d": distributions.IntUniformDistribution(1, 100),
        "e": distributions.IntUniformDistribution(0, 100, step=2),
        "f": distributions.IntLogUniformDistribution(1, 100),
        "g": distributions.CategoricalDistribution(["x", "y", "z"]),
    }

    def obj(t: Trial) -> float:
        t.suggest_uniform("a", 1.0, 100.0)
        t.suggest_loguniform("b", 1.0, 100.0)
        t.suggest_discrete_uniform("c", 1.0, 100.0, 3.0)
        t.suggest_int("d", 1, 100)
        t.suggest_int("e", 0, 100, step=2)
        t.suggest_int("f", 1, 100, log=True)
        t.suggest_categorical("g", ["x", "y", "z"])
        return 0.0

    # Study and frozen-trial are not supposed to be accessed.
    study1 = Mock(spec=[])
    frozen_trial = Mock(spec=[])
    assert sampler.infer_relative_search_space(study1, frozen_trial) == {}

    study2 = optuna.create_study(sampler=sampler)
    study2.optimize(obj, n_trials=1)
    assert sampler.infer_relative_search_space(study2, study2.best_trial) == {}

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = TPESampler(multivariate=True)
    study3 = optuna.create_study(sampler=sampler)
    study3.optimize(obj, n_trials=1)
    assert sampler.infer_relative_search_space(
        study3, study3.best_trial) == search_space
Example #11
    def suggest_float(self, name, low, high, *, log=False, step=None):
        # type: (str, float, float, bool, Optional[float]) -> float

        if step is not None:
            if log:
                raise NotImplementedError(
                    "The parameter `step` is not supported when `log` is True."
                )
            else:
                return self._suggest(
                    name,
                    distributions.DiscreteUniformDistribution(low=low,
                                                              high=high,
                                                              q=step))
        else:
            if log:
                return self._suggest(
                    name,
                    distributions.LogUniformDistribution(low=low, high=high))
            else:
                return self._suggest(
                    name, distributions.UniformDistribution(low=low,
                                                            high=high))
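A short usage sketch of the dispatch shown above, assuming a Trial obtained from a study; each call maps to the distribution class picked in the corresponding branch (and, per the code above, combining step with log=True raises NotImplementedError in this implementation).

import optuna


def objective(trial):
    a = trial.suggest_float("a", 0.0, 1.0)             # UniformDistribution branch
    b = trial.suggest_float("b", 1e-3, 1.0, log=True)  # LogUniformDistribution branch
    c = trial.suggest_float("c", 0.0, 1.0, step=0.25)  # DiscreteUniformDistribution branch
    return a + b + c


optuna.create_study().optimize(objective, n_trials=3)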
Example #12
def test_convert_old_distribution_to_new_distribution() -> None:
    ud = distributions.UniformDistribution(low=0, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        ud) == distributions.FloatDistribution(low=0,
                                               high=10,
                                               log=False,
                                               step=None)

    dud = distributions.DiscreteUniformDistribution(low=0, high=10, q=2)
    assert distributions._convert_old_distribution_to_new_distribution(
        dud) == distributions.FloatDistribution(low=0,
                                                high=10,
                                                log=False,
                                                step=2)

    lud = distributions.LogUniformDistribution(low=1, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        lud) == distributions.FloatDistribution(low=1,
                                                high=10,
                                                log=True,
                                                step=None)

    id = distributions.IntUniformDistribution(low=0, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        id) == distributions.IntDistribution(low=0, high=10, log=False, step=1)

    idd = distributions.IntUniformDistribution(low=0, high=10, step=2)
    assert distributions._convert_old_distribution_to_new_distribution(
        idd) == distributions.IntDistribution(low=0,
                                              high=10,
                                              log=False,
                                              step=2)

    ild = distributions.IntLogUniformDistribution(low=1, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        ild) == distributions.IntDistribution(low=1, high=10, log=True, step=1)
Example #13
def test_distributions(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    def objective(trial):
        # type: (Trial) -> float

        trial.suggest_uniform("a", 0, 10)
        trial.suggest_loguniform("b", 0.1, 10)
        trial.suggest_discrete_uniform("c", 0, 10, 1)
        trial.suggest_int("d", 0, 10)
        trial.suggest_categorical("e", ["foo", "bar", "baz"])

        return 1.0

    study = create_study(storage_init_func())
    study.optimize(objective, n_trials=1)

    assert study.best_trial.distributions == {
        "a": distributions.UniformDistribution(low=0, high=10),
        "b": distributions.LogUniformDistribution(low=0.1, high=10),
        "c": distributions.DiscreteUniformDistribution(low=0, high=10, q=1),
        "d": distributions.IntUniformDistribution(low=0, high=10),
        "e": distributions.CategoricalDistribution(choices=("foo", "bar", "baz")),
    }
Example #14
def test_distributions(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    def objective(trial):
        # type: (Trial) -> float

        trial.suggest_uniform('a', 0, 10)
        trial.suggest_loguniform('b', 0.1, 10)
        trial.suggest_discrete_uniform('c', 0, 10, 1)
        trial.suggest_int('d', 0, 10)
        trial.suggest_categorical('e', ['foo', 'bar', 'baz'])

        return 1.0

    study = create_study(storage_init_func())
    study.optimize(objective, n_trials=1)

    assert study.best_trial.distributions == {
        'a': distributions.UniformDistribution(low=0, high=10),
        'b': distributions.LogUniformDistribution(low=0.1, high=10),
        'c': distributions.DiscreteUniformDistribution(low=0, high=10, q=1),
        'd': distributions.IntUniformDistribution(low=0, high=10),
        'e': distributions.CategoricalDistribution(choices=('foo', 'bar', 'baz'))
    }
Example #15
def test_single():
    # type: () -> None

    with warnings.catch_warnings():
        # UserWarning will be raised since the range is not divisible by step.
        warnings.simplefilter("ignore", category=UserWarning)
        single_distributions = [
            distributions.UniformDistribution(low=1.0, high=1.0),
            distributions.LogUniformDistribution(low=7.3, high=7.3),
            distributions.DiscreteUniformDistribution(low=2.22,
                                                      high=2.22,
                                                      q=0.1),
            distributions.DiscreteUniformDistribution(low=2.22,
                                                      high=2.24,
                                                      q=0.3),
            distributions.IntUniformDistribution(low=-123, high=-123),
            distributions.IntUniformDistribution(low=-123, high=-120, step=4),
            distributions.CategoricalDistribution(choices=("foo", )),
            distributions.IntLogUniformDistribution(low=2, high=2),
            distributions.IntLogUniformDistribution(low=2, high=2, step=2),
        ]  # type: List[distributions.BaseDistribution]
    for distribution in single_distributions:
        assert distribution.single()

    nonsingle_distributions = [
        distributions.UniformDistribution(low=1.0, high=1.001),
        distributions.LogUniformDistribution(low=7.3, high=10),
        distributions.DiscreteUniformDistribution(low=-30, high=-20, q=2),
        distributions.DiscreteUniformDistribution(low=-30, high=-20, q=10),
        # In Python, "0.3 - 0.2 != 0.1" is True.
        distributions.DiscreteUniformDistribution(low=0.2, high=0.3, q=0.1),
        distributions.DiscreteUniformDistribution(low=0.7, high=0.8, q=0.1),
        distributions.IntUniformDistribution(low=-123, high=0),
        distributions.IntUniformDistribution(low=-123, high=0, step=123),
        distributions.CategoricalDistribution(choices=("foo", "bar")),
        distributions.IntLogUniformDistribution(low=2, high=4),
        distributions.IntLogUniformDistribution(low=2, high=4, step=2),
    ]  # type: List[distributions.BaseDistribution]
    for distribution in nonsingle_distributions:
        assert not distribution.single()
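For completeness, a hedged sketch with the new-style classes, which are assumed (Optuna >= 3.0) to report single() in the same way:

from optuna import distributions

# single() is True only when the distribution can produce exactly one value.
assert distributions.FloatDistribution(low=1.0, high=1.0).single()
assert not distributions.FloatDistribution(low=1.0, high=1.001).single()
assert distributions.IntDistribution(low=-123, high=-123).single()
assert not distributions.IntDistribution(low=-123, high=0).single()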
Example #16
    def suggest_discrete_uniform(self, name, low, high, q):
        # type: (str, float, float, float) -> float

        high = _adjust_discrete_uniform_high(name, low, high, q)
        discrete = distributions.DiscreteUniformDistribution(low=low, high=high, q=q)
        return self._suggest(name, discrete)
Example #17
def test_contains() -> None:
    u = distributions.UniformDistribution(low=1.0, high=2.0)
    assert not u._contains(0.9)
    assert u._contains(1)
    assert u._contains(1.5)
    assert not u._contains(2)

    lu = distributions.LogUniformDistribution(low=0.001, high=100)
    assert not lu._contains(0.0)
    assert lu._contains(0.001)
    assert lu._contains(12.3)
    assert not lu._contains(100)

    with warnings.catch_warnings():
        # UserWarning will be raised since the range is not divisible by 2.
        # The range will be replaced with [1.0, 9.0].
        warnings.simplefilter("ignore", category=UserWarning)
        du = distributions.DiscreteUniformDistribution(low=1.0,
                                                       high=10.0,
                                                       q=2.0)
    assert not du._contains(0.9)
    assert du._contains(1.0)
    assert du._contains(3.5)
    assert du._contains(6)
    assert du._contains(9)
    assert not du._contains(9.1)
    assert not du._contains(10)

    iu = distributions.IntUniformDistribution(low=1, high=10)
    assert not iu._contains(0.9)
    assert iu._contains(1)
    assert iu._contains(4)
    assert iu._contains(6)
    assert iu._contains(10)
    assert not iu._contains(10.1)
    assert not iu._contains(11)

    # IntUniformDistribution with a 'step' parameter.
    with warnings.catch_warnings():
        # UserWarning will be raised since the range is not divisible by 2.
        # The range will be replaced with [1, 9].
        warnings.simplefilter("ignore", category=UserWarning)
        iuq = distributions.IntUniformDistribution(low=1, high=10, step=2)
    assert not iuq._contains(0.9)
    assert iuq._contains(1)
    assert iuq._contains(4)
    assert iuq._contains(6)
    assert iuq._contains(9)
    assert not iuq._contains(9.1)
    assert not iuq._contains(10)

    c = distributions.CategoricalDistribution(choices=("Roppongi", "Azabu"))
    assert not c._contains(-1)
    assert c._contains(0)
    assert c._contains(1)
    assert c._contains(1.5)
    assert not c._contains(3)

    ilu = distributions.IntUniformDistribution(low=2, high=12)
    assert not ilu._contains(0.9)
    assert ilu._contains(2)
    assert ilu._contains(4)
    assert ilu._contains(6)
    assert ilu._contains(12)
    assert not ilu._contains(12.1)
    assert not ilu._contains(13)

    iluq = distributions.IntLogUniformDistribution(low=2, high=7)
    assert not iluq._contains(0.9)
    assert iluq._contains(2)
    assert iluq._contains(4)
    assert iluq._contains(5)
    assert iluq._contains(6)
    assert not iluq._contains(7.1)
    assert not iluq._contains(8)
Example #18
import copy
import json
from typing import Any
from typing import Dict
from typing import List
import warnings

import pytest

from optuna import distributions

EXAMPLE_DISTRIBUTIONS = {
    "u": distributions.UniformDistribution(low=1.0, high=2.0),
    "l": distributions.LogUniformDistribution(low=0.001, high=100),
    "du": distributions.DiscreteUniformDistribution(low=1.0, high=9.0, q=2.0),
    "iu": distributions.IntUniformDistribution(low=1, high=9, step=2),
    "c1": distributions.CategoricalDistribution(choices=(2.71, -float("inf"))),
    "c2": distributions.CategoricalDistribution(choices=("Roppongi", "Azabu")),
    "c3": distributions.CategoricalDistribution(choices=["Roppongi", "Azabu"]),
    "ilu": distributions.IntLogUniformDistribution(low=2, high=12, step=2),
}  # type: Dict[str, Any]

EXAMPLE_JSONS = {
    "u":
    '{"name": "UniformDistribution", "attributes": {"low": 1.0, "high": 2.0}}',
    "l":
    '{"name": "LogUniformDistribution", "attributes": {"low": 0.001, "high": 100}}',
    "du":
    '{"name": "DiscreteUniformDistribution",'
    '"attributes": {"low": 1.0, "high": 9.0, "q": 2.0}}',
    "iu":
Example #19
def test_check_distribution_compatibility() -> None:

    # test the same distribution
    for key in EXAMPLE_JSONS:
        distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS[key], EXAMPLE_DISTRIBUTIONS[key])

    # test different distribution classes
    pytest.raises(
        ValueError,
        lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["i"], EXAMPLE_DISTRIBUTIONS["fl"]),
    )

    pytest.raises(
        ValueError,
        lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["u"], EXAMPLE_DISTRIBUTIONS["l"]),
    )

    # test compatibility between IntDistributions.
    distributions.check_distribution_compatibility(EXAMPLE_DISTRIBUTIONS["i"],
                                                   EXAMPLE_DISTRIBUTIONS["il"])
    distributions.check_distribution_compatibility(EXAMPLE_DISTRIBUTIONS["il"],
                                                   EXAMPLE_DISTRIBUTIONS["id"])
    distributions.check_distribution_compatibility(EXAMPLE_DISTRIBUTIONS["id"],
                                                   EXAMPLE_DISTRIBUTIONS["i"])

    # test compatibility between FloatDistributions.
    distributions.check_distribution_compatibility(EXAMPLE_DISTRIBUTIONS["f"],
                                                   EXAMPLE_DISTRIBUTIONS["fl"])
    distributions.check_distribution_compatibility(EXAMPLE_DISTRIBUTIONS["fl"],
                                                   EXAMPLE_DISTRIBUTIONS["fd"])
    distributions.check_distribution_compatibility(EXAMPLE_DISTRIBUTIONS["fd"],
                                                   EXAMPLE_DISTRIBUTIONS["f"])

    # test dynamic value range (CategoricalDistribution)
    pytest.raises(
        ValueError,
        lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["c2"],
            distributions.CategoricalDistribution(choices=("Roppongi",
                                                           "Akasaka")),
        ),
    )

    # test dynamic value range (except CategoricalDistribution)
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["i"],
        distributions.IntDistribution(low=-3, high=2))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["il"],
        distributions.IntDistribution(low=1, high=13, log=True))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["id"],
        distributions.IntDistribution(low=-3, high=2, step=2))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["f"],
        distributions.FloatDistribution(low=-3.0, high=-2.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["fl"],
        distributions.FloatDistribution(low=0.1, high=1.0, log=True))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["fd"],
        distributions.FloatDistribution(low=-1.0, high=11.0, step=0.5))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["u"],
        distributions.UniformDistribution(low=-3.0, high=-2.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["l"],
        distributions.LogUniformDistribution(low=0.1, high=1.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["du"],
        distributions.DiscreteUniformDistribution(low=-1.0, high=11.0, q=3.0),
    )
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["iu"],
        distributions.IntUniformDistribution(low=-1, high=1))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["iuq"],
        distributions.IntUniformDistribution(low=-1, high=1))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["ilu"],
        distributions.IntLogUniformDistribution(low=1, high=13))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["iluq"],
        distributions.IntLogUniformDistribution(low=1, high=13))
Example #20
    def suggest_discrete_uniform(self, name: str, low: float, high: float, q: float) -> float:
        discrete = distributions.DiscreteUniformDistribution(low=low, high=high, q=q)
        return self._suggest(name, discrete)
Example #21
import json

import pytest

from optuna import distributions
from optuna import type_checking

if type_checking.TYPE_CHECKING:
    from typing import Any  # NOQA
    from typing import Dict  # NOQA
    from typing import List  # NOQA

EXAMPLE_DISTRIBUTIONS = {
    'u': distributions.UniformDistribution(low=1., high=2.),
    'l': distributions.LogUniformDistribution(low=0.001, high=100),
    'du': distributions.DiscreteUniformDistribution(low=1., high=10., q=2.),
    'iu': distributions.IntUniformDistribution(low=1, high=10),
    'c1': distributions.CategoricalDistribution(choices=(2.71, -float('inf'))),
    'c2': distributions.CategoricalDistribution(choices=('Roppongi', 'Azabu'))
}  # type: Dict[str, Any]

EXAMPLE_JSONS = {
    'u': '{"name": "UniformDistribution", "attributes": {"low": 1.0, "high": 2.0}}',
    'l': '{"name": "LogUniformDistribution", "attributes": {"low": 0.001, "high": 100}}',
    'du': '{"name": "DiscreteUniformDistribution",'
    '"attributes": {"low": 1.0, "high": 10.0, "q": 2.0}}',
    'iu': '{"name": "IntUniformDistribution", "attributes": {"low": 1, "high": 10}}',
    'c1': '{"name": "CategoricalDistribution", "attributes": {"choices": [2.71, -Infinity]}}',
    'c2': '{"name": "CategoricalDistribution", "attributes": {"choices": ["Roppongi", "Azabu"]}}'
}
Example #22
from typing import Dict
from typing import List
from unittest.mock import patch

import numpy as np
import pytest

from optuna import distributions
from optuna.samplers._tpe.parzen_estimator import _ParzenEstimator
from optuna.samplers._tpe.parzen_estimator import _ParzenEstimatorParameters
from optuna.samplers._tpe.sampler import default_weights

SEARCH_SPACE = {
    "a": distributions.UniformDistribution(1.0, 100.0),
    "b": distributions.LogUniformDistribution(1.0, 100.0),
    "c": distributions.DiscreteUniformDistribution(1.0, 100.0, 3.0),
    "d": distributions.IntUniformDistribution(1, 100),
    "e": distributions.IntLogUniformDistribution(1, 100),
    "f": distributions.CategoricalDistribution(["x", "y", "z"]),
}

MULTIVARIATE_SAMPLES = {
    "a": np.array([1.0]),
    "b": np.array([1.0]),
    "c": np.array([1.0]),
    "d": np.array([1]),
    "e": np.array([1]),
    "f": np.array([1]),
}

_PRECOMPUTE_SIGMAS0 = "optuna.samplers._tpe.parzen_estimator._ParzenEstimator._precompute_sigmas0"
Example #23
    def suggest_discrete_uniform(self, name, low, high, q):
        # type: (str, float, float, float) -> float
        """Suggest a value for the discrete parameter.

        The value is sampled from the range :math:`[\\mathsf{low}, \\mathsf{high}]`,
        and the step of discretization is :math:`q`. More specifically,
        this method returns one of the values in the sequence
        :math:`\\mathsf{low}, \\mathsf{low} + q, \\mathsf{low} + 2 q, \\dots,
        \\mathsf{low} + k q \\le \\mathsf{high}`,
        where :math:`k` denotes an integer. Note that :math:`\\mathsf{high}` may be changed due to round-off
        errors if :math:`\\mathsf{q}` is not an integer. Please check warning messages to find the changed
        values.

        Example:

            Suggest a fraction of samples used for fitting the individual learners of
            `GradientBoostingClassifier <https://scikit-learn.org/stable/modules/generated/
            sklearn.ensemble.GradientBoostingClassifier.html>`_.

            .. testsetup::

                import numpy as np
                from sklearn.model_selection import train_test_split

                np.random.seed(seed=0)
                X = np.random.randn(50).reshape(-1, 1)
                y = np.random.randint(0, 2, 50)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)

            .. testcode::

                import optuna
                from sklearn.ensemble import GradientBoostingClassifier

                def objective(trial):
                    subsample = trial.suggest_discrete_uniform('subsample', 0.1, 1.0, 0.1)
                    clf = GradientBoostingClassifier(subsample=subsample, random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)

                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)

        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is included in the range.
            q:
                A step of discretization.

        Returns:
            A suggested float value.
        """

        high = _adjust_discrete_uniform_high(name, low, high, q)
        distribution = distributions.DiscreteUniformDistribution(low=low,
                                                                 high=high,
                                                                 q=q)

        self._check_distribution(name, distribution)

        if low == high:
            return self._set_new_param_or_get_existing(name, low, distribution)

        return self._suggest(name, distribution)