Example #1
def test_ask_distribution_conversion_noop() -> None:
    fixed_distributions = {
        "ud": distributions.FloatDistribution(low=0,
                                              high=10,
                                              log=False,
                                              step=None),
        "dud": distributions.FloatDistribution(low=0,
                                               high=10,
                                               log=False,
                                               step=2),
        "lud": distributions.FloatDistribution(low=1,
                                               high=10,
                                               log=True,
                                               step=None),
        "id": distributions.IntDistribution(low=0, high=10, log=False, step=1),
        "idd": distributions.IntDistribution(low=0, high=10, log=False,
                                             step=2),
        "ild": distributions.IntDistribution(low=1, high=10, log=True, step=1),
        "cd": distributions.CategoricalDistribution(choices=["a", "b", "c"]),
    }

    study = create_study()

    trial = study.ask(fixed_distributions=fixed_distributions)

    # Check fixed_distributions doesn't change.
    assert trial.distributions == fixed_distributions

def test_empty_range_contains() -> None:

    i = distributions.IntDistribution(low=1, high=1)
    assert not i._contains(0)
    assert i._contains(1)
    assert not i._contains(2)

    iq = distributions.IntDistribution(low=1, high=1, step=2)
    assert not iq._contains(0)
    assert iq._contains(1)
    assert not iq._contains(2)

    il = distributions.IntDistribution(low=1, high=1, log=True)
    assert not il._contains(0)
    assert il._contains(1)
    assert not il._contains(2)

    f = distributions.FloatDistribution(low=1.0, high=1.0)
    assert not f._contains(0.9)
    assert f._contains(1.0)
    assert not f._contains(1.1)

    fd = distributions.FloatDistribution(low=1.0, high=1.0, step=2.0)
    assert not fd._contains(0.9)
    assert fd._contains(1.0)
    assert not fd._contains(1.1)

    fl = distributions.FloatDistribution(low=1.0, high=1.0, log=True)
    assert not fl._contains(0.9)
    assert fl._contains(1.0)
    assert not fl._contains(1.1)
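
The two tests above register distributions up front via study.ask(fixed_distributions=...) and probe single-point ranges with the private _contains helper. A minimal ask/tell sketch with a pre-registered distribution, assuming a toy quadratic objective and a parameter named "x" that are not part of the tests:

# Minimal ask/tell sketch; the quadratic objective and the parameter name "x"
# are illustrative assumptions, not taken from the tests above.
import optuna
from optuna import distributions

study = optuna.create_study()
fixed = {"x": distributions.FloatDistribution(low=0.0, high=10.0)}

trial = study.ask(fixed_distributions=fixed)  # distributions are registered up front
x = trial.suggest_float("x", 0.0, 10.0)       # must agree with the registered distribution
study.tell(trial, (x - 3.0) ** 2)             # report the objective value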
Example #3
def test_optuna_search_convert_deprecated_distribution() -> None:

    param_dist = {
        "ud": distributions.UniformDistribution(low=0, high=10),
        "dud": distributions.DiscreteUniformDistribution(low=0, high=10, q=2),
        "lud": distributions.LogUniformDistribution(low=1, high=10),
        "id": distributions.IntUniformDistribution(low=0, high=10),
        "idd": distributions.IntUniformDistribution(low=0, high=10, step=2),
        "ild": distributions.IntLogUniformDistribution(low=1, high=10),
    }

    expected_param_dist = {
        "ud": distributions.FloatDistribution(low=0, high=10, log=False, step=None),
        "dud": distributions.FloatDistribution(low=0, high=10, log=False, step=2),
        "lud": distributions.FloatDistribution(low=1, high=10, log=True, step=None),
        "id": distributions.IntDistribution(low=0, high=10, log=False, step=1),
        "idd": distributions.IntDistribution(low=0, high=10, log=False, step=2),
        "ild": distributions.IntDistribution(low=1, high=10, log=True, step=1),
    }

    optuna_search = integration.OptunaSearchCV(
        KernelDensity(),
        param_dist,
    )

    assert optuna_search.param_distributions == expected_param_dist

    # Confirm that OptunaSearchCV doesn't convert non-deprecated distributions.
    optuna_search = integration.OptunaSearchCV(
        KernelDensity(),
        expected_param_dist,
    )

    assert optuna_search.param_distributions == expected_param_dist
def test_convert_old_distribution_to_new_distribution_noop() -> None:
    # No conversion happens for CategoricalDistribution.
    cd = distributions.CategoricalDistribution(choices=["a", "b", "c"])
    assert distributions._convert_old_distribution_to_new_distribution(
        cd) == cd

    # No conversion happens for new distributions.
    fd = distributions.FloatDistribution(low=0, high=10, log=False, step=None)
    assert distributions._convert_old_distribution_to_new_distribution(
        fd) == fd

    dfd = distributions.FloatDistribution(low=0, high=10, log=False, step=2)
    assert distributions._convert_old_distribution_to_new_distribution(
        dfd) == dfd

    lfd = distributions.FloatDistribution(low=1, high=10, log=True, step=None)
    assert distributions._convert_old_distribution_to_new_distribution(
        lfd) == lfd

    id = distributions.IntDistribution(low=0, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        id) == id

    idd = distributions.IntDistribution(low=0, high=10, step=2)
    assert distributions._convert_old_distribution_to_new_distribution(
        idd) == idd

    ild = distributions.IntDistribution(low=1, high=10, log=True)
    assert distributions._convert_old_distribution_to_new_distribution(
        ild) == ild
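
The conversions checked above compare distributions with ==; the same equality also survives the JSON serialization helpers in optuna.distributions. A small hedged round-trip sketch:

# Hedged sketch: round-tripping a new-style distribution through its JSON form
# preserves equality, which is why the noop tests above can compare with ==.
from optuna import distributions

d = distributions.FloatDistribution(low=0.0, high=10.0, step=2.0)
json_str = distributions.distribution_to_json(d)
assert distributions.json_to_distribution(json_str) == d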
Example #5
def test_create_trial_distribution_conversion() -> None:
    fixed_params = {
        "ud": 0,
        "dud": 2,
        "lud": 1,
        "id": 0,
        "idd": 2,
        "ild": 1,
    }

    fixed_distributions = {
        "ud": distributions.UniformDistribution(low=0, high=10),
        "dud": distributions.DiscreteUniformDistribution(low=0, high=10, q=2),
        "lud": distributions.LogUniformDistribution(low=1, high=10),
        "id": distributions.IntUniformDistribution(low=0, high=10),
        "idd": distributions.IntUniformDistribution(low=0, high=10, step=2),
        "ild": distributions.IntLogUniformDistribution(low=1, high=10),
    }

    with pytest.warns(
            FutureWarning,
            match="See https://github.com/optuna/optuna/issues/2941",
    ) as record:

        trial = create_trial(params=fixed_params,
                             distributions=fixed_distributions,
                             value=1)
        assert len(record) == 6

    expected_distributions = {
        "ud": distributions.FloatDistribution(low=0,
                                              high=10,
                                              log=False,
                                              step=None),
        "dud": distributions.FloatDistribution(low=0,
                                               high=10,
                                               log=False,
                                               step=2),
        "lud": distributions.FloatDistribution(low=1,
                                               high=10,
                                               log=True,
                                               step=None),
        "id": distributions.IntDistribution(low=0, high=10, log=False, step=1),
        "idd": distributions.IntDistribution(low=0, high=10, log=False,
                                             step=2),
        "ild": distributions.IntDistribution(low=1, high=10, log=True, step=1),
    }

    assert trial.distributions == expected_distributions

def test_int_single(expected: bool, low: int, high: int, log: bool,
                    step: int) -> None:
    distribution = distributions.IntDistribution(low=low,
                                                 high=high,
                                                 log=log,
                                                 step=step)
    assert distribution.single() == expected
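
test_int_single above is a parametrized test whose @pytest.mark.parametrize decorator is not shown here; the concrete cases below are illustrative values chosen for this page, not the original parametrization:

# Illustrative single() checks; the values below are assumptions made for this
# page, not taken from the original parametrization.
from optuna import distributions

assert distributions.IntDistribution(low=3, high=3).single()
assert not distributions.IntDistribution(low=1, high=9, step=2).single()
assert distributions.FloatDistribution(low=1.0, high=1.0).single()
assert not distributions.FloatDistribution(low=1.0, high=2.0).single()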
Example #7
def test_eq_ne_hash() -> None:

    # Two instances of a class are regarded as equivalent if the fields have the same values.
    for d in EXAMPLE_DISTRIBUTIONS.values():
        d_copy = copy.deepcopy(d)
        assert d == d_copy
        assert hash(d) == hash(d_copy)

    # Different field values.
    di0 = distributions.FloatDistribution(low=1, high=2)
    di1 = distributions.FloatDistribution(low=1, high=3)
    assert di0 != di1

    # Different distribution classes.
    di2 = distributions.IntDistribution(low=1, high=2)
    assert di0 != di2

    # Different field values.
    d0 = distributions.UniformDistribution(low=1, high=2)
    d1 = distributions.UniformDistribution(low=1, high=3)
    assert d0 != d1

    # Different distribution classes.
    d2 = distributions.IntUniformDistribution(low=1, high=2)
    assert d0 != d2
def test_suggest_with_step_parzen_estimator(multivariate: bool) -> None:
    parameters = _ParzenEstimatorParameters(
        consider_prior=False,
        prior_weight=0.0,
        consider_magic_clip=False,
        consider_endpoints=False,
        weights=lambda x: np.arange(x) + 1.0,
        multivariate=multivariate,
    )

    # Define a search space of distributions that take a step argument, together with their true value ranges.
    search_space = {
        "c": distributions.FloatDistribution(low=1.0, high=7.0, step=3.0),
        "d": distributions.IntDistribution(low=1, high=5, step=2),
    }
    multivariate_samples = {"c": np.array([4]), "d": np.array([3])}
    valid_ranges = {
        "c": set(np.arange(1.0, 10.0, 3.0)),
        "d": set(np.arange(1, 7, 2))
    }

    sigmas0 = 1 if multivariate else None
    with patch(_PRECOMPUTE_SIGMAS0, return_value=sigmas0):
        mpe = _ParzenEstimator(multivariate_samples, search_space, parameters)

    # Draw 10 samples, and check if all valid values are sampled.
    output_samples = mpe.sample(np.random.RandomState(0), 10)
    for param_name in output_samples:
        assert set(output_samples[param_name]) == valid_ranges[param_name]
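
The equality and hashing behavior checked in test_eq_ne_hash is what allows whole search-space dictionaries to be compared with == elsewhere on this page. A short hedged sketch:

# Hedged sketch of the equality semantics above: equal field values imply equal
# objects and equal hashes; a different field value or class breaks equality.
from optuna import distributions

a = distributions.FloatDistribution(low=1.0, high=2.0)
b = distributions.FloatDistribution(low=1.0, high=2.0)
assert a == b and hash(a) == hash(b)
assert a != distributions.FloatDistribution(low=1.0, high=3.0)
assert a != distributions.IntDistribution(low=1, high=2)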
Example #9
def test_is_compatible() -> None:

    sampler = optuna.integration.SkoptSampler()
    study = optuna.create_study(sampler=sampler)

    study.optimize(lambda t: t.suggest_float("p0", 0, 10), n_trials=1)
    search_space = optuna.samplers.intersection_search_space(study)
    assert search_space == {
        "p0": distributions.FloatDistribution(low=0, high=10)
    }

    optimizer = optuna.integration.skopt._Optimizer(search_space, {})

    # Compatible.
    trial = _create_frozen_trial(
        {"p0": 5}, {"p0": distributions.FloatDistribution(low=0, high=10)})
    assert optimizer._is_compatible(trial)

    # Compatible.
    trial = _create_frozen_trial(
        {"p0": 5}, {"p0": distributions.FloatDistribution(low=0, high=100)})
    assert optimizer._is_compatible(trial)

    # Compatible.
    trial = _create_frozen_trial(
        {
            "p0": 5,
            "p1": 7
        },
        {
            "p0": distributions.FloatDistribution(low=0, high=10),
            "p1": distributions.FloatDistribution(low=0, high=10),
        },
    )
    assert optimizer._is_compatible(trial)

    # Incompatible ('p0' doesn't exist).
    trial = _create_frozen_trial(
        {"p1": 5}, {"p1": distributions.FloatDistribution(low=0, high=10)})
    assert not optimizer._is_compatible(trial)

    # Incompatible (the value of 'p0' is out of range).
    trial = _create_frozen_trial(
        {"p0": 20}, {"p0": distributions.FloatDistribution(low=0, high=100)})
    assert not optimizer._is_compatible(trial)

    # Error (different distribution class).
    trial = _create_frozen_trial(
        {"p0": 5}, {"p0": distributions.IntDistribution(low=0, high=10)})
    with pytest.raises(ValueError):
        optimizer._is_compatible(trial)
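
The compatibility checks above build on optuna.samplers.intersection_search_space, which collects the distributions shared by all completed trials. A hedged usage sketch with a made-up single-parameter objective:

# Hedged sketch: the intersection search space of a study with a single float
# parameter is just that parameter's FloatDistribution.
import optuna
from optuna import distributions

study = optuna.create_study()
study.optimize(lambda t: t.suggest_float("p0", 0, 10), n_trials=2)
space = optuna.samplers.intersection_search_space(study)
assert space == {"p0": distributions.FloatDistribution(low=0, high=10)}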
Example #10
def test_empty_range_contains() -> None:

    i = distributions.IntDistribution(low=1, high=1)
    assert not i._contains(0)
    assert i._contains(1)
    assert not i._contains(2)

    f = distributions.FloatDistribution(low=1.0, high=1.0)
    assert not f._contains(0.9)
    assert f._contains(1.0)
    assert not f._contains(1.1)

    fd = distributions.FloatDistribution(low=1.0, high=1.0, step=2.0)
    assert not fd._contains(0.9)
    assert fd._contains(1.0)
    assert not fd._contains(1.1)

    u = distributions.UniformDistribution(low=1.0, high=1.0)
    assert not u._contains(0.9)
    assert u._contains(1.0)
    assert not u._contains(1.1)

    lu = distributions.LogUniformDistribution(low=1.0, high=1.0)
    assert not lu._contains(0.9)
    assert lu._contains(1.0)
    assert not lu._contains(1.1)

    du = distributions.DiscreteUniformDistribution(low=1.0, high=1.0, q=2.0)
    assert not du._contains(0.9)
    assert du._contains(1.0)
    assert not du._contains(1.1)

    iu = distributions.IntUniformDistribution(low=1, high=1)
    assert not iu._contains(0)
    assert iu._contains(1)
    assert not iu._contains(2)

    iuq = distributions.IntUniformDistribution(low=1, high=1, step=2)
    assert not iuq._contains(0)
    assert iuq._contains(1)
    assert not iuq._contains(2)

    ilu = distributions.IntLogUniformDistribution(low=1, high=1)
    assert not ilu._contains(0)
    assert ilu._contains(1)
    assert not ilu._contains(2)

    iluq = distributions.IntLogUniformDistribution(low=1, high=1, step=2)
    assert not iluq._contains(0)
    assert iluq._contains(1)
    assert not iluq._contains(2)
Example #11
def test_infer_relative_search_space() -> None:
    sampler = TPESampler()
    search_space = {
        "a": distributions.FloatDistribution(1.0, 100.0),
        "b": distributions.FloatDistribution(1.0, 100.0, log=True),
        "c": distributions.FloatDistribution(1.0, 100.0, step=3.0),
        "d": distributions.IntDistribution(1, 100),
        "e": distributions.IntDistribution(0, 100, step=2),
        "f": distributions.IntDistribution(1, 100, log=True),
        "g": distributions.CategoricalDistribution(["x", "y", "z"]),
    }

    def obj(t: Trial) -> float:
        t.suggest_float("a", 1.0, 100.0)
        t.suggest_float("b", 1.0, 100.0, log=True)
        t.suggest_float("c", 1.0, 100.0, step=3.0)
        t.suggest_int("d", 1, 100)
        t.suggest_int("e", 0, 100, step=2)
        t.suggest_int("f", 1, 100, log=True)
        t.suggest_categorical("g", ["x", "y", "z"])
        return 0.0

    # Study and frozen-trial are not supposed to be accessed.
    study1 = Mock(spec=[])
    frozen_trial = Mock(spec=[])
    assert sampler.infer_relative_search_space(study1, frozen_trial) == {}

    study2 = optuna.create_study(sampler=sampler)
    study2.optimize(obj, n_trials=1)
    assert sampler.infer_relative_search_space(study2, study2.best_trial) == {}

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = TPESampler(multivariate=True)
    study3 = optuna.create_study(sampler=sampler)
    study3.optimize(obj, n_trials=1)
    assert sampler.infer_relative_search_space(
        study3, study3.best_trial) == search_space

def test_group() -> None:
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = TPESampler(multivariate=True, group=True)
    study = optuna.create_study(sampler=sampler)

    with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
        study.optimize(lambda t: t.suggest_int("x", 0, 10), n_trials=2)
        assert mock.call_count == 1
    assert study.trials[-1].distributions == {"x": distributions.IntDistribution(low=0, high=10)}

    with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
        study.optimize(
            lambda t: t.suggest_int("y", 0, 10) + t.suggest_float("z", -3, 3), n_trials=1
        )
        assert mock.call_count == 1
    assert study.trials[-1].distributions == {
        "y": distributions.IntDistribution(low=0, high=10),
        "z": distributions.FloatDistribution(low=-3, high=3),
    }

    with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
        study.optimize(
            lambda t: t.suggest_int("y", 0, 10)
            + t.suggest_float("z", -3, 3)
            + t.suggest_float("u", 1e-2, 1e2, log=True)
            + bool(t.suggest_categorical("v", ["A", "B", "C"])),
            n_trials=1,
        )
        assert mock.call_count == 2
    assert study.trials[-1].distributions == {
        "u": distributions.FloatDistribution(low=1e-2, high=1e2, log=True),
        "v": distributions.CategoricalDistribution(choices=["A", "B", "C"]),
        "y": distributions.IntDistribution(low=0, high=10),
        "z": distributions.FloatDistribution(low=-3, high=3),
    }

    with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
        study.optimize(lambda t: t.suggest_float("u", 1e-2, 1e2, log=True), n_trials=1)
        assert mock.call_count == 3
    assert study.trials[-1].distributions == {
        "u": distributions.FloatDistribution(low=1e-2, high=1e2, log=True)
    }

    with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
        study.optimize(
            lambda t: t.suggest_int("y", 0, 10) + t.suggest_int("w", 2, 8, log=True), n_trials=1
        )
        assert mock.call_count == 4
    assert study.trials[-1].distributions == {
        "y": distributions.IntDistribution(low=0, high=10),
        "w": distributions.IntDistribution(low=2, high=8, log=True),
    }

    with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
        study.optimize(lambda t: t.suggest_int("x", 0, 10), n_trials=1)
        assert mock.call_count == 6
    assert study.trials[-1].distributions == {"x": distributions.IntDistribution(low=0, high=10)}
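
test_group relies on the experimental group option of TPESampler, which splits the search space into groups of parameters that co-occur in trials. A hedged sketch of simply enabling it, with an illustrative toy objective:

# Hedged sketch: enabling grouped multivariate TPE (both flags are experimental,
# hence the warning filter); the objective below is illustrative.
import warnings
import optuna

with warnings.catch_warnings():
    warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
    sampler = optuna.samplers.TPESampler(multivariate=True, group=True)

study = optuna.create_study(sampler=sampler)
study.optimize(
    lambda t: t.suggest_int("x", 0, 10) + t.suggest_float("z", -3, 3), n_trials=5
)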
Example #13
def test_create_trial_distribution_conversion_noop() -> None:
    fixed_params = {
        "ud": 0,
        "dud": 2,
        "lud": 1,
        "id": 0,
        "idd": 2,
        "ild": 1,
        "cd": "a",
    }

    fixed_distributions = {
        "ud": distributions.FloatDistribution(low=0,
                                              high=10,
                                              log=False,
                                              step=None),
        "dud": distributions.FloatDistribution(low=0,
                                               high=10,
                                               log=False,
                                               step=2),
        "lud": distributions.FloatDistribution(low=1,
                                               high=10,
                                               log=True,
                                               step=None),
        "id": distributions.IntDistribution(low=0, high=10, log=False, step=1),
        "idd": distributions.IntDistribution(low=0, high=10, log=False,
                                             step=2),
        "ild": distributions.IntDistribution(low=1, high=10, log=True, step=1),
        "cd": distributions.CategoricalDistribution(choices=["a", "b", "c"]),
    }

    trial = create_trial(params=fixed_params,
                         distributions=fixed_distributions,
                         value=1)

    # Check fixed_distributions doesn't change.
    assert trial.distributions == fixed_distributions

def test_convert_old_distribution_to_new_distribution() -> None:
    ud = distributions.UniformDistribution(low=0, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        ud) == distributions.FloatDistribution(low=0,
                                               high=10,
                                               log=False,
                                               step=None)

    dud = distributions.DiscreteUniformDistribution(low=0, high=10, q=2)
    assert distributions._convert_old_distribution_to_new_distribution(
        dud) == distributions.FloatDistribution(low=0,
                                                high=10,
                                                log=False,
                                                step=2)

    lud = distributions.LogUniformDistribution(low=1, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        lud) == distributions.FloatDistribution(low=1,
                                                high=10,
                                                log=True,
                                                step=None)

    id = distributions.IntUniformDistribution(low=0, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        id) == distributions.IntDistribution(low=0, high=10, log=False, step=1)

    idd = distributions.IntUniformDistribution(low=0, high=10, step=2)
    assert distributions._convert_old_distribution_to_new_distribution(
        idd) == distributions.IntDistribution(low=0,
                                              high=10,
                                              log=False,
                                              step=2)

    ild = distributions.IntLogUniformDistribution(low=1, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        ild) == distributions.IntDistribution(low=1, high=10, log=True, step=1)

def test_int_init_error() -> None:

    # Empty distributions cannot be instantiated.
    with pytest.raises(ValueError):
        distributions.IntDistribution(low=123, high=100)

    with pytest.raises(ValueError):
        distributions.IntDistribution(low=100, high=10, log=True)

    with pytest.raises(ValueError):
        distributions.IntDistribution(low=123, high=100, step=2)

    # 'step' must be 1 when 'log' is True.
    with pytest.raises(ValueError):
        distributions.IntDistribution(low=1, high=100, log=True, step=2)

    # 'step' should be positive.
    with pytest.raises(ValueError):
        distributions.IntDistribution(low=1, high=100, step=0)

    with pytest.raises(ValueError):
        distributions.IntDistribution(low=1, high=10, step=-1)
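
A FrozenTrial built with create_trial, as in the conversion tests above, can also be injected into a study. A hedged sketch with an illustrative parameter name and value:

# Hedged sketch: add_trial lets a manually created FrozenTrial seed a study,
# e.g. to replay a known parameter/value pair; "x" and its value are made up.
import optuna
from optuna import distributions
from optuna.trial import create_trial

study = optuna.create_study()
study.add_trial(
    create_trial(
        params={"x": 2.0},
        distributions={"x": distributions.FloatDistribution(low=0.0, high=10.0)},
        value=4.0,
    )
)
assert len(study.trials) == 1
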
import numpy as np
import pytest

from optuna import distributions
from optuna.samplers._tpe.parzen_estimator import _ParzenEstimator
from optuna.samplers._tpe.parzen_estimator import _ParzenEstimatorParameters
from optuna.samplers._tpe.sampler import default_weights
SEARCH_SPACE = {
    "a": distributions.FloatDistribution(1.0, 100.0),
    "b": distributions.FloatDistribution(1.0, 100.0, log=True),
    "c": distributions.FloatDistribution(1.0, 100.0, step=3.0),
    "d": distributions.IntDistribution(1, 100),
    "e": distributions.IntDistribution(1, 100, log=True),
    "f": distributions.CategoricalDistribution(["x", "y", "z"]),
    "g": distributions.CategoricalDistribution([0.0, float("inf"), float("nan"), None]),
}

MULTIVARIATE_SAMPLES = {
    "a": np.array([1.0]),
    "b": np.array([1.0]),
    "c": np.array([1.0]),
    "d": np.array([1]),
    "e": np.array([1]),
Example #17
def test_int_contains(expected: bool, value: float, step: int) -> None:
    i = distributions.IntDistribution(low=1, high=10, step=step)
    assert i._contains(value) == expected

def test_int_internal_representation(value: int) -> None:

    i = distributions.IntDistribution(low=1, high=10)
    assert i.to_external_repr(i.to_internal_repr(value)) == value

def test_check_distribution_compatibility() -> None:

    # test the same distribution
    for key in EXAMPLE_JSONS:
        distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS[key], EXAMPLE_DISTRIBUTIONS[key])
        # We need to create new objects to compare NaNs.
        # See https://github.com/optuna/optuna/pull/3567#pullrequestreview-974939837.
        distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS[key],
            distributions.json_to_distribution(EXAMPLE_JSONS[key]))

    # test different distribution classes
    pytest.raises(
        ValueError,
        lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["i"], EXAMPLE_DISTRIBUTIONS["fl"]),
    )

    # test compatibility between IntDistributions.
    distributions.check_distribution_compatibility(EXAMPLE_DISTRIBUTIONS["id"],
                                                   EXAMPLE_DISTRIBUTIONS["i"])

    with pytest.raises(ValueError):
        distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["i"], EXAMPLE_DISTRIBUTIONS["il"])

    with pytest.raises(ValueError):
        distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["il"], EXAMPLE_DISTRIBUTIONS["id"])

    # test compatibility between FloatDistributions.
    distributions.check_distribution_compatibility(EXAMPLE_DISTRIBUTIONS["fd"],
                                                   EXAMPLE_DISTRIBUTIONS["f"])

    with pytest.raises(ValueError):
        distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["f"], EXAMPLE_DISTRIBUTIONS["fl"])

    with pytest.raises(ValueError):
        distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["fl"], EXAMPLE_DISTRIBUTIONS["fd"])

    # test dynamic value range (CategoricalDistribution)
    pytest.raises(
        ValueError,
        lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["c2"],
            distributions.CategoricalDistribution(choices=("Roppongi",
                                                           "Akasaka")),
        ),
    )

    # test dynamic value range (except CategoricalDistribution)
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["i"],
        distributions.IntDistribution(low=-3, high=2))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["il"],
        distributions.IntDistribution(low=1, high=13, log=True))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["id"],
        distributions.IntDistribution(low=-3, high=2, step=2))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["f"],
        distributions.FloatDistribution(low=-3.0, high=-2.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["fl"],
        distributions.FloatDistribution(low=0.1, high=1.0, log=True))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["fd"],
        distributions.FloatDistribution(low=-1.0, high=11.0, step=0.5))
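
In short, check_distribution_compatibility accepts two distributions of the same class even when their numeric ranges differ, and raises ValueError when the classes differ. A hedged mini-example:

# Hedged sketch of the compatibility rule exercised above.
import pytest
from optuna import distributions

a = distributions.FloatDistribution(low=0.0, high=1.0)
b = distributions.FloatDistribution(low=-5.0, high=5.0)
distributions.check_distribution_compatibility(a, b)  # OK: only the range differs

with pytest.raises(ValueError):
    distributions.check_distribution_compatibility(
        a, distributions.IntDistribution(low=0, high=1))
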
from typing import Any
from typing import cast
from typing import Dict
from typing import Optional
import warnings

import numpy as np
import pytest

from optuna import distributions

_choices = (None, True, False, 0, 1, 0.0, 1.0, float("nan"), float("inf"),
            -float("inf"), "", "a")
_choices_json = '[null, true, false, 0, 1, 0.0, 1.0, NaN, Infinity, -Infinity, "", "a"]'

EXAMPLE_DISTRIBUTIONS: Dict[str, Any] = {
    "i": distributions.IntDistribution(low=1, high=9, log=False),
    # i2 and i3 are identical to i, and tested for cases when `log` and `step` are omitted in json.
    "i2": distributions.IntDistribution(low=1, high=9, log=False),
    "i3": distributions.IntDistribution(low=1, high=9, log=False),
    "il": distributions.IntDistribution(low=2, high=12, log=True),
    "il2": distributions.IntDistribution(low=2, high=12, log=True),
    "id": distributions.IntDistribution(low=1, high=9, log=False, step=2),
    "id2": distributions.IntDistribution(low=1, high=9, log=False, step=2),
    "f": distributions.FloatDistribution(low=1.0, high=2.0, log=False),
    "fl": distributions.FloatDistribution(low=0.001, high=100.0, log=True),
    "fd": distributions.FloatDistribution(low=1.0,
                                          high=9.0,
                                          log=False,
                                          step=2.0),
    "c1": distributions.CategoricalDistribution(choices=_choices),
    "c2": distributions.CategoricalDistribution(choices=("Roppongi", "Azabu")),