Example #1
def test_single():
    # type: () -> None

    single_distributions = [
        distributions.UniformDistribution(low=1.0, high=1.0),
        distributions.LogUniformDistribution(low=7.3, high=7.3),
        distributions.DiscreteUniformDistribution(low=2.22, high=2.22, q=0.1),
        distributions.DiscreteUniformDistribution(low=2.22, high=2.24, q=0.3),
        distributions.IntUniformDistribution(low=-123, high=-123),
        distributions.IntUniformDistribution(low=-123, high=-120, step=4),
        distributions.CategoricalDistribution(choices=("foo", )),
    ]  # type: List[distributions.BaseDistribution]
    for distribution in single_distributions:
        assert distribution.single()

    nonsingle_distributions = [
        distributions.UniformDistribution(low=1.0, high=1.001),
        distributions.LogUniformDistribution(low=7.3, high=10),
        distributions.DiscreteUniformDistribution(low=-30, high=-20, q=2),
        distributions.DiscreteUniformDistribution(low=-30, high=-20, q=10),
        # In Python, "0.3 - 0.2 != 0.1" is True.
        distributions.DiscreteUniformDistribution(low=0.2, high=0.3, q=0.1),
        distributions.DiscreteUniformDistribution(low=0.7, high=0.8, q=0.1),
        distributions.IntUniformDistribution(low=-123, high=0),
        distributions.IntUniformDistribution(low=-123, high=0, step=123),
        distributions.CategoricalDistribution(choices=("foo", "bar")),
    ]  # type: List[distributions.BaseDistribution]
    for distribution in nonsingle_distributions:
        assert not distribution.single()
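
A minimal sketch of the single() semantics exercised above, assuming an Optuna version that still ships these legacy distribution classes: single() returns True only when the distribution can produce exactly one value.

from optuna import distributions

point = distributions.UniformDistribution(low=1.0, high=1.0)
interval = distributions.UniformDistribution(low=1.0, high=2.0)

assert point.single()         # only 1.0 can ever be suggested
assert not interval.single()  # infinitely many values in [1.0, 2.0)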
Example #2
def test_single():
    # type: () -> None

    single_distributions = [
        distributions.UniformDistribution(low=1.0, high=1.0),
        distributions.LogUniformDistribution(low=7.3, high=7.3),
        distributions.DiscreteUniformDistribution(low=2.22, high=2.22, q=0.1),
        distributions.IntUniformDistribution(low=-123, high=-123),
        distributions.CategoricalDistribution(choices=('foo', ))
    ]  # type: List[distributions.BaseDistribution]
    for distribution in single_distributions:
        assert distribution.single()

    nonsingle_distributions = [
        distributions.UniformDistribution(low=0.0, high=-100.0),
        distributions.UniformDistribution(low=1.0, high=1.001),
        distributions.LogUniformDistribution(low=7.3, high=7.2),
        distributions.LogUniformDistribution(low=7.3, high=10),
        distributions.DiscreteUniformDistribution(low=-30, high=-40, q=3),
        distributions.DiscreteUniformDistribution(low=-30, high=-20, q=2),
        distributions.IntUniformDistribution(low=123, high=100),
        distributions.IntUniformDistribution(low=-123, high=0),
        distributions.CategoricalDistribution(choices=()),
        distributions.CategoricalDistribution(choices=('foo', 'bar'))
    ]  # type: List[distributions.BaseDistribution]
    for distribution in nonsingle_distributions:
        assert not distribution.single()
Example #3
def test_single() -> None:

    with warnings.catch_warnings():
        # UserWarning will be raised since the range is not divisible by step.
        warnings.simplefilter("ignore", category=UserWarning)
        single_distributions: List[distributions.BaseDistribution] = [
            distributions.UniformDistribution(low=1.0, high=1.0),
            distributions.LogUniformDistribution(low=7.3, high=7.3),
            distributions.DiscreteUniformDistribution(low=2.22, high=2.22, q=0.1),
            distributions.DiscreteUniformDistribution(low=2.22, high=2.24, q=0.3),
            distributions.IntUniformDistribution(low=-123, high=-123),
            distributions.IntUniformDistribution(low=-123, high=-120, step=4),
            distributions.CategoricalDistribution(choices=("foo",)),
            distributions.IntLogUniformDistribution(low=2, high=2),
        ]
    for distribution in single_distributions:
        assert distribution.single()

    nonsingle_distributions: List[distributions.BaseDistribution] = [
        distributions.UniformDistribution(low=1.0, high=1.001),
        distributions.LogUniformDistribution(low=7.3, high=10),
        distributions.DiscreteUniformDistribution(low=-30, high=-20, q=2),
        distributions.DiscreteUniformDistribution(low=-30, high=-20, q=10),
        # In Python, "0.3 - 0.2 != 0.1" is True.
        distributions.DiscreteUniformDistribution(low=0.2, high=0.3, q=0.1),
        distributions.DiscreteUniformDistribution(low=0.7, high=0.8, q=0.1),
        distributions.IntUniformDistribution(low=-123, high=0),
        distributions.IntUniformDistribution(low=-123, high=0, step=123),
        distributions.CategoricalDistribution(choices=("foo", "bar")),
        distributions.IntLogUniformDistribution(low=2, high=4),
    ]
    for distribution in nonsingle_distributions:
        assert not distribution.single()
Example #4
    def suggest_float(self, name, low, high, *, log=False):
        # type: (str, float, float, bool) -> float

        if log:
            return self._suggest(name, distributions.LogUniformDistribution(low=low, high=high))
        else:
            return self._suggest(name, distributions.UniformDistribution(low=low, high=high))
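
A hedged usage sketch for the helper above, assuming an Optuna version whose Trial exposes suggest_float; the parameter names "lr" and "dropout" are purely illustrative.

def objective(trial):
    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)  # backed by LogUniformDistribution in this helper
    dropout = trial.suggest_float("dropout", 0.0, 0.5)    # backed by UniformDistribution
    return lr + dropout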
Example #5
def test_check_distribution_compatibility():
    # type: () -> None

    # test the same distribution
    for key in EXAMPLE_JSONS.keys():
        distributions.check_distribution_compatibility(EXAMPLE_DISTRIBUTIONS[key],
                                                       EXAMPLE_DISTRIBUTIONS[key])

    # test different distribution classes
    pytest.raises(
        ValueError, lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS['u'], EXAMPLE_DISTRIBUTIONS['l']))

    # test dynamic value range (CategoricalDistribution)
    pytest.raises(
        ValueError, lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS['c2'],
            distributions.CategoricalDistribution(choices=('Roppongi', 'Akasaka'))))

    # test dynamic value range (except CategoricalDistribution)
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS['u'], distributions.UniformDistribution(low=-3.0, high=-2.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS['l'], distributions.LogUniformDistribution(low=-0.1, high=1.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS['du'],
        distributions.DiscreteUniformDistribution(low=-1.0, high=10.0, q=3.))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS['iu'], distributions.IntUniformDistribution(low=-1, high=1))
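
In short, compatibility requires the same distribution class, and for CategoricalDistribution also identical choices; purely numerical bounds may differ. A minimal sketch with illustrative values:

from optuna import distributions

a = distributions.UniformDistribution(low=0.0, high=1.0)
b = distributions.UniformDistribution(low=-5.0, high=5.0)
distributions.check_distribution_compatibility(a, b)  # passes: same class, different bounds

try:
    distributions.check_distribution_compatibility(
        a, distributions.LogUniformDistribution(low=1e-3, high=1.0))
except ValueError:
    pass  # different distribution classes are incompatible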
Example #6
def test_distributions(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    def objective(trial):
        # type: (Trial) -> float

        trial.suggest_uniform('a', 0, 10)
        trial.suggest_loguniform('b', 0.1, 10)
        trial.suggest_discrete_uniform('c', 0, 10, 1)
        trial.suggest_int('d', 0, 10)
        trial.suggest_categorical('e', ['foo', 'bar', 'baz'])

        return 1.0

    study = create_study(storage_init_func())
    study.optimize(objective, n_trials=1)

    assert study.best_trial.distributions == {
        'a': distributions.UniformDistribution(low=0, high=10),
        'b': distributions.LogUniformDistribution(low=0.1, high=10),
        'c': distributions.DiscreteUniformDistribution(low=0, high=10, q=1),
        'd': distributions.IntUniformDistribution(low=0, high=10),
        'e':
        distributions.CategoricalDistribution(choices=('foo', 'bar', 'baz'))
    }
Example #7
def test_distributions(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    def objective(trial):
        # type: (Trial) -> float

        trial.suggest_uniform("a", 0, 10)
        trial.suggest_loguniform("b", 0.1, 10)
        trial.suggest_discrete_uniform("c", 0, 10, 1)
        trial.suggest_int("d", 0, 10)
        trial.suggest_categorical("e", ["foo", "bar", "baz"])

        return 1.0

    study = create_study(storage_init_func())
    study.optimize(objective, n_trials=1)

    assert study.best_trial.distributions == {
        "a": distributions.UniformDistribution(low=0, high=10),
        "b": distributions.LogUniformDistribution(low=0.1, high=10),
        "c": distributions.DiscreteUniformDistribution(low=0, high=10, q=1),
        "d": distributions.IntUniformDistribution(low=0, high=10),
        "e":
        distributions.CategoricalDistribution(choices=("foo", "bar", "baz")),
    }
Example #8
def test_empty_range_contains():
    # type: () -> None

    u = distributions.UniformDistribution(low=1.0, high=1.0)
    assert not u._contains(0.9)
    assert u._contains(1.0)
    assert not u._contains(1.1)

    lu = distributions.LogUniformDistribution(low=1.0, high=1.0)
    assert not lu._contains(0.9)
    assert lu._contains(1.0)
    assert not lu._contains(1.1)

    du = distributions.DiscreteUniformDistribution(low=1.0, high=1.0, q=2.0)
    assert not du._contains(0.9)
    assert du._contains(1.0)
    assert not du._contains(1.1)

    iu = distributions.IntUniformDistribution(low=1, high=1)
    assert not iu._contains(0)
    assert iu._contains(1)
    assert not iu._contains(2)

    iuq = distributions.IntUniformDistribution(low=1, high=1, step=2)
    assert not iuq._contains(0)
    assert iuq._contains(1)
    assert not iuq._contains(2)
Example #9
def test_optuna_search_convert_deprecated_distribution() -> None:

    param_dist = {
        "ud": distributions.UniformDistribution(low=0, high=10),
        "dud": distributions.DiscreteUniformDistribution(low=0, high=10, q=2),
        "lud": distributions.LogUniformDistribution(low=1, high=10),
        "id": distributions.IntUniformDistribution(low=0, high=10),
        "idd": distributions.IntUniformDistribution(low=0, high=10, step=2),
        "ild": distributions.IntLogUniformDistribution(low=1, high=10),
    }

    expected_param_dist = {
        "ud": distributions.FloatDistribution(low=0, high=10, log=False, step=None),
        "dud": distributions.FloatDistribution(low=0, high=10, log=False, step=2),
        "lud": distributions.FloatDistribution(low=1, high=10, log=True, step=None),
        "id": distributions.IntDistribution(low=0, high=10, log=False, step=1),
        "idd": distributions.IntDistribution(low=0, high=10, log=False, step=2),
        "ild": distributions.IntDistribution(low=1, high=10, log=True, step=1),
    }

    optuna_search = integration.OptunaSearchCV(
        KernelDensity(),
        param_dist,
    )

    assert optuna_search.param_distributions == expected_param_dist

    # It confirms that ask doesn't convert non-deprecated distributions.
    optuna_search = integration.OptunaSearchCV(
        KernelDensity(),
        expected_param_dist,
    )

    assert optuna_search.param_distributions == expected_param_dist
Example #10
    def test_relative_sampling(storage_mode: str,
                               comm: CommunicatorBase) -> None:

        relative_search_space = {
            "x": distributions.UniformDistribution(low=-10, high=10),
            "y": distributions.LogUniformDistribution(low=20, high=30),
            "z": distributions.CategoricalDistribution(choices=(-1.0, 1.0)),
        }
        relative_params = {"x": 1.0, "y": 25.0, "z": -1.0}
        sampler = DeterministicRelativeSampler(
            relative_search_space,
            relative_params  # type: ignore
        )

        with MultiNodeStorageSupplier(storage_mode, comm) as storage:
            study = TestChainerMNStudy._create_shared_study(storage,
                                                            comm,
                                                            sampler=sampler)
            mn_study = ChainerMNStudy(study, comm)

            # Invoke optimize.
            n_trials = 20
            func = Func()
            mn_study.optimize(func, n_trials=n_trials)

            # Assert trial counts.
            assert len(mn_study.trials) == n_trials

            # Assert the parameters in `relative_params` have been suggested among all nodes.
            for trial in mn_study.trials:
                assert trial.params == relative_params
Example #11
def test_optuna_search(enable_pruning: bool, fit_params: str) -> None:

    X, y = make_blobs(n_samples=10)
    est = SGDClassifier(max_iter=5, tol=1e-03)
    param_dist = {"alpha": distributions.LogUniformDistribution(1e-04, 1e03)}
    optuna_search = integration.OptunaSearchCV(
        est,
        param_dist,
        cv=3,
        enable_pruning=enable_pruning,
        error_score="raise",
        max_iter=5,
        random_state=0,
        return_train_score=True,
    )

    with pytest.raises(NotFittedError):
        optuna_search._check_is_fitted()

    if fit_params == "coef_init" and not enable_pruning:
        optuna_search.fit(X, y, coef_init=np.ones((3, 2), dtype=np.float64))
    else:
        optuna_search.fit(X, y)

    optuna_search.trials_dataframe()
    optuna_search.decision_function(X)
    optuna_search.predict(X)
    optuna_search.score(X, y)
Example #12
    def suggest_float(
        self,
        name: str,
        low: float,
        high: float,
        *,
        step: Optional[float] = None,
        log: bool = False
    ) -> float:

        if step is not None:
            if log:
                raise NotImplementedError(
                    "The parameter `step` is not supported when `log` is True."
                )
            else:
                return self._suggest(
                    name, distributions.DiscreteUniformDistribution(low=low, high=high, q=step)
                )
        else:
            if log:
                return self._suggest(
                    name, distributions.LogUniformDistribution(low=low, high=high)
                )
            else:
                return self._suggest(name, distributions.UniformDistribution(low=low, high=high))
Example #13
def test_empty_distribution():
    # type: () -> None

    # Empty distributions cannot be instantiated.
    with pytest.raises(ValueError):
        distributions.UniformDistribution(low=0.0, high=-100.0)

    with pytest.raises(ValueError):
        distributions.LogUniformDistribution(low=7.3, high=7.2)

    with pytest.raises(ValueError):
        distributions.DiscreteUniformDistribution(low=-30, high=-40, q=3)

    with pytest.raises(ValueError):
        distributions.IntUniformDistribution(low=123, high=100)

    with pytest.raises(ValueError):
        distributions.IntUniformDistribution(low=123, high=100, step=2)

    with pytest.raises(ValueError):
        distributions.CategoricalDistribution(choices=())

    with pytest.raises(ValueError):
        distributions.IntLogUniformDistribution(low=123, high=100)

    with pytest.raises(ValueError):
        distributions.IntLogUniformDistribution(low=123, high=100, step=2)
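
A small sketch contrasting an empty range, which the Optuna version tested here rejects at construction time, with a degenerate single-point range that remains valid:

import pytest
from optuna import distributions

with pytest.raises(ValueError):
    distributions.UniformDistribution(low=1.0, high=0.0)  # empty range: low > high

degenerate = distributions.UniformDistribution(low=1.0, high=1.0)  # low == high is allowed
assert degenerate.single()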
Example #14
    def suggest_loguniform(self, name, low, high):
        # type: (str, float, float) -> float
        """Suggest a value for the continuous parameter.

        The value is sampled from the range ``[low, high)`` in the log domain.

        Example:

            Suggest penalty parameter ``C`` of `SVC <https://scikit-learn.org/stable/modules/
            generated/sklearn.svm.SVC.html>`_.

            .. code::

                >>> def objective(trial):
                >>>     ...
                >>>     c = trial.suggest_loguniform('c', 1e-5, 1e2)
                >>>     clf = sklearn.svm.SVC(C=c)
                >>>     ...

        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is excluded from the
                range.

        Returns:
            A suggested float value.
        """

        return self._suggest(
            name, distributions.LogUniformDistribution(low=low, high=high))
Example #15
def test_optuna_search_properties() -> None:

    X, y = make_blobs(n_samples=10)
    est = LogisticRegression(max_iter=5, tol=1e-03)
    param_dist = {"C": distributions.LogUniformDistribution(1e-04, 1e03)}

    optuna_search = integration.OptunaSearchCV(est,
                                               param_dist,
                                               cv=3,
                                               error_score="raise",
                                               random_state=0,
                                               return_train_score=True)
    optuna_search.fit(X, y)
    optuna_search.set_user_attr("dataset", "blobs")

    assert optuna_search._estimator_type == "classifier"
    assert type(optuna_search.best_index_) == int
    assert type(optuna_search.best_params_) == dict
    assert optuna_search.best_score_ is not None
    assert optuna_search.best_trial_ is not None
    assert np.allclose(optuna_search.classes_, np.array([0, 1, 2]))
    assert optuna_search.n_trials_ == 10
    assert optuna_search.user_attrs_ == {"dataset": "blobs"}
    assert type(optuna_search.predict_log_proba(X)) == np.ndarray
    assert type(optuna_search.predict_proba(X)) == np.ndarray
Example #16
    def suggest_loguniform(self, name, low, high):
        # type: (str, float, float) -> float
        """Suggest a value for the continuous parameter.

        The value is sampled from the range :math:`[\\mathsf{low}, \\mathsf{high})`
        in the log domain. When :math:`\\mathsf{low} = \\mathsf{high}`, the value of
        :math:`\\mathsf{low}` will be returned.

        Example:

            Suggest penalty parameter ``C`` of `SVC <https://scikit-learn.org/stable/modules/
            generated/sklearn.svm.SVC.html>`_.

            .. testsetup::

                import numpy as np
                from sklearn.model_selection import train_test_split

                np.random.seed(seed=0)
                X = np.random.randn(50).reshape(-1, 1)
                y = np.random.randint(0, 2, 50)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)

            .. testcode::

                import optuna
                from sklearn.svm import SVC

                def objective(trial):
                    c = trial.suggest_loguniform('c', 1e-5, 1e2)
                    clf = SVC(C=c, gamma='scale', random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)

                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)

        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is excluded from the
                range.

        Returns:
            A suggested float value.
        """

        distribution = distributions.LogUniformDistribution(low=low, high=high)

        self._check_distribution(name, distribution)

        if low == high:
            return self._set_new_param_or_get_existing(name, low, distribution)

        return self._suggest(name, distribution)
Example #17
def test_empty_range_contains() -> None:

    i = distributions.IntDistribution(low=1, high=1)
    assert not i._contains(0)
    assert i._contains(1)
    assert not i._contains(2)

    f = distributions.FloatDistribution(low=1.0, high=1.0)
    assert not f._contains(0.9)
    assert f._contains(1.0)
    assert not f._contains(1.1)

    fd = distributions.FloatDistribution(low=1.0, high=1.0, step=2.0)
    assert not fd._contains(0.9)
    assert fd._contains(1.0)
    assert not fd._contains(1.1)

    u = distributions.UniformDistribution(low=1.0, high=1.0)
    assert not u._contains(0.9)
    assert u._contains(1.0)
    assert not u._contains(1.1)

    lu = distributions.LogUniformDistribution(low=1.0, high=1.0)
    assert not lu._contains(0.9)
    assert lu._contains(1.0)
    assert not lu._contains(1.1)

    du = distributions.DiscreteUniformDistribution(low=1.0, high=1.0, q=2.0)
    assert not du._contains(0.9)
    assert du._contains(1.0)
    assert not du._contains(1.1)

    iu = distributions.IntUniformDistribution(low=1, high=1)
    assert not iu._contains(0)
    assert iu._contains(1)
    assert not iu._contains(2)

    iuq = distributions.IntUniformDistribution(low=1, high=1, step=2)
    assert not iuq._contains(0)
    assert iuq._contains(1)
    assert not iuq._contains(2)

    ilu = distributions.IntLogUniformDistribution(low=1, high=1)
    assert not ilu._contains(0)
    assert ilu._contains(1)
    assert not ilu._contains(2)

    iluq = distributions.IntLogUniformDistribution(low=1, high=1, step=2)
    assert not iluq._contains(0)
    assert iluq._contains(1)
    assert not iluq._contains(2)
Example #18
def test_create_trial_distribution_conversion() -> None:
    fixed_params = {
        "ud": 0,
        "dud": 2,
        "lud": 1,
        "id": 0,
        "idd": 2,
        "ild": 1,
    }

    fixed_distributions = {
        "ud": distributions.UniformDistribution(low=0, high=10),
        "dud": distributions.DiscreteUniformDistribution(low=0, high=10, q=2),
        "lud": distributions.LogUniformDistribution(low=1, high=10),
        "id": distributions.IntUniformDistribution(low=0, high=10),
        "idd": distributions.IntUniformDistribution(low=0, high=10, step=2),
        "ild": distributions.IntLogUniformDistribution(low=1, high=10),
    }

    with pytest.warns(
            FutureWarning,
            match="See https://github.com/optuna/optuna/issues/2941",
    ) as record:

        trial = create_trial(params=fixed_params,
                             distributions=fixed_distributions,
                             value=1)
        assert len(record) == 6

    expected_distributions = {
        "ud": distributions.FloatDistribution(low=0,
                                              high=10,
                                              log=False,
                                              step=None),
        "dud": distributions.FloatDistribution(low=0,
                                               high=10,
                                               log=False,
                                               step=2),
        "lud": distributions.FloatDistribution(low=1,
                                               high=10,
                                               log=True,
                                               step=None),
        "id": distributions.IntDistribution(low=0, high=10, log=False, step=1),
        "idd": distributions.IntDistribution(low=0, high=10, log=False,
                                             step=2),
        "ild": distributions.IntDistribution(low=1, high=10, log=True, step=1),
    }

    assert trial.distributions == expected_distributions
Example #19
def test_contains():
    # type: () -> None

    u = distributions.UniformDistribution(low=1.0, high=2.0)
    assert not u._contains(0.9)
    assert u._contains(1)
    assert u._contains(1.5)
    assert not u._contains(2)

    lu = distributions.LogUniformDistribution(low=0.001, high=100)
    assert not lu._contains(0.0)
    assert lu._contains(0.001)
    assert lu._contains(12.3)
    assert not lu._contains(100)

    du = distributions.DiscreteUniformDistribution(low=1.0, high=10.0, q=2.0)
    assert not du._contains(0.9)
    assert du._contains(1.0)
    assert du._contains(3.5)
    assert du._contains(6)
    assert du._contains(10)
    assert not du._contains(10.1)

    iu = distributions.IntUniformDistribution(low=1, high=10)
    assert not iu._contains(0.9)
    assert iu._contains(1)
    assert iu._contains(4)
    assert iu._contains(6)
    assert iu._contains(10)
    assert not iu._contains(10.1)
    assert not iu._contains(11)

    # IntUniformDistribution with a 'step' parameter.
    iuq = distributions.IntUniformDistribution(low=1, high=10, step=2)
    assert not iuq._contains(0.9)
    assert iuq._contains(1)
    assert iuq._contains(4)
    assert iuq._contains(6)
    assert iuq._contains(10)
    assert not iuq._contains(10.1)
    assert not iuq._contains(11)

    c = distributions.CategoricalDistribution(choices=("Roppongi", "Azabu"))
    assert not c._contains(-1)
    assert c._contains(0)
    assert c._contains(1)
    assert c._contains(1.5)
    assert not c._contains(3)
Example #20
def test_check_distribution_compatibility():
    # type: () -> None

    # test the same distribution
    for key in EXAMPLE_JSONS.keys():
        distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS[key], EXAMPLE_DISTRIBUTIONS[key])

    # test different distribution classes
    pytest.raises(
        ValueError,
        lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["u"], EXAMPLE_DISTRIBUTIONS["l"]),
    )

    # test dynamic value range (CategoricalDistribution)
    pytest.raises(
        ValueError,
        lambda: distributions.check_distribution_compatibility(
            EXAMPLE_DISTRIBUTIONS["c2"],
            distributions.CategoricalDistribution(choices=("Roppongi",
                                                           "Akasaka")),
        ),
    )

    # test dynamic value range (except CategoricalDistribution)
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["u"],
        distributions.UniformDistribution(low=-3.0, high=-2.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["l"],
        distributions.LogUniformDistribution(low=0.1, high=1.0))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["du"],
        distributions.DiscreteUniformDistribution(low=-1.0, high=11.0, q=3.0),
    )
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["iu"],
        distributions.IntUniformDistribution(low=-1, high=1))
    distributions.check_distribution_compatibility(
        EXAMPLE_DISTRIBUTIONS["ilu"],
        distributions.IntLogUniformDistribution(low=1, high=13),
    )
Example #21
    def suggest_loguniform(self, name, low, high):
        # type: (str, float, float) -> float
        """Suggest a value for the continuous parameter.

        The value is sampled from the range :math:`[\\mathsf{low}, \\mathsf{high})`
        in the log domain. When :math:`\\mathsf{low} = \\mathsf{high}`, the value of
        :math:`\\mathsf{low}` will be returned.

        Example:

            Suggest penalty parameter ``C`` of `SVC <https://scikit-learn.org/stable/modules/
            generated/sklearn.svm.SVC.html>`_.

            .. code::

                >>> def objective(trial):
                >>>     ...
                >>>     c = trial.suggest_loguniform('c', 1e-5, 1e2)
                >>>     clf = sklearn.svm.SVC(C=c)
                >>>     ...

        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is excluded from the
                range.

        Returns:
            A suggested float value.
        """

        distribution = distributions.LogUniformDistribution(low=low, high=high)
        if low == high:
            param_value_in_internal_repr = distribution.to_internal_repr(low)
            return self._set_new_param_or_get_existing(
                name, param_value_in_internal_repr, distribution)

        return self._suggest(name, distribution)
Example #22
def test_contains():
    # type: () -> None

    u = distributions.UniformDistribution(low=1., high=2.)
    assert not u._contains(0.9)
    assert u._contains(1)
    assert u._contains(1.5)
    assert not u._contains(2)

    lu = distributions.LogUniformDistribution(low=0.001, high=100)
    assert not lu._contains(0.0)
    assert lu._contains(0.001)
    assert lu._contains(12.3)
    assert not lu._contains(100)

    du = distributions.DiscreteUniformDistribution(low=1., high=10., q=2.)
    assert not du._contains(0.9)
    assert du._contains(1.0)
    assert du._contains(3.5)
    assert du._contains(6)
    assert du._contains(10)
    assert not du._contains(10.1)

    iu = distributions.IntUniformDistribution(low=1, high=10)
    assert not iu._contains(0.9)
    assert iu._contains(1)
    assert iu._contains(3.5)
    assert iu._contains(6)
    assert iu._contains(10)
    assert iu._contains(10.1)
    assert not iu._contains(11)

    c = distributions.CategoricalDistribution(choices=('Roppongi', 'Azabu'))
    assert not c._contains(-1)
    assert c._contains(0)
    assert c._contains(1)
    assert c._contains(1.5)
    assert not c._contains(3)
Example #23
def test_infer_relative_search_space() -> None:
    sampler = TPESampler()
    search_space = {
        "a": distributions.UniformDistribution(1.0, 100.0),
        "b": distributions.LogUniformDistribution(1.0, 100.0),
        "c": distributions.DiscreteUniformDistribution(1.0, 100.0, 3.0),
        "d": distributions.IntUniformDistribution(1, 100),
        "e": distributions.IntUniformDistribution(0, 100, step=2),
        "f": distributions.IntLogUniformDistribution(1, 100),
        "g": distributions.CategoricalDistribution(["x", "y", "z"]),
    }

    def obj(t: Trial) -> float:
        t.suggest_uniform("a", 1.0, 100.0)
        t.suggest_loguniform("b", 1.0, 100.0)
        t.suggest_discrete_uniform("c", 1.0, 100.0, 3.0)
        t.suggest_int("d", 1, 100)
        t.suggest_int("e", 0, 100, step=2)
        t.suggest_int("f", 1, 100, log=True)
        t.suggest_categorical("g", ["x", "y", "z"])
        return 0.0

    # Study and frozen-trial are not supposed to be accessed.
    study1 = Mock(spec=[])
    frozen_trial = Mock(spec=[])
    assert sampler.infer_relative_search_space(study1, frozen_trial) == {}

    study2 = optuna.create_study(sampler=sampler)
    study2.optimize(obj, n_trials=1)
    assert sampler.infer_relative_search_space(study2, study2.best_trial) == {}

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = TPESampler(multivariate=True)
    study3 = optuna.create_study(sampler=sampler)
    study3.optimize(obj, n_trials=1)
    assert sampler.infer_relative_search_space(
        study3, study3.best_trial) == search_space
Example #24
    def suggest_float(self, name, low, high, *, log=False, step=None):
        # type: (str, float, float, bool, Optional[float]) -> float

        if step is not None:
            if log:
                raise NotImplementedError(
                    "The parameter `step` is not supported when `log` is True."
                )
            else:
                return self._suggest(
                    name,
                    distributions.DiscreteUniformDistribution(low=low,
                                                              high=high,
                                                              q=step))
        else:
            if log:
                return self._suggest(
                    name,
                    distributions.LogUniformDistribution(low=low, high=high))
            else:
                return self._suggest(
                    name, distributions.UniformDistribution(low=low,
                                                            high=high))
Example #25
def test_optuna_search(enable_pruning):
    # type: (bool) -> None

    X, y = make_blobs(n_samples=10)
    est = SGDClassifier(max_iter=5, tol=1e-03)
    param_dist = {'alpha': distributions.LogUniformDistribution(1e-04, 1e+03)}
    optuna_search = integration.OptunaSearchCV(est,
                                               param_dist,
                                               cv=3,
                                               enable_pruning=enable_pruning,
                                               error_score='raise',
                                               max_iter=5,
                                               random_state=0,
                                               return_train_score=True)

    with pytest.raises(NotFittedError):
        optuna_search._check_is_fitted()

    optuna_search.fit(X, y)
    optuna_search.trials_dataframe()
    optuna_search.decision_function(X)
    optuna_search.predict(X)
    optuna_search.score(X, y)
Example #26
def test_convert_old_distribution_to_new_distribution() -> None:
    ud = distributions.UniformDistribution(low=0, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        ud) == distributions.FloatDistribution(low=0,
                                               high=10,
                                               log=False,
                                               step=None)

    dud = distributions.DiscreteUniformDistribution(low=0, high=10, q=2)
    assert distributions._convert_old_distribution_to_new_distribution(
        dud) == distributions.FloatDistribution(low=0,
                                                high=10,
                                                log=False,
                                                step=2)

    lud = distributions.LogUniformDistribution(low=1, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        lud) == distributions.FloatDistribution(low=1,
                                                high=10,
                                                log=True,
                                                step=None)

    id = distributions.IntUniformDistribution(low=0, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        id) == distributions.IntDistribution(low=0, high=10, log=False, step=1)

    idd = distributions.IntUniformDistribution(low=0, high=10, step=2)
    assert distributions._convert_old_distribution_to_new_distribution(
        idd) == distributions.IntDistribution(low=0,
                                              high=10,
                                              log=False,
                                              step=2)

    ild = distributions.IntLogUniformDistribution(low=1, high=10)
    assert distributions._convert_old_distribution_to_new_distribution(
        ild) == distributions.IntDistribution(low=1, high=10, log=True, step=1)
Example #27
    def suggest_loguniform(self, name, low, high):
        # type: (str, float, float) -> float

        return self._suggest(name, distributions.LogUniformDistribution(low=low, high=high))
Example #28
def test_contains() -> None:
    u = distributions.UniformDistribution(low=1.0, high=2.0)
    assert not u._contains(0.9)
    assert u._contains(1)
    assert u._contains(1.5)
    assert not u._contains(2)

    lu = distributions.LogUniformDistribution(low=0.001, high=100)
    assert not lu._contains(0.0)
    assert lu._contains(0.001)
    assert lu._contains(12.3)
    assert not lu._contains(100)

    with warnings.catch_warnings():
        # UserWarning will be raised since the range is not divisible by 2.
        # The range will be replaced with [1.0, 9.0].
        warnings.simplefilter("ignore", category=UserWarning)
        du = distributions.DiscreteUniformDistribution(low=1.0,
                                                       high=10.0,
                                                       q=2.0)
    assert not du._contains(0.9)
    assert du._contains(1.0)
    assert du._contains(3.5)
    assert du._contains(6)
    assert du._contains(9)
    assert not du._contains(9.1)
    assert not du._contains(10)

    iu = distributions.IntUniformDistribution(low=1, high=10)
    assert not iu._contains(0.9)
    assert iu._contains(1)
    assert iu._contains(4)
    assert iu._contains(6)
    assert iu._contains(10)
    assert not iu._contains(10.1)
    assert not iu._contains(11)

    # IntUniformDistribution with a 'step' parameter.
    with warnings.catch_warnings():
        # UserWarning will be raised since the range is not divisible by 2.
        # The range will be replaced with [1, 9].
        warnings.simplefilter("ignore", category=UserWarning)
        iuq = distributions.IntUniformDistribution(low=1, high=10, step=2)
    assert not iuq._contains(0.9)
    assert iuq._contains(1)
    assert iuq._contains(4)
    assert iuq._contains(6)
    assert iuq._contains(9)
    assert not iuq._contains(9.1)
    assert not iuq._contains(10)

    c = distributions.CategoricalDistribution(choices=("Roppongi", "Azabu"))
    assert not c._contains(-1)
    assert c._contains(0)
    assert c._contains(1)
    assert c._contains(1.5)
    assert not c._contains(3)

    ilu = distributions.IntLogUniformDistribution(low=2, high=12)
    assert not ilu._contains(0.9)
    assert ilu._contains(2)
    assert ilu._contains(4)
    assert ilu._contains(6)
    assert ilu._contains(12)
    assert not ilu._contains(12.1)
    assert not ilu._contains(13)

    iluq = distributions.IntLogUniformDistribution(low=2, high=7)
    assert not iluq._contains(0.9)
    assert iluq._contains(2)
    assert iluq._contains(4)
    assert iluq._contains(5)
    assert iluq._contains(6)
    assert not iluq._contains(7.1)
    assert not iluq._contains(8)
Example #29
import copy
import json
from typing import Any
from typing import Dict
from typing import List
import warnings

import pytest

from optuna import distributions

EXAMPLE_DISTRIBUTIONS = {
    "u": distributions.UniformDistribution(low=1.0, high=2.0),
    "l": distributions.LogUniformDistribution(low=0.001, high=100),
    "du": distributions.DiscreteUniformDistribution(low=1.0, high=9.0, q=2.0),
    "iu": distributions.IntUniformDistribution(low=1, high=9, step=2),
    "c1": distributions.CategoricalDistribution(choices=(2.71, -float("inf"))),
    "c2": distributions.CategoricalDistribution(choices=("Roppongi", "Azabu")),
    "c3": distributions.CategoricalDistribution(choices=["Roppongi", "Azabu"]),
    "ilu": distributions.IntLogUniformDistribution(low=2, high=12, step=2),
}  # type: Dict[str, Any]

EXAMPLE_JSONS = {
    "u":
    '{"name": "UniformDistribution", "attributes": {"low": 1.0, "high": 2.0}}',
    "l":
    '{"name": "LogUniformDistribution", "attributes": {"low": 0.001, "high": 100}}',
    "du":
    '{"name": "DiscreteUniformDistribution",'
    '"attributes": {"low": 1.0, "high": 9.0, "q": 2.0}}',
    "iu":
Example #30
    def suggest_joint(self, name, parameters_list):
        # type: (str, Sequence) -> Sequence
        """Suggest values for the given parameter space.

        The values are sampled based on the given parameter list.

        Example:

            Suggest a dropout rate and a number of units for neural network training.

            .. code::

                >>> def objective(trial):
                >>>     ...
                >>>     parameters_list = trial.suggest_joint('dropout_rate_and_n_units',
                >>>                                           [['uniform', 'dropout_rate', 0., 1.],
                >>>                                            ['loguniform', 'n_units', 4, 128]])
                >>>     ...

        Args:
            name:
                A parameter name.
            parameters_list:
                A list of four-element lists ``(parameter type, parameter name, low, high)``.
                The parameter type should be ``'uniform'`` or ``'loguniform'``.


        Returns:
            A suggested parameters list.
        """
        distributions_list = []
        for p in parameters_list:
            if p[0] == 'uniform':
                distributions_list.append(
                    distributions.ElementOfDistributionsList(
                        name=p[1],
                        dist=distributions.UniformDistribution(low=p[2],
                                                               high=p[3])))
            elif p[0] == 'loguniform':
                distributions_list.append(
                    distributions.ElementOfDistributionsList(
                        name=p[1],
                        dist=distributions.LogUniformDistribution(low=p[2],
                                                                  high=p[3])))
            else:
                raise ValueError(
                    'The {} is not implemented for suggest_joint'.format(p[0]))
        distribution = distributions.JointDistribution(
            distributions_list=distributions_list)

        dict_of_param_value_in_internal_repr = self._call_sampler(
            name, distribution)

        set_success = True
        for d in distributions_list:
            set_success &= self.storage.set_trial_param(
                self.trial_id, d.name,
                dict_of_param_value_in_internal_repr[d.name], d.dist)
        set_success &= self.storage.set_trial_param(
            self.trial_id, name, dict_of_param_value_in_internal_repr,
            distribution)
        if not set_success:
            for d in distributions_list:
                dict_of_param_value_in_internal_repr[
                    d.name] = self.storage.get_trial_param(
                        self.trial_id, d.name)

        list_of_params_value = distribution.to_external_repr(
            dict_of_param_value_in_internal_repr)
        return list_of_params_value