Example #1
def test_search_space_transform_untransform_params() -> None:
    search_space = {
        "x0": CategoricalDistribution(["corge"]),
        "x1": CategoricalDistribution(["foo", "bar", "baz", "qux"]),
        "x2": CategoricalDistribution(["quux", "quuz"]),
        "x3": FloatDistribution(2, 3),
        "x4": FloatDistribution(-2, 2),
        "x5": FloatDistribution(1, 10, log=True),
        "x6": FloatDistribution(1, 1, log=True),
        "x7": FloatDistribution(0, 1, step=0.2),
        "x8": IntDistribution(2, 4),
        "x9": IntDistribution(1, 10, log=True),
        "x10": IntDistribution(1, 9, step=2),
    }

    params = {
        "x0": "corge",
        "x1": "qux",
        "x2": "quux",
        "x3": 2.0,
        "x4": -2,
        "x5": 1.0,
        "x6": 1.0,
        "x7": 0.2,
        "x8": 2,
        "x9": 1,
        "x10": 3,
    }

    trans = _SearchSpaceTransform(search_space)
    trans_params = trans.transform(params)
    untrans_params = trans.untransform(trans_params)

    for name in params.keys():
        assert untrans_params[name] == params[name]
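The round trip above can be reduced to a few lines. The sketch below uses the same private `_SearchSpaceTransform` helper, so it is illustrative only rather than a supported public API; the column counts follow the encoding shown in Example #19, where each numerical parameter contributes one column and each categorical choice contributes one one-hot column.

from optuna._transform import _SearchSpaceTransform
from optuna.distributions import CategoricalDistribution, FloatDistribution

# Minimal sketch of the transform/untransform round trip exercised in the test above.
search_space = {
    "x": FloatDistribution(0, 1),
    "c": CategoricalDistribution(["a", "b", "c"]),
}
params = {"x": 0.25, "c": "b"}

trans = _SearchSpaceTransform(search_space)
vec = trans.transform(params)  # 1 numerical column + 3 one-hot columns -> a length-4 vector
assert trans.untransform(vec) == params  # the round trip recovers the original parameters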
Example #2
def test_sample_single_distribution(
        sampler_class: Callable[[], BaseSampler]) -> None:

    relative_search_space = {
        "a": UniformDistribution(low=1.0, high=1.0),
        "b": LogUniformDistribution(low=1.0, high=1.0),
        "c": DiscreteUniformDistribution(low=1.0, high=1.0, q=1.0),
        "d": IntUniformDistribution(low=1, high=1),
        "e": IntLogUniformDistribution(low=1, high=1),
        "f": CategoricalDistribution([1]),
        "g": FloatDistribution(low=1.0, high=1.0),
        "h": FloatDistribution(low=1.0, high=1.0, log=True),
        "i": FloatDistribution(low=1.0, high=1.0, step=1.0),
        "j": IntDistribution(low=1, high=1),
        "k": IntDistribution(low=1, high=1, log=True),
    }

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = sampler_class()
    study = optuna.study.create_study(sampler=sampler)

    # We need to test the construction of the model, so we should set `n_trials >= 2`.
    for _ in range(2):
        trial = study.ask(fixed_distributions=relative_search_space)
        study.tell(trial, 1.0)
        for param_name in relative_search_space.keys():
            assert trial.params[param_name] == 1
Example #3
File: test_cma.py Project: optuna/optuna
    def search_space() -> Dict[str, BaseDistribution]:

        return {
            "c": CategoricalDistribution(("a", "b")),
            "d": FloatDistribution(-1, 9, step=2),
            "i": IntDistribution(-1, 1),
            "ii": IntDistribution(-1, 3, step=2),
            "il": IntDistribution(2, 16, log=True),
            "l": FloatDistribution(0.001, 0.1, log=True),
            "u": FloatDistribution(-2, 2),
        }
Example #4
def test_relative_parameters(storage_mode: str) -> None:

    relative_search_space = {
        "x": FloatDistribution(low=5, high=6),
        "y": FloatDistribution(low=5, high=6),
    }
    relative_params = {"x": 5.5, "y": 5.5, "z": 5.5}

    sampler = DeterministicRelativeSampler(relative_search_space,
                                           relative_params)  # type: ignore

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)

        def create_trial() -> Trial:

            return Trial(study,
                         study._storage.create_new_trial(study._study_id))

        # Suggested from `relative_params`.
        trial0 = create_trial()
        distribution0 = FloatDistribution(low=0, high=100)
        assert trial0._suggest("x", distribution0) == 5.5

        # Not suggested from `relative_params` (due to unknown parameter name).
        trial1 = create_trial()
        distribution1 = distribution0
        assert trial1._suggest("w", distribution1) != 5.5

        # Not suggested from `relative_params` (due to incompatible value range).
        trial2 = create_trial()
        distribution2 = FloatDistribution(low=0, high=5)
        assert trial2._suggest("x", distribution2) != 5.5

        # Error (due to incompatible distribution class).
        trial3 = create_trial()
        distribution3 = IntDistribution(low=1, high=100)
        with pytest.raises(ValueError):
            trial3._suggest("y", distribution3)

        # Error ('z' is included in `relative_params` but not in `relative_search_space`).
        trial4 = create_trial()
        distribution4 = FloatDistribution(low=0, high=10)
        with pytest.raises(ValueError):
            trial4._suggest("z", distribution4)

        # Error (due to incompatible distribution class).
        trial5 = create_trial()
        distribution5 = IntDistribution(low=1, high=100, log=True)
        with pytest.raises(ValueError):
            trial5._suggest("y", distribution5)
Example #5
def test_not_contained_param() -> None:
    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": FloatDistribution(1.0, 10.0)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_float("x", 10.0, 100.0) == 1.0

    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": FloatDistribution(1.0, 10.0, log=True)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_float("x", 10.0, 100.0, log=True) == 1.0

    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": FloatDistribution(1.0, 10.0, step=1.0)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_float("x", 10.0, 100.0, step=1.0) == 1.0

    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": IntDistribution(1, 10)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_int("x", 10, 100) == 1

    trial = create_trial(
        value=0.2,
        params={"x": 1},
        distributions={"x": IntDistribution(1, 10)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_int("x", 10, 100, 1) == 1

    trial = create_trial(
        value=0.2,
        params={"x": 1},
        distributions={"x": IntDistribution(1, 10, log=True)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_int("x", 10, 100, log=True) == 1
Example #6
def test_shap_importance_evaluator_with_infinite(inf_value: float) -> None:
    # The test ensures that trials with infinite values are ignored when calculating importance scores.
    n_trial = 10
    seed = 13

    # Importance scores are calculated without a trial with an inf value.
    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(objective, n_trials=n_trial)

    evaluator = ShapleyImportanceEvaluator(seed=seed)
    param_importance_without_inf = evaluator.evaluate(study)

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            value=inf_value,
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0, "x4": 0.1},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": IntDistribution(low=2, high=4, log=True),
                "x4": CategoricalDistribution([0.1, 1, 10]),
            },
        )
    )
    # Importance scores are calculated with a trial with an inf value.
    param_importance_with_inf = evaluator.evaluate(study)

    # The importance scores should be the same with and without the inf trial,
    # because the trial whose objective value is inf is ignored.
    assert param_importance_with_inf == param_importance_without_inf
Example #7
def test_sample_relative() -> None:

    relative_search_space: Dict[str, BaseDistribution] = {
        "a": FloatDistribution(low=0, high=5),
        "b": CategoricalDistribution(choices=("foo", "bar", "baz")),
        "c": IntDistribution(low=20, high=50),  # Not exist in `relative_params`.
    }
    relative_params = {
        "a": 3.2,
        "b": "baz",
    }
    unknown_param_value = 30

    sampler = FixedSampler(relative_search_space, relative_params, unknown_param_value)
    study = optuna.study.create_study(sampler=sampler)

    def objective(trial: Trial) -> float:

        # Predefined parameters are sampled by `sample_relative()` method.
        assert trial.suggest_float("a", 0, 5) == 3.2
        assert trial.suggest_categorical("b", ["foo", "bar", "baz"]) == "baz"

        # Other parameters are sampled by `sample_independent()` method.
        assert trial.suggest_int("c", 20, 50) == unknown_param_value
        assert trial.suggest_float("d", 1, 100, log=True) == unknown_param_value
        assert trial.suggest_float("e", 20, 40) == unknown_param_value

        return 0.0

    study.optimize(objective, n_trials=10, catch=())
    for trial in study.trials:
        assert trial.params == {"a": 3.2, "b": "baz", "c": 30, "d": 30, "e": 30}
Example #8
    def suggest_int(
        self, name: str, low: int, high: int, step: int = 1, log: bool = False
    ) -> int:
        return int(self._suggest(name, IntDistribution(low, high, log=log, step=step)))
Example #9
File: test_cma.py Project: optuna/optuna
    def test_init_cma_opts() -> None:

        sampler = optuna.integration.PyCmaSampler(
            x0={
                "x": 0,
                "y": 0
            },
            sigma0=0.1,
            cma_stds={
                "x": 1,
                "y": 1
            },
            seed=1,
            cma_opts={"popsize": 5},
            independent_sampler=DeterministicRelativeSampler({}, {}),
        )
        study = optuna.create_study(sampler=sampler)

        with patch("optuna.integration.cma._Optimizer") as mock_obj:
            mock_obj.ask.return_value = {"x": -1, "y": -1}
            study.optimize(
                lambda t: t.suggest_int("x", -1, 1) + t.suggest_int("y", -1, 1), n_trials=2
            )
            assert mock_obj.mock_calls[0] == call(
                {
                    "x": IntDistribution(low=-1, high=1),
                    "y": IntDistribution(low=-1, high=1),
                },
                {
                    "x": 0,
                    "y": 0
                },
                0.1,
                {
                    "x": 1,
                    "y": 1
                },
                {
                    "popsize": 5,
                    "seed": 1,
                    "verbose": -2
                },
            )
Example #10
def test_suggest_int_log(trial_type: type) -> None:

    trial = _create_trial(
        trial_type=trial_type,
        params={"x": 1},
        distributions={"x": IntDistribution(1, 10, log=True)},
    )

    assert trial.suggest_int("x", 1, 10, log=True) == 1

    with pytest.raises(ValueError):
        trial.suggest_int("x", 1, 10, step=2, log=True)

    with pytest.raises(ValueError):
        trial.suggest_int("y", 1, 10, log=True)
Example #11
def test_distributions(storage_mode: str) -> None:
    def objective(trial: Trial) -> float:

        trial.suggest_float("a", 0, 10)
        trial.suggest_float("b", 0.1, 10, log=True)
        trial.suggest_float("c", 0, 10, step=1)
        trial.suggest_int("d", 0, 10)
        trial.suggest_categorical("e", ["foo", "bar", "baz"])
        trial.suggest_int("f", 1, 10, log=True)

        return 1.0

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage)
        study.optimize(objective, n_trials=1)

        assert study.best_trial.distributions == {
            "a": FloatDistribution(low=0, high=10),
            "b": FloatDistribution(low=0.1, high=10, log=True),
            "c": FloatDistribution(low=0, high=10, step=1),
            "d": IntDistribution(low=0, high=10),
            "e": CategoricalDistribution(choices=("foo", "bar", "baz")),
            "f": IntDistribution(low=1, high=10, log=True),
        }
Example #12
def test_crossover_numerical_distribution(crossover: BaseCrossover) -> None:

    study = optuna.study.create_study()
    rng = np.random.RandomState()
    search_space = {"x": FloatDistribution(1, 10), "y": IntDistribution(1, 10)}
    numerical_transform = _SearchSpaceTransform(search_space)
    parent_params = np.array([[1.0, 2], [3.0, 4]])

    if crossover.n_parents == 3:
        parent_params = np.append(parent_params, [[5.0, 6]], axis=0)

    child_params = crossover.crossover(parent_params, rng, study, numerical_transform.bounds)
    assert child_params.ndim == 1
    assert len(child_params) == len(search_space)
    # `np.nan not in array` is always true because nan != nan, so check explicitly.
    assert not np.isnan(child_params).any()
    assert not np.isinf(child_params).any()
Example #13
def test_crossover_duplicated_param_values(crossover: BaseCrossover) -> None:

    param_values = [1.0, 2.0]

    study = optuna.study.create_study()
    rng = np.random.RandomState()
    search_space = {"x": FloatDistribution(1, 10), "y": IntDistribution(1, 10)}
    numerical_transform = _SearchSpaceTransform(search_space)
    parent_params = np.array([param_values, param_values])

    if crossover.n_parents == 3:
        parent_params = np.append(parent_params, [param_values], axis=0)

    child_params = crossover.crossover(parent_params, rng, study,
                                       numerical_transform.bounds)
    assert child_params.ndim == 1
    np.testing.assert_almost_equal(child_params, param_values)
Example #14
def test_suggest_int() -> None:

    trial = FrozenTrial(
        number=0,
        trial_id=0,
        state=TrialState.COMPLETE,
        value=0.2,
        datetime_start=datetime.datetime.now(),
        datetime_complete=datetime.datetime.now(),
        params={"x": 1},
        distributions={"x": IntDistribution(0, 10)},
        user_attrs={},
        system_attrs={},
        intermediate_values={},
    )

    assert trial.suggest_int("x", 0, 10) == 1

    with pytest.raises(ValueError):
        trial.suggest_int("y", 0, 10)
Example #15
def test_multi_objective_shap_importance_evaluator_with_infinite(
    target_idx: int, inf_value: float
) -> None:
    def multi_objective_function(trial: Trial) -> Tuple[float, float]:
        x1: float = trial.suggest_float("x1", 0.1, 3)
        x2: float = trial.suggest_float("x2", 0.1, 3, log=True)
        x3: int = trial.suggest_int("x3", 2, 4, log=True)
        x4 = trial.suggest_categorical("x4", [0.1, 1.0, 10.0])
        assert isinstance(x4, float)
        return (x1 + x2 * x3 + x4, x1 * x4)

    # The test ensures that trials with infinite values are ignored when calculating importance scores.
    n_trial = 10
    seed = 13

    # Importance scores are calculated without a trial with an inf value.
    study = create_study(directions=["minimize", "minimize"], sampler=RandomSampler(seed=seed))
    study.optimize(multi_objective_function, n_trials=n_trial)

    evaluator = ShapleyImportanceEvaluator(seed=seed)
    param_importance_without_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0, "x4": 0.1},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": IntDistribution(low=2, high=4, log=True),
                "x4": CategoricalDistribution([0.1, 1, 10]),
            },
        )
    )
    # Importance scores are calculated with a trial with an inf value.
    param_importance_with_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # The importance scores should be the same with and without the inf trial,
    # because the trial whose objective value is inf is ignored.
    assert param_importance_with_inf == param_importance_without_inf
Example #16
def test_suggest_int(storage_mode: str) -> None:

    mock = Mock()
    mock.side_effect = [1, 2]
    sampler = samplers.RandomSampler()

    with patch.object(
            sampler, "sample_independent",
            mock) as mock_object, StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = IntDistribution(low=0, high=3)

        assert trial._suggest("x",
                              distribution) == 1  # Test suggesting a param.
        assert trial._suggest(
            "x", distribution) == 1  # Test suggesting the same param.
        assert trial._suggest(
            "y", distribution) == 2  # Test suggesting a different param.
        assert trial.params == {"x": 1, "y": 2}
        assert mock_object.call_count == 2
Example #17
File: test_cma.py Project: optuna/optuna
    def test_is_compatible(search_space: Dict[str, BaseDistribution],
                           x0: Dict[str, Any]) -> None:

        optimizer = optuna.integration.cma._Optimizer(search_space, x0, 0.1,
                                                      None, {})

        # Compatible.
        trial = _create_frozen_trial(x0, search_space)
        assert optimizer._is_compatible(trial)

        # Compatible.
        trial = _create_frozen_trial(
            x0, dict(search_space, u=FloatDistribution(-10, 10)))
        assert optimizer._is_compatible(trial)

        # Compatible.
        trial = _create_frozen_trial(
            dict(x0, unknown=7),
            dict(search_space, unknown=FloatDistribution(0, 10)))
        assert optimizer._is_compatible(trial)

        # Incompatible ('u' doesn't exist).
        param = dict(x0)
        del param["u"]
        dist = dict(search_space)
        del dist["u"]
        trial = _create_frozen_trial(param, dist)
        assert not optimizer._is_compatible(trial)

        # Incompatible (the value of 'u' is out of range).
        trial = _create_frozen_trial(
            dict(x0, u=20), dict(search_space, u=FloatDistribution(-100, 100)))
        assert not optimizer._is_compatible(trial)

        # Error (different distribution class).
        trial = _create_frozen_trial(
            x0, dict(search_space, u=IntDistribution(-2, 2)))
        with pytest.raises(ValueError):
            optimizer._is_compatible(trial)
Example #18
def test_suggest_int_log(storage_mode: str) -> None:

    mock = Mock()
    mock.side_effect = [1, 2]
    sampler = samplers.RandomSampler()

    with patch.object(
            sampler, "sample_independent",
            mock) as mock_object, StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = IntDistribution(low=1, high=3, log=True)

        assert trial._suggest("x",
                              distribution) == 1  # Test suggesting a param.
        assert trial._suggest(
            "x", distribution) == 1  # Test suggesting the same param.
        assert trial._suggest(
            "y", distribution) == 2  # Test suggesting a different param.
        assert trial.params == {"x": 1, "y": 2}
        assert mock_object.call_count == 2

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        with warnings.catch_warnings():
            # UserWarning will be raised since [0.5, 10] is not divisible by 1.
            warnings.simplefilter("ignore", category=UserWarning)
            with pytest.raises(ValueError):
                trial.suggest_int("z", 0.5, 10, log=True)  # type: ignore

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        with pytest.raises(ValueError):
            trial.suggest_int("w", 1, 3, step=2, log=True)
Example #19
def test_search_space_transform_encoding() -> None:
    trans = _SearchSpaceTransform({"x0": IntDistribution(0, 3)})

    assert len(trans.column_to_encoded_columns) == 1
    numpy.testing.assert_equal(trans.column_to_encoded_columns[0], numpy.array([0]))
    numpy.testing.assert_equal(trans.encoded_column_to_column, numpy.array([0]))

    trans = _SearchSpaceTransform({"x0": CategoricalDistribution(["foo", "bar", "baz"])})

    assert len(trans.column_to_encoded_columns) == 1
    numpy.testing.assert_equal(trans.column_to_encoded_columns[0], numpy.array([0, 1, 2]))
    numpy.testing.assert_equal(trans.encoded_column_to_column, numpy.array([0, 0, 0]))

    trans = _SearchSpaceTransform(
        {
            "x0": FloatDistribution(0, 3),
            "x1": CategoricalDistribution(["foo", "bar", "baz"]),
            "x3": FloatDistribution(0, 1, step=0.2),
        }
    )

    assert len(trans.column_to_encoded_columns) == 3
    numpy.testing.assert_equal(trans.column_to_encoded_columns[0], numpy.array([0]))
    numpy.testing.assert_equal(trans.column_to_encoded_columns[1], numpy.array([1, 2, 3]))
    numpy.testing.assert_equal(trans.column_to_encoded_columns[2], numpy.array([4]))
    numpy.testing.assert_equal(trans.encoded_column_to_column, numpy.array([0, 1, 1, 1, 2]))
Example #20
def test_search_space_group() -> None:
    search_space_group = _SearchSpaceGroup()

    # No search space.
    assert search_space_group.search_spaces == []

    # No distributions.
    search_space_group.add_distributions({})
    assert search_space_group.search_spaces == []

    # Add a single distribution.
    search_space_group.add_distributions({"x": IntDistribution(low=0, high=10)})
    assert search_space_group.search_spaces == [{"x": IntDistribution(low=0, high=10)}]

    # Add the same single distribution again.
    search_space_group.add_distributions({"x": IntDistribution(low=0, high=10)})
    assert search_space_group.search_spaces == [{"x": IntDistribution(low=0, high=10)}]

    # Add disjoint distributions.
    search_space_group.add_distributions(
        {
            "y": IntDistribution(low=0, high=10),
            "z": FloatDistribution(low=-3, high=3),
        }
    )
    assert search_space_group.search_spaces == [
        {"x": IntDistribution(low=0, high=10)},
        {
            "y": IntDistribution(low=0, high=10),
            "z": FloatDistribution(low=-3, high=3),
        },
    ]

    # Add distributions that include one of the search spaces in the group.
    search_space_group.add_distributions(
        {
            "y": IntDistribution(low=0, high=10),
            "z": FloatDistribution(low=-3, high=3),
            "u": FloatDistribution(low=1e-2, high=1e2, log=True),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        }
    )
    assert search_space_group.search_spaces == [
        {"x": IntDistribution(low=0, high=10)},
        {
            "y": IntDistribution(low=0, high=10),
            "z": FloatDistribution(low=-3, high=3),
        },
        {
            "u": FloatDistribution(low=1e-2, high=1e2, log=True),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        },
    ]

    # Add a distribution that is contained in one of the search spaces in the group.
    search_space_group.add_distributions({"u": FloatDistribution(low=1e-2, high=1e2, log=True)})
    assert search_space_group.search_spaces == [
        {"x": IntDistribution(low=0, high=10)},
        {
            "y": IntDistribution(low=0, high=10),
            "z": FloatDistribution(low=-3, high=3),
        },
        {"u": FloatDistribution(low=1e-2, high=1e2, log=True)},
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
    ]

    # Add distributions whose intersection with one of the search spaces in the group
    # is not empty.
    search_space_group.add_distributions(
        {
            "y": IntDistribution(low=0, high=10),
            "w": IntDistribution(low=2, high=8, log=True),
        }
    )
    assert search_space_group.search_spaces == [
        {"x": IntDistribution(low=0, high=10)},
        {"y": IntDistribution(low=0, high=10)},
        {"z": FloatDistribution(low=-3, high=3)},
        {"u": FloatDistribution(low=1e-2, high=1e2, log=True)},
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
        {"w": IntDistribution(low=2, high=8, log=True)},
    ]

    # Add distributions that include some of the search spaces in the group.
    search_space_group.add_distributions(
        {
            "y": IntDistribution(low=0, high=10),
            "w": IntDistribution(low=2, high=8, log=True),
            "t": FloatDistribution(low=10, high=100),
        }
    )
    assert search_space_group.search_spaces == [
        {"x": IntDistribution(low=0, high=10)},
        {"y": IntDistribution(low=0, high=10)},
        {"z": FloatDistribution(low=-3, high=3)},
        {"u": FloatDistribution(low=1e-2, high=1e2, log=True)},
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
        {"w": IntDistribution(low=2, high=8, log=True)},
        {"t": FloatDistribution(low=10, high=100)},
    ]
Example #21
from typing import Any

import numpy
import pytest

from optuna._transform import _SearchSpaceTransform
from optuna._transform import _untransform_numerical_param
from optuna.distributions import BaseDistribution
from optuna.distributions import CategoricalDistribution
from optuna.distributions import FloatDistribution
from optuna.distributions import IntDistribution


@pytest.mark.parametrize(
    "param,distribution",
    [
        (0, IntDistribution(0, 3)),
        (1, IntDistribution(1, 10, log=True)),
        (2, IntDistribution(0, 10, step=2)),
        (0.0, FloatDistribution(0, 3)),
        (1.0, FloatDistribution(1, 10, log=True)),
        (0.2, FloatDistribution(0, 1, step=0.2)),
        ("foo", CategoricalDistribution(["foo"])),
        ("bar", CategoricalDistribution(["foo", "bar", "baz"])),
    ],
)
def test_search_space_transform_shapes_dtypes(
        param: Any, distribution: BaseDistribution) -> None:
    trans = _SearchSpaceTransform({"x0": distribution})
    trans_params = trans.transform({"x0": param})

    if isinstance(distribution, CategoricalDistribution):
Example #22
    with pytest.raises(ValueError):
        trial.suggest_categorical("x", ["foo", "bar"])

    # Unknown parameter and bad category type.
    with pytest.warns(UserWarning):
        with pytest.raises(ValueError):  # Must come after `pytest.warns` to catch failures.
            trial.suggest_categorical("x", [{"foo": "bar"}])  # type: ignore


@parametrize_trial_type
@pytest.mark.parametrize(
    ("suggest_func", "distribution"),
    [
        (lambda trial, *args: trial.suggest_int(*args), IntDistribution(1, 10)),
        (
            lambda trial, *args: trial.suggest_int(*args, log=True),
            IntDistribution(1, 10, log=True),
        ),
        (lambda trial, *args: trial.suggest_int(*args, step=2), IntDistribution(1, 10, step=2)),
        (lambda trial, *args: trial.suggest_float(*args), FloatDistribution(1, 10)),
        (
            lambda trial, *args: trial.suggest_float(*args, log=True),
            FloatDistribution(1, 10, log=True),
        ),
        (
            lambda trial, *args: trial.suggest_float(*args, step=1),
            FloatDistribution(1, 10, step=1),
Example #23
def test_relative_parameters(storage_mode: str) -> None:
    class SamplerStubForTestRelativeParameters(samplers.BaseSampler):
        def infer_relative_search_space(
            self, study: "optuna.study.Study",
            trial: "optuna.trial.FrozenTrial"
        ) -> Dict[str, distributions.BaseDistribution]:
            return {
                "x": FloatDistribution(low=5, high=6),
                "y": FloatDistribution(low=5, high=6),
            }

        def sample_relative(
            self,
            study: "optuna.study.Study",
            trial: "optuna.trial.FrozenTrial",
            search_space: Dict[str, distributions.BaseDistribution],
        ) -> Dict[str, Any]:
            return {"x": 5.5, "y": 5.5, "z": 5.5}

        def sample_independent(
            self,
            study: "optuna.study.Study",
            trial: "optuna.trial.FrozenTrial",
            param_name: str,
            param_distribution: distributions.BaseDistribution,
        ) -> Any:
            return 5.0

    sampler = SamplerStubForTestRelativeParameters()
    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)

        def create_trial() -> Trial:

            return Trial(study,
                         study._storage.create_new_trial(study._study_id))

        # Suggested by `sample_relative`.
        trial0 = create_trial()
        distribution0 = FloatDistribution(low=0, high=100)
        assert trial0._suggest("x", distribution0) == 5.5

        # Not suggested by `sample_relative` (due to unknown parameter name).
        trial1 = create_trial()
        distribution1 = distribution0
        assert trial1._suggest("w", distribution1) != 5.5

        # Not suggested by `sample_relative` (due to incompatible value range).
        trial2 = create_trial()
        distribution2 = FloatDistribution(low=0, high=5)
        assert trial2._suggest("x", distribution2) != 5.5

        # Error (due to incompatible distribution class).
        trial3 = create_trial()
        distribution3 = IntDistribution(low=1, high=100)
        with pytest.raises(ValueError):
            trial3._suggest("y", distribution3)

        # Error ('z' is included in `sample_relative` but not in `infer_relative_search_space`).
        trial4 = create_trial()
        distribution4 = FloatDistribution(low=0, high=10)
        with pytest.raises(ValueError):
            trial4._suggest("z", distribution4)

        # Error (due to incompatible distribution class).
        trial5 = create_trial()
        distribution5 = IntDistribution(low=1, high=100, log=True)
        with pytest.raises(ValueError):
            trial5._suggest("y", distribution5)
Example #24
def test_group_decomposed_search_space() -> None:
    search_space = _GroupDecomposedSearchSpace()
    study = create_study()

    # No trial.
    assert search_space.calculate(study).search_spaces == []

    # A single parameter.
    study.optimize(lambda t: t.suggest_int("x", 0, 10), n_trials=1)
    assert search_space.calculate(study).search_spaces == [{"x": IntDistribution(low=0, high=10)}]

    # Disjoint parameters.
    study.optimize(lambda t: t.suggest_int("y", 0, 10) + t.suggest_float("z", -3, 3), n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {"x": IntDistribution(low=0, high=10)},
        {
            "y": IntDistribution(low=0, high=10),
            "z": FloatDistribution(low=-3, high=3),
        },
    ]

    # Parameters that include one of the search spaces in the group.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10)
        + t.suggest_float("z", -3, 3)
        + t.suggest_float("u", 1e-2, 1e2, log=True)
        + bool(t.suggest_categorical("v", ["A", "B", "C"])),
        n_trials=1,
    )
    assert search_space.calculate(study).search_spaces == [
        {"x": IntDistribution(low=0, high=10)},
        {
            "z": FloatDistribution(low=-3, high=3),
            "y": IntDistribution(low=0, high=10),
        },
        {
            "u": FloatDistribution(low=1e-2, high=1e2, log=True),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        },
    ]

    # A parameter that is contained in one of the search spaces in the group.
    study.optimize(lambda t: t.suggest_float("u", 1e-2, 1e2, log=True), n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {"x": IntDistribution(low=0, high=10)},
        {
            "y": IntDistribution(low=0, high=10),
            "z": FloatDistribution(low=-3, high=3),
        },
        {"u": FloatDistribution(low=1e-2, high=1e2, log=True)},
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
    ]

    # Parameters whose intersection with one of the search spaces in the group is not empty.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10) + t.suggest_int("w", 2, 8, log=True), n_trials=1
    )
    assert search_space.calculate(study).search_spaces == [
        {"x": IntDistribution(low=0, high=10)},
        {"y": IntDistribution(low=0, high=10)},
        {"z": FloatDistribution(low=-3, high=3)},
        {"u": FloatDistribution(low=1e-2, high=1e2, log=True)},
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
        {"w": IntDistribution(low=2, high=8, log=True)},
    ]

    search_space = _GroupDecomposedSearchSpace()
    study = create_study()

    # Failed or pruned trials are not considered in the calculation of
    # an intersection search space.
    def objective(trial: Trial, exception: Exception) -> float:
        trial.suggest_float("a", 0, 1)
        raise exception

    study.optimize(lambda t: objective(t, RuntimeError()), n_trials=1, catch=(RuntimeError,))
    study.optimize(lambda t: objective(t, TrialPruned()), n_trials=1)
    assert search_space.calculate(study).search_spaces == []

    # If two parameters have the same name but different distributions,
    # the first one takes priority.
    study.optimize(lambda t: t.suggest_float("a", -1, 1), n_trials=1)
    study.optimize(lambda t: t.suggest_float("a", 0, 1), n_trials=1)
    assert search_space.calculate(study).search_spaces == [{"a": FloatDistribution(low=-1, high=1)}]
Example #25
        np.floating,
    )

    if distribution.step is not None:
        # Check all points are multiples of distribution.step.
        points -= distribution.low
        points /= distribution.step
        round_points = np.round(points)
        np.testing.assert_almost_equal(round_points, points)


@parametrize_sampler
@pytest.mark.parametrize(
    "distribution",
    [
        IntDistribution(-10, 10),
        IntDistribution(0, 10),
        IntDistribution(-10, 0),
        IntDistribution(-10, 10, step=2),
        IntDistribution(0, 10, step=2),
        IntDistribution(-10, 0, step=2),
        IntDistribution(1, 100, log=True),
    ],
)
def test_int(sampler_class: Callable[[], BaseSampler],
             distribution: IntDistribution) -> None:

    study = optuna.study.create_study(sampler=sampler_class())
    points = np.array([
        study.sampler.sample_independent(study, _create_new_trial(study), "x",
                                         distribution) for _ in range(100)
Example #26
        optuna.multi_objective.samplers.NSGAIIMultiObjectiveSampler,
    ],
)


@parametrize_sampler
@pytest.mark.parametrize(
    "distribution",
    [
        FloatDistribution(-1.0, 1.0),
        FloatDistribution(0.0, 1.0),
        FloatDistribution(-1.0, 0.0),
        FloatDistribution(1e-7, 1.0, log=True),
        FloatDistribution(-10, 10, step=0.1),
        FloatDistribution(-10.2, 10.2, step=0.1),
        IntDistribution(-10, 10),
        IntDistribution(0, 10),
        IntDistribution(-10, 0),
        IntDistribution(-10, 10, step=2),
        IntDistribution(0, 10, step=2),
        IntDistribution(-10, 0, step=2),
        CategoricalDistribution((1, 2, 3)),
        CategoricalDistribution(("a", "b", "c")),
        CategoricalDistribution((1, "a")),
    ],
)
def test_sample_independent(sampler_class: Callable[[],
                                                    BaseMultiObjectiveSampler],
                            distribution: BaseDistribution) -> None:
    study = optuna.multi_objective.study.create_study(["minimize", "maximize"],
                                                      sampler=sampler_class())
Example #27
File: _trial.py Project: optuna/optuna
    def suggest_int(self,
                    name: str,
                    low: int,
                    high: int,
                    step: int = 1,
                    log: bool = False) -> int:
        """Suggest a value for the integer parameter.

        The value is sampled from the integers in :math:`[\\mathsf{low}, \\mathsf{high}]`.

        Example:

            Suggest the number of trees in `RandomForestClassifier <https://scikit-learn.org/
            stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_.

            .. testcode::

                import numpy as np
                from sklearn.datasets import load_iris
                from sklearn.ensemble import RandomForestClassifier
                from sklearn.model_selection import train_test_split

                import optuna

                X, y = load_iris(return_X_y=True)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y)


                def objective(trial):
                    n_estimators = trial.suggest_int("n_estimators", 50, 400)
                    clf = RandomForestClassifier(n_estimators=n_estimators, random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)


                study = optuna.create_study(direction="maximize")
                study.optimize(objective, n_trials=3)

        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
                ``low`` must be less than or equal to ``high``. If ``log`` is :obj:`True`,
                ``low`` must be larger than 0.
            high:
                Upper endpoint of the range of suggested values. ``high`` is included in the range.
                ``high`` must be greater than or equal to ``low``.
            step:
                A step of discretization.

                .. note::
                    Note that :math:`\\mathsf{high}` is modified if the range is not divisible by
                    :math:`\\mathsf{step}`. Please check the warning messages to find the changed
                    values.

                .. note::
                    The method returns one of the values in the sequence
                    :math:`\\mathsf{low}, \\mathsf{low} + \\mathsf{step}, \\mathsf{low} + 2 *
                    \\mathsf{step}, \\dots, \\mathsf{low} + k * \\mathsf{step} \\le
                    \\mathsf{high}`, where :math:`k` denotes an integer.

                .. note::
                    The ``step != 1`` and ``log`` arguments cannot be used at the same time.
                    To set the ``step`` argument :math:`\\mathsf{step} \\ge 2`, set the
                    ``log`` argument to :obj:`False`.
            log:
                A flag to sample the value from the log domain or not.

                .. note::
                    If ``log`` is true, the range of suggested values is first divided into
                    grid points of width 1. The range is then converted to the log domain,
                    a value is sampled uniformly from it, and the sampled value is converted
                    back to the original domain and rounded to the nearest of those grid
                    points to determine the suggested value. For example, if ``low = 2`` and
                    ``high = 8``, the suggested values are drawn from ``[2, 3, 4, 5, 6, 7, 8]``,
                    and lower values tend to be sampled more often than higher
                    values.

                .. note::
                    The ``step != 1`` and ``log`` arguments cannot be used at the same time.
                    To set the ``log`` argument to :obj:`True`, set the ``step`` argument to 1.

        .. seealso::
            :ref:`configurations` tutorial describes more details and flexible usages.
        """

        distribution = IntDistribution(low=low, high=high, log=log, step=step)
        self._check_distribution(name, distribution)
        return int(self._suggest(name, distribution))
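The note on ``log`` above describes the sampling procedure only in words. The following is a rough numpy sketch of that idea, not Optuna's actual implementation (the half-cell widening of the endpoints is an assumption made purely for illustration); it shows why lower integers such as 2 and 3 are drawn more often than 7 and 8 when ``low = 2`` and ``high = 8``.

import numpy as np


def sample_int_log(low: int, high: int, rng: np.random.RandomState) -> int:
    # Treat each integer in [low, high] as a grid point of width 1, sample
    # uniformly in the log domain between the outer edges of the grid, map the
    # sample back, and round to the nearest grid point.  Smaller integers cover
    # wider log-intervals, so they are selected more often.
    log_low, log_high = np.log(low - 0.5), np.log(high + 0.5)
    x = np.exp(rng.uniform(log_low, log_high))
    return int(np.clip(np.round(x), low, high))


rng = np.random.RandomState(0)
counts = np.bincount([sample_int_log(2, 8, rng) for _ in range(10_000)], minlength=9)
# counts[2] and counts[3] come out far larger than counts[7] and counts[8].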