Example #1
def test_datetime_start(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None

    trial_datetime_start = [None]  # type: List[Optional[datetime.datetime]]

    def objective(trial):
        # type: (Trial) -> float

        trial_datetime_start[0] = trial.datetime_start
        return 1.0

    study = create_study(storage_init_func())
    study.optimize(objective, n_trials=1)

    assert study.trials[0].datetime_start == trial_datetime_start[0]
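
For context, a minimal standalone sketch of the same API outside the test harness (assuming the default in-memory storage); trial.datetime_start is set when the trial begins and is also available afterwards via study.trials:

import optuna

def objective(trial):
    # The start timestamp is already populated while the objective is running.
    print(trial.datetime_start)
    return 1.0

study = optuna.create_study()
study.optimize(objective, n_trials=1)
print(study.trials[0].datetime_start)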
Example #2
def test_get_param_importances_invalid_empty_study(
        evaluator_init_func: Callable[[], BaseImportanceEvaluator]) -> None:

    study = create_study()

    with pytest.raises(ValueError):
        get_param_importances(study, evaluator=evaluator_init_func())

    def objective(trial: Trial) -> float:
        raise optuna.TrialPruned

    study.optimize(objective, n_trials=3)

    with pytest.raises(ValueError):
        get_param_importances(study, evaluator=evaluator_init_func())
Example #3
def test_check_distribution_suggest_int(
    storage_init_func: Callable[[], storages.BaseStorage], enable_log: bool
) -> None:

    sampler = samplers.RandomSampler()
    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    with pytest.warns(None) as record:
        trial.suggest_int("x", 10, 20, log=enable_log)
        trial.suggest_int("x", 10, 20, log=enable_log)
        trial.suggest_int("x", 10, 22, log=enable_log)

    # Only the third call, which re-suggests "x" with a different range, is expected to warn.
    assert len(record) == 1
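
Note that passing None to pytest.warns() is deprecated in recent pytest releases. A minimal standard-library sketch of the same check, assuming a default in-memory study and that no unrelated warnings fire:

import warnings
import optuna

study = optuna.create_study()
trial = study.ask()  # A fresh trial, analogous to the one built in the test above.

with warnings.catch_warnings(record=True) as record:
    warnings.simplefilter("always")
    trial.suggest_int("x", 10, 20)
    trial.suggest_int("x", 10, 20)  # Same range: no new warning.
    trial.suggest_int("x", 10, 22)  # Re-suggesting "x" with a different range warns.

assert len(record) == 1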
Example #4
def test_optuna_search_study_with_minimize() -> None:

    X, y = make_blobs(n_samples=10)
    est = KernelDensity()
    study = create_study(direction="minimize")
    optuna_search = integration.OptunaSearchCV(est, {},
                                               cv=3,
                                               error_score="raise",
                                               random_state=0,
                                               return_train_score=True,
                                               study=study)

    with pytest.raises(ValueError,
                       match="direction of study must be 'maximize'."):
        optuna_search.fit(X)
Example #5
def test_get_info_importances_nonfinite_removed(
        inf_value: float, evaluator: BaseImportanceEvaluator,
        n_trials: int) -> None:
    def _objective(trial: Trial) -> float:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1 + x2 * x3

    seed = 13
    target_name = "Objective Value"

    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(_objective, n_trials=n_trials)

    # Create param importances info without inf value.
    info_without_inf = _get_importances_info(study,
                                             evaluator=evaluator,
                                             params=None,
                                             target=None,
                                             target_name=target_name)

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            value=inf_value,
            params={
                "x1": 1.0,
                "x2": 1.0,
                "x3": 3.0
            },
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        ))

    # Create param importances info with inf value.
    info_with_inf = _get_importances_info(study,
                                          evaluator=evaluator,
                                          params=None,
                                          target=None,
                                          target_name=target_name)

    # The obtained info should be identical with and without the inf trial,
    # because the trial whose objective value is infinite is ignored.
    assert info_with_inf == info_without_inf
Example #6
def test_plot_slice() -> None:

    # Test with no trial.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_slice(study)
    assert len(figure.data) == 0

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    figure = plot_slice(study)
    assert len(figure.data) == 2
    assert figure.data[0]["x"] == (1.0, 2.5)
    assert figure.data[0]["y"] == (0.0, 1.0)
    assert figure.data[1]["x"] == (2.0, 0.0, 1.0)
    assert figure.data[1]["y"] == (0.0, 2.0, 1.0)
    assert figure.layout.yaxis.title.text == "Objective Value"

    # Test with a trial to select parameter.
    figure = plot_slice(study, params=["param_a"])
    assert len(figure.data) == 1
    assert figure.data[0]["x"] == (1.0, 2.5)
    assert figure.data[0]["y"] == (0.0, 1.0)

    # Test with a customized target value.
    figure = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
    assert len(figure.data) == 1
    assert figure.data[0]["x"] == (1.0, 2.5)
    assert figure.data[0]["y"] == (2.0, 1.0)
    assert figure.layout.yaxis.title.text == "Objective Value"

    # Test with a customized target name.
    figure = plot_slice(study, target_name="Target Name")
    assert figure.layout.yaxis.title.text == "Target Name"

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_slice(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = plot_slice(study)
    assert len(figure.data) == 0
Example #7
def test_generate_contour_plot_for_few_observations(params: List[str]) -> None:
    study = create_study(direction="minimize")
    study.add_trial(
        create_trial(
            values=[0.0],
            params={
                "param_a": 1.0,
                "param_b": 2.0
            },
            distributions={
                "param_a": FloatDistribution(0.0, 3.0),
                "param_b": FloatDistribution(0.0, 3.0),
            },
        ))
    study.add_trial(
        create_trial(
            values=[2.0],
            params={"param_b": 0.0},
            distributions={"param_b": FloatDistribution(0.0, 3.0)},
        ))

    info = _get_contour_info(study, params=params)
    assert info == _ContourInfo(
        sorted_params=sorted(params),
        sub_plot_infos=[[
            _SubContourInfo(
                xaxis=_AxisInfo(
                    name=sorted(params)[0],
                    range=(1.0, 1.0),
                    is_log=False,
                    is_cat=False,
                    indices=[1.0],
                    values=[1.0, None],
                ),
                yaxis=_AxisInfo(
                    name=sorted(params)[1],
                    range=(-0.1, 2.1),
                    is_log=False,
                    is_cat=False,
                    indices=[-0.1, 0.0, 2.0, 2.1],
                    values=[2.0, 0.0],
                ),
                z_values={},
            )
        ]],
        reverse_scale=True,
        target_name="Objective Value",
    )
Example #8
def test_plot_parallel_coordinate_log_params() -> None:
    # Test with log params.
    study_log_params = create_study()
    study_log_params.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1e-6, "param_b": 10},
            distributions={
                "param_a": LogUniformDistribution(1e-7, 1e-2),
                "param_b": LogUniformDistribution(1, 1000),
            },
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=1.0,
            params={"param_a": 2e-5, "param_b": 200},
            distributions={
                "param_a": LogUniformDistribution(1e-7, 1e-2),
                "param_b": LogUniformDistribution(1, 1000),
            },
        )
    )
    study_log_params.add_trial(
        create_trial(
            value=0.1,
            params={"param_a": 1e-4, "param_b": 30},
            distributions={
                "param_a": LogUniformDistribution(1e-7, 1e-2),
                "param_b": LogUniformDistribution(1, 1000),
            },
        )
    )
    figure = plot_parallel_coordinate(study_log_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 1.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 1.0, 0.1)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (-6.0, -4.0)
    assert figure.data[0]["dimensions"][1]["values"] == (-6, math.log10(2e-5), -4)
    assert figure.data[0]["dimensions"][1]["ticktext"] == ("1e-06", "1e-05", "0.0001")
    assert figure.data[0]["dimensions"][1]["tickvals"] == (-6, -5, -4.0)
    assert figure.data[0]["dimensions"][2]["label"] == "param_b"
    assert figure.data[0]["dimensions"][2]["range"] == (1.0, math.log10(200))
    assert figure.data[0]["dimensions"][2]["values"] == (1.0, math.log10(200), math.log10(30))
    assert figure.data[0]["dimensions"][2]["ticktext"] == ("10", "100", "200")
    assert figure.data[0]["dimensions"][2]["tickvals"] == (1.0, 2.0, math.log10(200))
Example #9
def test_plot_edf_with_target_name(plot_edf: Callable[..., Any],
                                   target_name: Optional[str]) -> None:
    study = create_study()
    study.optimize(lambda t: t.suggest_float("x", 0, 5), n_trials=10)
    if target_name is None:
        figure = plot_edf(study)
    else:
        figure = plot_edf(study, target_name=target_name)

    expected = target_name if target_name is not None else "Objective Value"
    if isinstance(figure, go.Figure):
        assert figure.layout.xaxis.title.text == expected
    elif isinstance(figure, Axes):
        assert figure.xaxis.label.get_text() == expected

    save_static_image(figure)
Example #10
def test_switch_label_when_param_insignificant() -> None:
    def _objective(trial: Trial) -> int:
        x = trial.suggest_int("x", 0, 2)
        _ = trial.suggest_int("y", -1, 1)
        return x**2

    study = create_study()
    for x in range(1, 3):
        study.enqueue_trial({"x": x, "y": 0})

    study.optimize(_objective, n_trials=2)
    ax = plot_param_importances(study)

    # Test if label for `y` param has been switched to `<0.01`.
    labels = ax.figure.findobj(lambda obj: "<0.01" in str(obj))
    assert len(labels) == 1
Example #11
def test_switch_label_when_param_insignificant() -> None:
    def _objective(trial: Trial) -> int:
        x = trial.suggest_int("x", 0, 2)
        _ = trial.suggest_int("y", -1, 1)
        return x**2

    study = create_study()
    for x in range(1, 3):
        study.enqueue_trial({"x": x, "y": 0})

    study.optimize(_objective, n_trials=2)
    figure = plot_param_importances(study)

    # Test if label for `y` param has been switched to `<0.01`.
    labels = figure.data[0].text
    assert labels == ("<0.01", "1.00")
Example #12
def test_suggest_int(storage_init_func: Callable[[], storages.BaseStorage]) -> None:

    mock = Mock()
    mock.side_effect = [1, 2]
    sampler = samplers.RandomSampler()

    with patch.object(sampler, "sample_independent", mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = IntUniformDistribution(low=0, high=3)

        assert trial._suggest("x", distribution) == 1  # Test suggesting a param.
        assert trial._suggest("x", distribution) == 1  # Test suggesting the same param.
        assert trial._suggest("y", distribution) == 2  # Test suggesting a different param.
        assert trial.params == {"x": 1, "y": 2}
        assert mock_object.call_count == 2
Example #13
def test_empty_edf_info() -> None:
    def _assert_empty(info: _EDFInfo) -> None:
        assert info.lines == []
        np.testing.assert_array_equal(info.x_values, np.array([]))

    edf_info = _get_edf_info([])
    _assert_empty(edf_info)

    study = create_study()
    edf_info = _get_edf_info(study)
    _assert_empty(edf_info)

    trial = study.ask()
    study.tell(trial, state=optuna.trial.TrialState.PRUNED)
    edf_info = _get_edf_info(study)
    _assert_empty(edf_info)
Example #14
def test_check_distribution_suggest_float(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    sampler = samplers.RandomSampler()
    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    x1 = trial.suggest_float("x1", 10, 20)
    x2 = trial.suggest_uniform("x1", 10, 20)

    assert x1 == x2

    x3 = trial.suggest_float("x2", 1e-5, 1e-3, log=True)
    x4 = trial.suggest_loguniform("x2", 1e-5, 1e-3)

    assert x3 == x4
Example #15
def test_switch_label_when_param_insignificant() -> None:
    def _objective(trial: Trial) -> int:
        x = trial.suggest_int("x", 0, 2)
        _ = trial.suggest_int("y", -1, 1)
        return x**2

    study = create_study()
    for x in range(1, 3):
        study.enqueue_trial({"x": x, "y": 0})

    study.optimize(_objective, n_trials=2)

    info = _get_importances_info(study, None, None, None, "Objective Value")

    # Test if label for `y` param has been switched to `<0.01`.
    assert info.importance_labels == ["<0.01", "1.00"]
Example #16
def test_relative_parameters(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    relative_search_space = {
        'x': distributions.UniformDistribution(low=5, high=6),
        'y': distributions.UniformDistribution(low=5, high=6)
    }
    relative_params = {
        'x': 5.5,
        'y': 5.5,
        'z': 5.5
    }

    sampler = DeterministicRelativeSampler(relative_search_space, relative_params)  # type: ignore
    study = create_study(storage=storage_init_func(), sampler=sampler)

    def create_trial():
        # type: () -> Trial

        return Trial(study, study.storage.create_new_trial_id(study.study_id))

    # Suggested from `relative_params`.
    trial0 = create_trial()
    distribution0 = distributions.UniformDistribution(low=0, high=100)
    assert trial0._suggest('x', distribution0) == 5.5

    # Not suggested from `relative_params` (due to unknown parameter name).
    trial1 = create_trial()
    distribution1 = distribution0
    assert trial1._suggest('w', distribution1) != 5.5

    # Not suggested from `relative_params` (due to incompatible value range).
    trial2 = create_trial()
    distribution2 = distributions.UniformDistribution(low=0, high=5)
    assert trial2._suggest('x', distribution2) != 5.5

    # Error (due to incompatible distribution class).
    trial3 = create_trial()
    distribution3 = distributions.IntUniformDistribution(low=1, high=100)
    with pytest.raises(ValueError):
        trial3._suggest('y', distribution3)

    # Error ('z' is included in `relative_params` but not in `relative_search_space`).
    trial4 = create_trial()
    distribution4 = distributions.UniformDistribution(low=0, high=10)
    with pytest.raises(ValueError):
        trial4._suggest('z', distribution4)
Example #17
def test_relative_parameters(storage_init_func: Callable[[], storages.BaseStorage]) -> None:

    relative_search_space = {
        "x": UniformDistribution(low=5, high=6),
        "y": UniformDistribution(low=5, high=6),
    }
    relative_params = {"x": 5.5, "y": 5.5, "z": 5.5}

    sampler = DeterministicRelativeSampler(relative_search_space, relative_params)  # type: ignore
    study = create_study(storage=storage_init_func(), sampler=sampler)

    def create_trial() -> Trial:

        return Trial(study, study._storage.create_new_trial(study._study_id))

    # Suggested from `relative_params`.
    trial0 = create_trial()
    distribution0 = UniformDistribution(low=0, high=100)
    assert trial0._suggest("x", distribution0) == 5.5

    # Not suggested from `relative_params` (due to unknown parameter name).
    trial1 = create_trial()
    distribution1 = distribution0
    assert trial1._suggest("w", distribution1) != 5.5

    # Not suggested from `relative_params` (due to incompatible value range).
    trial2 = create_trial()
    distribution2 = UniformDistribution(low=0, high=5)
    assert trial2._suggest("x", distribution2) != 5.5

    # Error (due to incompatible distribution class).
    trial3 = create_trial()
    distribution3 = IntUniformDistribution(low=1, high=100)
    with pytest.raises(ValueError):
        trial3._suggest("y", distribution3)

    # Error ('z' is included in `relative_params` but not in `relative_search_space`).
    trial4 = create_trial()
    distribution4 = UniformDistribution(low=0, high=10)
    with pytest.raises(ValueError):
        trial4._suggest("z", distribution4)

    # Error (due to incompatible distribution class).
    trial5 = create_trial()
    distribution5 = IntLogUniformDistribution(low=1, high=100)
    with pytest.raises(ValueError):
        trial5._suggest("y", distribution5)
Example #18
def test_plot_parallel_coordinate_log_params() -> None:
    # Test with log params.
    study_log_params = create_study()
    distributions: Dict[str, BaseDistribution] = {
        "param_a": FloatDistribution(1e-7, 1e-2, log=True),
        "param_b": FloatDistribution(1, 1000, log=True),
    }
    study_log_params.add_trial(
        create_trial(
            value=0.0,
            params={
                "param_a": 1e-6,
                "param_b": 10
            },
            distributions=distributions,
        ))
    study_log_params.add_trial(
        create_trial(
            value=1.0,
            params={
                "param_a": 2e-5,
                "param_b": 200
            },
            distributions=distributions,
        ))
    study_log_params.add_trial(
        create_trial(
            value=0.1,
            params={
                "param_a": 1e-4,
                "param_b": 30
            },
            distributions=distributions,
        ))
    figure = plot_parallel_coordinate(study_log_params)
    axes = figure.get_figure().axes
    assert len(axes) == 3 + 1
    assert axes[0].get_ylim() == (0.0, 1.0)
    assert axes[1].get_ylabel() == "Objective Value"
    assert axes[1].get_ylim() == (0.0, 1.0)
    objectives = _fetch_objectives_from_figure(figure)
    assert objectives == [0.0, 1.0, 0.1]
    assert axes[2].get_ylim() == (1e-6, 1e-4)
    np.testing.assert_almost_equal(axes[3].get_ylim(), (10.0, 200))
    expected_labels = ["Objective Value", "param_a", "param_b"]
    _test_xtick_labels(axes, expected_labels)
    plt.savefig(BytesIO())
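
A minimal sketch of the correspondence between the two distribution spellings used in Examples #8 and #18 (assuming a recent Optuna version, where FloatDistribution with log=True is the current form and LogUniformDistribution is the legacy one):

from optuna.distributions import FloatDistribution, LogUniformDistribution

# Both describe a log-uniform float range over [1e-7, 1e-2].
new_style = FloatDistribution(1e-7, 1e-2, log=True)
old_style = LogUniformDistribution(1e-7, 1e-2)  # Legacy/deprecated spelling.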
Example #19
def test_plot_slice() -> None:

    # Test with no trial.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_slice(study)
    assert not figure.has_data()

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_slice(study)
    assert len(figure) == 2
    assert figure[0].has_data()
    assert figure[1].has_data()

    # Test with a trial to select parameter.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_slice(study, params=["param_a"])
    assert figure.has_data()

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_slice(study,
                            params=["param_a"],
                            target=lambda t: t.params["param_b"])
    assert figure.has_data()

    # Test with a customized target name.
    figure = plot_slice(study, target_name="Target Name")
    assert len(figure) == 2
    assert figure[0].has_data()
    assert figure[1].has_data()

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_slice(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_slice(study)
    assert not figure.has_data()
Example #20
def test_plot_slice() -> None:

    # Test with no trial.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_slice(study)
    assert len(figure.get_lines()) == 0

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    figure = plot_slice(study)
    assert len(figure) == 2
    assert len(figure[0].get_lines()) == 0
    assert len(figure[1].get_lines()) == 0
    assert figure[0].yaxis.label.get_text() == "Objective Value"

    # Test with a trial to select parameter.
    figure = plot_slice(study, params=["param_a"])
    assert len(figure.get_lines()) == 0
    assert figure.yaxis.label.get_text() == "Objective Value"

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
    assert len(figure.get_lines()) == 0
    assert figure.yaxis.label.get_text() == "Objective Value"

    # Test with a customized target name.
    figure = plot_slice(study, target_name="Target Name")
    assert len(figure) == 2
    assert len(figure[0].get_lines()) == 0
    assert len(figure[1].get_lines()) == 0
    assert figure[0].yaxis.label.get_text() == "Target Name"

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_slice(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = plot_slice(study)
    assert len(figure.get_lines()) == 0
Example #21
def test_plot_edf(direction: str) -> None:
    # Test with no studies.
    figure = plot_edf([])
    assert len(figure.data) == 0

    # Test with no trials.
    figure = plot_edf(create_study(direction=direction))
    assert len(figure.data) == 0

    figure = plot_edf(
        [create_study(direction=direction),
         create_study(direction=direction)])
    assert len(figure.data) == 0

    # Test with a study.
    study0 = create_study(direction=direction)
    study0.optimize(lambda t: t.suggest_float("x", 0, 5), n_trials=10)
    figure = plot_edf(study0)
    _validate_edf_values(figure.data[0]["y"])
    assert len(figure.data) == 1
    assert figure.layout.xaxis.title.text == "Objective Value"

    # Test with two studies.
    study1 = create_study(direction=direction)
    study1.optimize(lambda t: t.suggest_float("x", 0, 5), n_trials=10)
    figure = plot_edf([study0, study1])
    for points in figure.data:
        _validate_edf_values(points["y"])
    assert len(figure.data) == 2
    figure = plot_edf((study0, study1))
    for points in figure.data:
        _validate_edf_values(points["y"])
    assert len(figure.data) == 2

    # Test with a customized target value.
    study0 = create_study(direction=direction)
    study0.optimize(lambda t: t.suggest_float("x", 0, 5), n_trials=10)
    with pytest.warns(UserWarning):
        figure = plot_edf(study0, target=lambda t: t.params["x"])
    _validate_edf_values(figure.data[0]["y"])
    assert len(figure.data) == 1

    # Test with a customized target name.
    study0 = create_study(direction=direction)
    study0.optimize(lambda t: t.suggest_float("x", 0, 5), n_trials=10)
    figure = plot_edf(study0, target_name="Target Name")
    _validate_edf_values(figure.data[0]["y"])
    assert len(figure.data) == 1
    assert figure.layout.xaxis.title.text == "Target Name"
Example #22
def test_get_param_importances_empty_search_space(
        evaluator_init_func: Callable[[], BaseImportanceEvaluator]) -> None:
    def objective(trial: Trial) -> float:
        x = trial.suggest_float("x", 0, 5)
        y = trial.suggest_float("y", 1, 1)
        return 4 * x**2 + 4 * y**2

    study = create_study()
    study.optimize(objective, n_trials=3)

    param_importance = get_param_importances(study,
                                             evaluator=evaluator_init_func())

    assert len(param_importance) == 2
    assert all([param in param_importance for param in ["x", "y"]])
    assert param_importance["x"] > 0.0
    assert param_importance["y"] == 0.0
Example #23
def test_suggest_low_equals_high(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None

    study = create_study(storage_init_func(), sampler=samplers.TPESampler(n_startup_trials=0))
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    # Parameter values are determined without suggestion when low == high.
    with patch.object(trial, "_suggest", wraps=trial._suggest) as mock_object:
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting a param.
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting a param.
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting a param.
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("h", 0.5, 0.5, step=1.0) == 0.5  # Suggesting a param.
        assert trial.suggest_float("h", 0.5, 0.5, step=1.0) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 0
Example #24
def _create_study(
        mo_study: "multi_objective.study.MultiObjectiveStudy"
) -> "optuna.Study":
    study = create_study(
        storage=mo_study._storage,
        sampler=_MultiObjectiveSamplerAdapter(mo_study.sampler),
        pruner=NopPruner(),
        study_name="_motpe-" +
        mo_study._storage.get_study_name_from_id(mo_study._study_id),
        directions=mo_study.directions,
        load_if_exists=True,
    )
    for mo_trial in mo_study.trials:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", ExperimentalWarning)
            study.add_trial(_create_trial(mo_trial))
    return study
Example #25
def test_suggest_discrete_uniform(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None

    mock = Mock()
    mock.side_effect = [1.0, 2.0, 3.0]
    sampler = samplers.RandomSampler()

    with patch.object(sampler, "sample_independent", mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = DiscreteUniformDistribution(low=0.0, high=3.0, q=1.0)

        assert trial._suggest("x", distribution) == 1.0  # Test suggesting a param.
        assert trial._suggest("x", distribution) == 1.0  # Test suggesting the same param.
        assert trial._suggest("y", distribution) == 3.0  # Test suggesting a different param.
        assert trial.params == {"x": 1.0, "y": 3.0}
        assert mock_object.call_count == 3
Example #26
def test_trial_with_infinite_value_ignored(inf_value: float,
                                           evaluator: BaseImportanceEvaluator,
                                           n_trial: int) -> None:
    def _objective(trial: Trial) -> float:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1 + x2 * x3

    seed = 13

    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(_objective, n_trials=n_trial)

    # A figure is created without a trial with an inf value.
    plot_param_importances(study, evaluator=evaluator)
    with BytesIO() as byte_io:
        plt.savefig(byte_io)
        figure_without_inf = byte_io.getvalue()

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            value=inf_value,
            params={
                "x1": 1.0,
                "x2": 1.0,
                "x3": 3.0
            },
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        ))

    # A figure is created with a trial with an inf value.
    plot_param_importances(study, evaluator=evaluator)
    with BytesIO() as byte_io:
        plt.savefig(byte_io)
        figure_with_inf = byte_io.getvalue()

    # The two figures should be identical, because the trial whose objective
    # value is infinite is ignored.
    assert len(figure_without_inf) > 0
    assert figure_with_inf == figure_without_inf
Example #27
def test_error_bar_in_optimization_history(direction: str) -> None:
    def objective(trial: Trial) -> float:
        return trial.suggest_float("x", 0, 1)

    studies = [create_study(direction=direction) for _ in range(3)]
    suggested_params = [0.1, 0.3, 0.2]
    for x, study in zip(suggested_params, studies):
        study.enqueue_trial({"x": x})
        study.optimize(objective, n_trials=1)
    figure = plot_optimization_history(studies, error_bar=True)

    mean = np.mean(suggested_params)
    std = np.std(suggested_params)

    np.testing.assert_almost_equal(figure.data[0].y, mean)
    np.testing.assert_almost_equal(figure.data[2].y, mean + std)
    np.testing.assert_almost_equal(figure.data[3].y, mean - std)
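
The error-bar traces asserted above are just the mean across the three studies plus or minus the population standard deviation; a quick sketch of the arithmetic with the enqueued values:

import numpy as np

values = [0.1, 0.3, 0.2]
mean = np.mean(values)                # 0.2
std = np.std(values)                  # ~0.0816 (ddof=0, NumPy's default)
print(mean, mean + std, mean - std)   # centers of figure.data[0], [2], and [3]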
Example #28
def test_suggest_discrete_uniform(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    mock = Mock()
    mock.side_effect = [1., 2., 3.]
    sampler = samplers.RandomSampler()

    with patch.object(sampler, 'sample_independent', mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study.storage.create_new_trial_id(study.study_id))
        distribution = distributions.DiscreteUniformDistribution(low=0., high=3., q=1.)

        assert trial._suggest('x', distribution) == 1.  # Test suggesting a param.
        assert trial._suggest('x', distribution) == 1.  # Test suggesting the same param.
        assert trial._suggest('y', distribution) == 3.  # Test suggesting a different param.
        assert trial.params == {'x': 1., 'y': 3.}
        assert mock_object.call_count == 3
Example #29
def test_get_optimization_history_info_list_with_error_bar(
        direction: str, target_name: str) -> None:
    n_studies = 10

    def objective(trial: Trial) -> float:

        if trial.number == 0:
            return 1.0
        elif trial.number == 1:
            return 2.0
        elif trial.number == 2:
            return 0.0
        return 0.0

    # Test with trials.
    studies = [create_study(direction=direction) for _ in range(n_studies)]
    for study in studies:
        study.optimize(objective, n_trials=3)
    info_list = _get_optimization_history_info_list(
        studies, target=None, target_name=target_name, error_bar=True
    )

    best_values = [1.0, 1.0, 0.0] if direction == "minimize" else [1.0, 2.0, 2.0]
    assert info_list == [
        _OptimizationHistoryInfo(
            [0, 1, 2],
            _ValuesInfo([1.0, 2.0, 0.0], [0.0, 0.0, 0.0], target_name),
            _ValuesInfo(best_values, [0.0, 0.0, 0.0], "Best Value"),
        )
    ]

    # Test customized target.
    info_list = _get_optimization_history_info_list(
        studies, target=lambda t: t.number, target_name=target_name, error_bar=True
    )
    assert info_list == [
        _OptimizationHistoryInfo(
            [0, 1, 2],
            _ValuesInfo([0.0, 1.0, 2.0], [0.0, 0.0, 0.0], target_name),
            None,
        )
    ]
Example #30
def test_plot_parallel_coordinate_categorical_numeric_params() -> None:
    # Test with categorical params that can be interpreted as numeric params.
    study_categorical_params = create_study()
    distributions: Dict[str, BaseDistribution] = {
        "category_a": CategoricalDistribution((1, 2)),
        "category_b": CategoricalDistribution((10, 20, 30)),
    }
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={"category_a": 2, "category_b": 20},
            distributions=distributions,
        )
    )
    study_categorical_params.add_trial(
        create_trial(
            value=1.0,
            params={"category_a": 1, "category_b": 30},
            distributions=distributions,
        )
    )
    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={"category_a": 2, "category_b": 10},
            distributions=distributions,
        )
    )

    # Trials are sorted by category_a and category_b, i.e., trial#1, trial#2, and trial#0.
    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (1.0, 2.0, 0.0)
    assert figure.data[0]["dimensions"][1]["label"] == "category_a"
    assert figure.data[0]["dimensions"][1]["range"] == (0, 1)
    assert figure.data[0]["dimensions"][1]["values"] == (0, 1, 1)
    assert figure.data[0]["dimensions"][1]["ticktext"] == (1, 2)
    assert figure.data[0]["dimensions"][1]["tickvals"] == (0, 1)
    assert figure.data[0]["dimensions"][2]["label"] == "category_b"
    assert figure.data[0]["dimensions"][2]["range"] == (0, 2)
    assert figure.data[0]["dimensions"][2]["values"] == (2, 0, 1)
    assert figure.data[0]["dimensions"][2]["ticktext"] == (10, 20, 30)
    assert figure.data[0]["dimensions"][2]["tickvals"] == (0, 1, 2)