import numpy as np
import pytest

from optuna import create_study
from optuna.trial import Trial
from optuna.visualization._optimization_history import _get_optimization_history_info_list
from optuna.visualization._optimization_history import _OptimizationHistoryInfo
from optuna.visualization._optimization_history import _ValuesInfo


# Helper assumed to match the one in Optuna's test suite: an objective that
# always fails, so its trials end in the FAIL state.
def fail_objective(_: Trial) -> float:
    raise ValueError


def test_target_is_none_and_study_is_multi_obj() -> None:
    study = create_study(directions=["minimize", "minimize"])
    with pytest.raises(ValueError):
        _get_optimization_history_info_list(study,
                                            target=None,
                                            target_name="Objective Value",
                                            error_bar=False)
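

# A hedged sketch, not part of the original tests: for a multi-objective
# study, the documented alternative to the ValueError above is to pass a
# scalar-valued ``target``. Names below are illustrative only.
def sketch_multi_objective_with_target() -> None:
    study = create_study(directions=["minimize", "minimize"])
    study.optimize(lambda t: (t.suggest_float("x", 0, 1), t.number),
                   n_trials=3)
    info_list = _get_optimization_history_info_list(
        study,
        target=lambda t: t.values[0],  # plot only the first objective
        target_name="First Objective",
        error_bar=False)
    assert len(info_list) == 1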


@pytest.mark.parametrize("direction", ["minimize", "maximize"])
@pytest.mark.parametrize("error_bar", [False, True])
def test_info_with_no_trials(direction: str, error_bar: bool) -> None:
    # Single study.
    study = create_study(direction=direction)
    info_list = _get_optimization_history_info_list(
        study, target=None, target_name="Objective Value", error_bar=error_bar)
    assert info_list == []

    # Multiple studies.
    studies = [create_study(direction=direction) for _ in range(10)]
    info_list = _get_optimization_history_info_list(
        studies,
        target=None,
        target_name="Objective Value",
        error_bar=error_bar)
    assert info_list == []


@pytest.mark.parametrize("direction", ["minimize", "maximize"])
@pytest.mark.parametrize("error_bar", [False, True])
def test_warn_default_target_name_with_customized_target(
        direction: str, error_bar: bool) -> None:
    # Single study.
    study = create_study(direction=direction)
    with pytest.warns(UserWarning):
        _get_optimization_history_info_list(study,
                                            target=lambda t: t.number,
                                            target_name="Objective Value",
                                            error_bar=error_bar)

    # Multiple studies.
    studies = [create_study(direction=direction) for _ in range(10)]
    with pytest.warns(UserWarning):
        _get_optimization_history_info_list(studies,
                                            target=lambda t: t.number,
                                            target_name="Objective Value",
                                            error_bar=error_bar)
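

# A hedged sketch, not part of the original tests: the warning above fires
# because ``target`` is customized while ``target_name`` keeps its default
# value; supplying a matching name is expected to keep the call silent.
def sketch_customized_target_with_custom_name() -> None:
    study = create_study(direction="minimize")
    _get_optimization_history_info_list(study,
                                        target=lambda t: t.number,
                                        target_name="Trial Number",
                                        error_bar=False)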


@pytest.mark.parametrize("direction", ["minimize", "maximize"])
@pytest.mark.parametrize("target_name", ["Objective Value", "Target Name"])
def test_get_optimization_history_info_list_with_error_bar(
        direction: str, target_name: str) -> None:
    n_studies = 10

    def objective(trial: Trial) -> float:
        if trial.number == 0:
            return 1.0
        elif trial.number == 1:
            return 2.0
        elif trial.number == 2:
            return 0.0
        return 0.0

    # Test with trials.
    studies = [create_study(direction=direction) for _ in range(n_studies)]
    for study in studies:
        study.optimize(objective, n_trials=3)
    info_list = _get_optimization_history_info_list(studies,
                                                    target=None,
                                                    target_name=target_name,
                                                    error_bar=True)

    best_values = ([1.0, 1.0, 0.0]
                   if direction == "minimize" else [1.0, 2.0, 2.0])
    assert info_list == [
        _OptimizationHistoryInfo(
            [0, 1, 2],
            _ValuesInfo([1.0, 2.0, 0.0], [0.0, 0.0, 0.0], target_name),
            _ValuesInfo(best_values, [0.0, 0.0, 0.0], "Best Value"),
        )
    ]

    # Test customized target.
    info_list = _get_optimization_history_info_list(studies,
                                                    target=lambda t: t.number,
                                                    target_name=target_name,
                                                    error_bar=True)
    assert info_list == [
        _OptimizationHistoryInfo(
            [0, 1, 2],
            _ValuesInfo([0.0, 1.0, 2.0], [0.0, 0.0, 0.0], target_name),
            None,
        )
    ]


@pytest.mark.parametrize("direction", ["minimize", "maximize"])
@pytest.mark.parametrize("error_bar", [False, True])
def test_ignore_failed_trials(direction: str, error_bar: bool) -> None:
    # Single study.
    study = create_study(direction=direction)
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    info_list = _get_optimization_history_info_list(
        study, target=None, target_name="Objective Value", error_bar=error_bar)
    assert info_list == []

    # Multiple studies.
    studies = [create_study(direction=direction) for _ in range(10)]
    for study in studies:
        study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    info_list = _get_optimization_history_info_list(
        studies,
        target=None,
        target_name="Objective Value",
        error_bar=error_bar)
    assert info_list == []


@pytest.mark.parametrize("direction", ["minimize", "maximize"])
@pytest.mark.parametrize("target_name", ["Objective Value", "Target Name"])
def test_get_optimization_history_info_list_with_multiple_studies(
        direction: str, target_name: str) -> None:
    n_studies = 10
    base_values = [1.0, 2.0, 0.0]
    base_best_values = ([1.0, 1.0, 0.0]
                        if direction == "minimize" else [1.0, 2.0, 2.0])

    # Test with trials.
    studies = [create_study(direction=direction) for _ in range(n_studies)]
    for i, study in enumerate(studies):
        study.optimize(lambda t: base_values[t.number] + i, n_trials=3)
    info_list = _get_optimization_history_info_list(studies,
                                                    target=None,
                                                    target_name=target_name,
                                                    error_bar=False)

    for i, info in enumerate(info_list):
        values_i = [v + i for v in base_values]
        best_values_i = [v + i for v in base_best_values]
        assert info == _OptimizationHistoryInfo(
            [0, 1, 2],
            _ValuesInfo(values_i, None,
                        f"{target_name} of {studies[i].study_name}"),
            _ValuesInfo(best_values_i, None,
                        f"Best Value of {studies[i].study_name}"),
        )

    # Test customized target.
    info_list = _get_optimization_history_info_list(studies,
                                                    target=lambda t: t.number,
                                                    target_name=target_name,
                                                    error_bar=False)
    for i, info in enumerate(info_list):
        assert info == _OptimizationHistoryInfo(
            [0, 1, 2],
            _ValuesInfo([0.0, 1.0, 2.0], None,
                        f"{target_name} of {studies[i].study_name}"),
            None,
        )


@pytest.mark.parametrize("direction", ["minimize", "maximize"])
def test_error_bar_in_optimization_history(direction: str) -> None:
    def objective(trial: Trial) -> float:
        return trial.suggest_float("x", 0, 1)

    studies = [create_study(direction=direction) for _ in range(3)]
    suggested_params = [0.1, 0.3, 0.2]
    for x, study in zip(suggested_params, studies):
        study.enqueue_trial({"x": x})
        study.optimize(objective, n_trials=1)
    info_list = _get_optimization_history_info_list(
        studies, target=None, target_name="Objective Value", error_bar=True)
    mean = np.mean(suggested_params).item()
    std = np.std(suggested_params).item()
    assert info_list == [
        _OptimizationHistoryInfo(
            [0],
            _ValuesInfo([mean], [std], "Objective Value"),
            _ValuesInfo([mean], [std], "Best Value"),
        )
    ]
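

# A hedged sketch, not part of the original tests: with ``error_bar=True``
# the helper aggregates values across studies at each trial number, which is
# why the expected info above is built from ``np.mean`` and ``np.std`` of the
# suggested parameters.
def sketch_error_bar_aggregation() -> None:
    per_study_values = np.array([[0.1], [0.3], [0.2]])  # one trial per study
    means = per_study_values.mean(axis=0)
    stds = per_study_values.std(axis=0)  # population std, matching np.std
    assert np.allclose(means, np.mean([0.1, 0.3, 0.2]))
    assert np.allclose(stds, np.std([0.1, 0.3, 0.2]))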


# From Optuna's Matplotlib visualization module; the snippet assumes that
# module's imports (typing, Study, FrozenTrial, Axes) and private helpers
# (``_imports``, ``_get_optimization_history_plot``) are in scope.
def plot_optimization_history(
    study: Union[Study, Sequence[Study]],
    *,
    target: Optional[Callable[[FrozenTrial], float]] = None,
    target_name: str = "Objective Value",
    error_bar: bool = False,
) -> "Axes":
    """Plot optimization history of all trials in a study with Matplotlib.

    .. seealso::
        Please refer to :func:`optuna.visualization.plot_optimization_history` for an example.

    Example:

        The following code snippet shows how to plot optimization history.

        .. plot::

            import optuna
            import matplotlib.pyplot as plt


            def objective(trial):
                x = trial.suggest_float("x", -100, 100)
                y = trial.suggest_categorical("y", [-1, 0, 1])
                return x ** 2 + y

            sampler = optuna.samplers.TPESampler(seed=10)
            study = optuna.create_study(sampler=sampler)
            study.optimize(objective, n_trials=10)

            optuna.visualization.matplotlib.plot_optimization_history(study)
            plt.tight_layout()

        .. note::
            You need to adjust the size of the plot by yourself using ``plt.tight_layout()`` or
            ``plt.savefig(IMAGE_NAME, bbox_inches='tight')``.

    Args:
        study:
            A :class:`~optuna.study.Study` object whose trials are plotted for their target values.
            You can pass multiple studies if you want to compare those optimization histories.

        target:
            A function to specify the value to display. If it is :obj:`None` and ``study`` is being
            used for single-objective optimization, the objective values are plotted.

            .. note::
                Specify this argument if ``study`` is being used for multi-objective optimization.

        target_name:
            Target's name to display on the axis label and the legend.

        error_bar:
            A flag to show the error bar.

    Returns:
        A :class:`matplotlib.axes.Axes` object.
    """

    _imports.check()

    info_list = _get_optimization_history_info_list(study, target, target_name,
                                                    error_bar)
    return _get_optimization_history_plot(info_list, target_name)
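

# Usage sketch (hedged): per the signature above, a sequence of studies can
# be passed, and ``error_bar=True`` overlays the mean and standard deviation
# across them. The objective and study setup below are illustrative only.
import optuna
import matplotlib.pyplot as plt


def objective(trial):
    return trial.suggest_float("x", -10, 10) ** 2


studies = [optuna.create_study(direction="minimize") for _ in range(3)]
for study in studies:
    study.optimize(objective, n_trials=20)

optuna.visualization.matplotlib.plot_optimization_history(studies,
                                                          error_bar=True)
plt.tight_layout()
plt.savefig("optimization_history.png", bbox_inches="tight")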