Example #1
def test_plot_slice():
    # type: () -> None

    # Test with no trial.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_slice(study)
    assert len(figure.data) == 0

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    figure = plot_slice(study)
    assert len(figure.data) == 2
    assert figure.data[0]["x"] == (1.0, 2.5)
    assert figure.data[0]["y"] == (0.0, 1.0)
    assert figure.data[1]["x"] == (2.0, 0.0, 1.0)
    assert figure.data[1]["y"] == (0.0, 2.0, 1.0)

    # Test with a trial to select parameter.
    figure = plot_slice(study, params=["param_a"])
    assert len(figure.data) == 1
    assert figure.data[0]["x"] == (1.0, 2.5)
    assert figure.data[0]["y"] == (0.0, 1.0)

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_slice(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_slice(study)
    assert len(figure.data) == 0
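
Every listing below leans on a test helper, prepare_study_with_trials, that is never shown. The following is a minimal sketch of what it presumably builds, inferred from the coordinates asserted across these examples; the real helper in Optuna's test utilities takes more options (n_objectives, direction, less_than_two, value_for_first_trial, all used later in this listing), and FloatDistribution assumes a recent Optuna.

from optuna import create_study
from optuna.distributions import FloatDistribution
from optuna.trial import create_trial


def prepare_study_with_trials(no_trials=False, with_c_d=True):
    # Sketch only: options such as n_objectives and direction are not modeled.
    study = create_study()
    if no_trials:
        return study
    dists = {
        "param_a": FloatDistribution(0.0, 3.0),
        "param_b": FloatDistribution(0.0, 3.0),
        "param_c": FloatDistribution(2.0, 5.0),
        "param_d": FloatDistribution(2.0, 4.0),
    }
    # Values and params chosen to reproduce the coordinates asserted in these
    # examples; trial 1 omits "param_a"/"param_c", making them conditional.
    trials = [
        (0.0, {"param_a": 1.0, "param_b": 2.0, "param_c": 3.0, "param_d": 4.0}),
        (2.0, {"param_b": 0.0, "param_d": 4.0}),
        (1.0, {"param_a": 2.5, "param_b": 1.0, "param_c": 4.5, "param_d": 2.0}),
    ]
    for value, params in trials:
        if not with_c_d:
            params = {k: v for k, v in params.items() if k in ("param_a", "param_b")}
        study.add_trial(
            create_trial(
                value=value,
                params=params,
                distributions={k: dists[k] for k in params},
            ))
    return study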
Example #2
def test_inconsistent_number_of_trial_values() -> None:

    studies: List[Study] = []
    n_studies = 5

    for i in range(n_studies):
        study = prepare_study_with_trials()
        if i % 2 == 0:
            study.add_trial(create_trial(value=1.0))
        studies.append(study)

    figure = plot_edf(studies)
    assert len(figure.get_lines()) == n_studies
    plt.savefig(BytesIO())
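
For context outside the test suite: plot_edf accepts a sequence of studies and draws one empirical distribution function line per study. A hedged usage sketch, assuming the matplotlib backend (the test above inspects figure.get_lines()):

import optuna
from optuna.visualization.matplotlib import plot_edf

# Build a few independent studies of the same toy objective.
studies = []
for seed in range(3):
    study = optuna.create_study(sampler=optuna.samplers.RandomSampler(seed=seed))
    study.optimize(lambda t: t.suggest_float("x", -10, 10) ** 2, n_trials=20)
    studies.append(study)

ax = plot_edf(studies)  # one line per study, as the test asserts
ax.figure.savefig("edf.png")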
Example #3
def test_plot_contour_customized_target(params: List[str]) -> None:

    study = prepare_study_with_trials()
    with pytest.warns(UserWarning):
        figure = plot_contour(study,
                              params=params,
                              target=lambda t: t.params["param_d"])
    for data in figure.data:
        if "z" in data:
            assert 4.0 in itertools.chain.from_iterable(data["z"])
            assert 2.0 in itertools.chain.from_iterable(data["z"])
    if len(params) == 2:
        assert figure.data[0]["z"][3][1] == 4.0
        assert figure.data[0]["z"][2][2] == 2.0
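
Several listings take arguments such as params, direction, objective, or value. These are not pytest fixtures; the original files presumably decorate the functions with @pytest.mark.parametrize, which this listing drops. A representative reconstruction for the next example (the exact parameter sets are assumptions):

import pytest


@pytest.mark.parametrize("direction", ["minimize", "maximize"])  # hypothetical values
def test_color_map(direction: str) -> None:
    ...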
Example #4
def test_color_map(direction: str) -> None:
    study = prepare_study_with_trials(with_c_d=False, direction=direction)

    # `target` is `None`.
    contour = plot_contour(study).data[0]
    assert COLOR_SCALE == [v[1] for v in contour["colorscale"]]
    if direction == "minimize":
        assert contour["reversescale"]
    else:
        assert not contour["reversescale"]

    # When `target` is not `None`, `reversescale` is always `True`.
    contour = plot_contour(study, target=lambda t: t.number).data[0]
    assert COLOR_SCALE == [v[1] for v in contour["colorscale"]]
    assert contour["reversescale"]

    # Multi-objective optimization.
    study = prepare_study_with_trials(with_c_d=False,
                                      n_objectives=2,
                                      direction=direction)
    contour = plot_contour(study, target=lambda t: t.number).data[0]
    assert COLOR_SCALE == [v[1] for v in contour["colorscale"]]
    assert contour["reversescale"]
Example #5
def test_plot_contour_customized_target(params: List[str]) -> None:

    study = prepare_study_with_trials()
    with pytest.warns(UserWarning):
        figure = plot_contour(study,
                              params=params,
                              target=lambda t: t.params["param_d"])
    if len(params) == 2:
        assert len(figure.get_lines()) == 0
    else:
        assert figure.shape == (len(params), len(params))
        for i in range(len(params)):
            assert figure[i][0].yaxis.label.get_text() == list(params)[i]
    plt.savefig(BytesIO())
Example #6
def test_plot_intermediate_values():
    # type: () -> None

    # Test with no trials.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_intermediate_values(study)
    assert not figure.data

    def objective(trial, report_intermediate_values):
        # type: (Trial, bool) -> float

        if report_intermediate_values:
            trial.report(1.0, step=0)
            trial.report(2.0, step=1)
        return 0.0

    # Test with a trial with intermediate values.
    study = create_study()
    study.optimize(lambda t: objective(t, True), n_trials=1)
    figure = plot_intermediate_values(study)
    assert len(figure.data) == 1
    assert figure.data[0].x == (0, 1)
    assert figure.data[0].y == (1.0, 2.0)

    # Test a study with one trial with intermediate values and
    # one trial without intermediate values.
    # Expect the trial with no intermediate values to be ignored.
    study.optimize(lambda t: objective(t, False), n_trials=1)
    assert len(study.trials) == 2
    figure = plot_intermediate_values(study)
    assert len(figure.data) == 1
    assert figure.data[0].x == (0, 1)
    assert figure.data[0].y == (1.0, 2.0)

    # Test a study of only one trial that has no intermediate values.
    study = create_study()
    study.optimize(lambda t: objective(t, False), n_trials=1)
    figure = plot_intermediate_values(study)
    assert not figure.data

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = plot_intermediate_values(study)
    assert not figure.data
Example #7
def test_nonfinite_removed(recwarn: WarningsRecorder, value: float) -> None:

    # To check whether contour lines have been rendered (meaning the +-inf trials were
    # removed), we would have to verify that the artists responsible for drawing them
    # are present in the final plot. That turns out to be difficult to do reliably
    # (there is no information about which artists draw the contours), so instead we
    # check for the warning matplotlib raises when the contour plot fails.
    # TODO(xadrianzetx) Find a better way to test this.
    study = prepare_study_with_trials(with_c_d=True,
                                      value_for_first_trial=value)
    plot_contour(study)
    for record in recwarn.list:
        assert "No contour levels were found within the data range" not in str(
            record.message)
    plt.savefig(BytesIO())
Example #8
def test_plot_param_importances() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_param_importances(study)
    assert len(figure.get_lines()) == 0

    study = prepare_study_with_trials(with_c_d=True)

    # Test with a trial.
    figure = plot_param_importances(study)
    assert len(figure.get_lines()) == 0
    assert figure.xaxis.label.get_text() == "Importance for Objective Value"

    # Test with an evaluator.
    figure = plot_param_importances(
        study, evaluator=MeanDecreaseImpurityImportanceEvaluator())
    assert len(figure.get_lines()) == 0
    assert figure.xaxis.label.get_text() == "Importance for Objective Value"

    # Test with a trial to select parameter.
    figure = plot_param_importances(study, params=["param_b"])
    assert len(figure.get_lines()) == 0
    assert figure.xaxis.label.get_text() == "Importance for Objective Value"

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_param_importances(
            study, target=lambda t: t.params["param_b"] + t.params["param_d"])
    assert len(figure.get_lines()) == 0

    # Test with a customized target name.
    figure = plot_param_importances(study, target_name="Target Name")
    assert len(figure.get_lines()) == 0
    assert figure.xaxis.label.get_text() == "Importance for Target Name"

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_param_importances(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_param_importances(study)
    assert len(figure.get_lines()) == 0
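
A plausible import header for this example, assuming the matplotlib backend (the returned figure exposes get_lines() and xaxis); MeanDecreaseImpurityImportanceEvaluator is importable from optuna.importance:

import pytest

from optuna import create_study
from optuna.importance import MeanDecreaseImpurityImportanceEvaluator
from optuna.trial import Trial
from optuna.visualization.matplotlib import plot_param_importances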
Example #9
def test_plot_parallel_coordinate() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_parallel_coordinate(study)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    figure = plot_parallel_coordinate(study)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

    # Test with a trial to select parameter.
    figure = plot_parallel_coordinate(study, params=["param_a"])
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_parallel_coordinate(study,
                                          params=["param_a"],
                                          target=lambda t: t.params["param_b"])
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

    # Test with a customized target name.
    figure = plot_parallel_coordinate(study, target_name="Target Name")
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

    # Test with wrong params that do not exist in trials
    with pytest.raises(ValueError,
                       match="Parameter optuna does not exist in your study."):
        plot_parallel_coordinate(study, params=["optuna", "optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_parallel_coordinate(study)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())
Example #10
def test_plot_param_importances() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_param_importances(study)
    assert not figure.has_data()

    study = prepare_study_with_trials(with_c_d=True)

    # Test with a trial.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_param_importances(study)
    assert figure.has_data()

    # Test with an evaluator.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_param_importances(
        study, evaluator=MeanDecreaseImpurityImportanceEvaluator())
    assert figure.has_data()

    # Test with a trial to select parameter.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_param_importances(study, params=["param_b"])
    assert figure.has_data()

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_param_importances(
            study, target=lambda t: t.params["param_b"] + t.params["param_d"])
    assert figure.has_data()

    # Test with a customized target name.
    figure = plot_param_importances(study, target_name="Target Name")
    assert figure.has_data()

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_param_importances(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_param_importances(study)
    assert not figure.has_data()
Example #11
def test_generate_contour_plot_for_few_observations() -> None:

    study = prepare_study_with_trials(less_than_two=True)
    trials = study.trials

    # `x_axis` has one observation.
    params = ["param_a", "param_b"]
    contour, scatter = _generate_contour_subplot(trials, params[0], params[1],
                                                 StudyDirection.MINIMIZE)
    assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None

    # `y_axis` has one observation.
    params = ["param_b", "param_a"]
    contour, scatter = _generate_contour_subplot(trials, params[0], params[1],
                                                 StudyDirection.MINIMIZE)
    assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None
Example #12
def test_plot_intermediate_values() -> None:

    # Test with no trials.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_intermediate_values(study)
    assert len(figure.get_lines()) == 0

    def objective(trial: Trial, report_intermediate_values: bool) -> float:

        if report_intermediate_values:
            trial.report(1.0, step=0)
            trial.report(2.0, step=1)
        return 0.0

    # Test with a trial with intermediate values.
    study = create_study()
    study.optimize(lambda t: objective(t, True), n_trials=1)
    figure = plot_intermediate_values(study)
    assert len(figure.get_lines()) == 1
    assert list(figure.get_lines()[0].get_xdata()) == [0, 1]
    assert list(figure.get_lines()[0].get_ydata()) == [1.0, 2.0]

    # Test a study with one trial with intermediate values and
    # one trial without intermediate values.
    # Expect the trial with no intermediate values to be ignored.
    study.optimize(lambda t: objective(t, False), n_trials=1)
    assert len(study.trials) == 2
    figure = plot_intermediate_values(study)
    assert len(figure.get_lines()) == 1
    assert list(figure.get_lines()[0].get_xdata()) == [0, 1]
    assert list(figure.get_lines()[0].get_ydata()) == [1.0, 2.0]

    # Test a study of only one trial that has no intermediate values.
    study = create_study()
    study.optimize(lambda t: objective(t, False), n_trials=1)
    figure = plot_intermediate_values(study)
    assert len(figure.get_lines()) == 0

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = plot_intermediate_values(study)
    assert len(figure.get_lines()) == 0
Example #13
def test_generate_contour_plot_for_few_observations() -> None:

    study = prepare_study_with_trials(less_than_two=True)
    trials = study.trials
    reverse_scale = _is_reverse_scale(study, target=None)

    # `x_axis` has one observation.
    params = ["param_a", "param_b"]
    contour, scatter = _generate_contour_subplot(trials, params[0], params[1],
                                                 reverse_scale)
    assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None

    # `y_axis` has one observation.
    params = ["param_b", "param_a"]
    contour, scatter = _generate_contour_subplot(trials, params[0], params[1],
                                                 reverse_scale)
    assert contour.x is None and contour.y is None and scatter.x is None and scatter.y is None
Example #14
def test_plot_intermediate_values() -> None:

    # Test with no trials.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_intermediate_values(study)
    assert not figure.has_data()

    def objective(trial: Trial, report_intermediate_values: bool) -> float:

        if report_intermediate_values:
            trial.report(1.0, step=0)
            trial.report(2.0, step=1)
        return 0.0

    # Test with a trial with intermediate values.
    # TODO(ytknzw): Add more specific assertion with the test case.
    study = create_study()
    study.optimize(lambda t: objective(t, True), n_trials=1)
    figure = plot_intermediate_values(study)
    assert figure.has_data()

    # Test a study with one trial with intermediate values and
    # one trial without intermediate values.
    # Expect the trial with no intermediate values to be ignored.
    # TODO(ytknzw): Add more specific assertion with the test case.
    study.optimize(lambda t: objective(t, False), n_trials=1)
    assert len(study.trials) == 2
    figure = plot_intermediate_values(study)
    assert figure.has_data()

    # Test a study of only one trial that has no intermediate values.
    study = create_study()
    study.optimize(lambda t: objective(t, False), n_trials=1)
    figure = plot_intermediate_values(study)
    assert not figure.has_data()

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = plot_intermediate_values(study)
    assert not figure.has_data()
Example #15
def test_plot_parallel_coordinate() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_parallel_coordinate(study)
    assert not figure.has_data()

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_parallel_coordinate(study)
    assert figure.has_data()

    # Test with a trial to select parameter.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_parallel_coordinate(study, params=["param_a"])
    assert figure.has_data()

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_parallel_coordinate(study,
                                          params=["param_a"],
                                          target=lambda t: t.params["param_b"])
    assert figure.has_data()

    # Test with a customized target name.
    figure = plot_parallel_coordinate(study, target_name="Target Name")
    assert figure.has_data()

    # Test with wrong params that do not exist in trials
    with pytest.raises(ValueError,
                       match="Parameter optuna does not exist in your study."):
        plot_parallel_coordinate(study, params=["optuna", "optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_parallel_coordinate(study)
    assert not figure.has_data()
Example #16
def test_plot_param_importances() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_param_importances(study)
    assert len(figure.data) == 0

    study = prepare_study_with_trials(with_c_d=True)

    # Test with a trial.
    figure = plot_param_importances(study)
    assert len(figure.data) == 1
    assert set(figure.data[0].y) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in figure.data[0].x), abs_tol=1e-5)

    # Test with an evaluator.
    figure = plot_param_importances(
        study, evaluator=MeanDecreaseImpurityImportanceEvaluator())
    assert len(figure.data) == 1
    assert set(figure.data[0].y) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in figure.data[0].x), abs_tol=1e-5)

    # Test with a trial to select parameter.
    figure = plot_param_importances(study, params=["param_b"])
    assert len(figure.data) == 1
    assert figure.data[0].y == ("param_b", )
    assert math.isclose(1.0, sum(i for i in figure.data[0].x), abs_tol=1e-5)

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_param_importances(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_param_importances(study)
    assert len(figure.data) == 0
Example #17
def test_get_slice_plot_info_customized_target() -> None:
    params = ["param_a"]
    study = prepare_study_with_trials()
    info = _get_slice_plot_info(study,
                                params=params,
                                target=lambda t: t.params["param_d"],
                                target_name="Objective Value")
    assert info == _SlicePlotInfo(
        target_name="Objective Value",
        subplots=[
            _SliceSubplotInfo(
                param_name="param_a",
                x=[1.0, 2.5],
                y=[4.0, 2.0],
                trial_numbers=[0, 2],
                is_log=False,
                is_numerical=True,
            ),
        ],
    )
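
The equality check above implies that _SlicePlotInfo and _SliceSubplotInfo are simple value containers. A sketch of their likely shape, inferred only from the fields used here (the real definitions live inside optuna.visualization):

from dataclasses import dataclass
from typing import Any, List


@dataclass
class _SliceSubplotInfo:
    param_name: str
    x: List[Any]
    y: List[float]
    trial_numbers: List[int]
    is_log: bool
    is_numerical: bool


@dataclass
class _SlicePlotInfo:
    target_name: str
    subplots: List[_SliceSubplotInfo]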
Example #18
def test_contour_subplots_have_correct_axis_labels_and_ranges() -> None:
    study = prepare_study_with_trials()
    params = ["param_a", "param_b", "param_c"]
    subplots = plot_contour(study, params=params)
    # `subplots` should look like this:
    # param_a [[subplot 1, subplot 2, subplot 3],
    # param_b  [subplot 4, subplot 5, subplot 6],
    # param_c  [subplot 7, subplot 8, subplot 9]]
    #           param_a    param_b    param_c
    #
    # The following block ensures:
    # - The y-axis label of subplot 1 is "param_a"
    # - The x-axis label of subplot 7 is "param_a"
    # - Subplot 1, 2, and 3 have the same y-axis range that covers the search space for `param_a`
    # - Subplot 1, 4, and 7 have the same x-axis range that covers the search space for `param_a`
    # - The y-axis label of subplot 4 is "param_b"
    # - ...
    # - The y-axis label of subplot 7 is "param_c"
    # - ...
    param_ranges = {
        "param_a": (0.0, 3.0),
        "param_b": (0.0, 3.0),
        "param_c": (2.0, 5.0),
    }
    for index, (param_name, param_range) in enumerate(param_ranges.items()):
        minimum, maximum = param_range
        padding = (maximum - minimum) * AXES_PADDING_RATIO
        param_range_with_padding = (minimum - padding, maximum + padding)
        assert subplots[index, 0].get_ylabel() == param_name
        assert subplots[-1, index].get_xlabel() == param_name
        ylims = [ax.get_ylim() for ax in subplots[index, :]]
        assert all_equal(ylims)
        assert all(
            range_covers(param_range_with_padding, ylim) for ylim in ylims)
        xlims = [ax.get_xlim() for ax in subplots[:, index]]
        assert all_equal(xlims)
        assert all(
            range_covers(param_range_with_padding, xlim) for xlim in xlims)
    plt.savefig(BytesIO())
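
all_equal and range_covers are unshown test helpers. Plausible definitions consistent with the call sites above; the direction of range_covers (first argument contains the second) is an assumption:

from typing import Sequence, Tuple


def all_equal(values: Sequence) -> bool:
    # True when every element equals the first; vacuously True for empty input.
    return all(v == values[0] for v in values)


def range_covers(outer: Tuple[float, float], inner: Tuple[float, float]) -> bool:
    # Assumed semantics: `outer` fully contains `inner`.
    return outer[0] <= inner[0] and inner[1] <= outer[1]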
Example #19
def test_get_contour_info_nonfinite_multiobjective(objective: int,
                                                   value: float) -> None:

    study = prepare_study_with_trials(n_objectives=2,
                                      value_for_first_trial=value)
    info = _get_contour_info(study,
                             params=["param_b", "param_d"],
                             target=lambda t: t.values[objective])
    assert info == _ContourInfo(
        sorted_params=["param_b", "param_d"],
        sub_plot_infos=[[
            _SubContourInfo(
                xaxis=_AxisInfo(
                    name="param_b",
                    range=(-0.05, 1.05),
                    is_log=False,
                    is_cat=False,
                    indices=[-0.05, 0.0, 1.0, 1.05],
                    values=[0.0, 1.0],
                ),
                yaxis=_AxisInfo(
                    name="param_d",
                    range=(1.9, 4.1),
                    is_log=False,
                    is_cat=False,
                    indices=[1.9, 2.0, 4.0, 4.1],
                    values=[4.0, 2.0],
                ),
                z_values={
                    (1, 2): 2.0,
                    (2, 1): 1.0
                },
            )
        ]],
        reverse_scale=True,
        target_name="Objective Value",
    )
    assert info.sorted_params == ["param_b", "param_d"]
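
As with the slice-plot info in Example #17, the contour info objects compared here behave like plain dataclasses. A sketch inferred from the fields (the real definitions are in optuna.visualization._contour):

from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple


@dataclass
class _AxisInfo:
    name: str
    range: Tuple[float, float]
    is_log: bool
    is_cat: bool
    indices: List[float]
    values: List[Optional[float]]


@dataclass
class _SubContourInfo:
    xaxis: _AxisInfo
    yaxis: _AxisInfo
    z_values: Dict[Tuple[int, int], float]


@dataclass
class _ContourInfo:
    sorted_params: List[str]
    sub_plot_infos: List[List[_SubContourInfo]]
    reverse_scale: bool
    target_name: str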
Example #20
def test_plot_parallel_coordinate() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_parallel_coordinate(study)
    assert not figure.has_data()

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_parallel_coordinate(study)
    assert figure.has_data()

    # Test with a trial to select parameter.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_parallel_coordinate(study, params=["param_a"])
    assert figure.has_data()

    # Test with a customized target value.
    figure = plot_parallel_coordinate(study,
                                      params=["param_a"],
                                      target=lambda t: t.params["param_b"])
    assert figure.has_data()

    # Test with a customized target name.
    figure = plot_parallel_coordinate(study, target_name="Target Name")
    assert figure.has_data()

    # Test with wrong params that do not exist in trials
    with pytest.raises(ValueError,
                       match="Parameter optuna does not exist in your study."):
        plot_parallel_coordinate(study, params=["optuna", "optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_parallel_coordinate(study)
    assert not figure.has_data()

    # Test with categorical params that cannot be converted to numeral.
    # TODO(ytknzw): Add more specific assertion with the test case.
    study_categorical_params = create_study()
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={
                "category_a": "preferred",
                "category_b": "net"
            },
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "category_b": CategoricalDistribution(("net", "una")),
            },
        ))
    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={
                "category_a": "opt",
                "category_b": "una"
            },
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "category_b": CategoricalDistribution(("net", "una")),
            },
        ))
    figure = plot_parallel_coordinate(study_categorical_params)
    assert figure.has_data()
Example #21
def test_generate_contour_plot_for_few_observations(params: List[str]) -> None:

    study = prepare_study_with_trials(less_than_two=True)
    figure = plot_contour(study, params)
    assert not figure.has_data()
Example #22
def test_plot_parallel_coordinate() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_parallel_coordinate(study)
    assert len(figure.data) == 0

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    figure = plot_parallel_coordinate(study)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 1.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][2]["label"] == "param_b"
    assert figure.data[0]["dimensions"][2]["range"] == (1.0, 2.0)
    assert figure.data[0]["dimensions"][2]["values"] == (2.0, 1.0)

    # Test with a trial to select parameter.
    figure = plot_parallel_coordinate(study, params=["param_a"])
    assert len(figure.data[0]["dimensions"]) == 2
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 1.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_parallel_coordinate(study,
                                          params=["param_a"],
                                          target=lambda t: t.params["param_b"])
    assert len(figure.data[0]["dimensions"]) == 2
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (1.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (2.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)

    # Test with a customized target name.
    figure = plot_parallel_coordinate(study, target_name="Target Name")
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Target Name"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 1.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][2]["label"] == "param_b"
    assert figure.data[0]["dimensions"][2]["range"] == (1.0, 2.0)
    assert figure.data[0]["dimensions"][2]["values"] == (2.0, 1.0)

    # Test with wrong params that do not exist in trials
    with pytest.raises(ValueError,
                       match="Parameter optuna does not exist in your study."):
        plot_parallel_coordinate(study, params=["optuna", "optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_parallel_coordinate(study)
    assert len(figure.data) == 0
Example #23
def test_plot_slice() -> None:

    # Test with no trial.
    study = prepare_study_with_trials(no_trials=True)
    figure = plot_slice(study)
    assert len(figure.findobj(PathCollection)) == 0
    plt.savefig(BytesIO())

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    figure = plot_slice(study)
    assert len(figure) == 2
    assert len(figure[0].findobj(PathCollection)) == 1
    assert len(figure[1].findobj(PathCollection)) == 1
    assert figure[0].yaxis.label.get_text() == "Objective Value"
    plt.savefig(BytesIO())

    # Scatter plot data is available as PathCollection.
    data0 = figure[0].findobj(PathCollection)[0].get_offsets().data
    data1 = figure[1].findobj(PathCollection)[0].get_offsets().data
    assert np.allclose(data0, [[1.0, 0.0], [2.5, 1.0]])
    assert np.allclose(data1, [[2.0, 0.0], [0.0, 2.0], [1.0, 1.0]])

    # Test with a trial to select parameter.
    figure = plot_slice(study, params=["param_a"])
    assert len(figure.findobj(PathCollection)) == 1
    assert figure.yaxis.label.get_text() == "Objective Value"

    data0 = figure.findobj(PathCollection)[0].get_offsets().data
    assert np.allclose(data0, [[1.0, 0.0], [2.5, 1.0]])
    plt.savefig(BytesIO())

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_slice(study, params=["param_a"], target=lambda t: t.params["param_b"])
    assert len(figure.findobj(PathCollection)) == 1
    assert figure.yaxis.label.get_text() == "Objective Value"

    data0 = figure.findobj(PathCollection)[0].get_offsets().data
    assert np.allclose(data0, [[1.0, 2.0], [2.5, 1.0]])
    plt.savefig(BytesIO())

    # Test with a customized target name.
    figure = plot_slice(study, target_name="Target Name")
    assert len(figure) == 2
    assert len(figure[0].findobj(PathCollection)) == 1
    assert len(figure[1].findobj(PathCollection)) == 1
    assert figure[0].yaxis.label.get_text() == "Target Name"
    plt.savefig(BytesIO())

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_slice(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = plot_slice(study)
    assert len(figure.get_lines()) == 0
    assert len(figure.findobj(PathCollection)) == 0
    plt.savefig(BytesIO())
Example #24
def test_nonfinite_multiobjective(objective: int, value: float) -> None:

    study = prepare_study_with_trials(n_objectives=2, value_for_first_trial=value)
    figure = plot_parallel_coordinate(study, target=lambda t: t.values[objective])
    assert all(np.isfinite(figure.data[0]["dimensions"][0]["values"]))
Example #25
def test_get_contour_info_non_exist_param_error() -> None:
    study = prepare_study_with_trials()

    with pytest.raises(ValueError):
        _get_contour_info(study, ["optuna"])
Example #26
def test_get_contour_info_too_short_params(params: List[str]) -> None:
    study = prepare_study_with_trials()
    info = _get_contour_info(study, params=params)
    assert len(info.sorted_params) == len(params)
    assert len(info.sub_plot_infos) == len(params)
Example #27
def test_plot_param_importances() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_param_importances(study)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

    study = prepare_study_with_trials(with_c_d=True)

    # Test with a trial.
    figure = plot_param_importances(study)

    bars = figure.findobj(
        Rectangle)[:-1]  # The last Rectangle is the plot itself.
    plotted_data = [bar.get_width() for bar in bars]
    # get_yticklabels returns a data structure of Text(0, 0, 'param_d').
    labels = [label.get_text() for label in figure.get_yticklabels()]

    assert len(figure.get_lines()) == 0
    assert len(bars) == 2
    assert set(labels) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
    assert figure.xaxis.label.get_text() == "Importance for Objective Value"
    plt.savefig(BytesIO())

    # Test with an evaluator.
    figure = plot_param_importances(
        study, evaluator=MeanDecreaseImpurityImportanceEvaluator())

    bars = figure.findobj(
        Rectangle)[:-1]  # The last Rectangle is the plot itself.
    plotted_data = [bar.get_width() for bar in bars]
    labels = [label.get_text() for label in figure.get_yticklabels()]

    assert len(figure.get_lines()) == 0
    assert len(bars) == 2
    assert set(labels) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
    assert figure.xaxis.label.get_text() == "Importance for Objective Value"
    plt.savefig(BytesIO())

    # Test with a trial to select parameter.
    figure = plot_param_importances(study, params=["param_b"])

    bars = figure.findobj(
        Rectangle)[:-1]  # The last Rectangle is the plot itself.
    plotted_data = [bar.get_width() for bar in bars]
    labels = [label.get_text() for label in figure.get_yticklabels()]

    assert len(figure.get_lines()) == 0
    assert len(bars) == 1
    assert set(labels) == set(("param_b", ))
    assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
    assert figure.xaxis.label.get_text() == "Importance for Objective Value"
    plt.savefig(BytesIO())

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_param_importances(
            study, target=lambda t: t.params["param_b"] + t.params["param_d"])
    bars = figure.findobj(
        Rectangle)[:-1]  # The last Rectangle is the plot itself.
    plotted_data = [bar.get_width() for bar in bars]
    labels = [label.get_text() for label in figure.get_yticklabels()]

    assert len(bars) == 2
    assert set(labels) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

    # Test with a customized target name.
    figure = plot_param_importances(study, target_name="Target Name")
    assert len(figure.get_lines()) == 0
    assert figure.xaxis.label.get_text() == "Importance for Target Name"
    plt.savefig(BytesIO())

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_param_importances(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_param_importances(study)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())
Example #28
def test_filter_nonfinite_with_invalid_target() -> None:
    study = prepare_study_with_trials()
    trials = study.get_trials(states=(TrialState.COMPLETE,))
    with pytest.raises(ValueError):
        _filter_nonfinite(trials, target=lambda t: "invalid target")  # type: ignore
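
The behavior this test pins down: _filter_nonfinite drops trials whose target value is NaN or +-inf (see Example #7) and rejects targets that do not return a number. A rough behavioral sketch, not the real implementation (which lives in optuna.visualization._utils):

import math


def filter_nonfinite_sketch(trials, target=None):
    if target is None:
        target = lambda t: t.value
    kept = []
    for trial in trials:
        value = target(trial)
        if not isinstance(value, (int, float)):
            # Mirrors the ValueError the test expects for a str-returning target.
            raise ValueError("target must return a float value")
        if math.isfinite(value):
            kept.append(trial)
    return kept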
Example #29
def test_plot_parallel_coordinate() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_parallel_coordinate(study)
    assert len(figure.data) == 0

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    figure = plot_parallel_coordinate(study)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 2.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][2]["label"] == "param_b"
    assert figure.data[0]["dimensions"][2]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][2]["values"] == (2.0, 0.0, 1.0)

    # Test with a trial to select parameter.
    figure = plot_parallel_coordinate(study, params=["param_a"])
    assert len(figure.data[0]["dimensions"]) == 2
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 2.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)

    # Test with a customized target value.
    figure = plot_parallel_coordinate(study,
                                      params=["param_a"],
                                      target=lambda t: t.params["param_b"])
    assert len(figure.data[0]["dimensions"]) == 2
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (2.0, 0.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)

    # Test with a customized target name.
    figure = plot_parallel_coordinate(study, target_name="Target Name")
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Target Name"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 2.0, 1.0)
    assert figure.data[0]["dimensions"][1]["label"] == "param_a"
    assert figure.data[0]["dimensions"][1]["range"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][1]["values"] == (1.0, 2.5)
    assert figure.data[0]["dimensions"][2]["label"] == "param_b"
    assert figure.data[0]["dimensions"][2]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][2]["values"] == (2.0, 0.0, 1.0)

    # Test with wrong params that do not exist in trials
    with pytest.raises(ValueError,
                       match="Parameter optuna does not exist in your study."):
        plot_parallel_coordinate(study, params=["optuna", "optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_parallel_coordinate(study)
    assert len(figure.data) == 0

    # Test with categorical params that cannot be converted to numeral.
    study_categorical_params = create_study()
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={
                "category_a": "preferred",
                "category_b": "net"
            },
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "category_b": CategoricalDistribution(("net", "una")),
            },
        ))
    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={
                "category_a": "opt",
                "category_b": "una"
            },
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "category_b": CategoricalDistribution(("net", "una")),
            },
        ))
    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, 2.0)
    assert figure.data[0]["dimensions"][1]["label"] == "category_a"
    assert figure.data[0]["dimensions"][1]["range"] == (0, 1)
    assert figure.data[0]["dimensions"][1]["values"] == (0, 1)
    assert figure.data[0]["dimensions"][1]["ticktext"] == (["preferred",
                                                            0], ["opt", 1])
    assert figure.data[0]["dimensions"][2]["label"] == "category_b"
    assert figure.data[0]["dimensions"][2]["range"] == (0, 1)
    assert figure.data[0]["dimensions"][2]["values"] == (0, 1)
    assert figure.data[0]["dimensions"][2]["ticktext"] == (["net",
                                                            0], ["una", 1])
Example #30
def test_plot_parallel_coordinate():
    # type: () -> None

    # Test with no trial.
    study = create_study()
    figure = plot_parallel_coordinate(study)
    assert len(figure.data) == 0

    study = prepare_study_with_trials(with_c_d=False)

    # Test with a trial.
    figure = plot_parallel_coordinate(study)
    assert len(figure.data[0]['dimensions']) == 3
    assert figure.data[0]['dimensions'][0]['label'] == 'Objective Value'
    assert figure.data[0]['dimensions'][0]['range'] == (0.0, 2.0)
    assert figure.data[0]['dimensions'][0]['values'] == (0.0, 2.0, 1.0)
    assert figure.data[0]['dimensions'][1]['label'] == 'param_a'
    assert figure.data[0]['dimensions'][1]['range'] == (1.0, 2.5)
    assert figure.data[0]['dimensions'][1]['values'] == (1.0, 2.5)
    assert figure.data[0]['dimensions'][2]['label'] == 'param_b'
    assert figure.data[0]['dimensions'][2]['range'] == (0.0, 2.0)
    assert figure.data[0]['dimensions'][2]['values'] == (2.0, 0.0, 1.0)

    # Test with a trial to select parameter.
    figure = plot_parallel_coordinate(study, params=['param_a'])
    assert len(figure.data[0]['dimensions']) == 2
    assert figure.data[0]['dimensions'][0]['label'] == 'Objective Value'
    assert figure.data[0]['dimensions'][0]['range'] == (0.0, 2.0)
    assert figure.data[0]['dimensions'][0]['values'] == (0.0, 2.0, 1.0)
    assert figure.data[0]['dimensions'][1]['label'] == 'param_a'
    assert figure.data[0]['dimensions'][1]['range'] == (1.0, 2.5)
    assert figure.data[0]['dimensions'][1]['values'] == (1.0, 2.5)

    # Test with wrong params that do not exist in trials
    with pytest.raises(ValueError):
        plot_parallel_coordinate(study, params=['optuna', 'optuna'])

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_parallel_coordinate(study)
    assert len(figure.data) == 0

    # Test with categorical params that cannot be converted to numeral.
    study_categorical_params = create_study()
    study_categorical_params._append_trial(value=0.0,
                                           params={
                                               'category_a': 'preferred',
                                               'category_b': 'net',
                                           },
                                           distributions={
                                               'category_a':
                                               CategoricalDistribution(
                                                   ('preferred', 'opt')),
                                               'category_b':
                                               CategoricalDistribution(
                                                   ('net', 'una')),
                                           })
    study_categorical_params._append_trial(value=2.0,
                                           params={
                                               'category_a': 'opt',
                                               'category_b': 'una',
                                           },
                                           distributions={
                                               'category_a':
                                               CategoricalDistribution(
                                                   ('preferred', 'opt')),
                                               'category_b':
                                               CategoricalDistribution(
                                                   ('net', 'una')),
                                           })
    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]['dimensions']) == 3
    assert figure.data[0]['dimensions'][0]['label'] == 'Objective Value'
    assert figure.data[0]['dimensions'][0]['range'] == (0.0, 2.0)
    assert figure.data[0]['dimensions'][0]['values'] == (0.0, 2.0)
    assert figure.data[0]['dimensions'][1]['label'] == 'category_a'
    assert figure.data[0]['dimensions'][1]['range'] == (0, 1)
    assert figure.data[0]['dimensions'][1]['values'] == (0, 1)
    assert figure.data[0]['dimensions'][1]['ticktext'] == (['preferred', 0], ['opt', 1])
    assert figure.data[0]['dimensions'][2]['label'] == 'category_b'
    assert figure.data[0]['dimensions'][2]['range'] == (0, 1)
    assert figure.data[0]['dimensions'][2]['values'] == (0, 1)
    assert figure.data[0]['dimensions'][2]['ticktext'] == (['net', 0], ['una', 1])
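
Example #30 predates the public add_trial API: Study._append_trial is private and was dropped in later Optuna releases. The modern equivalent of its categorical setup, already used verbatim in Examples #20 and #29:

from optuna import create_study
from optuna.distributions import CategoricalDistribution
from optuna.trial import create_trial

study = create_study()
study.add_trial(
    create_trial(
        value=0.0,
        params={"category_a": "preferred", "category_b": "net"},
        distributions={
            "category_a": CategoricalDistribution(("preferred", "opt")),
            "category_b": CategoricalDistribution(("net", "una")),
        },
    ))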