Example #1
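These examples come from Optuna's test suite and are shown without their imports. A plausible preamble, assuming recent Optuna module paths (the plotly variants import plot_param_importances from optuna.visualization, the matplotlib variants from optuna.visualization.matplotlib):

import math
from io import BytesIO

import matplotlib.pyplot as plt
import pytest
from matplotlib.patches import Rectangle

from optuna import create_study
from optuna.importance import FanovaImportanceEvaluator
from optuna.importance import MeanDecreaseImpurityImportanceEvaluator
from optuna.importance._base import BaseImportanceEvaluator  # private module, used only for typing
from optuna.samplers import RandomSampler
from optuna.trial import Trial

# Plotly variants (Examples 4 and 7):
from optuna.visualization import plot_param_importances

# Matplotlib variants (Examples 1, 5, and 6) instead use:
# from optuna.visualization.matplotlib import plot_param_importances

# `prepare_study_with_trials` is a helper from Optuna's own test utilities;
# its exact import path depends on the Optuna version.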
def test_plot_param_importances() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_param_importances(study)
    assert not figure.has_data()

    study = prepare_study_with_trials(with_c_d=True)

    # Test with a trial.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_param_importances(study)
    assert figure.has_data()

    # Test with an evaluator.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_param_importances(study,
                                    evaluator=MeanDecreaseImpurityImportanceEvaluator())
    assert figure.has_data()

    # Test with a trial to select parameter.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_param_importances(study, params=["param_b"])
    assert figure.has_data()

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_param_importances(
            study, target=lambda t: t.params["param_b"] + t.params["param_d"])
    assert figure.has_data()

    # Test with a customized target name.
    figure = plot_param_importances(study, target_name="Target Name")
    assert figure.has_data()

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_param_importances(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_param_importances(study)
    assert not figure.has_data()
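prepare_study_with_trials(with_c_d=True) is an Optuna test-suite helper that is not shown in this listing. A rough, hypothetical stand-in (using the preamble imports above) that matches the assertions in Examples 4 and 5, where param_b and param_d appear in every trial while param_a and param_c are conditional:

def prepare_study_with_trials_sketch():
    # Hypothetical substitute for Optuna's test helper; not the real implementation.
    def objective(trial: Trial) -> float:
        value = trial.suggest_float("param_b", 0.0, 3.0)
        value += trial.suggest_float("param_d", 0.0, 3.0)
        if trial.number % 2 == 0:
            # "param_a" and "param_c" are suggested only conditionally.
            value += trial.suggest_float("param_a", 0.0, 3.0)
            value += trial.suggest_float("param_c", 0.0, 3.0)
        return value

    study = create_study()
    study.optimize(objective, n_trials=10)
    return study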
Example #2
def test_mean_decrease_impurity_importance_evaluator_max_depth() -> None:
    # Assumes that `seed` can be fixed to reproduce identical results.

    study = create_study()
    study.optimize(objective, n_trials=3)

    evaluator = MeanDecreaseImpurityImportanceEvaluator(max_depth=1, seed=0)
    param_importance = evaluator.evaluate(study)

    evaluator = MeanDecreaseImpurityImportanceEvaluator(max_depth=2, seed=0)
    param_importance_different_max_depth = evaluator.evaluate(study)

    assert param_importance != param_importance_different_max_depth
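Examples 2 and 3 optimize an `objective` that is defined elsewhere in the test module. Any objective with a couple of suggested parameters exercises these checks; a minimal stand-in (using the preamble imports above):

def objective(trial: Trial) -> float:
    # Minimal stand-in; the original test module defines its own objective.
    x = trial.suggest_float("x", 0.1, 3)
    y = trial.suggest_float("y", 0.1, 3, log=True)
    return x + y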
Example #3
def test_mean_decrease_impurity_importance_evaluator_n_trees() -> None:
    # Assumes that `seed` can be fixed to reproduce identical results.

    study = create_study(sampler=RandomSampler(seed=0))
    study.optimize(objective, n_trials=3)

    evaluator = MeanDecreaseImpurityImportanceEvaluator(n_trees=10, seed=0)
    param_importance = evaluator.evaluate(study)

    evaluator = MeanDecreaseImpurityImportanceEvaluator(n_trees=20, seed=0)
    param_importance_different_n_trees = evaluator.evaluate(study)

    assert param_importance != param_importance_different_n_trees
Example #4
def test_plot_param_importances() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_param_importances(study)
    assert len(figure.data) == 0

    study = prepare_study_with_trials(with_c_d=True)

    # Test with a trial.
    figure = plot_param_importances(study)
    assert len(figure.data) == 1
    assert set(figure.data[0].y) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in figure.data[0].x), abs_tol=1e-5)

    # Test with an evaluator.
    figure = plot_param_importances(study,
                                    evaluator=MeanDecreaseImpurityImportanceEvaluator())
    assert len(figure.data) == 1
    assert set(figure.data[0].y) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in figure.data[0].x), abs_tol=1e-5)

    # Test with a trial to select parameter.
    figure = plot_param_importances(study, params=["param_b"])
    assert len(figure.data) == 1
    assert figure.data[0].y == ("param_b", )
    assert math.isclose(1.0, sum(i for i in figure.data[0].x), abs_tol=1e-5)

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_param_importances(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_param_importances(study)
    assert len(figure.data) == 0
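Example 4 exercises the plotly backend, so plot_param_importances returns a plotly Figure. Outside a test you would typically render or export it, e.g.:

figure = plot_param_importances(study)
figure.show()                                 # open in a browser
figure.write_html("param_importances.html")   # or export as standalone HTML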
Example #5
def test_plot_param_importances() -> None:

    # Test with no trial.
    study = create_study()
    figure = plot_param_importances(study)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

    study = prepare_study_with_trials(with_c_d=True)

    # Test with a trial.
    figure = plot_param_importances(study)

    bars = figure.findobj(
        Rectangle)[:-1]  # The last Rectangle is the plot itself.
    plotted_data = [bar.get_width() for bar in bars]
    # `get_yticklabels` returns a list of Text objects, e.g. Text(0, 0, 'param_d').
    labels = [label.get_text() for label in figure.get_yticklabels()]

    assert len(figure.get_lines()) == 0
    assert len(bars) == 2
    assert set(labels) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
    assert figure.xaxis.label.get_text() == "Importance for Objective Value"
    plt.savefig(BytesIO())

    # Test with an evaluator.
    figure = plot_param_importances(study,
                                    evaluator=MeanDecreaseImpurityImportanceEvaluator())

    bars = figure.findobj(
        Rectangle)[:-1]  # The last Rectangle is the plot itself.
    plotted_data = [bar.get_width() for bar in bars]
    labels = [label.get_text() for label in figure.get_yticklabels()]

    assert len(figure.get_lines()) == 0
    assert len(bars) == 2
    assert set(labels) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
    assert figure.xaxis.label.get_text() == "Importance for Objective Value"
    plt.savefig(BytesIO())

    # Test with a trial to select parameter.
    figure = plot_param_importances(study, params=["param_b"])

    bars = figure.findobj(
        Rectangle)[:-1]  # The last Rectangle is the plot itself.
    plotted_data = [bar.get_width() for bar in bars]
    labels = [label.get_text() for label in figure.get_yticklabels()]

    assert len(figure.get_lines()) == 0
    assert len(bars) == 1
    assert set(labels) == set(("param_b", ))
    assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
    assert figure.xaxis.label.get_text() == "Importance for Objective Value"
    plt.savefig(BytesIO())

    # Test with a customized target value.
    with pytest.warns(UserWarning):
        figure = plot_param_importances(
            study, target=lambda t: t.params["param_b"] + t.params["param_d"])
    bars = figure.findobj(
        Rectangle)[:-1]  # The last Rectangle is the plot itself.
    plotted_data = [bar.get_width() for bar in bars]
    labels = [label.get_text() for label in figure.get_yticklabels()]

    assert len(bars) == 2
    assert set(labels) == set(
        ("param_b", "param_d"))  # "param_a", "param_c" are conditional.
    assert math.isclose(1.0, sum(i for i in plotted_data), abs_tol=1e-5)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())

    # Test with a customized target name.
    figure = plot_param_importances(study, target_name="Target Name")
    assert len(figure.get_lines()) == 0
    assert figure.xaxis.label.get_text() == "Importance for Target Name"
    plt.savefig(BytesIO())

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        plot_param_importances(study, params=["optuna"])

    # Ignore failed trials.
    def fail_objective(_: Trial) -> float:

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError, ))
    figure = plot_param_importances(study)
    assert len(figure.get_lines()) == 0
    plt.savefig(BytesIO())
Example #6
def test_switch_label_when_param_insignificant() -> None:
    # Reconstructed opening: the listing truncated the start of this example.
    # The stand-in objective below assumes "y" has no effect on the value,
    # which is what makes its importance fall below 0.01.
    def _objective(trial: Trial) -> float:
        x = trial.suggest_int("x", 0, 2)
        _ = trial.suggest_int("y", -1, 1)
        return x ** 2

    study = create_study()
    for x in range(1, 3):
        study.enqueue_trial({"x": x, "y": 0})

    study.optimize(_objective, n_trials=2)
    ax = plot_param_importances(study)

    # Test if label for `y` param has been switched to `<0.01`.
    labels = ax.figure.findobj(lambda obj: "<0.01" in str(obj))
    assert len(labels) == 1
    plt.savefig(BytesIO())


@pytest.mark.parametrize("inf_value", [float("inf"), -float("inf")])
@pytest.mark.parametrize(
    "evaluator",
    [
        MeanDecreaseImpurityImportanceEvaluator(seed=10),
        FanovaImportanceEvaluator(seed=10)
    ],
)
@pytest.mark.parametrize("n_trial", [0, 10])
def test_trial_with_infinite_value_ignored(inf_value: float,
                                           evaluator: BaseImportanceEvaluator,
                                           n_trial: int) -> None:
    def _objective(trial: Trial) -> float:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1 + x2 * x3

    seed = 13

    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(_objective, n_trials=n_trial)
Example #7
def test_switch_label_when_param_insignificant() -> None:
    # Reconstructed opening: the listing truncated the start of this example.
    # The stand-in objective assumes "y" has no effect on the value, which is
    # what makes its importance label switch to "<0.01" below.
    def _objective(trial: Trial) -> float:
        x = trial.suggest_int("x", 0, 2)
        _ = trial.suggest_int("y", -1, 1)
        return x ** 2

    study = create_study()
    for x in range(1, 3):
        study.enqueue_trial({"x": x, "y": 0})

    study.optimize(_objective, n_trials=2)
    figure = plot_param_importances(study)

    # Test if label for `y` param has been switched to `<0.01`.
    labels = figure.data[0].text
    assert labels == ("<0.01", "1.00")


@pytest.mark.parametrize("inf_value", [float("inf"), -float("inf")])
@pytest.mark.parametrize(
    "evaluator",
    [MeanDecreaseImpurityImportanceEvaluator(seed=10), FanovaImportanceEvaluator(seed=10)],
)
@pytest.mark.parametrize("n_trial", [0, 10])
def test_trial_with_infinite_value_ignored(
    inf_value: float, evaluator: BaseImportanceEvaluator, n_trial: int
) -> None:
    def _objective(trial: Trial) -> float:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1 + x2 * x3

    seed = 13

    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(_objective, n_trials=n_trial)