Example #1
def test_plot_parallel_coordinate_only_missing_params() -> None:
    # When each trial contains only a subset of the parameters,
    # the plot returns an empty figure.
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1e-6},
            distributions={
                "param_a": FloatDistribution(1e-7, 1e-2),
            },
        ))
    study.add_trial(
        create_trial(
            value=1.0,
            params={"param_b": 200},
            distributions={
                "param_b": FloatDistribution(1, 1000),
            },
        ))

    figure = plot_parallel_coordinate(study)
    axes = figure.get_figure().axes
    assert len(axes) == 0 + 1  # No parameter axes; only the base axes remains.
    plt.savefig(BytesIO())
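These snippets are excerpts from Optuna's test suite, so their module-level imports are omitted. A minimal sketch of the imports most of the examples assume is shown below (an assumption on my part; each original test module defines its own subset, and helpers such as StorageSupplier, DeterministicRelativeSampler, and FixedSampler come from Optuna's internal testing utilities):

from io import BytesIO

import matplotlib.pyplot as plt

import optuna
from optuna import create_study, samplers
from optuna.distributions import (
    BaseDistribution,
    CategoricalDistribution,
    FloatDistribution,
    IntDistribution,
)
from optuna.trial import FrozenTrial, Trial, TrialState, create_trial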
Example #2
def test_search_space_transform_untransform_params() -> None:
    search_space = {
        "x0": CategoricalDistribution(["corge"]),
        "x1": CategoricalDistribution(["foo", "bar", "baz", "qux"]),
        "x2": CategoricalDistribution(["quux", "quuz"]),
        "x3": FloatDistribution(2, 3),
        "x4": FloatDistribution(-2, 2),
        "x5": FloatDistribution(1, 10, log=True),
        "x6": FloatDistribution(1, 1, log=True),
        "x7": FloatDistribution(0, 1, step=0.2),
        "x8": IntDistribution(2, 4),
        "x9": IntDistribution(1, 10, log=True),
        "x10": IntDistribution(1, 9, step=2),
    }

    params = {
        "x0": "corge",
        "x1": "qux",
        "x2": "quux",
        "x3": 2.0,
        "x4": -2,
        "x5": 1.0,
        "x6": 1.0,
        "x7": 0.2,
        "x8": 2,
        "x9": 1,
        "x10": 3,
    }

    trans = _SearchSpaceTransform(search_space)
    trans_params = trans.transform(params)
    untrans_params = trans.untransform(trans_params)

    for name in params.keys():
        assert untrans_params[name] == params[name]
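For context, `_SearchSpaceTransform` is an internal Optuna helper that flattens a search space into a continuous numpy vector (one-hot encoding categoricals and log-scaling log distributions) and inverts that mapping with `untransform`. A minimal round-trip sketch, assuming the internal import path `optuna._transform`:

from optuna._transform import _SearchSpaceTransform
from optuna.distributions import CategoricalDistribution, FloatDistribution

space = {
    "x": FloatDistribution(0.0, 1.0),
    "c": CategoricalDistribution(["a", "b"]),
}
trans = _SearchSpaceTransform(space)
vec = trans.transform({"x": 0.5, "c": "b"})  # numpy array; "c" becomes a one-hot block
assert trans.untransform(vec) == {"x": 0.5, "c": "b"}  # the round trip recovers the params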
Example #3
def test_multi_objective_fanova_importance_evaluator_with_infinite(
    target_idx: int, inf_value: float
) -> None:
    # This test ensures that trials with infinite values are ignored when calculating importance scores.
    n_trial = 10
    seed = 13

    # Importance scores are calculated without a trial with an inf value.
    study = create_study(directions=["minimize", "minimize"], sampler=RandomSampler(seed=seed))
    study.optimize(multi_objective_function, n_trials=n_trial)

    evaluator = FanovaImportanceEvaluator(seed=seed)
    param_importance_without_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        )
    )
    # Importance scores are calculated with a trial with an inf value.
    param_importance_with_inf = evaluator.evaluate(study, target=lambda t: t.values[target_idx])

    # The importance scores should be identical with and without the inf trial,
    # because the trial whose objective value is infinite is ignored.
    assert param_importance_with_inf == param_importance_without_inf
Example #4
def test_filter_inf_trials_multiobjective(value: float,
                                          objective_selected: int,
                                          expected: int) -> None:

    study = create_study(directions=["minimize", "maximize"])
    study.add_trial(
        create_trial(
            values=[0.0, 1.0],
            params={"x": 1.0},
            distributions={"x": FloatDistribution(0.0, 1.0)},
        ))
    study.add_trial(
        create_trial(
            values=[0.0, value],
            params={"x": 0.0},
            distributions={"x": FloatDistribution(0.0, 1.0)},
        ))
    study.add_trial(
        create_trial(
            values=[value, value],
            params={"x": 0.0},
            distributions={"x": FloatDistribution(0.0, 1.0)},
        ))

    def _target(t: FrozenTrial) -> float:
        return t.values[objective_selected]

    trials = _filter_nonfinite(
        study.get_trials(states=(TrialState.COMPLETE, )), target=_target)
    assert len(trials) == expected
    assert all(t.number == num for t, num in zip(trials, range(expected)))
Example #5
def test_sample_single_distribution(
        sampler_class: Callable[[], BaseSampler]) -> None:

    relative_search_space = {
        "a": UniformDistribution(low=1.0, high=1.0),
        "b": LogUniformDistribution(low=1.0, high=1.0),
        "c": DiscreteUniformDistribution(low=1.0, high=1.0, q=1.0),
        "d": IntUniformDistribution(low=1, high=1),
        "e": IntLogUniformDistribution(low=1, high=1),
        "f": CategoricalDistribution([1]),
        "g": FloatDistribution(low=1.0, high=1.0),
        "h": FloatDistribution(low=1.0, high=1.0, log=True),
        "i": FloatDistribution(low=1.0, high=1.0, step=1.0),
        "j": IntDistribution(low=1, high=1),
        "k": IntDistribution(low=1, high=1, log=True),
    }

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = sampler_class()
    study = optuna.study.create_study(sampler=sampler)

    # We need to test the construction of the model, so we should set `n_trials >= 2`.
    for _ in range(2):
        trial = study.ask(fixed_distributions=relative_search_space)
        study.tell(trial, 1.0)
        for param_name in relative_search_space.keys():
            assert trial.params[param_name] == 1
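Example #5 mixes Optuna's legacy distribution classes with the unified FloatDistribution/IntDistribution that replaced them. The unified equivalents of the legacy entries above look roughly like this (a sketch; the legacy classes are deprecated in favor of these, with constructor parameters mirrored):

from optuna.distributions import FloatDistribution, IntDistribution

# Unified equivalents of the legacy distributions used in Example #5:
unified_search_space = {
    "a": FloatDistribution(low=1.0, high=1.0),            # UniformDistribution
    "b": FloatDistribution(low=1.0, high=1.0, log=True),  # LogUniformDistribution
    "c": FloatDistribution(low=1.0, high=1.0, step=1.0),  # DiscreteUniformDistribution(q=1.0)
    "d": IntDistribution(low=1, high=1),                  # IntUniformDistribution
    "e": IntDistribution(low=1, high=1, log=True),        # IntLogUniformDistribution
}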
Example #6
def test_plot_slice_log_scale() -> None:

    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={
                "x_linear": 1.0,
                "y_log": 1e-3
            },
            distributions={
                "x_linear": FloatDistribution(0.0, 3.0),
                "y_log": FloatDistribution(1e-5, 1.0, log=True),
            },
        ))

    # Plot a parameter.
    figure = plot_slice(study, params=["y_log"])
    assert figure.layout["xaxis_type"] == "log"
    figure = plot_slice(study, params=["x_linear"])
    assert figure.layout["xaxis_type"] is None

    # Plot multiple parameters.
    figure = plot_slice(study)
    assert figure.layout["xaxis_type"] is None
    assert figure.layout["xaxis2_type"] == "log"
Example #7
def test_suggest_loguniform(storage_mode: str) -> None:

    with pytest.raises(ValueError):
        FloatDistribution(low=1.0, high=0.9, log=True)

    with pytest.raises(ValueError):
        FloatDistribution(low=0.0, high=0.9, log=True)

    mock = Mock()
    mock.side_effect = [1.0, 2.0]
    sampler = samplers.RandomSampler()

    with patch.object(
        sampler, "sample_independent", mock
    ) as mock_object, StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = FloatDistribution(low=0.1, high=4.0, log=True)

        assert trial._suggest("x", distribution) == 1.0  # Test suggesting a param.
        assert trial._suggest("x", distribution) == 1.0  # Test suggesting the same param.
        assert trial._suggest("y", distribution) == 2.0  # Test suggesting a different param.
        assert trial.params == {"x": 1.0, "y": 2.0}
        assert mock_object.call_count == 2
Example #8
def _create_study_with_log_params() -> Study:
    study_log_params = create_study()
    distributions: Dict[str, BaseDistribution] = {
        "param_a": FloatDistribution(1e-7, 1e-2, log=True),
        "param_b": FloatDistribution(1, 1000, log=True),
    }
    study_log_params.add_trial(
        create_trial(
            value=0.0,
            params={
                "param_a": 1e-6,
                "param_b": 10
            },
            distributions=distributions,
        ))
    study_log_params.add_trial(
        create_trial(
            value=1.0,
            params={
                "param_a": 2e-5,
                "param_b": 200
            },
            distributions=distributions,
        ))
    study_log_params.add_trial(
        create_trial(
            value=0.1,
            params={
                "param_a": 1e-4,
                "param_b": 30
            },
            distributions=distributions,
        ))
    return study_log_params
Example #9
def test_shap_importance_evaluator_with_infinite(inf_value: float) -> None:
    # This test ensures that trials with infinite values are ignored when calculating importance scores.
    n_trial = 10
    seed = 13

    # Importance scores are calculated without a trial with an inf value.
    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(objective, n_trials=n_trial)

    evaluator = ShapleyImportanceEvaluator(seed=seed)
    param_importance_without_inf = evaluator.evaluate(study)

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            value=inf_value,
            params={"x1": 1.0, "x2": 1.0, "x3": 3.0, "x4": 0.1},
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": IntDistribution(low=2, high=4, log=True),
                "x4": CategoricalDistribution([0.1, 1, 10]),
            },
        )
    )
    # Importance scores are calculated with a trial with an inf value.
    param_importance_with_inf = evaluator.evaluate(study)

    # The importance scores should be identical with and without the inf trial,
    # because the trial whose objective value is infinite is ignored.
    assert param_importance_with_inf == param_importance_without_inf
Example #10
def test_filter_inf_trials_message(caplog: LogCaptureFixture,
                                   with_message: bool) -> None:

    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x": 1.0},
            distributions={"x": FloatDistribution(0.0, 1.0)},
        ))
    study.add_trial(
        create_trial(
            value=float("inf"),
            params={"x": 0.0},
            distributions={"x": FloatDistribution(0.0, 1.0)},
        ))

    optuna.logging.enable_propagation()
    _filter_nonfinite(study.get_trials(states=(TrialState.COMPLETE, )),
                      with_message=with_message)
    msg = "Trial 1 is omitted in visualization because its objective value is inf or nan."

    if with_message:
        assert msg in caplog.text
        n_filtered_as_inf = 0
        for record in caplog.records:
            if record.msg == msg:
                assert record.levelno == logging.WARNING
                n_filtered_as_inf += 1
        assert n_filtered_as_inf == 1
    else:
        assert msg not in caplog.text
Example #11
def test_plot_parallel_coordinate_only_missing_params() -> None:
    # When each trial contains only a subset of the parameters,
    # the plot returns an empty figure.
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"param_a": 1e-6},
            distributions={
                "param_a": FloatDistribution(1e-7, 1e-2, log=True),
            },
        )
    )
    study.add_trial(
        create_trial(
            value=1.0,
            params={"param_b": 200},
            distributions={
                "param_b": FloatDistribution(1, 1000, log=True),
            },
        )
    )

    figure = plot_parallel_coordinate(study)
    assert len(figure.data) == 0
Example #12
def test_crossover_deterministic(crossover: BaseCrossover, rand_value: float,
                                 expected_params: np.ndarray) -> None:

    study = optuna.study.create_study()
    search_space: Dict[str, BaseDistribution] = {
        "x": FloatDistribution(1, 10),
        "y": FloatDistribution(1, 10),
    }
    numerical_transform = _SearchSpaceTransform(search_space)
    parent_params = np.array([[1.0, 2.0], [3.0, 4.0]])

    if crossover.n_parents == 3:
        parent_params = np.append(parent_params, [[5.0, 6.0]], axis=0)

    def _rand(*args: Any, **kwargs: Any) -> Any:
        if len(args) == 0:
            return rand_value
        return np.full(args[0], rand_value)

    def _normal(*args: Any, **kwargs: Any) -> Any:
        if kwargs.get("size") is None:
            return rand_value
        return np.full(kwargs.get("size"), rand_value)  # type: ignore

    rng = Mock()
    rng.rand = Mock(side_effect=_rand)
    rng.normal = Mock(side_effect=_normal)
    child_params = crossover.crossover(parent_params, rng, study,
                                       numerical_transform.bounds)
    np.testing.assert_almost_equal(child_params, expected_params)
Example #13
    def infer_relative_search_space(
        self, study: "optuna.study.Study", trial: "optuna.trial.FrozenTrial"
    ) -> Dict[str, distributions.BaseDistribution]:
        return {
            "x": FloatDistribution(low=5, high=6),
            "y": FloatDistribution(low=5, high=6),
        }
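Example #13 is a method excerpt; in the original test it belongs to a custom sampler that always proposes the same relative search space. Roughly, the enclosing class looks like this (a sketch; the class name and the bodies of the other two required BaseSampler methods are assumptions):

import optuna
from optuna.distributions import FloatDistribution

class FixedSearchSpaceSampler(optuna.samplers.BaseSampler):  # hypothetical name
    def infer_relative_search_space(self, study, trial):
        # The method from Example #13: a fixed two-parameter relative space.
        return {
            "x": FloatDistribution(low=5, high=6),
            "y": FloatDistribution(low=5, high=6),
        }

    def sample_relative(self, study, trial, search_space):
        # Jointly sample every parameter in `search_space`.
        raise NotImplementedError

    def sample_independent(self, study, trial, param_name, param_distribution):
        # Fallback for parameters outside the relative search space.
        raise NotImplementedError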
Example #14
def test_plot_parallel_coordinate_unique_hyper_param() -> None:
    # Test case when one unique value is suggested during the optimization.

    study_categorical_params = create_study()
    study_categorical_params.add_trial(
        create_trial(
            value=0.0,
            params={
                "category_a": "preferred",
                "param_b": 30
            },
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "param_b": FloatDistribution(1, 1000, log=True),
            },
        ))

    # Both hyperparameters contain unique values.
    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][0]["label"] == "Objective Value"
    assert figure.data[0]["dimensions"][0]["range"] == (0.0, 0.0)
    assert figure.data[0]["dimensions"][0]["values"] == (0.0, )
    assert figure.data[0]["dimensions"][1]["label"] == "category_a"
    assert figure.data[0]["dimensions"][1]["range"] == (0, 0)
    assert figure.data[0]["dimensions"][1]["values"] == (0.0, )
    assert figure.data[0]["dimensions"][1]["ticktext"] == ("preferred", )
    assert figure.data[0]["dimensions"][1]["tickvals"] == (0, )
    assert figure.data[0]["dimensions"][2]["label"] == "param_b"
    assert figure.data[0]["dimensions"][2]["range"] == (math.log10(30), math.log10(30))
    assert figure.data[0]["dimensions"][2]["values"] == (math.log10(30), )
    assert figure.data[0]["dimensions"][2]["ticktext"] == ("30", )
    assert figure.data[0]["dimensions"][2]["tickvals"] == (math.log10(30), )

    study_categorical_params.add_trial(
        create_trial(
            value=2.0,
            params={
                "category_a": "preferred",
                "param_b": 20
            },
            distributions={
                "category_a": CategoricalDistribution(("preferred", "opt")),
                "param_b": FloatDistribution(1, 1000, log=True),
            },
        ))

    # Still "category_a" contains unique suggested value during the optimization.
    figure = plot_parallel_coordinate(study_categorical_params)
    assert len(figure.data[0]["dimensions"]) == 3
    assert figure.data[0]["dimensions"][1]["label"] == "category_a"
    assert figure.data[0]["dimensions"][1]["range"] == (0, 0)
    assert figure.data[0]["dimensions"][1]["values"] == (0.0, 0.0)
    assert figure.data[0]["dimensions"][1]["ticktext"] == ("preferred", )
    assert figure.data[0]["dimensions"][1]["tickvals"] == (0, )
Example #15
def prepare_study_with_trials(
    n_objectives: int = 1,
    direction: str = "minimize",
    value_for_first_trial: float = 0.0,
) -> Study:

    """Prepare a study for tests.

    Args:
        n_objectives: Number of objective values.
        direction: Study's optimization direction.
        value_for_first_trial: Objective value in the first trial. This value will be broadcast
            to all objectives in multi-objective optimization.

    Returns:
        :class:`~optuna.study.Study`

    """

    study = create_study(directions=[direction] * n_objectives)
    study.add_trial(
        create_trial(
            values=[value_for_first_trial] * n_objectives,
            params={"param_a": 1.0, "param_b": 2.0, "param_c": 3.0, "param_d": 4.0},
            distributions={
                "param_a": FloatDistribution(0.0, 3.0),
                "param_b": FloatDistribution(0.0, 3.0),
                "param_c": FloatDistribution(2.0, 5.0),
                "param_d": FloatDistribution(2.0, 5.0),
            },
        )
    )
    study.add_trial(
        create_trial(
            values=[2.0] * n_objectives,
            params={"param_b": 0.0, "param_d": 4.0},
            distributions={
                "param_b": FloatDistribution(0.0, 3.0),
                "param_d": FloatDistribution(2.0, 5.0),
            },
        )
    )
    study.add_trial(
        create_trial(
            values=[1.0] * n_objectives,
            params={"param_a": 2.5, "param_b": 1.0, "param_c": 4.5, "param_d": 2.0},
            distributions={
                "param_a": FloatDistribution(0.0, 3.0),
                "param_b": FloatDistribution(0.0, 3.0),
                "param_c": FloatDistribution(2.0, 5.0),
                "param_d": FloatDistribution(2.0, 5.0),
            },
        )
    )
    return study
Example #16
File: test_cma.py  Project: optuna/optuna
    def search_space() -> Dict[str, BaseDistribution]:

        return {
            "c": CategoricalDistribution(("a", "b")),
            "d": FloatDistribution(-1, 9, step=2),
            "i": IntDistribution(-1, 1),
            "ii": IntDistribution(-1, 3, step=2),
            "il": IntDistribution(2, 16, log=True),
            "l": FloatDistribution(0.001, 0.1, log=True),
            "u": FloatDistribution(-2, 2),
        }
Example #17
def test_multi_objective_trial_with_infinite_value_ignored(
        target_idx: int, inf_value: float, evaluator: BaseImportanceEvaluator,
        n_trial: int) -> None:
    def _multi_objective_function(trial: Trial) -> Tuple[float, float]:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1, x2 * x3

    seed = 13
    target_name = "Objective Value"

    study = create_study(directions=["minimize", "minimize"],
                         sampler=RandomSampler(seed=seed))
    study.optimize(_multi_objective_function, n_trials=n_trial)

    # Create param importances info without inf value.
    info_without_inf = _get_importances_info(
        study,
        evaluator=evaluator,
        params=None,
        target=lambda t: t.values[target_idx],
        target_name=target_name,
    )

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={
                "x1": 1.0,
                "x2": 1.0,
                "x3": 3.0
            },
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        ))

    # Create param importances info with inf value.
    info_with_inf = _get_importances_info(
        study,
        evaluator=evaluator,
        params=None,
        target=lambda t: t.values[target_idx],
        target_name=target_name,
    )

    # The obtained info instances should be identical with and without the inf trial,
    # because the trial whose objective value is infinite is ignored.
    assert info_with_inf == info_without_inf
Example #18
def _generate_trial(generator: random.Random) -> FrozenTrial:
    example_params = {
        "paramA": (generator.uniform(0, 1), FloatDistribution(0, 1)),
        "paramB": (generator.uniform(1, 2), FloatDistribution(1, 2, log=True)),
        "paramC": (
            generator.choice(["CatA", "CatB", "CatC"]),
            CategoricalDistribution(("CatA", "CatB", "CatC")),
        ),
        "paramD": (generator.uniform(-3, 0), FloatDistribution(-3, 0)),
        "paramE":
        (generator.choice([0.1, 0.2]), CategoricalDistribution((0.1, 0.2))),
    }
    example_attrs = {
        "attrA": "valueA",
        "attrB": 1,
        "attrC": None,
        "attrD": {
            "baseline_score": 0.001,
            "tags": ["image", "classification"]
        },
    }
    state = generator.choice(ALL_STATES)
    params = {}
    distributions = {}
    user_attrs = {}
    system_attrs = {}
    intermediate_values = {}
    for key, (value, dist) in example_params.items():
        if generator.choice([True, False]):
            params[key] = value
            distributions[key] = dist
    for key, value in example_attrs.items():
        if generator.choice([True, False]):
            user_attrs["usr_" + key] = value
        if generator.choice([True, False]):
            system_attrs["sys_" + key] = value
    for i in range(generator.randint(4, 10)):
        if generator.choice([True, False]):
            intermediate_values[i] = generator.uniform(-10, 10)
    return FrozenTrial(
        number=0,  # dummy
        state=state,
        value=generator.uniform(-10, 10),
        datetime_start=datetime.now(),
        datetime_complete=datetime.now() if state.is_finished() else None,
        params=params,
        distributions=distributions,
        user_attrs=user_attrs,
        system_attrs=system_attrs,
        intermediate_values=intermediate_values,
        trial_id=0,  # dummy
    )
Example #19
def test_distributions() -> None:

    distributions = {"x": FloatDistribution(0, 10)}
    trial = _create_trial(
        value=0.2,
        params={"x": 1},
        distributions=dict(distributions),
    )
    assert trial.distributions == distributions

    distributions = {"x": FloatDistribution(1, 9)}
    trial.distributions = dict(distributions)
    assert trial.distributions == distributions
Example #20
def test_relative_parameters(storage_mode: str) -> None:

    relative_search_space = {
        "x": FloatDistribution(low=5, high=6),
        "y": FloatDistribution(low=5, high=6),
    }
    relative_params = {"x": 5.5, "y": 5.5, "z": 5.5}

    sampler = DeterministicRelativeSampler(relative_search_space,
                                           relative_params)  # type: ignore

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)

        def create_trial() -> Trial:

            return Trial(study,
                         study._storage.create_new_trial(study._study_id))

        # Suggested from `relative_params`.
        trial0 = create_trial()
        distribution0 = FloatDistribution(low=0, high=100)
        assert trial0._suggest("x", distribution0) == 5.5

        # Not suggested from `relative_params` (due to unknown parameter name).
        trial1 = create_trial()
        distribution1 = distribution0
        assert trial1._suggest("w", distribution1) != 5.5

        # Not suggested from `relative_params` (due to incompatible value range).
        trial2 = create_trial()
        distribution2 = FloatDistribution(low=0, high=5)
        assert trial2._suggest("x", distribution2) != 5.5

        # Error (due to incompatible distribution class).
        trial3 = create_trial()
        distribution3 = IntDistribution(low=1, high=100)
        with pytest.raises(ValueError):
            trial3._suggest("y", distribution3)

        # Error ('z' is included in `relative_params` but not in `relative_search_space`).
        trial4 = create_trial()
        distribution4 = FloatDistribution(low=0, high=10)
        with pytest.raises(ValueError):
            trial4._suggest("z", distribution4)

        # Error (due to incompatible distribution class).
        trial5 = create_trial()
        distribution5 = IntDistribution(low=1, high=100, log=True)
        with pytest.raises(ValueError):
            trial5._suggest("y", distribution5)
Example #21
def test_plot_contour_log_scale_and_str_category() -> None:

    # If the search space has three parameters, plot_contour generates nine plots.
    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={
                "param_a": 1e-6,
                "param_b": "100",
                "param_c": "one"
            },
            distributions={
                "param_a": FloatDistribution(1e-7, 1e-2, log=True),
                "param_b": CategoricalDistribution(["100", "101"]),
                "param_c": CategoricalDistribution(["one", "two"]),
            },
        ))
    study.add_trial(
        create_trial(
            value=1.0,
            params={
                "param_a": 1e-5,
                "param_b": "101",
                "param_c": "two"
            },
            distributions={
                "param_a": FloatDistribution(1e-7, 1e-2, log=True),
                "param_b": CategoricalDistribution(["100", "101"]),
                "param_c": CategoricalDistribution(["one", "two"]),
            },
        ))

    figure = plot_contour(study)
    subplots = [plot for plot in figure.flatten() if plot.has_data()]
    expected = {
        "param_a": [1e-6, 1e-5],
        "param_b": [0.0, 1.0],
        "param_c": [0.0, 1.0]
    }
    ranges = itertools.permutations(expected.keys(), 2)

    for plot, (yrange, xrange) in zip(subplots, ranges):
        # Take 5% axis padding into account.
        np.testing.assert_allclose(plot.get_xlim(),
                                   expected[xrange],
                                   atol=5e-2)
        np.testing.assert_allclose(plot.get_ylim(),
                                   expected[yrange],
                                   atol=5e-2)
    plt.savefig(BytesIO())
Example #22
def test_multi_objective_trial_with_infinite_value_ignored(
        target_idx: int, inf_value: float, evaluator: BaseImportanceEvaluator,
        n_trial: int) -> None:
    def _multi_objective_function(trial: Trial) -> Tuple[float, float]:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1, x2 * x3

    seed = 13

    study = create_study(directions=["minimize", "minimize"],
                         sampler=RandomSampler(seed=seed))
    study.optimize(_multi_objective_function, n_trials=n_trial)

    # A figure is created without a trial with an inf value.
    plot_param_importances(study,
                           evaluator=evaluator,
                           target=lambda t: t.values[target_idx])
    with BytesIO() as byte_io:
        plt.savefig(byte_io)
        figure_without_inf = byte_io.getvalue()

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            values=[inf_value, inf_value],
            params={
                "x1": 1.0,
                "x2": 1.0,
                "x3": 3.0
            },
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        ))

    # A figure is created with a trial with an inf value.
    plot_param_importances(study,
                           evaluator=evaluator,
                           target=lambda t: t.values[target_idx])
    with BytesIO() as byte_io:
        plt.savefig(byte_io)
        figure_with_inf = byte_io.getvalue()

    # The rendered figures should be identical with and without the inf trial,
    # because the trial whose objective value is infinite is ignored.
    assert len(figure_without_inf) > 0
    assert figure_without_inf == figure_with_inf
Example #23
def test_color_map(direction: str) -> None:
    study = create_study(direction=direction)
    for i in range(3):
        study.add_trial(
            create_trial(
                value=float(i),
                params={
                    "param_a": float(i),
                    "param_b": float(i)
                },
                distributions={
                    "param_a": FloatDistribution(0.0, 3.0),
                    "param_b": FloatDistribution(0.0, 3.0),
                },
            ))

    # `target` is `None`.
    line = plotly_plot_parallel_coordinate(study).data[0]["line"]
    assert COLOR_SCALE == [v[1] for v in line["colorscale"]]
    if direction == "minimize":
        assert line["reversescale"]
    else:
        assert not line["reversescale"]

    # When `target` is not `None`, `reversescale` is always `True`.
    line = plotly_plot_parallel_coordinate(
        study, target=lambda t: t.number).data[0]["line"]
    assert COLOR_SCALE == [v[1] for v in line["colorscale"]]
    assert line["reversescale"]

    # Multi-objective optimization.
    study = create_study(directions=[direction, direction])
    for i in range(3):
        study.add_trial(
            create_trial(
                values=[float(i), float(i)],
                params={
                    "param_a": float(i),
                    "param_b": float(i)
                },
                distributions={
                    "param_a": FloatDistribution(0.0, 3.0),
                    "param_b": FloatDistribution(0.0, 3.0),
                },
            ))
    line = plotly_plot_parallel_coordinate(
        study, target=lambda t: t.number).data[0]["line"]
    assert COLOR_SCALE == [v[1] for v in line["colorscale"]]
    assert line["reversescale"]
Example #24
def test_get_info_importances_nonfinite_removed(
        inf_value: float, evaluator: BaseImportanceEvaluator,
        n_trials: int) -> None:
    def _objective(trial: Trial) -> float:
        x1 = trial.suggest_float("x1", 0.1, 3)
        x2 = trial.suggest_float("x2", 0.1, 3, log=True)
        x3 = trial.suggest_float("x3", 2, 4, log=True)
        return x1 + x2 * x3

    seed = 13
    target_name = "Objective Value"

    study = create_study(sampler=RandomSampler(seed=seed))
    study.optimize(_objective, n_trials=n_trials)

    # Create param importances info without inf value.
    info_without_inf = _get_importances_info(study,
                                             evaluator=evaluator,
                                             params=None,
                                             target=None,
                                             target_name=target_name)

    # A trial with an inf value is added into the study manually.
    study.add_trial(
        create_trial(
            value=inf_value,
            params={
                "x1": 1.0,
                "x2": 1.0,
                "x3": 3.0
            },
            distributions={
                "x1": FloatDistribution(low=0.1, high=3),
                "x2": FloatDistribution(low=0.1, high=3, log=True),
                "x3": FloatDistribution(low=2, high=4, log=True),
            },
        ))

    # Create param importances info with inf value.
    info_with_inf = _get_importances_info(study,
                                          evaluator=evaluator,
                                          params=None,
                                          target=None,
                                          target_name=target_name)

    # The obtained info instances should be identical with and without the inf trial,
    # because the trial whose objective value is infinite is ignored.
    assert info_with_inf == info_without_inf
Example #25
def test_not_contained_param() -> None:
    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": FloatDistribution(1.0, 10.0)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_float("x", 10.0, 100.0) == 1.0

    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": FloatDistribution(1.0, 10.0, log=True)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_float("x", 10.0, 100.0, log=True) == 1.0

    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": FloatDistribution(1.0, 10.0, step=1.0)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_float("x", 10.0, 100.0, step=1.0) == 1.0

    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": IntDistribution(1, 10)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_int("x", 10, 100) == 1

    trial = create_trial(
        value=0.2,
        params={"x": 1},
        distributions={"x": IntDistribution(1, 10)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_int("x", 10, 100, 1) == 1

    trial = create_trial(
        value=0.2,
        params={"x": 1},
        distributions={"x": IntDistribution(1, 10, log=True)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_int("x", 10, 100, log=True) == 1
Example #26
def test_generate_contour_plot_for_few_observations(params: List[str]) -> None:
    study = create_study(direction="minimize")
    study.add_trial(
        create_trial(
            values=[0.0],
            params={
                "param_a": 1.0,
                "param_b": 2.0
            },
            distributions={
                "param_a": FloatDistribution(0.0, 3.0),
                "param_b": FloatDistribution(0.0, 3.0),
            },
        ))
    study.add_trial(
        create_trial(
            values=[2.0],
            params={"param_b": 0.0},
            distributions={"param_b": FloatDistribution(0.0, 3.0)},
        ))

    info = _get_contour_info(study, params=params)
    assert info == _ContourInfo(
        sorted_params=sorted(params),
        sub_plot_infos=[[
            _SubContourInfo(
                xaxis=_AxisInfo(
                    name=sorted(params)[0],
                    range=(1.0, 1.0),
                    is_log=False,
                    is_cat=False,
                    indices=[1.0],
                    values=[1.0, None],
                ),
                yaxis=_AxisInfo(
                    name=sorted(params)[1],
                    range=(-0.1, 2.1),
                    is_log=False,
                    is_cat=False,
                    indices=[-0.1, 0.0, 2.0, 2.1],
                    values=[2.0, 0.0],
                ),
                z_values={},
            )
        ]],
        reverse_scale=True,
        target_name="Objective Value",
    )
Example #27
def _create_study_with_log_scale_and_str_category_3d() -> Study:
    study = create_study()
    distributions = {
        "param_a": FloatDistribution(1e-7, 1e-2, log=True),
        "param_b": CategoricalDistribution(["100", "101"]),
        "param_c": CategoricalDistribution(["one", "two"]),
    }
    study.add_trial(
        create_trial(
            value=0.0,
            params={
                "param_a": 1e-6,
                "param_b": "101",
                "param_c": "one"
            },
            distributions=distributions,
        ))
    study.add_trial(
        create_trial(
            value=1.0,
            params={
                "param_a": 1e-5,
                "param_b": "100",
                "param_c": "two"
            },
            distributions=distributions,
        ))
    return study
Example #28
def test_sample_relative() -> None:

    relative_search_space: Dict[str, BaseDistribution] = {
        "a": FloatDistribution(low=0, high=5),
        "b": CategoricalDistribution(choices=("foo", "bar", "baz")),
        "c": IntDistribution(low=20, high=50),  # Not exist in `relative_params`.
    }
    relative_params = {
        "a": 3.2,
        "b": "baz",
    }
    unknown_param_value = 30

    sampler = FixedSampler(relative_search_space, relative_params, unknown_param_value)
    study = optuna.study.create_study(sampler=sampler)

    def objective(trial: Trial) -> float:

        # Predefined parameters are sampled by `sample_relative()` method.
        assert trial.suggest_float("a", 0, 5) == 3.2
        assert trial.suggest_categorical("b", ["foo", "bar", "baz"]) == "baz"

        # Other parameters are sampled by `sample_independent()` method.
        assert trial.suggest_int("c", 20, 50) == unknown_param_value
        assert trial.suggest_float("d", 1, 100, log=True) == unknown_param_value
        assert trial.suggest_float("e", 20, 40) == unknown_param_value

        return 0.0

    study.optimize(objective, n_trials=10, catch=())
    for trial in study.trials:
        assert trial.params == {"a": 3.2, "b": "baz", "c": 30, "d": 30, "e": 30}
Example #29
def test_called_single_methods_when_multi() -> None:

    state = TrialState.COMPLETE
    values = (0.2, 0.3)
    params = {"x": 10}
    distributions: Dict[str, BaseDistribution] = {
        "x": FloatDistribution(5, 12)
    }
    user_attrs = {"foo": "bar"}
    system_attrs = {"baz": "qux"}
    intermediate_values = {0: 0.0, 1: 0.1, 2: 0.1}

    trial = optuna.trial.create_trial(
        state=state,
        values=values,
        params=params,
        distributions=distributions,
        user_attrs=user_attrs,
        system_attrs=system_attrs,
        intermediate_values=intermediate_values,
    )

    with pytest.raises(RuntimeError):
        trial.value

    with pytest.raises(RuntimeError):
        trial.value = 0.1

    with pytest.raises(RuntimeError):
        trial.value = [0.1]  # type: ignore
Example #30
def test_create_trial(state: TrialState) -> None:
    value = 0.2
    params = {"x": 10}
    distributions: Dict[str, BaseDistribution] = {
        "x": FloatDistribution(5, 12)
    }
    user_attrs = {"foo": "bar"}
    system_attrs = {"baz": "qux"}
    intermediate_values = {0: 0.0, 1: 0.1, 2: 0.1}

    trial = create_trial(
        state=state,
        value=value,
        params=params,
        distributions=distributions,
        user_attrs=user_attrs,
        system_attrs=system_attrs,
        intermediate_values=intermediate_values,
    )

    assert isinstance(trial, FrozenTrial)
    assert trial.state == state
    assert trial.value == value
    assert trial.params == params
    assert trial.distributions == distributions
    assert trial.user_attrs == user_attrs
    assert trial.system_attrs == system_attrs
    assert trial.intermediate_values == intermediate_values
    assert trial.datetime_start is not None
    assert (trial.datetime_complete is not None) == state.is_finished()

    with pytest.raises(ValueError):
        create_trial(state=state, value=value, values=(value, ))