Beispiel #1
0
def test_suggest_loguniform(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None
    """Check LogUniformDistribution validation and Trial._suggest caching."""

    # Invalid bounds must be rejected at construction time.
    with pytest.raises(ValueError):
        LogUniformDistribution(low=1.0, high=0.9)
    with pytest.raises(ValueError):
        LogUniformDistribution(low=0.0, high=0.9)

    sample_mock = Mock(side_effect=[1.0, 2.0, 3.0])
    sampler = samplers.RandomSampler()

    with patch.object(sampler, "sample_independent", sample_mock) as mocked:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = LogUniformDistribution(low=0.1, high=4.0)

        # First suggestion draws from the sampler.
        assert trial._suggest("x", distribution) == 1.0
        # Re-suggesting the same name yields the already-stored value.
        assert trial._suggest("x", distribution) == 1.0
        # A new name produces a fresh draw.
        assert trial._suggest("y", distribution) == 3.0
        assert trial.params == {"x": 1.0, "y": 3.0}
        assert mocked.call_count == 3
Beispiel #2
0
def test_search_space_transform_untransform_params() -> None:
    """transform → untransform must reproduce the original parameter values."""

    # Includes degenerate distributions (x8, x9) whose range is a single point.
    search_space = {
        "x0": DiscreteUniformDistribution(0, 1, q=0.2),
        "x1": CategoricalDistribution(["foo", "bar", "baz", "qux"]),
        "x2": IntLogUniformDistribution(1, 10),
        "x3": CategoricalDistribution(["quux", "quuz"]),
        "x4": UniformDistribution(2, 3),
        "x5": LogUniformDistribution(1, 10),
        "x6": IntUniformDistribution(2, 4),
        "x7": CategoricalDistribution(["corge"]),
        "x8": UniformDistribution(-2, -2),
        "x9": LogUniformDistribution(1, 1),
    }

    params = {
        "x0": 0.2,
        "x1": "qux",
        "x2": 1,
        "x3": "quux",
        "x4": 2.0,
        "x5": 1.0,
        "x6": 2,
        "x7": "corge",
        "x8": -2.0,
        "x9": 1.0,
    }

    transform = _SearchSpaceTransform(search_space)
    roundtripped = transform.untransform(transform.transform(params))

    for name, value in params.items():
        assert roundtripped[name] == value
Beispiel #3
0
def test_suggest_loguniform(storage_mode: str) -> None:
    """Validate LogUniformDistribution bounds and Trial._suggest caching."""

    # Construction must fail for inverted or non-positive bounds.
    with pytest.raises(ValueError):
        LogUniformDistribution(low=1.0, high=0.9)
    with pytest.raises(ValueError):
        LogUniformDistribution(low=0.0, high=0.9)

    sample_mock = Mock(side_effect=[1.0, 2.0])
    sampler = samplers.RandomSampler()

    with patch.object(
            sampler, "sample_independent",
            sample_mock) as mocked, StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = LogUniformDistribution(low=0.1, high=4.0)

        # First suggestion samples; repeating the same name reuses the value.
        assert trial._suggest("x", distribution) == 1.0
        assert trial._suggest("x", distribution) == 1.0
        # A different name triggers the second (and last) sample.
        assert trial._suggest("y", distribution) == 2.0
        assert trial.params == {"x": 1.0, "y": 2.0}
        assert mocked.call_count == 2
Beispiel #4
0
def test_plot_parallel_coordinate_unique_hyper_param() -> None:
    """Dimensions stay well-formed when a hyperparameter has one unique value."""

    study = create_study()

    def add_trial(value, param_b):
        # Every trial shares "category_a": "preferred"; only param_b varies.
        study.add_trial(
            create_trial(
                value=value,
                params={"category_a": "preferred", "param_b": param_b},
                distributions={
                    "category_a": CategoricalDistribution(("preferred", "opt")),
                    "param_b": LogUniformDistribution(1, 1000),
                },
            )
        )

    add_trial(0.0, 30)

    # With a single trial, both hyperparameters contain unique values.
    figure = plot_parallel_coordinate(study)
    dims = figure.data[0]["dimensions"]
    assert len(dims) == 3
    assert dims[0]["label"] == "Objective Value"
    assert dims[0]["range"] == (0.0, 0.0)
    assert dims[0]["values"] == (0.0,)
    assert dims[1]["label"] == "category_a"
    assert dims[1]["range"] == (0, 0)
    assert dims[1]["values"] == (0.0,)
    assert dims[1]["ticktext"] == ("preferred",)
    assert dims[1]["tickvals"] == (0,)
    assert dims[2]["label"] == "param_b"
    # Log-distributed param is plotted on a log10 scale.
    assert dims[2]["range"] == (math.log10(30), math.log10(30))
    assert dims[2]["values"] == (math.log10(30),)
    assert dims[2]["ticktext"] == ("30",)
    assert dims[2]["tickvals"] == (math.log10(30),)

    add_trial(2.0, 20)

    # "category_a" still holds a single unique value across both trials.
    figure = plot_parallel_coordinate(study)
    dims = figure.data[0]["dimensions"]
    assert len(dims) == 3
    assert dims[1]["label"] == "category_a"
    assert dims[1]["range"] == (0, 0)
    assert dims[1]["values"] == (0.0, 0.0)
    assert dims[1]["ticktext"] == ("preferred",)
    assert dims[1]["tickvals"] == (0,)
def create_new_trial_object(trial_info_dict):
    """Build an Optuna trial from a plain dict describing one finished trial.

    Args:
        trial_info_dict: Mapping with keys "optimizer", "weight_decay",
            "learning_rate", "value", and "worker_id".

    Returns:
        The trial created by ``optuna.trial.create_trial`` with the worker id
        recorded in its user attributes.
    """
    return optuna.trial.create_trial(
        params={
            "optimizer": trial_info_dict["optimizer"],
            "weight_decay": trial_info_dict["weight_decay"],
            "learning_rate": trial_info_dict["learning_rate"],
        },
        distributions={
            "optimizer": CategoricalDistribution(choices=("Adam", "RMSprop", "SGD")),
            "weight_decay": LogUniformDistribution(1e-5, 1e-1),
            "learning_rate": LogUniformDistribution(1e-7, 1e-5),
        },
        value=trial_info_dict["value"],
        # Pass user_attrs at construction time: FrozenTrial.user_attrs is a
        # read-only property in recent Optuna, so assigning to it after the
        # trial is created fails.
        user_attrs={"worker_id": trial_info_dict["worker_id"]},
    )
Beispiel #6
0
def create_optuna_distribution_from_config(
        config: MutableMapping[str, Any]) -> BaseDistribution:
    """Translate a sweeper config mapping into an Optuna distribution.

    Raises:
        NotImplementedError: If the configured type has no Optuna counterpart.
    """
    kwargs = dict(config)
    if isinstance(config["type"], str):
        # Accept the enum member's name as a plain string.
        kwargs["type"] = DistributionType[config["type"]]
    param = DistributionConfig(**kwargs)

    if param.type == DistributionType.categorical:
        assert param.choices is not None
        return CategoricalDistribution(param.choices)

    if param.type == DistributionType.int:
        assert param.low is not None
        assert param.high is not None
        low, high = int(param.low), int(param.high)
        if param.log:
            return IntLogUniformDistribution(low, high)
        # A missing step defaults to 1.
        step = 1 if param.step is None else int(param.step)
        return IntUniformDistribution(low, high, step=step)

    if param.type == DistributionType.float:
        assert param.low is not None
        assert param.high is not None
        if param.log:
            return LogUniformDistribution(param.low, param.high)
        if param.step is not None:
            return DiscreteUniformDistribution(param.low, param.high,
                                               param.step)
        return UniformDistribution(param.low, param.high)

    raise NotImplementedError(
        f"{param.type} is not supported by Optuna sweeper.")
Beispiel #7
0
def test_plot_slice_log_scale() -> None:
    """plot_slice must render data for linear and log-scaled parameters."""

    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": UniformDistribution(0.0, 3.0),
                "y_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )

    # Single-parameter plots.
    # TODO(ytknzw): Add more specific assertion with the test case.
    for param in ("y_log", "x_linear"):
        assert plot_slice(study, params=[param]).has_data()

    # All parameters at once: one subplot per parameter.
    # TODO(ytknzw): Add more specific assertion with the test case.
    figure = plot_slice(study)
    assert len(figure) == 2
    assert figure[0].has_data()
    assert figure[1].has_data()
def create_optuna_distribution_from_override(override: Override) -> Any:
    """Map a Hydra sweep override onto an Optuna distribution.

    Non-sweep overrides are returned as their plain value.

    Raises:
        NotImplementedError: If the sweep kind has no Optuna counterpart.
    """
    value = override.value()
    if not override.is_sweep_override():
        return value

    if override.is_choice_sweep():
        assert isinstance(value, ChoiceSweep)
        # list(...) instead of an identity comprehension.
        choices = list(override.sweep_iterator(transformer=Transformer.encode))
        return CategoricalDistribution(choices)

    if override.is_range_sweep():
        # Ranges are enumerated into an explicit categorical choice set.
        choices = list(override.sweep_iterator(transformer=Transformer.encode))
        return CategoricalDistribution(choices)

    if override.is_interval_sweep():
        assert isinstance(value, IntervalSweep)
        if "log" in value.tags:
            if "int" in value.tags:
                return IntLogUniformDistribution(value.start, value.end)
            return LogUniformDistribution(value.start, value.end)
        if "int" in value.tags:
            return IntUniformDistribution(value.start, value.end)
        return UniformDistribution(value.start, value.end)

    raise NotImplementedError(
        "{} is not supported by Optuna sweeper.".format(override))
def test_plot_slice_log_scale() -> None:
    """The x axis must be log-typed exactly for log-distributed parameters."""

    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": UniformDistribution(0.0, 3.0),
                "y_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )

    # Single-parameter plots: the log param gets a log axis, the linear none.
    assert plot_slice(study, params=["y_log"]).layout["xaxis_type"] == "log"
    assert plot_slice(study, params=["x_linear"]).layout["xaxis_type"] is None

    # Multi-parameter plot: axis type is set per subplot.
    figure = plot_slice(study)
    assert figure.layout["xaxis_type"] is None
    assert figure.layout["xaxis2_type"] == "log"
Beispiel #10
0
    def suggest_float(
        self,
        name: str,
        low: float,
        high: float,
        *,
        step: Optional[float] = None,
        log: bool = False,
    ) -> float:
        """Suggest a float in [low, high], optionally discretized or log-scaled.

        Args:
            name: A parameter name.
            low: Lower endpoint of the range.
            high: Upper endpoint of the range.
            step: Discretization step; mutually exclusive with ``log``.
            log: Whether to sample on a logarithmic scale.

        Returns:
            A suggested float value.

        Raises:
            ValueError: If both ``step`` and ``log`` are given.
        """
        if step is not None:
            if log:
                # Discretization and log scaling cannot be combined.
                raise ValueError(
                    "The parameter `step` is not supported when `log` is True."
                )
            return self._suggest(
                name, DiscreteUniformDistribution(low=low, high=high, q=step))
        if log:
            return self._suggest(
                name, LogUniformDistribution(low=low, high=high))
        return self._suggest(name, UniformDistribution(low=low, high=high))
Beispiel #11
0
def test_distributions(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None
    """Each suggest_* call must record its matching distribution object."""

    def objective(trial):
        # type: (Trial) -> float
        trial.suggest_uniform("a", 0, 10)
        trial.suggest_loguniform("b", 0.1, 10)
        trial.suggest_discrete_uniform("c", 0, 10, 1)
        trial.suggest_int("d", 0, 10)
        trial.suggest_categorical("e", ["foo", "bar", "baz"])
        trial.suggest_int("f", 1, 10, log=True)
        return 1.0

    study = create_study(storage_init_func())
    study.optimize(objective, n_trials=1)

    expected = {
        "a": UniformDistribution(low=0, high=10),
        "b": LogUniformDistribution(low=0.1, high=10),
        "c": DiscreteUniformDistribution(low=0, high=10, q=1),
        "d": IntUniformDistribution(low=0, high=10),
        "e": CategoricalDistribution(choices=("foo", "bar", "baz")),
        "f": IntLogUniformDistribution(low=1, high=10),
    }
    assert study.best_trial.distributions == expected
Beispiel #12
0
def test_plot_slice_log_scale() -> None:
    """Slice plots label axes by parameter name and draw no line artists."""

    study = create_study()
    study.add_trial(
        create_trial(
            value=0.0,
            params={"x_linear": 1.0, "y_log": 1e-3},
            distributions={
                "x_linear": UniformDistribution(0.0, 3.0),
                "y_log": LogUniformDistribution(1e-5, 1.0),
            },
        )
    )

    # Single-parameter plots.
    for param in ("y_log", "x_linear"):
        axes = plot_slice(study, params=[param])
        assert len(axes.get_lines()) == 0
        assert axes.xaxis.label.get_text() == param

    # All parameters: one subplot per parameter, in this order.
    axes = plot_slice(study)
    assert len(axes) == 2
    for subplot, param in zip(axes, ("x_linear", "y_log")):
        assert len(subplot.get_lines()) == 0
        assert subplot.xaxis.label.get_text() == param
Beispiel #13
0
def test_sample_single_distribution(
        sampler_class: Callable[[], BaseSampler]) -> None:
    """Samplers must return the only feasible point of degenerate spaces."""

    # Every distribution below admits exactly one value: 1 (or 1.0).
    relative_search_space = {
        "a": UniformDistribution(low=1.0, high=1.0),
        "b": LogUniformDistribution(low=1.0, high=1.0),
        "c": DiscreteUniformDistribution(low=1.0, high=1.0, q=1.0),
        "d": IntUniformDistribution(low=1, high=1),
        "e": IntLogUniformDistribution(low=1, high=1),
        "f": CategoricalDistribution([1]),
        "g": FloatDistribution(low=1.0, high=1.0),
        "h": FloatDistribution(low=1.0, high=1.0, log=True),
        "i": FloatDistribution(low=1.0, high=1.0, step=1.0),
        "j": IntDistribution(low=1, high=1),
        "k": IntDistribution(low=1, high=1, log=True),
    }

    with warnings.catch_warnings():
        # Some samplers warn on construction because they are experimental.
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = sampler_class()
    study = optuna.study.create_study(sampler=sampler)

    # Two trials so that model-based samplers also construct their model.
    for _ in range(2):
        trial = study.ask(fixed_distributions=relative_search_space)
        study.tell(trial, 1.0)
        for name in relative_search_space:
            assert trial.params[name] == 1
def test_set_and_get_trial_param(storage_init_func):
    # type: (Callable[[], BaseStorage]) -> None
    """Exercise set_trial_param/get_trial_param across studies and trials."""

    storage = storage_init_func()

    # Two trials in one study plus a third trial in a separate study.
    study_id = storage.create_new_study()
    first_trial = storage.create_new_trial(study_id)
    second_trial = storage.create_new_trial(study_id)
    other_study_trial = storage.create_new_trial(storage.create_new_study())

    # Distributions shared by the assertions below.
    uniform_x = UniformDistribution(low=1.0, high=2.0)
    stations_long = CategoricalDistribution(choices=('Shibuya', 'Ebisu',
                                                     'Meguro'))
    stations_short = CategoricalDistribution(choices=('Shibuya', 'Shinsen'))
    log_z = LogUniformDistribution(low=1.0, high=100.0)

    # New params can be written once...
    assert storage.set_trial_param(first_trial, 'x', 0.5, uniform_x)
    assert storage.set_trial_param(first_trial, 'y', 2, stations_long)
    # ...and read back in both internal and external representation.
    assert storage.get_trial_param(first_trial, 'x') == 0.5
    assert storage.get_trial_param(first_trial, 'y') == 2
    assert storage.get_trial(first_trial).params == {'x': 0.5, 'y': 'Meguro'}
    # Re-setting an existing name is refused.
    assert not storage.set_trial_param(first_trial, 'x', 0.6, uniform_x)

    # A sibling trial in the same study takes its own params.
    assert storage.set_trial_param(second_trial, 'x', 0.3, uniform_x)
    assert storage.set_trial_param(second_trial, 'z', 0.1, log_z)
    assert storage.get_trial_param(second_trial, 'x') == 0.3
    assert storage.get_trial_param(second_trial, 'z') == 0.1
    assert storage.get_trial(second_trial).params == {'x': 0.3, 'z': 0.1}

    # Mismatched distributions are rejected...
    with pytest.raises(ValueError):
        storage.set_trial_param(second_trial, 'x', 0.5, log_z)
    # ...including the same categories given in a different order.
    with pytest.raises(ValueError):
        storage.set_trial_param(
            second_trial, 'y', 2,
            CategoricalDistribution(choices=('Meguro', 'Shibuya', 'Ebisu')))

    # A trial in another study.
    if isinstance(storage, InMemoryStorage):
        with pytest.raises(ValueError):
            # InMemoryStorage shares the same study if create_new_study is additionally invoked.
            # Thus, the following line should fail due to distribution incompatibility.
            storage.set_trial_param(other_study_trial, 'y', 1, stations_short)
    else:
        assert storage.set_trial_param(other_study_trial, 'y', 1,
                                       stations_short)
        assert storage.get_trial_param(other_study_trial, 'y') == 1
        assert storage.get_trial(other_study_trial).params == {'y': 'Shinsen'}
Beispiel #15
0
def test_set_and_get_trial_param(storage_init_func):
    # type: (Callable[[], BaseStorage]) -> None
    """Exercise trial-param storage: set, get, duplicates, and mismatches."""

    storage = storage_init_func()

    # Two trials in one study and a third trial in a freshly created study.
    study_id = storage.create_new_study()
    trial_a = storage.create_new_trial(study_id)
    trial_b = storage.create_new_trial(study_id)
    trial_c = storage.create_new_trial(storage.create_new_study())

    dist_x = UniformDistribution(low=1.0, high=2.0)
    dist_y_full = CategoricalDistribution(choices=("Shibuya", "Ebisu",
                                                   "Meguro"))
    dist_y_small = CategoricalDistribution(choices=("Shibuya", "Shinsen"))
    dist_z = LogUniformDistribution(low=1.0, high=100.0)

    # trial_a: fresh params are accepted...
    assert storage.set_trial_param(trial_a, "x", 0.5, dist_x)
    assert storage.set_trial_param(trial_a, "y", 2, dist_y_full)
    # ...readable as internal and external representations...
    assert storage.get_trial_param(trial_a, "x") == 0.5
    assert storage.get_trial_param(trial_a, "y") == 2
    assert storage.get_trial(trial_a).params == {"x": 0.5, "y": "Meguro"}
    # ...and a duplicate name is refused.
    assert not storage.set_trial_param(trial_a, "x", 0.6, dist_x)

    # trial_b: independent params within the same study.
    assert storage.set_trial_param(trial_b, "x", 0.3, dist_x)
    assert storage.set_trial_param(trial_b, "z", 0.1, dist_z)
    assert storage.get_trial_param(trial_b, "x") == 0.3
    assert storage.get_trial_param(trial_b, "z") == 0.1
    assert storage.get_trial(trial_b).params == {"x": 0.3, "z": 0.1}

    # trial_b: an incompatible distribution for a known name fails...
    with pytest.raises(ValueError):
        storage.set_trial_param(trial_b, "x", 0.5, dist_z)
    # ...as does the same categorical with reordered choices.
    with pytest.raises(ValueError):
        storage.set_trial_param(
            trial_b, "y", 2,
            CategoricalDistribution(choices=("Meguro", "Shibuya", "Ebisu")))

    # trial_c lives in another study; InMemoryStorage is skipped here.
    if not isinstance(storage, InMemoryStorage):
        assert storage.set_trial_param(trial_c, "y", 1, dist_y_small)
        assert storage.get_trial_param(trial_c, "y") == 1
        assert storage.get_trial(trial_c).params == {"y": "Shinsen"}
Beispiel #16
0
    def search_space() -> Dict[str, BaseDistribution]:
        """Return the fixed search space covering each distribution kind."""
        return dict(
            c=CategoricalDistribution(("a", "b")),
            d=DiscreteUniformDistribution(-1, 9, 2),
            i=IntUniformDistribution(-1, 1),
            ii=IntUniformDistribution(-1, 3, 2),
            l=LogUniformDistribution(0.001, 0.1),
            u=UniformDistribution(-2, 2),
        )
Beispiel #17
0
    def search_space():
        # type: () -> Dict[str, BaseDistribution]
        """Return the fixed search space covering each distribution kind."""
        return dict(
            c=CategoricalDistribution(('a', 'b')),
            d=DiscreteUniformDistribution(-1, 9, 2),
            i=IntUniformDistribution(-1, 1),
            l=LogUniformDistribution(0.001, 0.1),
            u=UniformDistribution(-2, 2),
        )
Beispiel #18
0
    def suggest_loguniform(self, name, low, high):
        # type: (str, float, float) -> float
        """Suggest a value for the continuous parameter.

        The value is drawn from the range :math:`[\\mathsf{low}, \\mathsf{high})`
        on a logarithmic scale. In the degenerate case
        :math:`\\mathsf{low} = \\mathsf{high}`, :math:`\\mathsf{low}` itself is
        returned.

        Example:

            Suggest penalty parameter ``C`` of `SVC <https://scikit-learn.org/stable/modules/
            generated/sklearn.svm.SVC.html>`_.

            .. testcode::

                import numpy as np
                from sklearn.datasets import load_iris
                from sklearn.model_selection import train_test_split
                from sklearn.svm import SVC

                import optuna

                X, y = load_iris(return_X_y=True)
                X_train, X_valid, y_train, y_valid = train_test_split(X, y)

                def objective(trial):
                    c = trial.suggest_loguniform('c', 1e-5, 1e2)
                    clf = SVC(C=c, gamma='scale', random_state=0)
                    clf.fit(X_train, y_train)
                    return clf.score(X_valid, y_valid)

                study = optuna.create_study(direction='maximize')
                study.optimize(objective, n_trials=3)

        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is
                included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is
                excluded from the range.

        Returns:
            A suggested float value.
        """
        distribution = LogUniformDistribution(low=low, high=high)
        self._check_distribution(name, distribution)

        # Degenerate range: nothing to sample, record and return the endpoint.
        if distribution.low == distribution.high:
            return self._set_new_param_or_get_existing(
                name, distribution.low, distribution)

        return self._suggest(name, distribution)
Beispiel #19
0
def _generate_trial(generator: random.Random) -> FrozenTrial:
    """Create a randomized FrozenTrial drawing all randomness from *generator*.

    The sequence of calls on *generator* is fixed, so a seeded generator
    reproduces the same trial.
    """
    # Candidate params: (sampled value, matching distribution) per name.
    candidate_params = {
        "paramA": (generator.uniform(0, 1), UniformDistribution(0, 1)),
        "paramB": (generator.uniform(1, 2), LogUniformDistribution(1, 2)),
        "paramC": (
            generator.choice(["CatA", "CatB", "CatC"]),
            CategoricalDistribution(("CatA", "CatB", "CatC")),
        ),
        "paramD": (generator.uniform(-3, 0), UniformDistribution(-3, 0)),
        "paramE": (generator.choice([0.1, 0.2]),
                   CategoricalDistribution((0.1, 0.2))),
    }
    candidate_attrs = {
        "attrA": "valueA",
        "attrB": 1,
        "attrC": None,
        "attrD": {"baseline_score": 0.001,
                  "tags": ["image", "classification"]},
    }
    state = generator.choice(ALL_STATES)

    chosen_params = {}
    chosen_dists = {}
    user_attrs = {}
    system_attrs = {}
    intermediate_values = {}
    # Keep each candidate param/attr independently with probability 1/2.
    for name, (value, dist) in candidate_params.items():
        if generator.choice([True, False]):
            chosen_params[name] = value
            chosen_dists[name] = dist
    for name, value in candidate_attrs.items():
        if generator.choice([True, False]):
            user_attrs["usr_" + name] = value
        if generator.choice([True, False]):
            system_attrs["sys_" + name] = value
    for step in range(generator.randint(4, 10)):
        if generator.choice([True, False]):
            intermediate_values[step] = generator.uniform(-10, 10)

    return FrozenTrial(
        number=0,  # dummy
        state=state,
        value=generator.uniform(-10, 10),
        datetime_start=datetime.now(),
        datetime_complete=datetime.now() if state.is_finished() else None,
        params=chosen_params,
        distributions=chosen_dists,
        user_attrs=user_attrs,
        system_attrs=system_attrs,
        intermediate_values=intermediate_values,
        trial_id=0,  # dummy
    )
Beispiel #20
0
def test_plot_parallel_coordinate_log_params() -> None:
    """Log-distributed params must be laid out on a log10 scale."""

    study = create_study()

    def add_trial(value, param_a, param_b):
        # All trials share the same pair of log-uniform distributions.
        study.add_trial(
            create_trial(
                value=value,
                params={"param_a": param_a, "param_b": param_b},
                distributions={
                    "param_a": LogUniformDistribution(1e-7, 1e-2),
                    "param_b": LogUniformDistribution(1, 1000),
                },
            )
        )

    add_trial(0.0, 1e-6, 10)
    add_trial(1.0, 2e-5, 200)
    add_trial(0.1, 1e-4, 30)

    figure = plot_parallel_coordinate(study)
    dims = figure.data[0]["dimensions"]
    assert len(dims) == 3

    assert dims[0]["label"] == "Objective Value"
    assert dims[0]["range"] == (0.0, 1.0)
    assert dims[0]["values"] == (0.0, 1.0, 0.1)

    # param_a spans [1e-6, 1e-4], i.e. [-6, -4] in log10 coordinates.
    assert dims[1]["label"] == "param_a"
    assert dims[1]["range"] == (-6.0, -4.0)
    assert dims[1]["values"] == (-6, math.log10(2e-5), -4)
    assert dims[1]["ticktext"] == ("1e-06", "1e-05", "0.0001")
    assert dims[1]["tickvals"] == (-6, -5, -4.0)

    # param_b spans [10, 200] in log10 coordinates.
    assert dims[2]["label"] == "param_b"
    assert dims[2]["range"] == (1.0, math.log10(200))
    assert dims[2]["values"] == (1.0, math.log10(200), math.log10(30))
    assert dims[2]["ticktext"] == ("10", "100", "200")
    assert dims[2]["tickvals"] == (1.0, 2.0, math.log10(200))
Beispiel #21
0
def test_plot_parallel_coordinate_unique_hyper_param() -> None:
    """Plotting succeeds when a hyperparameter takes only one unique value."""

    study = create_study()

    def add_trial(value, param_b):
        # "category_a" is always "preferred"; only param_b varies.
        study.add_trial(
            create_trial(
                value=value,
                params={"category_a": "preferred", "param_b": param_b},
                distributions={
                    "category_a": CategoricalDistribution(("preferred", "opt")),
                    "param_b": LogUniformDistribution(1, 1000),
                },
            )
        )

    # With a single trial every hyperparameter has a unique value.
    add_trial(0.0, 30)
    assert plot_parallel_coordinate(study).has_data()

    # After a second trial, "category_a" is still constant.
    add_trial(2.0, 20)
    assert plot_parallel_coordinate(study).has_data()
Beispiel #22
0
def test_frozen_trial_validate():
    # type: () -> None
    """_validate must accept a well-formed trial and reject corrupted ones."""

    valid_trial = _create_frozen_trial()
    valid_trial._validate()  # A freshly created trial passes validation.

    def corrupted(**overrides):
        # Shallow-copy the valid trial and override selected attributes.
        trial = copy.copy(valid_trial)
        for attr, attr_value in overrides.items():
            setattr(trial, attr, attr_value)
        return trial

    # `datetime_start` must be set.
    with pytest.raises(ValueError):
        corrupted(datetime_start=None)._validate()

    # A RUNNING trial must not carry `datetime_complete`.
    with pytest.raises(ValueError):
        corrupted(state=TrialState.RUNNING)._validate()

    # Conversely, a finished trial must carry `datetime_complete`.
    for state in [TrialState.COMPLETE, TrialState.PRUNED, TrialState.FAIL]:
        with pytest.raises(ValueError):
            corrupted(state=state, datetime_complete=None)._validate()

    # A COMPLETE trial must have a value.
    with pytest.raises(ValueError):
        corrupted(value=None)._validate()

    # `params` and `distributions` must be mutually consistent.
    inconsistent_pairs = [
        # `params` has an extra element.
        ({"x": 0.1, "y": 0.5}, {"x": UniformDistribution(0, 1)}),
        # `distributions` has an extra element.
        ({"x": 0.1}, {"x": UniformDistribution(0, 1),
                      "y": LogUniformDistribution(0, 1)}),
        # The value of `x` isn't contained in the distribution.
        ({"x": -0.5}, {"x": UniformDistribution(0, 1)}),
    ]  # type: List[Tuple[Dict[str, Any], Dict[str, BaseDistribution]]]
    for params, distributions in inconsistent_pairs:
        with pytest.raises(ValueError):
            corrupted(params=params, distributions=distributions)._validate()
Beispiel #23
0
def test_not_contained_param() -> None:
    """Out-of-range suggestions on a fixed trial warn and return the stored value."""

    # (stored params, distributions, out-of-range suggestion, expected value)
    cases = [
        ({"x": 1.0}, {"x": UniformDistribution(1.0, 10.0)},
         lambda t: t.suggest_float("x", 10.0, 100.0), 1.0),
        ({"x": 1.0}, {"x": LogUniformDistribution(1.0, 10.0)},
         lambda t: t.suggest_float("x", 10.0, 100.0, log=True), 1.0),
        ({"x": 1.0}, {"x": DiscreteUniformDistribution(1.0, 10.0, 1.0)},
         lambda t: t.suggest_float("x", 10.0, 100.0, step=1.0), 1.0),
        ({"x": 1.0}, {"x": IntUniformDistribution(1, 10)},
         lambda t: t.suggest_int("x", 10, 100), 1),
        ({"x": 1}, {"x": IntUniformDistribution(1, 10, 1)},
         lambda t: t.suggest_int("x", 10, 100, 1), 1),
        ({"x": 1}, {"x": IntLogUniformDistribution(1, 10)},
         lambda t: t.suggest_int("x", 10, 100, log=True), 1),
    ]

    for params, distributions, suggest, expected in cases:
        trial = create_trial(
            value=0.2, params=params, distributions=distributions)
        # The stored value lies outside the requested range: warn, keep it.
        with pytest.warns(UserWarning):
            assert suggest(trial) == expected
Beispiel #24
0
def _collect_encoded_choices(override: Override) -> List[CategoricalChoiceType]:
    """Materialize an override's sweep values, asserting each is a primitive type."""
    choices: List[CategoricalChoiceType] = []
    for x in override.sweep_iterator(transformer=Transformer.encode):
        assert isinstance(
            x, (str, int, float, bool)
        ), f"A choice sweep expects str, int, float, or bool type. Got {type(x)}."
        choices.append(x)
    return choices


def create_optuna_distribution_from_override(override: Override) -> Any:
    """Convert a Hydra ``Override`` into an Optuna distribution.

    Non-sweep overrides are returned as their plain value. Choice sweeps and
    shuffled range sweeps become ``CategoricalDistribution``; ordered range
    sweeps become ``IntUniformDistribution``; interval sweeps become uniform
    (or log-uniform, when tagged "log") distributions, int- or float-typed
    depending on the interval endpoints.

    Raises:
        NotImplementedError: If the sweep kind is not supported.
    """
    value = override.value()
    if not override.is_sweep_override():
        return value

    if override.is_choice_sweep():
        assert isinstance(value, ChoiceSweep)
        return CategoricalDistribution(_collect_encoded_choices(override))

    if override.is_range_sweep():
        assert isinstance(value, RangeSweep)
        assert value.start is not None
        assert value.stop is not None
        if value.shuffle:
            # A shuffled range is handled like an unordered set of choices.
            return CategoricalDistribution(_collect_encoded_choices(override))
        return IntUniformDistribution(int(value.start),
                                      int(value.stop),
                                      step=int(value.step))

    if override.is_interval_sweep():
        assert isinstance(value, IntervalSweep)
        assert value.start is not None
        assert value.end is not None
        if "log" in value.tags:
            # Both endpoints integral -> int log distribution, else float.
            if isinstance(value.start, int) and isinstance(value.end, int):
                return IntLogUniformDistribution(int(value.start),
                                                 int(value.end))
            return LogUniformDistribution(value.start, value.end)
        else:
            if isinstance(value.start, int) and isinstance(value.end, int):
                return IntUniformDistribution(value.start, value.end)
            return UniformDistribution(value.start, value.end)

    raise NotImplementedError(
        f"{override} is not supported by Optuna sweeper.")
Beispiel #25
0
def restore_old_distribution(distribution_json: str) -> str:
    """Map a serialized new-style distribution back to its legacy equivalent.

    ``FloatDistribution``/``IntDistribution`` are translated to the matching
    pre-unification classes; categorical (and anything else) passes through
    unchanged. Returns the legacy distribution serialized to JSON.
    """
    distribution = json_to_distribution(distribution_json)
    old_distribution: BaseDistribution

    if isinstance(distribution, FloatDistribution):
        # Float: log -> LogUniform, stepped -> DiscreteUniform, else Uniform.
        if distribution.log:
            old_distribution = LogUniformDistribution(
                low=distribution.low, high=distribution.high
            )
        elif distribution.step is not None:
            old_distribution = DiscreteUniformDistribution(
                low=distribution.low, high=distribution.high, q=distribution.step
            )
        else:
            old_distribution = UniformDistribution(
                low=distribution.low, high=distribution.high
            )
    elif isinstance(distribution, IntDistribution):
        # Int: pick the legacy class by the log flag; both take the same args.
        int_cls = IntLogUniformDistribution if distribution.log else IntUniformDistribution
        old_distribution = int_cls(
            low=distribution.low,
            high=distribution.high,
            step=distribution.step,
        )
    else:
        # Categorical distributions are identical in both schemes.
        old_distribution = distribution

    return distribution_to_json(old_distribution)
Beispiel #26
0
def test_is_log_scale():
    # type: () -> None

    study = create_study()
    # First trial only has a linear-scale parameter.
    study._append_trial(
        value=0.0,
        params=dict(param_linear=1.0),
        distributions=dict(param_linear=UniformDistribution(0.0, 3.0)),
    )
    # Second trial adds a parameter drawn from a log-uniform distribution.
    study._append_trial(
        value=2.0,
        params=dict(param_linear=2.0, param_log=1e-3),
        distributions=dict(
            param_linear=UniformDistribution(0.0, 3.0),
            param_log=LogUniformDistribution(1e-5, 1.0),
        ),
    )
    assert _is_log_scale(study.trials, "param_log")
    assert not _is_log_scale(study.trials, "param_linear")
Beispiel #27
0
def test_frozen_trial_suggest_loguniform() -> None:
    """A FrozenTrial replays the stored param; unknown names raise ValueError."""

    distributions = {"x": LogUniformDistribution(0.1, 1.0)}
    trial = FrozenTrial(
        number=0,
        trial_id=0,
        state=TrialState.COMPLETE,
        value=0.2,
        datetime_start=datetime.datetime.now(),
        datetime_complete=datetime.datetime.now(),
        params={"x": 0.99},
        distributions=distributions,
        user_attrs={},
        system_attrs={},
        intermediate_values={},
    )

    # Suggesting a stored parameter returns its recorded value.
    assert trial.suggest_loguniform("x", 0.1, 1.0) == 0.99

    # A parameter that was never recorded cannot be suggested.
    with pytest.raises(ValueError):
        trial.suggest_loguniform("y", 0.0, 1.0)
Beispiel #28
0
def test_is_log_scale():
    # type: () -> None

    study = create_study()
    # A trial with a linear-scale parameter only.
    study._append_trial(
        value=0.0,
        params={'param_linear': 1.0},
        distributions={'param_linear': UniformDistribution(0.0, 3.0)},
    )
    # A trial that also samples a parameter on a log scale.
    study._append_trial(
        value=2.0,
        params={'param_linear': 2.0, 'param_log': 1e-3},
        distributions={
            'param_linear': UniformDistribution(0.0, 3.0),
            'param_log': LogUniformDistribution(1e-5, 1.),
        },
    )
    assert visualization._is_log_scale(study.trials, 'param_log')
    assert not visualization._is_log_scale(study.trials, 'param_linear')
Beispiel #29
0
def test_is_log_scale() -> None:

    study = create_study()

    # Trial 1: linear parameter only.
    linear_only = create_trial(
        value=0.0,
        params={"param_linear": 1.0},
        distributions={"param_linear": UniformDistribution(0.0, 3.0)},
    )
    study.add_trial(linear_only)

    # Trial 2: linear parameter plus a log-uniform one.
    with_log = create_trial(
        value=2.0,
        params={"param_linear": 2.0, "param_log": 1e-3},
        distributions={
            "param_linear": UniformDistribution(0.0, 3.0),
            "param_log": LogUniformDistribution(1e-5, 1.0),
        },
    )
    study.add_trial(with_log)

    assert _is_log_scale(study.trials, "param_log")
    assert not _is_log_scale(study.trials, "param_linear")
Beispiel #30
0
def test_distributions(storage_mode: str) -> None:
    """Each suggest_* call records the expected distribution on the trial."""

    expected = {
        "a": UniformDistribution(low=0, high=10),
        "b": LogUniformDistribution(low=0.1, high=10),
        "c": DiscreteUniformDistribution(low=0, high=10, q=1),
        "d": IntUniformDistribution(low=0, high=10),
        "e": CategoricalDistribution(choices=("foo", "bar", "baz")),
        "f": IntLogUniformDistribution(low=1, high=10),
    }

    def objective(trial: Trial) -> float:
        trial.suggest_float("a", 0, 10)
        trial.suggest_float("b", 0.1, 10, log=True)
        trial.suggest_float("c", 0, 10, step=1)
        trial.suggest_int("d", 0, 10)
        trial.suggest_categorical("e", ["foo", "bar", "baz"])
        trial.suggest_int("f", 1, 10, log=True)
        return 1.0

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage)
        study.optimize(objective, n_trials=1)
        assert study.best_trial.distributions == expected