def test_suggest_int_log(storage_init_func: Callable[[], storages.BaseStorage]) -> None:
    mock = Mock()
    mock.side_effect = [1, 2]
    sampler = samplers.RandomSampler()

    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))
    distribution = IntLogUniformDistribution(low=1, high=3)
    with patch.object(sampler, "sample_independent", mock) as mock_object:
        assert trial._suggest("x", distribution) == 1  # Test suggesting a param.
        assert trial._suggest("x", distribution) == 1  # Test suggesting the same param.
        assert trial._suggest("y", distribution) == 2  # Test suggesting a different param.
        assert trial.params == {"x": 1, "y": 2}
        assert mock_object.call_count == 2

    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))
    with warnings.catch_warnings():
        # UserWarning will be raised since [0.5, 10] is not divisible by 1.
        warnings.simplefilter("ignore", category=UserWarning)
        with pytest.raises(ValueError):
            trial.suggest_int("z", 0.5, 10, log=True)  # type: ignore

    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))
    with pytest.raises(ValueError):
        trial.suggest_int("w", 1, 3, step=2, log=True)
def test_sample_single_distribution(sampler_class: Callable[[], BaseSampler]) -> None:
    relative_search_space = {
        "a": UniformDistribution(low=1.0, high=1.0),
        "b": LogUniformDistribution(low=1.0, high=1.0),
        "c": DiscreteUniformDistribution(low=1.0, high=1.0, q=1.0),
        "d": IntUniformDistribution(low=1, high=1),
        "e": IntLogUniformDistribution(low=1, high=1),
        "f": CategoricalDistribution([1]),
        "g": FloatDistribution(low=1.0, high=1.0),
        "h": FloatDistribution(low=1.0, high=1.0, log=True),
        "i": FloatDistribution(low=1.0, high=1.0, step=1.0),
        "j": IntDistribution(low=1, high=1),
        "k": IntDistribution(low=1, high=1, log=True),
    }

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
        sampler = sampler_class()

    study = optuna.study.create_study(sampler=sampler)

    # We need to test the construction of the model, so we should set `n_trials >= 2`.
    for _ in range(2):
        trial = study.ask(fixed_distributions=relative_search_space)
        study.tell(trial, 1.0)
        for param_name in relative_search_space.keys():
            assert trial.params[param_name] == 1
def suggest_int(self, name: str, low: int, high: int, step: int = 1, log: bool = False) -> int:
    if step != 1:
        if log:
            raise ValueError(
                "The parameter `step != 1` is not supported when `log` is True. "
                "The specified `step` is {}.".format(step)
            )
        else:
            distribution: Union[
                IntUniformDistribution, IntLogUniformDistribution
            ] = IntUniformDistribution(low=low, high=high, step=step)
    else:
        if log:
            distribution = IntLogUniformDistribution(low=low, high=high)
        else:
            distribution = IntUniformDistribution(low=low, high=high, step=step)

    return int(self._suggest(name, distribution))
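# A minimal usage sketch for the `suggest_int` logic above, assuming an Optuna
# version that still ships `IntUniformDistribution`/`IntLogUniformDistribution`
# (newer releases consolidate both into `IntDistribution`).
import optuna


def objective(trial):
    # Samples from {1, 2, ..., 100} with log-scaled density; internally this
    # builds an IntLogUniformDistribution as shown above.
    n_units = trial.suggest_int("n_units", 1, 100, log=True)
    return float(n_units)


study = optuna.create_study()
study.optimize(objective, n_trials=3)

# Passing both step != 1 and log=True hits the ValueError branch coded above:
#     trial.suggest_int("n_units", 1, 100, step=2, log=True)  # ValueError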
def test_distributions(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None

    def objective(trial):
        # type: (Trial) -> float

        trial.suggest_uniform("a", 0, 10)
        trial.suggest_loguniform("b", 0.1, 10)
        trial.suggest_discrete_uniform("c", 0, 10, 1)
        trial.suggest_int("d", 0, 10)
        trial.suggest_categorical("e", ["foo", "bar", "baz"])
        trial.suggest_int("f", 1, 10, log=True)
        return 1.0

    study = create_study(storage_init_func())
    study.optimize(objective, n_trials=1)

    assert study.best_trial.distributions == {
        "a": UniformDistribution(low=0, high=10),
        "b": LogUniformDistribution(low=0.1, high=10),
        "c": DiscreteUniformDistribution(low=0, high=10, q=1),
        "d": IntUniformDistribution(low=0, high=10),
        "e": CategoricalDistribution(choices=("foo", "bar", "baz")),
        "f": IntLogUniformDistribution(low=1, high=10),
    }
def create_optuna_distribution_from_config(config: MutableMapping[str, Any]) -> BaseDistribution:
    kwargs = dict(config)
    if isinstance(config["type"], str):
        kwargs["type"] = DistributionType[config["type"]]
    param = DistributionConfig(**kwargs)
    if param.type == DistributionType.categorical:
        assert param.choices is not None
        return CategoricalDistribution(param.choices)
    if param.type == DistributionType.int:
        assert param.low is not None
        assert param.high is not None
        if param.log:
            return IntLogUniformDistribution(int(param.low), int(param.high))
        step = int(param.step) if param.step is not None else 1
        return IntUniformDistribution(int(param.low), int(param.high), step=step)
    if param.type == DistributionType.float:
        assert param.low is not None
        assert param.high is not None
        if param.log:
            return LogUniformDistribution(param.low, param.high)
        if param.step is not None:
            return DiscreteUniformDistribution(param.low, param.high, param.step)
        return UniformDistribution(param.low, param.high)
    raise NotImplementedError(f"{param.type} is not supported by Optuna sweeper.")
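# A hedged illustration of the config-to-distribution mapping implemented
# above. The `config` dicts are hypothetical inputs in the shape the function
# expects; the exact set of keys accepted by `DistributionConfig` is an
# assumption based on the attribute accesses above.
config = {"type": "int", "low": 1, "high": 100, "log": True}
dist = create_optuna_distribution_from_config(config)
# dist should equal IntLogUniformDistribution(low=1, high=100)

config = {"type": "float", "low": 0.0, "high": 10.0, "step": 2.0}
dist = create_optuna_distribution_from_config(config)
# dist should equal DiscreteUniformDistribution(low=0.0, high=10.0, q=2.0)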
def test_search_space_transform_untransform_params() -> None:
    search_space = {
        "x0": DiscreteUniformDistribution(0, 1, q=0.2),
        "x1": CategoricalDistribution(["foo", "bar", "baz", "qux"]),
        "x2": IntLogUniformDistribution(1, 10),
        "x3": CategoricalDistribution(["quux", "quuz"]),
        "x4": UniformDistribution(2, 3),
        "x5": LogUniformDistribution(1, 10),
        "x6": IntUniformDistribution(2, 4),
        "x7": CategoricalDistribution(["corge"]),
    }

    params = {
        "x0": 0.2,
        "x1": "qux",
        "x2": 1,
        "x3": "quux",
        "x4": 2.0,
        "x5": 1.0,
        "x6": 2,
        "x7": "corge",
    }

    trans = _SearchSpaceTransform(search_space)
    trans_params = trans.transform(params)
    untrans_params = trans.untransform(trans_params)

    for name in params.keys():
        assert untrans_params[name] == params[name]
def create_optuna_distribution_from_override(override: Override) -> Any:
    value = override.value()
    if not override.is_sweep_override():
        return value

    if override.is_choice_sweep():
        assert isinstance(value, ChoiceSweep)
        choices = [x for x in override.sweep_iterator(transformer=Transformer.encode)]
        return CategoricalDistribution(choices)

    if override.is_range_sweep():
        choices = [x for x in override.sweep_iterator(transformer=Transformer.encode)]
        return CategoricalDistribution(choices)

    if override.is_interval_sweep():
        assert isinstance(value, IntervalSweep)
        if "log" in value.tags:
            if "int" in value.tags:
                return IntLogUniformDistribution(value.start, value.end)
            return LogUniformDistribution(value.start, value.end)
        else:
            if "int" in value.tags:
                return IntUniformDistribution(value.start, value.end)
            return UniformDistribution(value.start, value.end)

    raise NotImplementedError("{} is not supported by Optuna sweeper.".format(override))
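# A hedged sketch of driving the tag-based mapping above through Hydra's
# override parser. `OverridesParser.create()` and `parse_overrides` exist in
# hydra-core, but the exact sweep/tag syntax shown here is an assumption and
# may differ between Hydra versions.
from hydra.core.override_parser.overrides_parser import OverridesParser

parser = OverridesParser.create()
(override,) = parser.parse_overrides(["x=tag(log, int, interval(1, 100))"])
print(create_optuna_distribution_from_override(override))
# Expected: an IntLogUniformDistribution over [1, 100]; without the "log" tag
# the same interval would map to IntUniformDistribution, and without "int" to
# LogUniformDistribution / UniformDistribution respectively.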
def search_space() -> Dict[str, BaseDistribution]:
    return {
        "c": CategoricalDistribution(("a", "b")),
        "d": DiscreteUniformDistribution(-1, 9, 2),
        "i": IntUniformDistribution(-1, 1),
        "ii": IntUniformDistribution(-1, 3, 2),
        "il": IntLogUniformDistribution(2, 16),
        "l": LogUniformDistribution(0.001, 0.1),
        "u": UniformDistribution(-2, 2),
    }
def test_relative_parameters(storage_mode: str) -> None:
    relative_search_space = {
        "x": UniformDistribution(low=5, high=6),
        "y": UniformDistribution(low=5, high=6),
    }
    relative_params = {"x": 5.5, "y": 5.5, "z": 5.5}
    sampler = DeterministicRelativeSampler(relative_search_space, relative_params)  # type: ignore

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)

        def create_trial() -> Trial:
            return Trial(study, study._storage.create_new_trial(study._study_id))

        # Suggested from `relative_params`.
        trial0 = create_trial()
        distribution0 = UniformDistribution(low=0, high=100)
        assert trial0._suggest("x", distribution0) == 5.5

        # Not suggested from `relative_params` (due to unknown parameter name).
        trial1 = create_trial()
        distribution1 = distribution0
        assert trial1._suggest("w", distribution1) != 5.5

        # Not suggested from `relative_params` (due to incompatible value range).
        trial2 = create_trial()
        distribution2 = UniformDistribution(low=0, high=5)
        assert trial2._suggest("x", distribution2) != 5.5

        # Error (due to incompatible distribution class).
        trial3 = create_trial()
        distribution3 = IntUniformDistribution(low=1, high=100)
        with pytest.raises(ValueError):
            trial3._suggest("y", distribution3)

        # Error ('z' is included in `relative_params` but not in `relative_search_space`).
        trial4 = create_trial()
        distribution4 = UniformDistribution(low=0, high=10)
        with pytest.raises(ValueError):
            trial4._suggest("z", distribution4)

        # Error (due to incompatible distribution class).
        trial5 = create_trial()
        distribution5 = IntLogUniformDistribution(low=1, high=100)
        with pytest.raises(ValueError):
            trial5._suggest("y", distribution5)
def test_not_contained_param() -> None:
    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": UniformDistribution(1.0, 10.0)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_float("x", 10.0, 100.0) == 1.0

    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": LogUniformDistribution(1.0, 10.0)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_float("x", 10.0, 100.0, log=True) == 1.0

    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": DiscreteUniformDistribution(1.0, 10.0, 1.0)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_float("x", 10.0, 100.0, step=1.0) == 1.0

    trial = create_trial(
        value=0.2,
        params={"x": 1.0},
        distributions={"x": IntUniformDistribution(1, 10)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_int("x", 10, 100) == 1

    trial = create_trial(
        value=0.2,
        params={"x": 1},
        distributions={"x": IntUniformDistribution(1, 10, 1)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_int("x", 10, 100, 1) == 1

    trial = create_trial(
        value=0.2,
        params={"x": 1},
        distributions={"x": IntLogUniformDistribution(1, 10)},
    )
    with pytest.warns(UserWarning):
        assert trial.suggest_int("x", 10, 100, log=True) == 1
def create_optuna_distribution_from_override(override: Override) -> Any:
    value = override.value()
    if not override.is_sweep_override():
        return value

    choices: List[CategoricalChoiceType] = []
    if override.is_choice_sweep():
        assert isinstance(value, ChoiceSweep)
        for x in override.sweep_iterator(transformer=Transformer.encode):
            assert isinstance(
                x, (str, int, float, bool)
            ), f"A choice sweep expects str, int, float, or bool type. Got {type(x)}."
            choices.append(x)
        return CategoricalDistribution(choices)

    if override.is_range_sweep():
        assert isinstance(value, RangeSweep)
        assert value.start is not None
        assert value.stop is not None
        if value.shuffle:
            for x in override.sweep_iterator(transformer=Transformer.encode):
                assert isinstance(
                    x, (str, int, float, bool)
                ), f"A choice sweep expects str, int, float, or bool type. Got {type(x)}."
                choices.append(x)
            return CategoricalDistribution(choices)
        return IntUniformDistribution(int(value.start), int(value.stop), step=int(value.step))

    if override.is_interval_sweep():
        assert isinstance(value, IntervalSweep)
        assert value.start is not None
        assert value.end is not None
        if "log" in value.tags:
            if isinstance(value.start, int) and isinstance(value.end, int):
                return IntLogUniformDistribution(int(value.start), int(value.end))
            return LogUniformDistribution(value.start, value.end)
        else:
            if isinstance(value.start, int) and isinstance(value.end, int):
                return IntUniformDistribution(value.start, value.end)
            return UniformDistribution(value.start, value.end)

    raise NotImplementedError(f"{override} is not supported by Optuna sweeper.")
def restore_old_distribution(distribution_json: str) -> str:
    distribution = json_to_distribution(distribution_json)
    old_distribution: BaseDistribution

    # Float distributions.
    if isinstance(distribution, FloatDistribution):
        if distribution.log:
            old_distribution = LogUniformDistribution(
                low=distribution.low,
                high=distribution.high,
            )
        else:
            if distribution.step is not None:
                old_distribution = DiscreteUniformDistribution(
                    low=distribution.low,
                    high=distribution.high,
                    q=distribution.step,
                )
            else:
                old_distribution = UniformDistribution(
                    low=distribution.low,
                    high=distribution.high,
                )

    # Integer distributions.
    elif isinstance(distribution, IntDistribution):
        if distribution.log:
            old_distribution = IntLogUniformDistribution(
                low=distribution.low,
                high=distribution.high,
                step=distribution.step,
            )
        else:
            old_distribution = IntUniformDistribution(
                low=distribution.low,
                high=distribution.high,
                step=distribution.step,
            )

    # Categorical distribution.
    else:
        old_distribution = distribution

    return distribution_to_json(old_distribution)
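# A small round-trip sketch for `restore_old_distribution` above, assuming
# `FloatDistribution` and the JSON helpers are importable from
# `optuna.distributions` in the Optuna version at hand.
from optuna.distributions import FloatDistribution
from optuna.distributions import distribution_to_json

new_json = distribution_to_json(FloatDistribution(low=1.0, high=10.0, log=True))
old_json = restore_old_distribution(new_json)
# `old_json` should now encode LogUniformDistribution(low=1.0, high=10.0),
# i.e. the pre-consolidation class corresponding to a log FloatDistribution.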
def test_frozen_trial_suggest_int_log() -> None:
    trial = FrozenTrial(
        number=0,
        trial_id=0,
        state=TrialState.COMPLETE,
        value=0.2,
        datetime_start=datetime.datetime.now(),
        datetime_complete=datetime.datetime.now(),
        params={"x": 1},
        distributions={"x": IntLogUniformDistribution(1, 10)},
        user_attrs={},
        system_attrs={},
        intermediate_values={},
    )

    assert trial.suggest_int("x", 1, 10, log=True) == 1

    with pytest.raises(ValueError):
        trial.suggest_int("x", 1, 10, step=2, log=True)

    with pytest.raises(ValueError):
        trial.suggest_int("y", 1, 10, log=True)
def test_distributions(storage_mode: str) -> None:
    def objective(trial: Trial) -> float:
        trial.suggest_float("a", 0, 10)
        trial.suggest_float("b", 0.1, 10, log=True)
        trial.suggest_float("c", 0, 10, step=1)
        trial.suggest_int("d", 0, 10)
        trial.suggest_categorical("e", ["foo", "bar", "baz"])
        trial.suggest_int("f", 1, 10, log=True)
        return 1.0

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage)
        study.optimize(objective, n_trials=1)

        assert study.best_trial.distributions == {
            "a": UniformDistribution(low=0, high=10),
            "b": LogUniformDistribution(low=0.1, high=10),
            "c": DiscreteUniformDistribution(low=0, high=10, q=1),
            "d": IntUniformDistribution(low=0, high=10),
            "e": CategoricalDistribution(choices=("foo", "bar", "baz")),
            "f": IntLogUniformDistribution(low=1, high=10),
        }
def suggest_int(self, name: str, low: int, high: int, step: int = 1, log: bool = False) -> int:
    """Suggest a value for the integer parameter.

    The value is sampled from the integers in :math:`[\\mathsf{low}, \\mathsf{high}]`.

    Example:

        Suggest the number of trees in `RandomForestClassifier <https://scikit-learn.org/
        stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_.

        .. testcode::

            import numpy as np
            from sklearn.datasets import load_iris
            from sklearn.ensemble import RandomForestClassifier
            from sklearn.model_selection import train_test_split

            import optuna

            X, y = load_iris(return_X_y=True)
            X_train, X_valid, y_train, y_valid = train_test_split(X, y)


            def objective(trial):
                n_estimators = trial.suggest_int("n_estimators", 50, 400)
                clf = RandomForestClassifier(n_estimators=n_estimators, random_state=0)
                clf.fit(X_train, y_train)
                return clf.score(X_valid, y_valid)


            study = optuna.create_study(direction="maximize")
            study.optimize(objective, n_trials=3)

    Args:
        name:
            A parameter name.
        low:
            Lower endpoint of the range of suggested values. ``low`` is included in the range.
        high:
            Upper endpoint of the range of suggested values. ``high`` is included in the range.
        step:
            A step of discretization.

            .. note::
                Note that :math:`\\mathsf{high}` is modified if the range is not divisible by
                :math:`\\mathsf{step}`. Please check the warning messages to find the changed
                values.

            .. note::
                The method returns one of the values in the sequence
                :math:`\\mathsf{low}, \\mathsf{low} + \\mathsf{step}, \\mathsf{low} + 2 *
                \\mathsf{step}, \\dots, \\mathsf{low} + k * \\mathsf{step} \\le
                \\mathsf{high}`, where :math:`k` denotes an integer.

            .. note::
                The ``step != 1`` and ``log`` arguments cannot be used at the same time.
                To set the ``step`` argument :math:`\\mathsf{step} \\ge 2`, set the
                ``log`` argument to :obj:`False`.
        log:
            A flag to sample the value from the log domain or not.

            .. note::
                If ``log`` is true, at first, the range of suggested values is divided into
                grid points of width 1. The range of suggested values is then converted to a
                log domain, from which a value is uniformly sampled. The uniformly sampled
                value is re-converted to the original domain and rounded to the nearest grid
                point, which becomes the suggested value. For example, if `low = 2` and
                `high = 8`, then the range of suggested values is `[2, 3, 4, 5, 6, 7, 8]`
                and lower values tend to be sampled more often than higher values.

            .. note::
                The ``step != 1`` and ``log`` arguments cannot be used at the same time.
                To set the ``log`` argument to :obj:`True`, set the ``step`` argument to 1.

    Raises:
        :exc:`ValueError`:
            If ``step != 1`` and ``log = True`` are specified.

    .. seealso::
        :ref:`configurations` tutorial describes more details and flexible usages.
    """

    if step != 1:
        if log:
            raise ValueError(
                "The parameter `step != 1` is not supported when `log` is True. "
                "The specified `step` is {}.".format(step)
            )
        else:
            distribution: Union[
                IntUniformDistribution, IntLogUniformDistribution
            ] = IntUniformDistribution(low=low, high=high, step=step)
    else:
        if log:
            distribution = IntLogUniformDistribution(low=low, high=high)
        else:
            distribution = IntUniformDistribution(low=low, high=high, step=step)

    self._check_distribution(name, distribution)

    return int(self._suggest(name, distribution))
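# An empirical sketch of the log-domain note in the docstring above: with
# low=2 and high=8, suggested values stay in {2, ..., 8} and smaller values
# should appear more often. Exact counts depend on the sampler seed.
import collections

import optuna

optuna.logging.set_verbosity(optuna.logging.WARNING)

study = optuna.create_study(sampler=optuna.samplers.RandomSampler(seed=0))
study.optimize(lambda t: float(t.suggest_int("x", 2, 8, log=True)), n_trials=200)

counts = collections.Counter(t.params["x"] for t in study.trials)
print(sorted(counts.items()))  # 2 and 3 should dominate the tally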
def to_optuna(self):
    """Return an equivalent Optuna distribution."""
    if self.prior != 'log':
        return IntUniformDistribution(low=self.low, high=self.high)
    else:
        return IntLogUniformDistribution(low=self.low, high=self.high)
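# A hypothetical host class for the `to_optuna` conversion above; the
# `Integer` name, its constructor, and the `prior` attribute are assumptions
# made for illustration only.
from optuna.distributions import IntLogUniformDistribution
from optuna.distributions import IntUniformDistribution


class Integer:
    def __init__(self, low, high, prior='uniform'):
        self.low = low
        self.high = high
        self.prior = prior  # 'uniform' or 'log' (assumed convention)

    def to_optuna(self):
        """Return an equivalent Optuna distribution."""
        if self.prior != 'log':
            return IntUniformDistribution(low=self.low, high=self.high)
        else:
            return IntLogUniformDistribution(low=self.low, high=self.high)


print(Integer(1, 100, prior='log').to_optuna())
# -> an IntLogUniformDistribution over [1, 100]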
def test_search_space_group() -> None:
    search_space_group = _SearchSpaceGroup()

    # No search space.
    assert search_space_group.search_spaces == []

    # No distributions.
    search_space_group.add_distributions({})
    assert search_space_group.search_spaces == []

    # Add a single distribution.
    search_space_group.add_distributions({"x": IntUniformDistribution(low=0, high=10)})
    assert search_space_group.search_spaces == [{"x": IntUniformDistribution(low=0, high=10)}]

    # Add the same single distribution.
    search_space_group.add_distributions({"x": IntUniformDistribution(low=0, high=10)})
    assert search_space_group.search_spaces == [{"x": IntUniformDistribution(low=0, high=10)}]

    # Add disjoint distributions.
    search_space_group.add_distributions(
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        }
    )
    assert search_space_group.search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)},
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
    ]

    # Add distributions, which include one of the search spaces in the group.
    search_space_group.add_distributions(
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
            "u": LogUniformDistribution(low=1e-2, high=1e2),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        }
    )
    assert search_space_group.search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)},
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        },
    ]

    # Add a distribution, which is included by one of the search spaces in the group.
    search_space_group.add_distributions({"u": LogUniformDistribution(low=1e-2, high=1e2)})
    assert search_space_group.search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)},
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {"u": LogUniformDistribution(low=1e-2, high=1e2)},
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
    ]

    # Add distributions whose intersection with one of the search spaces in the group
    # is not empty.
    search_space_group.add_distributions(
        {
            "y": IntUniformDistribution(low=0, high=10),
            "w": IntLogUniformDistribution(low=2, high=8),
        }
    )
    assert search_space_group.search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)},
        {"u": LogUniformDistribution(low=1e-2, high=1e2)},
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
        {"y": IntUniformDistribution(low=0, high=10)},
        {"z": UniformDistribution(low=-3, high=3)},
        {"w": IntLogUniformDistribution(low=2, high=8)},
    ]

    # Add distributions which include some of the search spaces in the group.
    search_space_group.add_distributions(
        {
            "y": IntUniformDistribution(low=0, high=10),
            "w": IntLogUniformDistribution(low=2, high=8),
            "t": UniformDistribution(low=10, high=100),
        }
    )
    assert search_space_group.search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)},
        {"u": LogUniformDistribution(low=1e-2, high=1e2)},
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
        {"y": IntUniformDistribution(low=0, high=10)},
        {"z": UniformDistribution(low=-3, high=3)},
        {"w": IntLogUniformDistribution(low=2, high=8)},
        {"t": UniformDistribution(low=10, high=100)},
    ]
        (
            {"type": "int", "low": 0, "high": 10, "step": 2},
            IntUniformDistribution(0, 10, step=2),
        ),
        ({"type": "int", "low": 0, "high": 5}, IntUniformDistribution(0, 5)),
        (
            {"type": "int", "low": 1, "high": 100, "log": True},
            IntLogUniformDistribution(1, 100),
        ),
        ({"type": "float", "low": 0, "high": 1}, UniformDistribution(0, 1)),
        (
            {"type": "float", "low": 0, "high": 10, "step": 2},
            DiscreteUniformDistribution(0, 10, 2),
        ),
    # The 'x' value corresponds to an index of `distribution.choices`.
    assert np.all(points >= 0)
    assert np.all(points <= len(distribution.choices) - 1)
    round_points = np.round(points)
    np.testing.assert_almost_equal(round_points, points)


@parametrize_relative_sampler
@pytest.mark.parametrize(
    "x_distribution",
    [
        UniformDistribution(-1.0, 1.0),
        LogUniformDistribution(1e-7, 1.0),
        DiscreteUniformDistribution(-10, 10, 0.5),
        IntUniformDistribution(1, 10),
        IntLogUniformDistribution(1, 100),
    ],
)
@pytest.mark.parametrize(
    "y_distribution",
    [
        UniformDistribution(-1.0, 1.0),
        LogUniformDistribution(1e-7, 1.0),
        DiscreteUniformDistribution(-10, 10, 0.5),
        IntUniformDistribution(1, 10),
        IntLogUniformDistribution(1, 100),
    ],
)
def test_sample_relative_numerical(
    relative_sampler_class: Callable[[], BaseSampler],
    x_distribution: BaseDistribution,
def test_group_decomposed_search_space() -> None:
    search_space = _GroupDecomposedSearchSpace()
    study = create_study()

    # No trial.
    assert search_space.calculate(study).search_spaces == []

    # A single parameter.
    study.optimize(lambda t: t.suggest_int("x", 0, 10), n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)}
    ]

    # Disjoint parameters.
    study.optimize(lambda t: t.suggest_int("y", 0, 10) + t.suggest_float("z", -3, 3), n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)},
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
    ]

    # Parameters which include one of the search spaces in the group.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10)
        + t.suggest_float("z", -3, 3)
        + t.suggest_float("u", 1e-2, 1e2, log=True)
        + bool(t.suggest_categorical("v", ["A", "B", "C"])),
        n_trials=1,
    )
    assert search_space.calculate(study).search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)},
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {
            "u": LogUniformDistribution(low=1e-2, high=1e2),
            "v": CategoricalDistribution(choices=["A", "B", "C"]),
        },
    ]

    # A parameter which is included by one of the search spaces in the group.
    study.optimize(lambda t: t.suggest_float("u", 1e-2, 1e2, log=True), n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {"x": IntUniformDistribution(low=0, high=10)},
        {
            "y": IntUniformDistribution(low=0, high=10),
            "z": UniformDistribution(low=-3, high=3),
        },
        {"u": LogUniformDistribution(low=1e-2, high=1e2)},
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
    ]

    # Parameters whose intersection with one of the search spaces in the group is not empty.
    study.optimize(
        lambda t: t.suggest_int("y", 0, 10) + t.suggest_int("w", 2, 8, log=True), n_trials=1
    )
    assert search_space.calculate(study).search_spaces == [
        {"v": CategoricalDistribution(choices=["A", "B", "C"])},
        {"x": IntUniformDistribution(low=0, high=10)},
        {"u": LogUniformDistribution(low=1e-2, high=1e2)},
        {"y": IntUniformDistribution(low=0, high=10)},
        {"z": UniformDistribution(low=-3, high=3)},
        {"w": IntLogUniformDistribution(low=2, high=8)},
    ]

    search_space = _GroupDecomposedSearchSpace()
    study = create_study()

    # Failed or pruned trials are not considered in the calculation of
    # an intersection search space.
    def objective(trial: Trial, exception: Exception) -> float:
        trial.suggest_float("a", 0, 1)
        raise exception

    study.optimize(lambda t: objective(t, RuntimeError()), n_trials=1, catch=(RuntimeError,))
    study.optimize(lambda t: objective(t, TrialPruned()), n_trials=1)
    assert search_space.calculate(study).search_spaces == []

    # If two parameters have the same name but different distributions,
    # the first one takes priority.
    study.optimize(lambda t: t.suggest_float("a", -1, 1), n_trials=1)
    study.optimize(lambda t: t.suggest_float("a", 0, 1), n_trials=1)
    assert search_space.calculate(study).search_spaces == [
        {"a": UniformDistribution(low=-1, high=1)}
    ]
from optuna._transform import _SearchSpaceTransform
from optuna.distributions import BaseDistribution
from optuna.distributions import CategoricalDistribution
from optuna.distributions import DiscreteUniformDistribution
from optuna.distributions import IntLogUniformDistribution
from optuna.distributions import IntUniformDistribution
from optuna.distributions import LogUniformDistribution
from optuna.distributions import UniformDistribution


@pytest.mark.parametrize(
    "param,distribution",
    [
        (0, IntUniformDistribution(0, 3)),
        (1, IntLogUniformDistribution(1, 10)),
        (2, IntUniformDistribution(0, 10, step=2)),
        (0.0, UniformDistribution(0, 3)),
        (1.0, LogUniformDistribution(1, 10)),
        (0.2, DiscreteUniformDistribution(0, 1, q=0.2)),
        ("foo", CategoricalDistribution(["foo"])),
        ("bar", CategoricalDistribution(["foo", "bar", "baz"])),
    ],
)
def test_search_space_transform_shapes_dtypes(param: Any, distribution: BaseDistribution) -> None:
    trans = _SearchSpaceTransform({"x0": distribution})
    trans_params = trans.transform({"x0": param})

    if isinstance(distribution, CategoricalDistribution):
        expected_bounds_shape = (len(distribution.choices), 2)
        expected_params_shape = (len(distribution.choices),)
def suggest_int(self, name, low, high, step=1, log=False):
    # type: (str, int, int, int, bool) -> int
    """Suggest a value for the integer parameter.

    The value is sampled from the integers in :math:`[\\mathsf{low}, \\mathsf{high}]`,
    and the step of discretization is :math:`\\mathsf{step}`. More specifically,
    this method returns one of the values in the sequence
    :math:`\\mathsf{low}, \\mathsf{low} + \\mathsf{step}, \\mathsf{low} + 2 *
    \\mathsf{step}, \\dots, \\mathsf{low} + k * \\mathsf{step} \\le \\mathsf{high}`,
    where :math:`k` denotes an integer. Note that :math:`\\mathsf{high}` is modified
    if the range is not divisible by :math:`\\mathsf{step}`. Please check the warning
    messages to find the changed values.

    Example:

        Suggest the number of trees in `RandomForestClassifier <https://scikit-learn.org/
        stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_.

        .. testcode::

            import numpy as np
            from sklearn.datasets import load_iris
            from sklearn.ensemble import RandomForestClassifier
            from sklearn.model_selection import train_test_split

            import optuna

            X, y = load_iris(return_X_y=True)
            X_train, X_valid, y_train, y_valid = train_test_split(X, y)

            def objective(trial):
                n_estimators = trial.suggest_int('n_estimators', 50, 400)
                clf = RandomForestClassifier(n_estimators=n_estimators, random_state=0)
                clf.fit(X_train, y_train)
                return clf.score(X_valid, y_valid)

            study = optuna.create_study(direction='maximize')
            study.optimize(objective, n_trials=3)

    Args:
        name:
            A parameter name.
        low:
            Lower endpoint of the range of suggested values. ``low`` is included in the range.
        high:
            Upper endpoint of the range of suggested values. ``high`` is included in the range.
        step:
            A step of discretization.
        log:
            A flag to sample the value from the log domain or not.
            If ``log`` is true, at first, the range of suggested values is divided into grid
            points of width ``step``. The range of suggested values is then converted to a log
            domain, from which a value is uniformly sampled. The uniformly sampled value is
            re-converted to the original domain and rounded to the nearest grid point, which
            becomes the suggested value. For example, if `low = 2`, `high = 8` and `step = 2`,
            then the range of suggested values is divided by ``step`` into `[2, 4, 6, 8]`, and
            lower values tend to be sampled more often than higher values.
    """

    if log:
        distribution = IntLogUniformDistribution(
            low=low, high=high, step=step
        )  # type: Union[IntUniformDistribution, IntLogUniformDistribution]
    else:
        distribution = IntUniformDistribution(low=low, high=high, step=step)

    self._check_distribution(name, distribution)

    if distribution.low == distribution.high:
        return self._set_new_param_or_get_existing(name, distribution.low, distribution)

    return int(self._suggest(name, distribution))