def test_study_optimize_with_single_search_space() -> None:
    """Exhaustively sample one grid and verify coverage plus error handling."""

    def objective(trial: Trial) -> float:
        a = trial.suggest_int("a", 0, 100)
        b = trial.suggest_uniform("b", -0.1, 0.1)
        c = trial.suggest_categorical("c", ("x", "y"))
        d = trial.suggest_discrete_uniform("d", -5, 5, 1)
        e = trial.suggest_loguniform("e", 0.0001, 1)

        if c == "x":
            return a * d
        else:
            return b * e

    # Test that all combinations of the grid is sampled.
    search_space = {
        "b": np.arange(-0.1, 0.1, 0.05),
        "c": ["x", "y"],
        "d": [-0.5, 0.5],
        "e": [0.1],
        "a": list(range(0, 100, 20)),
    }
    n_grids = _n_grids(search_space)
    study = optuna.create_study(sampler=samplers.GridSampler(search_space))
    study.optimize(objective, n_trials=n_grids)

    def sorted_values(
        d: Mapping[str, Sequence[GridValueType]]
    ) -> ValuesView[Sequence[GridValueType]]:
        # Order values by key so grids and suggested params compare consistently.
        return OrderedDict(sorted(d.items())).values()

    all_grids = itertools.product(*sorted_values(search_space))
    all_suggested_values = [tuple(p for p in sorted_values(t.params)) for t in study.trials]
    assert set(all_grids) == set(all_suggested_values)

    # Every grid point must have been visited exactly once.
    ids = sorted(t.system_attrs["grid_id"] for t in study.trials)
    assert ids == list(range(n_grids))

    # Test a non-existing parameter name in the grid.
    search_space = {"a": list(range(0, 100, 20))}
    study = optuna.create_study(sampler=samplers.GridSampler(search_space))
    with pytest.raises(ValueError):
        study.optimize(objective)

    # Test a value with out of range.
    search_space = {
        "a": [110],  # 110 is out of range specified by the suggest method.
        "b": [0],
        "c": ["x"],
        "d": [0],
        "e": [0.1],
    }
    study = optuna.create_study(sampler=samplers.GridSampler(search_space))
    with pytest.raises(ValueError):
        study.optimize(objective)
def test_study_optimize_with_single_search_space() -> None:
    """The sampler must cover one grid exhaustively and validate its inputs."""

    def objective(trial: Trial) -> float:
        a = trial.suggest_int("a", 0, 100)
        b = trial.suggest_float("b", -0.1, 0.1)
        c = trial.suggest_categorical("c", ("x", "y"))
        d = trial.suggest_float("d", -5, 5, step=1)
        e = trial.suggest_float("e", 0.0001, 1, log=True)

        return a * d if c == "x" else b * e

    # Every combination in the grid should be visited.
    search_space = {
        "b": np.arange(-0.1, 0.1, 0.05),
        "c": ["x", "y"],
        "d": [-0.5, 0.5],
        "e": [0.1],
        "a": list(range(0, 100, 20)),
    }
    study = optuna.create_study(
        sampler=samplers.GridSampler(search_space))  # type: ignore
    study.optimize(objective)

    def key_ordered_values(
        mapping: Mapping[str, Sequence[GridValueType]]
    ) -> ValuesView[Sequence[GridValueType]]:
        return OrderedDict(sorted(mapping.items())).values()

    expected = set(itertools.product(*key_ordered_values(search_space)))  # type: ignore
    observed = set(tuple(p for p in key_ordered_values(t.params)) for t in study.trials)
    assert expected == observed

    # A grid parameter that the objective never suggests is an error.
    search_space = {"a": list(range(0, 100, 20))}
    study = optuna.create_study(
        sampler=samplers.GridSampler(search_space))  # type: ignore
    with pytest.raises(ValueError):
        study.optimize(objective)

    # A grid value outside the suggest range only triggers a warning.
    search_space = {
        "a": [110],  # Outside the [0, 100] range given to suggest_int.
        "b": [0],
        "c": ["x"],
        "d": [0],
        "e": [0.1],
    }
    study = optuna.create_study(
        sampler=samplers.GridSampler(search_space))  # type: ignore
    with pytest.warns(UserWarning):
        study.optimize(objective)
def test_study_optimize_with_multiple_search_spaces() -> None:
    """Switching samplers mid-study records each trial's own search space."""

    def objective(trial: Trial) -> float:
        a = trial.suggest_int("a", 0, 100)
        b = trial.suggest_uniform("b", -100, 100)
        return a * b

    # Run 3 trials with a search space.
    search_space_0: Dict[str, List[GridValueType]] = {"a": [0, 50], "b": [-50, 0, 50]}
    sampler_0 = samplers.GridSampler(search_space_0)
    study = optuna.create_study(sampler=sampler_0)
    study.optimize(objective, n_trials=3)

    assert len(study.trials) == 3
    for t in study.trials:
        assert sampler_0._same_search_space(t.system_attrs["search_space"])

    # Run 2 trials with another space.
    search_space_1: Dict[str, List[GridValueType]] = {"a": [0, 25], "b": [-50]}
    sampler_1 = samplers.GridSampler(search_space_1)
    study.sampler = sampler_1
    study.optimize(objective, n_trials=2)

    assert not sampler_0._same_search_space(sampler_1._search_space)
    assert len(study.trials) == 5
    for t in study.trials[:3]:
        assert sampler_0._same_search_space(t.system_attrs["search_space"])
    for t in study.trials[3:5]:
        assert sampler_1._same_search_space(t.system_attrs["search_space"])

    # Run 3 trials with the first search space again.
    study.sampler = sampler_0
    study.optimize(objective, n_trials=3)

    assert len(study.trials) == 8
    for t in study.trials[:3]:
        assert sampler_0._same_search_space(t.system_attrs["search_space"])
    for t in study.trials[3:5]:
        assert sampler_1._same_search_space(t.system_attrs["search_space"])
    for t in study.trials[5:]:
        assert sampler_0._same_search_space(t.system_attrs["search_space"])
def test_same_seed_trials() -> None:
    """Two samplers sharing a seed visit the grid in the same order."""
    grid = [0, 20, 40, 60, 80, 100]
    seed = 0

    studies = []
    for _ in range(2):
        study = optuna.create_study(sampler=samplers.GridSampler({"a": grid}, seed))
        study.optimize(lambda trial: trial.suggest_int("a", 0, 100))
        studies.append(study)

    first, second = studies
    for index in range(len(grid)):
        assert first.trials[index].params["a"] == second.trials[index].params["a"]
def test_enqueued_insufficient_trial() -> None:
    """Enqueueing a trial that lacks the grid's parameters fails at optimize time."""
    study = optuna.create_study(sampler=samplers.GridSampler({"a": [0, 50]}))
    study.enqueue_trial({})

    with pytest.raises(ValueError):
        study.optimize(lambda trial: trial.suggest_int("a", 0, 100))
def test_study_optimize_with_pruning() -> None:
    """A pruned trial still consumes its grid point."""

    def objective(trial: Trial) -> float:
        raise optuna.TrialPruned

    # With n_trials=None the study stops once every grid value has been
    # assigned, pruned trials included.
    search_space: Dict[str, List[GridValueType]] = {"a": [0, 50]}
    study = optuna.create_study(sampler=samplers.GridSampler(search_space))
    study.optimize(objective, n_trials=None)

    assert len(study.trials) == 2
def test_has_same_search_space() -> None:
    """_same_search_space ignores key/value ordering and container type, not content."""
    space: Dict[str, List[Union[int, str]]] = {"x": [3, 2, 1], "y": ["a", "b", "c"]}
    sampler = samplers.GridSampler(space)

    # Identical mapping, numpy-array values, and reordered keys/values all match.
    assert sampler._same_search_space(space)
    assert sampler._same_search_space({"x": np.array([3, 2, 1]), "y": ["a", "b", "c"]})
    assert sampler._same_search_space({"y": ["c", "a", "b"], "x": [1, 2, 3]})

    # Extra or missing grid values do not match.
    assert not sampler._same_search_space({"x": [3, 2, 1, 0], "y": ["a", "b", "c"]})
    assert not sampler._same_search_space({"x": [3, 2], "y": ["a", "b", "c"]})
def test_enqueued_trial() -> None:
    """An enqueued value runs first; the grid is then consumed as usual."""
    study = optuna.create_study(sampler=samplers.GridSampler({"a": [0, 50]}))
    study.enqueue_trial({"a": 100})
    study.optimize(lambda trial: trial.suggest_int("a", 0, 100))

    values = [t.params["a"] for t in study.trials]
    assert len(values) == 3
    assert values[0] == 100
    assert sorted(values[1:]) == [0, 50]
def test_has_same_search_space() -> None:
    """Check _same_search_space's tolerance for reordering and numpy values."""
    search_space: Dict[str, List[GridValueType]] = {"x": [3, 2, 1], "y": ["a", "b", "c"]}
    sampler = samplers.GridSampler(search_space)

    assert sampler._same_search_space(search_space)
    assert sampler._same_search_space({"x": np.array([3, 2, 1]), "y": ["a", "b", "c"]})
    assert sampler._same_search_space({"y": ["c", "a", "b"], "x": [1, 2, 3]})

    assert not sampler._same_search_space({"x": [3, 2, 1, 0], "y": ["a", "b", "c"]})
    assert not sampler._same_search_space({"x": [3, 2], "y": ["a", "b", "c"]})
def test_study_optimize_with_exceeding_number_of_trials() -> None:
    """An exhausted grid stops the study, but a fresh optimize call still runs."""

    def objective(trial: Trial) -> float:
        return trial.suggest_int("a", 0, 100)

    search_space: Dict[str, List[GridValueType]] = {"a": [0, 50]}
    study = optuna.create_study(sampler=samplers.GridSampler(search_space))

    # With `n_trials=None` the optimization halts once every grid value is used.
    study.optimize(objective, n_trials=None)
    assert len(study.trials) == 2

    # Triggering optimization again on the exhausted grid adds one more trial.
    study.optimize(objective, n_trials=None)
    assert len(study.trials) == 3
def test_retried_trial() -> None:
    """A retried trial reuses the failed trial's parameter and grid id."""
    sampler = samplers.GridSampler({"a": [0, 50]})
    study = optuna.create_study(sampler=sampler)

    # Start a trial, then mark it for retry via the failure callback.
    first = study.ask()
    first.suggest_int("a", 0, 100)
    RetryFailedTrialCallback()(study, study.trials[0])

    study.optimize(lambda trial: trial.suggest_int("a", 0, 100))

    assert len(study.trials) == 3
    original, retried = study.trials[0], study.trials[1]
    assert original.params["a"] == retried.params["a"]
    assert original.system_attrs["grid_id"] == retried.system_attrs["grid_id"]
def test_has_same_search_space() -> None:
    """Check _same_search_space's tolerance for reordering and numpy values."""
    search_space: Dict[str, List[GridValueType]] = {"x": [3, 2, 1], "y": ["a", "b", "c"]}
    sampler = samplers.GridSampler(search_space)

    assert sampler._same_search_space(search_space)
    assert sampler._same_search_space({"x": np.array([3, 2, 1]), "y": ["a", "b", "c"]})
    assert sampler._same_search_space({"y": ["c", "a", "b"], "x": [1, 2, 3]})

    assert not sampler._same_search_space({"x": [3, 2, 1, 0], "y": ["a", "b", "c"]})
    assert not sampler._same_search_space({"x": [3, 2], "y": ["a", "b", "c"]})
def test_study_optimize_with_single_search_space() -> None:
    """Exhaustively sample one grid; extra trials and invalid grids must fail."""

    def objective(trial: Trial) -> float:
        a = trial.suggest_int("a", 0, 100)
        b = trial.suggest_uniform("b", -0.1, 0.1)
        c = trial.suggest_categorical("c", ("x", "y"))
        d = trial.suggest_discrete_uniform("d", -5, 5, 1)
        e = trial.suggest_loguniform("e", 0.0001, 1)

        if c == "x":
            return a * d
        else:
            return b * e

    # Test that all combinations of the grid is sampled.
    search_space = {
        "b": np.arange(-0.1, 0.1, 0.05),
        "c": ["x", "y"],
        "d": [-0.5, 0.5],
        "e": [0.1],
        "a": list(range(0, 100, 20)),
    }
    n_grids = _n_grids(search_space)
    study = optuna.create_study(sampler=samplers.GridSampler(search_space))
    study.optimize(objective, n_trials=n_grids)

    def sorted_values(d: Dict[str, List[GridValueType]]) -> ValuesView[List[GridValueType]]:
        # Order values by key so grids and suggested params compare consistently.
        return OrderedDict(sorted(d.items())).values()

    all_grids = itertools.product(*sorted_values(search_space))
    all_suggested_values = [tuple(p for p in sorted_values(t.params)) for t in study.trials]
    assert set(all_grids) == set(all_suggested_values)

    ids = sorted(t.system_attrs["grid_id"] for t in study.trials)
    assert ids == list(range(n_grids))

    # Test that an optimization fails if the number of trials is more than that of all grids.
    with pytest.raises(ValueError):
        study.optimize(objective, n_trials=1)

    # Test a non-existing parameter name in the grid.
    search_space = {"a": list(range(0, 100, 20))}
    study = optuna.create_study(sampler=samplers.GridSampler(search_space))
    with pytest.raises(ValueError):
        study.optimize(objective)

    # Test a value with out of range.
    search_space = {
        "a": [110],  # 110 is out of range specified by the suggest method.
        "b": [0],
        "c": ["x"],
        "d": [0],
        "e": [0.1],
    }
    study = optuna.create_study(sampler=samplers.GridSampler(search_space))
    with pytest.raises(ValueError):
        study.optimize(objective)
def test_reseed_rng() -> None:
    """Reseeding must change the sampler's internal RNG state."""
    sampler = samplers.GridSampler({"a": [0, 100]})
    state_before = str(sampler._rng.get_state())

    sampler.reseed_rng()

    assert state_before != str(sampler._rng.get_state())