def test_check_distribution_suggest_float(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None

    sampler = samplers.RandomSampler()
    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    x1 = trial.suggest_float("x1", 10, 20)
    x2 = trial.suggest_uniform("x1", 10, 20)
    assert x1 == x2

    x3 = trial.suggest_float("x2", 1e-5, 1e-3, log=True)
    x4 = trial.suggest_loguniform("x2", 1e-5, 1e-3)
    assert x3 == x4

    x5 = trial.suggest_float("x3", 10, 20, step=1.0)
    x6 = trial.suggest_discrete_uniform("x3", 10, 20, 1.0)
    assert x5 == x6

    with pytest.raises(ValueError):
        trial.suggest_float("x4", 1e-5, 1e-2, step=1e-5, log=True)

    with pytest.raises(ValueError):
        trial.suggest_int("x1", 10, 20)

    trial = Trial(study, study._storage.create_new_trial(study._study_id))
    with pytest.raises(ValueError):
        trial.suggest_int("x1", 10, 20)
def test_check_distribution_suggest_float(storage_mode: str) -> None:
    sampler = samplers.RandomSampler()
    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))

        x1 = trial.suggest_float("x1", 10, 20)
        x2 = trial.suggest_uniform("x1", 10, 20)
        assert x1 == x2

        x3 = trial.suggest_float("x2", 1e-5, 1e-3, log=True)
        x4 = trial.suggest_loguniform("x2", 1e-5, 1e-3)
        assert x3 == x4

        x5 = trial.suggest_float("x3", 10, 20, step=1.0)
        x6 = trial.suggest_discrete_uniform("x3", 10, 20, 1.0)
        assert x5 == x6

        with pytest.raises(ValueError):
            trial.suggest_float("x4", 1e-5, 1e-2, step=1e-5, log=True)

        with pytest.raises(ValueError):
            trial.suggest_int("x1", 10, 20)

        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        with pytest.raises(ValueError):
            trial.suggest_int("x1", 10, 20)
def _get_params(self, trial: trial_module.Trial) -> Dict[str, Any]:
    params = self.params.copy()  # type: Dict[str, Any]

    if self.param_distributions is None:
        params["feature_fraction"] = trial.suggest_discrete_uniform(
            "feature_fraction", 0.1, 1.0, 0.05
        )
        params["max_depth"] = trial.suggest_int("max_depth", 1, 7)
        params["num_leaves"] = trial.suggest_int("num_leaves", 2, 2 ** params["max_depth"])
        # See https://github.com/Microsoft/LightGBM/issues/907
        params["min_data_in_leaf"] = trial.suggest_int(
            "min_data_in_leaf",
            1,
            max(1, int(self.n_samples / params["num_leaves"])),
        )
        params["lambda_l1"] = trial.suggest_loguniform("lambda_l1", 1e-09, 10.0)
        params["lambda_l2"] = trial.suggest_loguniform("lambda_l2", 1e-09, 10.0)

        if params["boosting_type"] != "goss":
            params["bagging_fraction"] = trial.suggest_discrete_uniform(
                "bagging_fraction", 0.5, 0.95, 0.05
            )
            params["bagging_freq"] = trial.suggest_int("bagging_freq", 1, 10)

        return params

    for name, distribution in self.param_distributions.items():
        params[name] = trial._suggest(name, distribution)

    return params
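# A minimal sketch of how a `param_distributions` mapping could be supplied instead of
# the defaults above, assuming the classic distribution classes from
# `optuna.distributions`; the parameter names and search ranges here are illustrative
# assumptions, not taken from the snippet.
import optuna.distributions as od

example_param_distributions = {
    "lambda_l1": od.LogUniformDistribution(1e-8, 10.0),    # searched on a log scale
    "num_leaves": od.IntUniformDistribution(2, 128),       # integer-valued range
    "feature_fraction": od.UniformDistribution(0.4, 1.0),  # plain uniform range
}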
def objective(trial: Trial):
    cat_train_dataset = Pool(
        _train_df[features], _train_df["likes_log"], cat_features=cat_features
    )
    cat_valid_dataset = Pool(
        _valid_df[features], _valid_df["likes_log"], cat_features=cat_features
    )

    params = {
        "depth": trial.suggest_int("depth", 4, 30),
        "num_leaves": trial.suggest_int("num_leaves", 16, 300),
        "min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 4, 50),
        "learning_rate": trial.suggest_loguniform("learning_rate", 0.001, 0.3),
        "reg_lambda": trial.suggest_loguniform("reg_lambda", 0.01, 0.5),
    }

    cat_model = CatBoostRegressor(**params, iterations=3500, grow_policy="Lossguide")
    cat_model.fit(
        cat_train_dataset,
        verbose_eval=100,
        eval_set=[cat_valid_dataset],
        early_stopping_rounds=200,
    )

    y_pred_cat = np.expm1(cat_model.predict(_valid_df[features]))
    y_pred_cat[y_pred_cat < 0] = 0
    y_true = _valid_df["likes"].values
    rmsle = np.sqrt(mean_squared_log_error(y_true, y_pred_cat))
    return rmsle
def test_suggest_int_range(
    storage_init_func: Callable[[], storages.BaseStorage], range_config: Dict[str, int]
) -> None:
    sampler = samplers.RandomSampler()

    # Check upper endpoints.
    mock = Mock()
    mock.side_effect = lambda study, trial, param_name, distribution: distribution.high
    with patch.object(sampler, "sample_independent", mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))

        with pytest.warns(UserWarning):
            x = trial.suggest_int(
                "x", range_config["low"], range_config["high"], step=range_config["step"]
            )
        assert x == range_config["mod_high"]
        assert mock_object.call_count == 1

    # Check lower endpoints.
    mock = Mock()
    mock.side_effect = lambda study, trial, param_name, distribution: distribution.low
    with patch.object(sampler, "sample_independent", mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))

        with pytest.warns(UserWarning):
            x = trial.suggest_int(
                "x", range_config["low"], range_config["high"], step=range_config["step"]
            )
        assert x == range_config["low"]
        assert mock_object.call_count == 1
def test_suggest_int_log(storage_init_func: Callable[[], storages.BaseStorage]) -> None:
    mock = Mock()
    mock.side_effect = [1, 2]
    sampler = samplers.RandomSampler()

    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))
    distribution = IntLogUniformDistribution(low=1, high=3)

    with patch.object(sampler, "sample_independent", mock) as mock_object:
        assert trial._suggest("x", distribution) == 1  # Test suggesting a param.
        assert trial._suggest("x", distribution) == 1  # Test suggesting the same param.
        assert trial._suggest("y", distribution) == 2  # Test suggesting a different param.
        assert trial.params == {"x": 1, "y": 2}
        assert mock_object.call_count == 2

    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    with warnings.catch_warnings():
        # UserWarning will be raised since [0.5, 10] is not divisible by 1.
        warnings.simplefilter("ignore", category=UserWarning)
        with pytest.raises(ValueError):
            trial.suggest_int("z", 0.5, 10, log=True)  # type: ignore

    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    with pytest.raises(ValueError):
        trial.suggest_int("w", 1, 3, step=2, log=True)
def objective(trial: Trial) -> float:
    ret: float = trial.suggest_int("x1", 0, 10)
    ret += trial.suggest_int("x2", 1, 10, log=True)
    ret += trial.suggest_float("x3", 0, 10)
    ret += trial.suggest_float("x4", 1, 10, log=True)
    ret += trial.suggest_float("x5", 1, 10, step=3)
    _ = trial.suggest_categorical("x6", [1, 4, 7, 10])
    return ret
def objective(trial: Trial) -> float:
    a = trial.suggest_float("a", 1, 9)
    b = trial.suggest_float("b", 1, 9, log=True)
    c = trial.suggest_float("c", 1, 9, step=1)
    d = trial.suggest_int("d", 1, 9)
    e = trial.suggest_int("e", 1, 9, log=True)
    f = trial.suggest_int("f", 1, 9, step=2)
    g = cast(int, trial.suggest_categorical("g", range(1, 10)))
    return a + b + c + d + e + f + g
def objective(trial: Trial) -> float:
    x0 = trial.suggest_float("x0", 0, 1)
    x1 = trial.suggest_float("x1", 0.1, 1, log=True)
    x2 = trial.suggest_float("x2", 0, 1, step=0.1)
    x3 = trial.suggest_int("x3", 0, 2)
    x4 = trial.suggest_int("x4", 2, 4, log=True)
    x5 = trial.suggest_int("x5", 0, 4, step=2)
    x6 = cast(float, trial.suggest_categorical("x6", [0.1, 0.2, 0.3]))
    return x0 + x1 + x2 + x3 + x4 + x5 + x6
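# A minimal sketch of how a single-objective function like the one above could be run,
# using only the public Optuna API; the optimization direction and trial count are
# illustrative assumptions, not taken from the original snippet.
import optuna

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=20)  # each trial re-invokes `objective` with freshly sampled params
print(study.best_params, study.best_value)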
def test_suggest_low_equals_high(storage_mode: str) -> None:
    with patch.object(
        distributions, "_get_single_value", wraps=distributions._get_single_value
    ) as mock_object, StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=samplers.TPESampler(n_startup_trials=0))
        trial = Trial(study, study._storage.create_new_trial(study._study_id))

        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 1
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 1

        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 2
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 2

        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 3
        assert (
            trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0
        )  # Suggesting the same param.
        assert mock_object.call_count == 3

        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting a param.
        assert mock_object.call_count == 4
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting the same param.
        assert mock_object.call_count == 4

        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert mock_object.call_count == 5
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 5

        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting a param.
        assert mock_object.call_count == 6
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 6

        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting a param.
        assert mock_object.call_count == 7
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 7

        assert trial.suggest_float("h", 0.5, 0.5, step=1.0) == 0.5  # Suggesting a param.
        assert mock_object.call_count == 8
        assert trial.suggest_float("h", 0.5, 0.5, step=1.0) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 8

        assert trial.suggest_int("i", 1, 1, log=True) == 1  # Suggesting a param.
        assert mock_object.call_count == 9
        assert trial.suggest_int("i", 1, 1, log=True) == 1  # Suggesting the same param.
        assert mock_object.call_count == 9
def objective(trial: Trial) -> float:
    trial.suggest_uniform("a", 0, 10)
    trial.suggest_loguniform("b", 0.1, 10)
    trial.suggest_discrete_uniform("c", 0, 10, 1)
    trial.suggest_int("d", 0, 10)
    trial.suggest_categorical("e", ["foo", "bar", "baz"])
    trial.suggest_int("f", 1, 10, log=True)
    return 1.0
def objective(trial: Trial) -> float:
    trial.suggest_float("a", 0, 10)
    trial.suggest_float("b", 0.1, 10, log=True)
    trial.suggest_float("c", 0, 10, step=1)
    trial.suggest_int("d", 0, 10)
    trial.suggest_categorical("e", ["foo", "bar", "baz"])
    trial.suggest_int("f", 1, 10, log=True)
    return 1.0
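# The two objectives above define the same search space: `suggest_float` subsumes the
# older suggest_* variants. The mapping below restates Optuna's documented equivalences
# for the parameters used in these snippets; no new behavior is introduced.
#
#   trial.suggest_uniform("a", 0, 10)             -> trial.suggest_float("a", 0, 10)
#   trial.suggest_loguniform("b", 0.1, 10)        -> trial.suggest_float("b", 0.1, 10, log=True)
#   trial.suggest_discrete_uniform("c", 0, 10, 1) -> trial.suggest_float("c", 0, 10, step=1)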
def generate_model_class_try_params(self, trial: Trial):
    params = {
        'weights': trial.suggest_categorical('weights', ['distance', 'uniform']),
        'p': trial.suggest_uniform('p', 1, 4),
        'n_neighbors': int(trial.suggest_int('n_neighbors', 5, 30)),
        'algorithm': trial.suggest_categorical('algorithm', ['ball_tree', 'kd_tree']),
    }

    if 'tree' in params.get('algorithm', None):
        params['leaf_size'] = int(trial.suggest_int('leaf_size', 10, 200))

    return params
def sample_params_values(self, trial: Trial, suggested_params: Dict, estimated_n_trials: int) -> Dict: """Sample hyperparameters from suggested. Args: trial: Optuna trial object. suggested_params: Dict with parameters. estimated_n_trials: Maximum number of hyperparameter estimation. Returns: Dict with sampled hyperparameters. """ trial_values = copy(suggested_params) try: nan_rate = getattr(self, '_nan_rate') except AttributeError: nan_rate = 0 trial_values['max_depth'] = trial.suggest_int(name='max_depth', low=3, high=7) if nan_rate > 0: trial_values['nan_mode'] = trial.suggest_categorical( name='nan_mode', choices=['Max', 'Min']) if estimated_n_trials > 20: trial_values['l2_leaf_reg'] = trial.suggest_loguniform( name='l2_leaf_reg', low=1e-8, high=10.0, ) # trial_values['bagging_temperature'] = trial.suggest_loguniform( # name='bagging_temperature', # low=0.01, # high=10.0, # ) if estimated_n_trials > 50: trial_values['min_data_in_leaf'] = trial.suggest_int( name='min_data_in_leaf', low=1, high=20) # the only case when used this parameter is when categorical columns more than 0 if len(self._le_cat_features) > 0: trial_values['one_hot_max_size'] = trial.suggest_int( name='one_hot_max_size', low=3, high=10) return trial_values
def objective(trial: Trial) -> Tuple[float, float]:
    p0 = trial.suggest_float("p0", -10, 10)
    p1 = trial.suggest_float("p1", 3, 5)
    p2 = trial.suggest_float("p2", 0.00001, 0.1, log=True)
    p3 = trial.suggest_float("p3", 100, 200, step=5)
    p4 = trial.suggest_int("p4", -20, -15)
    p5 = cast(int, trial.suggest_categorical("p5", [7, 1, 100]))
    p6 = trial.suggest_float("p6", -10, 10, step=1.0)
    p7 = trial.suggest_int("p7", 1, 7, log=True)
    return (
        p0 + p1 + p2,
        p3 + p4 + p5 + p6 + p7,
    )
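# Because the objective above returns a tuple of two values, it requires a
# multi-objective study. A minimal sketch using the public Optuna API; the two
# optimization directions and the trial count are illustrative assumptions.
import optuna

study = optuna.create_study(directions=["minimize", "minimize"])
study.optimize(objective, n_trials=30)
print(study.best_trials)  # Pareto-optimal trials; `best_params` is undefined for multi-objective studies.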
def __init__(self, cfg: EEGLearnerConfig, trial: Trial):
    watch_hidden_size = trial.suggest_int('watch_hidden_size', 1, 15)
    reg_hidden_size = trial.suggest_int('reg_hidden_size', 1, 15)
    embedding_size = trial.suggest_int('embedding_size', 0, 15)
    lr = trial.suggest_loguniform('lr', 1e-5, 1e-1)

    super().__init__(
        cfg.net.watch_len,
        cfg.net.reg_len,
        watch_hidden_size,
        reg_hidden_size,
        embedding_size,
        lr,
        n_subjects=cfg.net.n_subjects,
    )
def test_check_distribution_suggest_int(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    sampler = samplers.RandomSampler()
    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    with pytest.warns(None) as record:
        trial.suggest_int("x", 10, 20)
        trial.suggest_int("x", 10, 20)
        trial.suggest_int("x", 10, 22)

    # we expect exactly one warning
    assert len(record) == 1
def test_suggest_int(storage_mode: str) -> None:
    sampler = DeterministicSampler({"x": 1, "y": 2})

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))

        assert trial.suggest_int("x", 0, 3) == 1  # Test suggesting a param.
        assert trial.suggest_int("x", 0, 3) == 1  # Test suggesting the same param.
        assert trial.suggest_int("y", 0, 3) == 2  # Test suggesting a different param.
        assert trial.params == {"x": 1, "y": 2}
def function_optimize_optuna(trial: Trial):
    train_it = 600
    stack_layers = trial.suggest_int('stack_layers', 1, 3)  # 2
    max_pool_layers = trial.suggest_categorical('max_pool_layers', [True, False])
    if max_pool_layers:
        max_pool_layers = stack_layers
    else:
        max_pool_layers = 0
    st_filter = int(trial.suggest_discrete_uniform('st_filter', 30, 50, 10))  # 40
    inc_filter = int(trial.suggest_discrete_uniform('inc_filter', 30, 50, 10))  # 50
    extra_porc = trial.suggest_int('extra_porc', 1, 4)  # 2
    input_factor = trial.suggest_categorical('input_factor', [0.5, 0.25])  # 0.5
    lr = trial.suggest_categorical('learning_rate', [0.001, 0.0001])

    # loss_string_options = ['cross_entropy', 'mse']
    # loss_string = trial.suggest_categorical('loss_string', loss_string_options)
    loss_string = 'cross_entropy'
    replace_max_pool_with_stride = trial.suggest_categorical(
        'replace_max_pool_with_stride', [True, False])

    exp_params = {
        'tf_config': tf.ConfigProto(allow_soft_placement=True),
        'max_pool_layers': max_pool_layers,
        'stack_layers': stack_layers,
        'input_factor': input_factor,
        'extra_porc': extra_porc,
        'lr': lr,
        'st_filter': st_filter,
        'inc_filter': inc_filter,
        'loss_string': loss_string,
        'replace_max_pool_with_stride': replace_max_pool_with_stride,
    }
    print("PARAMS : {0}".format(exp_params))

    out_dict, out_folder = train_run(train_it, save_model=True, interactive_plot=False, **exp_params)

    # save params
    metric = float(out_dict['global_F1'])
    trial.set_user_attr('out_path', out_folder)
    for k in out_dict:
        if k != 'global_F1':
            trial.set_user_attr(k, float(out_dict[k]))

    return metric
def test_check_distribution_suggest_int(
    storage_init_func: Callable[[], storages.BaseStorage], enable_log: bool
) -> None:
    sampler = samplers.RandomSampler()
    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    with pytest.warns(None) as record:
        trial.suggest_int("x", 10, 20, log=enable_log)
        trial.suggest_int("x", 10, 20, log=enable_log)
        trial.suggest_int("x", 10, 22, log=enable_log)

    # We expect exactly one warning.
    assert len(record) == 1
def __init__(self, trial: Trial):
    watch_hidden_size = trial.suggest_int('watch_hidden_size', 1, 15)
    reg_hidden_size = trial.suggest_int('reg_hidden_size', 1, 15)
    embedding_size = trial.suggest_int('embedding_size', 0, 15)
    lr = trial.suggest_loguniform('lr', 1e-5, 1e-1)

    super().__init__(
        self.watch_len,
        self.reg_len,
        watch_hidden_size,
        reg_hidden_size,
        embedding_size,
        lr,
        n_subjects=self.n_subjects,
    )
def test_suggest_low_equals_high(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None

    study = create_study(storage_init_func(), sampler=samplers.TPESampler(n_startup_trials=0))
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    # Parameter values are determined without suggestion when low == high.
    with patch.object(trial, "_suggest", wraps=trial._suggest) as mock_object:
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_uniform("a", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_loguniform("b", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0  # Suggesting a param.
        assert (
            trial.suggest_discrete_uniform("c", 1.0, 1.0, 1.0) == 1.0
        )  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting a param.
        assert trial.suggest_int("d", 1, 1) == 1  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting a param.
        assert trial.suggest_float("e", 1.0, 1.0) == 1.0  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting a param.
        assert trial.suggest_float("f", 0.5, 0.5, log=True) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting a param.
        assert trial.suggest_float("g", 0.5, 0.5, log=False) == 0.5  # Suggesting the same param.
        assert mock_object.call_count == 0
def objective(trial: Trial):
    args = gen_args()

    # acquisition hyperparameters
    args.cluster = bool(trial.suggest_int('cluster', 0, 1))
    if not args.cluster and random() > 0.5:
        args.epsilon = trial.suggest_float('epsilon', 0.00, 0.2, step=0.05)

    args.fps = None
    if args.model in {'rf', 'nn'} or args.cluster:
        args.encoder = trial.suggest_categorical('encoder', {'morgan', 'pair', 'rdkit'})

    try:
        exp = Explorer(**vars(args))
    except (IncompatibilityError, NotImplementedError) as e:
        print(e)
        return float('-inf')

    start = time()
    exp.run()
    total = time() - start

    m, s = divmod(total, 60)
    h, m = divmod(int(m), 60)
    print(f'Total time for trial #{trial.number}: {h}h {m}m {s:0.2f}s\n')

    return exp.top_k_avg
def objective(trial: Trial) -> float:
    x1 = trial.suggest_float("x1", 0.1, 3)
    x2 = trial.suggest_float("x2", 0.1, 3, log=True)
    x3 = trial.suggest_float("x3", 0, 3, step=1)
    x4 = trial.suggest_int("x4", -3, 3)
    x5 = trial.suggest_int("x5", 1, 5, log=True)
    x6 = trial.suggest_categorical("x6", [1.0, 1.1, 1.2])
    if trial.number % 2 == 0:
        # Conditional parameters are ignored unless `params` is specified and is not `None`.
        x7 = trial.suggest_float("x7", 0.1, 3)
    assert isinstance(x6, float)
    value = x1**4 + x2 + x3 - x4**2 - x5 + x6
    if trial.number % 2 == 0:
        value += x7
    return value
def test_suggest_low_equals_high(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    study = create_study(storage_init_func(), sampler=samplers.TPESampler(n_startup_trials=0))
    trial = Trial(study, study._storage.create_new_trial(study.study_id))

    # Parameter values are determined without suggestion when low == high.
    with patch.object(trial, '_suggest', wraps=trial._suggest) as mock_object:
        assert trial.suggest_uniform('a', 1., 1.) == 1.  # Suggesting a param.
        assert trial.suggest_uniform('a', 1., 1.) == 1.  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_loguniform('b', 1., 1.) == 1.  # Suggesting a param.
        assert trial.suggest_loguniform('b', 1., 1.) == 1.  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_discrete_uniform('c', 1., 1., 1.) == 1.  # Suggesting a param.
        assert trial.suggest_discrete_uniform(
            'c', 1., 1., 1.) == 1.  # Suggesting the same param.
        assert mock_object.call_count == 0
        assert trial.suggest_int('d', 1, 1) == 1  # Suggesting a param.
        assert trial.suggest_int('d', 1, 1) == 1  # Suggesting the same param.
        assert mock_object.call_count == 0
def suggest_from_config(trial: Trial, configuration: str):
    """
    Suggest From Config
    ===================

    Generalizes the `suggest_*` functions by taking their arguments from a YAML
    configuration file.

    Parameters
    ----------
    trial : optuna.trial.Trial
        `Trial` object deriving from an Ask-and-Tell interface.
    configuration : str
        YAML file containing the hyperparameter configuration.

    Returns
    -------
    None
    """
    with open(configuration) as file:
        params = yaml.full_load(file)

    for par in params.values():
        if par['type'] == 'categorical':
            trial.suggest_categorical(
                name=str(par['name']),
                choices=list(par['choices']),
            )
        elif par['type'] == 'float':
            trial.suggest_float(
                name=str(par['name']),
                low=float(par['low']),
                high=float(par['high']),
                step=float(par['step']) if par['step'] else None,
                log=bool(par['log']) if par['log'] else False,
            )
        elif par['type'] == 'int':
            # suggest_int expects integer bounds and step, so cast with int() rather than float().
            trial.suggest_int(
                name=str(par['name']),
                low=int(par['low']),
                high=int(par['high']),
                step=int(par['step']) if par['step'] else 1,
                log=bool(par['log']) if par['log'] else False,
            )
        else:
            raise ValueError('Trial suggestion not implemented.')
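# A hypothetical configuration matching the keys this loader reads ('type', 'name',
# 'choices'/'low'/'high'/'step'/'log'); the parameter names and ranges below are
# illustrative only, not taken from any real project.
#
#   learning_rate:
#     type: float
#     name: learning_rate
#     low: 0.0001
#     high: 0.1
#     step: null
#     log: true
#   n_layers:
#     type: int
#     name: n_layers
#     low: 1
#     high: 5
#     step: 1
#     log: false
#   activation:
#     type: categorical
#     name: activation
#     choices: [relu, tanh]
#
# It would be consumed inside an objective, e.g. suggest_from_config(trial, "search_space.yaml").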
def test_suggest_int_log_invalid_range(storage_mode: str) -> None:
    sampler = samplers.RandomSampler()

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))

        with warnings.catch_warnings():
            # UserWarning will be raised since [0.5, 10] is not divisible by 1.
            warnings.simplefilter("ignore", category=UserWarning)
            with pytest.raises(ValueError):
                trial.suggest_int("z", 0.5, 10, log=True)  # type: ignore

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))

        with pytest.raises(ValueError):
            trial.suggest_int("w", 1, 3, step=2, log=True)
def test_check_distribution_suggest_categorical(
    storage_init_func: Callable[[], storages.BaseStorage]
) -> None:
    sampler = samplers.RandomSampler()
    study = create_study(storage_init_func(), sampler=sampler)
    trial = Trial(study, study._storage.create_new_trial(study._study_id))

    trial.suggest_categorical("x", [10, 20, 30])

    with pytest.raises(ValueError):
        trial.suggest_categorical("x", [10, 20])

    with pytest.raises(ValueError):
        trial.suggest_int("x", 10, 20)

    trial = Trial(study, study._storage.create_new_trial(study._study_id))
    with pytest.raises(ValueError):
        trial.suggest_int("x", 10, 20)
def test_check_distribution_suggest_categorical(storage_mode: str) -> None:
    sampler = samplers.RandomSampler()

    with StorageSupplier(storage_mode) as storage:
        study = create_study(storage=storage, sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))

        trial.suggest_categorical("x", [10, 20, 30])

        with pytest.raises(ValueError):
            trial.suggest_categorical("x", [10, 20])

        with pytest.raises(ValueError):
            trial.suggest_int("x", 10, 20)

        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        with pytest.raises(ValueError):
            trial.suggest_int("x", 10, 20)
def sample_params_values(self, trial: Trial, suggested_params: Dict, estimated_n_trials: int) -> Dict: """Sample hyperparameters from suggested. Args: trial: optuna trial object. suggested_params: dict with parameters. estimated_n_trials: maximum number of hyperparameter estimations. Returns: dict with sampled hyperparameters. """ logger.debug('Suggested parameters:') logger.debug(suggested_params) trial_values = copy(suggested_params) trial_values['feature_fraction'] = trial.suggest_uniform( name='feature_fraction', low=0.5, high=1.0, ) trial_values['num_leaves'] = trial.suggest_int( name='num_leaves', low=16, high=255, ) if estimated_n_trials > 30: trial_values['bagging_fraction'] = trial.suggest_uniform( name='bagging_fraction', low=0.5, high=1.0, ) trial_values['min_sum_hessian_in_leaf'] = trial.suggest_loguniform( name='min_sum_hessian_in_leaf', low=1e-3, high=10.0, ) if estimated_n_trials > 100: trial_values['reg_alpha'] = trial.suggest_loguniform( name='reg_alpha', low=1e-8, high=10.0, ) trial_values['reg_lambda'] = trial.suggest_loguniform( name='reg_lambda', low=1e-8, high=10.0, ) return trial_values