from typing import Callable

from unittest.mock import Mock, patch

import pytest

from optuna import create_study, samplers, storages
from optuna.distributions import LogUniformDistribution
from optuna.trial import Trial


def test_suggest_loguniform(storage_init_func):
    # type: (Callable[[], storages.BaseStorage]) -> None

    with pytest.raises(ValueError):
        LogUniformDistribution(low=1.0, high=0.9)

    with pytest.raises(ValueError):
        LogUniformDistribution(low=0.0, high=0.9)

    mock = Mock()
    mock.side_effect = [1.0, 2.0, 3.0]
    sampler = samplers.RandomSampler()

    with patch.object(sampler, "sample_independent", mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = LogUniformDistribution(low=0.1, high=4.0)

        assert trial._suggest("x", distribution) == 1.0  # Test suggesting a param.
        assert trial._suggest("x", distribution) == 1.0  # Test suggesting the same param.
        assert trial._suggest("y", distribution) == 3.0  # Test suggesting a different param.
        assert trial.params == {"x": 1.0, "y": 3.0}
        assert mock_object.call_count == 3
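
# The test above exercises the private Trial._suggest path directly. For
# orientation, here is a minimal sketch of the equivalent public API; the
# objective function and its bounds are illustrative assumptions, not taken
# from the test.
import optuna


def objective(trial):
    # suggest_loguniform draws from a log-scaled range, matching the
    # LogUniformDistribution(low=0.1, high=4.0) used in the test above.
    x = trial.suggest_loguniform("x", 0.1, 4.0)
    return (x - 2.0) ** 2


study = optuna.create_study()
study.optimize(objective, n_trials=10)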

from typing import Any, Dict

from optuna import trial as trial_module


def _get_params(self, trial: trial_module.Trial) -> Dict[str, Any]:
    params = self.params.copy()  # type: Dict[str, Any]

    if self.param_distributions is None:
        params["feature_fraction"] = trial.suggest_discrete_uniform(
            "feature_fraction", 0.1, 1.0, 0.05
        )
        params["max_depth"] = trial.suggest_int("max_depth", 1, 7)
        params["num_leaves"] = trial.suggest_int(
            "num_leaves", 2, 2 ** params["max_depth"]
        )
        # See https://github.com/Microsoft/LightGBM/issues/907
        params["min_data_in_leaf"] = trial.suggest_int(
            "min_data_in_leaf",
            1,
            max(1, int(self.n_samples / params["num_leaves"])),
        )
        params["lambda_l1"] = trial.suggest_loguniform("lambda_l1", 1e-09, 10.0)
        params["lambda_l2"] = trial.suggest_loguniform("lambda_l2", 1e-09, 10.0)

        if params["boosting_type"] != "goss":
            params["bagging_fraction"] = trial.suggest_discrete_uniform(
                "bagging_fraction", 0.5, 0.95, 0.05
            )
            params["bagging_freq"] = trial.suggest_int("bagging_freq", 1, 10)

        return params

    for name, distribution in self.param_distributions.items():
        params[name] = trial._suggest(name, distribution)

    return params
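
# When self.param_distributions is not None, the loop above bypasses the
# built-in search space and delegates to trial._suggest with user-supplied
# Optuna distribution objects. A minimal sketch of such a mapping follows;
# the parameter choices and ranges are hypothetical, assuming the
# surrounding class accepts a param_distributions argument of this shape.
from optuna.distributions import IntUniformDistribution, LogUniformDistribution

param_distributions = {
    "max_depth": IntUniformDistribution(1, 7),
    "lambda_l1": LogUniformDistribution(1e-09, 10.0),
}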

def _get_params(self, trial: trial_module.Trial) -> Dict[str, Any]:
    params = self.params.copy()  # type: Dict[str, Any]

    if self.param_distributions is None:
        params["colsample_bylevel"] = trial.suggest_discrete_uniform(
            "colsample_bylevel", 0.1, 1.0, 0.05
        )
        params["max_depth"] = trial.suggest_int("max_depth", 1, 7)
        # https://catboost.ai/docs/concepts/parameter-tuning.html#tree-growing-policy
        # params["num_leaves"] = trial.suggest_int(
        #     "num_leaves", 2, 2 ** params["max_depth"]
        # )
        # See https://github.com/Microsoft/LightGBM/issues/907
        params["num_leaves"] = 31
        params["min_data_in_leaf"] = trial.suggest_int(
            "min_data_in_leaf",
            1,
            max(1, int(self.n_samples / params["num_leaves"])),
        )
        # CatBoost's L2 regularization term; the suggested name matches the
        # params key so the trial records it under "l2_leaf_reg".
        params["l2_leaf_reg"] = trial.suggest_loguniform("l2_leaf_reg", 1e-09, 10.0)

        if params["bootstrap_type"] == "Bayesian":
            params["bagging_temperature"] = trial.suggest_discrete_uniform(
                "bagging_temperature", 0.5, 0.95, 0.05
            )
        elif params["bootstrap_type"] in ("Bernoulli", "Poisson"):
            params["subsample"] = trial.suggest_uniform("subsample", 0.1, 1.0)

        return params

    for name, distribution in self.param_distributions.items():
        params[name] = trial._suggest(name, distribution)

    return params
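
# Both _get_params variants read keys such as "boosting_type" and
# "bootstrap_type" straight out of the copied self.params, so the caller is
# expected to have set them before tuning starts. A hypothetical sketch of
# defaults that would satisfy both branches; the values are assumptions,
# not taken from the code above.
DEFAULT_PARAMS = {
    "boosting_type": "gbdt",       # consulted by the LightGBM variant
    "bootstrap_type": "Bayesian",  # consulted by the CatBoost variant
}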