Example 1
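These snippets are drawn from a test suite for Optuna's `LightGBMTunerCV` integration; each example is a method of a test class, so the shared imports are not shown. A minimal sketch of the preamble they assume (the import path of the internal `_OptunaObjectiveCV` class is an assumption and varies across Optuna versions):

    import warnings
    from tempfile import TemporaryDirectory
    from typing import Any, Dict, Optional
    from unittest import mock

    import lightgbm as lgb
    import numpy as np
    import optuna
    import pytest

    from optuna.integration.lightgbm import LightGBMTunerCV
    # Internal class; this path is an assumption and may differ by version.
    from optuna.integration._lightgbm_tuner.optimize import _OptunaObjectiveCV

This first test exercises `get_best_booster`: it must raise `ValueError` before any trial completes; after tuning, the returned boosters must carry a tuned `lambda_l1` rather than the out-of-range seed value; and a second tuner pointed at the same `model_dir` must reload boosters with identical parameters.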
    def test_get_best_booster(self) -> None:
        unexpected_value = 20  # Outside the tuned search range.

        params: Dict = {"verbose": -1, "lambda_l1": unexpected_value}
        dataset = lgb.Dataset(np.zeros((10, 10)))
        study = optuna.create_study()

        with TemporaryDirectory() as tmpdir:
            tuner = LightGBMTunerCV(
                params, dataset, study=study, model_dir=tmpdir, return_cvbooster=True
            )

            with pytest.raises(ValueError):
                tuner.get_best_booster()

            with mock.patch.object(
                _OptunaObjectiveCV, "_get_cv_scores", return_value=[1.0]
            ):
                tuner.tune_regularization_factors()

            best_boosters = tuner.get_best_booster().boosters
            for booster in best_boosters:
                assert booster.params["lambda_l1"] != unexpected_value

            # A second tuner pointed at the same model_dir reloads the saved boosters.
            tuner2 = LightGBMTunerCV(
                params, dataset, study=study, model_dir=tmpdir, return_cvbooster=True
            )
            best_boosters2 = tuner2.get_best_booster().boosters
            for booster, booster2 in zip(best_boosters, best_boosters2):
                assert booster.params == booster2.params
Example 2
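This test verifies that each Optuna trial invokes the callbacks passed via `optuna_callbacks`: tuning `num_leaves` for ten trials must call the mock ten times. Per Optuna's API, each callback receives the study and the finished trial, which a `MagicMock` accepts transparently; a minimal sketch of a real callback with that signature:

    def logging_callback(study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
        # Invoked once at the end of each trial.
        print(f"trial {trial.number} finished with value {trial.value}")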
    def test_optuna_callback(self) -> None:
        params: Dict[str, Any] = {"verbose": -1}
        dataset = lgb.Dataset(np.zeros((10, 10)))

        callback_mock = mock.MagicMock()

        study = optuna.create_study()
        tuner = LightGBMTunerCV(params, dataset, study=study, optuna_callbacks=[callback_mock])

        with mock.patch.object(_OptunaObjectiveCV, "_get_cv_scores", return_value=[1.0]):
            tuner._tune_params(["num_leaves"], 10, optuna.samplers.TPESampler(), "num_leaves")

        assert callback_mock.call_count == 10
Example 3
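This test is parametrized over `show_progress_bar` (the `@pytest.mark.parametrize` decorator is not shown): it patches `tqdm.tqdm` alongside the CV scores and asserts that the progress bar is instantiated the expected number of times.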
    def test_run_show_progress_bar(self, show_progress_bar: bool, expected: int) -> None:
        params: Dict = {"verbose": -1}
        dataset = lgb.Dataset(np.zeros((10, 10)))

        study = optuna.create_study()
        tuner = LightGBMTunerCV(
            params, dataset, study=study, time_budget=1, show_progress_bar=show_progress_bar
        )

        with mock.patch.object(
            _OptunaObjectiveCV, "_get_cv_scores", return_value=[1.0]
        ), mock.patch("tqdm.tqdm") as mock_tqdm:
            tuner.run()

        assert mock_tqdm.call_count == expected
Example 4
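This test checks resuming: after `tune_regularization_factors` completes on one tuner, running the same step on a second tuner that shares the study must not add any trials.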
    def test_resume_run(self) -> None:
        params: Dict = {"verbose": -1}
        dataset = lgb.Dataset(np.zeros((10, 10)))

        study = optuna.create_study()
        tuner = LightGBMTunerCV(params, dataset, study=study)

        with mock.patch.object(_OptunaObjectiveCV, "_get_cv_scores", return_value=[1.0]):
            tuner.tune_regularization_factors()

        # Record the trial count before resuming; tuning must have added trials.
        n_trials = len(study.trials)
        assert n_trials > 0

        tuner2 = LightGBMTunerCV(params, dataset, study=study)
        with mock.patch.object(_OptunaObjectiveCV, "_get_cv_scores", return_value=[1.0]):
            tuner2.tune_regularization_factors()
        assert n_trials == len(study.trials)
Example 5
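Another parametrized test (decorator not shown): by patching `os.mkdir` and `os.path.exists`, it asserts that the `model_dir` directory is created exactly when it does not already exist.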
    def test_model_dir(self, dir_exists: bool, expected: bool) -> None:
        unexpected_value = 20  # Outside the tuned search range.

        params: Dict = {"verbose": -1, "lambda_l1": unexpected_value}
        dataset = lgb.Dataset(np.zeros((10, 10)))

        with mock.patch("os.mkdir") as m:
            with mock.patch("os.path.exists", return_value=dir_exists):
                LightGBMTunerCV(params, dataset, model_dir="./booster")
                assert m.called == expected
Example 6
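This test covers the deprecated `verbosity` argument (its `FutureWarning` is suppressed): the argument must set Optuna's global log level while leaving LightGBM's own `verbose` parameter untouched.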
    def test_run_verbosity(self, verbosity: int, level: int) -> None:
        # We need to reconstruct our default handler to properly capture stderr.
        optuna.logging._reset_library_root_logger()
        optuna.logging.set_verbosity(optuna.logging.INFO)

        params: Dict = {"verbose": -1}
        dataset = lgb.Dataset(np.zeros((10, 10)))

        study = optuna.create_study()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=FutureWarning)
            tuner = LightGBMTunerCV(
                params, dataset, study=study, verbosity=verbosity, time_budget=1
            )

        with mock.patch.object(_OptunaObjectiveCV, "_get_cv_scores", return_value=[1.0]):
            tuner.run()

        assert optuna.logging.get_verbosity() == level
        assert tuner.lgbm_params["verbose"] == -1
Example 7
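This parametrized test verifies that constructing the tuner with a study whose optimization direction contradicts the metric (e.g. maximizing an error metric) raises a `ValueError` with the expected message.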
    def test_inconsistent_study_direction(
        self, metric: Optional[str], study_direction: str
    ) -> None:
        params: Dict[str, Any] = {}
        if metric is not None:
            params["metric"] = metric
        train_set = lgb.Dataset(None)
        study = optuna.create_study(direction=study_direction)
        with pytest.raises(ValueError) as excinfo:
            LightGBMTunerCV(
                params, train_set, num_boost_round=5, early_stopping_rounds=2, study=study
            )

        assert excinfo.type == ValueError
        assert str(excinfo.value).startswith("Study direction is inconsistent with the metric")
Example 8
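This is a test helper rather than a test: it builds a `LightGBMTunerCV` from optional per-test parameters, always supplying the required `num_boost_round`, `early_stopping_rounds`, and `study` keyword arguments and letting `kwargs_options` override them.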
    def _get_tunercv_object(
        self,
        params: Optional[Dict[str, Any]] = None,
        train_set: Optional[lgb.Dataset] = None,
        kwargs_options: Optional[Dict[str, Any]] = None,
        study: Optional[optuna.study.Study] = None,
    ) -> LightGBMTunerCV:
        # Use None sentinels to avoid Python's mutable-default-argument pitfall.
        params = params if params is not None else {}
        kwargs_options = kwargs_options if kwargs_options is not None else {}

        # Required keyword arguments.
        kwargs: Dict[str, Any] = dict(num_boost_round=5, early_stopping_rounds=2, study=study)
        kwargs.update(kwargs_options)

        return LightGBMTunerCV(params, train_set, **kwargs)
Example 9
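This test verifies that passing the deprecated `verbosity` argument emits a `FutureWarning`.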
    def test_deprecated_args(self) -> None:
        dummy_dataset = lgb.Dataset(None)

        with pytest.warns(FutureWarning):
            LightGBMTunerCV({}, dummy_dataset, verbosity=1)
Example 10
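This test walks the error paths of `get_best_booster`, each of which must raise `ValueError`: no trial has completed yet; a resumed tuner has neither an in-memory booster nor a `model_dir` to restore from; and a `model_dir` exists but no booster was ever saved into it.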
    def test_get_best_booster_with_error(self) -> None:
        params: Dict = {"verbose": -1}
        dataset = lgb.Dataset(np.zeros((10, 10)))
        study = optuna.create_study()

        tuner = LightGBMTunerCV(
            params, dataset, study=study, model_dir=None, return_cvbooster=True
        )
        # No trial is completed yet.
        with pytest.raises(ValueError):
            tuner.get_best_booster()

        with mock.patch.object(
            _OptunaObjectiveCV, "_get_cv_scores", return_value=[1.0]
        ):
            tuner.tune_regularization_factors()

        tuner2 = LightGBMTunerCV(
            params, dataset, study=study, model_dir=None, return_cvbooster=True
        )
        # The resumed tuner has no in-memory booster and no model_dir to restore from.
        with pytest.raises(ValueError):
            tuner2.get_best_booster()

        with TemporaryDirectory() as tmpdir:
            tuner3 = LightGBMTunerCV(
                params, dataset, study=study, model_dir=tmpdir, return_cvbooster=True
            )
            # The booster was never saved, so it cannot be found in the `model_dir`.
            with pytest.raises(ValueError):
                tuner3.get_best_booster()