Example #1
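These snippets appear to be excerpts from Optuna's test suite for its LightGBM tuner: each is a method of a pytest test class, with the surrounding imports and class definition stripped. A minimal scaffold that would let them run might look like the sketch below; the _BaseTuner import path and the TestBaseTuner class name are assumptions (the module has moved between Optuna releases).

from typing import Dict, List, Union

import lightgbm as lgb
import pytest

# Assumed import path; _BaseTuner has lived in different modules
# across Optuna releases.
from optuna.integration._lightgbm_tuner.optimize import _BaseTuner


class TestBaseTuner:
    # The example methods below would be defined inside this class.
    ...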
    def test_get_booster_best_score__using_valid_names_as_list(self) -> None:

        unexpected_value = 0.5
        expected_value = 1.0

        class DummyBooster(object):
            def __init__(self) -> None:

                self.best_score = {
                    "train": {"binary_logloss": unexpected_value},
                    "val": {"binary_logloss": expected_value},
                }

        booster = DummyBooster()
        dummy_train_dataset = lgb.Dataset(None)
        dummy_val_dataset = lgb.Dataset(None)

        tuner = _BaseTuner(
            lgbm_kwargs={
                "valid_names": ["train", "val"],
                "valid_sets": [dummy_train_dataset, dummy_val_dataset],
            }
        )
        val_score = tuner._get_booster_best_score(booster)
        assert val_score == expected_value
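Note what the assertion pins down: when valid_names is given as a list, the tuner reports the score for the last name in the list ("val" here), not the first ("train").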
Example #2
    def test_metric_with_eval_at(
        self, metric: str, eval_at_param: Dict[str, Union[int, List[int]]], expected: str
    ) -> None:

        params: Dict[str, Union[str, int, List[int]]] = {"metric": metric}
        params.update(eval_at_param)
        tuner = _BaseTuner(lgbm_params=params)
        assert tuner._metric_with_eval_at(metric) == expected
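As defined, this method takes metric, eval_at_param, and expected as arguments, so in the original suite it is presumably driven by a pytest.mark.parametrize decorator that this excerpt drops. A hypothetical parametrization consistent with the assertion could look like the following; the rows are illustrative (they assume ndcg and map names are suffixed with the first eval_at value), not the original test data.

    @pytest.mark.parametrize(
        "metric, eval_at_param, expected",
        [
            # Non-ranking metrics are presumably passed through unchanged.
            ("binary_logloss", {"eval_at": 1}, "binary_logloss"),
            # Ranking metrics pick up an "@k" suffix.
            ("ndcg", {"eval_at": 5}, "ndcg@5"),
            ("map", {"eval_at": [1, 2]}, "map@1"),
        ],
    )
    def test_metric_with_eval_at(
        self, metric: str, eval_at_param: Dict[str, Union[int, List[int]]], expected: str
    ) -> None:
        ...  # body as in the example above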
Example #3
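This is the same test as Example #2, written with PEP 484 type comments instead of inline annotations, presumably from a release that still supported Python 2 syntax.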
    def test_metric_with_eval_at(self, metric, eval_at_param, expected):
        # type: (str, Dict[str, Union[int, List[int]]], str) -> None

        params = {
            "metric": metric
        }  # type: Dict[str, Union[str, int, List[int]]]
        params.update(eval_at_param)
        tuner = _BaseTuner(lgbm_params=params)
        assert tuner._metric_with_eval_at(metric) == expected
Example #4
    def test_higher_is_better(self) -> None:

        for metric in [
            "auc",
            "ndcg",
            "lambdarank",
            "rank_xendcg",
            "xendcg",
            "xe_ndcg",
            "xe_ndcg_mart",
            "xendcg_mart",
            "map",
            "mean_average_precision",
        ]:
            tuner = _BaseTuner(lgbm_params={"metric": metric})
            assert tuner.higher_is_better()

        for metric in ["rmsle", "rmse", "binary_logloss"]:
            tuner = _BaseTuner(lgbm_params={"metric": metric})
            assert not tuner.higher_is_better()
Example #5
    def test_compare_validation_metrics(self) -> None:

        for metric in [
            "auc",
            "ndcg",
            "lambdarank",
            "rank_xendcg",
            "xendcg",
            "xe_ndcg",
            "xe_ndcg_mart",
            "xendcg_mart",
            "map",
            "mean_average_precision",
        ]:
            tuner = _BaseTuner(lgbm_params={"metric": metric})
            assert tuner.compare_validation_metrics(0.5, 0.1)
            assert not tuner.compare_validation_metrics(0.5, 0.5)
            assert not tuner.compare_validation_metrics(0.1, 0.5)

        for metric in ["rmsle", "rmse", "binary_logloss"]:
            tuner = _BaseTuner(lgbm_params={"metric": metric})
            assert not tuner.compare_validation_metrics(0.5, 0.1)
            assert not tuner.compare_validation_metrics(0.5, 0.5)
            assert tuner.compare_validation_metrics(0.1, 0.5)
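Together with Example #4, these assertions pin down the comparison semantics: a new score counts as an improvement only when it is strictly better in the metric's preferred direction, so a tie is never an improvement. A minimal free-standing sketch of that behavior (not Optuna's actual method, which lives on _BaseTuner):

def compare_validation_metrics(val_score: float, best_score: float, higher_is_better: bool) -> bool:
    # Strict comparisons: a tie is never an improvement.
    if higher_is_better:
        return val_score > best_score
    return val_score < best_score


assert compare_validation_metrics(0.5, 0.1, higher_is_better=True)
assert not compare_validation_metrics(0.5, 0.5, higher_is_better=True)
assert compare_validation_metrics(0.1, 0.5, higher_is_better=False)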
Example #6
    def test_get_booster_best_score__using_valid_names_as_str(self) -> None:

        expected_value = 1.0

        class DummyBooster(object):
            def __init__(self) -> None:

                self.best_score = {"dev": {"binary_logloss": expected_value}}

        booster = DummyBooster()
        dummy_dataset = lgb.Dataset(None)

        tuner = _BaseTuner(lgbm_kwargs={"valid_names": "dev", "valid_sets": dummy_dataset})
        val_score = tuner._get_booster_best_score(booster)
        assert val_score == expected_value
Example #7
    def test_get_booster_best_score(self) -> None:

        expected_value = 1.0

        class DummyBooster(object):
            def __init__(self) -> None:

                self.best_score = {"valid_0": {"binary_logloss": expected_value}}

        booster = DummyBooster()
        dummy_dataset = lgb.Dataset(None)

        tuner = _BaseTuner(lgbm_kwargs=dict(valid_sets=dummy_dataset))
        val_score = tuner._get_booster_best_score(booster)
        assert val_score == expected_value
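With no valid_names and a single validation set, the score is read under "valid_0", LightGBM's default name for the first validation set, which is exactly the key the dummy booster's best_score dict provides.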
Example #8
    def test_metric_with_eval_at_error(self) -> None:

        tuner = _BaseTuner(lgbm_params={"metric": "ndcg", "eval_at": "1"})
        with pytest.raises(ValueError):
            tuner._metric_with_eval_at("ndcg")
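Here eval_at is the string "1" rather than an int or a list of ints, so _metric_with_eval_at presumably cannot derive an "ndcg@k" metric name from it and is expected to raise ValueError.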