def test_compare_validation_metrics(self):
    # type: () -> None
    """Check that metric direction is respected when comparing validation scores.

    For metrics that LightGBM maximizes (AUC, NDCG/XE-NDCG aliases, MAP aliases)
    a larger value must be reported as better; for minimized metrics the
    comparison must be reversed. Equal values are never "better" in either case.
    """
    maximized_metrics = [
        "auc",
        "ndcg",
        "lambdarank",
        "rank_xendcg",
        "xendcg",
        "xe_ndcg",
        "xe_ndcg_mart",
        "xendcg_mart",
        "map",
        "mean_average_precision",
    ]
    for metric in maximized_metrics:
        tuner = BaseTuner(lgbm_params={"metric": metric})
        # Higher is better for these metrics.
        assert tuner.compare_validation_metrics(0.5, 0.1)
        assert not tuner.compare_validation_metrics(0.5, 0.5)
        assert not tuner.compare_validation_metrics(0.1, 0.5)

    minimized_metrics = ["rmsle", "rmse", "binary_logloss"]
    for metric in minimized_metrics:
        tuner = BaseTuner(lgbm_params={"metric": metric})
        # Lower is better for these metrics.
        assert not tuner.compare_validation_metrics(0.5, 0.1)
        assert not tuner.compare_validation_metrics(0.5, 0.5)
        assert tuner.compare_validation_metrics(0.1, 0.5)
def test_compare_validation_metrics(self):
    # type: () -> None
    """Check that compare_validation_metrics respects each metric's direction.

    'auc' and 'accuracy' are maximized (larger wins); 'rmsle', 'rmse' and
    'binary_logloss' are minimized (smaller wins). Ties are never better.
    """
    for metric in ['auc', 'accuracy']:
        tuner = BaseTuner(lgbm_params={'metric': metric})
        # Higher is better for these metrics.
        assert tuner.compare_validation_metrics(0.5, 0.1)
        assert not tuner.compare_validation_metrics(0.5, 0.5)
        assert not tuner.compare_validation_metrics(0.1, 0.5)

    for metric in ['rmsle', 'rmse', 'binary_logloss']:
        tuner = BaseTuner(lgbm_params={'metric': metric})
        # Lower is better for these metrics.
        assert not tuner.compare_validation_metrics(0.5, 0.1)
        assert not tuner.compare_validation_metrics(0.5, 0.5)
        assert tuner.compare_validation_metrics(0.1, 0.5)
def test_compare_validation_metrics(self):
    # type: () -> None
    """Verify metric-direction handling in compare_validation_metrics.

    Ranking/classification metrics (AUC, NDCG and its xendcg aliases, MAP
    aliases) are maximized, so a larger value must compare as better;
    regression/loss metrics are minimized, so the comparison is reversed.
    Equal values must never compare as better.
    """
    metrics_to_maximize = [
        'auc',
        'ndcg',
        'lambdarank',
        'rank_xendcg',
        'xendcg',
        'xe_ndcg',
        'xe_ndcg_mart',
        'xendcg_mart',
        'map',
        'mean_average_precision',
    ]
    for metric in metrics_to_maximize:
        tuner = BaseTuner(lgbm_params={'metric': metric})
        # Higher is better for these metrics.
        assert tuner.compare_validation_metrics(0.5, 0.1)
        assert not tuner.compare_validation_metrics(0.5, 0.5)
        assert not tuner.compare_validation_metrics(0.1, 0.5)

    metrics_to_minimize = ['rmsle', 'rmse', 'binary_logloss']
    for metric in metrics_to_minimize:
        tuner = BaseTuner(lgbm_params={'metric': metric})
        # Lower is better for these metrics.
        assert not tuner.compare_validation_metrics(0.5, 0.1)
        assert not tuner.compare_validation_metrics(0.5, 0.5)
        assert tuner.compare_validation_metrics(0.1, 0.5)