# --- tail of an lgb.train(...) call whose opening lies before this chunk ---
num_boost_round=MAX_ROUNDS,
# NOTE(review): early_stopping_rounds / verbose_eval as train() kwargs were
# removed in LightGBM >= 4 (replaced by callbacks) — confirm the pinned version.
early_stopping_rounds=EARLY_STOPPING_ROUNDS,
verbose_eval=REPORT_ROUNDS,
)

# Feature importance by total gain, limited to the top 20 features.
lgb.plot_importance(model, grid=False, max_num_features=20, importance_type="gain")
plt.show()

TUNE_ETA = True  # flip to False to skip the (slow) learning-rate search below

# Random search over the learning rate: record every sampled eta together with
# the best validation score the corresponding model reached.
best_etas = {"learning_rate": [], "score": []}

if TUNE_ETA:
    for _ in range(120):
        # presumably loguniform(a, b) samples 10**Uniform(a, b) — TODO confirm
        # the helper; if so, the upper bound 1 allows eta up to 10, which is
        # unusually large for gradient boosting — verify intent.
        eta = loguniform(-5, 1)
        best_etas["learning_rate"].append(eta)
        params["learning_rate"] = eta
        model = lgb.train(
            params,
            dt,
            valid_sets=[dt, dv],
            valid_names=["training", "valid"],
            num_boost_round=MAX_ROUNDS,
            early_stopping_rounds=EARLY_STOPPING_ROUNDS,
            verbose_eval=False,  # silent: many fits inside this loop
        )
        # Score on the "valid" set at the early-stopping point.
        best_etas["score"].append(model.best_score["valid"][METRIC])

best_eta_df = pd.DataFrame.from_dict(best_etas)

# LOWESS-smooth score vs. eta for inspection; the call continues past this chunk.
lowess_data = lowess(
params, dt, valid_sets=[dt, dv], valid_names=["training", "valid"], num_boost_round=MAX_ROUNDS, early_stopping_rounds=EARLY_STOPPING_ROUNDS, verbose_eval=REPORT_ROUNDS, ) lgb.plot_importance(model, grid=False, importance_type="gain") plt.show() best_etas = {"learning_rate": [], "score": []} for _ in range(200): eta = loguniform(-3, 0) best_etas["learning_rate"].append(eta) params["learning_rate"] = eta model = lgb.train( params, dt, valid_sets=[ds, dv], valid_names=["training", "valid"], num_boost_round=MAX_ROUNDS, early_stopping_rounds=EARLY_STOPPING_ROUNDS, verbose_eval=False, ) best_etas["score"].append(model.best_score["valid"][METRIC]) best_eta_df = pd.DataFrame.from_dict(best_etas) lowess_data = lowess(
params, dt, valid_sets=[dt, dv], valid_names=["training", "valid"], num_boost_round=MAX_ROUNDS, early_stopping_rounds=EARLY_STOPPING_ROUNDS, verbose_eval=REPORT_ROUNDS, ) lgb.plot_importance(model, grid=False, importance_type="gain") plt.show() best_etas = {"learning_rate": [], "score": []} for _ in range(30): eta = loguniform(-1, 0) best_etas["learning_rate"].append(eta) params["learning_rate"] = eta model = lgb.train( params, dt, valid_sets=[ds, dv], valid_names=["training", "valid"], num_boost_round=MAX_ROUNDS, early_stopping_rounds=EARLY_STOPPING_ROUNDS, verbose_eval=False, ) best_etas["score"].append(model.best_score["valid"][METRIC]) best_eta_df = pd.DataFrame.from_dict(best_etas) lowess_data = lowess(
# --- tail of an lgb.train(...) call whose opening lies before this chunk ---
valid_names=["training", "valid"],
num_boost_round=MAX_ROUNDS,
# NOTE(review): early_stopping_rounds / verbose_eval as train() kwargs were
# removed in LightGBM >= 4 (replaced by callbacks) — confirm the pinned version.
early_stopping_rounds=EARLY_STOPPING_ROUNDS,
verbose_eval=REPORT_ROUNDS,
)

# Feature importance by total gain, limited to the top 20 features.
lgb.plot_importance(model, grid=False, max_num_features=20, importance_type="gain")
plt.show()

# Random search over the learning rate: record every sampled eta together with
# the best validation score the corresponding model reached.
best_etas = {"learning_rate": [], "score": []}

for _ in range(60):
    # presumably loguniform(a, b) samples 10**Uniform(a, b) — TODO confirm helper
    eta = loguniform(-4, 0)
    best_etas["learning_rate"].append(eta)
    params["learning_rate"] = eta
    model = lgb.train(
        params,
        dt,
        valid_sets=[dt, dv],
        valid_names=["training", "valid"],
        num_boost_round=MAX_ROUNDS,
        early_stopping_rounds=EARLY_STOPPING_ROUNDS,
        verbose_eval=False,  # silent: many fits inside this loop
    )
    # Score on the "valid" set at the early-stopping point.
    best_etas["score"].append(model.best_score["valid"][METRIC])

best_eta_df = pd.DataFrame.from_dict(best_etas)

# LOWESS-smooth score vs. eta for inspection; the call continues past this chunk.
lowess_data = lowess(
# --- tail of an lgb.train(...) call whose opening lies before this chunk ---
num_boost_round=MAX_ROUNDS,
# NOTE(review): early_stopping_rounds / verbose_eval as train() kwargs were
# removed in LightGBM >= 4 (replaced by callbacks) — confirm the pinned version.
early_stopping_rounds=EARLY_STOPPING_ROUNDS,
verbose_eval=REPORT_ROUNDS,
)

# Feature importance by total gain, limited to the top 20 features.
lgb.plot_importance(model, grid=False, max_num_features=20, importance_type="gain")
plt.show()

TUNE_ETA = True  # flip to False to skip the (slow) learning-rate search below

# Random search over the learning rate: record every sampled eta together with
# the best validation score the corresponding model reached.
best_etas = {"learning_rate": [], "score": []}

if TUNE_ETA:
    for _ in range(30):
        # presumably loguniform(a, b) samples 10**Uniform(a, b) — TODO confirm helper
        eta = loguniform(-6, 0)
        best_etas["learning_rate"].append(eta)
        params["learning_rate"] = eta
        model = lgb.train(
            params,
            dt,
            valid_sets=[dt, dv],
            valid_names=["training", "valid"],
            num_boost_round=MAX_ROUNDS,
            early_stopping_rounds=EARLY_STOPPING_ROUNDS,
            verbose_eval=False,  # silent: many fits inside this loop
        )
        # Score on the "valid" set at the early-stopping point.
        best_etas["score"].append(model.best_score["valid"][METRIC])

best_eta_df = pd.DataFrame.from_dict(best_etas)

# LOWESS-smooth score vs. eta for inspection; the call continues past this chunk.
lowess_data = lowess(