Example #1
 def fit(
     self,
     dataset: DatasetH,
     num_boost_round=None,
     early_stopping_rounds=None,
     verbose_eval=20,
     evals_result=None,
     reweighter=None,
     **kwargs,
 ):
     if evals_result is None:
         evals_result = {}  # avoid sharing a mutable default dict between calls
     # _prepare_data returns (lgb.Dataset, name) pairs; split them into parallel tuples
     ds_l = self._prepare_data(dataset, reweighter)
     ds, names = list(zip(*ds_l))
     self.model = lgb.train(
         self.params,
         ds[0],  # training dataset
         num_boost_round=self.num_boost_round
         if num_boost_round is None else num_boost_round,
         valid_sets=ds,
         valid_names=names,
         early_stopping_rounds=(self.early_stopping_rounds
                                if early_stopping_rounds is None else
                                early_stopping_rounds),
         verbose_eval=verbose_eval,
         evals_result=evals_result,
         **kwargs,
     )
     # replay each validation metric per boosting round into qlib's experiment recorder (R)
     for k in names:
         for key, val in evals_result[k].items():
             name = f"{key}.{k}"
             for epoch, m in enumerate(val):
                 R.log_metrics(**{name.replace("@", "_"): m}, step=epoch)
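Example #1 passes early_stopping_rounds, verbose_eval and evals_result directly to lgb.train, which only works with older LightGBM releases; newer versions drop these keyword arguments in favour of the callback-based form shown in Example #2 below.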
Example #2
 def fit(
     self,
     dataset: DatasetH,
     num_boost_round=None,
     early_stopping_rounds=None,
     verbose_eval=20,
     evals_result=None,
     reweighter=None,
     **kwargs,
 ):
     if evals_result is None:
         evals_result = {}  # avoid sharing a mutable default dict between calls
     ds_l = self._prepare_data(dataset, reweighter)
     ds, names = list(zip(*ds_l))
     early_stopping_callback = lgb.early_stopping(
         self.early_stopping_rounds
         if early_stopping_rounds is None else early_stopping_rounds)
     # NOTE: if you encounter an error here, please upgrade your lightgbm; these callbacks only exist in recent releases
     verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
     evals_result_callback = lgb.record_evaluation(evals_result)
     self.model = lgb.train(
         self.params,
         ds[0],  # training dataset
         num_boost_round=self.num_boost_round
         if num_boost_round is None else num_boost_round,
         valid_sets=ds,
         valid_names=names,
         callbacks=[
             early_stopping_callback, verbose_eval_callback,
             evals_result_callback
         ],
         **kwargs,
     )
     for k in names:
         for key, val in evals_result[k].items():
             name = f"{key}.{k}"
             for epoch, m in enumerate(val):
                 R.log_metrics(**{name.replace("@", "_"): m}, step=epoch)
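To try the callback-style API outside qlib, here is a minimal standalone sketch (not taken from the examples above): it uses synthetic data, assumes a reasonably recent lightgbm that exposes the early_stopping, log_evaluation and record_evaluation callbacks, and mirrors the wiring of Example #2.

import lightgbm as lgb
import numpy as np

rng = np.random.default_rng(0)
X, y = rng.normal(size=(500, 10)), rng.normal(size=500)
train = lgb.Dataset(X[:400], label=y[:400])
valid = lgb.Dataset(X[400:], label=y[400:], reference=train)

evals_result = {}  # filled in place by record_evaluation, as in the fit() above
booster = lgb.train(
    {"objective": "regression", "verbosity": -1},
    train,
    num_boost_round=200,
    valid_sets=[train, valid],
    valid_names=["train", "valid"],
    callbacks=[
        lgb.early_stopping(stopping_rounds=20),   # stop when the validation score stops improving
        lgb.log_evaluation(period=20),            # print metrics every 20 rounds
        lgb.record_evaluation(evals_result),      # capture the per-round metric history
    ],
)
print(list(evals_result["valid"].keys()))  # e.g. ['l2'] for the default regression metric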