def evaluate_model_prediction(self, y_train_df, X_test_df, y_test_df,
                              y_hat_benchmark='y_hat_naive2', epoch=None):
    """Evaluate ESRNN model against a benchmark in y_test_df.

    Parameters
    ----------
    y_train_df: pandas dataframe
        panel with columns 'unique_id', 'ds', 'y'
    X_test_df: pandas dataframe
        panel with columns 'unique_id', 'ds', 'x'
    y_test_df: pandas dataframe
        panel with columns 'unique_id', 'ds', 'y' and a column with
        benchmark predictions (default column name 'y_hat_naive2')
    y_hat_benchmark: str
        name of the benchmark-prediction column in y_test_df
    epoch: int
        the number of epoch to check early stopping results

    Returns
    -------
    model_owa : float
        relative improvement of model with respect to benchmark,
        measured with the M4 overall weighted average.
    model_mase: float
        relative improvement of model with respect to benchmark,
        measured with the M4 mean absolute scaled error.
    model_smape: float
        relative improvement of model with respect to benchmark,
        measured with the symmetric mean absolute percentage error.
    """
    assert self._fitted, "Model not fitted yet"

    # Ground truth and benchmark predictions from the test panel.
    y_panel = y_test_df.filter(['unique_id', 'ds', 'y'])
    y_benchmark_panel = y_test_df.filter(['unique_id', 'ds', y_hat_benchmark])
    # Rebind instead of inplace rename: .filter() returns a derived frame,
    # so mutating it in place risks pandas chained-assignment warnings.
    y_benchmark_panel = y_benchmark_panel.rename(
        columns={y_hat_benchmark: 'y_hat'})
    y_hat_panel = self.predict(X_test_df)
    y_insample = y_train_df.filter(['unique_id', 'ds', 'y'])

    model_owa, model_mase, model_smape = owa(
        y_panel, y_hat_panel, y_benchmark_panel, y_insample,
        seasonality=self.mc.naive_seasonality)

    # Track the best OWA (and the epoch it occurred at) for early stopping.
    if self.min_owa > model_owa:
        self.min_owa = model_owa
        if epoch is not None:
            self.min_epoch = epoch

    print('OWA: {} '.format(np.round(model_owa, 3)))
    print('SMAPE: {} '.format(np.round(model_smape, 3)))
    print('MASE: {} '.format(np.round(model_mase, 3)))

    return model_owa, model_mase, model_smape
def evaluate_model_prediction(self, y_train_df, X_test_df, y_test_df,
                              y_hat_benchmark='y_hat_naive2', epoch=None):
    """Evaluate the model against baseline Naive2 model in y_test_df.

    Args:
        y_train_df: pandas df panel with columns unique_id, ds, y
        X_test_df: pandas df panel with columns unique_id, ds, x
        y_test_df: pandas df panel with columns unique_id, ds, y and a
            column holding benchmark predictions
        y_hat_benchmark: str, column name of benchmark predictions,
            default y_hat_naive2
        epoch: int, epoch number recorded when early-stopping improves
    """
    assert self._fitted, "Model not fitted yet"

    id_cols = ['unique_id', 'ds']
    truth = y_test_df.filter(id_cols + ['y'])
    benchmark = y_test_df.filter(id_cols + [y_hat_benchmark])
    benchmark.rename(columns={y_hat_benchmark: 'y_hat'}, inplace=True)

    predictions = self.predict(X_test_df)
    insample = y_train_df.filter(id_cols + ['y'])

    # OWA/MASE/SMAPE relative to the benchmark predictions.
    model_owa, model_mase, model_smape = owa(
        truth, predictions, benchmark, insample,
        seasonality=self.mc.naive_seasonality)

    # Remember the best OWA seen so far and the epoch it happened at.
    if self.min_owa > model_owa:
        self.min_owa = model_owa
        if epoch is not None:
            self.min_epoch = epoch

    print('OWA: {} '.format(np.round(model_owa, 3)))
    print('SMAPE: {} '.format(np.round(model_smape, 3)))
    print('MASE: {} '.format(np.round(model_mase, 3)))

    return model_owa, model_mase, model_smape
def evaluate_model_prediction(self, y_train_df, X_test_df, y_test_df,
                              y_hat_benchmark='y_hat_naive2', epoch=None):
    """Evaluate fitted model predictions against a benchmark in y_test_df.

    Args:
        y_train_df: pandas df panel with columns unique_id, ds, y
        X_test_df: pandas df panel with columns unique_id, ds, x
        y_test_df: pandas df panel with columns unique_id, ds, y and a
            column holding benchmark predictions
        y_hat_benchmark: str, column name of benchmark predictions,
            default y_hat_naive2
        epoch: int, epoch number recorded when early-stopping improves

    Returns:
        model_owa, model_mase, model_smape relative to the benchmark.
    """
    assert self._fitted, "Model not fitted yet"

    y_panel = y_test_df.filter(['unique_id', 'ds', 'y'])
    y_benchmark_panel = y_test_df.filter(['unique_id', 'ds', y_hat_benchmark])
    # Rebind instead of inplace rename: .filter() returns a derived frame,
    # so mutating it in place risks pandas chained-assignment warnings.
    y_benchmark_panel = y_benchmark_panel.rename(
        columns={y_hat_benchmark: 'y_hat'})
    y_hat_panel = self.predict(X_test_df)
    y_insample = y_train_df.filter(['unique_id', 'ds', 'y'])

    model_owa, model_mase, model_smape = owa(
        y_panel, y_hat_panel, y_benchmark_panel, y_insample,
        seasonality=self.mc.naive_seasonality)

    # Track the best OWA (and the epoch it occurred at) for early stopping.
    if self.min_owa > model_owa:
        self.min_owa = model_owa
        if epoch is not None:
            self.min_epoch = epoch

    print('OWA: {} '.format(np.round(model_owa, 3)))
    print('SMAPE: {} '.format(np.round(model_smape, 3)))
    print('MASE: {} '.format(np.round(model_mase, 3)))

    return model_owa, model_mase, model_smape