def debug_logger():
    """Manually exercise logger-level interactions for the module logger.

    Plays with ``log`` (module logger), ``log.parent`` and ``log.parent.parent``
    (ancestor loggers) plus ``NeuralProphet.set_log_level`` and emits messages
    whose text states whether they are expected to appear — correctness is
    checked by eye on the console output, not by assertions.

    NOTE(review): relies on module-level ``log`` and ``NeuralProphet`` defined
    elsewhere in this file — presumably ``log = logging.getLogger(__name__)``;
    confirm against the imports.
    """
    log.info("testing: Logger")
    # Module logger at ERROR, parent at WARNING: module-level WARNING is
    # filtered, parent-level WARNING and module-level ERROR pass.
    log.setLevel("ERROR")
    log.parent.setLevel("WARNING")
    log.warning("### this WARNING should not show ###")
    log.parent.warning("this WARNING should show")
    log.error("this ERROR should show")
    # Invert the levels: module DEBUG now passes, parent WARNING is filtered.
    log.setLevel("DEBUG")
    log.parent.setLevel("ERROR")
    log.debug("this DEBUG should show")
    log.parent.warning("### this WARNING not show ###")
    log.error("this ERROR should show")
    log.parent.error("this ERROR should show, too")
    # test existing test cases
    # test_all(log_level="DEBUG")
    # test the set_log_level function
    log.parent.parent.setLevel("INFO")
    m = NeuralProphet(
        n_forecasts=3,
        n_lags=5,
        yearly_seasonality=False,
        weekly_seasonality=False,
        daily_seasonality=False,
        log_level="DEBUG",
        epochs=5,
    )
    # Constructing with log_level="DEBUG" should open the root/package logger
    # up to DEBUG; set_log_level("WARNING") should then close it again.
    log.parent.parent.debug("this DEBUG should show")
    m.set_log_level(log_level="WARNING")
    log.parent.parent.debug("### this DEBUG should not show ###")
    log.parent.parent.info("### this INFO should not show ###")
def nprophet_fit_and_predict_simple(
        y: List[float], k: int, freq: str = None, model_params: dict = None) -> Tuple[List, List, Any, Any]:
    """Simpler wrapper for testing - univariate only.

    Fits a NeuralProphet model on the series ``y`` and predicts ``k`` steps
    ahead. If the series is shorter than the configured ``n_lags``, no model
    is fit and a naive constant forecast is returned instead.

    :param y: univariate history as a list of floats (must be non-empty)
    :param k: number of steps ahead to forecast (becomes ``n_forecasts``)
    :param freq: pandas frequency string; defaults to ``NPROPHET_META['freq']``
    :param model_params: optional overrides merged into the default model params
    :return: tuple ``(x, x_std, forecast, model)`` where ``x`` is the list of
        ``k`` point forecasts, ``x_std`` is a placeholder list of 1.0s, and
        ``forecast``/``model`` are ``None`` when the history was too short.
    """
    assert isinstance(y[0], float)
    freq = freq or NPROPHET_META['freq']
    # Copy the defaults: the original aliased NPROPHET_MODEL and then mutated
    # it, so every call permanently changed the shared module-level config.
    used_params = dict(NPROPHET_MODEL)
    used_params.update({'n_forecasts': k})
    if model_params:
        used_params.update(model_params)
    if len(y) < used_params['n_lags']:
        # Too little history to fit an AR model: repeat a wrapped constant.
        x = [wrap(y)[0]] * k
        x_std = [1.0] * k
        return x, x_std, None, None
    model = NeuralProphet(**used_params)
    model.set_log_level(log_level='CRITICAL')
    df = pd.DataFrame(columns=['y'], data=y)
    df['ds'] = pd.date_range(start='2021-01-01', periods=len(y), freq=freq)
    # Fit metrics are not needed here; fit for its side effect on the model.
    model.fit(df, freq=freq, epochs=40, use_tqdm=False)
    future = model.make_future_dataframe(df)
    forecast = model.predict(future)
    # yhat1..yhatk hold the 1..k-step-ahead predictions; take the row where
    # each horizon's prediction lands at the end of the forecast frame.
    x = [forecast['yhat' + str(j + 1)].values[-k + j] for j in range(k)]
    # Placeholder uncertainty: NeuralProphet gives no std here.
    x_std = [1.0] * k
    return x, x_std, forecast, model