Example #1
    def test_train_eval_test(self):
        log.info("testing: Train Eval Test")
        m = NeuralProphet(
            n_lags=10,
            n_forecasts=3,
            ar_sparsity=0.1,
            epochs=3,
            batch_size=32,
        )
        df = pd.read_csv(PEYTON_FILE, nrows=95)
        df = df_utils.check_dataframe(df, check_y=False)
        df = m._handle_missing_data(df, freq="D", predicting=False)
        df_train, df_test = m.split_df(df, freq="D", valid_p=0.1, inputs_overbleed=True)
        metrics = m.fit(df_train, freq="D", validate_each_epoch=True, valid_p=0.1)
        val_metrics = m.test(df_test)
        log.debug("Metrics: train/eval: \n {}".format(metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
        log.debug("Metrics: test: \n {}".format(val_metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
Example #2
    def test_train_eval_test(self):
        log.info("testing: Train Eval Test")
        m = NeuralProphet(
            n_lags=14,
            n_forecasts=7,
            ar_sparsity=0.1,
            epochs=2,
        )
        df = pd.read_csv(PEYTON_FILE, nrows=512)
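        # inputs_overbleed=True lets the validation split reuse the last n_lags training
        # values as autoregression inputs only (never as prediction targets)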
        df_train, df_test = m.split_df(df, valid_p=0.1, inputs_overbleed=True)

        metrics = m.fit(df_train, freq="D", validate_each_epoch=True, valid_p=0.1)
        val_metrics = m.test(df_test)
        log.debug("Metrics: train/eval: \n {}".format(metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
        log.debug("Metrics: test: \n {}".format(val_metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
def test_global_modeling_global_normalization():
    ### GLOBAL MODELLING - NO EXOGENOUS VARIABLES - GLOBAL NORMALIZATION
    log.info("Global Modeling + Global Normalization")
    df = pd.read_csv(PEYTON_FILE, nrows=512)
    df1_0 = df.iloc[:128, :].copy(deep=True)
    df2_0 = df.iloc[128:256, :].copy(deep=True)
    df3_0 = df.iloc[256:384, :].copy(deep=True)
    m = NeuralProphet(
        n_forecasts=2,
        n_lags=10,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        global_normalization=True,
    )
    train_dict = {"df1": df1_0, "df2": df2_0}
    test_dict = {"df3": df3_0}
    m.fit(train_dict, freq="D")
    future = m.make_future_dataframe(test_dict)
    forecast = m.predict(future)
    metrics = m.test(test_dict)
    forecast_trend = m.predict_trend(test_dict)
    forecast_seasonal_components = m.predict_seasonal_components(test_dict)
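
Note: with global_normalization=True a single set of data-normalization parameters is computed over all training dataframes, so a series that was never seen in training (here df3) can still be predicted and evaluated. With the default local normalization, each named dataframe keeps its own parameters, which is why the next test needs unknown_data_normalization for unseen names.
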
def test_global_modeling_no_exogenous_variable():
    ### GLOBAL MODELLING - NO EXOGENOUS VARIABLE
    log.info("Global Modeling - No exogenous variables")
    df = pd.read_csv(PEYTON_FILE, nrows=512)
    df1_0 = df.iloc[:128, :].copy(deep=True)
    df2_0 = df.iloc[128:256, :].copy(deep=True)
    df3_0 = df.iloc[256:384, :].copy(deep=True)
    df4_0 = df.iloc[384:, :].copy(deep=True)
    train_input = {0: df1_0, 1: {"df1": df1_0, "df2": df2_0}, 2: {"df1": df1_0, "df2": df2_0}}
    test_input = {0: df3_0, 1: {"df1": df3_0}, 2: {"df1": df3_0, "df2": df4_0}}
    info_input = {
        0: "Testing df train / df test - no events, no regressors",
        1: "Testing dict df train / df test - no events, no regressors",
        2: "Testing dict df train / dict df test - no events, no regressors",
    }
    for i in range(0, 3):
        log.info(info_input[i])
        m = NeuralProphet(
            n_forecasts=2,
            n_lags=10,
            epochs=EPOCHS,
            batch_size=BATCH_SIZE,
        )
        metrics = m.fit(train_input[i], freq="D")
        forecast = m.predict(df=test_input[i])
        forecast_trend = m.predict_trend(df=test_input[i])
        forecast_seasonal_components = m.predict_seasonal_components(df=test_input[i])
        if PLOT:
            # predict() may return a single dataframe or a dict of forecasts (one per input df)
            forecasts = forecast.values() if isinstance(forecast, dict) else [forecast]
            for fcst in forecasts:
                fig1 = m.plot(fcst)
                fig2 = m.plot(fcst)
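    # df names that were not part of the training dict have no stored normalization
    # parameters, so predict/test on them is expected to raise a ValueError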
    with pytest.raises(ValueError):
        forecast = m.predict({"df4": df4_0})
    log.info(
        "Error - dict with names not provided in the train dict (not in the data params dict)"
    )
    with pytest.raises(ValueError):
        metrics = m.test({"df4": df4_0})
    log.info(
        "Error - dict with names not provided in the train dict (not in the data params dict)"
    )
    m = NeuralProphet(
        n_forecasts=2,
        n_lags=10,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
    )
    m.fit({"df1": df1_0, "df2": df2_0}, freq="D")
    with pytest.raises(ValueError):
        forecast = m.predict({"df4": df4_0})
    # log.info("unknown_data_normalization was not set to True")
    with pytest.raises(ValueError):
        metrics = m.test({"df4": df4_0})
    # log.info("unknown_data_normalization was not set to True")
    with pytest.raises(ValueError):
        forecast_trend = m.predict_trend({"df4": df4_0})
    # log.info("unknown_data_normalization was not set to True")
    with pytest.raises(ValueError):
        forecast_seasonal_components = m.predict_seasonal_components({"df4": df4_0})
    # log.info("unknown_data_normalization was not set to True")
    # Set unknown_data_normalization to True - now there should be no errors
    m.config_normalization.unknown_data_normalization = True
    forecast = m.predict({"df4": df4_0})
    metrics = m.test({"df4": df4_0})
    forecast_trend = m.predict_trend({"df4": df4_0})
    forecast_seasonal_components = m.predict_seasonal_components({"df4": df4_0})
    m.plot_parameters(df_name="df1")
    m.plot_parameters()
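
Putting the pieces together, the sketch below condenses the dict-based global-modeling flow into a self-contained example on synthetic data. Series names, seeds, and hyperparameters are illustrative; the calls mirror the NeuralProphet version exercised by these tests (dict input to fit/predict, config_normalization.unknown_data_normalization).

import numpy as np
import pandas as pd
from neuralprophet import NeuralProphet

def make_series(n=128, seed=0):
    # synthetic daily series in the "ds"/"y" format NeuralProphet expects
    rng = np.random.default_rng(seed)
    ds = pd.date_range("2020-01-01", periods=n, freq="D")
    y = np.sin(np.arange(n) / 7.0) + rng.normal(scale=0.1, size=n)
    return pd.DataFrame({"ds": ds, "y": y})

# one global model fitted jointly on two named series
m = NeuralProphet(n_forecasts=2, n_lags=10, epochs=5)
m.fit({"df1": make_series(seed=1), "df2": make_series(seed=2)}, freq="D")

# "df4" was not in the training dict: enable unknown_data_normalization so the
# globally computed data params are used instead of raising a ValueError
m.config_normalization.unknown_data_normalization = True
forecast = m.predict({"df4": make_series(seed=4)})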