Example #1
def neural_prophet(data):
    m = NeuralProphet()
    m.fit(data, freq='D', epochs=1000)
    future = m.make_future_dataframe(data, periods=7)
    forecast = m.predict(future)
    plot_neural_prophet(forecast, m)
    return forecast.iloc[:, 2].tolist()
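The helper plot_neural_prophet called above is not defined in this snippet. A minimal sketch of what it might do, assuming only NeuralProphet's standard plotting methods (the helper body is a guess, not the original implementation):

import matplotlib.pyplot as plt

def plot_neural_prophet(forecast, m):
    # Hypothetical helper: render the standard NeuralProphet figures.
    m.plot(forecast)             # forecast overlaid on the history
    m.plot_components(forecast)  # trend / seasonality decomposition
    m.plot_parameters()          # learned model parameters
    plt.show()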
def test_custom_torch_loss():
    log.info("TEST PyTorch Custom Loss")

    class MyLoss(torch.nn.modules.loss._Loss):
        def forward(self, input, target):
            alpha = 0.9
            y_diff = target - input
            yhat_diff = input - target
            loss = ((
                alpha * torch.max(y_diff, torch.zeros_like(y_diff)) +
                (1 - alpha) * torch.max(yhat_diff, torch.zeros_like(yhat_diff))
            ).sum().mean())
            return loss

    df = pd.read_csv(YOS_FILE, nrows=NROWS)
    m = NeuralProphet(loss_func=MyLoss)
    with pytest.raises(ValueError):
        # find_learning_rate only supports standard torch loss functions
        metrics = m.fit(df, freq="5min")

    df = pd.read_csv(YOS_FILE, nrows=NROWS)
    m = NeuralProphet(
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        loss_func=MyLoss,
        learning_rate=1,  # bypasses find_learning_rate
    )
    metrics = m.fit(df, freq="5min")
    future = m.make_future_dataframe(df, periods=12, n_historic_predictions=12)
    forecast = m.predict(future)
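To see what MyLoss computes, the toy tensors below reproduce its asymmetric weighting by hand: with alpha = 0.9, under-predictions are penalized nine times as heavily as over-predictions. The values are illustrative only.

import torch

# Toy check of the asymmetric (pinball-style) loss above; numbers are illustrative.
alpha = 0.9
target = torch.tensor([1.0, 2.0, 3.0])
prediction = torch.tensor([0.5, 2.5, 3.0])
under = torch.clamp(target - prediction, min=0.0)  # model predicted too low
over = torch.clamp(prediction - target, min=0.0)   # model predicted too high
loss = (alpha * under + (1 - alpha) * over).sum()
print(loss)  # tensor(0.5000) = 0.9 * 0.5 + 0.1 * 0.5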
Example #3
 def test_random_seed(self):
     log.info("TEST random seed")
     df = pd.read_csv(PEYTON_FILE, nrows=512)
     set_random_seed(0)
     m = NeuralProphet(epochs=1)
     metrics_df = m.fit(df, freq="D")
     future = m.make_future_dataframe(df,
                                      periods=10,
                                      n_historic_predictions=10)
     forecast = m.predict(future)
     checksum1 = sum(forecast["yhat1"].values)
     set_random_seed(0)
     m = NeuralProphet(epochs=1)
     metrics_df = m.fit(df, freq="D")
     future = m.make_future_dataframe(df,
                                      periods=10,
                                      n_historic_predictions=10)
     forecast = m.predict(future)
     checksum2 = sum(forecast["yhat1"].values)
     set_random_seed(1)
     m = NeuralProphet(epochs=1)
     metrics_df = m.fit(df, freq="D")
     future = m.make_future_dataframe(df,
                                      periods=10,
                                      n_historic_predictions=10)
     forecast = m.predict(future)
     checksum3 = sum(forecast["yhat1"].values)
     log.debug("should be same: {} and {}".format(checksum1, checksum2))
     log.debug("should not be same: {} and {}".format(checksum1, checksum3))
     assert math.isclose(checksum1, checksum2)
     assert not math.isclose(checksum1, checksum3)
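The reproducibility checks above hinge on set_random_seed. A minimal sketch of such a helper, assuming it simply seeds the common random number generators (seed_everything_sketch below is a stand-in, not the library's own utility):

import random

import numpy as np
import torch

def seed_everything_sketch(seed: int = 0) -> None:
    # Stand-in for set_random_seed: seed Python, NumPy and PyTorch RNGs
    # so that repeated fits produce identical forecasts.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)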
Example #4
def neural_prophet(params: Dict[str, Union[bool, float, int]],
                   df: pd.DataFrame, freq: str) -> pd.DataFrame:
    model = NeuralProphet(**params)
    model.fit(df, freq=freq)
    future = model.make_future_dataframe(df, periods=144)
    forecast = model.predict(future)
    return forecast
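A usage sketch for this wrapper; the synthetic 10-minute series and the parameter dict below are assumptions chosen to match the fixed 144-step horizon (one day of 10-minute data), not values from the original project.

import numpy as np
import pandas as pd

# Illustrative call; the data and parameters are assumptions.
ds = pd.date_range("2021-01-01", periods=1000, freq="10min")
df = pd.DataFrame({"ds": ds, "y": np.sin(np.arange(1000) / 24.0)})
params = {"epochs": 5, "weekly_seasonality": False}
forecast = neural_prophet(params, df, freq="10min")
print(forecast[["ds", "yhat1"]].tail())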
Example #5
 def test_train_eval_test(self):
     log.info("testing: Train Eval Test")
     m = NeuralProphet(
         n_lags=10,
         n_forecasts=3,
         ar_sparsity=0.1,
         epochs=3,
         batch_size=32,
     )
     df = pd.read_csv(PEYTON_FILE, nrows=95)
     df = df_utils.check_dataframe(df, check_y=False)
     df = m._handle_missing_data(df, freq="D", predicting=False)
     df_train, df_test = m.split_df(df,
                                    freq="D",
                                    valid_p=0.1,
                                    inputs_overbleed=True)
     metrics = m.fit(df_train,
                     freq="D",
                     validate_each_epoch=True,
                     valid_p=0.1)
     metrics = m.fit(df_train, freq="D")
     val_metrics = m.test(df_test)
     log.debug("Metrics: train/eval: \n {}".format(
         metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
     log.debug("Metrics: test: \n {}".format(
         val_metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
def test_callable_loss():
    log.info("TEST Callable Loss")

    def my_loss(output, target):
        assym_penalty = 1.25
        beta = 1
        e = target - output
        me = torch.abs(e)
        z = torch.where(me < beta, 0.5 * (me**2) / beta, me - 0.5 * beta)
        z = torch.where(e < 0, z, assym_penalty * z)
        return z

    df = pd.read_csv(YOS_FILE, nrows=NROWS)
    m = NeuralProphet(
        seasonality_mode="multiplicative",
        loss_func=my_loss,
    )
    with pytest.raises(ValueError):
        # find_learning_rate only supports standard torch loss functions
        metrics = m.fit(df, freq="5min")

    df = pd.read_csv(YOS_FILE, nrows=NROWS)
    m = NeuralProphet(
        loss_func=my_loss,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        learning_rate=0.1,  # bypasses find_learning_rate
    )
    metrics = m.fit(df, freq="5min")
    future = m.make_future_dataframe(df,
                                     periods=12 * 24,
                                     n_historic_predictions=12 * 24)
    forecast = m.predict(future)
Example #7
 def test_train_speed_custom(self):
     df = pd.read_csv(PEYTON_FILE, nrows=102)[:100]
     batch_size = 16
     epochs = 4
     learning_rate = 1.0
     check = {
         "-2": (int(batch_size / 4), int(epochs * 4), learning_rate / 4),
         "-1": (int(batch_size / 2), int(epochs * 2), learning_rate / 2),
         "0": (batch_size, epochs, learning_rate),
         "1":
         (int(batch_size * 2), max(1, int(epochs / 2)), learning_rate * 2),
         "2":
         (int(batch_size * 4), max(1, int(epochs / 4)), learning_rate * 4),
     }
     for train_speed in [-1, 0, 2]:
         m = NeuralProphet(
             learning_rate=learning_rate,
             batch_size=batch_size,
             epochs=epochs,
             train_speed=train_speed,
         )
         m.fit(df, freq="D")
         c = m.config_train
         log.debug(
             "train_speed: {}, batch: {}, epoch: {}, learning_rate: {}".
             format(train_speed, c.batch_size, c.epochs, c.learning_rate))
         batch, epoch, lr = check["{}".format(train_speed)]
         assert c.batch_size == batch
         assert c.epochs == epoch
         assert math.isclose(c.learning_rate, lr)
Example #8
def test_check_duplicate_ds():
    # Check that a ValueError is raised when there are
    # duplicate dates in the 'ds' column of the dataframe
    df = pd.read_csv(PEYTON_FILE, nrows=102)[:50]
    # introduce duplicates in dataframe
    df = pd.concat([df, df[8:9]]).reset_index()
    # Check if error thrown on duplicates
    m = NeuralProphet(
        n_lags=24,
        ar_sparsity=0.5,
    )
    with pytest.raises(ValueError):
        m.fit(df, freq="D")
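Outside the test, duplicated timestamps can be found and dropped with plain pandas before fitting; a small sketch on toy data (column names follow NeuralProphet's 'ds'/'y' convention):

import pandas as pd

# Toy data with one duplicated timestamp.
df = pd.DataFrame({
    "ds": pd.to_datetime(["2021-01-01", "2021-01-02", "2021-01-02", "2021-01-03"]),
    "y": [1.0, 2.0, 2.0, 3.0],
})
dupes = df[df["ds"].duplicated(keep=False)]  # every row that shares its timestamp
print(len(dupes))                            # 2
df_clean = df.drop_duplicates(subset="ds", keep="first").reset_index(drop=True)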
Example #9
def predictions(df: DataFrame) -> DataFrame:
    """
    Fit a NeuralProphet model on df and forecast DAYS_OF_PREDICTION days ahead.

    :param df: DataFrame with 'ds' and 'y' columns to fit the model on
    :return: forecast DataFrame indexed by 'ds' with a rounded 'currency' column
    """
    m = NeuralProphet()
    m.fit(df, freq='D')
    future = m.make_future_dataframe(df, periods=DAYS_OF_PREDICTION)
    forecast = m.predict(future)
    forecast['ds'] = pd.to_datetime(forecast['ds']).dt.strftime('%Y-%m-%d')
    forecast = forecast.set_index('ds')
    forecast.rename(columns={'yhat1': 'currency'}, inplace=True)
    forecast = forecast.round(2)
    return forecast
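A usage sketch for predictions; DAYS_OF_PREDICTION is a module-level constant not shown here, so the call assumes it is already defined, and the daily series is synthetic.

import numpy as np
import pandas as pd

# Illustrative input: two years of synthetic daily values.
ds = pd.date_range("2020-01-01", periods=730, freq="D")
df = pd.DataFrame({"ds": ds, "y": 100 + np.cumsum(np.random.randn(730))})
forecast = predictions(df)          # assumes DAYS_OF_PREDICTION is defined elsewhere
print(forecast["currency"].head())  # rounded forecasts indexed by 'ds'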
Example #10
 def test_custom_changepoints(self):
     log.info("testing: Custom Changepoints")
     df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
     dates = df["ds"][range(1, len(df) - 1, int(len(df) / 5.0))]
     dates_list = [str(d) for d in dates]
     dates_array = pd.to_datetime(dates_list).values
     log.debug("dates: {}".format(dates))
     log.debug("dates_list: {}".format(dates_list))
     log.debug("dates_array: {} {}".format(dates_array.dtype, dates_array))
     for cp in [dates_list, dates_array]:
         m = NeuralProphet(
             changepoints=cp,
             yearly_seasonality=False,
             weekly_seasonality=False,
             daily_seasonality=False,
             epochs=EPOCHS,
             batch_size=BATCH_SIZE,
         )
         # print(m.config_trend)
         metrics_df = m.fit(df, freq="D")
         future = m.make_future_dataframe(df,
                                          periods=60,
                                          n_historic_predictions=60)
         forecast = m.predict(df=future)
         if self.plot:
             # m.plot(forecast)
             # m.plot_components(forecast)
             m.plot_parameters()
             plt.show()
Example #11
    def test_lag_reg_deep(self):
        log.info("testing: List of Lagged Regressors (deep)")
        df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
        m = NeuralProphet(
            n_forecasts=1,
            n_lags=14,
            num_hidden_layers=2,
            d_hidden=32,
            weekly_seasonality=False,
            daily_seasonality=False,
            epochs=EPOCHS,
            batch_size=BATCH_SIZE,
        )
        df["A"] = df["y"].rolling(7, min_periods=1).mean()
        df["B"] = df["y"].rolling(15, min_periods=1).mean()
        df["C"] = df["y"].rolling(30, min_periods=1).mean()

        cols = [col for col in df.columns if col not in ["ds", "y"]]
        m = m.add_lagged_regressor(names=cols)

        m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
        metrics_df = m.fit(df, freq="D", validate_each_epoch=True)
        future = m.make_future_dataframe(df, n_historic_predictions=365)
        forecast = m.predict(future)

        if self.plot:
            # print(forecast.to_string())
            # m.plot_last_forecast(forecast, include_previous_forecasts=10)
            # m.plot(forecast)
            # m.plot_components(forecast)
            m.plot_parameters()
            plt.show()
Example #12
    def test_custom_seasons(self):
        log.info("testing: Custom Seasonality")
        df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
        # m = NeuralProphet(n_lags=60, n_changepoints=10, n_forecasts=30, verbose=True)
        other_seasons = False
        m = NeuralProphet(
            yearly_seasonality=other_seasons,
            weekly_seasonality=other_seasons,
            daily_seasonality=other_seasons,
            seasonality_mode="additive",
            # seasonality_mode="multiplicative",
            seasonality_reg=1,
            epochs=EPOCHS,
            batch_size=BATCH_SIZE,
        )
        m = m.add_seasonality(name="quarterly", period=90, fourier_order=5)
        log.debug("seasonalities: {}".format(m.season_config.periods))
        metrics_df = m.fit(df, freq="D", validate_each_epoch=True)
        future = m.make_future_dataframe(df,
                                         n_historic_predictions=365,
                                         periods=365)
        forecast = m.predict(df=future)
        log.debug("season params: {}".format(m.model.season_params.items()))

        if self.plot:
            m.plot(forecast)
            # m.plot_components(forecast)
            m.plot_parameters()
            plt.show()
Example #13
    def test_future_reg(self):
        log.info("testing: Future Regressors")
        df = pd.read_csv(PEYTON_FILE)
        m = NeuralProphet(
            n_forecasts=1,
            n_lags=0,
            epochs=EPOCHS,
        )

        df["A"] = df["y"].rolling(7, min_periods=1).mean()
        df["B"] = df["y"].rolling(30, min_periods=1).mean()

        m = m.add_future_regressor(name="A", regularization=0.5)
        m = m.add_future_regressor(name="B",
                                   mode="multiplicative",
                                   regularization=0.3)

        metrics_df = m.fit(df, freq="D")
        regressors_df = pd.DataFrame(data={
            "A": df["A"][:50],
            "B": df["B"][:50]
        })
        future = m.make_future_dataframe(df=df,
                                         regressors_df=regressors_df,
                                         n_historic_predictions=10,
                                         periods=50)
        forecast = m.predict(df=future)

        if self.plot:
            # print(forecast.to_string())
            # m.plot_last_forecast(forecast, include_previous_forecasts=3)
            m.plot(forecast)
            m.plot_components(forecast)
            m.plot_parameters()
            plt.show()
Example #14
    def test_future_reg(self):
        log.info("testing: Future Regressors")
        df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50)
        m = NeuralProphet(
            epochs=EPOCHS,
            batch_size=BATCH_SIZE,
        )

        df["A"] = df["y"].rolling(7, min_periods=1).mean()
        df["B"] = df["y"].rolling(30, min_periods=1).mean()
        regressors_df_future = pd.DataFrame(data={
            "A": df["A"][-50:],
            "B": df["B"][-50:]
        })
        df = df[:-50]
        m = m.add_future_regressor(name="A")
        m = m.add_future_regressor(name="B", mode="multiplicative")
        metrics_df = m.fit(df, freq="D")
        future = m.make_future_dataframe(df=df,
                                         regressors_df=regressors_df_future,
                                         n_historic_predictions=10,
                                         periods=50)
        forecast = m.predict(df=future)

        if self.plot:
            m.plot(forecast)
            m.plot_components(forecast)
            m.plot_parameters()
            plt.show()
Example #15
 def test_ar_net(self):
     log.info("testing: AR-Net")
     df = pd.read_csv(PEYTON_FILE)
     m = NeuralProphet(
         n_forecasts=7,
         n_lags=14,
         # ar_sparsity=0.01,
         # num_hidden_layers=0,
         num_hidden_layers=2,
         d_hidden=64,
         # yearly_seasonality=False,
         # weekly_seasonality=False,
         # daily_seasonality=False,
         epochs=EPOCHS,
     )
     m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
     metrics_df = m.fit(df, freq="D", validate_each_epoch=True)
     future = m.make_future_dataframe(df,
                                      n_historic_predictions=len(df) -
                                      m.n_lags)
     forecast = m.predict(df=future)
     if self.plot:
         m.plot_last_forecast(forecast, include_previous_forecasts=3)
         m.plot(forecast)
         m.plot_components(forecast)
         m.plot_parameters()
         plt.show()
Example #16
    def test_lag_reg(self):
        log.info("testing: Lagged Regressors")
        df = pd.read_csv(PEYTON_FILE)
        m = NeuralProphet(
            n_forecasts=3,
            n_lags=7,
            ar_sparsity=0.1,
            # num_hidden_layers=2,
            # d_hidden=64,
            # yearly_seasonality=False,
            # weekly_seasonality=False,
            # daily_seasonality=False,
            epochs=EPOCHS,
        )
        if m.n_lags > 0:
            df["A"] = df["y"].rolling(7, min_periods=1).mean()
            df["B"] = df["y"].rolling(30, min_periods=1).mean()
            m = m.add_lagged_regressor(name="A")
            m = m.add_lagged_regressor(name="B", only_last_value=True)

            # m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
        metrics_df = m.fit(df, freq="D", validate_each_epoch=True)
        future = m.make_future_dataframe(df, n_historic_predictions=365)
        forecast = m.predict(future)

        if self.plot:
            # print(forecast.to_string())
            m.plot_last_forecast(forecast, include_previous_forecasts=10)
            m.plot(forecast)
            m.plot_components(forecast)
            m.plot_parameters()
            plt.show()
    def nprophet_fit_and_predict_simple(
            y: List[float],
            k: int,
            freq: str = None,
            model_params: dict = None) -> Tuple[List, List, Any, Any]:
        """ Simpler wrapper for testing - univariate only """
        assert isinstance(y[0], float)
        freq = freq or NPROPHET_META['freq']
        used_params = NPROPHET_MODEL
        used_params.update({'n_forecasts': k})
        if model_params:
            used_params.update(model_params)

        if len(y) < used_params['n_lags']:
            x = [wrap(y)[0]] * k
            x_std = [1.0] * k
            return x, x_std, None, None
        else:
            model = NeuralProphet(**used_params)
            model.set_log_level(log_level='CRITICAL')
            df = pd.DataFrame(columns=['y'], data=y)
            df['ds'] = pd.date_range(start='2021-01-01',
                                     periods=len(y),
                                     freq=freq)
            metrics = model.fit(df, freq=freq, epochs=40, use_tqdm=False)
            future = model.make_future_dataframe(df)
            forecast = model.predict(future)
            x = [
                forecast['yhat' + str(j + 1)].values[-k + j] for j in range(k)
            ]
            x_std = [1.0] * k
            return x, x_std, forecast, model
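A usage sketch for this wrapper; NPROPHET_META, NPROPHET_MODEL and wrap are defined elsewhere in the surrounding package, so the call below assumes they are importable and only supplies a synthetic univariate series.

import math

# Synthetic series; the wrapper's config dicts are assumed to be importable.
y = [math.sin(i / 10.0) for i in range(400)]
x, x_std, forecast, model = nprophet_fit_and_predict_simple(
    y, k=3, model_params={"n_lags": 12})
print(x)      # three point forecasts, one per step ahead
print(x_std)  # placeholder standard deviations of 1.0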
Example #18
    def test_seasons(self):
        log.info("testing: Seasonality")
        df = pd.read_csv(PEYTON_FILE, nrows=512)
        # m = NeuralProphet(n_lags=60, n_changepoints=10, n_forecasts=30, verbose=True)
        m = NeuralProphet(
            yearly_seasonality=8,
            weekly_seasonality=4,
            # daily_seasonality=False,
            seasonality_mode="additive",
            # seasonality_mode="multiplicative",
            seasonality_reg=1,
            epochs=EPOCHS,
        )
        metrics_df = m.fit(df, freq="D", validate_each_epoch=True)
        future = m.make_future_dataframe(df,
                                         n_historic_predictions=len(df),
                                         periods=365)
        forecast = m.predict(df=future)
        log.debug("SUM of yearly season params: {}".format(
            sum(abs(m.model.season_params["yearly"].data.numpy()))))
        log.debug("SUM of weekly season params: {}".format(
            sum(abs(m.model.season_params["weekly"].data.numpy()))))
        log.debug("season params: {}".format(m.model.season_params.items()))

        if self.plot:
            m.plot(forecast)
            # m.plot_components(forecast)
            m.plot_parameters()
            plt.show()
Example #19
    def test_plot(self):
        log.info("testing: Plotting")
        df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
        m = NeuralProphet(
            n_forecasts=7,
            n_lags=14,
            epochs=EPOCHS,
            batch_size=BATCH_SIZE,
        )
        metrics_df = m.fit(df, freq="D")

        m.highlight_nth_step_ahead_of_each_forecast(7)
        future = m.make_future_dataframe(df, n_historic_predictions=10)
        forecast = m.predict(future)
        m.plot(forecast)
        m.plot_last_forecast(forecast, include_previous_forecasts=10)
        m.plot_components(forecast)
        m.plot_parameters()

        m.highlight_nth_step_ahead_of_each_forecast(None)
        future = m.make_future_dataframe(df, n_historic_predictions=10)
        forecast = m.predict(future)
        m.plot(forecast)
        m.plot_last_forecast(forecast)
        m.plot_components(forecast)
        m.plot_parameters()
        if self.plot:
            plt.show()
Example #20
 def test_trend(self):
     log.info("testing: Trend")
     df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
     m = NeuralProphet(
         growth="linear",
         n_changepoints=10,
         changepoints_range=0.9,
         trend_reg=1,
         trend_reg_threshold=False,
         yearly_seasonality=False,
         weekly_seasonality=False,
         daily_seasonality=False,
         epochs=EPOCHS,
         batch_size=BATCH_SIZE,
     )
     # print(m.config_trend)
     metrics_df = m.fit(df, freq="D")
     future = m.make_future_dataframe(df,
                                      periods=60,
                                      n_historic_predictions=60)
     forecast = m.predict(df=future)
     if self.plot:
         m.plot(forecast)
         # m.plot_components(forecast)
         m.plot_parameters()
         plt.show()
Example #21
    def test_callable_loss(self):
        log.info("TEST Callable Loss")

        def loss(output, target):
            assym_penalty = 1.25
            beta = 1
            e = target - output
            me = torch.abs(e)
            z = torch.where(me < beta, 0.5 * (me**2) / beta, me - 0.5 * beta)
            z = torch.where(e < 0, z, assym_penalty * z)
            return z.mean()

        df = pd.read_csv(YOS_FILE, nrows=NROWS)
        m = NeuralProphet(
            seasonality_mode="multiplicative",
            loss_func=loss,
            changepoints_range=0.95,
            n_changepoints=15,
            weekly_seasonality=False,
            epochs=EPOCHS,
            batch_size=BATCH_SIZE,
        )
        metrics = m.fit(df, freq="5min")
        future = m.make_future_dataframe(df,
                                         periods=12 * 24,
                                         n_historic_predictions=12 * 24)
        forecast = m.predict(future)
Example #22
    def test_lag_reg(self):
        log.info("testing: Lagged Regressors")
        df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
        m = NeuralProphet(
            n_forecasts=2,
            n_lags=3,
            weekly_seasonality=False,
            daily_seasonality=False,
            epochs=EPOCHS,
            batch_size=BATCH_SIZE,
        )
        df["A"] = df["y"].rolling(7, min_periods=1).mean()
        df["B"] = df["y"].rolling(30, min_periods=1).mean()
        m = m.add_lagged_regressor(names="A")
        m = m.add_lagged_regressor(names="B", only_last_value=True)
        metrics_df = m.fit(df, freq="D", validate_each_epoch=True)
        future = m.make_future_dataframe(df, n_historic_predictions=10)
        forecast = m.predict(future)

        if self.plot:
            print(forecast.to_string())
            m.plot_last_forecast(forecast, include_previous_forecasts=5)
            m.plot(forecast)
            m.plot_components(forecast)
            m.plot_parameters()
            plt.show()
Example #23
 def test_loss_func(self):
     log.info("TEST setting torch.nn loss func")
     df = pd.read_csv(PEYTON_FILE, nrows=512)
     loss_fn = torch.nn.MSELoss()
     m = NeuralProphet(epochs=1, loss_func=loss_fn)
     metrics_df = m.fit(df, freq="D")
     future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
     forecast = m.predict(future)
Example #24
    def test_events(self):
        log.info("testing: Events")
        df = pd.read_csv(PEYTON_FILE)[-NROWS:]
        playoffs = pd.DataFrame(
            {
                "event": "playoff",
                "ds": pd.to_datetime(
                    [
                        "2008-01-13",
                        "2009-01-03",
                        "2010-01-16",
                        "2010-01-24",
                        "2010-02-07",
                        "2011-01-08",
                        "2013-01-12",
                        "2014-01-12",
                        "2014-01-19",
                        "2014-02-02",
                        "2015-01-11",
                        "2016-01-17",
                        "2016-01-24",
                        "2016-02-07",
                    ]
                ),
            }
        )
        superbowls = pd.DataFrame(
            {
                "event": "superbowl",
                "ds": pd.to_datetime(["2010-02-07", "2014-02-02", "2016-02-07"]),
            }
        )
        events_df = pd.concat((playoffs, superbowls))

        m = NeuralProphet(
            n_lags=2,
            n_forecasts=30,
            daily_seasonality=False,
            epochs=EPOCHS,
            batch_size=BATCH_SIZE,
        )
        # set event windows
        m = m.add_events(
            ["superbowl", "playoff"], lower_window=-1, upper_window=1, mode="multiplicative", regularization=0.5
        )
        # add the country specific holidays
        m = m.add_country_holidays("US", mode="additive", regularization=0.5)

        history_df = m.create_df_with_events(df, events_df)
        metrics_df = m.fit(history_df, freq="D")
        future = m.make_future_dataframe(df=history_df, events_df=events_df, periods=30, n_historic_predictions=90)
        forecast = m.predict(df=future)
        log.debug("Event Parameters:: {}".format(m.model.event_params))
        if self.plot:
            m.plot_components(forecast)
            m.plot(forecast)
            m.plot_parameters()
            plt.show()
def test_global_modeling_validation_df():
    log.info("Global Modeling + Validation DF")
    df = pd.read_csv(PEYTON_FILE, nrows=512)
    df1_0 = df.iloc[:128, :].copy(deep=True)
    df2_0 = df.iloc[128:256, :].copy(deep=True)
    df_dict = {"df1": df1_0, "df2": df2_0}
    m = NeuralProphet(n_forecasts=2,
                      n_lags=10,
                      epochs=EPOCHS,
                      batch_size=BATCH_SIZE)
    with pytest.raises(ValueError):
        m.fit(df_dict, freq="D", validation_df=df2_0)
    log.info("Error - name of validation df was not provided")
    m = NeuralProphet(n_forecasts=2,
                      n_lags=10,
                      epochs=EPOCHS,
                      batch_size=BATCH_SIZE)
    m.fit(df_dict, freq="D", validation_df={"df2": df2_0})
Example #26
 def test_reg_delay(self):
     df = pd.read_csv(PEYTON_FILE, nrows=102)[:100]
     m = NeuralProphet(epochs=10)
     m.fit(df, freq="D")
     c = m.config_train
     for w, e, i in [
         (0, 0, 1),
         (0, 3, 0),
         (0, 5, 0),
         (0.002739052315863355, 5, 0.1),
         (0.5, 6, 0.5),
         (0.9972609476841366, 7, 0.9),
         (1, 7, 1),
         (1, 8, 0),
     ]:
         weight = c.get_reg_delay_weight(e, i, reg_start_pct=0.5, reg_full_pct=0.8)
         log.debug("e {}, i {}, expected w {}, got w {}".format(e, i, w, weight))
         assert weight == w
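The expected weights in this test are consistent with a smooth cosine ramp of the regularization weight between reg_start_pct and reg_full_pct of training progress. A sketch of such a weighting, derived from the asserted numbers rather than copied from the library:

import math

def reg_delay_weight_sketch(progress, reg_start_pct=0.5, reg_full_pct=0.8):
    # progress = (epoch + fraction_of_epoch) / total_epochs; formula inferred
    # from the asserted values above, not taken from NeuralProphet's source.
    frac = (progress - reg_start_pct) / (reg_full_pct - reg_start_pct)
    frac = min(1.0, max(0.0, frac))
    return 0.5 * (1 - math.cos(math.pi * frac))

print(reg_delay_weight_sketch((6 + 0.5) / 10))  # 0.5
print(reg_delay_weight_sketch((7 + 0.9) / 10))  # ~0.9972609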
def test_seasons():
    log.info("testing: Seasonality: additive")
    df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
    # m = NeuralProphet(n_lags=60, n_changepoints=10, n_forecasts=30, verbose=True)
    m = NeuralProphet(
        yearly_seasonality=8,
        weekly_seasonality=4,
        seasonality_mode="additive",
        seasonality_reg=1,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
    )
    metrics_df = m.fit(df, freq="D")
    future = m.make_future_dataframe(df,
                                     n_historic_predictions=365,
                                     periods=365)
    forecast = m.predict(df=future)
    log.debug("SUM of yearly season params: {}".format(
        sum(abs(m.model.season_params["yearly"].data.numpy()))))
    log.debug("SUM of weekly season params: {}".format(
        sum(abs(m.model.season_params["weekly"].data.numpy()))))
    log.debug("season params: {}".format(m.model.season_params.items()))
    if PLOT:
        m.plot(forecast)
        # m.plot_components(forecast)
        m.plot_parameters()
        plt.show()
    log.info("testing: Seasonality: multiplicative")
    df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
    # m = NeuralProphet(n_lags=60, n_changepoints=10, n_forecasts=30, verbose=True)
    m = NeuralProphet(
        yearly_seasonality=8,
        weekly_seasonality=4,
        seasonality_mode="multiplicative",
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
    )
    metrics_df = m.fit(df, freq="D")
    future = m.make_future_dataframe(df,
                                     n_historic_predictions=365,
                                     periods=365)
    forecast = m.predict(df=future)
def test_loss_func_torch():
    log.info("TEST setting torch.nn loss func")
    df = pd.read_csv(PEYTON_FILE, nrows=512)
    m = NeuralProphet(
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        loss_func=torch.nn.MSELoss,
    )
    metrics_df = m.fit(df, freq="D")
    future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
    forecast = m.predict(future)
def test_metrics():
    log.info("testing: Metrics")
    df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
    m = NeuralProphet(
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        collect_metrics=["MAE", "MSE", "RMSE"],
    )
    metrics_df = m.fit(df, freq="D")
    assert metrics_df is not None
    forecast = m.predict(df)
def test_global_modeling_global_normalization():
    ### GLOBAL MODELLING - NO EXOGENOUS VARIABLES - GLOBAL NORMALIZATION
    log.info("Global Modeling + Global Normalization")
    df = pd.read_csv(PEYTON_FILE, nrows=512)
    df1_0 = df.iloc[:128, :].copy(deep=True)
    df2_0 = df.iloc[128:256, :].copy(deep=True)
    df3_0 = df.iloc[256:384, :].copy(deep=True)
    m = NeuralProphet(n_forecasts=2,
                      n_lags=10,
                      epochs=EPOCHS,
                      batch_size=BATCH_SIZE,
                      global_normalization=True)
    train_dict = {"df1": df1_0, "df2": df2_0}
    test_dict = {"df3": df3_0}
    m.fit(train_dict)
    future = m.make_future_dataframe(test_dict)
    forecast = m.predict(future)
    metrics = m.test(test_dict)
    forecast_trend = m.predict_trend(test_dict)
    forecast_seasonal_components = m.predict_seasonal_components(test_dict)