def test_benchmark_silverkite_template_with_simulated_data():
    # Restrict every list to a single item to speed up the test case
    forecast_horizons = [30]
    max_cvs = [3]
    fit_algorithms = ["linear"]
    metric = EvaluationMetricEnum.MeanSquaredError
    evaluation_metric = EvaluationMetricParam(cv_selection_metric=metric.name)

    # Simulated data
    data_name = "daily_simulated"
    train_period = 365
    data = generate_df_for_tests(freq="D", periods=train_period)
    df = data["df"]
    time_col, value_col = df.columns
    metadata = MetadataParam(time_col=time_col, value_col=value_col, freq="D")
    result_silverkite_simulated = benchmark_silverkite_template(
        data_name=data_name,
        df=df,
        metadata=metadata,
        evaluation_metric=evaluation_metric,
        forecast_horizons=forecast_horizons,
        fit_algorithms=fit_algorithms,
        max_cvs=max_cvs)
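    # `benchmark_silverkite_template` presumably returns one result dict per
    # combination of forecast horizon, fit algorithm, and max CV folds;
    # with one item in each list there is exactly one result here.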

    result_silverkite_simulated = result_silverkite_simulated[0]
    assert result_silverkite_simulated["data_name"] == data_name
    assert result_silverkite_simulated["forecast_model_name"] == "silverkite_linear"
    assert result_silverkite_simulated["train_period"] == train_period
    assert result_silverkite_simulated["forecast_horizon"] == 30
    assert result_silverkite_simulated["cv_folds"] == 3
def test_gen_moving_timeseries_forecast_with_regressors():
    """Test for `gen_moving_timeseries_forecast` with regressors"""
    data = generate_df_for_tests(freq="1H", periods=1000)
    df = data["df"]
    df["x"] = 40

    # A simple train-forecast function that always uses the last available
    # value and then adds the regressor value to produce the forecast
    def train_forecast_func(
            df,
            value_col,
            time_col,
            forecast_horizon,
            new_external_regressor_df):
        # Simply get last observed value and offer as forecast
        value = df[value_col].values[-1]
        forecasted_values = np.repeat(a=value, repeats=forecast_horizon)
        fut_df = pd.DataFrame({value_col: forecasted_values})
        # Adds the regressor value
        fut_df[value_col] = fut_df[value_col] + new_external_regressor_df["x"]
        return {"fut_df": fut_df}

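    # Rolling evaluation: training presumably ends at observation 100 on the first
    # iteration, then the end point advances by `train_move_ahead` observations per
    # iteration, forecasting `forecast_horizon` steps (with the regressor) each time.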
    compare_df = gen_moving_timeseries_forecast(
        df=df,
        time_col="ts",
        value_col="y",
        train_move_ahead=10,
        forecast_horizon=7,
        train_forecast_func=train_forecast_func,
        min_training_end_point=100,
        regressor_cols=["x"])["compare_df"]

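    # Assuming the same seeded test data as `test_gen_moving_timeseries_forecast` below
    # (whose forecasts round to -0.6), adding the constant regressor x=40 shifts the
    # forecast to about -0.6 + 40 = 39.4.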
    assert list(compare_df.head(5)["y_hat"].round(1).values) == [39.4] * 5
def test_run_template_1():
    """Runs default template"""
    data = generate_df_for_tests(
        freq="H",
        periods=700 * 24)
    df = data["train_df"]
    forecast_horizon = data["test_df"].shape[0]

    config = ForecastConfig(
        model_template=ModelTemplateEnum.SK.name,
        forecast_horizon=forecast_horizon,
    )

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = Forecaster().run_forecast_config(
            df=df,
            config=config,
        )

        rmse = EvaluationMetricEnum.RootMeanSquaredError.get_metric_name()
        q80 = EvaluationMetricEnum.Quantile80.get_metric_name()
        assert result.backtest.test_evaluation[rmse] == pytest.approx(2.037, rel=1e-2)
        assert result.backtest.test_evaluation[q80] == pytest.approx(0.836, rel=1e-2)
        assert result.forecast.train_evaluation[rmse] == pytest.approx(2.004, rel=1e-2)
        assert result.forecast.train_evaluation[q80] == pytest.approx(0.800, rel=1e-2)
        check_forecast_pipeline_result(
            result,
            coverage=None,
            strategy=None,
            score_func=EvaluationMetricEnum.MeanAbsolutePercentError.name,
            greater_is_better=False)
def hourly_data():
    """Generate 500 days of hourly data for tests"""
    return generate_df_for_tests(freq="H",
                                 periods=24 * 500,
                                 train_start_date=datetime.datetime(
                                     2018, 7, 1),
                                 conti_year_origin=2018)
def test_plot_grouping_evaluation():
    """Tests plot_grouping_evaluation function"""
    df = generate_df_for_tests(freq="D", periods=20)["df"]
    df.rename(columns={
        TIME_COL: "custom_time_column",
        VALUE_COL: "custom_value_column"
    },
              inplace=True)

    ts = UnivariateTimeSeries()
    ts.load_data(df,
                 time_col="custom_time_column",
                 value_col="custom_value_column")

    # groupby_time_feature
    fig = ts.plot_grouping_evaluation(aggregation_func=np.mean,
                                      aggregation_func_name="mean",
                                      groupby_time_feature="dow")

    assert fig.data[0].name == f"mean of {VALUE_COL}"
    assert fig.layout.xaxis.title.text == "dow"
    assert fig.layout.yaxis.title.text == f"mean of {VALUE_COL}"
    assert fig.layout.title.text == f"mean of {VALUE_COL} vs dow"
    assert fig.data[0].x.shape[0] == 7

    # groupby_sliding_window_size
    fig = ts.plot_grouping_evaluation(
        aggregation_func=np.max,
        aggregation_func_name="max",
        groupby_sliding_window_size=7
    )  # there are 20 training points, so this creates groups of size (6, 7, 7)
    assert fig.data[0].name == f"max of {VALUE_COL}"
    assert fig.layout.xaxis.title.text == f"{TIME_COL}_downsample"
    assert fig.layout.yaxis.title.text == f"max of {VALUE_COL}"
    assert fig.layout.title.text == f"max of {VALUE_COL} vs {TIME_COL}_downsample"
    assert fig.data[0].x.shape[0] == 3

    # groupby_custom_column
    custom_groups = pd.Series(["g1", "g2", "g3", "g4", "g5"],
                              name="custom_groups").repeat(4)
    fig = ts.plot_grouping_evaluation(aggregation_func=np.min,
                                      aggregation_func_name="min",
                                      groupby_custom_column=custom_groups)
    assert fig.data[0].name == f"min of {VALUE_COL}"
    assert fig.layout.xaxis.title.text == "custom_groups"
    assert fig.layout.yaxis.title.text == f"min of {VALUE_COL}"
    assert fig.layout.title.text == f"min of {VALUE_COL} vs custom_groups"
    assert fig.data[0].x.shape[0] == 5

    # custom xlabel, ylabel and title
    fig = ts.plot_grouping_evaluation(aggregation_func=np.mean,
                                      aggregation_func_name="mean",
                                      groupby_time_feature="dow",
                                      xlabel="Day of Week",
                                      ylabel="Average of y",
                                      title="Average of y by Day of week")
    assert fig.layout.xaxis.title.text == "Day of Week"
    assert fig.layout.yaxis.title.text == "Average of y"
    assert fig.layout.title.text == "Average of y by Day of week"
def test_prophet_template_default():
    """Tests prophet_template with default values, for limited data"""
    # prepares input data
    num_days = 10
    data = generate_df_for_tests(freq="D",
                                 periods=num_days,
                                 train_start_date="2018-01-01")
    df = data["df"]
    template = ProphetTemplate()
    config = ForecastConfig(model_template="PROPHET")
    params = template.apply_template_for_pipeline_params(df=df, config=config)
    # not modified
    assert config == ForecastConfig(model_template="PROPHET")
    # checks result
    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    pipeline = params.pop("pipeline", None)
    expected_params = dict(
        df=df,
        time_col=cst.TIME_COL,
        value_col=cst.VALUE_COL,
        date_format=None,
        freq=None,
        train_end_date=None,
        anomaly_info=None,
        # model
        regressor_cols=None,
        lagged_regressor_cols=None,
        estimator=None,
        hyperparameter_grid=template.hyperparameter_grid,
        hyperparameter_budget=None,
        n_jobs=COMPUTATION_N_JOBS,
        verbose=1,
        # forecast
        forecast_horizon=None,
        coverage=None,
        test_horizon=None,
        periods_between_train_test=None,
        agg_periods=None,
        agg_func=None,
        # evaluation
        score_func=metric.name,
        score_func_greater_is_better=metric.get_metric_greater_is_better(),
        cv_report_metrics=CV_REPORT_METRICS_ALL,
        null_model_params=None,
        relative_error_tolerance=None,
        # CV
        cv_horizon=None,
        cv_min_train_periods=None,
        cv_expanding_window=True,
        cv_periods_between_splits=None,
        cv_periods_between_train_test=None,
        cv_max_splits=3)
    assert_basic_pipeline_equal(pipeline, template.pipeline)
    assert_equal(params, expected_params)
def test_gen_moving_timeseries_forecast():
    """Basic test for `gen_moving_timeseries_forecast`"""
    data = generate_df_for_tests(freq="1H", periods=1000)
    df = data["df"]

    # A simple train-forecast function that always uses the last available
    # value as the forecast
    def train_forecast_func(
            df,
            value_col,
            time_col=None,
            forecast_horizon=1):
        # Simply get last observed value and offer as forecast
        value = df[value_col].values[df.shape[0] - 1]
        forecasted_values = np.repeat(a=value, repeats=forecast_horizon)
        fut_df = pd.DataFrame({value_col: forecasted_values})
        return {"fut_df": fut_df}

    compare_df = gen_moving_timeseries_forecast(
        df=df,
        time_col="ts",
        value_col="y",
        train_forecast_func=train_forecast_func,
        train_move_ahead=10,
        forecast_horizon=7,
        min_training_end_point=100)["compare_df"]

    assert list(compare_df.head(5)["y_hat"].round(1).values) == [-0.6] * 5

    # Input start and end times
    compare_df = gen_moving_timeseries_forecast(
        df=df,
        time_col="ts",
        value_col="y",
        train_forecast_func=train_forecast_func,
        train_move_ahead=10,
        forecast_horizon=7,
        min_training_end_point=None,
        min_training_end_timestamp="2018-07-15",
        max_forecast_end_point=None,
        max_forecast_end_timestamp="2018-08-01")["compare_df"]

    assert list(compare_df.head(5)["y_hat"].round(1).values) == [-3.5] * 5

    expected_match = "No reasonble train test period is found for validation"
    with pytest.raises(ValueError, match=expected_match):
        gen_moving_timeseries_forecast(
            df=df,
            time_col="ts",
            value_col="y",
            train_forecast_func=train_forecast_func,
            train_move_ahead=10,
            forecast_horizon=700,
            min_training_end_point=1000)["compare_df"]
def test_apply_template_decorator():
    data = generate_df_for_tests(freq="D", periods=10)
    df = data["df"]
    template = ProphetTemplate()
    with pytest.raises(
            ValueError,
            match=
            "ProphetTemplate only supports config.model_template='PROPHET', found 'UNKNOWN'"
    ):
        template.apply_template_for_pipeline_params(
            df=df, config=ForecastConfig(model_template="UNKNOWN"))
def test_generate_df_for_tests():
    """Test generate_df_for_tests"""
    data = generate_df_for_tests(freq="H",
                                 periods=24 * 10,
                                 train_start_date=datetime.datetime(
                                     2018, 1, 1),
                                 train_frac=0.9,
                                 remove_extra_cols=False)

    assert data["df"].shape == (24 * 10, 48)  # Contains time_feature columns
    assert not data["train_df"].isna().any().any()
    assert not data["test_df"][TIME_COL].isna().any().any()
def test_silverkite_template():
    """Tests test_silverkite_template with default config"""
    data = generate_df_for_tests(freq="D", periods=10)
    df = data["df"]
    template = SilverkiteTemplate()
    config = ForecastConfig(model_template="SK")
    params = template.apply_template_for_pipeline_params(
        df=df,
        config=config
    )
    assert config == ForecastConfig(model_template="SK")  # not modified
    pipeline = params.pop("pipeline", None)

    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    expected_params = dict(
        df=df,
        time_col=TIME_COL,
        value_col=VALUE_COL,
        date_format=None,
        freq=None,
        train_end_date=None,
        anomaly_info=None,
        # model
        regressor_cols=None,
        estimator=None,
        hyperparameter_grid=template.hyperparameter_grid,
        hyperparameter_budget=None,
        n_jobs=COMPUTATION_N_JOBS,
        verbose=1,
        # forecast
        forecast_horizon=None,
        coverage=None,
        test_horizon=None,
        periods_between_train_test=None,
        agg_periods=None,
        agg_func=None,
        # evaluation
        score_func=metric.name,
        score_func_greater_is_better=metric.get_metric_greater_is_better(),
        cv_report_metrics=CV_REPORT_METRICS_ALL,
        null_model_params=None,
        relative_error_tolerance=None,
        # CV
        cv_horizon=None,
        cv_min_train_periods=None,
        cv_expanding_window=True,
        cv_periods_between_splits=None,
        cv_periods_between_train_test=None,
        cv_max_splits=3
    )
    assert_basic_pipeline_equal(pipeline, template.pipeline)
    assert_equal(params, expected_params)
def test_forecast_via_prophet_freq():
    """Tests prophet model at different frequencies"""
    holidays = pd.DataFrame({
        "ds": pd.to_datetime(["2018-12-25", "2019-12-25", "2020-12-25"]),
        "holiday": "christmas",
        "lower_window": -2,
        "upper_window": 2,
    })
    params = dict(coverage=0.9,
                  growth="linear",
                  n_changepoints=2,
                  changepoint_range=0.9,
                  yearly_seasonality="auto",
                  weekly_seasonality="auto",
                  daily_seasonality="auto",
                  holidays=holidays,
                  seasonality_mode="additive",
                  seasonality_prior_scale=5.0,
                  holidays_prior_scale=5.0,
                  changepoint_prior_scale=0.10,
                  mcmc_samples=0,
                  uncertainty_samples=10)
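    # Aside from `coverage` (greykite's prediction-interval width), these keyword
    # arguments appear to mirror the underlying Prophet constructor parameters
    # (an assumption based on the parameter names).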
    # A wide variety of frequencies listed here:
    # https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
    frequencies = [
        "B", "W", "W-SAT", "W-TUE", "M", "SM", "MS", "SMS", "CBMS", "BM", "B",
        "Q", "QS", "BQS", "BQ-AUG", "Y", "YS", "AS-SEP", "H", "BH", "T", "S"
    ]
    for freq in frequencies:
        df = generate_df_for_tests(freq=freq, periods=50)
        train_df = df["train_df"]
        test_df = df["test_df"]

        # tests model fit and predict work without error
        model = ProphetEstimator(**params)
        try:
            model.fit(train_df, time_col=TIME_COL, value_col=VALUE_COL)
            pred = model.predict(test_df)
        except Exception:
            print(f"Failed for frequency {freq}")
            raise

        assert list(pred.columns) == [
            TIME_COL, PREDICTED_COL, PREDICTED_LOWER_COL, PREDICTED_UPPER_COL
        ]
        assert pred[TIME_COL].equals(test_df[TIME_COL])
        model.summary()
def test_get_basic_pipeline_apply():
    """Tests get_basic_pipeline fit and predict methods on a dataset without regressors"""
    df = generate_df_for_tests("D", 50)
    pipeline = get_basic_pipeline(
        estimator=ProphetEstimator(),
        score_func=EvaluationMetricEnum.MeanSquaredError.name,
        score_func_greater_is_better=False,
        agg_periods=None,
        agg_func=None,
        relative_error_tolerance=None,
        coverage=0.95,
        null_model_params=None)
    pipeline.fit(df["train_df"])
    predictions = pipeline.predict(df["test_df"])
    assert predictions.shape[0] == df["test_df"].shape[0]
def test_run_forecast_json():
    """Tests:
     - no coverage
     - hourly data (2+ years)
     - default `hyperparameter_grid` (all interaction terms enabled)
    """
    # sets random state for consistent comparison
    data = generate_df_for_tests(freq="H", periods=700 * 24)
    df = data["train_df"]

    json_str = """{
        "model_template": "SILVERKITE",
        "forecast_horizon": 3359,
        "model_components_param": {
            "custom": {
                "fit_algorithm_dict": {
                    "fit_algorithm": "linear"
                }
            }
        }
    }"""

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        forecaster = Forecaster()
        result = forecaster.run_forecast_json(df=df, json_str=json_str)

        rmse = EvaluationMetricEnum.RootMeanSquaredError.get_metric_name()
        q80 = EvaluationMetricEnum.Quantile80.get_metric_name()
        assert result.backtest.test_evaluation[rmse] == pytest.approx(2.120, rel=0.03)
        assert result.backtest.test_evaluation[q80] == pytest.approx(0.863, rel=0.02)
        assert result.forecast.train_evaluation[rmse] == pytest.approx(1.975, rel=0.02)
        assert result.forecast.train_evaluation[q80] == pytest.approx(0.786, rel=1e-2)
        check_forecast_pipeline_result(
            result,
            coverage=None,
            strategy=None,
            score_func=EvaluationMetricEnum.MeanAbsolutePercentError.name,
            greater_is_better=False)
def test_forecast_via_arima_freq(params):
    frequencies = ["H", "D", "M"]
    for freq in frequencies:
        df = generate_df_for_tests(freq=freq, periods=50)
        train_df = df["train_df"]
        test_df = df["test_df"]

        # tests model fit and predict work without error
        model = AutoArimaEstimator(**params)
        try:
            model.fit(train_df, time_col=TIME_COL, value_col=VALUE_COL)
            pred = model.predict(test_df)
        except Exception:
            print(f"Failed for frequency {freq}")
            raise

        assert list(pred.columns) == [
            TIME_COL, PREDICTED_COL, PREDICTED_LOWER_COL, PREDICTED_UPPER_COL
        ]
        assert pred[TIME_COL].equals(test_df[TIME_COL])
        model.summary()
def test_run_forecast_config():
    """Tests `run_forecast_config`"""
    data = generate_df_for_tests(freq="H", periods=14 * 24)
    df = data["df"]

    # Checks if exception is raised
    with pytest.raises(ValueError, match="is not recognized"):
        forecaster = Forecaster()
        forecaster.run_forecast_config(
            df=df, config=ForecastConfig(model_template="unknown_template"))
    with pytest.raises(ValueError, match="is not recognized"):
        forecaster = Forecaster()
        forecaster.run_forecast_json(
            df=df, json_str="""{ "model_template": "unknown_template" }""")

    # All run_forecast_config* functions return the same result for the default config,
    # call forecast_pipeline, and return a result with the proper format.
    np.random.seed(123)
    forecaster = Forecaster()
    default_result = forecaster.run_forecast_config(df=df)
    score_func = EvaluationMetricEnum.MeanAbsolutePercentError.name
    check_forecast_pipeline_result(default_result,
                                   coverage=None,
                                   strategy=None,
                                   score_func=score_func,
                                   greater_is_better=False)
    assert_equal(forecaster.forecast_result, default_result)

    np.random.seed(123)
    forecaster = Forecaster()
    json_result = forecaster.run_forecast_json(df=df)
    check_forecast_pipeline_result(json_result,
                                   coverage=None,
                                   strategy=None,
                                   score_func=score_func,
                                   greater_is_better=False)
    assert_forecast_pipeline_result_equal(json_result,
                                          default_result,
                                          rel=0.02)
def test_forecast_pipeline_rolling_evaluation_error():
    """Checks errors of forecast_pipeline_rolling_evaluation"""
    data = generate_df_for_tests(freq="D", periods=30)
    df = data["df"]
    tscv = RollingTimeSeriesSplit(forecast_horizon=7,
                                  periods_between_train_test=7,
                                  max_splits=1)
    # Different forecast_horizon in pipeline_params and tscv
    with pytest.raises(ValueError,
                       match="Forecast horizon in 'pipeline_params' "
                       "does not match that of the 'tscv'."):
        pipeline_params = mock_pipeline(df=df, forecast_horizon=15)
        forecast_pipeline_rolling_evaluation(pipeline_params=pipeline_params,
                                             tscv=tscv)

    with pytest.raises(
            ValueError,
            match="'periods_between_train_test' in 'pipeline_params' "
            "does not match that of the 'tscv'."):
        pipeline_params = mock_pipeline(df=df,
                                        forecast_horizon=7,
                                        periods_between_train_test=2)
        forecast_pipeline_rolling_evaluation(pipeline_params=pipeline_params,
                                             tscv=tscv)
def daily_data():
    return generate_df_for_tests(freq="D", periods=500, conti_year_origin=2018)
def test_forecast_similarity_based():
    """ Testing the function: forecast_similarity_based in various examples
    """
    data = generate_df_for_tests(freq="D", periods=30 * 8)  # 8 months
    df = data["df"]
    train_df = data["train_df"]
    test_df = data["test_df"]

    df["z"] = df["y"] + 1
    train_df["z"] = train_df["y"] + 1
    test_df["z"] = test_df["y"] + 1

    train_df = train_df[["ts", "y", "z"]]
    test_df = test_df[["ts", "y", "z"]]

    res = forecast_similarity_based(df=train_df,
                                    time_col="ts",
                                    value_cols=["y", "z"],
                                    agg_method="median",
                                    agg_func=None,
                                    match_cols=["dow"])

    # forecast using predict
    fdf_median = res["predict"](test_df)
    assert (fdf_median["z"] - fdf_median["y"] - 1.0).abs().max().round(2) == 0.0, \
        "forecast for z must be forecast for y + 1 at each timestamp"
    err = calc_pred_err(test_df["y"], fdf_median["y"])
    enum = EvaluationMetricEnum.Correlation
    assert err[enum.get_metric_name()] > 0.3

    # forecast using predict_n
    fdf_median = res["predict_n"](test_df.shape[0])
    err = calc_pred_err(test_df["y"], fdf_median["y"])
    assert err[enum.get_metric_name()] > 0.3

    res = forecast_similarity_based(df=train_df,
                                    time_col="ts",
                                    value_cols=["y", "z"],
                                    agg_method="mean",
                                    agg_func=None,
                                    match_cols=["dow"])

    # forecast using the mean of all similar times
    fdf_mean = res["predict"](test_df)
    err = calc_pred_err(test_df["y"], fdf_mean["y"])
    assert err[enum.get_metric_name()] > 0.3

    res = forecast_similarity_based(df=train_df,
                                    time_col="ts",
                                    value_cols=["y", "z"],
                                    agg_method="most_recent",
                                    agg_func=None,
                                    match_cols=["dow"],
                                    recent_k=3)

    # forecast using the mean of 3 recent times similar to the given time
    fdf_recent3_mean = res["predict"](test_df)
    err = calc_pred_err(test_df["y"], fdf_recent3_mean["y"])
    assert err[enum.get_metric_name()] > 0.3

    plt.plot(df["ts"].dt.strftime('%Y-%m-%d'),
             df["y"],
             label="true",
             alpha=0.5)
    plt.plot(fdf_median["ts"].dt.strftime('%Y-%m-%d'),
             fdf_median["y"],
             alpha=0.5,
             label="median pred wrt dow")
    plt.plot(fdf_mean["ts"].dt.strftime('%Y-%m-%d'),
             fdf_mean["y"],
             alpha=0.5,
             label="mean pred wrt dow")
    plt.plot(fdf_recent3_mean["ts"].dt.strftime('%Y-%m-%d'),
             fdf_recent3_mean["y"],
             alpha=0.5,
             label="mean recent 3 wrt dow")
    plt.xticks(rotation=15)
    plt.legend()
def test_run_forecast_config_with_single_simple_silverkite_template():
    # The generic names of single simple silverkite templates are not added to `ModelTemplateEnum`,
    # therefore we test whether they are recognized.
    data = generate_df_for_tests(freq="D", periods=365)
    df = data["df"]
    metric = EvaluationMetricEnum.MeanAbsoluteError
    evaluation_metric = EvaluationMetricParam(cv_selection_metric=metric.name,
                                              agg_periods=7,
                                              agg_func=np.max,
                                              null_model_params={
                                                  "strategy": "quantile",
                                                  "constant": None,
                                                  "quantile": 0.5
                                              })

    evaluation_period = EvaluationPeriodParam(test_horizon=10,
                                              periods_between_train_test=5,
                                              cv_horizon=4,
                                              cv_min_train_periods=80,
                                              cv_expanding_window=False,
                                              cv_periods_between_splits=20,
                                              cv_periods_between_train_test=3,
                                              cv_max_splits=2)

    model_components = ModelComponentsParam(
        hyperparameter_override=[{
            "estimator__yearly_seasonality": 1
        }, {
            "estimator__yearly_seasonality": 2
        }])
    computation = ComputationParam(verbose=2)
    forecast_horizon = 27
    coverage = 0.90

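    # A single simple silverkite template specified by its component keywords:
    # presumably daily frequency with seasonality turned off.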
    single_template_class = SimpleSilverkiteTemplateOptions(
        freq=SILVERKITE_COMPONENT_KEYWORDS.FREQ.value.DAILY,
        seas=SILVERKITE_COMPONENT_KEYWORDS.SEAS.value.NONE)

    forecast_config = ForecastConfig(model_template=[
        single_template_class, "DAILY_ALGO_SGD", "SILVERKITE_DAILY_90"
    ],
                                     computation_param=computation,
                                     coverage=coverage,
                                     evaluation_metric_param=evaluation_metric,
                                     evaluation_period_param=evaluation_period,
                                     forecast_horizon=forecast_horizon,
                                     model_components_param=model_components)

    forecaster = Forecaster()
    result = forecaster.run_forecast_config(df=df, config=forecast_config)

    summary = summarize_grid_search_results(result.grid_search)
    # single_template_class is 1 template,
    # "DAILY_ALGO_SGD" is 1 template, and "SILVERKITE_DAILY_90" has 4 templates.
    # With 2 items in `hyperparameter_override`, there should be (1 + 1 + 4) * 2 = 12 cases in total.
    assert summary.shape[0] == 12

    # Tests functionality for single template class only.
    forecast_config = ForecastConfig(model_template=single_template_class,
                                     computation_param=computation,
                                     coverage=coverage,
                                     evaluation_metric_param=evaluation_metric,
                                     evaluation_period_param=evaluation_period,
                                     forecast_horizon=forecast_horizon)

    forecaster = Forecaster()
    pipeline_parameters = forecaster.apply_forecast_config(
        df=df, config=forecast_config)
    assert_equal(actual=pipeline_parameters["hyperparameter_grid"],
                 expected={
                     "estimator__time_properties": [None],
                     "estimator__origin_for_time_vars": [None],
                     "estimator__train_test_thresh": [None],
                     "estimator__training_fraction": [None],
                     "estimator__fit_algorithm_dict": [{
                         "fit_algorithm":
                         "linear",
                         "fit_algorithm_params":
                         None
                     }],
                     "estimator__holidays_to_model_separately": [[]],
                     "estimator__holiday_lookup_countries": [[]],
                     "estimator__holiday_pre_num_days": [0],
                     "estimator__holiday_post_num_days": [0],
                     "estimator__holiday_pre_post_num_dict": [None],
                     "estimator__daily_event_df_dict": [None],
                     "estimator__changepoints_dict": [None],
                     "estimator__seasonality_changepoints_dict": [None],
                     "estimator__yearly_seasonality": [0],
                     "estimator__quarterly_seasonality": [0],
                     "estimator__monthly_seasonality": [0],
                     "estimator__weekly_seasonality": [0],
                     "estimator__daily_seasonality": [0],
                     "estimator__max_daily_seas_interaction_order": [0],
                     "estimator__max_weekly_seas_interaction_order": [2],
                     "estimator__autoreg_dict": [None],
                     "estimator__min_admissible_value": [None],
                     "estimator__max_admissible_value": [None],
                     "estimator__uncertainty_dict": [None],
                     "estimator__growth_term": ["linear"],
                     "estimator__regressor_cols": [[]],
                     "estimator__feature_sets_enabled": [False],
                     "estimator__extra_pred_cols": [[]]
                 },
                 ignore_keys={"estimator__time_properties": None})
def pipeline_results():
    """Runs forecast_pipeline three times to get
     grid search results"""
    pipeline_results = {}

    data = generate_df_for_tests(freq="1D", periods=20 * 7)
    df = data["df"]
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        hyperparameter_grid = [{
            "estimator__strategy": ["quantile"],
            "estimator__quantile": [0.9]
        }, {
            "estimator__strategy": ["mean"]
        }, {
            "estimator__strategy": ["constant"],
            "estimator__constant": [1.0, 2.0]
        }]
        pipeline = Pipeline([("estimator", DummyEstimator())])
        # Tests MAPE `score_func`, list `cv_report_metrics`
        metric = EvaluationMetricEnum.MeanAbsolutePercentError
        pipeline_results["1"] = forecast_pipeline(
            df,
            pipeline=pipeline,
            hyperparameter_grid=hyperparameter_grid,
            n_jobs=-1,
            forecast_horizon=20,
            coverage=None,
            agg_periods=7,
            agg_func=np.sum,
            score_func=metric.name,
            score_func_greater_is_better=metric.get_metric_greater_is_better(),
            cv_report_metrics=[
                EvaluationMetricEnum.MeanAbsoluteError.name,
                EvaluationMetricEnum.MeanSquaredError.name,
                EvaluationMetricEnum.MedianAbsolutePercentError.name,
            ],
            null_model_params=None)

        # Tests FRACTION_OUTSIDE_TOLERANCE `score_func`, all `cv_report_metrics`
        pipeline = Pipeline([("estimator", DummyEstimator())])
        pipeline_results["2"] = forecast_pipeline(
            df,
            pipeline=pipeline,
            hyperparameter_grid=hyperparameter_grid,
            n_jobs=-1,
            forecast_horizon=20,
            coverage=None,
            score_func=FRACTION_OUTSIDE_TOLERANCE,
            score_func_greater_is_better=False,
            cv_report_metrics=CV_REPORT_METRICS_ALL,
            null_model_params=None,
            relative_error_tolerance=0.02)

        # Tests callable `score_func`, greater_is_better=True, no `cv_report_metrics`
        fs1 = pd.DataFrame({
            "name": ["tow", "conti_year"],
            "period": [7.0, 1.0],
            "order": [3, 3],
            "seas_names": ["weekly", None]
        })
        fs2 = pd.DataFrame({
            "name": ["tow"],
            "period": [7.0],
            "order": [3],
            "seas_names": ["weekly"]
        })
        hyperparameter_grid = {
            "estimator__origin_for_time_vars": [2018],
            "estimator__extra_pred_cols": [["ct1"], ["ct2"]],
            "estimator__fit_algorithm_dict": [{
                "fit_algorithm": "linear"
            }],
            "estimator__fs_components_df": [fs1, fs2],
        }
        cv_max_splits = 2
        pipeline_results["3"] = forecast_pipeline(
            df,
            estimator=SilverkiteEstimator(),
            hyperparameter_grid=hyperparameter_grid,
            hyperparameter_budget=4,
            n_jobs=1,
            forecast_horizon=3 * 7,
            test_horizon=2 * 7,
            score_func=mean_absolute_error,  # callable score_func
            score_func_greater_is_better=True,  # Not really True, only for the sake of testing
            null_model_params=None,
            cv_horizon=1 * 7,
            cv_expanding_window=True,
            cv_min_train_periods=7 * 7,
            cv_periods_between_splits=7,
            cv_periods_between_train_test=3 * 7,
            cv_max_splits=cv_max_splits)
    return pipeline_results
def test_get_forecast_time_properties():
    """Tests get_forecast_time_properties"""
    num_training_points = 365  # one year of daily data
    data = generate_df_for_tests(freq="D", periods=num_training_points)
    df = data["df"]
    result = get_forecast_time_properties(df,
                                          time_col=TIME_COL,
                                          value_col=VALUE_COL,
                                          freq="D",
                                          forecast_horizon=0)
    default_origin = get_default_origin_for_time_vars(df, TIME_COL)
    assert result == {
        "period": TimeEnum.ONE_DAY_IN_SECONDS.value,
        "simple_freq": SimpleTimeFrequencyEnum.DAY,
        "num_training_points": num_training_points,
        "num_training_days": num_training_points,
        "days_per_observation": 1,
        "forecast_horizon": 0,
        "forecast_horizon_in_timedelta": timedelta(days=0),
        "forecast_horizon_in_days": 0,
        "start_year": 2018,
        "end_year": 2019,
        "origin_for_time_vars": default_origin
    }

    # longer forecast_horizon
    result = get_forecast_time_properties(df,
                                          time_col=TIME_COL,
                                          value_col=VALUE_COL,
                                          freq="D",
                                          forecast_horizon=365)
    default_origin = get_default_origin_for_time_vars(df, TIME_COL)
    assert result == {
        "period": TimeEnum.ONE_DAY_IN_SECONDS.value,
        "simple_freq": SimpleTimeFrequencyEnum.DAY,
        "num_training_points": num_training_points,
        "num_training_days": num_training_points,
        "days_per_observation": 1,
        "forecast_horizon": 365,
        "forecast_horizon_in_timedelta": timedelta(days=365),
        "forecast_horizon_in_days": 365,
        "start_year": 2018,
        "end_year": 2020,
        "origin_for_time_vars": default_origin
    }

    # two years of hourly data
    num_training_points = 2 * 365 * 24
    data = generate_df_for_tests(freq="H", periods=num_training_points)
    df = data["df"]
    result = get_forecast_time_properties(df,
                                          time_col=TIME_COL,
                                          value_col=VALUE_COL,
                                          freq="H",
                                          forecast_horizon=0)
    default_origin = get_default_origin_for_time_vars(df, TIME_COL)
    assert result == {
        "period": TimeEnum.ONE_HOUR_IN_SECONDS.value,
        "simple_freq": SimpleTimeFrequencyEnum.HOUR,
        "num_training_points": num_training_points,
        "num_training_days": num_training_points / 24,
        "days_per_observation": 1 / 24,
        "forecast_horizon": 0,
        "forecast_horizon_in_timedelta": timedelta(days=0),
        "forecast_horizon_in_days": 0,
        "start_year": 2018,
        "end_year": 2020,
        "origin_for_time_vars": default_origin
    }

    # longer forecast_horizon
    result = get_forecast_time_properties(df,
                                          time_col=TIME_COL,
                                          value_col=VALUE_COL,
                                          freq="H",
                                          forecast_horizon=365 * 24)
    default_origin = get_default_origin_for_time_vars(df, TIME_COL)
    assert result == {
        "period": TimeEnum.ONE_HOUR_IN_SECONDS.value,
        "simple_freq": SimpleTimeFrequencyEnum.HOUR,
        "num_training_points": num_training_points,
        "num_training_days": num_training_points / 24,
        "days_per_observation": 1 / 24,
        "forecast_horizon": 365 * 24,
        "forecast_horizon_in_timedelta": timedelta(days=365),
        "forecast_horizon_in_days": 365,
        "start_year": 2018,
        "end_year": 2021,
        "origin_for_time_vars": default_origin
    }

    # ``forecast_horizon=None``
    result = get_forecast_time_properties(df,
                                          time_col=TIME_COL,
                                          value_col=VALUE_COL,
                                          freq="H",
                                          forecast_horizon=None)
    default_origin = get_default_origin_for_time_vars(df, TIME_COL)
    assert result == {
        "period": TimeEnum.ONE_HOUR_IN_SECONDS.value,
        "simple_freq": SimpleTimeFrequencyEnum.HOUR,
        "num_training_points": num_training_points,
        "num_training_days": num_training_points / 24,
        "days_per_observation": 1 / 24,
        "forecast_horizon": 24,
        "forecast_horizon_in_timedelta": timedelta(days=1),
        "forecast_horizon_in_days": 1,
        "start_year": 2018,
        "end_year": 2020,
        "origin_for_time_vars": default_origin
    }

    # weekly df with regressors
    num_training_points = 50
    data = generate_df_with_reg_for_tests(freq="W-SUN",
                                          periods=num_training_points,
                                          train_start_date=datetime.datetime(
                                              2018, 11, 30),
                                          remove_extra_cols=True,
                                          mask_test_actuals=True)
    df = data["df"]
    train_df = data["train_df"]
    forecast_horizon = data["fut_time_num"]
    regressor_cols = [
        col for col in df.columns if col not in [TIME_COL, VALUE_COL]
    ]
    result = get_forecast_time_properties(df,
                                          time_col=TIME_COL,
                                          value_col=VALUE_COL,
                                          freq="W-SUN",
                                          regressor_cols=regressor_cols,
                                          forecast_horizon=forecast_horizon)
    default_origin = get_default_origin_for_time_vars(df, TIME_COL)
    assert result == {
        "period": TimeEnum.ONE_WEEK_IN_SECONDS.value,
        "simple_freq": SimpleTimeFrequencyEnum.WEEK,
        "num_training_points": train_df.shape[0],  # size of training set
        "num_training_days": train_df.shape[0] * 7,
        "days_per_observation": 7,
        "forecast_horizon": 9,
        "forecast_horizon_in_timedelta": timedelta(days=63),
        "forecast_horizon_in_days": 63.0,
        "start_year": 2018,
        "end_year": 2019,
        "origin_for_time_vars": default_origin
    }

    # checks `num_training_days` with `train_end_date`
    data = generate_df_with_reg_for_tests(freq="H",
                                          periods=300 * 24,
                                          train_start_date=datetime.datetime(
                                              2018, 7, 1),
                                          remove_extra_cols=True,
                                          mask_test_actuals=True)
    df = data["df"]
    train_end_date = datetime.datetime(2019, 2, 1)
    result = get_forecast_time_properties(
        df=df,
        time_col=TIME_COL,
        value_col=VALUE_COL,
        freq="H",
        regressor_cols=data["regressor_cols"],
        train_end_date=train_end_date,
        forecast_horizon=forecast_horizon)
    period = 3600  # seconds between observations
    time_delta = (train_end_date - df[TIME_COL].min()
                  )  # train end - train start
    num_training_days = (
        time_delta.days +
        (time_delta.seconds + period) / TimeEnum.ONE_DAY_IN_SECONDS.value)
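    # The extra `period` presumably makes the day count inclusive of the
    # final observation's sampling interval.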
    assert result["num_training_days"] == num_training_days

    # checks `num_training_days` without `train_end_date`
    result = get_forecast_time_properties(
        df=df,
        time_col=TIME_COL,
        value_col=VALUE_COL,
        freq="H",
        regressor_cols=data["regressor_cols"],
        train_end_date=None,
        forecast_horizon=forecast_horizon)
    time_delta = (
        datetime.datetime(2019, 2, 26) - df[TIME_COL].min()
    )  # by default, train end is the last date with nonnull value_col
    num_training_days = (
        time_delta.days +
        (time_delta.seconds + period) / TimeEnum.ONE_DAY_IN_SECONDS.value)
    assert result["num_training_days"] == num_training_days
def hourly_data():
    """Generate 500 days of hourly data for tests"""
    return generate_df_for_tests(freq="H", periods=24 * 500)
def daily_data():
    return generate_df_for_tests(freq="D",
                                 periods=730,
                                 train_start_date=datetime.datetime(
                                     2018, 1, 1),
                                 conti_year_origin=2018)