def test_naive1_abnormal_input(data, exception):
    '''
    test naive1 raises correct exceptions on abnormal input
    '''
    model = b.Naive1()
    with pytest.raises(exception):
        model.fit(data)

def test_cross_validation_folds_return_length(train_size, min_train_size,
                                              horizon, step, expected):
    '''
    test cross_validation_folds returns the expected number of folds
    '''
    y_train = np.arange(train_size)
    cv = ms.rolling_forecast_origin(y_train, min_train_size, horizon, step)
    result = ms.cross_validation_folds(b.Naive1(), cv)
    assert len(result) == expected

def test_cross_validation_score_return_length(train_size, min_train_size,
                                              horizon, step, expected):
    '''
    test cross_validation_score returns one score per fold
    '''
    y_train = np.arange(train_size)
    cv = ms.rolling_forecast_origin(y_train, min_train_size, horizon, step)
    metric = metrics.mean_error
    result = ms.cross_validation_score(b.Naive1(), cv, metric=metric,
                                       n_jobs=1)
    assert len(result) == expected

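# Hedged sketch, not part of the library under test: a rolling forecast
# origin yields one (train, test) split per origin and moves the origin
# forward by `step` observations each fold.  Under that assumption the
# fold counts asserted in the two tests above follow from the hypothetical
# helper below.
def _expected_fold_count(train_size, min_train_size, horizon, step=1):
    '''Reference fold count for a rolling forecast origin (sketch only).'''
    return (train_size - min_train_size - horizon) // step + 1
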
def test_naive1_fit_predict(data, horizon, expected):
    '''
    test naive1 fit_predict returns the expected number of predictions
    '''
    model = b.Naive1()
    # fit_predict for point forecasts only
    preds = model.fit_predict(pd.Series(data), horizon)
    assert len(preds) == expected

def test_naive1_forecast_output(data, expected):
    '''
    test naive1 carries forward the last value in the series
    '''
    model = b.Naive1()
    model.fit(pd.DataFrame(data))
    # point forecasts only
    preds = model.predict(1)
    assert preds[0] == expected

def test_naive1_forecast_input_numpy(data, horizon, expected):
    '''
    test naive1 accepts a numpy array as training data and returns
    the expected number of predictions
    '''
    model = b.Naive1()
    model.fit(np.array(data))
    # point forecasts only
    preds = model.predict(horizon)
    assert len(preds) == expected

def test_naive1_pi_horizon(data, horizon, alpha, expected):
    '''
    test the correct forecast horizon is returned for the prediction
    interval for Naive1
    '''
    model = b.Naive1()
    model.fit(pd.Series(data))
    # forecast with prediction intervals
    _, intervals = model.predict(horizon, return_predict_int=True,
                                 alpha=alpha)
    assert len(intervals[0]) == expected

def test_naive1_fitted_values_length(training_length):
    '''
    test naive1 .fittedvalues length is as expected
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=training_length)
    model = b.Naive1()
    model.fit(train)
    expected = training_length
    assert len(model.fittedvalues) == expected

def test_naive1_fitted_values_nan_length():
    '''
    test naive1 .fittedvalues has the correct number of NaNs i.e. 1
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=200)
    model = b.Naive1()
    model.fit(train)
    expected = 1
    n_nan = np.isnan(model.fittedvalues).sum()
    assert n_nan == expected

def test_forecast_accuracy_length(n_horizons, expected):
    '''
    test forecast_accuracy returns one score per requested horizon
    '''
    model = b.Naive1()
    train = np.arange(10000)
    metric = metrics.mean_absolute_error
    horizons = np.arange(1, n_horizons + 1).tolist()
    cv = ms.rolling_forecast_origin(train, min_train_size=100, horizon=100)
    train_cv, test_cv = next(cv)
    result_h = ms.forecast_accuracy(model, train_cv, test_cv,
                                    horizons=horizons, metric=metric)
    assert len(result_h) == expected

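# Hedged sketch of per-horizon accuracy (an assumption about the approach,
# not forecast_accuracy's actual implementation): fit once, predict out to
# the maximum horizon, then score the metric on the first h predictions
# for each requested h.  The helper name is hypothetical.
def _per_horizon_scores(model, y_train, y_test, horizons, metric):
    '''One metric score per requested horizon (illustrative only).'''
    model.fit(y_train)
    preds = model.predict(max(horizons))
    return [metric(y_test[:h], preds[:h]) for h in horizons]
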
def test_naive1_se():
    '''
    test the standard error of naive1 is the root mean square
    of the residuals
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=100)
    model = b.Naive1()
    model.fit(train)
    expected = 10.48038
    assert model._resid_std == pytest.approx(expected)

def test_naive1_prediction_interval_low():
    '''
    test naive1 80% lower prediction interval
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=100)
    low = [29.56885, 24.005, 19.73657, 16.13770, 12.96704]
    # high = [56.43115, 61.99451, 66.26343, 69.86230, 73.03296]
    model = b.Naive1()
    model.fit(train)
    _, intervals = model.predict(5, return_predict_int=True, alpha=[0.2])
    assert pytest.approx(intervals[0].T[0], rel=1e-6, abs=0.1) == low

def test_naive1_prediction_interval_95_low():
    '''
    test naive1 95% lower prediction interval
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=100)
    low = [22.458831, 13.950400, 7.421651, 1.917662, -2.931450]
    # high = [63.54117, 72.04960, 78.57835, 84.08234, 88.93145]
    model = b.Naive1()
    model.fit(train)
    _, intervals = model.predict(5, return_predict_int=True, alpha=[0.05])
    assert pytest.approx(intervals[0].T[0], rel=1e-6, abs=0.1) == low

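# Hedged reference for the standard error and interval values asserted
# above, assuming the textbook Naive1 formulae (Hyndman & Athanasopoulos):
# the standard error is the root mean square of the one-step residuals,
# the h-step standard deviation is sigma * sqrt(h), and the interval is
# y_hat +/- z_{1 - alpha/2} * sigma * sqrt(h).  The helper below is an
# illustrative sketch, not the library's implementation; `alpha` here is
# a single scalar, e.g. 0.05.
def _reference_naive1_interval(train, horizon, alpha):
    '''Lower and upper Naive1 prediction interval bounds (sketch only).'''
    from scipy.stats import norm
    train = np.asarray(train, dtype=float)
    resid = np.diff(train)                # one-step naive residuals
    sigma = np.sqrt(np.mean(resid ** 2))  # RMS standard error
    z = norm.ppf(1.0 - alpha / 2.0)
    h = np.arange(1.0, horizon + 1)
    y_hat = np.full(horizon, train[-1])
    half_width = z * sigma * np.sqrt(h)
    return y_hat - half_width, y_hat + half_width
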
def test_mase_cv_number_of_folds(train_size, min_train_size, horizon, step,
                                 expected):
    '''
    check scaled_cross_validation_score returns one score per fold
    from the rolling forecast origin
    '''
    train = np.arange(train_size)
    cv = ms.rolling_forecast_origin(train, min_train_size=min_train_size,
                                    horizon=horizon, step=step)
    scores = ms.scaled_cross_validation_score(b.Naive1(), cv)
    assert expected == len(scores)

def test_bootstrap_prediction_interval_length():
    '''
    test boot_prediction_intervals returns intervals of length horizon
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=100)
    model = b.Naive1()
    model.fit(train)
    preds = model.predict(horizon=5)
    expected = 5
    y_intervals = b.boot_prediction_intervals(preds, model.resid,
                                              horizon=expected,
                                              levels=[0.8], boots=10)
    assert expected == len(y_intervals[0])

def test_bootstrap_prediction_interval_sets_returned(intervals, expected):
    '''
    test the number of bootstrap prediction interval sets returned
    matches the number of levels requested
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=100)
    model = b.Naive1()
    model.fit(train)
    preds = model.predict(horizon=5)
    horizon = 5
    y_intervals = b.boot_prediction_intervals(preds, model.resid,
                                              horizon=horizon,
                                              levels=intervals, boots=10)
    assert expected == len(y_intervals)

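# Hedged sketch of the bootstrap idea exercised above (an assumption about
# the method, not boot_prediction_intervals' exact implementation):
# resample the fitted residuals with replacement to simulate forecast
# error paths, add their cumulative sum to the point forecasts, and take
# the interval from the empirical quantiles of the simulated paths.  The
# helper name is hypothetical.
def _boot_interval_sketch(preds, resid, level=0.8, boots=1000, seed=None):
    '''Empirical bootstrap prediction interval (illustrative only).'''
    rng = np.random.default_rng(seed)
    resid = np.asarray(resid, dtype=float)
    resid = resid[~np.isnan(resid)]
    horizon = len(preds)
    # one row of resampled residuals per bootstrap path
    noise = rng.choice(resid, size=(boots, horizon), replace=True)
    paths = np.asarray(preds) + np.cumsum(noise, axis=1)
    lower = np.quantile(paths, (1.0 - level) / 2.0, axis=0)
    upper = np.quantile(paths, 1.0 - (1.0 - level) / 2.0, axis=0)
    return np.stack([lower, upper], axis=1)
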
@pytest.mark.parametrize("data, horizon, alpha, expected",
                         [([1, 2, 3], 8, [0.8], 8)])
def test_average_pi_horizon(data, horizon, alpha, expected):
    '''
    test the correct forecast horizon is returned for the prediction
    interval for Average
    '''
    model = b.Average()
    model.fit(pd.Series(data))
    # forecast with prediction intervals
    _, intervals = model.predict(horizon, return_predict_int=True,
                                 alpha=alpha)
    assert len(intervals[0]) == expected


@pytest.mark.parametrize(
    "model, data, horizon, alpha, expected",
    [(b.Naive1(), [1, 2, 3, 4, 5], 12, [0.2, 0.05], 2),
     (b.Naive1(), [1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 3),
     (b.SNaive(1), [1, 2, 3], 8, [0.8], 1),
     (b.SNaive(1), [1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 3),
     (b.Naive1(), [1, 2, 3], 8, None, 2),
     (b.SNaive(1), [1, 2, 3], 8, None, 2),
     (b.Average(), [1, 2, 3], 8, None, 2),
     (b.Drift(), [1, 2, 3], 8, None, 2),
     (b.Drift(), [1, 2, 3], 8, [0.8], 1),
     (b.Drift(), [1, 2, 3], 8, None, 2),
     (b.Average(), [1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 3)])
def test_naive_pi_set_number(model, data, horizon, alpha, expected):
    '''
    test the correct number of prediction intervals is returned
    for all naive forecasting classes
    '''
    model.fit(pd.Series(data))
    # forecast with prediction intervals
    _, intervals = model.predict(horizon, return_predict_int=True,
                                 alpha=alpha)
    assert len(intervals) == expected