def test_drift_abnormal_input(data, exception):
    '''
    test drift raises correct exceptions on abnormal input
    '''
    model = b.Drift()
    with pytest.raises(exception):
        model.fit(data)


def test_drift_fit_predict(data, horizon, expected):
    '''
    test that fit_predict returns the correct number of point forecasts
    '''
    model = b.Drift()
    # fit_predict for point forecasts only
    preds = model.fit_predict(pd.Series(data), horizon)
    assert len(preds) == expected


def test_drift_call_predict_before_fit():
    '''
    test Drift raises the correct exception when predict is called before fit
    '''
    model = b.Drift()
    with pytest.raises(UnboundLocalError):
        model.predict(10)


def test_drift_forecast_horizon(data, horizon, expected):
    '''
    test that the correct number of point forecasts is returned
    for the requested horizon
    '''
    model = b.Drift()
    model.fit(np.array(data))
    # point forecasts only
    preds = model.predict(horizon)
    assert len(preds) == expected


def test_drift_forecast_output_longer_horizon(data, period, expected):
    '''
    test drift point forecast values over a longer horizon
    '''
    model = b.Drift()
    model.fit(data)
    # point forecasts only
    preds = model.predict(period)
    assert np.array_equal(preds, expected)
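

# The drift point forecasts checked above can be illustrated with a minimal
# sketch (an assumption for illustration, not forecast_tools' own code):
# extrapolate the straight line joining the first and last training points,
# i.e. y_hat(T + h) = y_T + h * (y_T - y_1) / (T - 1). For example, a
# training series of np.arange(1, 7) gives np.arange(7, 13) over a 6-step
# horizon, matching the parametrised expectation later in this module.
def _drift_point_forecast_sketch(train, horizon):
    ''' hypothetical helper: textbook drift point forecasts '''
    train = np.asarray(train, dtype=float)
    gradient = (train[-1] - train[0]) / (len(train) - 1)
    return train[-1] + gradient * np.arange(1, horizon + 1)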


def test_drift_pi_horizon(data, horizon, alpha, expected):
    '''
    test the correct prediction interval horizon is returned for Drift
    '''
    model = b.Drift()
    model.fit(pd.Series(data))
    # point forecasts and prediction intervals
    _, intervals = model.predict(horizon, return_predict_int=True, alpha=alpha)
    assert len(intervals[0]) == expected


def test_drift_fitted_values_length(training_length):
    '''
    test drift .fittedvalues has the same length as the training data
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=training_length)
    model = b.Drift()
    model.fit(train)
    expected = training_length
    assert len(model.fittedvalues) == expected


def test_drift_fitted_values_nan_length():
    '''
    test Drift .fittedvalues contains the correct number of NaNs (i.e. 1)
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=200)
    model = b.Drift()
    model.fit(train)
    # the first in-sample point has no preceding observation,
    # so its one-step-ahead fitted value is NaN
    expected = 1
    n_nan = np.isnan(model.fittedvalues).sum()
    assert n_nan == expected
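

# A minimal sketch of one-step-ahead fitted values for the drift method
# (an assumption for illustration, not forecast_tools' implementation):
# each in-sample point is predicted from its predecessor plus the estimated
# drift, so the first point has no predecessor and its fitted value is NaN,
# which is why exactly one NaN is expected in the test above.
def _drift_fitted_values_sketch(train):
    ''' hypothetical helper: drift fitted values with a leading NaN '''
    train = np.asarray(train, dtype=float)
    gradient = (train[-1] - train[0]) / (len(train) - 1)
    fitted = np.full(len(train), np.nan)
    fitted[1:] = train[:-1] + gradient
    return fitted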


def test_drift_prediction_interval_95_high():
    '''
    test drift 95% upper prediction interval against values
    from the R forecast package
    '''
    np.random.seed(1066)
    train = np.random.poisson(lam=50, size=100)
    # R forecast package reference values:
    # low = [22.2100359, 13.2828923, 6.2277574, 0.1124247, -5.4196405]
    high = [63.70916, 72.55549, 79.52982, 85.56434, 91.01560]
    model = b.Drift()
    model.fit(train)
    _, intervals = model.predict(5, return_predict_int=True, alpha=[0.05])
    print(intervals[0].T[1])
    # wide absolute tolerance: the interval width is possibly not
    # adjusted for drift in the same way as the R implementation
    assert pytest.approx(intervals[0].T[1], rel=1e-6, abs=1.2) == high
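

# For reference, a rough sketch of how a drift prediction interval can be
# built in the style of R's forecast package (an illustration under textbook
# assumptions, not forecast_tools' implementation): the h-step standard
# error grows as sigma_hat * sqrt(h * (1 + h / (T - 1))), where the
# (1 + h / (T - 1)) term is the drift adjustment referred to in the test's
# tolerance comment; dropping it gives the plain random-walk width
# sigma_hat * sqrt(h).
def _drift_interval_sketch(train, horizon, z=1.959964):
    ''' hypothetical helper: 95% drift interval via the textbook formula '''
    train = np.asarray(train, dtype=float)
    t = len(train)
    gradient = (train[-1] - train[0]) / (t - 1)
    h = np.arange(1, horizon + 1)
    point = train[-1] + gradient * h
    # one-step-ahead residuals of the fitted drift line (mean zero)
    resid = train[1:] - (train[:-1] + gradient)
    sigma = resid.std(ddof=1)
    se = sigma * np.sqrt(h * (1 + h / (t - 1)))
    # columns: lower and upper bounds
    return np.vstack([point - z * se, point + z * se]).T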


def test_average_pi_horizon(data, horizon, alpha, expected):
    '''
    test the correct prediction interval horizon is returned for Average
    '''
    model = b.Average()
    model.fit(pd.Series(data))
    # point forecasts and prediction intervals
    _, intervals = model.predict(horizon, return_predict_int=True, alpha=alpha)
    assert len(intervals[0]) == expected


@pytest.mark.parametrize(
    "model, data, horizon, alpha, expected",
    [(b.Naive1(), [1, 2, 3, 4, 5], 12, [0.2, 0.05], 2),
     (b.Naive1(), [1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 3),
     (b.SNaive(1), [1, 2, 3], 8, [0.8], 1),
     (b.SNaive(1), [1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 3),
     (b.Naive1(), [1, 2, 3], 8, None, 2),
     (b.SNaive(1), [1, 2, 3], 8, None, 2),
     (b.Average(), [1, 2, 3], 8, None, 2),
     (b.Drift(), [1, 2, 3], 8, None, 2),
     (b.Drift(), [1, 2, 3], 8, [0.8], 1),
     (b.Drift(), [1, 2, 3], 8, None, 2),
     (b.Average(), [1, 2, 3, 4, 5], 24, [0.2, 0.10, 0.05], 3)])
def test_naive_pi_set_number(model, data, horizon, alpha, expected):
    '''
    test the correct number of prediction interval sets (one per alpha)
    is returned for all Naive forecasting classes
    '''
    model.fit(pd.Series(data))
    # point forecasts and prediction intervals
    _, intervals = model.predict(horizon, return_predict_int=True, alpha=alpha)
    assert len(intervals) == expected


@pytest.mark.parametrize("data, period, expected",
                         [(np.arange(1, 7), 6, np.arange(7, 13)),