Example #1
import numpy as np

from sktime.datasets import load_airline
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.forecasting.theta import ThetaForecaster
from sktime.utils.validation.forecasting import check_fh


def test_pred_errors_against_y_test(fh):
    """Check prediction performance on airline dataset.

    Y_test must lie in the prediction interval with coverage=0.1.

    Arguments
    ---------
    fh: ForecastingHorizon, fh at which to test prediction

    Raises
    ------
    AssertionError - if y_test does not lie within the prediction intervals
    """
    y = load_airline()
    y_train, y_test = temporal_train_test_split(y)

    f = ThetaForecaster()
    f.fit(y_train, fh=fh)

    intervals = f.predict_interval(fh=fh, coverage=[0.1])

    # select the test observations at the forecast horizon steps
    y_test = y_test.iloc[check_fh(fh) - 1]

    # Performance should be good enough that all observed test values lie within
    # the prediction intervals.
    for ints in intervals:
        # predict_interval columns form a MultiIndex (variable, coverage, "lower"/"upper")
        if ints[2] == "lower":
            assert np.all(y_test > intervals[ints].values)
        else:
            assert np.all(y_test <= intervals[ints].values)
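
The branching above relies on the layout of predict_interval output: a
pandas.DataFrame whose columns form a MultiIndex of (variable name, coverage
fraction, "lower"/"upper"). A minimal sketch to inspect that layout, reusing
the airline setup from the example (the horizon [1, 2, 3] is an arbitrary
choice for illustration):

from sktime.datasets import load_airline
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.forecasting.theta import ThetaForecaster

y_train, _ = temporal_train_test_split(load_airline())

f = ThetaForecaster(sp=12)
f.fit(y_train, fh=[1, 2, 3])

intervals = f.predict_interval(coverage=[0.1])
# e.g. [(..., 0.1, "lower"), (..., 0.1, "upper")]; the third level drives the branching
print(intervals.columns.tolist())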
Example #2

import numpy as np
import pandas as pd
import pytest

from sktime.datasets import load_airline
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.forecasting.theta import ThetaForecaster
from sktime.performance_metrics.forecasting.probabilistic import PinballLoss

list_of_metrics = [PinballLoss]

# test data
y = np.log1p(load_airline())
y_train, y_test = temporal_train_test_split(y)
fh = np.arange(len(y_test)) + 1
f = ThetaForecaster(sp=12)
f.fit(y_train)

QUANTILE_PRED = f.predict_quantiles(fh=fh, alpha=[0.5])
INTERVAL_PRED = f.predict_interval(fh=fh, coverage=0.9)
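# Note: QUANTILE_PRED columns form a MultiIndex (variable, quantile level),
# INTERVAL_PRED columns a MultiIndex (variable, coverage, "lower"/"upper");
# the tests below feed the quantile format to the metrics as y_pred.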


@pytest.mark.parametrize("score_average", [True, False])
@pytest.mark.parametrize("Metric", list_of_metrics)
def test_output(Metric, score_average):
    """Test output is correct class."""
    y_true = y_test
    loss = Metric.create_test_instance()
    loss.set_params(score_average=score_average)
    eval_loss = loss.evaluate(y_true, y_pred=QUANTILE_PRED)
    index_loss = loss.evaluate_by_index(y_true, y_pred=QUANTILE_PRED)

    if score_average:
        assert isinstance(eval_loss, float)
        assert isinstance(index_loss, pd.Series)
    else:
        # without score averaging, losses are reported per quantile level
        assert isinstance(eval_loss, pd.Series)
        assert isinstance(index_loss, pd.DataFrame)
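
For reference outside the test harness, calling a metric instance directly is
equivalent to evaluate (sktime metrics implement __call__); a short usage
sketch against the fixtures above:

loss = PinballLoss()  # score_average=True by default

# single float: pinball loss averaged over time points and quantile levels
print(loss(y_test, y_pred=QUANTILE_PRED))

# pd.Series: one averaged loss per time point in the horizon
print(loss.evaluate_by_index(y_test, y_pred=QUANTILE_PRED))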