Example #1
def check_scoring(scoring):
    """
    Validate the performance scoring.

    Parameters
    ----------
    scoring : object of class _MetricFunctionWrapper from sktime.performance_metrics.

    Returns
    -------
    scoring :
        MeanAbsolutePercentageError if the object is None.

    Raises
    ------
    TypeError
        if object is not callable from current scope.
    """
    # Note symmetric=True is default arg for MeanAbsolutePercentageError
    from sktime.performance_metrics.forecasting import MeanAbsolutePercentageError

    if scoring is None:
        return MeanAbsolutePercentageError()

    if not callable(scoring):
        raise TypeError("`scoring` must be a callable object")

    return scoring
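
A quick usage sketch of the helper above (a non-authoritative illustration; it assumes sktime is installed and uses the check_scoring function defined in this example):

from sktime.performance_metrics.forecasting import MeanAbsolutePercentageError

# None falls back to the default metric, symmetric MAPE.
assert isinstance(check_scoring(None), MeanAbsolutePercentageError)

# A callable metric object passes through unchanged.
metric = MeanAbsolutePercentageError(symmetric=True)
assert check_scoring(metric) is metric

# Non-callable input is rejected with a TypeError.
try:
    check_scoring("mape")
except TypeError as err:
    print(err)  # `scoring` must be a callable object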
Example #2
def test_evaluate_no_exog_against_with_exog():
    """Check that adding exogenous data produces different results."""
    y, X = load_longley()
    forecaster = ARIMA(suppress_warnings=True)
    cv = SlidingWindowSplitter()
    scoring = MeanAbsolutePercentageError(symmetric=True)

    out_exog = evaluate(forecaster, cv, y, X=X, scoring=scoring)
    out_no_exog = evaluate(forecaster, cv, y, X=None, scoring=scoring)

    scoring_name = f"test_{scoring.name}"
    assert np.all(out_exog[scoring_name] != out_no_exog[scoring_name])
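
The test above is an excerpt; it presumably relies on imports along the following lines (module paths as in sktime's public API at the time; ARIMA additionally needs the pmdarima soft dependency):

import numpy as np

from sktime.datasets import load_longley
from sktime.forecasting.arima import ARIMA
from sktime.forecasting.model_evaluation import evaluate
from sktime.forecasting.model_selection import SlidingWindowSplitter
from sktime.performance_metrics.forecasting import MeanAbsolutePercentageError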
Example #3
    def get_test_params(cls):
        """Return testing parameter settings for the estimator.

        Returns
        -------
        params : dict or list of dict
        """
        from sktime.forecasting.model_selection._split import SingleWindowSplitter
        from sktime.forecasting.naive import NaiveForecaster
        from sktime.performance_metrics.forecasting import MeanAbsolutePercentageError

        params = {
            "forecaster": NaiveForecaster(strategy="mean"),
            "cv": SingleWindowSplitter(fh=1),
            "param_distributions": {
                "window_length": [2, 5]
            },
            "scoring": MeanAbsolutePercentageError(symmetric=True),
        }
        return params
Example #4
def check_scoring(scoring, allow_y_pred_benchmark=False):
    """
    Validate the performance scoring.

    Parameters
    ----------
    scoring : object that inherits from BaseMetric from sktime.performance_metrics.

    Returns
    -------
    scoring :
        MeanAbsolutePercentageError if the object is None.

    Raises
    ------
    TypeError
        if object is not callable from current scope.
    NotImplementedError
        if metric requires y_pred_benchmark to be passed
    """
    # Note symmetric=True is default arg for MeanAbsolutePercentageError
    from sktime.performance_metrics.forecasting import MeanAbsolutePercentageError

    if scoring is None:
        return MeanAbsolutePercentageError()

    scoring_req_bench = scoring.get_class_tag("requires-y-pred-benchmark", False)

    if scoring_req_bench and not allow_y_pred_benchmark:
        msg = """Scoring requiring benchmark forecasts (y_pred_benchmark) are not
                 fully supported yet. Please use a performance metric that does not
                 require y_pred_benchmark as a keyword argument in its call signature.
              """
        raise NotImplementedError(msg)

    if not callable(scoring):
        raise TypeError("`scoring` must be a callable object")

    return scoring
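
The benchmark guard can be illustrated with a throwaway stand-in object (purely hypothetical, not a real sktime metric) whose class tag claims it needs y_pred_benchmark; check_scoring then refuses it unless allow_y_pred_benchmark=True:

class _DummyBenchmarkMetric:
    """Hypothetical stand-in for a metric that requires y_pred_benchmark."""

    @classmethod
    def get_class_tag(cls, tag_name, tag_value_default=None):
        # Pretend the benchmark requirement is declared via the class tag.
        return True if tag_name == "requires-y-pred-benchmark" else tag_value_default

    def __call__(self, y_true, y_pred):
        raise NotImplementedError("illustrative stub only")


try:
    check_scoring(_DummyBenchmarkMetric())
except NotImplementedError as err:
    print(err)  # benchmark-requiring metrics are rejected unless explicitly allowed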
Example #5
def test_evaluate_initial_window():
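    """Check evaluate with an initial_window and the update strategy."""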
    initial_window = 20
    y = make_forecasting_problem(n_timepoints=30, index_type="int")
    forecaster = NaiveForecaster()
    fh = 1
    cv = SlidingWindowSplitter(fh=fh, initial_window=initial_window)
    scoring = MeanAbsolutePercentageError(symmetric=True)
    out = evaluate(forecaster=forecaster,
                   y=y,
                   cv=cv,
                   strategy="update",
                   scoring=scoring)
    _check_evaluate_output(out, cv, y, scoring)
    assert out.loc[0, "len_train_window"] == initial_window

    # check scoring
    actual = out.loc[0, f"test_{scoring.name}"]
    train, test = next(cv.split(y))
    f = clone(forecaster)
    f.fit(y.iloc[train], fh=fh)
    expected = scoring(y.iloc[test], f.predict())
    np.testing.assert_equal(actual, expected)
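
A condensed sketch of what this test exercises (hedged; it assumes sktime is installed and that make_forecasting_problem lives in sktime.utils._testing.forecasting as in the sktime test suite):

from sktime.forecasting.model_evaluation import evaluate
from sktime.forecasting.model_selection import SlidingWindowSplitter
from sktime.forecasting.naive import NaiveForecaster
from sktime.performance_metrics.forecasting import MeanAbsolutePercentageError
from sktime.utils._testing.forecasting import make_forecasting_problem

y = make_forecasting_problem(n_timepoints=30, index_type="int")
cv = SlidingWindowSplitter(fh=1, initial_window=20)
scoring = MeanAbsolutePercentageError(symmetric=True)
out = evaluate(forecaster=NaiveForecaster(), y=y, cv=cv, strategy="update", scoring=scoring)

# The test reads the first window's training length and its per-window score.
print(out.loc[0, "len_train_window"])        # expected: 20 (the initial window)
print(out.loc[0, f"test_{scoring.name}"])    # symmetric MAPE for the first split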
Example #6
File: _tune.py, Project: juanitorduz/sktime
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return, for use in tests. If no
            special parameters are defined for a value, will return `"default"` set.

        Returns
        -------
        params : dict or list of dict
        """
        from sktime.forecasting.model_selection._split import SingleWindowSplitter
        from sktime.forecasting.naive import NaiveForecaster
        from sktime.performance_metrics.forecasting import MeanAbsolutePercentageError

        params = {
            "forecaster": NaiveForecaster(strategy="mean"),
            "cv": SingleWindowSplitter(fh=1),
            "param_distributions": {"window_length": [2, 5]},
            "scoring": MeanAbsolutePercentageError(symmetric=True),
        }
        return params
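
For context, a hedged sketch of how such parameters would be consumed, assuming the class under test here is ForecastingRandomizedSearchCV from sktime.forecasting.model_selection (an inference from the param_distributions key and the _tune.py filename):

from sktime.forecasting.model_selection import (
    ForecastingRandomizedSearchCV,
    SingleWindowSplitter,
)
from sktime.forecasting.naive import NaiveForecaster
from sktime.performance_metrics.forecasting import MeanAbsolutePercentageError

searcher = ForecastingRandomizedSearchCV(
    forecaster=NaiveForecaster(strategy="mean"),
    cv=SingleWindowSplitter(fh=1),
    param_distributions={"window_length": [2, 5]},
    scoring=MeanAbsolutePercentageError(symmetric=True),
)
# searcher.fit(y) would then tune window_length via the symmetric-MAPE scoring.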
Example #7
from sktime.forecasting.model_selection import SingleWindowSplitter
from sktime.forecasting.model_selection import SlidingWindowSplitter
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.tests._config import TEST_N_ITERS
from sktime.forecasting.tests._config import TEST_OOS_FHS
from sktime.forecasting.tests._config import TEST_RANDOM_SEEDS
from sktime.forecasting.tests._config import TEST_WINDOW_LENGTHS
from sktime.forecasting.trend import PolynomialTrendForecaster
from sktime.performance_metrics.forecasting import (
    MeanAbsolutePercentageError,
    MeanSquaredError,
)
from sktime.transformations.series.detrend import Detrender

TEST_METRICS = [
    MeanAbsolutePercentageError(symmetric=True),
    MeanSquaredError()
]


def _get_expected_scores(forecaster, cv, param_grid, y, X, scoring):
    scores = np.zeros(len(param_grid))
    for i, params in enumerate(param_grid):
        f = clone(forecaster)
        f.set_params(**params)
        out = evaluate(f, cv, y, X=X, scoring=scoring)
        scores[i] = out.loc[:, f"test_{scoring.name}"].mean()
    return scores


def _check_cv(forecaster, gscv, cv, param_grid, y, X, scoring):
Example #8
 StackingForecaster: {
     "forecasters": FORECASTERS
 },
 AutoEnsembleForecaster: {
     "forecasters": FORECASTERS
 },
 Detrender: {
     "forecaster": ExponentialSmoothing()
 },
 ForecastingGridSearchCV: {
     "forecaster": NaiveForecaster(strategy="mean"),
     "cv": SingleWindowSplitter(fh=1),
     "param_grid": {
         "window_length": [2, 5]
     },
     "scoring": MeanAbsolutePercentageError(symmetric=True),
 },
 ForecastingRandomizedSearchCV: {
     "forecaster": NaiveForecaster(strategy="mean"),
     "cv": SingleWindowSplitter(fh=1),
     "param_distributions": {
         "window_length": [2, 5]
     },
     "scoring": MeanAbsolutePercentageError(symmetric=True),
 },
 TabularToSeriesAdaptor: {
     "transformer": StandardScaler()
 },
 ColumnEnsembleClassifier: {
     "estimators":
     [(name, estimator, 0) for (name, estimator) in TIME_SERIES_CLASSIFIERS]
Example #9
)
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.tests._config import (
    TEST_N_ITERS,
    TEST_OOS_FHS,
    TEST_RANDOM_SEEDS,
    TEST_WINDOW_LENGTHS_INT,
)
from sktime.forecasting.trend import PolynomialTrendForecaster
from sktime.performance_metrics.forecasting import (
    MeanAbsolutePercentageError,
    MeanSquaredError,
)
from sktime.transformations.series.detrend import Detrender

TEST_METRICS = [MeanAbsolutePercentageError(symmetric=True), MeanSquaredError()]


def _get_expected_scores(forecaster, cv, param_grid, y, X, scoring):
    scores = np.zeros(len(param_grid))
    for i, params in enumerate(param_grid):
        f = clone(forecaster)
        f.set_params(**params)
        out = evaluate(f, cv, y, X=X, scoring=scoring)
        scores[i] = out.loc[:, f"test_{scoring.name}"].mean()
    return scores


def _check_cv(forecaster, gscv, cv, param_grid, y, X, scoring):
    actual = gscv.cv_results_[f"mean_test_{scoring.name}"]

     "func": median_squared_error,
     "class": MedianSquaredError(),
 },
 "root_median_squared_error": {
     "test_case_1": 0.299216432,
     "test_case_2": 0.244094445,
     "test_case_3": 1.0,
     "func": median_squared_error,
     "class": MedianSquaredError(square_root=True),
 },
 "symmetric_mean_absolute_percentage_error": {
     "test_case_1": 0.16206745335345693,
     "test_case_2": 0.17096048184064724,
     "test_case_3": 1.0833333333333333,
     "func": mean_absolute_percentage_error,
     "class": MeanAbsolutePercentageError(symmetric=True),
 },
 "symmetric_median_absolute_percentage_error": {
     "test_case_1": 0.17291559217102262,
     "test_case_2": 0.15323286657516913,
     "test_case_3": 1.5,
     "func": median_absolute_percentage_error,
     "class": MedianAbsolutePercentageError(symmetric=True),
 },
 "mean_absolute_percentage_error": {
     "test_case_1": 0.16426360194846226,
     "test_case_2": 0.16956968442429066,
     "test_case_3": 1125899906842624.2,
     "func": mean_absolute_percentage_error,
     "class": MeanAbsolutePercentageError(symmetric=False),
 },