def test_disjoint_inits(small_initialized_da, small_verif_da, alignment):
    """Tests that alignment works with disjoint inits in the verification
    data, i.e., non-continuous initializing to verify with."""
    hind = small_initialized_da.drop_sel(init=1991)
    verif = small_verif_da
    actual = compute_hindcast(hind, verif, alignment=alignment, metric="mse")
    assert actual.notnull().all()
    # hindcast inits: [1990, 1992, 1993]
    # verif times: [1990, 1991, 1992, 1993, 1994]
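    # With a lead of one year, inits [1990, 1992, 1993] are verified against
    # times [1991, 1993, 1994]; the expected value below reproduces that pairing.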
    a = hind.rename({"init": "time"})
    b = verif.sel(time=[1991, 1993, 1994])
    a["time"] = b["time"]
    expected = xs.mse(a, b, "time")
    assert actual == expected
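The pytest fixtures used above (small_initialized_da, small_verif_da, alignment) and the imports are not shown in this excerpt. Below is a minimal sketch of what they might look like, inferred from the init/time comments in the tests; the real climpred fixtures and import paths may differ (e.g. lead attributes, datetime coordinates, climpred version).

# Hypothetical stand-ins for the fixtures referenced by the tests above.
import numpy as np
import pytest
import xarray as xr
import xskillscore as xs

from climpred import HindcastEnsemble
from climpred.prediction import compute_hindcast


@pytest.fixture(params=["same_inits", "same_verifs", "maximize"])
def alignment(request):
    # Alignment strategies exercised by the tests (names from the climpred docs).
    return request.param


@pytest.fixture
def small_initialized_da():
    # Initialized forecasts: inits 1990-1993, a single lead of one year.
    inits = [1990, 1991, 1992, 1993]
    return xr.DataArray(
        np.random.rand(len(inits), 1),
        dims=["init", "lead"],
        coords={"init": inits, "lead": [1]},
    )


@pytest.fixture
def small_verif_da():
    # Verification data covering 1990-1994 on a plain "time" dimension.
    times = [1990, 1991, 1992, 1993, 1994]
    return xr.DataArray(
        np.random.rand(len(times)), dims=["time"], coords={"time": times}
    )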
Example #2
def test_disjoint_verif_time(small_initialized_da, small_verif_da, alignment):
    """Tests that alignment works with disjoint time in the verification
    data, i.e., non-continuous time sampling to verify against."""
    hind = small_initialized_da
    verif = small_verif_da.drop_sel(time=1992)
    actual = (
        HindcastEnsemble(hind)
        .add_observations(verif)
        .verify(comparison="e2o", dim="init", alignment=alignment, metric="mse")
    )
    assert actual.notnull().all()
    # hindcast inits: [1990, 1991, 1992, 1993]
    # verif times: [1990, 1991, 1993, 1994]
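    # Each init is verified one year later (lead of 1); init 1991 would need
    # time 1992, which was dropped, so only inits [1990, 1992, 1993] remain below.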
    a = hind.sel(init=[1990, 1992, 1993]).rename({"init": "time"})
    b = verif.sel(time=[1991, 1993, 1994])
    a["time"] = b["time"]
    expected = xs.mse(a, b, "time")
    assert actual == expected
Example #3
def _mse(forecast, reference, dim='svd', comparison=None):
    """
    Calculate the Mean Squared Error (MSE).

    .. math::
        MSE = \\overline{(f - o)^{2}}

    Range:
        * perfect: 0
        * min: 0
        * max: ∞

    See also:
        * xskillscore.mse
    """
    return mse(forecast, reference, dim=dim)
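To make the formula concrete, a small worked check (added for illustration, not part of the original snippet): with forecasts [1, 2, 3] and observations [1, 1, 5], the squared errors are [0, 1, 4], so the MSE is 5/3 ≈ 1.67.

import xarray as xr
import xskillscore as xs

# Illustrative values only: MSE = mean((f - o)**2) = (0 + 1 + 4) / 3
f = xr.DataArray([1.0, 2.0, 3.0], dims="time")
o = xr.DataArray([1.0, 1.0, 5.0], dims="time")
print(xs.mse(f, o, "time").item())  # 1.666...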
Example #4
def mse(x, y, dim):
    """
    Compute Mean Squared Error.

    Parameters
    ----------
    x : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
        Mix of labeled and/or unlabeled arrays to which to apply the function.
    y : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
        Mix of labeled and/or unlabeled arrays to which to apply the function.
    dim : str
        The dimension to apply the mean squared error along.

    Returns
    -------
    Mean Squared Error
        Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
        numpy.ndarray, the first type on that list to appear on an input.

    """

    return xs.mse(x, y, dim)
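A short usage sketch (inputs and dimension names here are illustrative): only the given dimension is reduced, so any remaining dimensions are kept in the result.

import numpy as np
import xarray as xr
import xskillscore as xs

# Illustrative 2-D inputs: 5 locations by 10 time steps.
x = xr.DataArray(np.random.rand(5, 10), dims=["nlocs", "time"])
y = xr.DataArray(np.random.rand(5, 10), dims=["nlocs", "time"])

err_per_loc = xs.mse(x, y, "time")  # reduces "time", keeps "nlocs"
print(err_per_loc.dims)             # ('nlocs',)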
Example #5
r = xs.pearson_r(obs, fct, "nlocs")

# 2-tailed p-value of Pearson's correlation coefficient
# r_p_value = xs.pearson_r_p_value(obs, fct, "nlocs")

# Spearman's correlation coefficient
rs = xs.spearman_r(obs, fct, "nlocs")

# 2-tailed p-value associated with Spearman's correlation coefficient
# rs_p_value = xs.spearman_r_p_value(obs, fct, "nlocs")

# Root Mean Squared Error
rmse = xs.rmse(obs, fct, "nlocs")

# Mean Squared Error
mse = xs.mse(obs, fct, "nlocs")

# Mean Absolute Error
mae = xs.mae(obs, fct, "nlocs")

# Median Absolute Error
median_absolute_error = xs.median_absolute_error(obs, fct, "nlocs")

# Mean Absolute Percentage Error
mape = xs.mape(obs, fct, "nlocs")

# Symmetric Mean Absolute Percentage Error
# smape = xs.smape(obs, fct, "nlocs")

# You can also specify multiple axes for deterministic metrics:
# Apply Pearson's correlation coefficient
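# Illustrative sketch, not part of the original script: deterministic metrics
# in xskillscore also accept a list of dimensions, so several axes can be
# reduced at once. This assumes obs and fct also carry a "time" dimension.
r_spacetime = xs.pearson_r(obs, fct, ["nlocs", "time"])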