def test_PerfectModelEnsemble_constant_forecasts(
        perfectModelEnsemble_initialized_control, metric, comparison, how):
    """Test that PerfectModelEnsemble.verify() returns a perfect score for a perfectly
    identical forecasts."""
    pe = perfectModelEnsemble_initialized_control.isel(lead=[0, 1, 2])
    if how == "constant":  # replaces the variable with all 1's
        pe = pe.apply(xr.ones_like)
    elif how == "increasing_by_lead":
        # set variable values equal to the lead so they increase over lead time
        pe = pe.apply(xr.zeros_like)
        pe._datasets["initialized"] = (
            pe._datasets["initialized"] + pe._datasets["initialized"].lead
        )
    # resolve the metric alias to its canonical name
    metric = METRIC_ALIASES.get(metric, metric)
    Metric = get_metric_class(metric, PM_METRICS)
    category_edges = np.array([0, 0.5, 1])
    if metric in probabilistic_metrics_requiring_logical:

        def f(x):
            return x > 0.5

        metric_kwargs = {"logical": f}
    elif metric == "threshold_brier_score":
        metric_kwargs = {"threshold": 0.5}
    elif metric == "contingency":
        metric_kwargs = {
            "forecast_category_edges": category_edges,
            "observation_category_edges": category_edges,
            "score": "accuracy",
        }
    elif metric == "rps":
        metric_kwargs = {"category_edges": category_edges}
    else:
        metric_kwargs = {}
    if Metric.probabilistic:
        dim = (["member", "init"] if metric
               in probabilistic_metrics_requiring_more_than_member_dim else
               "member")
        comparison = "m2c"
        skill = pe.verify(metric=metric,
                          comparison=comparison,
                          dim=dim,
                          **metric_kwargs)
    else:
        dim = "init" if comparison == "e2c" else ["init", "member"]
        skill = pe.verify(metric=metric,
                          comparison=comparison,
                          dim=dim,
                          **metric_kwargs)
    # TODO: test assert (skill.variable == perfect).all()
    if metric == "contingency":
        assert (skill == 1).all()  # checks Contingency.accuracy
    else:
        assert (skill == Metric.perfect).all()
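
# Minimal sketch (hypothetical helper, not part of the test suite; assumes the
# module-level `import xarray as xr`) of what the `logical` callback above
# does: it reduces a continuous forecast to a boolean event series that
# Brier-type probabilistic metrics consume.
def _example_logical_event_mask():
    da = xr.DataArray([0.2, 0.7, 0.9])
    return da > 0.5  # -> array([False, True, True])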

# Example 2

def test_compute_hindcast_probabilistic_metric_e2o_fails(
        hind_da_initialized_1d, observations_da_1d, metric):
    metric = METRIC_ALIASES.get(metric, metric)
    with pytest.raises(ValueError) as excinfo:
        compute_hindcast(
            hind_da_initialized_1d,
            observations_da_1d,
            comparison="e2o",
            metric=metric,
            dim="member",
        )
    assert f"Probabilistic metric `{metric}` requires" in str(excinfo.value)

# Example 3

def test_compute_hindcast_probabilistic_metric_not_dim_member_warn(
    hind_da_initialized_1d, observations_da_1d, metric, dim
):
    metric = METRIC_ALIASES.get(metric, metric)
    with pytest.warns(UserWarning) as record:
        compute_hindcast(
            hind_da_initialized_1d,
            observations_da_1d,
            comparison="m2o",
            metric=metric,
            dim=dim,
        )
    expected = (
        f'Probabilistic metric {metric} requires to be '
        f'computed over dimension `dim="member"`. '
        f'Set automatically.'
    )
    # Check the third captured message; the first two warn about converting the
    # integer time axis to annual `cftime`.
    assert record[2].message.args[0] == expected
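
# Minimal standalone sketch (hypothetical, not part of the test suite) of the
# `pytest.warns` record list used above: each captured warning exposes its
# message text via `record[i].message.args[0]`.
def _example_pytest_warns_record():
    import warnings  # local import; only needed for this sketch
    with pytest.warns(UserWarning) as record:
        warnings.warn("first", UserWarning)
        warnings.warn("second", UserWarning)
    assert record[1].message.args[0] == "second"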

# Example 4

def test_HindcastEnsemble_perfect_forecasts(hindcast_hist_obs_1d, metric,
                                            comparison, how, alignment):
    """Test that HindcastEnsemble.verify() returns a perfect score for a perfectly
    identical forecasts."""
    he = hindcast_hist_obs_1d.isel(lead=[0, 1], init=range(10))
    if how == "constant":  # replaces the variable with all 1's
        he = he.map(xr.ones_like)
    elif how == "increasing_by_lead":
        # set variable values to the cftime index in days so they increase over time
        he = he.map(xr.ones_like)
        # set initialized values to init in cftime days
        units = "days since 1900-01-01"
        he._datasets["initialized"] = he._datasets["initialized"] * xr.DataArray(
            cftime.date2num(he._datasets["initialized"].init, units),
            dims=["init"],
        )
        # add the lead offset to the initialized values
        he._datasets["initialized"] = (
            he._datasets["initialized"] + he._datasets["initialized"].lead
        )
        # set uninitialized values to time in cftime days
        he._datasets["uninitialized"] = he._datasets["uninitialized"] * xr.DataArray(
            cftime.date2num(he._datasets["uninitialized"].time, units),
            dims=["time"],
        )
        # set observation values to time in cftime days
        he._datasets["observations"] = he._datasets["observations"] * xr.DataArray(
            cftime.date2num(he._datasets["observations"].time, units),
            dims=["time"],
        )
    # resolve the metric alias to its canonical name
    metric = METRIC_ALIASES.get(metric, metric)
    Metric = get_metric_class(metric, HINDCAST_METRICS)
    category_edges = np.array([0, 0.5, 1])
    if metric in probabilistic_metrics_requiring_logical:

        def f(x):
            return x > 0.5

        metric_kwargs = {"logical": f}
    elif metric == "threshold_brier_score":
        metric_kwargs = {"threshold": 0.5}
    elif metric == "contingency":
        metric_kwargs = {
            "forecast_category_edges": category_edges,
            "observation_category_edges": category_edges,
            "score": "accuracy",
        }
    elif metric == "roc":
        metric_kwargs = {"bin_edges": category_edges}
    elif metric == "rps":
        metric_kwargs = {"category_edges": category_edges}
    else:
        metric_kwargs = {}
    if Metric.probabilistic:
        skill = he.verify(
            metric=metric,
            comparison="m2o",
            dim=["member", "init"]
            if metric in probabilistic_metrics_requiring_more_than_member_dim
            else "member",
            alignment=alignment,
            **metric_kwargs,
        ).SST
    else:
        dim = "member" if comparison == "m2o" else "init"
        skill = he.verify(
            metric=metric,
            comparison=comparison,
            dim=dim,
            alignment=alignment,
            **metric_kwargs,
        ).SST
    if metric == "contingency" and how == "constant":
        assert (skill.mean() == 1).all(), print(
            f"{metric} found", skill)  # checks Contingency.accuracy
    elif metric in ["msess", "crpss"]:
        pass  # identical forecasts produce NaNs
    elif Metric.perfect and metric not in pearson_r_containing_metrics:
        assert (skill == Metric.perfect).all(), print(f"{metric} perfect",
                                                      Metric.perfect, "found",
                                                      skill)
    else:
        pass
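
# Minimal sketch (hypothetical helper) of the `cftime.date2num` call used in
# test_HindcastEnsemble_perfect_forecasts: it encodes calendar dates as numeric
# offsets from the reference date in `units`, which is what makes the
# "increasing_by_lead" forecasts grow linearly in time.
def _example_cftime_date2num():
    units = "days since 1900-01-01"
    dates = [
        cftime.DatetimeGregorian(1900, 1, 1),
        cftime.DatetimeGregorian(1900, 1, 3),
    ]
    return cftime.date2num(dates, units)  # -> [0, 2]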