def test_PerfectModelEnsemble_constant_forecasts(
        perfectModelEnsemble_initialized_control, metric, comparison, how):
    """Test that PerfectModelEnsemble.verify() returns a perfect score for a perfectly
    identical forecasts."""
    pe = perfectModelEnsemble_initialized_control.isel(lead=[0, 1, 2])
    if how == "constant":  # replaces the variable with all 1's
        pe = pe.apply(xr.ones_like)
    elif how == "increasing_by_lead":
        # set variable values to zero plus the lead, so values increase with lead time
        pe = pe.apply(xr.zeros_like)
        pe._datasets["initialized"] = (
            pe._datasets["initialized"] + pe._datasets["initialized"].lead
        )
    # get metric and comparison strings incorporating alias
    metric = METRIC_ALIASES.get(metric, metric)
    Metric = get_metric_class(metric, PM_METRICS)
    category_edges = np.array([0, 0.5, 1])
    if metric in probabilistic_metrics_requiring_logical:

        def f(x):
            return x > 0.5

        metric_kwargs = {"logical": f}
    elif metric == "threshold_brier_score":
        metric_kwargs = {"threshold": 0.5}
    elif metric == "contingency":
        metric_kwargs = {
            "forecast_category_edges": category_edges,
            "observation_category_edges": category_edges,
            "score": "accuracy",
        }
    elif metric == "rps":
        metric_kwargs = {"category_edges": category_edges}
    else:
        metric_kwargs = {}
    if Metric.probabilistic:
        dim = (["member", "init"] if metric
               in probabilistic_metrics_requiring_more_than_member_dim else
               "member")
        comparison = "m2c"
        skill = pe.verify(metric=metric,
                          comparison=comparison,
                          dim=dim,
                          **metric_kwargs)
    else:
        dim = "init" if comparison == "e2c" else ["init", "member"]
        skill = pe.verify(metric=metric,
                          comparison=comparison,
                          dim=dim,
                          **metric_kwargs)
    # TODO: assert (skill.variable == perfect).all()
    if metric == "contingency":
        assert (skill == 1).all()  # checks Contingency.accuracy
    else:
        assert (skill == Metric.perfect).all()
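# Note: the `metric`, `comparison` and `how` arguments above are pytest
# parameters; the parametrize decorators are not shown in this excerpt. A
# minimal sketch of how such a test is typically driven (the parameter lists
# below are illustrative placeholders, not the project's actual registries):
import pytest


@pytest.mark.parametrize("how", ["constant", "increasing_by_lead"])
@pytest.mark.parametrize("comparison", ["m2c", "e2c"])
@pytest.mark.parametrize("metric", ["mse", "crps"])
def test_parametrization_sketch(metric, comparison, how):
    # every metric/comparison/transformation combination gets its own test case
    assert how in ("constant", "increasing_by_lead")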
Example #2
def test_pm_comparison_stack_dims_when_deterministic(PM_da_initialized_1d,
                                                     comparison, metric):
    """Test that deterministic metrics keep the member dim in both forecast and
    reference, while probabilistic metrics keep it only in the forecast."""
    metric = get_metric_class(metric, PM_METRICS)
    comparison = get_comparison_class(comparison, PM_COMPARISONS)
    actual_f, actual_r = comparison.function(PM_da_initialized_1d,
                                             metric=metric)
    if not metric.probabilistic:
        assert "member" in actual_f.dims
        assert "member" in actual_r.dims
    else:
        assert "member" in actual_f.dims
        assert "member" not in actual_r.dims
Example #3
def test_all(PM_da_initialized_1d, comparison, metric):
    """Test that every comparison returns NaN-free forecast/obs pairs with
    consistent dimensions."""
    metric = get_metric_class(metric, PM_METRICS)
    ds = PM_da_initialized_1d
    comparison = get_comparison_class(comparison, PM_COMPARISONS)
    forecast, obs = comparison.function(ds, metric=metric)
    assert not forecast.isnull().any()
    assert not obs.isnull().any()
    if not metric.probabilistic:
        # same dimensions for deterministic metrics
        assert forecast.dims == obs.dims
    else:
        if comparison.name in PROBABILISTIC_PM_COMPARISONS:
            # same but member dim for probabilistic
            assert set(forecast.dims) - set(["member"]) == set(obs.dims)
Example #4
def test_get_metric_class_fail():
    """Test if passing something not in the dict raises the right error."""
    with pytest.raises(KeyError) as excinfo:
        get_metric_class("not_metric", DETERMINISTIC_PM_METRICS)
    assert "Specify metric from" in str(excinfo.value)
Example #5
def test_get_metric_class():
    """Test if passing in a string gets the right metric function."""
    actual = get_metric_class("pearson_r", DETERMINISTIC_PM_METRICS).name
    expected = __pearson_r.name
    assert actual == expected
Example #6
def test_HindcastEnsemble_perfect_forecasts(hindcast_hist_obs_1d, metric,
                                            comparison, how, alignment):
    """Test that HindcastEnsemble.verify() returns a perfect score for a perfectly
    identical forecasts."""
    he = hindcast_hist_obs_1d.isel(lead=[0, 1], init=range(10))
    if how == "constant":  # replaces the variable with all 1's
        he = he.map(xr.ones_like)
    elif how == "increasing_by_lead":
        # set variable values to the cftime init index in days so that values
        # increase over time
        he = he.map(xr.ones_like)
        # set initialized values to init in cftime days
        units = "days since 1900-01-01"
        he._datasets["initialized"] = he._datasets["initialized"] * xr.DataArray(
            cftime.date2num(he._datasets["initialized"].init, units),
            dims=["init"],
        )
        # add initialized leads
        he._datasets["initialized"] = (
            he._datasets["initialized"] + he._datasets["initialized"].lead
        )
        # set uninitialized values to their time index in cftime days
        he._datasets["uninitialized"] = he._datasets["uninitialized"] * xr.DataArray(
            cftime.date2num(he._datasets["uninitialized"].time, units),
            dims=["time"],
        )
        # set obs values to their time index in cftime days
        he._datasets["observations"] = he._datasets["observations"] * xr.DataArray(
            cftime.date2num(he._datasets["observations"].time, units),
            dims=["time"],
        )
    # get metric and comparison strings incorporating alias
    metric = METRIC_ALIASES.get(metric, metric)
    Metric = get_metric_class(metric, HINDCAST_METRICS)
    category_edges = np.array([0, 0.5, 1])
    if metric in probabilistic_metrics_requiring_logical:

        def f(x):
            return x > 0.5

        metric_kwargs = {"logical": f}
    elif metric == "threshold_brier_score":
        metric_kwargs = {"threshold": 0.5}
    elif metric == "contingency":
        metric_kwargs = {
            "forecast_category_edges": category_edges,
            "observation_category_edges": category_edges,
            "score": "accuracy",
        }
    elif metric == "roc":
        metric_kwargs = {"bin_edges": category_edges}
    elif metric == "rps":
        metric_kwargs = {"category_edges": category_edges}
    else:
        metric_kwargs = {}
    if Metric.probabilistic:
        skill = he.verify(
            metric=metric,
            comparison="m2o",
            dim=["member", "init"]
            if metric in probabilistic_metrics_requiring_more_than_member_dim
            else "member",
            alignment=alignment,
            **metric_kwargs,
        ).SST
    else:
        dim = "member" if comparison == "m2o" else "init"
        skill = he.verify(
            metric=metric,
            comparison=comparison,
            dim=dim,
            alignment=alignment,
            **metric_kwargs,
        ).SST
    if metric == "contingency" and how == "constant":
        assert (skill.mean() == 1).all(), print(
            f"{metric} found", skill)  # checks Contingency.accuracy
    elif metric in ["msess", "crpss"]:
        pass  # identical forecasts produce NaNs
    elif Metric.perfect and metric not in pearson_r_containing_metrics:
        assert (skill == Metric.perfect).all(), print(f"{metric} perfect",
                                                      Metric.perfect, "found",
                                                      skill)
    else:
        pass
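# Why "msess" and "crpss" are skipped above: skill scores are defined relative
# to a reference, e.g. the common formulation MSESS = 1 - MSE_forecast / MSE_reference.
# With the perfectly reproduced data used here both error terms collapse to
# zero, so the score becomes 0/0 = NaN rather than the metric's "perfect" value.
import numpy as np

mse_forecast = np.float64(0.0)
mse_reference = np.float64(0.0)
with np.errstate(invalid="ignore"):
    msess = 1.0 - mse_forecast / mse_reference
print(np.isnan(msess))  # True: no defined skill for identical forecasts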