Example #1
def test_pm_metric_skipna(PM_da_initialized_3d, PM_da_control_3d, metric):
    """Test skipna in compute_perfect_model."""
    PM_da_initialized_3d = PM_da_initialized_3d.copy()
    # insert NaNs into part of the data to exercise skipna
    PM_da_initialized_3d.values[1:3, 1:4, 1:4, 4:6, 4:6] = np.nan

    base = compute_perfect_model(
        PM_da_initialized_3d,
        PM_da_control_3d,
        metric=metric,
        skipna=False,
        dim='init',
        comparison='m2e',
    ).mean('member')
    skipping = compute_perfect_model(
        PM_da_initialized_3d,
        PM_da_control_3d,
        metric=metric,
        skipna=True,
        dim='init',
        comparison='m2e',
    ).mean('member')
    assert ((base - skipping) != 0.0).any()
    assert base.isel(lead=2, x=5, y=5).isnull()
    assert not skipping.isel(lead=2, x=5, y=5).isnull()
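These tests lean on pytest fixtures defined elsewhere in the test suite. As a minimal sketch, the shapes below are inferred from how this test indexes the data; the dimension order and sizes are assumptions, not the actual fixture code:

import numpy as np
import xarray as xr

# Hypothetical fixtures: the five-index assignment above suggests
# (init, member, lead, x, y) for the initialized ensemble and
# (time, x, y) for the control run; the real fixtures also carry
# coordinate values on init and lead.
PM_da_initialized_3d = xr.DataArray(
    np.random.rand(4, 4, 5, 8, 8),
    dims=['init', 'member', 'lead', 'x', 'y'],
)
PM_da_control_3d = xr.DataArray(
    np.random.rand(30, 8, 8),
    dims=['time', 'x', 'y'],
)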
Example #2
def test_pm_metric_weights_m2x(PM_da_initialized_3d, PM_da_control_3d,
                               comparison, metric):
    """Test init weights in compute_perfect_model."""
    # distribute weights on initializations
    dim = 'init'
    base = compute_perfect_model(
        PM_da_initialized_3d,
        PM_da_control_3d,
        dim=dim,
        metric=metric,
        comparison=comparison,
    )
    # weights increasing along the stacked init (init x member) dimension
    weights = xr.DataArray(
        np.arange(
            1,
            1 + PM_da_initialized_3d[dim].size *
            PM_da_initialized_3d['member'].size,
        ),
        dims='init',
    )

    weighted = compute_perfect_model(
        PM_da_initialized_3d,
        PM_da_control_3d,
        dim=dim,
        comparison=comparison,
        metric=metric,
        weights=weights,
    )
    print((base / weighted).mean(['x', 'y']))
    # test for difference
    assert (xs.smape(base, weighted, ['x', 'y']) > 0.01).any()
Example #3
def test_compute_perfect_model_lead0_lead1(
    PM_da_initialized_1d,
    PM_da_initialized_1d_lead0,
    PM_da_control_1d,
    comparison,
    metric,
    dim,
):
    """
    Checks that metric results are identical for a lead 0 and lead 1 setup.
    """
    res1 = compute_perfect_model(
        PM_da_initialized_1d,
        PM_da_control_1d,
        comparison=comparison,
        metric=metric,
        dim=dim,
    )
    res2 = compute_perfect_model(
        PM_da_initialized_1d_lead0,
        PM_da_control_1d,
        comparison=comparison,
        metric=metric,
        dim=dim,
    )
    assert (res1.values == res2.values).all()
Example #4
def test_compute_pm_probabilistic_metric_non_probabilistic_comparison_fails(
        PM_ds_initialized_1d, PM_ds_control_1d, metric, comparison):
    with pytest.raises(ValueError,
                       match=f"Probabilistic metric `{metric}` requires"):
        compute_perfect_model(
            PM_ds_initialized_1d,
            PM_ds_control_1d,
            comparison=comparison,
            metric=metric,
        )
Example #5
def test_compute_pm_probabilistic_metric_non_probabilistic_comparison_fails(
        pm_da_ds1d, pm_da_control1d, metric, comparison):
    with pytest.raises(ValueError) as excinfo:
        compute_perfect_model(pm_da_ds1d,
                              pm_da_control1d,
                              comparison=comparison,
                              metric=metric)
    assert (
        f'Probabilistic metric {metric} cannot work with comparison {comparison}'
        in str(excinfo.value))
Example #6
def test_compute_perfect_model_metric_keyerrors(pm_da_ds1d, pm_da_control1d,
                                                metric):
    """
    Checks that wrong metric names get caught.
    """
    with pytest.raises(KeyError) as excinfo:
        compute_perfect_model(pm_da_ds1d,
                              pm_da_control1d,
                              comparison='e2c',
                              metric=metric)
    assert 'Specify metric from' in str(excinfo.value)
Example #7
def test_compute_pm_probabilistic_metric_non_probabilistic_comparison_fails(
        PM_da_initialized_1d, PM_da_control_1d, metric, comparison):
    with pytest.raises(ValueError) as excinfo:
        compute_perfect_model(
            PM_da_initialized_1d,
            PM_da_control_1d,
            comparison=comparison,
            metric=metric,
        )
    assert f"Probabilistic metric `{metric}` requires comparison" in str(
        excinfo.value)
Example #8
def test_compute_perfect_model_metric_keyerrors(
    PM_da_initialized_1d, PM_da_control_1d, metric
):
    """
    Checks that wrong metric names get caught.
    """
    with pytest.raises(KeyError) as excinfo:
        compute_perfect_model(
            PM_da_initialized_1d, PM_da_control_1d, comparison="e2c", metric=metric,
        )
    assert "Specify metric from" in str(excinfo.value)
Example #9
def test_compute_perfect_model_comparison_keyerrors(
    PM_da_initialized_1d, PM_da_control_1d, comparison
):
    """
    Checks that wrong comparison names get caught.
    """
    with pytest.raises(KeyError) as excinfo:
        compute_perfect_model(
            PM_da_initialized_1d, PM_da_control_1d, comparison=comparison, metric="mse",
        )
    assert "Specify comparison from" in str(excinfo.value)
Example #10
def test_compute_perfect_model_comparison_keyerrors(pm_da_ds1d,
                                                    pm_da_control1d,
                                                    comparison):
    """
    Checks that wrong comparison names get caught.
    """
    with pytest.raises(KeyError) as excinfo:
        compute_perfect_model(pm_da_ds1d,
                              pm_da_control1d,
                              comparison=comparison,
                              metric='mse')
    assert 'Specify comparison from' in str(excinfo.value)
Example #11
def test_compute_pm_probabilistic_metric_not_dim_member_warn(
        pm_da_ds1d, pm_da_control1d, metric, dim):
    with pytest.warns(UserWarning) as record:
        compute_perfect_model(pm_da_ds1d,
                              pm_da_control1d,
                              comparison='m2c',
                              metric=metric,
                              dim=dim)
    expected = (f'Probabilistic metric {metric} requires to be '
                f'computed over dimension `dim="member"`. '
                f'Set automatically.')
    assert record[0].message.args[0] == expected
Example #12
def test_compute_perfect_model_lead0_lead1(pm_da_ds1d, pm_da_ds1d_lead0,
                                           pm_da_control1d, comparison,
                                           metric):
    """
    Checks that metric results are identical for a lead 0 and lead 1 setup.
    """
    res1 = compute_perfect_model(pm_da_ds1d,
                                 pm_da_control1d,
                                 comparison=comparison,
                                 metric=metric)
    res2 = compute_perfect_model(pm_da_ds1d_lead0,
                                 pm_da_control1d,
                                 comparison=comparison,
                                 metric=metric)
    assert (res1.values == res2.values).all()
Example #13
def test_new_comparison_passed_to_compute(PM_da_initialized_1d,
                                          PM_da_control_1d, metric):
    actual = compute_perfect_model(
        PM_da_initialized_1d,
        PM_da_control_1d,
        comparison=my_m2me_comparison,
        metric=metric,
    )

    expected = compute_perfect_model(PM_da_initialized_1d,
                                     PM_da_control_1d,
                                     comparison="m2e",
                                     metric="mse")

    assert (actual - expected).mean() != 0
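`my_m2me_comparison` is defined elsewhere in the test module. climpred's documentation builds user-defined comparisons by wrapping a function in its `Comparison` class; a hedged sketch along those lines (the function body is an illustrative guess, not the suite's actual definition):

import xarray as xr
from climpred.comparisons import Comparison

def _m2me(initialized, metric=None):
    # Illustrative m2e variant: verify each member against the median
    # (rather than the mean) of the remaining members.
    forecast, verif = [], []
    for m in initialized.member.values:
        forecast.append(initialized.drop_sel(member=m).median('member'))
        verif.append(initialized.sel(member=m, drop=True))
    return xr.concat(forecast, dim='member'), xr.concat(verif, dim='member')

my_m2me_comparison = Comparison(
    name='m2me', function=_m2me, hindcast=False, probabilistic=False)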
Example #14
def test_compute_perfect_model_ds_not_nan(PM_ds_ds, PM_ds_control, metric,
                                          comparison):
    actual = compute_perfect_model(PM_ds_ds,
                                   PM_ds_control,
                                   metric=metric,
                                   comparison=comparison).isnull().any()
    for var in actual.data_vars:
        assert not actual[var]
Example #15
def test_compute_after_smooth_goddard_2013(pm_da_ds3d, pm_da_control3d):
    """Test compute_perfect_model works after smoothings."""
    pm_da_control3d = smooth_goddard_2013(pm_da_control3d)
    pm_da_ds3d = smooth_goddard_2013(pm_da_ds3d)
    actual = compute_perfect_model(pm_da_ds3d, pm_da_control3d)
    north_atlantic = actual.sel(lat=slice(40, 50), lon=slice(-30, -20))
    assert not north_atlantic.isnull().any()
Example #16
def test_compute_perfect_model_da1d_not_nan(PM_da_initialized_1d,
                                            PM_da_control_1d, comparison,
                                            metric, dim):
    """
    Checks that there are no NaNs on perfect model metrics of 1D time series.
    """
    if metric == "contingency":
        metric_kwargs = {
            "forecast_category_edges": category_edges,
            "observation_category_edges": category_edges,
            "score": "accuracy",
        }
    else:
        metric_kwargs = {}
    # correlation-based metrics over dim='member' alone are ill defined
    if dim == "member" and metric in [
            "pearson_r",
            "spearman_r",
            "pearson_r_p_value",
            "spearman_r_p_value",
            "msess_murphy",
            "bias_slope",
            "conditional_bias",
    ]:
        dim = ["init", "member"]
    actual = compute_perfect_model(PM_da_initialized_1d,
                                   PM_da_control_1d,
                                   comparison=comparison,
                                   metric=metric,
                                   dim=dim,
                                   **metric_kwargs)
    if metric == "contingency":
        assert not actual.isnull().all()
    else:
        assert not actual.isnull().any()
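`category_edges` is module-level test data not shown in this excerpt. For the contingency metric it only needs monotonic bin edges spanning the data range; the values below are an assumption for illustration:

import numpy as np

# Hypothetical bin edges for the contingency-table score; the real test
# module chooses edges matching the range of the fixture data.
category_edges = np.array([-0.5, 0.0, 0.5, 1.0])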
Example #17
def test_compute_perfect_model_da1d_not_nan_probabilistic(
        pm_da_ds1d, pm_da_control1d, metric, comparison):
    """
    Checks that there are no NaNs on perfect model probabilistic metrics of 1D
    time series.
    """
    if 'threshold' in metric:
        threshold = 10.5
    else:
        threshold = None

    if metric == 'brier_score':

        def func(x):
            return x > 0

    else:
        func = None

    actual = compute_perfect_model(
        pm_da_ds1d,
        pm_da_control1d,
        comparison=comparison,
        metric=metric,
        threshold=threshold,
        gaussian=True,
        func=func,
        dim='member',
    )
    actual = actual.isnull().any()
    assert not actual
Example #18
def test_seasonal_resolution_perfect_model(monthly_initialized, monthly_obs):
    """Tests that seasonal resolution perfect model predictions work."""
    seasonal_pm = (
        monthly_initialized.rolling(lead=3, center=True).mean().dropna(dim="lead")
    )
    seasonal_pm = seasonal_pm.isel(lead=slice(0, None, 3))
    seasonal_obs = monthly_obs.rolling(time=3, center=True).mean().dropna(dim="time")
    assert compute_perfect_model(seasonal_pm, seasonal_obs).all()
Example #19
def test_compute_perfect_model_stack_dims_True_and_False_quite_close(
        pm_da_ds1d, pm_da_control1d):
    """Test whether dim=['init','member'] for stack_dims=False and
    dim='member' for stack_dims=True give similar results."""
    stack_dims_true = compute_perfect_model(
        pm_da_ds1d,
        pm_da_control1d,
        comparison='m2c',
        metric='rmse',
        dim=['init', 'member'],
    )
    stack_dims_false = compute_perfect_model(pm_da_ds1d,
                                             pm_da_control1d,
                                             comparison='m2c',
                                             metric='rmse',
                                             dim='member').mean(['init'])
    # no more than 10% difference
    assert_allclose(stack_dims_true, stack_dims_false, rtol=0.1, atol=0.03)
Example #20
def test_compute_after_smooth_goddard_2013(PM_da_initialized_3d_full,
                                           PM_da_control_3d_full):
    """Test compute_perfect_model works after smoothings."""
    PM_da_control_3d_full = smooth_goddard_2013(PM_da_control_3d_full)
    PM_da_initialized_3d_full = smooth_goddard_2013(PM_da_initialized_3d_full)
    actual = compute_perfect_model(PM_da_initialized_3d_full,
                                   PM_da_control_3d_full)
    north_atlantic = actual.sel(lat=slice(40, 50), lon=slice(-30, -20))
    assert not north_atlantic.isnull().any()
Example #21
def time_compute_perfect_model(self, metric, comparison):
    """Take time for `compute_perfect_model`."""
    dim = 'member' if metric in PROBABILISTIC_METRICS else None
    ensure_loaded(
        compute_perfect_model(self.ds,
                              self.control,
                              metric=metric,
                              comparison=comparison,
                              dim=dim))
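`ensure_loaded` is a benchmark helper not included in this excerpt. A minimal sketch, assuming its only job is to force lazily evaluated (dask-backed) results into memory so the timing reflects the actual computation:

def ensure_loaded(res):
    # Compute lazy results so the benchmark measures real work.
    return res.compute()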
Example #22
def test_custom_metric_passed_to_compute(PM_da_initialized_1d,
                                         PM_da_control_1d, comparison):
    """Test custom metric in compute_perfect_model."""
    actual = compute_perfect_model(
        PM_da_initialized_1d,
        PM_da_control_1d,
        comparison=comparison,
        metric=my_mse,
    )

    expected = compute_perfect_model(
        PM_da_initialized_1d,
        PM_da_control_1d,
        comparison=comparison,
        metric='mse',
    )

    assert_allclose(actual, expected)
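`my_mse` is defined at the top of the test module. Since `assert_allclose(actual, expected)` requires it to reproduce the built-in `'mse'`, the natural sketch is a plain mean-squared-error function wrapped in climpred's `Metric` class; treat the exact keyword names as assumptions of this sketch:

from climpred.metrics import Metric

def _my_mse(forecast, verif, dim=None, **metric_kwargs):
    # Plain mean squared error, reduced over `dim`.
    return ((forecast - verif) ** 2).mean(dim)

my_mse = Metric(name='my_mse', function=_my_mse,
                positive=False, probabilistic=False, unit_power=2)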
Example #23
def test_compute_pm_probabilistic_metric_not_dim_member_warn(
    PM_da_initialized_1d, PM_da_control_1d, metric, dim
):
    with pytest.warns(UserWarning) as record:
        compute_perfect_model(
            PM_da_initialized_1d,
            PM_da_control_1d,
            comparison='m2c',
            metric=metric,
            dim=dim,
        )
    expected = (
        f'Probabilistic metric {metric} requires to be '
        f'computed over dimension `dim="member"`. '
        f'Set automatically.'
    )
    # get second warning here
    assert record[1].message.args[0] == expected
Example #24
def peakmem_compute_perfect_model(self, metric, comparison):
    """Take memory peak for `compute_perfect_model`."""
    dim = "member" if metric in PROBABILISTIC_METRICS else None
    ensure_loaded(
        compute_perfect_model(self.ds,
                              self.control,
                              metric=metric,
                              comparison=comparison,
                              dim=dim))
Example #25
def test_compute_perfect_model_da1d_not_nan(pm_da_ds1d, pm_da_control1d,
                                            comparison, metric):
    """
    Checks that there are no NaNs on perfect model metrics of 1D time series.
    """
    actual = (compute_perfect_model(pm_da_ds1d,
                                    pm_da_control1d,
                                    comparison=comparison,
                                    metric=metric).isnull().any())
    assert not actual
Example #26
def test_compute_perfect_model_dim_over_member(pm_da_ds1d, pm_da_control1d,
                                               comparison):
    """Test deterministic metric calc skill over member dim."""
    actual = compute_perfect_model(pm_da_ds1d,
                                   pm_da_control1d,
                                   comparison=comparison,
                                   metric='rmse',
                                   dim='member')
    assert 'init' in actual.dims
    assert not actual.isnull().any()
Example #27
def test_compute_perfect_model_different_dims_quite_close(
        PM_da_initialized_1d, PM_da_control_1d):
    """Test whether dim=['init','member'] and
    dim='member' results."""
    stack_dims_true = compute_perfect_model(
        PM_da_initialized_1d,
        PM_da_control_1d,
        comparison='m2c',
        metric='rmse',
        dim=['init', 'member'],
    )
    stack_dims_false = compute_perfect_model(
        PM_da_initialized_1d,
        PM_da_control_1d,
        comparison='m2c',
        metric='rmse',
        dim='member',
    ).mean(['init'])
    # no more than 10% difference
    assert_allclose(stack_dims_true, stack_dims_false, rtol=0.1, atol=0.03)
Example #28
def test_compute_perfect_model_ds_not_nan(PM_ds_ds3d, PM_ds_control3d, metric,
                                          comparison):
    """
    Checks that there are no NaNs on perfect model comparison for Dataset.
    """
    actual = compute_perfect_model(PM_ds_ds3d,
                                   PM_ds_control3d,
                                   metric=metric,
                                   comparison=comparison).isnull().any()
    for var in actual.data_vars:
        assert not actual[var]
Example #29
def test_pm_crpss_orientation(pm_da_ds1d, pm_da_control1d):
    """
    Checks that CRPSS in PM as skill score > 0.
    """
    actual = compute_perfect_model(pm_da_ds1d,
                                   pm_da_control1d,
                                   comparison='m2m',
                                   metric='crpss',
                                   dim='member')
    if 'init' in actual.coords:
        actual = actual.mean('init')
    assert not (actual.isel(lead=[0, 1]) < 0).any()
Example #30
def Sef2018_Fig1_Different_PH_Definitions(ds, control, unit='PgC/yr', sig=95, bootstrap=1000):
    from esmtools.prediction import predictability_horizon
    from PMMPIESM.plot import _set_integer_xaxis
    rsig = (100 - sig) / 100
    _control = control
    _ds = ds
    ss = compute_perfect_model(
        _ds, _control, metric='rmse', comparison='m2e')
    ss['lead'] = np.arange(1, ss.lead.size + 1)
    # ss.name = 'every'
    ss_boot = bootstrap_perfect_model(_ds, _control, metric='rmse',
                                      comparison='m2e', sig=sig, bootstrap=bootstrap)
    ss_p = ss_boot.sel(kind='uninit', results='p')
    ss_ci_high = ss_boot.sel(kind='uninit', results='low_ci')

    ph_Spring_2019 = predictability_horizon(
        ss.where(ss_p < rsig)).values

    b_m2e, ph_Sef_2018, c_m2e = fit_ph_int(ss.to_series())
    print('ph_Sef_2018', ph_Sef_2018)
    print('ph_Spring_2019', int(ph_Spring_2019))

    fig, ax = plt.subplots(figsize=(10, 4))
    std = _control.std('time').values

    every_color = 'mediumorchid'
    ss.name = 'skill'
    ss.to_dataframe().plot(ax=ax, label='skill', color='k', marker='o')

    t_fit = np.arange(0, _ds.lead.size)
    ax.plot(t_fit[1:], func(t_fit, b_m2e, ph_Sef_2018, c_m2e)[1:],
            linewidth=3, color=every_color, label='Sef 2018 breakpoint fit')
    ax.axvline(x=ph_Sef_2018, linestyle='-.',
               color=every_color, label='PH Sef 2018')
    ax.axhline(y=std, ls='--', c='k', alpha=.3, label='std control')
    ax.axhline(y=ss_ci_high.mean('lead'), ls=':',
               c='royalblue', label='Bootstrapped high CI')

    ax.axvline(x=ph_Spring_2019, ls='-.', c='royalblue',
               label='PH Spring 2019')
    ax.set_xlabel('Lead Time [time]')
    ax.set_ylabel('RMSE [' + unit + ']')
    ax.set_ylim([0, ss.max() * 1.1])
    ax.set_xlim([0, 10])
    _set_integer_xaxis(ax)
    ax.legend(frameon=False, ncol=2)
    ax.set_xticks(range(1, 11))
    ax.set_title(
        'Global oceanic CO$_2$ flux: Differences in definitions of Predictability Horizon')
    if savefig:
        plt.tight_layout()
        plt.savefig('FigureSI_Differences_PH_definition')
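`func` and `fit_ph_int` are used above but not defined in this snippet. A hedged sketch of the breakpoint model they appear to implement (RMSE grows linearly with lead time until the predictability horizon, then saturates; the exact functional form in the Sef 2018 reference may differ):

import numpy as np
from scipy.optimize import curve_fit

def func(t, b, ph, c):
    # Breakpoint model: linear growth with slope b up to the
    # predictability horizon ph, constant afterwards.
    t = np.asarray(t, dtype=float)
    return np.where(t < ph, b * t + c, b * ph + c)

def fit_ph_int(series):
    # Fit the breakpoint model to a pandas Series of skill vs. lead time;
    # returns (slope, predictability horizon, intercept).
    popt, _ = curve_fit(func, series.index.values.astype(float),
                        series.values, p0=[0.1, 5.0, float(series.values[0])])
    return tuple(popt)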