Example #1
    def test_mon_U(self, mon_series, series, mon_triangular, kind, name):
        """
        Train on
        hist: U
        ref: U + monthly cycle

        Predict on hist to get ref
        """
        u = np.random.rand(10000)

        # Define distributions
        xd = uniform(loc=2, scale=0.1)
        yd = uniform(loc=4, scale=0.1)
        noise = uniform(loc=0, scale=1e-7)

        # Generate random numbers
        x = xd.ppf(u)
        y = yd.ppf(u) + noise.ppf(u)

        # Test train
        hist = sim = series(x, name)
        ref = mon_series(y, name)

        QM = EmpiricalQuantileMapping(kind=kind,
                                      group="time.month",
                                      nquantiles=5)
        QM.train(ref, hist)
        p = QM.adjust(sim)
        mqm = QM.ds.af.mean(dim="quantiles")
        expected = apply_correction(mon_triangular, 2, kind)
        np.testing.assert_array_almost_equal(mqm, expected, 1)

        # Test predict
        np.testing.assert_array_almost_equal(p, ref, 2)
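These snippets are lifted from a pytest suite, so they also rely on fixtures (series, mon_series, tas_series, mon_triangular, use_dask and the kind/name parameters) and on imports defined elsewhere in that suite. Below is a minimal sketch of the imports the examples appear to assume; the module paths follow the xclim.sdba layout these tests target and may differ between xclim releases, so treat them as assumptions rather than a verbatim header.

import numpy as np
import pytest
import xarray as xr
from matplotlib import pyplot as plt
from scipy.stats import gaussian_kde, genpareto, norm, scoreatpercentile, uniform

# The xclim paths below are assumptions based on the names used in the snippets;
# check your installed xclim version for the exact locations.
from xclim.core.units import convert_units_to
from xclim.sdba import (
    DetrendedQuantileMapping,
    EmpiricalQuantileMapping,
    ExtremeValues,
    Grouper,
    QuantileDeltaMapping,
)
from xclim.sdba.processing import jitter_under_thresh, stack_variables, unstack_variables
from xclim.testing import open_dataset

# apply_correction, get_correction, uniform_noise_like and the `tu` helper module
# (cannon_2015_rvs) come from xclim's sdba utilities and test helpers; their exact
# locations vary between versions, so they are left out of this sketch.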
Example #2
    def test_add_dims(self, use_dask):
        if use_dask:
            chunks = {"location": -1}
        else:
            chunks = None
        ref = (open_dataset("sdba/ahccd_1950-2013.nc",
                            chunks=chunks,
                            drop_variables=["lat", "lon"])
               .sel(time=slice("1981", "2010")).tasmax)
        ref = convert_units_to(ref, "K")
        ref = ref.isel(location=1, drop=True).expand_dims(location=["Amos"])

        dsim = open_dataset("sdba/CanESM2_1950-2100.nc",
                            chunks=chunks,
                            drop_variables=["lat", "lon"]).tasmax
        hist = dsim.sel(time=slice("1981", "2010"))
        sim = dsim.sel(time=slice("2041", "2070"))

        # With add_dims: smoke test that training and adjusting run.
        group = Grouper("time.dayofyear", window=5, add_dims=["location"])
        EQM = EmpiricalQuantileMapping.train(ref, hist, group=group)
        EQM.adjust(sim).load()

        # Without add_dims, adjustment is per-location; locations absent from ref stay missing.
        group = Grouper("time.dayofyear", window=5)
        EQM2 = EmpiricalQuantileMapping.train(ref, hist, group=group)
        scen2 = EQM2.adjust(sim).load()
        assert scen2.sel(location=["Kugluktuk", "Vancouver"]).isnull().all()
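Grouper bundles the grouping dimension, the rolling window and, through add_dims, any extra dimensions pooled when the quantiles are estimated. A minimal sketch of the two groupers used above, with the same arguments as in the snippet:

from xclim.sdba import Grouper

# Pool the 5-day window and every location together when estimating quantiles.
group_pooled = Grouper("time.dayofyear", window=5, add_dims=["location"])

# Default behaviour: quantiles are estimated separately for each location.
group_per_site = Grouper("time.dayofyear", window=5)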
Example #3
    def test_simple(self, c_thresh, q_thresh, frac, power):
        n = 45 * 365

        def gen_testdata(c, s):
            base = np.clip(norm.rvs(loc=0, scale=s, size=(n, )), 0, None)
            qv = np.quantile(base[base > 1], q_thresh)
            base[base > qv] = genpareto.rvs(c,
                                            loc=qv,
                                            scale=s,
                                            size=base[base > qv].shape)
            return xr.DataArray(
                base,
                dims=("time", ),
                coords={
                    "time":
                    xr.cftime_range("1990-01-01", periods=n, calendar="noleap")
                },
                attrs={
                    "units": "mm/day",
                    "thresh": qv
                },
            )

        ref = jitter_under_thresh(gen_testdata(-0.1, 2), 1e-3)
        hist = jitter_under_thresh(gen_testdata(-0.1, 2), 1e-3)
        sim = gen_testdata(-0.15, 2.5)

        EQM = EmpiricalQuantileMapping(group="time.dayofyear",
                                       nquantiles=15,
                                       kind="*")
        EQM.train(ref, hist)
        scen = EQM.adjust(sim)

        EX = ExtremeValues(c_thresh, q_thresh=q_thresh)
        EX.train(ref, hist)

        qv = (ref.thresh + hist.thresh) / 2
        np.testing.assert_allclose(EX.ds.fit_params, [-0.1, qv, 2],
                                   atol=0.5,
                                   rtol=0.1)
        np.testing.assert_allclose(EX.ds.thresh, qv, atol=0.15, rtol=0.01)

        scen2 = EX.adjust(scen, sim, frac=frac, power=power)

        # Check that the extreme values of sim are still extreme after adjustment
        exval = sim > EX.ds.thresh
        assert (scen2.where(exval) >
                EX.ds.thresh).sum() > (scen.where(exval) > EX.ds.thresh).sum()
        # ONLY extreme values have been touched (but some might not have been modified)
        assert (((scen != scen2) | exval) == exval).all()
Example #4
    def test_real_data(self):

        dsim = open_dataset("sdba/CanESM2_1950-2100.nc").chunk()
        dref = open_dataset("sdba/ahccd_1950-2013.nc").chunk()

        ref = convert_units_to(dref.sel(time=slice("1950", "2009")).pr, "mm/d")
        hist = convert_units_to(
            dsim.sel(time=slice("1950", "2009")).pr, "mm/d")

        quantiles = np.linspace(0.01, 0.99, num=50)

        with xr.set_options(keep_attrs=True):
            ref = ref + uniform_noise_like(ref, low=1e-6, high=1e-3)
            hist = hist + uniform_noise_like(hist, low=1e-6, high=1e-3)

        EQM = EmpiricalQuantileMapping.train(ref,
                                             hist,
                                             group=Grouper("time.dayofyear",
                                                           window=31),
                                             nquantiles=quantiles)

        scen = EQM.adjust(hist, interp="linear", extrapolation="constant")

        EX = ExtremeValues.train(ref,
                                 hist,
                                 cluster_thresh="1 mm/day",
                                 q_thresh=0.97)
        new_scen = EX.adjust(scen, hist, frac=1e-9)
        new_scen.load()
Example #5
    def test_quantiles(self, series, kind, name):
        """Train on
        hist: U
        ref: Normal

        Predict on hist to get ref
        """
        u = np.random.rand(10000)

        # Define distributions
        xd = uniform(loc=10, scale=1)
        yd = norm(loc=12, scale=1)

        # Generate random numbers with u so we get exact results for comparison
        x = xd.ppf(u)
        y = yd.ppf(u)

        # Test train
        hist = sim = series(x, name)
        ref = series(y, name)
        QM = EmpiricalQuantileMapping(
            kind=kind,
            group="time",
            nquantiles=50,
        )
        QM.train(ref, hist)
        p = QM.adjust(sim, interp="linear")

        q = QM.ds.coords["quantiles"]
        expected = get_correction(xd.ppf(q), yd.ppf(q), kind)

        # Results are not so good at the endpoints
        np.testing.assert_array_almost_equal(QM.ds.af[2:-2], expected[2:-2], 1)

        # Test predict
        # Accept discrepancies near extremes
        middle = (u > 1e-2) * (u < 0.99)  # mask on the probabilities, not the values
        np.testing.assert_array_almost_equal(p[middle], ref[middle], 1)
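get_correction and apply_correction come from xclim's sdba utilities; conceptually, additive ("+") factors are ref - hist differences and multiplicative ("*") factors are ref / hist ratios, evaluated per quantile. The sketch below reproduces that arithmetic with plain numpy and hypothetical helper names, only to show what the expected array encodes; it does not pin the exact xclim signatures.

import numpy as np

def correction_factor(hist_q, ref_q, kind):
    # Hypothetical stand-in: additive factors are differences, multiplicative are ratios.
    return ref_q - hist_q if kind == "+" else ref_q / hist_q

def apply_factor(sim_q, af, kind):
    # Hypothetical stand-in for applying the factors back onto quantiles of sim.
    return sim_q + af if kind == "+" else sim_q * af

hist_q = np.array([10.1, 10.5, 10.9])  # e.g. quantiles of the uniform(10, 1) hist
ref_q = np.array([11.0, 12.0, 13.0])   # e.g. quantiles of the normal(12, 1) ref
af = correction_factor(hist_q, ref_q, "+")
np.testing.assert_allclose(apply_factor(hist_q, af, "+"), ref_q)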
Example #6
def test_stack_variables():
    ds1 = open_dataset("sdba/CanESM2_1950-2100.nc")
    ds2 = open_dataset("sdba/ahccd_1950-2013.nc")

    da1 = stack_variables(ds1)
    da2 = stack_variables(ds2)

    assert list(da1.multivar.values) == ["pr", "tasmax"]
    assert da1.multivar.attrs["_standard_name"] == [
        "precipitation_flux",
        "air_temperature",
    ]
    assert da2.multivar.attrs["is_variables"]
    assert da1.multivar.equals(da2.multivar)

    da1p = da1.sortby("multivar", ascending=False)

    with pytest.raises(ValueError, match="Inputs have different multivariate"):
        EmpiricalQuantileMapping.train(da1p, da2)

    ds1p = unstack_variables(da1)

    xr.testing.assert_equal(ds1, ds1p)
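stack_variables concatenates a Dataset's data variables along a new "multivar" dimension so that multivariate adjustments can operate on a single DataArray, and unstack_variables reverses the operation. A minimal round-trip sketch on a synthetic Dataset, assuming both helpers are importable from xclim.sdba.processing as used above:

import numpy as np
import xarray as xr
from xclim.sdba.processing import stack_variables, unstack_variables

time = xr.cftime_range("2000-01-01", periods=10, calendar="noleap")
ds = xr.Dataset(
    {
        "pr": xr.DataArray(np.random.rand(10), dims="time",
                           coords={"time": time}, attrs={"units": "mm/d"}),
        "tasmax": xr.DataArray(290 + np.random.rand(10), dims="time",
                               coords={"time": time}, attrs={"units": "K"}),
    }
)
da = stack_variables(ds)            # DataArray with a new "multivar" coordinate
assert set(da.multivar.values) == {"pr", "tasmax"}
roundtrip = unstack_variables(da)   # back to a Dataset, per-variable attrs restored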
Example #7
    def test_dask_julia(self):

        dsim = open_dataset("sdba/CanESM2_1950-2100.nc").chunk()
        dref = open_dataset("sdba/ahccd_1950-2013.nc").chunk()
        dexp = open_dataset("sdba/adjusted_external.nc")

        ref = convert_units_to(dref.sel(time=slice("1950", "2009")).pr, "mm/d")
        hist = convert_units_to(
            dsim.sel(time=slice("1950", "2009")).pr, "mm/d")

        quantiles = np.linspace(0.01, 0.99, num=50)

        EQM = EmpiricalQuantileMapping(group=Grouper("time.dayofyear",
                                                     window=31),
                                       nquantiles=quantiles)

        with xr.set_options(keep_attrs=True):
            ref = ref + uniform_noise_like(ref, low=1e-6, high=1e-3)
            hist = hist + uniform_noise_like(hist, low=1e-6, high=1e-3)

        EQM.train(ref, hist)
        scen = EQM.adjust(hist, interp="linear", extrapolation="constant")

        EX = ExtremeValues(cluster_thresh="1 mm/day", q_thresh=0.97)
        EX.train(ref, hist)
        new_scen = EX.adjust(scen, hist, frac=1e-9)

        new_scen.load()

        exp_scen = dexp.extreme_values_julia
        xr.testing.assert_allclose(
            new_scen.where(new_scen != scen).transpose("time", "location"),
            exp_scen.where(new_scen != scen).transpose("time", "location"),
            atol=0.005,
            rtol=2e-3,
        )
Example #8
def cannon_2015_figure_2():
    n = 10000
    ref, hist, sim = tu.cannon_2015_rvs(n, random=False)
    QM = EmpiricalQuantileMapping(kind="*", group="time", interp="linear")
    QM.train(ref, hist)
    sim_eqm = QM.predict(sim)

    DQM = DetrendedQuantileMapping(kind="*", group="time", interp="linear")
    DQM.train(ref, hist)
    sim_dqm = DQM.predict(sim, degree=0)

    QDM = QuantileDeltaMapping(kind="*", group="time", interp="linear")
    QDM.train(ref, hist)
    sim_qdm = QDM.predict(sim)

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(11, 4))
    x = np.linspace(0, 105, 50)
    ax1.plot(x, gaussian_kde(ref)(x), color="r", label="Obs hist")
    ax1.plot(x, gaussian_kde(hist)(x), color="k", label="GCM hist")
    ax1.plot(x, gaussian_kde(sim)(x), color="blue", label="GCM future")

    ax1.plot(x, gaussian_kde(sim_qdm)(x), color="lime", label="QDM future")
    ax1.plot(x,
             gaussian_kde(sim_eqm)(x),
             color="darkgreen",
             ls="--",
             label="QM future")
    ax1.plot(x,
             gaussian_kde(sim_dqm)(x),
             color="lime",
             ls=":",
             label="DQM future")
    ax1.legend(frameon=False)
    ax1.set_xlabel("Value")
    ax1.set_ylabel("Density")

    tau = np.array([0.25, 0.5, 0.75, 0.95, 0.99]) * 100
    bc_gcm = (scoreatpercentile(sim, tau) -
              scoreatpercentile(hist, tau)) / scoreatpercentile(hist, tau)
    bc_qdm = (scoreatpercentile(sim_qdm, tau) -
              scoreatpercentile(ref, tau)) / scoreatpercentile(ref, tau)
    bc_eqm = (scoreatpercentile(sim_eqm, tau) -
              scoreatpercentile(ref, tau)) / scoreatpercentile(ref, tau)
    bc_dqm = (scoreatpercentile(sim_dqm, tau) -
              scoreatpercentile(ref, tau)) / scoreatpercentile(ref, tau)

    ax2.plot([0, 1], [0, 1], ls=":", color="blue")
    ax2.plot(bc_gcm, bc_gcm, "-", color="blue", label="GCM")
    ax2.plot(bc_gcm, bc_qdm, marker="o", mfc="lime", label="QDM")
    ax2.plot(
        bc_gcm,
        bc_eqm,
        marker="o",
        mfc="darkgreen",
        ls=":",
        color="darkgreen",
        label="QM",
    )
    ax2.plot(
        bc_gcm,
        bc_dqm,
        marker="s",
        mec="lime",
        mfc="w",
        ls="--",
        color="lime",
        label="DQM",
    )

    for i, s in enumerate(tau / 100):
        ax2.text(bc_gcm[i],
                 bc_eqm[i],
                 f"{s}  ",
                 ha="right",
                 va="center",
                 fontsize=9)
    ax2.set_xlabel("GCM relative change")
    ax2.set_ylabel("Bias adjusted relative change")
    ax2.legend(loc="upper left", frameon=False)
    ax2.set_aspect("equal")
    plt.tight_layout()
    return fig
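The function returns the matplotlib Figure, so reproducing the comparison plot is a single call; a minimal usage sketch (the output filename is illustrative):

fig = cannon_2015_figure_2()
fig.savefig("cannon_2015_figure_2.png", dpi=150)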
Example #9
def test_default_grouper_understood(tas_series):
    ref = tas_series(np.arange(730).astype(float))

    EQM = EmpiricalQuantileMapping.train(ref, ref)
    EQM.adjust(ref)
    assert EQM.group.dim == "time"
Example #10
def test_raise_on_multiple_chunks(tas_series):
    ref = tas_series(np.arange(730).astype(float)).chunk({"time": 365})
    with pytest.raises(ValueError):
        EmpiricalQuantileMapping.train(ref, ref, group=Grouper("time.month"))
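Outside a test suite, the same train/adjust pattern applies to any ref, hist and sim DataArrays that share a time dimension and carry a units attribute. A minimal end-to-end sketch on synthetic daily temperatures, assuming the classmethod API (EmpiricalQuantileMapping.train) shown in Examples #2, #4, #9 and #10; the data and numbers are illustrative only.

import numpy as np
import xarray as xr
from xclim import sdba

time = xr.cftime_range("1981-01-01", periods=30 * 365, calendar="noleap")
ref = xr.DataArray(280.0 + 5.0 * np.random.randn(time.size), dims="time",
                   coords={"time": time}, attrs={"units": "K"})
hist = xr.DataArray(282.0 + 4.0 * np.random.randn(time.size), dims="time",
                    coords={"time": time}, attrs={"units": "K"})
sim = hist + 1.0                 # stand-in "future" run; arithmetic drops attrs
sim.attrs["units"] = "K"

# Train monthly additive adjustment factors on ref vs hist, then adjust sim.
EQM = sdba.EmpiricalQuantileMapping.train(ref, hist, nquantiles=20,
                                          group="time.month", kind="+")
scen = EQM.adjust(sim, interp="linear")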