def test_gfwed_and_indicators():
    # Also tests passing parameters as quantity strings
    ds = open_dataset("FWI/GFWED_sample_2017.nc")

    # GFWED season thresholds shared by both the indexes call and the
    # fire_season call below.
    season_kwargs = dict(
        temp_condition_days=3,
        snow_condition_days=3,
        temp_start_thresh="6 degC",
        temp_end_thresh="6 degC",
    )

    results = fire_weather_indexes(
        tas=ds.tas,
        pr=ds.prbc,
        snd=ds.snow_depth,
        hurs=ds.rh,
        sfcWind=ds.sfcwind,
        lat=ds.lat,
        season_method="GFWED",
        overwintering=False,
        dry_start="GFWED",
        **season_kwargs,
    )

    # Compare each output against the reference variables stored in the file.
    reference = [ds.DC, ds.DMC, ds.FFMC, ds.ISI, ds.BUI, ds.FWI]
    for expected, computed in zip(reference, results):
        np.testing.assert_allclose(
            computed.isel(loc=[0, 1]), expected.isel(loc=[0, 1]), rtol=0.03
        )

    # Rerun starting one day later, feeding explicit initial codes and a
    # precomputed season mask through the indicator interface.
    ds2 = ds.isel(time=slice(1, None))

    with set_options(cf_compliance="log"):
        mask = atmos.fire_season(
            tas=ds2.tas,
            snd=ds2.snow_depth,
            method="GFWED",
            **season_kwargs,
        )

    # 3 first days are false by default assume same as 4th day.
    mask = mask.where(mask.time > mask.time[2]).bfill("time")

    results = atmos.fire_weather_indexes(
        tas=ds2.tas,
        pr=ds2.prbc,
        snd=ds2.snow_depth,
        hurs=ds2.rh,
        sfcWind=ds2.sfcwind,
        lat=ds2.lat,
        dc0=ds.DC.isel(time=0),
        dmc0=ds.DMC.isel(time=0),
        ffmc0=ds.FFMC.isel(time=0),
        season_mask=mask,
        overwintering=False,
        dry_start="GFWED",
        initial_start_up=False,
    )

    reference = [ds2.DC, ds2.DMC, ds2.FFMC, ds2.ISI, ds2.BUI, ds2.FWI]
    for expected, computed in zip(reference, results):
        np.testing.assert_allclose(computed, expected, rtol=0.03)
def test_cannon_and_diagnostics(self, cannon_2015_dist, cannon_2015_rvs):
    ref, hist, sim = cannon_2015_rvs(15000, random=False)

    # Quantile mapping with the extra diagnostic output enabled, so the
    # result is a Dataset carrying `sim_q` alongside `scen`.
    with set_options(sdba_extra_output=True):
        adjuster = QuantileDeltaMapping.train(
            ref, hist, kind="*", group="time", nquantiles=50
        )
        adjusted = adjuster.adjust(sim)

    assert isinstance(adjusted, xr.Dataset)

    # The reported sim quantiles must match the empirical ranks of `sim`.
    expected_ranks = sim.rank(dim="time", pct=True)
    np.testing.assert_array_equal(expected_ranks, adjusted.sim_q)

    # Theoretical results
    # ref, hist, sim = cannon_2015_dist
    # u1 = equally_spaced_nodes(1001, None)
    # u = np.convolve(u1, [0.5, 0.5], mode="valid")
    # pu = ref.ppf(u) * sim.ppf(u) / hist.ppf(u)
    # pu1 = ref.ppf(u1) * sim.ppf(u1) / hist.ppf(u1)
    # pdf = np.diff(u1) / np.diff(pu1)
    # mean = np.trapz(pdf * pu, pu)
    # mom2 = np.trapz(pdf * pu ** 2, pu)
    # std = np.sqrt(mom2 - mean ** 2)
    bc_sim = adjusted.scen
    np.testing.assert_almost_equal(bc_sim.mean(), 41.5, 1)
    np.testing.assert_almost_equal(bc_sim.std(), 16.7, 0)
def test_degree_days_exceedance_date():
    tas = open_dataset("FWI/GFWED_sample_2017.nc").tas
    tas.attrs.update(
        cell_methods="time: mean within days", standard_name="air_temperature"
    )

    result = atmos.degree_days_exceedance_date(
        tas=tas,
        thresh="4 degC",
        op=">",
        sum_thresh="200 K days",
    )
    np.testing.assert_array_equal(result, np.array([[153, 136, 9, 6]]).T)
    assert "tmean > 4 degc" in result.attrs["description"]

    # A later start date and a higher accumulation threshold; the first
    # location never reaches it, hence the NaN.
    with set_options(check_missing="skip"):
        result = atmos.degree_days_exceedance_date(
            tas=tas,
            thresh="4 degC",
            op=">",
            sum_thresh="1500 K days",
            start_date="07-02",
            freq="YS",
        )
    np.testing.assert_array_equal(result, np.array([[np.nan, 280, 241, 244]]).T)
def test_indicator_output(tas_series):
    # Check that French metadata attributes are attached when the
    # `metadata_locales` option is set.
    tas = tas_series(np.zeros(365))

    with set_options(metadata_locales=["fr"]):
        tgmean = atmos.tg_mean(tas, freq="YS")

    assert "long_name_fr" in tgmean.attrs
    expected = "Moyenne annuelle de la température journalière moyenne"
    assert tgmean.attrs["description_fr"] == expected
def test_cf(indname, atmosds):
    with set_options(cf_compliance="warn"):
        # skip when missing default values
        ind = registry[indname].get_instance()
        for name, param in ind.parameters.items():
            lacks_default = (
                param["kind"] is not InputKind.DATASET and param["default"] is None
            )
            if lacks_default:
                pytest.skip(f"Indicator {ind.identifier} has no default for {name}.")
        ind(ds=atmosds)
def test_virtual_modules(virtual_indicator, atmosds):
    with set_options(cf_compliance="warn"):
        # skip when missing default values
        mod, indname, ind = virtual_indicator
        for name, param in ind.parameters.items():
            if param.kind is InputKind.DATASET:
                continue
            lacks_default = param.default in (None, _empty) or (
                param.default == name and name not in atmosds
            )
            if lacks_default:
                pytest.skip(f"Indicator {mod}.{indname} has no default for {name}.")
        ind(ds=atmosds)
def _adjust(
    cls,
    ref: xr.DataArray,
    hist: xr.DataArray,
    sim: xr.DataArray,
    *,
    base: TrainAdjust = QuantileDeltaMapping,
    base_kws: Optional[Mapping[str, Any]] = None,
    n_escore: int = 0,
    n_iter: int = 20,
    pts_dim: str = "multivar",
    adj_kws: Optional[Mapping[str, Any]] = None,
    rot_matrices: Optional[xr.DataArray] = None,
):
    """Iteratively train ``base`` and adjust ``sim`` over random rotations of ``pts_dim``.

    The per-iteration work is delegated to ``npdf_transform``; this method only
    prepares the rotation matrices, the output template and the input dataset,
    then dispatches through ``Dataset.map_blocks``.

    Parameters
    ----------
    ref, hist, sim : xr.DataArray
        Reference, historical and simulated data. ``sim``'s time axis is kept
        distinct from the ``time_hist`` axis shared by ``ref`` and ``hist``.
    base : TrainAdjust
        Univariate adjustment class applied at each iteration.
    base_kws, adj_kws : Mapping, optional
        Keyword arguments for the training and adjusting steps. The adjustment
        ``kind`` is forced to "+" and cannot be overridden.
    n_escore : int
        Number of points used for the energy-score diagnostic.
    n_iter : int
        Number of rotations (ignored if ``rot_matrices`` is given).
    pts_dim : str
        Name of the multivariate dimension.
    rot_matrices : xr.DataArray, optional
        Pre-generated rotation matrices with an ``iterations`` dimension;
        random ones are drawn when omitted.

    Returns
    -------
    xr.Dataset
        With ``scenh`` (adjusted hist, on ``time_hist``), ``scen`` (adjusted
        sim), ``escores`` and the ``rotation_matrices`` used.
    """
    # BUG FIX: the result of `base_kws or {}` was previously discarded, so the
    # default `base_kws=None` crashed on the membership test below. Copy into
    # a plain dict so the caller's mapping is never mutated (and `setdefault`
    # is guaranteed to exist — `Mapping` need not provide it).
    base_kws = dict(base_kws or {})
    if "kind" in base_kws:
        warn(
            f'The adjustment kind cannot be controlled when using {cls.__name__}, it defaults to "+".'
        )
    base_kws.setdefault("kind", "+")

    # Assuming sim has the same coords as hist
    # We get the safest new name of the rotated dim.
    rot_dim = xr.core.utils.get_temp_dimname(
        set(ref.dims).union(hist.dims).union(sim.dims), pts_dim + "_prime"
    )

    # Get the rotation matrices.
    # BUG FIX: use an explicit `is None` test — the truth value of a
    # multi-element DataArray is ambiguous, so `rot_matrices or ...` raised
    # whenever matrices were actually supplied.
    if rot_matrices is None:
        rot_matrices = rand_rot_matrix(
            ref[pts_dim], num=n_iter, new_dim=rot_dim
        ).rename(matrices="iterations")

    # Call a map_blocks on the iterative function
    # Sadly, this is a bit too complicated for map_blocks, we'll do it by hand.
    escores_tmpl = xr.broadcast(
        ref.isel({pts_dim: 0, "time": 0}),
        hist.isel({pts_dim: 0, "time": 0}),
    )[0].expand_dims(iterations=rot_matrices.iterations)

    # `np.nan` (lowercase) — the `np.NaN` alias was removed in NumPy 2.0.
    template = xr.Dataset(
        data_vars={
            "scenh": xr.full_like(hist, np.nan).rename(time="time_hist"),
            "scen": xr.full_like(sim, np.nan),
            "escores": escores_tmpl,
        }
    )

    # Input data, rename time dim on sim since it can't be aligned with ref or hist.
    ds = xr.Dataset(
        data_vars={
            "ref": ref.rename(time="time_hist"),
            "hist": hist.rename(time="time_hist"),
            "sim": sim,
            "rot_matrices": rot_matrices,
        }
    )

    kwargs = {
        "base": base,
        "base_kws": base_kws,
        "n_escore": n_escore,
        "n_iter": n_iter,
        "pts_dim": pts_dim,
        "adj_kws": adj_kws or {},
    }
    with set_options(sdba_extra_output=False):
        out = ds.map_blocks(npdf_transform, template=template, kwargs=kwargs)

    out = out.assign(rotation_matrices=rot_matrices)
    out.scenh.attrs["units"] = hist.units
    return out
def ufunc(request):
    """Parametrized fixture toggling the `run_length_ufunc` option for the test."""
    use_ufunc = request.param
    with set_options(run_length_ufunc=use_ufunc):
        yield use_ufunc