Example #1
def worker(lt, scale_km):
    # Index of the current lead time along the accumulation axis.
    lt_idx = int(lt / v_accu_min - 1)
    if not np.any(np.isfinite(R_obs_c[lt_idx, :, :])):
        return

    # Aggregate forecasts and observations to the current spatial scale
    # (the space window is given in metres).
    R_fct_s, _ = aggregate_fields_space(
        R_fct_c, metafct_c, scale_km * 1000, ignore_nan=False
    )
    R_obs_s, _ = aggregate_fields_space(
        R_obs_c, metaobs_c, scale_km * 1000, ignore_nan=False
    )

    for R_thr in R_thrs:
        # Ensemble exceedance probabilities for the current threshold.
        P_fct = excprob(R_fct_s[:, lt_idx, :, :], R_thr, ignore_nan=True)

        probscores.reldiag_accum(
            results[c, m]["reldiag"][R_thr][scale_km][lt],
            P_fct,
            R_obs_s[lt_idx, :, :],
        )
        ensscores.rankhist_accum(
            results[c, m]["rankhist"][R_thr][scale_km][lt],
            R_fct_s[:, lt_idx, :, :],
            R_obs_s[lt_idx, :, :],
        )
        probscores.ROC_curve_accum(
            results[c, m]["ROC"][R_thr][scale_km][lt],
            P_fct,
            R_obs_s[lt_idx, :, :],
        )

    # CRPS and the spread-skill statistics are accumulated only for the
    # lowest threshold.
    probscores.CRPS_accum(
        results[c, m]["CRPS"][R_thrs[0]][scale_km][lt],
        R_fct_s[:, lt_idx, :, :],
        R_obs_s[lt_idx, :, :],
    )
    results[c, m][skill_varname][R_thrs[0]][scale_km][lt]["sum"] += (
        ensscores.ensemble_skill(
            R_fct_s[:, lt_idx, :, :], R_obs_s[lt_idx, :, :], spread_skill_metric
        )
    )
    results[c, m][spread_varname][R_thrs[0]][scale_km][lt]["sum"] += (
        ensscores.ensemble_spread(R_fct_s[:, lt_idx, :, :], spread_skill_metric)
    )
    results[c, m][skill_varname][R_thrs[0]][scale_km][lt]["n"] += 1
    results[c, m][spread_varname][R_thrs[0]][scale_km][lt]["n"] += 1
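
For context, a minimal self-contained sketch of the aggregate_fields_space call pattern used in this worker: the space window is given in metres, which is why the scale in kilometres is multiplied by 1000. The metadata dict below is an assumption following pysteps conventions (illustrative values; the exact keys required may vary between versions).

import numpy as np
from pysteps.utils import aggregate_fields_space

# Assumed pysteps-style metadata for a 1 km grid (illustrative values).
metadata = {"xpixelsize": 1000.0, "ypixelsize": 1000.0, "unit": "mm/h", "accutime": 5}
field = np.random.exponential(size=(256, 256))

# Aggregate from 1 km to 8 km: the space window is passed in metres.
field_8km, metadata_8km = aggregate_fields_space(field, metadata, 8 * 1000)
print(field_8km.shape)  # expected (32, 32)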
Example #2
def test_rainfarm_shape(alpha, ds_factor, threshold, return_alpha):
    """Test that the output of rainfarm is consistent with the downscalnig factor."""

    window = metadata["xpixelsize"] * ds_factor
    precip_lr, __ = aggregate_fields_space(precip, metadata, window)

    rainfarm = downscaling.get_method("rainfarm")

    precip_hr = rainfarm(precip_lr, alpha, ds_factor, threshold, return_alpha)

    assert precip_hr.ndim == precip.ndim
    assert precip_hr.shape[0] == precip.shape[0]
    assert precip_hr.shape[1] == precip.shape[1]
Example #3
def test_rainfarm_alpha(alpha, ds_factor, threshold, return_alpha):
    """Test that rainfarm computes and returns alpha."""

    window = metadata["xpixelsize"] * ds_factor
    precip_lr, __ = aggregate_fields_space(precip, metadata, window)

    rainfarm = downscaling.get_method("rainfarm")

    precip_hr = rainfarm(precip_lr, alpha, ds_factor, threshold, return_alpha)

    assert len(precip_hr) == 2
    if alpha is None:
        # alpha was not supplied, so rainfarm must have estimated it.
        assert precip_hr[1] is not None
    else:
        # alpha was supplied and should be returned unchanged.
        assert precip_hr[1] == alpha
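
Both tests above receive (alpha, ds_factor, threshold, return_alpha) as arguments, so the original module presumably parametrizes them with pytest. A minimal sketch of such a parametrization; the value tuples are illustrative assumptions, not taken from the original module.

import pytest

# Illustrative argument sets (assumed, not from the original module).
rainfarm_arg_names = "alpha, ds_factor, threshold, return_alpha"
rainfarm_arg_values = [
    (1.0, 2, 0.0, True),
    (None, 4, 0.1, True),
]

@pytest.mark.parametrize(rainfarm_arg_names, rainfarm_arg_values)
def test_rainfarm_alpha(alpha, ds_factor, threshold, return_alpha):
    # Body as in Example #3 above.
    ...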
Exemple #4
0
def get_precipitation_fields(
    num_prev_files=0,
    num_next_files=0,
    return_raw=False,
    metadata=False,
    upscale=None,
    source="mch",
    log_transform=True,
    clip=None,
    **importer_kwargs,
):
    """
    Get a precipitation field from the archive to be used as reference.

    Source: bom
    Reference time: 2018/06/16 1000 UTC

    Source: fmi
    Reference time: 2016/09/28 1600 UTC

    Source: knmi
    Reference time: 2010/08/26 0000 UTC

    Source: mch
    Reference time: 2015/05/15 1630 UTC

    Source: opera
    Reference time: 2018/08/24 1800 UTC

    Source: saf
    Reference time: 2018/06/01 0700 UTC

    Source: mrms
    Reference time: 2019/06/10 0000 UTC

    Parameters
    ----------

    num_prev_files: int, optional
        Number of previous times (files) to return with respect to the
        reference time.

    num_next_files: int, optional
        Number of future times (files) to return with respect to the
        reference time.

    return_raw: bool, optional
        Do not preprocess the precipitation fields. False by default.
        The pre-processing steps are: 1) Convert to mm/h,
        2) Mask invalid values, 3) Log-transform the data [dBR].

    metadata: bool, optional
        If True, also return file metadata.

    upscale: float or None, optional
        Upscale fields in space during the pre-processing steps.
        If it is None, the precipitation field is not modified.
        If it is a float, it represents the length of the space window
        that is used to upscale the fields.

    source: {"bom", "fmi", "knmi", "mch", "opera", "saf", "mrms"}, optional
        Name of the data source to be used.

    log_transform: bool, optional
        Whether to log-transform the output to dB units. True by default.

    clip: scalars (left, right, bottom, top), optional
        The extent of the bounding box in data coordinates used to clip
        the data.

    Other Parameters
    ----------------

    importer_kwargs : dict
        Additional keyword arguments passed to the importer.

    Returns
    -------
    reference_field : array

    metadata : dict
    """

    if source == "bom":
        pytest.importorskip("netCDF4")

    if source == "fmi":
        pytest.importorskip("pyproj")

    if source == "knmi":
        pytest.importorskip("h5py")

    if source == "mch":
        pytest.importorskip("PIL")

    if source == "opera":
        pytest.importorskip("h5py")

    if source == "saf":
        pytest.importorskip("netCDF4")

    if source == "mrms":
        pytest.importorskip("pygrib")

    try:
        date = _reference_dates[source]
    except KeyError:
        raise ValueError(
            f"Unknown source name '{source}'\n"
            "The available data sources are: "
            f"{str(list(_reference_dates.keys()))}"
        )

    data_source = rcparams.data_sources[source]
    root_path = data_source["root_path"]
    path_fmt = data_source["path_fmt"]
    fn_pattern = data_source["fn_pattern"]
    fn_ext = data_source["fn_ext"]
    importer_name = data_source["importer"]
    _importer_kwargs = data_source["importer_kwargs"].copy()
    _importer_kwargs.update(**importer_kwargs)
    timestep = data_source["timestep"]

    # Find the input files from the archive
    fns = io.archive.find_by_date(
        date,
        root_path,
        path_fmt,
        fn_pattern,
        fn_ext,
        timestep=timestep,
        num_prev_files=num_prev_files,
        num_next_files=num_next_files,
    )

    # Read the radar composites
    importer = io.get_method(importer_name, "importer")

    reference_field, __, ref_metadata = io.read_timeseries(
        fns, importer, **_importer_kwargs
    )

    if not return_raw:

        if (num_prev_files == 0) and (num_next_files == 0):
            # Remove time dimension
            reference_field = np.squeeze(reference_field)

        # Convert to mm/h
        reference_field, ref_metadata = stp.utils.to_rainrate(
            reference_field, ref_metadata
        )

        # Clip domain
        reference_field, ref_metadata = stp.utils.clip_domain(
            reference_field, ref_metadata, clip
        )

        # Upscale data
        reference_field, ref_metadata = aggregate_fields_space(
            reference_field, ref_metadata, upscale
        )

        # Mask invalid values
        reference_field = np.ma.masked_invalid(reference_field)

        if log_transform:
            # Log-transform the data [dBR]
            reference_field, ref_metadata = stp.utils.dB_transform(
                reference_field, ref_metadata, threshold=0.1, zerovalue=-15.0
            )

        # Set missing values with the fill value
        np.ma.set_fill_value(reference_field, ref_metadata["zerovalue"])
        reference_field.data[reference_field.mask] = ref_metadata["zerovalue"]

    if metadata:
        return reference_field, ref_metadata

    return reference_field
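
The helper above looks its reference date up in a module-level _reference_dates mapping that is not shown. A plausible reconstruction, built only from the reference times listed in the docstring:

from datetime import datetime

# Reconstructed from the docstring's reference times; not the original source.
_reference_dates = {
    "bom": datetime(2018, 6, 16, 10, 0),
    "fmi": datetime(2016, 9, 28, 16, 0),
    "knmi": datetime(2010, 8, 26, 0, 0),
    "mch": datetime(2015, 5, 15, 16, 30),
    "opera": datetime(2018, 8, 24, 18, 0),
    "saf": datetime(2018, 6, 1, 7, 0),
    "mrms": datetime(2019, 6, 10, 0, 0),
}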
Example #5

plot_precip_field(precip, geodata=metadata)
plt.show()

# Assign the fill value to all the NaNs
precip[~np.isfinite(precip)] = metadata["zerovalue"]

###############################################################################
# Upscale the field
# -----------------
#
# To test our downscaling method, we first need to upscale the original field to
# a lower resolution. We are going to use an upscaling factor of 16x.

upscaling_factor = 16
upscale_to = metadata["xpixelsize"] * upscaling_factor
precip_lr, metadata_lr = aggregate_fields_space(precip, metadata, upscale_to)

# Plot the upscaled rainfall field
plt.figure()
plot_precip_field(precip_lr, geodata=metadata_lr)

###############################################################################
# Downscale the field
# -------------------
#
# We can now use RainFARM to generate stochastic realizations of the downscaled
# precipitation field.

fig = plt.figure(figsize=(5, 8))
# Set the number of stochastic realizations
num_realizations = 5
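
The fragment stops right after setting num_realizations. A plausible continuation that generates and plots the realizations, assuming the rainfarm callable is obtained via downscaling.get_method as in the tests above (a sketch, not the original script):

from pysteps import downscaling

rainfarm = downscaling.get_method("rainfarm")

for n in range(num_realizations):
    # Generate one stochastic high-resolution realization and plot it.
    precip_hr = rainfarm(precip_lr, ds_factor=upscaling_factor)
    plt.subplot(num_realizations, 1, n + 1)
    plot_precip_field(precip_hr, geodata=metadata, colorbar=False)

plt.tight_layout()
plt.show()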
Example #6
def get_precipitation_fields(num_prev_files=0,
                             num_next_files=0,
                             return_raw=False,
                             metadata=False,
                             upscale=None):
    """
    Get a precipitation field from the archive to be used as reference.

    Source: mch
    Reference time: 2015/05/15 1630 UTC

    Parameters
    ----------

    num_prev_files: int
        Number of previous times (files) to return with respect to the
        reference time.

    num_next_files: int
        Number of future times (files) to return with respect to the
        reference time.

    return_raw: bool
        Do not preprocess the precipitation fields. False by default.
        The pre-processing steps are: 1) Convert to mm/h,
        2) Mask invalid values, 3) Log-transform the data [dBR].

    metadata: bool
        If True, also return file metadata.

    upscale: float or None
        Upscale fields in space during the pre-processing steps.
        If it is None, the precipitation field is not modified.
        If it is a float, it represents the length of the space window
        that is used to upscale the fields.


    Returns
    -------
    reference_field : array

    metadata : dict


    """
    pytest.importorskip('PIL')
    # Selected case
    date = datetime.strptime("201505151630", "%Y%m%d%H%M")
    data_source = rcparams.data_sources["mch"]

    root_path = data_source["root_path"]
    path_fmt = data_source["path_fmt"]
    fn_pattern = data_source["fn_pattern"]
    fn_ext = data_source["fn_ext"]
    importer_name = data_source["importer"]
    importer_kwargs = data_source["importer_kwargs"]
    timestep = data_source["timestep"]

    # Find the input files from the archive
    fns = io.archive.find_by_date(date,
                                  root_path,
                                  path_fmt,
                                  fn_pattern,
                                  fn_ext,
                                  timestep=timestep,
                                  num_prev_files=num_prev_files,
                                  num_next_files=num_next_files)

    # Read the radar composites
    importer = io.get_method(importer_name, "importer")
    reference_field, __, ref_metadata = io.read_timeseries(fns, importer,
                                                           **importer_kwargs)

    if not return_raw:

        if (num_prev_files == 0) and (num_next_files == 0):
            # Remove time dimension
            reference_field = np.squeeze(reference_field)

        # Convert to mm/h
        reference_field, ref_metadata = stp.utils.to_rainrate(reference_field,
                                                              ref_metadata)

        # Upscale data in space, if requested
        reference_field, ref_metadata = aggregate_fields_space(reference_field,
                                                               ref_metadata,
                                                               upscale)

        # Mask invalid values
        reference_field = np.ma.masked_invalid(reference_field)

        # Log-transform the data [dBR]
        reference_field, ref_metadata = stp.utils.dB_transform(reference_field,
                                                               ref_metadata,
                                                               threshold=0.1,
                                                               zerovalue=-15.0)

        # Set missing values to the fill value (the dBR zero value)
        reference_field.data[reference_field.mask] = ref_metadata["zerovalue"]

    if metadata:
        return reference_field, ref_metadata

    return reference_field
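
A usage sketch for the helper above; the argument values are illustrative, not from the original module.

# Hypothetical call: the reference composite plus the two preceding ones,
# upscaled to a 2 km grid and returned together with the file metadata.
reference_field, ref_metadata = get_precipitation_fields(
    num_prev_files=2, metadata=True, upscale=2000
)
print(reference_field.shape)  # (3, ny, nx); grid size depends on the archive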