Example #1
def _sal_detect_objects(precip, thr_factor, thr_quantile, tstorm_kwargs):
    """
    Detect coherent precipitation objects using a multi-threshold approach from
    :cite:`Feldmann2021`.

    Parameters
    ----------
    precip: array-like
        Array of shape (m,n) containing the input data. NaN values are ignored.
    thr_factor: float
        Factor used to compute the detection threshold as in eq. 1 of :cite:`WHZ2009`.
        If not None, this is used to identify coherent objects enclosed by the
        threshold contour `thr_factor * thr_quantile(precip)`.
    thr_quantile: float
        The wet quantile between 0 and 1 used to define the detection threshold.
        Required if `thr_factor` is not None.
    tstorm_kwargs: dict
        Optional dictionary containing keyword arguments for the tstorm feature
        detection algorithm. If None, default values are used.
        See the documentation of :py:func:`pysteps.feature.tstorm.detection`.

    Returns
    -------
    precip_objects: pd.DataFrame
        Dataframe containing all detected cells and their respective properties.
    """
    if not PANDAS_IMPORTED:
        raise MissingOptionalDependency(
            "The pandas package is required for the SAL "
            "verification method but it is not installed"
        )
    if not SKIMAGE_IMPORTED:
        raise MissingOptionalDependency(
            "The scikit-image package is required for the SAL "
            "verification method but it is not installed"
        )
    if thr_factor is not None and thr_quantile is None:
        raise ValueError("You must pass thr_quantile, too")
    if tstorm_kwargs is None:
        tstorm_kwargs = dict()
    if thr_factor is not None:
        zero_value = np.nanmin(precip)
        threshold = thr_factor * np.nanquantile(
            precip[precip > zero_value], thr_quantile
        )
        tstorm_kwargs = {
            "minmax": tstorm_kwargs.get("minmax", threshold),
            "maxref": tstorm_kwargs.get("maxref", threshold + 1e-5),
            "mindiff": tstorm_kwargs.get("mindiff", 1e-5),
            "minref": tstorm_kwargs.get("minref", threshold),
        }
    _, labels = tstorm_detect.detection(precip, **tstorm_kwargs)
    labels = labels.astype(int)
    precip_objects = regionprops_table(
        labels, intensity_image=precip, properties=REGIONPROPS
    )
    return pd.DataFrame(precip_objects)
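
A minimal usage sketch on synthetic data follows. The import path (the helper is private to pysteps.verification.salscores) and the SAL-style settings thr_factor=1/15 with thr_quantile=0.95 are assumptions, not taken from the code above.

import numpy as np

from pysteps.verification.salscores import _sal_detect_objects  # path assumed

# Two synthetic Gaussian rain cells on a 200 x 200 grid.
yy, xx = np.mgrid[0:200, 0:200]
precip = 30.0 * np.exp(-((xx - 60) ** 2 + (yy - 60) ** 2) / 200.0)
precip += 20.0 * np.exp(-((xx - 150) ** 2 + (yy - 120) ** 2) / 300.0)

objects = _sal_detect_objects(
    precip, thr_factor=1.0 / 15.0, thr_quantile=0.95, tstorm_kwargs=None
)
print(len(objects), "objects detected")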
Example #2
def import_bom_rf3(filename, **kwargs):
    """Import a NetCDF radar rainfall product from the BoM Rainfields3.

    Parameters
    ----------
    filename : str
        Name of the file to import.

    Returns
    -------
    out : tuple
        A three-element tuple containing the rainfall field in mm/h imported
        from the Bureau RF3 netcdf, the quality field and the metadata. The
        quality field is currently set to None.

    """
    if not netcdf4_imported:
        raise MissingOptionalDependency(
            "netCDF4 package is required to import BoM Rainfields3 products "
            "but it is not installed")

    R = _import_bom_rf3_data(filename)

    geodata = _import_bom_rf3_geodata(filename)
    metadata = geodata
    # TODO(import_bom_rf3): Add missing georeferencing data.

    metadata["transform"] = None
    metadata["zerovalue"] = np.nanmin(R)
    # Guard against all-NaN and uniform fields, for which R > min is empty
    if np.any(R > metadata["zerovalue"]):
        metadata["threshold"] = np.nanmin(R[R > metadata["zerovalue"]])
    else:
        metadata["threshold"] = np.nan

    return R, None, metadata
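
A typical call looks as follows; the file name is a placeholder and the import path pysteps.io.importers is an assumption.

from pysteps.io.importers import import_bom_rf3  # module path assumed

# "rainfields3_sample.nc" stands in for any BoM Rainfields3 NetCDF file.
rain, quality, metadata = import_bom_rf3("rainfields3_sample.nc")
print(rain.shape, metadata["zerovalue"], metadata["threshold"])
print("quality field:", quality)  # currently always None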
Example #3
def parse_proj4_string(proj4str):
    """Construct a dictionary from a PROJ.4 projection string.

    Parameters
    ----------
    proj4str: str
      A PROJ.4-compatible projection string.

    Returns
    -------
    out: dict
      Dictionary, where keys and values are parsed from the projection
      parameter tokens beginning with '+'.
    """

    if not PYPROJ_IMPORTED:
        raise MissingOptionalDependency(
            "pyproj package is required for parse_proj4_string function utility "
            "but it is not installed")

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        # Ignore the warning raised by to_dict() about losing information.
        proj_dict = pyproj.Proj(proj4str).crs.to_dict()

    return proj_dict
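
A short sketch; the module path and the sample projection string are assumptions.

from pysteps.visualization.utils import parse_proj4_string  # module path assumed

# Example polar-stereographic projection string.
proj4str = ("+proj=stere +lat_0=90 +lon_0=25 +lat_ts=60 "
            "+a=6371288 +x_0=380886.31 +y_0=3395677.92 +no_defs")
proj_dict = parse_proj4_string(proj4str)
print(proj_dict["proj"], proj_dict["lat_0"], proj_dict["lon_0"])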
Example #4
def import_fmi_geotiff(filename, **kwargs):
    """Import a reflectivity field (dBZ) from an FMI GeoTIFF file.

    Parameters
    ----------
    filename : str
        Name of the file to import.

    Returns
    -------
    out : tuple
        A three-element tuple containing the reflectivity field in dBZ, the
        associated quality field and metadata. The quality field is currently
        set to None.

    """
    if not gdal_imported:
        raise MissingOptionalDependency(
            "gdal package is required to import "
            "FMI's radar reflectivity composite in GeoTIFF format "
            "but it is not installed")

    f = gdal.Open(filename, gdalconst.GA_ReadOnly)

    rb = f.GetRasterBand(1)
    R = rb.ReadAsArray()
    MASK = R == 255
    R = R.astype(float) * rb.GetScale() + rb.GetOffset()
    R = (R - 64.0) / 2.0
    R[MASK] = np.nan

    sr = osr.SpatialReference()
    pr = f.GetProjection()
    sr.ImportFromWkt(pr)

    projdef = sr.ExportToProj4()

    gt = f.GetGeoTransform()

    metadata = {}

    metadata["projection"] = projdef
    metadata["x1"] = gt[0]
    metadata["y1"] = gt[3] + gt[5] * f.RasterYSize
    metadata["x2"] = metadata["x1"] + gt[1] * f.RasterXSize
    metadata["y2"] = gt[3]
    metadata["xpixelsize"] = abs(gt[1])
    metadata["ypixelsize"] = abs(gt[5])
    if gt[5] < 0:
        metadata["yorigin"] = "upper"
    else:
        metadata["yorigin"] = "lower"
    metadata["institution"] = "Finnish Meteorological Institute"
    metadata["unit"] = rb.GetUnitType()
    metadata["transform"] = None
    metadata["accutime"] = 5.0
    R_min = np.nanmin(R)
    # Guard against uniform fields, for which R > R_min is empty
    if np.any(R > R_min):
        metadata["threshold"] = np.nanmin(R[R > R_min])
    else:
        metadata["threshold"] = np.nan
    metadata["zerovalue"] = R_min

    return R, None, metadata
Example #5
def _sal_scaled_volume(precip_objects):
    """
    Calculate the scaled volume based on :cite:`WPHF2008`.

    Parameters
    ----------
    precip_objects: pd.DataFrame
        Dataframe containing all detected cells and their respective properties
        as returned by the :py:func:`pysteps.verification.salscores._sal_detect_objects`
        function.

    Returns
    -------
    object_volume: pd.Series
        A pandas Series with the scaled volume of each precipitation object.
    """
    if not PANDAS_IMPORTED:
        raise MissingOptionalDependency(
            "The pandas package is required for the SAL "
            "verification method but it is not installed"
        )
    objects_volume_scaled = []
    for _, precip_object in precip_objects.iterrows():
        intensity_sum = precip_object.intensity_image.sum()
        max_intensity = precip_object.max_intensity
        volume_scaled = intensity_sum / max_intensity
        objects_volume_scaled.append(volume_scaled)
    return pd.Series(
        data=objects_volume_scaled, index=precip_objects.label, name="scaled_volume"
    )
Example #6
def export_forecast_dataset(F, exporter):
    """Write a forecast array into a file.

    The written dataset has dimensions
    (num_ens_members,num_timesteps,shape[0],shape[1]), where shape refers to
    the shape of the two-dimensional forecast grids. If the exporter was
    initialized with incremental!=None, the array is appended to the existing
    dataset either along the ensemble member or time axis.

    Parameters
    ----------
    F : array_like
        The array to write. The required shape depends on the choice of the
        'incremental' parameter the exporter was initialized with:

        +-----------------+---------------------------------------------------+
        |    incremental  |                    required shape                 |
        +=================+===================================================+
        |    None         | (num_ens_members,num_timesteps,shape[0],shape[1]) |
        +-----------------+---------------------------------------------------+
        |    'timestep'   | (num_ens_members,shape[0],shape[1])               |
        +-----------------+---------------------------------------------------+
        |    'member'     | (num_timesteps,shape[0],shape[1])                 |
        +-----------------+---------------------------------------------------+

    exporter : dict
        An exporter object created with any initialization method implemented
        in :py:mod:`pysteps.io.exporters`.

    """
    if exporter["method"] == "netcdf" and not netcdf4_imported:
        raise MissingOptionalDependency(
            "netCDF4 package is required for netcdf "
            "exporters but it is not installed")

    if exporter["incremental"] is None:
        shp = (exporter["num_ens_members"], exporter["num_timesteps"],
               exporter["shape"][0], exporter["shape"][1])
        if F.shape != shp:
            raise ValueError("F has invalid shape: %s != %s" %
                             (str(F.shape), str(shp)))
    elif exporter["incremental"] == "timestep":
        shp = (exporter["num_ens_members"], exporter["shape"][0],
               exporter["shape"][1])
        if F.shape != shp:
            raise ValueError("F has invalid shape: %s != %s" %
                             (str(F.shape), str(shp)))
    elif exporter["incremental"] == "member":
        shp = (exporter["num_timesteps"], exporter["shape"][0],
               exporter["shape"][1])
        if F.shape != shp:
            raise ValueError("F has invalid shape: %s != %s" %
                             (str(F.shape), str(shp)))

    if exporter["method"] == "netcdf":
        _export_netcdf(F, exporter)
    elif exporter["method"] == "kineros":
        _export_kineros(F, exporter)
    else:
        raise ValueError("unknown exporter method %s" % exporter["method"])
Example #7
def morph_opening(input_image, thr, n):
    """
    Filter out small-scale noise in the image by applying a binary
    morphological opening, that is, erosion followed by dilation.

    .. _MaskedArray:\
        https://docs.scipy.org/doc/numpy/reference/maskedarray.baseclass.html#numpy.ma.MaskedArray

    .. _ndarray:\
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html

    Parameters
    ----------
    input_image: ndarray_ or MaskedArray_
        Array of shape (m, n) containing the input image.
    thr: float
        The threshold used to convert the image into a binary image.
    n: int
        The structuring element size [pixels].

    Returns
    -------
    input_image: ndarray_ or MaskedArray_
        Array of shape (m,n) containing the filtered image.
    """
    if not CV2_IMPORTED:
        raise MissingOptionalDependency(
            "opencv package is required for the morphologyEx "
            "routine but it is not installed")

    input_image = input_image.copy()

    # Check if a MaskedArray is used. If not, mask the ndarray
    to_ndarray = False
    if not isinstance(input_image, MaskedArray):
        to_ndarray = True
        input_image = np.ma.masked_invalid(input_image)

    np.ma.set_fill_value(input_image, input_image.min())

    # Convert to binary image
    field_bin = np.ndarray.astype(input_image.filled() > thr, "uint8")

    # Build a structuring element of size n
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (n, n))

    # Apply morphological opening (i.e. erosion then dilation)
    field_bin_out = cv2.morphologyEx(field_bin, cv2.MORPH_OPEN, kernel)

    # Build mask to be applied on the original image
    mask = (field_bin - field_bin_out) > 0

    # Filter out small isolated pixels based on mask
    input_image[mask] = np.nanmin(input_image)

    if to_ndarray:
        input_image = np.array(input_image)

    return input_image
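
A minimal sketch on synthetic data; the module path pysteps.utils.images is an assumption.

import numpy as np

from pysteps.utils.images import morph_opening  # module path assumed

rng = np.random.default_rng(42)
field = np.zeros((100, 100))
field[40:60, 40:60] = 5.0                    # coherent 20 x 20 block
field[rng.random((100, 100)) > 0.995] = 5.0  # isolated speckle noise

cleaned = morph_opening(field, thr=0.5, n=3)
# The block survives the opening; isolated pixels are reset to the field minimum.
print(int((field > 0.5).sum()), "->", int((cleaned > 0.5).sum()))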
Example #8
def binary_mse_accum(bmse, X_f, X_o):
    """Accumulate forecast-observation pairs to an BMSE object.

    Parameters
    ----------

    bmse : dict
        The BMSE object initialized with
        :py:func:`pysteps.verification.spatialscores.binary_mse_init`.

    X_f : array_like
        Array of shape (m, n) containing the forecast field.

    X_o : array_like
        Array of shape (m, n) containing the observation field.
    """
    if not pywt_imported:
        raise MissingOptionalDependency(
            "PyWavelets package is required for the binary MSE spatial "
            "verification method but it is not installed")

    if len(X_f.shape) != 2 or len(X_o.shape) != 2 or X_f.shape != X_o.shape:
        message = "X_f and X_o must be two-dimensional arrays"
        message += " having the same shape"
        raise ValueError(message)

    thr = bmse["thr"]
    wavelet = bmse["wavelet"]

    X_f = X_f.copy()
    X_f[~np.isfinite(X_f)] = thr - 1
    X_o = X_o.copy()
    X_o[~np.isfinite(X_o)] = thr - 1

    w = pywt.Wavelet(wavelet)

    I_f = (X_f >= thr).astype(float)
    I_o = (X_o >= thr).astype(float)

    E_decomp = _wavelet_decomp(I_f - I_o, w)

    n_scales = len(E_decomp)
    if bmse["scales"] is None:
        bmse["scales"] = pow(2, np.arange(n_scales))[::-1]
        bmse["mse"] = np.zeros(n_scales)

    # update eps
    eps = 1.0 * np.sum((X_o >= thr).astype(int)) / X_o.size
    if np.isfinite(eps):
        bmse["eps"] = (bmse["eps"] * bmse["n"] + eps) / (bmse["n"] + 1)

    # update mse
    for j in range(n_scales):
        mse = np.mean(E_decomp[j]**2)
        if np.isfinite(mse):
            bmse["mse"][j] = (bmse["mse"][j] * bmse["n"] + mse) / (bmse["n"] +
                                                                   1)

    bmse["n"] += 1
Example #9
def _LucasKanade_features_tracking(prvs, next, p0, winsize_LK, nr_levels_LK):
    """Call the Lucas-Kanade features tracking algorithm.

    Parameters
    ----------
    prvs : array-like
        Array of shape (m,n) containing the first 8-bit input image.
    next : array-like
        Array of shape (m,n) containing the successive 8-bit input image.
    p0 : list
        Vector of 2D points for which the flow needs to be found.
        Point coordinates must be single-precision floating-point numbers.
    winsize_LK : tuple
        Size of the search window at each pyramid level.
        Small windows (e.g. 10) lead to unrealistic motion.
    nr_levels_LK : int
        0-based maximal pyramid level number.
        Not very sensitive parameter.

    Returns
    -------
    x0 : array-like
        Output vector of x-coordinates of detected point motions.
    y0 : array-like
        Output vector of y-coordinates of detected point motions.
    u : array-like
        Output vector of u-components of detected point motions.
    v : array-like
        Output vector of v-components of detected point motions.

    """
    if not cv2_imported:
        raise MissingOptionalDependency(
            "opencv package is required for the Lucas-Kanade method "
            "optical flow method but it is not installed")

    # LK parameters
    lk_params = dict(winSize=winsize_LK,
                     maxLevel=nr_levels_LK,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0))

    # Lucas-Kanade
    p1, st, err = cv2.calcOpticalFlowPyrLK(prvs, next, p0, None, **lk_params)

    # keep only features that have been found
    st = st[:, 0] == 1
    p1 = p1[st, :, :]
    p0 = p0[st, :, :]
    err = err[st, :]

    # extract vectors
    x0 = p0[:, :, 0]
    y0 = p0[:, :, 1]
    u = np.array((p1 - p0)[:, :, 0])
    v = np.array((p1 - p0)[:, :, 1])

    return x0, y0, u, v
Example #10
def _ShiTomasi_features_to_track(R, max_corners_ST, quality_level_ST,
                                 min_distance_ST, block_size_ST, mask):
    """Call the Shi-Tomasi corner detection algorithm.

    Parameters
    ----------
    R : array-like
        Array of shape (m,n) containing the input precipitation field passed as
        8-bit image.

    max_corners_ST : int
        Maximum number of corners to return. If more corners are found, only
        the strongest of them are returned.

    quality_level_ST : float
        Parameter characterizing the minimal accepted quality of image corners.
        See original documentation for more details (https://docs.opencv.org).

    min_distance_ST : int
        Minimum possible Euclidean distance between the returned corners [px].

    block_size_ST : int
        Size of an average block for computing a derivative covariation matrix
        over each pixel neighborhood.

    mask : ndarray_
        Array of shape (m,n). It specifies the region in which the corners are
        detected.

    Returns
    -------
    p0 : list
        Output vector of detected corners.

    """
    if not cv2_imported:
        raise MissingOptionalDependency(
            "opencv package is required for the Lucas-Kanade "
            "optical flow method but it is not installed")

    if len(R.shape) != 2:
        raise ValueError("R must be a two-dimensional array")
    if R.dtype != "uint8":
        raise ValueError("R must be passed as 8-bit image")

    # ShiTomasi corner detection parameters
    ShiTomasi_params = dict(
        maxCorners=max_corners_ST,
        qualityLevel=quality_level_ST,
        minDistance=min_distance_ST,
        blockSize=block_size_ST,
    )

    # detect corners
    p0 = cv2.goodFeaturesToTrack(R, mask=mask, **ShiTomasi_params)

    return p0
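
A self-contained sketch on a synthetic 8-bit image; the helper is private, so it is assumed to be called from within its own module, and mask=None searches the whole image.

import numpy as np

# One bright square; its corners should be picked up by the detector.
R8 = np.zeros((128, 128), dtype=np.uint8)
R8[40:80, 40:80] = 200

p0 = _ShiTomasi_features_to_track(R8, max_corners_ST=100,
                                  quality_level_ST=0.1, min_distance_ST=5,
                                  block_size_ST=15, mask=None)
print(0 if p0 is None else len(p0), "corners detected")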
Example #11
def import_fmi_pgm(filename, **kwargs):
    """Import a 8-bit PGM radar reflectivity composite from the FMI archive.

    Parameters
    ----------

    filename : str
        Name of the file to import.

    Other Parameters
    ----------------

    gzipped : bool
        If True, the input file is treated as a compressed gzip file.

    Returns
    -------

    out : tuple
        A three-element tuple containing the reflectivity composite in dBZ
        and the associated quality field and metadata. The quality field is
        currently set to None.
    """
    if not pyproj_imported:
        raise MissingOptionalDependency("pyproj package is required to import "
                                        "FMI's radar reflectivity composite "
                                        "but it is not installed")

    gzipped = kwargs.get("gzipped", False)

    pgm_metadata = _import_fmi_pgm_metadata(filename, gzipped=gzipped)

    if not gzipped:
        R = imread(filename)
    else:
        R = imread(gzip.open(filename, "r"))
    geodata = _import_fmi_pgm_geodata(pgm_metadata)

    MASK = R == pgm_metadata["missingval"]
    R = R.astype(float)
    R[MASK] = np.nan
    R = (R - 64.0) / 2.0

    metadata = geodata
    metadata["institution"] = "Finnish Meteorological Institute"
    metadata["accutime"] = 5.0
    metadata["unit"] = "dBZ"
    metadata["transform"] = "dB"
    metadata["zerovalue"] = np.nanmin(R)
    if np.any(np.isfinite(R)):
        metadata["threshold"] = np.nanmin(R[R > np.nanmin(R)])
    else:
        metadata["threshold"] = np.nan
    metadata["zr_a"] = 223.0
    metadata["zr_b"] = 1.53

    return R, None, metadata
Example #12
def _sal_weighted_distance(precip, thr_factor, thr_quantile, tstorm_kwargs):
    """
    Compute the weighted averaged distance between the centers of mass of the
    individual objects and the center of mass of the total precipitation field.

    Parameters
    ----------
    precip: array-like
        Array of shape (m,n). NaNs are ignored.
    thr_factor: float
        Factor used to compute the detection threshold as in eq. 1 of :cite:`WHZ2009`.
        If not None, this is used to identify coherent objects enclosed by the
        threshold contour `thr_factor * thr_quantile(precip)`.
    thr_quantile: float
        The wet quantile between 0 and 1 used to define the detection threshold.
        Required if `thr_factor` is not None.
    tstorm_kwargs: dict
        Optional dictionary containing keyword arguments for the tstorm feature
        detection algorithm. If None, default values are used.
        See the documentation of :py:func:`pysteps.feature.tstorm.detection`.

    Returns
    -------
    weighted_distance: float
        The weighted averaged distance between the centers of mass of the
        individual objects and the center of mass of the total precipitation field.
        The returned value is NaN if no objects are detected.
    """
    if not PANDAS_IMPORTED:
        raise MissingOptionalDependency(
            "The pandas package is required for the SAL "
            "verification method but it is not installed"
        )
    precip_objects = _sal_detect_objects(
        precip, thr_factor, thr_quantile, tstorm_kwargs
    )
    if len(precip_objects) == 0:
        return np.nan
    centroid_total = center_of_mass(np.nan_to_num(precip))
    r = []
    for i in precip_objects.label - 1:
        xd = (precip_objects["weighted_centroid-1"][i] - centroid_total[1]) ** 2
        yd = (precip_objects["weighted_centroid-0"][i] - centroid_total[0]) ** 2

        dst = sqrt(xd + yd)
        sumr = (precip_objects.intensity_image[i].sum()) * dst

        sump = precip_objects.intensity_image[i].sum()

        r.append({"sum_dist": sumr, "sum_p": sump})
    rr = pd.DataFrame(r)
    return rr.sum_dist.sum() / (rr.sum_p.sum())
Example #13
def morph_opening(input_image, thr, n):
    """Filter out small scale noise on the image by applying a binary
    morphological opening, that is, erosion followed by dilation.

    Parameters
    ----------

    input_image : array_like
        Array of shape (m, n) containing the input image.

    thr : float
        The threshold used to convert the image into a binary image.

    n : int
        The structuring element size [pixels].

    Returns
    -------

    input_image : array_like
        Array of shape (m,n) containing the filtered image.
    """
    if not CV2_IMPORTED:
        raise MissingOptionalDependency(
            "opencv package is required for the morphologyEx "
            "routine but it is not installed"
        )

    # Work on a copy so the caller's array is not modified in place
    input_image = input_image.copy()

    # Convert to binary image
    field_bin = np.ndarray.astype(input_image > thr, "uint8")

    # Build a structuring element of size n
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (n, n))

    # Apply morphological opening (i.e. erosion then dilation)
    field_bin_out = cv2.morphologyEx(field_bin, cv2.MORPH_OPEN, kernel)

    # Build mask to be applied on the original image
    mask = (field_bin - field_bin_out) > 0

    # Filter out small isolated pixels based on mask
    input_image[mask] = np.nanmin(input_image)

    return input_image
Example #14
def _get_fft_method(name):
    if name == "numpy":
        return fft.numpy_fft, {}
    elif name == "scipy":
        return fft.scipy_fft, {}
    elif name == "pyfftw":
        if not fft.pyfftw_imported:
            raise MissingOptionalDependency(
                "pyfftw is required but it is not installed")
        # TODO: Multithreading is currently disabled because it gives segfault
        # with dask.
        return fft.pyfftw_fft, {
            "threads": 1,
            "planner_effort": "FFTW_ESTIMATE"
        }
    else:
        raise ValueError(
            "unknown method %s, the available methods are 'numpy', 'scipy' and 'pyfftw'"
            % name)
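
A minimal sketch of the intended call pattern, assuming the returned object exposes numpy-style fft2/ifft2 functions:

import numpy as np

fft_module, fft_kwargs = _get_fft_method("numpy")
field = np.random.randn(64, 64)
field_hat = fft_module.fft2(field, **fft_kwargs)
field_back = fft_module.ifft2(field_hat, **fft_kwargs).real
print(np.allclose(field, field_back))  # True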
Example #15
def _clean_image(R, n=3, thr=0):
    """Apply a binary morphological opening to filter small isolated echoes.

    Parameters
    ----------
    R : array-like
        Array of shape (m,n) containing the input precipitation field.
    n : int
        The structuring element size [px].
    thr : float
        The rain/no-rain threshold to convert the image into a binary image.

    Returns
    -------
    R : array
        Array of shape (m,n) containing the cleaned precipitation field.

    """
    if not cv2_imported:
        raise MissingOptionalDependency(
            "opencv package is required for the Lucas-Kanade method "
            "optical flow method but it is not installed")

    # convert to binary image (rain/no rain)
    field_bin = np.ndarray.astype(R > thr, "uint8")

    # build a structuring element of size (n x n)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (n, n))

    # apply morphological opening (i.e. erosion then dilation)
    field_bin_out = cv2.morphologyEx(field_bin, cv2.MORPH_OPEN, kernel)

    # build mask to be applied on the original image
    mask = (field_bin - field_bin_out) > 0

    # filter out small isolated echoes based on mask
    R[mask] = np.nanmin(R)

    return R
Example #16
def import_mch_metranet(filename, product, unit, accutime):
    """Import a 8-bit bin radar reflectivity composite from the MeteoSwiss
    archive.

    Parameters
    ----------

    filename : str
        Name of the file to import.

    product : {"AQC", "CPC", "RZC", "AZC"}
        The name of the MeteoSwiss QPE product.\n
        Currently supported products:

        +------+----------------------------+
        | Name |          Product           |
        +======+============================+
        | AQC  |     Acquire                |
        +------+----------------------------+
        | CPC  |     CombiPrecip            |
        +------+----------------------------+
        | RZC  |     Precip                 |
        +------+----------------------------+
        | AZC  |     RZC accumulation       |
        +------+----------------------------+

    unit : {"mm/h", "mm", "dBZ"}
        the physical unit of the data

    accutime : float
        the accumulation time in minutes of the data

    Returns
    -------

    out : tuple
        A three-element tuple containing the precipitation field in mm/h imported
        from a MeteoSwiss gif file and the associated quality field and metadata.
        The quality field is currently set to None.
    """
    if not metranet_imported:
        raise MissingOptionalDependency(
            "metranet package needed for importing MeteoSwiss "
            "radar composites but it is not installed")

    ret = metranet.read_file(filename, physic_value=True, verbose=False)
    R = ret.data

    geodata = _import_mch_geodata()

    # read metranet
    metadata = geodata
    metadata["institution"] = "MeteoSwiss"
    metadata["accutime"] = accutime
    metadata["unit"] = unit
    metadata["transform"] = None
    metadata["zerovalue"] = np.nanmin(R)
    if np.isnan(metadata["zerovalue"]):
        metadata["threshold"] = np.nan
    else:
        metadata["threshold"] = np.nanmin(R[R > metadata["zerovalue"]])
    metadata["zr_a"] = 316.0
    metadata["zr_b"] = 1.5

    return R, None, metadata
Example #17
def import_mch_hdf5(filename, **kwargs):
    """Import a precipitation field (and optionally the quality field) from a
    MeteoSwiss HDF5 file conforming to the ODIM specification.

    Parameters
    ----------

    filename : str
        Name of the file to import.

    Other Parameters
    ----------------

    qty : {'RATE', 'ACRR', 'DBZH'}
        The quantity to read from the file. The currently supported identifiers
        are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall
        accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value
        is 'RATE'.

    Returns
    -------

    out : tuple
        A three-element tuple containing the OPERA product for the requested
        quantity and the associated quality field and metadata. The quality
        field is read from the file if it contains a dataset whose quantity
        identifier is 'QIND'.
    """
    if not h5py_imported:
        raise MissingOptionalDependency(
            "h5py package is required to import "
            "radar reflectivity composites using ODIM HDF5 specification "
            "but it is not installed")

    qty = kwargs.get("qty", "RATE")

    if qty not in ["ACRR", "DBZH", "RATE"]:
        raise ValueError(
            "unknown quantity %s: the available options are 'ACRR', 'DBZH' and 'RATE'"
        )

    f = h5py.File(filename, "r")

    R = None
    Q = None

    for dsg in f.items():
        if dsg[0].startswith("dataset"):
            what_grp_found = False
            # check if the "what" group is in the "dataset" group
            if "what" in list(dsg[1].keys()):
                qty_, gain, offset, nodata, undetect = _read_mch_hdf5_what_group(
                    dsg[1]["what"])
                what_grp_found = True

            for dg in dsg[1].items():
                if dg[0][0:4] == "data":
                    # check if the "what" group is in the "data" group
                    if "what" in list(dg[1].keys()):
                        qty_, gain, offset, nodata, undetect = _read_mch_hdf5_what_group(
                            dg[1]["what"])
                    elif not what_grp_found:
                        raise DataModelError("Non ODIM compilant file: "
                                             "no what group found from {} "
                                             "or its subgroups".format(dg[0]))

                    if qty_.decode() in [qty, "QIND"]:
                        ARR = dg[1]["data"][...]
                        MASK_N = ARR == nodata
                        MASK_U = ARR == undetect
                        MASK = np.logical_and(~MASK_U, ~MASK_N)

                        if qty_.decode() == qty:
                            R = np.empty(ARR.shape)
                            R[MASK] = ARR[MASK] * gain + offset
                            R[MASK_U] = np.nan
                            R[MASK_N] = np.nan
                        elif qty_.decode() == "QIND":
                            Q = np.empty(ARR.shape, dtype=float)
                            Q[MASK] = ARR[MASK]
                            Q[~MASK] = np.nan

    if R is None:
        raise IOError("requested quantity %s not found" % qty)

    where = f["where"]
    proj4str = where.attrs["projdef"].decode()  # is empty ...

    geodata = _import_mch_geodata()
    metadata = geodata

    # TODO: use those from the hdf5 file instead
    # xpixelsize = where.attrs["xscale"] * 1000.0
    # ypixelsize = where.attrs["yscale"] * 1000.0
    # xsize = where.attrs["xsize"]
    # ysize = where.attrs["ysize"]

    if qty == "ACRR":
        unit = "mm"
        transform = None
    elif qty == "DBZH":
        unit = "dBZ"
        transform = "dB"
    else:
        unit = "mm/h"
        transform = None

    if np.any(np.isfinite(R)):
        thr = np.nanmin(R[R > np.nanmin(R)])
    else:
        thr = np.nan

    metadata.update({
        "yorigin": "upper",
        "institution": "MeteoSwiss",
        "accutime": 5.0,
        "unit": unit,
        "transform": transform,
        "zerovalue": np.nanmin(R),
        "threshold": thr,
        "zr_a": 316.0,
        "zr_b": 1.5,
    })

    f.close()

    return R, Q, metadata
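
A typical call; the file name is a placeholder for any MeteoSwiss ODIM HDF5 composite, and the import path is an assumption.

from pysteps.io.importers import import_mch_hdf5  # module path assumed

R, Q, metadata = import_mch_hdf5("RZC_sample.hdf5", qty="RATE")
print(metadata["unit"], metadata["threshold"])
print("quality field present:", Q is not None)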
Example #18
def import_mch_gif(filename, product, unit, accutime):
    """Import a 8-bit gif radar reflectivity composite from the MeteoSwiss
    archive.

    Parameters
    ----------

    filename : str
        Name of the file to import.

    product : {"AQC", "CPC", "RZC", "AZC"}
        The name of the MeteoSwiss QPE product.\n
        Currently supported products:

        +------+----------------------------+
        | Name |          Product           |
        +======+============================+
        | AQC  |     Acquire                |
        +------+----------------------------+
        | CPC  |     CombiPrecip            |
        +------+----------------------------+
        | RZC  |     Precip                 |
        +------+----------------------------+
        | AZC  |     RZC accumulation       |
        +------+----------------------------+

    unit : {"mm/h", "mm", "dBZ"}
        the physical unit of the data

    accutime : float
        the accumulation time in minutes of the data

    Returns
    -------

    out : tuple
        A three-element tuple containing the precipitation field in mm/h imported
        from a MeteoSwiss gif file and the associated quality field and metadata.
        The quality field is currently set to None.
    """
    if not pil_imported:
        raise MissingOptionalDependency(
            "PIL package is required to import "
            "radar reflectivity composite from MeteoSwiss"
            "but it is not installed")

    geodata = _import_mch_geodata()

    metadata = geodata

    # import gif file
    B = PIL.Image.open(filename)

    if product.lower() in ["azc", "rzc", "precip"]:

        # convert 8-bit GIF colortable to RGB values
        Brgb = B.convert("RGB")

        # load lookup table
        if product.lower() == "azc":
            lut_filename = os.path.join(os.path.dirname(__file__),
                                        "mch_lut_8bit_Metranet_AZC_V104.txt")
        else:
            lut_filename = os.path.join(os.path.dirname(__file__),
                                        "mch_lut_8bit_Metranet_v103.txt")
        lut = np.genfromtxt(lut_filename, skip_header=1)
        lut = dict(zip(zip(lut[:, 1], lut[:, 2], lut[:, 3]), lut[:, -1]))

        # apply lookup table conversion
        R = np.zeros(len(Brgb.getdata()))
        for i, dn in enumerate(Brgb.getdata()):
            R[i] = lut.get(dn, np.nan)

        # convert to original shape
        width, height = B.size
        R = R.reshape(height, width)

        # set values in non-precipitating areas to zero,
        # and values outside the observational range to NaN
        R[R < 0] = 0
        R[R > 9999] = np.nan

    elif product.lower() in ["aqc", "cpc", "acquire ", "combiprecip"]:

        # convert digital numbers to physical values
        B = np.array(B, dtype=int)

        # build lookup table [mm/5min]
        lut = np.zeros(256)
        A = 316.0
        b = 1.5
        for i in range(256):
            if (i < 2) or (i > 250 and i < 255):
                lut[i] = 0.0
            elif i == 255:
                lut[i] = np.nan
            else:
                lut[i] = (10.0**((i - 71.5) / 20.0) / A)**(1.0 / b)

        # apply lookup table
        R = lut[B]

    else:
        raise ValueError("unknown product %s" % product)

    metadata["accutime"] = accutime
    metadata["unit"] = unit
    metadata["transform"] = None
    metadata["zerovalue"] = np.nanmin(R)
    if np.any(R > np.nanmin(R)):
        metadata["threshold"] = np.nanmin(R[R > np.nanmin(R)])
    else:
        metadata["threshold"] = np.nan
    metadata["institution"] = "MeteoSwiss"
    metadata["product"] = product
    metadata["zr_a"] = 316.0
    metadata["zr_b"] = 1.5

    return R, None, metadata
Example #19
def import_knmi_hdf5(filename, **kwargs):
    """Import a precipitation or reflectivity field (and optionally the quality
    field) from a HDF5 file conforming to the KNMI Data Centre specification.

    Parameters
    ----------

    filename : str
        Name of the file to import.

    Other Parameters
    ----------------

    qty : {'ACRR', 'DBZH'}
        The quantity to read from the file. The currently supported identifiers
        are: 'ACRR'=hourly rainfall accumulation (mm) and 'DBZH'=max-reflectivity
        (dBZ). The default value is 'ACRR'.

    accutime : float
        The accumulation time of the dataset in minutes. A 5 min accumulation
        is used as default, but hourly, daily and monthly accumulations
        are also available.

    pixelsize: float
        The pixel size of a raster cell in meters. The default value for the KNMI
        datasets is 1000 m grid cell size, but datasets with 2400 m pixel size
        are also available.

    Returns
    -------

    out : tuple
        A three-element tuple containing precipitation accumulation [mm] /
        reflectivity [dBZ] of the KNMI product, the associated quality field
        and metadata. The quality field is currently set to None.

    Notes
    -----

    Every KNMI data type has a slightly different naming convention. The
    standard setup is based on the accumulated rainfall product on 1 km2 spatial
    and 5 min temporal resolution.
    See https://data.knmi.nl/datasets?q=radar for a list of all available KNMI
    radar data.
    """

    # TODO: Add quality field.

    if not h5py_imported:
        raise MissingOptionalDependency("h5py package is required to import "
                                        "KNMI's radar datasets "
                                        "but it is not installed")

    ###
    # Options for kwargs.get
    ###

    # The unit in the 2D fields: either hourly rainfall accumulation (ACRR) or
    # reflectivity (DBZH)
    qty = kwargs.get("qty", "ACRR")

    if qty not in ["ACRR", "DBZH"]:
        raise ValueError(
            "unknown quantity %s: the available options are 'ACRR' and 'DBZH' "
        )

    # The time step. Generally, the 5 min data is used, but also hourly, daily
    # and monthly accumulations are present.
    accutime = kwargs.get("accutime", 5.0)
    # The pixel size. Recommended is to use KNMI datasets with 1 km grid cell size.
    # 1.0 or 2.4 km datasets are available - give pixelsize in meters
    pixelsize = kwargs.get("pixelsize", 1000.0)

    ####
    # Precipitation fields
    ####

    f = h5py.File(filename, "r")
    dset = f["image1"]["image_data"]
    R_intermediate = np.copy(dset)  # copy the content

    # In case R is a rainfall accumulation (ACRR), R is divided by 100.0,
    # because the data is saved as hundreds of mm (so, as integers). 65535 is
    # the no data value. The precision of the data is two decimals (0.01 mm).
    if qty == "ACRR":
        R = np.where(R_intermediate == 65535, np.NaN, R_intermediate / 100.0)

    # In case reflectivities are imported, the no data value is 255. Values are
    # saved as integers. The reflectivities are not directly saved in dBZ, but
    # as: dBZ = 0.5 * pixel_value - 31.5
    if qty == "DBZH":
        R = np.where(R_intermediate == 255, np.NaN,
                     R_intermediate * 0.5 - 31.5)

    if R is None:
        raise IOError("requested quantity not found")

    # TODO: Check if the reflectivity conversion equation is still up to date (unfortunately not well documented)

    ####
    # Meta data
    ####

    metadata = {}

    if qty == "ACRR":
        unit = "mm"
        transform = None
    elif qty == "DBZH":
        unit = "dBZ"
        transform = "dB"

    # The 'where' group of mch- and Opera-data, is called 'geographic' in the
    # KNMI data.
    geographic = f["geographic"]
    proj4str = geographic["map_projection"].attrs[
        "projection_proj4_params"].decode()
    pr = pyproj.Proj(proj4str)
    metadata["projection"] = proj4str

    # Get coordinates
    latlon_corners = geographic.attrs["geo_product_corners"]
    LL_lat = latlon_corners[1]
    LL_lon = latlon_corners[0]
    UR_lat = latlon_corners[5]
    UR_lon = latlon_corners[4]
    LR_lat = latlon_corners[7]
    LR_lon = latlon_corners[6]
    UL_lat = latlon_corners[3]
    UL_lon = latlon_corners[2]

    LL_x, LL_y = pr(LL_lon, LL_lat)
    UR_x, UR_y = pr(UR_lon, UR_lat)
    LR_x, LR_y = pr(LR_lon, LR_lat)
    UL_x, UL_y = pr(UL_lon, UL_lat)
    x1 = min(LL_x, UL_x)
    y2 = min(LL_y, LR_y)
    x2 = max(LR_x, UR_x)
    y1 = max(UL_y, UR_y)

    # Fill in the metadata
    metadata["x1"] = x1 * 1000.0
    metadata["y1"] = y1 * 1000.0
    metadata["x2"] = x2 * 1000.0
    metadata["y2"] = y2 * 1000.0
    metadata["xpixelsize"] = pixelsize
    metadata["ypixelsize"] = pixelsize
    metadata["yorigin"] = "upper"
    metadata[
        "institution"] = "KNMI - Royal Netherlands Meteorological Institute"
    metadata["accutime"] = accutime
    metadata["unit"] = unit
    metadata["transform"] = transform
    metadata["zerovalue"] = 0.0
    metadata["threshold"] = np.nanmin(R[R > np.nanmin(R)])
    metadata["zr_a"] = 200.0
    metadata["zr_b"] = 1.6

    f.close()

    return R, None, metadata
Example #20
def proj4_to_cartopy(proj4str):
    """Convert a PROJ.4 projection string into a Cartopy coordinate reference
    system (crs) object.

    Parameters
    ----------
    proj4str : str
        A PROJ.4-compatible projection string.

    Returns
    -------
    out : object
        Instance of a crs class defined in cartopy.crs.

    """
    if not cartopy_imported:
        raise MissingOptionalDependency(
            "cartopy package is required for proj4_to_cartopy function utility "
            "but it is not installed")

    if not pyproj_imported:
        raise MissingOptionalDependency(
            "pyproj package is required for proj4_to_cartopy function utility "
            "but it is not installed")

    proj = pyproj.Proj(proj4str)

    if proj.is_latlong():
        return ccrs.PlateCarree()

    km_proj = {"lon_0": "central_longitude",
               "lat_0": "central_latitude",
               "lat_ts": "true_scale_latitude",
               "x_0": "false_easting",
               "y_0": "false_northing",
               "k": "scale_factor",
               "zone": "zone"}
    km_globe = {'a': "semimajor_axis",
                'b': "semiminor_axis"}
    km_std = {"lat_1": "lat_1",
              "lat_2": "lat_2"}

    kw_proj  = {}
    kw_globe = {}
    kw_std   = {}

    for s in proj.srs.split('+'):
        s = s.split('=')
        if len(s) != 2:
            continue
        k = s[0].strip()
        v = s[1].strip()
        try:
            v = float(v)
        except ValueError:
            pass

        if k == "proj":
            if v == "tmerc":
                cl = ccrs.TransverseMercator
            elif v == "laea":
                cl = ccrs.LambertAzimuthalEqualArea
            elif v == "lcc":
                cl = ccrs.LambertConformal
            elif v == "merc":
                cl = ccrs.Mercator
            elif v == "utm":
                cl = ccrs.UTM
            elif v == "stere":
                cl = ccrs.Stereographic
            elif v == "aea":
                cl = ccrs.AlbersEqualArea
            elif v == "somerc":
                raise UnsupportedSomercProjection("unsupported projection: somerc")
            else:
                raise ValueError("unsupported projection: %s" % v)
        elif k in km_proj:
            kw_proj[km_proj[k]] = v
        elif k in km_globe:
            kw_globe[km_globe[k]] = v
        elif k in km_std:
            kw_std[km_std[k]] = v

    globe = None
    if kw_globe:
        globe = ccrs.Globe(**kw_globe)
    if kw_std:
        kw_proj["standard_parallels"] = (kw_std["lat_1"], kw_std["lat_2"])

    if cl.__name__ == "Mercator":
        kw_proj.pop("false_easting",  None)
        kw_proj.pop("false_northing", None)

    return cl(globe=globe, **kw_proj)
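
A sketch of plotting with the resulting CRS; the projection string is an example, and cartopy plus matplotlib are assumed to be installed.

import matplotlib.pyplot as plt

from pysteps.visualization.utils import proj4_to_cartopy  # module path assumed

crs = proj4_to_cartopy("+proj=stere +lat_0=90 +lon_0=25 +lat_ts=60 "
                       "+a=6371288 +x_0=380886.31 +y_0=3395677.92 +no_defs")
ax = plt.axes(projection=crs)  # returns a cartopy GeoAxes
ax.coastlines()
plt.show()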
Example #21
def import_saf_crri(filename, **kwargs):
    """Import a NetCDF radar rainfall product from the Convective Rainfall Rate
    Intensity (CRRI) product from the Satellite Application Facilities (SAF).

    Product description available at http://www.nwcsaf.org/crr_description
    (last visited Jan 26, 2020).

    Parameters
    ----------

    filename : str
        Name of the file to import.

    Other Parameters
    ----------------

    extent : scalars (left, right, bottom, top), optional
        The spatial extent specified in data coordinates.
        If None, the full extent is imported.

    Returns
    -------

    out : tuple
        A three-element tuple containing the rainfall field in mm/h, the quality
        field and the metadata imported from the CRRI SAF netcdf file.
        The quality field includes values [1, 2, 4, 8, 16, 24, 32] meaning
        "nodata", "internal_consistency", "temporal_consistency", "good",
        "questionable", "bad", and "interpolated", respectively.
    """
    if not NETCDF4_IMPORTED:
        raise MissingOptionalDependency(
            "netCDF4 package is required to import CRRI SAF products "
            "but it is not installed")

    extent = kwargs.get("extent", None)

    geodata = _import_saf_crri_geodata(filename)
    metadata = geodata

    if extent:
        xcoord = np.arange(metadata["x1"], metadata["x2"],
                           metadata["xpixelsize"]) + metadata["xpixelsize"] / 2
        ycoord = np.arange(metadata["y1"], metadata["y2"],
                           metadata["ypixelsize"]) + metadata["ypixelsize"] / 2
        ycoord = ycoord[::-1]  # yorigin = "upper"
        idx_x = np.logical_and(xcoord < extent[1], xcoord > extent[0])
        idx_y = np.logical_and(ycoord < extent[3], ycoord > extent[2])

        # update geodata
        metadata["x1"] = xcoord[idx_x].min() - metadata["xpixelsize"] / 2
        metadata["x2"] = xcoord[idx_x].max() + metadata["xpixelsize"] / 2
        metadata["y1"] = ycoord[idx_y].min() - metadata["ypixelsize"] / 2
        metadata["y2"] = ycoord[idx_y].max() + metadata["ypixelsize"] / 2

    else:

        idx_x = None
        idx_y = None

    precip, quality = _import_saf_crri_data(filename, idx_x, idx_y)

    metadata["transform"] = None
    metadata["zerovalue"] = np.nanmin(precip)
    if np.any(np.isfinite(precip)):
        metadata["threshold"] = np.nanmin(precip[precip > np.nanmin(precip)])
    else:
        metadata["threshold"] = np.nan

    return precip, quality, metadata
Example #22
def detection(
    input_image,
    minref=35,
    maxref=48,
    mindiff=6,
    minsize=50,
    minmax=41,
    mindis=10,
    output_feat=False,
    time="000000000",
):
    """
    This function detects thunderstorms using a multi-threshold approach. It is
    recommended to use a 2-D Cartesian maximum reflectivity composite; however,
    the function will process any 2-D array.
    The thunderstorm cell detection requires both scikit-image and pandas.

    Parameters
    ----------
    input_image : array-like
        Array of shape (m,n) containing the input image, usually maximum
        reflectivity in dBZ with a resolution of 1 km. NaN values are ignored.
    minref : float, optional
        Lower threshold for object detection. Lower values will be set to NaN.
        The default is 35 dBZ.
    maxref : float, optional
        Upper threshold for object detection. Higher values will be set to this value.
        The default is 48 dBZ.
    mindiff : float, optional
        Minimal difference between two identified maxima within same area to split area
        into two objects. The default is 6 dBZ.
    minsize : float, optional
        Minimal area for possible detected object. The default is 50 pixels.
    minmax : float, optional
        Minimum value of maximum in identified objects. Objects with a maximum lower
        than this will be discarded. The default is 41 dBZ.
    mindis : float, optional
        Minimum distance between two maxima of identified objects. Objects with a
        smaller distance will be merged. The default is 10 km.
    output_feat: bool, optional
        Set to True to return only the cell coordinates.
    time : string, optional
        Date and time as string. Used to label time in the resulting dataframe.
        The default is '000000000'.

    Returns
    -------
    cells_id : pandas dataframe
        Pandas dataframe containing all detected cells and their respective properties
        corresponding to the input image.
        Columns of dataframe: ID - cell ID, time - time stamp, x - array of all
        x-coordinates of cell, y -  array of all y-coordinates of cell, cen_x -
        x-coordinate of cell centroid, cen_y - y-coordinate of cell centroid, max_ref -
        maximum (reflectivity) value of cell, cont - cell contours
    labels : array-like
        Array of shape (m,n), grid of labelled cells.
    """
    if not SKIMAGE_IMPORTED:
        raise MissingOptionalDependency(
            "skimage is required for thunderstorm DATing " "but it is not installed"
        )
    if not PANDAS_IMPORTED:
        raise MissingOptionalDependency(
            "pandas is required for thunderstorm DATing " "but it is not installed"
        )
    filt_image = np.zeros(input_image.shape)
    filt_image[input_image >= minref] = input_image[input_image >= minref]
    filt_image[input_image > maxref] = maxref
    max_image = np.zeros(filt_image.shape)
    max_image[filt_image == maxref] = 1
    labels, n_groups = ndi.label(max_image)
    for n in range(1, n_groups + 1):
        indx, indy = np.where(labels == n)
        if len(indx) > 3:
            max_image[indx[0], indy[0]] = 2
    filt_image[max_image == 2] = maxref + 1
    binary = np.zeros(filt_image.shape)
    binary[filt_image > 0] = 1
    labels, n_groups = ndi.label(binary)
    for n in range(1, n_groups + 1):
        ind = np.where(labels == n)
        size = len(ind[0])
        maxval = np.nanmax(input_image[ind])
        if size < minsize:  # removing too small areas
            binary[labels == n] = 0
            labels[labels == n] = 0
        if maxval < minmax:  # removing areas with too low max value
            binary[labels == n] = 0
            labels[labels == n] = 0
    filt_image = filt_image * binary
    if mindis % 2 == 0:
        elem = mindis - 1
    else:
        elem = mindis
    struct = np.ones([elem, elem])
    if np.nanmax(filt_image.flatten()) < minref:
        maxima = np.zeros(filt_image.shape)
    else:
        maxima = skim.h_maxima(filt_image, h=mindiff, selem=struct)
    loc_max = np.where(maxima > 0)

    loc_max = longdistance(loc_max, mindis)
    i_cell = labels[loc_max]
    n_cell = np.unique(labels)[1:]
    for n in n_cell:
        if n not in i_cell:
            binary[labels == n] = 0
            labels[labels == n] = 0

    maxima_dis = np.zeros(maxima.shape)
    maxima_dis[loc_max] = 1

    areas, lines = breakup(input_image, np.nanmin(input_image.flatten()), maxima_dis)

    cells_id, labels = get_profile(areas, binary, input_image, loc_max, time, minref)

    if output_feat:
        return np.column_stack(
            [np.array(cells_id.cen_x), np.array(cells_id.cen_y)]
        )
    return cells_id, labels
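
A minimal sketch with a synthetic reflectivity composite; the import path follows the docstring references above (pysteps.feature.tstorm), and the field values are made up.

import numpy as np

from pysteps.feature.tstorm import detection

# One intense synthetic cell peaking at 55 dBZ on a 20 dBZ background.
yy, xx = np.mgrid[0:200, 0:200]
refl = 20.0 + 35.0 * np.exp(-((xx - 100) ** 2 + (yy - 100) ** 2) / 400.0)

cells, labels = detection(refl, time="201906101500")
print(cells[["ID", "max_ref"]])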
Example #23
def initialize_forecast_exporter_netcdf(
    outpath,
    outfnprefix,
    startdate,
    timestep,
    n_timesteps,
    shape,
    metadata,
    n_ens_members=1,
    incremental=None,
    **kwargs,
):
    """
    Initialize a netCDF forecast exporter. All outputs are written to a
    single file named '<outfnprefix>.nc'.

    Parameters
    ----------
    outpath: str
        Output path.
    outfnprefix: str
        Prefix for output file names.
    startdate: datetime.datetime
        Start date of the forecast.
    timestep: int
        Time step of the forecast (minutes).
    n_timesteps: int
        Number of time steps in the forecast. This argument is ignored if
        incremental is set to 'timestep'.
    shape: tuple of int
        Two-element tuple defining the shape (height,width) of the forecast
        grids.
    metadata: dict
        Metadata dictionary containing the projection, x1, x2, y1, y2,
        unit attributes (projection and variable units) described in the
        documentation of :py:mod:`pysteps.io.importers`.
    n_ens_members: int
        Number of ensemble members in the forecast. This argument is ignored if
        incremental is set to 'member'.
    incremental: {None,'timestep','member'}, optional
        Allow incremental writing of datasets into the netCDF files.\n
        The available options are: 'timestep' = write a forecast or a forecast
        ensemble for  a given time step; 'member' = write a forecast sequence
        for a given ensemble member. If set to None, incremental writing is
        disabled.

    Returns
    -------
    exporter: dict
        The return value is a dictionary containing an exporter object. This
        can be used with :py:func:`pysteps.io.exporters.export_forecast_dataset`
        to write datasets into the given file format.
    """

    if not NETCDF4_IMPORTED:
        raise MissingOptionalDependency(
            "netCDF4 package is required for netcdf "
            "exporters but it is not installed"
        )

    if not PYPROJ_IMPORTED:
        raise MissingOptionalDependency(
            "pyproj package is required for netcdf " "exporters but it is not installed"
        )

    if incremental not in [None, "timestep", "member"]:
        raise ValueError(
            f"unknown option {incremental}: incremental must be "
            + "'timestep' or 'member'"
        )

    if incremental == "timestep":
        n_timesteps = None
    elif incremental == "member":
        n_ens_members = None

    n_ens_gt_one = n_ens_members is not None and n_ens_members > 1

    exporter = {}

    outfn = os.path.join(outpath, outfnprefix + ".nc")
    ncf = netCDF4.Dataset(outfn, "w", format="NETCDF4")

    ncf.Conventions = "CF-1.7"
    ncf.title = "pysteps-generated nowcast"
    ncf.institution = "the pySTEPS community (https://pysteps.github.io)"
    ncf.source = "pysteps"  # TODO(exporters): Add pySTEPS version here
    ncf.history = ""
    ncf.references = ""
    ncf.comment = ""

    h, w = shape

    ncf.createDimension("ens_number", size=n_ens_members)
    ncf.createDimension("time", size=n_timesteps)
    ncf.createDimension("y", size=h)
    ncf.createDimension("x", size=w)

    if metadata["unit"] == "mm/h":
        var_name = "precip_intensity"
        var_standard_name = None
        var_long_name = "instantaneous precipitation rate"
        var_unit = "mm h-1"
    elif metadata["unit"] == "mm":
        var_name = "precip_accum"
        var_standard_name = None
        var_long_name = "accumulated precipitation"
        var_unit = "mm"
    elif metadata["unit"] == "dBZ":
        var_name = "reflectivity"
        var_long_name = "equivalent reflectivity factor"
        var_standard_name = "equivalent_reflectivity_factor"
        var_unit = "dBZ"
    else:
        raise ValueError("unknown unit %s" % metadata["unit"])

    xr = np.linspace(metadata["x1"], metadata["x2"], w + 1)[:-1]
    xr += 0.5 * (xr[1] - xr[0])
    yr = np.linspace(metadata["y1"], metadata["y2"], h + 1)[:-1]
    yr += 0.5 * (yr[1] - yr[0])

    # flip yr vector if yorigin is upper
    if metadata["yorigin"] == "upper":
        yr = np.flip(yr)

    var_xc = ncf.createVariable("x", np.float32, dimensions=("x",))
    var_xc[:] = xr
    var_xc.axis = "X"
    var_xc.standard_name = "projection_x_coordinate"
    var_xc.long_name = "x-coordinate in Cartesian system"
    var_xc.units = metadata["cartesian_unit"]

    var_yc = ncf.createVariable("y", np.float32, dimensions=("y",))
    var_yc[:] = yr
    var_yc.axis = "Y"
    var_yc.standard_name = "projection_y_coordinate"
    var_yc.long_name = "y-coordinate in Cartesian system"
    var_yc.units = metadata["cartesian_unit"]

    x_2d, y_2d = np.meshgrid(xr, yr)
    pr = pyproj.Proj(metadata["projection"])
    lon, lat = pr(x_2d.flatten(), y_2d.flatten(), inverse=True)

    var_lon = ncf.createVariable("lon", float, dimensions=("y", "x"))
    var_lon[:] = lon.reshape(shape)
    var_lon.standard_name = "longitude"
    var_lon.long_name = "longitude coordinate"
    # TODO(exporters): Don't hard-code the unit.
    var_lon.units = "degrees_east"

    var_lat = ncf.createVariable("lat", float, dimensions=("y", "x"))
    var_lat[:] = lat.reshape(shape)
    var_lat.standard_name = "latitude"
    var_lat.long_name = "latitude coordinate"
    # TODO(exporters): Don't hard-code the unit.
    var_lat.units = "degrees_north"

    ncf.projection = metadata["projection"]

    (
        grid_mapping_var_name,
        grid_mapping_name,
        grid_mapping_params,
    ) = _convert_proj4_to_grid_mapping(metadata["projection"])
    # skip writing the grid mapping if a matching name was not found
    if grid_mapping_var_name is not None:
        var_gm = ncf.createVariable(grid_mapping_var_name, int, dimensions=())
        var_gm.grid_mapping_name = grid_mapping_name
        for i in grid_mapping_params.items():
            var_gm.setncattr(i[0], i[1])

    if incremental == "member" or n_ens_gt_one:
        var_ens_num = ncf.createVariable("ens_number", int, dimensions=("ens_number",))
        if incremental != "member":
            var_ens_num[:] = list(range(1, n_ens_members + 1))
        var_ens_num.long_name = "ensemble member"
        var_ens_num.standard_name = "realization"
        var_ens_num.units = ""

    var_time = ncf.createVariable("time", int, dimensions=("time",))
    if incremental != "timestep":
        var_time[:] = [i * timestep * 60 for i in range(1, n_timesteps + 1)]
    var_time.long_name = "forecast time"
    startdate_str = datetime.strftime(startdate, "%Y-%m-%d %H:%M:%S")
    var_time.units = "seconds since %s" % startdate_str

    if incremental == "member" or n_ens_gt_one:
        var_f = ncf.createVariable(
            var_name,
            np.float32,
            dimensions=("ens_number", "time", "y", "x"),
            zlib=True,
            complevel=9,
        )
    else:
        var_f = ncf.createVariable(
            var_name, np.float32, dimensions=("time", "y", "x"), zlib=True, complevel=9
        )

    if var_standard_name is not None:
        var_f.standard_name = var_standard_name
    var_f.long_name = var_long_name
    var_f.coordinates = "y x"
    var_f.units = var_unit
    if grid_mapping_var_name is not None:
        var_f.grid_mapping = grid_mapping_var_name

    exporter["method"] = "netcdf"
    exporter["ncfile"] = ncf
    exporter["var_F"] = var_f
    if incremental == "member" or n_ens_gt_one:
        exporter["var_ens_num"] = var_ens_num
    exporter["var_time"] = var_time
    exporter["var_name"] = var_name
    exporter["startdate"] = startdate
    exporter["timestep"] = timestep
    exporter["metadata"] = metadata
    exporter["incremental"] = incremental
    exporter["num_timesteps"] = n_timesteps
    exporter["num_ens_members"] = n_ens_members
    exporter["shape"] = shape

    return exporter
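For reference, _convert_proj4_to_grid_mapping (not shown here) is expected to return a netCDF variable name, a CF grid_mapping_name and a dictionary of grid-mapping attributes. A hypothetical illustration for a polar-stereographic PROJ.4 string, using the attribute names defined by the CF conventions:

# hypothetical return value of _convert_proj4_to_grid_mapping for
# "+proj=stere +lon_0=25 +lat_0=90 +lat_ts=60 +ellps=WGS84":
# ("polar_stereographic",                 # netCDF variable name
#  "polar_stereographic",                 # CF grid_mapping_name
#  {"straight_vertical_longitude_from_pole": 25.0,
#   "latitude_of_projection_origin": 90.0,
#   "standard_parallel": 60.0})           # grid-mapping attributes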
Example #24
def plot_precip_field(R,
                      type="intensity",
                      map=None,
                      geodata=None,
                      units='mm/h',
                      bbox=None,
                      colorscale='pysteps',
                      probthr=None,
                      title=None,
                      colorbar=True,
                      drawlonlatlines=False,
                      lw=0.5,
                      axis="on",
                      cax=None,
                      **kwargs):
    """
    Function to plot a precipitation intensity or probability field with a
    colorbar.

    .. _Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes

    .. _SubplotSpec: https://matplotlib.org/api/_as_gen/matplotlib.gridspec.SubplotSpec.html

    .. _cartopy: https://scitools.org.uk/cartopy/docs/latest

    .. _mpl_toolkits.basemap: https://matplotlib.org/basemap

    Parameters
    ----------
    R : array-like
        Two-dimensional array containing the input precipitation field or an
        exceedance probability map.
    type : {'intensity', 'depth', 'prob'}, optional
        Type of the map to plot: 'intensity' = precipitation intensity field,
        'depth' = precipitation depth (accumulation) field,
        'prob' = exceedance probability field.
    map : {'basemap', 'cartopy'}, optional
        Optional method for plotting a map: 'basemap' or 'cartopy'. The former
        uses `mpl_toolkits.basemap`_, while the latter uses cartopy_.
    geodata : dictionary, optional
        Optional dictionary containing geographical information about
        the field. Required if map is not None.

        If geodata is not None, it must contain the following key-value pairs:

        .. tabularcolumns:: |p{1.5cm}|L|

        +-----------------+---------------------------------------------------+
        |        Key      |                  Value                            |
        +=================+===================================================+
        |    projection   | PROJ.4-compatible projection definition           |
        +-----------------+---------------------------------------------------+
        |    x1           | x-coordinate of the lower-left corner of the data |
        |                 | raster                                            |
        +-----------------+---------------------------------------------------+
        |    y1           | y-coordinate of the lower-left corner of the data |
        |                 | raster                                            |
        +-----------------+---------------------------------------------------+
        |    x2           | x-coordinate of the upper-right corner of the     |
        |                 | data raster                                       |
        +-----------------+---------------------------------------------------+
        |    y2           | y-coordinate of the upper-right corner of the     |
        |                 | data raster                                       |
        +-----------------+---------------------------------------------------+
        |    yorigin      | a string specifying the location of the first     |
        |                 | element in the data raster w.r.t. y-axis:         |
        |                 | 'upper' = upper border, 'lower' = lower border    |
        +-----------------+---------------------------------------------------+
    units : {'mm/h', 'mm', 'dBZ'}, optional
        Units of the input array. If type is 'prob', this specifies the unit of
        the intensity threshold.
    bbox : tuple, optional
        Four-element tuple specifying the coordinates of the bounding box. Use
        this for plotting a subdomain inside the input grid. The coordinates are
        of the form (lower left x,lower left y,upper right x,upper right y). If
        map is not None, the x- and y-coordinates are longitudes and latitudes.
        Otherwise they represent image pixels.
    colorscale : {'pysteps', 'STEPS-BE', 'BOM-RF3'}, optional
        Which colorscale to use. Applicable if units is 'mm/h', 'mm' or 'dBZ'.
    probthr : float, optional
        Intensity threshold to show in the color bar of the exceedance
        probability map.
        Required if type is "prob" and colorbar is True.
    title : str, optional
        If not None, print the title on top of the plot.
    colorbar : bool, optional
        If set to True, add a colorbar on the right side of the plot.
    drawlonlatlines : bool, optional
        If set to True, draw longitude and latitude lines. Applicable if map is
        'basemap' or 'cartopy'.
    lw: float, optional
        Linewidth of the map (administrative boundaries and coastlines).
    axis : {'off','on'}, optional
        Whether to turn off or on the x and y axis.
    cax : Axes_ object, optional
        Axes object into which the colorbar will be drawn. If no axes object
        is provided, the colorbar axes are created next to the plot.

    Other parameters
    ----------------
    Optional parameters are contained in **kwargs. See basemaps.plot_geography.

    Returns
    -------
    ax : fig Axes_
        Figure axes. Needed if one wants to add e.g. text inside the plot.

    """
    if type not in ["intensity", "depth", "prob"]:
        raise ValueError("invalid type '%s', must be "
                         "'intensity', 'depth' or 'prob'" % type)
    if units not in ["mm/h", "mm", "dBZ"]:
        raise ValueError("invalid units '%s', must be "
                         "'mm/h', 'mm' or 'dBZ'" % units)
    if type == "prob" and colorbar and probthr is None:
        raise ValueError("type='prob' but probthr not specified")
    if map is not None and geodata is None:
        raise ValueError("map!=None but geodata=None")
    if len(R.shape) != 2:
        raise ValueError("the input is not a two-dimensional array")

    # get colormap and color levels
    cmap, norm, clevs, clevsStr = get_colormap(type, units, colorscale)

    # extract extent and origin
    if geodata is not None:
        field_extent = (geodata['x1'], geodata['x2'], geodata['y1'],
                        geodata['y2'])
        if bbox is None:
            bm_extent = field_extent
        else:
            if not PYPROJ_IMPORTED:
                raise MissingOptionalDependency(
                    "pyproj package is required to reproject the "
                    "bounding box coordinates "
                    "but it is not installed")
            pr = pyproj.Proj(geodata["projection"])
            x1, y1 = pr(bbox[0], bbox[1])
            x2, y2 = pr(bbox[2], bbox[3])
            bm_extent = (x1, x2, y1, y2)
        origin = geodata["yorigin"]
    else:
        field_extent = (0, R.shape[1] - 1, 0, R.shape[0] - 1)
        origin = "upper"

    # plot geography
    if map is not None:
        try:
            ax = basemaps.plot_geography(map, geodata["projection"], bm_extent,
                                         R.shape, lw, drawlonlatlines,
                                         **kwargs)
            # add high-resolution coastlines, reefs and minor islands
            # (these NaturalEarth features assume a cartopy GeoAxes)
            reefs = cfeature.NaturalEarthFeature('physical', 'reefs', '10m')
            islands = cfeature.NaturalEarthFeature('physical', 'minor_islands',
                                                   '10m')
            coast = cfeature.NaturalEarthFeature('physical', 'coastline',
                                                 '10m')

            ax.add_feature(reefs, facecolor='None', edgecolor='black')
            ax.add_feature(islands, edgecolor='black')
            ax.add_feature(coast, edgecolor='black')
            regular_grid = True
        except UnsupportedSomercProjection:
            # Define a default fallback projection for Swiss data (EPSG:3035).
            # This will work reasonably well for Europe only.
            t_proj4str = "+proj=laea +lat_0=52 +lon_0=10 "
            t_proj4str += "+x_0=4321000 +y_0=3210000 +ellps=GRS80 "
            t_proj4str += "+units=m +no_defs"
            geodata = utils.reproject_geodata(geodata,
                                              t_proj4str,
                                              return_grid="quadmesh")
            bm_extent = (geodata['x1'], geodata['x2'], geodata['y1'],
                         geodata['y2'])
            X, Y = geodata["X_grid"], geodata["Y_grid"]
            regular_grid = geodata["regular_grid"]

            ax = basemaps.plot_geography(map, geodata["projection"], bm_extent,
                                         R.shape, lw, drawlonlatlines,
                                         **kwargs)
    else:
        regular_grid = True

    if bbox is not None and map is not None:
        x1, y1 = pr(geodata["x1"], geodata["y1"], inverse=True)
        x2, y2 = pr(geodata["x2"], geodata["y2"], inverse=True)
        if map == "basemap":
            x1, y1 = ax(x1, y1)
            x2, y2 = ax(x2, y2)
        else:
            x1, y1 = pr(x1, y1)
            x2, y2 = pr(x2, y2)
        field_extent = (x1, x2, y1, y2)

    # plot rainfield
    if regular_grid:
        ax = plt.gca()
        im = _plot_field(R,
                         ax,
                         type,
                         units,
                         colorscale,
                         extent=field_extent,
                         origin=origin)
    else:
        if origin == "upper":
            Y = np.flipud(Y)
        im = _plot_field_pcolormesh(X, Y, R, ax, type, units, colorscale)

    # plot radar domain mask
    mask = np.ones(R.shape)
    mask[~np.isnan(R)] = np.nan  # Fully transparent within the radar domain
    ax.imshow(mask,
              cmap=colors.ListedColormap(['gray']),
              alpha=0.5,
              zorder=1e6,
              extent=field_extent,
              origin=origin)

    # ax.pcolormesh(X, Y, np.flipud(mask),
    #               cmap=colors.ListedColormap(['gray']),
    #               alpha=0.5, zorder=1e6)
    # TODO: pcolormesh doesn't work properly with the alpha parameter

    if title is not None:
        plt.title(title)

    # add colorbar
    if colorbar:
        if type in ["intensity", "depth"]:
            extend = "max"
        else:
            extend = "neither"
        cbar = plt.colorbar(im,
                            ticks=clevs,
                            spacing='uniform',
                            norm=norm,
                            extend=extend,
                            shrink=0.8,
                            cax=cax)
        if clevsStr is not None:
            cbar.ax.set_yticklabels(clevsStr)

        if type == "intensity":
            cbar.ax.set_title(units, fontsize=10)
            cbar.set_label("Precipitation intensity")
        elif type == "depth":
            cbar.ax.set_title(units, fontsize=10)
            cbar.set_label("Precipitation depth")
        else:
            cbar.set_label("P(R > %.1f %s)" % (probthr, units))

    if map is None and bbox is not None:
        ax = plt.gca()
        ax.set_xlim(bbox[0], bbox[2])
        ax.set_ylim(bbox[1], bbox[3])

    if geodata is None or axis == "off":
        axes = plt.gca()
        axes.xaxis.set_ticks([])
        axes.xaxis.set_ticklabels([])
        axes.yaxis.set_ticks([])
        axes.yaxis.set_ticklabels([])

    return plt.gca()
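A minimal usage sketch with synthetic data (the field values are hypothetical; matplotlib and the helper functions of this module are assumed to be available):

import numpy as np
import matplotlib.pyplot as plt

# hypothetical rain-rate field in mm/h with a missing-data (NaN) stripe
R = np.random.exponential(scale=2.0, size=(200, 200))
R[:20, :] = np.nan
ax = plot_precip_field(R, type="intensity", units="mm/h",
                       colorscale="pysteps", title="Synthetic rain rate")
plt.show()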
Example #25
def initialize_forecast_exporter_netcdf(filename,
                                        startdate,
                                        timestep,
                                        n_timesteps,
                                        shape,
                                        n_ens_members,
                                        metadata,
                                        incremental=None):
    """Initialize a netCDF forecast exporter."""
    if not netcdf4_imported:
        raise MissingOptionalDependency(
            "netCDF4 package is required for netcdf "
            "exporters but it is not installed")

    if not pyproj_imported:
        raise MissingOptionalDependency(
            "pyproj package is required for netcdf "
            "exporters but it is not installed")

    if incremental not in [None, "timestep", "member"]:
        raise ValueError(
            "unknown option %s: incremental must be 'timestep' or 'member'" %
            incremental)

    if incremental == "timestep":
        n_timesteps = None
    elif incremental == "member":
        n_ens_members = None

    exporter = {}

    ncf = netCDF4.Dataset(filename, 'w', format="NETCDF4")

    ncf.Conventions = "CF-1.7"
    ncf.title = "pysteps-generated nowcast"
    ncf.institution = "the pySTEPS community (https://pysteps.github.io)"
    ncf.source = "pysteps"  # TODO: Add pySTEPS version here
    ncf.history = ""
    ncf.references = ""
    ncf.comment = ""

    h, w = shape

    ncf.createDimension("ens_number", size=n_ens_members)
    ncf.createDimension("time", size=n_timesteps)
    ncf.createDimension("y", size=h)
    ncf.createDimension("x", size=w)

    if metadata["unit"] == "mm/h":
        var_name = "precip_intensity"
        var_standard_name = None
        var_long_name = "instantaneous precipitation rate"
        var_unit = "mm h-1"
    elif metadata["unit"] == "mm":
        var_name = "precip_accum"
        var_standard_name = None
        var_long_name = "accumulated precipitation"
        var_unit = "mm"
    elif metadata["unit"] == "dBZ":
        var_name = "reflectivity"
        var_long_name = "equivalent reflectivity factor"
        var_standard_name = "equivalent_reflectivity_factor"
        var_unit = "dBZ"
    else:
        raise ValueError("unknown unit %s" % metadata["unit"])

    xr = np.linspace(metadata["x1"], metadata["x2"], w + 1)[:-1]
    xr += 0.5 * (xr[1] - xr[0])
    yr = np.linspace(metadata["y1"], metadata["y2"], h + 1)[:-1]
    yr += 0.5 * (yr[1] - yr[0])

    var_xc = ncf.createVariable("xc", np.float32, dimensions=("x", ))
    var_xc[:] = xr
    var_xc.axis = 'X'
    var_xc.standard_name = "projection_x_coordinate"
    var_xc.long_name = "x-coordinate in Cartesian system"
    # TODO: Don't hard-code the unit.
    var_xc.units = 'm'

    var_yc = ncf.createVariable("yc", np.float32, dimensions=("y", ))
    var_yc[:] = yr
    var_yc.axis = 'Y'
    var_yc.standard_name = "projection_y_coordinate"
    var_yc.long_name = "y-coordinate in Cartesian system"
    # TODO: Don't hard-code the unit.
    var_yc.units = 'm'

    X, Y = np.meshgrid(xr, yr)
    pr = pyproj.Proj(metadata["projection"])
    lon, lat = pr(X.flatten(), Y.flatten(), inverse=True)

    var_lon = ncf.createVariable("lon", float, dimensions=("y", "x"))
    var_lon[:] = lon.reshape(shape)
    var_lon.standard_name = "longitude"
    var_lon.long_name = "longitude coordinate"
    # TODO: Don't hard-code the unit.
    var_lon.units = "degrees_east"

    var_lat = ncf.createVariable("lat", float, dimensions=("y", "x"))
    var_lat[:] = lat.reshape(shape)
    var_lat.standard_name = "latitude"
    var_lat.long_name = "latitude coordinate"
    # TODO: Don't hard-code the unit.
    var_lat.units = "degrees_north"

    ncf.projection = metadata["projection"]

    (
        grid_mapping_var_name,
        grid_mapping_name,
        grid_mapping_params,
    ) = _convert_proj4_to_grid_mapping(metadata["projection"])
    # skip writing the grid mapping if a matching name was not found
    if grid_mapping_var_name is not None:
        var_gm = ncf.createVariable(grid_mapping_var_name, int, dimensions=())
        var_gm.grid_mapping_name = grid_mapping_name
        for key, value in grid_mapping_params.items():
            var_gm.setncattr(key, value)

    var_ens_num = ncf.createVariable("ens_number", int,
                                     dimensions=("ens_number", ))
    if incremental != "member":
        var_ens_num[:] = list(range(1, n_ens_members + 1))
    var_ens_num.long_name = "ensemble member"
    var_ens_num.units = ""

    var_time = ncf.createVariable("time", int, dimensions=("time", ))
    if incremental != "timestep":
        var_time[:] = [i * timestep * 60 for i in range(1, n_timesteps + 1)]
    var_time.long_name = "forecast time"
    startdate_str = datetime.strftime(startdate, "%Y-%m-%d %H:%M:%S")
    var_time.units = "seconds since %s" % startdate_str

    var_F = ncf.createVariable(var_name,
                               np.float32,
                               dimensions=("ens_number", "time", "y", "x"),
                               zlib=True,
                               complevel=9)

    if var_standard_name is not None:
        var_F.standard_name = var_standard_name
    var_F.long_name = var_long_name
    var_F.coordinates = "y x"
    var_F.units = var_unit

    exporter["method"] = "netcdf"
    exporter["ncfile"] = ncf
    exporter["var_F"] = var_F
    exporter["var_ens_num"] = var_ens_num
    exporter["var_time"] = var_time
    exporter["var_name"] = var_name
    exporter["startdate"] = startdate
    exporter["timestep"] = timestep
    exporter["metadata"] = metadata
    exporter["incremental"] = incremental
    exporter["num_timesteps"] = n_timesteps
    exporter["num_ens_members"] = n_ens_members
    exporter["shape"] = shape

    return exporter
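A minimal usage sketch (the file name, grid extent and forecast values are hypothetical; export_forecast_dataset and close_forecast_files are the companion exporter routines in pysteps.io.exporters):

import numpy as np
from datetime import datetime

# hypothetical 2-member, 3-step nowcast on a 100 x 100 grid, in mm/h
forecast = np.random.gamma(shape=1.0, scale=2.0, size=(2, 3, 100, 100))
metadata = {
    "unit": "mm/h",
    "projection": "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 "
                  "+y_0=3210000 +ellps=GRS80 +units=m +no_defs",
    "x1": 4000000.0, "y1": 2500000.0,
    "x2": 4100000.0, "y2": 2600000.0,
}

exporter = initialize_forecast_exporter_netcdf(
    "nowcast.nc", datetime(2021, 6, 1, 12, 0), 5, 3, (100, 100), 2, metadata)
export_forecast_dataset(forecast, exporter)
close_forecast_files(exporter)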
Example #26
def import_opera_hdf5(filename, **kwargs):
    """Import a precipitation field (and optionally the quality field) from an
    OPERA HDF5 file conforming to the ODIM specification.

    Parameters
    ----------

    filename : str
        Name of the file to import.

    Other Parameters
    ----------------

    qty : {'RATE', 'ACRR', 'DBZH'}
        The quantity to read from the file. The currently supported identifiers
        are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall
        accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value
        is 'RATE'.

    Returns
    -------

    out : tuple
        A three-element tuple containing the OPERA product for the requested
        quantity and the associated quality field and metadata. The quality
        field is read from the file if it contains a dataset whose quantity
        identifier is 'QIND'.
    """
    if not h5py_imported:
        raise MissingOptionalDependency(
            "h5py package is required to import "
            "radar reflectivity composites using ODIM HDF5 specification "
            "but it is not installed")

    qty = kwargs.get("qty", "RATE")

    if qty not in ["ACRR", "DBZH", "RATE"]:
        raise ValueError(
            "unknown quantity %s: the available options are "
            "'ACRR', 'DBZH' and 'RATE'" % qty
        )

    f = h5py.File(filename, "r")

    R = None
    Q = None

    for dsg in f.items():
        if dsg[0].startswith("dataset"):
            what_grp_found = False
            # check if the "what" group is in the "dataset" group
            if "what" in list(dsg[1].keys()):
                qty_, gain, offset, nodata, undetect = _read_opera_hdf5_what_group(
                    dsg[1]["what"])
                what_grp_found = True

            for dg in dsg[1].items():
                if dg[0][0:4] == "data":
                    # check if the "what" group is in the "data" group
                    if "what" in list(dg[1].keys()):
                        qty_, gain, offset, nodata, undetect = _read_opera_hdf5_what_group(
                            dg[1]["what"])
                    elif not what_grp_found:
                        raise DataModelError("Non-ODIM compliant file: "
                                             "no what group found from {} "
                                             "or its subgroups".format(dg[0]))

                    if qty_.decode() in [qty, "QIND"]:
                        ARR = dg[1]["data"][...]
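                        # ODIM convention: "nodata" marks missing measurements
                        # and "undetect" marks values below the detection limit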
                        MASK_N = ARR == nodata
                        MASK_U = ARR == undetect
                        MASK = np.logical_and(~MASK_U, ~MASK_N)

                        if qty_.decode() == qty:
                            R = np.empty(ARR.shape)
                            R[MASK] = ARR[MASK] * gain + offset
                            R[MASK_U] = 0.0
                            R[MASK_N] = np.nan
                        elif qty_.decode() == "QIND":
                            Q = np.empty(ARR.shape, dtype=float)
                            Q[MASK] = ARR[MASK]
                            Q[~MASK] = np.nan

    if R is None:
        raise IOError("requested quantity %s not found" % qty)

    where = f["where"]
    proj4str = where.attrs["projdef"].decode()
    pr = pyproj.Proj(proj4str)

    LL_lat = where.attrs["LL_lat"]
    LL_lon = where.attrs["LL_lon"]
    UR_lat = where.attrs["UR_lat"]
    UR_lon = where.attrs["UR_lon"]
    if ("LR_lat" in where.attrs.keys() and "LR_lon" in where.attrs.keys()
            and "UL_lat" in where.attrs.keys()
            and "UL_lon" in where.attrs.keys()):
        LR_lat = float(where.attrs["LR_lat"])
        LR_lon = float(where.attrs["LR_lon"])
        UL_lat = float(where.attrs["UL_lat"])
        UL_lon = float(where.attrs["UL_lon"])
        full_cornerpts = True
    else:
        full_cornerpts = False

    LL_x, LL_y = pr(LL_lon, LL_lat)
    UR_x, UR_y = pr(UR_lon, UR_lat)
    if full_cornerpts:
        LR_x, LR_y = pr(LR_lon, LR_lat)
        UL_x, UL_y = pr(UL_lon, UL_lat)
        x1 = min(LL_x, UL_x)
        y1 = min(LL_y, LR_y)
        x2 = max(LR_x, UR_x)
        y2 = max(UL_y, UR_y)
    else:
        x1 = LL_x
        y1 = LL_y
        x2 = UR_x
        y2 = UR_y

    if "xscale" in where.attrs.keys() and "yscale" in where.attrs.keys():
        xpixelsize = where.attrs["xscale"]
        ypixelsize = where.attrs["yscale"]
    else:
        xpixelsize = None
        ypixelsize = None

    if qty == "ACRR":
        unit = "mm"
        transform = None
    elif qty == "DBZH":
        unit = "dBZ"
        transform = "dB"
    else:
        unit = "mm/h"
        transform = None

    if np.any(np.isfinite(R)):
        thr = np.nanmin(R[R > np.nanmin(R)])
    else:
        thr = np.nan

    metadata = {
        "projection": proj4str,
        "ll_lon": LL_lon,
        "ll_lat": LL_lat,
        "ur_lon": UR_lon,
        "ur_lat": UR_lat,
        "x1": x1,
        "y1": y1,
        "x2": x2,
        "y2": y2,
        "xpixelsize": xpixelsize,
        "ypixelsize": ypixelsize,
        "yorigin": "upper",
        "institution": "Odyssey datacentre",
        "accutime": 15.0,
        "unit": unit,
        "transform": transform,
        "zerovalue": np.nanmin(R),
        "threshold": thr,
    }

    f.close()

    return R, Q, metadata
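A short usage sketch (the file name is hypothetical):

# read the instantaneous rain rate from a hypothetical ODIM HDF5 composite
R, Q, metadata = import_opera_hdf5("odim_composite_202106011200.h5", qty="RATE")
print(metadata["projection"], metadata["unit"], R.shape)
if Q is not None:
    print("quality field (QIND) found")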
Example #27
def get_pyfftw(shape, fftn_shape=None, n_threads=1, **kwargs):
    """Return a SimpleNamespace of FFT routines (fft2/ifft2, rfft2/irfft2,
    fftshift/ifftshift/fftfreq and optionally fftn) backed by pre-planned
    pyfftw.FFTW objects for the given input shape."""
    try:
        import pyfftw.interfaces.numpy_fft as pyfftw_fft
        import pyfftw

        pyfftw.interfaces.cache.enable()
    except ImportError:
        raise MissingOptionalDependency("pyfftw is required but not installed")

    X = pyfftw.empty_aligned(shape, dtype="complex128")
    F = pyfftw.empty_aligned(shape, dtype="complex128")

    fft_obj = pyfftw.FFTW(
        X,
        F,
        flags=["FFTW_ESTIMATE"],
        direction="FFTW_FORWARD",
        axes=(0, 1),
        threads=n_threads,
    )
    ifft_obj = pyfftw.FFTW(
        F,
        X,
        flags=["FFTW_ESTIMATE"],
        direction="FFTW_BACKWARD",
        axes=(0, 1),
        threads=n_threads,
    )

    if fftn_shape is not None:
        X = pyfftw.empty_aligned(fftn_shape, dtype="complex128")
        F = pyfftw.empty_aligned(fftn_shape, dtype="complex128")

        fftn_obj = pyfftw.FFTW(
            X,
            F,
            flags=["FFTW_ESTIMATE"],
            direction="FFTW_FORWARD",
            axes=list(range(len(fftn_shape))),
            threads=n_threads,
        )

    X = pyfftw.empty_aligned(shape, dtype="float64")
    output_shape = list(shape[:-1])
    output_shape.append(int(shape[-1] / 2) + 1)
    output_shape = tuple(output_shape)
    F = pyfftw.empty_aligned(output_shape, dtype="complex128")

    rfft_obj = pyfftw.FFTW(
        X,
        F,
        flags=["FFTW_ESTIMATE"],
        direction="FFTW_FORWARD",
        axes=(0, 1),
        threads=n_threads,
    )
    irfft_obj = pyfftw.FFTW(
        F,
        X,
        flags=["FFTW_ESTIMATE"],
        direction="FFTW_BACKWARD",
        axes=(0, 1),
        threads=n_threads,
    )

    # wrap the planned FFTW objects in functions that copy their input and
    # output arrays, since FFTW reuses its internal buffers between calls
    f = {
        "fft2": lambda X: fft_obj(input_array=X.copy()).copy(),
        "ifft2": lambda X: ifft_obj(input_array=X.copy()).copy(),
        "rfft2": lambda X: rfft_obj(input_array=X.copy()).copy(),
        "irfft2": lambda X: irfft_obj(input_array=X.copy()).copy(),
        "fftshift": pyfftw_fft.fftshift,
        "ifftshift": pyfftw_fft.ifftshift,
        "fftfreq": pyfftw_fft.fftfreq,
    }

    if fftn_shape is not None:
        f["fftn"] = lambda X: fftn_obj(input_array=X).copy()
    fft = SimpleNamespace(**f)

    return fft
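A quick round-trip check (assuming pyfftw is installed; note that the planned fft2/ifft2 operate on complex128 input of the given shape):

import numpy as np

fft = get_pyfftw((256, 256), n_threads=2)
field = np.random.randn(256, 256).astype("complex128")
roundtrip = fft.ifft2(fft.fft2(field))
print(np.allclose(field, roundtrip))  # expected: True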
Example #28
def reproject_geodata(geodata, t_proj4str, return_grid=None):
    """
    Reproject geodata and optionally create a grid in a new projection.
    
    Parameters
    ----------
    geodata : dictionary
        Dictionary containing geographical information about the field.
        It must contain the attributes projection, x1, x2, y1, y2, xpixelsize, 
        ypixelsize, as defined in the documentation of pysteps.io.importers.
    t_proj4str: str
        The target PROJ.4-compatible projection string (fallback).
    return_grid : {None, 'coords', 'quadmesh'}, optional
        Whether to return the coordinates of the projected grid.
        The default return_grid=None does not compute the grid,
        return_grid='coords' returns the centers of projected grid points,
        return_grid='quadmesh' returns the coordinates of the quadrilaterals
        (e.g. to be used by pcolormesh).
    
    Returns
    -------
    geodata : dictionary
        Dictionary containing the reprojected geographical information 
        and optionally the required X_grid and Y_grid. \n
        It also includes a fixed boolean attribute regular_grid=False to indicate
        that the reprojected grid has no regular spacing.
    """
    if not pyproj_imported:
        raise MissingOptionalDependency(
            "pyproj package is required for reproject_geodata function utility "
            "but it is not installed")
    
    geodata = geodata.copy()
    s_proj4str = geodata["projection"]
    extent = (geodata["x1"], geodata["x2"], geodata["y1"], geodata["y2"])
    shape = (int((geodata["y2"] - geodata["y1"]) / geodata["ypixelsize"]),
             int((geodata["x2"] - geodata["x1"]) / geodata["xpixelsize"]))
    
    s_srs = pyproj.Proj(s_proj4str)
    t_srs = pyproj.Proj(t_proj4str)
    
    x1 = extent[0]
    x2 = extent[1]
    y1 = extent[2]
    y2 = extent[3]
    
    # Reproject grid on fall-back projection
    if return_grid is not None:
        if return_grid == "coords":
            y_coord = np.linspace(y1, y2, shape[0]) + geodata["ypixelsize"]/2.0
            x_coord = np.linspace(x1, x2, shape[1]) + geodata["xpixelsize"]/2.0
        elif return_grid == "quadmesh":
            y_coord = np.linspace(y1, y2, shape[0] + 1)
            x_coord = np.linspace(x1, x2, shape[1] + 1)
        else:
            raise ValueError("unknown return_grid value %s" % return_grid)
            
        X, Y = np.meshgrid(x_coord, y_coord)
        
        X, Y = pyproj.transform(s_srs, t_srs, X.flatten(), Y.flatten())    
        X = X.reshape((y_coord.size, x_coord.size))
        Y = Y.reshape((y_coord.size, x_coord.size))
    
    # Reproject extent on fall-back projection
    x1, y1 = pyproj.transform(s_srs, t_srs, x1, y1)
    x2, y2 = pyproj.transform(s_srs, t_srs, x2, y2)
    
    # update geodata
    geodata["projection"] = t_proj4str
    geodata["x1"] = x1
    geodata["x2"] = x2
    geodata["y1"] = y1
    geodata["y2"] = y2
    geodata["regular_grid"] = False
    geodata["xpixelsize"] = None
    geodata["ypixelsize"] = None
    geodata["X_grid"] = X
    geodata["Y_grid"] = Y
    
    return geodata
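A short usage sketch reprojecting a hypothetical Swiss metric grid onto the European LAEA fallback projection (the PROJ.4 strings and extent are illustrative; a pyproj version that still provides pyproj.transform is assumed):

geodata = {
    "projection": "+proj=somerc +lat_0=46.95 +lon_0=7.44 +k_0=1 "
                  "+x_0=600000 +y_0=200000 +ellps=bessel +units=m +no_defs",
    "x1": 255000.0, "y1": -160000.0, "x2": 965000.0, "y2": 480000.0,
    "xpixelsize": 1000.0, "ypixelsize": 1000.0,
}
t_proj4str = ("+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 "
              "+ellps=GRS80 +units=m +no_defs")
geodata_laea = reproject_geodata(geodata, t_proj4str, return_grid="quadmesh")
print(geodata_laea["X_grid"].shape, geodata_laea["regular_grid"])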
Example #29
def track_features(
    prvs_image,
    next_image,
    points,
    winsize=(50, 50),
    nr_levels=3,
    criteria=(3, 10, 0),
    flags=0,
    min_eig_thr=1e-4,
    verbose=False,
):
    """
    Interface to the OpenCV `Lucas-Kanade`_ features tracking algorithm
    (cv.calcOpticalFlowPyrLK).

    .. _`Lucas-Kanade`:\
       https://docs.opencv.org/3.4/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323

    .. _calcOpticalFlowPyrLK:\
       https://docs.opencv.org/3.4/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323


    .. _MaskedArray:\
        https://docs.scipy.org/doc/numpy/reference/maskedarray.baseclass.html#numpy.ma.MaskedArray

    .. _ndarray:\
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html

    Parameters
    ----------

    prvs_image : ndarray_ or MaskedArray_
        Array of shape (m, n) containing the first image.
        Invalid values (NaNs or infs) are filled using the min value.

    next_image : ndarray_ or MaskedArray_
        Array of shape (m, n) containing the successive image.
        Invalid values (NaNs or infs) are filled using the min value.

    points : array_like
        Array of shape (p, 2) indicating the pixel coordinates of the
        tracking points (corners).

    winsize : tuple of int, optional
        The **winSize** parameter in calcOpticalFlowPyrLK_.
        It represents the size of the search window used at each
        pyramid level.

    nr_levels : int, optional
        The **maxLevel** parameter in calcOpticalFlowPyrLK_.
        It represents the 0-based maximal pyramid level number.

    criteria : tuple of int, optional
        The **TermCriteria** parameter in calcOpticalFlowPyrLK_ ,
        which specifies the termination criteria of the iterative search
        algorithm.

    flags : int, optional
        Operation flags; see the documentation of calcOpticalFlowPyrLK_.

    min_eig_thr : float, optional
        The **minEigThreshold** parameter in calcOpticalFlowPyrLK_.

    verbose : bool, optional
        Print the number of vectors that have been found.

    Returns
    -------

    xy : ndarray_
        Array of shape (d, 2) with the x- and y-coordinates of *d* <= *p*
        detected sparse motion vectors.

    uv : ndarray_
        Array of shape (d, 2) with the u- and v-components of *d* <= *p*
        detected sparse motion vectors.

    Notes
    -----

    The tracking points can be obtained with the
    :py:func:`pysteps.utils.images.ShiTomasi_detection` routine.

    See also
    --------

    pysteps.motion.lucaskanade.dense_lucaskanade

    References
    ----------

    Bouguet,  J.-Y.:  Pyramidal  implementation  of  the  affine  Lucas Kanade
    feature tracker description of the algorithm, Intel Corp., 5, 4,
    https://doi.org/10.1109/HPDC.2004.1323531, 2001

    Lucas, B. D. and Kanade, T.: An iterative image registration technique with
    an application to stereo vision, in: Proceedings of the 1981 DARPA Imaging
    Understanding Workshop, pp. 121–130, 1981.
    """

    if not CV2_IMPORTED:
        raise MissingOptionalDependency(
            "opencv package is required for the calcOpticalFlowPyrLK() "
            "routine but it is not installed"
        )

    prvs_img = prvs_image.copy()
    next_img = next_image.copy()
    p0 = np.copy(points)

    # Check if a MaskedArray is used. If not, mask the ndarray
    if not isinstance(prvs_img, MaskedArray):
        prvs_img = np.ma.masked_invalid(prvs_img)
    np.ma.set_fill_value(prvs_img, prvs_img.min())

    if not isinstance(next_img, MaskedArray):
        next_img = np.ma.masked_invalid(next_img)
    np.ma.set_fill_value(next_img, next_img.min())

    # scale between 0 and 255
    im_min = prvs_img.min()
    im_max = prvs_img.max()
    if im_max - im_min > 1e-8:
        prvs_img = (prvs_img.filled() - im_min) / (im_max - im_min) * 255
    else:
        prvs_img = prvs_img.filled() - im_min

    im_min = next_img.min()
    im_max = next_img.max()
    if im_max - im_min > 1e-8:
        next_img = (next_img.filled() - im_min) / (im_max - im_min) * 255
    else:
        next_img = next_img.filled() - im_min

    # convert to 8-bit
    prvs_img = prvs_img.astype("uint8")
    next_img = next_img.astype("uint8")

    # Lucas-Kanade
    # TODO: use the error returned by the OpenCV routine
    params = dict(
        winSize=winsize,
        maxLevel=nr_levels,
        criteria=criteria,
        flags=flags,
        minEigThreshold=min_eig_thr,
    )
    p1, st, __ = cv2.calcOpticalFlowPyrLK(prvs_img, next_img, p0, None, **params)

    # keep only features that have been found
    st = np.atleast_1d(st.squeeze()) == 1
    if np.any(st):
        p1 = p1[st, :]
        p0 = p0[st, :]

        # extract vectors
        xy = p0
        uv = p1 - p0

    else:
        xy = uv = np.empty(shape=(0, 2))

    if verbose:
        print("--- %i sparse vectors found ---" % xy.shape[0])

    return xy, uv
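A short usage sketch with synthetic images (the corners are detected here with OpenCV's goodFeaturesToTrack; pysteps.utils.images.ShiTomasi_detection can be used instead):

import cv2
import numpy as np

# a bright square, and a copy shifted by 1 pixel down and 2 pixels right
img0 = np.zeros((200, 200), dtype="uint8")
img0[80:120, 80:120] = 255
img1 = np.roll(img0, shift=(1, 2), axis=(0, 1))

corners = cv2.goodFeaturesToTrack(img0, maxCorners=50, qualityLevel=0.1,
                                  minDistance=5)
xy, uv = track_features(img0, img1, corners.reshape(-1, 2), verbose=True)
print(uv.mean(axis=0))  # expected: approximately [2, 1]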
Example #30
def initialize_forecast_exporter_geotiff(
    outpath,
    outfnprefix,
    startdate,
    timestep,
    n_timesteps,
    shape,
    metadata,
    n_ens_members=1,
    incremental=None,
    **kwargs,
):
    """
    Initialize a GeoTIFF forecast exporter.

    The output files are named as '<outfnprefix>_<startdate>_<t>.tif', where
    startdate is in YYmmddHHMM format and t is lead time (minutes). GDAL needs
    to be installed to use this exporter.

    Parameters
    ----------
    outpath: str
        Output path.

    outfnprefix: str
        Prefix for output file names.

    startdate: datetime.datetime
        Start date of the forecast.

    timestep: int
        Time step of the forecast (minutes).

    n_timesteps: int
        Number of time steps in the forecast. This argument is ignored if
        incremental is set to 'timestep'.

    shape: tuple of int
        Two-element tuple defining the shape (height,width) of the forecast
        grids.

    metadata: dict
        Metadata dictionary containing the projection,x1,x2,y1,y2 and unit
        attributes described in the documentation of
        :py:mod:`pysteps.io.importers`.

    n_ens_members: int
        Number of ensemble members in the forecast.

    incremental: {None,'timestep'}, optional
        Allow incremental writing of datasets into the GeoTIFF files. Set to
        'timestep' to enable writing forecasts or forecast ensembles separately
        for each time step. If set to None, incremental writing is disabled and
        the whole forecast is written in a single function call. The 'member'
        option is not currently implemented.

    Returns
    -------
    exporter: dict
        The return value is a dictionary containing an exporter object.
        This can be used with
        :py:func:`pysteps.io.exporters.export_forecast_dataset`
        to write the datasets.

    """

    if len(shape) != 2:
        raise ValueError("shape has %d elements, 2 expected" % len(shape))

    del kwargs  # kwargs not used

    if not GDAL_IMPORTED:
        raise MissingOptionalDependency(
            "gdal package is required for GeoTIFF " "exporters but it is not installed"
        )

    if incremental == "member":
        raise ValueError(
            "incremental writing of GeoTIFF files with"
            + " the 'member' option is not supported"
        )

    exporter = dict(
        method="geotiff",
        outfnprefix=outfnprefix,
        startdate=startdate,
        timestep=timestep,
        num_timesteps=n_timesteps,
        shape=shape,
        metadata=metadata,
        num_ens_members=n_ens_members,
        incremental=incremental,
        dst=[],
    )
    driver = gdal.GetDriverByName("GTiff")
    exporter["driver"] = driver

    if incremental != "timestep":
        for i in range(n_timesteps):
            outfn = _get_geotiff_filename(
                outfnprefix, startdate, n_timesteps, timestep, i
            )
            outfn = os.path.join(outpath, outfn)
            dst = _create_geotiff_file(outfn, driver, shape, metadata, n_ens_members)
            exporter["dst"].append(dst)
    else:
        exporter["num_files_written"] = 0

    return exporter
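A minimal usage sketch (the output path and grid metadata are hypothetical; export_forecast_dataset is the companion routine in pysteps.io.exporters, which for a single ensemble member is assumed to expect a field of shape (n_timesteps, height, width)):

import numpy as np
from datetime import datetime

forecast = np.random.gamma(shape=1.0, scale=2.0, size=(12, 256, 256))
metadata = {
    "projection": "+proj=stere +lon_0=25 +lat_0=90 +lat_ts=60 +ellps=WGS84",
    "x1": 0.0, "y1": 0.0, "x2": 256000.0, "y2": 256000.0,
    "unit": "mm/h",
}
exporter = initialize_forecast_exporter_geotiff(
    "/tmp", "nowcast", datetime(2021, 6, 1, 12, 0), timestep=5,
    n_timesteps=12, shape=(256, 256), metadata=metadata)
export_forecast_dataset(forecast, exporter)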