Example no. 1
import numpy as np

from pysteps import feature
from pysteps.tests.helpers import get_precipitation_fields


# "method" and "max_num_features" are expected to be supplied by a pytest
# parametrization (e.g. method in {"blob", "shitomasi", "tstorm"}).
def test_feature(method, max_num_features):
    input_field, _ = get_precipitation_fields(0, 0, True, True, None, "mch")

    detector = feature.get_method(method)

    kwargs = {"max_num_features": max_num_features}
    output = detector(input_field.squeeze(), **kwargs)

    assert isinstance(output, np.ndarray)
    assert output.ndim == 2
    assert output.shape[0] > 0
    if max_num_features is not None:
        assert output.shape[0] <= max_num_features
    assert output.shape[1] == 2
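
For reference, a detector obtained through pysteps.feature.get_method returns an (N, 2) array of feature coordinates, which is what the assertions above verify. A minimal standalone sketch, assuming pysteps is installed; the random field and the "shitomasi" choice are illustrative only and not part of the original test:

import numpy as np

from pysteps import feature

# illustrative 2-D input field (the test above uses real MeteoSwiss radar data)
field = np.random.rand(200, 200)

detector = feature.get_method("shitomasi")
points = detector(field, max_num_features=50)

# (N, 2) array of feature coordinates, with N <= 50 because of max_num_features
print(points.shape)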
Example no. 2
import time

import numpy as np
from numpy.ma import MaskedArray

from pysteps import feature, utils
from pysteps.tracking.lucaskanade import track_features
from pysteps.utils.cleansing import decluster, detect_outliers
from pysteps.utils.images import morph_opening


def dense_lucaskanade(
    input_images,
    lk_kwargs=None,
    fd_method="shitomasi",
    fd_kwargs=None,
    interp_method="rbfinterp2d",
    interp_kwargs=None,
    dense=True,
    nr_std_outlier=3,
    k_outlier=30,
    size_opening=3,
    decl_scale=20,
    verbose=False,
):
    """Run the Lucas-Kanade optical flow routine and interpolate the motion
    vectors.

    .. _OpenCV: https://opencv.org/

    .. _`Lucas-Kanade`:\
        https://docs.opencv.org/3.4/dc/d6b/group__video__track.html#ga473e4b886d0bcc6b65831eb88ed93323

    .. _MaskedArray:\
        https://docs.scipy.org/doc/numpy/reference/maskedarray.baseclass.html#numpy.ma.MaskedArray

    .. _ndarray:\
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html

    Interface to the OpenCV_ implementation of the local `Lucas-Kanade`_ optical
    flow method, applied in combination with a feature detection routine.

    The sparse motion vectors are finally interpolated to return the whole
    motion field.

    Parameters
    ----------
    input_images: ndarray_ or MaskedArray_
        Array of shape (T, m, n) containing a sequence of *T* two-dimensional
        input images of shape (m, n). The indexing order in **input_images** is
        assumed to be (time, latitude, longitude).

        *T* = 2 is the minimum required number of images.
        With *T* > 2, all the resulting sparse vectors are pooled together for
        the final interpolation on a regular grid.

        In the case of an ndarray_, invalid values (NaNs or infs) are masked,
        otherwise the mask of the MaskedArray_ is used. This mask defines the
        region where no features are detected for the tracking algorithm.

    lk_kwargs: dict, optional
        Optional dictionary containing keyword arguments for the `Lucas-Kanade`_
        features tracking algorithm. See the documentation of
        :py:func:`pysteps.tracking.lucaskanade.track_features`.

    fd_method: {"shitomasi", "blob", "tstorm"}, optional
        Name of the feature detection routine. See feature detection methods in
        :py:mod:`pysteps.feature`.

    fd_kwargs: dict, optional
        Optional dictionary containing keyword arguments for the features
        detection algorithm.
        See the documentation of :py:mod:`pysteps.feature`.

    interp_method: {"rbfinterp2d"}, optional
        Name of the interpolation method to use. See interpolation methods in
        :py:mod:`pysteps.utils.interpolate`.

    interp_kwargs: dict, optional
        Optional dictionary containing keyword arguments for the interpolation
        algorithm. See the documentation of :py:mod:`pysteps.utils.interpolate`.

    dense: bool, optional
        If True, return the three-dimensional array (2, m, n) containing
        the dense x- and y-components of the motion field.

        If False, return the sparse motion vectors as 2-D **xy** and **uv**
        arrays, where **xy** defines the vector positions and **uv** defines the
        x- and y-components of the vectors.

    nr_std_outlier: int, optional
        Maximum acceptable deviation from the mean in terms of number of
        standard deviations. Any sparse vector with a deviation larger than
        this threshold is flagged as an outlier and excluded from the
        interpolation.
        See the documentation of
        :py:func:`pysteps.utils.cleansing.detect_outliers`.

    k_outlier: int or None, optional
        The number of nearest neighbours used to localize the outlier detection.
        If set to None, it employs all the data points (global detection).
        See the documentation of
        :py:func:`pysteps.utils.cleansing.detect_outliers`.

    size_opening: int, optional
        The size of the structuring element kernel in pixels. This is used to
        perform a binary morphological opening on the input fields in order to
        filter isolated echoes due to clutter. If set to zero, the filtering
        is not performed.
        See the documentation of
        :py:func:`pysteps.utils.images.morph_opening`.

    decl_scale: int, optional
        The scale declustering parameter in pixels used to reduce the number of
        redundant sparse vectors before the interpolation.
        Sparse vectors within this declustering scale are averaged together.
        If set to less than 2 pixels, the declustering is not performed.
        See the documentation of
        :py:func:`pysteps.utils.cleansing.decluster`.

    verbose: bool, optional
        If set to True, print some information about the program.

    Returns
    -------
    out: ndarray_ or tuple
        If **dense=True** (the default), return the advection field having shape
        (2, m, n), where out[0, :, :] contains the x-components of the motion
        vectors and out[1, :, :] contains the y-components.
        The velocities are in units of pixels / timestep, where timestep is the
        time difference between the two input images.
        Return a zero motion field of shape (2, m, n) when no motion is
        detected.

        If **dense=False**, return a tuple containing the 2-dimensional
        arrays **xy** and **uv**, where **xy** defines the vector locations and
        **uv** defines the x- and y-components of the vectors.
        Return two empty arrays when no motion is detected.

    See also
    --------
    pysteps.tracking.lucaskanade.track_features

    References
    ----------
    Bouguet, J.-Y.: Pyramidal implementation of the affine Lucas Kanade
    feature tracker description of the algorithm, Intel Corp., 5, 4, 2001

    Lucas, B. D. and Kanade, T.: An iterative image registration technique with
    an application to stereo vision, in: Proceedings of the 1981 DARPA Imaging
    Understanding Workshop, pp. 121–130, 1981.
    """

    input_images = input_images.copy()

    if verbose:
        print("Computing the motion field with the Lucas-Kanade method.")
        t0 = time.time()

    nr_fields = input_images.shape[0]
    domain_size = (input_images.shape[1], input_images.shape[2])

    feature_detection_method = feature.get_method(fd_method)
    interpolation_method = utils.get_method(interp_method)

    if fd_kwargs is None:
        fd_kwargs = dict()
    if fd_method == "tstorm":
        fd_kwargs.update({"output_feat": True})

    if lk_kwargs is None:
        lk_kwargs = dict()

    if interp_kwargs is None:
        interp_kwargs = dict()

    xy = np.empty(shape=(0, 2))
    uv = np.empty(shape=(0, 2))
    for n in range(nr_fields - 1):

        # extract consecutive images
        prvs_img = input_images[n, :, :].copy()
        next_img = input_images[n + 1, :, :].copy()

        # Check if a MaskedArray is used. If not, mask the ndarray
        if not isinstance(prvs_img, MaskedArray):
            prvs_img = np.ma.masked_invalid(prvs_img)
        np.ma.set_fill_value(prvs_img, prvs_img.min())

        if not isinstance(next_img, MaskedArray):
            next_img = np.ma.masked_invalid(next_img)
        np.ma.set_fill_value(next_img, next_img.min())

        # remove small noise with a morphological operator (opening)
        if size_opening > 0:
            prvs_img = morph_opening(prvs_img, prvs_img.min(), size_opening)
            next_img = morph_opening(next_img, next_img.min(), size_opening)

        # features detection
        points = feature_detection_method(prvs_img,
                                          **fd_kwargs).astype(np.float32)

        # skip loop if no features to track
        if points.shape[0] == 0:
            continue

        # get sparse u, v vectors with Lucas-Kanade tracking
        xy_, uv_ = track_features(prvs_img, next_img, points, **lk_kwargs)

        # skip loop if no vectors
        if xy_.shape[0] == 0:
            continue

        # stack vectors
        xy = np.append(xy, xy_, axis=0)
        uv = np.append(uv, uv_, axis=0)

    # return a zero motion field if no sparse vectors are found
    if xy.shape[0] == 0:
        if dense:
            return np.zeros((2, domain_size[0], domain_size[1]))
        else:
            return xy, uv

    # detect and remove outliers
    outliers = detect_outliers(uv, nr_std_outlier, xy, k_outlier, verbose)
    xy = xy[~outliers, :]
    uv = uv[~outliers, :]

    if verbose:
        print("--- LK found %i sparse vectors ---" % xy.shape[0])

    # return sparse vectors if required
    if not dense:
        return xy, uv

    # decluster sparse motion vectors
    if decl_scale > 1:
        xy, uv = decluster(xy, uv, decl_scale, 1, verbose)

    # return zero motion field if no sparse vectors are left for interpolation
    if xy.shape[0] == 0:
        return np.zeros((2, domain_size[0], domain_size[1]))

    # interpolation
    xgrid = np.arange(domain_size[1])
    ygrid = np.arange(domain_size[0])
    uvgrid = interpolation_method(xy, uv, xgrid, ygrid, **interp_kwargs)

    if verbose:
        print("--- total time: %.2f seconds ---" % (time.time() - t0))

    return uvgrid
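
A short usage sketch of dense_lucaskanade. The two-frame synthetic sequence below is an illustrative assumption; in practice **input_images** would be a (T, m, n) radar sequence, and the same function can also be obtained via pysteps.motion.get_method("LK").

import numpy as np

from pysteps.motion.lucaskanade import dense_lucaskanade

# synthetic two-frame sequence: a rain block shifted by (3, 5) pixels
frame0 = np.zeros((200, 200))
frame0[80:120, 80:120] = 1.0
frame1 = np.roll(frame0, shift=(3, 5), axis=(0, 1))
input_images = np.stack([frame0, frame1])

# dense motion field of shape (2, 200, 200), in pixels per timestep
uv_dense = dense_lucaskanade(input_images, verbose=True)

# sparse vectors instead of the interpolated field
xy, uv_sparse = dense_lucaskanade(input_images, dense=False)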
Example no. 3
import time

import numpy as np

from pysteps import extrapolation, feature

# optional dask import, following the pattern used in pysteps.nowcasts.linda
try:
    import dask

    DASK_IMPORTED = True
except ImportError:
    DASK_IMPORTED = False

# Private helpers such as _compute_window_weights, _estimate_convol_params,
# _estimate_ar1_params, _estimate_ar2_params, _composite_convolution and
# _iterate_ar_model are defined elsewhere in the same module
# (pysteps.nowcasts.linda) and are not repeated here.


def _linda_deterministic_init(
    precip_fields,
    advection_field,
    feature_method,
    max_num_features,
    feature_kwargs,
    ari_order,
    kernel_type,
    localization_window_radius,
    extrap_method,
    extrap_kwargs,
    add_perturbations,
    num_workers,
    measure_time,
):
    """Initialize the deterministic LINDA nowcast model."""
    fct_gen = {}
    fct_gen["advection_field"] = advection_field
    fct_gen["ari_order"] = ari_order
    fct_gen["add_perturbations"] = add_perturbations
    fct_gen["num_workers"] = num_workers
    fct_gen["measure_time"] = measure_time

    precip_fields = precip_fields[-(ari_order + 2) :]
    input_length = precip_fields.shape[0]

    starttime_init = time.time()

    extrapolator = extrapolation.get_method(extrap_method)
    extrap_kwargs = extrap_kwargs.copy()
    extrap_kwargs["allow_nonfinite_values"] = True
    fct_gen["extrapolator"] = extrapolator
    fct_gen["extrap_kwargs"] = extrap_kwargs

    # detect features from the most recent input field
    if feature_method in {"blob", "shitomasi"}:
        precip_field_ = precip_fields[-1].copy()
        precip_field_[~np.isfinite(precip_field_)] = 0.0
        feature_detector = feature.get_method(feature_method)

        if measure_time:
            starttime = time.time()

        feature_kwargs = feature_kwargs.copy()
        feature_kwargs["max_num_features"] = max_num_features

        feature_coords = np.fliplr(
            feature_detector(precip_field_, **feature_kwargs)[:, :2]
        )

        feature_type = "blobs" if feature_method == "blob" else "corners"
        print("")
        print("Detecting features... ", end="", flush=True)
        if measure_time:
            print(
                f"found {feature_coords.shape[0]} {feature_type} in {time.time() - starttime:.2f} seconds."
            )
        else:
            print(f"found {feature_coords.shape[0]} {feature_type}.")

        if len(feature_coords) == 0:
            raise ValueError(
                "no features found, check input data and feature detector configuration"
            )
    elif feature_method == "domain":
        feature_coords = np.zeros((1, 2), dtype=int)
    else:
        raise NotImplementedError(
            "feature detector '%s' not implemented" % feature_method
        )
    fct_gen["feature_coords"] = feature_coords

    # compute interpolation weights
    interp_weights = _compute_window_weights(
        feature_coords,
        precip_fields.shape[1],
        precip_fields.shape[2],
        localization_window_radius,
    )
    interp_weights /= np.sum(interp_weights, axis=0)
    fct_gen["interp_weights"] = interp_weights

    # transform the input fields to the Lagrangian coordinates
    precip_fields_lagr = np.empty(precip_fields.shape)

    def worker(i):
        precip_fields_lagr[i, :] = extrapolator(
            precip_fields[i, :],
            advection_field,
            input_length - 1 - i,
            "min",
            **extrap_kwargs,
        )[-1]

    if DASK_IMPORTED and num_workers > 1:
        res = []

    print("Transforming to Lagrangian coordinates... ", end="", flush=True)

    if measure_time:
        starttime = time.time()

    for i in range(precip_fields.shape[0] - 1):
        if DASK_IMPORTED and num_workers > 1:
            res.append(dask.delayed(worker)(i))
        else:
            worker(i)

    if DASK_IMPORTED and num_workers > 1:
        dask.compute(*res, num_workers=min(num_workers, len(res)), scheduler="threads")
    precip_fields_lagr[-1] = precip_fields[-1]

    if measure_time:
        print(f"{time.time() - starttime:.2f} seconds.")
    else:
        print("done.")

    # compute the advection mask and set NaN to pixels where one or more of the
    # advected input fields have a NaN value
    mask_adv = np.all(np.isfinite(precip_fields_lagr), axis=0)
    fct_gen["mask_adv"] = mask_adv
    for i in range(precip_fields_lagr.shape[0]):
        precip_fields_lagr[i, ~mask_adv] = np.nan

    # compute differenced input fields in the Lagrangian coordinates
    precip_fields_lagr_diff = np.diff(precip_fields_lagr, axis=0)

    # estimate parameters of the deterministic model (i.e. the convolution and
    # the ARI process)

    print("Estimating the first convolution kernel... ", end="", flush=True)

    if measure_time:
        starttime = time.time()

    # estimate convolution kernel for the differenced component
    convol_weights = _compute_window_weights(
        feature_coords,
        precip_fields.shape[1],
        precip_fields.shape[2],
        localization_window_radius,
    )

    kernels_1 = _estimate_convol_params(
        precip_fields_lagr_diff[-2],
        precip_fields_lagr_diff[-1],
        convol_weights,
        mask_adv,
        kernel_type=kernel_type,
        num_workers=num_workers,
    )
    fct_gen["kernels_1"] = kernels_1

    if measure_time:
        print(f"{time.time() - starttime:.2f} seconds.")
    else:
        print("done.")

    # compute convolved difference fields
    precip_fields_lagr_diff_c = precip_fields_lagr_diff[:-1].copy()
    for i in range(precip_fields_lagr_diff_c.shape[0]):
        for _ in range(ari_order - i):
            precip_fields_lagr_diff_c[i] = _composite_convolution(
                precip_fields_lagr_diff_c[i],
                kernels_1,
                interp_weights,
            )

    print("Estimating the ARI(p,1) parameters... ", end="", flush=True)

    if measure_time:
        starttime = time.time()

    # estimate ARI(p,1) parameters
    weights = _compute_window_weights(
        feature_coords,
        precip_fields.shape[1],
        precip_fields.shape[2],
        localization_window_radius,
    )

    if ari_order == 1:
        psi = _estimate_ar1_params(
            precip_fields_lagr_diff_c[-1],
            precip_fields_lagr_diff[-1],
            weights,
            interp_weights,
            num_workers=num_workers,
        )
    else:
        psi = _estimate_ar2_params(
            precip_fields_lagr_diff_c[-2:],
            precip_fields_lagr_diff[-1],
            weights,
            interp_weights,
            num_workers=num_workers,
        )
    fct_gen["psi"] = psi

    if measure_time:
        print(f"{time.time() - starttime:.2f} seconds.")
    else:
        print("done.")

    # apply the ARI(p,1) model and integrate the differences
    precip_fields_lagr_diff_c = _iterate_ar_model(precip_fields_lagr_diff_c, psi)
    precip_fct = precip_fields_lagr[-2] + precip_fields_lagr_diff_c[-1]
    precip_fct[precip_fct < 0.0] = 0.0

    print("Estimating the second convolution kernel... ", end="", flush=True)

    if measure_time:
        starttime = time.time()

    # estimate the second convolution kernels based on the forecast field
    # computed above
    kernels_2 = _estimate_convol_params(
        precip_fct,
        precip_fields[-1],
        convol_weights,
        mask_adv,
        kernel_type=kernel_type,
        num_workers=num_workers,
    )
    fct_gen["kernels_2"] = kernels_2

    if measure_time:
        print(f"{time.time() - starttime:.2f} seconds.")
    else:
        print("done.")

    if measure_time:
        return fct_gen, precip_fields_lagr_diff, time.time() - starttime_init
    else:
        return fct_gen, precip_fields_lagr_diff
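
The Lagrangian-transform step above fans a per-field worker out to threads with dask.delayed when dask is available, and falls back to a plain loop otherwise. A self-contained sketch of that pattern, with a trivial stand-in worker instead of the extrapolation call:

import numpy as np

try:
    import dask

    DASK_IMPORTED = True
except ImportError:
    DASK_IMPORTED = False

num_workers = 4
fields = np.random.rand(4, 64, 64)
out = np.empty_like(fields)


def worker(i):
    # stand-in for the per-field extrapolation step
    out[i] = 2.0 * fields[i]


res = []
for i in range(fields.shape[0]):
    if DASK_IMPORTED and num_workers > 1:
        res.append(dask.delayed(worker)(i))
    else:
        worker(i)

if DASK_IMPORTED and num_workers > 1:
    dask.compute(*res, num_workers=min(num_workers, len(res)), scheduler="threads")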