Example #1
import pandas as pd

# import assumed for this snippet: the matching function comes from pysteps
from pysteps.postprocessing.probmatching import nonparam_match_empirical_cdf


def get_obs_fcst_TRT_Rank(TRT_t0, TRT_diff_pred, TRT_diff_obs, TRT_tneg5):
    """Build observed and forecast TRT-rank series: the model prediction and a
    persistence prediction (both also probability-matched to the observations),
    plus a constant-gradient extrapolation from the last two observations."""
    obs = TRT_t0 + TRT_diff_obs
    pred_mod = TRT_t0 + TRT_diff_pred
    pred_mod.name = "Rank model prediction"

    pred_mod_PM = pd.Series(nonparam_match_empirical_cdf(
        pred_mod.values, obs.values),
                            index=pred_mod.index,
                            name="Rank model prediction (PM)")

    pred_pers = TRT_t0.copy()
    pred_pers.name = "Rank persistency prediction"
    pred_pers_PM = pd.Series(nonparam_match_empirical_cdf(
        pred_pers.values, obs.values),
                             index=pred_pers.index,
                             name="Rank persistency prediction (PM)")

    pred_diff = TRT_t0 + (TRT_t0 - TRT_tneg5)
    pred_diff.name = "Rank constant gradient prediction"

    diff_pred = pd.Series(TRT_diff_pred,
                          index=TRT_t0.index,
                          name="TRT rank difference model prediction")
    return (obs, pred_mod, pred_mod_PM, pred_pers, pred_pers_PM, pred_diff,
            diff_pred)
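

# --- Usage sketch (not part of the original example) ---
# A minimal, hedged illustration of the probability matching used above:
# nonparam_match_empirical_cdf remaps the forecast values so that their
# empirical CDF matches that of the observations while preserving their ranks.
# All input series below are made-up toy data.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    n = 100
    TRT_t0 = pd.Series(rng.uniform(10.0, 40.0, size=n))      # current TRT ranks (toy)
    TRT_tneg5 = TRT_t0 - rng.normal(0.0, 2.0, size=n)        # ranks 5 minutes earlier (toy)
    TRT_diff_obs = pd.Series(rng.normal(0.0, 3.0, size=n))   # observed change (toy)
    TRT_diff_pred = pd.Series(rng.normal(0.0, 3.0, size=n))  # predicted change (toy)

    obs, pred_mod, pred_mod_PM = get_obs_fcst_TRT_Rank(
        TRT_t0, TRT_diff_pred, TRT_diff_obs, TRT_tneg5)[:3]

    # after matching, the forecast quantiles follow the observed ones
    print(pred_mod.quantile(0.9), pred_mod_PM.quantile(0.9), obs.quantile(0.9))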
Example #2
# Imports assumed for this snippet (the standard imports of a pysteps nowcast
# module); _check_inputs and _compute_sprog_mask are module-internal helpers
# defined elsewhere in the same file.
import sys
import time

import numpy as np

from pysteps import cascade, extrapolation, utils
from pysteps.nowcasts import utils as nowcast_utils
from pysteps.postprocessing import probmatching
from pysteps.timeseries import autoregression, correlation

try:
    import dask

    DASK_IMPORTED = True
except ImportError:
    DASK_IMPORTED = False


def forecast(
    R,
    V,
    n_timesteps,
    n_cascade_levels=6,
    R_thr=None,
    extrap_method="semilagrangian",
    decomp_method="fft",
    bandpass_filter_method="gaussian",
    ar_order=2,
    conditional=False,
    probmatching_method="cdf",
    num_workers=1,
    fft_method="numpy",
    domain="spatial",
    extrap_kwargs=None,
    filter_kwargs=None,
    measure_time=False,
):
    """Generate a nowcast by using the Spectral Prognosis (S-PROG) method.

    Parameters
    ----------
    R : array-like
      Array of shape (ar_order+1,m,n) containing the input precipitation fields
      ordered by timestamp from oldest to newest. The time steps between
      the inputs are assumed to be regular.
    V : array-like
      Array of shape (2,m,n) containing the x- and y-components of the
      advection field.
      The velocities are assumed to represent one time step between the
      inputs. All values are required to be finite.
    n_timesteps : int
      Number of time steps to forecast.
    n_cascade_levels : int, optional
      The number of cascade levels to use.
    R_thr : float
      The threshold value for minimum observable precipitation intensity.
    extrap_method : str, optional
      Name of the extrapolation method to use. See the documentation of
      pysteps.extrapolation.interface.
    decomp_method : {'fft'}, optional
      Name of the cascade decomposition method to use. See the documentation
      of pysteps.cascade.interface.
    bandpass_filter_method : {'gaussian', 'uniform'}, optional
      Name of the bandpass filter method to use with the cascade decomposition.
      See the documentation of pysteps.cascade.interface.
    ar_order : int, optional
      The order of the autoregressive model to use. Must be >= 1.
    conditional : bool, optional
      If set to True, compute the statistics of the precipitation field
      conditionally by excluding pixels where the values are
      below the threshold R_thr.
    probmatching_method : {'cdf','mean',None}, optional
      Method for matching the conditional statistics of the forecast field
      (areas with precipitation intensity above the threshold R_thr) with those
      of the most recently observed one. 'cdf'=map the forecast CDF to the
      observed one, 'mean'=adjust only the mean value,
      None=no matching applied.
    num_workers : int, optional
      The number of workers to use for parallel computation. Applicable if dask
      is enabled or pyFFTW is used for computing the FFT.
      When num_workers>1, it is advisable to disable OpenMP by setting
      the environment variable OMP_NUM_THREADS to 1.
      This avoids slowdown caused by too many simultaneous threads.
    fft_method : str, optional
      A string defining the FFT method to use (see utils.fft.get_method).
      Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
      the recommended method is 'pyfftw'.
    domain : {"spatial", "spectral"}
      If "spatial", all computations are done in the spatial domain (the
      classical S-PROG model). If "spectral", the AR(2) models are applied
      directly in the spectral domain to reduce memory footprint and improve
      performance :cite:`PCH2019a`.
    extrap_kwargs : dict, optional
      Optional dictionary containing keyword arguments for the extrapolation
      method. See the documentation of pysteps.extrapolation.
    filter_kwargs : dict, optional
      Optional dictionary containing keyword arguments for the filter method.
      See the documentation of pysteps.cascade.bandpass_filters.py.
    measure_time : bool, optional
      If set to True, measure, print and return the computation time.

    Returns
    -------
    out : ndarray
      A three-dimensional array of shape (n_timesteps,m,n) containing a time
      series of forecast precipitation fields. The time series starts from
      t0+timestep, where timestep is taken from the input precipitation fields
      R. If measure_time is True, the return value is a three-element tuple
      containing the nowcast array, the initialization time of the nowcast
      generator and the time used in the main loop (seconds).

    See also
    --------
    pysteps.extrapolation.interface, pysteps.cascade.interface

    References
    ----------
    :cite:`Seed2003`, :cite:`PCH2019a`

    """
    _check_inputs(R, V, ar_order)

    if extrap_kwargs is None:
        extrap_kwargs = dict()

    if filter_kwargs is None:
        filter_kwargs = dict()

    if np.any(~np.isfinite(V)):
        raise ValueError("V contains non-finite values")

    print("Computing S-PROG nowcast:")
    print("-------------------------")
    print("")

    print("Inputs:")
    print("-------")
    print("input dimensions: %dx%d" % (R.shape[1], R.shape[2]))
    print("")

    print("Methods:")
    print("--------")
    print("extrapolation:          %s" % extrap_method)
    print("bandpass filter:        %s" % bandpass_filter_method)
    print("decomposition:          %s" % decomp_method)
    print("conditional statistics: %s" % ("yes" if conditional else "no"))
    print("probability matching:   %s" % probmatching_method)
    print("FFT method:             %s" % fft_method)
    print("domain:                 %s" % domain)
    print("")

    print("Parameters:")
    print("-----------")
    print("number of time steps:     %d" % n_timesteps)
    print("parallel threads:         %d" % num_workers)
    print("number of cascade levels: %d" % n_cascade_levels)
    print("order of the AR(p) model: %d" % ar_order)
    print("precip. intensity threshold: %g" % R_thr)

    if measure_time:
        starttime_init = time.time()

    fft = utils.get_method(fft_method,
                           shape=R.shape[1:],
                           n_threads=num_workers)

    M, N = R.shape[1:]

    # initialize the band-pass filter
    filter_method = cascade.get_method(bandpass_filter_method)
    filter = filter_method((M, N), n_cascade_levels, **filter_kwargs)

    decomp_method, recomp_method = cascade.get_method(decomp_method)

    extrapolator_method = extrapolation.get_method(extrap_method)

    R = R[-(ar_order + 1):, :, :].copy()
    R_min = np.nanmin(R)

    # determine the domain mask from non-finite values
    domain_mask = np.logical_or.reduce(
        [~np.isfinite(R[i, :]) for i in range(R.shape[0])])

    # determine the precipitation threshold mask
    if conditional:
        MASK_thr = np.logical_and.reduce(
            [R[i, :, :] >= R_thr for i in range(R.shape[0])])
    else:
        MASK_thr = None

    # initialize the extrapolator
    x_values, y_values = np.meshgrid(np.arange(R.shape[2]),
                                     np.arange(R.shape[1]))

    xy_coords = np.stack([x_values, y_values])

    extrap_kwargs = extrap_kwargs.copy()
    extrap_kwargs["xy_coords"] = xy_coords
    extrap_kwargs["allow_nonfinite_values"] = True

    # advect the previous precipitation fields so that they are aligned with
    # the most recent one (i.e. transform them into Lagrangian coordinates)
    res = list()

    def f(R, i):
        return extrapolator_method(R[i, :], V, ar_order - i, "min",
                                   **extrap_kwargs)[-1]

    for i in range(ar_order):
        if not DASK_IMPORTED:
            R[i, :, :] = f(R, i)
        else:
            res.append(dask.delayed(f)(R, i))

    if DASK_IMPORTED:
        num_workers_ = len(res) if num_workers > len(res) else num_workers
        R = np.stack(
            list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]])

    # replace non-finite values with the minimum value
    R = R.copy()
    for i in range(R.shape[0]):
        R[i, ~np.isfinite(R[i, :])] = np.nanmin(R[i, :])

    # compute the cascade decompositions of the input precipitation fields
    R_d = []
    for i in range(ar_order + 1):
        R_ = decomp_method(
            R[i, :, :],
            filter,
            mask=MASK_thr,
            fft_method=fft,
            output_domain=domain,
            normalize=True,
            compute_stats=True,
            compact_output=True,
        )
        R_d.append(R_)

    # rearrange the cascade levels into a list of arrays of shape
    # (ar_order+1,m,n), one per cascade level, for the autoregressive model
    R_c = nowcast_utils.stack_cascades(R_d,
                                       n_cascade_levels,
                                       convert_to_full_arrays=True)

    # compute lag-l temporal autocorrelation coefficients for each cascade level
    GAMMA = np.empty((n_cascade_levels, ar_order))
    for i in range(n_cascade_levels):
        if domain == "spatial":
            GAMMA[i, :] = correlation.temporal_autocorrelation(R_c[i],
                                                               mask=MASK_thr)
        else:
            GAMMA[i, :] = correlation.temporal_autocorrelation(
                R_c[i], domain="spectral", x_shape=R.shape[1:])

    R_c = nowcast_utils.stack_cascades(R_d,
                                       n_cascade_levels,
                                       convert_to_full_arrays=False)

    R_d = R_d[-1]

    nowcast_utils.print_corrcoefs(GAMMA)

    if ar_order == 2:
        # adjust the lag-2 correlation coefficient to ensure that the AR(p)
        # process is stationary
        for i in range(n_cascade_levels):
            GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(
                GAMMA[i, 0], GAMMA[i, 1])

    # estimate the parameters of the AR(p) model from the autocorrelation
    # coefficients
    PHI = np.empty((n_cascade_levels, ar_order + 1))
    for i in range(n_cascade_levels):
        PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])

    nowcast_utils.print_ar_params(PHI)

    # keep only the last p cascades, which are the ones needed by the
    # AR(p) model
    R_c = [R_c[i][-ar_order:] for i in range(n_cascade_levels)]

    D = None

    if probmatching_method == "mean":
        mu_0 = np.mean(R[-1, :, :][R[-1, :, :] >= R_thr])

    # compute precipitation mask and wet area ratio
    MASK_p = R[-1, :, :] >= R_thr
    war = 1.0 * np.sum(MASK_p) / (R.shape[1] * R.shape[2])

    if measure_time:
        init_time = time.time() - starttime_init

    R = R[-1, :, :]

    print("Starting nowcast computation.")

    if measure_time:
        starttime_mainloop = time.time()

    R_f = []

    # iterate each time step
    for t in range(n_timesteps):
        print("Computing nowcast for time step %d... " % (t + 1), end="")
        sys.stdout.flush()
        if measure_time:
            starttime = time.time()

        for i in range(n_cascade_levels):
            R_c[i] = autoregression.iterate_ar_model(R_c[i], PHI[i, :])

        R_d["cascade_levels"] = [
            R_c[i][-1, :] for i in range(n_cascade_levels)
        ]
        if domain == "spatial":
            R_d["cascade_levels"] = np.stack(R_d["cascade_levels"])
        R_c_ = recomp_method(R_d)

        if domain == "spectral":
            R_c_ = fft.irfft2(R_c_)

        MASK = _compute_sprog_mask(R_c_, war)
        R_c_[~MASK] = R_min

        if probmatching_method == "cdf":
            # adjust the CDF of the forecast to match the most recently
            # observed precipitation field
            R_c_ = probmatching.nonparam_match_empirical_cdf(R_c_, R)
        elif probmatching_method == "mean":
            mu_fct = np.mean(R_c_[MASK])
            R_c_[MASK] = R_c_[MASK] - mu_fct + mu_0

        R_c_[domain_mask] = np.nan

        # advect the recomposed precipitation field to obtain the forecast for
        # time step t
        extrap_kwargs.update({
            "displacement_prev": D,
            "return_displacement": True
        })
        R_f_, D_ = extrapolator_method(R_c_, V, 1, **extrap_kwargs)
        D = D_
        R_f_ = R_f_[0]
        R_f.append(R_f_)

        if measure_time:
            print("%.2f seconds." % (time.time() - starttime))
        else:
            print("done.")

    if measure_time:
        mainloop_time = time.time() - starttime_mainloop

    R_f = np.stack(R_f)

    if measure_time:
        return R_f, init_time, mainloop_time
    else:
        return R_f
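

# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of calling the S-PROG forecast above. The input
# fields are synthetic stand-ins for dB-transformed rain rates; in practice R
# would be read with pysteps.io and V estimated with an optical-flow method
# (e.g. pysteps.motion.get_method("lucaskanade")), and the nowcast is normally
# obtained through pysteps.nowcasts.get_method("sprog").
if __name__ == "__main__":
    np.random.seed(42)
    R_in = np.random.gamma(shape=1.0, scale=5.0, size=(3, 200, 200))  # ar_order + 1 = 3 fields
    V_in = np.zeros((2, 200, 200))  # zero advection for this toy case

    R_fct = forecast(
        R_in,
        V_in,
        n_timesteps=3,
        R_thr=0.1,
        probmatching_method="cdf",
    )
    print(R_fct.shape)  # expected: (3, 200, 200)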
Example #3
        def worker(j):
            if noise_method is not None:
                # generate noise field
                EPS = generate_noise(
                    pp, randstate=randgen_prec[j], fft_method=fft_objs[j], domain=domain
                )

                # decompose the noise field into a cascade
                EPS = decomp_method(
                    EPS,
                    filter,
                    fft_method=fft_objs[j],
                    input_domain=domain,
                    output_domain=domain,
                    compute_stats=True,
                    normalize=True,
                    compact_output=True,
                )
            else:
                EPS = None

            # iterate the AR(p) model for each cascade level
            for i in range(n_cascade_levels):
                # normalize the noise cascade
                if EPS is not None:
                    EPS_ = EPS["cascade_levels"][i]
                    EPS_ *= noise_std_coeffs[i]
                else:
                    EPS_ = None
                # apply AR(p) process to cascade level
                if EPS is not None or vel_pert_method is not None:
                    R_c[j][i] = autoregression.iterate_ar_model(
                        R_c[j][i], PHI[i, :], eps=EPS_
                    )
                else:
                    # use the deterministic AR(p) model computed above if
                    # perturbations are disabled
                    R_c[j][i] = R_m[i]

            EPS = None
            EPS_ = None

            # compute the recomposed precipitation field(s) from the cascades
            # obtained from the AR(p) model(s)
            R_d[j]["cascade_levels"] = [
                R_c[j][i][-1, :] for i in range(n_cascade_levels)
            ]
            if domain == "spatial":
                R_d[j]["cascade_levels"] = np.stack(R_d[j]["cascade_levels"])
            R_f_new = recomp_method(R_d[j])

            if domain == "spectral":
                R_f_new = fft_objs[j].irfft2(R_f_new)

            if mask_method is not None:
                # apply the precipitation mask to prevent generation of new
                # precipitation into areas where it was not originally
                # observed
                R_cmin = R_f_new.min()
                if mask_method == "incremental":
                    R_f_new = R_cmin + (R_f_new - R_cmin) * MASK_prec[j]
                    MASK_prec_ = R_f_new > R_cmin
                else:
                    MASK_prec_ = MASK_prec

                # Set to min value outside of mask
                R_f_new[~MASK_prec_] = R_cmin

            if probmatching_method == "cdf":
                # adjust the CDF of the forecast to match the most recently
                # observed precipitation field
                R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, R)
            elif probmatching_method == "mean":
                MASK = R_f_new >= R_thr
                mu_fct = np.mean(R_f_new[MASK])
                R_f_new[MASK] = R_f_new[MASK] - mu_fct + mu_0

            if mask_method == "incremental":
                MASK_prec[j] = _compute_incremental_mask(
                    R_f_new >= R_thr, struct, mask_rim
                )

            R_f_new[domain_mask] = np.nan

            R_f_out = []
            extrap_kwargs_ = extrap_kwargs.copy()

            V_pert = V

            # advect the recomposed precipitation field to obtain the forecast for
            # the current time step (or subtimesteps if non-integer time steps are
            # given)
            for t_sub in subtimesteps:
                if t_sub > 0:
                    t_diff_prev_int = t_sub - int(t_sub)
                    if t_diff_prev_int > 0.0:
                        R_f_ip = (1.0 - t_diff_prev_int) * R_f_prev[
                            j
                        ] + t_diff_prev_int * R_f_new
                    else:
                        R_f_ip = R_f_prev[j]

                    t_diff_prev = t_sub - t_prev[j]
                    t_total[j] += t_diff_prev

                    # compute the perturbed motion field
                    if vel_pert_method is not None:
                        V_pert = V + generate_vel_noise(vps[j], t_total[j] * timestep)

                    extrap_kwargs_["displacement_prev"] = D[j]
                    R_f_ep, D[j] = extrapolator_method(
                        R_f_ip,
                        V_pert,
                        [t_diff_prev],
                        **extrap_kwargs_,
                    )
                    R_f_out.append(R_f_ep[0])
                    t_prev[j] = t_sub

            # advect the forecast field by one time step if no subtimesteps in the
            # current interval were found
            if not subtimesteps:
                t_diff_prev = t + 1 - t_prev[j]
                t_total[j] += t_diff_prev

                # compute the perturbed motion field
                if vel_pert_method is not None:
                    V_pert = V + generate_vel_noise(vps[j], t_total[j] * timestep)

                extrap_kwargs_["displacement_prev"] = D[j]
                _, D[j] = extrapolator_method(
                    None,
                    V_pert,
                    [t_diff_prev],
                    **extrap_kwargs_,
                )
                t_prev[j] = t + 1

            R_f_prev[j] = R_f_new

            return R_f_out
Example #4
        def worker(j):
            if noise_method is not None:
                # generate noise field
                EPS = generate_noise(
                    pp, randstate=randgen_prec[j], fft_method=fft_objs[j], domain=domain
                )

                # decompose the noise field into a cascade
                EPS = decomp_method(
                    EPS,
                    filter,
                    fft_method=fft_objs[j],
                    input_domain=domain,
                    output_domain=domain,
                    compute_stats=True,
                    normalize=True,
                    compact_output=True,
                )
            else:
                EPS = None

            # iterate the AR(p) model for each cascade level
            for i in range(n_cascade_levels):
                # normalize the noise cascade
                if EPS is not None:
                    EPS_ = EPS["cascade_levels"][i]
                    EPS_ *= noise_std_coeffs[i]
                else:
                    EPS_ = None
                # apply AR(p) process to cascade level
                if EPS is not None or vel_pert_method is not None:
                    R_c[j][i] = autoregression.iterate_ar_model(
                        R_c[j][i], PHI[i, :], eps=EPS_
                    )
                else:
                    # use the deterministic AR(p) model computed above if
                    # perturbations are disabled
                    R_c[j][i] = R_m[i]

            EPS = None
            EPS_ = None

            # compute the recomposed precipitation field(s) from the cascades
            # obtained from the AR(p) model(s)
            R_d[j]["cascade_levels"] = [
                R_c[j][i][-1, :] for i in range(n_cascade_levels)
            ]
            if domain == "spatial":
                R_d[j]["cascade_levels"] = np.stack(R_d[j]["cascade_levels"])
            R_c_ = recomp_method(R_d[j])

            if domain == "spectral":
                R_c_ = fft_objs[j].irfft2(R_c_)

            if mask_method is not None:
                # apply the precipitation mask to prevent generation of new
                # precipitation into areas where it was not originally
                # observed
                R_cmin = R_c_.min()
                if mask_method == "incremental":
                    R_c_ = R_cmin + (R_c_ - R_cmin) * MASK_prec[j]
                    MASK_prec_ = R_c_ > R_cmin
                else:
                    MASK_prec_ = MASK_prec

                # Set to min value outside of mask
                R_c_[~MASK_prec_] = R_cmin

            if probmatching_method == "cdf":
                # adjust the CDF of the forecast to match the most recently
                # observed precipitation field
                R_c_ = probmatching.nonparam_match_empirical_cdf(R_c_, R)
            elif probmatching_method == "mean":
                MASK = R_c_ >= R_thr
                mu_fct = np.mean(R_c_[MASK])
                R_c_[MASK] = R_c_[MASK] - mu_fct + mu_0

            if mask_method == "incremental":
                MASK_prec[j] = _compute_incremental_mask(
                    R_c_ >= R_thr, struct, mask_rim
                )

            # compute the perturbed motion field
            if vel_pert_method is not None:
                V_ = V + generate_vel_noise(vps[j], (t + 1) * timestep)
            else:
                V_ = V

            R_c_[domain_mask] = np.nan

            # advect the recomposed precipitation field to obtain the forecast
            # for time step t
            extrap_kwargs_ = extrap_kwargs.copy()
            extrap_kwargs_["displacement_prev"] = D[j]
            R_f_, D_ = extrapolator_method(R_c_, V_, 1, **extrap_kwargs_)
            D[j] = D_
            R_f_ = R_f_[0]

            return R_f_
Example #5
# Imports assumed for this snippet; generate_plot is a plotting helper defined
# elsewhere in the same module.
import numpy
from datetime import timedelta

from netCDF4 import Dataset, num2date
from pysteps.postprocessing import probmatching


def plot_raincast_det(start_datetime, end_datetime, analysis_time,
                      raincast_det_path, radar_path):

    raincast_valid_times = []

    with Dataset(raincast_det_path) as cur_nc:
        raincast = cur_nc.variables['forecast'][:]
        latitude = cur_nc.variables['latitude'][:]
        longitude = cur_nc.variables['longitude'][:]
        data_mask = cur_nc.variables['data_mask'][:]
        time = cur_nc.variables['time'][:]
        calendar = 'gregorian'
        units = 'seconds since 1970-01-01 00:00:00'
        for t in range(len(time)):
            raincast_valid_times.append(
                num2date(time[t], units=units, calendar=calendar))

    radar_valid_times = []
    with Dataset(radar_path) as cur_nc:
        radar = cur_nc.variables['forecast'][:]
        latitude = cur_nc.variables['latitude'][:]
        longitude = cur_nc.variables['longitude'][:]
        data_mask = cur_nc.variables['data_mask'][:]
        time = cur_nc.variables['time'][:]
        calendar = 'gregorian'
        units = 'seconds since 1970-01-01 00:00:00'
        for t in range(len(time)):
            radar_valid_times.append(
                num2date(time[t], units=units, calendar=calendar))

    valid_time = start_datetime
    while valid_time <= end_datetime:
        print('raincast_det: start processing {}'.format(valid_time))
        t_index0 = raincast_valid_times.index(valid_time)
        raincast_data = raincast[t_index0, :, :]

        t_index1 = radar_valid_times.index(valid_time)
        radar_data = radar[t_index1, :, :]

        raincast_data[numpy.isnan(raincast_data)] = 0.0
        radar_data[numpy.isnan(radar_data)] = 0.0
        raincast_data = probmatching.nonparam_match_empirical_cdf(
            raincast_data, radar_data)

        longitude = longitude % 360.0

        generate_plot(latitude,
                      longitude,
                      raincast_data,
                      'raincast',
                      analysis_time,
                      valid_time,
                      '{model_name}_{valid_time}.png'.format(
                          model_name='raincast',
                          valid_time=valid_time.strftime('%Y%m%d%H%M')),
                      llcrnrlon=172,
                      urcrnrlon=180,
                      llcrnrlat=-40.0,
                      urcrnrlat=-35.0)
        generate_plot(latitude,
                      longitude,
                      radar_data,
                      'radar',
                      analysis_time,
                      valid_time,
                      '{model_name}_{valid_time}.png'.format(
                          model_name='radar',
                          valid_time=valid_time.strftime('%Y%m%d%H%M')),
                      llcrnrlon=172,
                      urcrnrlon=180,
                      llcrnrlat=-40.0,
                      urcrnrlat=-35.0)
        valid_time += timedelta(seconds=600.0)
Example #6
        def worker(j):
            if noise_method is not None:
                # generate noise field
                EPS = generate_noise(pp, randstate=randgen_prec[j],
                                     fft_method=fft_objs[j])
                # decompose the noise field into a cascade
                EPS = decomp_method(EPS, filter, fft_method=fft_objs[j])
            else:
                EPS = None

            # iterate the AR(p) model for each cascade level
            for i in range(n_cascade_levels):
                # normalize the noise cascade
                if EPS is not None:
                    EPS_ = (EPS["cascade_levels"][i, :, :] -
                            EPS["means"][i]) / EPS["stds"][i]
                    EPS_ *= noise_std_coeffs[i]
                else:
                    EPS_ = None
                # apply AR(p) process to cascade level
                if EPS is not None or vel_pert_method is not None:
                    R_c[j, i, :, :, :] = autoregression.iterate_ar_model(
                        R_c[j, i, :, :, :], PHI[i, :], EPS=EPS_)
                else:
                    # use the deterministic AR(p) model computed above if
                    # perturbations are disabled
                    R_c[j, i, :, :, :] = R_m[i, :, :, :]

            EPS = None
            EPS_ = None

            # compute the recomposed precipitation field(s) from the cascades
            # obtained from the AR(p) model(s)
            R_c_ = nowcast_utils.recompose_cascade(R_c[j, :, -1, :, :], mu, sigma)

            if mask_method is not None:
                # apply the precipitation mask to prevent generation of new
                # precipitation into areas where it was not originally
                # observed
                R_cmin = R_c_.min()
                if mask_method == "incremental":
                    R_c_ = R_cmin + (R_c_ - R_cmin) * MASK_prec[j]
                    MASK_prec_ = R_c_ > R_cmin
                else:
                    MASK_prec_ = MASK_prec

                # Set to min value outside of mask
                R_c_[~MASK_prec_] = R_cmin

            if probmatching_method == "cdf":
                # adjust the CDF of the forecast to match the most recently
                # observed precipitation field
                R_c_ = probmatching.nonparam_match_empirical_cdf(R_c_, R)
            elif probmatching_method == "mean":
                MASK = R_c_ >= R_thr
                mu_fct = np.mean(R_c_[MASK])
                R_c_[MASK] = R_c_[MASK] - mu_fct + mu_0

            if mask_method == "incremental":
                MASK_prec[j] = _compute_incremental_mask(
                    R_c_ >= R_thr, struct, mask_rim)

            # compute the perturbed motion field
            if vel_pert_method is not None:
                V_ = V + generate_vel_noise(vps[j], (t + 1) * timestep)
            else:
                V_ = V

            # advect the recomposed precipitation field to obtain the forecast
            # for time step t
            extrap_kwargs.update({"D_prev": D[j], "return_displacement": True})
            R_f_, D_ = extrapolator_method(R_c_, V_, 1, **extrap_kwargs)
            D[j] = D_
            R_f_ = R_f_[0]

            return R_f_
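

# --- Sketch (assumption, not the actual pysteps helper) ---
# The workers above call a module-level _compute_incremental_mask(...) to build
# the precipitation mask used with mask_method == "incremental". A plausible
# implementation of the idea is sketched below: dilate the rain/no-rain mask
# with the structuring element `struct` and add a rim of `mask_rim` pixels that
# tapers smoothly from 1 inside rain areas to 0 outside.
import numpy as np
from scipy.ndimage import binary_dilation, generate_binary_structure


def incremental_mask_sketch(rain_mask, struct, mask_rim):
    """Return a float mask in [0, 1] that is 1 inside the dilated rain areas
    and decays to 0 over a rim of mask_rim pixels."""
    mask = binary_dilation(rain_mask, struct).astype(float)
    rim = mask > 0
    for _ in range(mask_rim):
        rim = binary_dilation(rim, struct)
        mask += rim
    return mask / mask.max()


# toy usage: a single rainy pixel grows into a smooth, tapered mask
toy = np.zeros((9, 9), dtype=bool)
toy[4, 4] = True
print(incremental_mask_sketch(toy, generate_binary_structure(2, 1), mask_rim=2))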