Example #1
    def obtain_instant_covs_corrs(hist_fcst, nmem, delta_ti, delta_tj):
        # a40 (17.3)
        corr_ijt = np.empty((STEPS, N_MODEL, N_MODEL))
        corr_ijt[:, :, :] = np.nan
        cov_ijt = np.empty((STEPS, N_MODEL, N_MODEL))
        cov_ijt[:, :, :] = np.nan

        for it in range(STEPS // 2, STEPS):
            if it % AINT == 0:
                fcsti = hist_fcst[it, :, :].copy()
                fcstj = hist_fcst[it, :, :].copy()
                for k in range(nmem):
                    for jt in range(delta_ti):
                        fcsti[k, :] = model.timestep(fcsti[k, :], DT)
                    for jt in range(delta_tj):
                        fcstj[k, :] = model.timestep(fcstj[k, :], DT)

                for i in range(N_MODEL):
                    for j in range(N_MODEL):
                        # a38p40
                        vector_i = np.copy(fcsti[:, i])
                        vector_j = np.copy(fcstj[:, j])
                        vector_i[:] -= np.mean(vector_i)
                        vector_j[:] -= np.mean(vector_j)
                        numera = np.sum(vector_i * vector_j)
                        denomi = (np.sum(vector_i ** 2) * np.sum(vector_j ** 2)) ** 0.5
                        corr_ijt[it, i, j] = numera / denomi
                        cov_ijt[it, i, j] = numera / (nmem - 1.0)
        return corr_ijt, cov_ijt
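
A minimal vectorized sketch of the inner (i, j) loops above, assuming fcsti and fcstj are the [nmem, N_MODEL] ensembles at the two lags; it restates the same covariance and correlation formulas and is not code from the repository:

    import numpy as np

    def instant_cov_corr(fcsti, fcstj):
        nmem = fcsti.shape[0]
        di = fcsti - fcsti.mean(axis=0)   # deviations from the ensemble mean
        dj = fcstj - fcstj.mean(axis=0)
        numer = di.T @ dj                 # entry (i, j) = sum over members of di[:, i] * dj[:, j]
        cov = numer / (nmem - 1.0)
        denom = np.sqrt(np.outer((di ** 2).sum(axis=0), (dj ** 2).sum(axis=0)))
        corr = numer / denom              # Pearson correlation for every (i, j) pair
        return cov, corr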
Example #2
    def test_tangent_model(self):
        """m1 has largest error due to Eular-forward truncation, which is expected to scale O(DT^2)"""

        ptb = 1.0e-6
        eps1 = 0.1
        eps2 = 1.0e-4
        step_verif = 1

        maxd1 = 0.0
        maxd2 = 0.0
        for itr in range(10):
            x_t0 = np.random.randn(N_MODEL) * FERR_INI
            for i in range(STEPS):
                x_t0 = model.timestep(x_t0, DT)
            x_t1 = np.copy(x_t0)
            for i in range(step_verif):
                x_t1 = model.timestep(x_t1, DT)

            m1 = model.finite_time_tangent(x_t0, DT, step_verif)
            m2 = model.finite_time_tangent_using_nonlinear(
                x_t0, DT, step_verif)
            m3 = np.empty((N_MODEL, N_MODEL))

            for i in range(N_MODEL):
                x_t0_ptb = np.copy(x_t0)
                x_t0_ptb[i] = x_t0[i] + ptb
                x_t1_ptb = np.copy(x_t0_ptb)
                for j in range(step_verif):
                    x_t1_ptb = model.timestep(x_t1_ptb, DT)
                m3[:, i] = (x_t1_ptb - x_t1) / ptb

            maxd1 = max(maxd1, np.max(np.abs(m2 - m1)))
            maxd2 = max(maxd2, np.max(np.abs(m3 - m2)))
            self.assertTrue((np.abs(m2 - m1) < eps1).all())
            self.assertTrue((np.abs(m3 - m2) < eps2).all())
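
One plausible reading of finite_time_tangent_using_nonlinear (an assumption, not taken from the repository) is to chain per-step finite-difference Jacobians of the nonlinear map along the trajectory; the matrix m3 in the test approximates the same operator with a single finite-difference pass over the whole window:

    import numpy as np

    def finite_time_tangent_fd(model, x0, dt, nstep, ptb=1.0e-7):
        # Compose per-step Jacobians M_n ... M_1 along the nonlinear trajectory.
        n = x0.shape[0]
        m_total = np.identity(n)
        x = np.copy(x0)
        for _ in range(nstep):
            x_next = model.timestep(x, dt)
            m_step = np.empty((n, n))
            for i in range(n):
                x_ptb = np.copy(x)
                x_ptb[i] += ptb
                m_step[:, i] = (model.timestep(x_ptb, dt) - x_next) / ptb
            m_total = m_step @ m_total
            x = x_next
        return m_total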
Example #3
    def test_timestep_dt_continuity(self):
        """fails if DT is too large"""

        for i in range(100):
            anl = np.random.randn(N_MODEL)
            fcst1 = model.timestep(anl, DT)
            fcst2 = model.timestep(anl, DT / 2)
            fcst2 = model.timestep(fcst2, DT / 2)
            eps = 1.0e-4
            self.assertTrue((np.abs(fcst1 - fcst2) < eps).all())
            self.assertTrue((np.abs(anl - fcst1) > eps).any())
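
The test above only checks that one DT step and two DT/2 steps agree within a fixed tolerance. A small sketch of the same step-halving idea used to estimate the scheme's observed order of accuracy, assuming the same model.timestep(x, dt) interface; for an order-p scheme the error ratio between step sizes dt and dt/2 is about 2**p, so the returned value is roughly p as long as truncation error stays above round-off:

    import numpy as np

    def estimate_order(model, x0, dt, nref=64):
        coarse = model.timestep(x0, dt)                            # one step of size dt
        half = model.timestep(model.timestep(x0, dt / 2), dt / 2)  # two steps of size dt/2
        ref = np.copy(x0)
        for _ in range(nref):                                      # fine reference solution
            ref = model.timestep(ref, dt / nref)
        err_coarse = np.max(np.abs(coarse - ref))
        err_half = np.max(np.abs(half - ref))
        return np.log2(err_coarse / err_half)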
Example #4
def fdvar_2j(anl_0_nda: np.ndarray, fcst_0_nda: np.ndarray, h_nda: np.ndarray,
             r_nda: np.ndarray, yo_nda: np.ndarray, aint: int, i_s: int,
             i_e: int, amp_b: float, bc: np.ndarray) -> float:
    """
    :param anl_0_nda:  [dimc] temporary analysis field
    :param fcst_0_nda: [dimc] first guess field
    :param h_nda:      [pc_obs, dimc] observation operator
    :param r_nda:      [pc_obs, pc_obs] observation error covariance
    :param yo_nda:     [pc_obs, 1] observation
    :param aint:       assimilation interval
    :param i_s:        model grid number, assimilate only [i_s, i_e)
    :param i_e:        end of the assimilated grid range (exclusive)
    :param amp_b:      amplitude factor for the background error covariance B
    :param bc:         [N_MODEL] boundary condition if needed
    :return:           cost function 2J
    """

    h = np.asmatrix(h_nda)
    r = np.asmatrix(r_nda)
    yo = np.asmatrix(yo_nda)
    b = np.matrix(amp_b * stats_const.tdvar_b()[i_s:i_e, i_s:i_e])
    anl_0 = np.asmatrix(anl_0_nda).T
    fcst_0 = np.asmatrix(fcst_0_nda).T

    anl_1_nda = np.copy(anl_0_nda)
    for i in range(0, aint):
        anl_1_nda = model.timestep(anl_1_nda, DT, i_s, i_e, bc)

    # all array-like objects below are np.matrix
    anl_1 = np.matrix(anl_1_nda).T
    twoj = (anl_0 - fcst_0).T * b.I * (anl_0 - fcst_0) + \
           (h * anl_1 - yo).T * r.I * (h * anl_1 - yo)
    return twoj[0, 0]
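
For reference, the value returned is twice the strong-constraint 4D-Var cost function, with the background term evaluated at the start of the window and a single observation batch valid at its end:

    2J(x_0) = (x_0 - x_0^f)^T B^{-1} (x_0 - x_0^f) + (H M(x_0) - y^o)^T R^{-1} (H M(x_0) - y^o)

where x_0^f is the first guess fcst_0, M(.) is the nonlinear integration over aint steps, and B is amp_b times the climatological covariance from stats_const.tdvar_b() restricted to [i_s, i_e).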
Example #5
def exec_free_run(settings: dict) -> np.ndarray:
    """
    :param settings:  experiment settings; only "nmem" is read here
    :return free_run: [STEPS, nmem, N_MODEL] (only the first STEP_FREE steps are integrated)
    """
    free_run = np.empty((STEPS, settings["nmem"], N_MODEL))
    for m in range(0, settings["nmem"]):
        free_run[0, m, :] = np.random.randn(N_MODEL) * FERR_INI
        for i in range(1, STEP_FREE):
            free_run[i, m, :] = model.timestep(free_run[i - 1, m, :], DT)
    return free_run
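
A minimal usage sketch, assuming the settings dict only needs the ensemble size for this call:

    settings = {"nmem": 4}
    free_run = exec_free_run(settings)  # (STEPS, 4, N_MODEL); rows from STEP_FREE onward stay uninitialized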
Example #6
def exec_deterministic_fcst(settings: dict, anl: np.ndarray) -> int:
    """
    :param settings: experiment settings; "name" is used for the output file name
    :param anl:      [STEPS, nmem, N_MODEL] analysis ensemble
    """
    if FCST_LT == 0:
        return 0

    fcst_all = np.empty((STEPS, FCST_LT, N_MODEL))
    for i in range(STEP_FREE, STEPS):
        if i % AINT == 0:
            fcst_all[i, 0, :] = np.mean(anl[i, :, :], axis=0)
            for lt in range(1, FCST_LT):
                fcst_all[i, lt, :] = model.timestep(fcst_all[i, lt - 1, :], DT)  # extend from the previous lead time
    fcst_all.tofile("data/%s_fcst.bin" % settings["name"])
    return 0
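
A hedged sketch of reading the saved forecasts back, assuming the float64 layout that tofile() writes; only rows at assimilation times from STEP_FREE onward hold meaningful data:

    fcst_all = np.fromfile("data/%s_fcst.bin" % settings["name"]).reshape(STEPS, FCST_LT, N_MODEL)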
Example #7
def fdvar_2j_deriv(anl_0_nda: np.ndarray,
                   fcst_0_nda: np.ndarray,
                   h_nda: np.ndarray,
                   r_nda: np.ndarray,
                   yo_nda: np.ndarray,
                   aint: int,
                   i_s: int,
                   i_e: int,
                   amp_b: float,
                   bc: np.ndarray = None) -> np.ndarray:
    """
    :param anl_0_nda:   [dimc] temporary analysis field
    :param fcst_0_nda:  [dimc] first guess field
    :param h_nda:       [pc_obs, dimc] observation operator
    :param r_nda:       [pc_obs, pc_obs] observation error covariance
    :param yo_nda:      [pc_obs, 1] observation
    :param aint:        assimilation interval
    :param i_s:         model grid number, assimilate only [i_s, i_e)
    :param i_e:         end of the assimilated grid range (exclusive)
    :param amp_b:       amplitude factor for the background error covariance B
    :param bc:          [N_MODEL] boundary condition if needed
    :return:            [dimc] gradient of cost function 2J
    """

    if i_s != 0 or i_e != N_MODEL:
        raise Exception(
            "method fdvar_2j_deriv does not support non/weakly coupled DA")

    h = np.asmatrix(h_nda)
    r = np.asmatrix(r_nda)
    yo = np.asmatrix(yo_nda)
    b = np.matrix(amp_b * stats_const.tdvar_b()[i_s:i_e, i_s:i_e])
    anl_0 = np.asmatrix(anl_0_nda).T
    fcst_0 = np.asmatrix(fcst_0_nda).T

    m = model.finite_time_tangent(fcst_0_nda, DT, aint)
    inc = anl_0 - fcst_0
    fcst_1_nda = np.copy(fcst_0_nda)
    for i in range(aint):
        fcst_1_nda = model.timestep(fcst_1_nda, DT)
    fcst_1 = np.asmatrix(fcst_1_nda).T
    d = yo - h * fcst_1

    j_deriv = b.I * inc + (m.T * h.T * r.I) * (h * m * inc - d)

    return j_deriv.A.flatten() * 2.0
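
The returned vector is the gradient of 2J in incremental form, linearized about the first-guess trajectory with the finite-time tangent M:

    grad 2J(x_0) = 2 [ B^{-1} dx + M^T H^T R^{-1} (H M dx - d) ],   dx = x_0 - x_0^f,   d = y^o - H M(x_0^f)

which agrees with the cost in Example #4 once M(x_0) is approximated by M(x_0^f) + M dx.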
Example #8
def fdvar_analytical(fcst_0_nda: np.ndarray,
                     h_nda: np.ndarray,
                     r_nda: np.ndarray,
                     yo_nda: np.ndarray,
                     aint: int,
                     i_s: int,
                     i_e: int,
                     amp_b: float,
                     bc: np.ndarray = None) -> np.ndarray:
    """
    :param fcst_0_nda: [dimc] first guess at beginning of window
    :param h_nda:      [pc_obs, dimc] observation operator
    :param r_nda:      [pc_obs, pc_obs] observation error covariance
    :param yo_nda:     [pc_obs, 1] observation
    :param aint:       assimilation interval
    :param i_s:        model grid number, assimilate only [i_s, i_e)
    :param i_e:        end of the assimilated grid range (exclusive)
    :param amp_b:      amplitude factor for the background error covariance B
    :param bc:         [N_MODEL] boundary condition if needed
    :return anl_1_nda: [dimc] assimilated field
    """

    if not (i_s == 0 and i_e == N_MODEL):
        raise Exception(
            "fdvar_analytical() does not support non/weakly coupled DA. i_s = %d and i_e = %d were given."
            % (i_s, i_e))

    m = np.asmatrix(
        model.finite_time_tangent_using_nonlinear(fcst_0_nda, DT, aint))
    h = np.asmatrix(h_nda)
    r = np.asmatrix(r_nda)
    yo = np.asmatrix(yo_nda)
    b = np.matrix(amp_b * stats_const.tdvar_b()[i_s:i_e, i_s:i_e])
    fcst_0 = np.asmatrix(fcst_0_nda).T

    d = yo - h * fcst_0
    mt_ht_ri = m.T * h.T * r.I
    delta_x0 = (b.I + mt_ht_ri * h * m).I * mt_ht_ri * d
    anl_0_nda = fcst_0_nda + delta_x0.A.flatten()

    anl_1_nda = np.copy(anl_0_nda)
    for i in range(aint):
        anl_1_nda = model.timestep(anl_1_nda, DT, i_s, i_e, bc)

    return anl_1_nda
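
Setting the incremental gradient of Example #7 to zero yields the closed-form increment used here,

    dx_0 = (B^{-1} + M^T H^T R^{-1} H M)^{-1} M^T H^T R^{-1} d

where, as written in this routine, the innovation d = y^o - H x_0^f is computed against the unpropagated first guess.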
Example #9
def fdvar(fcst_0: np.ndarray,
          h: np.ndarray,
          r: np.ndarray,
          yo: np.ndarray,
          aint: int,
          i_s: int,
          i_e: int,
          amp_b: float,
          bc: np.ndarray = None) -> np.ndarray:
    """
    only one batch of observations, valid at t1 = t0 + dt * aint, is assimilated;
    the input fcst_0 is aint steps earlier than the analysis time

    :param fcst_0: [dimc] first guess at beginning of window
    :param h:      [pc_obs, dimc]
    :param r:      [pc_obs, pc_obs] observation error covariance
    :param yo:     [pc_obs, 1]
    :param aint:   assimilation interval
    :param i_s:    model grid number, assimilate only [i_s, i_e)
    :param i_e:    end of the assimilated grid range (exclusive)
    :param amp_b:  amplitude factor for the background error covariance B
    :param bc:     [N_MODEL] boundary condition if needed
    :return:       [dimc] assimilated field
    """
    try:
        anl_0 = np.copy(fcst_0)
        anl_0 = fmin_bfgs(fdvar_2j,
                          anl_0,
                          args=(fcst_0, h, r, yo, aint, i_s, i_e, amp_b, bc),
                          disp=False)
    except ValueError:
        print(
            "Method fmin_bfgs failed for this step; falling back to fmin."
        )
        anl_0 = np.copy(fcst_0)
        anl_0 = fmin(fdvar_2j,
                     anl_0,
                     args=(fcst_0, h, r, yo, aint, i_s, i_e, amp_b, bc),
                     disp=False)

    anl_1 = np.copy(anl_0)
    for i in range(0, aint):
        anl_1 = model.timestep(anl_1, DT, i_s, i_e, bc)
    return anl_1.T
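
When the window is fully coupled (i_s == 0 and i_e == N_MODEL), the analytic gradient from Example #7 could be handed to the optimizer instead of letting fmin_bfgs difference fdvar_2j numerically; a sketch under that assumption:

    from scipy.optimize import fmin_bfgs

    anl_0 = fmin_bfgs(fdvar_2j, np.copy(fcst_0), fprime=fdvar_2j_deriv,
                      args=(fcst_0, h, r, yo, aint, i_s, i_e, amp_b, bc),
                      disp=False)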
Example #10
def obtain_climatology():
    nstep = 100000
    all_true = np.empty((nstep, N_MODEL))

    np.random.seed((10 ** 8 + 7) * 11)
    true = np.random.randn(N_MODEL) * FERR_INI

    for i in range(0, nstep):
        true[:] = model.timestep(true[:], DT)
        all_true[i, :] = true[:]
    # all_true.tofile("data/true_for_clim.bin")

    mean = np.mean(all_true[nstep // 2:, :], axis=0)
    print("mean")
    print(mean)

    mean2 = np.mean(all_true[nstep // 2:, :] ** 2, axis=0)
    stdv = np.sqrt(mean2 - mean ** 2)
    print("stdv")
    print(stdv)

    return 0
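
The same climatological statistics can be obtained directly with NumPy reductions; a minimal equivalent sketch (np.std with the default ddof=0 matches the sqrt(mean2 - mean**2) form above):

    sample = all_true[nstep // 2:, :]
    mean = sample.mean(axis=0)
    stdv = sample.std(axis=0)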
Example #11
def exec_nature() -> np.ndarray:
    """
    :return all_true: [STEPS, N_MODEL]
    """
    all_true = np.empty((STEPS, N_MODEL))
    true = np.random.randn(N_MODEL) * FERR_INI

    # forward integration i-1 -> i
    for i in range(0, STEPS):
        true[:] = model.timestep(true[:], DT)
        all_true[i, :] = true[:]
    all_true.tofile("data/true.bin")

    np.random.seed((10**9 + 7) * 2)
    if Calc_lv:
        all_blv, all_ble = vectors.calc_blv(all_true)
        all_flv, all_fle = vectors.calc_flv(all_true)
        all_clv = vectors.calc_clv(all_true, all_blv, all_flv)
        vectors.calc_fsv(all_true)
        vectors.calc_isv(all_true)
        vectors.write_lyapunov_exponents(all_ble, all_fle, all_clv)

    return all_true
Example #12
def exec_assim_cycle(settings: dict, all_fcst: np.ndarray,
                     all_obs: np.ndarray) -> np.ndarray:
    """
    :param settings:
    :param all_fcst:  [STEPS, nmem, N_MODEL]
    :param all_obs:   [STEPS, P_OBS]
    :return all_fcst: [STEPS, nmem, N_MODEL]
    """

    n_atm = N_ATM
    p_atm = P_ATM

    # prepare containers
    r = getr()
    h = geth()
    fcst = np.empty((settings["nmem"], N_MODEL))
    all_ba = np.empty((STEPS, N_MODEL, N_MODEL))
    all_ba[:, :, :] = np.nan
    all_bf = np.empty((STEPS, N_MODEL, N_MODEL))
    all_bf[:, :, :] = np.nan
    obs_used = np.empty((STEPS, P_OBS))
    obs_used[:, :] = np.nan

    all_inflation = np.empty((STEPS, 3))
    all_inflation[:, :] = np.nan
    if settings["method"] == "etkf" and (settings["rho"] == "adaptive" or
                                         settings["rho"] == "adaptive_each"):
        obj_adaptive = etkf.init_etkf_adaptive_inflation()
    else:
        obj_adaptive = None

    # forecast-analysis cycle
    try:
        for i in range(STEP_FREE, STEPS):
            if settings["couple"] == "none" and settings["bc"] == "climatology":
                persis_bc = None
            elif settings["couple"] == "none" and settings[
                    "bc"] == "independent":
                persis_bc = all_obs[i - 1, :].copy()
            else:  # persistence BC
                persis_bc = np.mean(all_fcst[i - 1, :, :], axis=0)

            for m in range(0, settings["nmem"]):
                if settings["couple"] == "strong" or settings[
                        "couple"] == "weak":
                    fcst[m, :] = model.timestep(all_fcst[i - 1, m, :], DT)
                elif settings["couple"] == "none":
                    fcst[m, :n_atm] = model.timestep(
                        all_fcst[i - 1, m, :n_atm], DT, 0, n_atm, persis_bc)
                    fcst[m,
                         n_atm:] = model.timestep(all_fcst[i - 1, m,
                                                           n_atm:], DT, n_atm,
                                                  N_MODEL, persis_bc)

            if i % AINT == 0:
                obs_used[i, :] = all_obs[i, :]
                fcst_pre = all_fcst[i - AINT, :, :].copy()

                if settings["couple"] == "strong":
                    fcst[:, :], all_bf[i, :, :], all_ba[i, :, :], obj_adaptive = \
                        analyze_one_window(fcst, fcst_pre, all_obs[i, :], h, r, settings, obj_adaptive)
                elif settings["couple"] == "weak" or settings[
                        "couple"] == "none":
                    # atmospheric assimilation
                    fcst[:, :n_atm], all_bf[i, :n_atm, :n_atm], all_ba[i, :n_atm, :n_atm], obj_adaptive \
                        = analyze_one_window(fcst[:, :n_atm], fcst_pre[:, :n_atm],
                                             all_obs[i, :p_atm], h[:p_atm, :n_atm],
                                             r[:p_atm, :p_atm], settings, obj_adaptive, 0, n_atm, persis_bc)
                    # oceanic assimilation
                    fcst[:, n_atm:], all_bf[i, n_atm:, n_atm:], all_ba[i, n_atm:, n_atm:], obj_adaptive \
                        = analyze_one_window(fcst[:, n_atm:], fcst_pre[:, n_atm:],
                                             all_obs[i, p_atm:], h[p_atm:, n_atm:],
                                             r[p_atm:, p_atm:], settings, obj_adaptive, n_atm, N_MODEL, persis_bc)

            all_fcst[i, :, :] = fcst[:, :]
            if settings["method"] == "etkf" and (settings["rho"] == "adaptive"
                                                 or settings["rho"]
                                                 == "adaptive_each"):
                all_inflation[i, :] = obj_adaptive[0, :]
    except (np.linalg.LinAlgError, ValueError) as e:
        import traceback
        print("")
        print("ANALYSIS CYCLE DIVERGED: %s" % e)
        print("Settings: ", settings)
        print(
            "This experiment is terminated (see the error traceback below); continuing with the next experiments."
        )
        print("")
        traceback.print_exc()
        print("")

    # save to files
    obs_used.tofile("data/%s_obs.bin" % settings["name"])
    all_fcst.tofile("data/%s_cycle.bin" % settings["name"])
    all_bf.tofile("data/%s_covr_back.bin" % settings["name"])
    all_ba.tofile("data/%s_covr_anl.bin" % settings["name"])
    if settings["method"] == "etkf" and (settings["rho"] == "adaptive" or
                                         settings["rho"] == "adaptive_each"):
        all_inflation.tofile("data/%s_inflation.bin" % settings["name"])

    return all_fcst
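
A hedged sketch of a settings dict covering the keys this cycle reads; the key names come from the code above, while the values are illustrative only:

    settings = {
        "name": "etkf_strong",   # prefix of the data/*.bin output files
        "nmem": 4,               # ensemble size
        "method": "etkf",
        "rho": "adaptive",       # "adaptive", "adaptive_each", or another inflation setting
        "couple": "strong",      # "strong", "weak", or "none"
        "bc": "climatology",     # read only when couple == "none"; "climatology" or "independent"
    }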