# Third-party imports required by the functions below. Model- and
# observation-space helpers (N_MODEL, Scaler_obs, getr, get_yo,
# get_background_obs, get_h_matrix, static_b, assert_synoptic, obs_within,
# get_localization_weight, tdvar_cvt_2j) are assumed to be defined elsewhere
# in this package.
import numpy as np
from functools import partial
from numpy.linalg import inv
from scipy.linalg import sqrtm
from scipy.optimize import minimize


def tdvar(fcst, olist, sigma_b, t_anl, do_cvt=False):
    """3D-Var analysis of a single state, optionally via a control variable transform."""
    assert fcst.shape == (N_MODEL,)
    assert_synoptic(olist, t_anl)
    r_inv = np.linalg.inv(getr(olist))
    b_inv = np.linalg.inv(sigma_b**2 * static_b())
    if do_cvt:
        # control variable transform of Bannister (2008): minimize in v-space
        # with x = xb + L v, where B = L L^T
        l = np.linalg.cholesky(sigma_b**2 * static_b())
        first_guess = np.zeros(N_MODEL)
        yo = get_yo(olist)
        yb = get_background_obs(olist, fcst[None, None, :], t_anl, aint=1)
        d = yo - yb
        h = get_h_matrix(olist)
        hl = h @ l
        cf = partial(tdvar_cvt_2j, d=d, hl=hl, r_inv=r_inv)
        opt = minimize(cf, first_guess, method="bfgs")
        anl = fcst[:, None] + l @ opt.x[:, None]
        anl = anl[:, 0]
    else:
        first_guess = np.copy(fcst)
        cf = partial(tdvar_2j, fcst_in=fcst, olist=olist, r_inv=r_inv,
                     b_inv=b_inv, t_anl=t_anl)
        opt = minimize(cf, first_guess, method="bfgs")
        anl = opt.x
    return anl
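
# tdvar_cvt_2j is called above but is not defined in this section. The sketch
# below is only an assumption about the preconditioned cost it would evaluate:
# with increment delta_x = L v and B = L L^T, twice the cost becomes
# 2J(v) = v^T v + (H L v - d)^T R^-1 (H L v - d), which matches the keyword
# arguments d, hl and r_inv passed by tdvar. The name _tdvar_cvt_2j_sketch is
# illustrative, not part of the original module.
def _tdvar_cvt_2j_sketch(v_in, d, hl, r_inv):
    v = v_in[:, None]
    dep = hl @ v - d  # observation-space departure of the mapped increment
    twoj = v.T @ v + dep.T @ r_inv @ dep
    assert twoj.shape == (1, 1)
    return twoj[0, 0]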

def ensrf_single(fcst, obs, t_end, aint):
    """Serial EnSRF update of the ensemble trajectory with one scalar observation."""
    assert isinstance(obs, Scaler_obs)
    k_ens = fcst.shape[1]
    assert fcst.shape == (aint, k_ens, N_MODEL)
    assert isinstance(k_ens, int)
    assert isinstance(t_end, int)
    assert t_end - aint < obs.time <= t_end

    yo = np.empty((1, 1))
    yo[0, 0] = obs.val
    R = getr([obs])
    assert R.shape == (1, 1)
    yb_raw = get_background_obs([obs], fcst, t_end, aint)
    assert yb_raw.shape == (1, k_ens)

    # stack the whole assimilation window into one tall state vector per member
    X_raw = np.empty((aint * N_MODEL, k_ens))
    for t in range(aint):
        X_raw[t * N_MODEL:(t + 1) * N_MODEL, :] = fcst[t, :, :].T
    x_mean = np.mean(X_raw, axis=1)[:, None]
    X_ptb = X_raw - x_mean
    Ef = (k_ens - 1.0)**(-0.5) * X_ptb
    yb_mean = np.mean(yb_raw, axis=1)[:, None]
    HEf = (k_ens - 1.0)**(-0.5) * (yb_raw - yb_mean)

    # Kalman gain and mean update
    K = Ef @ HEf.T @ inv(HEf @ HEf.T + R)
    xa_mean = x_mean + K @ (yo - yb_mean)
    # perturbation update with the reduced gain alpha * K (cf. Whitaker and Hamill 2002)
    alpha = (1.0 + (R[0, 0] / ((HEf @ HEf.T) + R[0, 0]))**0.5)**(-1)
    Ea = (k_ens - 1.0)**0.5 * (Ef - alpha * K @ HEf)
    Xa = Ea + xa_mean
    assert Xa.shape == (aint * N_MODEL, k_ens)

    # unstack back to the (time, member, model) layout
    anl = np.empty((aint, k_ens, N_MODEL))
    for t in range(aint):
        anl[t, :, :] = Xa[t * N_MODEL:(t + 1) * N_MODEL, :].T
    return anl
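
# Usage sketch (an assumption, not part of the original module): ensrf_single
# assimilates exactly one scalar observation, so a full serial EnSRF pass
# would chain it over an observation list, feeding each analysis back in as
# the next background. The wrapper name is hypothetical.
def _serial_ensrf_sketch(fcst, olist, t_end, aint):
    anl = fcst
    for obs in olist:
        anl = ensrf_single(anl, obs, t_end, aint)
    return anl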

def tdvar_2j(anl_in, fcst_in, olist, r_inv, b_inv, t_anl):
    assert anl_in.shape == fcst_in.shape == (N_MODEL,)
    anl = anl_in[:, None]
    fcst = fcst_in[:, None]
    yo = get_yo(olist)
    yb = get_background_obs(olist, anl_in[None, None, :], t_anl, aint=1)
    twoj = (anl - fcst).T @ b_inv @ (anl - fcst) + \
           (yb - yo).T @ r_inv @ (yb - yo)
    assert twoj.shape == (1, 1)
    return twoj[0, 0]
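
# In matrix form, tdvar_2j evaluates twice the standard 3D-Var cost,
#   2J(x) = (x - xb)^T B^-1 (x - xb) + (H(x) - yo)^T R^-1 (H(x) - yo),
# where xb is fcst_in, H(x) is obtained through get_background_obs, and
# b_inv and r_inv are the precomputed inverses of B and R.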

def tdvar_analytic(fcst, olist, sigma_b, t_anl):
    """Closed-form 3D-Var analysis (direct solution, no iterative minimization)."""
    assert fcst.shape == (N_MODEL,)
    assert_synoptic(olist, t_anl)
    yb = get_background_obs(olist, fcst[None, None, :], t_anl, aint=1)
    yo = get_yo(olist)
    d = yo - yb
    r_inv = np.linalg.inv(getr(olist))
    b_inv = np.linalg.inv(sigma_b**2 * static_b())
    h = get_h_matrix(olist)
    # analysis increment from the normal equations:
    # delta_x = (B^-1 + H^T R^-1 H)^-1 H^T R^-1 d
    delta_x = np.linalg.inv(b_inv + h.T @ r_inv @ h) @ h.T @ r_inv @ d
    anl = fcst[:, None] + delta_x
    assert anl.shape == (N_MODEL, 1)
    return anl[:, 0]
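
# For reference (a sketch, not part of the original code): with a linear
# observation operator the increment above is algebraically equivalent to the
# gain form delta_x = K d with K = B H^T (H B H^T + R)^-1, which avoids
# inverting B directly. The helper name below is illustrative only.
def _tdvar_gain_form_sketch(fcst, olist, sigma_b, t_anl):
    b = sigma_b**2 * static_b()
    h = get_h_matrix(olist)
    r = getr(olist)
    d = get_yo(olist) - get_background_obs(olist, fcst[None, None, :], t_anl, aint=1)
    k = b @ h.T @ np.linalg.inv(h @ b @ h.T + r)
    return fcst + (k @ d)[:, 0]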

def letkf(fcst, olist, rho, l_loc, t_end, aint):
    """LETKF analysis at t_end (rho: multiplicative inflation, l_loc: localization scale).

    Returns the analysis ensemble with shape (k_ens, N_MODEL).
    """
    k_ens = fcst.shape[1]
    assert isinstance(olist, list)
    p_obs = len(olist)
    assert fcst.shape == (aint, k_ens, N_MODEL)
    assert isinstance(rho, float)
    assert isinstance(k_ens, int)
    assert isinstance(l_loc, (int, float))
    assert isinstance(t_end, int)

    i_mm = np.identity(k_ens)
    i_1m = np.ones((1, k_ens))

    yo = np.empty((p_obs, 1))
    for j in range(p_obs):
        assert t_end - aint < olist[j].time <= t_end
        yo[j, 0] = olist[j].val
    r = getr(olist)

    # background ensemble at the analysis time and its observation-space counterpart
    xf_raw = fcst[-1, :, :].T
    xf = np.mean(xf_raw, axis=1)[:, np.newaxis]
    xfpt = xf_raw - xf @ i_1m
    yb_raw = get_background_obs(olist, fcst, t_end, aint)
    yb = np.mean(yb_raw, axis=1)[:, np.newaxis]
    ybpt = yb_raw[:, :] - yb[:, :]

    xai = np.zeros((k_ens, N_MODEL))
    for i in range(N_MODEL):
        # step 3: select observations within the localization radius of grid point i
        ind = obs_within(i, l_loc, olist)
        lw = get_localization_weight(ind, i, l_loc, olist)
        yol = yo[ind, :]
        ybl = yb[ind, :]
        ybptl = ybpt[ind, :]
        xfl = xf[i:i + 1, :]
        xfptl = xfpt[i:i + 1, :]
        rl = r[ind, :][:, ind]
        # steps 4-9: analysis weights in ensemble space, then the local analysis ensemble
        cl = ybptl.T @ (np.linalg.inv(rl) * lw)
        pal = np.linalg.inv(((k_ens - 1.0) / rho) * i_mm + cl @ ybptl)
        waptl = np.real(sqrtm((k_ens - 1.0) * pal))
        wal = pal @ cl @ (yol - ybl)
        xail = xfl @ i_1m + xfptl @ (wal @ i_1m + waptl)
        assert xail.shape == (1, k_ens)
        xai[:, i] = xail[0, :]
    return xai
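
# The per-grid-point loop above computes, in ensemble space (the numbered
# "step" comments presumably follow the LETKF algorithm of Hunt et al. 2007):
#   C        = Yb_loc^T R_loc^-1                  (localization weights applied)
#   Pa_tilde = [ (k-1)/rho I + C Yb_loc ]^-1
#   Wa       = [ (k-1) Pa_tilde ]^(1/2)
#   wa_bar   = Pa_tilde C (yo_loc - yb_bar_loc)
#   xa_i     = xb_bar_i + Xb_i (wa_bar 1 + Wa)
# so that cl, pal, waptl and wal correspond to C, Pa_tilde, Wa and wa_bar.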