def _next_time_step(self, prev_xa, prev_Pa, xo_in_AW):
    """Extended-Kalman-filter update over a whole assimilation window.

    Each observation time j = 0..J-1 in the window is assimilated in turn,
    updating the analysis state and covariance that are both valid at the
    start of the window (time t=-1).

    Args:
        prev_xa: analysis state at the start of the window (t=-1).
        prev_Pa: analysis error covariance at the start of the window.
        xo_in_AW: observations over the window, one row per time level
            (NaN where missing — presumably consumed by ffa.get_H /
            ffa.left_aligned; TODO confirm).

    Returns:
        (xa_in_AW, xa, the_Pa) — see the comments before the return.
    """
    N = prev_xa.size
    xa = prev_xa
    Pa = prev_Pa
    for j in range(self.J):
        # Observation operator and left-aligned (missing-stripped) obs at time j.
        H = ffa.get_H(xo_in_AW[j])
        the_xo = ffa.left_aligned(xo_in_AW[j])
        # Forecast from the t=-1 analysis forward (j+1) intervals to the
        # observation time j, plus the tangent-linear model M over that span.
        the_xf = self.l96.run(xa, days=self.assim_interval_days * (j + 1))
        M = self.l96.jacobian(xa, days=self.assim_interval_days * (j + 1))
        # Kalman gain through the linearized propagation H @ M.
        PaMTHT = Pa @ M.T @ H.T
        K = PaMTHT @ np.linalg.inv(H @ M @ PaMTHT + self.R)
        the_xa = xa + K @ (the_xo - (H @ the_xf))
        the_Pa = (np.identity(N) - K @ H @ M) @ Pa
        # Update xa and Pa inside the for loop, so that each later
        # observation is assimilated on top of the previous ones.
        xa = the_xa
        Pa = the_Pa
    xa_in_AW = self.l96.get_xf_in_AW(xa, self.J, days=self.assim_interval_days)
    # Propagate the covariance through the whole window and inflate it.
    M = self.l96.jacobian(xa, days=self.assim_interval_days * self.J)
    the_Pa = (1 + self.delta) * M @ Pa @ M.T
    # xa is the analysis at time t=-1.
    # xa_in_AW holds the analyses at times t=0..J-1.
    # the_Pa is the analysis error covariance matrix at time t=J-1.
    return xa_in_AW, xa, the_Pa
def _next_time_step(self, prev_Xa, the_xo):
    """One perturbed-observation EnKF cycle.

    Forecasts the ensemble over the assimilation interval, builds the gain
    from the (inflated) forecast anomalies, and updates every member with
    its own perturbed copy of the observations.

    Args:
        prev_Xa: previous analysis ensemble, shape (N, m).
        the_xo: observation vector (NaN where missing).

    Returns:
        The analysis ensemble, shape (N, m).
    """
    H = ffa.get_H(the_xo)
    forecast = self.l96.ensemble_run(prev_Xa, days=self.assim_interval_days)
    mean_f = np.mean(forecast, axis=1)
    # Multiplicative inflation is folded into the forecast anomalies.
    anoms = np.sqrt(1 + self.delta) * (forecast - mean_f[:, None])
    obs_anoms = H @ anoms
    gain = anoms @ obs_anoms.T @ np.linalg.inv(
        obs_anoms @ obs_anoms.T + (self.m - 1) * self.R)
    obs = ffa.left_aligned(the_xo)
    # One perturbation column per member (alpha = 1 in theory); the matrix
    # form below vectorizes the per-member update loop.
    perturb = self.alpha * np.random.randn(self.p, self.m)
    return forecast + gain @ (obs[:, None] + perturb - H @ forecast)
def _next_time_step_estimating_F(prev_xa, prev_F, prev_Pa, the_xo, R, delta,
                                 delta_of_F, dt=0.01, days=0.25, KSC=1e+5):
    """Extended-KF step on the state augmented with the scalar forcing F,
    so that F is estimated alongside the model state.

    Args:
        prev_xa: previous analysis state (length N).
        prev_F: previous forcing estimate (scalar).
        prev_Pa: previous analysis covariance of the augmented state.
        the_xo: observation vector (NaN where missing).
        R: observation error covariance.
        delta: inflation factor for the state part.
        delta_of_F: separate inflation factor for the F cross-term row.
        dt, days: integration step and span.
        KSC: sentinel constant passed through to ffa helpers.

    Returns:
        (the_xa, the_Pa, the_F): analysis state, covariance, and forcing.
    """
    N = prev_xa.size  # number of analysed grid points
    xf = Lorenz96.run(prev_xa, F=prev_F, dt=dt, days=days)
    M = Lorenz96.jacobian_for_s(prev_xa, dt=dt, days=days, F=prev_F)
    # Inflate; only the row belonging to F uses its own inflation factor.
    Pf = (1 + delta) * M @ prev_Pa @ M.T
    Pf[N, :] = (1 + delta_of_F) * Pf[N, :] / (1 + delta)
    # Inflating the F column or the (F, F) diagonal entry as well makes the
    # filter diverge, so only the cross-term row is treated as the F part:
    # Pf[:, N] = (1 + delta_of_F) * Pf[:, N] / (1 + delta)
    # Pf[N, N] = (1 + delta_of_F) * Pf[N, N] / (1 + delta)
    H = ffa.get_H(the_xo, KSC=KSC)
    H = ffa.get_H_for_s(H, 1)  # extend H onto the augmented (x, F) state
    K = Pf @ H.T @ np.linalg.inv(H @ Pf @ H.T + R)
    sf = np.hstack((xf, prev_F))
    yo = ffa.left_aligned(the_xo, KSC=KSC)
    sa = sf + K @ (yo - H @ sf)
    Pa = (np.identity(K.shape[0]) - K @ H) @ Pf
    # Split the augmented analysis back into state part and forcing part.
    return sa[:N], Pa, sa[-1]
def _next_time_step(self, prev_xa, prev_Pa, the_xo):
    """One extended-KF cycle that also accumulates the statistics used to
    estimate the inflation factor delta and the observation covariance R.

    Args:
        prev_xa: previous analysis state.
        prev_Pa: previous analysis error covariance.
        the_xo: observation vector (NaN where missing).

    Returns:
        (the_xa, the_Pa): new analysis state and covariance.

    Side effects:
        Writes this cycle's covariance statistics and forecast covariance
        into self.Cov_for_est_Pf, self.Cov_for_est_Pf2, self.Cov_for_est_R
        and self.Pf at index self.l.
    """
    # --- Forecast ---
    xf = self.l96.run(prev_xa, days=self.assim_interval_days)
    # --- Kalman gain ---
    M = self.l96.jacobian(prev_xa, days=self.assim_interval_days)
    Pf = (1 + self.delta) * (M @ prev_Pa @ M.T)
    H = ffa.get_H(the_xo)
    K = Pf @ H.T @ np.linalg.inv(H @ Pf @ H.T + self.R)
    # --- Analysis ---
    yo = ffa.left_aligned(the_xo)
    innov = yo - H @ xf  # observation-minus-background
    xa = xf + K @ innov
    Pa = (np.identity(self.N) - K @ H) @ Pf
    # --- Statistics for estimating the inflation factor delta ---
    incr = xa - xf  # analysis-minus-background
    cov_Pf1 = using_jit.cal_covmat(H @ incr, innov)
    cov_Pf2 = using_jit.cal_covmat(innov, innov) - self.R
    # --- Statistics for estimating R ---
    resid = yo - H @ xa  # observation-minus-analysis
    cov_R = using_jit.cal_covmat(resid, innov)
    # --- Save the per-cycle estimates ---
    self.Cov_for_est_Pf[self.l] = cov_Pf1
    self.Cov_for_est_Pf2[self.l] = cov_Pf2
    self.Cov_for_est_R[self.l] = cov_R
    self.Pf[self.l] = Pf
    return xa, Pa
def _next_time_step(self, prev_Enxa, the_xo):
    """One particle-filter cycle: forecast, weight, resample.

    Args:
        prev_Enxa: previous particle ensemble, shape (N, m).
        the_xo: observation vector (NaN where missing).

    Returns:
        The resampled (analysis) particle ensemble.

    Side effects:
        Stores this cycle's weights, forecast ensemble, and forecast mean
        in self.w_timeseries, self.Enxf and self.Xf at index self.l.
    """
    # Forecast every particle over the assimilation interval.
    particles = self.l96.ensemble_run(prev_Enxa, days=self.assim_interval_days)
    # Additive noise on the forecast (guards against filter degeneracy).
    particles += self.delta * np.random.randn(*particles.shape)
    # Observation operator; note the observed grid indices, then strip the
    # missing entries from the observation vector.
    H = ffa.get_H(the_xo)
    observed_idx = np.arange(self.N)[~np.isnan(the_xo)]
    obs = ffa.left_aligned(the_xo)
    # Likelihood of each particle, turned into normalized weights.
    weights = get_w(self.likelihood(particles, obs, H))
    # Weighted resampling of the particles gives the analysis ensemble.
    analysis = resampling(particles, weights, observed_idx)
    # Keep per-cycle diagnostics on the instance.
    self.w_timeseries[self.l] = weights
    self.Enxf[self.l] = particles
    self.Xf[self.l] = np.mean(particles, axis=1)
    return analysis
def _next_time_step(self, prev_xa, xo_in_AW):
    """One 4D-Var cycle: find the initial state x minimizing the cost of
    fitting both the background prev_xa and every observation in the
    assimilation window, then return the analysis trajectory.

    Args:
        prev_xa: background (previous analysis) state at the window start.
        xo_in_AW: observations over the window, one row per time level
            (NaN where missing).

    Returns:
        (xa_in_AW, x0): the analysis trajectory over the window and the
        optimized initial state.

    Raises:
        ValueError: if self.optimization_method is unknown, or if the
            trajectory consistency check at the end fails.
    """
    # NOTE(review): the original comment said "jedit_flg = 0: display
    # nothing; = 1: show the cost J decreasing" — jedit_flg is not
    # referenced anywhere in this method, so that comment appears stale.
    days = self.assim_interval_days
    J = self.J
    N = self.N
    p = self.p
    # Pre-compute observation operators and left-aligned observations for
    # every time level in the window.
    H_in_AW = np.zeros((J, p, N))
    xo_in_AW_aligned = np.zeros((J, p))
    for j in range(J):
        xo_in_AW_aligned[j] = ffa.left_aligned(xo_in_AW[j])
        H_in_AW[j] = ffa.get_H(xo_in_AW[j])

    # Reference: http://org-technology.com/posts/scipy-unconstrained-minimization-of-multivariate-scalar-functions.html
    def ObjectiveFunction(x):
        # 4D-Var cost: background term + sum of observation terms.
        xf_in_AW = self.l96.get_xf_in_AW(x, J, days)
        obs_term = 0.0
        for j in range(J):
            d = H_in_AW[j] @ xf_in_AW[j] - xo_in_AW_aligned[j]
            obs_term += 0.5 * d @ self.Rinv @ d
        return 0.5 * (x - prev_xa) @ self.Binv @ (x - prev_xa) + obs_term

    def gradient(x):
        # Gradient of the cost with respect to the initial state x.
        xf_in_AW = self.l96.get_xf_in_AW(x, J, days)
        M_in_AW = self.l96.get_M_in_AW(x, J, days, analysis=self.analysis)
        obs_term = 0.0
        for j in range(J):
            Mj = M_in_AW[j]
            Hj = H_in_AW[j]
            # Linear adjoint model: M.T maps the innovation back to t=0.
            obs_term += Mj.T @ Hj.T @ self.Rinv @ (Hj @ xf_in_AW[j] - xo_in_AW_aligned[j])
        return self.Binv @ (x - prev_xa) + obs_term

    # The starting point could be random, but prev_xa converges faster.
    x0 = prev_xa
    if self.optimization_method == 'l-bfgs':
        # Minimize ObjectiveFunction with scipy; the minimizer becomes x0.
        res = minimize(ObjectiveFunction, x0, jac=gradient, method='l-bfgs-b')
        x0 = res.x
    elif self.optimization_method == 'steepest':
        # Hand-written steepest descent, for comparison.
        x0 = self.steepest_descent_method(gradient, x0)
    else:
        raise ValueError("your optimization method is not valid!")
    xa_in_AW = self.l96.get_xf_in_AW(x0, J, days)
    # Sanity check: the window trajectory must agree with a direct run.
    if abs(xa_in_AW[J - 1, 0] - self.l96.run(x0, J * days)[0]) > 1e-14:
        raise ValueError('your calculation is not valid!')
    return xa_in_AW, x0
def _next_time_step(self, prev_Xa, xo_in_AW):
    """Serial ensemble square-root update over the assimilation window:
    observations are assimilated one time level, and within each level one
    observation point, at a time.

    Args:
        prev_Xa: previous analysis ensemble, shape (N, m).
        xo_in_AW: observations over the window, one row per time level
            (NaN where missing).

    Returns:
        (xa_in_AW, xa, the_Xa): analysis-mean trajectory over the window,
        analysis mean at the window start, and the analysis ensemble
        propagated to the end of the window.
    """
    Xa = prev_Xa
    for j in range(self.J):
        # Inside this loop the observations within the assimilation window
        # are folded in one time level at a time, updating Xa.
        # Model grid points where an observation exists at time j.
        Not_ms = np.arange(self.N)[~np.isnan(xo_in_AW[j])]
        Xf = self.l96.ensemble_run(Xa, days=self.assim_interval_days * (j + 1))
        Xf_bar = np.average(Xf, axis=1)
        dXf = Xf - Xf_bar[:, None]
        Pf = dXf @ dXf.T / (self.m - 1)
        the_xo = ffa.left_aligned(xo_in_AW[j])
        for i in range(self.p):
            # for loop Obs start
            obs_point = Not_ms[i]
            RHO = self.k_localizer.get_rho(obs_point)  # localization weights
            Xa_bar = np.average(Xa, axis=1)
            if i == 0:
                # Inflation is applied once per time level, on the first obs.
                dXa = np.sqrt(1 + self.delta) * (Xa - Xa_bar[:, None])
            else:
                dXa = Xa - Xa_bar[:, None]
            # Build the observation operator for this single point:
            x_for_making_H = np.full(self.N, np.nan)
            x_for_making_H[obs_point] = 0.0  # only the used obs point is non-KSC
            H = ffa.get_H(x_for_making_H)  # H for a single observation
            # (end of single-point H construction)
            dYf = H @ dXf
            localR = self.R[i, i]  # scalar
            localPf = Pf[obs_point, obs_point]  # scalar
            # NOTE(review): dXf/Pf are not refreshed as Xa is updated within
            # the inner loop — presumably the standard serial approximation;
            # confirm against the algorithm reference.
            K = RHO[:, None] * dXa @ dYf.T / ((self.m - 1) * (localPf + localR))
            # Ensemble update (replace the first-guess mean).
            Xa_bar_new = Xa_bar + K @ (the_xo[i] - H @ Xf_bar)
            alpha = 1.0 / (1.0 + np.sqrt(localR / (localR + localPf)))
            K_childa = alpha * K
            dXa_new = dXa - K_childa @ H @ dXf
            # Ensemble update.
            Xa = Xa_bar_new[:, None] + dXa_new
            # for loop Obs end
    xa = np.average(Xa, axis=1)
    xa_in_AW = self.l96.get_xf_in_AW(xa, self.J, days=self.assim_interval_days)
    the_Xa = self.l96.ensemble_run(Xa, days=self.assim_interval_days * self.J)
    return xa_in_AW, xa, the_Xa
def _next_time_step(self, prev_xa, prev_Pa, next_xo):
    """One-step Kalman smoother: assimilate the observation at time t=T to
    produce the analysis at time t=0.

    The forecast one interval beyond the analysis time is needed to form
    the innovation against the future observation.

    Args:
        prev_xa: previous analysis state.
        prev_Pa: previous analysis error covariance.
        next_xo: observation at the next time level (NaN where missing).

    Returns:
        (the_xa, the_Pa): analysis state and covariance at t=0.
    """
    # Forecast to the analysis time and one interval beyond it.
    xf = self.l96.run(prev_xa, days=self.assim_interval_days)
    xf_next = self.l96.run(xf, days=self.assim_interval_days)
    # Inflated forecast covariance at the analysis time.
    M = self.l96.jacobian(prev_xa, days=self.assim_interval_days)
    Pf = (1 + self.delta) * M @ prev_Pa @ M.T
    H = ffa.get_H(next_xo)
    # Gain that maps the future innovation back through M.
    # NOTE(review): M is the tangent-linear over [t=-1, t=0] yet is reused
    # for the [t=0, t=T] propagation — confirm this is intended.
    PfMtHt = Pf @ M.T @ H.T
    K = PfMtHt @ np.linalg.inv(H @ M @ PfMtHt + self.R)
    yo_next = ffa.left_aligned(next_xo)
    xa = xf + K @ (yo_next - H @ xf_next)
    Pa = (np.identity(self.N) - K @ H @ M) @ Pf
    return xa, Pa
def _next_time_step_estimating_dynamicF(prev_xa, prev_F, prev_Pa, the_xo, R,
                                        dt=0.01, days=0.25, delta=0.0,
                                        delta_of_F=1e-2, KSC=1e+5):
    """Extended-KF step on the state augmented with a dynamic forcing
    vector F, estimating F alongside the model state.

    Args:
        prev_xa: previous analysis state (length N).
        prev_F: previous forcing estimate (array).
        prev_Pa: previous covariance of the augmented (x, F) state.
        the_xo: observation vector (NaN where missing).
        R: observation error covariance.
        dt, days: integration step and span.
        delta: inflation factor for the state part.
        delta_of_F: separate inflation factor for the F rows.
        KSC: sentinel constant passed through to ffa helpers.

    Returns:
        (the_xa, the_F, the_Pa, the_xf, the_Pf): analysis state, analysed
        forcing, analysis covariance, forecast state, forecast covariance.
    """
    N = prev_xa.size  # number of analysed grid points
    xf = Lorenz96.run_dynamic_F(prev_xa, F=prev_F, dt=dt, days=days)
    M = Lorenz96.jacobian_dynamic_F(prev_xa, dt=dt, days=days, F=prev_F)
    # Inflate; the rows belonging to F get their own inflation factor.
    Pf = (1 + delta) * M @ prev_Pa @ M.T
    Pf[N:, :] = (1 + delta_of_F) * Pf[N:, :] / (1 + delta)
    H = ffa.get_H(the_xo, KSC=KSC)
    H = ffa.get_H_for_s(H, prev_F.size)  # extend H onto the augmented state
    K = Pf @ H.T @ np.linalg.inv(H @ Pf @ H.T + R)
    sf = np.hstack((xf, prev_F))
    yo = ffa.left_aligned(the_xo, KSC=KSC)
    sa = sf + K @ (yo - H @ sf)
    Pa = (np.identity(K.shape[0]) - K @ H) @ Pf
    # Split the augmented analysis back into state part and forcing part.
    return sa[:N], sa[N:], Pa, xf, Pf
def _next_time_step(self, prev_Xa, next_xo):
    """Serial ensemble square-root smoother: assimilate the observation at
    time 6h to compute the analysis ensemble valid at time 0h.

    Args:
        prev_Xa: previous analysis ensemble, shape (N, m).
        next_xo: observation at the next time level (NaN where missing).

    Returns:
        The analysis ensemble Xa at the current time.
    """
    # Assimilate the t=6h observation to obtain the t=0h analysis.
    Not_ms = np.arange(self.N)[~np.isnan(next_xo)]  # observed model grid points
    the_Xf = self.l96.ensemble_run(prev_Xa, days=self.assim_interval_days)
    next_Xf = self.l96.ensemble_run(the_Xf, days=self.assim_interval_days)
    next_xo = ffa.left_aligned(next_xo)
    next_Xf_bar = np.average(next_Xf, axis=1)
    next_dXf = next_Xf - next_Xf_bar[:, None]
    next_Pf = next_dXf @ next_dXf.T / (self.m - 1)
    for i in range(self.p):
        obs_point = Not_ms[i]
        RHO = self.k_localizer.get_rho(obs_point)  # localization weights
        the_Xf_bar = np.average(the_Xf, axis=1)
        if i == 0:
            # Inflation is applied once, on the first observation only.
            the_dXf = np.sqrt(1 + self.delta) * (the_Xf - the_Xf_bar[:, None])
        else:
            the_dXf = the_Xf - the_Xf_bar[:, None]
        # Build the observation operator for this single point:
        x_for_making_H = np.full(self.N, np.nan)
        x_for_making_H[obs_point] = 0.0  # only the used obs point is non-KSC
        H = ffa.get_H(x_for_making_H)  # H for a single observation
        # (end of single-point H construction)
        next_dYf = H @ next_dXf
        localR = self.R[i, i]  # scalar
        localPf = next_Pf[obs_point, obs_point]  # scalar
        # NOTE(review): next_dXf/next_Pf are not refreshed as the_Xf is
        # updated serially — presumably the standard serial approximation;
        # confirm against the algorithm reference.
        K = RHO[:, None] * the_dXf @ next_dYf.T / ((self.m - 1) * (localPf + localR))
        # Ensemble update (replace the first-guess mean).
        Xa_bar = the_Xf_bar + K @ (next_xo[i] - H @ next_Xf_bar)
        alpha = 1.0 / (1.0 + np.sqrt(localR / (localR + localPf)))
        K_childa = alpha * K
        dXa = the_dXf - K_childa @ H @ next_dXf
        # Ensemble update.
        the_Xf = Xa_bar[:, None] + dXa
        Xa = Xa_bar[:, None] + dXa
    return Xa
def _next_time_step(self, prev_xa, the_xo):
    """One analysis cycle with a static background covariance self.B that
    also returns the statistics used to estimate B and R.

    Note: instead of B itself, H B H.T is what gets estimated.

    Args:
        prev_xa: previous analysis state.
        the_xo: observation vector (NaN where missing).

    Returns:
        (the_xa, est_B1, est_B2, Cov_for_est_R): the analysis state and the
        three covariance statistics.
    """
    xf = self.l96.run(prev_xa, days=self.assim_interval_days)
    H = ffa.get_H(the_xo)
    # Gain from the static background covariance self.B.
    K = self.B @ H.T @ (np.linalg.inv(H @ self.B @ H.T + self.R))
    yo = ffa.left_aligned(the_xo)
    innov = yo - H @ xf  # observation-minus-background
    xa = xf + K @ innov
    # Statistics for estimating B (strictly, H B H.T).
    incr = xa - xf  # analysis-minus-background
    est_HBHt_1 = using_jit.cal_covmat(H @ incr, innov)
    est_HBHt_2 = using_jit.cal_covmat(innov, innov) - self.R
    # Statistics for estimating R.
    resid = yo - H @ xa  # observation-minus-analysis
    est_R = using_jit.cal_covmat(resid, innov)
    return xa, est_HBHt_1, est_HBHt_2, est_R
def _next_time_step(self, prev_Xa, xo_in_AW):
    """ETKF update that assimilates every observation time in the
    assimilation window sequentially.

    Args:
        prev_Xa: previous analysis ensemble, shape (N, m).
        xo_in_AW: observations over the window, one row per time level
            (NaN where missing).

    Returns:
        (xa_in_AW, xa, the_Xa): analysis-mean trajectory over the window,
        analysis mean at the window start, and the analysis ensemble
        propagated to the end of the window.
    """
    Xa = prev_Xa
    for j in range(self.J):
        # Fold the observations at window time j into Xa.
        H = ffa.get_H(xo_in_AW[j])
        yo = ffa.left_aligned(xo_in_AW[j])
        mean_a = np.average(Xa, axis=1)
        dXa = Xa - mean_a[:, None]
        Xf = self.l96.ensemble_run(Xa, days=self.assim_interval_days * (j + 1))
        mean_f = np.average(Xf, axis=1)
        dYf = H @ (Xf - mean_f[:, None])
        A = (self.m - 1) / self.rho * np.identity(self.m) + dYf.T @ self.Rinv @ dYf
        U, D = ffa.Eigenvalue_decomp(A)
        # D is diagonal, so invert it (and take the matrix square root)
        # directly on the diagonal — faster than np.linalg.inv.
        Dinv = np.diag(1. / np.diag(D))
        K = dXa @ U @ Dinv @ U.T @ dYf.T @ self.Rinv
        T = np.sqrt(self.m - 1) * U @ np.sqrt(Dinv) @ U.T
        mean_incr = K @ (yo - H @ mean_f)
        # Broadcasting replaces the explicit (N, m) tiling of the mean terms.
        Xa = (mean_a + mean_incr)[:, None] + dXa @ T
    xa = np.average(Xa, axis=1)
    xa_in_AW = self.l96.get_xf_in_AW(xa, self.J, days=self.assim_interval_days)
    the_Xa = self.l96.ensemble_run(Xa, days=self.assim_interval_days * self.J)
    return xa_in_AW, xa, the_Xa
def _next_time_step(self, prev_Xa, next_xo):
    """ETKF-based one-step smoother: the observation one interval ahead is
    assimilated to update the ensemble at the current time.

    Args:
        prev_Xa: previous analysis ensemble, shape (N, m).
        next_xo: observation at the next time level (NaN where missing).

    Returns:
        The analysis ensemble Xa at the current time.
    """
    H = ffa.get_H(next_xo)
    yo = ffa.left_aligned(next_xo)
    # Ensemble forecast to the current time and one interval further.
    Xf_now = self.l96.ensemble_run(prev_Xa, days=self.assim_interval_days)
    mean_now = np.average(Xf_now, axis=1)
    dX_now = Xf_now - mean_now[:, None]
    Xf_next = self.l96.ensemble_run(Xf_now, days=self.assim_interval_days)
    mean_next = np.average(Xf_next, axis=1)
    dY_next = H @ (Xf_next - mean_next[:, None])
    A = (self.m - 1) / self.rho * np.identity(
        self.m) + dY_next.T @ self.Rinv @ dY_next
    U, D = ffa.Eigenvalue_decomp(A)
    # D is diagonal, so invert it (and take the matrix square root)
    # directly on the diagonal — faster than np.linalg.inv.
    Dinv = np.diag(1. / np.diag(D))
    K = dX_now @ U @ Dinv @ U.T @ dY_next.T @ self.Rinv
    T = np.sqrt(self.m - 1) * U @ np.sqrt(Dinv) @ U.T
    mean_incr = K @ (yo - H @ mean_next)
    # Broadcasting replaces the explicit (N, m) tiling of the mean terms.
    return (mean_now + mean_incr)[:, None] + dX_now @ T