def assimilate(self, HMM, xx, yy):
    """Ensemble Rauch-Tung-Striebel-type smoother.

    Runs an EnKF forward pass (storing forecast and analysis ensembles for
    every time step), then regresses the smoothing increments backward in time.
    """
    # Full time series of ensembles; Ef keeps the pure forecasts.
    E = zeros((HMM.tseq.K + 1, self.N, HMM.Dyn.M))
    Ef = E.copy()
    E[0] = HMM.X0.sample(self.N)

    # Forward pass
    for k, ko, t, dt in progbar(HMM.tseq.ticker):
        E[k] = HMM.Dyn(E[k - 1], t - dt, dt)
        E[k] = add_noise(E[k], dt, HMM.Dyn.noise, self.fnoise_treatm)
        Ef[k] = E[k]  # store forecast before any analysis overwrite

        if ko is not None:
            self.stats.assess(k, ko, 'f', E=E[k])
            Eo = HMM.Obs(E[k], t)
            y = yy[ko]
            E[k] = EnKF_analysis(E[k], Eo, HMM.Obs.noise, y,
                                 self.upd_a, self.stats, ko)
            E[k] = post_process(E[k], self.infl, self.rot)
            self.stats.assess(k, ko, 'a', E=E[k])

    # Backward pass
    for k in progbar(range(HMM.tseq.K)[::-1]):
        A = center(E[k])[0]        # analysis anomalies at k
        Af = center(Ef[k + 1])[0]  # forecast anomalies at k+1
        # Regression ("smoother gain") of increments from k+1 onto k.
        J = tinv(Af) @ A
        J *= self.DeCorr  # optional de-correlation damping
        E[k] += (E[k + 1] - Ef[k + 1]) @ J

    for k, ko, _, _ in progbar(HMM.tseq.ticker, desc='Assessing'):
        self.stats.assess(k, ko, 'u', E=E[k])
        if ko is not None:
            self.stats.assess(k, ko, 's', E=E[k])
def assimilate(self, HMM, xx, yy):
    """Fixed-lag ensemble Kalman smoother (EnKS).

    At each obs. time, the analysis update is applied jointly to the whole
    time window of ensembles within `Lag` (via reshaping to a single ensemble).
    """
    Dyn, Obs, chrono, X0, stats = \
        HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats

    # Inefficient version, storing full time series ensemble.
    # See iEnKS for a "rolling" version.
    E = zeros((chrono.K+1, self.N, Dyn.M))
    E[0] = X0.sample(self.N)

    for k, kObs, t, dt in progbar(chrono.ticker):
        E[k] = Dyn(E[k-1], t-dt, dt)
        E[k] = add_noise(E[k], dt, Dyn.noise, self.fnoise_treatm)

        if kObs is not None:
            stats.assess(k, kObs, 'f', E=E[k])
            Eo = Obs(E[k], t)
            y = yy[kObs]

            # Inds within Lag
            kk = range(max(0, k-self.Lag*chrono.dkObs), k+1)

            # Stack the lag window into one "augmented state" ensemble,
            # update it with a single EnKF analysis, then unstack.
            EE = E[kk]
            EE = self.reshape_to(EE)
            EE = EnKF_analysis(EE, Eo, Obs.noise, y, self.upd_a, stats, kObs)
            E[kk] = self.reshape_fr(EE, Dyn.M)
            E[k] = post_process(E[k], self.infl, self.rot)
            stats.assess(k, kObs, 'a', E=E[k])

    for k, kObs, _, _ in progbar(chrono.ticker, desc='Assessing'):
        stats.assess(k, kObs, 'u', E=E[k])
        if kObs is not None:
            stats.assess(k, kObs, 's', E=E[k])
def fun_k(x0, k, *args, **kwargs):
    """Apply `func` recursively `k` times, returning the full trajectory (incl. `x0`)."""
    traj = np.zeros((k + 1, ) + x0.shape)
    traj[0] = x0

    # Optionally wrap the iteration in a progress bar, per `prog`.
    steps = range(k)
    if isinstance(prog, str):
        steps = progbar(steps, prog)
    elif prog:
        steps = progbar(steps, 'Recurs.')

    for step in steps:
        traj[step + 1] = func(traj[step], *args, **kwargs)

    return traj
def assimilate(self, HMM, xx, yy):
    """Extended Rauch-Tung-Striebel smoother: ExtKF forward pass + RTS backward pass."""
    Nx = HMM.Dyn.M
    R = HMM.Obs.noise.C.full
    Q = 0 if HMM.Dyn.noise.C == 0 else HMM.Dyn.noise.C.full

    # Analysis means/covariances for every time step.
    mu = np.zeros((HMM.tseq.K+1, Nx))
    P = np.zeros((HMM.tseq.K+1, Nx, Nx))

    # Forecasted values
    muf = np.zeros((HMM.tseq.K+1, Nx))
    Pf = np.zeros((HMM.tseq.K+1, Nx, Nx))
    Ff = np.zeros((HMM.tseq.K+1, Nx, Nx))

    mu[0] = HMM.X0.mu
    P[0] = HMM.X0.C.full

    self.stats.assess(0, mu=mu[0], Cov=P[0])

    # Forward pass
    for k, ko, t, dt in progbar(HMM.tseq.ticker, 'ExtRTS->'):
        mu[k] = HMM.Dyn(mu[k-1], t-dt, dt)
        F = HMM.Dyn.linear(mu[k-1], t-dt, dt)  # tangent-linear model
        P[k] = self.infl**(dt)*(F@P[k-1]@F.T) + dt*Q

        # Store forecast and Jacobian
        muf[k] = mu[k]
        Pf[k] = P[k]
        Ff[k] = F

        if ko is not None:
            self.stats.assess(k, ko, 'f', mu=mu[k], Cov=P[k])
            H = HMM.Obs.linear(mu[k], t)
            KG = mrdiv(P[k] @ H.T, H@P[k]@H.T + R)
            y = yy[ko]
            mu[k] = mu[k] + KG@(y - HMM.Obs(mu[k], t))
            KH = KG@H
            P[k] = (np.eye(Nx) - KH) @ P[k]
            self.stats.assess(k, ko, 'a', mu=mu[k], Cov=P[k])

    # Backward pass
    for k in progbar(range(HMM.tseq.K)[::-1], 'ExtRTS<-'):
        # RTS smoother gain, regressing the (k+1) increment back onto k.
        J = mrdiv(P[k]@Ff[k+1].T, Pf[k+1])
        J *= self.DeCorr  # optional de-correlation damping
        mu[k] = mu[k] + J @ (mu[k+1] - muf[k+1])
        P[k] = P[k] + J @ (P[k+1] - Pf[k+1]) @ J.T

    for k in progbar(range(HMM.tseq.K+1), desc='Assess'):
        self.stats.assess(k, mu=mu[k], Cov=P[k])
def assimilate(self, HMM, xx, yy):
    """Extended Kalman filter (ExtKF): linearize Dyn/Obs about the current mean."""
    R = HMM.Obs.noise.C.full
    Q = 0 if HMM.Dyn.noise.C == 0 else HMM.Dyn.noise.C.full

    mu = HMM.X0.mu
    P = HMM.X0.C.full

    self.stats.assess(0, mu=mu, Cov=P)

    for k, ko, t, dt in progbar(HMM.tseq.ticker):
        mu = HMM.Dyn(mu, t-dt, dt)
        F = HMM.Dyn.linear(mu, t-dt, dt)  # tangent-linear model
        P = self.infl**(dt)*(F@[email protected]) + dt*Q

        # Of academic interest? Higher-order linearization:
        # mu_i += 0.5 * (Hessian[f_i] * P).sum()

        if ko is not None:
            self.stats.assess(k, ko, 'f', mu=mu, Cov=P)
            H = HMM.Obs.linear(mu, t)
            KG = mrdiv(P @ H.T, H@[email protected] + R)
            y = yy[ko]
            mu = mu + KG@(y - HMM.Obs(mu, t))
            KH = KG@H
            P = (np.eye(HMM.Dyn.M) - KH) @ P

            self.stats.trHK[ko] = KH.trace()/HMM.Dyn.M

        self.stats.assess(k, ko, mu=mu, Cov=P)
def assimilate(self, HMM, xx, yy):
    """Bootstrap-style particle filter with regularized resampling."""
    N, Nx, Rm12 = self.N, HMM.Dyn.M, HMM.Obs.noise.C.sym_sqrt_inv

    E = HMM.X0.sample(N)
    w = 1/N*np.ones(N)  # uniform initial weights

    self.stats.assess(0, E=E, w=w)

    for k, ko, t, dt in progbar(HMM.tseq.ticker):
        E = HMM.Dyn(E, t-dt, dt)
        if HMM.Dyn.noise.C != 0:
            D = rnd.randn(N, Nx)
            E += np.sqrt(dt*self.qroot)*([email protected])

            if self.qroot != 1.0:
                # Evaluate p/q (for each col of D) when q:=p**(1/self.qroot).
                w *= np.exp(-0.5*np.sum(D**2, axis=1) * (1 - 1/self.qroot))
                w /= w.sum()

        if ko is not None:
            self.stats.assess(k, ko, 'f', E=E, w=w)

            innovs = (yy[ko] - HMM.Obs(E, t)) @ Rm12.T
            w = reweight(w, innovs=innovs)

            if trigger_resampling(w, self.NER, [self.stats, E, k, ko]):
                # Kernel (jitter) amplitude for regularized resampling.
                C12 = self.reg*auto_bandw(N, Nx)*raw_C12(E, w)
                # C12 *= np.sqrt(rroot) # Re-include?

                idx, w = resample(w, self.resampl, wroot=self.wroot)
                E, chi2 = regularize(C12, E, idx, self.nuj)
                # if rroot != 1.0:
                #     # Compensate for rroot
                #     w *= np.exp(-0.5*chi2*(1 - 1/rroot))
                #     w /= w.sum()

        self.stats.assess(k, ko, 'u', E=E, w=w)
def assimilate(self, HMM, xx, yy):
    """Extended Kalman filter (old-API variant of the ExtKF above)."""
    Dyn, Obs, chrono, X0, stats = HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats

    R = Obs.noise.C.full
    Q = 0 if Dyn.noise.C == 0 else Dyn.noise.C.full

    mu = X0.mu
    P = X0.C.full

    stats.assess(0, mu=mu, Cov=P)

    for k, kObs, t, dt in progbar(chrono.ticker):
        mu = Dyn(mu, t - dt, dt)
        F = Dyn.linear(mu, t - dt, dt)  # tangent-linear model
        P = self.infl**(dt) * (F @ P @ F.T) + dt * Q

        # Of academic interest? Higher-order linearization:
        # mu_i += 0.5 * (Hessian[f_i] * P).sum()

        if kObs is not None:
            stats.assess(k, kObs, 'f', mu=mu, Cov=P)
            H = Obs.linear(mu, t)
            KG = mrdiv(P @ H.T, H @ P @ H.T + R)
            y = yy[kObs]
            mu = mu + KG @ (y - Obs(mu, t))
            KH = KG @ H
            P = (np.eye(Dyn.M) - KH) @ P

            stats.trHK[kObs] = KH.trace() / Dyn.M

        stats.assess(k, kObs, mu=mu, Cov=P)
def assimilate(self, HMM, xx, yy): Dyn, Obs, chrono, stats = HMM.Dyn, HMM.Obs, HMM.t, self.stats # Compute "climatological" Kalman gain muC = np.mean(xx, 0) AC = xx - muC PC = (AC.T @ AC) / (xx.shape[0] - 1) # Setup scalar "time-series" covariance dynamics. # ONLY USED FOR DIAGNOSTICS, not to affect the Kalman gain. L = series.estimate_corr_length(AC.ravel(order='F')) SM = fit_sigmoid(1/2, L, 0) # Init mu = muC stats.assess(0, mu=mu, Cov=PC) for k, kObs, t, dt in progbar(chrono.ticker): # Forecast mu = Dyn(mu, t-dt, dt) if kObs is not None: stats.assess(k, kObs, 'f', mu=muC, Cov=PC) # Analysis H = Obs.linear(muC, t) KG = mrdiv([email protected], H@[email protected] + Obs.noise.C.full) mu = muC + KG@(yy[kObs] - Obs(muC, t)) P = (np.eye(Dyn.M) - KG@H) @ PC SM = fit_sigmoid(P.trace()/PC.trace(), L, k) stats.assess(k, kObs, mu=mu, Cov=2*PC*SM(k))
def assimilate(self, HMM, xx, yy):
    """Serial EAKF-style filter: observations assimilated one at a time,
    with the state update regressed from obs space using localization."""
    Dyn, Obs, chrono, X0, stats = HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats
    N1 = self.N-1
    R = Obs.noise
    Rm12 = Obs.noise.C.sym_sqrt_inv

    E = X0.sample(self.N)
    stats.assess(0, E=E)

    for k, kObs, t, dt in progbar(chrono.ticker):
        E = Dyn(E, t-dt, dt)
        E = add_noise(E, dt, Dyn.noise, self.fnoise_treatm)

        if kObs is not None:
            stats.assess(k, kObs, 'f', E=E)
            y = yy[kObs]
            # Ordering of the serial obs. loop.
            inds = serial_inds(self.ordr, y, R, center(E)[0])

            state_taperer = Obs.localizer(self.loc_rad, 'y2x', t, self.taper)
            for j in inds:
                # Prep:
                # ------------------------------------------------------
                Eo = Obs(E, t)
                xo = np.mean(Eo, 0)
                Y = Eo - xo
                mu = np.mean(E, 0)
                A = E-mu

                # Update j-th component of observed ensemble:
                # ------------------------------------------------------
                Y_j = Rm12[j, :] @ Y.T
                dy_j = Rm12[j, :] @ (y - xo)

                # Prior var * N1:
                sig2_j = Y_j@Y_j
                if sig2_j < 1e-9:
                    continue  # obs j carries no ensemble spread; skip it

                # Update (below, we drop the locality subscript: _j)
                sig2_u = 1/(1/sig2_j + 1/N1)     # KG * N1
                alpha = (N1/(N1+sig2_j))**(0.5)  # Update contraction factor

                dy2 = sig2_u * dy_j/N1  # Mean update
                Y2 = alpha*Y_j          # Anomaly update

                # Update state (regress update from obs space, using localization)
                # ------------------------------------------------------
                ii, tapering = state_taperer(j)
                if len(ii) == 0:
                    continue
                Regression = (A[:, ii]*tapering).T @ Y_j/np.sum(Y_j**2)
                mu[ii] += Regression*dy2
                A[:, ii] += np.outer(Y2 - Y_j, Regression)

                # Without localization:
                # Regression = A.T @ Y_j/np.sum(Y_j**2)
                # mu += Regression*dy2
                # A += np.outer(Y2 - Y_j, Regression)

                E = mu + A

            E = post_process(E, self.infl, self.rot)

        stats.assess(k, kObs, E=E)
def assimilate(self, HMM, xx, yy):
    # Persistence-style baseline: estimates are read directly off the truth
    # trajectory `xx`; no observations are actually assimilated.
    prev = xx[0]
    self.stats.assess(0, mu=prev)
    for k, ko, _t, _dt in progbar(HMM.tseq.ticker):
        # Forecast/universal estimate: previous-step truth, persisted.
        self.stats.assess(k, ko, 'fu', mu=xx[k - 1])
        if ko is not None:
            # Analysis estimate: truth persisted from the previous obs. time.
            self.stats.assess(k, ko, 'a', mu=prev)
            # NOTE(review): reconstructed indentation — `prev` assumed to be
            # refreshed only at obs. times; confirm against upstream source.
            prev = xx[k]
def replay(self, figlist="default", speed=np.inf, t1=0, t2=None, **kwargs): """Replay LivePlot with what's been stored in 'self'. - t1, t2: time window to plot. - 'figlist' and 'speed': See LivePlot's doc. .. note:: `store_u` (whether to store non-obs-time stats) must have been `True` to have smooth graphs as in the actual LivePlot. .. note:: Ensembles are generally not stored in the stats and so cannot be replayed. """ # Time settings tseq = self.HMM.tseq if t2 is None: t2 = t1 + tseq.Tplot # Ens does not get stored in stats, so we cannot replay that. # If the LPs are initialized with P0!=None, then they will avoid ens plotting. # TODO 4: This system for switching from Ens to stats must be replaced. # It breaks down when M is very large. try: P0 = np.full_like(self.HMM.X0.C.full, np.nan) except AttributeError: # e.g. if X0 is defined via sampling func P0 = np.eye(self.HMM.Nx) LP = liveplotting.LivePlot(self, figlist, P=P0, speed=speed, Tplot=t2 - t1, replay=True, **kwargs) plt.pause(.01) # required when speed=inf # Remember: must use progbar to unblock read1. # Let's also make a proper description. desc = self.xp.da_method + " (replay)" # Play through assimilation cycles for k, ko, t, _dt in progbar(tseq.ticker, desc): if t1 <= t <= t2: if ko is not None: LP.update((k, ko, 'f'), None, None) LP.update((k, ko, 'a'), None, None) LP.update((k, ko, 'u'), None, None) # Pause required when speed=inf. # On Mac, it was also necessary to do it for each fig. if LP.any_figs: for _name, updater in LP.figures.items(): if plt.fignum_exists(_name) and getattr( updater, 'is_active', 1): plt.figure(_name) plt.pause(0.01)
def assimilate(self, HMM, xx, yy):
    """Climatology "filter": the constant truth mean/cov serve as the estimate."""
    climat_mean = np.mean(xx, 0)
    anomalies = xx - climat_mean
    climat_cov = CovMat(anomalies, 'A')

    self.stats.assess(0, mu=climat_mean, Cov=climat_cov)
    self.stats.trHK[:] = 0

    for k, ko, _, _ in progbar(HMM.tseq.ticker):
        # Register forecast+analysis+universal stats at obs. times,
        # and only universal stats otherwise.
        if ko is None:
            which = 'u'
        else:
            which = 'fau'
        self.stats.assess(k, ko, which, mu=climat_mean, Cov=climat_cov)
def assimilate(self, HMM, xx, yy):
    """Particle filter with xN-fold duplication + jitter before resampling."""
    Dyn, Obs, chrono, X0, stats = \
        HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats
    N, xN, Nx, Rm12 = self.N, self.xN, Dyn.M, Obs.noise.C.sym_sqrt_inv

    DD = None  # cached jitter sample (re-used if self.re_use)
    E = X0.sample(N)
    w = 1 / N * np.ones(N)

    stats.assess(0, E=E, w=w)

    for k, kObs, t, dt in progbar(chrono.ticker):
        E = Dyn(E, t - dt, dt)
        if Dyn.noise.C != 0:
            E += np.sqrt(dt) * (rnd.randn(N, Nx) @ Dyn.noise.C.Right)

        if kObs is not None:
            stats.assess(k, kObs, 'f', E=E, w=w)
            y = yy[kObs]
            wD = w.copy()  # pre-update weights, for the duplicated ensemble

            innovs = (y - Obs(E, t)) @ Rm12.T
            w = reweight(w, innovs=innovs)

            if trigger_resampling(w, self.NER, [stats, E, k, kObs]):
                # Compute kernel colouring matrix
                cholR = self.Qs * auto_bandw(N, Nx) * raw_C12(E, wD)
                cholR = chol_reduce(cholR)

                # Generate N·xN random numbers from NormDist(0,1)
                if DD is None or not self.re_use:
                    DD = rnd.randn(N * xN, Nx)

                # Duplicate and jitter
                ED = E.repeat(xN, 0)
                wD = wD.repeat(xN) / xN
                ED += DD[:, :len(cholR)] @ cholR

                # Update weights
                innovs = (y - Obs(ED, t)) @ Rm12.T
                wD = reweight(wD, innovs=innovs)

                # Resample and reduce
                wroot = 1.0
                while wroot < self.wroot_max:
                    idx, w = resample(wD, self.resampl, wroot=wroot, N=N)
                    dups = sum(mask_unique_of_sorted(idx))
                    if dups == 0:
                        E = ED[idx]
                        break
                    else:
                        # Flatten weights further until selection is duplicate-free.
                        wroot += 0.1

        stats.assess(k, kObs, 'u', E=E, w=w)
def assimilate(self, HMM, xx, yy):
    """Rank-histogram filter: serial obs. loop, CDF-matching update in obs space,
    regressed onto the state."""
    Dyn, Obs, chrono, X0, stats, N = \
        HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats, self.N
    N1 = N - 1
    step = 1 / N
    # Mid-points of N equiprobable bins on (0, 1).
    cdf_grid = np.linspace(step / 2, 1 - step / 2, N)

    R = Obs.noise
    Rm12 = Obs.noise.C.sym_sqrt_inv

    E = X0.sample(N)
    stats.assess(0, E=E)

    for k, kObs, t, dt in progbar(chrono.ticker):
        E = Dyn(E, t - dt, dt)
        E = add_noise(E, dt, Dyn.noise, self.fnoise_treatm)

        if kObs is not None:
            stats.assess(k, kObs, 'f', E=E)
            y = yy[kObs]
            inds = serial_inds(self.ordr, y, R, center(E)[0])

            for _, j in enumerate(inds):
                Eo = Obs(E, t)
                xo = np.mean(Eo, 0)
                Y = Eo - xo
                mu = np.mean(E, 0)
                A = E - mu

                # Update j-th component of observed ensemble
                dYf = Rm12[j, :] @ (y - Eo).T  # NB: does Rm12 make sense?
                Yj = Rm12[j, :] @ Y.T
                Regr = A.T @ Yj / np.sum(Yj**2)  # regression coeffs obs -> state

                Sorted = np.argsort(dYf)
                Revert = np.argsort(Sorted)  # inverse permutation
                dYf = dYf[Sorted]
                w = reweight(np.ones(N), innovs=dYf[:, None])  # Lklhd
                w = w.clip(1e-10)  # Avoid zeros in interp1
                cw = w.cumsum()
                cw /= cw[-1]
                cw *= N1 / N
                cdfs = np.minimum(np.maximum(cw[0], cdf_grid), cw[-1])
                # Obs-space increments via inverse-CDF (histogram) matching.
                dhE = -dYf + np.interp(cdfs, cw, dYf)
                dhE = dhE[Revert]

                # Update state by regression
                E += np.outer(-dhE, Regr)

            E = post_process(E, self.infl, self.rot)

        stats.assess(k, kObs, E=E)
def assimilate(self, HMM, xx, yy):
    """Climatology "filter" (old API): the constant truth mean/cov is the estimate."""
    chrono, stats = HMM.t, self.stats

    climat_mean = np.mean(xx, 0)
    anomalies = xx - climat_mean
    climat_cov = CovMat(anomalies, 'A')

    stats.assess(0, mu=climat_mean, Cov=climat_cov)
    stats.trHK[:] = 0

    for k, kObs, _, _ in progbar(chrono.ticker):
        # Obs. times get forecast+analysis+universal stats; others only universal.
        if kObs is None:
            which = 'u'
        else:
            which = 'fau'
        stats.assess(k, kObs, which, mu=climat_mean, Cov=climat_cov)
def assimilate(self, HMM, xx, yy):
    """Particle filter variant with adaptively-flattened ("wroot") resampling."""
    Dyn, Obs, chrono, X0, stats = \
        HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats
    N, Nx, Rm12 = self.N, Dyn.M, Obs.noise.C.sym_sqrt_inv

    E = X0.sample(N)
    w = 1 / N * np.ones(N)

    stats.assess(0, E=E, w=w)

    for k, kObs, t, dt in progbar(chrono.ticker):
        E = Dyn(E, t - dt, dt)
        if Dyn.noise.C != 0:
            D = rnd.randn(N, Nx)
            E += np.sqrt(dt * self.qroot) * (D @ Dyn.noise.C.Right)

            if self.qroot != 1.0:
                # Evaluate p/q (for each col of D) when q:=p**(1/self.qroot).
                w *= np.exp(-0.5 * np.sum(D**2, axis=1) * (1 - 1 / self.qroot))
                w /= w.sum()

        if kObs is not None:
            stats.assess(k, kObs, 'f', E=E, w=w)

            innovs = (yy[kObs] - Obs(E, t)) @ Rm12.T
            w = reweight(w, innovs=innovs)

            if trigger_resampling(w, self.NER, [stats, E, k, kObs]):
                C12 = self.reg * auto_bandw(N, Nx) * raw_C12(E, w)
                # C12 *= np.sqrt(rroot) # Re-include?

                # Increase wroot until the effective ensemble size of the
                # flattened weights exceeds N*alpha.
                wroot = 1.0
                while True:
                    s = (w**(1 / wroot - 1)).clip(max=1e100)
                    s /= (s * w).sum()
                    sw = s * w
                    if 1 / (sw @ sw) < N * self.alpha:
                        wroot += 0.2
                    else:
                        stats.wroot[kObs] = wroot
                        break
                idx, w = resample(sw, self.resampl, wroot=1)

                E, chi2 = regularize(C12, E, idx, self.nuj)
                # if rroot != 1.0:
                #     # Compensate for rroot
                #     w *= np.exp(-0.5*chi2*(1 - 1/rroot))
                #     w /= w.sum()

        stats.assess(k, kObs, 'u', E=E, w=w)
def assimilate(self, HMM, xx, yy):
    """Ensemble Rauch-Tung-Striebel-type smoother (old-API variant)."""
    Dyn, Obs, chrono, X0, stats = \
        HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats

    # Full time series of ensembles; Ef keeps the pure forecasts.
    E = zeros((chrono.K + 1, self.N, Dyn.M))
    Ef = E.copy()
    E[0] = X0.sample(self.N)

    # Forward pass
    for k, kObs, t, dt in progbar(chrono.ticker):
        E[k] = Dyn(E[k - 1], t - dt, dt)
        E[k] = add_noise(E[k], dt, Dyn.noise, self.fnoise_treatm)
        Ef[k] = E[k]  # store forecast before any analysis overwrite

        if kObs is not None:
            stats.assess(k, kObs, 'f', E=E[k])
            Eo = Obs(E[k], t)
            y = yy[kObs]
            E[k] = EnKF_analysis(E[k], Eo, Obs.noise, y,
                                 self.upd_a, stats, kObs)
            E[k] = post_process(E[k], self.infl, self.rot)
            stats.assess(k, kObs, 'a', E=E[k])

    # Backward pass
    for k in progbar(range(chrono.K)[::-1]):
        A = center(E[k])[0]        # analysis anomalies at k
        Af = center(Ef[k + 1])[0]  # forecast anomalies at k+1
        # Regression ("smoother gain") of increments from k+1 onto k.
        J = tinv(Af) @ A
        J *= self.cntr  # optional damping factor
        E[k] += (E[k + 1] - Ef[k + 1]) @ J

    for k, kObs, _, _ in progbar(chrono.ticker, desc='Assessing'):
        stats.assess(k, kObs, 'u', E=E[k])
        if kObs is not None:
            stats.assess(k, kObs, 's', E=E[k])
def assimilate(self, HMM, xx, yy):
    """iEnKS driver: loops over (possibly overlapping) DA windows, delegating
    the iterative update to `iEnKS_update`."""
    chrono, X0, stats = HMM.t, HMM.X0, self.stats
    R, KObs = HMM.Obs.noise.C, HMM.t.KObs
    Rm12 = R.sym_sqrt_inv

    assert HMM.Dyn.noise.C == 0, (
        "Q>0 not yet supported."
        " See Sakov et al 2017: 'An iEnKF with mod. error'")

    if self.bundle:
        EPS = 1e-4  # Sakov/Boc use T=EPS*eye(N), with EPS=1e-4, but I ...
    else:
        EPS = 1.0  # ... prefer using T=EPS*T, yielding a conditional cloud shape

    # Initial ensemble
    E = X0.sample(self.N)

    # Forward ensemble to kObs = 0 if Lag = 0
    t = 0
    k = 0
    if self.Lag == 0:
        for k, t, dt in chrono.cycle(kObs=0):
            stats.assess(k - 1, None, 'u', E=E)
            E = HMM.Dyn(E, t - dt, dt)

    # Loop over DA windows (DAW).
    for kObs in progbar(range(0, KObs + self.Lag + 1)):
        kLag = kObs - self.Lag
        DAW = range(max(0, kLag + 1), min(kObs, KObs) + 1)

        # Assimilation (if ∃ "not-fully-assimlated" obs).
        if kObs <= KObs:
            E = iEnKS_update(self.upd_a, E, DAW, HMM, stats, EPS,
                             yy[kObs], (k, kObs, t), Rm12,
                             self.xN, self.MDA, (self.nIter, self.wtol))
            E = post_process(E, self.infl, self.rot)

        # Slide/shift DAW by propagating smoothed ('s') ensemble from [kLag].
        if kLag >= 0:
            stats.assess(chrono.kkObs[kLag], kLag, 's', E=E)

        cycle_window = range(max(kLag + 1, 0),
                             min(max(kLag + 1 + 1, 0), KObs + 1))

        for kCycle in cycle_window:
            for k, t, dt in chrono.cycle(kCycle):
                stats.assess(k - 1, None, 'u', E=E)
                E = HMM.Dyn(E, t - dt, dt)

    stats.assess(k, KObs, 'us', E=E)
def fun_k(x0, k, *args, **kwargs):
    """Apply `func` recursively `k` times, returning the full trajectory (incl. `x0`)."""
    xx = np.zeros((k + 1, ) + x0.shape)
    xx[0] = x0
    # Prog. bar name.
    # Fixed: compare singletons with `is`, not `==` (PEP 8 E711/E712);
    # `prog == False` would also match 0, and `==` can mis-fire on odd types.
    if prog is False:
        desc = None
    elif prog is None:
        desc = "Recurs."
    else:
        desc = prog
    for i in progbar(range(k), desc):
        xx[i + 1] = func(xx[i], *args, **kwargs)
    return xx
def assimilate(self, HMM, xx, yy):
    """Localized NETF: per-batch nonlinear (weight-based) ensemble update.

    Like the LETKF, but the local update is computed from likelihood weights
    (Gaussian or Laplace) rather than a linear Kalman analysis.
    """
    Dyn, Obs, chrono, X0, stats = HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats
    Rm12 = Obs.noise.C.sym_sqrt_inv

    E = X0.sample(self.N)
    stats.assess(0, E=E)

    for k, kObs, t, dt in progbar(chrono.ticker):
        E = Dyn(E, t - dt, dt)
        E = add_noise(E, dt, Dyn.noise, self.fnoise_treatm)

        if kObs is not None:
            stats.assess(k, kObs, 'f', E=E)
            mu = np.mean(E, 0)
            A = E - mu

            Eo = Obs(E, t)
            xo = np.mean(Eo, 0)
            YR = (Eo - xo) @ Rm12.T
            yR = (yy[kObs] - xo) @ Rm12.T

            state_batches, obs_taperer = Obs.localizer(
                self.loc_rad, 'x2y', t, self.taper)
            for ii in state_batches:
                # Localize obs
                jj, tapering = obs_taperer(ii)
                if len(jj) == 0:
                    # BUGFIX: was `return`, which aborted the ENTIRE
                    # assimilation as soon as one state batch had no local
                    # obs. Simply skip such batches instead.
                    continue

                Y_jj = YR[:, jj] * np.sqrt(tapering)
                dy_jj = yR[jj] * np.sqrt(tapering)

                # NETF:
                # This "paragraph" is the only difference to the LETKF.
                innovs = (dy_jj - Y_jj) / self.Rs
                if 'laplace' in str(type(Obs.noise)).lower():
                    w = laplace_lklhd(innovs)
                else:  # assume Gaussian
                    w = reweight(np.ones(self.N), innovs=innovs)
                dmu = w @ A[:, ii]
                AT = np.sqrt(self.N) * funm_psd(
                    np.diag(w) - np.outer(w, w), np.sqrt) @ A[:, ii]
                E[:, ii] = mu[ii] + dmu + AT

            E = post_process(E, self.infl, self.rot)

        stats.assess(k, kObs, E=E)
def simulate(self, desc='Truth & Obs'):
    """Generate synthetic truth and observations."""
    Dyn, Obs, chrono, X0 = self.Dyn, self.Obs, self.t, self.X0

    # Allocate truth (xx) and obs (yy) time series; draw the initial state.
    truth = np.zeros((chrono.K + 1, Dyn.M))
    obs = np.zeros((chrono.KObs+1, Obs.M))
    truth[0] = X0.sample(1)

    # Integrate the dynamics (adding model noise); observe at obs. times.
    for k, kObs, t, dt in pb.progbar(chrono.ticker, desc):
        model_noise = np.sqrt(dt)*Dyn.noise.sample(1)
        truth[k] = Dyn(truth[k-1], t-dt, dt) + model_noise
        if kObs is not None:
            obs[kObs] = Obs(truth[k], t) + Obs.noise.sample(1)

    return truth, obs
def assimilate(self, HMM, xx, yy):
    """Particle filter with an EnKF-like proposal step before importance weighting."""
    Dyn, Obs, chrono, X0, stats = \
        HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats
    N, Nx, R = self.N, Dyn.M, Obs.noise.C.full

    E = X0.sample(N)
    w = 1 / N * np.ones(N)

    stats.assess(0, E=E, w=w)

    for k, kObs, t, dt in progbar(chrono.ticker):
        E = Dyn(E, t - dt, dt)
        if Dyn.noise.C != 0:
            E += np.sqrt(dt) * (rnd.randn(N, Nx) @ Dyn.noise.C.Right)

        if kObs is not None:
            stats.assess(k, kObs, 'f', E=E, w=w)
            y = yy[kObs]
            Eo = Obs(E, t)
            innovs = y - Eo  # kept for the weighting below (pre-update innovations)

            # EnKF-ish update
            s = self.Qs * auto_bandw(N, Nx)
            As = s * raw_C12(E, w)   # (weighted) state anomalies, scaled
            Ys = s * raw_C12(Eo, w)  # (weighted) obs anomalies, scaled
            C = Ys.T @ Ys + R
            KG = As.T @ mrdiv(Ys, C)
            E += sample_quickly_with(As)[0]
            D = Obs.noise.sample(N)  # perturbed-obs draws
            dE = KG @ (y - Obs(E, t) - D).T
            E = E + dE.T

            # Importance weighting
            chi2 = innovs * mldiv(C, innovs.T).T
            logL = -0.5 * np.sum(chi2, axis=1)
            w = reweight(w, logL=logL)

            # Resampling
            if trigger_resampling(w, self.NER, [stats, E, k, kObs]):
                C12 = self.reg * auto_bandw(N, Nx) * raw_C12(E, w)
                idx, w = resample(w, self.resampl, wroot=self.wroot)
                E, _ = regularize(C12, E, idx, self.nuj)

        stats.assess(k, kObs, 'u', E=E, w=w)
def assimilate(self, HMM, xx, yy):
    """Standard EnKF cycle: noisy forecast, then analysis at obs. times."""
    stats = self.stats

    ens = HMM.X0.sample(self.N)
    stats.assess(0, E=ens)

    for k, ko, t, dt in progbar(HMM.tseq.ticker):
        # Forecast step (incl. model-noise treatment).
        ens = HMM.Dyn(ens, t - dt, dt)
        ens = add_noise(ens, dt, HMM.Dyn.noise, self.fnoise_treatm)

        if ko is not None:
            # Analysis step.
            stats.assess(k, ko, 'f', E=ens)
            obs_ens = HMM.Obs(ens, t)
            ens = EnKF_analysis(ens, obs_ens, HMM.Obs.noise, yy[ko],
                                self.upd_a, stats, ko)
            ens = post_process(ens, self.infl, self.rot)

        stats.assess(k, ko, E=ens)
def assimilate(self, HMM, xx, yy):
    """3D-Var-style method with a static background covariance B,
    plus a sigmoid-based covariance time series for diagnostics only."""
    Dyn, Obs, chrono, X0, stats = HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats

    if isinstance(self.B, np.ndarray):
        # compare ndarray 1st to avoid == error for ndarray
        B = self.B.astype(float)
    elif self.B in (None, 'clim'):
        # Use climatological cov, estimated from truth
        B = np.cov(xx.T)
    elif self.B == 'eye':
        B = np.eye(HMM.Nx)
    else:
        raise ValueError("Bad input B.")
    B *= self.xB

    # ONLY USED FOR DIAGNOSTICS, not to change the Kalman gain.
    CC = 2 * np.cov(xx.T)
    L = series.estimate_corr_length(center(xx)[0].ravel(order='F'))
    P = X0.C.full
    SM = fit_sigmoid(P.trace() / CC.trace(), L, 0)

    # Init
    mu = X0.mu
    stats.assess(0, mu=mu, Cov=P)

    for k, kObs, t, dt in progbar(chrono.ticker):
        # Forecast
        mu = Dyn(mu, t - dt, dt)
        P = CC * SM(k)  # diagnostic covariance only

        if kObs is not None:
            stats.assess(k, kObs, 'f', mu=mu, Cov=P)

            # Analysis (gain built from the static B, not from P)
            H = Obs.linear(mu, t)
            KG = mrdiv(B @ H.T, H @ B @ H.T + Obs.noise.C.full)
            mu = mu + KG @ (yy[kObs] - Obs(mu, t))

            # Re-calibrate fit_sigmoid with new W0 = Pa/B
            P = (np.eye(Dyn.M) - KG @ H) @ B
            SM = fit_sigmoid(P.trace() / CC.trace(), L, k)

        stats.assess(k, kObs, mu=mu, Cov=P)
def simulate(self, desc='Truth & Obs'):
    """Generate synthetic truth and observations."""
    Dyn, Obs, tseq, X0 = self.Dyn, self.Obs, self.tseq, self.X0

    # Allocate truth (xx) and obs (yy) series; draw the initial state.
    truth = np.zeros((tseq.K + 1, Dyn.M))
    obs = np.zeros((tseq.Ko + 1, Obs.M))
    state = X0.sample(1)
    truth[0] = state

    # Integrate the dynamics with model noise; observe at obs. times.
    for k, ko, t, dt in pb.progbar(tseq.ticker, desc):
        state = Dyn(state, t - dt, dt)
        state = state + np.sqrt(dt) * Dyn.noise.sample(1)
        if ko is not None:
            obs[ko] = Obs(state, t) + Obs.noise.sample(1)
        truth[k] = state

    return truth, obs
def assimilate(self, HMM, xx, yy):
    """Standard EnKF cycle (old API): noisy forecast, then analysis at obs. times."""
    Dyn, Obs, chrono, X0, stats = \
        HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats

    ens = X0.sample(self.N)
    stats.assess(0, E=ens)

    for k, kObs, t, dt in progbar(chrono.ticker):
        # Forecast step (incl. model-noise treatment).
        ens = Dyn(ens, t-dt, dt)
        ens = add_noise(ens, dt, Dyn.noise, self.fnoise_treatm)

        if kObs is not None:
            # Analysis step.
            stats.assess(k, kObs, 'f', E=ens)
            obs_ens = Obs(ens, t)
            ens = EnKF_analysis(ens, obs_ens, Obs.noise, yy[kObs],
                                self.upd_a, stats, kObs)
            ens = post_process(ens, self.infl, self.rot)

        stats.assess(k, kObs, E=ens)
def assimilate(self, HMM, xx, yy):
    """LETKF-style filter: batch-local analyses, optionally parallelized."""
    E = HMM.X0.sample(self.N)
    self.stats.assess(0, E=E)
    self.stats.new_series("ad_inf", 1, HMM.tseq.Ko + 1)

    # Pool for parallelizing the local analyses (self.mp configures it).
    with multiproc.Pool(self.mp) as pool:
        for k, ko, t, dt in progbar(HMM.tseq.ticker):
            E = HMM.Dyn(E, t - dt, dt)
            E = add_noise(E, dt, HMM.Dyn.noise, self.fnoise_treatm)

            if ko is not None:
                self.stats.assess(k, ko, 'f', E=E)
                batch, taper = HMM.Obs.localizer(self.loc_rad, 'x2y', t, self.taper)
                # NB: local `stats` (returned diagnostics) is distinct from self.stats.
                E, stats = local_analyses(E, HMM.Obs(E, t), HMM.Obs.noise.C, yy[ko],
                                          batch, taper, pool.map, self.xN, self.g)
                self.stats.write(stats, k, ko, "a")
                E = post_process(E, self.infl, self.rot)

            self.stats.assess(k, ko, E=E)
def assimilate(self, HMM, xx, yy):
    """Iterative EnKS (iEnKS): Gauss-Newton/MDA iterations over sliding DA windows."""
    Dyn, Obs, chrono, X0, stats, N = \
        HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats, self.N
    R, KObs, N1 = HMM.Obs.noise.C, HMM.t.KObs, N - 1
    Rm12 = R.sym_sqrt_inv

    assert Dyn.noise.C == 0, (
        "Q>0 not yet supported."
        " See Sakov et al 2017: 'An iEnKF with mod. error'")

    if self.bundle:
        EPS = 1e-4  # Sakov/Boc use T=EPS*eye(N), with EPS=1e-4, but I ...
    else:
        EPS = 1.0  # ... prefer using T=EPS*T, yielding a conditional cloud shape

    # Initial ensemble
    E = X0.sample(N)

    # Loop over DA windows (DAW).
    for kObs in progbar(np.arange(-1, KObs + self.Lag + 1)):
        kLag = kObs - self.Lag
        DAW = range(max(0, kLag + 1), min(kObs, KObs) + 1)

        # Assimilation (if ∃ "not-fully-assimlated" obs).
        if 0 <= kObs <= KObs:

            # Init iterations.
            X0, x0 = center(E)  # Decompose ensemble.
            w = np.zeros(N)     # Control vector for the mean state.
            T = np.eye(N)       # Anomalies transform matrix.
            Tinv = np.eye(N)
            # Explicit Tinv [instead of tinv(T)] allows for merging MDA code
            # with iEnKS/EnRML code, and flop savings in 'Sqrt' case.

            for iteration in np.arange(self.nIter):
                # Reconstruct smoothed ensemble.
                E = x0 + (w + EPS * T) @ X0
                # Forecast.
                for kCycle in DAW:
                    for k, t, dt in chrono.cycle(kCycle):  # noqa
                        E = Dyn(E, t - dt, dt)
                # Observe.
                Eo = Obs(E, t)

                # Undo the bundle scaling of ensemble.
                if EPS != 1.0:
                    E = inflate_ens(E, 1 / EPS)
                    Eo = inflate_ens(Eo, 1 / EPS)

                # Assess forecast stats; store {Xf, T_old} for analysis assessment.
                if iteration == 0:
                    stats.assess(k, kObs, 'f', E=E)
                    Xf, xf = center(E)
                # NOTE(review): reconstructed indentation — T_old assumed
                # refreshed each iteration (pre-update T); confirm upstream.
                T_old = T

                # Prepare analysis.
                y = yy[kObs]        # Get current obs.
                Y, xo = center(Eo)  # Get obs {anomalies, mean}.
                dy = (y - xo) @ Rm12.T  # Transform obs space.
                Y = Y @ Rm12.T          # Transform obs space.
                Y0 = Tinv @ Y  # "De-condition" the obs anomalies.
                V, s, UT = svd0(Y0)  # Decompose Y0.

                # Set "cov normlzt fctr" za ("effective ensemble size")
                # => pre_infl^2 = (N-1)/za.
                if self.xN is None:
                    za = N1
                else:
                    za = zeta_a(*hyperprior_coeffs(s, N, self.xN), w)

                if self.MDA:
                    # inflation (factor: nIter) of the ObsErrCov.
                    za *= self.nIter

                # Post. cov (approx) of w,
                # estimated at current iteration, raised to power.
                def Cowp(expo):
                    return (V * (pad0(s**2, N) + za)**-expo) @ V.T

                Cow1 = Cowp(1.0)

                if self.MDA:  # View update as annealing (progressive assimilation).
                    Cow1 = Cow1 @ T  # apply previous update
                    dw = dy @ Y.T @ Cow1
                    if 'PertObs' in self.upd_a:  # == "ES-MDA". By Emerick/Reynolds
                        D = mean0(np.random.randn(*Y.shape)) * np.sqrt(self.nIter)
                        T -= (Y + D) @ Y.T @ Cow1
                    elif 'Sqrt' in self.upd_a:  # == "ETKF-ish". By Raanes
                        T = Cowp(0.5) * np.sqrt(za) @ T
                    elif 'Order1' in self.upd_a:  # == "DEnKF-ish". By Emerick
                        T -= 0.5 * Y @ Y.T @ Cow1
                    # Tinv = eye(N) [as initialized] coz MDA does not de-condition.

                else:  # View update as Gauss-Newton optimzt. of log-posterior.
                    grad = Y0 @ dy - w * za  # Cost function gradient
                    dw = grad @ Cow1         # Gauss-Newton step
                    # ETKF-ish". By Bocquet/Sakov.
                    if 'Sqrt' in self.upd_a:
                        # Sqrt-transforms
                        T = Cowp(0.5) * np.sqrt(N1)
                        Tinv = Cowp(-.5) / np.sqrt(N1)
                        # Tinv saves time [vs tinv(T)] when Nx<N
                    # "EnRML". By Oliver/Chen/Raanes/Evensen/Stordal.
                    elif 'PertObs' in self.upd_a:
                        D = mean0(np.random.randn(*Y.shape)) \
                            if iteration == 0 else D
                        gradT = -(Y + D) @ Y0.T + N1 * (np.eye(N) - T)
                        T = T + gradT @ Cow1
                        # Tinv= tinv(T, threshold=N1)  # unstable
                        Tinv = sla.inv(T + 1)  # the +1 is for stability.
                    # "DEnKF-ish". By Raanes.
                    elif 'Order1' in self.upd_a:
                        # Included for completeness; does not make much sense.
                        gradT = -0.5 * Y @ Y0.T + N1 * (np.eye(N) - T)
                        T = T + gradT @ Cow1
                        Tinv = tinv(T, threshold=N1)

                w += dw
                if dw @ dw < self.wtol * N:
                    break

            # Assess (analysis) stats.
            # The final_increment is a linearization to
            # (i) avoid re-running the model and
            # (ii) reproduce EnKF in case nIter==1.
            final_increment = (dw + T - T_old) @ Xf
            # See docs/snippets/iEnKS_Ea.jpg.
            stats.assess(k, kObs, 'a', E=E + final_increment)
            stats.iters[kObs] = iteration + 1
            if self.xN:
                stats.infl[kObs] = np.sqrt(N1 / za)

            # Final (smoothed) estimate of E at [kLag].
            E = x0 + (w + T) @ X0
            E = post_process(E, self.infl, self.rot)

        # Slide/shift DAW by propagating smoothed ('s') ensemble from [kLag].
        if -1 <= kLag < KObs:
            if kLag >= 0:
                stats.assess(chrono.kkObs[kLag], kLag, 's', E=E)
            for k, t, dt in chrono.cycle(kLag + 1):
                stats.assess(k - 1, None, 'u', E=E)
                E = Dyn(E, t - dt, dt)

    stats.assess(k, KObs, 'us', E=E)
def assimilate(self, HMM, xx, yy):
    """Iterated extended Kalman smoother / 4D-Var-like method with static B."""
    Dyn, Obs, chrono, X0, stats = HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats
    R, KObs = HMM.Obs.noise.C, HMM.t.KObs
    Rm12 = R.sym_sqrt_inv
    Nx = Dyn.M

    # Set background covariance. Note that it is static (compare to iEnKS).
    if self.B in (None, 'clim'):
        # Use climatological cov, ...
        B = np.cov(xx.T)  # ... estimated from truth
    elif self.B == 'eye':
        B = np.eye(Nx)
    else:
        B = self.B
    B *= self.xB
    B12 = CovMat(B).sym_sqrt

    # Init
    x = X0.mu
    stats.assess(0, mu=x, Cov=B)

    # Loop over DA windows (DAW).
    for kObs in progbar(np.arange(-1, KObs + self.Lag + 1)):
        kLag = kObs - self.Lag
        DAW = range(max(0, kLag + 1), min(kObs, KObs) + 1)

        # Assimilation (if ∃ "not-fully-assimlated" obs).
        if 0 <= kObs <= KObs:

            # Init iterations.
            w = np.zeros(Nx)  # Control vector for the mean state.
            x0 = x.copy()     # Increment reference.

            for iteration in np.arange(self.nIter):
                # Reconstruct smoothed state.
                x = x0 + B12 @ w
                X = B12  # Aggregate composite TLMs onto B12

                # Forecast.
                for kCycle in DAW:
                    for k, t, dt in chrono.cycle(kCycle):  # noqa
                        X = Dyn.linear(x, t - dt, dt) @ X
                        x = Dyn(x, t - dt, dt)

                # Assess forecast stats
                if iteration == 0:
                    stats.assess(k, kObs, 'f', mu=x, Cov=X @ X.T)

                # Observe.
                Y = Obs.linear(x, t) @ X
                xo = Obs(x, t)

                # Analysis prep.
                y = yy[kObs]          # Get current obs.
                dy = Rm12 @ (y - xo)  # Transform obs space.
                Y = Rm12 @ Y          # Transform obs space.
                V, s, UT = svd0(Y.T)  # Decomp for lin-alg update comps.

                # Post. cov (approx) of w,
                # estimated at current iteration, raised to power.
                Cow1 = (V * (pad0(s**2, Nx) + 1)**-1.0) @ V.T

                # Compute analysis update.
                grad = Y.T @ dy - w  # Cost function gradient
                dw = Cow1 @ grad     # Gauss-Newton step
                w += dw              # Step

                if dw @ dw < self.wtol * Nx:
                    break

            # Assess (analysis) stats.
            final_increment = X @ dw
            stats.assess(k, kObs, 'a', mu=x + final_increment,
                         Cov=X @ Cow1 @ X.T)
            stats.iters[kObs] = iteration + 1

            # Final (smoothed) estimate at [kLag].
            x = x0 + B12 @ w
            X = B12

        # Slide/shift DAW by propagating smoothed ('s') state from [kLag].
        if -1 <= kLag < KObs:
            if kLag >= 0:
                stats.assess(chrono.kkObs[kLag], kLag, 's',
                             mu=x, Cov=X @ Cow1 @ X.T)
            for k, t, dt in chrono.cycle(kLag + 1):
                # NOTE(review): 'u' assessment uses Cov=Y@Y.T (obs-space
                # product) where X@X.T might be expected — confirm upstream.
                stats.assess(k - 1, None, 'u', mu=x, Cov=Y @ Y.T)
                X = Dyn.linear(x, t - dt, dt) @ X
                x = Dyn(x, t - dt, dt)

    stats.assess(k, KObs, 'us', mu=x, Cov=X @ Cow1 @ X.T)
def assimilate(self, HMM, xx, yy):
    """EnKF-N: the "finite-size" EnKF, estimating its own inflation via a
    hyperprior, in either dual (scalar l1) or primal (vector w) form."""
    # Unpack
    Dyn, Obs, chrono, X0, stats = \
        HMM.Dyn, HMM.Obs, HMM.t, HMM.X0, self.stats
    R, N, N1 = HMM.Obs.noise.C, self.N, self.N-1

    # Init
    E = X0.sample(N)
    stats.assess(0, E=E)

    # Loop
    for k, kObs, t, dt in progbar(chrono.ticker):
        # Forecast
        E = Dyn(E, t-dt, dt)
        E = add_noise(E, dt, Dyn.noise, self.fnoise_treatm)

        # Analysis
        if kObs is not None:
            stats.assess(k, kObs, 'f', E=E)
            Eo = Obs(E, t)
            y = yy[kObs]

            mu = np.mean(E, 0)
            A = E - mu

            xo = np.mean(Eo, 0)
            Y = Eo-xo
            dy = y - xo

            V, s, UT = svd0(Y @ R.sym_sqrt_inv.T)
            du = UT @ (dy @ R.sym_sqrt_inv.T)

            def dgn_N(l1):
                # Denominator of the (inflated) posterior precision.
                return pad0((l1*s)**2, N) + N1

            # Adjust hyper-prior
            # xN_ = noise_level(self.xN,stats,chrono,N1,kObs,A,
            #                   locals().get('A_old',None))
            eN, cL = hyperprior_coeffs(s, N, self.xN, self.g)

            if self.dual:
                # Make dual cost function (in terms of l1)
                def pad_rk(arr):
                    return pad0(arr, min(N, Obs.M))

                def dgn_rk(l1):
                    return pad_rk((l1*s)**2) + N1

                def J(l1):
                    val = np.sum(du**2/dgn_rk(l1)) \
                        + eN/l1**2 \
                        + cL*np.log(l1**2)
                    return val

                # Derivatives (not required with minimize_scalar):
                def Jp(l1):
                    val = -2*l1 * np.sum(pad_rk(s**2) * du**2/dgn_rk(l1)**2) \
                        + -2*eN/l1**3 + 2*cL/l1
                    return val

                def Jpp(l1):
                    val = 8*l1**2 * np.sum(pad_rk(s**4) * du**2/dgn_rk(l1)**3) \
                        + 6*eN/l1**4 + -2*cL/l1**2
                    return val

                # Find inflation factor (optimize)
                l1 = Newton_m(Jp, Jpp, 1.0)
                # l1 = fmin_bfgs(J, x0=[1], gtol=1e-4, disp=0)
                # l1 = minimize_scalar(J, bracket=(sqrt(prior_mode), 1e2),
                #                      tol=1e-4).x

            else:
                # Primal form, in a fully linearized version.
                def za(w):
                    return zeta_a(eN, cL, w)

                def J(w):
                    return \
                        .5*np.sum(((dy-w@Y)@R.sym_sqrt_inv.T)**2) + \
                        .5*N1*cL*np.log(eN + w@w)

                # Derivatives (not required with fmin_bfgs):
                def Jp(w):
                    return [email protected]@(dy-w@Y) + w*za(w)
                # Jpp = lambda w: [email protected]@Y.T + \
                #     za(w)*(eye(N) - 2*np.outer(w,w)/(eN + w@w))
                # Approx: no radial-angular cross-deriv:
                # Jpp = lambda w: [email protected]@Y.T + za(w)*eye(N)

                def nvrs(w):
                    # inverse of Jpp-approx
                    return (V * (pad0(s**2, N) + za(w)) ** -1.0) @ V.T

                # Find w (optimize)
                wa = Newton_m(Jp, nvrs, zeros(N), is_inverted=True)
                # wa = Newton_m(Jp,Jpp ,zeros(N))
                # wa = fmin_bfgs(J,zeros(N),Jp,disp=0)
                l1 = sqrt(N1/za(wa))

            # Uncomment to revert to ETKF
            # l1 = 1.0

            # Explicitly inflate prior
            # => formulae look different from `bib.bocquet2015expanding`.
            A *= l1
            Y *= l1

            # Compute sqrt update
            Pw = (V * dgn_N(l1)**(-1.0)) @ V.T
            w = [email protected]@Y.T@Pw

            # For the anomalies:
            if not self.Hess:
                # Regular ETKF (i.e. sym sqrt) update (with inflation)
                T = (V * dgn_N(l1)**(-0.5)) @ V.T * sqrt(N1)
                # = ([email protected]@Y.T/N1 + eye(N))**(-0.5)
            else:
                # Also include angular-radial co-dependence.
                # Note: denominator not squared coz
                # unlike `bib.bocquet2015expanding` we have inflated Y.
                Hw = [email protected]@Y.T/N1 + eye(N) - 2*np.outer(w, w)/(eN + w@w)
                T = funm_psd(Hw, lambda x: x**-.5)  # is there a sqrtm Woodbury?

            E = mu + w@A + T@A
            E = post_process(E, self.infl, self.rot)
            stats.infl[kObs] = l1
            stats.trHK[kObs] = (((l1*s)**2 + N1)**(-1.0)*s**2).sum()/HMM.Ny

        stats.assess(k, kObs, E=E)