# A small integration step dt is used to "cope with" the ocean-sector blow-up
# (caused by the spatially-constant inflation).
OneYear = 0.05 * (24 / 6) * 365

t = dpr.Chronology(0.005, dtObs=0.05, T=110 * OneYear, Tplot=Tplot, BurnIn=10 * OneYear)

# Split the state grid in two halves: "land" (observed) and "ocean" (unobserved).
land_sites = np.arange(Nx // 2)
ocean_sites = np.arange(Nx // 2, Nx)

# Observe (identity) only the land sites, with unit obs-error variance.
jj = land_sites
Obs = dpr.partial_Id_Obs(Nx, jj)
Obs['noise'] = 1
Obs['localizer'] = nd_Id_localization((Nx,), (1,), jj)

HMM = dpr.HiddenMarkovModel(
    Dyn, Obs, t, X0,
    LP=LPs(jj),
    sectors={'land': land_sites, 'ocean': ocean_sites},
)

####################
# Suggested tuning
####################
def obs_inds(t):
    """Return the (time-dependent) observed state indices."""
    return jj + random_offset(t)


@modelling.ens_compatible
def hmod(E, t):
    """Observation operator: select the currently observed components of E."""
    return E[obs_inds(t)]


# Localization.
batch_shape = [2, 2]  # width (in grid points) of each state batch.
# Increasing the width:
# => quicker analysis (but less relative speed-up from parallelization,
#    depending on NPROC);
# => worse (increased) rmse (though width 4 is only slightly worse than 1);
#    if inflation is applied locally, then rmse might actually improve.
localizer = nd_Id_localization(shape[::-1], batch_shape[::-1], obs_inds, periodic=False)

Obs = {
    'M': Ny,
    'model': hmod,
    'noise': modelling.GaussRV(C=4*np.eye(Ny)),
    'localizer': localizer,
}

# Moving localization mask for smoothers:
Obs['loc_shift'] = lambda ii, dt: ii  # no movement (suboptimal, but easy)

# The Jacobian is left unspecified because it is (usually) employed by methods
# that compute the full covariance, which in this case is too big.
Dyn = {
    'M': Nx,
    'model': step,
    # It's not clear from the paper whether Q=0.5 or 0.25.
    # But I'm pretty sure it's applied each dto (not dt).
    'noise': 0.25 / tseq.dto,
    # 'noise': 0.5 / tseq.dto,
}

X0 = modelling.GaussRV(mu=x0(Nx), C=0.001)

# Observe every 4th site (identity), with obs-error std 0.1.
jj = linspace_int(Nx, Nx // 4, periodic=True)
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.1**2
Obs['localizer'] = nd_Id_localization((Nx,), (1,), jj, periodic=True)

HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0)
HMM.liveplotters = LPs(jj)

# Pinheiro et al. use
# - N = 30, but this is only really stated for their PF.
# - loc-rad = 4, but don't state its definition, nor the exact tapering function.
# - infl-factor 1.05, but I'm not sure if it's squared or not.
#
# They find RMSE (figure 8) hovering around 0.8 for the LETKF,
# and conclude that "the new filter consistency outperforms the LETKF".
# I cannot get the LETKF to work well with so much noise,
# but we note that optimal interpolation (a relatively unsophisticated method)
# scores around 0.56:
Nx = KS.Nx

# nRepeat=10
t = dpr.Chronology(KS.dt, dkObs=2, KObs=2 * 10**4, BurnIn=2 * 10**3, Tplot=Tplot)

# Deterministic KS dynamics (with tangent-linear model provided).
Dyn = {'M': Nx, 'model': KS.step, 'linear': KS.dstep_dx, 'noise': 0}

X0 = dpr.GaussRV(mu=KS.x0, C=0.001)

# Fully observed (identity), unit obs-error variance.
Obs = Id_Obs(Nx)
Obs['noise'] = 1
Obs['localizer'] = nd_Id_localization((Nx,), (4,))

HMM = dpr.HiddenMarkovModel(Dyn, Obs, t, X0)
HMM.liveplotters = LPs(np.arange(Nx))

####################
# Suggested tuning
####################
# Reproduce (top-right panel) of Fig. 4 of bocquet2019consistency
# Expected rmse.a:
# --------------------------------------------------------------------------------
# xps += LETKF(N=4 , loc_rad=15/1.82, infl=1.11,rot=True,taper='GC')  # 0.18
# xps += LETKF(N=6,  loc_rad=25/1.82, infl=1.06,rot=True,taper='GC')  # 0.14
# xps += LETKF(N=16, loc_rad=51/1.82, infl=1.02,rot=True,taper='GC')  # 0.11
#
import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz05 import Model
from dapper.tools.localization import nd_Id_localization

# Sakov uses K=300000, BurnIn=1000*0.05
tseq = modelling.Chronology(0.002, dto=0.05, Ko=400, Tplot=2, BurnIn=5)

model = Model(b=8)
Dyn = {
    'M': model.M,
    'model': model.step,
    'noise': 0,
    'object': model,
}

X0 = modelling.GaussRV(mu=model.x0, C=0.001)

# Fully observed (identity), unit obs-error variance.
jj = np.arange(model.M)  # obs_inds
Obs = modelling.partial_Id_Obs(model.M, jj)
Obs['noise'] = 1
Obs['localizer'] = nd_Id_localization((model.M,), (6,))

HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0)