Example #1
from common import *

from mods.Lorenz95.core import step, dfdx
from tools.localization import partial_direct_obs_nd_loc_setup as loc_setup

t = Chronology(0.05, dtObs=0.4, T=4**5, BurnIn=20)

Nx = 40
Dyn = {'M': Nx, 'model': step, 'jacob': dfdx, 'noise': 0}

X0 = GaussRV(M=Nx, C=0.001)

jj = 1 + arange(0, Nx, 2)
Obs = partial_direct_Obs(Nx, jj)
Obs['noise'] = 0.5
Obs['localizer'] = loc_setup((Nx,), (2,), jj, periodic=True)

HMM = HiddenMarkovModel(Dyn, Obs, t, X0)

####################
# Suggested tuning
####################
# Compare to Tables 1 and 3 of frei2013bridging. Note:
#  - N is too large to be very interesting.
#  - We obtain better EnKF scores than they report;
#    we use inflation and sqrt updating,
#    and don't really need localization.
# from mods.Lorenz95.frei2013bridging import HMM             # rmse_a
# cfgs += EnKF_N(N=400,rot=1)                                # 0.80
# cfgs += LETKF(N=400,rot=True,infl=1.01,loc_rad=10/1.82)    # 0.79 # short experiment only
# cfgs += Var3D(infl=0.8)                                    # ≈2.5 # short experiment only
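
####################
# Example usage
####################
# A minimal sketch of running this HMM, assuming the old DAPPER driver API
# (simulate, config.assimilate, average_in_time, print_averages) exported
# by `common`; adapt the names to your DAPPER version.
xx, yy = simulate(HMM)                   # twin experiment: synthetic truth + obs
config = EnKF_N(N=400, rot=True)         # as in the tuning note above
stats  = config.assimilate(HMM, xx, yy)  # run the filter over the chronology
avrgs  = stats.average_in_time()         # time-averaged diagnostics (e.g. rmse_a)
print_averages(config, avrgs)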
Example #2
import os

from common import *

from mods.Lorenz95 import core
from tools.localization import partial_direct_obs_nd_loc_setup as loc_setup

t = Chronology(0.05, dkObs=2, T=4**5, BurnIn=20)

m = 80
f = {
    'm'    : m,
    'model': core.step,
    'noise': 0
    }

X0 = GaussRV(m=m, C=0.001)

jj = arange(0, m, 2)
h = partial_direct_obs_setup(m, jj)
h['localizer'] = loc_setup((m,), (1,), jj, periodic=True)
# h['noise'] = LaplaceRV(C=1, m=len(jj))
h['noise'] = LaplaceParallelRV(C=1, m=len(jj))  # heavy-tailed obs noise; see note below

other = {'name': os.path.relpath(__file__, 'mods/')}
setup = TwinSetup(f, h, t, X0, **other)
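
# The Laplace obs noise above gives a heavy-tailed likelihood. As a generic,
# self-contained illustration (not DAPPER's LaplaceRV internals): Laplace(0, b)
# noise has variance 2*b**2, so unit variance (C=1) means scale b = sqrt(1/2).
def sample_unit_laplace(size):
    """Draw unit-variance Laplace noise (illustration only)."""
    import numpy as np
    return np.random.laplace(scale=np.sqrt(0.5), size=size)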

####################
# Suggested tuning
####################

#                                                           rmse_a
# cfgs += LETKF(N=20,rot=True,infl=1.04       ,loc_rad=5) # 0.44
# cfgs += LETKF(N=40,rot=True,infl=1.04       ,loc_rad=5) # 0.44
# cfgs += LETKF(N=80,rot=True,infl=1.04       ,loc_rad=5) # 0.43
# These scores are quite variable.
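
####################
# Example usage
####################
# A minimal sketch of benchmarking the cfgs above, assuming the old DAPPER
# driver API (simulate, config.assimilate, average_in_time, print_averages)
# exported by `common`:
xx, yy = simulate(setup)  # twin experiment: synthetic truth + observations
for config in [LETKF(N=20, rot=True, infl=1.04, loc_rad=5),
               LETKF(N=40, rot=True, infl=1.04, loc_rad=5)]:
    stats = config.assimilate(setup, xx, yy)
    avrgs = stats.average_in_time()
    print_averages(config, avrgs)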
Example #3
# Excerpt from a 2D-grid setup; the omitted part of the module defines
# jj, random_offset, shape, p, loc_setup, and ens_compatible.
def obs_inds(t):
    """Observation indices, shifted by a random offset at each time t."""
    return jj + random_offset(t)


@ens_compatible
def hmod(E, t):
    """Observe the state at the (time-dependent) indices."""
    return E[obs_inds(t)]


# Localization.
batch_shape = [3, 3]  # width (in grid points) of each state batch.
# Increasing the width
#  => quicker analysis (but less relative speed-up from parallelization,
#     depending on NPROC);
#  => worse (increased) rmse (though width 4 is only slightly worse than 1);
#     if inflation is applied locally, rmse might actually improve.
# (The Gaspari-Cohn sketch at the end of this example illustrates the kind
#  of distance-based tapering the localizer provides.)
localizer = loc_setup(shape[::-1], batch_shape[::-1], obs_inds, periodic=False)

h = {
    'm': p,
    'model': hmod,
    'noise': GaussRV(C=4 * eye(p)),
    'localizer': localizer,
}

# Moving localization mask for smoothers:
h['loc_shift'] = lambda ii, dt: ii  # no movement (suboptimal, but easy)

# The Jacobian is left unspecified because it is (usually) only employed by
# methods that compute the full covariance, which in this case is too big.

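
####################
# Localization taper (illustration)
####################
# The localizer built above supplies tapering coefficients for observations
# near each state batch. As a generic, self-contained illustration of what
# such tapering computes (not the internals of loc_setup), here is the
# widely used Gaspari-Cohn (1999) function: 1 at distance 0, smoothly
# decaying, and exactly 0 beyond twice the radius c.
import numpy as np

def gaspari_cohn(dists, c):
    """5th-order piecewise-rational taper with compact support [0, 2c]."""
    r = np.abs(np.asarray(dists, dtype=float)) / c
    taper = np.zeros_like(r)
    inner = r <= 1
    outer = (r > 1) & (r < 2)
    ri, ro = r[inner], r[outer]
    taper[inner] = -ri**5/4 + ri**4/2 + 5*ri**3/8 - 5*ri**2/3 + 1
    taper[outer] = (ro**5/12 - ro**4/2 + 5*ro**3/8 + 5*ro**2/3
                    - 5*ro + 4 - 2/(3*ro))
    return taper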