Example 1
# The names `steppers`, `x0`, and `Tplot` are assumed to be imported from
# the Lorenz96s model module (the imports are not shown in this excerpt).
import dapper.mods as modelling


def HMMs(stepper="Tay2", resolution="Low", R=1):
    """Define the various HMMs used."""
    # Use small version of L96. Has 4 non-stable Lyapunov exponents.
    Nx = 10

    # Time sequence
    # Grudzien'2020 uses the below chronology with Ko=25000, BurnIn=5000.
    t = modelling.Chronology(dt=0.005, dto=.1, T=30, Tplot=Tplot, BurnIn=10)
    if resolution == "High":
        t.dt = 0.001
    elif stepper != "Tay2":
        t.dt = 0.01

    # Dynamical operator
    Dyn = {'M': Nx, 'model': steppers(stepper)}

    # (Random) initial condition
    X0 = modelling.GaussRV(mu=x0(Nx), C=0.001)

    # Observation operator
    jj = range(Nx)  # obs_inds
    Obs = modelling.partial_Id_Obs(Nx, jj)
    Obs['noise'] = R

    return modelling.HiddenMarkovModel(Dyn, Obs, t, X0)
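
# A minimal usage sketch, assuming DAPPER's standard HiddenMarkovModel API:
HMM = HMMs(stepper="Tay2", resolution="Low", R=1)
# xx, yy = HMM.simulate()  # generate synthetic truth (xx) and observations (yy)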
Example 2
Nx = 80
Dyn = {
    'M': Nx,
    'model': step,
    'noise': 0,
}

X0 = modelling.GaussRV(M=Nx, C=0.001)

jj = np.arange(0, Nx, 2)
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['localizer'] = nd_Id_localization((Nx,), (1,), jj)
# Obs['noise'] = RVs.LaplaceRV(C=1,M=len(jj))
Obs['noise'] = RVs.LaplaceParallelRV(C=1, M=len(jj))

HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, X0)

####################
# Suggested tuning
####################

#                                                          rmse.a
# xps += LETKF(N=20,rot=True,infl=1.04       ,loc_rad=5) # 0.44
# xps += LETKF(N=40,rot=True,infl=1.04       ,loc_rad=5) # 0.44
# xps += LETKF(N=80,rot=True,infl=1.04       ,loc_rad=5) # 0.43
# These scores are quite variable:
# xps += LNETF(N=40,rot=True,infl=1.10,Rs=2.5,loc_rad=5) # 0.57
# xps += LNETF(N=80,rot=True,infl=1.10,Rs=1.6,loc_rad=5) # 0.45

# In addition to standard post-analysis inflation,
# we also find it necessary to tune the inflation (Rs) for R.
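
# A sketch of launching the comparison above, assuming DAPPER's standard
# experiment-list API:
# import dapper as dpr
# import dapper.da_methods as da
# xps = dpr.xpList()
# xps += da.LETKF(N=20, rot=True, infl=1.04, loc_rad=5)
# xps += da.LNETF(N=40, rot=True, infl=1.10, Rs=2.5, loc_rad=5)
# save_as = xps.launch(HMM)
# print(xps.tabulate_avrgs())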
Example 3
Fm = Fmat(Nx, -1, 1, tseq.dt)


def step(x, t, dt):
    assert dt == tseq.dt
    return x @ Fm.T


Dyn = {
    'M': Nx,
    'model': lambda x, t, dt: damp * step(x, t, dt),
    'linear': lambda x, t, dt: damp * Fm,
    'noise': modelling.GaussRV(C=modelling.CovMat(L, 'Left')),
}

HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0, LP=LPs(jj))

####################
# Suggested tuning
####################

# Expected rmse.a = 0.3
# xp = EnKF('PertObs',N=30,infl=3.2)
# Note that infl=1 may yield approximately optimal rmse,
# even though rmv << rmse in that case.
# Why is rmse so INsensitive to inflation, especially for PertObs?

# Reproduce raanes'2015 "extending sqrt method to model noise":
# xp = EnKF('Sqrt',fnoise_treatm='XXX',N=30,infl=1.0),
# where XXX is one of:
# - Stoch
# - Mult-1
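
# A sketch of sweeping those treatments, assuming they are valid values of
# the EnKF's `fnoise_treatm` option:
# import dapper as dpr
# import dapper.da_methods as da
# xps = dpr.xpList()
# for treatm in ['Stoch', 'Mult-1']:
#     xps += da.EnKF('Sqrt', N=30, infl=1.0, fnoise_treatm=treatm)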
Example 4
# Dynamical operator (the opening of this dict is cut off in the excerpt;
# the restored lines follow the identical setup in Example 6):
Dyn = {
    'M': LUV.M,
    'model': modelling.with_rk4(LUV.dxdt, autonom=True),
    'noise': 0,
    'linear': LUV.dstep_dx,
}

X0 = modelling.GaussRV(mu=LUV.x0, C=0.01)

R = 0.1
jj = np.arange(nU)
Obs = modelling.partial_Id_Obs(LUV.M, jj)
Obs['noise'] = R

other = {'name': rel2mods(__file__) + '_full'}
HMM_full = modelling.HiddenMarkovModel(Dyn,
                                       Obs,
                                       tseq,
                                       X0,
                                       LP=LUV.LPs(jj),
                                       **other)

################
# Truncated
################

# Just change dt from 0.005 to 0.05
tseq = modelling.Chronology(dt=0.05, dto=0.05, T=4**3, BurnIn=6)

Dyn = {
    'M': nU,
    'model': modelling.with_rk4(LUV.dxdt_parameterized),
    'noise': 0,
}
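
# A sketch of completing the truncated setup, mirroring Example 6
# (`HMM_trunc` is a hypothetical name; reusing `Obs` assumes the observed
# indices jj = np.arange(nU) remain valid for the truncated state):
X0 = modelling.GaussRV(mu=LUV.x0[:nU], C=0.01)
# HMM_trunc = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0)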
Example 5
# Observation operator (the opening of this dict is cut off in the excerpt;
# the `Obs = {` and `'M': Ny` lines are assumed, with Ny = obs dimension):
Obs = {
    'M': Ny,
    'model': hmod,
    'noise': modelling.GaussRV(C=4*np.eye(Ny)),
    'localizer': localizer,
}

# Moving localization mask for smoothers:
Obs['loc_shift'] = lambda ii, dt: ii  # no movement (suboptimal, but easy)
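
# A hypothetical sketch of a mask that actually moves: shift the obs indices
# with a constant advection speed `c` (grid cells per unit time) on a
# periodic grid of size M (both `c` and `M` are assumed, not defined here):
# Obs['loc_shift'] = lambda ii, dt: (ii + int(round(c * dt))) % M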

# The Jacobian is left unspecified because it is (usually) only employed by
# methods that compute the full covariance, which in this case is too big.


############################
# Other
############################
HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, X0, LP=LP_setup(obs_inds))


####################
# Suggested tuning
####################
# Reproduce Fig. 7 of Sakov and Oke's 2008 "DEnKF" paper.

# Notes:
# - If N<=25, one typically needs to increase the dissipation
#   to be almost sure of avoiding divergence. See counillon2009.py for an example.
#    - We have not needed to increase the dissipation parameter for the EnKF.
# - Our experiments differ from Sakov's in the following minor details:
#    - We use a batch width (unsure what Sakov uses).
#    - The "EnKF-Matlab" code has a bug: it forgets to take sqrt() of the taper coeffs.
#      This is equivalent to: R_actually_used = R_reported / sqrt(2).
Example 6
Dyn = {
    'M': LUV.M,
    'model': modelling.with_rk4(LUV.dxdt, autonom=True),
    'noise': 0,
    'linear': LUV.dstep_dx,
}

X0 = modelling.GaussRV(mu=LUV.x0, C=0.01)

R = 1.0
jj = np.arange(nU)
Obs = modelling.partial_Id_Obs(LUV.M, jj)
Obs['noise'] = R

other = {'name': rel2mods(__file__) + '_full'}
HMM_full = modelling.HiddenMarkovModel(Dyn, Obs, t, X0, **other)

################
# Truncated
################

# Just change dt from 0.005 to 0.05
t = modelling.Chronology(dt=0.05, dtObs=0.05, T=4**3, BurnIn=6)

Dyn = {
    'M': nU,
    'model': modelling.with_rk4(LUV.dxdt_parameterized),
    'noise': 0,
}

X0 = modelling.GaussRV(mu=LUV.x0[:nU], C=0.01)
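
# A sketch of assembling the truncated HMM as for the full model above
# (`HMM_trunc` is a hypothetical name; reusing `Obs` assumes the observed
# indices remain valid for the truncated state):
# HMM_trunc = modelling.HiddenMarkovModel(Dyn, Obs, t, X0)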
Example 7
import dapper.mods as modelling

from dapper.mods.LotkaVolterra import step, dstep_dx, x0, LP_setup, Tplot

t = modelling.Chronology(0.5, dto=10, T=1000, BurnIn=Tplot, Tplot=Tplot)

Nx = len(x0)

Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0,
}

X0 = modelling.GaussRV(mu=x0, C=0.01**2)

jj = [1, 3]
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.04**2

HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, X0, LP=LP_setup(jj))

####################
# Suggested tuning
####################
# Not carefully tuned:
# xps += EnKF_N(N=6)
# xps += ExtKF(infl=1.02)
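
# A sketch of running one of the above, assuming DAPPER's standard
# single-experiment loop:
# import dapper.da_methods as da
# xp = da.EnKF_N(N=6)
# xp.assimilate(HMM)
# xp.stats.average_in_time()
# print(xp.avrgs.tabulate(['rmse.a', 'rmv.a']))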
Example 8
        jj, zoomy=0.8, dims=[0, Nx], labels=["$x_0$", "Force"]),
     ),
]

# Labels for sectors of state vector.
# DAPPER will compute diagnostic statistics for the full state vector,
# but also for each sector of it (averaged in space according to the
# methods specified in your .dpr_config.yaml:field_summaries key).
# The name "sector" comes from its typical usage to distinguish
# "ocean" and "land" parts of the state vector.
# Here we use it to get individual statistics of the parameter and state.
parts = dict(state=np.arange(Nx),
             param=np.arange(Np)+Nx)

# Wrap-up model specification
HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, sectors=parts, LP=LP)
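
# A sketch of reading off the per-sector scores after an experiment,
# assuming DAPPER's usual averages structure (sector names become keys,
# as in the `rmse.land.a` columns of other examples here):
# xp.avrgs.tabulate(['rmse.state.a', 'rmse.param.a'])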


# #### Treat truth and DA methods differently

# Bocquet et al. do not sample the true parameter value from the
# Bayesian (random) prior / initial conditions (ICs) given to the DA methods.
# Instead, it is simply set to 8.

TRUTH = 8
GUESS = 7

# Seeing how far the initial guess (and its uncertainty, defined below)
# is from the truth, this setup constitutes a kind of model error.
# Such a feature is not required to make the experiment interesting.
# However, our goal here is to reproduce the results of Bocquet et al.,
Example 9
                         BurnIn=10 * OneYear)

land_sites = np.arange(Nx // 2)
ocean_sites = np.arange(Nx // 2, Nx)

jj = land_sites
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 1
Obs['localizer'] = nd_Id_localization((Nx, ), (1, ), jj)

HMM = modelling.HiddenMarkovModel(
    Dyn,
    Obs,
    t,
    X0,
    LP=LPs(jj),
    sectors={
        'land': land_sites,
        'ocean': ocean_sites
    },
)

####################
# Suggested tuning
####################

# Reproduce Miyoshi Figure 5               # rmse.a  rmse.land.a  rmse.ocean.a
# ------------------------------------------------------------------------------
# xps += LETKF(N=10,rot=False,
#              infl=sqrt(1.015),loc_rad=3) # 2.1     0.38         2.9
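
# A sketch, assuming DAPPER's standard run loop, of producing the
# land/ocean columns above:
# xp.assimilate(HMM)
# xp.stats.average_in_time()
# print(xp.avrgs.tabulate(['rmse.a', 'rmse.land.a', 'rmse.ocean.a']))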
Example 10
                               np.arange(Nx)[:, None],
                               domain=(Nx, ))
batches = np.arange(40)[:, None]
# Define operator
Obs = {
    'M': len(H),
    'model': lambda E, t: E @ H.T,
    'linear': lambda E, t: H,
    'noise': 1,
    'localizer': localization_setup(lambda t: y2x_dists, batches),
}
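
# Note: the operator above is linear. With one ensemble member per row of E,
# the product E @ H.T applies H to every member at once; a tiny sanity check
# (assuming numpy and a conforming ensemble E):
# assert np.allclose(Obs['model'](E, t=0), (H @ E.T).T)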

HMM = modelling.HiddenMarkovModel(
    Dyn,
    Obs,
    tseq,
    X0,
    LP=LPs(),
    sectors={'land': np.arange(*xtrema(obs_sites)).astype(int)})

####################
# Suggested tuning
####################

# Reproduce Anderson Figure 2
# -----------------------------------------------------------------------------------
# xp = SL_EAKF(N=6, infl=sqrt(1.1), loc_rad=0.2/1.82*40)
# for lbl in ['err', 'spread']:
#     stat = getattr(xp.stats,lbl).f[HMM.tseq.masko]
#     plt.plot(sqrt(np.mean(stat**2, axis=0)),label=lbl)
#