Example 1
                   Tplot=Tplot,
                   BurnIn=10 * OneYear)

land_sites = np.arange(Nx // 2)
ocean_sites = np.arange(Nx // 2, Nx)

jj = land_sites
Obs = dpr.partial_Id_Obs(Nx, jj)
Obs['noise'] = 1
Obs['localizer'] = nd_Id_localization((Nx, ), (1, ), jj)

HMM = dpr.HiddenMarkovModel(Dyn,
                            Obs,
                            t,
                            X0,
                            LP=LPs(jj),
                            sectors={
                                'land': land_sites,
                                'ocean': ocean_sites
                            })

####################
# Suggested tuning
####################

# Reproduce Miyoshi Figure 5                              # rmse.a  rmse.land.a  rmse.ocean.a
# ---------------------------------------------------------------------------------------------
# xps += LETKF(N=10,rot=False,infl=sqrt(1.015),loc_rad=3) # 2.1     0.38         2.9

# It can be seen that Miyoshi's "Global RMSE" is just the average of the land and ocean RMSEs,
# which explains why this differs so much from DAPPER's (conventionally defined) global RMSE.
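
####################
# Usage sketch
####################
# A minimal sketch of checking the averaging remark above, assuming the
# `dapper.da_methods` namespace and the `HMM.simulate` / `xp.assimilate` /
# `xp.avrgs` interface of recent DAPPER versions (older versions expose the
# DA methods in the top-level `dapper` namespace instead).
if __name__ == "__main__":
    import numpy as np
    import dapper.da_methods as da

    xp = da.LETKF(N=10, rot=False, infl=np.sqrt(1.015), loc_rad=3)
    xx, yy = HMM.simulate()     # synthetic truth and observations
    xp.assimilate(HMM, xx, yy)
    xp.stats.average_in_time()
    # Thanks to the sectors dict above, sector-wise averages are available
    # alongside the global one; Miyoshi's "Global RMSE" then corresponds to
    # (rmse.land.a + rmse.ocean.a) / 2, not to DAPPER's rmse.a.
    print(xp.avrgs.tabulate(['rmse.a', 'rmse.land.a', 'rmse.ocean.a']))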
Example 2
import numpy as np

import dapper as dpr
from dapper.mods.Lorenz96 import dstep_dx, step
from dapper.tools.localization import nd_Id_localization

t = dpr.Chronology(0.05, dtObs=0.4, T=4**5, BurnIn=20)

Nx = 40
Dyn = {'M': Nx, 'model': step, 'linear': dstep_dx, 'noise': 0}

X0 = dpr.GaussRV(M=Nx, C=0.001)

jj = 1 + np.arange(0, Nx, 2)
Obs = dpr.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.5
Obs['localizer'] = nd_Id_localization((Nx, ), (2, ), jj)

HMM = dpr.HiddenMarkovModel(Dyn, Obs, t, X0)

####################
# Suggested tuning
####################
# Compare to Tables 1 and 3 of frei2013bridging. Note:
#  - N is too large to be very interesting.
#  - We obtain better EnKF scores than they report;
#    we use inflation and sqrt updating,
#    and don't really need localization.
# from dapper.mods.Lorenz96.frei2013bridging import HMM     # rmse.a
# xps += EnKF_N(N=400,rot=1)                                # 0.80
# xps += LETKF( N=400,rot=True,infl=1.01,loc_rad=10/1.82)   # 0.79 # short xp. only
# xps += Var3D()                                            # 2.42 # short xp. only
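
####################
# Sanity check
####################
# A quick check (sketch) of the partial-identity observation operator defined
# above; the (x, t) call signature is inferred from the obs models used
# elsewhere in these examples.
if __name__ == "__main__":
    x = np.arange(Nx, dtype=float)
    hx = Obs['model'](x, 0)
    assert hx.shape == (Nx // 2,)   # every other site is observed,
    assert np.all(hx == x[jj])      # starting from index 1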
Example 3
Nx = 100

# def step(x,t,dt):
#   return np.roll(x,1,axis=x.ndim-1)
Fm = Fmat(Nx, -1, 1, tseq.dt)


def step(x, t, dt):
    assert dt == tseq.dt
    return x @ Fm.T


Dyn = {'M': Nx, 'model': step, 'linear': lambda x, t, dt: Fm, 'noise': 0}

X0 = dpr.GaussRV(mu=np.zeros(Nx),
                 C=homogeneous_1D_cov(Nx, Nx / 8, kind='Gauss'))

Ny = 4
jj = dpr.linspace_int(Nx, Ny)
Obs = dpr.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.01

HMM = dpr.HiddenMarkovModel(Dyn, Obs, tseq, X0, LP=LPs(jj))

####################
# Suggested tuning
####################
# xps += EnKF('PertObs',N=16 ,infl=1.02)
# xps += EnKF('Sqrt'   ,N=16 ,infl=1.0)
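
####################
# Sanity check
####################
# A sketch: since the model is the linear map x -> Fm @ x, the Jacobian
# supplied via Dyn['linear'] is simply Fm itself.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.default_rng(3000)
    x = rng.standard_normal(Nx)
    assert np.allclose(step(x, 0, tseq.dt), Fm @ x)   # x @ Fm.T == Fm @ x
    assert Dyn['linear'](x, 0, tseq.dt) is Fm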
Example 4
# Settings not taken from anywhere

import dapper as dpr

from dapper.mods.LotkaVolterra import step, dstep_dx, x0, LP_setup, Tplot

# dt has been chosen after noting that
# using dt up to 0.7 does not change the chaotic properties much,
# as judged by eyeball and Lyapunov measures.

t = dpr.Chronology(0.5, dtObs=10, T=1000, BurnIn=Tplot, Tplot=Tplot)

Nx = len(x0)

Dyn = {'M': Nx, 'model': step, 'linear': dstep_dx, 'noise': 0}

X0 = dpr.GaussRV(mu=x0, C=0.01**2)

jj = [1, 3]
Obs = dpr.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.04**2

HMM = dpr.HiddenMarkovModel(Dyn, Obs, t, X0, LP=LP_setup(jj))

####################
# Suggested tuning
####################
# Not carefully tuned:
# xps += EnKF_N(N=6)
# xps += ExtKF(infl=1.02)
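
####################
# Usage sketch
####################
# Simulate the truth to eyeball the trajectories that the dt remark above
# refers to (a sketch, assuming the HMM.simulate interface of recent DAPPER
# versions).
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    xx, yy = HMM.simulate()
    plt.plot(t.tt, xx)   # one line per state component
    plt.xlabel('time')
    plt.show()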
Example 5
Dyn = {
    'M': LUV.M,
    'model': dpr.with_rk4(LUV.dxdt, autonom=True),
    'noise': 0,
    'linear': LUV.dstep_dx,
}

X0 = dpr.GaussRV(mu=LUV.x0, C=0.01)

R = 1.0
jj = np.arange(nU)
Obs = dpr.partial_Id_Obs(LUV.M, jj)
Obs['noise'] = R

other = {'name': utils.rel2mods(__file__) + '_full'}
HMM_full = dpr.HiddenMarkovModel(Dyn, Obs, t, X0, **other)


################
# Truncated
################

# Same as before, but with dt changed from 0.005 to 0.05
t = dpr.Chronology(dt=0.05, dtObs=0.05, T=4**3, BurnIn=6)

Dyn = {
    'M': nU,
    'model': dpr.with_rk4(LUV.dxdt_parameterized),
    'noise': 0,
}
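
# Aside (a sketch): with_rk4 turns a tendency dxdt into a discrete
# step(x0, t0, dt) via 4th-order Runge-Kutta, as used in both Dyn dicts above.
# Toy illustration on dx/dt = -x, whose exact solution is x(t) = x(0)*exp(-t):
if __name__ == "__main__":
    import numpy as np

    toy_step = dpr.with_rk4(lambda x: -x, autonom=True)
    x = np.array([1.0])
    for _ in range(10):
        x = toy_step(x, 0.0, 0.01)   # integrate to t = 0.1
    assert np.isclose(x[0], np.exp(-0.1))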
Example 6
Dyn = {
    'M': LUV.M,
    'model': dpr.with_rk4(LUV.dxdt, autonom=True),
    'noise': 0,
    'linear': LUV.dstep_dx,
}

X0 = dpr.GaussRV(mu=LUV.x0, C=0.01)

R = 0.1
jj = np.arange(nU)
Obs = dpr.partial_Id_Obs(LUV.M, jj)
Obs['noise'] = R

other = {'name': utils.rel2mods(__file__) + '_full'}
HMM_full = dpr.HiddenMarkovModel(Dyn, Obs, t, X0, LP=LUV.LPs(jj), **other)

################
# Truncated
################

# Same as before, but with dt changed from 0.005 to 0.05
t = dpr.Chronology(dt=0.05, dtObs=0.05, T=4**3, BurnIn=6)

Dyn = {
    'M': nU,
    'model': dpr.with_rk4(LUV.dxdt_parameterized),
    'noise': 0,
}

X0 = dpr.GaussRV(mu=LUV.x0[:nU], C=0.01)
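
# A hypothetical completion, mirroring the "full" setup above (the observation
# re-use and the '_trunc' name suffix are assumptions for illustration only):
# jj = np.arange(nU)
# Obs = dpr.partial_Id_Obs(nU, jj)
# Obs['noise'] = R
# other = {'name': utils.rel2mods(__file__) + '_trunc'}
# HMM_trunc = dpr.HiddenMarkovModel(Dyn, Obs, t, X0, **other)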
Example 7
Obs = {
    'M': Ny,
    'model': hmod,
    'noise': dpr.GaussRV(C=4 * np.eye(Ny)),
    'localizer': localizer,
}

# Moving localization mask for smoothers:
Obs['loc_shift'] = lambda ii, dt: ii  # no movement (suboptimal, but easy)

# Jacobian left unspecified because it's (usually) employed by methods that
# compute the full covariance, which in this case is too big.

############################
# Other
############################
HMM = dpr.HiddenMarkovModel(Dyn, Obs, t, X0, LP=LP_setup(obs_inds))

####################
# Suggested tuning
####################
# Reproducing Fig 7 from Sakov and Oke "DEnKF" paper from 2008.

# Notes:
# - If N<=25, then one typically needs to increase the dissipation
#   to be almost sure to avoid divergence. See counillon2009.py for an example.
#   - We have not needed to increase the dissipation parameter for the EnKF.
# - Our experiments differ from Sakov's in the following minor details:
#    - We use a batch width (unsure what Sakov uses).
#    - The "EnKF-Matlab" code has a bug: it forgets to take the sqrt() of the taper coeffs.
#      This is equivalent to: R_actually_used = R_reported / sqrt(2).
# - The boundary cells are all fixed at 0 by BCs,
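
####################
# Usage sketch
####################
# A localized ensemble run on this setup (a sketch, assuming the
# `dapper.da_methods` namespace); N, infl, and loc_rad are illustrative
# placeholders, not the tuning used in the paper.
if __name__ == "__main__":
    import dapper.da_methods as da

    xp = da.LETKF(N=25, infl=1.04, loc_rad=10)
    xx, yy = HMM.simulate()
    xp.assimilate(HMM, xx, yy)
    xp.stats.average_in_time()
    print(xp.avrgs.tabulate(['rmse.a']))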
Example 8
                               periodic=True,
                               domain=(Nx, ))
batches = np.arange(40)[:, None]
# Define the observation operator
Obs = {
    'M': len(H),
    'model': lambda E, t: E @ H.T,
    'linear': lambda E, t: H,
    'noise': 1,
    'localizer': localization_setup(lambda t: y2x_dists, batches),
}

HMM = dpr.HiddenMarkovModel(
    Dyn,
    Obs,
    t,
    X0,
    LP=LPs(),
    sectors={'land': np.arange(*xtrema(obs_sites)).astype(int)})

####################
# Suggested tuning
####################

# Reproduce Anderson Figure 2
# -----------------------------------------------------------------------------------
# xp = SL_EAKF(N=6, infl=sqrt(1.1), loc_rad=0.2/1.82*40)
# for lbl in ['err','std']:
#     stat = getattr(xp.stats,lbl).f[HMM.t.maskObs_BI]
#     plt.plot(sqrt(np.mean(stat**2, axis=0)),label=lbl)
#
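
# A runnable version of the recipe above (a sketch, assuming the
# `dapper.da_methods` namespace; the stats fields, the `.f` forecast
# accessor, and the mask are taken from the comments):
if __name__ == "__main__":
    import numpy as np
    import matplotlib.pyplot as plt
    import dapper.da_methods as da

    xp = da.SL_EAKF(N=6, infl=np.sqrt(1.1), loc_rad=0.2 / 1.82 * 40)
    xx, yy = HMM.simulate()
    xp.assimilate(HMM, xx, yy)
    for lbl in ['err', 'std']:
        stat = getattr(xp.stats, lbl).f[HMM.t.maskObs_BI]
        plt.plot(np.sqrt(np.mean(stat**2, axis=0)), label=lbl)
    plt.legend()
    plt.show()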