Example #1

import numpy as np

import dapper.mods as modelling
from dapper.mods.LA import Fmat

def test_operator_dyn():
    """Simple test using 1D linear advection."""
    Nx = 6

    tseq = modelling.Chronology(dt=1, dko=5, T=10)
    Fm = Fmat(Nx, c=-1, dx=1, dt=tseq.dt)

    def step(x, t, dt):
        assert dt == tseq.dt
        return x @ Fm.T

    Dyn = {
        "M": Nx,
        "model": step,
        "linear": lambda x, t, dt: Fm,
        "noise": 0,
    }

    Dyn_op = modelling.Operator(**Dyn)

    # Square wave
    x = np.array([1, 1, 1, 0, 0, 0])

    x1 = Dyn_op(x, tseq.T - 1, tseq.dt)
    assert (x1 == np.array([0, 1, 1, 1, 0, 0])).all()

    x2 = Dyn_op(x1, tseq.T - 3, 1)
    assert (x2 == np.array([0, 0, 1, 1, 1, 0])).all()

    x3 = Dyn_op(x2, tseq.T - 4, 1)
    assert (x3 == np.array([0, 0, 0, 1, 1, 1])).all()

    x4 = Dyn_op(x3, tseq.T - 5, 1)
    assert (x4 == np.array([1, 0, 0, 0, 1, 1])).all()
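The exact assertions above confirm that, with these parameters, applying `Fm` amounts to a periodic shift by one grid cell (cf. the `np.roll` remark in Example #23). A small self-contained check, not part of the original test:

import numpy as np

from dapper.mods.LA import Fmat

Nx = 6
Fm = Fmat(Nx, c=-1, dx=1, dt=1)
x = np.array([1, 1, 1, 0, 0, 0])
assert (x @ Fm.T == np.roll(x, 1)).all()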
Example #2
def HMMs(stepper="Tay2", resolution="Low", R=1):
    """Define the various HMMs used."""
    # Use small version of L96. Has 4 non-stable Lyapunov exponents.
    Nx = 10

    # Time sequence
    # Grudzien'2020 uses the below chronology with Ko=25000, BurnIn=5000.
    t = modelling.Chronology(dt=0.005, dto=.1, T=30, Tplot=Tplot, BurnIn=10)
    if resolution == "High":
        t.dt = 0.001
    elif stepper != "Tay2":
        t.dt = 0.01

    # Dynamical operator
    Dyn = {'M': Nx, 'model': steppers(stepper)}

    # (Random) initial condition
    X0 = modelling.GaussRV(mu=x0(Nx), C=0.001)

    # Observation operator
    jj = range(Nx)  # obs_inds
    Obs = modelling.partial_Id_Obs(Nx, jj)
    Obs['noise'] = R

    return modelling.HiddenMarkovModel(Dyn, Obs, t, X0)
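A brief usage sketch of the factory above (it assumes `Tplot`, `steppers`, and `x0` are imported in the original module; the arguments shown are just the defaults):

HMM_low = HMMs()                                    # stepper="Tay2", resolution="Low", R=1
HMM_high = HMMs(stepper="Tay2", resolution="High")  # same, but with dt=0.001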
Example #3
import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz05 import Model
from dapper.tools.localization import nd_Id_localization

# Sakov uses K=300000, BurnIn=1000*0.05
tseq = modelling.Chronology(0.002, dto=0.05, Ko=400, Tplot=2, BurnIn=5)

model = Model(b=8)

Dyn = {
    'M': model.M,
    'model': model.step,
    'noise': 0,
    'object': model,
}

X0 = modelling.GaussRV(mu=model.x0, C=0.001)

jj = np.arange(model.M)  # obs_inds
Obs = modelling.partial_Id_Obs(model.M, jj)
Obs['noise'] = 1
Obs['localizer'] = nd_Id_localization((model.M, ), (6, ))

HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0)
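A minimal sketch of how such an HMM is typically driven in DAPPER (the DA method and tuning values below are illustrative, not part of the original module):

import dapper.da_methods as da

xx, yy = HMM.simulate()                  # synthetic truth and observations
xp = da.EnKF('Sqrt', N=20, infl=1.02, rot=True)
xp.assimilate(HMM, xx, yy)
xp.stats.average_in_time()
print(xp.avrgs)                          # e.g. time-averaged rmse.a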
Example #4
"""Harder settings than in `dapper.mods.Lorenz84.pajonk2012`.

This was judged by noting that, with their settings,
the average value of `trHK` is 0.013.

Here we increase `dkObs` to make the DA problem more difficult.
"""

import dapper.mods as modelling
from dapper.mods.Lorenz84.pajonk2012 import HMM as _HMM

HMM = _HMM.copy()
HMM.t = modelling.Chronology(0.05, dkObs=10, T=4**5, BurnIn=20)

####################
# Suggested tuning
####################
# xps += ExtKF(infl=8)
# xps += EnKF ('Sqrt',N=10,infl=1.05)
# xps += EnKF ('Sqrt',N=100,rot=True,infl=1.01)
# xps += EnKF_N (N=4)
# xps += PartFilt(N=100, NER=0.4) # add reg!
# xps += PartFilt(N=1000, NER=0.1) # add reg!
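The commented `xps += ...` lines above follow DAPPER's usual benchmarking pattern; a hedged sketch of how they would be run, mirroring the commented script in Example #15 below:

import dapper as dpr
import dapper.da_methods as da

xps = dpr.xpList()
xps += da.EnKF('Sqrt', N=10, infl=1.05)
xps += da.EnKF_N(N=4)

save_as = xps.launch(HMM, save_as=False)
print(xps.tabulate_avrgs())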
Example #5
"""Reproduce results from Fig. 2 of `bib.raanes2014ext`."""

import numpy as np

import dapper.mods as modelling
from dapper.mods.LA import Fmat, sinusoidal_sample
from dapper.mods.Lorenz96 import LPs
from dapper.tools.linalg import tsvd

# Burn-in allows damp*x and x+noise to balance out
tseq = modelling.Chronology(dt=1, dkObs=5, T=500, BurnIn=60, Tplot=100)

Nx = 1000
Ny = 40

jj = modelling.linspace_int(Nx, Ny)
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.01

#################
#  Noise setup  #
#################
# Instead of sampling model noise from sinusoidal_sample(),
# we will replicate it below by a covariance matrix approach.
# But, for strict equivalence, one would have to use
# uniform (i.e. not Gaussian) random numbers.
wnumQ = 25
sample_filename = modelling.rc.dirs.samples / ('LA_Q_wnum%d.npz' % wnumQ)

try:
    # Load pre-generated
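The snippet is cut off above. The following is not the original continuation, but a minimal sketch of the covariance-matrix idea just described: draw a sample of sinusoidal fields, estimate their covariance, and use it as Gaussian model noise (`N_sample` and the `sinusoidal_sample` call signature are assumptions):

N_sample = 500
sample = sinusoidal_sample(Nx, wnumQ, N_sample)   # assumed to return shape (N_sample, Nx)
Q = np.cov(sample, rowvar=False)                  # (Nx, Nx) covariance estimate
model_noise = modelling.GaussRV(mu=np.zeros(Nx), C=Q)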
Example #6
There is nothing to reproduce from the paper as there are no
statistically converged numbers.
"""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz63 import LPs
from dapper.mods.Lorenz84 import dstep_dx, step, x0

Nx = len(x0)
Ny = Nx

day = 0.05 / 6 * 24  # because dt=0.05 <--> 6h in "model time scale" (so 1 day = 0.2)
t = modelling.Chronology(0.05, dkObs=1, T=200 * day, BurnIn=10 * day)

Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0,
}

# X0 = modelling.GaussRV(C=0.01,M=Nx) # Decreased from Pajonk's C=1.
X0 = modelling.GaussRV(C=0.01, mu=x0)

jj = np.arange(Nx)
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.1
Example #7
############################
# Time series, model, initial condition
############################

model = model_config("sakov2008", {})
Dyn = {
    'M': np.prod(shape),
    'model': model.step,
    'noise': 0,
}

# Considering that I have 8GB mem on the Mac, and the estimate:
# ≈ (8 bytes/float)*(129² float/stat)*(7 stat/k) * K,
# it should be possible to run experiments of length (K) < 8000.
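# Worked out: 8 * 129**2 * 7 ≈ 9.3e5 bytes per stored step,
# so K = 8000 gives ≈ 7.5 GB, i.e. just within the 8 GB available.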
t = modelling.Chronology(dt=model.prms['dtout'], dko=1, T=1500, BurnIn=250)
# In my opinion the burn-in should be 400.
# Sakov also used 10 repetitions.

X0 = modelling.RV(M=Dyn['M'], file=sample_filename)


############################
# Observation settings
############################

# This will look like satellite tracks when plotted in 2D
Ny = 300
jj = modelling.linspace_int(Dyn['M'], Ny)

# Want: random_offset(t1)==random_offset(t2) if t1==t2.
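# Not the original code (which is cut off here), but a minimal sketch of one way to
# satisfy that requirement: seed a throwaway RNG with the obs-step index, so equal
# times always give equal offsets. The name and defaults are for illustration only.
def random_offset(time, dt=t.dt, max_offset=100):
    rng = np.random.default_rng(int(round(time / dt)))
    return rng.integers(0, max_offset)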
Example #8
"""Settings as in `bib.frei2013bridging`.

They also cite its use in the following:

`bib.bengtsson2003toward`, `bib.lei2011moment`, `bib.frei2013mixture`.
"""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz96 import dstep_dx, step
from dapper.tools.localization import nd_Id_localization

t = modelling.Chronology(0.05, dtObs=0.4, T=4**5, BurnIn=20)

Nx = 40
Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0,
}

X0 = modelling.GaussRV(M=Nx, C=0.001)

jj = 1 + np.arange(0, Nx, 2)
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.5
Obs['localizer'] = nd_Id_localization((Nx, ), (2, ), jj)

HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, X0)
Example #9
"""Settings that produce somewhat interesting/challenging DA problems.

`dt` has been chosen after noting that
using `dt` up to 0.7 does not change the chaotic properties much,
as judged by eye and by Lyapunov measures.
"""

import dapper.mods as modelling

from dapper.mods.LotkaVolterra import step, dstep_dx, x0, LP_setup, Tplot

t = modelling.Chronology(0.5, dto=10, T=1000, BurnIn=Tplot, Tplot=Tplot)

Nx = len(x0)

Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0,
}

X0 = modelling.GaussRV(mu=x0, C=0.01**2)

jj = [1, 3]
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.04**2

HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, X0, LP=LP_setup(jj))

####################
Example #10
"""Settings from `bib.pinheiro2019efficient`."""
import dapper.mods as modelling
import dapper.mods.Lorenz96 as model
from dapper.mods.Lorenz96 import LPs, step, x0
from dapper.mods.utils import linspace_int
from dapper.tools.localization import nd_Id_localization

model.Force = 8.17
tseq = modelling.Chronology(0.01, dko=10, K=4000, Tplot=10, BurnIn=10)

Nx = 1000

Dyn = {
    'M': Nx,
    'model': step,
    # It's not clear from the paper whether Q=0.5 or 0.25.
    # But I'm pretty sure it's applied each dto (not dt).
    'noise': 0.25 / tseq.dto,
    # 'noise': 0.5 / tseq.dto,
}

X0 = modelling.GaussRV(mu=x0(Nx), C=0.001)

jj = linspace_int(Nx, Nx // 4, periodic=True)
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 0.1**2
Obs['localizer'] = nd_Id_localization((Nx, ), (1, ), jj, periodic=True)

HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0)

HMM.liveplotters = LPs(jj)
Example #11
"""Reproduce experiments from `bib.counillon2009application`."""

import dapper.mods as modelling
from dapper.mods.QG import model_config
from dapper.mods.QG.sakov2008 import HMM as _HMM

HMM = _HMM.copy()
dt = 1.25 * 10  # 10 steps between obs (also requires dko=1)
HMM.tseq = modelling.Chronology(dt=dt, dko=1, T=1000 * dt, BurnIn=10 * dt)

HMM.Dyn.model = model_config("counillon2009_ens", {
    "dtout": dt,
    'RKH2': 2.0e-11
}).step
truth_model = model_config("counillon2009_truth", {"dtout": dt}).step

####################
# Suggested tuning
####################
# Reproduce Table 1 results.
# - Note that Counillon et al:
#    - Report forecast rmse's (but they are pretty close to analysis rmse anyways).
#    - Use enkf-matlab, which has a bug that causes them to report the
#      wrong localization radius (see mods/QG/sakov2008.py).
#      Eg. enkf-matlab radius 15 (resp 25) corresponds to
#      DAPPER radius 10.6 (resp 17.7).

# R = 17.7 # equiv. to R=25 in enkf-matlab
# from dapper.mods.QG.counillon2009 import HMM, truth_model     # rmse.f:
# xps += LETKF(mp=True, N=25,infl=1.15,taper='Gauss',loc_rad=R) # 1.11
# xps += LETKF(mp=True, N=15,infl=1.35,taper='Gauss',loc_rad=R) # 1.2
Example #12
"""A land-ocean setup from `bib.anderson2009spatially`."""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz96.sakov2008 import X0, Dyn, LPs, Nx, Tplot
from dapper.tools.localization import localization_setup, pairwise_distances
from dapper.tools.viz import xtrema

tseq = modelling.Chronology(0.05,
                            dto=0.05,
                            Ko=4000,
                            Tplot=Tplot,
                            BurnIn=2000 * 0.05)

# Define obs sites
obs_sites = 0.395 + 0.01 * np.arange(1, 21)
obs_sites *= 40
# Surrounding inds
ii_below = obs_sites.astype(int)
ii_above = ii_below + 1
# Linear-interpolation weights
w_above = obs_sites - ii_below
w_below = 1 - w_above
# Define obs matrix
H = np.zeros((20, 40))
H[np.arange(20), ii_below] = w_below
H[np.arange(20), ii_above] = w_above
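# Each row of H thus interpolates the state linearly to one fractional obs site,
# e.g. the first site, 16.2, is observed as 0.8*x[16] + 0.2*x[17] (weights sum to 1).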
# Measure obs-state distances
y2x_dists = pairwise_distances(obs_sites[:, None],
                               np.arange(Nx)[:, None],
Example #13
"""A land-ocean setup from `bib.anderson2009spatially`."""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz96.sakov2008 import X0, Dyn, LPs, Nx, Tplot
from dapper.tools.localization import localization_setup, pairwise_distances
from dapper.tools.viz import xtrema

t = modelling.Chronology(0.05,
                         dtObs=0.05,
                         KObs=4000,
                         Tplot=Tplot,
                         BurnIn=2000 * 0.05)

# Define obs sites
obs_sites = 0.395 + 0.01 * np.arange(1, 21)
obs_sites *= 40
# Surrounding inds
ii_below = obs_sites.astype(int)
ii_above = ii_below + 1
# Linear-interpolation weights
w_above = obs_sites - ii_below
w_below = 1 - w_above
# Define obs matrix
H = np.zeros((20, 40))
H[np.arange(20), ii_below] = w_below
H[np.arange(20), ii_above] = w_above
# Measure obs-state distances
y2x_dists = pairwise_distances(obs_sites[:, None],
                               np.arange(Nx)[:, None],
Example #14
"""Reproduce experiments from `bib.counillon2009application`."""

import dapper.mods as modelling
from dapper.mods.QG import model_config
from dapper.mods.QG.sakov2008 import HMM as _HMM

HMM = _HMM.copy()
dt = 1.25 * 10  # 10 steps between obs (also requires dkObs=1)
HMM.t = modelling.Chronology(dt=dt, dkObs=1, T=1000 * dt, BurnIn=10 * dt)

HMM.Dyn.model = model_config("counillon2009_ens", {
    "dtout": dt,
    'RKH2': 2.0e-11
}).step
truth_model = model_config("counillon2009_truth", {"dtout": dt}).step

####################
# Suggested tuning
####################
# Reproduce Table 1 results.
# - Note that Counillon et al:
#    - Report forecast rmse's (but they are pretty close to analysis rmse anyways).
#    - Use enkf-matlab, which has a bug that causes them to report the
#      wrong localization radius (see mods/QG/sakov2008.py).
#      Eg. enkf-matlab radius 15 (resp 25) corresponds to
#      DAPPER radius 10.6 (resp 17.7).

# R = 17.7 # equiv. to R=25 in enkf-matlab
# from dapper.mods.QG.counillon2009 import HMM, truth_model     # rmse.f:
# xps += LETKF(mp=True, N=25,infl=1.15,taper='Gauss',loc_rad=R) # 1.11
# xps += LETKF(mp=True, N=15,infl=1.35,taper='Gauss',loc_rad=R) # 1.2
Example #15
"""The identity model (that does nothing, i.e. sets `output = input`).

This means that the state dynamics are just Brownian motion.

Apart from setting the state to a constant, this is the simplest model you can think of.
"""
import dapper.mods as modelling

tseq = modelling.Chronology(1, dko=1, Ko=2000, Tplot=10, BurnIn=0)
M = 4
Obs = {'noise': 2, 'M': M}
Dyn = {'noise': 1, 'M': M}
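# NB: neither dict specifies a 'model'; modelling.Operator then falls back to the
# identity map (hence "identity model"), so the dynamics reduce to Brownian motion.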
X0 = modelling.GaussRV(C=1, M=M)

HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0)

#########################
#  Benchmarking script  #
#########################
# import dapper as dpr
# dpr.rc.field_summaries.append("ms")

# # We do not include Climatology and OptInterp because their variance and error
# # grow with the duration of the experiment, which makes them less interesting here.
# import dapper.da_methods as da
# xps = dpr.xpList()
# xps += da.Var3D("eye", xB=2)
# xps += da.ExtKF()
# xps += da.EnKF('Sqrt', N=100)

# save_as = xps.launch(HMM, save_as=False)
Example #16
"""From Fig. 1 of `bib.bocquet2010beyond`."""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz96 import step

tseq = modelling.Chronology(0.05, dko=1, T=4**3, BurnIn=20)

Nx = 10
Dyn = {
    'M': Nx,
    'model': step,
    'noise': 0,
}

X0 = modelling.GaussRV(M=Nx, C=0.001)

jj = np.arange(0, Nx, 2)
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 1.5

HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0)

####################
# Suggested tuning
####################
# Why are these benchmarks superior to those in the article?
# Perhaps because, in the EnKF, we use:
# - inflation instead of additive noise (?)
# - Sqrt      instead of perturbed obs
Example #17
"""Settings that produce somewhat interesting/challenging DA problems."""

import dapper.mods as modelling

from dapper.mods.DoublePendulum import step, x0, LP_setup, dstep_dx

t = modelling.Chronology(0.01, dko=100, T=30, BurnIn=10)

Dyn = {
    'M': len(x0),
    'model': step,
    'noise': 0,
    'linear': dstep_dx,
}

X0 = modelling.GaussRV(mu=x0, C=0.01**2)

jj = [0, 2]
Obs = modelling.partial_Id_Obs(len(x0), jj)
Obs['noise'] = 0.1**2

HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, X0, LP=LP_setup(jj))

####################
# Suggested tuning
####################
# from dapper.mods.DoublePendulum.settings101 import HMM # Expct rmse.a:

# HMM.tseq.dko = anything
# xps += Climatology()                                 # 5
# xps += OptInterp()                                   # 2.5
Example #18
"""Settings from `bib.anderson2010non`."""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz63 import Tplot, dstep_dx, step, x0

t = modelling.Chronology(0.01,
                         dkObs=12,
                         KObs=1000,
                         Tplot=Tplot,
                         BurnIn=4 * Tplot)

Nx = len(x0)

Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0,
}

X0 = modelling.GaussRV(C=2, mu=x0)

Obs = modelling.partial_Id_Obs(Nx, np.arange(Nx))
Obs['noise'] = 8.0

HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, X0)

####################
# Suggested tuning
Example #19
"""Settings that produce somewhat interesting/challenging DA problems."""

import numpy as np
import dapper.mods as modelling

from dapper.mods.Ikeda import step, x0, Tplot, LPs

tseq = modelling.Chronology(1, dko=1, Ko=1000, Tplot=Tplot, BurnIn=4 * Tplot)

Nx = len(x0)

Dyn = {
    'M': Nx,
    'model': step,
    'noise': 0,
}

X0 = modelling.GaussRV(C=.1, mu=x0)

jj = np.arange(Nx)  # obs_inds
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = .1  # modelling.GaussRV(C=CovMat(1*eye(Nx)))

HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0)

HMM.liveplotters = LPs(jj)

####################
# Suggested tuning
####################
Example #20
This HMM is used (with small variations) in many DA papers, for example

`bib.bocquet2011ensemble`, `bib.sakov2012iterative`,
`bib.bocquet2015expanding`, `bib.bocquet2013joint`.
"""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz96 import LPs, Tplot, dstep_dx, step, x0
from dapper.tools.localization import nd_Id_localization

# Sakov uses K=300000, BurnIn=1000*0.05
t = modelling.Chronology(0.05,
                         dkObs=1,
                         KObs=1000,
                         Tplot=Tplot,
                         BurnIn=2 * Tplot)

Nx = 40
x0 = x0(Nx)

Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0,
}

X0 = modelling.GaussRV(mu=x0, C=0.001)
Example #21
import numpy as np

import dapper.mods as modelling
from dapper.mods.LorenzUV import model_instance

from ..utils import rel2mods

LUV = model_instance(nU=36, J=10, F=10)
nU = LUV.nU

################
# Full
################

t = modelling.Chronology(dt=0.005, dtObs=0.05, T=4**3, BurnIn=6)

Dyn = {
    'M': LUV.M,
    'model': modelling.with_rk4(LUV.dxdt, autonom=True),
    'noise': 0,
    'linear': LUV.dstep_dx,
}

X0 = modelling.GaussRV(mu=LUV.x0, C=0.01)

R = 1.0
jj = np.arange(nU)
Obs = modelling.partial_Id_Obs(LUV.M, jj)
Obs['noise'] = R
Example #22
Refs: `bib.miyoshi2011gaussian`, which was inspired by `bib.lorenz1998optimal`.
"""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz96.sakov2008 import X0, Dyn, LPs, Nx, Tplot
from dapper.tools.localization import nd_Id_localization

# Use small dt to "cope with" ocean sector blow up
# (due to spatially-constant infl)
OneYear = 0.05 * (24 / 6) * 365
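# i.e. one year = 0.05 * 4 * 365 = 73 model time units, so T = 110*OneYear = 8030.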
t = modelling.Chronology(0.005,
                         dtObs=0.05,
                         T=110 * OneYear,
                         Tplot=Tplot,
                         BurnIn=10 * OneYear)

land_sites = np.arange(Nx // 2)
ocean_sites = np.arange(Nx // 2, Nx)

jj = land_sites
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 1
Obs['localizer'] = nd_Id_localization((Nx, ), (1, ), jj)

HMM = modelling.HiddenMarkovModel(
    Dyn,
    Obs,
    t,
Example #23
    and so these absolute rmse values are not so useful
    for quantitative evaluation of DA methods.
    For that purpose, see `dapper.mods.LA.raanes2015` instead.
"""

import numpy as np

import dapper.mods as modelling
from dapper.mods.LA import Fmat, sinusoidal_sample
from dapper.mods.Lorenz96 import LPs

Nx = 1000
Ny = 4
jj = modelling.linspace_int(Nx, Ny)

tseq = modelling.Chronology(dt=1, dkObs=5, T=300, BurnIn=-1, Tplot=100)

# WITHOUT explicit matrix (assumes dt == dx/c):
# step = lambda x,t,dt: np.roll(x,1,axis=x.ndim-1)
# WITH:
Fm = Fmat(Nx, c=-1, dx=1, dt=tseq.dt)


def step(x, t, dt):
    assert dt == tseq.dt
    return x @ Fm.T


Dyn = {
    'M': Nx,
    'model': step,
Example #24
#   It is not immediately clear if OOP is then more convenient.
# - There are also some intriguing possibilities relating to namedtuples.
# TODO 4: revise the above text.


# Turn dxdt into `step` such that `x_{k+1} = step(x_k, t, dt)`
step = modelling.with_rk4(dxdt_augmented, autonom=True)


# #### HMM

# Define the sequence of the experiment
# See `modelling.Chronology` for more details.
t = modelling.Chronology(
    dt=0.05,     # Integrational time step
    dkObs=1,     # Steps of duration dt between obs
    KObs=10**3,  # Total number of obs in experiment
    BurnIn=5,    # Omit from averages the period t=0 --> BurnIn
    Tplot=7)     # Default plot length

# Define dynamical model
Dyn = {
    'M': Nx+Np,     # Length of (total/augmented) state vector
    'model': step,  # Actual model
    'noise': 0,     # Additive noise (variance)
    # 'noise': GaussRV(C=.1*np.eye(Nx+Np)),
}

# Define observation model using convenience function partial_Id_Obs
jj = np.arange(Nx)  # obs indices (y = x[jj])
Obs = modelling.partial_Id_Obs(Nx+Np, jj)
Obs['noise'] = 1
Example #25
from ..utils import rel2mods

LUV = model_instance()
nU = LUV.nU

# Wilks2005 uses dt=1e-4 with RK4 for the full model,
# and dt=5e-3 with RK2 for the forecast/truncated model.
# As berry2014linear notes, this is possible because
# "numerical stiffness disappears when fast processes are removed".

################
# Full
################

# tseq = modelling.Chronology(dt=0.001,dto=0.05,T=4**3,BurnIn=6) # allows using rk2
tseq = modelling.Chronology(dt=0.005, dto=0.05, T=4**3,
                            BurnIn=6)  # requires rk4

Dyn = {
    'M': LUV.M,
    'model': modelling.with_rk4(LUV.dxdt, autonom=True),
    'noise': 0,
    'linear': LUV.dstep_dx,
}

X0 = modelling.GaussRV(mu=LUV.x0, C=0.01)

R = 0.1
jj = np.arange(nU)
Obs = modelling.partial_Id_Obs(LUV.M, jj)
Obs['noise'] = R
Example #26
"""Settings as in `bib.sakov2008deterministic`.

This HMM is used (with small variations) in many DA papers, for example

`bib.bocquet2011ensemble`, `bib.sakov2012iterative`,
`bib.bocquet2015expanding`, `bib.bocquet2013joint`.
"""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz96 import LPs, Tplot, dstep_dx, step, x0
from dapper.tools.localization import nd_Id_localization

# Sakov uses K=300000, BurnIn=1000*0.05
tseq = modelling.Chronology(0.05, dko=1, Ko=1000, Tplot=Tplot, BurnIn=2*Tplot)

Nx = 40
x0 = x0(Nx)

Dyn = {
    'M': Nx,
    'model': step,
    'linear': dstep_dx,
    'noise': 0,
}

X0 = modelling.GaussRV(mu=x0, C=0.001)

jj = np.arange(Nx)  # obs_inds
Obs = modelling.partial_Id_Obs(Nx, jj)
Example #27
"""Concerns figure 4 of `bib.todter2015second`."""

import numpy as np

import dapper.mods as modelling
import dapper.tools.randvars as RVs
from dapper.mods.Lorenz96 import step
from dapper.tools.localization import nd_Id_localization

t = modelling.Chronology(0.05, dko=2, T=4**5, BurnIn=20)

Nx = 80
Dyn = {
    'M': Nx,
    'model': step,
    'noise': 0,
}

X0 = modelling.GaussRV(M=Nx, C=0.001)

jj = np.arange(0, Nx, 2)
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['localizer'] = nd_Id_localization((Nx,), (1,), jj)
# Obs['noise'] = RVs.LaplaceRV(C=1,M=len(jj))
Obs['noise'] = RVs.LaplaceParallelRV(C=1, M=len(jj))

HMM = modelling.HiddenMarkovModel(Dyn, Obs, t, X0)

####################
# Suggested tuning
####################
Example #28
"""Settings from `bib.wiljes2016second`."""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz63.sakov2012 import HMM as _HMM
from dapper.mods.Lorenz63.sakov2012 import Nx

HMM = _HMM.copy()
HMM.t = modelling.Chronology(0.01, dkObs=12, T=4**5, BurnIn=4)

jj = np.array([0])
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 8
HMM.Obs = modelling.Operator(**Obs)

####################
# Suggested tuning
####################
# Reproduce benchmarks for NETF and ESRF (here EnKF-N) from left pane of Fig 1.
# from dapper.mods.Lorenz63.wiljes2017 import HMM # rmse.a reported by DAPPER / PAPER:
# ------------------------------------------------------------------------------
# HMM.t.KObs = 10**2
# xps += OptInterp()                                                # 5.4    / N/A
# xps += Var3D(xB=0.3)                                              # 3.0    / N/A
# xps += EnKF_N(N=5)                                                # 2.68   / N/A
# xps += EnKF_N(N=30,rot=True)                                      # 2.52   / 2.5
# xps += LNETF(N=40,rot=True,infl=1.02,Rs=1.0,loc_rad='NA')         # 2.61   / ~2.2
# xps += PartFilt(N=35 ,reg=1.4,NER=0.3)                            # 2.05   / 1.4 *
# *: tuning settings not given
#
Example #29
"""Settings from `bib.wiljes2016second`."""

import numpy as np

import dapper.mods as modelling
from dapper.mods.Lorenz63.sakov2012 import HMM as _HMM
from dapper.mods.Lorenz63.sakov2012 import Nx

HMM = _HMM.copy()
HMM.tseq = modelling.Chronology(0.01, dko=12, T=4**5, BurnIn=4)

jj = np.array([0])
Obs = modelling.partial_Id_Obs(Nx, jj)
Obs['noise'] = 8
HMM.Obs = modelling.Operator(**Obs)

####################
# Suggested tuning
####################
# Reproduce benchmarks for NETF and ESRF (here EnKF-N) from left pane of Fig 1.
# from dapper.mods.Lorenz63.wiljes2017 import HMM # rmse.a reported by DAPPER / PAPER:
# ------------------------------------------------------------------------------
# HMM.tseq.Ko = 10**2
# xps += OptInterp()                                                # 5.4    / N/A
# xps += Var3D(xB=0.3)                                              # 3.0    / N/A
# xps += EnKF_N(N=5)                                                # 2.68   / N/A
# xps += EnKF_N(N=30,rot=True)                                      # 2.52   / 2.5
# xps += LNETF(N=40,rot=True,infl=1.02,Rs=1.0,loc_rad='NA')         # 2.61   / ~2.2
# xps += PartFilt(N=35 ,reg=1.4,NER=0.3)                            # 2.05   / 1.4 *
# *: tuning settings not given
#
Example #30
"""Harder settings than in `dapper.mods.Lorenz84.pajonk2012`.

This was judged by noting that, with their settings,
the average value of `trHK` is 0.013.

Here we increase `dko` to make the DA problem more difficult.
"""

import dapper.mods as modelling
from dapper.mods.Lorenz84.pajonk2012 import HMM as _HMM

HMM = _HMM.copy()
HMM.tseq = modelling.Chronology(0.05, dko=10, T=4**5, BurnIn=20)


####################
# Suggested tuning
####################
# xps += ExtKF(infl=8)
# xps += EnKF ('Sqrt',N=10,infl=1.05)
# xps += EnKF ('Sqrt',N=100,rot=True,infl=1.01)
# xps += EnKF_N (N=4)
# xps += PartFilt(N=100, NER=0.4) # add reg!
# xps += PartFilt(N=1000, NER=0.1) # add reg!