Example #1
    def costfun(xa):
        """4DVar cost function: the background term plus the sum of the
        observation terms over the assimilation window."""
        xavec = np.mat(xa).T
        Jback = (xavec - x0bvec).T * invB * (xavec - x0bvec)

        #if model=='lor63':
        #    taux,xaaux = lorenz63(xa,o2t*anawin*tstep_truth)
        if model == 'lor96':
            taux, xaaux = lorenz96(o2t * anawin * tstep_truth, xa, N)

        indobs = range(o2t, anawin * o2t + 1, o2t)
        xobs = xaaux[indobs, :]
        xobs = np.mat(xobs).T
        Jobs = np.empty(len(indobs))
        Jobs.fill(np.nan)
        for iJobs in range(len(indobs)):
            innov = y[:, iJobs] - H * xobs[:, iJobs]
            Jobs[iJobs] = innov.T * invR * innov
        J = Jback + np.sum(Jobs)
        return J
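The routine that minimises this cost function (one4dvar in the full code) is not shown in this example. A minimal sketch of how costfun could be driven, assuming scipy.optimize.minimize with a derivative-free method rather than whatever optimiser the original code uses:

# Hypothetical driver for costfun; the optimiser choice is an assumption,
# not the original one4dvar implementation.
from scipy.optimize import minimize

def minimise_costfun(costfun, xb0):
    """Minimise the 4DVar cost function starting from the background xb0."""
    res = minimize(lambda xa: float(costfun(xa)), xb0, method='Nelder-Mead')
    return res.x  # analysis state at the start of the assimilation window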
Example #2
def evolvemembers(xold, tstep_truth, o2t):
    """Evolving the members.
 Inputs:  - xold, a [N,M] array of initial conditions for the
            M members and N variables
          - tstep_truth, the time step used in the nature run
          - o2t, frequency of observations in time steps
 Outputs: - xnew, a [o2t+1,N,M] array with the evolved members"""

    t_anal = o2t * tstep_truth
    N, M = np.shape(xold)
    xnew = np.empty((o2t + 1, N, M))
    xnew.fill(np.nan)

    for j in range(M):
        taux, xaux = lorenz96(t_anal, xold[:, j], N)  # [o2t+1,N]
        xnew[:, :, j] = xaux
    del j

    return xnew
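A usage sketch for evolvemembers, assuming a 12-variable Lorenz 96 setup and a small random ensemble around the forcing value; the sizes used here are illustrative only:

# Usage sketch (assumed setup, not part of the original module).
import numpy as np

N, M = 12, 5                        # number of variables and ensemble members
o2t, tstep_truth = 2, 0.025         # observations every 2 model steps of 0.025
xold = 8.0 + np.random.randn(N, M)  # perturbed initial ensemble around F = 8
xnew = evolvemembers(xold, tstep_truth, o2t)
print(xnew.shape)                   # (o2t + 1, N, M) = (3, 12, 5)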
Example #3
def getBsimple(model,N):
    """A very simple method to obtain the background error covariance.

    Obtained from a long run of a model.

    Inputs:  - model, the name of the model 'lor63' or 'lor96'
             - N, the number of variables
    Outputs: - B, the covariance matrix
             - Bcorr, the correlation matrix"""

#    if model=='lor63':
#        total_steps = 10000
#        tstep = 0.01
#        tmax = tstep*total_steps
#        x0 = np.array([-10,-10,25])
#        t,xt = lorenz63(x0,tmax)
#        samfreq = 16
#        err2 = 2
    if model == 'lor96':
        total_steps = 5000
        tstep = 0.025
        tmax = tstep * total_steps
        x0 = None
        t, xt = lorenz96(tmax, x0, N)
        samfreq = 2
        err2 = 2

    # Sample the long run and compute the correlation and covariance matrices
    ind_sample = range(0, total_steps, samfreq)
    x_sample = xt[ind_sample, :]
    Bcorr = np.mat(np.corrcoef(x_sample, rowvar=False))

    B = np.mat(np.cov(x_sample, rowvar=False))
    # Rescale B so that its largest diagonal element (variance) equals err2
    alpha = err2 / np.amax(np.diag(B))
    B = alpha * B

    return B, Bcorr
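A usage sketch; after the alpha rescaling the largest diagonal element of B equals err2, which is easy to check:

# Usage sketch (assumed driver code): build B for a 12-variable Lorenz 96 run.
import numpy as np

N = 12
B, Bcorr = getBsimple('lor96', N)
print(B.shape, Bcorr.shape)            # (12, 12) (12, 12)
print(np.amax(np.diag(B)))             # 2.0, i.e. err2 after the rescaling
print(np.allclose(np.diag(Bcorr), 1))  # True: unit diagonal of the correlation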
Example #4
import numpy as np
from mpl_toolkits.mplot3d import Axes3D

from L96_model import lorenz96
from L96_misc import gen_obs, rmse_spread, createH, getBsimple
from L96_var import var3d, var4d
from L96_plots import plotL96, plotL96obs, plotL96DA_var, plotRMSP, tileplotB


###############################################################################
### 1.a The Nature Run
# Let us perform a 'free' run of the model, which we will consider the truth.
# The initial conditions
x0 = None  # let it spin up from rest (x_n(t=0) = F for all n)
tmax = 4
Nx = 12
t,xt = lorenz96(tmax,x0,Nx) # Nx>=12
plotL96(t,xt,Nx)

# imperfect initial guess for our DA experiments: the forcing value plus an
# alternating +/-1 perturbation
forc = 8.0
x0guess = forc * np.ones(Nx) + (-1.0)**np.arange(Nx)


###############################################################################
### 2. The observations
# Decide what variables to observe
obsgrid = '1010'
H, observed_vars = createH(obsgrid,Nx)
period_obs = 2
var_obs = 2
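The internals of createH and gen_obs (imported from L96_misc) are not shown in these examples. As an illustration only, here is one way a '1010' pattern (observing every other variable) and a diagonal R with variance var_obs could be built; the names with a _sketch suffix are hypothetical and are not the module's own:

# Illustrative sketch only: this assumes '1010' means observing variables
# 0, 2, 4, ... and that var_obs is the observation-error variance; it is not
# the actual createH/gen_obs code from L96_misc.
observed_sketch = np.arange(0, Nx, 2)                 # every other grid point
H_sketch = np.mat(np.eye(Nx)[observed_sketch, :])     # [Nx/2, Nx] selection
R_sketch = var_obs * np.mat(np.eye(len(observed_sketch)))  # diagonal obs error
tobs_sketch = t[::period_obs]                         # observation times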
Example #5
def var3d(x0, t, tobs, y, H, B, R, model, N):
    """Data assimilation routine for both Lorenz 1963 & 1996 using 3DVar.
    Inputs:  - x0, the real initial conditions
             - t, time array of the model (should be evenly spaced)
             - tobs, time array of the observations (should be evenly
               spaced with a timestep that is a multiple of the model
               timestep)
             - y, the observations
             - H, observation matrix
             - B, the background error covariance matrix
             - R, the observational error covariance matrix
             - model, a string indicating the name of the model: 'lor63'
               or 'lor96'
             - N, the number of variables
    Outputs: - x_b, the background
             - x_a, the analysis"""

    # General settings
    Nsteps = np.size(t)
    # For the true time
    tstep_truth = t[1] - t[0]
    # For the analysis
    tstep_obs = tobs[1] - tobs[0]
    # The ratio
    o2t = int(tstep_obs / tstep_truth + 0.5)

    # Precreate the arrays for background and analysis
    x_b = np.empty((Nsteps, N))
    x_b.fill(np.nan)
    x_a = np.empty((Nsteps, N))
    x_a.fill(np.nan)

    invB = pinv(B)
    invR = pinv(R)

    # For the original background, let's start close to the truth
    orig_bgd = 'fixed'
    #orig_bgd = 'random'

    if orig_bgd == 'fixed':
        indaux = np.arange(N)
        x0_aux = x0 + (-1)**indaux
    elif orig_bgd == 'random':
        x0_aux = x0 + np.random.randn(N)

    # For the first instant b and a are equal
    x_b[0, :] = x0_aux
    x_a[0, :] = x0_aux

    # The following cycle contains evolution and assimilation
    for j in range(len(tobs) - 1):
        yaux = y[j + 1, :]

        # First compute background; our initial condition is the
        # forecast from the analysis at the previous observational
        # time
        xb0 = x_a[j * o2t, :]
        #if model=='lor63':
        #    taux,xbaux = lorenz63(xb0,o2t*tstep_truth)
        if model == 'lor96':
            taux, xbaux = lorenz96(o2t * tstep_truth, xb0, N)

        x_b[j * o2t + 1:(j + 1) * o2t + 1, :] = xbaux[1:, :]
        x_a[j * o2t + 1:(j + 1) * o2t + 1, :] = xbaux[1:, :]

        xa_aux = one3dvar(xbaux[o2t, :], yaux, H, B, R, invB, invR)
        x_a[(j + 1) * o2t, :] = xa_aux
        print('t =', t[o2t * (j + 1)])

    return x_b, x_a
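The one3dvar analysis step called inside the loop is not included in this example. A minimal sketch of a single 3DVar analysis, written with the closed-form gain K = B H^T (H B H^T + R)^-1 instead of an iterative minimisation (an assumption, since the original one3dvar is not shown); B, H and R are taken to be np.mat objects as elsewhere in these examples:

# Minimal sketch of one 3DVar analysis; not the original one3dvar code.
import numpy as np
from numpy.linalg import pinv

def one3dvar_sketch(xb, y, H, B, R):
    """Return the analysis xa = xb + K (y - H xb)."""
    xbvec = np.mat(xb).T                    # background as a column vector
    yvec = np.mat(y).T                      # observations as a column vector
    K = B * H.T * pinv(H * B * H.T + R)     # gain built from B and R
    xavec = xbvec + K * (yvec - H * xbvec)  # add the increment to the background
    return np.squeeze(np.asarray(xavec))    # back to a 1-D state vector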
Example #6
def var4d(x0, t, tobs, anawin, y, H, B, R, model, N):
    """Data assimilation routine for both Lorenz 1963 & 1996 using 4DVar.
    Inputs:  - x0, the real initial conditions
             - t, time array of the model (should be evenly spaced)
             - tobs, time array of the observations (should be evenly
               spaced with a timestep that is a multiple of the model
               timestep)
             - anawin, length of the 4D assim window, expressed as
               number of future obs included
             - y, the observations
             - H, observation matrix
             - B, the background error covariance matrix
             - R, the observational error covariance matrix
             - model, a string indicating the name of the model: 'lor63'
               or 'lor96'
             - N, the number of variables
    Outputs: - x_b, the background
             - x_a, the analysis"""

    # General settings
    Nsteps = np.size(t)
    Ntobs = np.size(tobs)
    # For the true time
    tstep_truth = t[1] - t[0]
    # For the analysis
    tstep_obs = tobs[1] - tobs[0]
    # The ratio
    o2t = int(tstep_obs / tstep_truth + 0.5)

    totana = (Ntobs - 1) // anawin

    # Precreate the arrays for background and analysis
    x_b = np.empty((Nsteps, N))
    x_b.fill(np.nan)
    x_a = np.empty((Nsteps, N))
    x_a.fill(np.nan)

    invB = pinv(B)
    invR = pinv(R)

    # For the original background, let's start close to the truth
    orig_bgd = 'fixed'
    #orig_bgd = 'random'

    if orig_bgd == 'fixed':
        indaux = np.arange(N)
        x0_aux = x0 + (-1)**indaux
    elif orig_bgd == 'random':
        x0_aux = x0 + np.random.randn(N)

    # For the first instant b and a are equal
    x_b[0, :] = x0_aux
    x_a[0, :] = x0_aux

    # The following cycle contains evolution and assimilation
    for j in range(totana):
        # Get the observations; these are distributed all over the
        # assimilation window
        yaux = y[anawin * j + 1:anawin * (j + 1) + 1, :]  # [anawin,L]

        # First compute background; our background is the forecast
        # from the analysis
        xb0 = x_a[j * anawin * o2t, :]
        #if model=='lor63':
        #    taux,xbaux = lorenz63(xb0,o2t*anawin*tstep_truth)
        if model == 'lor96':
            taux, xbaux = lorenz96(o2t * anawin * tstep_truth, xb0, N)

        x_b[j * o2t * anawin:(j + 1) * o2t * anawin + 1, :] = xbaux
        xa0 = one4dvar(tstep_truth, o2t, anawin, xb0, yaux, H, B, R, model, N,
                       invB, invR)

        #if model=='lor63':
        #    taux,xaaux = lorenz63(xa0,o2t*anawin*tstep_truth)
        if model == 'lor96':
            taux, xaaux = lorenz96(o2t * anawin * tstep_truth, xa0, N)

        x_a[j * o2t * anawin:(j + 1) * o2t * anawin + 1, :] = xaaux
        print('t =', tobs[anawin * (j + 1)])

    return x_b, x_a
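A quick way to judge the result is the time-mean RMSE of the background and analysis against the nature run. The practical's own rmse_spread routine is imported in Example #4 but not shown here, so this sketch uses plain numpy; the var4d call in the comment is an assumed driver line, with xt[0, :] standing in for the real initial conditions:

# Evaluation sketch (assumed driver code), not part of the original module.
import numpy as np

def rmse_vs_truth(x, xt):
    """RMSE over the variables at each time, then averaged over time."""
    return np.mean(np.sqrt(np.mean((x - xt)**2, axis=1)))

# x_b, x_a = var4d(xt[0, :], t, tobs, anawin, y, H, B, R, 'lor96', Nx)
# print('background RMSE:', rmse_vs_truth(x_b, xt))
# print('analysis RMSE:  ', rmse_vs_truth(x_a, xt))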