Example #1
def __correlateBlock(self, w):
    # NumPy's zeros/sum are assumed in scope (e.g. from numpy import zeros, sum),
    # as in the rest of this listing
    n, N = w.shape                 # rows: signals, columns: time samples
    wout = zeros((n, N))
    T = Timer("Convolving signal with Block..", 0, N)
    for j in range(N):
        # Block (rectangular) kernel centered at t[j], evaluated on the full grid
        block = self.__block(self._t, self._t[j], self._pa)
        for i in range(n):
            wout[i, j] = sum(block * w[i, :])
        T.looptime(j)
    return wout
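The inner loop over rows can be collapsed into a single matrix-vector product. A minimal standalone sketch of the same block correlation, where the kernel width `width` stands in for whatever role `self._pa` plays above (an assumption, not taken from the listing):

import numpy as np

def correlate_block(t, w, width):
    # t: length-N time grid, w: n x N signal array
    n, N = w.shape
    wout = np.zeros((n, N))
    for j in range(N):
        # Rectangular window centered at t[j] (assumed kernel shape)
        block = (np.abs(t - t[j]) <= width / 2).astype(float)
        wout[:, j] = w @ block  # one matvec replaces the loop over rows
    return wout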
Example #2
def DEM(M, x0, u, y, p, Pw, Pz, alpha_x, alpha_th, embed=0, gembed=1,
        maxit=1000, numerical=True, mfts=False, tol=1e-6, conit=100):

    # Allocate memory
    J  = zeros((    1, maxit))
    th = zeros((M.nth, maxit))
    th[:, 0] = M.th

    # Set up loop
    i = 0
    j = 0
    T = Timer('Dynamic Expectation Maximization', i, maxit - 1, maxit - 1)

    # Run loop
    while i < maxit - 1 and j < conit:

        # Update cost and parameter estimates
        if numerical:
            J[:, i], th[:, i + 1] = NDEMstep(M, x0, u, y, p, Pw, Pz,
                                             alpha_x, alpha_th, embed, mfts)[0:2]
        else:
            J[:, i], th[:, i + 1] = ADEMstep(M, x0, u, y, p, Pw, Pz, alpha_x,
                                             alpha_th, embed, gembed, mfts)[0:2]
        M.th = th[:, i + 1]

        # Stopping criterion: |J_i - J_{i-1}| < tol for conit consecutive iterations
        # (the i > 0 guard avoids a wrap-around comparison against J[:, -1] on the
        # first pass)
        if i > 0 and abs(J[:, i] - J[:, i - 1]) < tol:
            j += 1
        else:
            j = 0
        i += 1
        T.looptime(i)

    # Get final hidden state and output estimates
    xp, xu, yp, yu = FEF(M, x0, u, y, p, alpha_x, embed, mfts)

    # If converged before maxit, fill the remainder of the matrices with the last value
    for j in range(i - 1, maxit):
        J[:, j]  = J[:, i - 1]
        th[:, j] = th[:, i]

    dat = pedata('DEM', M, J, [], [], [], th, xp, xu, yp, yu)
    return dat
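The convergence test inside the loop (count consecutive iterations with |ΔJ| < tol, reset on any larger step) is a pattern shared by DEM and by EM in Example #4; a minimal self-contained sketch of the same logic, not part of the package:

class ConvergenceCounter:
    # Counts consecutive cost changes below tol; reports convergence once
    # the count reaches conit
    def __init__(self, tol=1e-6, conit=100):
        self.tol, self.conit = tol, conit
        self.count, self.prev = 0, None

    def step(self, J):
        if self.prev is not None and abs(J - self.prev) < self.tol:
            self.count += 1
        else:
            self.count = 0
        self.prev = J
        return self.count >= self.conit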
Example #3
def __correlateGaussian(self, w):
    # NumPy's zeros/sum are assumed in scope (e.g. from numpy import zeros, sum)
    n, N = w.shape
    wout = zeros((n, N))
    tol = 1e-6

    # Find the index k beyond which the Gaussian drops below tol, so the
    # kernel can be truncated to a window of 2k samples
    gaus = self.__gaussian(self._t, 0, self._pa)
    k = 0
    while gaus[k] > tol:
        k += 1
    gaus = zeros(2 * k)

    T = Timer("Convolving signal with Gaussian..", 0, N)
    for j in range(N):
        kmin = max(0, j - k)
        kmax = min(N - 1, j + k)
        if j <= k or j >= N - k:
            # Near the edges the window is shorter, so reallocate
            gaus = self.__gaussian(self._t[kmin:kmax], self._t[j], self._pa)
        else:
            # In the interior, reuse the preallocated 2k-sample buffer
            gaus[:] = self.__gaussian(self._t[kmin:kmax], self._t[j], self._pa)
        for i in range(n):
            wout[i, j] = sum(gaus * w[i, kmin:kmax])
        T.looptime(j)
    return wout
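For a uniformly sampled time grid, the same truncated-kernel trick is what SciPy's 1-D Gaussian filter does internally; a sketch of the equivalent call, up to kernel normalization, and assuming `sigma` is expressed in samples:

import numpy as np
from scipy.ndimage import gaussian_filter1d

def correlate_gaussian_uniform(w, sigma_samples, truncate=4.0):
    # w: n x N array; the kernel is cut off at `truncate` standard deviations,
    # mirroring the tol-based truncation above
    return gaussian_filter1d(w, sigma=sigma_samples, axis=1, truncate=truncate)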
Example #4
def EM(M, x0, u, y, Q, R, alpha=1, maxit=1000, numerical=True, tol=1e-6,
       conit=100, gembed=1):
    
    """
    Expectation Maximization
    INPUTS
      M            Data-structure of Model class
      x0           Initial hidden state - 1-dimensional array_like (list works)
      u            Input sequence nu x N numpy_array 
      y            Output sequence ny x N numpy_array 
      Q            State-noise covariance - nx x nx numpy_array 
      R            Output-noise covariance - ny x ny numpy_array 
      alpha        Updating gain - scalar float, int - (opt. def=1)
      maxit        Max. number of iterations - scalar int - (opt. def=1000)
      numerical    Use numerical gradients - boolean - (opt. def=True)
      tol          Error tolerance for stopping cond. - float - (opt. def=True)
      conit        Number of converence iterations. - fint - (opt. def=100)
      gembed       Gradient-embedding order (if algebraical gradient )
                                                  - scalar int - (opt. def=1)
    OUTPUTS
      dat          Data-structure of pedata class containing
    """
    
    # Allocate memory
    J  = zeros((    1,       maxit))
    K  = zeros(( M.nx, M.ny, maxit))
    P  = zeros(( M.nx, M.nx, maxit))
    S  = zeros(( M.ny, M.ny, maxit))
    th = zeros((M.nth,       maxit))
    th[:, 0] = M.th

    # Set up loop
    i = 0
    j = 0
    T = Timer('Expectation Maximization', i, maxit - 1, maxit - 1)
    
    # Run loop
    while i < maxit - 1 and j < conit:

        # Update cost, covariance and parameter estimates
        if numerical:
            J[:, i], K[:, :, i], P[:, :, i], S[:, :, i], th[:, i + 1] \
                    = NEMstep(M, x0, u, y, Q, R, alpha)[0:5]
        else:
            J[:, i], K[:, :, i], P[:, :, i], S[:, :, i], th[:, i + 1] \
                    = AEMstep(M, x0, u, y, Q, R, alpha, gembed)[0:5]

        M.th = th[:, i + 1]

        # Stopping criterion: |J_i - J_{i-1}| < tol for conit consecutive iterations
        # (the i > 0 guard avoids a wrap-around comparison against J[:, -1] on the
        # first pass)
        if i > 0 and abs(J[:, i] - J[:, i - 1]) < tol:
            j += 1
        else:
            j = 0
        i += 1
        T.looptime(i)
    
    # Get final hidden state and output estimates; use the last filled gain
    # K[:, :, i - 1], since the loop increments i after filling index i
    xp, xu, yp, yu = M.filt(K[:, :, i - 1], x0, u, y)

    # If converged before maxit, fill the remainder of the matrices with the last value
    for j in range(i - 1, maxit):
        J[:, j]    = J[:, i - 1]
        K[:, :, j] = K[:, :, i - 1]
        P[:, :, j] = P[:, :, i - 1]
        S[:, :, j] = S[:, :, i - 1]
        th[:, j]   = th[:, i]

    dat = pedata('EM', M, J, K, P, S, th, xp, xu, yp, yu)
    return dat
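The tail-filling loop at the end of EM (and of DEM in Example #2) can equivalently be written as one broadcast assignment per array; a small standalone numpy sketch of the equivalence:

import numpy as np

J = np.zeros((1, 8))
J[:, :4] = [[3.0, 2.0, 1.5, 1.4]]
i = 4                      # iteration at which the loop stopped
J[:, i:] = J[:, i - 1:i]   # broadcast the last computed column over the tail
# ...and likewise K[:, :, i:] = K[:, :, i - 1:i] for the 3-D arrays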
Example #5
yu = zeros((M.ny, N))
yp = zeros((M.ny, N))
th = zeros((M.nth, N))
M.th = th0 * th_r
th[:, 0] = M.th

# Get noise and simulate system (generate training data)
W = Noise(dt, T, 1, 1, 1, pd, [], [], seed)
w_tmp = W.getNoise()[1]
w[:, :] = w_tmp[0:2, :]
v[:, :] = w_tmp[2, :]
x, y = S.sim([0, 0], u, w, v)

f = plt.figure(1, figsize=(wfig, hfig))
anim = OPEanimation(f, t, x, y, th_r, leg=['Truth', 'EM'])

# Fetch Timer
Tim = Timer('Animated Online EM', 2, N, N)
# Running the online algorithm
for k in range(2, N):

    Nk = min(k, gembed)
    # One online EM update over the window of the last Nk samples; the new
    # parameter estimate is written into column k
    _, _, P, _, th[:, k], xp[:, k], xu[:, k], yp[:, k], yu[:, k] = \
        OEMstep(M, u[:, k - Nk:k + 1], xu[:, k - Nk:k + 1], xp[:, k - Nk:k + 1],
                y[:, k - Nk:k + 1], yp[:, k - Nk:k + 1], th[:, k - Nk:k + 1],
                Q, R, P, alpha)
    M.th = th[:, k]

    if k == 2 or k % fps == 0:
        anim.updateAnim(k, xu[:, :k], yu[:, :k], th[:, :k])
    Tim.looptime(k)
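The only subtle part of the loop above is the window indexing: at most `gembed` past samples are passed to OEMstep, fewer during start-up. A tiny self-contained check of that logic:

gembed = 5
for k in range(2, 12):
    Nk = min(k, gembed)
    window = range(k - Nk, k + 1)          # the slice passed as [:, k-Nk:k+1]
    assert len(window) == Nk + 1 and window[-1] == k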
Example #6
        xp = zeros((M.nx, N))
        xu = zeros((M.nx, N))
        yp = zeros((M.ny, N))

        # Steady-state Kalman gain via the discrete algebraic Riccati equation,
        # used below for the log-likelihood evaluation
        K = dare(M.A([0, 0], [0], th), M.C([0, 0], [0], th), Q, R)[0]

        for k in range(1, N):
            xp[:, k] = M.f(x[:, k - 1], u[:, k - 1], th)
            yp[:, k] = M.h(xp[:, k], u[:, k], th)
            xu[:, k] = x[:, k] + dot(K, y[:, k] - yp[:, k])

        # Calculate cost
        Jt[i, j] = -ll(y, yp, R, x, xp, Q)
        Jp[i, j] = -ll(y, yp, R, xu, xp, Q)

        Tim.looptime(i * res + j)

    # Reset current parameter to actual value
    th[i] = th[i] - maxth

    # Normalize for plot scaling
    Jt[i, :] = (Jt[i, :] - min(Jt[i, :])) / (max(Jt[i, :]) - min(Jt[i, :]))
    Jp[i, :] = (Jp[i, :] - min(Jp[i, :])) / (max(Jp[i, :]) - min(Jp[i, :]))

f1 = plt.figure(1, figsize=(wfig, hfig))
plotcost(Jt, maxth, 'black', 2, 1)
plotcost(Jp, maxth, tudcols[0], 2, 1)
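The per-row min-max scaling applied to Jt and Jp above generalizes to a small helper; a minimal numpy sketch, assuming no row is constant so the denominator never vanishes:

import numpy as np

def minmax_rows(A):
    # Scale each row of A to [0, 1]
    lo = A.min(axis=1, keepdims=True)
    hi = A.max(axis=1, keepdims=True)
    return (A - lo) / (hi - lo)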

#%% Case 1.2: EM with known states, kernel width = 0.1 s

# Case-specific parameters