# Imports assumed for the routines below.
from numpy import zeros, eye, diag, dot, kron
from scipy.linalg import expm, inv, norm, pascal

# Helper routines assumed to be defined elsewhere in this package:
#   dotn (chained matrix product), tp (transpose), isempty,
#   qcf / ll (cost functions), dWthdx / dWxdth (mean-field terms),
#   shiftmatrix, powdiag, and the Noise class.


def ODEMstep(M, x0, u, y, p, Pw, Pz, alpha_x, alpha_th,
             embed=0, gembed=1, mfts=False):
    th = M.th

    # Fetch state estimates based on the current parameter estimates
    # (FEFstep returns the updated estimates first)
    xu, xp, yu, yp = FEFstep(M, x0, u, y, p, alpha_x, embed, mfts)

    # Parameter gradients
    dydth = - M.H(xu, u[:,1], th) - dotn(M.C(xu, u[:,1], th), M.F(xu, u[:,1], th))
    dxdth = - M.F(xu, u[:,1], th) - dotn(M.A(xu, u[:,1], th), M.F(xu, u[:,0], th))

    # Free-energy-to-parameter gradient
    dth = dotn(tp(dxdth), Pw, xu - xp) + dotn(tp(dydth), Pz, y - yp)

    # Mean-field terms, if specified
    if mfts:
        dth += dWxdth(M, xu, u[:,1])

    # Calculate the cost
    J = qcf(y, yp, Pz, xu, xp, Pw)

    # Update the parameters
    th = M.th - alpha_th*dth

    return J, th, xp, xu, yp, yu
def FEF(M_t, x0, u_t, y_t, p, alpha, embed=0, mfts=False):
    # embed: 0 embedded predictions
    #        1 embedded derivatives
    #        2 embedded history
    N = len(u_t[0,:])

    # Fetch functions
    A = M_t.A
    C = M_t.C
    f = M_t.f
    h = M_t.h

    # Initialize storage
    xu_t = zeros((M_t.nx,N)); xu_t[:,0] = x0
    xp_t = zeros((M_t.nx,N)); xp_t[:,0] = x0
    yu_t = zeros((M_t.ny,N)); yu_t[:,0] = h(x0, u_t[:,0], M_t.th)
    yp_t = zeros((M_t.ny,N)); yp_t[:,0] = h(x0, u_t[:,0], M_t.th)

    # Upper-shift matrix
    Du = shiftmatrix(p=p, embed=embed, nx=int(M_t.nx/p), dt=M_t.dt)

    # Fetch precisions
    Pw = M_t.Q
    Pz = M_t.R

    # Run the filter
    for k in range(1,N):
        # Predictions
        xp_t[:,k] = f(xu_t[:,k-1], u_t[:,k-1], M_t.th)
        yp_t[:,k] = h(xp_t[:,k], u_t[:,k], M_t.th)
        xu_t[:,k] = dotn(Du, xu_t[:,k-1])

        # Errors
        ex = xu_t[:,k] - xp_t[:,k]
        ey = y_t[:,k] - yp_t[:,k]

        # Error gradients
        dexdx = Du - A(xu_t[:,k-1], u_t[:,k-1], M_t.th)
        deydx = - C(xu_t[:,k], u_t[:,k], M_t.th)

        # Free-energy gradient
        dFEdx = dotn(tp(dexdx), Pw, ex) + dotn(tp(deydx), Pz, ey)

        # Mean-field terms, if specified
        if mfts:
            dFEdx += dWthdx(M_t, xu_t[:,k-1], u_t[:,k-1])

        # Updates
        xu_t[:,k] = xu_t[:,k] - alpha*dFEdx
        yu_t[:,k] = h(xu_t[:,k], u_t[:,k], M_t.th)

    return xu_t, xp_t, yu_t, yp_t
def SDO(p, u, x0, dt, wc=100):
    # Derivative observer: build a companion-form linear filter with
    # Pascal-matrix coefficients and cutoff wc, discretize it (zero-order
    # hold), and simulate it on u to obtain smoothed derivative estimates.
    p += 1
    N = len(u[0,:])
    P = pascal(p+1)

    # Companion-form filter matrices
    A = eye(p, p, 1)
    a = zeros(p)
    for i in range(0, p):
        a[p-1-i] = P[-1,i+1]*wc**(i+1)
    A[-1,:] = -a
    B = zeros((p,1)); B[-1,-1] = 1
    C = wc**p*eye(p-1, p)
    D = zeros((p-1,1))

    # Zero-order-hold discretization
    Ad = expm(A*dt)
    Bd = dotn(inv(A), Ad - eye(p), B)

    # Simulate the observer
    x = zeros((p,N)); x[0:p-1,0] = x0
    y = zeros((p-1,N)); y[:,0] = dotn(C, x[:,0]) + dotn(D, u[:,0])
    for k in range(1,N):
        x[:,k] = dotn(Ad, x[:,k-1]) + dotn(Bd, u[:,k-1])
        y[:,k] = dotn(C, x[:,k]) + dotn(D, u[:,k])

    return y
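# Hypothetical usage sketch for SDO(): feed a noisy sine through the observer
# and read off smoothed derivative estimates. The signal, cutoff wc, and
# sample time below are illustrative only, not taken from this package.
def _demo_sdo():
    import numpy as np
    dt = 1e-3
    t = np.arange(0.0, 2.0, dt)
    u = np.sin(2*np.pi*t).reshape(1, -1)    # 1 x N input signal
    rng = np.random.default_rng(0)
    u += 0.01*rng.standard_normal(u.shape)  # small measurement noise
    y = SDO(2, u, np.zeros(2), dt, wc=50)   # 2 x N: smoothed signal + derivative
    return t, y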
def NDO(k, x0, dt):
    # Numerical derivative observer: stack k delayed copies of the signal and
    # map them through a Pascal-matrix difference operator scaled by 1/dt.
    N = len(x0[0,:])
    x = zeros((k,N))
    x[0,:] = x0[0,:]
    for j in range(1,k):
        x[j,j:N] = x0[0,0:N-j]
    P = dotn(powdiag(k, 1/dt), pascal(k), powdiag(k, -1))
    x = dot(P, x)
    return x
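# Hypothetical usage sketch for NDO(): embed a sampled signal together with
# its first two difference-based derivatives. The test signal is illustrative.
def _demo_ndo():
    import numpy as np
    dt = 1e-2
    t = np.arange(0.0, 1.0, dt)
    x0 = np.cos(2*np.pi*t).reshape(1, -1)  # 1 x N signal
    X = NDO(3, x0, dt)                     # 3 x N: signal and two derivatives
    return t, X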
def FEFstep(M_t, x0, u_t, y_t, p, alpha, embed=0, mfts=False):
    # embed: 0 embedded predictions
    #        1 embedded derivatives
    #        2 embedded history

    # Fetch matrices
    A = M_t.A(x0, u_t[:,0], M_t.th)
    C = M_t.C(x0, u_t[:,0], M_t.th)

    # Upper-shift matrix
    if embed == 0 or embed == 1:
        Du = kron(eye(p, p, 1), eye(int(M_t.nx/p)))
        if embed == 1:
            Du = expm(Du*M_t.dt)
    elif embed == 2:
        Du = kron(eye(p, p, -1), eye(int(M_t.nx/p)))

    # Fetch precisions
    Pw = M_t.Q
    Pz = M_t.R

    # Predictions
    xp_t = M_t.f(x0, u_t[:,0], M_t.th)
    yp_t = M_t.h(xp_t, u_t[:,1], M_t.th)
    xu_t = dotn(Du, x0)

    # Free-energy gradient
    dFEdx = dotn(tp(Du - A), Pw, xu_t - xp_t) + dotn(tp(- C), Pz, y_t - yp_t)

    # Mean-field terms, if specified
    if mfts:
        dFEdx += dWthdx(M_t, x0, u_t[:,0])

    # Updates
    xu_t = xu_t - alpha*dFEdx
    yu_t = M_t.h(xu_t, u_t[:,1], M_t.th)

    return xu_t, xp_t, yu_t, yp_t
def KF(M, x0, u, y, th=[], Q=[], R=[]):
    #if M.typ != 'ltiss':
    #    raise Exception('KF code for non-linear state-space (EKF) not complete');
    if isempty(th): th = M.th
    if isempty(Q): Q = M.Q
    if isempty(R): R = M.R

    A = M.A(zeros(M.nx), zeros(M.nu), th)
    C = M.C(zeros(M.nx), zeros(M.nu), th)
    #K,P,S = dare(A,C,Q,R);

    nx = M.nx
    ny = M.ny
    N = len(u[0,:])

    K = zeros((nx,ny))
    P = zeros((nx,nx))
    S = zeros((ny,ny))
    xp = zeros((nx,N)); xp[:,0] = x0
    xu = zeros((nx,N)); xu[:,0] = x0
    yp = zeros((ny,N)); yp[:,0] = M.h(xp[:,0], u[:,0], th)
    yu = zeros((ny,N)); yu[:,0] = M.h(xu[:,0], u[:,0], th)

    for k in range(1,N):
        A[:,:] = M.A(xu[:,k-1], u[:,k-1], th)
        C[:,:] = M.C(xu[:,k-1], u[:,k-1], th)

        # Predict
        xp[:,k] = M.f(xu[:,k-1], u[:,k-1], th)
        yp[:,k] = M.h(xp[:,k  ], u[:,k  ], th)
        P[:,:] = dotn(A, P, tp(A)) + Q

        # Update
        S[:,:] = dotn(C, P, tp(C)) + R
        K[:,:] = dotn(P, tp(C), inv(S))
        xu[:,k] = xp[:,k] + dotn(K, y[:,k] - yp[:,k])
        yu[:,k] = M.h(xu[:,k], u[:,k], th)
        P[:,:] = dotn(eye(M.nx) - dotn(K,C), P)

    return K, P, S, xp, xu, yp, yu
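# Hypothetical minimal model object for KF(). The attribute layout (callable
# Jacobians A, C, maps f, h, plus nx/nu/ny, th, Q, R) is inferred from how
# KF() uses M and may differ from this package's actual Model class.
def _demo_kf():
    import numpy as np

    class ToyModel:
        nx, nu, ny = 2, 1, 1
        th = np.array([1.0, 0.5])          # illustrative [stiffness, damping]
        Q = 1e-4*np.eye(2)                 # state-noise covariance
        R = 1e-2*np.eye(1)                 # output-noise covariance
        dt = 1e-2

        def A(self, x, u, th):             # discrete-time state Jacobian
            return np.eye(2) + self.dt*np.array([[0.0,    1.0   ],
                                                 [-th[0], -th[1]]])
        def C(self, x, u, th):             # output Jacobian
            return np.array([[1.0, 0.0]])
        def f(self, x, u, th):             # state-transition map
            return self.A(x, u, th) @ x + self.dt*np.array([0.0, u[0]])
        def h(self, x, u, th):             # output map
            return self.C(x, u, th) @ x

    M = ToyModel()
    N = 500
    u = np.ones((M.nu, N))                 # step input
    y = np.zeros((M.ny, N))                # stand-in measurements; use real data
    return KF(M, np.zeros(M.nx), u, y)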
def dare(A, C, Q, R, tol=1e-6, maxit=1000, convthresh=200):
    # Iterate the discrete-time algebraic Riccati equation to a fixed point.
    nx = len(A[:,0])
    ny = len(C[:,0])
    P = zeros((nx,nx))
    S = zeros((ny,ny))
    K = zeros((nx,ny))
    e1 = 1000; e2 = 0
    i = 0; j = 0

    # Stopping criterion: J < tol in the last convthresh iterations
    while i < maxit and j < convthresh:
        S = dotn(C, P, tp(C)) + R
        P = dotn(A, P, tp(A)) + Q
        K = dotn(P, tp(C), inv(S))
        P = dotn(eye(nx) - dotn(K,C), P)

        # Track the change in the iterates
        e2 = e1
        e1 = norm(K,2) + norm(P,2) + norm(S,2)
        J = abs(e2 - e1)
        if J < tol:
            j += 1
        else:
            j = 0
        i += 1

    return K, P, S
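# Hypothetical quick check of dare(): stationary Kalman gain for a discretized
# double integrator. The matrices below are illustrative, not from the package.
def _demo_dare():
    import numpy as np
    A = np.array([[1.0, 1e-2],
                  [0.0, 1.0 ]])
    C = np.array([[1.0, 0.0]])
    K, P, S = dare(A, C, Q=1e-4*np.eye(2), R=1e-2*np.eye(1))
    return K, P, S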
def KFstep(M, xu, u, y, P):
    th = M.th
    A = M.A(xu, u[:,0], th)
    C = M.C(xu, u[:,0], th)

    # Predict
    xp = M.f(xu, u[:,0], th)
    yp = M.h(xp, u[:,1], th)
    P = dotn(A, P, tp(A)) + M.Q

    # Update
    S = dotn(C, P, tp(C)) + M.R
    K = dotn(P, tp(C), inv(S))
    xu = xp + dotn(K, y - yp)
    P = dotn(eye(M.nx) - dotn(K,C), P)
    yu = M.h(xu, u[:,1], th)

    return K, P, S, xp, xu, yp, yu
def ADEMstep(M, x0, u, y, p, Pw, Pz, alpha_x, alpha_th,
             embed=0, gembed=1, mfts=False, x='def'):
    N = len(u[0,:])
    th = M.th

    # Fetch state estimates based on the current parameter estimates
    # (FEF returns the updated estimates first)
    if x == 'def':
        xu, xp, yu, yp = FEF(M, x0, u, y, p, alpha_x, embed)
    else:
        Du = shiftmatrix(p=p, embed=embed, nx=int(M.nx/p), dt=M.dt)
        xu = zeros((M.nx,N)); xp = zeros((M.nx,N))
        yu = zeros((M.ny,N)); yp = zeros((M.ny,N))
        for k in range(1,N):
            xu[:,k] = dotn(Du, x[:,k-1])
            xp[:,k] = M.f( x[:,k-1], u[:,k-1], th)
            yu[:,k] = M.h( x[:,k  ], u[:,k  ], th)
            yp[:,k] = M.h(xp[:,k  ], u[:,k  ], th)

    # Initialize gradients
    dth = zeros((M.nth,1))
    dxdth = zeros((M.nx,M.nth))
    dydth = zeros((M.ny,M.nth))
    ex = zeros((M.nx,1))
    ey = zeros((M.ny,1))

    # Pre-calculate all matrices (trade storage for efficiency)
    F = zeros((M.nx,M.nth,N)); H = zeros((M.ny,M.nth,N))
    A = zeros((M.nx,M.nx,N));  B = zeros((M.nx,M.nu,N))
    C = zeros((M.ny,M.nx,N));  D = zeros((M.ny,M.nu,N))
    xref = xu if x == 'def' else x
    for k in range(0,N):
        F[:,:,k] = M.F(xref[:,k], u[:,k], th)
        H[:,:,k] = M.H(xref[:,k], u[:,k], th)
        A[:,:,k] = M.A(xref[:,k], u[:,k], th)
        B[:,:,k] = M.B(xref[:,k], u[:,k], th)
        C[:,:,k] = M.C(xref[:,k], u[:,k], th)
        D[:,:,k] = M.D(xref[:,k], u[:,k], th)

    # Calculate the free-energy-to-parameter gradient
    for k in range(0,N):
        # Errors
        ex[:,0] = xu[:,k] - xp[:,k]
        ey[:,0] = y[:,k] - yu[:,k]

        # To do: should gradient-embedding at some point turn out to be
        # necessary, then:
        #
        # # State-to-parameter gradients
        # dxdth[:,:] = 0;
        # imax = min(k,gembed);
        # for i in range(1,imax):
        #     dxdth[:] = F[:,:,k-imax+i] + dotn(A[:,:,k-imax+i],dxdth)
        # dydth[:,:] = - H[:,:,k] - dotn(C[:,:,k],dxdth);
        # dxdth[:,:] = - F[:,:,k] - dotn(A[:,:,k],dxdth);
        #
        # But until then:
        dydth[:,:] = - H[:,:,k]  # - dotn(C[:,:,k],F[:,:,k-1]);
        dxdth[:,:] = - F[:,:,k]  # - dotn(A[:,:,k],F[:,:,k-1]);

        # Update the gradient
        dth[:,:] += dotn(tp(dxdth), Pw, ex) + dotn(tp(dydth), Pz, ey)

    # Calculate the cost
    J = qcf(y, yu, Pz, xu, xp, Pw)

    # Update the parameters
    th = M.th - alpha_th*dth[:,0]

    return J, th, xp, xu, yp, yu
def AEMstep(M, x0, u, y, Q, R, alpha=1, gembed=1):
    """
    1-step algebraic-gradient Expectation-Maximization

    INPUTS
      M       Data structure of Model class
      x0      Initial hidden state        - 1-dimensional array_like (list works)
      u       Input sequence              - nu x N numpy_array
      y       Output sequence             - ny x N numpy_array
      Q       State-noise covariance      - nx x nx numpy_array
      R       Output-noise covariance     - ny x ny numpy_array
      alpha   Updating gain               - scalar float, int (opt. def=1)
      gembed  Gradient-embedding order    - scalar int (opt. def=1)

    OUTPUTS
      J       Cost (log-likelihood)            - scalar float
      K       Kalman gain                      - nx x ny numpy_array
      P       State-error covariance estimate  - nx x nx numpy_array
      S       Output-error covariance estimate - ny x ny numpy_array
      th      Updated parameter estimate       - 1-dimensional numpy_array
      xp      Predicted hidden state estimates - nx x N numpy_array
      xu      Updated hidden state estimates   - nx x N numpy_array
      yp      Predicted output estimates       - ny x N numpy_array
      yu      Updated output estimates         - ny x N numpy_array
    """
    N = len(u[0,:])
    xu = zeros((M.nx,N)); xp = zeros((M.nx,N))
    yu = zeros((M.ny,N)); yp = zeros((M.ny,N))
    th = M.th

    # th = zeros((M.nth,N));
    # P = zeros((2,2));
    #
    # for k in range(1,N):
    #
    #     Nk = min(k,gembed);
    #     J,K,P,S,th[:,k],xp[:,k],xu[:,k],yp[:,k],yu[:,k] = OEMstep(M,u[:,k-Nk:k+1],\
    #         xu[:,k-Nk:k+1],xp[:,k-Nk:k+1],y[:,k-Nk:k+1],\
    #         yp[:,k-Nk:k+1],th[:,k-Nk:k+1],iQ,iR,P,alpha)
    #
    #     M.th = th[:,k];
    #
    # J = ll(y,yp,iR,xu,xp,iQ);
    #
    # return J,K,P,S,th[:,-1],xp,xu,yp,yu;

    # Fetch state estimates based on the current parameter estimates
    K, P, S, xp, xu, yp, yu = KF(M, x0, u, y, th)
    iQ = inv(P)
    iR = inv(S)

    # Initialize gradients
    dth = zeros(M.nth)
    dxdth = zeros((M.nx,M.nth))
    dydth = zeros((M.ny,M.nth))

    # Pre-calculate all matrices (trade storage for efficiency)
    F = zeros((M.nx,M.nth,N)); H = zeros((M.ny,M.nth,N))
    A = zeros((M.nx,M.nx,N));  B = zeros((M.nx,M.nu,N))
    C = zeros((M.ny,M.nx,N));  D = zeros((M.ny,M.nu,N))
    for k in range(0,N):
        F[:,:,k] = M.F(xu[:,k], u[:,k], th)
        H[:,:,k] = M.H(xu[:,k], u[:,k], th)
        A[:,:,k] = M.A(xu[:,k], u[:,k], th)
        B[:,:,k] = M.B(xu[:,k], u[:,k], th)
        C[:,:,k] = M.C(xu[:,k], u[:,k], th)
        D[:,:,k] = M.D(xu[:,k], u[:,k], th)

    # Calculate the free-energy-to-parameter gradient
    for k in range(0,N-1):
        # Errors
        ex = xu[:,k+1] - xp[:,k+1]
        ey = y[:,k  ] - yu[:,k  ]

        # State-to-parameter gradients
        dxdth[:,:] = 0
        kmin = max(0, k - gembed)
        for i in range(kmin, k):
            dxdth[:,:] = F[:,:,i] + dotn(A[:,:,i], dxdth)
        dydth[:,:] = - H[:,:,k] - dotn(C[:,:,k], dxdth)
        dxdth[:,:] = - F[:,:,k] - dotn(A[:,:,k], dxdth)

        # Update the gradient
        dth[:] += dotn(tp(dxdth), iQ, ex) + dotn(tp(dydth), iR, ey)

    # Update the parameters (without mutating M.th in place)
    th = th - alpha*dth

    # Calculate the cost
    J = ll(y, yp, iR, xu, xp, iQ)

    return J, K, P, S, th, xp, xu, yp, yu
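# Hypothetical outer loop around AEMstep(): repeat the one-step update until
# the cost settles, writing the updated parameters back into the model each
# pass. The gain and iteration count below are illustrative only.
def _demo_aem_loop(M, x0, u, y, Q, R, alpha=1e-3, iters=50):
    import numpy as np
    J_hist = np.zeros(iters)
    for i in range(iters):
        J, K, P, S, th, xp, xu, yp, yu = AEMstep(M, x0, u, y, Q, R, alpha)
        M.th = th                      # feed the updated parameters back
        J_hist[i] = J
    return M, J_hist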
def sgfilter(A, B, C, D, K, x0, u, y):
    # Stationary-gain filter: predictor/corrector with a fixed gain K.
    nx = len(A[:,0])
    ny = len(C[:,0])
    N = len(u[0,:])

    xp = zeros((nx,N)); xp[:,0] = x0
    xu = zeros((nx,N)); xu[:,0] = x0
    yp = zeros((ny,N)); yp[:,0] = dotn(C, xp[:,0]) + dotn(D, u[:,0])
    yu = zeros((ny,N)); yu[:,0] = dotn(C, xu[:,0]) + dotn(D, u[:,0])

    for k in range(1,N):
        # Predict
        xp[:,k] = dotn(A, xu[:,k-1]) + dotn(B, u[:,k-1])
        yp[:,k] = dotn(C, xp[:,k  ]) + dotn(D, u[:,k  ])

        # Update
        xu[:,k] = xp[:,k] + dotn(K, y[:,k] - yp[:,k])
        yu[:,k] = dotn(C, xu[:,k  ]) + dotn(D, u[:,k  ])

    return xp, xu, yp, yu
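# Hypothetical usage sketch for sgfilter(): run the fixed-gain filter with a
# stationary gain obtained from dare(). System matrices are illustrative only.
def _demo_sgfilter(u, y):
    import numpy as np
    dt = 1e-2
    A = np.eye(2) + dt*np.array([[0.0, 1.0], [-1.0, -0.5]])
    B = dt*np.array([[0.0], [1.0]])
    C = np.array([[1.0, 0.0]])
    D = np.zeros((1,1))
    K, _, _ = dare(A, C, 1e-4*np.eye(2), 1e-2*np.eye(1))
    return sgfilter(A, B, C, D, K, np.zeros(2), u, y)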
# Mass-spring-damper simulation (script fragment; A, x, u, w, z, std, dt, T,
# N, m, k, d, and seed are defined earlier in the script).

# System matrices (forward-Euler discretization)
A[1,0] = -k/m
A[1,1] = -d/m
A = eye(2) + dt*A
B = zeros((2,1)); B[1,0] = 1/m; B = dt*B
C = zeros((1,2)); C[0,0] = 1
D = zeros((1,1))

# Get noise signals
zeta = zeros((3,2))
zeta[:,1] = std                           # Sufficient statistics (mean, std)
Q = diag(std[0:2]); Q = dotn(tp(Q), Q)    # Covariance of the state noise
R = diag([std[2]]); R = dotn(tp(R), R)    # Covariance of the output noise
t, W = Noise(dt, T, 1, 1, 1, zeta, [], [], seed).getNoise()
w[:,:] = W[0:2,:]                         # State noise
z[:,:] = W[2,:]                           # Output noise

# Construct the input signal (step at 2 s)
u[0,int(2/dt+1):] = 10

# Simulate the system
y[:,0] = dotn(C, x[:,0]) + dotn(D, u[:,0]) + z[:,0]
for k in range(1,N):
    x[:,k] = dotn(A, x[:,k-1]) + dotn(B, u[:,k-1]) + w[:,k-1]
    y[:,k] = dotn(C, x[:,k]) + dotn(D, u[:,k]) + z[:,k]

# General figure settings