def __init__(self, data, forecast_len=FORECAST):
    if len(data) == 0:
        raise ValueError
    self.data = data
    self.forecast_len = forecast_len
    self.Z = [1., 0.]    # observation vector of the local linear trend model
    self.Q = 0.0
    self.fc = [None]*(len(data)+self.first_forecast_index()+forecast_len)  # one-step-ahead forecasts
    self.p = [None]*len(self.fc)    # forecast variances
    self.e = [None]*len(data)       # prediction errors
    self.var = 10000.
    first = self.first_forecast_index()
    self.fc[first] = 2*data[1] - data[0]   # extrapolate the first two points
    psi = vec([1])
    # Estimate the signal-to-noise ratio: dfpmin minimizes -llh over psi.
    dfpmin(lambda x: -self.llh(exp(x), first+1), psi)
    self.p[first] = exp(psi[0])  # exp(psi[0]/10.)
    self.trend = 0
    self.zeta = 0
    n = len(self.data)
    if n > first+1:
        self.update(first+1)
        self.p[first] *= self.epsilon
        for i in range(first+2, n):
            self.update(i)
    # Compute forecasts beyond the end of the given data.
    # Use equations (2.38) in Durbin-Koopman: set v_t = 0 and K_t = 0.
    for i in range(self.forecast_len):
        self.fc[n+i] = self.fc[n+i-1] + self.trend
        self.p[n+i] = self.p[n+i-1] + self.zeta  # + self.epsilon
def __init__(self, data, forecast_len=FORECAST):
    self.data = data
    if len(data) == 0:
        raise ValueError
    n = len(data)
    self.forecast_len = forecast_len
    self.sigma = 0.
    self.nu = 0.
    self.fc = [None]*(n+self.forecast_len)  # filtered states
    self.p = [None]*len(self.fc)            # variances of predictions (not filtered states)
    idx = self.first_forecast_index()
    self.fc[idx] = self.data[0]
    psi = vec([1.0])
    # Estimate the signal-to-noise ratio q = exp(psi[0]) of the local level model.
    dfpmin(lambda x: self.llh(exp(x), idx+1), psi)
    self.var = self.p[idx] = exp(psi[0]) + 1.
    if n > idx+1:
        self.update(idx+1)
        self.p[idx] *= self.sigma
        for i in range(idx+2, n):
            self.update(i)
        self.var = self.p[n-1]
    # Compute forecasts beyond the end of the given data.
    # Use equations (2.38) in Durbin-Koopman: set v_t = 0 and K_t = 0.
    for i in range(self.forecast_len):
        self.fc[n+i] = self.fc[n-1]             # local level model: forecasts stay flat
        self.p[n+i] = self.p[n+i-1] + self.nu   # variance grows by the state noise each step
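# The constructors above fit a single parameter, the signal-to-noise ratio q = exp(psi[0]),
# by running dfpmin on the profile likelihood self.llh. The repo's llh is not shown in this
# section; the standalone sketch below (hypothetical name llh_sketch) shows what a
# concentrated local-level log-likelihood of that shape can look like, up to sign and
# additive constants, assuming the observation variance has been profiled out.
from math import log

def llh_sketch(q, k, data):
    # Run the local level Kalman filter with state noise q and observation variance 1,
    # then concentrate the common scale sigma^2 out of the Gaussian likelihood.
    a, p = data[0], 1. + q
    s = 0.
    log_f = 0.
    for y in data[1:k]:
        v = y - a          # prediction error
        f = p + 1.         # prediction error variance
        g = p / f          # Kalman gain
        a += g * v
        p = p * (1. - g) + q
        s += v * v / f
        log_f += log(f)
    sigma2 = s / (k - 1)   # concentrated observation variance
    return -0.5 * ((k - 1) * (log(sigma2) + 1.) + log_f)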
def update(self, k):
    psi = vec([1./self.scale, .5/self.scale, 1./self.scale])
    # Fit the three free parameters of the state noise covariance Q by optimizing the likelihood.
    dfpmin(lambda x, y, z: self.llh(x, y, z, k), psi)
    eps = LLB.eps
    [t1, t2, t3] = psi
    # Rebuild the symmetric 2x2 covariance Q, stored as [Q00, Q01, Q11].
    self.Q[0] = t1**2 + eps
    self.Q[1] = (abs(t1)+eps)*t2
    self.Q[2] = t3**2 + eps + t2**2
    # Alternative parameterization:
    # self.Q[0] = exp(self.scale*psi[0])
    # self.Q[1] = exp(self.scale*psi[0]/2.)*psi[1]
    # self.Q[2] = exp(self.scale*psi[2]) + psi[1]**2
    data = self.data[:k]
    a = [self.data[0][0], self.data[0][1]]       # initial state
    P = [self.Q[0]+1., self.Q[1], self.Q[2]+1.]  # initial state covariance
    K = [0.]*3                                   # Kalman gain
    Fi = [1., 0., 1.]                            # inverse prediction error covariance
    v = [0.]*2                                   # prediction errors
    epsilon = 0.
    for i, detF in enumerate(self.next_state(data, 1, a, v, P, K, Fi)):
        epsilon += v[0]*Fi[0]*v[0] + 2*v[0]*Fi[1]*v[1] + v[1]*Fi[2]*v[1]
    epsilon /= 2.*k
    self.epsilon = epsilon
    self.Q = [epsilon*t for t in self.Q]   # rescale Q by the concentrated noise variance
    self.P[k] = [(P[0]+1)*epsilon, P[1]*epsilon, (P[2]+1)*epsilon]
    self.a[k] = [a[0], a[1]]
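# In the update above, the 2x2 symmetric matrices (Q, the state covariance P and the inverse
# prediction error covariance Fi) appear to be packed as 3-element lists [m00, m01, m11]:
# Fi = [1., 0., 1.] is the identity, and the epsilon accumulation is exactly the quadratic
# form v' Fi v under that packing. The helpers below (hypothetical names, not part of the
# repo) make the convention explicit.
def unpack_sym2(m):
    # Expand a packed symmetric 2x2 matrix [m00, m01, m11] to nested lists.
    return [[m[0], m[1]],
            [m[1], m[2]]]

def quad_form_sym2(v, m):
    # v' M v for a packed symmetric 2x2 matrix; matches the epsilon update above.
    return v[0]*m[0]*v[0] + 2.*v[0]*m[1]*v[1] + v[1]*m[2]*v[1]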
def update(self, k):
    psi = vec([1.0])
    # Estimate the signal-to-noise ratio q of the local level model from the first k+1 points.
    dfpmin(lambda x: self.llh(exp(x), k+1), psi)
    q = exp(psi[0])
    data = self.data[:k+1]
    a = data[0]    # level
    p = 1. + q     # state variance, relative to the observation variance
    sigma = 0.
    for i in range(1, k):
        [a, p, v, f] = self.update_kalman(data[i], a, p, q)
        sigma += v*v/f   # accumulate the concentrated observation variance
    sigma /= (k-1)
    self.sigma = sigma
    self.nu = q*sigma           # state noise variance
    self.fc[k] = a              # one-step-ahead forecast of data[k]
    self.p[k] = (p+1+q)*sigma   # variance of that forecast
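# update_kalman is not shown in this section. For the local level model with observation
# variance normalized to 1 and state noise q, one plausible predict-and-update step that is
# consistent with how it is called above is sketched below (hypothetical name, not the
# repo's implementation; the exact return convention for p may differ).
def update_kalman_sketch(y, a, p, q):
    v = y - a               # prediction error
    f = p + 1.              # prediction error variance
    g = p / f               # Kalman gain
    a = a + g * v           # updated level (the level follows a random walk)
    p = p * (1. - g) + q    # state variance carried to the next step
    return [a, p, v, f]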
def update(self, k):
    Z = [1., 0.]
    psi = vec([1.0])
    # Estimate the signal-to-noise ratio zeta: dfpmin minimizes -llh over psi.
    dfpmin(lambda x: -self.llh(exp(x), k), psi)
    Q = zeta = exp(psi[0])  # exp(psi[0]/10.)
    # Initial state (level, slope) and state covariance of the local linear trend model.
    a = [2.*self.data[1]-self.data[0], self.data[1]-self.data[0]]
    P = [5.+2.*zeta, 3.+zeta,
         3.+zeta, 2.+zeta]
    P2 = [None]*4
    F = P[0] + 1.0   # prediction error variance
    lF = log(F)
    K = [(P[0]+P[2])/F, P[2]/F]   # Kalman gain
    epsilon = 0.
    data = self.data[self.first_forecast_index():k]
    steady = False
    for y in data:
        if not steady:
            [v, F, lF] = self.update_kalman(y, a, P, P2, K, zeta)
            # Switch to the steady-state recursion once the covariance has converged.
            norm = 0
            for i in range(4):
                norm += abs(P2[i]-P[i])
            if norm < 0.001:
                steady = True
            for i in range(4):
                P[i] = P2[i]
        else:
            v = self.steady_update(y, a, K)
        epsilon += v*v/F
    self.epsilon = epsilon/len(data)
    self.zeta = zeta*self.epsilon
    self.trend = a[1]
    self.fc[k] = a[0]
    self.var = self.p[k] = (P[0] + 1)*self.epsilon
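# steady_update is likewise not shown. Once the covariance recursion has converged, F and K
# no longer change, so only the state needs to be advanced. For the local linear trend state
# a = [level, slope], one plausible steady-state step (hypothetical name, an assumption
# about what steady_update does) is:
def steady_update_sketch(y, a, K):
    v = y - a[0]              # prediction error against the current level
    a[0] += a[1] + K[0]*v     # next level: level + slope + gain-weighted error
    a[1] += K[1]*v            # next slope
    return v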