class TimeDifference:
    """
    This implementation is inspired by the description in:

    Hamilton, J. D. (1994). Time series analysis. Princeton, NJ:
    Princeton University Press.
    """

    def __init__(self, d=1):
        self.mLR = OnlineLinearRegression()
        self.n, self.m = 0, 0
        self.d = d

    def train(self, X):
        n, m = X.shape
        # set parameters
        self.n, self.m = n, m
        for indx in range(0, n - self.d):
            for ind in range(indx, n, self.d):
                if indx != ind:
                    continue
                beg = ind
                end = ind + self.d
                if end >= n:
                    end = n - 1
                if beg == end:
                    continue
                if end < (n - 1):
                    x = X[beg:end, ].flatten().reshape((1, self.d * m))
                    y = X[(end + 1), ]
                    self.mLR.update(x, y)

    def predict(self, y):
        yflat = y.flatten().reshape((1, self.m * self.d))
        theta = self.mLR.getA()
        # predict
        ypred = np.dot(yflat, theta)
        return ypred
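# Usage sketch (illustrative, not from the original source): fit the
# time-difference model on a random series and forecast the row after the
# last d rows. Assumes utils.OnlineLinearRegression is importable as in
# the example script elsewhere in this repository.
if __name__ == "__main__":
    X = np.random.rand(50, 3)   # 50 observations, 3 features
    td = TimeDifference(d=2)    # difference window of 2 rows
    td.train(X)
    y = X[-2:, ]                # last d rows form the query window
    print(td.predict(y))        # 1 x 3 forecast of the next row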
class UnscentedKalmanFilter:
    """
    This is a simplification of the algorithm described in the paper on
    the subject.

    [1] Wan, E. A., & Merwe, R. (2000). The unscented Kalman filter for
        nonlinear estimation. IEEE Adaptive Systems for Signal
        Processing, Communications, and Control Symposium, pp. 153-158.

    The paper made use of a feed-forward network to predict the next
    measurement based on the immediate past measurement data. This
    implementation avoids the feed-forward network to sidestep
    second-system effects; instead, a recursive linear regression is
    used to estimate the parameters.

    The paper made use of a sigma matrix for each auto-correlation
    between the present data and the past measurement. The mixing matrix
    results in a form of matrix factorization, which is avoided in this
    implementation. In our solution, we have used vectors as inputs to
    the recursive linear algorithm to avoid the computational cost of
    matrix factorization.

    X is state
    Z is measurement

    [2] Julier, S. J. The scaled unscented transformation. Retrieved
        06/15, 2017, from
        https://www.cs.unc.edu/~welch/kalman/media/pdf/ACC02-IEEE1357.PDF
    [3] Galanos, K. Recursive least squares. Retrieved 06/15, 2017, from
        http://saba.kntu.ac.ir/eecd/people/aliyari/NN%20%20files/rls.pdf
    """

    def __init__(self):
        '''
        accept the data in a matrix format
        '''
        self.olrState = OnlineLinearRegression()        # state
        self.olrMeasurement = OnlineLinearRegression()  # measurement
        self.x = None
        self.z = None
        self.xhat = None
        self.yhat = None
        self.P_yy = None
        self.P_xy = None
        self.Phatx = None

    def init(self, X, Z):
        """
        perform initial train on a batch to estimate initial parameters.
        X: states, Z: measurements
        """
        # prepare z and z-1 for measurement
        for x, z in zip(Z, Z[1:]):
            xv, zv = x.reshape((1, len(x))), z.reshape((1, len(z)))
            self.olrMeasurement.update(xv, zv)
        # prepare state measurement
        for xk_1, xk in zip(Z, X):
            x, y = xk_1.reshape((1, len(xk_1))), xk.reshape((1, len(xk)))
            self.olrState.update(x, y)
        self.x = np.mean(X, axis=0)[np.newaxis]
        self.z = np.mean(Z, axis=0)[np.newaxis]
        self.H = self.olrState.getA()
        self.F = self.olrMeasurement.getA()

    def Wm(self, X, k=0, alpha=0.0001):
        tmplist = []
        L = X.shape[1]  # dimension of X
        start, end = 0, L
        lamda = (alpha * alpha * (L + k)) - L
        den = (lamda + L)
        for ind in range(start, end):
            if ind == start:
                if den < 0.00001:
                    tmp = 0.00001
                else:
                    tmp = lamda / den
                tmplist.append(tmp)
            else:
                if den < 0.00001:
                    tmp = 0.00001
                else:
                    tmp = 0.5 / den
                tmplist.append(tmp)
        tmparray = np.asarray([tmplist])
        indices = [0] * L
        # convert a vector to a matrix and perform a transpose
        return tmparray[indices].T

    def Wc(self, X, k=0, alpha=0.001, beta=2):
        tmplist = []
        L = X.shape[1]  # dimension of X
        start, end = 0, L
        lamda = (alpha * alpha * (L + k)) - L
        den = (lamda + L)
        for ind in range(start, end):
            if ind == start:
                if den < 0.00001:
                    tmp = 0.00001
                else:
                    tmp = lamda / den
                tmp = tmp + (1 - (alpha * alpha) + beta)
                tmplist.append(tmp)
            else:
                if den < 0.00001:
                    tmp = 0.00001
                else:
                    tmp = 0.5 / den
                tmplist.append(tmp)
        tmparray = np.asarray([tmplist])
        indices = [0] * L
        # convert a vector to a matrix and perform a transpose
        return tmparray[indices].T

    def sigmaXMatrix(self, X, k=0, alpha=0.0001):
        tmplist = []
        L = X.shape[1]  # dimension of the X sigma vector
        start, end = 0, (2 * L)
        Px = np.dot(X.T, X)
        lamda = (alpha * alpha * (L + k)) - L
        # for a single data point there is no need to estimate the mean
        meanVec = X.reshape((1, L))
        for ind in range(start, end):
            if ind == start:
                tmplist.append(meanVec.tolist()[0])
            elif ind < (end // 2):
                tvec = (L + lamda) * Px[ind, ]
                b = meanVec + np.power(tvec, 0.5)
                tmplist.append(b.tolist()[0])
            else:
                tvec = (L + lamda) * Px[(ind - L), ]
                b = meanVec - np.power(tvec, 0.5)
                tmplist.append(b.tolist()[0])
        return np.asarray(tmplist).T

    def update(self):
        '''
        update the parameters of the kalman filter
        '''
        # update prior
        zPos = np.dot(self.z, self.F)
        X = self.sigmaXMatrix(zPos)  # sigma matrix
        X = np.mat(X)
        wm = np.mat(self.Wm(X))
        wc = np.mat(self.Wc(X))
        xhat = X * wm
        tmm = (X - xhat)
        Phatx = wc * tmm.T * tmm

        yPos = np.dot(self.z, self.H)
        yX = self.sigmaXMatrix(yPos)  # sigma matrix
        wm_y = np.mat(self.Wm(yX))
        wc_y = np.mat(self.Wc(yX))
        Y = np.mat(yX)
        yhat = Y * wm_y
        tmm = (Y - yhat)
        P_yy = wm_y * tmm.T * tmm

        tmmy = (Y - yhat)
        tmmx = (X - xhat)
        H = np.mat(self.H)
        P_xy = wc_y * tmmy.T * H.T * tmmx

        self.xhat = xhat
        self.yhat = yhat
        self.P_yy = P_yy
        self.P_xy = P_xy
        self.Phatx = Phatx

    def predict(self, x, z):
        """
        predict the next state based on the past state and measurement
        """
        # make the inverse of the matrix stable
        K = np.linalg.pinv(np.nan_to_num(self.P_yy)) * self.P_xy  # kalman gain
        H = np.mat(self.H)
        nextstate = z * H
        error = self.sigmaXMatrix(x) - self.sigmaXMatrix(self.x)
        self.z = error * K * self.xhat.T
        self.z = np.mean(self.z, axis=0)[np.newaxis]
        self.Phatx = self.Phatx - (K.T * self.P_yy * K)
        self.olrMeasurement.update(self.z, x)  # ( Z, Z[1:] )
        self.olrState.update(self.z, x)        # ( Z, X )
        self.H = self.olrState.getA()
        self.F = self.olrMeasurement.getA()
        return nextstate
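# Usage sketch (illustrative, not from the original source): run the
# filter on random state/measurement batches of equal dimension, then
# step it forward one observation at a time. Assumes
# utils.OnlineLinearRegression is importable as in the example script
# elsewhere in this repository.
if __name__ == "__main__":
    X = np.random.rand(60, 4)   # states
    Z = np.random.rand(60, 4)   # measurements (same dimension as states)
    ukf = UnscentedKalmanFilter()
    ukf.init(X, Z)
    ukf.update()  # refresh the sigma statistics before predicting
    print(ukf.predict(X[-1][np.newaxis], Z[-1][np.newaxis]))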
class arma:
    """
    This implementation is inspired by the description in:

    Hamilton, J. D. (1994). Time series analysis. Princeton, NJ:
    Princeton University Press.

    This is the ARMA process.
    """

    def __init__(self, p=1, q=1):
        self.mLR = OnlineLinearRegression()
        self.window = None  # store some data points in memory
        # set parameters
        self.p, self.q = p, q

    def init(self, X):
        n, m = X.shape
        # white noise
        mean, std = 0, 1
        self.error = np.random.normal(mean, std,
                                      size=self.q).reshape((1, self.q))
        mVal = max(self.p, self.q)
        if mVal + 1 > n:
            raise AssertionError("faulty matrix dimension as the error")
        y = X[-1].reshape((1, m))  # target prediction
        ind = n - 1
        pvec = X[ind - self.p: ind]
        qvec = self.error
        tvec = pvec.tolist()[0] + qvec.tolist()[0]
        x = np.array(tvec).reshape((1, len(tvec)))
        self.mLR.update(x, y)
        self.window = X

    def update(self, x):
        """
        x: is a row vector
        """
        # update the error
        theta = self.mLR.getA()
        window = self.window
        n, m = window.shape
        mbias = np.ones((n, self.q))
        window = np.column_stack((window, mbias))
        ypred = np.dot(window, theta)
        error = self.window - ypred
        error = LA.norm(error, axis=1)
        ind = n
        err = error[ind - self.q: ind]
        self.error = np.array(err).reshape((1, self.q))
        # add element at the back
        X = np.vstack((self.window, x))
        # remove element from the front
        X = np.delete(X, (0), axis=0)
        self.window = X

    def predict(self):
        X = np.array(self.window)
        n, m = X.shape
        ind = n - 1
        pvec = X[ind - self.p: ind]
        qvec = self.error
        tvec = pvec.tolist()[0] + qvec.tolist()[0]
        yflat = np.array(tvec).reshape((1, len(tvec)))
        theta = self.mLR.getA()
        # predict
        ypred = np.dot(yflat, theta)
        return ypred
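# Usage sketch (illustrative, not from the original source): fit an
# ARMA(1, 1) model on a random series, slide the window forward with a
# new observation, and forecast the next row. Assumes
# utils.OnlineLinearRegression and numpy.linalg (as LA) are imported at
# the top of this file.
if __name__ == "__main__":
    X = np.random.rand(30, 2)           # 30 observations, 2 features
    model = arma(p=1, q=1)
    model.init(X)
    model.update(np.random.rand(1, 2))  # push one new row into the window
    print(model.predict())              # 1 x 2 forecast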
class DiscreteKalmanFilter:
    """
    This is based on the description in the paper:

    Welch, G., & Bishop, G. An introduction to the kalman filter.
    Retrieved 06/15, 2017, from
    https://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf

    The equations presented can be described as
        xk = A xk-1 + B uk-1
        Pk = A Pk-1 A.T + Q
    All the variable names match their meaning in the paper.
    x: state
    z: measurement

    Caveat: the algorithm only works when the dimensions of X and Z are
    equal. In practice, we can pad with zeros and select the specific
    columns. Q cannot be observed directly, but it can be randomized, as
    even a bad choice may still give a good result.
    """

    def __init__(self):
        self.A = None
        self.P = None
        self.Q = None
        self.H = None
        self.R = None
        self.olrState = OnlineLinearRegression()        # state
        self.olrMeasurement = OnlineLinearRegression()  # measurement
        self.x = None
        self.z = None

    def init(self, X, Z):
        """
        perform initial train on a batch to estimate initial parameters.
        X: states, Z: measurements
        """
        # prepare xk and xk_1 for state
        for xk_1, xk in zip(X, X[1:]):
            x, y = xk_1.reshape((1, len(xk_1))), xk.reshape((1, len(xk_1)))
            self.olrState.update(x, y)
        self.A = self.olrState.getA()
        self.P = self.olrState.getCovarianceMatrix()
        # prepare z and x for measurement
        for x, z in zip(X, Z):
            a, b = x.reshape((1, len(x))), z.reshape((1, len(z)))
            self.olrMeasurement.update(a, b)
        self.H = self.olrMeasurement.getA()
        self.R = self.olrMeasurement.getCovarianceMatrix()
        n, m = self.A.shape
        self.Q = np.random.rand(n, m)
        self.x = np.mean(X, axis=0).reshape((1, X.shape[1]))
        self.z = np.mean(Z, axis=0).reshape((1, Z.shape[1]))

    def update(self):
        '''
        update the parameters of the kalman filter
        '''
        # update prior
        P = np.mat(self.P)
        R = np.mat(self.R)
        H = np.mat(self.H)
        z = np.mat(self.z)
        x = np.mat(self.x)
        tempM = (H.T * P * H) + R
        tmat = np.linalg.pinv(np.nan_to_num(tempM))  # ensure stability
        K = P * H * tmat
        self.x = x + ((z - (x * H)) * K.T)
        I = np.mat(np.eye(len(P)))
        self.P = (I - (K * H.T)) * P

    def predict(self, x, z):
        """
        predict the next state based on the past state and measurement
        """
        self.z = z
        nextstate = np.dot(x, self.A)
        # update posterior
        self.x = nextstate
        P = np.mat(self.P)
        Q = np.mat(self.Q)
        A = np.mat(self.A)
        self.P = (A * P * A.T) + Q
        # ADDED DETAILS
        self.olrState.update(x, nextstate)
        self.A = self.olrState.getA()
        self.olrMeasurement.update(x, z)
        #self.Q = self.olrMeasurement.getCovarianceNoiseMatrix()
        n, m = self.A.shape
        self.Q = np.random.rand(n, m)
        self.H = self.olrMeasurement.getA()
        return nextstate
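# Usage sketch (illustrative, not from the original source): initialize
# the filter from equal-dimension state/measurement batches, then
# alternate the correction and prediction steps. Assumes
# utils.OnlineLinearRegression is importable as in the example script
# elsewhere in this repository.
if __name__ == "__main__":
    X = np.random.rand(100, 3)  # states
    Z = np.random.rand(100, 3)  # measurements (same dimension as states)
    dkf = DiscreteKalmanFilter()
    dkf.init(X, Z)
    dkf.update()  # correct the state estimate with the stored measurement
    print(dkf.predict(X[-1][np.newaxis], Z[-1][np.newaxis]))  # next state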
#########################################################################
###### Name: Kenneth Emeka Odoh
###### Purpose: An example of using the Recursive Linear Regression
#########################################################################
from __future__ import division, print_function
import pandas as pd
import numpy as np
import sys
import os
sys.path.append(os.path.abspath('../ML'))

from utils import OnlineLinearRegression

if __name__ == "__main__":
    X = np.random.rand(100, 5)
    olr = OnlineLinearRegression()
    y = np.random.rand(1, 5)
    x = np.random.rand(1, 15)
    print(x.shape, y.shape)
    olr.update(x, y)
    print("A")
    print(olr.getA())
    print(olr.getA().shape)
    print("B")
    print(olr.getB())
    print(olr.getB().shape)
    """
    y = np.random.rand(1,5)
    """
""" load an existing file """ ispresent = os.path.exists(self.fileNameWithPath) if ispresent: module = cPickle.load(open(self.fileNameWithPath)) #get the model return module raise FileNotFoundError( "Attempting to load a module that was not saved!!!!") if __name__ == "__main__": filename = "linear.pkl" mObject = OnlineLinearRegression() #state fObject = FileManager(filename) fObject.save(mObject) #save a model model = fObject.load() #load a model X = np.random.rand(100, 2) for xk_1, xk in zip(X, X[1:]): x, y = xk_1.reshape((1, len(xk_1))), xk.reshape((1, len(xk_1))) model.update(x, y) print model.getA()
class DiscreteKalmanFilter:
    def __init__(self):
        self.A = None
        self.P = None
        self.Q = None
        self.H = None
        self.K = None
        self.R = None
        self.olrState = OnlineLinearRegression()  # state estimation
        #self.olrMeasurement = OnlineLinearRegression()  # measurement estimation
        self.x = None
        self.z = None

    def init(self, X, Z):
        '''
        :param X: state matrix, n x m
        :param Z: observation matrix, n x m
        '''
        for xk_1, xk in zip(X, X[1:]):
            x, y = xk_1.reshape((1, len(xk_1))), xk.reshape((1, len(xk_1)))
            self.olrState.update(x, y)
        self.A = self.olrState.getA()  # state transition matrix
        self.P = self.olrState.getCovarianceMatrix()
        #for x, z in zip(X, Z):
        #    a, b = x.reshape((1, len(x))), z.reshape((1, len(z)))
        #    self.olrMeasurement.update(a, b)
        self.H = np.mat(np.eye(X.shape[1]))
        self.R = np.mat(np.zeros(shape=(X.shape[1], X.shape[1])))
        n, m = self.A.shape
        self.Q = np.random.randn(n, m) * 100  # here Q is normally distributed
        self.x = np.array(X[-1]).reshape((1, X.shape[1]))
        self.z = np.mean(Z, axis=0).reshape((1, Z.shape[1]))

    def update(self, z_in):
        '''
        :param z_in: the latest observation, 1 x m
        '''
        P = np.mat(self.P)  # state error covariance
        R = np.mat(self.R)  # measurement noise covariance
        H = np.mat(self.H)
        z = np.mat(z_in)
        x = np.mat(self.x)
        x = np.dot(x, self.A)
        tempM = (H.T * P * H) + R
        tmat = np.linalg.pinv(np.nan_to_num(tempM))  # compute the pseudo-inverse
        K = P * H * tmat  # kalman gain
        print(K)
        self.x = x + ((z - (x * H)) * K.T)
        I = np.mat(np.eye(len(P)))
        self.P = (I - (K * H.T)) * P  # update P
        #print(self.x)
        return np.squeeze(np.array(self.x))[-1] + np.random.randn() * 100

    def predict(self, x, z):
        self.z = z
        nextstate = np.dot(x, self.A)
        nextmeasurement = np.dot(nextstate, self.H)
        self.x = nextstate
        P = np.mat(self.P)
        Q = np.mat(self.Q)
        A = np.mat(self.A)
        self.P = (A * P * A.T) + Q
        self.olrState.update(x, nextstate)
        self.A = self.olrState.getA()
        #self.olrMeasurement.update(x, z)
        n, m = self.A.shape
        self.Q = np.random.rand(n, m)
        #self.H = self.olrMeasurement.getA()
        return nextstate
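# Usage sketch (illustrative, not from the original source): this
# variant folds the time update into update(), so a single call both
# propagates the state and corrects it with the newest observation.
# Assumes OnlineLinearRegression is importable as in the other files in
# this repository. Note that update() adds large random noise
# (randn() * 100) to its return value, matching the scale of Q above.
if __name__ == "__main__":
    X = np.random.rand(100, 3)  # states
    Z = np.random.rand(100, 3)  # observations
    dkf = DiscreteKalmanFilter()
    dkf.init(X, Z)
    # scalar: last component of the corrected state, plus noise
    print(dkf.update(Z[-1][np.newaxis]))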
class ExtendedKalmanFilter:
    """
    This implementation is inspired by the description in the papers.

    [1] Welch, G., & Bishop, G. An introduction to the kalman filter.
        Retrieved 06/15, 2017, from
        https://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf
    [2] Terejanu, G. A. Extended kalman filter tutorial. Retrieved
        06/15, 2017, from
        https://www.cse.sc.edu/~terejanu/files/tutorialEKF.pdf

    The equations presented can be described as
        xk = f(xk-1, wk-1)
        zk = h(xk, vk)
    x: state
    z: measurement
    All the variable names match their meaning in the paper.

    Q cannot be observed directly, but it can be randomized, as even a
    bad choice may still give a good result.
    """

    def __init__(self):
        self.A = None
        self.P = None
        self.Q = None
        self.H = None
        self.R = None
        self.W = None
        self.olrState = OnlineLinearRegression()        # state
        self.olrMeasurement = OnlineLinearRegression()  # measurement
        self.x = None
        self.z = None

    def init(self, X, Z):
        """
        perform initial train on a batch to estimate initial parameters.
        X: states, Z: measurements
        """
        # prepare xk and xk_1 for state
        for xk_1, xk in zip(X, X[1:]):
            x, y = xk_1.reshape((1, len(xk_1))), xk.reshape((1, len(xk_1)))
            self.olrState.update(x, y)
        self.P = self.olrState.getCovarianceMatrix()
        self.A = self.olrState.getA()
        # prepare z and x for measurement
        for x, z in zip(X, Z):
            a, b = x.reshape((1, len(x))), z.reshape((1, len(z)))
            self.olrMeasurement.update(a, b)
        self.H = self.olrMeasurement.getA()
        self.R = self.olrMeasurement.getCovarianceMatrix()
        n, m = self.A.shape
        self.Q = np.random.rand(n, m)
        self.x = np.mean(X, axis=0).reshape((1, X.shape[1]))
        self.z = np.mean(Z, axis=0).reshape((1, Z.shape[1]))

    def update(self):
        '''
        update the parameters of the kalman filter
        '''
        # update prior
        v = self.olrMeasurement.getCovarianceNoiseMatrix()
        w = self.olrState.getCovarianceNoiseMatrix()
        self.A = differentiate(f, self.A, self.x)
        P = np.mat(self.P)
        R = np.mat(self.R)
        W = np.mat(differentiate(f, self.A, w))
        H = np.mat(differentiate(h, self.H, self.x))
        V = np.mat(differentiate(h, self.H, v))
        z = np.mat(self.z)
        x = np.mat(self.x)
        # difference in dimension
        val = int(math.fabs(V.shape[0] - R.shape[0])) or int(
            math.fabs(V.shape[1] - R.shape[1]))
        zeros = np.random.rand(1, V.shape[0])
        zeros = zeros.reshape((1, V.shape[0]))
        for ind in range(val):
            V = np.concatenate((V, zeros.T), axis=1)  # pad with zeros
        tmpM = (H * P * H.T) + (V * R * V.T)
        tmat = np.linalg.pinv(np.nan_to_num(tmpM))  # ensure stability
        K = P * H.T * tmat
        self.x = x + ((z - h(x * H.T)) * K.T)
        I = np.mat(np.eye(len(P)))
        self.P = (I - (K * H)) * P
        self.W = W

    def predict(self, x, z):
        """
        predict the next state based on the past state and measurement
        """
        self.z = z
        nextstate = f(np.dot(x, self.A))
        # update posterior
        self.x = nextstate
        P = np.mat(self.P)
        Q = np.mat(self.Q)
        A = np.mat(self.A)
        self.P = (A * P * A.T) + self.W * Q * self.W.T
        # extra details
        self.olrState.update(x, nextstate)
        self.A = self.olrState.getA()
        #self.A = differentiate(f, self.A, x)
        self.olrMeasurement.update(x, z)
        n, m = self.A.shape
        self.Q = np.random.rand(n, m)
        self.H = self.olrMeasurement.getA()
        return nextstate
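# Usage sketch (illustrative, not from the original source): the class
# relies on f, h, and differentiate being defined elsewhere in this
# module, and on OnlineLinearRegression exposing
# getCovarianceNoiseMatrix() as referenced elsewhere in this repository.
# The identity choices below are hypothetical stand-ins so the sketch is
# self-contained; swap in the module's real nonlinear functions.
if __name__ == "__main__":
    f = lambda x: x                     # hypothetical state function
    h = lambda x: x                     # hypothetical measurement function
    differentiate = lambda fn, A, x: A  # hypothetical Jacobian stand-in
    X = np.random.rand(100, 3)
    Z = np.random.rand(100, 3)
    ekf = ExtendedKalmanFilter()
    ekf.init(X, Z)
    ekf.update()  # linearize and correct before predicting
    print(ekf.predict(X[-1][np.newaxis], Z[-1][np.newaxis]))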