Example #1
def cost_func(B, model, profile, profiler):

    cost = ml.zeros([model.T, 1])
    U = ml.zeros([model.uDim, model.T])
    T = model.T

    for t in range(T - 1):
        U[:, t] = B[0:model.xDim, t + 1] - B[0:model.xDim, t]

        B[:, t + 1] = belief.belief_dynamics(B[:, t], U[:, t], None, model)

        if max(U[:, t]) > 1:
            cost[t] = 1000

        elif abs(B[0, t]) > model.xMax[0]:
            cost[t] = 1000

        elif abs(B[1, t]) > model.xMax[1]:
            cost[t] = 1000

        else:
            null, s = belief.decompose_belief(B[:, t], model)
            cost[t] = model.alpha_belief * ml.trace(
                s * s) + model.alpha_control * ml.sum(U[:, t].T * U[:, t])

    x, s = belief.decompose_belief(B[:, T - 1], model)
    cost[T - 1] = model.alpha_final_belief * ml.trace(s * s)

    return cost, B, U
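
The per-step cost above combines a belief-uncertainty term, trace(s*s), with a control-effort term, u'u, weighted by model.alpha_belief and model.alpha_control. A minimal self-contained sketch of just that arithmetic, with hypothetical weights and values standing in for the model fields and the decomposed belief:

import numpy.matlib as ml

# Hypothetical stand-ins for model.alpha_belief and model.alpha_control.
alpha_belief, alpha_control = 10.0, 1.0

s = ml.eye(2) * 0.5              # covariance square root from decompose_belief (assumed 2x2)
u = ml.matrix([[0.1], [0.2]])    # one control column, U[:, t]

step_cost = alpha_belief * ml.trace(s * s) + alpha_control * ml.sum(u.T * u)
print(float(step_cost))          # 10 * 0.5 + 1 * 0.05 = 5.05
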
Example #2
def cost_func(B,model,profile,profiler):

    cost = ml.zeros([model.T,1])
    U = ml.zeros([model.uDim,model.T])
    T = model.T

    for t in range(T-1):
        U[:,t] = B[0:model.xDim,t+1] - B[0:model.xDim,t]

        B[:,t+1] = belief.belief_dynamics(B[:,t], U[:,t], None, model)

        if max(U[:,t]) > 1:
            cost[t] = 1000

        elif abs(B[0,t]) > model.xMax[0]:
            cost[t] = 1000

        elif abs(B[1,t]) > model.xMax[1]:
            cost[t] = 1000
            
        else:  
            null, s = belief.decompose_belief(B[:,t], model)
            cost[t] = model.alpha_belief*ml.trace(s*s)+model.alpha_control*ml.sum(U[:,t].T*U[:,t])

    x, s = belief.decompose_belief(B[:,T-1], model)
    cost[T-1] = model.alpha_final_belief*ml.trace(s*s)



    return cost, B, U
Example #3
    def calculateQ(self, k):
        baseQ = M(self.Q(k))
        K     = M(self.kf.K[k-1])
        D     = M(self.D[k])
        
        Qest = K * D * K.T
        #print D, K

        f = mt.trace(Qest) / mt.trace(baseQ)
        f = np.power(f, self.exponent)
        f = min(10000000., f)
        Q = baseQ * f
        self.alpha[k] = f
        return Q
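
A self-contained sketch of the scaling idea above, using plain NumPy arrays as hypothetical stand-ins for the M()/mt helpers: the gain-based estimate K*D*K' is compared with the nominal Q through their traces, and the capped, exponentiated ratio rescales Q.

import numpy as np

baseQ = np.eye(2) * 0.1        # nominal process-noise covariance
K = np.array([[0.5], [0.2]])   # Kalman gain (2x1)
D = np.array([[4.0]])          # innovation-based term (1x1)
exponent = 1.0                 # hypothetical tuning exponent

Qest = K @ D @ K.T
f = min(1e7, (np.trace(Qest) / np.trace(baseQ)) ** exponent)
Q = baseQ * f                  # adaptively scaled process noise
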
Example #4
def compute_merit(B, U, model, penalty_coeff):
    xDim = model.xDim
    T = model.T
    merit = 0
    
    for t in range(0,T-1):
        x, s = belief.decompose_belief(B[:,t], model)
        merit += model.alpha_belief*ml.trace(s*s)
        merit += model.alpha_control*ml.sum(U[:,t].T*U[:,t])
        merit += penalty_coeff*ml.sum(np.abs(B[:,t+1]-belief.belief_dynamics(B[:,t],U[:,t],None,model)))
    
    x, s = belief.decompose_belief(B[:,T-1], model)
    merit += model.alpha_final_belief*ml.trace(s*s)

    return merit
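
compute_merit adds an L1 penalty on the defect between each belief in the trajectory and the belief predicted by the dynamics, which is what pushes the optimizer toward dynamical feasibility. A minimal sketch of that penalty term alone, with hypothetical values in place of B and belief_dynamics:

import numpy as np

penalty_coeff = 50.0                        # hypothetical penalty weight
b_next_actual = np.array([1.0, 0.5, 0.1])   # stands in for B[:, t+1]
b_next_pred = np.array([0.9, 0.6, 0.1])     # stands in for belief_dynamics(B[:, t], U[:, t], None, model)

defect_penalty = penalty_coeff * np.sum(np.abs(b_next_actual - b_next_pred))
# roughly 50 * (0.1 + 0.1 + 0.0) = 10.0
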
Example #5
    def calculateQ(self, k):
        Q = M(self.Q(k))
        R = M(self.kf.R(k))
        H = M(self.kf.H(k))
        D = M(self.D[k-1])

        alpha = np.trace(D - R) / np.trace(H * M(self.kf.Pminus[k-1]) * H.T)
        alpha = np.asscalar(alpha)
        if np.isfinite(alpha) and alpha > 0:
            alpha = np.power(alpha, self.exponent)
            alpha = max(0.0001, min(alpha, 1000. * mt.trace(R) / mt.trace(Q)))
        else:
            alpha = 0.0001
        Q = Q * alpha
        self.alpha[k] = alpha
        return Q
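
This second calculateQ variant scales Q by an innovation-based factor, roughly alpha = trace(D - R) / trace(H * Pminus * H'), clipped to a sane range. A self-contained numeric sketch with hypothetical matrices standing in for the filter internals:

import numpy as np

Q = np.eye(2) * 0.01           # nominal process noise
R = np.eye(1) * 0.5            # measurement noise
H = np.array([[1.0, 0.0]])     # measurement matrix
Pminus = np.eye(2) * 0.2       # prior state covariance
D = np.array([[0.8]])          # measured innovation covariance
exponent = 1.0                 # hypothetical tuning exponent

alpha = np.trace(D - R) / np.trace(H @ Pminus @ H.T)
if np.isfinite(alpha) and alpha > 0:
    alpha = min(max(alpha ** exponent, 0.0001), 1000.0 * np.trace(R) / np.trace(Q))
else:
    alpha = 0.0001
Q_adapted = Q * alpha          # here alpha = 0.3 / 0.2 = 1.5
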
Example #6
def compute_forward_simulated_cost(b, U, model):
 
    T = model.T
    cost = 0
    
    b_t = b

    for t in range(0,T-1):
        x_t, s_t = decompose_belief(b_t, model)
        cost += model.alpha_belief*ml.trace(s_t*s_t)
        cost += model.alpha_control*ml.sum(U[:,t].T*U[:,t])
        b_t = belief_dynamics(b_t, U[:,t], None, model)
    
    x_T, s_T = decompose_belief(b_t, model)
    cost += model.alpha_final_belief*ml.trace(s_T*s_T)

    return cost
Example #7
def compute_forward_simulated_cost(b, U, model):

    T = model.T
    cost = 0

    b_t = b

    for t in range(0, T - 1):
        x_t, s_t = decompose_belief(b_t, model)
        cost += model.alpha_belief * ml.trace(s_t * s_t)
        cost += model.alpha_control * ml.sum(U[:, t].T * U[:, t])
        b_t = belief_dynamics(b_t, U[:, t], None, model)

    x_T, s_T = decompose_belief(b_t, model)
    cost += model.alpha_final_belief * ml.trace(s_T * s_T)

    return cost
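
Unlike cost_func and compute_merit, which read every belief from the optimization variables B, compute_forward_simulated_cost propagates a single belief forward through the dynamics and accumulates cost along the way. A minimal sketch of that open-loop rollout pattern, with a hypothetical linear map standing in for belief_dynamics and only the control term kept for brevity:

import numpy as np

def toy_dynamics(b, u):
    # Hypothetical stand-in for belief_dynamics(b, u, None, model).
    return 0.9 * b + u

T = 4
U = np.array([[0.1, 0.0, -0.1],
              [0.0, 0.2, 0.0]])        # uDim x (T-1) controls
b_t = np.array([1.0, -0.5])            # initial belief (state part only here)
cost = 0.0
for t in range(T - 1):
    cost += np.sum(U[:, t] ** 2)       # control-effort term
    b_t = toy_dynamics(b_t, U[:, t])   # open-loop propagation
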
Example #8
def SPIRIT(A, lamb, energy, k0=1, holdOffTime=0, reorthog=False, evalMetrics="F"):

    A = np.mat(A)

    n = A.shape[1]
    totalTime = A.shape[0]
    Proj = npm.ones((totalTime, n)) * np.nan
    recon = npm.zeros((totalTime, n))

    # initialize w_i to unit vectors
    W = npm.eye(n)
    d = 0.01 * npm.ones((n, 1))
    m = k0  # number of eigencomponents

    relErrors = npm.zeros((totalTime, 1))

    sumYSq = 0.0
    E_t = []
    sumXSq = 0.0
    E_dash_t = []

    res = {}
    k_hist = []
    W_hist = []
    anomalies = []

    # incremental update W
    lastChangeAt = 0

    for t in range(totalTime):

        k_hist.append(m)

        # update W for each y_t
        x = A[t, :].T  # new data as column vector

        for j in range(m):
            W[:, j], d[j], x = updateW(x, W[:, j], d[j], lamb)
            Wj = W[:, j]

        # Gram-Schmidt reorthogonalization
        if reorthog == True:
            W[:, :m], R = npm.linalg.qr(W[:, :m])

        # compute low-D projection, reconstruction and relative error
        Y = W[:, :m].T * A[t, :].T  # project to m-dimensional space
        xActual = A[t, :].T  # actual vector of the current time
        xProj = W[:, :m] * Y  # reconstruction of the current time
        Proj[t, :m] = Y.T
        recon[t, :] = xProj.T
        xOrth = xActual - xProj
        relErrors[t] = npm.sum(npm.power(xOrth, 2)) / npm.sum(npm.power(xActual, 2))

        # update energy
        sumYSq = lamb * sumYSq + npm.sum(npm.power(Y, 2))
        E_dash_t.append(sumYSq)
        sumXSq = lamb * sumXSq + npm.sum(npm.power(A[t, :], 2))
        E_t.append(sumXSq)

        # Record RSRE
        if t == 0:
            top = 0.0
            bot = 0.0

        top = top + npm.power(npm.linalg.norm(xActual - xProj), 2)

        bot = bot + npm.power(npm.linalg.norm(xActual), 2)

        new_RSRE = top / bot

        if t == 0:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### Metric EVALUATION ###
        # deviation from truth
        if evalMetrics == "T":

            Qt = W[:, :m]

            if t == 0:
                res["subspace_error"] = npm.zeros((totalTime, 1))
                res["orthog_error"] = npm.zeros((totalTime, 1))
                res["angle_error"] = npm.zeros((totalTime, 1))
                Cov_mat = npm.zeros([n, n])

            # Calculate covariance matrix of data up to time t
            Cov_mat = lamb * Cov_mat + npm.dot(xActual, xActual.T)
            # Get eigenvalues and eigenvectors
            WW, V = npm.linalg.eig(Cov_mat)
            # Use this to sort eigenvectors by descending eigenvalue
            eig_idx = WW.argsort()  # Get sort index
            eig_idx = eig_idx[::-1]  # Reverse order (default is ascending)
            # V_k = top m eigenvectors (by eigenvalue, once sorted).
            V_k = V[:, eig_idx[:m]]
            # Calculate subspace error
            C = npm.dot(V_k, V_k.T) - npm.dot(Qt, Qt.T)
            res["subspace_error"][t, 0] = 10 * np.log10(npm.trace(npm.dot(C.T, C)))  # frobenius norm in dB
            # Calculate angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k)
            eigVal, eigVec = npm.linalg.eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))
            res["angle_error"][t, 0] = angle

            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T, Qt) - npm.eye(m)
            res["orthog_error"][t, 0] = 10 * np.log10(npm.trace(npm.dot(F.T, F)))  # frobenius norm in dB

        # Energy thresholding
        ######################
        # check the lower bound of energy level
        if sumYSq < energy[0] * sumXSq and lastChangeAt < t - holdOffTime and m < n:
            lastChangeAt = t
            m = m + 1
            anomalies.append(t)
        # print 'Increasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        # check the upper bound of energy level
        elif sumYSq > energy[1] * sumXSq and lastChangeAt < t - holdOffTime and m < n and m > 1:
            lastChangeAt = t
            m = m - 1
        # print 'Decreasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        W_hist.append(W[:, :m])
    # set outputs

    # Gram-Schmidt reorthogonalization
    if reorthog == True:
        W[:, :m], R = npm.linalg.qr(W[:, :m])

    # Data Stores
    res2 = {
        "hidden": Proj,  # Array for hidden Variables
        "E_t": np.array(E_t),  # total energy of data
        "E_dash_t": np.array(E_dash_t),  # hidden var energy
        "e_ratio": np.array(E_dash_t) / np.array(E_t),  # Energy ratio
        "rel_orth_err": relErrors,  # orthoX error
        "RSRE": RSRE,  # Relative squared Reconstruction error
        "recon": recon,  # reconstructed data
        "r_hist": k_hist,  # history of r values
        "W_hist": W_hist,  # history of Weights
        "anomalies": anomalies,
    }

    res.update(res2)

    return res
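
The number of retained hidden variables m above is driven by energy thresholding: if the retained energy sumYSq drops below energy[0] of the total sumXSq, m grows; if it rises above energy[1] of the total, m shrinks, both subject to a hold-off period. A self-contained sketch of just that rule, with hypothetical running totals:

# Hypothetical running energies and thresholds (energy = (f_E, F_E)).
energy = (0.95, 0.98)
sumXSq, sumYSq = 100.0, 93.0            # total vs. retained energy so far
m, n = 2, 5                             # current / maximum number of hidden variables
t, lastChangeAt, holdOffTime = 40, 10, 20

if sumYSq < energy[0] * sumXSq and lastChangeAt < t - holdOffTime and m < n:
    lastChangeAt, m = t, m + 1          # capturing too little energy: add a component
elif sumYSq > energy[1] * sumXSq and lastChangeAt < t - holdOffTime and m > 1:
    lastChangeAt, m = t, m - 1          # capturing too much: drop a component
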
Example #9
def SPIRIT(streams, energyThresh, lamb, evalMetrics):

    # Make the input streams iterable
    if type(streams) == np.ndarray:
        streams_iter = iter(streams)

    # Max No. Streams
    if streams.ndim == 1:
        streams = np.expand_dims(streams, axis=1)
        num_streams = streams.shape[1]
    else: 
        num_streams = streams.shape[1]

    count_over = 0
    count_under = 0

#===============================================================================
#      Initialise k, w and d, lamb
#===============================================================================

    k = 1 # Hidden Variables, initialise to one 
    
    # Weights
    pc_weights = npm.zeros(num_streams)
    pc_weights[0, 0] = 1
    
    # initialise outputs
    res = {}
    all_weights = []
    k_hist = []
    anomalies = []
    x_dash = npm.zeros((1,num_streams))
    
    Eng = mat([0.00000001, 0.00000001])    
    
    E_xt = 0  # Energy of X at time t
    E_rec_i = mat([0.000000000000001]) # Energy of reconstruction

    Y = npm.zeros(num_streams)
    
    timeSteps = streams.shape[0]
    
#===============================================================================
# Main Loop 
#===============================================================================
    for t in range(1, timeSteps + 1): # t = 1,...,200

        k_hist.append(k)

        x_t_plus_1 = mat(next(streams_iter)) # Read in next signals

        d_i = E_rec_i * t

        # Step 1 - Update Weights 
        pc_weights, y_t_i, error = track_W(x_t_plus_1, 
                                               k, pc_weights, d_i,
                                               num_streams, 
                                               lamb)
        # Record hidden variables
        padding = num_streams - k
        y_bar_t = npm.hstack((y_t_i, mat([nan] * padding)))
        Y = npm.vstack((Y,y_bar_t))
        
        # Record Weights
        all_weights.append(pc_weights)  
        # Record reconstructed z and RSRE
        x_dash = npm.vstack((x_dash, y_t_i * pc_weights))
               
        # Record RSRE
        if t == 1:
            top = 0.0
            bot = 0.0
            
        top = top + (norm(x_t_plus_1 - x_dash) ** 2 )

        bot = bot + (norm(x_t_plus_1) ** 2)
        
        new_RSRE = top / bot   
                  
        if t == 1:
            RSRE = new_RSRE
        else:                  
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### FOR EVALUATION ###
        #deviation from truth
        if evalMetrics == 'T' :
            
            Qt = pc_weights.T            
            
            if t == 1 :
                res['subspace_error'] = npm.zeros((timeSteps,1))
                res['orthog_error'] = npm.zeros((timeSteps,1))                


                res['angle_error'] = npm.zeros((timeSteps,1))
                Cov_mat = npm.zeros([num_streams,num_streams])
                
            # Calculate covariance matrix of data up to time t
            Cov_mat = lamb * Cov_mat +  npm.dot(x_t_plus_1,  x_t_plus_1.T)
            # Get eigenvalues and eigenvectors             
            W , V = eig(Cov_mat)
            # Use this to sort eigenvectors by descending eigenvalue
            eig_idx = W.argsort() # Get sort index
            eig_idx = eig_idx[::-1] # Reverse order (default is ascending)
            # V_k = top k eigenvectors (by eigenvalue, once sorted).
            V_k = V[:, eig_idx[:k]]          
            # Calculate subspace error        
            C = npm.dot(V_k , V_k.T) - npm.dot(Qt , Qt.T)  
            res['subspace_error'][t-1,0] = 10 * np.log10(npm.trace(npm.dot(C.T , C))) #frobenius norm in dB
        
            # Calculate angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k) 
            eigVal, eigVec = eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))        
            res['angle_error'][t-1,0] = angle        
    
            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T , Qt) - npm.eye(k)
            res['orthog_error'][t-1,0] = 10 * np.log10(npm.trace(npm.dot(F.T , F))) #frobenius norm in dB
              

        # Step 2 - Update Energy estimate
        E_xt = ((lamb * (t-1) * E_xt) + norm(x_t_plus_1) ** 2) / t
    
        for i in range(k):
            E_rec_i[0, i] = ((lamb * (t-1) * E_rec_i[0, i]) + (y_t_i[0, i] ** 2)) / t

        # Step 3 - Estimate the retained energy
        E_retained = npm.sum(E_rec_i,1)
    
        # Record Energy  
        Eng_new = npm.hstack((E_xt, E_retained[0,0]))
        Eng = npm.vstack((Eng, Eng_new))
    
        if E_retained < energyThresh[0] * E_xt:
            if k != num_streams:
                k = k + 1       
                # Initialise E_k+1 <-- 0
                E_rec_i = npm.hstack((E_rec_i, mat([0]))) 
                # Initialise W_i+1
                new_weight_vec = npm.zeros(num_streams)  
                new_weight_vec[0, k-1] = 1
                pc_weights = npm.vstack((pc_weights, new_weight_vec))
                anomalies.append(t -1)
            else:
                count_over += 1
        elif E_retained > energyThresh[1] * E_xt:
            if k > 1 :
                k = k - 1
                # discard w_k and error
                pc_weights = delete(pc_weights, -1, 0)    
                # Discard E_rec_i[k]
                E_rec_i = delete(E_rec_i, -1)
            else:
                count_under += 1
          
          
    # Data Stores
    res2 = {'hidden' :  Y,                        # Array for hidden Variables
           'weights' : all_weights,
           'E_t' : Eng[:,0],                     # total energy of data 
           'E_dash_t' : Eng[:,1],                # hidden var energy
           'e_ratio' : np.divide(Eng[:,1], Eng[:,0]),      # Energy ratio 
           'RSRE' : RSRE,                        # Relative squared Reconstruction error 
           'recon' : x_dash,                     # reconstructed data
           'r_hist' : k_hist, # history of r values 
           'anomalies' : anomalies}  
           
    res.update(res2)
              
    return res, all_weights
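
Steps 2 and 3 above maintain exponentially weighted running averages: E_xt tracks the total signal energy and E_rec_i the energy captured by each hidden variable, and their sum (the retained energy) is what gets compared against the thresholds. A minimal sketch of one such update, with hypothetical values:

import numpy as np

lamb, t = 0.96, 10                     # forgetting factor, current time step
E_xt = 4.0                             # running total-energy estimate
E_rec = np.array([3.2, 0.5])           # per-hidden-variable energies (k = 2)
x_t = np.array([1.0, 2.0, 0.5])        # new sample
y_t = np.array([2.1, 0.3])             # its hidden-variable projection

E_xt = (lamb * (t - 1) * E_xt + np.linalg.norm(x_t) ** 2) / t
E_rec = (lamb * (t - 1) * E_rec + y_t ** 2) / t
E_retained = E_rec.sum()               # compared against energyThresh * E_xt
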
Example #10
def SPIRIT(A,
           lamb,
           energy,
           k0=1,
           holdOffTime=0,
           reorthog=False,
           evalMetrics='F'):

    A = np.mat(A)

    n = A.shape[1]
    totalTime = A.shape[0]
    Proj = npm.ones((totalTime, n)) * np.nan
    recon = npm.zeros((totalTime, n))

    # initialize w_i to unit vectors
    W = npm.eye(n)
    d = 0.01 * npm.ones((n, 1))
    m = k0  # number of eigencomponents

    relErrors = npm.zeros((totalTime, 1))

    sumYSq = 0.
    E_t = []
    sumXSq = 0.
    E_dash_t = []

    res = {}
    k_hist = []
    W_hist = []
    anomalies = []

    # incremental update W
    lastChangeAt = 0

    for t in range(totalTime):

        k_hist.append(m)

        # update W for each y_t
        x = A[t, :].T  # new data as column vector

        for j in range(m):
            W[:, j], d[j], x = updateW(x, W[:, j], d[j], lamb)
            Wj = W[:, j]

        # Gram-Schmidt reorthogonalization
        if reorthog == True:
            W[:, :m], R = npm.linalg.qr(W[:, :m])

        # compute low-D projection, reconstruction and relative error
        Y = W[:, :m].T * A[t, :].T  # project to m-dimensional space
        xActual = A[t, :].T  # actual vector of the current time
        xProj = W[:, :m] * Y  # reconstruction of the current time
        Proj[t, :m] = Y.T
        recon[t, :] = xProj.T
        xOrth = xActual - xProj
        relErrors[t] = npm.sum(npm.power(xOrth, 2)) / npm.sum(
            npm.power(xActual, 2))

        # update energy
        sumYSq = lamb * sumYSq + npm.sum(npm.power(Y, 2))
        E_dash_t.append(sumYSq)
        sumXSq = lamb * sumXSq + npm.sum(npm.power(A[t, :], 2))
        E_t.append(sumXSq)

        # Record RSRE
        if t == 0:
            top = 0.0
            bot = 0.0

        top = top + npm.power(npm.linalg.norm(xActual - xProj), 2)

        bot = bot + npm.power(npm.linalg.norm(xActual), 2)

        new_RSRE = top / bot

        if t == 0:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### Metric EVALUATION ###
        #deviation from truth
        if evalMetrics == 'T':

            Qt = W[:, :m]

            if t == 0:
                res['subspace_error'] = npm.zeros((totalTime, 1))
                res['orthog_error'] = npm.zeros((totalTime, 1))
                res['angle_error'] = npm.zeros((totalTime, 1))
                Cov_mat = npm.zeros([n, n])

            # Calculate covariance matrix of data up to time t
            Cov_mat = lamb * Cov_mat + npm.dot(xActual, xActual.T)
            # Get eigenvalues and eigenvectors
            WW, V = npm.linalg.eig(Cov_mat)
            # Use this to sort eigenvectors by descending eigenvalue
            eig_idx = WW.argsort()  # Get sort index
            eig_idx = eig_idx[::-1]  # Reverse order (default is ascending)
            # V_k = top m eigenvectors (by eigenvalue, once sorted).
            V_k = V[:, eig_idx[:m]]
            # Calculate subspace error
            C = npm.dot(V_k, V_k.T) - npm.dot(Qt, Qt.T)
            res['subspace_error'][t, 0] = 10 * np.log10(
                npm.trace(npm.dot(C.T, C)))  #frobenius norm in dB
            # Calculate angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k)
            eigVal, eigVec = npm.linalg.eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))
            res['angle_error'][t, 0] = angle

            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T, Qt) - npm.eye(m)
            res['orthog_error'][t, 0] = 10 * np.log10(
                npm.trace(npm.dot(F.T, F)))  #frobenius norm in dB

        # Energy thresholding
        ######################
        # check the lower bound of energy level
        if sumYSq < energy[
                0] * sumXSq and lastChangeAt < t - holdOffTime and m < n:
            lastChangeAt = t
            m = m + 1
            anomalies.append(t)
        # print 'Increasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        # check the upper bound of energy level
        elif sumYSq > energy[
                1] * sumXSq and lastChangeAt < t - holdOffTime and m < n and m > 1:
            lastChangeAt = t
            m = m - 1
        # print 'Decreasing m to %d at time %d (ratio %6.2f)\n' % (m, t, 100 * sumYSq/sumXSq)
        W_hist.append(W[:, :m])
    # set outputs

    # Gram-Schmidt reorthogonalization
    if reorthog == True:
        W[:, :m], R = npm.linalg.qr(W[:, :m])

    # Data Stores
    res2 = {
        'hidden': Proj,  # Array for hidden Variables
        'E_t': np.array(E_t),  # total energy of data 
        'E_dash_t': np.array(E_dash_t),  # hidden var energy
        'e_ratio': np.array(E_dash_t) / np.array(E_t),  # Energy ratio 
        'rel_orth_err': relErrors,  # orthoX error
        'RSRE': RSRE,  # Relative squared Reconstruction error 
        'recon': recon,  # reconstructed data
        'r_hist': k_hist,  # history of r values 
        'W_hist': W_hist,  # history of Weights
        'anomalies': anomalies
    }

    res.update(res2)

    return res
Example #11
def SPIRIT(streams, energyThresh, lamb, evalMetrics):

    # Make the input streams iterable
    if type(streams) == np.ndarray:
        streams_iter = iter(streams)

    # Max No. Streams
    if streams.ndim == 1:
        streams = np.expand_dims(streams, axis=1)
        num_streams = streams.shape[1]
    else:
        num_streams = streams.shape[1]

    count_over = 0
    count_under = 0

    #===============================================================================
    #      Initialise k, w and d, lamb
    #===============================================================================

    k = 1  # Hidden Variables, initialise to one

    # Weights
    pc_weights = npm.zeros(num_streams)
    pc_weights[0, 0] = 1

    # initialise outputs
    res = {}
    all_weights = []
    k_hist = []
    anomalies = []
    x_dash = npm.zeros((1, num_streams))

    Eng = mat([0.00000001, 0.00000001])

    E_xt = 0  # Energy of X at time t
    E_rec_i = mat([0.000000000000001])  # Energy of reconstruction

    Y = npm.zeros(num_streams)

    timeSteps = streams.shape[0]

    #===============================================================================
    # Main Loop
    #===============================================================================
    for t in range(1, timeSteps + 1):  # t = 1,...,200

        k_hist.append(k)

        x_t_plus_1 = mat(next(streams_iter))  # Read in next signals

        d_i = E_rec_i * t

        # Step 1 - Update Weights
        pc_weights, y_t_i, error = track_W(x_t_plus_1, k, pc_weights, d_i,
                                           num_streams, lamb)
        # Record hidden variables
        padding = num_streams - k
        y_bar_t = npm.hstack((y_t_i, mat([nan] * padding)))
        Y = npm.vstack((Y, y_bar_t))

        # Record Weights
        all_weights.append(pc_weights)
        # Record reconstructed z and RSRE
        x_dash = npm.vstack((x_dash, y_t_i * pc_weights))

        # Record RSRE
        if t == 1:
            top = 0.0
            bot = 0.0

        top = top + (norm(x_t_plus_1 - x_dash)**2)

        bot = bot + (norm(x_t_plus_1)**2)

        new_RSRE = top / bot

        if t == 1:
            RSRE = new_RSRE
        else:
            RSRE = npm.vstack((RSRE, new_RSRE))

        ### FOR EVALUATION ###
        #deviation from truth
        if evalMetrics == 'T':

            Qt = pc_weights.T

            if t == 1:
                res['subspace_error'] = npm.zeros((timeSteps, 1))
                res['orthog_error'] = npm.zeros((timeSteps, 1))

                res['angle_error'] = npm.zeros((timeSteps, 1))
                Cov_mat = npm.zeros([num_streams, num_streams])

            # Calculate covariance matrix of data up to time t
            Cov_mat = lamb * Cov_mat + npm.dot(x_t_plus_1, x_t_plus_1.T)
            # Get eigenvalues and eigenvectors
            W, V = eig(Cov_mat)
            # Use this to sort eigenvectors by descending eigenvalue
            eig_idx = W.argsort()  # Get sort index
            eig_idx = eig_idx[::-1]  # Reverse order (default is ascending)
            # V_k = top k eigenvectors (by eigenvalue, once sorted).
            V_k = V[:, eig_idx[:k]]
            # Calculate subspace error
            C = npm.dot(V_k, V_k.T) - npm.dot(Qt, Qt.T)
            res['subspace_error'][t - 1, 0] = 10 * np.log10(
                npm.trace(npm.dot(C.T, C)))  #frobenius norm in dB

            # Calculate angle between projection matrices
            D = npm.dot(npm.dot(npm.dot(V_k.T, Qt), Qt.T), V_k)
            eigVal, eigVec = eig(D)
            angle = npm.arccos(np.sqrt(max(eigVal)))
            res['angle_error'][t - 1, 0] = angle

            # Calculate deviation from orthonormality
            F = npm.dot(Qt.T, Qt) - npm.eye(k)
            res['orthog_error'][t - 1, 0] = 10 * np.log10(
                npm.trace(npm.dot(F.T, F)))  #frobenius norm in dB

        # Step 2 - Update Energy estimate
        E_xt = ((lamb * (t - 1) * E_xt) + norm(x_t_plus_1)**2) / t

        for i in range(k):
            E_rec_i[0, i] = ((lamb * (t - 1) * E_rec_i[0, i]) +
                             (y_t_i[0, i]**2)) / t

        # Step 3 - Estimate the retained energy
        E_retained = npm.sum(E_rec_i, 1)

        # Record Energy
        Eng_new = npm.hstack((E_xt, E_retained[0, 0]))
        Eng = npm.vstack((Eng, Eng_new))

        if E_retained < energyThresh[0] * E_xt:
            if k != num_streams:
                k = k + 1
                # Initialise E_k+1 <-- 0
                E_rec_i = npm.hstack((E_rec_i, mat([0])))
                # Initialise W_i+1
                new_weight_vec = npm.zeros(num_streams)
                new_weight_vec[0, k - 1] = 1
                pc_weights = npm.vstack((pc_weights, new_weight_vec))
                anomalies.append(t - 1)
            else:
                count_over += 1
        elif E_retained > energyThresh[1] * E_xt:
            if k > 1:
                k = k - 1
                # discard w_k and error
                pc_weights = delete(pc_weights, -1, 0)
                # Discard E_rec_i[k]
                E_rec_i = delete(E_rec_i, -1)
            else:
                count_under += 1

    # Data Stores
    res2 = {
        'hidden': Y,  # Array for hidden Variables
        'weights': all_weights,
        'E_t': Eng[:, 0],  # total energy of data 
        'E_dash_t': Eng[:, 1],  # hidden var energy
        'e_ratio': np.divide(Eng[:, 1], Eng[:, 0]),  # Energy ratio 
        'RSRE': RSRE,  # Relative squared Reconstruction error 
        'recon': x_dash,  # reconstructed data
        'r_hist': k_hist,  # history of r values 
        'anomalies': anomalies
    }

    res.update(res2)

    return res, all_weights