def cp(self, X, R):
    # CP decomposition of a 3-way tensor via alternating least squares (ALS)
    I = self.shape(X)
    r = min([I[0]*I[1], I[1]*I[2], I[0]*I[2]])
    assert R < r, "Reduce R. It is too large compared to the dimensions of the data"
    # Initialise the factor matrices
    A = [np.random.random((I[k], R)) for k in range(3)]
    # Unfold X along every mode
    Xn = [self.unfold(X, k+1) for k in range(3)]
    # An alternative initialisation takes the first R left singular vectors
    # of each unfolded tensor:
    # for k in range(3):
    #     U, S, V = np.linalg.svd(Xn[k])
    #     A[k] = U[:, 0:R]
    # Iterative step of ALS
    for reps in range(10000):
        tmp = A[:]
        for k in range(3):  # iterate over each mode
            idx = [0, 1, 2]
            idx.remove(k)
            Z = self.khatri_rao([A[idx[1]], A[idx[0]]])
            W = self.hadamard(mat_mul([A[idx[0]].T, A[idx[0]]]),
                              mat_mul([A[idx[1]].T, A[idx[1]]]))
            Winv = np.linalg.pinv(W, rcond=0.001)
            A[k] = mat_mul([Xn[k], Z, Winv])
        if (reps+1) % 100 == 0:
            err_ = sum([np.linalg.norm(A[p] - tmp[p]) for p in range(3)])
            if err_ < 0.001:
                print("The algorithm has converged. Number of iterations in ALS: ", reps)
                break
            else:
                tmp = A[:]
    return A  # A = [[a1,a2,..,aR], [b1,b2,...,bR], [c1,c2,...,cR]]
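# Hedged usage sketch for cp (assumptions: `tn` is an instance of this class,
# and the mode-k factor indexes numpy axis k). CP expresses X as a sum of R
# rank-one terms, X ≈ sum_r a_r ∘ b_r ∘ c_r, so the fit can be checked by
# rebuilding the tensor from the returned factors:
#
#     A = tn.cp(X, R=3)
#     X_hat = np.einsum('ir,jr,kr->ijk', A[0], A[1], A[2])
#     rel_err = np.linalg.norm(X_hat - X)/np.linalg.norm(X)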
def cov_regression(self, X_Data, Y_Data, alpha, R_X, R_Y):
    # alpha lies between 0 and 1 and trades off fitting the input data
    # against correlating with the output data.
    # R_X and R_Y are the dimensions of the Tucker factors for X_Data and
    # Y_Data respectively.
    I_X = self.shape(X_Data)
    I_Y = self.shape(Y_Data)
    assert I_X[2] == I_Y[2] and R_X[2] == R_Y[2], \
        "The number of samples must equal the dimension in the third mode"
    C = np.random.random((I_X[2], R_X[2]))  # C_X = C_Y
    C_old = np.copy(C)
    for reps in range(1000):
        (Ax, Gx_) = self.partial_tucker3(X_Data, R_X, C, mode=3)
        (Ay, Gy_) = self.partial_tucker3(Y_Data, R_Y, C, mode=3)
        Gx = self.unfold(Gx_, 3)
        Gy = self.unfold(Gy_, 3)
        X_flat = self.unfold(X_Data, 3)
        Y_flat = self.unfold(Y_Data, 3)
        Px = mat_mul([Gx, self.kron([Ax[1].T, Ax[0].T])])
        Py = mat_mul([Gy, self.kron([Ay[1].T, Ay[0].T])])
        Z = np.sqrt(alpha)*mat_mul([X_flat, Px.T]) + np.sqrt(1-alpha)*mat_mul([Y_flat, Py.T])
        # Alternatively: U = np.sqrt(alpha)*mat_mul([Px, Px.T]) + np.sqrt(1-alpha)*mat_mul([Py, Py.T])
        U = np.sqrt(alpha)*mat_mul([Gx, Gx.T]) + np.sqrt(1-alpha)*mat_mul([Gy, Gy.T])
        X_pinv = np.linalg.pinv(X_flat)
        U_pinv = np.linalg.pinv(U)
        W = mat_mul([X_pinv, Z, U_pinv])
        C = mat_mul([X_flat, W])
        if (reps+1) % 500 == 0:
            if np.linalg.norm(C - C_old) < 0.1:
                print("The algorithm has converged. Number of iterations: ", reps)
                break
            C_old = np.copy(C)  # refresh the reference iterate for the next check
    X_fit = mat_mul([C, Px])
    Error = np.linalg.norm(X_fit - X_flat)
    print("Error in input data fit: ", Error, "and norm of the input data: ", np.linalg.norm(X_flat))
    return (Ax, Gx, Ay, Gy, W, Py)
def partial_tucker3(self, X, R, A_, mode):
    assert len(R) == 3, "Input list should have exactly three elements"
    I = self.shape(X)
    A = [np.random.random((I[k], R[k])) for k in range(3)]  # initialise the factors
    A[mode-1] = A_
    Xn = [self.unfold(X, n+1) for n in range(3)]
    # Iterate until convergence or max_step
    max_step = 1000
    id_ = [0, 1, 2]
    id_.remove(mode-1)
    for reps in range(max_step):
        tmp = A[:]
        for k in id_:  # update every mode except the fixed one
            idx = [0, 1, 2]
            idx.remove(k)
            Z = self.kron([A[idx[1]], A[idx[0]]])
            Yk = mat_mul([Xn[k], Z])
            U, S, V = np.linalg.svd(Yk)
            A[k] = U[:, 0:R[k]]  # take the R[k] leading left singular vectors of Yk
        if (reps+1) % 10 == 0:  # check for convergence
            err_ = sum([np.linalg.norm(A[p].flatten() - tmp[p].flatten()) for p in range(3)])
            if err_ <= 0.001:
                # print("The algorithm has converged. Number of iterations: ", reps)
                break
            else:
                tmp = A[:]
    At = [A[k].T for k in range(len(A))]
    G = self.tucker_product(X, At, [1, 2, 3])
    return (A, G)
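# Hedged usage sketch for partial_tucker3 (assumptions: `tn` is an instance of
# this class and X is a 3-way array of shape (I1, I2, I3)). The factor along
# `mode` stays fixed at A_ while the other two are fitted by HOOI-style SVD
# updates, so the output approximately satisfies X ≈ G x1 A[0] x2 A[1] x3 A[2]:
#
#     C = np.linalg.qr(np.random.randn(I3, R3))[0]   # fixed mode-3 factor
#     A, G = tn.partial_tucker3(X, [R1, R2, R3], C, mode=3)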
def anormal_mix_gamma(self, X, p_mix, M_mix, Cov_mix):
    # Responsibilities gamma_k of each mixture component for the observation X,
    # computed in log-space for numerical stability.
    K = len(p_mix)
    X_0 = self.unfold(X, 1)
    x = X_0.flatten('F')  # column-major vectorisation, matching the reversed Kronecker ordering below
    Cov = [None]*K
    for k in range(K):
        cov_k = Cov_mix[k][:]  # copy before reversing so the caller's list is not mutated
        cov_k.reverse()
        Cov[k] = self.kron(cov_k)
    M = [self.unfold(M_mix[k], 1).flatten('F') for k in range(K)]
    d = max(M[0].shape)
    v = [-0.5*mat_mul([(x - M[k]).T, np.linalg.inv(Cov[k]), (x - M[k])]) for k in range(K)]
    ln_w = [None]*K
    for k in range(K):
        # (np.linalg.slogdet would be more robust here for large Cov)
        ln_w[k] = (np.log(p_mix[k])
                   - 0.5*np.log(0.00001 + np.linalg.det(Cov[k]))
                   - (d/2.0)*np.log(2*np.pi))
    a = np.max(np.array(v) + np.array(ln_w))
    tmp = [np.exp(v[k] + ln_w[k] - a) for k in range(K)]
    normalise = sum(tmp)
    gamma_ = [tmp[k]/normalise for k in range(K)]
    if abs(1 - sum(gamma_)) > 0.01:
        print("gamma is being computed wrongly. Instead of 1.0, gamma sums to: ", sum(gamma_))
    return gamma_
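# The normalisation in anormal_mix_gamma is the log-sum-exp trick: writing
# l_k = v_k + ln_w_k for the log of the k-th weighted component density, the
# responsibilities are
#     gamma_k = exp(l_k - a) / sum_j exp(l_j - a),   a = max_k l_k,
# which equals exp(l_k)/sum_j exp(l_j) exactly but avoids underflow when the
# l_k are large negative numbers, as they are for high-dimensional tensors.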
def anormal_sampling_2D(self, M, Cov):
    # Draw one sample from the matrix-normal distribution X ~ MN(M, Cov[0], Cov[1])
    A = [None]*2
    # Compute the matrix square root (Cov = A*A') of each covariance matrix
    for j in range(2):
        A[j], _ = self.mat_root(Cov[j])
    Z = np.random.standard_normal(tuple(M.shape))
    X_ = M + mat_mul([A[0], Z, A[1].T])
    # Equivalent vectorised sampler:
    # x = np.random.multivariate_normal(M.flatten(), np.kron(Cov[1], Cov[0]))
    # X_ = x.reshape(M.shape)
    return X_
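# Hedged sanity check for anormal_sampling_2D (assumption: `tn` is an instance
# of this class). Since X = M + A0*Z*A1' with Z i.i.d. standard normal, the
# empirical row scatter of many samples approaches Cov[0]*trace(Cov[1]):
#
#     E = [tn.anormal_sampling_2D(M, Cov) - M for _ in range(5000)]
#     S_row = sum(e @ e.T for e in E)/len(E)   # ≈ Cov[0] * np.trace(Cov[1])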
def anormal_condition(self, M, Cov, Ia, X_a, slice_):
    # mode: the mode along whose unfolding each column is partially known
    # Ia: indices of the known data along the given mode
    # X_a: data slice at index Ia
    # mode = 1, 2, 3 => incomplete information along rows, columns, pipes respectively
    Ix = M.shape
    if slice_ == 1:  # incomplete info along each row when unfolded along mode 2, i.e. slice given along mode 1
        mode = 2
        Ib = set(range(Ix[2]))
        Ib = list(Ib - set(Ia))  # indices of the unknown row elements
        Cov_mode = Cov[mode-1]  # row covariance, i.e. covariance along the mode-2 unfolding
        M_b = M[:, :, Ib]
        M_a = M[:, :, Ia]
    elif slice_ == 2:  # incomplete info along each column when unfolded along mode 1, i.e. slice given along mode 2
        mode = 1
        Ib = set(range(Ix[1]))  # indices of the unknown columns
        Ib = list(Ib - set(Ia))
        Cov_mode = Cov[mode-1]  # column covariance
        M_b = M[:, Ib, :]
        M_a = M[:, Ia, :]
    elif slice_ == 3:  # incomplete info along each pipe
        mode = 3
        Ib = set(range(Ix[0]))  # indices of the unknown pipes
        Ib = list(Ib - set(Ia))
        Cov_mode = Cov[mode-1]  # pipe covariance
        M_b = M[Ib, :, :]
        M_a = M[Ia, :, :]
    else:
        raise ValueError('Invalid mode passed')
    Cov_aa = Cov_mode[np.ix_(Ia, Ia)]
    Cov_ba = Cov_mode[np.ix_(Ib, Ia)]
    Cov_bb = Cov_mode[np.ix_(Ib, Ib)]
    Cov_ab = Cov_mode[np.ix_(Ia, Ib)]
    inv_Cov_aa = np.linalg.inv(Cov_aa)
    update_coef = mat_mul([Cov_ba, inv_Cov_aa])
    Cov_o = Cov[:]
    Cov_a = Cov[:]
    Cov_o[mode-1] = Cov_bb - mat_mul([update_coef, Cov_ba.T])
    update_M = self.mode_n_product((X_a - M_a), update_coef, mode)
    M_o = M_b + update_M
    Cov_a[mode-1] = Cov_aa[:, :]
    return (M_o, Cov_o, M_a, Cov_a)
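# anormal_condition (above) and anormal_condition_2D (further below) both apply
# standard Gaussian conditioning along a single mode: for
#     [x_b; x_a] ~ N([m_b; m_a], [[S_bb, S_ba], [S_ab, S_aa]]),
# the conditional distribution is
#     x_b | x_a ~ N(m_b + S_ba S_aa^{-1} (x_a - m_a),  S_bb - S_ba S_aa^{-1} S_ab),
# so update_coef = S_ba S_aa^{-1} supplies the mean shift and the Schur
# complement replaces the covariance block along the conditioned mode.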
def tucker_matrix_product(self, X, M_, n):
    # M_ = [M1, M2, M3, ..., MN]
    M = M_[:]
    Mn = M[n-1]
    M.pop(n-1)
    M.reverse()
    M = [M[k].T for k in range(len(M))]
    Z = self.kron(M)
    Xn = self.unfold(X, n)
    Yn = mat_mul([Mn, Xn, Z])
    return Yn  # the mode-n unfolding of the tensor after the Tucker product
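# tucker_matrix_product implements the standard unfolding identity: for
# Y = X x1 M1 x2 M2 ... xN MN, the mode-n unfolding satisfies
#     Y_(n) = Mn * X_(n) * (M_N ⊗ ... ⊗ M_{n+1} ⊗ M_{n-1} ⊗ ... ⊗ M_1)^T,
# which is why the remaining factors are popped, reversed, and transposed
# before their Kronecker product is formed.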
def anormal_condition_2D(self, M, Cov, Ia, X_a):
    Ib = list(set(range(M.shape[1])) - set(Ia))  # indices of the unknown columns
    M_b = M[:, Ib]
    M_a = M[:, Ia]
    Cov_mode = Cov[1]
    Cov_aa = Cov_mode[np.ix_(Ia, Ia)]
    Cov_ba = Cov_mode[np.ix_(Ib, Ia)]
    Cov_bb = Cov_mode[np.ix_(Ib, Ib)]
    Cov_ab = Cov_mode[np.ix_(Ia, Ib)]
    inv_Cov_aa = np.linalg.inv(Cov_aa)
    update_coef = mat_mul([Cov_ba, inv_Cov_aa])
    Cov_o = Cov[:]
    Cov_a = Cov[:]
    Cov_o[1] = Cov_bb - mat_mul([update_coef, Cov_ba.T])
    update_M = mat_mul([(X_a - M_a), update_coef.T])
    M_o = M_b + update_M
    Cov_a[1] = Cov_aa[:, :]
    return (M_o, Cov_o, M_a, Cov_a)
def anormal_hoff3(self, X, coef=0.9, constraint=False):
    # X = {X1, X2, ..., Xn}
    I = self.shape(X[0])
    N = len(X)
    # Compute the mean
    M = 0
    for X_ in X:
        M = M + X_
    M = M/N
    shape_ = list(X[0].shape)
    shape_.insert(0, N)
    M_ext = np.empty(shape_)
    X_ext = np.empty(shape_)
    # Extended (stacked) mean and data arrays
    for i in range(N):
        M_ext[i, :, :, :] = M
        X_ext[i, :, :, :] = X[i]
    # Residual
    E = X_ext - M_ext
    # X_i ~ M + Z x {Cov1, Cov2, Cov3}, and CovK = Ak*Ak'
    # Initialise the covariance matrices (mode-1, mode-2, mode-3):
    # mode-1 covariance = column cov, mode-2 cov = row cov, mode-3 = pipe cov
    Cov = [np.random.rand(I[i], I[i]) for i in range(3)]
    Cov = [mat_mul([Cov[i], Cov[i].T]) for i in range(3)]
    ar = 3
    if constraint == True:
        # Give an AR(1) (Toeplitz) structure to the longitudinal covariance matrix
        corr_ = [coef**j for j in range(I[2])]
        Cov[2][0, :] = np.array(corr_)
        ar = 2
        for j in range(I[2]-1):
            corr_.pop()
            corr_.insert(0, coef**(j+1))
            Cov[2][j+1, :] = np.array(corr_)
    # Compute the matrix square roots of the covariances
    A = [None]*3
    A_inv = [None]*3
    for j in range(3):
        (A[j], A_inv[j]) = self.mat_root(Cov[j])
    A_inv_ext = A_inv[:]
    A_inv_ext.append(np.identity(N))
    for reps in range(1000):
        tmp = Cov[:]
        for k in range(ar):  # iterate over each free mode
            idx = list(range(ar))
            idx.pop(k)
            for j in idx:
                (A[j], A_inv_ext[j]) = self.mat_root(Cov[j])
            A_inv_ext[k] = np.identity(I[k])
            E_ = self.tucker_product(E, A_inv_ext, [1, 2, 3, 4])
            E_k = self.unfold4(E_, k+1)
            S = mat_mul([E_k, E_k.T])
            nk = N*np.prod(I)/I[k]
            Cov[k] = S/nk
        if (reps+1) % 10 == 0:
            err_ = sum([np.linalg.norm(Cov[p] - tmp[p]) for p in range(3)])
            print(err_)
            if err_ < 0.1:
                print("MLE converged in ", reps, " steps")
                break
            else:
                tmp = Cov[:]
    return (M, Cov, A)
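# The inner update of anormal_hoff3 (and anormal_hoff below) is the "flip-flop"
# MLE for a Kronecker-structured (array-normal) covariance: with the other
# factors held fixed, the mode-k estimate is
#     Cov_k = (1/n_k) * sum_i E_i(k) (kron_{j != k} Cov_j)^{-1} E_i(k)^T,
#     n_k = N * prod_j I_j / I_k,
# realised above by whitening the residual E along the other modes with the
# inverse matrix square roots before forming the Gram matrix S.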
def anormal_hoff(self, X, coef=0.9, var=1, pow_=1, constraint=False):
    # X = {X1, X2, ..., Xn}
    I = self.shape(X[0])
    N = len(X)
    # Compute the mean
    M = 0
    for X_ in X:
        M = M + X_
    M = M/N
    shape_ = list(X[0].shape)
    shape_.insert(0, N)
    M_ext = np.empty(shape_)
    X_ext = np.empty(shape_)
    # Extended (stacked) mean and data arrays
    for i in range(N):
        M_ext[i, :, :, :] = M
        X_ext[i, :, :, :] = X[i]
    # Residual
    E = X_ext - M_ext
    # X_i ~ M + Z x {Cov1, Cov2, Cov3}, and CovK = Ak*Ak'
    # Initialise the covariance matrices (mode-1, mode-2, mode-3):
    # mode-1 covariance = column cov, mode-2 cov = row cov, mode-3 = pipe cov
    Cov = [np.identity(I[i]) for i in range(3)]
    ar = 3
    if constraint == True:
        # Give an AR(1) (Toeplitz) structure to the longitudinal covariance matrix
        ar = 2
        corr_ = [coef**(pow_*j) for j in range(I[2])]
        Cov[2][0, :] = np.array(corr_)
        for j in range(I[2]-1):
            corr_.pop()
            corr_.insert(0, coef**(j+1))
            Cov[2][j+1, :] = np.array(corr_)
        Cov[2] = var*Cov[2]
    # Matrix square roots of the covariances (identity at initialisation)
    A = Cov[:]
    A_inv = Cov[:]
    A_inv_ext = A_inv[:]
    A_inv_ext.append(np.identity(N))
    eps = 0.01
    cov_norm = np.array([np.linalg.norm(Cov[j]) for j in range(3)])
    for reps in range(10000):
        tmp = Cov[:]
        for k in range(ar):  # iterate over each free mode
            if (cov_norm < eps).any():
                print("One of the covariances is zero. Terminating MLE at k = ", k)
                break
            idx = list(range(ar))
            idx.pop(k)
            for j in idx:
                (A[j], A_inv_ext[j]) = self.mat_root(Cov[j])
            A_inv_ext[k] = np.identity(I[k])
            E_ = self.tucker_product(E, A_inv_ext, [1, 2, 3, 4])
            E_k = self.unfold4(E_, k+1)
            S = mat_mul([E_k, E_k.T])
            nk = N*np.prod(I)/I[k]
            Cov[k] = S/nk
            cov_norm[k] = np.linalg.norm(Cov[k])
        err_ = np.linalg.norm(self.kron(Cov) - self.kron(tmp))
        print(err_)
        if err_ < eps or np.linalg.norm(self.kron(Cov)) < eps:
            print("MLE has converged in ", reps, " steps")
            break
        else:
            tmp = Cov[:]
    A = [self.mat_root(Cov[j])[0] for j in range(3)]
    return (M, Cov, A)
def anormal2D(self, X, Nb=10, rbf=False):
    # X = {X1, X2, ..., Xn}
    N = len(X)  # number of data points
    Nk = N*1
    N1 = X[0].shape[0]
    N2 = X[0].shape[1]
    # Compute the mean
    M = 0
    for X_ in X:
        M = M + X_
    M = M/N
    X_n = [Xs - M for Xs in X]  # mean-subtracted data
    # X_i ~ M + Z x {A1, A2}, and CovK = Ak*Ak'
    # Initialise the covariance matrices (mode-1, mode-2)
    Cov = [np.identity(N1), np.identity(N2)]
    # Construct the basis functions Phi (x = Phi*w)
    delta = 0.0000001
    t_data = np.linspace(0, 1, N2)
    t_basis = np.linspace(0, 1, Nb)  # centres of the basis functions
    sig = 1.0/Nb
    Phi = np.empty([N2, Nb])
    for i in range(Nb):  # over t_basis
        for j in range(N2):  # over t_data
            Phi[j, i] = np.exp(-(t_basis[i] - t_data[j])**2/sig**2)
    PhiT = np.transpose(Phi)
    invPhi = np.linalg.pinv(Phi)
    invPhiT = np.transpose(invPhi)
    # Assume a covariance matrix for the weights w: Uw
    Uw = np.identity(Nb)
    Reg = delta*np.identity(N2)  # regularisation
    Cov[0] = np.random.randn(N1, N1)
    Cov[1] = mat_mul([Phi, Uw, PhiT]) + Reg
    if rbf == False:
        Cov[1] = np.random.randn(N2, N2)
    eps = 0.00001
    cov_norm = np.array([np.linalg.norm(Cov[j]) for j in range(2)])
    for reps in range(1000000):
        tmp = Cov[:]
        # Compute Uw and U2 given U1
        try:
            Z2 = np.linalg.inv(Cov[0])
        except np.linalg.LinAlgError:
            print("The Cov[0] matrix in anormal2D became singular")
            Z2 = np.linalg.inv(Cov[0] + eps*np.identity(N1))
        sum_ = 0.
        for j in range(N):
            X_k = X_n[j]
            sum_ = sum_ + mat_mul([X_k.T, Z2, X_k])
        nk = Nk*N1
        Cov2 = sum_/nk
        Uw = mat_mul([invPhi, Cov2, invPhiT])  # - delta*np.identity(Nb)
        if rbf == True:
            Cov[1] = mat_mul([Phi, Uw, PhiT]) + eps*np.identity(N2)
        else:
            Cov[1] = Cov2 + eps*np.identity(N2)
        if (cov_norm < eps).any():
            print("At least one Cov matrix is zero. Verify the data")
            break
        # Compute U1 given Uw/U2
        try:
            Z1 = np.linalg.inv(Cov[1])
        except np.linalg.LinAlgError:
            print("The Cov[1] matrix in anormal2D became singular")
            Z1 = np.linalg.inv(Cov[1] + eps*np.identity(N2))
        sum_ = 0.
        for j in range(N):
            X_k = X_n[j]
            sum_ = sum_ + mat_mul([X_k, Z1, X_k.T])
        nk = Nk*N2
        Cov[0] = sum_/nk + eps*np.identity(N1)
        Cov[0] = (1/Cov[0][0, 0])*Cov[0]
        cov_norm = np.array([np.linalg.norm(Cov[j]) for j in range(2)])
        err_ = np.linalg.norm(self.kron(Cov) - self.kron(tmp))
        print("Error EM: ", err_)
        if (reps+1) % 10 == 0:
            if err_ < eps or np.linalg.norm(self.kron(Cov)) < eps:
                print("MLE has converged in ", reps, " steps")
                if np.linalg.norm(self.kron(Cov)) < eps:
                    print("The Cov matrices are close to singular")
                break
            else:
                tmp = Cov[:]
    A = [self.mat_root(Cov[j])[0] for j in range(2)]  # keep only the square roots, as in anormal_hoff
    return (M, Cov, A, Uw, Phi)
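# In anormal2D above (and in anormal below), the column/time covariance is
# parameterised through a Gaussian RBF basis, Phi[j, i] = exp(-(t_i - t_j)^2/sig^2),
# as Cov = Phi Uw Phi' + delta*I. Each EM sweep first forms the unconstrained
# estimate and then projects it onto the basis via Uw = pinv(Phi) Cov pinv(Phi)',
# which keeps the longitudinal covariance smooth and effectively low-rank.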
def anormal(self, X, coef=0.9, var=1, pow_=1, constraint=False, normalised=False,
            for_mix_model=False, gamma_k=None, Cov_Old=[]):
    # X = {X1, X2, ..., Xn}
    I = self.shape(X[0])
    N = len(X)
    Nk = N*1
    # Compute the mean
    M = 0
    t_data = np.linspace(0, 1, I[2])
    if normalised == False:
        for X_ in X:
            M = M + X_
        M = M/N
        X_n = [X_ - M for X_ in X]  # mean-subtracted data
    else:
        X_n = X[:]
    # X_i ~ M + Z x {A1, A2, A3}, and CovK = Ak*Ak'
    # Initialise the covariance matrices (mode-1, mode-2, mode-3)
    Cov = [np.identity(I[i]) for i in range(3)]
    if for_mix_model == True:
        Nk = np.sum(gamma_k)
        X_n = [np.sqrt(gamma_k[i])*X[i] for i in range(N)]
    # Construct the basis functions Phi (x = Phi*w)
    Nb = 10
    delta = 0.0001
    t_basis = np.linspace(0, 1, Nb)  # centres of the basis functions
    sig = 2.0/Nb
    Phi = np.empty([I[2], Nb])
    for i in range(Nb):  # over t_basis
        for j in range(I[2]):  # over t_data
            Phi[j, i] = np.exp(-(t_basis[i] - t_data[j])**2/sig**2)
    PhiT = np.transpose(Phi)
    invPhi = np.linalg.pinv(Phi)
    invPhiT = np.transpose(invPhi)
    # Assume a covariance matrix for the weights w: Uw
    Uw = np.identity(Nb)
    Reg = delta*np.identity(I[2])
    Cov[2] = mat_mul([Phi, Uw, PhiT]) + Reg
    # (Alternative: estimate the longitudinal covariance directly from the
    # mode-3 unfoldings of the data instead of the RBF parameterisation.)
    Cov_struct = 1.0*Cov[2]
    det_struct = np.linalg.det(Cov_struct)
    eps = 0.00001
    cov_norm = np.array([np.linalg.norm(Cov[j]) for j in range(3)])
    for reps in range(100):
        tmp = Cov[:]
        for k in range(3):  # iterate over each mode
            if (cov_norm < eps).any():
                print("At least one Cov matrix is zero. Verify the data")
                break
            idx = list(range(3))
            idx.pop(k)
            try:
                Z = np.linalg.inv(np.kron(Cov[idx[1]], Cov[idx[0]]))
            except np.linalg.LinAlgError:
                print("The Z matrix in anormal became singular")
                Z_arg = np.kron(Cov[idx[1]], Cov[idx[0]])
                Z = np.linalg.inv(Z_arg + 0.001*np.identity(Z_arg.shape[0]))
            sum_ = 0.
            for j in range(N):
                X_k = self.unfold(X_n[j], k+1)
                sum_ = sum_ + mat_mul([X_k, Z, X_k.T])
            nk = Nk*np.prod(I)/I[k]
            Cov[k] = sum_/nk
            if k == 2:
                # Project the longitudinal covariance onto the RBF basis
                Uw = mat_mul([invPhi, Cov[2], invPhiT])
                Cov[2] = mat_mul([Phi, Uw, PhiT]) + Reg
            cov_norm = np.array([np.linalg.norm(Cov[j]) for j in range(3)])
        err_ = np.linalg.norm(self.kron(Cov) - self.kron(tmp))
        if (reps+1) % 2 == 0:
            if err_ < eps or np.linalg.norm(self.kron(Cov)) < eps:
                if for_mix_model == False:
                    print("MLE has converged in ", reps, " steps")
                if np.linalg.norm(self.kron(Cov)) < eps:
                    print("The Cov matrices are close to singular")
                break
            else:
                tmp = Cov[:]
    A = [self.mat_root(Cov[j])[0] for j in range(3)]  # keep only the square roots, as in anormal_hoff
    return (M, Cov, A)
def anormal_old(self, X, coef=0.9, var=1, pow_=1, constraint=False, normalised=False,
                for_mix_model=False, gamma_k=None, Cov_Old=[]):
    # X = {X1, X2, ..., Xn}
    I = self.shape(X[0])
    N = len(X)
    Nk = N*1
    # Compute the mean
    M = 0
    if normalised == False:
        for X_ in X:
            M = M + X_
        M = M/N
        X_n = [X_ - M for X_ in X]  # mean-subtracted data
    else:
        X_n = X[:]
    # X_i ~ M + Z x {A1, A2, A3}, and CovK = Ak*Ak'
    # Initialise the covariance matrices (mode-1, mode-2, mode-3)
    Cov = [np.identity(I[i]) for i in range(3)]
    ar = 3
    if for_mix_model == True:
        Nk = np.sum(gamma_k)
        X_n = [np.sqrt(gamma_k[i])*X[i] for i in range(N)]
    if constraint == True:
        # Give an auto-regressive structure to the longitudinal covariance matrix
        ar = 2
        # Approximate Toeplitz structure from AR(1)
        # (Alternative: estimate the longitudinal covariance from the data, as in anormal.)
        T = I[2]
        corr_ = [coef**(pow_*j) for j in range(T)]
        Cov[2][0, :] = np.array(corr_)
        for j in range(I[2]-1):
            corr_.pop()
            corr_.insert(0, 0)
            Cov[2][j+1, :] = np.array(corr_)
        Cov[2] = Cov[2] + Cov[2].T
        np.fill_diagonal(Cov[2], 1)
        Cov[2] = var*Cov[2]
    Cov_struct = 1.0*Cov[2]
    det_struct = np.linalg.det(Cov_struct)
    eps = 0.00001
    cov_norm = np.array([np.linalg.norm(Cov[j]) for j in range(3)])
    for reps in range(100):
        tmp = Cov[:]
        for k in range(ar):  # iterate over each free mode
            if (cov_norm < eps).any():
                print("At least one Cov matrix is zero. Verify the data")
                break
            idx = list(range(3))
            idx.pop(k)
            try:
                Z = np.linalg.inv(np.kron(Cov[idx[1]], Cov[idx[0]]))
            except np.linalg.LinAlgError:
                print("The Z matrix in anormal became singular")
                Z_arg = np.kron(Cov[idx[1]], Cov[idx[0]])
                Z = np.linalg.inv(Z_arg + 0.01*np.identity(Z_arg.shape[0]))
            sum_ = 0.
            for j in range(N):
                X_k = self.unfold(X_n[j], k+1)
                sum_ = sum_ + mat_mul([X_k, Z, X_k.T])
            nk = Nk*np.prod(I)/I[k]
            Cov[k] = sum_/nk
            cov_norm = np.array([np.linalg.norm(Cov[j]) for j in range(3)])
        err_ = np.linalg.norm(self.kron(Cov) - self.kron(tmp))
        if (reps+1) % 2 == 0:
            if err_ < eps or np.linalg.norm(self.kron(Cov)) < eps:
                if for_mix_model == False:
                    print("MLE has converged in ", reps, " steps")
                if np.linalg.norm(self.kron(Cov)) < eps:
                    print("The Cov matrices are close to singular")
                break
            else:
                tmp = Cov[:]
    A = [self.mat_root(Cov[j])[0] for j in range(3)]  # keep only the square roots, as in anormal_hoff
    return (M, Cov, A)