def gaussian(x, u, C):
    """Evaluate a Gaussian density at column vector x with mean u and covariance C.

    x and u are column vectors (list-of-lists) and C is a square covariance
    matrix, all in the project's Matrix representation.

    NOTE(review): the normalization uses 1/sqrt(2*pi*|C|); for a d-dimensional
    multivariate Gaussian the standard constant is 1/sqrt((2*pi)**d * |C|).
    Preserved as-is to keep existing numeric behavior — confirm intent.
    """
    # Normalization constant; abs() guards against a slightly negative
    # determinant caused by floating-point error.
    res = (1 / sqrt(2 * math.pi * abs(Matrix.determinant(C))))
    # Quadratic form (x-u)^T * C^-1 * (x-u); result is a 1x1 matrix.
    temp = Matrix.transpose(Matrix.minus(x, u))
    temp = Matrix.multiply(temp, Matrix.inverse(C))
    temp = Matrix.multiply(temp, Matrix.minus(x, u))
    try:
        res *= exp(-0.5 * temp[0][0])
    except OverflowError:
        # Was a bare `except:` that silently swallowed *every* exception.
        # Narrowed to OverflowError (what exp() raises on extreme exponents);
        # the original best-effort diagnostics are preserved, and `res` is
        # returned without the exponential factor, exactly as before.
        print(temp[0][0])
        print(x)
        print(u)
        print(C)
    return res
def analysis(self, k):
    """Project self.data onto its top-k principal components (PCA).

    Returns the transformed data with one row per sample and k columns.
    """
    # Empirical mean of each feature column.
    means = Multi_Dimension_Data_Statictis.get_average(self.data)
    # Center the data: subtract the mean vector from every row.
    # (The original also computed get_deviations() here into an unused
    # local marked "#unused" — removed.)
    mean_subtracted_data = Matrix.minus(
        self.data,
        Matrix.multiply([[1] for _ in range(len(self.data))],
                        Matrix.transpose(means)),
    )
    # Covariance matrix of the centered data.
    covariance_matrix = Multi_Dimension_Data_Statictis.get_covariance_matrix(
        mean_subtracted_data)
    # Eigen-decomposition; eigh suits the symmetric covariance matrix.
    # np.asarray replaces the deprecated np.mat — eigh accepts both and
    # .tolist() yields the same nested lists.
    eigenvalues, eigenvectors = np.linalg.eigh(np.asarray(covariance_matrix))
    eigenvalues = eigenvalues.tolist()
    # eigh returns eigenvectors as columns; transpose so each row is one vector.
    eigenvectors = Matrix.transpose(eigenvectors.tolist())
    # Pair each eigenvalue with its eigenvector, sort descending by eigenvalue.
    eigenvalue_and_eigenvector = sorted(zip(eigenvalues, eigenvectors),
                                        reverse=True)
    # Keep the k eigenvectors with the largest eigenvalues.
    transform_matrix = [vector for _, vector in eigenvalue_and_eigenvector[:k]]
    return Matrix.transpose(
        Matrix.multiply(transform_matrix, Matrix.transpose(self.data)))
def iteration(self):
    """Perform one gradient-descent update of self.weights.

    Computes predictions g(X.w), their error against data_Y, and moves the
    weights opposite the gradient X^T.E scaled by the learning rate.
    """
    predictions = self.g_function(Matrix.multiply(self.data_X, self.weights))
    error = Matrix.minus(predictions, self.data_Y)
    gradient = Matrix.multiply(Matrix.transpose(self.data_X), error)
    step = Matrix.multiply_integer(gradient, self.learning_rate)
    self.weights = Matrix.minus(self.weights, step)
def test_minus(self):
    """Matrix.minus subtracts 2x3 matrices element-wise."""
    minuend = [[1, 2, 3], [4, 5, 6]]
    subtrahend = [[6, 5, 4], [3, 2, 1]]
    self.assertEqual(
        [[-5, -3, -1], [1, 3, 5]],
        Matrix.minus(minuend, subtrahend),
    )