    def solve_gauss_seidel(A):
        '''
        This function solves a set of linear equations using the Gauss-Seidel
        iterative method, given the augmented matrix A = [M | b] of the system.
        '''
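        # Gauss-Seidel splits the coefficient part M of the augmented matrix
        # A = [M | b] into L (the lower triangle including the diagonal) and
        # U (the strictly upper triangle), so that M = L + U, and solves the
        # system with the fixed-point iteration
        #     x_(k+1) = -L^-1 * U * x_k + L^-1 * b = T * x_k + C
        # which converges when M is strictly diagonally dominant or symmetric
        # positive definite. For example, A = [[4, 1, 1], [2, 3, 12]] splits
        # into L = [[4, 0], [2, 3]], U = [[0, 1], [0, 0]], b = [[1], [12]].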

        n = len(A)
        #rank = linear_algebra.get_matrix_rank(A)
        #if (rank < n):
        #    raise linear_algebra_error("Rank of augmented matrix: " + str(rank) + " less than number of rows. Unique solution is not possible" )

        # Extract the lower-triangular part (including the diagonal) of the coefficient matrix
        linear_algebra.log("A=" + str(A))
        parm = 1
        L = linear_algebra.gsiedel(A, parm)
        linear_algebra.log("L=" + str(L))

        # Extract the strictly upper-triangular part of the coefficient matrix
        parm = 2
        U = linear_algebra.gsiedel(A, parm)
        linear_algebra.log("U=" + str(U))

        # Extract the right-hand-side column b of the augmented matrix
        parm = 3
        B = linear_algebra.gsiedel(A, parm)
        # Reshape the flat list into a column vector
        B = list(map(list, zip(B)))
        linear_algebra.log("B=" + str(B))

        # Invert the lower-triangular matrix L
        Linv = linear_algebra.get_matrix_inverse(L)
        linear_algebra.log("Linv=" + str(Linv))
        # Form the iteration matrix T = -L^-1 * U

        HT = linear_algebra.get_matrix_product(Linv, U)
        s = -1
        T = linear_algebra.get_matrix_scalar_product(s, HT)
        linear_algebra.log("T=" + str(T))
        # Form the constant vector C = L^-1 * b

        C = linear_algebra.get_matrix_product(Linv, B)
        linear_algebra.log("C=" + str(C))

        # Build the initial guess X: a column vector of ones

        HX = [1.0 for i in xrange(len(A))]
        X = list(map(list, zip(HX)))
        linear_algebra.log("X=" + str(X))

        # Iterate X = T*X + C, comparing each new iterate with the previous
        # one, until the values of the unknowns stop changing (see solveeq).

        Result = linear_algebra.solveeq(T, C, X)
        return Result
    def solveeq(T, C, X):
        '''
        Iterates X = T*X + C from the initial guess X until two successive
        iterates are equal, as judged by linear_algebra.compare.
        '''
        ST = X
        brk = 0
        count = 0
        linear_algebra.log("count=" + str(count) + " brk=" + str(brk))

        while brk == 0:
            ix = linear_algebra.get_matrix_product(T, X)
            X = linear_algebra.get_matrix_sum(ix, C)
            compres = linear_algebra.compare(X, ST)
            linear_algebra.log("count=" + str(count) + " X=" + str(X) +
                               ": ST=" + str(ST) + ": compres=" + str(compres))
            if compres == 1:
                brk = 1
                break
            else:
                ST = X
                count += 1

        return X
    def pca(matrix, dimensions):
        '''
        This method performs a PCA transformation of the data.
        Input matrix holds the data (rows are samples, columns are features).
        Input dimensions is the number of principal components to keep.
        '''
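        # Outline of the transformation:
        #   1) mean-centre every feature (column) of the input matrix
        #   2) build the covariance matrix of the centred features
        #   3) compute its eigen values and eigen vectors and keep the
        #      `dimensions` eigen vectors with the largest eigen values
        #   4) project the centred data onto those eigen vectors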
        features = []
        for k in xrange(len(matrix[0])):
            feature = list(zip(*matrix)[k])
            mn = linear_algebra.get_mean(feature)
            mean_diff_feature = [feature[i] - mn for i in xrange(len(feature))]
            features.append(mean_diff_feature)
        linear_algebra.log("features=" + str(features))
        covariance_matrix = []
        for i in xrange(len(features)):
            row_covariance = []
            for j in xrange(len(features)):
                if (i == j):
                    row_covariance.append(
                        linear_algebra.get_variance(features[i]))
                else:
                    row_covariance.append(
                        linear_algebra.get_covariance(features[i],
                                                      features[j]))
            covariance_matrix.append(row_covariance)

        linear_algebra.log("covariance_matrix=" + str(covariance_matrix))
        eigen_value_list = linear_algebra.get_eigen_values(covariance_matrix)
        linear_algebra.log("eigen_value_list=" + str(eigen_value_list))
        eigen_value_list.sort(reverse=True)
        linear_algebra.log("descending sorted eigen_value_list=" +
                           str(eigen_value_list))
        eigen_vector_list_all = [
            linear_algebra.get_eigen_vector(covariance_matrix,
                                            eigen_value_list[i])
            for i in xrange(len(eigen_value_list))
        ]
        linear_algebra.log(" I am here")
        linear_algebra.log("eigen_vector_list_all=" +
                           str(eigen_vector_list_all))
        filtered_eigen_vector_list = [[
            eigen_vector_list_all[i][j][0]
            for j in xrange(len(eigen_vector_list_all[0]))
        ] for i in xrange(dimensions)]
        linear_algebra.log("filtered_eigen_vector_list=" +
                           str(filtered_eigen_vector_list))
        row_feature_vector = filtered_eigen_vector_list
        #row_feature_vector =  linear_algebra.get_matrix_transpose(filtered_eigen_vector_list)
        linear_algebra.log("row_feature_vector=" + str(row_feature_vector))
        #row_data_adjust = get_matrix_transpose(features)
        row_data_adjust = features
        linear_algebra.log("row_data_adjust=" + str(row_data_adjust))
        final_data = linear_algebra.get_matrix_transpose(
            linear_algebra.get_matrix_product(row_feature_vector,
                                              row_data_adjust))
        linear_algebra.log("final_data=" + str(final_data))
        return final_data
    def get_eigen_vector(matrix, eigen_value):
        '''
        This method finds the eigen vector for a given eigen value.
        It is calculated using inverse iteration:
        https://en.wikipedia.org/wiki/Inverse_iteration
        '''
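        # Inverse iteration repeatedly applies (A - mu*I)^-1 to a starting
        # vector; the iterates converge to the eigen vector whose eigen value
        # is closest to the shift mu. The shift is perturbed slightly below so
        # that (A - mu*I) stays invertible when mu is an exact eigen value.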

        eigen_value += 0.000001
        length = len(matrix)
        #bprev_vector = [[1 for i in xrange(length)]]
        bprev_vector = [[1] for i in xrange(length)]

        linear_algebra.log("bprev_vector=" + str(bprev_vector))

        identity_matrix = linear_algebra.get_matrix_identity(length)
        m = linear_algebra.get_matrix_scalar_product(eigen_value,
                                                     identity_matrix)
        linear_algebra.log("matrix=" + str(matrix))
        linear_algebra.log("m=" + str(m))

        s = linear_algebra.get_matrix_diff(matrix, m)
        linear_algebra.log("s=" + str(s))
        inv = linear_algebra.get_matrix_inverse(s)
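        # Rescaling the inverse by its norm changes only the length of the
        # iterates, not their direction, so the eigen vector estimate is
        # unchanged while the intermediate values stay numerically bounded.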
        nrm = linear_algebra.get_matrix_norm(inv, 2)
        inv_normalized = inv
        linear_algebra.log("inv_normalized=" + str(inv_normalized))
        if (nrm > 0.0):
            inv_normalized = linear_algebra.get_matrix_scalar_product(
                1.0 / nrm, inv)

        iteration = 1
        tol = 0.01
        while abs(tol) >= 0.00001 and iteration <= 100:
            b_vector = linear_algebra.get_matrix_product(
                inv_normalized, bprev_vector)
            linear_algebra.log(" After iteration=" + str(iteration) +
                               " b_vector=" + str(b_vector))
            diff_vector = linear_algebra.get_matrix_diff(
                b_vector, bprev_vector)
            tol = max(
                [abs(diff_vector[i][0]) for i in xrange(len(diff_vector))])
            linear_algebra.log(" After iteration=" + str(iteration) +
                               ", diff_vector=" + str(diff_vector) +
                               " tolerance=" + str(tol))
            iteration += 1
            bprev_vector = b_vector

        # Zero out components that are negligibly small compared with the
        # largest component (by absolute value)
        abs_vector = [abs(bprev_vector[i][0]) for i in xrange(len(bprev_vector))]
        linear_algebra.log("abs_vector=" + str(abs_vector))
        max_val = max(abs_vector)
        linear_algebra.log("max_val=" + str(max_val))
        bprev_vector_zero = [[
            bprev_vector[i][0]
            if abs(bprev_vector[i][0]) * 100000 > max_val else 0.0
        ] for i in xrange(len(bprev_vector))]
        linear_algebra.log("bprev_vector_zero=" + str(bprev_vector_zero))
        bprev_vector_no_zero = filter(linear_algebra.removeZero,
                                      bprev_vector_zero)
        linear_algebra.log("bprev_vector_no_zero=" + str(bprev_vector_no_zero))
        min_val = 1.0
        if (len(bprev_vector_no_zero) > 0):
            min_val = min([
                abs(bprev_vector_no_zero[i][0])
                for i in xrange(len(bprev_vector_no_zero))
            ])
        linear_algebra.log("min_val=" + str(min_val))
        final_vector = [[bprev_vector_zero[i][0] / min_val]
                        for i in xrange(len(bprev_vector_zero))]
        less_than_zero_vector = [
            final_vector[i][0] < 0 for i in xrange(len(final_vector))
        ]
        linear_algebra.log("less_than_zero_vector=" +
                           str(less_than_zero_vector))
        less_than_zero_vector_length = sum(less_than_zero_vector)
        linear_algebra.log("less_than_zero_vector=" +
                           str(less_than_zero_vector_length))
        if less_than_zero_vector_length == len(final_vector):
            final_vector = [[-1.0 * final_vector[i][0]]
                            for i in xrange(len(bprev_vector_zero))]
        linear_algebra.log("final_vector=" + str(final_vector))

        return final_vector
    def get_eigen_values(matrix):
        '''
        This method returns a list of eigen values of a given matrix.
        The eigen values are calculated using the QR algorithm.
        '''
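        # QR algorithm: starting from A_0 = A, factor A_k = Q_k * R_k (here
        # via Householder reflections) and form A_(k+1) = R_k * Q_k. Every
        # A_(k+1) is similar to A_k, and for matrices with real eigen values
        # (e.g. symmetric covariance matrices) the diagonal of A_k converges
        # to the eigen values.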

        A = matrix
        R = A
        QT = [[]]

        iteration = 1
        tol = 0.01
        while abs(tol) >= 0.00001 and iteration <= 100:
            linear_algebra.log(" Working for iteration=" + str(iteration))
            R = A
            QT = [[]]
            for k in xrange(len(R[0]) - 1):
                column = linear_algebra.get_matrix_transpose(
                    [list(zip(*R)[k])])
                linear_algebra.log("column[" + str(k) + "] =" + str(column))
                nm = linear_algebra.get_matrix_norm(column, 2)
                linear_algebra.log("nm=" + str(nm))
                if nm == 0.0:
                    std_column = column
                else:
                    std_column = linear_algebra.get_matrix_scalar_product(
                        1.0 / nm, column)
                linear_algebra.log("std_column[" + str(k) + "] =" +
                                   str(std_column))
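                # Householder step: build a reflector P = I - 2*v*v^T that
                # maps the k-th column onto a multiple of the k-th unit
                # vector, zeroing its entries below the diagonal.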
                d = linear_algebra.get_matrix_norm(std_column[k:], 2)
                if (std_column[k][0] > 0.0):
                    d = -1.0 * d
                linear_algebra.log("d=" + str(d))
                vk = (0.5 * (1 - std_column[k][0] / d))**0.5
                t = -d * vk
                linear_algebra.log("t=" + str(t))
                v = [[
                    0.0 if i < k else vk if i == k else std_column[i][0] /
                    (2 * t)
                ] for i in xrange(len(std_column))]
                linear_algebra.log("v=" + str(v))
                identity_matrix = linear_algebra.get_matrix_identity(
                    len(std_column))
                vtrans = linear_algebra.get_matrix_transpose(v)
                linear_algebra.log("vtrans=" + str(vtrans))
                multi_v_vtrans = linear_algebra.get_matrix_product(v, vtrans)
                p = linear_algebra.get_matrix_diff(
                    identity_matrix,
                    linear_algebra.get_matrix_scalar_product(
                        2.0, multi_v_vtrans))
                linear_algebra.log("vtrans=" + str(vtrans))
                R = linear_algebra.get_matrix_product(p, R)
                linear_algebra.log("R=" + str(R))
                if k == 0:
                    QT = p
                else:
                    QT = linear_algebra.get_matrix_product(p, QT)
                linear_algebra.log("QT=" + str(QT))

            Q = linear_algebra.get_matrix_transpose(QT)
            linear_algebra.log("Q=" + str(Q))
            linear_algebra.log("R=" + str(R))
            res = linear_algebra.get_matrix_product(Q, R)
            linear_algebra.log("res=" + str(res))
            newA = linear_algebra.get_matrix_product(R, Q)
            linear_algebra.log("newA=" + str(newA))
            diff_matrix = linear_algebra.get_matrix_diff(newA, A)
            tol = max([
                abs(diff_matrix[i][j]) if i == j else 0
                for i in xrange(len(diff_matrix))
                for j in xrange(len(diff_matrix[0]))
            ])
            A = newA
            linear_algebra.log(" After iteration=" + str(iteration) +
                               " diff_matrix = " + str(diff_matrix) +
                               " tolerance=" + str(tol))
            iteration += 1

        linear_algebra.log("Final A=" + str(A))
        eigen_value_list = [A[k][k] for k in xrange(len(A))]
        linear_algebra.log("eigen_value_list=" + str(eigen_value_list))
        return eigen_value_list
    matrix3 = [[10, 20, 10], [20, 40, 20], [30, 60, 30]]
    rank = linear_algebra.get_matrix_rank(matrix3)
    print "matrix3 rank=" + str(rank)

    #matrix1 = [[1,1], [2,2]]
    #la = linear_algebra(matrix1)
    #dim = la.dimensions()
    #print "dimensions=" + str(dim)
    #la.printMatrix()
    # A= [[10,-1,2,0,6],[-1,11,-1,3,25],[2,-1,10,-1,-11],[0,3,-1,8,15]]
    # A= [[10.0,-1.0,2.0,0.0,6.0],[-1.0,11.0,-1.0,3.0,25.0],[2.0,-1.0,10.0,-1.0,-11.0],[0.0,3.0,-1.0,8.0,15.0]]
    A = [[16.0, 3.0, 11.0], [7.0, -11.0, 13.0]]
    #   X = linear_algebra.solve_gauss_jordan(A)
    #linear_algebra.print_matrix(X)
    #   print "X=" + str(X)
    X2 = linear_algebra.solve_gauss_seidel(A)
    print "X2=" + str(X2)

    Y1 = [[10, -1, 2, 0], [-1, 11, -1, 3], [2, -1, 10, -1], [0, 3, -1, 8]]
    #Y2 =[[1], [2], [-3], [-2]]
    Y2 = [[1.0], [2.0], [-1.0], [1.0]]
    Y3 = linear_algebra.get_matrix_product(Y1, Y2)

    print "Y3 = " + str(Y3)

    Y4 = [[1.349391319689485], [1.6573747353563866], [-0.918269230769231],
          [0.9999999999999999]]
    Y5 = linear_algebra.get_matrix_product(Y1, Y4)
    print "Y5 = " + str(Y5)