Example #1
def projection2(matrix,posneg,eps):
    """
    def projection2(matrix,posneg,eps):
        Algorithm
        return P,Q1
    Returns a projector P and an orthonormal spanning set Q1
    of the invariant subspace associated with the given matrix
    and the specified subspace.

    Input "matrix" is the matrix from which the eigenprojection comes,
    "posneg" is 1 or -1 depending on whether the unstable or stable space is
    sought. The input eps gives a bound on how small the eigenvalues sought
    can be, which is desirable when a zero mode should be avoided.
    """

    T1,U1,sdim1 = linalg.schur(matrix,output='complex',sort=lambda x: posneg*x.real>eps)
    Q1 = U1[:,:sdim1]
    try:
        T2,U2,sdim2 = linalg.schur(-matrix,output='complex',sort=lambda x: posneg*x.real>-eps)
        Q2 = U2[:,:sdim2]
    except Exception:
        print("Error in bin.py -- could not take Schur decomposition")
        raise ValueError("Problem with Schur Decomposition -- see projection2 in bin.py")

    R = np.concatenate((Q1, Q2), axis=1)
    L = linalg.inv(R)
    P = np.zeros(matrix.shape)

    for i in range(sdim1):
        P = P + np.outer(R[:,i],L[i,:])

    return P,Q1
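A minimal usage sketch (not from the original project; it assumes the same imports the function body relies on, `import numpy as np` and `from scipy import linalg`), checking that the returned P behaves like a spectral projector:

import numpy as np
from scipy import linalg

A = np.diag([2.0, 1.0, -1.0, -3.0])          # two unstable, two stable eigenvalues
P, Q1 = projection2(A, posneg=1, eps=1e-8)   # projector onto the unstable subspace
print(np.allclose(P @ P, P))                 # idempotent
print(np.allclose(P @ A, A @ P))             # commutes with A (invariant subspace)
print(Q1.shape[1])                           # dimension of the unstable subspace -> 2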
Example #2
 def test_simple(self):
     a = [[8,12,3],[2,9,3],[10,3,6]]
     t,z = schur(a)
     assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)
     tc,zc = schur(a,'complex')
     assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))
     assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a)
     tc2,zc2 = rsf2csf(tc,zc)
     assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a)
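A self-contained restatement of the same checks (an assumption: the original fragment gets `schur`, `rsf2csf`, `dot`, `transp` and `conj` from the scipy test harness):

import numpy as np
from scipy.linalg import schur, rsf2csf

a = np.array([[8, 12, 3], [2, 9, 3], [10, 3, 6]], dtype=float)
t, z = schur(a)                            # real Schur form: a = z t z.T
assert np.allclose(z @ t @ z.conj().T, a)
tc, zc = schur(a, output='complex')        # complex (upper triangular) Schur form
assert np.allclose(zc @ tc @ zc.conj().T, a)
tc2, zc2 = rsf2csf(t, z)                   # convert the real form into the complex form
assert np.allclose(zc2 @ tc2 @ zc2.conj().T, a)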
Example #3
def SimilarityMatrix (A1, A2):
    """
    Returns the matrix that transforms A1 to A2.
    
    Parameters
    ----------
    A1 : matrix, shape (N,N)
        The smaller matrix
    A2 : matrix, shape (M,M)
        The larger matrix (M>=N)
    
    Returns
    -------
    B : matrix, shape (N,M)
        The matrix satisfying `A_1\,B = B\,A_2`
        
    Notes
    -----
    For the existence of a (unique) solution the larger 
    matrix has to inherit the eigenvalues of the smaller one.
    """

    if A1.shape[0]!=A1.shape[1] or A2.shape[0]!=A2.shape[1]:
        raise Exception("SimilarityMatrix: The input matrices must be square!")

    N1 = A1.shape[0]
    N2 = A2.shape[1]

    if N1>N2:
        raise Exception("SimilarityMatrix: The first input matrix must be smaller than the second one!")

    [R1,Q1]=la.schur(A1,'complex')
    [R2,Q2]=la.schur(A2,'complex')
    Q1 = ml.matrix(Q1)
    Q2 = ml.matrix(Q2)
    
    c1 = ml.matrix(np.sum(Q2.H,1))
    c2 = np.sum(Q1.H,1)
    I = ml.eye(N2)
    X = ml.zeros((N1,N2), dtype=complex)
    for k in range(N1-1,-1,-1):
        M = R1[k,k]*I-R2
        if k==N1-1:
            m = ml.zeros((1,N2))
        else:
            m = -R1[k,k+1:]*X[k+1:,:]
        X[k,:] = Linsolve(np.hstack((M,c1)),np.hstack((m,c2[k])))
    return (Q1*X*ml.matrix(Q2).H ).real
Example #4
def pfaffian_schur(A, overwrite_a=False):
    """Calculate Pfaffian of a real antisymmetric matrix using
    the Schur decomposition. (Hessenberg would in principle be faster,
    but scipy-0.8 messed up the performance for scipy.linalg.hessenberg()).

    This function does not make use of the skew-symmetry of the matrix A,
    but uses a LAPACK routine that is coded in FORTRAN and hence faster
    than python. As a consequence, pfaffian_schur is only slightly slower
    than pfaffian().
    """

    assert np.issubdtype(A.dtype, np.number) and not np.issubdtype(
        A.dtype, np.complexfloating)

    assert A.shape[0] == A.shape[1] > 0

    assert abs(A + A.T).max() < 1e-14

    # Quick return if possible
    if A.shape[0] % 2 == 1:
        return 0

    (t, z) = la.schur(A, output='real', overwrite_a=overwrite_a)
    l = np.diag(t, 1)
    return np.prod(l[::2]) * la.det(z)
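A quick sanity check (not part of the original module; assumes `import numpy as np` and `import scipy.linalg as la` as used in the function body): for a real antisymmetric matrix the Pfaffian satisfies Pf(A)^2 = det(A).

import numpy as np
import scipy.linalg as la

rng = np.random.default_rng(0)
M = rng.standard_normal((6, 6))
A = M - M.T                              # real antisymmetric matrix of even dimension
pf = pfaffian_schur(A)
print(np.isclose(pf**2, la.det(A)))      # Pf(A)^2 == det(A)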
Example #5
File: oom.py Project: markovmodel/oom
def truncated_svd_psd(A,m=np.inf):
    m=min(m,A.shape[0])
    S,U=linalg.schur(A)
    s=np.diag(S)
    tol=A.shape[0]*np.spacing(s.max())
    m=min(m,np.count_nonzero(s>tol))
    idx=(-s).argsort()[:m]
    return U[:,idx],np.diag(s[idx])
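Minimal usage sketch (not part of the original file; assumes `import numpy as np` and `from scipy import linalg` as implied by the calls above):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(1)
B = rng.standard_normal((5, 3))
A = B @ B.T                              # rank-3 positive semi-definite matrix
U, S = truncated_svd_psd(A, m=3)         # keep the three dominant modes
print(U.shape, S.shape)                  # -> (5, 3) (3, 3)
print(np.allclose(U @ S @ U.T, A))       # the truncated factors reconstruct A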
Example #6
def check_eigens():
    T, Z = schur(A)
    for i in range(0,T.shape[0]-1):
        if T[i+1,i]!=0:
            for eig in np.linalg.eigvals(T[i:i+2,i:i+2]):
                print(eig)
                if eig.real < 0:
                    T, Z = rsf2csf(T,Z)
                    return T,Z
    return T,Z
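check_eigens reads a module-level matrix A and relies on `schur`, `rsf2csf` and `np` being imported; a minimal standalone driver (an assumption, not part of the original snippet) could be:

import numpy as np
from scipy.linalg import schur, rsf2csf

A = np.array([[-1.0, -2.0], [1.0, -1.0]])   # complex eigenvalue pair -1 +/- i*sqrt(2)
T, Z = check_eigens()
print(np.iscomplexobj(T))                   # True: a negative-real-part pair triggered rsf2csf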
Example #7
File: lqr.py Project: alexansari101/nlsymb
    def solve(self, **kwargs):
        BRB = matmult(self.B, inv(self.R), self.B.T)
        M = np.vstack([
            np.hstack([self.A, -BRB]),
            np.hstack([-self.Q, -self.A.T])
        ])
        (L, Z, sdim) = schur(M, sort='lhp')
        U = Z.T

        self.P = matmult(inv(U[0:sdim, 0:sdim]),
                         U[0:sdim, sdim:]).conj().T
Example #8
def schur_decomposition(grayscale_image_2darray):
    """
     Schur decomposition. Input a grayscale image to get its upper triangular matrix and unitary matrix.

     Schur's theorem states that if A ∈ C^(n×n), there exists a unitary matrix U and an upper triangular matrix T
     such that A = U × T × U'.
     
     [email protected], 23-July-2015
    """

    triangular_matrix, unitary_matrix = schur(grayscale_image_2darray)

    return triangular_matrix, unitary_matrix
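Usage sketch (not part of the original snippet; assumes `import numpy as np` and `from scipy.linalg import schur` as implied by the function body):

import numpy as np
from scipy.linalg import schur

img = np.array([[52., 55., 61.],
                [62., 59., 55.],
                [63., 65., 66.]])            # toy 3x3 grayscale patch
T, U = schur_decomposition(img)
print(np.allclose(U @ T @ U.T, img))         # A = U × T × U' holds up to rounding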
Example #9
def mysqrtm(A):
    # Schur decomposition and cast to complex array
    T, Z = lm.schur(A)
    T, Z = lm.rsf2csf(T,Z)
    n,n = T.shape

    # Inner loop of sqrtm algorithm -> call C code
    R = np.zeros((n,n), dtype=T.dtype)
    stat = sqrtm_loop(R, T, n)

    R, Z = lm.all_mat(R,Z)
    X = (Z * R * Z.H)

    return X.A
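The compiled helper `sqrtm_loop` is not shown. For reference, a pure-Python sketch of the same upper-triangular square-root recurrence (the Schur method of Björck and Hammarling) is given below; this is an illustrative stand-in, not the project's C code.

import numpy as np
from scipy.linalg import schur, rsf2csf

def sqrtm_loop_py(R, T, n):
    """Fill R (initialised to zero) with an upper-triangular square root of the triangular matrix T."""
    for j in range(n):
        R[j, j] = np.sqrt(T[j, j])
        for i in range(j - 1, -1, -1):
            s = R[i, i + 1:j] @ R[i + 1:j, j]              # entries already computed
            R[i, j] = (T[i, j] - s) / (R[i, i] + R[j, j])
    return 0

A = np.array([[4.0, 1.0], [0.5, 3.0]])
T, Z = schur(A)
T, Z = rsf2csf(T, Z)
R = np.zeros_like(T)
sqrtm_loop_py(R, T, T.shape[0])
X = Z @ R @ Z.conj().T
print(np.allclose(X @ X, A))                               # X is a square root of A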
Example #10
File: solutions.py Project: smwade/ACME-1
def ps_contour_plot(A, m = 20,epsilon_vals=None):
    '''Plots the pseudospectrum of the matrix A as a contour plot.  Also,
    plots the eigenvalues.
    Parameters:
        A : square, 2D ndarray
            The matrix whose pseudospectrum is to be plotted
        m : int
            accuracy
        epsilon_vals : list of floats
            If k is in epsilon_vals, then the epsilon-pseudospectrum
            is plotted for epsilon=10**-k
            If epsilon_vals=None, the defaults of plt.contour() are used
            instead of any specified values.
    '''
    n = A.shape[0]
    T = la.schur(A)[0]
    eigsA = np.diagonal(T)
    xvals, yvals = ps_grid(eigsA, m)
    sigmin = np.zeros((m, m))
    for k in range(m):
        for j in range(m):
            T1 = (xvals[k] + 1j*yvals[j]) * np.eye(n) - T
            T2 = T1.T.conjugate()
            sigold = 0
            qold = np.zeros((n, 1))
            beta = 0
            H = np.zeros((n, n))
            q = np.random.normal(size=(n, 1)) + 1j * np.random.normal(size=(n, 1))
            q = q/la.norm(q, ord=2)
            for p in range(n-1):
                b1 = la.solve(T2, q)
                b2 = la.solve(T1, b1)
                v = b2 - beta * qold
                alpha = np.real(np.vdot(q,v))
                v = v - alpha * q
                beta = la.norm(v)
                qold = q
                q = v/beta
                H[p+1, p] = beta
                H[p, p+1] = beta
                H[p, p] = alpha
                sig = np.abs(np.max(la.eig(H[:p+1,:p+1])[0]))
                if np.abs(sigold/sig - 1) < .001:
                    break
                sigold = sig
            sigmin[j, k] = np.sqrt(sig)
    plt.contour(xvals,yvals,np.log10(sigmin), levels=epsilon_vals)
    plt.scatter(la.eig(A)[0].real, la.eig(A)[0].imag)
    plt.show()
Example #11
def msroots(M):
    """Computes the roots to a system via the eigenvalues of the Möller-Stetter
    matrices. Implicitly performs a random rotation of the coordinate system
    to avoid repeated eigenvalues arising from special structure in the underlying
    polynomial system. Approximates the joint eigenvalue problem using a Schur
    factorization of a linear combination of the matrices.

    Parameters
    ----------
    M : (n,n,dim) ndarray
        Array containing the nxn Möller-Stetter matrices, where the matrix
        corresponding to multiplication by x_i is M[...,i]

    Returns
    -------
    roots : (n,dim) ndarray
        Array containing the approximate roots of the system, where each row
        is a root.
    """
    dim = M.shape[-1]

    # perform a random rotation with a random orthogonal Q
    Q = ortho_group.rvs(dim)
    M = (Q @ M[..., np.newaxis])[..., 0]

    eigs = np.empty((dim, M.shape[0]), dtype='complex')
    # Compute the matrix U that triangularizes a random linear combination
    c = np.random.randn(dim)
    U = schur((M * c).sum(axis=-1), output='complex')[1]

    # Compute the eigenvalues of each matrix, and use the computed U to sort them
    T = (U.conj().T) @ (M[..., 0]) @ U
    w, v = eig(M[..., 0])
    arr = sort_eigs(w, np.diag(T))
    eigs[0] = w[arr]

    # compute eigenvalue condition numbers (will be the same for all matrices)
    cond = condeigs(M[..., 0], eigs[0], v[:, arr])

    for i in range(1, dim):
        T = (U.conj().T) @ (M[..., i]) @ U
        w = eig(M[..., i], right=False)
        arr = sort_eigs(w, np.diag(T))
        eigs[i] = w[arr]

    # Rotate back before returning, transposing to match expected shape
    return (Q.T @ eigs).T, cond
Example #12
def schur_ordered(A, ct=False):
    r"""Returns block ordered complex Schur form of matrix :math:`\mathbf{A}`

    .. math:: \mathbf{TAT}^H = \mathbf{A}_s = \begin{bmatrix} A_{11} & A_{12} \\ 0 & A_{22} \end{bmatrix}

    where :math:`A_{11}\in\mathbb{C}^{s\times s}` contains the :math:`s` stable
    eigenvalues of :math:`\mathbf{A}\in\mathbb{R}^{m\times m}`.

    Args:
        A (np.ndarray): Matrix to decompose.
        ct (bool): Continuous time system.

    Returns:
        tuple: Tuple containing the Schur decomposition of :math:`\mathbf{A}`, :math:`\mathbf{A}_s`; the transformation
        :math:`\mathbf{T}\in\mathbb{C}^{m\times m}`; and the number of stable eigenvalues of :math:`\mathbf{A}`.

    Notes:
        This function is a wrapper of ``scipy.linalg.schur`` imposing the settings required for this application.

    """
    if ct:
        sort_eigvals = 'lhp'
    else:
        sort_eigvals = 'iuc'

    # if A.dtype == complex:
    #     output_form = 'complex'
    # else:
    #     output_form = 'real'
    # issues when not using the complex form of the Schur decomposition

    output_form = 'complex'
    As, Tt, n_stable1 = sclalg.schur(A, output=output_form, sort=sort_eigvals)

    if sort_eigvals == 'lhp':
        n_stable = np.sum(np.linalg.eigvals(A).real <= 0)
    elif sort_eigvals == 'iuc':
        n_stable = np.sum(np.abs(np.linalg.eigvals(A)) <= 1.)
    else:
        raise NotImplementedError(
            'Unknown sorting of eigenvalues. Either iuc or lhp')

    assert n_stable == n_stable1, 'Number of stable eigenvalues not equal in Schur output and manual calculation'

    assert (np.abs(As - np.conj(Tt.T).dot(A.dot(Tt))) <
            1e-4).all(), 'Schur breakdown - A_schur != T^H A T'
    return As, Tt.T, n_stable
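Usage sketch (not from the original library; assumes `import numpy as np` and `import scipy.linalg as sclalg` as used in the function body):

import numpy as np
import scipy.linalg as sclalg

A = np.diag([-2.0, -0.5, 1.0])                   # two stable, one unstable eigenvalue
As, T, n_stable = schur_ordered(A, ct=True)
print(n_stable)                                  # -> 2
print(np.all(np.diag(As)[:n_stable].real < 0))   # stable eigenvalues are ordered first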
Example #13
def cSqrtm(A):
    """
    Computes custom matrix square root using Scipy algorithm with inner loop written in C.
    """

    # Schur decomposition and cast to complex array
    T, Z = schur(A)
    T, Z = rsf2csf(T,Z)
    n,n = T.shape

    # Inner loop of sqrtm algorithm -> call C code
    R = np.zeros((n,n), dtype=T.dtype)
    stat = sqrtm_loop(R, T, n)
    R, Z = all_mat(R,Z)
    X = (Z * R * Z.H)

    return X.A
Example #14
File: nonnormal.py Project: ketch/pseudopy
    def __init__(self, A, points, method='svd'):
        '''Evaluates the inverse resolvent norm on the given list of points

        Stores result in self.vals and points in self.points
        '''
        self.points = points
        if method == 'lanczosinv':
            self.vals = []

            # algorithm from page 375 of Trefethen/Embree 2005
            T, _ = schur(A, output='complex')
            m, n = A.shape
            if m != n:
                raise ValueError('m != n is not allowed in dense mode')
            for point in points:
                M = T - point * numpy.eye(*T.shape)

                def matvec(x):
                    r'''Matrix-vector multiplication

                    Matrix-vector multiplication with matrix
                    :math:`\begin{bmatrix}0&(A-\lambda I)^{-1}\\(A-\lambda I)^{-1}&0\end{bmatrix}`'''
                    return solve_triangular(M,
                                            solve_triangular(
                                                M, x, check_finite=False),
                                            trans=2,
                                            check_finite=False)

                MH_M = LinearOperator(matvec=matvec,
                                      dtype=complex,
                                      shape=(n, n))

                evals = eigsh(MH_M,
                              k=1,
                              tol=1e-3,
                              which='LM',
                              maxiter=n,
                              ncv=n,
                              return_eigenvectors=False)

                self.vals.append(1 / numpy.sqrt(numpy.max(numpy.abs(evals))))
        else:
            self.vals = [
                inv_resolvent_norm(A, point, method=method) for point in points
            ]
Example #15
def symmetric_product_pos_def(B, P, invertP=False):
    """Computes the product B.H @ P.I @ B in a symmetry-preserving way
    input:
        P: pos. def. 2d-array
        B: 2d array
    output:
        B.H @ P.I @ B with B.H @ P.I @ B - (B.H @ P.I @ B).H = 0
    """
    T, Z = linalg.schur(P)
    Z = np.asmatrix(Z)
    if invertP:
        D = np.diag(1 / np.sqrt(np.diag(T)))  # force diagonal matrix
    else:
        D = np.diag(np.sqrt(np.diag(T)))
    product = D @ Z.H @ B

    product = product.H @ product
    return product
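A minimal check (not part of the original snippet; assumes `import numpy as np` and `from scipy import linalg`):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(2)
B = rng.standard_normal((4, 3))
M = rng.standard_normal((4, 4))
P = M @ M.T + 4 * np.eye(4)                          # symmetric positive definite
S = symmetric_product_pos_def(B, P, invertP=True)    # ~ B.T @ inv(P) @ B
print(np.allclose(S, S.conj().T))                    # symmetric up to round-off
print(np.allclose(S, B.T @ np.linalg.inv(P) @ B))    # matches the direct product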
Example #16
    def __init__(self, A, points, method='svd'):
        '''Evaluates the inverse resolvent norm on the given list of points

        Stores result in self.vals and points in self.points
        '''
        self.points = points
        if method == 'lanczosinv':
            self.vals = []

            # algorithm from page 375 of Trefethen/Embree 2005
            T, _ = schur(A, output='complex')
            m, n = A.shape
            if m != n:
                raise ValueError('m != n is not allowed in dense mode')
            for point in points:
                M = T - point*numpy.eye(*T.shape)

                def matvec(x):
                    r'''Matrix-vector multiplication

                    Matrix-vector multiplication with matrix
                    :math:`\begin{bmatrix}0&(A-\lambda I)^{-1}\\(A-\lambda I)^{-1}&0\end{bmatrix}`'''
                    return solve_triangular(
                        M,
                        solve_triangular(
                            M,
                            x,
                            check_finite=False
                            ),
                        trans=2,
                        check_finite=False
                        )
                MH_M = LinearOperator(matvec=matvec, dtype=complex,
                                      shape=(n, n))

                evals = eigsh(MH_M, k=1, tol=1e-3, which='LM',
                              maxiter=n,
                              ncv=n,
                              return_eigenvectors=False)

                self.vals.append(1/numpy.sqrt(numpy.max(numpy.abs(evals))))
        else:
            self.vals = [inv_resolvent_norm(A, point, method=method)
                         for point in points]
Example #17
def ave_control(A, c=1):
    # Bassett Lab, University of Pennsylvania, 2016.
    # Reference: Gu, Pasqualetti, Cieslak, Telesford, Yu, Kahn, Medaglia,
    #            Vettel, Miller, Grafton & Bassett, Nature Communications
    #            6:8414, 2015.

    if c is not None:
        u, s, vt = svd(A)  # singular value decomposition
        A = A / (c + s[0])  # Matrix normalization

    T, U = schur(A, 'real')  # Schur stability
    midMat = np.multiply(U, U).transpose()
    v = np.matrix(np.diag(T)).transpose()
    N = A.shape[0]
    P = np.diag(1 - np.matmul(v, v.transpose()))
    P = repmat(P.reshape([N, 1]), 1, N)
    ac = sum(np.divide(midMat, P))

    return ac
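Usage sketch (the imports are a guess at what the original module uses, e.g. `from scipy.linalg import schur, svd` and `from numpy.matlib import repmat`):

import numpy as np
from scipy.linalg import schur, svd
from numpy.matlib import repmat

A = np.array([[0., 1., 1.],
              [1., 0., 0.],
              [1., 0., 0.]])      # toy symmetric adjacency matrix
ac = ave_control(A)
print(ac)                         # one average-controllability value per node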
Example #18
def sorted_brandts_schur(
        P: np.ndarray,
        k: int,
        z: str = "LM") -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Compute a sorted Schur decomposition.

    This function uses :mod:`scipy` for the decomposition and Brandts'
    method (see [Brandts02]_) for the sorting.

    Parameters
    ----------
    %(P)s
    %(k)s
    %(z)s

    Returns
    -------
    Tuple of the following:

    R
        %(R_sort)s
    Q
        %(Q_sort)s
    eigenvalues
        %(eigenvalues_k)s
    """
    # Make a Schur decomposition of P.
    R, Q = schur(P, output="real")

    # Sort the Schur matrix and vectors.
    Q, R, ap = sort_real_schur(Q, R, z=z, b=k)

    # Warnings
    if np.any(np.array(ap) > 1.0):
        warnings.warn("Reordering of Schur matrix was inaccurate.")

    # compute eigenvalues
    T, _ = rsf2csf(R, Q)
    eigenvalues = np.diag(T)[:k]

    return R, Q, eigenvalues
Example #19
def sobjective(v,R):
    total = float( np.sum([Nspikes for Nspikes in R['N_spikes']]) )
    Nspikes = R['N_spikes']/total
    dSTA  = np.concatenate(
            [STA[:,np.newaxis]-R['statistics']['features']['mean'][:,np.newaxis]
            for STA in R['statistics']['features']['STA']], axis=1)    
    direct = np.sum(v*Nspikes*( dSTA - 0.5*np.dot(R['statistics']['features']['cov'],v)))
    D,Z = schur(R['statistics']['features']['cov']/2)
    DD  = np.diag(D)
    keep= DD>1e-10
    P   =  (Z[:,keep] * np.sqrt(DD[keep])).T
    y   =  np.dot ( (Z[:,keep] * 1/np.sqrt(DD[keep])).T , dSTA ) /2

#    print np.max( D - np.diag(np.diag(D)) )
#    print 'max( Z D Z.T - C ) ', \
#    np.max(np.abs(np.dot(Z,np.dot(D,Z.T)) - R['statistics']['features']['cov']/2))
#    print '2*dot(P.T,y) - dSTA ', np.max(np.abs( 2*np.dot(P.T,y) - dSTA ))

    return [ direct , np.sum( - (np.sqrt(Nspikes)*(y-np.dot(P,v)))**2 + 
                                (np.sqrt(Nspikes)*y)**2 ) ]
Example #20
def main():
    from scipy.integrate import quad, dblquad, nquad
    # 1-D numerical integration: returns (value, error estimate): (1.0000000000000002, 5.842607038578007e-11)
    print("1-D integral:")
    print(quad(lambda x: np.exp(-x), 0, np.inf))
    print("2-D integral:")
    print(
        dblquad(lambda t, x: np.exp(-x * t) / t**3, 0, np.inf, lambda x: 1,
                lambda x: np.inf))
    print("Multiple integral:")

    def f(x, y):
        return x * y

    def bound_y():
        return [0, 0.5]

    def bound_x(y):
        return [0, 1 - 2 * y]

    print(nquad(f, [bound_x, bound_y]))

    from scipy import linalg as lg
    arr = np.array([[1, 2], [3, 4]])
    print("行列式:")
    print(lg.det(arr))
    print("逆矩阵:", lg.inv(arr))
    #解线性方程组
    b = np.array([6, 14])
    print("Sol:", lg.solve(arr, b))
    #特征值,特征向量
    print("Eig:")
    print(lg.eig(arr))
    print("LU分解:")
    print(lg.lu(arr))
    print("QR分解:")
    print(lg.qr(arr))
    print("SVD分解:")
    print(lg.svd(arr))
    print("Schur分解:")
    print(lg.schur(arr))
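For reference (not part of the original demo), the Schur factors printed above satisfy arr = Z T Z.T, which can be checked directly:

import numpy as np
from scipy import linalg as lg

arr = np.array([[1, 2], [3, 4]])
T, Z = lg.schur(arr)
print(np.allclose(Z @ T @ Z.T, arr))   # True: the Schur factors reconstruct arr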
Example #21
 def obtain_squared_P(self, P, circ, pos):
     """
     :param P:
     :param circ:
     :param pos:
     :return:
     Obtain the radius of the covariance ellipse after taking the matrix square root and plotting it. Both the
     vehicle position and the feature position covariances are plotted.
     """
     R = np.zeros((2, 2))
     [T, Q] = schur(P)
     R[0, 0] = np.sqrt(T[0, 0])
     R[1, 1] = np.sqrt(T[1, 1])
     R[0, 1] = T[0, 1] / (R[0, 0] + R[1, 1])
     r = linalg.multi_dot([Q, R, Q.T])
     a = np.dot(r, circ)
     position = np.squeeze(pos)
     position = position.reshape((2, 1))
     p = a + np.matlib.repmat(position, 1, a.shape[1])
     p1 = self.axtot.scatter(p[0], p[1], s=1, color='green')
     return p1
Example #22
    def test_mb03rd_default(self):
        # regression: mb03rd was failing with no third arg (X) supplied
        A = np.array([[6, -1, -7, -2, 2], [-3, 4, 2, -7, 6],
                      [-6, -9, -3, -1, 10], [-2, -4, 1, 5, 7],
                      [-7, -5, -6, 6, 7]])

        Aschur, Tschur = schur(A)

        X = Tschur.copy()

        Ar, Xr, blsize, W = mb03rd(Aschur.shape[0],
                                   Aschur,
                                   X,
                                   'U',
                                   'N',
                                   pmax=1.0,
                                   tol=0.0)

        Ar2, Xr2, blsize2, W2 = mb03rd(Aschur.shape[0], Aschur)

        assert_allclose(Ar, Ar2)
        assert_allclose(Xr, Tschur.dot(Xr2))
Example #23
File: main.py Project: kolia/subunits
def ARD( stats , lam=0.00 ):
    stats['D'], stats['Z'] = schur(stats['cov'])
    print('Starting ARD of size', stats['Z'].shape, 'with lambda=', lam)
    sys.stdout.flush()
    D,Z = stats['D']/2 , stats['Z']
    print('Schur decomposition completed')
    sys.stdout.flush()
    DD  = numpy.diag(D)
    keep= DD>1e-10
    P    =  (Z[:,keep] * numpy.sqrt(DD[keep])).T
    dSTA = numpy.concatenate(
        [STA[:,numpy.newaxis]-stats['mean'][:,numpy.newaxis] 
         for STA in stats['STA']], axis=1)
    y    =  numpy.dot ( (Z[:,keep] * 1/numpy.sqrt(DD[keep])).T , dSTA ) / 2
    iW = 1e-1
    for i in range(1):
        print('Irlsing')
        sys.stdout.flush()
        V, iW = IRLS.IRLS( y, P, x=0, disp_every=1, lam=lam, maxiter=2 , 
                           ftol=1e-5, nonzero=1e-1, iw=iW)
        save({'V':V,'iW':iW},'Localizing_lam%.0e'%lam)
    return V, iW
Example #24
def _choi_to_kraus(data, input_dim, output_dim, atol=ATOL_DEFAULT):
    """Transform Choi representation to Kraus representation."""
    from scipy import linalg as la

    # Check if hermitian matrix
    if is_hermitian_matrix(data, atol=atol):
        # Get eigen-decomposition of Choi-matrix
        # This should be a call to la.eigh, but there is an OpenBlas
        # threading issue that is causing segfaults.
        # Need schur here since la.eig does not
        # guarantee orthogonality in degenerate subspaces
        w, v = la.schur(data, output="complex")
        w = w.diagonal().real
        # Check eigenvalues are non-negative
        if len(w[w < -atol]) == 0:
            # CP-map Kraus representation
            kraus = []
            for val, vec in zip(w, v.T):
                if abs(val) > atol:
                    k = np.sqrt(val) * vec.reshape(
                        (output_dim, input_dim), order="F")
                    kraus.append(k)
            # If we are converting a zero matrix, we need to return a Kraus set
            # with a single zero-element Kraus matrix
            if not kraus:
                kraus.append(np.zeros((output_dim, input_dim), dtype=complex))
            return kraus, None
    # Non-CP-map generalized Kraus representation
    mat_u, svals, mat_vh = la.svd(data)
    kraus_l = []
    kraus_r = []
    for val, vec_l, vec_r in zip(svals, mat_u.T, mat_vh.conj()):
        kraus_l.append(
            np.sqrt(val) * vec_l.reshape((output_dim, input_dim), order="F"))
        kraus_r.append(
            np.sqrt(val) * vec_r.reshape((output_dim, input_dim), order="F"))
    return kraus_l, kraus_r
Example #25
def modal_control(A):
    '''
    FUNCTION:
     Returns values of MODAL CONTROLLABILITY for each node in a
     network, given the adjacency matrix for that network. Modal
     controllability indicates the ability of that node to steer the
     system into difficult-to-reach states, given input at that node.

    INPUT:
     A is the structural (NOT FUNCTIONAL) network adjacency matrix,
     such that the simple linear model of dynamics outlined in the
     reference is an accurate estimate of brain state fluctuations.
     Assumes all values in the matrix are positive, and that the
     matrix is symmetric.

    OUTPUT:
     Vector of modal controllability values for each node

    Bassett Lab, University of Pennsylvania, 2016.
    Reference: Gu, Pasqualetti, Cieslak, Telesford, Yu, Kahn, Medaglia,
               Vettel, Miller, Grafton & Bassett, Nature Communications 6:8414, 2015.
    '''

    # Normalize the matrix based on largest singular value
    A = A / (1 + np.linalg.svd(A)[1][0])

    # Evaluate schur stability
    T, U = scila.schur(A, 'real')
    eigVals = np.diag(T)

    N = A.shape[0]
    phi = np.zeros(N)
    for ii in range(N):
        phi[ii] = np.dot(U[ii, :]**2, 1 - eigVals**2)

    return phi
Example #26
def test():
    # Matrix
    arr = np.array([[1, 2], [3, 4]])
    ## Determinant
    print("Det:", lg.det(arr))
    ## Matrix inverse
    print("Inv:", lg.inv(arr))

    # Solve a linear system
    b = np.array([6, 14])
    print("Sol:", lg.solve(arr, b))
    # Eigenvalues
    print("Eig:", lg.eig(arr))
    # Matrix decompositions
    ## LU decomposition
    print("LU:", lg.lu(arr))
    ## QR decomposition
    print("QR:", lg.qr(arr))
    ## Singular value decomposition
    print("SVD:", lg.svd(arr))
    ## Schur decomposition
    print("Schur:", lg.schur(arr))

    pass
Example #27
def pfaffian_schur(A, overwrite_a=False):
    """Calculate Pfaffian of a real antisymmetric matrix using
    the Schur decomposition. (Hessenberg would in principle be faster,
    but scipy-0.8 messed up the performance for scipy.linalg.hessenberg()).

    This function does not make use of the skew-symmetry of the matrix A,
    but uses a LAPACK routine that is coded in FORTRAN and hence faster
    than python. As a consequence, pfaffian_schur is only slightly slower
    than pfaffian().
    """

    assert np.issubdtype(A.dtype, np.number) and not np.issubdtype(A.dtype, np.complexfloating)

    assert A.shape[0] == A.shape[1] > 0

    assert abs(A + A.T).max() < 1e-14

    #Quick return if possible
    if A.shape[0]%2 == 1:
        return 0

    (t, z) = la.schur(A, output='real', overwrite_a=overwrite_a)
    l = np.diag(t, 1)
    return np.prod(l[::2]) * la.det(z)
Example #28
def average_control(A):
    '''
    FUNCTION:
     Returns values of AVERAGE CONTROLLABILITY for each node in a
     network, given the adjacency matrix for that network. Average
     controllability indicates the ability of that node to steer the
     system into difficult-to-reach states, given input at that node.

    INPUT:
     A is the structural (NOT FUNCTIONAL) network adjacency matrix,
     such that the simple linear model of dynamics outlined in the
     reference is an accurate estimate of brain state fluctuations.
     Assumes all values in the matrix are positive, and that the
     matrix is symmetric.

    OUTPUT:
     Vector of average controllability values for each node

    Bassett Lab, University of Pennsylvania, 2016.
    Reference: Gu, Pasqualetti, Cieslak, Telesford, Yu, Kahn, Medaglia,
               Vettel, Miller, Grafton & Bassett, Nature Communications 6:8414, 2015.
    '''

    # Normalize the matrix based on largest singular value
    A = A / (1 + np.linalg.svd(A)[1][0])

    # Evaluate schur stability
    T, U = scila.schur(A, 'real')
    midMat = (U**2)

    v = np.expand_dims(np.diag(T), axis=1)
    v = np.dot(v, v.T)
    P = np.tile(np.diag(1 - v), (v.shape[0], 1))

    vals = np.sum(midMat / P, axis=1)
    return vals
Example #29
def projection1(matrix,posneg,eps):
    """
    def projection1(matrix,posneg,eps):
        Algorithm
        return P,Q1
    Returns a projector P and an orthonormal spanning set Q1
    of the invariant subspace associated with the given matrix
    and the specified subspace.

    Input "matrix" is the matrix from which the eigenprojection comes,
    "posneg" is 1,-1, or 0 if the unstable, stable, or center space is
    sought respectively. The input eps gives a bound on how small the eigenvalues sought
    can be, which is desirable when a zero mode should be avoided.
    """

    if posneg ==1:
        T1,U1,sdim1 = linalg.schur(matrix,output='complex',sort=lambda x: x.real>eps)
        Q1 = U1[:,:sdim1]

        T2,U2,sdim2 = linalg.schur(matrix,output='complex',sort=lambda x: x.real<=eps)
        Q2 = U2[:,:sdim2]
    elif posneg == -1:
        T1,U1,sdim1 = linalg.schur(matrix,output='complex',sort=lambda x: x.real<-eps)
        Q1 = U1[:,:sdim1]

        T2,U2,sdim2 = linalg.schur(matrix,output='complex',sort=lambda x: x.real>=-eps)
        Q2 = U2[:,:sdim2]
    elif posneg == 0:
        T1,U1,sdim1 = linalg.schur(matrix,output='complex',sort=lambda x: abs(x.real)<eps)
        Q1 = U1[:,:sdim1]

        T2,U2,sdim2 = linalg.schur(matrix,output='complex',sort=lambda x: abs(x.real)>=eps)
        Q2 = U2[:,:sdim2]

    R = np.concatenate((Q1, Q2), axis = 1);
    L = linalg.inv(R)
    P = np.zeros(matrix.shape)

    for i in range(sdim1):
        P = P + np.outer(R[:,i],L[i,:] )

    return P,Q1
Example #30
def bdschur(a, condmax=None, sort=None):
    """Block-diagonal Schur decomposition

    Parameters
    ----------
        a : (M, M) array_like
            Real matrix to decompose
        condmax : None or float, optional
            If None (default), use 1/sqrt(eps), which is approximately 1e8
        sort : {None, 'continuous', 'discrete'}
            Block sorting; see below.

    Returns
    -------
        amodal : (M, M) real ndarray
            Block-diagonal Schur decomposition of `a`
        tmodal : (M, M) real ndarray
            Similarity transform relating `a` and `amodal`
        blksizes : (N,) int ndarray
            Array of Schur block sizes

    Notes
    -----
    If `sort` is None, the blocks are not sorted.

    If `sort` is 'continuous', the blocks are sorted according to
    associated eigenvalues.  The ordering is first by real part of
    eigenvalue, in descending order, then by absolute value of
    imaginary part of eigenvalue, also in decreasing order.

    If `sort` is 'discrete', the blocks are sorted as for
    'continuous', but applied to log of eigenvalues
    (i.e., continuous-equivalent eigenvalues).
    """
    if condmax is None:
        condmax = np.finfo(np.float64).eps**-0.5

    if not (np.isscalar(condmax) and condmax >= 1.0):
        raise ValueError(
            'condmax="{}" must be a scalar >= 1.0'.format(condmax))

    a = np.atleast_2d(a)
    if a.shape[0] == 0 or a.shape[1] == 0:
        return a.copy(), np.eye(a.shape[1], a.shape[0]), np.array([])

    aschur, tschur = schur(a)
    amodal, tmodal, blksizes, eigvals = _bdschur_condmax_search(
        aschur, tschur, condmax)

    if sort in ('continuous', 'discrete'):

        idxs = np.cumsum(np.hstack([0, blksizes[:-1]]))

        ev_per_blk = [
            complex(eigvals[i].real, abs(eigvals[i].imag)) for i in idxs
        ]

        if sort == 'discrete':
            ev_per_blk = np.log(ev_per_blk)

        # put most unstable first
        sortidx = np.argsort(ev_per_blk)[::-1]

        # block indices
        blkidxs = [
            np.arange(i0, i0 + ilen) for i0, ilen in zip(idxs, blksizes)
        ]

        # reordered
        permidx = np.hstack([blkidxs[i] for i in sortidx])
        rperm = np.eye(amodal.shape[0])[permidx]

        tmodal = tmodal @ rperm
        amodal = rperm @ amodal @ rperm.T
        blksizes = blksizes[sortidx]

    elif sort is None:
        pass

    else:
        raise ValueError('unknown sort value "{}"'.format(sort))

    return amodal, tmodal, blksizes
Example #31
def antisymmetric_canonical_form(antisymmetric_matrix):
    """Compute the canonical form of an antisymmetric matrix.

    The input is a real, antisymmetric n x n matrix A, where n is even.
    Its canonical form is::

        A = R^T C R

    where R is a real, orthogonal matrix and C has the form::

        [  0     D ]
        [ -D     0 ]

    where D is a diagonal matrix with nonnegative entries.

    Args:
        antisymmetric_matrix(ndarray): An antisymmetric matrix with even
            dimension.

    Returns:
        canonical(ndarray): The canonical form C of antisymmetric_matrix
        orthogonal(ndarray): The orthogonal transformation R.
    """
    m, p = antisymmetric_matrix.shape

    if m != p or p % 2 != 0:
        raise ValueError('The input matrix must be square with even '
                         'dimension.')

    # Check that input matrix is antisymmetric
    matrix_plus_transpose = antisymmetric_matrix + antisymmetric_matrix.T
    maxval = numpy.max(numpy.abs(matrix_plus_transpose))
    if maxval > EQ_TOLERANCE:
        raise ValueError('The input matrix must be antisymmetric.')

    # Compute Schur decomposition
    canonical, orthogonal = schur(antisymmetric_matrix, output='real')

    # The returned form is block diagonal; we need to permute rows and columns
    # to put it into the form we want
    n = p // 2
    for i in range(1, n, 2):
        swap_rows(canonical, i, n + i - 1)
        swap_columns(canonical, i, n + i - 1)
        swap_columns(orthogonal, i, n + i - 1)
        if n % 2 != 0:
            swap_rows(canonical, n - 1, n + i)
            swap_columns(canonical, n - 1, n + i)
            swap_columns(orthogonal, n - 1, n + i)

    # Now we permute so that the upper right block is non-negative
    for i in range(n):
        if canonical[i, n + i] < -EQ_TOLERANCE:
            swap_rows(canonical, i, n + i)
            swap_columns(canonical, i, n + i)
            swap_columns(orthogonal, i, n + i)

    # Now we permute so that the nonzero entries are ordered by magnitude
    # We use insertion sort
    diagonal = canonical[range(n), range(n, 2 * n)]
    for i in range(n):
        # Insert the smallest element from the unsorted part of the list into
        # index i
        arg_min = numpy.argmin(diagonal[i:]) + i
        if arg_min != i:
            # Permute the upper right block
            swap_rows(canonical, i, arg_min)
            swap_columns(canonical, n + i, n + arg_min)
            swap_columns(orthogonal, n + i, n + arg_min)
            # Permute the lower left block
            swap_rows(canonical, n + i, n + arg_min)
            swap_columns(canonical, i, arg_min)
            swap_columns(orthogonal, i, arg_min)
            # Update diagonal
            swap_rows(diagonal, i, arg_min)

    return canonical, orthogonal.T
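The helpers `swap_rows` and `swap_columns` and the constant EQ_TOLERANCE are not shown; minimal in-place versions consistent with how they are used here (a sketch under that assumption, not the project's own definitions) could be:

EQ_TOLERANCE = 1e-8   # assumed tolerance, not the project's value

def swap_rows(M, i, j):
    """Swap rows i and j of M in place (for a 1-D array, swap elements i and j)."""
    if M.ndim == 1:
        M[i], M[j] = M[j], M[i]
    else:
        M[[i, j], :] = M[[j, i], :]

def swap_columns(M, i, j):
    """Swap columns i and j of M in place."""
    M[:, [i, j]] = M[:, [j, i]]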
Example #32
File: misc.py Project: MMaus/mutils
def pseudoSpect(A, npts=200, s=2., gridPointSelect=100, verbose=True,
                lstSqSolve=True):
    """ 
    original code from http://www.cs.ox.ac.uk/projects/pseudospectra/psa.m
    % psa.m - Simple code for 2-norm pseudospectra of given matrix A.
    %         Typically about N/4 times faster than the obvious SVD method.
    %         Comes with no guarantees!   - L. N. Trefethen, March 1999.
    
    parameter: A: the matrix to analyze
               npts: number of points at the grid
               s: axis limits (-s ... +s)
               gridPointSelect: ???
               verbose: prints progress messages
               lstSqSolve: if true, use least squares in algorithm where
                  solve could be used (probably) instead. (replacement for
                  ldivide in MatLab)
    """
    
    from scipy.linalg import schur, triu
    from pylab import (meshgrid, norm, dot, zeros, eye, diag, find,  linspace,                       
                       arange, isreal, inf, ones, lstsq, solve, sqrt, randn,
                       eig, all)

    ldiv = (lambda M1, M2: lstsq(M1, M2)[0]) if lstSqSolve else (lambda M1, M2: solve(M1, M2))

    def planerot(x):
        '''
        return (G,y)
        with a matrix G such that y = G*x with y[1] = 0    
        '''
        G = zeros((2,2))
        xn = x / norm(x)
        G[0,0] = xn[0]
        G[1,0] = -xn[1]
        G[0,1] = xn[1]
        G[1,1] = xn[0]
        return G, dot(G,x)

    xmin = -s
    xmax = s
    ymin = -s
    ymax = s;  
    x = linspace(xmin,xmax,npts,endpoint=False)
    y = linspace(ymin,ymax,npts,endpoint=False)
    xx,yy = meshgrid(x,y)
    zz = xx + 1j*yy
     
    #% Compute Schur form and plot eigenvalues:
    T,Z = schur(A,output='complex');
        
    T = triu(T)
    eigA = diag(T)
    
    # Reorder Schur decomposition and compress to interesting subspace:
    select = find( eigA.real > -250)           # % <- ALTER SUBSPACE SELECTION
    n = len(select)
    for i in arange(n):
        for k in arange(select[i]-1,i,-1): #:-1:i
            G = planerot([T[k,k+1],T[k,k]-T[k+1,k+1]] )[0].T[::-1,::-1]
            J = slice(k,k+2)
            T[:,J] = dot(T[:,J],G)
            T[J,:] = dot(G.T,T[J,:])
          
    T = triu(T[:n,:n])
    I = eye(n);
    
    # Compute resolvent norms by inverse Lanczos iteration and plot contours:
    sigmin = inf*ones((len(y),len(x)));
    #A = eye(5)
    niter = 0
    for i in arange(len(y)): # 1:length(y)        
        if all(isreal(A)) and (ymax == -ymin) and (i > len(y)/2):
            sigmin[i,:] = sigmin[len(y) - i,:]
        else:
            for jj in arange(len(x)):
                z = zz[i,jj]
                T1 = z * I - T 
                T2 = T1.conj().T
                if z.real < gridPointSelect:    # <- ALTER GRID POINT SELECTION
                    sigold = 0
                    qold = zeros((n,1))
                    beta = 0
                    H = zeros((100,100))                
                    q = randn(n,1) + 1j*randn(n,1)                
                    while norm(q) < 1e-8:
                        q = randn(n,1) + 1j*randn(n,1)                
                    q = q/norm(q)
                    for k in arange(99):
                        v = ldiv(T1,(ldiv(T2,q))) - dot(beta,qold)
                        #stop
                        alpha = dot(q.conj().T, v).real
                        v = v - alpha*q
                        beta = norm(v)
                        qold = q
                        q = v/beta
                        H[k+1,k] = beta
                        H[k,k+1] = beta
                        H[k,k] = alpha
                        if (alpha > 1e100):
                            sig = alpha 
                        else:
                            sig = max(abs(eig(H[:k+1,:k+1])[0]))
                        if (abs(sigold/sig-1) < .001) or (sig < 3 and k > 2):
                            break
                        sigold = sig
                        niter += 1
                        #print 'niter = ', niter
                
                  #%text(x(jj),y(i),num2str(k))         % <- SHOW ITERATION COUNTS
                    sigmin[i,jj] = 1./sqrt(sig);
                #end
                #  end
        if verbose:
            print('finished line', str(i), 'out of', str(len(y)))
    
    return x,y,sigmin
Example #33
def _eig(W):
    e, v = schur(W, "complex")
    return np.diag(e), v
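A quick check (not part of the original snippet; assumes `import numpy as np` and `from scipy.linalg import schur`): for a normal matrix the complex Schur form is diagonal, so _eig returns the eigenvalues together with orthonormal Schur vectors.

import numpy as np
from scipy.linalg import schur

W = np.array([[2.0, 1.0], [1.0, 2.0]])            # symmetric, hence normal
e, v = _eig(W)
print(np.allclose(sorted(e.real), [1.0, 3.0]))    # eigenvalues 1 and 3
print(np.allclose(v @ np.diag(e) @ v.conj().T, W))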
Example #34
# plt.pcolor(H.__abs__())
# plt.show()

P = H * 0

eigenvalues, eigenvectors = la.eigh(H)
for j in range(N_total):
    P += np.outer(eigenvectors[:, j], np.conj(
        eigenvectors[:, j])) if eigenvalues[j] <= 0 else 0

UVUV = np.linalg.multi_dot(
    [P, X_exp, P, Y_exp, P, X_exp_star, P, Y_exp_star, P])
M = UVUV + np.eye(N_total, N_total) - P

T, Z = la.schur(M)
U, s_vals, V = la.svd(M)

eigs = np.diag(T)
nubers = np.arange(N_total)

# plt.subplot(1,2,1)
# thetas = np.linspace(0, 2 * np.pi, 1000)
# x_circ = np.cos(thetas)
# y_circ = np.sin(thetas)
# plt.scatter((eigs).real, (eigs).imag)
# plt.plot(x_circ+1, y_circ)
# plt.plot(x_circ * cutoff, y_circ * cutoff)
#
# plt.subplot(1,2,2)
# plt.plot(eigs.__abs__())
Example #35
File: librom.py Project: wong-hl/sharpy
def balreal_direct_py(A, B, C, DLTI=True, Schur=False, full_outputs=False):
    r"""
    Find a balanced realisation of continuous-time (``DLTI = False``) and discrete-time (``DLTI = True``)
    LTI systems using scipy libraries.

    The function achieves a balanced realisation of the state-space system by first solving
    the Lyapunov equations. They are solved using the Bartels-Stewart algorithm for the
    Sylvester equation, which is based on a Schur decomposition of the A matrix.

    .. math::
        \mathbf{A\,W_c + W_c\,A^T + B\,B^T} &= 0  \\
        \mathbf{A^T\,W_o + W_o\,A + C^T\,C} &= 0

    to obtain the reachability and observability gramians, which are positive definite matrices.

    Then, the gramians are decomposed into their Cholesky factors such that:

    .. math::
        \mathbf{W_c} &= \mathbf{Q_c\,Q_c^T} \\
        \mathbf{W_o} &= \mathbf{Q_o\,Q_o^T}

    A singular value decomposition (SVD) of the product of the Cholesky factors is performed

    .. math:: (\mathbf{Q_o^T\,Q_c}) = \mathbf{U\,\Sigma\,V^*}

    The singular values are then used to build the transformation matrix :math:`\mathbf{T}`

    .. math::
        \mathbf{T} &= \mathbf{Q_c\,V\,\Sigma}^{-1/2} \\
        \mathbf{T}^{-1} &= \mathbf{\Sigma}^{-1/2}\,\mathbf{U^T\,Q_o^T}

    The balanced system is therefore of the form:

    .. math::
        \mathbf{A_b} &= \mathbf{T\,A\,T^{-1}} \\
        \mathbf{B_b} &= \mathbf{T\,B} \\
        \mathbf{C_b} &= \mathbf{C\,T^{-1}} \\
        \mathbf{D_b} &= \mathbf{D}

    Warnings:
        This function may be less computationally efficient than the ``balreal``
        Matlab implementation and does not offer the option to bound the realisation
        in frequency and time.

    Notes:
        Lyapunov equations are solved using the Bartels-Stewart algorithm for the
        Sylvester equation, which is based on a Schur decomposition of the A matrix.

    Args:
        A (np.ndarray): Plant Matrix
        B (np.ndarray): Input Matrix
        C (np.ndarray): Output Matrix
        DLTI (bool): Discrete time state-space flag
        Schur (bool): Use Schur decomposition to solve the Lyapunov equations

    Returns:
        tuple of np.ndarrays: Tuple of the form ``(S, T, Tinv)`` containing:
            - Singular values in diagonal matrix (``S``)
            - Transformation matrix (``T``).
            - Inverse transformation matrix(``Tinv``).

    References:
        Antoulas, A.C. Approximation of Large Scale Dynamical Systems. Chapter 7. Advances in Design and Control.
        SIAM. 2005.
    """

    ### select solver for Lyapunov equation
    # Notation reminder:
    # scipy: A X A.T - X = -Q
    # contr: A W A.T - W = - B B.T
    # obser: A.T W A - W = - C.T C
    if DLTI:
        sollyap = scalg.solve_discrete_lyapunov
    else:
        sollyap = scalg.solve_lyapunov

    # solve Lyapunov
    if Schur:
        # decompose A
        Atri, U = scalg.schur(A)
        # solve Lyapunov
        BBtri = np.dot(U.T, np.dot(B, np.dot(B.T, U)))
        CCtri = np.dot(U.T, np.dot(C.T, np.dot(C, U)))
        Wctri = sollyap(Atri, BBtri)
        Wotri = sollyap(Atri.T, CCtri)
        # reconstruct Wo,Wc
        Wc = np.dot(U, np.dot(Wctri, U.T))
        Wo = np.dot(U, np.dot(Wotri, U.T))
    else:
        Wc = sollyap(A, np.dot(B, B.T))
        Wo = sollyap(A.T, np.dot(C.T, C))

    # Cholesky factorisation: W = Q Q.T
    # Qc = scalg.cholesky(Wc).T
    # Qo = scalg.cholesky(Wo).T

    # build M matrix and SVD
    # M = np.dot(Qo.T, Qc)
    # U, s, Vh = scalg.svd(M)
    # S = np.diag(s)
    # Sinv = np.diag(1. / s)
    # V = Vh.T

    # Build transformation matrices
    # T = np.dot(Qc, np.dot(V, np.sqrt(Sinv)))
    # Tinv = np.dot(np.sqrt(Sinv), np.dot(U.T, Qo.T))

    # return S, T, Tinv

    ### Find transformation matrices
    # avoid Cholesky - unstable
    hsv_sq, Tinv = np.linalg.eig(np.dot(Wc, Wo))
    T = np.linalg.inv(Tinv)

    # sort
    iisort = np.argsort(hsv_sq)[::-1]
    hsv = np.sqrt(hsv_sq[iisort])
    T = T[:, iisort]
    Tinv = Tinv[iisort, :]

    if full_outputs is False:
        return hsv, T, Tinv

    else:
        # get square-root factors
        UT, QoT = scalg.qr(np.dot(np.diag(np.sqrt(hsv)), Tinv), pivoting=False)
        Vh, QcT = scalg.qr(np.dot(T, np.diag(np.sqrt(hsv))).T, pivoting=False)

        return hsv, UT.T, Vh, QcT.T, QoT.T
Example #36
File: misc.py Project: MMaus/mutils
def pseudoSpect(A,
                npts=200,
                s=2.,
                gridPointSelect=100,
                verbose=True,
                lstSqSolve=True):
    """ 
    original code from http://www.cs.ox.ac.uk/projects/pseudospectra/psa.m
    % psa.m - Simple code for 2-norm pseudospectra of given matrix A.
    %         Typically about N/4 times faster than the obvious SVD method.
    %         Comes with no guarantees!   - L. N. Trefethen, March 1999.
    
    parameter: A: the matrix to analyze
               npts: number of points at the grid
               s: axis limits (-s ... +s)
               gridPointSelect: ???
               verbose: prints progress messages
               lstSqSolve: if true, use least squares in algorithm where
                  solve could be used (probably) instead. (replacement for
                  ldivide in MatLab)
    """

    from scipy.linalg import schur, triu
    from pylab import (meshgrid, norm, dot, zeros, eye, diag, find, linspace,
                       arange, isreal, inf, ones, lstsq, solve, sqrt, randn,
                       eig, all)

    ldiv = (lambda M1, M2: lstsq(M1, M2)[0]) if lstSqSolve else (lambda M1, M2: solve(M1, M2))

    def planerot(x):
        '''
        return (G,y)
        with a matrix G such that y = G*x with y[1] = 0    
        '''
        G = zeros((2, 2))
        xn = x / norm(x)
        G[0, 0] = xn[0]
        G[1, 0] = -xn[1]
        G[0, 1] = xn[1]
        G[1, 1] = xn[0]
        return G, dot(G, x)

    xmin = -s
    xmax = s
    ymin = -s
    ymax = s
    x = linspace(xmin, xmax, npts, endpoint=False)
    y = linspace(ymin, ymax, npts, endpoint=False)
    xx, yy = meshgrid(x, y)
    zz = xx + 1j * yy

    #% Compute Schur form and plot eigenvalues:
    T, Z = schur(A, output='complex')

    T = triu(T)
    eigA = diag(T)

    # Reorder Schur decomposition and compress to interesting subspace:
    select = find(eigA.real > -250)  # % <- ALTER SUBSPACE SELECTION
    n = len(select)
    for i in arange(n):
        for k in arange(select[i] - 1, i, -1):  #:-1:i
            G = planerot([T[k, k + 1],
                          T[k, k] - T[k + 1, k + 1]])[0].T[::-1, ::-1]
            J = slice(k, k + 2)
            T[:, J] = dot(T[:, J], G)
            T[J, :] = dot(G.T, T[J, :])

    T = triu(T[:n, :n])
    I = eye(n)

    # Compute resolvent norms by inverse Lanczos iteration and plot contours:
    sigmin = inf * ones((len(y), len(x)))
    #A = eye(5)
    niter = 0
    for i in arange(len(y)):  # 1:length(y)
        if all(isreal(A)) and (ymax == -ymin) and (i > len(y) / 2):
            sigmin[i, :] = sigmin[len(y) - i, :]
        else:
            for jj in arange(len(x)):
                z = zz[i, jj]
                T1 = z * I - T
                T2 = T1.conj().T
                if z.real < gridPointSelect:  # <- ALTER GRID POINT SELECTION
                    sigold = 0
                    qold = zeros((n, 1))
                    beta = 0
                    H = zeros((100, 100))
                    q = randn(n, 1) + 1j * randn(n, 1)
                    while norm(q) < 1e-8:
                        q = randn(n, 1) + 1j * randn(n, 1)
                    q = q / norm(q)
                    for k in arange(99):
                        v = ldiv(T1, (ldiv(T2, q))) - dot(beta, qold)
                        #stop
                        alpha = dot(q.conj().T, v).real
                        v = v - alpha * q
                        beta = norm(v)
                        qold = q
                        q = v / beta
                        H[k + 1, k] = beta
                        H[k, k + 1] = beta
                        H[k, k] = alpha
                        if (alpha > 1e100):
                            sig = alpha
                        else:
                            sig = max(abs(eig(H[:k + 1, :k + 1])[0]))
                        if (abs(sigold / sig - 1) < .001) or (sig < 3
                                                              and k > 2):
                            break
                        sigold = sig
                        niter += 1
                        #print 'niter = ', niter

                #%text(x(jj),y(i),num2str(k))         % <- SHOW ITERATION COUNTS
                    sigmin[i, jj] = 1. / sqrt(sig)
                #end
                #  end
        if verbose:
            print('finished line', str(i), 'out of', str(len(y)))

    return x, y, sigmin
Example #37
File: main_SGL.py Project: kolia/subunits
R = simulate( V2, N_filters, nonlinearity=NL, N_cells=N_cells , sigma_spatial=[10.,3.],
              N_timebins = 1000000 )
 
#testR = simulate( V2, nonlinearity=NL, N_cells=N_cells , sigma_spatial=[20.,3.],
#                  N_timebins = 90000 )


total = float( np.sum([Nspikes for Nspikes in R['N_spikes']]) )

Nspikes = R['N_spikes']/total

dSTA  = np.concatenate(
        [STA[:,np.newaxis]-R['statistics']['features']['mean'][:,np.newaxis]
        for STA in R['statistics']['features']['STA']], axis=1)

D,Z = schur(R['statistics']['features']['cov']/2)
DD  = np.diag(D)
keep= DD>1e-10
P   =  (Z[:,keep] * np.sqrt(DD[keep])).T
y   =  np.dot ( (Z[:,keep] * 1/np.sqrt(DD[keep])).T , dSTA ) / 2

predictors    = [ block_diag(*[column*np.sqrt(Nspke) for Nspke in Nspikes]).T
                  for column in P.T]


start = time()

group_weights = [0.1 for _ in predictors]
weights       = [0.05*np.ones(pp.shape[1]) for pp in predictors]

r,coeffs  = sgl.initialize_group_lasso(predictors, (np.sqrt(Nspikes)*y).T.flatten())
Example #38
def spd_inv_split(W, epsilon=1e-10, method='QR', canonical_signs=False):
    """
    Compute :math:`W^{-1} = L L^T` of the symmetric positive-definite matrix :math:`W`.

    by first reducing W to a low-rank approximation that is truly spd.

    Parameters
    ----------
    W : ndarray((m,m), dtype=float)
        Symmetric positive-definite (spd) matrix.
    epsilon : float
        Truncation parameter. Eigenvalues with norms smaller than this cutoff will
        be removed.
    method : str
        Method to perform the decomposition of :math:`W` before inverting. Options are:

        * 'QR': QR-based robust eigenvalue decomposition of W
        * 'schur': Schur decomposition of W

     canonical_signs : boolean, default = False
        Fix signs in L, s. t. the largest element of in every column of L is positive.

    Returns
    -------
    L : ndarray((n, r))
        Matrix :math:`L` from the decomposition :math:`W^{-1} = L L^T`.

    """
    # check input
    assert _np.allclose(W.T, W), 'C0 is not a symmetric matrix'

    if (_np.shape(W)[0] == 1):
        L = 1./_np.sqrt(W[0,0])
    else:
        if method.lower() == 'qr':
            from .eig_qr.eig_qr import eig_qr
            s, V = eig_qr(W)
        # compute the Eigenvalues of C0 using Schur factorization
        elif method.lower() == 'schur':
            from scipy.linalg import schur
            S, V = schur(W)
            s = _np.diag(S)
        else:
            raise ValueError('method not implemented: ' + method)

        s, V = sort_by_norm(s, V) # sort them

        # determine the cutoff. We know that C0 is an spd matrix,
        # so we select the truncation threshold such that everything that is negative vanishes
        evmin = _np.min(s)
        if evmin < 0:
            epsilon = max(epsilon, -evmin + 1e-16)

        # determine effective rank m and perform low-rank approximations.
        evnorms = _np.abs(s)
        n = _np.shape(evnorms)[0]
        m = n - _np.searchsorted(evnorms[::-1], epsilon)
        Vm = V[:, 0:m]
        sm = s[0:m]

        if canonical_signs:
            # enforce canonical eigenvector signs
            for j in range(m):
                jj = _np.argmax(_np.abs(Vm[:, j]))
                Vm[:, j] *= _np.sign(Vm[jj, j])

        L = _np.dot(Vm, _np.diag(1.0/_np.sqrt(sm)))

    # return split
    return L
Example #39
File: _solvers.py Project: ilayn/harold
def _solve_continuous_lyapunov(A, Y):
    '''
            Solves A.T X + X A + Y = 0

    '''
    mat33 = np.zeros((3, 3), dtype=float)
    mat44 = np.zeros((4, 4), dtype=float)
    i2 = np.eye(2, dtype=float)

    def mini_sylvester(Ar, Yt, Al=None):
        '''
        A helper function to solve the 1x1 or 2x2 Sylvester equations
        arising in the solution of the continuous-time Lyapunov equations

        Note that, this doesn't have any protection against LinAlgError
        hence the caller needs to `try` to see whether it is properly
        executed.
        '''

        # The symmetric problem
        if Al is None:
            if Ar.size == 1:
                return - Yt / (Ar * 2)
            else:
                a, b, c, d = Ar.reshape(1, 4).tolist()[0]

                mat33[0, :] = [2*a, 2*c, 0]
                mat33[1, :] = [b, a + d, c]
                mat33[2, :] = [0, 2*b, 2*d]
                a, b, c = solve(mat33, -Yt.reshape(-1, 1)[[0, 1, 3], :]
                                ).ravel().tolist()

                return np.array([[a, b], [b, c]], dtype=float)

        # Nonsymmetric
        elif Ar.size == 4:
            if Al.size == 4:
                a00, a01, a10, a11 = Al.reshape(1, 4).tolist()[0]
                b00, b01, b10, b11 = Ar.reshape(1, 4).tolist()[0]

                mat44[0, :] = [a00+b00, b10, a10, 0]
                mat44[1, :] = [b01, a00 + b11, 0, a10]
                mat44[2, :] = [a01, 0, a11 + b00, b10]
                mat44[3, :] = [0, a01, b01, a11 + b11]

                return solve(mat44, -Yt.reshape(-1, 1)).reshape(2, 2)
            # Ar is 2x2 , Al is scalar
            else:
                return solve(Ar.T + Al[0, 0] * i2, -Yt.T).T

        elif Al.size == 4:
            return solve(Al.T + Ar[0, 0] * i2, -Yt)
        else:
            return -Yt / (Ar + Al)

    # =============================
    # Prepare the data
    # =============================
    # if the problem is small then solve directly
    if A.shape[0] < 3:
        return mini_sylvester(A, Y)

    As, S = schur(A, output='real')
    Ys = S.T @ Y @ S
    n = As.shape[0]

    # If there are nontrivial entries on the subdiagonal, we have a 2x2 block.
    # Based on that we have the block sizes `bz` and starting positions `bs`.

    subdiag_entries = np.abs(As[range(1, n), range(0, n-1)]) > 0
    subdiag_indices = [ind for ind, x in enumerate(subdiag_entries) if x]
    bz = np.ones(n)
    for x in subdiag_indices:
        bz[x] = 2
        bz[x+1] = np.nan

    bz = bz[~np.isnan(bz)].astype(int)
    bs = [0] + np.cumsum(bz[:-1]).tolist() + [None]
    total_blk = bz.size
    Xs = np.empty_like(Y)

    # =============================
    #  Main Loop
    # =============================

    # Now we know how the matrices should be partitioned. We then start
    # from the upper left corner and alternate between updating the
    # Y term and solving the next entry of X. We walk over X row-wise
    for row in range(total_blk):
        thisr = bs[row]
        nextr = bs[row+1]

        # This block is executed at the second and further spins of the
        # for loop. Humans should start reading from (**)
        if row != 0:
            Ys[thisr:nextr, thisr:] +=  \
                      Xs[thisr:nextr, 0:thisr] @ As[0:thisr, thisr:]

        # (**) Solve for the diagonal via Akk , Ykk and place it in Xkk
        tempx = mini_sylvester(As[thisr:nextr, thisr:nextr],
                               Ys[thisr:nextr, thisr:nextr])

#        X_placer( tempx , row , row )
        Xs[thisr:nextr, thisr:nextr] = tempx
        # Update Y terms right of the diagonal
        Ys[thisr:nextr, nextr:] += tempx @ As[thisr:nextr, nextr:]

        # Walk over upper triangular terms
        for col in range(row + 1, total_blk):
            thisc = bs[col]
            nextc = bs[col+1]

            # The corresponding Y term has already been updated, solve for X
            tempx = mini_sylvester(As[thisc:nextc, thisc:nextc],
                                   Ys[thisr:nextr, thisc:nextc],
                                   As[thisr:nextr, thisr:nextr])

            # Place it in the data
            Xs[thisr:nextr, thisc:nextc] = tempx
            Xs[thisc:nextc, thisr:nextr] = tempx.T

            # Update Y towards left
            Ys[thisr:nextr, nextc:] += tempx @ As[thisc:nextc, nextc:]

            # Update Y downwards
            Ys[nextr:nextc, thisc:nextc] += \
                As[thisr:nextr, nextr:nextc].T @ tempx

    return S @ Xs @ S.T
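Verification sketch (not part of the original module; assumes `import numpy as np` and `from scipy.linalg import schur, solve` as used in the function body):

import numpy as np
from scipy.linalg import schur, solve

rng = np.random.default_rng(3)
A = rng.standard_normal((5, 5)) - 5 * np.eye(5)          # shifted so the equation is well posed
Y = rng.standard_normal((5, 5))
Y = Y + Y.T                                              # symmetric right-hand side
X = _solve_continuous_lyapunov(A, Y)
print(np.allclose(A.T @ X + X @ A + Y, 0, atol=1e-10))   # residual of A.T X + X A + Y = 0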
Example #40
File: sylvester.py Project: pymor/pymor
def solve_sylv_schur(A, Ar, E=None, Er=None, B=None, Br=None, C=None, Cr=None):
    r"""Solve Sylvester equation by Schur decomposition.

    Solves Sylvester equation

    .. math::
        A V E_r^T + E V A_r^T + B B_r^T = 0

    or

    .. math::
        A^T W E_r + E^T W A_r + C^T C_r = 0

    or both using (generalized) Schur decomposition (Algorithms 3 and 4
    in [BKS11]_), if the necessary parameters are given.

    Parameters
    ----------
    A
        Real |Operator|.
    Ar
        Real |Operator|.
        It is converted into a |NumPy array| using
        :func:`~pymor.algorithms.to_matrix.to_matrix`.
    E
        Real |Operator| or `None` (then assumed to be the identity).
    Er
        Real |Operator| or `None` (then assumed to be the identity).
        It is converted into a |NumPy array| using
        :func:`~pymor.algorithms.to_matrix.to_matrix`.
    B
        Real |Operator| or `None`.
    Br
        Real |Operator| or `None`.
        It is assumed that `Br.range.from_numpy` is implemented.
    C
        Real |Operator| or `None`.
    Cr
        Real |Operator| or `None`.
        It is assumed that `Cr.source.from_numpy` is implemented.

    Returns
    -------
    V
        Returned if `B` and `Br` are given, |VectorArray| from
        `A.source`.
    W
        Returned if `C` and `Cr` are given, |VectorArray| from
        `A.source`.

    Raises
    ------
    ValueError
        If `V` and `W` cannot be returned.
    """
    # check types
    assert isinstance(A, OperatorInterface) and A.linear and A.source == A.range
    assert isinstance(Ar, OperatorInterface) and Ar.linear and Ar.source == Ar.range

    assert E is None or isinstance(E, OperatorInterface) and E.linear and E.source == E.range == A.source
    if E is None:
        E = IdentityOperator(A.source)
    assert Er is None or isinstance(Er, OperatorInterface) and Er.linear and Er.source == Er.range == Ar.source

    compute_V = B is not None and Br is not None
    compute_W = C is not None and Cr is not None

    if not compute_V and not compute_W:
        raise ValueError('Not enough parameters are given to solve a Sylvester equation.')

    if compute_V:
        assert isinstance(B, OperatorInterface) and B.linear and B.range == A.source
        assert isinstance(Br, OperatorInterface) and Br.linear and Br.range == Ar.source
        assert B.source == Br.source

    if compute_W:
        assert isinstance(C, OperatorInterface) and C.linear and C.source == A.source
        assert isinstance(Cr, OperatorInterface) and Cr.linear and Cr.source == Ar.source
        assert C.range == Cr.range

    # convert reduced operators
    Ar = to_matrix(Ar, format='dense')
    r = Ar.shape[0]
    if Er is not None:
        Er = to_matrix(Er, format='dense')

    # (Generalized) Schur decomposition
    if Er is None:
        TAr, Z = spla.schur(Ar, output='complex')
        Q = Z
    else:
        TAr, TEr, Q, Z = spla.qz(Ar, Er, output='complex')

    # solve for V, from the last column to the first
    if compute_V:
        V = A.source.empty(reserve=r)

        BrTQ = Br.apply_adjoint(Br.range.from_numpy(Q.T))
        BBrTQ = B.apply(BrTQ)
        for i in range(-1, -r - 1, -1):
            rhs = -BBrTQ[i].copy()
            if i < -1:
                if Er is not None:
                    rhs -= A.apply(V.lincomb(TEr[i, :i:-1].conjugate()))
                rhs -= E.apply(V.lincomb(TAr[i, :i:-1].conjugate()))
            TErii = 1 if Er is None else TEr[i, i]
            eAaE = TErii.conjugate() * A + TAr[i, i].conjugate() * E
            V.append(eAaE.apply_inverse(rhs))

        V = V.lincomb(Z.conjugate()[:, ::-1])
        V = V.real

    # solve for W, from the first column to the last
    if compute_W:
        W = A.source.empty(reserve=r)

        CrZ = Cr.apply(Cr.source.from_numpy(Z.T))
        CTCrZ = C.apply_adjoint(CrZ)
        for i in range(r):
            rhs = -CTCrZ[i].copy()
            if i > 0:
                if Er is not None:
                    rhs -= A.apply_adjoint(W.lincomb(TEr[:i, i]))
                rhs -= E.apply_adjoint(W.lincomb(TAr[:i, i]))
            TErii = 1 if Er is None else TEr[i, i]
            eAaE = TErii.conjugate() * A + TAr[i, i].conjugate() * E
            W.append(eAaE.apply_inverse_adjoint(rhs))

        W = W.lincomb(Q.conjugate())
        W = W.real

    if compute_V and compute_W:
        return V, W
    elif compute_V:
        return V
    else:
        return W
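For the special case E = E_r = I with plain NumPy data, the equation A V + V A_r^T + B B_r^T = 0 solved above has a dense analogue that `scipy.linalg.solve_sylvester` handles directly. The sketch below uses random matrices instead of pymor operators, purely to illustrate the equation being solved.

# Dense-analogue sketch of A V + V Ar.T + B Br.T = 0 with E = Er = I.
import numpy as np
from scipy.linalg import solve_sylvester

rng = np.random.default_rng(1)
n, r, m = 8, 3, 2
A = rng.standard_normal((n, n)) - n * np.eye(n)    # stable full-order matrix
Ar = rng.standard_normal((r, r)) - r * np.eye(r)   # stable reduced matrix
B = rng.standard_normal((n, m))
Br = rng.standard_normal((r, m))

# solve_sylvester(a, b, q) solves a X + X b = q, so take b = Ar.T and q = -B Br.T.
V = solve_sylvester(A, Ar.T, -B @ Br.T)
print(np.linalg.norm(A @ V + V @ Ar.T + B @ Br.T))  # residual near machine precision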
Example #41
0
                       model['nonlinearity'](np.dot(bigU,x))} )
simulate = memory.cache(simulator)
R     = simulate( model , 1000000 , possible_subunits )
testR = simulate( model , 1000000 ,          subunits )

total = float( np.sum([Nspikes for Nspikes in R['N_spikes']]) )

Nspikes = R['N_spikes']/total

dSTA  = np.concatenate(
        [STA[:,np.newaxis]-R['statistics']['features']['mean'][:,np.newaxis]
        for STA in R['statistics']['features']['STA']], axis=1)

Cin = R['statistics']['features']['cov']/2

D,Z = schur(Cin)
DD  = np.diag(D)
keep= DD>1e-10
P   =  (Z[:,keep] * np.sqrt(DD[keep])).T
y   =  np.dot ( (Z[:,keep] * 1/np.sqrt(DD[keep])).T , dSTA ) / 2

irls = memory.cache(IRLS)
V, iW = irls( y, P, x=0, disp_every=1000, lam=0.015, maxiter=1000000 , 
              ftol=1e-5, nonzero=1e-1)
print('V')
kb.print_sparse_rows( V, precision=1e-1 )

keepers = np.array( [sum(abs(v))>3.e-1 for v in V] )
U        = filters[keepers,:]
V1       = V[keepers,:].T
Example #42
0
File: _solvers.py Project: andresbh/harold
def _solve_discrete_lyapunov(A, Y):
    '''
                 Solves     A.T X A - X + Y = 0
    '''
    mat33 = np.zeros((3, 3), dtype=float)
    mat44 = np.zeros((4, 4), dtype=float)
    i2 = np.eye(2)

    def mini_sylvester(Al, Yt, Ar=None):
        '''
        A helper function to solve the 1x1 or 2x2 Sylvester equations
        arising in the solution of the discrete-time Lyapunov equation

        Note that this doesn't have any protection against LinAlgError,
        hence the caller needs to `try` to see whether it is properly
        executed.
        '''
        # The symmetric problem
        if Ar is None:
            if Al.size == 1:
                return -Yt / (Al**2 - 1)
            else:
                a, b, c, d = Al.reshape(1, 4).tolist()[0]

                mat33[0, :] = [a**2 - 1, 2 * a * c, c**2]
                mat33[1, :] = [a * b, a * d + b * c - 1, c * d]
                mat33[2, :] = [b**2, 2 * b * d, d**2 - 1]
                a, b, c = solve(
                    mat33, -Yt.reshape(-1, 1)[[0, 1, 3], :]).ravel().tolist()

                return np.array([[a, b], [b, c]], dtype=float)

        # Nonsymmetric
        elif Al.size == 4:
            if Ar.size == 4:
                a00, a01, a10, a11 = Al.ravel().tolist()
                b00, b01, b10, b11 = Ar.ravel().tolist()

                mat44[0, :] = [a00 * b00 - 1, a00 * b10, a10 * b00, a10 * b10]
                mat44[1, :] = [a00 * b01, a00 * b11 - 1, a10 * b01, a10 * b11]
                mat44[2, :] = [a01 * b00, a01 * b10, a11 * b00 - 1, a11 * b10]
                mat44[3, :] = [a01 * b01, a01 * b11, a11 * b01, a11 * b11 - 1]

                return solve(mat44, -Yt.reshape(-1, 1)).reshape(2, 2)
            else:
                return solve(Al.T * Ar[0, 0] - i2, -Yt)

        elif Ar.size == 4:
            return solve(Ar.T * Al[0, 0] - i2, -Yt.T).T
        else:
            return -Yt / (Ar * Al - 1)

    # =====================================

    if A.shape[0] < 3:
        return mini_sylvester(A, Y)

    As, S = schur(A, output='real')
    Ys = S.T @ Y @ S

    # If there are nontrivial entries on the subdiagonal, we have a 2x2 block.
    # Based on that we have the block sizes `bz` and starting positions `bs`.
    n = As.shape[0]
    subdiag_entries = np.abs(As[range(1, n), range(0, n - 1)]) > 0
    subdiag_indices = [ind for ind, x in enumerate(subdiag_entries) if x]
    bz = np.ones(n)
    for x in subdiag_indices:
        bz[x] = 2
        bz[x + 1] = np.nan

    bz = bz[~np.isnan(bz)].astype(int)
    bs = [0] + np.cumsum(bz[:-1]).tolist() + [None]
    total_blk = bz.size
    Xs = np.empty_like(Y)

    # =============================
    #  Main Loop
    # =============================

    # Now we know how the matrices should be partitioned. We then start
    # from the upper left corner and alternate between updating the
    # Y term and solving the next entry of X. We walk over X row-wise

    for row in range(total_blk):
        thisr = bs[row]
        nextr = bs[row + 1]

        if row != 0:
            Ys[thisr:nextr, thisr:nextr] +=  \
                As[thisr:nextr, thisr:nextr].T @ \
                Xs[thisr:nextr, :thisr] @ \
                As[:thisr, thisr:nextr]

        # (**) Solve for the diagonal via Akk , Ykk and place it in Xkk
        tempx = mini_sylvester(As[thisr:nextr, thisr:nextr], Ys[thisr:nextr,
                                                                thisr:nextr])

        Xs[thisr:nextr, thisr:nextr] = tempx
        XA_of_row = Xs[thisr:nextr, :nextr] @ As[:nextr, thisr:]

        # Update Y terms right of the diagonal
        Ys[thisr:nextr, thisr:] += As[thisr:nextr, thisr:nextr].T @ XA_of_row

        # Walk over upper triangular terms
        for col in range(row + 1, total_blk):
            thisc = bs[col]
            nextc = bs[col + 1]

            # The corresponding Y term has already been updated, solve for X
            tempx = mini_sylvester(As[thisr:nextr, thisr:nextr],
                                   Ys[thisr:nextr, thisc:nextc],
                                   As[thisc:nextc, thisc:nextc])

            # Place it in the data
            Xs[thisr:nextr, thisc:nextc] = tempx
            Xs[thisc:nextc, thisr:nextr] = tempx.T

            # Post column solution Y update
            # XA terms
            tempa = tempx @ As[thisc:nextc, thisc:]
            # Update Y towards left
            Ys[thisr:nextr, thisc:] += As[thisr:nextr, thisr:nextr].T @ tempa
            # Update Y downwards
            XA_of_row[:, thisc - thisr:] += tempa

            ugly_sl = slice(thisc - thisr,
                            nextc - thisr if nextc is not None else None)

            Ys[nextr:nextc, thisc:nextc] += \
                As[thisr:nextr, nextr:nextc].T @ XA_of_row[:, ugly_sl]

    return S @ Xs @ S.T
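The discrete-time solver above admits a similar sanity check: `scipy.linalg.solve_discrete_lyapunov(a, q)` solves `a x a^H - x + q = 0`, so passing `A.T` as its first argument matches the `A.T X A - X + Y = 0` convention used here. A minimal sketch, assuming `_solve_discrete_lyapunov` is importable:

# Cross-check sketch for the discrete-time solver above.
import numpy as np
from scipy.linalg import solve_discrete_lyapunov

rng = np.random.default_rng(2)
n = 5
A = rng.standard_normal((n, n))
A /= 2 * np.linalg.norm(A, 2)          # scale so the spectral radius is below one
Y = rng.standard_normal((n, n))
Y = Y + Y.T

X = _solve_discrete_lyapunov(A, Y)     # targets A.T X A - X + Y = 0
X_ref = solve_discrete_lyapunov(A.T, Y)
print(np.allclose(X, X_ref), np.linalg.norm(A.T @ X @ A - X + Y))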
def williamson(V, tol=1e-11):
    r"""Williamson decomposition of positive-definite (real) symmetric matrix.

    See :ref:`williamson`.

    Note that it is assumed that the symplectic form is

    .. math:: \Omega = \begin{bmatrix}0&I\\-I&0\end{bmatrix}

    where :math:`I` is the identity matrix and :math:`0` is the zero matrix.

    See https://math.stackexchange.com/questions/1171842/finding-the-symplectic-matrix-in-williamsons-theorem/2682630#2682630

    Args:
        V (array[float]): positive definite symmetric (real) matrix
        tol (float): the tolerance used when checking if the matrix is symmetric: :math:`|V-V^T| \leq` tol

    Returns:
        tuple[array,array]: ``(Db, S)`` where ``Db`` is a diagonal matrix
            and ``S`` is a symplectic matrix such that :math:`V = S^T Db S`
    """
    (n, m) = V.shape

    if n != m:
        raise ValueError("The input matrix is not square")

    diffn = np.linalg.norm(V-np.transpose(V))

    if diffn >= tol:
        raise ValueError("The input matrix is not symmetric")

    if n % 2 != 0:
        raise ValueError(
            "The input matrix must have an even number of rows/columns")

    n = n//2
    omega = sympmat(n)
    rotmat = changebasis(n)
    vals = np.linalg.eigvalsh(V)

    for val in vals:
        if val <= 0:
            raise ValueError("Input matrix is not positive definite")

    Mm12 = sqrtm(np.linalg.inv(V)).real
    r1 = Mm12 @ omega @ Mm12
    s1, K = schur(r1)
    X = np.array([[0, 1], [1, 0]])
    I = np.identity(2)
    seq = []

    # In what follows I construct a permutation matrix p  so that the Schur matrix has
    # only positive elements above the diagonal
    # Also the Schur matrix uses the x_1,p_1, ..., x_n,p_n  ordering thus I use rotmat to
    # go to the ordering x_1, ..., x_n, p_1, ... , p_n

    for i in range(n):
        if s1[2*i, 2*i+1] > 0:
            seq.append(I)
        else:
            seq.append(X)

    p = block_diag(*seq)
    Kt = K @ p
    s1t = p @ s1 @ p
    dd = np.transpose(rotmat) @ s1t @ rotmat
    Ktt = Kt @ rotmat
    Db = np.diag([1/dd[i, i+n] for i in range(n)] + [1/dd[i, i+n]
                                                     for i in range(n)])
    S = Mm12 @ Ktt @ sqrtm(Db)
    return Db, np.linalg.inv(S).T
Example #44
0
# fn = ['J', 'bee', 'mee']
fn = ['J', 'beh', 'meh']
writer = csv.DictWriter(
    open('eh_varyingJ_size%s_PBC_sigma%s.csv' % csvParas, 'w'), fn)
writer.writeheader()

J = 0.1
for i in range(400):
    Jc = '{:.2f}'.format(J)
    if (ene[i][1] - ene[i][0]) < 1e-10:
        wfArraySub = wfArray[i][:fold]
        T = np.zeros((fold, fold), dtype=complex)
        for i in range(fold):
            for j in range(fold):
                T[i][j] = np.vdot(wfArraySub[i], Translation(wfArraySub[j]))
        U, X = la.schur(T)  # T = XUX^{\dagger}
        # print(Jc, np.angle(U[0][0], deg=True), np.angle(U[1][1], deg=True))
        wfArrayTrans = np.zeros((fold, dim), dtype=complex)
        for i in range(fold):
            for j in range(fold):
                wfArrayTrans[i] += X[:, i][j] * wfArraySub[j]
        if np.angle(U[0][0]) > 0:
            wf = wfArrayTrans[0]
        else:
            wf = wfArrayTrans[1]

    else:
        wf = wfArray[i][0]
        phase = np.vdot(wf, Translation(wf))
        # print(Jc, np.angle(phase))
    be = SpatialEE(wf)
Example #45
0
    def test_mb03rd(self):
        """ Test for Schur form reduction.

        RvP, 31 Jul 2019"""

        test1_A = np.array([[1., -1., 1., 2., 3., 1., 2., 3.],
                            [1., 1., 3., 4., 2., 3., 4., 2.],
                            [0., 0., 1., -1., 1., 5., 4., 1.],
                            [0., 0., 0., 1., -1., 3., 1., 2.],
                            [0., 0., 0., 1., 1., 2., 3., -1.],
                            [0., 0., 0., 0., 0., 1., 5., 1.],
                            [0., 0., 0., 0., 0., 0., 0.99999999, -0.99999999],
                            [0., 0., 0., 0., 0., 0., 0.99999999, 0.99999999]])
        test1_n = test1_A.shape[0]

        test1_Ar = np.array([
            [
                1.0000, -1.0000, -1.2247, -0.7071, -3.4186, 1.4577, 0.0000,
                0.0000
            ],
            [1.0000, 1.0000, 0.0000, 1.4142, -5.1390, 3.1637, 0.0000, 0.0000],
            [0.0000, 0.0000, 1.0000, -1.7321, -0.0016, 2.0701, 0.0000, 0.0000],
            [0.0000, 0.0000, 0.5774, 1.0000, 0.7516, 1.1379, 0.0000, 0.0000],
            [0.0000, 0.0000, 0.0000, 0.0000, 1.0000, -5.8606, 0.0000, 0.0000],
            [0.0000, 0.0000, 0.0000, 0.0000, 0.1706, 1.0000, 0.0000, 0.0000],
            [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000, -0.8850],
            [0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 1.0000],
        ])

        test1_Xr = np.array(
            [[1.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.9045, 0.1957],
             [0.0000, 1.0000, 0.0000, 0.0000, 0.0000, 0.0000, -0.3015, 0.9755],
             [
                 0.0000, 0.0000, 0.8165, 0.0000, -0.5768, -0.0156, -0.3015,
                 0.0148
             ],
             [
                 0.0000, 0.0000, -0.4082, 0.7071, -0.5768, -0.0156, 0.0000,
                 -0.0534
             ],
             [
                 0.0000, 0.0000, -0.4082, -0.7071, -0.5768, -0.0156, 0.0000,
                 0.0801
             ],
             [0.0000, 0.0000, 0.0000, 0.0000, -0.0276, 0.9805, 0.0000, 0.0267],
             [0.0000, 0.0000, 0.0000, 0.0000, 0.0332, -0.0066, 0.0000, 0.0000],
             [0.0000, 0.0000, 0.0000, 0.0000, 0.0011, 0.1948, 0.0000, 0.0000]])

        test1_W = np.array([
            1 + 1j, 1 - 1j, 1 + 1j, 1 - 1j, 0.99999 + 0.99999j,
            0.99999 - 0.99999j, 1., 1.
        ])

        test1_pmax = 1e3
        test1_tol = 0.01
        # create schur form with scipy
        A, X = schur(test1_A)
        Ah, Xh = np.copy(A), np.copy(X)
        # on this basis, get the transform
        Ar, Xr, blsize, W = mb03rd(test1_n, A, X, 'U', 'S', test1_pmax,
                                   test1_tol)
        # ensure X and A are unchanged
        assert_allclose(A, Ah)
        assert_allclose(X, Xh)
        # compare to test case results
        assert_allclose(Ar, test1_Ar, atol=0.0001)
        assert_allclose(Xr, test1_Xr, atol=0.0001)
        assert_allclose(W, test1_W, atol=0.0001)

        # Test that the non sorting options do not throw errors and that Xr is
        # returned as None for jobx='N'
        for sort in ['N', 'C', 'B']:
            Ar, Xr, blsize, W = mb03rd(test1_n, A, X, 'N', sort, test1_pmax,
                                       test1_tol)
            assert Xr is None
Example #46
0
File: _solvers.py Project: andresbh/harold
def _solve_continuous_lyapunov(A, Y):
    '''
            Solves A.T X + X A + Y = 0

    '''
    mat33 = np.zeros((3, 3), dtype=float)
    mat44 = np.zeros((4, 4), dtype=float)
    i2 = np.eye(2, dtype=float)

    def mini_sylvester(Ar, Yt, Al=None):
        '''
        A helper function to solve the 1x1 or 2x2 Sylvester equations
        arising in the solution of the continuous-time Lyapunov equations

        Note that this doesn't have any protection against LinAlgError,
        hence the caller needs to `try` to see whether it is properly
        executed.
        '''

        # The symmetric problem
        if Al is None:
            if Ar.size == 1:
                return -Yt / (Ar * 2)
            else:
                a, b, c, d = Ar.reshape(1, 4).tolist()[0]

                mat33[0, :] = [2 * a, 2 * c, 0]
                mat33[1, :] = [b, a + d, c]
                mat33[2, :] = [0, 2 * b, 2 * d]
                a, b, c = solve(
                    mat33, -Yt.reshape(-1, 1)[[0, 1, 3], :]).ravel().tolist()

                return np.array([[a, b], [b, c]], dtype=float)

        # Nonsymmetric
        elif Ar.size == 4:
            if Al.size == 4:
                a00, a01, a10, a11 = Al.reshape(1, 4).tolist()[0]
                b00, b01, b10, b11 = Ar.reshape(1, 4).tolist()[0]

                mat44[0, :] = [a00 + b00, b10, a10, 0]
                mat44[1, :] = [b01, a00 + b11, 0, a10]
                mat44[2, :] = [a01, 0, a11 + b00, b10]
                mat44[3, :] = [0, a01, b01, a11 + b11]

                return solve(mat44, -Yt.reshape(-1, 1)).reshape(2, 2)
            # Ar is 2x2 , Al is scalar
            else:
                return solve(Ar.T + Al[0, 0] * i2, -Yt.T).T

        elif Al.size == 4:
            return solve(Al.T + Ar[0, 0] * i2, -Yt)
        else:
            return -Yt / (Ar + Al)

    # =============================
    # Prepare the data
    # =============================
    # if the problem is small then solve directly
    if A.shape[0] < 3:
        return mini_sylvester(A, Y)

    As, S = schur(A, output='real')
    Ys = S.T @ Y @ S
    n = As.shape[0]

    # If there are nontrivial entries on the subdiagonal, we have a 2x2 block.
    # Based on that we have the block sizes `bz` and starting positions `bs`.

    subdiag_entries = np.abs(As[range(1, n), range(0, n - 1)]) > 0
    subdiag_indices = [ind for ind, x in enumerate(subdiag_entries) if x]
    bz = np.ones(n)
    for x in subdiag_indices:
        bz[x] = 2
        bz[x + 1] = np.nan

    bz = bz[~np.isnan(bz)].astype(int)
    bs = [0] + np.cumsum(bz[:-1]).tolist() + [None]
    total_blk = bz.size
    Xs = np.empty_like(Y)

    # =============================
    #  Main Loop
    # =============================

    # Now we know how the matrices should be partitioned. We then start
    # from the upper left corner and alternate between updating the
    # Y term and solving the next entry of X. We walk over X row-wise
    for row in range(total_blk):
        thisr = bs[row]
        nextr = bs[row + 1]

        # This block is executed at the second and further spins of the
        # for loop. Humans should start reading from (**)
        if row != 0:
            Ys[thisr:nextr, thisr:] +=  \
                      Xs[thisr:nextr, 0:thisr] @ As[0:thisr, thisr:]

        # (**) Solve for the diagonal via Akk , Ykk and place it in Xkk
        tempx = mini_sylvester(As[thisr:nextr, thisr:nextr], Ys[thisr:nextr,
                                                                thisr:nextr])

        #        X_placer( tempx , row , row )
        Xs[thisr:nextr, thisr:nextr] = tempx
        # Update Y terms right of the diagonal
        Ys[thisr:nextr, nextr:] += tempx @ As[thisr:nextr, nextr:]

        # Walk over upper triangular terms
        for col in range(row + 1, total_blk):
            thisc = bs[col]
            nextc = bs[col + 1]

            # The corresponding Y term has already been updated, solve for X
            tempx = mini_sylvester(As[thisc:nextc, thisc:nextc],
                                   Ys[thisr:nextr, thisc:nextc],
                                   As[thisr:nextr, thisr:nextr])

            # Place it in the data
            Xs[thisr:nextr, thisc:nextc] = tempx
            Xs[thisc:nextc, thisr:nextr] = tempx.T

            # Update Y towards left
            Ys[thisr:nextr, nextc:] += tempx @ As[thisc:nextc, nextc:]

            # Update Y downwards
            Ys[nextr:nextc, thisc:nextc] += \
                As[thisr:nextr, nextr:nextc].T @ tempx

    return S @ Xs @ S.T
Example #47
0
File: _solvers.py Project: ilayn/harold
def _solve_discrete_lyapunov(A, Y):
    '''
                 Solves     A.T X A - X + Y = 0
    '''
    mat33 = np.zeros((3, 3), dtype=float)
    mat44 = np.zeros((4, 4), dtype=float)
    i2 = np.eye(2)

    def mini_sylvester(Al, Yt, Ar=None):
        '''
        A helper function to solve the 1x1 or 2x2 Sylvester equations
        arising in the solution of the discrete-time Lyapunov equation

        Note that this doesn't have any protection against LinAlgError,
        hence the caller needs to `try` to see whether it is properly
        executed.
        '''
        # The symmetric problem
        if Ar is None:
            if Al.size == 1:
                return - Yt / (Al ** 2 - 1)
            else:
                a, b, c, d = Al.reshape(1, 4).tolist()[0]

                mat33[0, :] = [a**2 - 1, 2*a*c, c ** 2]
                mat33[1, :] = [a*b, a*d + b*c - 1, c*d]
                mat33[2, :] = [b ** 2, 2*b*d, d ** 2 - 1]
                a, b, c = solve(mat33, -Yt.reshape(-1, 1)[[0, 1, 3], :]
                                ).ravel().tolist()

                return np.array([[a, b], [b, c]], dtype=float)

        # Nonsymmetric
        elif Al.size == 4:
            if Ar.size == 4:
                a00, a01, a10, a11 = Al.ravel().tolist()
                b00, b01, b10, b11 = Ar.ravel().tolist()

                mat44[0, :] = [a00*b00 - 1, a00*b10, a10*b00, a10*b10]
                mat44[1, :] = [a00*b01, a00*b11 - 1, a10*b01, a10*b11]
                mat44[2, :] = [a01*b00, a01*b10, a11*b00 - 1, a11*b10]
                mat44[3, :] = [a01*b01, a01*b11, a11*b01, a11*b11 - 1]

                return solve(mat44, -Yt.reshape(-1, 1)).reshape(2, 2)
            else:
                return solve(Al.T * Ar[0, 0] - i2, -Yt)

        elif Ar.size == 4:
            return solve(Ar.T * Al[0, 0] - i2, -Yt.T).T
        else:
            return -Yt / (Ar * Al - 1)

    # =====================================

    if A.shape[0] < 3:
        return mini_sylvester(A, Y)

    As, S = schur(A, output='real')
    Ys = S.T @ Y @ S

    # If there are nontrivial entries on the subdiagonal, we have a 2x2 block.
    # Based on that we have the block sizes `bz` and starting positions `bs`.
    n = As.shape[0]
    subdiag_entries = np.abs(As[range(1, n), range(0, n-1)]) > 0
    subdiag_indices = [ind for ind, x in enumerate(subdiag_entries) if x]
    bz = np.ones(n)
    for x in subdiag_indices:
        bz[x] = 2
        bz[x+1] = np.nan

    bz = bz[~np.isnan(bz)].astype(int)
    bs = [0] + np.cumsum(bz[:-1]).tolist() + [None]
    total_blk = bz.size
    Xs = np.empty_like(Y)

    # =============================
    #  Main Loop
    # =============================

    # Now we know how the matrices should be partitioned. We then start
    # from the upper left corner and alternate between updating the
    # Y term and solving the next entry of X. We walk over X row-wise

    for row in range(total_blk):
        thisr = bs[row]
        nextr = bs[row+1]

        if row != 0:
            Ys[thisr:nextr, thisr:nextr] +=  \
                As[thisr:nextr, thisr:nextr].T @ \
                Xs[thisr:nextr, :thisr] @ \
                As[:thisr, thisr:nextr]

        # (**) Solve for the diagonal via Akk , Ykk and place it in Xkk
        tempx = mini_sylvester(As[thisr:nextr, thisr:nextr],
                               Ys[thisr:nextr, thisr:nextr])

        Xs[thisr:nextr, thisr:nextr] = tempx
        XA_of_row = Xs[thisr:nextr, :nextr] @ As[:nextr, thisr:]

        # Update Y terms right of the diagonal
        Ys[thisr:nextr, thisr:] += As[thisr:nextr, thisr:nextr].T @ XA_of_row

        # Walk over upper triangular terms
        for col in range(row + 1, total_blk):
            thisc = bs[col]
            nextc = bs[col+1]

            # The corresponding Y term has already been updated, solve for X
            tempx = mini_sylvester(As[thisr:nextr, thisr:nextr],
                                   Ys[thisr:nextr, thisc:nextc],
                                   As[thisc:nextc, thisc:nextc])

            # Place it in the data
            Xs[thisr:nextr, thisc:nextc] = tempx
            Xs[thisc:nextc, thisr:nextr] = tempx.T

            # Post column solution Y update
            # XA terms
            tempa = tempx @ As[thisc:nextc, thisc:]
            # Update Y towards left
            Ys[thisr:nextr, thisc:] += As[thisr:nextr, thisr:nextr].T @ tempa
            # Update Y downwards
            XA_of_row[:, thisc - thisr:] += tempa

            ugly_sl = slice(thisc - thisr,
                            nextc - thisr if nextc is not None else None)

            Ys[nextr:nextc, thisc:nextc] += \
                As[thisr:nextr, nextr:nextc].T @ XA_of_row[:, ugly_sl]

    return S @ Xs @ S.T
Example #48
0
    def __init__(self, A, eps_min, eps_max,
                 n_circles=20,
                 n_points=20,
                 randomize=True,
                 **kwargs
                 ):
        from scipy.linalg import eig, schur
        M = A.copy()

        if eps_min <= 0:
            raise ValueError('eps_min > 0 is required')
        if eps_min >= eps_max:
            raise ValueError('eps_min < eps_max is required')

        midpoints = []
        # compute containment circles with eps_max
        radii = [eps_max]

        for i in range(A.shape[0]):
            evals, evecs = eig(M)

            # compute condition number of eigenvector basis
            evec_cond = numpy.linalg.cond(evecs, 2)

            # try all eigenvalues in top-left position and pick the
            # configuration with smallest radius
            candidates_midpoints = []
            candidates_radii = []
            candidates_Ms = []
            if len(evals) == 1:
                midpoints.append(evals[0])
                radii.append(radii[-1])
            else:
                for eval in evals:
                    dists = numpy.sort(numpy.abs(eval - evals))

                    # get Schur decomposition
                    def sort(lambd):
                        return numpy.abs(lambd - eval) <= dists[1]
                    T, Z, sdim = schur(M, output='complex', sort=sort)

                    # T = [eval c^T]
                    #     [0    M  ]
                    # solve Sylvester equation c^T = r^T M - eval*r^T
                    # <=> r = (M - lambd*I)^{-T} c
                    c = T[0, 1:]
                    M_tmp = T[1:, 1:]
                    candidates_midpoints.append(T[0, 0])

                    r = solve_triangular(M_tmp - T[0, 0]*numpy.eye(*M_tmp.shape),
                                         c,
                                         trans='T'
                                         )
                    sep_min = numpy.min(svdvals(M_tmp - T[0, 0]*numpy.eye(*M_tmp.shape)))
                    sep_max = numpy.min(numpy.abs(T[0, 0] - numpy.diag(M_tmp)))
                    r_norm = numpy.linalg.norm(r, 2)
                    p = numpy.sqrt(1. + r_norm**2)

                    # Grammont-Largillier bound
                    g_gram_larg = numpy.sqrt(1. + numpy.linalg.norm(c, 2)/radii[-1])

                    # Demmel 1: g = kappa
                    g_demmel1 = kappa = p + r_norm

                    # Demmel 2
                    g_demmel2 = numpy.inf
                    if radii[-1] <= sep_min/(2*kappa):
                        g_demmel2 = p + r_norm**2 * radii[-1]/(0.5*sep_min - p*radii[-1])

                    # Michael Karow bound (personal communication)
                    g_mika = numpy.inf
                    if radii[-1] <= sep_min/(2*kappa):
                        eps_sep = radii[-1]/sep_min
                        g_mika = (p - eps_sep)/(
                            0.5 + numpy.sqrt(0.25 - eps_sep*(p - eps_sep))
                            )

                    # use the minimum of the above g's
                    candidates_radii.append(
                        radii[-1]*numpy.min([evec_cond,
                                             g_gram_larg,
                                             g_demmel1,
                                             g_demmel2,
                                             g_mika
                                             ])
                        )
                    candidates_Ms.append(M_tmp)
                min_index = numpy.argmin(candidates_radii)
                midpoints.append(candidates_midpoints[min_index])
                radii.append(candidates_radii[min_index])
                M = candidates_Ms[min_index]
        # remove first radius
        radii = radii[1:]

        # construct points for evaluation of resolvent
        points = []
        arg = numpy.linspace(0, 2*numpy.pi, n_points, endpoint=False)
        for midpoint, radius_max in zip(midpoints, radii):
            radius_log = numpy.logspace(numpy.log10(eps_min),
                                        numpy.log10(radius_max),
                                        n_circles
                                        )

            #radius_lin = numpy.linspace(eps_min, radius_max, n_circles)
            for radius in radius_log:
                rand = 0.
                if randomize:
                    rand = numpy.random.rand()

                # check that radius is larger than round-off in order to
                # avoid duplicate points
                if numpy.abs(radius)/numpy.abs(midpoint) > 1e-15:
                    points.append(midpoint + radius*numpy.exp(1j*(rand+arg)))
        points = numpy.concatenate(points)
        super(NonnormalAuto, self).__init__(A, points, **kwargs)
Example #49
0
File: eg.py Project: zimoun/mtf
if len(ii) == l:
    WARNING = True
    print('Warning')
k = len(ii)

V0, S0, W0 = V[:, ii], np.diag(s[ii]), W[0:l, ii]
V0h, iS0 = V0.transpose().conjugate(), la.inv(S0)
C = V0h.dot(B.dot(W0.dot(iS0)))
CC = V0h.dot(B.dot(W0))

w, M = la.eig(C)
M = V0.dot(M)
ww = la.eig(CC, b=S0, right=False)

reorder = lambda eigen: not ((eigen.real - mu)/a)**2 + (eigen.imag/b)**2 < 1.0
T, Z, sdim = la.schur(C, output='complex', sort=reorder)
www = np.diag(T)

print(' ')
teig = time() - teig
print('eig time:', teig)
print(' ')


plt.figure(1)
plt.plot(Psi.real, Psi.imag, 'b.-')
#plt.plot(C[0:5].real, C[0:5].imag, 'r.')
plt.plot(w.real, w.imag, 'ro', label='naive')
plt.plot(ww.real, ww.imag, 'g.', label='via generalized')
plt.plot(www.real, www.imag, 'c.', label='via Schur')
Example #50
0
 def test_sort(self):
     a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]
     s,u,sdim = schur(a,sort='lhp')
     assert_array_almost_equal([[0.1134,0.5436,0.8316,0.], 
                                [-0.1134,-0.8245,0.5544,0.], 
                                [-0.8213,0.1308,0.0265,-0.5547], 
                                [-0.5475,0.0872,0.0177,0.8321]],
                               u,3)
     assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174], 
                                [0.,-0.5000,9.4472,-0.7184], 
                                [0.,0.,1.4142,-0.1456], 
                                [0.,0.,0.,0.5]],
                               s,3)
     assert_equal(2,sdim)
                               
     s,u,sdim = schur(a,sort='rhp')
     assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
                                [-0.4862,0.4930,-0.1434,-0.7071],
                                [0.6042,0.3944,-0.6924,0.],
                                [0.4028,0.5986,0.6924,0.]],
                               u,3)
     assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
                                [0.,0.5,6.5809,-3.1870],
                                [0.,0.,-1.4142,0.9270],
                                [0.,0.,0.,-0.5]],
                               s,3)
     assert_equal(2,sdim)
                               
     s,u,sdim = schur(a,sort='iuc')
     assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042],
                                [-0.8321,0.,-0.3814,-0.4028],
                                [0.,0.7071,-0.5134,0.4862],
                                [0.,0.7071,0.5134,-0.4862]],
                               u,3)
     assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974],
                                [0.,0.5000,-3.3191,-14.4130],
                                [0.,0.,1.4142,2.1573],
                                [0.,0.,0.,-1.4142]],
                               s,3)
     assert_equal(2,sdim)
                               
     s,u,sdim = schur(a,sort='ouc')
     assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.],
                                [-0.4862,0.5134,0.7071,0.],
                                [0.6042,0.5721,0.,-0.5547],
                                [0.4028,0.3814,0.,0.8321]],
                               u,3)
     assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974],
                                [0.,-1.4142,3.3191,6.5809],
                                [0.,0.,-0.5000,0.],
                                [0.,0.,0.,0.5000]],
                               s,3)
     assert_equal(2,sdim)
                               
     rhp_function = lambda x: x >= 0.0
     s,u,sdim = schur(a,sort=rhp_function)
     assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
                                [-0.4862,0.4930,-0.1434,-0.7071],
                                [0.6042,0.3944,-0.6924,0.],
                                [0.4028,0.5986,0.6924,0.]],
                               u,3)
     assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
                                [0.,0.5,6.5809,-3.1870],
                                [0.,0.,-1.4142,0.9270],
                                [0.,0.,0.,-0.5]],
                               s,3)
     assert_equal(2,sdim)
Example #51
0
def LocalReg(X, k, d):
    """
    :params: X -- data matrix of shape (m, N), one data point per column
             k -- number of nearest neighbours used around each point
             d -- target (intrinsic) dimension
    :return: Phi -- (N, N) matrix accumulating the local reconstruction
             weights over all neighbourhoods
    """

    N = X.shape[1]  # X: m x N, m dimension, N points
    tol = 1.0e-3  # the regularization parameter

    # Neighborhood selection
    X2 = np.sum(pow(X, 2), axis=0)
    D = np.tile(X2, (N, 1)) + np.transpose(np.tile(
        X2, (N, 1))) - 2 * np.dot(np.transpose(X), X)
    J = D.argsort()

    # Initialize variables
    ratio = np.zeros(N)
    Theta = []
    Ev = []
    Ow = []

    # Local Information
    for i in range(N):
        xi = X[:, i]
        Ii = J[i, 0:k + 1]
        Ji = Ii[1::]
        Gi = X[:, Ji] - np.transpose(np.tile(xi, (k, 1)))
        [S, V] = schur(np.dot(np.transpose(Gi), Gi), output='real')
        ei = np.sort(np.diag(S))[::-1]
        JIi = np.diag(S).argsort()[::-1]
        ratio[i] = np.sum(ei[d:k]) / np.sum(ei[0:d])
        Theta.append(V[:, JIi])  # The local coordinates system
        Ev.append(ei)
        C = np.dot(np.transpose(Gi), Gi)
        C = C + np.eye(k) * tol * np.trace(C)  # Regularization
        Cw = np.linalg.solve(C, np.ones([k, 1]))
        Cw = Cw / np.sum(Cw)
        Ow.append(Cw)  # Considered as the optimal weight vector

    temp = np.sort(ratio)
    eta = temp[math.ceil(N / 2)]

    # Determine the number of weights to use
    s = np.zeros(N)
    for i in range(N):
        ell = k - d
        Lambda = Ev[i]
        while np.sum(Lambda[k - ell::]) / np.sum(
                Lambda[0:k - ell]) > eta and ell > 1:
            ell = ell - 1
        s[i] = ell

    Phi = np.zeros((N, N))
    for i in range(N):
        Ii = J[i, 0:k + 1]
        Vi = np.array(Theta[i])
        Ve = Vi[:, int(k - s[i])::]
        ve = np.sum(Ve, axis=0)
        alpha = linalg.norm(ve) / np.sqrt(s[i])
        u = ve - alpha
        normu = linalg.norm(u)

        if normu > 1.0e-5:
            u = u / normu
            Wi = ((1 - alpha) ** 2) * np.array(Ow[i]) * np.ones(int(s[i]))\
                + (2 - alpha) * (Ve - np.dot(np.dot(Ve, (2 * u))[:, None], np.transpose(u[:, None])))     # the multiple local weights
        else:
            Wi = ((1-alpha) ** 2) * np.array(Ow[i]) * np.ones(int(s[i]))\
                + (2 - alpha) * Ve

        Phi_add = np.dot(np.vstack((-np.ones(int(s[i])), Wi)),
                         np.transpose(np.vstack((-np.ones(int(s[i])), Wi))))
        for i in range(Phi_add.shape[0]):
            Phi[Ii, Ii[i]] = Phi[Ii, Ii[i]] + Phi_add[:, i]

    return Phi
Example #52
0
 def test_schur_complex(self):
     assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)],
                         dtypes=[np.float32, np.float64])
Example #53
0
    def do_nsd(self, row, col):
        r"""Evaluates by numerical steepest descent the integral
        :math:`\langle \Phi_i | f | \Phi^\prime_j \rangle` for a polynomial
        function :math:`f(x)` with :math:`x \in \mathbb{R}^D`.

        :param row: The index :math:`i` of the component :math:`\Phi_i` of :math:`\Psi`.
        :param col: The index :math:`j` of the component :math:`\Phi^\prime_j` of :math:`\Psi^\prime`.
        :return: A complex valued matrix of shape :math:`|\mathfrak{K}_i| \times |\mathfrak{K}^\prime_j|`.
        """
        D = self._packet.get_dimension()
        N = self._packet.get_number_components()
        eps = self._packet.get_eps()
        Pibra = self._pacbra.get_parameters(component=row)
        Piket = self._packet.get_parameters(component=col)
        Pimix = self.mix_parameters(Pibra[:4], Piket[:4])

        # Combine oscillators
        A, b, c = self.build_bilinear(Pibra[:4], Piket[:4])

        # Schur decomposition of A = U^H T U
        T, U = schur(A, output="complex")
        U = conjugate(transpose(U))

        # Oscillator updates
        for i in range(1, D):
            if T[i - 1, i - 1] == 0:
                # TODO: Prove that this never happens or handle it correctly!
                print("Warning: 'update_oscillator' encountered a RESIDUE situation!")

            # Diagonal Elements
            for j in range(i, D):
                T[j, j] = T[j, j] - T[i - 1, j]**2 / (4.0 * T[i - 1, i - 1])

            # Others
            for rowi in range(i, D):
                for coli in range(rowi + 1, D):
                    T[rowi, coli] = T[rowi, coli] - T[i - 1, rowi] * T[i - 1, coli] / (2 * T[i - 1, i - 1])

        # Compute remaining parts
        X = inv(A + transpose(A))
        ctilde = c - 0.5 * dot(transpose(b), dot(X, b))

        # Prefactor originating from constant term c
        eps = self._packet.get_eps()
        w = 1.0 / eps**2
        prefactor = exp(1.0j * w * ctilde)

        # Take out diagonals of T
        Dk = diag(T).reshape((D, 1))
        # Tau (path parametrization variable)
        tk = self._nodes / sqrt(w)

        # Path Precomposition
        Tu = 0.5 * triu(T, 1) / Dk
        paths = (sqrt(1.0j / Dk) * tk).astype(complexfloating)
        for i in reversed(range(D)):
            paths[i, :] = paths[i, :] - dot(Tu[i, :], paths)

        # Path derivatives
        pathderivs = sqrt(1.0j / Dk)
        pdp = product(pathderivs, axis=0)

        # Backtransformation of paths
        pathst = dot(conjugate(transpose(U)), paths) - dot(X, b)

        # Another normalization prefactor
        # This is what differs the constant part of phi_0 from 1.
        # We loose it when dividing by phi_0 hence manually add it again.
        # TODO: Do we need mixing parameters here?
        #       Preliminary answer: no
        fr = (pi * eps**2)**(-0.25 * D) * 1.0 / sqrt(det(Pibra[2]))
        fc = (pi * eps**2)**(-0.25 * D) * 1.0 / sqrt(det(Piket[2]))
        normfactor = conjugate(fr) * fc

        # Compute global phase difference
        phase = exp(1.0j / eps**2 * (Piket[4] - conjugate(Pibra[4])))

        # Non-oscillatory parts
        # Wavepacket
        # TODO: This is a huge hack: division by phi_0 not stable?
        basisr = self._pacbra.evaluate_basis_at(conjugate(pathst), row, prefactor=False)
        basisr = basisr / basisr[0, :]
        basisc = self._packet.evaluate_basis_at(pathst, col, prefactor=False)
        basisc = basisc / basisc[0, :]
        # Basis division by phi0 may introduce NaNs
        #basisr = nan_to_num(basisr)
        #basisc = nan_to_num(basisc)

        # Operator should support the component notation for efficiency
        if self._eval_at_once is True:
            # TODO: Sure, this is inefficient, but we can not do better right now.
            opath = self._operator(pathst, Pimix[0])[row * N + col]
        else:
            opath = self._operator(pathst, Pimix[0], entry=(row, col))

        # Do the quadrature
        quadrand = (opath * pdp * self._weights).reshape((-1,))
        # Sum up matrices over all quadrature nodes
        M = einsum("k,ik,jk", quadrand, conjugate(basisr), basisc)

        return phase * normfactor * prefactor * M / sqrt(w)**D
Example #54
0
def plot_ps(A, m=150, epsilon_vals=None, plot_eig_vals=True, labels=False):
    """
    Plots the pseudospectrum of the matrix A on an
    mxm grid of points
    
    Parameters
    ----------
    A : square, 2D ndarray
        The matrix whose pseudospectrum is to be plotted
    m : int
        The dimension of the square grid used for plotting
        Defaults to 150
    epsilon_vals : list of floats
        If k is in epsilon_vals, then the epsilon-pseudospectrum
        is plotted for epsilon=10**-k
        If epsilon_vals=None, the defaults of plt.contour are used
        instead of any specified values.
    plot_eig_vals : bool
        If True, the eigenvalues of A will be plotted along
        with the pseudospectum of A
        Defaults to True
    labels : bool
        If True, the contours will be labelled with k,
        where epsilon = 10**-k
        Defaults to False
    
    """
    T, Z = la.schur(A, 'complex')
    eigs_A = T.diagonal()
    N = A.shape[0]
    maxit = N-1
    x_vals,y_vals = ps_grid(eigs_A, m)
    sigmin = np.zeros((m,m))
    for k in range(m):
        for i in range(m):
            T1 = (x_vals[k]+y_vals[i]*1.j)*np.identity(N)-T
            T2 = (T1.T).conj()
            sigold = 0
            qold = np.zeros((N,1))
            beta = 0
            H = np.zeros_like(A)
            q = np.random.randn(N,1)+1.j*np.random.randn(N,1)
            q = q/la.norm(q)
            for p in range(maxit):
                v = la.solve_triangular(T1,(la.solve_triangular(T2,q,lower=True)))-beta*qold
                alpha = np.dot((q.T).conj(),v)[0].real
                v = v - alpha*q
                beta = la.norm(v)
                qold = q
                q = v/beta
                H[p+1,p] = beta
                H[p, p+1] = beta
                H[p,p] = alpha
                eigs_H = la.eigvalsh(H[:p+1,:p+1])
                sig = np.amax(np.absolute(eigs_H))
                if np.absolute(sigold/sig-1) < 0.001:
                    break
                sigold = sig
            sigmin[i,k] = np.sqrt(sig)
    fig = plt.figure()
    if plot_eig_vals:
        eigs_real = eigs_A.real
        eigs_imag = eigs_A.imag
        plt.scatter(eigs_real,eigs_imag) 
    CS = plt.contour(x_vals,y_vals,np.log10(sigmin), levels=epsilon_vals)
    if labels:
        plt.clabel(CS)
    #plt.show()
    return fig
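A minimal usage sketch for `plot_ps`, assuming the module-level imports it relies on (`numpy as np`, `scipy.linalg as la`, `matplotlib.pyplot as plt`) and the `ps_grid` helper are available; the bidiagonal test matrix is only an illustration of a non-normal input.

# Usage sketch (the ps_grid helper and the module imports are assumed available).
import numpy as np

N = 6
A_demo = np.diag(np.arange(1.0, N + 1)) + np.diag(5.0 * np.ones(N - 1), k=1)
fig = plot_ps(A_demo, m=60)            # contour levels fall back to plt.contour defaults
fig.savefig('pseudospectrum_demo.png')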
Example #55
0
sol=root(fun,0.1)
print("ROOT:",sol.x,sol.fun)

#3----Interpolation
x=np.linspace(0,1,10)
y=np.sin(2*np.pi*x)
from scipy.interpolate import interp1d
li=interp1d(x,y,kind="cubic")
x_new=np.linspace(0,1,50)
y_new=li(x_new)
figure()
plot(x,y,"r")
plot(x_new,y_new,"k")
show()
print(y_new)

#4 linear
from scipy import linalg as lg
arr=np.array([[1,2],[3,4]])
print("Det:",lg.det(arr))
print("Inv:",lg.inv(arr))
b=np.array([6,14])
print("Sol:",lg.solve(arr,b))
print("Eig:",lg.eig(arr))
print("LU:",lg.lu(arr))
print("QR:",lg.qr(arr))
print("SVD:",lg.svd(arr))
print("Schur:",lg.schur(arr))

#others
Example #56
0
def dare(F, G1, G2, H):
    """Solves the discrete-time algebraic Riccati equation

    0 = F ^ T * X * F
        - X - F ^ T * X * G1 * (G2 + G1 ^ T * X * G1) ^ -1 * G1 ^ T * X * F + H

    Under the assumption that X ^ -1 exists, this equation is equivalent to

    0 = F ^ T * (X ^ -1 + G1 * G2 ^ -1 * G1 ^ T) ^ -1 * F - X + H

    Parameters
    ==========
    Inputs are real matrices:

    F : n x n
    G1 : n x m
    G2 : m x m, symmetric, positive definite
    H : n x n, symmetric, positive semi-definite

    Assumptions
    ===========
    (F, G1) is a stabilizable pair
    (C, F) is a detectable pair (where C is full rank factorization of H, i.e.,
        C ^ T * C = H and rank(C) = rank(H).
    F is invertible

    Returns
    =======

    Unique nonnegative definite solution of discrete Algrebraic Ricatti
    equation.

    Notes
    =====
    This is an implementation of the Schur method for solving algebraic Riccati
    eqautions as described in dx.doi.org/10.1109/TAC.1979.1102178

    """
    # Verify that F is non-singular
    u, s, v = la.svd(F)
    assert(np.all(s > 0.0))
    # Verify that (F, G1) controllable
    C = ctrb(F, G1)
    u, s, v = la.svd(C)
    assert(np.all(s > 0.0))
    # Verify that (H**.5, F) is observable
    O = obsv(H**.5, F)
    u, s, v = la.svd(O)
    assert(np.all(s > 0.0))
    
    n = F.shape[0]
    m = G2.shape[0]

    G = np.dot(G1, np.dot(inv(G2), G1.T))
    Finv = inv(F)
    Finvt = Finv.T

    # Form symplectic matrix
    Z = empty((2*n, 2*n))
    Z[:n, :n] = F + np.dot(G, np.dot(Finvt, H))
    Z[:n, n:] = -np.dot(G, Finvt)
    Z[n:, :n] = -np.dot(Finvt, H)
    Z[n:, n:] = Finvt

    S, U, sdim = schur(Z, sort='iuc')

    # Verify that the n eigenvalues of the upper left block stable
    assert(sdim == n)

    U11 = U[:n, :n]
    U21 = U[n:, :n]
    return solve(U11.T, U21.T).T
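Where a recent SciPy is available, the result can be cross-checked against `scipy.linalg.solve_discrete_are`, which solves the same equation with the argument order `(F, G1, H, G2)`. A small sketch, assuming the module-level imports used by `dare` (numpy, `ctrb`, `obsv`, `schur`, `solve`, ...) are in place:

# Cross-check sketch: solve_discrete_are(F, G1, H, G2) targets the same DARE
#   F.T X F - X - F.T X G1 (G2 + G1.T X G1)^{-1} G1.T X F + H = 0.
import numpy as np
from scipy.linalg import solve_discrete_are

F = np.array([[0.9, 0.2],
              [0.0, 0.8]])
G1 = np.array([[0.0],
               [1.0]])
G2 = np.array([[1.0]])
H = np.eye(2)

X = dare(F, G1, G2, H)
X_ref = solve_discrete_are(F, G1, H, G2)
print(np.allclose(X, X_ref))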
Example #57
0
def spd_eig(W,
            epsilon=1e-10,
            method='QR',
            canonical_signs=False,
            check_sym: bool = False):
    """ Rank-reduced eigenvalue decomposition of symmetric positive definite matrix.

    Removes all negligible eigenvalues

    Parameters
    ----------
    W : ndarray((n, n), dtype=float)
        Symmetric positive-definite (spd) matrix.
    epsilon : float
        Truncation parameter. Eigenvalues with norms smaller than this cutoff will
        be removed.
    method : str
        Method to perform the decomposition of :math:`W` before inverting. Options are:

        * 'QR': QR-based robust eigenvalue decomposition of W
        * 'schur': Schur decomposition of W
    canonical_signs : bool, default = False
        Fix signs in V, such that the largest element in every column of V is positive.
    check_sym : bool, default = False
        Check whether the input matrix is (almost) symmetric.

    Returns
    -------
    s : ndarray(k)
        k non-negligible eigenvalues, sorted by descending norms

    V : ndarray(n, k)
        k leading eigenvectors
    """
    # check input
    if check_sym and not _np.allclose(W.T, W):
        raise ValueError('W is not a symmetric matrix')

    if method == 'QR':
        from .eig_qr import eig_qr
        s, V = eig_qr(W)
    # compute the Eigenvalues of C0 using Schur factorization
    elif method == 'schur':
        from scipy.linalg import schur
        S, V = schur(W)
        s = _np.diag(S)
    else:
        raise ValueError(
            f'method {method} not implemented, available are {spd_eig.methods}'
        )

    s, V = sort_eigs(s, V)  # sort them

    # determine the cutoff. We know that C0 is an spd matrix,
    # so we select the truncation threshold such that everything that is negative vanishes
    evmin = _np.min(s)
    if evmin < 0:
        epsilon = max(epsilon, -evmin + 1e-16)

    # determine effective rank m and perform low-rank approximations.
    evnorms = _np.abs(s)
    n = _np.shape(evnorms)[0]
    m = n - _np.searchsorted(evnorms[::-1], epsilon)
    if m == 0:
        raise ZeroRankError(
            'All eigenvalues are smaller than %g, rank reduction would discard all dimensions.'
            % epsilon)
    Vm = V[:, 0:m]
    sm = s[0:m]

    if canonical_signs:
        # enforce canonical eigenvector signs
        for j in range(m):
            jj = _np.argmax(_np.abs(Vm[:, j]))
            Vm[:, j] *= _np.sign(Vm[jj, j])

    return sm, Vm
def sqrtm(A, disp=True):
    """
    Symmetric Matrix square root.

    modified version of the scipy.linalg sqrtm function for performance:
    (i) introduced a dot product [based on https://groups.google.com/forum/#!topic/scipy-user/iNzZzkHjlgA]
    (ii) avoid rsf2csf as the input is expected to be symmetric

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose square root to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)

    Returns
    -------
    sqrtm : (N, N) ndarray
        Matrix square root of `A`

    errest : float
        (if disp == False)

        Frobenius norm of the estimated error, ||err||_F / ||A||_F

    Notes
    -----
    Uses algorithm by Nicholas J. Higham

    """
    A = np.asarray(A)
    if len(A.shape)!=2:
        raise ValueError("Non-matrix input to matrix function.")
    T, Z = la.schur(A)
    # if the matrix is real and symmetric can skip the complex part
    if not (np.allclose(A,A.T,rtol=0,atol=1e-8) and np.all(A.imag==0)):
        T, Z = la.rsf2csf(T,Z)
    n,n = T.shape

    R = np.zeros((n,n),T.dtype.char)
    for j in range(n):
        R[j,j] = np.sqrt(T[j,j])
        for i in range(j-1,-1,-1):
            #s = 0
            #for k in range(i+1,j):
            #    s = s + R[i,k]*R[k,j]
            s = R[i,(i+1):j].dot(R[(i+1):j,j])
            R[i,j] = (T[i,j] - s)/(R[i,i] + R[j,j])

    X = Z @ R @ Z.conj().T

    if disp:
        nzeig = np.any(np.diag(T) == 0)
        if nzeig:
            print("Matrix is singular and may not have a square root.")
        return X
    else:
        arg2 = la.norm(X @ X - A, 'fro')**2 / la.norm(A, 'fro')
        return X, arg2
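A short usage sketch for the symmetric `sqrtm` above, assuming `numpy as np` and `scipy.linalg as la` are imported at module level as the function body expects:

# Usage sketch on a small symmetric positive-definite matrix.
import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
X = sqrtm(A)
print(np.allclose(X @ X, A))           # True up to round-off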