Code Example #1
File: isvd.py  Project: burakbayramli/classnotes
def addblock_svd_update(Uarg, Sarg, Varg, Aarg, force_orth=False):
  # Incremental (Brand-style) SVD update: given an existing SVD (Uarg, Sarg,
  # Varg), update it when the block of new rows Aarg is appended; the swaps
  # and transposes below turn the row update into a column update.
  U = Varg
  V = Uarg
  S = np.eye(len(Sarg),len(Sarg))*Sarg
  A = Aarg.T
  
  current_rank = U.shape[1]
  m = np.dot(U.T,A)
  p = A - np.dot(U,m)
  P = lin.orth(p)
  Ra = np.dot(P.T,p)
  z = np.zeros(m.shape)
  K = np.vstack(( np.hstack((S,m)), np.hstack((z.T,Ra)) ))
  tUp,tSp,tVp = lin.svd(K);
  tUp = tUp[:,:current_rank]
  tSp = np.diag(tSp[:current_rank])
  tVp = tVp[:,:current_rank]
  Sp = tSp
  Up = np.dot(np.hstack((U,P)),tUp)
  Vp = np.dot(V,tVp[:current_rank,:])
  Vp = np.vstack((Vp, tVp[current_rank:tVp.shape[0], :]))
  
  if force_orth:
    UQ,UR = lin.qr(Up,mode='economic')
    VQ,VR = lin.qr(Vp,mode='economic')
    tUp,tSp,tVp = lin.svd( np.dot(np.dot(UR,Sp),VR.T));
    tSp = np.diag(tSp)
    Up = np.dot(UQ,tUp)
    Vp = np.dot(VQ,tVp)
    Sp = tSp;

  Up1 = Vp;
  Vp1 = Up;
    
  return Up1,Sp,Vp1
Code Example #2
File: lowrank.py  Project: smileyk/TensorCUR
def lowrank_to_qr(A, B):
	"""
	Convert a low-rank factorization C appr A*B into an equivalent QR 
	C approx QR 


	Parameters:
	-----------
	A:	(m,k) ndarray
		First of the two low-rank matrices
	B:	(k,n) ndarray
		Second of the two low-rank matrices
		
	Returns:
	--------
	Q:	(m,k) ndarray
		
		
		
	
	Notes:
	------
	
	"""

	qa, ra = qr(A, mode = 'economic')
	d = dot(ra, B.conj().T)
	qd, r = qr(d, mode = 'economic')

	q = dot(qa,qd)

	return q,r 
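A minimal usage sketch for lowrank_to_qr (illustrative, not from the TensorCUR project); it assumes the module-level `qr` and `dot` are scipy.linalg.qr and numpy.dot, as the snippet implies, and checks that Q.dot(R) reproduces the low-rank product:

import numpy as np
from numpy import dot
from scipy.linalg import qr

m, n, k = 60, 40, 5
A = np.random.randn(m, k)
B = np.random.randn(n, k)                 # low-rank product C = A B^H has shape (m, n)
Q, R = lowrank_to_qr(A, B)
assert np.allclose(dot(Q, R), dot(A, B.conj().T))   # QR reproduces the product
assert np.allclose(dot(Q.T, Q), np.eye(k))          # Q has orthonormal columns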
Code Example #3
File: solutions.py  Project: davidreber/Labs
def eig(A, normal = False, iter = 100):
	'''Finds eigenvalues of an nxn array A. If A is normal, QRalg.eig 
	may also return eigenvectors.
	
	Parameters
	----------
	A :  nxn array
	     May be real or complex
	normal : bool, optional
		     Set to True if A is normal and you want to calculate
		     the eigenvectors.
	iter : positive integer, optional
			Number of shifted QR iterations to run (default 100).
	Returns
	-------
	v : 1xn array of eigenvalues, may be real or complex
	Q : (only returned if normal = True) 
		nxn array whose columns are eigenvectors, s.t. A*Q = Q*diag(v)
		real if A is real, complex if A is complex
	
	For more on the QR algorithm, see Eigenvalue Solvers lab.
	'''
	def getSchurEig(A):
		#Find the eigenvalues of a Schur form matrix. These are the 
		#elements on the main diagonal, except where there's a 2x2 
		#block on the main diagonal. Then we have to find the 
		#eigenvalues of that block.
		D = sp.diag(A).astype(complex)
		#Find all the 2x2 blocks:
		LD = sp.diag(A,-1)
		index = sp.nonzero(abs(LD)>.01)[0] #is this a good tolerance?
		#Find the eigenvalues of those blocks:
		a = 1
		b = -D[index]-D[index+1]
		c = D[index]*D[index+1] - A[index,index+1]*LD[index]
		discr = sp.sqrt(b**2-4*a*c)
		#Fill in vector D with those eigenvalues
		D[index] = (-b + discr)/(2*a)
		D[index+1] = (-b - discr)/(2*a)
		return D

	n,n = A.shape
	I = sp.eye(n)
	A,Q = hessenberg(A,True)
	if normal == False:
		for i in sp.arange(iter):
			s = A[n-1,n-1].copy()
			Qi,R = la.qr(A-s*I)
			A = sp.dot(R,Qi) + s*I
		v = getSchurEig(A)
		return v
	
	elif normal == True:
		for i in sp.arange(iter):
			s = A[n-1,n-1].copy()
			Qi,R = la.qr(A-s*I)
			A = sp.dot(R,Qi) + s*I
			Q = sp.dot(Q,Qi)
		v = sp.diag(A)
		return v,Q
Code Example #4
def _overlap_projector(data_int, data_res, corr):
    """Calculate projector for removal of subspace intersection in tSSS"""
    # corr necessary to deal with noise when finding identical signal
    # directions in the subspace. See the end of the Results section in [2]_

    # Note that the procedure here is an updated version of [2]_ (and used in
    # Elekta's tSSS) that uses residuals instead of internal/external spaces
    # directly. This provides more degrees of freedom when analyzing for
    # intersections between internal and external spaces.

    # Normalize data, then compute orth to get temporal bases. Matrices
    # must have shape (n_samps x effective_rank) when passed into svd
    # computation
    Q_int = linalg.qr(_orth_overwrite((data_int / np.linalg.norm(data_int)).T),
                      overwrite_a=True, mode='economic', **check_disable)[0].T
    Q_res = linalg.qr(_orth_overwrite((data_res / np.linalg.norm(data_res)).T),
                      overwrite_a=True, mode='economic', **check_disable)[0]
    assert data_int.shape[1] > 0
    C_mat = np.dot(Q_int, Q_res)
    del Q_int

    # Compute angles between subspace and which bases to keep
    S_intersect, Vh_intersect = linalg.svd(C_mat, overwrite_a=True,
                                           full_matrices=False,
                                           **check_disable)[1:]
    del C_mat
    intersect_mask = (S_intersect >= corr)
    del S_intersect

    # Compute projection operator as (I-LL_T) Eq. 12 in [2]_
    # V_principal should be shape (n_time_pts x n_retained_inds)
    Vh_intersect = Vh_intersect[intersect_mask].T
    V_principal = np.dot(Q_res, Vh_intersect)
    return V_principal
Code Example #5
def random_non_singular(p, sing_min=1., sing_max=2., random_state=0):
    """Generate a random nonsingular matrix.

    Parameters
    ----------
    p : int
        The first dimension of the array.

    sing_min : float, optional (default to 1.)
        Minimal singular value.

    sing_max : float, optional (default to 2.)
        Maximal singular value.

    random_state : int or numpy.random.RandomState instance, optional
        random number generator, or seed.

    Returns
    -------
    output : numpy.ndarray, shape (p, p)
        A nonsingular matrix with the given minimal and maximal singular
        values.
    """
    random_state = check_random_state(random_state)
    diag = random_diagonal(p, v_min=sing_min, v_max=sing_max,
                           random_state=random_state)
    mat1 = random_state.randn(p, p)
    mat2 = random_state.randn(p, p)
    unitary1, _ = linalg.qr(mat1)
    unitary2, _ = linalg.qr(mat2)
    return unitary1.dot(diag).dot(unitary2.T)
Code Example #6
File: libmps.py  Project: japs/mpys
    def normalise_site_qr(self, site, left=True):
        '''
        Normalise the MP at a given site. Use the QR method,
        arXiv:1008.3477, sec. 4.1.3
        '''
        if (left and site == L) or (not left and site == 1):
            self.normalise_extremal_site(site, left=left)
            return
        
        M_asb = self.mps[site] # the matrix we have to work on, M_site
        r, s, c = M_asb.shape  # row, spin, column

        if left:
            M_aab = M_asb.reshape((r * s, c)) # merge first two indices
            

            Q, R = la.qr(M_aab, mode='economic') # economic = thin QR in
                                                     # 1008.3477
            self.mps[site] = Q.reshape((r, s, Q.size // (r * s))) # new A_site

            # Contract R with matrix at site + 1
            self.mps[site+1] = np.tensordot(R, self.mps[site+1], ((1),(0)))
        else:
            M_aab = M_asb.reshape((r, s * c)) # merge last two indices

            # QR = M^+ ==> M = R^+ Q^+
            Q, R = la.qr(M_aab.transpose())
            Q = Q.transpose()
            R = R.transpose()
            self.mps[site] = Q.reshape((Q.size // (s * c), s, c)) # new B_site

            # Contract R with matrix at site - 1
            self.mps[site-1] = np.tensordot(self.mps[site-1], R, ((2),(0)))
        return
Code Example #7
File: ssnr_sink.py  Project: MMKrell/pyspace
    def _compute_xDAWN_filters(self, X, D):
        # Compute xDAWN spatial filters

        # QR decompositions of X and D
        if list(map(int, __import__("scipy").__version__.split('.'))) >= [0, 9, 0]:
            # NOTE: mode='economic' required since otherwise the memory
            # consumption is excessive
            Qx, Rx = qr(X, overwrite_a=True, mode='economic')
            Qd, Rd = qr(D, overwrite_a=True, mode='economic')
        else:
            # NOTE: econ=True required since otherwise the memory consumption
            # is excessive
            Qx, Rx = qr(X, overwrite_a=True, econ=True)
            Qd, Rd = qr(D, overwrite_a=True, econ=True)

        # Singular value decomposition of Qd.T Qx
        # NOTE: full_matrices=True required since otherwise we do not get
        #       num_channels filters.
        Phi, Lambda, Psi = numpy.linalg.svd(numpy.dot(Qd.T, Qx),
                                           full_matrices=True)
        Psi = Psi.T

        # Construct the spatial filters
        for i in range(Psi.shape[1]):
            # Construct spatial filter with index i as Rx^-1*Psi_i
            ui = numpy.dot(numpy.linalg.inv(Rx), Psi[:,i])
            if i == 0:
                filters = numpy.atleast_2d(ui).T
            else:
                filters = numpy.hstack((filters, numpy.atleast_2d(ui).T))

        return filters
Code Example #8
File: manifold.py  Project: rphlypo/parietalretreat
def random_non_singular(shape):
    """Generates random non singular matrix"""
    d = random_diagonal_spd(shape)
    ran1 = np.random.rand(shape, shape)
    ran2 = np.random.rand(shape, shape)
    u, _ = linalg.qr(ran1)
    v, _ = linalg.qr(ran2)
    return u.dot(d).dot(v.T)
Code Example #9
File: 2.py  Project: catchmrbharath/compmeth
def eigenm(B):
    c,d= householder(B)
    a = diag(c,0)+diag(d,1)+diag(d,-1)
    q,r = ln.qr(a)
    for i in range(100):
        q,r = ln.qr(a);
        a = dot(r,q);
    return sort(diag(a,0))
Code Example #10
File: lowrank.py  Project: smileyk/TensorCUR
def lowrank_to_svd(A, B):
	"""M = AB^T"""

	qa, ra = qr(A, mode = 'economic')
	qb, rb = qr(B, mode = 'economic')

	mat = dot(ra,rb.conj().T)
	u, s, vh = svd(mat)

	return dot(qa, u), s, dot(qb,vh.conj().T)
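A quick, illustrative check of lowrank_to_svd along the same lines (assuming `qr`, `svd` and `dot` are the scipy.linalg/numpy functions the module imports): the returned factors should reproduce M = A.dot(B.conj().T).

import numpy as np
from numpy import dot
from scipy.linalg import qr, svd

m, n, k = 60, 40, 5
A = np.random.randn(m, k)
B = np.random.randn(n, k)
U, s, V = lowrank_to_svd(A, B)
assert np.allclose(dot(U * s, V.conj().T), dot(A, B.conj().T))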
Code Example #11
File: pca.py  Project: mstrazar/orange3
def randomized_pca(A, n_components, n_oversamples=10, n_iter="auto",
                   flip_sign=True, random_state=0):
    """Compute the randomized PCA decomposition of a given matrix.

    This method differs from the scikit-learn implementation in that it supports
    and handles sparse matrices well.

    """
    if n_iter == "auto":
        # Checks if the number of iterations is explicitly specified
        # Adjust n_iter. 7 was found a good compromise for PCA. See sklearn #5299
        n_iter = 7 if n_components < .1 * min(A.shape) else 4

    n_samples, n_features = A.shape

    c = np.atleast_2d(A.mean(axis=0))

    if n_samples >= n_features:
        Q = random_state.normal(size=(n_features, n_components + n_oversamples))
        Q = safe_sparse_dot(A, Q) - safe_sparse_dot(c, Q)

        # Normalized power iterations
        for _ in range(n_iter):
            Q = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
            Q, _ = lu(Q, permute_l=True)
            Q = safe_sparse_dot(A, Q) - safe_sparse_dot(c, Q)
            Q, _ = lu(Q, permute_l=True)

        Q, _ = qr(Q, mode="economic")

        QA = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
        R, s, V = svd(QA.T, full_matrices=False)
        U = Q.dot(R)

    else:  # n_features > n_samples
        Q = random_state.normal(size=(n_samples, n_components + n_oversamples))
        Q = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])

        # Normalized power iterations
        for _ in range(n_iter):
            Q = safe_sparse_dot(A, Q) - safe_sparse_dot(c, Q)
            Q, _ = lu(Q, permute_l=True)
            Q = safe_sparse_dot(A.T, Q) - safe_sparse_dot(c.T, Q.sum(axis=0)[None, :])
            Q, _ = lu(Q, permute_l=True)

        Q, _ = qr(Q, mode="economic")

        QA = safe_sparse_dot(A, Q) - safe_sparse_dot(c, Q)
        U, s, R = svd(QA, full_matrices=False)
        V = R.dot(Q.T)

    if flip_sign:
        U, V = svd_flip(U, V)

    return U[:, :n_components], s[:n_components], V[:n_components, :]
Code Example #12
File: xdawn.py  Project: MMKrell/pyspace
    def _stop_training(self, debug=False):
        # The following if statement is needed only to account for
        # different versions of scipy
        if list(map(int, __import__("scipy").__version__.split('.'))) >= [0, 9, 0]:
            # NOTE: mode='economic' required since otherwise
            # the memory consumption is excessive;
            # QR decompositions of X
            Qx, Rx = qr(self.X, overwrite_a=True, mode='economic')
            # QR decompositions of D
            Qd, Rd = qr(self.D, overwrite_a=True, mode='economic')
        else:
            # NOTE: econ=True required since otherwise
            #       the memory consumption is excessive
            # QR decompositions of X
            Qx, Rx = qr(self.X, overwrite_a=True, econ=True)
            # QR decompositions of D
            Qd, Rd = qr(self.D, overwrite_a=True, econ=True)

        # Singular value decomposition of Qd.T Qx
        # NOTE: full_matrices=True required since otherwise we do not get 
        #       num_channels filters. 
        self.Phi, self.Lambda, self.Psi = \
                    numpy.linalg.svd(numpy.dot(Qd.T, Qx), full_matrices=True)
        self.Psi = self.Psi.T
       
        SNR = numpy.zeros(self.X.shape[1])
        # Construct the spatial filters
        for i in range(self.Psi.shape[1]):
            # Construct spatial filter with index i as Rx^-1*Psi_i
            ui = numpy.dot(numpy.linalg.inv(Rx), self.Psi[:,i])
            wi = numpy.dot(Rx.T, self.Psi[:,i]) 
            if i < self.Phi.shape[1]:
                ai = numpy.dot(numpy.dot(numpy.linalg.inv(Rd), self.Phi[:,i]),
                               self.Lambda[i])
            if i == 0:
                self.filters = numpy.atleast_2d(ui).T
                self.wi = numpy.atleast_2d(wi)
                self.ai = numpy.atleast_2d(ai)
            else:
                self.filters = numpy.hstack((self.filters,
                                             numpy.atleast_2d(ui).T))
                self.wi = numpy.vstack((self.wi, numpy.atleast_2d(wi)))
                if i < self.Phi.shape[1]:
                    self.ai = numpy.vstack((self.ai, numpy.atleast_2d(ai)))
            a = numpy.dot(self.D, ai.T)
            b = numpy.dot(self.X, ui)
#            b.view(numpy.ndarray)
#            bb = numpy.dot(b.T, b)
#            aa = numpy.dot(a.T, a)
            SNR[i] = numpy.dot(a.T, a)/numpy.dot(b.T, b)

        self.SNR = SNR
        self.D = None
        self.X = None
Code Example #13
def QRalg(A):
	#initial step
	Q, R = cacca.qr(A)
	eigenVec = Q
	Ap = R.dot(Q)

	for i in range(10):
		Q, R = cacca.qr(Ap)
		Ap = R.dot(Q)
		eigenVec = eigenVec.dot(Q)
	
	eigenVal = np.diag(Ap)
	return eigenVal, eigenVec
Code Example #14
File: test_knot_search.py  Project: mehdidc/py-earth
def test_outcome_dependent_data():
    np.random.seed(10)
    m = 1000
    max_terms = 100
    y = np.random.normal(size=m)
    w = np.random.normal(size=m) ** 2
    weight = SingleWeightDependentData.alloc(w, m, max_terms, 1e-16)
    data = SingleOutcomeDependentData.alloc(y, weight, m, max_terms)

    # Test updating
    B = np.empty(shape=(m, max_terms))
    for k in range(max_terms):
        b = np.random.normal(size=m)
        B[:, k] = b
        code = weight.update_from_array(b)
        if k >= 99:
            1 + 1
        data.update()
        assert_equal(code, 0)
        assert_almost_equal(
            np.dot(weight.Q_t[:k + 1, :], np.transpose(weight.Q_t[:k + 1, :])),
            np.eye(k + 1))
    assert_equal(weight.update_from_array(b), -1)
#     data.update(1e-16)

    # Test downdating
    q = np.array(weight.Q_t).copy()
    theta = np.array(data.theta[:max_terms]).copy()
    weight.downdate()
    data.downdate()
    weight.update_from_array(b)
    data.update()
    assert_almost_equal(q, np.array(weight.Q_t))
    assert_almost_equal(theta, np.array(data.theta[:max_terms]))
    assert_almost_equal(
        np.array(data.theta[:max_terms]), np.dot(weight.Q_t, w * y))
    wB = B * w[:, None]
    Q, _ = qr(wB, pivoting=False, mode='economic')
    assert_almost_equal(np.abs(np.dot(weight.Q_t, Q)), np.eye(max_terms))

    # Test that reweighting works
    assert_equal(data.k, max_terms)
    w2 = np.random.normal(size=m) ** 2
    weight.reweight(w2, B, max_terms)
    data.synchronize()
    assert_equal(data.k, max_terms)
    w2B = B * w2[:, None]
    Q2, _ = qr(w2B, pivoting=False, mode='economic')
    assert_almost_equal(np.abs(np.dot(weight.Q_t, Q2)), np.eye(max_terms))
    assert_almost_equal(
        np.array(data.theta[:max_terms]), np.dot(weight.Q_t, w2 * y))
Code Example #15
def xDAWN(X,tau,N_e,remain = 5):
    '''
    xDAWN spatial filter for enhancing event-related potentials.
    
    xDAWN tries to construct spatial filters such that the 
    signal-to-signal plus noise ratio is maximized. This spatial filter is 
    particularly suited for paradigms where classification is based on 
    event-related potentials.
    
    For more details on xDAWN, please refer to 
    http://www.icp.inpg.fr/~rivetber/Publications/references/Rivet2009a.pdf

    this code is inspired by 'xdawn.py' in pySPACE:
    https://github.com/pyspace/pyspace/blob/master/pySPACE/missions/nodes/spatial_filtering/xdawn.py
    ##################
    use the same notations as in the paper linked above (../Rivet2009a.pdf)
    N_t:the number of temporal samples. (over 100,000 per session)
    N_s:the number of sensors. (56 EEG sensors)
    @param:
        X: input EEG signal (N_t,N_s)
        tau: index list where stimulus onset
        N_e: number of temporal samples of the ERP (<1.3sec)
    return:
        U: constructed spatial filters, shape (N_s, remain)
        A: estimated temporal patterns of the evoked response, shape (remain, N_e)
    '''
    N_t,N_s= X.shape
    D = build_toe_mat(N_t,N_e,tau)#construct Toeplitz matrix
    #print X.shape
    Qx, Rx = qr(X, overwrite_a = True, mode='economic')
    Qd, Rd = qr(D, overwrite_a = True, mode='economic')  
    Phi,Lambda,Psi = svd(np.dot(Qd.T,Qx),full_matrices = True)
    Psi = Psi.T
    SNR = []
    U = None
    A = None
    for i in range(remain):
        ui = np.dot(np.linalg.inv(Rx),Psi[:,i])
        ai = np.dot(np.dot(np.linalg.inv(Rd),Phi[:,i]),Lambda[i])
        if U is None:
            U = np.atleast_2d(ui).T
        else:
            U = np.hstack((U,np.atleast_2d(ui).T))
        if A is None:
            A = np.atleast_2d(ai)
        else:
            A = np.vstack((A,np.atleast_2d(ai)))
        tmp_a = np.dot(D, ai.T)
        tmp_b = np.dot(X, ui)
        #print np.dot(tmp_a.T,tmp_a)/np.dot(tmp_b.T,tmp_b)
    return U,A
Code Example #16
File: core.py  Project: pombredanne/relax
    def _set_romb_qr(self):
        '''
        Member variables used
            order
            method
            romberg_terms
            step_ratio

        '''
        nexpon = self.romberg_terms
        add1 = self.method[0] == 'c'
        rombexpon = (1 + add1) * np.arange(nexpon) + self.order
        
        srinv = 1.0 / self.step_ratio
        
        # do nothing if no romberg terms
        rmat = np.ones((nexpon + 2, nexpon + 1))
        if nexpon > 0:
            rmat[1, 1:] = srinv ** rombexpon
            for n in range(2, nexpon + 2):
                rmat[n, 1:] = srinv ** (n * rombexpon)

        rmat = np.matrix(rmat)
        # qr factorization used for the extrapolation as well
        # as the uncertainty estimates
        self._qromb, self._rromb = linalg.qr(rmat)
        self._rmat = rmat
Code Example #17
File: linalg.py  Project: ehthiede/Diffusion_Maps
def groupInverse(M):
    """
    Computes the group inverse of stochastic matrix using the algorithm
    given by Golub and Meyer in:
    G. H. Golub and C. D. Meyer, Jr, SIAM J. Alg. Disc. Meth. 7, 273-
    281 (1986)

    Parameters
    ----------
        M : ndarray
            A square matrix with index 1.
    
    Returns
    -------
        grpInvM : ndarray
            The group inverse of M.
    """
    L=np.shape(M)[1]
    q,r=qr(M)
    piDist=q[:,L-1]
    piDist=(1/np.sum(piDist))*piDist
    specProjector=np.identity(L)-np.outer(np.ones(L),piDist)
    u=r[0:(L-1),0:(L-1)]#remember 0:(L-1) actually means 0 to L-2!
    uInv= inv(u)#REPLACE W. lapack, invert triangular matrix ROUTINE
    uInv = np.real(uInv)
    grpInvM=np.zeros((L,L))
    grpInvM[0:(L-1),0:(L-1)]=uInv
    grpInvM=np.dot(specProjector,np.dot(grpInvM,np.dot(q.transpose(),specProjector)))
    return grpInvM
Code Example #18
def random_spd(p, eig_min, cond, random_state=0):
    """Generate a random symmetric positive definite matrix.

    Parameters
    ----------
    p : int
        The first dimension of the array.

    eig_min : float
        Minimal eigenvalue.

    cond : float
        Condition number, defined as the ratio of the maximum eigenvalue to the
        minimum one.

    random_state : int or numpy.random.RandomState instance, optional
        random number generator, or seed.

    Returns
    -------
    output : numpy.ndarray, shape (p, p)
        A symmetric positive definite matrix with the given minimal eigenvalue
        and condition number.
    """
    random_state = check_random_state(random_state)
    mat = random_state.randn(p, p)
    unitary, _ = linalg.qr(mat)
    diag = random_diagonal(p, v_min=eig_min, v_max=cond * eig_min,
                           random_state=random_state)
    return unitary.dot(diag).dot(unitary.T)
Code Example #19
File: ar_model.py  Project: mtambos/Neural-Simulation
def _ar_model_qr(data, p=1):
    """QR factorization for a (multivariate) zero-mean AR model

    :Parameters:
        data : ndarray
            data with observations on the rows and variables on the columns
        p : int or list
            the model order, how many samples to regress over
    """

    # inits
    n, m = data.shape            # observations, channels
    ne = n - p                   # number of block equations of size m
    np = m * p                   # number of parameter vectors of size m
    K = N.zeros((ne, np + m))  # the lag shifted data matrix

    # compute predictors
    for i in xrange(p):
        K[:, m * i:m * (i + 1)] = data[p - i - 1:n - i - 1, :]
    K[:, np:np + m] = data[p:n, :]

    # condition the matrix and factorize
    scale = N.sqrt(((np + m) ** 2 + np + m + 1) * EPS)
    R = NL.qr(
        N.concatenate((
            K,
            scale * N.diag([NL.norm(K[:, i]) for i in xrange(K.shape[1])])
        )),
        mode='r'
    )

    # return
    del K
    return R
Code Example #20
File: linalg.py  Project: ChadFulton/statsmodels
def transf_constraints(constraints):
    """use QR to get transformation matrix to impose constraint

    Parameters
    ----------
    constraints : ndarray, 2-D
        restriction matrix with one constraint per row

    Returns
    -------
    transf : ndarray
        transformation matrix to reparameterize so that constraint is
        imposed

    Notes
    -----
    This is currently an internal helper function for GAM.
    API not stable and will most likely change.

    The code for this function was taken from patsy spline handling, and
    corresponds to the reparameterization used by Wood in R's mgcv package.

    See Also
    --------
    statsmodels.base._constraints.TransformRestriction : class to impose
        constraints by reparameterization used by `_fit_constrained`.

    """

    from scipy import linalg

    m = constraints.shape[0]
    q, _ = linalg.qr(np.transpose(constraints))
    transf = q[:, m:]
    return transf
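A small usage sketch (illustrative, not part of statsmodels): the columns of the returned matrix span the nullspace of the constraint matrix, so any coefficient vector of the form transf.dot(b_reduced) satisfies the constraints automatically.

import numpy as np

constraints = np.array([[1.0, 1.0, 0.0, 0.0],      # hypothetical restrictions
                        [0.0, 0.0, 1.0, -1.0]])
T = transf_constraints(constraints)
assert T.shape == (4, 2)                            # p - m remaining free directions
assert np.allclose(constraints.dot(T), 0.0)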
Code Example #21
 def test_random_complex(self):
     n = 20
     for k in range(2):
         a = random([n,n])+1j*random([n,n])
         q,r = qr(a)
         assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
         assert_array_almost_equal(dot(q,r),a)
Code Example #22
File: fixes.py  Project: DonKrieger/mne-python
def _qr_economic_old(A, **kwargs):
    """
    Compat function for the QR-decomposition in economic mode
    Scipy 0.9 changed the keyword econ=True to mode='economic'
    """
    with warnings.catch_warnings(record=True):
        return linalg.qr(A, econ=True, **kwargs)
Code Example #23
def random_entry(n, min_eig, max_eig, case):

    # Generate random matrix
    rand = np.random.uniform(-1, 1, (n, n))

    # QR decomposition
    Q, _, _ = qr(rand, pivoting=True)

    # Generate random eigenvalues
    eigvalues = np.random.uniform(min_eig, max_eig, n)
    eigvalues = np.sort(eigvalues)[::-1]

    # Generate matrix
    Qaux = np.multiply(eigvalues, Q)
    A = np.dot(Qaux, Q.T)

    # Generate gradient vector accordingly
    # to the case is being tested.
    if case == 'hard':
        g = np.zeros(n)
        g[:-1] = np.random.uniform(-1, 1, n-1)
        g = np.dot(Q, g)
    elif case == 'jac_equal_zero':
        g = np.zeros(n)
    else:
        g = np.random.uniform(-1, 1, n)

    return A, g
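An illustrative check of random_entry (case strings other than 'hard' and 'jac_equal_zero' fall through to a generic random gradient): the matrix is symmetric with eigenvalues inside [min_eig, max_eig].

import numpy as np
from scipy.linalg import qr

A, g = random_entry(6, min_eig=1.0, max_eig=10.0, case='easy')
assert np.allclose(A, A.T)                          # symmetric by construction
w = np.linalg.eigvalsh(A)
assert w.min() >= 1.0 - 1e-10 and w.max() <= 10.0 + 1e-10
assert g.shape == (6,)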
Code Example #24
File: CameraParameters.py  Project: PierreMandas/SIGB
    def __RQ(self, matrix):
        """Estimate the factor first 3*3 part."""
        Q, R = qr(np.flipud(matrix).T)
        R = np.flipud(R.T)
        Q = Q.T

        return R[:, ::-1], Q[::-1, :]
Code Example #25
File: lr.py  Project: bayerj/utils
def robust(X, t, mean_0, prec_0, 
        a, b, var, eps=10**-4, **params):
    """
    """
    prec_chol = la.cholesky(prec_0)
    Xt = np.append(X, prec_chol, axis=0)
    tt = np.append(t, np.dot(prec_chol, mean_0), axis=0)
    n, d = Xt.shape
    w = var*np.ones((n,1))
    w[:-d] = a/b
    # a only serves as shifted constant
    a += 0.5
    result = {}
    ll_old = -np.inf
    lls = []
    while True:
        # estimate gaussian for mean
        q, r = la.qr(np.sqrt(w/var)*Xt, mode='economic')
        del q
        mean = la.lstsq(np.sqrt(w/var)*Xt, np.sqrt(w/var)*tt)[0]
        # estimate posterior weights 
        # se -- squared error between prediction and target
        se = (t - np.dot(X, mean))**2
        # sqrt_eqk -- square root of equivalent kernel
        sqrt_eqk = la.solve_triangular(r, X.T, trans=1).T
        mhlb = np.sum(sqrt_eqk**2, axis=1)[:, np.newaxis]
        # a stays constant, according to formula in paper
        b_new = b + (se + mhlb)/(2*var)
        w_new = a/b_new
        # M-Step, estimate sigma
        var = np.mean(w_new * (se + mhlb))
        w[:-d] = w_new
        w[-d:] = var
        # maximize loglikelihood -> track its progress
        # leaving out constant terms
        ll = np.log(var)/2
        ll -= np.sum(w[:-d] * (se + mhlb))/(2*var)
        # entropies
        # gaussian of mean, no constant terms
        ll += (d-n) * np.sum(np.diag(r))
        # gamma dist, again no constants
        ll += 2 * np.sum(np.log(b_new))
        if (ll < ll_old):
            print "Loglikelihood decreased to %f!" % ll
            print "This can happen after the first iteration!"
            lls.append(ll)
            ll_old = ll
        elif ((ll - ll_old) < eps):
            result["mean"] = mean
            result["sqrt_prec"] = r
            result["weights"] = w[:-d]
            result["var"] = var
            result["logl"] = lls
            break
        else:
            print "LogL: ", ll
            lls.append(ll)
            ll_old = ll
    a -= 0.5
    return result
Code Example #26
def pick_rff_freqs(n_freqs, bandwidth, seed=None, n_feats=None,
                   orthogonal=True, stats=None, skip_feats=None):
    '''
    Sets up sampling with random Fourier features corresponding to a Gaussian
    kernel with the given bandwidth, with an embedding dimension of `2*n_freqs`.

    Either pass n_feats, or pass stats (and maybe skip_feats) to compute it.

    If orthogonal, uses Orthogonal Random Features:
      https://arxiv.org/abs/1610.09072
    '''
    if n_feats is None:
        n_feats = _num_feats(stats, skip_feats=skip_feats)
    rs = np.random.mtrand._rand if seed is None else np.random.RandomState(seed)

    if not orthogonal or n_feats == 1:  # ORF doesn't do anything for d=1
        return rs.normal(0, 1 / bandwidth, size=(n_feats, n_freqs))

    n_reps = int(np.ceil(n_freqs / n_feats))
    freqs = np.empty((n_feats, n_freqs))
    for i in range(n_reps):
        Q, _ = qr(rs.normal(0, 1, size=(n_feats, n_feats)), overwrite_a=True)
        if i < n_reps - 1:
            freqs[:, i*n_feats:(i+1)*n_feats] = Q.T
        else:
            freqs[:, i*n_feats:] = Q[:n_freqs - i*n_feats].T

    S = rs.chisquare(n_feats, size=n_freqs)
    np.sqrt(S, out=S)
    S /= bandwidth
    freqs *= S[np.newaxis, :]
    return freqs
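A shape-only usage sketch for pick_rff_freqs (illustrative; it passes n_feats directly so the unrelated stats/skip_feats helpers are not needed, and assumes `qr` is scipy.linalg.qr):

import numpy as np
from scipy.linalg import qr

W = pick_rff_freqs(n_freqs=8, bandwidth=2.0, seed=0, n_feats=5)       # orthogonal random features
assert W.shape == (5, 8)
W_iid = pick_rff_freqs(n_freqs=8, bandwidth=2.0, seed=0, n_feats=5, orthogonal=False)
assert W_iid.shape == (5, 8)                                          # plain Gaussian frequencies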
Code Example #27
File: server.py  Project: rbessick5/PrairieLearn
def generate(data):

    sf = 2

    # Matrix shape
    M = 3

    myNumber = 2.148233

    # Generating the orthogonal matrix U
    # (numbers rounded with 2 decimal digits)
    X = np.random.rand(M, M)
    Q,R = sla.qr(X)
    U = np.around(Q, sf + 1)

    b = np.random.rand(M)
    bc = b.reshape((M, 1))
    br = b.reshape(1, M)

    data['params']['sf'] = sf
    data['params']['M'] = M
    data['params']['U'] = pl.to_json(U)
    data['params']['myNumber'] = pl.to_json(myNumber)
    data['params']['b'] = pl.to_json(br)
    data['params']['c'] = pl.to_json(bc)


    return data
Code Example #28
File: math.py  Project: Midnighter/pyorganism
def qr_nullspace(mat):
    """
    Compute the nullspace of mat.T for a 2-D matrix mat, i.e. the orthogonal
    complement of the column space of `mat`. The dimensions of mat, m x n, are
    assumed to satisfy m > n; pass the transpose of a wide matrix to obtain its
    nullspace.

    In a QR decomposition, the Q matrix of dimensions m x m can be split into
    two parts Q = [Q1, Q2] where Q1 has dimensions m x n and Q2 the remaining
    columns, giving it dimensions m x (m - n). Each column of Q2 is a basis
    vector of the nullspace.
    See: http://stackoverflow.com/a/2219160/677122

    Parameters
    ----------
    mat : ndarray
        mat must be 2-D.

    Return value
    ------------
    q2 : ndarray
        If `mat` is an array with shape (m, n), then `q2` will be an array
        with shape (m, m - n). The columns of `q2` are a basis for the
        nullspace of mat.T; for each column i, np.dot(mat.T, q2[:, i]) is
        approximately the zero vector.
    """
    assert mat.shape[0] >= mat.shape[1]
    (q, r) = sl.qr(mat)
    q2 = q[:, mat.shape[1] :]
    return q2
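A brief check consistent with the code above (assuming `sl` is scipy.linalg, as the snippet implies): for a tall, full-column-rank `mat` the returned columns are orthogonal to the column space of `mat`.

import numpy as np
import scipy.linalg as sl

mat = np.random.randn(6, 4)
q2 = qr_nullspace(mat)
assert q2.shape == (6, 2)
assert np.allclose(np.dot(mat.T, q2), 0.0)      # q2 spans null(mat.T)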
Code Example #29
File: unscented.py  Project: Answeror/pykalman
def qr(A):
    """Get square upper triangular matrix of QR decomposition of matrix A"""
    N, L = A.shape
    if not N >= L:
        raise ValueError("Number of columns must exceed number of rows")
    Q, R = linalg.qr(A)
    return R[:L, :L]
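A short, illustrative check of this wrapper (assuming `linalg` is scipy.linalg): the returned triangular factor satisfies R.T.dot(R) == A.T.dot(A), the square-root property that filters built on this helper rely on.

import numpy as np
from scipy import linalg

A = np.random.randn(10, 4)
R = qr(A)                    # the wrapper defined above, not scipy's qr
assert R.shape == (4, 4)
assert np.allclose(np.dot(R.T, R), np.dot(A.T, A))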
Code Example #30
File: nullspace.py  Project: bcriger/evoMPS
def nullspace_qr(A):
    """Compute an approximate basis for the nullspace of A.

    The algorithm used by this function is based on the QR
    decomposition of `A`.

    Parameters
    ----------
    A : ndarray
        A should be at most 2-D.  A 1-D array with length k will be treated
        as a 2-D with shape (1, k)

    Return value
    ------------
    ns : ndarray
        If `A` is an array with shape (m, k), then `ns` will be an array
        with shape (k, n), where n is the estimated dimension of the
        nullspace of `A`.  The columns of `ns` are a basis for the
        nullspace; each element in numpy.dot(A, ns) will be approximately
        zero.
    """

    A = np.atleast_2d(A)

    Q, R = qr(A.T)
    
    ns = Q[:, R.shape[1]:].conj()
    
    return ns
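A minimal check (illustrative; assumes `qr` is scipy.linalg.qr as in the module this comes from): for a wide random matrix the returned basis is annihilated by A.

import numpy as np
from scipy.linalg import qr

A = np.random.randn(3, 7)
ns = nullspace_qr(A)
assert ns.shape == (7, 4)                  # 7 - rank(A) = 4 basis vectors
assert np.allclose(np.dot(A, ns), 0.0)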
Code Example #31
File: utils.py  Project: brendrach/enterprise_fdm
def get_coefficients(pta,
                     params,
                     n=1,
                     phiinv_method="cliques",
                     common_sparse=False):
    ret = []

    TNrs = pta.get_TNr(params)
    TNTs = pta.get_TNT(params)
    phiinvs = pta.get_phiinv(params, logdet=False, method=phiinv_method)

    # ...repeated code in the two if branches... refactor at will!
    if pta._commonsignals:
        if common_sparse:
            Sigma = sps.block_diag(TNTs, "csc") + sps.csc_matrix(phiinvs)
            TNr = np.concatenate(TNrs)

            ch = cholesky(Sigma)
            mn = ch(TNr)
            Li = sps.linalg.inv(ch.L()).toarray()
        else:
            Sigma = sl.block_diag(*TNTs) + phiinvs
            TNr = np.concatenate(TNrs)

            u, s, _ = sl.svd(Sigma)
            mn = np.dot(u, np.dot(u.T, TNr) / s)
            Li = u * np.sqrt(1 / s)

        for j in range(n):
            b = mn + np.dot(Li, np.random.randn(Li.shape[0]))

            pardict, ntot = {}, 0
            for i, model in enumerate(pta.pulsarmodels):
                for sig in model._signals:
                    if sig.signal_type in ["basis", "common basis"]:
                        nb = sig.get_basis(params=params).shape[1]

                        if nb + ntot > len(b):
                            raise IndexError("Missing some parameters! "
                                             "You need to disable GP "
                                             "basis column reuse.")

                        pardict[sig.name + "_coefficients"] = b[ntot:nb + ntot]
                        ntot += nb

            if len(ret) <= j:
                ret.append(params.copy())

            ret[j].update(pardict)

        return ret[0] if n == 1 else ret
    else:
        for i, model in enumerate(pta.pulsarmodels):
            phiinv, d, TNT = phiinvs[i], TNrs[i], TNTs[i]

            Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)

            try:
                u, s, _ = sl.svd(Sigma)
                mn = np.dot(u, np.dot(u.T, d) / s)
                Li = u * np.sqrt(1 / s)
            except np.linalg.LinAlgError:
                Q, R = sl.qr(Sigma)
                Sigi = sl.solve(R, Q.T)
                mn = np.dot(Sigi, d)
                u, s, _ = sl.svd(Sigi)
                Li = u * np.sqrt(1 / s)

            for j in range(n):
                b = mn + np.dot(Li, np.random.randn(Li.shape[0]))

                pardict, ntot = {}, 0
                for sig in model._signals:
                    if sig.signal_type == "basis":
                        nb = sig.get_basis(params=params).shape[1]

                        if nb + ntot > len(b):
                            raise IndexError("Missing some parameters! "
                                             "You need to disable GP "
                                             "basis column reuse.")

                        pardict[sig.name + "_coefficients"] = b[ntot:nb + ntot]
                        ntot += nb

                if len(ret) <= j:
                    ret.append(params.copy())

                ret[j].update(pardict)

        return ret[0] if n == 1 else ret
Code Example #32
File: signal.py  Project: tobywise/nilearn
def clean(signals,
          sessions=None,
          detrend=True,
          standardize='zscore',
          confounds=None,
          standardize_confounds=True,
          low_pass=None,
          high_pass=None,
          t_r=2.5,
          ensure_finite=False):
    """Improve SNR on masked fMRI signals.

    This function can do several things on the input signals, in
    the following order:

    - detrend
    - low- and high-pass filter
    - remove confounds
    - standardize

    Low-pass filtering improves specificity.

    High-pass filtering should be kept small, to keep some
    sensitivity.

    Filtering is only meaningful on evenly-sampled signals.

    According to Lindquist et al. (2018), removal of confounds will be done
    orthogonally to temporal filters (low- and/or high-pass filters), if both
    are specified.

    Parameters
    ----------
    signals: numpy.ndarray
        Timeseries. Must have shape (instant number, features number).
        This array is not modified.

    sessions : numpy array, optional
        Add a session level to the cleaning process. Each session will be
        cleaned independently. Must be a 1D array of n_samples elements.

    confounds: numpy.ndarray, str, DataFrame or list of
        Confounds timeseries. Shape must be
        (instant number, confound number), or just (instant number,)
        The number of time instants in signals and confounds must be
        identical (i.e. signals.shape[0] == confounds.shape[0]).
        If a string is provided, it is assumed to be the name of a csv file
        containing signals as columns, with an optional one-line header.
        If a list is provided, all confounds are removed from the input
        signal, as if all were in the same array.

    t_r: float
        Repetition time, in seconds (sampling period). Set to None if not
        specified.

    low_pass, high_pass: float
        Respectively high and low cutoff frequencies, in Hertz.

    detrend: bool
        If detrending should be applied on timeseries (before
        confound removal)

    standardize: {'zscore', 'psc', False}, default is 'zscore'
        Strategy to standardize the signal.
        'zscore': the signal is z-scored. Timeseries are shifted
        to zero mean and scaled to unit variance.
        'psc':  Timeseries are shifted to zero mean value and scaled
        to percent signal change (as compared to original mean signal).
        False : Do not standardize the data.

    standardize_confounds: boolean, optional, default is True
        If standardize_confounds is True, the confounds are z-scored:
        their mean is put to 0 and their variance to 1 in the time dimension.

    ensure_finite: bool
        If True, the non-finite values (NANs and infs) found in the data
        will be replaced by zeros.

    Returns
    -------
    cleaned_signals: numpy.ndarray
        Input signals, cleaned. Same shape as `signals`.

    Notes
    -----
    Confounds removal is based on a projection on the orthogonal
    of the signal space. See `Friston, K. J., A. P. Holmes,
    K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.
    "Statistical Parametric Maps in Functional Imaging: A General
    Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210.
    <http://dx.doi.org/10.1002/hbm.460020402>`_

    Orthogonalization between temporal filters and confound removal is based on
    suggestions in `Lindquist, M., Geuter, S., Wager, T., & Caffo, B. (2018).
    Modular preprocessing pipelines can reintroduce artifacts into fMRI data.
    bioRxiv, 407676. <http://dx.doi.org/10.1101/407676>`_

    See Also
    --------
        nilearn.image.clean_img
    """
    if isinstance(low_pass, bool):
        raise TypeError("low pass must be float or None but you provided "
                        "low_pass='{0}'".format(low_pass))
    if isinstance(high_pass, bool):
        raise TypeError("high pass must be float or None but you provided "
                        "high_pass='{0}'".format(high_pass))

    if not isinstance(
            confounds,
        (list, tuple, str, np.ndarray, pd.DataFrame, type(None))):
        raise TypeError("confounds keyword has an unhandled type: %s" %
                        confounds.__class__)

    if not isinstance(ensure_finite, bool):
        raise ValueError(
            "'ensure_finite' must be boolean type True or False "
            "but you provided ensure_finite={0}".format(ensure_finite))

    signals = signals.copy()
    if not isinstance(signals, np.ndarray):
        signals = as_ndarray(signals)

    if ensure_finite:
        mask = np.logical_not(np.isfinite(signals))
        if mask.any():
            signals[mask] = 0

    # Read confounds
    if confounds is not None:
        if not isinstance(confounds, (list, tuple)):
            confounds = (confounds, )

        all_confounds = []
        for confound in confounds:
            # cast DataFrame to array
            if isinstance(confound, pd.DataFrame):
                confound = confound.values

            if isinstance(confound, str):
                filename = confound
                confound = csv_to_array(filename)
                if np.isnan(confound.flat[0]):
                    # There may be a header
                    if NP_VERSION >= [1, 4, 0]:
                        confound = csv_to_array(filename, skip_header=1)
                    else:
                        confound = csv_to_array(filename, skiprows=1)
                if confound.shape[0] != signals.shape[0]:
                    raise ValueError("Confound signal has an incorrect length")

            elif isinstance(confound, np.ndarray):
                if confound.ndim == 1:
                    confound = np.atleast_2d(confound).T
                elif confound.ndim != 2:
                    raise ValueError("confound array has an incorrect number "
                                     "of dimensions: %d" % confound.ndim)

                if confound.shape[0] != signals.shape[0]:
                    raise ValueError("Confound signal has an incorrect length")
            else:
                raise TypeError("confound has an unhandled type: %s" %
                                confound.__class__)
            all_confounds.append(confound)

        # Restrict the signal to the orthogonal of the confounds
        confounds = np.hstack(all_confounds)
        del all_confounds

    if sessions is not None:
        if not len(sessions) == len(signals):
            raise ValueError(
                ('The length of the session vector (%i) '
                 'does not match the length of the signals (%i)') %
                (len(sessions), len(signals)))
        for s in np.unique(sessions):
            session_confounds = None
            if confounds is not None:
                session_confounds = confounds[sessions == s]
            signals[sessions == s, :] = \
                clean(signals[sessions == s],
                      detrend=detrend, standardize=standardize,
                      confounds=session_confounds, low_pass=low_pass,
                      high_pass=high_pass, t_r=t_r)

    signals = _ensure_float(signals)

    # Apply low- and high-pass filters
    if low_pass is not None or high_pass is not None:
        if t_r is None:
            raise ValueError("Repetition time (t_r) must be specified for "
                             "filtering. You specified None.")
    if detrend:
        mean_signals = signals.mean(axis=0)
        signals = _standardize(signals, standardize=False, detrend=detrend)

    if low_pass is not None or high_pass is not None:
        if t_r is None:
            raise ValueError("Repetition time (t_r) must be specified for "
                             "filtering")

        signals = butterworth(signals,
                              sampling_rate=1. / t_r,
                              low_pass=low_pass,
                              high_pass=high_pass)
    # Remove confounds
    if confounds is not None:
        confounds = _ensure_float(confounds)
        # Apply low- and high-pass filters to keep filters orthogonal
        # (according to Lindquist et al. (2018))
        if low_pass is not None or high_pass is not None:

            confounds = butterworth(confounds,
                                    sampling_rate=1. / t_r,
                                    low_pass=low_pass,
                                    high_pass=high_pass)

        confounds = _standardize(confounds,
                                 standardize=standardize_confounds,
                                 detrend=detrend)

        if not standardize_confounds:
            # Improve numerical stability by controlling the range of
            # confounds. We don't rely on _standardize as it removes any
            # constant contribution to confounds.
            confound_max = np.max(np.abs(confounds), axis=0)
            confound_max[confound_max == 0] = 1
            confounds /= confound_max

        # Pivoting in qr decomposition was added in scipy 0.10
        Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
        Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float64).eps * 100.]
        signals -= Q.dot(Q.T).dot(signals)

    # Standardize
    if detrend and (standardize == 'psc'):
        # If the signal is detrended, we have to know the original mean
        # signal to calculate the psc.
        signals = _standardize(signals + mean_signals,
                               standardize=standardize,
                               detrend=False)
    else:
        signals = _standardize(signals, standardize=standardize, detrend=False)

    return signals
Code Example #33
File: trf.py  Project: Ombarus/python_env
def trf_no_bounds(fun, jac, x0, f0, J0, ftol, xtol, gtol, max_nfev, x_scale,
                  loss_function, tr_solver, tr_options, verbose):
    x = x0.copy()

    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1
    m, n = J.shape

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale

    Delta = norm(x0 * scale_inv)
    if Delta == 0:
        Delta = 1.0

    if tr_solver == 'lsmr':
        reg_term = 0
        damp = tr_options.pop('damp', 0.0)
        regularize = tr_options.pop('regularize', True)

    if max_nfev is None:
        max_nfev = x0.size * 100

    alpha = 0.0  # "Levenberg-Marquardt" parameter

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        d = scale
        g_h = d * g

        if tr_solver == 'exact':
            J_h = J * d
            U, s, V = svd(J_h, full_matrices=False)
            V = V.T
            uf = U.T.dot(f)
        elif tr_solver == 'lsmr':
            J_h = right_multiplied_operator(J, d)

            if regularize:
                a, b = build_quadratic_1d(J_h, g_h, -g_h)
                to_tr = Delta / norm(g_h)
                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
                reg_term = -ag_value / Delta**2

            damp_full = (damp**2 + reg_term)**0.5
            gn_h = lsmr(J_h, f, damp=damp_full, **tr_options)[0]
            S = np.vstack((g_h, gn_h)).T
            S, _ = qr(S, mode='economic')
            JS = J_h.dot(S)
            B_S = np.dot(JS.T, JS)
            g_S = S.T.dot(g_h)

        actual_reduction = -1
        while actual_reduction <= 0 and nfev < max_nfev:
            if tr_solver == 'exact':
                step_h, alpha, n_iter = solve_lsq_trust_region(
                    n, m, uf, s, V, Delta, initial_alpha=alpha)
            elif tr_solver == 'lsmr':
                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
                step_h = S.dot(p_S)

            predicted_reduction = -evaluate_quadratic(J_h, g_h, step_h)
            step = d * step_h
            x_new = x + step
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step_h)

            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta_new, ratio = update_tr_radius(Delta, actual_reduction,
                                                predicted_reduction,
                                                step_h_norm,
                                                step_h_norm > 0.95 * Delta)
            alpha *= Delta / Delta_new
            Delta = Delta_new

            step_norm = norm(step)
            termination_status = check_termination(actual_reduction, cost,
                                                   step_norm, norm(x), ratio,
                                                   ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            x = x_new

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    active_mask = np.zeros_like(x)
    return OptimizeResult(x=x,
                          cost=cost,
                          fun=f_true,
                          jac=J,
                          grad=g,
                          optimality=g_norm,
                          active_mask=active_mask,
                          nfev=nfev,
                          njev=njev,
                          status=termination_status)
Code Example #34
def componentAnalysis(X, Y, processes, epsilon):
    """
    Carries out a selection of component analyses on X and Y to produce a
    projection matrix projMat which maps X to its components.  Valid
    projections are CCA, PCA, CCA-classwise, Original axes and Random Rotation.
    """
    probs = dict2array(X=processes) * 1
    # Sample projections to use if some set to be probabilistically used
    bToSample = np.logical_and((probs > 0), (probs < 1))
    if np.any(bToSample):
        # TODO: Ignoring for now
        probs[~bToSample] = 0
        cumprobs = probs.cumsum(axis=0) / np.sum(probs)
        iSampled = np.sum(np.random.rand() > cumprobs) + 1
        iToSample = bToSample.ravel().nonzero()[0]
        for n in range(iToSample.size):
            processes[iToSample[n]] = False
        processes[iSampled] = True

    # Eliminate any columns that don't vary.  We will add these back into the
    # projection matrices at the end
    bXVaries = queryIfColumnsVary(X=X, tol=1e-12)
    bYvaries = queryIfColumnsVary(X=Y, tol=1e-12)
    nXorg = bXVaries.size
    nYorg = bYvaries.size

    if ~(np.any(bXVaries)) or ~(np.any(bYvaries)):
        # One of X or Y doesn't vary so component analysis fails.
        # Return projection corresponding to first columns of X and Y
        A = np.concatenate((np.array([[1]]), np.zeros((nXorg - 1, 1))))
        B = np.concatenate((np.array([[1]]), np.zeros((nYorg - 1, 1))))
        U = X[:, 0]
        V = Y[:, 0]
        r = 0

        return A, B, U, V, r

    X = X[:, bXVaries]
    Y = Y[:, bYvaries]

    # Checks and sizes
    x1, x2 = X.shape
    assert (Y.shape[0] == x1), 'Input sizes do not match!'
    assert (x1 !=
            1), 'Cannot carry out component analysis with only one point!'
    K = Y.shape[1]

    # Subtraction of the mean is common to the process of calculating the
    # projection matrices for both CCA and PCA but for computational
    # efficiency we don't make this translation when actually applying the
    # projections to choose the splits as it is the same effect on all points.
    # In other words, we don't split in canonical component space exactly, but
    # in a constant translation of this space.
    muX = np.divide(np.sum(X, axis=0), X.shape[0])
    muY = np.divide(np.sum(Y, axis=0), Y.shape[0])

    X = np.subtract(X, muX)
    Y = np.subtract(Y, muY)

    # Initialize the project matrices
    projMat = np.full((X.shape[1], 0), np.nan)
    yprojMat = np.full((Y.shape[1], 0), np.nan)
    r = np.array([])

    if processes['Original']:
        projMat = np.concatenate((projMat, np.eye(x2)))

    if processes['Random']:
        projMat = np.concatenate((projMat, randomRotation(N=x2)))

    if processes['PCA']:
        # PCA projection
        pcaCoeff, _, _ = pcaLite(X=X)
        projMat = np.concatenate((projMat, pcaCoeff))

    if processes['CCA'] or processes['CCAclasswise']:
        # CCA based projections
        q1, r1, p1 = la.qr(X, pivoting=True, mode='economic')
        # Reduce to full rank within some tolerance
        if r1.size == 0:
            rankX = 0
        else:
            rankX = np.sum(
                np.absolute(np.diag(r1)) >= (epsilon * np.absolute(r1[0, 0])))

        if rankX == 0:
            A = np.concatenate((np.array([[1]]), np.zeros((nXorg - 1, 1))))
            B = np.concatenate((np.array([[1]]), np.zeros((nYorg - 1, 1))))
            U = X[:, 0]
            V = Y[:, 0]
            r = 0

            return A, B, U, V, r

        elif rankX < x2:
            q1 = q1[:, 0:rankX]
            r1 = r1[0:rankX, 0:rankX]

        if processes['CCA']:
            q2, r2, p2 = la.qr(Y, mode='economic', pivoting=True)

            # Reduce to full rank within some tolerance
            if r2.size == 0:
                rankY = 0
            else:
                rankY = np.sum(
                    np.absolute(np.diag(r2)) >= (epsilon *
                                                 np.absolute(r2[0, 0])))

            if rankY == 0:
                A = np.concatenate((np.array([[1]]), np.zeros((nXorg - 1, 1))))
                B = np.concatenate((np.array([[1]]), np.zeros((nYorg - 1, 1))))
                U = X[:, 0]
                V = Y[:, 0]
                r = 0

                return A, B, U, V, r

            elif rankY < K:
                q2 = q2[:, 0:rankY]

            # Solve CCA using the decompositions, taking care to use minimal
            # complexity orientation for SVD.  Note the two calculations are
            # equivalent except in computational complexity
            d = np.min((rankX, rankY))

            if rankX >= rankY:
                L, D, M = np.linalg.svd(q1.T @ q2)
                D = np.diag(D)
            else:
                M, D, L = np.linalg.svd(q2.T @ q1)
                D = np.diag(D)

            if isSquare(r1):
                locProj = np.linalg.solve(r1, L[:, 0:d] * np.sqrt(x1 - 1))
            else:
                locProj, _, _, _ = np.linalg.lstsq(r1,
                                                   L[:, 0:d] * np.sqrt(x1 - 1),
                                                   rcond=-1)

            # Put coefficients back to their full size and their correct order
            locProj = amerge(a=locProj,
                             b=np.concatenate(
                                 (locProj, np.zeros((x2 - rankX, d)))),
                             p=p1)
            projMat = np.concatenate((projMat, locProj),
                                     axis=1)  # Maybe fix with axis

            # Projection For Y
            r2 = r2[0:rankY, 0:rankY]
            if isSquare(r2):
                locyProj = np.linalg.solve(r2, M[:, 0:d] * np.sqrt(x1 - 1))
            else:
                locyProj, _, _, _ = np.linalg.lstsq(r2,
                                                    M[:, 0:d] *
                                                    np.sqrt(x1 - 1),
                                                    rcond=-1)
            locyProj = amerge(a=locyProj,
                              b=np.concatenate(
                                  (locyProj, np.zeros((K - rankY, d)))),
                              p=p2)
            yprojMat = np.concatenate((yprojMat, locyProj), axis=1)

            r = np.minimum(np.maximum(np.diag(D[:, 0:d]), 0), 1)

        if processes['CCAclasswise']:
            # Consider each output in an in / out fashion to generate a set of K projections.
            for k in range(K):
                L, _, _ = la.svd(q1.T @ Y[:, k])
                if isSquare(r1):
                    locProj = np.linalg.solve(r1, L[:, 0] * np.sqrt(x1 - 1))
                else:
                    locProj = np.linalg.lstsq(r1, L[:, 0] * np.sqrt(x1 - 1))
                locProj[p1, :] = np.concatenate(
                    (locProj, np.zeros((x2 - rankX, 1))))
                projMat = np.concatenate((projMat, locProj))

    # Normalize the projection matrices.  This ensures that the later tests for
    # close points are triggered appropriately and is useful for interpretability.
    projMat = np.divide(projMat, np.sqrt(np.sum(projMat**2, axis=0)))

    # Note that as in general only a projection matrix is given, we need to
    # add the mean back to be consistent with general use.  This equates to
    # addition of a constant term to each column in U
    U = np.dot(X, projMat)
    V = np.dot(Y, yprojMat)

    # Finally, add back in the empty rows in the projection matrix for the
    # things which didn't vary
    A = np.zeros((nXorg, projMat.shape[1]))
    if len(bXVaries.shape) > 1 and bXVaries.shape[0] == 1:
        A[bXVaries[0], :] = projMat
    elif len(bXVaries.shape) > 1 and bXVaries.shape[1] == 1:
        A[bXVaries[:, 0], :] = projMat
    else:
        A[bXVaries, :] = projMat

    B = np.zeros((nYorg, yprojMat.shape[1]))
    if len(bYvaries.shape) > 1 and bYvaries.shape[0] == 1:
        B[bYvaries[0], :] = yprojMat
    elif len(bYvaries.shape) > 1 and bYvaries.shape[1] == 1:
        B[bYvaries[:, 0], :] = yprojMat
    else:
        B[bYvaries, :] = yprojMat

    return A, B, U, V, r
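For reference, here is a compact standalone sketch of the same QR + SVD route to CCA on synthetic data. All names below are hypothetical and independent of the function above; the singular values D are the canonical correlations and should match the correlation of the first pair of canonical variates.

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n = 500
z = rng.randn(n, 1)
X = np.hstack((z + 0.1 * rng.randn(n, 1), rng.randn(n, 2)))
Y = np.hstack((z + 0.1 * rng.randn(n, 1), rng.randn(n, 1)))
Xc, Yc = X - X.mean(0), Y - Y.mean(0)
q1, r1 = linalg.qr(Xc, mode='economic')
q2, r2 = linalg.qr(Yc, mode='economic')
L, D, Mt = np.linalg.svd(q1.T @ q2)
d = min(Xc.shape[1], Yc.shape[1])
A = np.linalg.solve(r1, L[:, :d]) * np.sqrt(n - 1)        # projection for X
B = np.linalg.solve(r2, Mt.T[:, :d]) * np.sqrt(n - 1)     # projection for Y
U, V = Xc @ A, Yc @ B                                     # canonical variates
print(np.round(D[:d], 3))                                 # canonical correlations
print(np.round(np.corrcoef(U[:, 0], V[:, 0])[0, 1], 3))   # equals D[0]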
コード例 #35
def dirac_recon_joint_alg_fast(G,
                               a,
                               num_dirac,
                               shape_b,
                               noise_level=0,
                               max_ini=100,
                               stop_cri='mse',
                               max_inner_iter=20,
                               max_num_same_x=1,
                               max_num_same_y=1,
                               max_num_same_z=1,
                               refine_coef=False):
    """
    ALGORITHM that reconstructs 3D Dirac deltas jointly
        min     |a - Gb|^2
        s.t.    c_1 * b = 0
                c_2 * b = 0
                c_3 * b = 0

    This is an optimized version tuned for speed. For instance, it reuses
    intermediate results and pre-computes a few matrices.

    :param G: the linear mapping that links the unknown uniformly sampled
            sinusoids to the given measurements
    :param a: the given measurements of the 3D Dirac deltas
    :param num_dirac: number of Dirac deltas
    :param shape_b: shape of the (3D) uniformly sampled sinusoids
    :param noise_level: noise level present in the given measurements
    :param max_ini: maximum number of random initializations
    :param stop_cri: stopping criterion, either 'mse' or 'max_iter'
    :param max_inner_iter: maximum number of inner iterations for each random initialization
    :param max_num_same_x: maximum number of Dirac deltas that have the same horizontal locations.
            This will impose the minimum dimension of the annihilating filter used.
    :param max_num_same_y: maximum number of Dirac deltas that have the same vertical locations
            This will impose the minimum dimension of the annihilating filter used.
    :param max_num_same_z: maximum number of Dirac deltas that have the same depth locations
            This will impose the minimum dimension of the annihilating filter used.
    :return:
    """
    check_finite = False  # use False for faster speed
    compute_mse = (stop_cri == 'mse')
    a = a.flatten('F')
    num_non_zero = num_dirac + 3

    shape_c_0 = int(np.ceil(num_non_zero**(1. / 3)))
    shape_c_1 = max(int(np.ceil((num_non_zero / shape_c_0)**0.5)), 2)
    shape_c_2 = max(int(np.ceil((num_non_zero / (shape_c_0 * shape_c_1)))), 2)

    # sanity check
    assert shape_c_0 * shape_c_1 * shape_c_2 >= num_non_zero

    shape_c = (shape_c_0, shape_c_1, shape_c_2)
    # total number of coefficients in c1 and c2
    num_coef = shape_c_0 * shape_c_1 * shape_c_2

    # determine the effective row rank of the joint annihilation right-dual matrix
    c1_test = np.random.randn(*shape_c) + 1j * np.random.randn(*shape_c)
    c2_test = np.random.randn(*shape_c) + 1j * np.random.randn(*shape_c)
    c3_test = np.random.randn(*shape_c) + 1j * np.random.randn(*shape_c)
    R_test = R_mtx_joint3d(c1_test, c2_test, c3_test, shape_b)
    try:
        s_test = linalg.svd(R_test, compute_uv=False)
        shape_Tb0_effective = min(
            R_test.shape) - np.where(np.abs(s_test) < 1e-12)[0].size
    except ValueError:
        # the effective number of equations as predicted by the derivation
        shape_Tb0_effective = \
            min(max((num_coef - 1) * 3,
                    np.prod(shape_b) - compute_effective_num_eq_3d(shape_c, shape_c, shape_c)),
                R_test.shape[0])
    if shape_Tb0_effective == R_test.shape[0]:
        need_QR = False
    else:
        need_QR = True

    # print('Tb0: {}'.format(shape_Tb0_effective))
    # print('R_sz0: {}'.format(R_test.shape[0]))
    # print('need QR: {}'.format(need_QR))

    # sizes of various matrices / vectors
    sz_coef = 3 * num_non_zero

    # pre-compute a few things
    # we use LU decomposition so that later we can use lu_solve, which is much faster
    GtG = np.dot(G.conj().T, G)
    lu_GtG = linalg.lu_factor(GtG, check_finite=check_finite)
    beta = linalg.lstsq(G, a)[0]
    Tbeta0 = T_mtx_joint3d(np.reshape(beta, shape_b, order='F'), shape_c,
                           shape_c, shape_c)
    if not need_QR:
        Tbeta_loop = Tbeta0

    # initializations
    min_error = np.inf
    rhs = np.concatenate((np.zeros(sz_coef,
                                   dtype=complex), np.ones(3, dtype=complex)))
    c1_opt = None
    c2_opt = None
    c3_opt = None

    # iterations over different random initializations of the annihilating filter coefficients
    ini = 0
    while ini < max_ini:
        ini += 1
        c1 = np.random.randn(*shape_c) + 1j * np.random.randn(*shape_c)
        c2 = np.random.randn(*shape_c) + 1j * np.random.randn(*shape_c)
        c3 = np.random.randn(*shape_c) + 1j * np.random.randn(*shape_c)
        # the initializations of the annihilating filter coefficients
        Gamma0 = linalg.block_diag(
            c1.flatten('F')[:, np.newaxis],
            c2.flatten('F')[:, np.newaxis],
            c3.flatten('F')[:, np.newaxis])

        # build a selection matrix that chooses a subset of c1 and c2 to ZERO OUT
        S_complement = cubical_sel_coef_subset_complement(
            shape_c, num_non_zero=num_non_zero)
        S_H = S_complement.conj().T
        S_Gamma0 = np.dot(S_complement, Gamma0)

        # last row in mtx_loop
        mtx_last_row = np.hstack(
            (S_Gamma0.conj().T, np.zeros((3, 3), dtype=complex)))

        R_loop = R_mtx_joint3d(c1, c2, c3, shape_b)
        if need_QR:
            # use QR decomposition to extract effective lines of equations
            Q_H = linalg.qr(
                R_loop, mode='economic',
                pivoting=False)[0][:, :shape_Tb0_effective].conj().T
            R_loop = np.dot(Q_H, R_loop)
            Tbeta_loop = np.dot(Q_H, Tbeta0)
            # Q_full, U_full = linalg.qr(R_loop, mode='economic', pivoting=False)
            # R_loop = U_full[:shape_Tb0_effective, :]
            # Tbeta_loop = np.dot(Q_full[:, :shape_Tb0_effective].conj().T, Tbeta0)

        # inner loop for each random initialization
        Tbetah_R_GtGinv_Rh_inv_Tbeta = None
        for inner in range(max_inner_iter):
            if inner == 0:
                R_GtGinv_Rh = \
                    np.dot(R_loop,
                           linalg.lu_solve(lu_GtG, R_loop.conj().T,
                                           check_finite=check_finite)
                           )
                mtx_loop = \
                    np.vstack((
                        np.hstack((
                            np.dot(
                                S_complement,
                                np.dot(
                                    np.dot(Tbeta_loop.conj().T,
                                           linalg.solve(R_GtGinv_Rh, Tbeta_loop,
                                                        check_finite=check_finite)
                                           ),
                                    S_H)
                            ),
                            S_Gamma0
                        )),
                        mtx_last_row
                    ))
            else:
                mtx_loop[:sz_coef, :sz_coef] = Tbetah_R_GtGinv_Rh_inv_Tbeta

            # solve annihilating filter coefficients
            try:
                coef = np.dot(S_H, linalg.solve(mtx_loop, rhs)[:sz_coef])
            except linalg.LinAlgError:
                break
            c1 = np.reshape(coef[:num_coef], shape_c, order='F')
            c2 = np.reshape(coef[num_coef:num_coef + num_coef],
                            shape_c,
                            order='F')
            c3 = np.reshape(coef[num_coef + num_coef:], shape_c, order='F')

            # update the right-dual matrix R and T based on the new coefficients
            R_loop = R_mtx_joint3d(c1, c2, c3, shape_b)
            if need_QR:
                # use QR decomposition to extract effective lines of equations
                Q_H = linalg.qr(
                    R_loop, mode='economic',
                    pivoting=False)[0][:, :shape_Tb0_effective].conj().T
                R_loop = np.dot(Q_H, R_loop)
                Tbeta_loop = np.dot(Q_H, Tbeta0)
                # Q_full, U_full = linalg.qr(R_loop, mode='economic', pivoting=False)
                # R_loop = U_full[:shape_Tb0_effective, :]
                # Tbeta_loop = np.dot(Q_full[:, :shape_Tb0_effective].conj().T, Tbeta0)

            # evaluate fitting error without computing b
            '''implementation I, which involves two layers of nested matrix inverses'''
            # Tbetah_R_GtGinv_Rh_inv_Tbeta = \
            #     np.dot(Tbeta_loop.conj().T,
            #            linalg.solve(
            #                np.dot(R_loop,
            #                       linalg.lu_solve(lu_GtG, R_loop.conj().T,
            #                                       check_finite=check_finite)),
            #                Tbeta_loop, check_finite=check_finite)
            #            )
            # # the actual error is this value + |a - G beta|^2, which is a constant
            # error_loop = \
            #     np.real(np.dot(coef.conj().T,
            #                    np.dot(Tbetah_R_GtGinv_Rh_inv_Tbeta, coef)))
            '''implementation II, which only involves the inverse of G^H G and adds
            little extra computational cost compared with implementation I'''
            R_GtGinv_Rh = np.dot(
                R_loop,
                linalg.lu_solve(lu_GtG,
                                R_loop.conj().T,
                                check_finite=check_finite))
            try:
                Tbetah_R_GtGinv_Rh_inv_Tbeta = \
                    np.dot(
                        S_complement,
                        np.dot(
                            np.dot(
                                Tbeta_loop.conj().T,
                                linalg.solve(R_GtGinv_Rh, Tbeta_loop, check_finite=check_finite)
                            ),
                            S_H
                        )
                    )
            except linalg.LinAlgError:
                break

            Tbeta_c = np.dot(Tbeta_loop, coef)
            if inner == 0:
                mtx_error = np.row_stack(
                    (np.column_stack((R_GtGinv_Rh,
                                      np.zeros((shape_Tb0_effective, 1),
                                               dtype=complex))),
                     np.append(Tbeta_c.conj()[np.newaxis, :], -1)))
                rhs_error = np.append(Tbeta_c, 0)
            else:
                mtx_error[:shape_Tb0_effective, :
                          shape_Tb0_effective] = R_GtGinv_Rh
                mtx_error[-1, :shape_Tb0_effective] = Tbeta_c.conj()
                rhs_error[:-1] = Tbeta_c

            l_rho = linalg.solve(mtx_error,
                                 rhs_error,
                                 check_finite=check_finite)
            error_loop = l_rho[-1].real

            if 0 < error_loop < min_error:
                # check that the number of non-zero entries is
                # indeed num_non_zero (it could be fewer)
                c1[np.abs(c1) < 1e-2 * np.max(np.abs(c1))] = 0
                c2[np.abs(c2) < 1e-2 * np.max(np.abs(c2))] = 0
                c3[np.abs(c3) < 1e-2 * np.max(np.abs(c3))] = 0
                nnz_cond = \
                    np.sum(1 - np.isclose(np.abs(c1), 0).astype(int)) == num_non_zero and \
                    np.sum(1 - np.isclose(np.abs(c2), 0).astype(int)) == num_non_zero and \
                    np.sum(1 - np.isclose(np.abs(c3), 0).astype(int)) == num_non_zero
                # TODO: add the checks for cases when certain number of Dirac share the x, y, z coordinates
                if nnz_cond:
                    min_error = error_loop
                    c1_opt = c1
                    c2_opt = c2
                    c3_opt = c3
                    S_complement_opt = S_complement
                    l_opt = l_rho[:-1]

            if compute_mse and min_error < noise_level:
                break

        if compute_mse and min_error < noise_level:
            break

        if c1_opt is None or c2_opt is None or c3_opt is None:
            max_ini += 1

    # compute b_opt at the end
    R_opt = R_mtx_joint3d(c1_opt, c2_opt, c3_opt, shape_b)
    if need_QR:
        # use QR decomposition to extract effective lines of equations
        Q_H = linalg.qr(R_opt, mode='economic',
                        pivoting=False)[0][:, :shape_Tb0_effective].conj().T
        R_opt = np.dot(Q_H, R_opt)
        # R_opt = linalg.qr(R_opt, mode='economic',
        #                   pivoting=False)[1][:shape_Tb0_effective, :]
    '''use with implementation I'''
    # mtx_brecon = np.vstack((
    #     np.hstack((GtG, R_opt.conj().T)),
    #     np.hstack((R_opt, np.zeros((shape_Tb0_effective, shape_Tb0_effective))))
    # ))
    # b_opt = \
    #     linalg.solve(mtx_brecon,
    #                  np.concatenate((Gt_a,
    #                                  np.zeros(shape_Tb0_effective,
    #                                           dtype=complex)))
    #                  )[:sz_R1]
    '''use with implementation II'''
    b_opt = beta - linalg.lu_solve(
        lu_GtG, np.dot(R_opt.conj().T, l_opt), check_finite=check_finite)
    # use denoised FRI data b to estimate c as the final refinement
    if refine_coef:
        S_blk_H = S_complement_opt[:S_complement_opt.shape[0] //
                                   3, :S_complement_opt.shape[1] // 3].conj().T
        Tb = np.dot(
            convmtx3_valid(np.reshape(b_opt, shape_b, order='F'), shape_c),
            S_blk_H)
        V = linalg.svd(Tb, compute_uv=True)[2].conj().T
        c1_opt, c2_opt, c3_opt = \
            np.reshape(np.dot(S_blk_H, V[:, -3]), shape_c, order='F'), \
            np.reshape(np.dot(S_blk_H, V[:, -2]), shape_c, order='F'), \
            np.reshape(np.dot(S_blk_H, V[:, -1]), shape_c, order='F')

    return c1_opt, c2_opt, c3_opt, min_error, b_opt, ini
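The constraints c_i * b = 0 above are the standard FRI annihilation property: a filter whose roots lie at the Dirac frequencies annihilates the uniformly sampled sinusoids. A minimal 1D sketch of that property on a hypothetical toy signal, independent of the helper functions used above:

import numpy as np

u = np.exp(2j * np.pi * np.array([0.10, 0.33]))             # two Dirac locations on the unit circle
amp = np.array([1.0, 0.5])
k = np.arange(8)
b = (amp[None, :] * u[None, :] ** k[:, None]).sum(axis=1)   # uniformly sampled sum of sinusoids
c = np.poly(u)                                              # annihilating filter: its roots are u
print(np.allclose(np.convolve(b, c, mode='valid'), 0))      # True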
コード例 #36
ファイル: extmath.py プロジェクト: zaflondon/scikit-learn
def randomized_range_finder(A, size, n_iter,
                            power_iteration_normalizer='auto',
                            random_state=None):
    """Computes an orthonormal matrix whose range approximates the range of A.

    Parameters
    ----------
    A: 2D array
        The input data matrix

    size: integer
        Size of the return array

    n_iter: integer
        Number of power iterations used to stabilize the result

    power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter`<=2 and switches to LU otherwise.

        .. versionadded:: 0.18

    random_state: RandomState or an int seed (0 by default)
        A random number generator instance

    Returns
    -------
    Q: 2D array
        An (A.shape[0] x size) projection matrix, the range of which
        approximates well the range of the input matrix A.

    Notes
    -----

    Follows Algorithm 4.3 of
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061

    An implementation of a randomized algorithm for principal component
    analysis
    A. Szlam et al. 2014
    """
    random_state = check_random_state(random_state)

    # Generating normal random vectors with shape: (A.shape[1], size)
    Q = random_state.normal(size=(A.shape[1], size))

    # Deal with "auto" mode
    if power_iteration_normalizer == 'auto':
        if n_iter <= 2:
            power_iteration_normalizer = 'none'
        else:
            power_iteration_normalizer = 'LU'

    # Perform power iterations with Q to further 'imprint' the top
    # singular vectors of A in Q
    for i in range(n_iter):
        if power_iteration_normalizer == 'none':
            Q = safe_sparse_dot(A, Q)
            Q = safe_sparse_dot(A.T, Q)
        elif power_iteration_normalizer == 'LU':
            Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
            Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
        elif power_iteration_normalizer == 'QR':
            Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
            Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')

    # Sample the range of A by linear projection of Q
    # Extract an orthonormal basis
    Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
    return Q
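A common follow-up step (Halko et al., Algorithm 5.1) turns the basis Q into an approximate truncated SVD of A. The sketch below uses a hypothetical rank-8 test matrix and a plain QR of A @ Omega as a stand-in for randomized_range_finder with zero power iterations:

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
A = rng.randn(200, 8) @ rng.randn(8, 80)            # rank-8 test matrix
Omega = rng.randn(80, 10)                           # random probe vectors
Q, _ = linalg.qr(A @ Omega, mode='economic')        # stand-in for randomized_range_finder(A, 10, 0)
B = Q.T @ A                                         # small 10 x 80 matrix
Uhat, s, Vt = linalg.svd(B, full_matrices=False)
U = Q @ Uhat
print(np.allclose(A, (U * s) @ Vt))                 # True: the 10-dimensional basis captures the range of A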
コード例 #37
def qr_null(A, tol=None):
    """Computes the null space of A using a rank-revealing QR decomposition"""
    Q, R, P = qr(A.T, mode='full', pivoting=True)
    tol = np.finfo(R.dtype).eps if tol is None else tol
    rnk = min(A.shape) - np.abs(np.diag(R))[::-1].searchsorted(tol)
    return Q[:, rnk:].conj()
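A quick usage check of qr_null on a deliberately rank-deficient matrix (assuming qr_null above is in scope together with numpy and scipy.linalg.qr):

import numpy as np

A = np.array([[1., 2., 3.],
              [2., 4., 6.]])          # rank 1, so the right null space is 2-dimensional
Z = qr_null(A, tol=1e-10)
print(Z.shape)                        # (3, 2)
print(np.allclose(A @ Z, 0))          # True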
コード例 #38
    def reduced_likelihood_function(self, theta=None):
        """
        This function determines the BLUP parameters and evaluates the reduced
        likelihood function for the given autocorrelation parameters theta.

        Maximizing this function wrt the autocorrelation parameters theta is
        equivalent to maximizing the likelihood of the assumed joint Gaussian
        distribution of the observations y evaluated onto the design of
        experiments X.

        Parameters
        ----------
        theta : array_like, optional
            An array containing the autocorrelation parameters at which the
            Gaussian Process model parameters should be determined.
            Default uses the built-in autocorrelation parameters
            (ie ``theta = self.theta_``).

        Returns
        -------
        reduced_likelihood_function_value : double
            The value of the reduced likelihood function associated to the
            given autocorrelation parameters theta.

        par : dict
            A dictionary containing the requested Gaussian Process model
            parameters:

                sigma2
                        Gaussian Process variance.
                beta
                        Generalized least-squares regression weights for
                        Universal Kriging or given beta0 for Ordinary
                        Kriging.
                gamma
                        Gaussian Process weights.
                C
                        Cholesky decomposition of the correlation matrix [R].
                Ft
                        Solution of the linear equation system : [R] x Ft = F
                G
                        QR decomposition of the matrix Ft.
        """
        check_is_fitted(self, "X")

        if theta is None:
            # Use built-in autocorrelation parameters
            theta = self.theta_

        # Initialize output
        reduced_likelihood_function_value = -np.inf
        par = {}

        # Retrieve data
        n_samples = self.X.shape[0]
        D = self.D
        ij = self.ij
        F = self.F

        if D is None:
            # Light storage mode (need to recompute D, ij and F)
            D, ij = l1_cross_distances(self.X)
            if (np.min(np.sum(D, axis=1)) == 0.
                    and self.corr != correlation.pure_nugget):
                raise Exception("Multiple X are not allowed")
            F = self.regr(self.X)

        # Set up R
        r = self.corr(theta, D)
        R = np.eye(n_samples) * (1. + self.nugget)
        R[ij[:, 0], ij[:, 1]] = r
        R[ij[:, 1], ij[:, 0]] = r

        # Cholesky decomposition of R
        try:
            C = linalg.cholesky(R, lower=True)
        except linalg.LinAlgError:
            return reduced_likelihood_function_value, par

        # Get generalized least squares solution
        Ft = linalg.solve_triangular(C, F, lower=True)
        try:
            Q, G = linalg.qr(Ft, econ=True)
        except:
            #/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
            # DeprecationWarning: qr econ argument will be removed after scipy
            # 0.7. The economy transform will then be available through the
            # mode='economic' argument.
            Q, G = linalg.qr(Ft, mode='economic')
            pass

        sv = linalg.svd(G, compute_uv=False)
        rcondG = sv[-1] / sv[0]
        if rcondG < 1e-10:
            # Check F
            sv = linalg.svd(F, compute_uv=False)
            condF = sv[0] / sv[-1]
            if condF > 1e15:
                raise Exception("F is too ill conditioned. Poor combination "
                                "of regression model and observations.")
            else:
                # Ft is too ill conditioned, get out (try different theta)
                return reduced_likelihood_function_value, par

        Yt = linalg.solve_triangular(C, self.y, lower=True)
        if self.beta0 is None:
            # Universal Kriging
            beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
        else:
            # Ordinary Kriging
            beta = np.array(self.beta0)

        rho = Yt - np.dot(Ft, beta)
        sigma2 = (rho**2.).sum(axis=0) / n_samples
        # The determinant of R is equal to the squared product of the diagonal
        # elements of its Cholesky decomposition C
        detR = (np.diag(C)**(2. / n_samples)).prod()

        # Compute/Organize output
        reduced_likelihood_function_value = -sigma2.sum() * detR
        par['sigma2'] = sigma2 * self.y_std**2.
        par['beta'] = beta
        par['gamma'] = linalg.solve_triangular(C.T, rho)
        par['C'] = C
        par['Ft'] = Ft
        par['G'] = G

        return reduced_likelihood_function_value, par
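The Cholesky + QR route used above for the generalized least-squares step can be checked against the textbook formula beta = (F' R^-1 F)^-1 F' R^-1 y. A minimal sketch with hypothetical data:

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
n = 20
F = np.column_stack((np.ones(n), rng.rand(n)))
y = rng.rand(n, 1)
M = rng.rand(n, n)
R = M @ M.T + n * np.eye(n)                     # symmetric positive definite "correlation" matrix
C = linalg.cholesky(R, lower=True)
Ft = linalg.solve_triangular(C, F, lower=True)
Yt = linalg.solve_triangular(C, y, lower=True)
Q, G = linalg.qr(Ft, mode='economic')
beta_qr = linalg.solve_triangular(G, Q.T @ Yt)
beta_gls = linalg.solve(F.T @ linalg.solve(R, F), F.T @ linalg.solve(R, y))
print(np.allclose(beta_qr, beta_gls))           # True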
コード例 #39
def locally_linear_embedding(X,
                             n_neighbors,
                             n_components,
                             reg=1e-3,
                             eigen_solver='auto',
                             tol=1e-6,
                             max_iter=100,
                             method='standard',
                             hessian_tol=1E-4,
                             modified_tol=1E-12,
                             random_state=None,
                             n_jobs=1):
    """Perform a Locally Linear Embedding analysis on the data.

    Read more in the :ref:`User Guide <locally_linear_embedding>`.

    Parameters
    ----------
    X : {array-like, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array or a NearestNeighbors object.

    n_neighbors : integer
        number of neighbors to consider for each point.

    n_components : integer
        number of coordinates for the manifold.

    reg : float
        regularization constant, multiplies the trace of the local covariance
        matrix of the distances.

    eigen_solver : string, {'auto', 'arpack', 'dense'}
        auto : algorithm will attempt to choose the best method for input data

        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.

        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' method
        Not used if eigen_solver=='dense'.

    max_iter : integer
        maximum number of iterations for the arpack solver.

    method : {'standard', 'hessian', 'modified', 'ltsa'}
        standard : use the standard locally linear embedding algorithm.
                   see reference [1]_
        hessian  : use the Hessian eigenmap method.  This method requires
                   n_neighbors > n_components * (1 + (n_components + 1) / 2.
                   see reference [2]_
        modified : use the modified locally linear embedding algorithm.
                   see reference [3]_
        ltsa     : use local tangent space alignment algorithm
                   see reference [4]_

    hessian_tol : float, optional
        Tolerance for Hessian eigenmapping method.
        Only used if method == 'hessian'

    modified_tol : float, optional
        Tolerance for modified LLE method.
        Only used if method == 'modified'

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``solver`` == 'arpack'.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    Y : array-like, shape [n_samples, n_components]
        Embedding vectors.

    squared_error : float
        Reconstruction error for the embedding vectors. Equivalent to
        ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.

    References
    ----------

    .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
        by locally linear embedding.  Science 290:2323 (2000).`
    .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
        linear embedding techniques for high-dimensional data.
        Proc Natl Acad Sci U S A.  100:5591 (2003).`
    .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
        Embedding Using Multiple Weights.`
        http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
    .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
        dimensionality reduction via tangent space alignment.
        Journal of Shanghai Univ.  8:406 (2004)`
    """
    if eigen_solver not in ('auto', 'arpack', 'dense'):
        raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)

    if method not in ('standard', 'hessian', 'modified', 'ltsa'):
        raise ValueError("unrecognized method '%s'" % method)

    nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
    nbrs.fit(X)
    X = nbrs._fit_X

    N, d_in = X.shape

    if n_components > d_in:
        raise ValueError("output dimension must be less than or equal "
                         "to input dimension")
    if n_neighbors >= N:
        raise ValueError("Expected n_neighbors <= n_samples, "
                         " but n_samples = %d, n_neighbors = %d" %
                         (N, n_neighbors))

    if n_neighbors <= 0:
        raise ValueError("n_neighbors must be positive")

    M_sparse = (eigen_solver != 'dense')

    if method == 'standard':
        W = barycenter_kneighbors_graph(nbrs,
                                        n_neighbors=n_neighbors,
                                        reg=reg,
                                        n_jobs=n_jobs)

        # we'll compute M = (I-W)'(I-W)
        # depending on the solver, we'll do this differently
        if M_sparse:
            M = eye(*W.shape, format=W.format) - W
            M = (M.T * M).tocsr()
        else:
            M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I so that M = (W - I).T @ (W - I)

    elif method == 'hessian':
        dp = n_components * (n_components + 1) // 2

        if n_neighbors <= n_components + dp:
            raise ValueError("for method='hessian', n_neighbors must be "
                             "greater than "
                             "[n_components * (n_components + 3) / 2]")

        neighbors = nbrs.kneighbors(X,
                                    n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
        Yi[:, 0] = 1

        M = np.zeros((N, N), dtype=np.float64)

        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Gi = X[neighbors[i]]
            Gi -= Gi.mean(0)

            # build Hessian estimator
            if use_svd:
                U = svd(Gi, full_matrices=0)[0]
            else:
                Ci = np.dot(Gi, Gi.T)
                U = eigh(Ci)[1][:, ::-1]

            Yi[:, 1:1 + n_components] = U[:, :n_components]

            j = 1 + n_components
            for k in range(n_components):
                Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
                                                 U[:, k:n_components])
                j += n_components - k

            Q, R = qr(Yi)

            w = Q[:, n_components + 1:]
            S = w.sum(0)

            S[np.where(abs(S) < hessian_tol)] = 1
            w /= S

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(w, w.T)

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'modified':
        if n_neighbors < n_components:
            raise ValueError("modified LLE requires "
                             "n_neighbors >= n_components")

        neighbors = nbrs.kneighbors(X,
                                    n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        # find the eigenvectors and eigenvalues of each local covariance
        # matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
        # where the columns are eigenvectors
        V = np.zeros((N, n_neighbors, n_neighbors))
        nev = min(d_in, n_neighbors)
        evals = np.zeros([N, nev])

        # choose the most efficient way to find the eigenvectors
        use_svd = (n_neighbors > d_in)

        if use_svd:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                V[i], evals[i], _ = svd(X_nbrs, full_matrices=True)
            evals **= 2
        else:
            for i in range(N):
                X_nbrs = X[neighbors[i]] - X[i]
                C_nbrs = np.dot(X_nbrs, X_nbrs.T)
                evi, vi = eigh(C_nbrs)
                evals[i] = evi[::-1]
                V[i] = vi[:, ::-1]

        # find regularized weights: this is like normal LLE.
        # because we've already computed the SVD of each covariance matrix,
        # it's faster to use this rather than np.linalg.solve
        reg = 1E-3 * evals.sum(1)

        tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
        tmp[:, :nev] /= evals + reg[:, None]
        tmp[:, nev:] /= reg[:, None]

        w_reg = np.zeros((N, n_neighbors))
        for i in range(N):
            w_reg[i] = np.dot(V[i], tmp[i])
        w_reg /= w_reg.sum(1)[:, None]

        # calculate eta: the median of the ratio of small to large eigenvalues
        # across the points.  This is used to determine s_i, below
        rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
        eta = np.median(rho)

        # find s_i, the size of the "almost null space" for each point:
        # this is the size of the largest set of eigenvalues
        # such that Sum[v; v in set]/Sum[v; v not in set] < eta
        s_range = np.zeros(N, dtype=int)
        evals_cumsum = stable_cumsum(evals, 1)
        eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
        for i in range(N):
            s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
        s_range += n_neighbors - nev  # number of zero eigenvalues

        # Now calculate M.
        # This is the [N x N] matrix whose null space is the desired embedding
        M = np.zeros((N, N), dtype=np.float64)
        for i in range(N):
            s_i = s_range[i]

            # select bottom s_i eigenvectors and calculate alpha
            Vi = V[i, :, n_neighbors - s_i:]
            alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)

            # compute Householder matrix which satisfies
            #  Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
            # using prescription from paper
            h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))

            norm_h = np.linalg.norm(h)
            if norm_h < modified_tol:
                h *= 0
            else:
                h /= norm_h

            # Householder matrix is
            #  >> Hi = np.identity(s_i) - 2*np.outer(h,h)
            # Then the weight matrix is
            #  >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
            # We do this much more efficiently:
            Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
                  (1 - alpha_i) * w_reg[i, :, None])

            # Update M as follows:
            # >> W_hat = np.zeros( (N,s_i) )
            # >> W_hat[neighbors[i],:] = Wi
            # >> W_hat[i] -= 1
            # >> M += np.dot(W_hat,W_hat.T)
            # We can do this much more efficiently:
            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
            Wi_sum1 = Wi.sum(1)
            M[i, neighbors[i]] -= Wi_sum1
            M[neighbors[i], i] -= Wi_sum1
            M[i, i] += s_i

        if M_sparse:
            M = csr_matrix(M)

    elif method == 'ltsa':
        neighbors = nbrs.kneighbors(X,
                                    n_neighbors=n_neighbors + 1,
                                    return_distance=False)
        neighbors = neighbors[:, 1:]

        M = np.zeros((N, N))

        use_svd = (n_neighbors > d_in)

        for i in range(N):
            Xi = X[neighbors[i]]
            Xi -= Xi.mean(0)

            # compute n_components largest eigenvalues of Xi * Xi^T
            if use_svd:
                v = svd(Xi, full_matrices=True)[0]
            else:
                Ci = np.dot(Xi, Xi.T)
                v = eigh(Ci)[1][:, ::-1]

            Gi = np.zeros((n_neighbors, n_components + 1))
            Gi[:, 1:] = v[:, :n_components]
            Gi[:, 0] = 1. / np.sqrt(n_neighbors)

            GiGiT = np.dot(Gi, Gi.T)

            nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
            M[nbrs_x, nbrs_y] -= GiGiT
            M[neighbors[i], neighbors[i]] += 1

    return null_space(M,
                      n_components,
                      k_skip=1,
                      eigen_solver=eigen_solver,
                      tol=tol,
                      max_iter=max_iter,
                      random_state=random_state)
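This snippet mirrors scikit-learn's implementation, so in practice the embedding is usually obtained through the public API. A short usage sketch, assuming scikit-learn is installed:

from sklearn.datasets import make_swiss_roll
from sklearn.manifold import locally_linear_embedding

X, _ = make_swiss_roll(n_samples=500, random_state=0)
Y, err = locally_linear_embedding(X, n_neighbors=12, n_components=2, method='standard')
print(Y.shape, err)    # (500, 2) and the reconstruction error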
コード例 #40
    def estimate(self,
                 time_series,
                 prng,
                 fit_intercept=False,
                 crit_type='sbc',
                 ndx=None):
        """Stepwise selection of the model using QR decomposition according to Neumaier and Schneider across
           model orders [prng[0], prng[1]].  The criterion 'crit' is the model order selection criterion.
           Optionally the (integer) index array indicates which parts of the time series are contiguous and which are not.
           Code is mostly a port from the MATLAB ARFIT toolbox by the same authors."""

        p_min, p_max = prng[0], prng[1]
        fi = 1 if fit_intercept else 0
        ts = time_series[:,
                         np.newaxis] if time_series.ndim == 1 else time_series
        N, m = ts.shape
        n_p = np.zeros(shape=(p_max + 1, ), dtype=int)
        n_p[p_max] = fi + p_max * m

        # remove "presample" data (p_max values from start)
        N = N - p_max

        # construct matrix K (add row space for regularization matrix deltaD)
        K = np.zeros((N + n_p[p_max] + m, n_p[p_max] + m))

        # add intercept if required
        if (fit_intercept):
            K[:N, 0] = 1.0

        # set predictors u
        for j in range(1, p_max + 1):
            K[:N, fi + (j - 1) * m:fi + j * m] = ts[p_max - j:N + p_max - j, :]

        # set predictors v
        K[:N, n_p[p_max]:n_p[p_max] + m] = ts[p_max:N + p_max, :]

        # add regularization as per paper of Neumaier & Schneider, who refer to Higham
        q = n_p[p_max] + m
        delta = (q**2 + q + 1) * np.finfo(np.float64).eps
        sc = (delta * np.sum(K**2, axis=0))**0.5
        K[N:, :] = np.diag(sc)

        # compute QR decomposition but only return R, Q is unused here
        R = linalg.qr(K, True, -1, 'r')

        # retrieve R22 submatrix
        R22 = R[n_p[p_max]:q, n_p[p_max]:q]

        # invert R22 matrix for later downdating
        invR22 = linalg.inv(R22)
        Mp = np.dot(invR22, invR22.T)

        # compute the log of the determinant of the residual cross product matrix
        logdp = np.zeros(p_max + 1)
        sbc = np.zeros_like(logdp)
        fpe = np.zeros_like(logdp)
        logdp[p_max] = 2.0 * np.log(np.abs(np.prod(np.diag(R22))))

        # run the downdating steps & update estimates of log det covar mat
        q_max = q
        for p in range(p_max, p_min - 1, -1):
            n_p[p] = m * p + fi
            q = n_p[p] + m

            # execute downdating step if required
            if p < p_max:
                Rp = R[n_p[p]:q, n_p[p_max]:q_max]
                L = linalg.cholesky(
                    np.identity(m) + np.dot(np.dot(Rp, Mp), Rp.T)).T
                Np = linalg.solve(L, np.dot(Rp, Mp))
                Mp -= np.dot(Np.T, Np)
                logdp[p] = logdp[p +
                                 1] + 2.0 * math.log(abs(np.prod(np.diag(L))))

            # compute selected criterion
            sbc[p] = logdp[p] / m - math.log(N) * (1.0 - float(n_p[p]) / N)
            fpe[p] = logdp[p] / m - math.log(
                N * float(N - n_p[p]) / float(N + n_p[p]))

        # find the best order according to the chosen criterion
        if crit_type == 'sbc':
            p_opt = np.argmin(sbc[p_min:p_max + 1]) + p_min
        elif crit_type == 'fpe':
            p_opt = np.argmin(fpe[p_min:p_max + 1]) + p_min
        else:
            raise ValueError("Invalid criterion.")

        # retrieve submatrices and intercept (if required)
        R11 = R[:n_p[p_opt], :n_p[p_opt]]
        R12 = R[:n_p[p_opt], n_p[p_max]:n_p[p_max] + m]
        R22 = R[n_p[p_opt]:n_p[p_max] + m, n_p[p_max]:n_p[p_max] + m]

        if n_p[p_opt] > 0:

            # improve conditioning
            if fit_intercept:
                scaler = np.max(sc[1:]) / sc[0]
                R11[:, 0] *= scaler

            Aaug = linalg.solve(R11, R12).transpose()

            if fit_intercept:
                self.w = Aaug[:, 0] * scaler
                self.A = Aaug[:, 1:n_p[p_opt]]
            else:
                self.w = np.zeros(shape=(m, ))
                self.A = Aaug

        # compute estimate of covariance matrix and return it
        dof = N - n_p[p_opt]
        C = np.dot(R22.T, R22) / dof

        # store the (upper tri) Cholesky factor in U, scipy.linalg.cholesky returns U s.t. U^T * U = C
        self.U = linalg.cholesky(C)

        return sbc, fpe
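To illustrate the order-selection idea in isolation, the sketch below fits AR(p) models to a simulated AR(2) series by ordinary least squares and keeps the order with the smallest Schwarz criterion. It is a deliberately simplified, hypothetical example and does not reuse the QR downdating machinery above:

import numpy as np

rng = np.random.RandomState(0)
T = 2000
y = np.zeros(T)
for t in range(2, T):
    y[t] = 0.6 * y[t - 1] - 0.3 * y[t - 2] + rng.randn()

sbc = {}
for p in range(1, 5):
    Xp = np.column_stack([y[p - k:T - k] for k in range(1, p + 1)])
    yp = y[p:]
    _, res, _, _ = np.linalg.lstsq(Xp, yp, rcond=None)
    n_obs = len(yp)
    sbc[p] = np.log(res[0] / n_obs) + p * np.log(n_obs) / n_obs
print(min(sbc, key=sbc.get))    # 2 is expected for this series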
コード例 #41
def ols(y, x, const=True, everything=False):
    """
    Runs a Least-Squares regression of a vector y on a vector x.
    The regression equation is specified as follows in matrix notation:
    ---- LaTeX syntax ---------------------------------------------------------
        y = x \beta + \epsilon
    ---- End of LaTeX syntax --------------------------------------------------

    Parameters
    ----------
    y : numpy.array
         The vector on the LHS of the regression equation. It must satisfy
         y.shape = (T,1) or y.shape = (T,), where T is the number of
         observations available.
    x : numpy.array
         The matrix on the RHS of the regression equation. It must satisfy
         x.shape = (T,K), where T is the number of observations available and K
         is the number of regressors.
    const : bool (optional)
            Specifies whether a constant should be included in the regression
            equation. If so, then the constant will be the first value of the
            returned vector 'beta'. Default value is 'True'.
    everything : bool (optional)
                 Specifies whether the returned value of the function consists
                 only of the vector of estimated coefficients (as opposed to
                 the whole dictionary containing also diagnostics and other
                 information). Default value is 'False'.

    Returns
    -------
    beta : numpy.array
           The vector of estimated coefficients. This is returned if
           everything=False.
    results : dict
              A dictionary containing the following elements
                  - beta: the vector of estimated coefficients
                  - se: standard errors associated to beta
                  - resid: the vector of estimation errors
                  - fitted: the vector of fitted values
                  - conf_int: 95% confidence intervals
                  - tstat: t-statistics associated to beta
                  - r2: R-squared of the regression
                  - r2adj: adjusted R-squared of the regression
                  - T: the number of observations used in the regression
                  - K: the number of regressors
                  - TODO: complete documentation on returned values
              This is returned if everything=True.
    """
    T = y.shape[0]

    if x.shape[0] != T:
        raise ValueError('x and y have different no. of observations')

    if const:
        x = np.concatenate((np.ones((T, 1)), x), axis=1)

    K = x.shape[1]
    if T < 10000:  # prefer numerical accuracy over algorithm speed
        q, r = la.qr(x, mode='economic')
        inv_xx = la.solve(np.dot(r.T, r), np.eye(K))  # (r'r) * a = I - solve for a
    else:  # prefer algorithm speed over numerical accuracy
        inv_xx = la.solve(np.dot(x.T, x), np.eye(K))  # (x'x) * a = I - solve for a
    beta = np.dot(inv_xx, np.dot(x.T, y))

    if not everything:
        return beta  # provide the estimate beta to the user and exit
    else:
        yhat = np.dot(x, beta)  # fitted values
        eps = y - yhat  # regression residuals
        sigma = np.dot(eps.T, eps) / (T - K)  # var-cov matrix of the residuals
        std = np.sqrt(sigma * np.diag(inv_xx))  # standard deviation of beta
        tcrit = st.t.ppf(
            0.025, T)  # critical val on student-t for bidirectional testing
        conf_int = np.array([beta - tcrit * std,
                             beta + tcrit * std])  # 95% confidence interval
        tstat = beta / std  # t-statistics
        rss = np.dot(eps.T, eps)  # residual sum of squared
        tss = np.dot((y - np.mean(y)).T,
                     (y - np.mean(y)))  # total sum of squared
        r2 = 1 - rss / tss  # R-squared
        arss = rss / (T - K)
        atss = tss / (T - 1)
        if atss != 0:
            r2adj = 1 - (arss / atss)  # adjusted R-squared
        else:
            r2adj = None
        deps = eps[1:T] - eps[0:T - 1]  # first difference of residuals
        dw = np.dot(deps.T, deps) / np.dot(eps.T, eps)  # Durbin-Watson stat
        results = {
            'beta': beta,
            'se': std,
            'fitted': yhat,
            'resid': eps,
            'sigma': sigma,
            'conf_int': conf_int,
            'tstat': tstat,
            'r2': r2,
            'r2adj': r2adj,
            'dw': dw,
            'meth': 'ols'
        }
        return results
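A quick usage sketch on synthetic data (assuming the ols() above is in scope together with its module imports, e.g. numpy, scipy.linalg as la and scipy.stats as st):

import numpy as np

np.random.seed(0)
T = 200
x = np.random.randn(T, 2)
y = 1.0 + x @ np.array([0.5, -2.0]) + 0.1 * np.random.randn(T)

beta = ols(y, x)                      # [intercept, beta_1, beta_2], roughly [1.0, 0.5, -2.0]
res = ols(y, x, everything=True)
print(beta)
print(res['r2'], res['dw'])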
コード例 #42
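# (continuation of a Cholesky example: A and B are assumed to be defined above, and L is
#  assumed to be the lower-triangular Cholesky factor of A, e.g. L = np.linalg.cholesky(A))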
print(L)

print(np.dot(L, L.T.conj()))
y = np.linalg.solve(L, B)
x = np.linalg.solve(L.T.conj(), y)
print(x)
print(np.mat(A) * np.mat(x).T)

#-------------------------------
"""QR decomposition with scipy"""
import scipy.linalg as linalg
import numpy as np

A = np.array([[2., 1., 1.], [1., 3., 2.], [1., 0., 0]])
B = np.array([4., 5., 6.])
Q, R = linalg.qr(A)
y = np.dot(Q.T, B)
x = linalg.solve(R, y)
print(x)
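# added sanity check: the QR-based solve reproduces the solution of A x = B
print(np.allclose(np.dot(A, x), B))    # True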

#---------------------------------
"""Solve Ax=B with the Jacobi method"""
import numpy as np


def jacobi(A, B, n, tol=1e-10):
    #Initializes x with zeros with same shape and type as B
    x = np.zeros_like(B)
    for it_count in range(n):
        x_new = np.zeros_like(x)
        for i in range(A.shape[0]):
            # Jacobi update: x_new_i = (b_i - sum_{j != i} a_ij * x_j) / a_ii
            s = np.dot(A[i, :], x) - A[i, i] * x[i]
            x_new[i] = (B[i] - s) / A[i, i]
        if np.allclose(x, x_new, atol=tol):
            return x_new
        x = x_new
    return x
コード例 #43
def compute_interp_decomp(A, rank, mode='column', index_set=False):
    """Interpolative decomposition (ID).

    Algorithm for computing the low-rank ID
    decomposition of a rectangular `(m, n)` matrix `A`, with target rank `k << min{m, n}`.
    Input matrix is factored as `A = C * V`, using the column pivoted QR decomposition.
    The factor matrix `C` is formed of a subset of columns of `A`,
    also called the partial column skeleton. The factor matrix `V` contains
    a `(rank, rank)` identity matrix as a submatrix, and is well-conditioned.

    If `mode='row'`, then the input matrix is factored as `A = Z * R`, using the
    row pivoted QR decomposition. The factor matrix `R` is now formed as
    a subset of rows of `A`, also called the partial row skeleton.

    Parameters
    ----------
    A : array_like, shape `(m, n)`.
        Input array.

    rank : integer
        Target rank. Best if `rank << min{m,n}`

    mode: str `{'column', 'row'}`, default: `mode='column'`.
        'column' : ID using column pivoted QR.
        'row' : ID using row pivoted QR.

    index_set: str `{'True', 'False'}`, default: `index_set='False'`.
        'True' : Return column/row index set instead of `C` or `R`.


    Returns
    -------
    If `mode='column'`:
        C:  array_like, shape `(m, rank)`.
            Partial column skeleton.

        V : array_like, shape `(rank, n)`.
            Well-conditioned matrix.

    If `mode='row'`:
        Z:  array_like, shape `(m, rank)`.
            Well-conditioned matrix.

        R : array_like, shape `(rank, n)`.
            Partial row skeleton.

    References
    ----------
    S. Voronin and P.Martinsson.
    "RSVDPACK: Subroutines for computing partial singular value
    decompositions via randomized sampling on single core, multi core,
    and GPU architectures" (2015).
    (available at `arXiv <http://arxiv.org/abs/1502.05366>`_).
    """
    if mode not in _VALID_MODES:
        raise ValueError('mode must be one of %s, not %s'
                         % (' '.join(_VALID_MODES), mode))

    # converts A to array, raise ValueError if A has inf or nan
    A = np.asarray_chkfinite(A)
    if mode=='row':
        A = conjugate_transpose(A)

    m, n = A.shape
    if rank < 1 or rank > min(m, n):
        raise ValueError("Target rank rank must be >= 1 or < min(m, n), not %d" % rank)

    #Pivoted QR decomposition
    Q, R, P = linalg.qr(A, mode='economic', overwrite_a=False, pivoting=True,
                        check_finite=False)

    # Select column subset
    C = A[:, P[:rank]]

    # Compute V
    T =  linalg.pinv2(R[:rank, :rank]).dot(R[:rank, rank:n])
    V = np.bmat([[np.eye(rank), T]])
    V = V[:, np.argsort(P)]

    # Return ID
    if mode == 'column':
        if index_set:
            return P[:rank], V
        return C, V
    # mode == row
    elif index_set:
        return conjugate_transpose(V), P[:rank]

    return conjugate_transpose(V), conjugate_transpose(C)
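A minimal usage sketch on a hypothetical low-rank matrix. It assumes compute_interp_decomp above is in scope with its module-level helpers (_VALID_MODES, conjugate_transpose) and a SciPy version that still provides linalg.pinv2:

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(30, 6) @ rng.rand(6, 20)      # exactly rank 6
C, V = compute_interp_decomp(A, rank=6, mode='column')
print(C.shape, V.shape)                    # (30, 6) (6, 20)
print(np.allclose(A, C @ V))               # True: A is rebuilt from 6 of its own columns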
コード例 #44
def dirac_recon_joint_alg_fast_newform(G,
                                       a,
                                       num_dirac,
                                       shape_b,
                                       noise_level=0,
                                       max_ini=100,
                                       stop_cri='mse',
                                       max_inner_iter=20,
                                       max_num_same_x=1,
                                       max_num_same_y=1,
                                       max_num_same_z=1):
    """
    ALGORITHM that reconstructs 3D Dirac deltas jointly
        min     |a - Gb|^2
        s.t.    c_1 * b = 0
                c_2 * b = 0
                c_3 * b = 0

    This is an optimized version tuned for speed. For instance, it reuses
    intermediate results and pre-computes a few matrices.

    :param G: the linear mapping that links the unknown uniformly sampled
            sinusoids to the given measurements
    :param a: the given measurements of the 3D Dirac deltas
    :param num_dirac: number of Dirac deltas
    :param shape_b: shape of the (3D) uniformly sampled sinusoids
    :param noise_level: noise level present in the given measurements
    :param max_ini: maximum number of random initializations
    :param stop_cri: stopping criterion, either 'mse' or 'max_iter'
    :param max_inner_iter: maximum number of inner iterations for each random initialization
    :param max_num_same_x: maximum number of Dirac deltas that have the same horizontal locations.
            This will impose the minimum dimension of the annihilating filter used.
    :param max_num_same_y: maximum number of Dirac deltas that have the same vertical locations
            This will impose the minimum dimension of the annihilating filter used.
    :param max_num_same_z: maximum number of Dirac deltas that have the same depth locations
            This will impose the minimum dimension of the annihilating filter used.
    :return:
    """
    check_finite = False  # use False for faster speed
    compute_mse = (stop_cri == 'mse')
    a = a.flatten('F')
    num_non_zero = num_dirac + 3

    shape_c1_0 = int(np.ceil(num_non_zero**(1. / 3)))
    shape_c1_1 = max(int(np.ceil((num_non_zero / shape_c1_0)**0.5)), 2)
    shape_c1_2 = max(int(np.ceil((num_non_zero / (shape_c1_0 * shape_c1_1)))),
                     2)

    # sanity check
    assert shape_c1_0 * shape_c1_1 * shape_c1_2 >= num_non_zero

    shape_c3_0, shape_c3_1, shape_c3_2 = \
        shape_c2_0, shape_c2_1, shape_c2_2 = shape_c1_0, shape_c1_1, shape_c1_2

    shape_c1 = (shape_c1_0, shape_c1_1, shape_c1_2)
    shape_c2 = (shape_c2_0, shape_c2_1, shape_c2_2)
    shape_c3 = (shape_c3_0, shape_c3_1, shape_c3_2)

    # # check if there will be sufficient number of effective number of equations
    # update_shape = True
    # dict_keys = ['shape_c1', 'shape_c2', 'shape_c3']
    # shapes = {
    #     'shape_c1': list(shape_c1),
    #     'shape_c2': list(shape_c2),
    #     'shape_c3': list(shape_c3)
    # }
    # shapes_update_eq = [1, 0, 2]
    # shapes_update_neq = [2, 1, 0]
    # exp_count = 0
    # while update_shape:
    #     if compute_effective_num_eq_3d(shapes['shape_c1'],
    #                                    shapes['shape_c2'],
    #                                    shapes['shape_c3']) < num_dirac:
    #         shape_loop = shapes[dict_keys[exp_count]]
    #         if shape_loop[0] == shape_loop[1] == shape_loop[2]:
    #             shapes[dict_keys[exp_count]][shapes_update_eq[exp_count]] += 1
    #         else:
    #             shapes[dict_keys[exp_count]][shapes_update_neq[exp_count]] += 1
    #
    #         exp_count += 1
    #         exp_count = np.mod(exp_count, 3)
    #         update_shape = True
    #     else:
    #         update_shape = False
    #
    # shape_c1 = tuple(shapes['shape_c1'])
    # shape_c2 = tuple(shapes['shape_c2'])
    # shape_c3 = tuple(shapes['shape_c3'])
    # shape_c1_0, shape_c1_1, shape_c1_2 = shape_c1
    # shape_c2_0, shape_c2_1, shape_c2_2 = shape_c2
    # shape_c3_0, shape_c3_1, shape_c3_2 = shape_c3

    # total number of coefficients in c1 and c2
    num_coef1 = shape_c1_0 * shape_c1_1 * shape_c1_2
    num_coef2 = shape_c2_0 * shape_c2_1 * shape_c2_2
    num_coef3 = shape_c3_0 * shape_c3_1 * shape_c3_2

    # determine the effective row rank of the joint annihilation right-dual matrix
    c1_test = np.random.randn(*shape_c1) + 1j * np.random.randn(*shape_c1)
    c2_test = np.random.randn(*shape_c2) + 1j * np.random.randn(*shape_c2)
    c3_test = np.random.randn(*shape_c3) + 1j * np.random.randn(*shape_c3)
    R_test = R_mtx_joint3d(c1_test, c2_test, c3_test, shape_b)
    try:
        s_test = linalg.svd(R_test, compute_uv=False)
        shape_Tb0_effective = min(
            R_test.shape) - np.where(np.abs(s_test) < 1e-12)[0].size
    except ValueError:
        # the effective number of equations as predicted by the derivation
        shape_Tb0_effective = \
            min(max(num_coef1 - 1 + num_coef2 - 1 + num_coef3 - 1,
                    np.prod(shape_b) - compute_effective_num_eq_3d(shape_c1, shape_c2, shape_c3)),
                R_test.shape[0])
    # assert shape_Tb0_effective == shape_Tb0_effective_thm  # just to make sure

    # sizes of various matrices / vectors
    sz_coef = num_coef1 + num_coef2 + num_coef3 - 3  # -3 because of linear independence
    sz_S0 = num_coef1 + num_coef2 + num_coef3 - 3 * num_non_zero

    # pre-compute a few things
    # we use LU decomposition so that later we can use lu_solve, which is much faster
    GtG = np.dot(G.conj().T, G)
    lu_GtG = linalg.lu_factor(GtG, check_finite=check_finite)
    beta = linalg.lstsq(G, a)[0]
    Tbeta0 = T_mtx_joint3d(np.reshape(beta, shape_b, order='F'), shape_c1,
                           shape_c2, shape_c3)
    # use one block of Tbeta0 to do QR decomposition
    Tbeta_one_blk = convmtx3_valid(np.reshape(beta, shape_b, order='F'),
                                   shape_c1)
    Qtilde_full = linalg.qr(Tbeta_one_blk.conj().T,
                            mode='economic',
                            pivoting=False)[0]
    Qtilde1 = Qtilde_full
    Qtilde2 = Qtilde_full[:, 1:]
    Qtilde3 = Qtilde_full[:, 2:]
    Qtilde_mtx = linalg.block_diag(Qtilde1, Qtilde2, Qtilde3)
    Tbeta0_Qtilde = np.dot(Tbeta0, Qtilde_mtx)

    # initializations
    min_error = np.inf
    rhs = np.concatenate(
        (np.zeros(sz_coef + sz_S0, dtype=complex),
         np.concatenate((np.ones(3, dtype=complex), np.zeros(3,
                                                             dtype=complex)))))
    c1_opt = None
    c2_opt = None
    c3_opt = None

    # iterations over different random initializations of the annihilating filter coefficients
    ini = 0
    while ini < max_ini:
        ini += 1
        c1 = np.random.randn(*shape_c1) + 1j * np.random.randn(*shape_c1)
        c2 = np.random.randn(*shape_c2) + 1j * np.random.randn(*shape_c2)
        c3 = np.random.randn(*shape_c3) + 1j * np.random.randn(*shape_c3)
        # the initializations of the annihilating filter coefficients
        Gamma0 = np.dot(
            Qtilde_mtx.T,
            np.column_stack(
                (linalg.block_diag(
                    c1.flatten('F')[:, np.newaxis],
                    c2.flatten('F')[:, np.newaxis],
                    c3.flatten('F')[:, np.newaxis]),
                 np.concatenate(
                     (c2.flatten('F'), c1.flatten('F'), np.zeros(num_coef3))),
                 np.concatenate(
                     (c3.flatten('F'), np.zeros(num_coef2), c1.flatten('F'))),
                 np.concatenate((np.zeros(num_coef1), c3.flatten('F'),
                                 c2.flatten('F'))))))

        # build a selection matrix that chooses a subset of c1 and c2 to ZERO OUT
        S = np.dot(
            cubical_sel_coef_subset(shape_c1,
                                    shape_c2,
                                    shape_c3,
                                    num_non_zero=num_non_zero,
                                    max_num_same_x=max_num_same_x,
                                    max_num_same_y=max_num_same_y,
                                    max_num_same_z=max_num_same_z), Qtilde_mtx)
        S_H = S.conj().T
        mtx_S_row = np.hstack((S, np.zeros((sz_S0, sz_S0 + 6), dtype=complex)))

        # last row in mtx_loop
        mtx_last_row = np.hstack(
            (Gamma0.T, np.zeros((6, sz_S0 + 6), dtype=complex)))

        R_loop = R_mtx_joint3d(c1, c2, c3, shape_b)
        # use QR decomposition to extract effective lines of equations
        Q_H = linalg.qr(R_loop, mode='economic',
                        pivoting=False)[0][:, :shape_Tb0_effective].conj().T
        R_loop = np.dot(Q_H, R_loop)
        Tbeta_loop = np.dot(Q_H, Tbeta0_Qtilde)

        # inner loop for each random initialization
        Tbetah_R_GtGinv_Rh_inv_Tbeta = None
        for inner in range(max_inner_iter):
            if inner == 0:
                R_GtGinv_Rh = \
                    np.dot(R_loop,
                           linalg.lu_solve(lu_GtG, R_loop.conj().T,
                                           check_finite=check_finite)
                           )
                mtx_loop = \
                    np.vstack((
                        np.hstack((
                            np.dot(Tbeta_loop.conj().T,
                                   linalg.solve(R_GtGinv_Rh, Tbeta_loop,
                                                check_finite=check_finite)
                                   ),
                            S_H, Gamma0.conj()
                        )),
                        mtx_S_row,
                        mtx_last_row
                    ))
            else:
                mtx_loop[:sz_coef, :sz_coef] = Tbetah_R_GtGinv_Rh_inv_Tbeta

            # solve annihilating filter coefficients
            try:
                gamma = linalg.solve(mtx_loop, rhs)[:sz_coef]
                coef = np.dot(Qtilde_mtx, gamma)
            except linalg.LinAlgError:
                break
            c1 = np.reshape(coef[:num_coef1], shape_c1, order='F')
            c2 = np.reshape(coef[num_coef1:num_coef1 + num_coef2],
                            shape_c2,
                            order='F')
            c3 = np.reshape(coef[num_coef1 + num_coef2:], shape_c3, order='F')

            # update the right-dual matrix R and T based on the new coefficients
            R_loop = R_mtx_joint3d(c1, c2, c3, shape_b)
            # use QR decomposition to extract effective lines of equations
            Q_H = linalg.qr(
                R_loop, mode='economic',
                pivoting=False)[0][:, :shape_Tb0_effective].conj().T
            R_loop = np.dot(Q_H, R_loop)
            Tbeta_loop = np.dot(Q_H, Tbeta0_Qtilde)

            # evaluate fitting error without computing b
            '''implementation I, which involves two layers of nested matrix inverses'''
            # Tbetah_R_GtGinv_Rh_inv_Tbeta = \
            #     np.dot(Tbeta_loop.conj().T,
            #            linalg.solve(
            #                np.dot(R_loop,
            #                       linalg.lu_solve(lu_GtG, R_loop.conj().T,
            #                                       check_finite=check_finite)),
            #                Tbeta_loop, check_finite=check_finite)
            #            )
            # # the actual error is this value + |a - G beta|^2, which is a constant
            # error_loop = \
            #     np.real(np.dot(coef.conj().T,
            #                    np.dot(Tbetah_R_GtGinv_Rh_inv_Tbeta, coef)))
            '''implementation II, which only involves the inverse of G^H G and
            adds little extra computational cost compared with implementation I'''
            R_GtGinv_Rh = np.dot(
                R_loop,
                linalg.lu_solve(lu_GtG,
                                R_loop.conj().T,
                                check_finite=check_finite))
            Tbetah_R_GtGinv_Rh_inv_Tbeta = \
                np.dot(
                    Tbeta_loop.conj().T,
                    linalg.solve(R_GtGinv_Rh, Tbeta_loop, check_finite=check_finite)
                )
            Tbeta_c = np.dot(Tbeta_loop, gamma)
            if inner == 0:
                mtx_error = np.row_stack(
                    (np.column_stack((R_GtGinv_Rh,
                                      np.zeros((shape_Tb0_effective, 1),
                                               dtype=complex))),
                     np.append(Tbeta_c.conj()[np.newaxis, :], -1)))
                rhs_error = np.append(Tbeta_c, 0)
            else:
                mtx_error[:shape_Tb0_effective, :
                          shape_Tb0_effective] = R_GtGinv_Rh
                mtx_error[-1, :shape_Tb0_effective] = Tbeta_c.conj()
                rhs_error[:-1] = Tbeta_c

            l_rho = linalg.solve(mtx_error,
                                 rhs_error,
                                 check_finite=check_finite)
            error_loop = l_rho[-1].real

            if 0 < error_loop < min_error:
                # check that the number of non-zero entries is
                # indeed num_dirac + 1 (could be less)
                c1[np.abs(c1) < 1e-2 * np.max(np.abs(c1))] = 0
                c2[np.abs(c2) < 1e-2 * np.max(np.abs(c2))] = 0
                c3[np.abs(c3) < 1e-2 * np.max(np.abs(c3))] = 0
                nnz_cond = \
                    np.sum(1 - np.isclose(np.abs(c1), 0).astype(int)) == num_non_zero and \
                    np.sum(1 - np.isclose(np.abs(c2), 0).astype(int)) == num_non_zero and \
                    np.sum(1 - np.isclose(np.abs(c3), 0).astype(int)) == num_non_zero
                # TODO: add the checks for cases when certain number of Dirac share the x, y, z coordinates
                if nnz_cond:
                    min_error = error_loop
                    c1_opt = c1
                    c2_opt = c2
                    c3_opt = c3
                    l_opt = l_rho[:-1]

            if compute_mse and min_error < noise_level:
                break

        if compute_mse and min_error < noise_level:
            break

        if c1_opt is None or c2_opt is None or c3_opt is None:
            max_ini += 1

    # compute b_opt at the end
    R_opt = R_mtx_joint3d(c1_opt, c2_opt, c3_opt, shape_b)
    # use QR decomposition to extract effective lines of equations
    Q_H = linalg.qr(R_opt, mode='economic',
                    pivoting=False)[0][:, :shape_Tb0_effective].conj().T
    R_opt = np.dot(Q_H, R_opt)
    '''use with implementation I'''
    # mtx_brecon = np.vstack((
    #     np.hstack((GtG, R_opt.conj().T)),
    #     np.hstack((R_opt, np.zeros((shape_Tb0_effective, shape_Tb0_effective))))
    # ))
    # b_opt = \
    #     linalg.solve(mtx_brecon,
    #                  np.concatenate((Gt_a,
    #                                  np.zeros(shape_Tb0_effective,
    #                                           dtype=complex)))
    #                  )[:sz_R1]
    '''use with implementation II'''
    b_opt = beta - linalg.lu_solve(
        lu_GtG, np.dot(R_opt.conj().T, l_opt), check_finite=check_finite)
    return c1_opt, c2_opt, c3_opt, min_error, b_opt, ini
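The comment at the top of this routine explains the choice of lu_factor: G^H G is factored once so that every inner iteration can reuse the factors through lu_solve instead of re-solving from scratch. A minimal standalone sketch of that pattern (matrix sizes and right-hand sides here are made up):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
G = rng.standard_normal((200, 50)) + 1j * rng.standard_normal((200, 50))
GtG = np.dot(G.conj().T, G)

lu_GtG = linalg.lu_factor(GtG)           # factor once, outside the loop
for _ in range(3):                       # stands in for the inner iterations
    rhs = rng.standard_normal((50, 4))
    x = linalg.lu_solve(lu_GtG, rhs)     # only cheap triangular solves here
    assert np.allclose(np.dot(GtG, x), rhs)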
Code example #45
0
def builder_to_model(syst, momenta=None, real_space=True, params=None):
    """Make a qsymm.BlochModel out of a `~kwant.builder.Builder`.

    Parameters
    ----------
    syst : `~kwant.builder.Builder`
        May have translational symmetries.
    momenta : list of strings or None
        Names of momentum variables. If None, 'k_x', 'k_y', ... is used.
    real_space : bool (default True)
        If False, use the unit cell convention for Bloch basis, the
        exponential has the difference in the unit cell coordinates and
        k is expressed in the reciprocal lattice basis. This is consistent
        with `kwant.wraparound`.
        If True, the difference in the real space coordinates is used
        and k is given in an absolute basis.
        Only the default choice guarantees that qsymm is able to find
        nonsymmorphic symmetries.
    params : dict, optional
        Dictionary of parameter names and their values; used when
        evaluating the Hamiltonian matrix elements.

    Returns
    -------
    model : qsymm.BlochModel
        Model representing the tight-binding Hamiltonian.

    Notes
    -----
    The sites in the builder are in lexicographical order, i.e. ordered
    first by their family and then by their tag. This is the same ordering that
    is used in finalized kwant systems.
    """
    def term_to_model(d, par, matrix):
        if allclose(matrix, 0):
            result = BlochModel({}, shape=matrix.shape, format=np.ndarray)
        else:
            result = BlochModel({BlochCoeff(d, qsymm.sympify(par)): matrix},
                                momenta=momenta)
        return result

    def hopping_to_model(hop, value, proj, params):
        site1, site2 = hop
        if real_space:
            d = proj @ np.array(site2.pos - site1.pos)
        else:
            # site in the FD
            d = np.array(syst.symmetry.which(site2))

        slice1, slice2 = slices[to_fd(site1)], slices[to_fd(site2)]
        if callable(value):
            return sum(
                term_to_model(d, par, set_block(slice1, slice2, val))
                for par, val in function_to_terms(hop, value, params))
        else:
            matrix = set_block(slice1, slice2, value)
            return term_to_model(d, '1', matrix)

    def onsite_to_model(site, value, params):
        d = np.zeros((dim, ))
        slice1 = slices[to_fd(site)]
        if callable(value):
            return sum(
                term_to_model(d, par, set_block(slice1, slice1, val))
                for par, val in function_to_terms(site, value, params))
        else:
            return term_to_model(d, '1', set_block(slice1, slice1, value))

    def function_to_terms(site_or_hop, value, fixed_params):
        assert callable(value)
        parameters = get_parameters(value)
        # remove site or site1, site2 parameters
        if isinstance(site_or_hop, builder.Site):
            parameters = parameters[1:]
            site_or_hop = (site_or_hop, )
        else:
            parameters = parameters[2:]
        free_parameters = (par for par in parameters
                           if par not in fixed_params.keys())
        # first set all free parameters to 0
        args = ((fixed_params[par] if par in fixed_params.keys() else 0)
                for par in parameters)
        h_0 = value(*site_or_hop, *args)
        # set one of the free parameters to 1 at a time, the rest 0
        terms = []
        for p in free_parameters:
            args = ((fixed_params[par] if par in fixed_params.keys() else
                     (1 if par == p else 0)) for par in parameters)
            terms.append((p, value(*site_or_hop, *args) - h_0))
        return terms + [('1', h_0)]

    def orbital_slices(syst):
        orbital_slices = {}
        start_orb = 0

        for site in sorted(syst.sites()):
            n = site.family.norbs
            if n is None:
                raise ValueError('norbs must be provided for every lattice.')
            orbital_slices[site] = slice(start_orb, start_orb + n)
            start_orb += n
        return orbital_slices, start_orb

    def set_block(slice1, slice2, val):
        matrix = np.zeros((N, N), dtype=complex)
        matrix[slice1, slice2] = val
        return matrix

    if params is None:
        params = dict()

    periods = np.array(syst.symmetry.periods)
    dim = len(periods)
    to_fd = syst.symmetry.to_fd
    if momenta is None:
        momenta = ['k_x', 'k_y', 'k_z'][:dim]
    # If the system is higher dimensional than the number of translation
    # vectors, we need to project onto the subspace spanned by the
    # translation vectors.
    if dim == 0:
        proj = np.empty((0, len(list(syst.sites())[0].pos)))
    elif dim < len(list(syst.sites())[0].pos):
        proj, r = la.qr(np.array(periods).T, mode='economic')
        sign = np.diag(np.diag(np.sign(r)))
        proj = sign @ proj.T
    else:
        proj = np.eye(dim)

    slices, N = orbital_slices(syst)

    one_way_hoppings = [
        hopping_to_model(hop, value, proj, params)
        for hop, value in syst.hopping_value_pairs()
    ]
    other_way_hoppings = [term.T().conj() for term in one_way_hoppings]
    hoppings = one_way_hoppings + other_way_hoppings

    onsites = [
        onsite_to_model(site, value, params)
        for site, value in syst.site_value_pairs()
    ]

    result = sum(onsites) + sum(hoppings)

    return result
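When the number of translation vectors is smaller than the dimension of the embedding space, the code above orthonormalizes the periods with an economic QR and fixes the signs using the diagonal of R before projecting real-space hopping vectors. A small numpy-only sketch of that projector (periods and hopping vector are made up):

import numpy as np
from scipy import linalg as la

periods = np.array([[1.0, 0.0, 0.0],      # two lattice vectors embedded in 3D
                    [0.0, 1.0, 0.0]])
proj, r = la.qr(periods.T, mode='economic')
sign = np.diag(np.diag(np.sign(r)))
proj = sign @ proj.T                      # (2, 3): rows span the lattice plane

d = np.array([0.5, -1.0, 0.0])            # a real-space hopping vector
print(proj @ d)                           # its coordinates in the lattice basis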
Code example #46
0
sys.path.append(CODE_DIR + 'repler/src/')
import util
import plotting as dicplt

#%%

ndat = 100
nneur = 300

c_up = np.repeat(np.linspace(0, 2 * np.pi, ndat), ndat)  # upper color
c_low = np.tile(np.linspace(0, 2 * np.pi, ndat), ndat)  # lower color

circ1 = np.stack([np.sin(c_up), np.cos(c_up)])
circ2 = np.stack([np.sin(c_low), np.cos(c_low)])

basis = la.qr(np.random.randn(nneur, nneur).T)[0][:8, :]

#%% parallel circles

mu_u = basis[:2, :].T @ circ1 + np.random.randn(nneur, ndat**2) * 0.1
mu_d_l = 0.6 * basis[2:4, :].T @ circ2 + np.random.randn(nneur, ndat**2) * 0.1

mu_l = basis[:2, :].T @ circ2 + basis[[5], :].T + np.random.randn(
    nneur, ndat**2) * 0.1
mu_d_u = 0.6 * basis[2:4, :].T @ circ1 + basis[[6], :].T + np.random.randn(
    nneur, ndat**2) * 0.1

T1_par = mu_u + mu_d_l  # upper cued torus
T2_par = mu_l + mu_d_u  # lower cued torus

U, mwa = util.pca(T1_par)
Code example #47
0
    def information_transfer(self, tau_max=0, estimator='knn',
                             knn=10, past=1, cond_mode='ity', lag_mode='max'):

        r"""
        Return bivariate information transfer between all pairs of nodes.

        Two condition modes of information transfer are available
        as described in [Runge2012b]_.

        Information transfer to Y (ITY):
            .. math::
                I(X^i_{t-\tau}, X^j_t | X^j_{t-1}, \ldots, X^j_{t-past})

        Momentary information transfer (MIT):
            .. math::
                I(X^i_{t-\tau}, X^j_t | X^j_{t-1}, \ldots, X^j_{t-past},
                                       X^i_{t-\tau-1}, \ldots, X^i_{t-\tau-past})

        Two estimators are available:

        estimator = 'knn' (Recommended):
        Based on k-nearest-neighbors [Kraskov2004]_,
        version 1 in their paper. Larger k have smaller variance, but larger
        (typically negative) bias, and vice versa.

        estimator = 'gauss':
        Captures only linear part of association. Essentially estimates a
        transformed partial correlation.


        Two lag-modes are available (default: lag_mode='max'):

        lag_mode = 'all':
        Return 3-dimensional array of lag-functions between all pairs of nodes.
        An entry :math:`(i, j, \tau)` corresponds to :math:`I(X^i_{t-\tau}, X^j_t
        | ...)` for positive lags tau, i.e., the direction i --> j for
        :math:`\tau \ne 0`.

        lag_mode = 'max':
        Return matrix of absolute maxima and corresponding lags of
        lag-functions between all pairs of nodes.
        Returns two usually asymmetric matrices of values and lags: In each
        matrix, an entry :math:`(i, j)` corresponds to the value and lag,
        respectively, at absolute maximum of :math:`I(X^i_{t-\tau}, X^j_t | ...)`
        for positive lags tau, i.e., the direction i --> j for :math:`\tau >
        0`.  The matrices are, thus, asymmetric. The function
        :meth:`.symmetrize_by_absmax` can be used to obtain a symmetric matrix.

        **Example:**

        >>> coup_ana = CouplingAnalysis(CouplingAnalysis.test_data())
        >>> similarity_matrix, lag_matrix = coup_ana.information_transfer(
        ...     tau_max=5, estimator='knn', knn=10)
        >>> r((similarity_matrix, lag_matrix))
        (array([[ 0.    ,  0.1544,  0.3261,  0.3047],
               [  0.0218,  0.    ,  0.0394,  0.0976],
               [  0.0134,  0.0663,  0.    ,  0.1502],
               [  0.0066,  0.0694,  0.0401,  0.    ]]),
        array([[0, 2, 1, 2], [5, 0, 0, 0], [5, 1, 0, 1], [5, 0, 0, 0]]))

        :type tau_max: int [int>=0]
        :arg  tau_max: maximum lag of ITY lag function.

        :type past: int [int>=1]
        :arg  past: maximum lag of past history.

        :type knn: int [int>=1]
        :arg  knn: nearest-neighbor ITY estimation parameter. (default: 10)

        :type bins: int [int>=2]
        :arg  bins: binning ITY estimation parameter. (default: 6)

        :type estimator: str [('knn'|'gauss')]
        :arg  estimator: ITY estimator. (default: 'knn')

        :type cond_mode: str [('ity'|'mit')]
        :arg  cond_mode: condition mode. (default: 'ity')

        :type lag_mode: str [('max'|'all')]
        :arg  lag_mode: lag-mode of ITY to return.

        :rtype: 3D-array or tuple of matrices
        :returns: all-lag array or matrices of value and lag at the absolute
                  maximum.
        """

        data = self.data
        T, N = data.shape

        # Sanity checks
        if not isinstance(data, numpy.ndarray):
            raise TypeError("data is of type %s, must be numpy.ndarray"
                            % type(data))
        if N > T:
            print(f"Warning: data.shape = {data.shape},"
                  " is it of shape (observations, variables) ?")
        if estimator == 'knn' and T < 500:
            print(f"Warning: T = {T} ,"
                  " unreliable estimation using knn-estimator")
        if numpy.isnan(data).sum() != 0:
            raise ValueError("NaNs in the data")
        if tau_max < 0:
            raise ValueError("tau_max = %d, but 0 <= tau_max" % tau_max)
        if estimator == 'knn':
            if knn > T/2. or knn < 1:
                raise ValueError(f"knn = {knn}, should be between 1 and T/2")

        if lag_mode == 'max':
            similarity_matrix = numpy.ones((N, N), dtype='float32')
            lag_matrix = numpy.zeros((N, N), dtype='int8')
        elif lag_mode == 'all':
            lagfuncs = numpy.zeros((N, N, tau_max+1), dtype='float32')

        for i in range(N):
            for j in range(N):
                maximum = 0.
                lag_at_max = 0
                for tau in range(tau_max + 1):

                    X = [(i, -tau)]
                    Y = [(j, 0)]
                    if cond_mode == 'ity':
                        Z = [(j, -p) for p in range(1, past + 1)]
                    elif cond_mode == 'mit':
                        Z = [(j, -p) for p in range(1, past + 1)]
                        Z += [(i, -tau - p) for p in range(1, past + 1)]

                    XYZ = X + Y + Z

                    dim = len(XYZ)
                    max_lag = tau_max + past
                    array = numpy.zeros((dim, T - max_lag))
                    for d, node in enumerate(XYZ):
                        var, lag = node
                        array[d, :] = data[max_lag + lag: T + lag, var]

                    if estimator == 'knn':
                        xyz = numpy.array([0, 1])

                        k_xz, k_yz, k_z = self._get_nearest_neighbors(
                            array=array, xyz=xyz, k=knn, standardize=True)

                        ixy_z = (special.digamma(knn)
                                 + (- special.digamma(k_xz)
                                    - special.digamma(k_yz)
                                    + special.digamma(k_z)).mean())

                    elif estimator == 'gauss':

                        if numpy.isnan(array).sum() != 0:
                            raise ValueError("nans in the array!")

                        # Standardize
                        array -= array.mean(axis=1).reshape(dim, 1)
                        array /= array.std(axis=1).reshape(dim, 1)
                        if numpy.isnan(array).sum() != 0:
                            raise ValueError("nans after standardizing, \
                                             possibly constant array!")

                        x = array[0, :]
                        y = array[1, :]
                        if len(array) > 2:
                            confounds = array[2:, :]
                            ortho_confounds = linalg.qr(
                                numpy.fastCopyAndTranspose(confounds),
                                mode='economic')[0].T
                            x -= numpy.dot(numpy.dot(ortho_confounds, x),
                                           ortho_confounds)
                            y -= numpy.dot(numpy.dot(ortho_confounds, y),
                                           ortho_confounds)

                        ixy_z = self._par_corr_to_cmi(
                            numpy.dot(x, y) / numpy.sqrt(
                                numpy.dot(x, x) * numpy.dot(y, y)))

                    if lag_mode == 'max':
                        if ixy_z > maximum:
                            maximum = ixy_z
                            lag_at_max = tau

                    elif lag_mode == 'all':
                        lagfuncs[i, j, tau] = ixy_z

                if lag_mode == 'max':
                    similarity_matrix[i, j] = maximum
                    lag_matrix[i, j] = lag_at_max

        if lag_mode == 'max':
            similarity_matrix[range(N), range(N)] = 0.
        elif lag_mode == 'all':
            lagfuncs[range(N), range(N), 0] = 0.

        if lag_mode == 'max':
            return similarity_matrix, lag_matrix
        elif lag_mode == 'all':
            return lagfuncs
        else:
            return None
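In the 'gauss' branch above, the conditioning variables are orthonormalized with an economic QR and projected out of x and y, so the conditional mutual information reduces to a transformed partial correlation of the residuals. A compact numpy sketch of that projection step on synthetic data (variable names and numbers are made up):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
T = 1000
z = rng.standard_normal((2, T))                 # conditioning variables
x = z[0] + 0.5 * rng.standard_normal(T)         # x and y share the driver z[0]
y = z[0] + 0.5 * rng.standard_normal(T)

ortho = linalg.qr(z.T, mode='economic')[0].T    # orthonormal rows spanning z
x = x - np.dot(np.dot(ortho, x), ortho)         # residual of x given z
y = y - np.dot(np.dot(ortho, y), ortho)         # residual of y given z

partial_corr = np.dot(x, y) / np.sqrt(np.dot(x, x) * np.dot(y, y))
print(partial_corr)                             # close to 0 once z is removed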
Code example #48
0
def main():
    #1--Integral
    print(quad(lambda x: np.exp(-x), 0, np.inf))  # integral from 0 to infinity
    print(
        dblquad(
            lambda t, x: np.exp(-x * t) / t**3,
            0,
            np.inf,  # range of t
            lambda x: 1,
            lambda x: np.inf))  # double integral; range of x

    def f(x, y):
        return x * y

    def bound_y():
        return [0, 0.5]

    def bound_x(y):
        return [0, 1 - 2 * y]

    print(nquad(f, (bound_x, bound_y)))  # n-dimensional integral

    # 2--Optimizer
    def rosen(x):
        return sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)

    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    res = minimize(rosen,
                   x0,
                   method="nelder-mead",
                   options={
                       "xtol": 1e-8,
                       "disp": True
                   })
    print("ROSE MINI:", res.x)

    # def func(x):
    #     return (2*x[0]*x[1]+2*x[0]-x[0]**2-2*x[1]**2)
    # def func_deriv(x):
    #     dfdx0 = (-2*x[0]+2*x[1]+2)
    #     dfdx1 = (2*x[0]-4*x[1])
    #     return np.array([dfdx0,dfdx1])
    # cons=({"type":"eq","fun":lambda x:np.array([x[0]**3-x[1]]),"jac":lambda x:np.array([3.0*(x[0]**2.0,-1.0)])},
    #       {"type": "ineq", "fun": lambda x: np.array([x[1] - 1]), "jac": lambda x: np.array([0.0,1.0])})
    # res = minimize(func,np.array([-1.0,1.0]),jac=func_deriv,constraints=cons,method='SLSQP',options={'disp':True})
    # print("RESTRICT:",res)
    def fun(x):
        return x + 2 * np.cos(x)

    sol = root(fun, 0.1)
    print("ROOT:", sol.x, sol.fun)
    #3--Inter
    x = np.linspace(0, 1, 10)
    y = np.sin(2 * np.pi * x)
    li = interp1d(x, y, kind="cubic")
    x_new = np.linspace(0, 1, 50)
    y_new = li(x_new)
    figure()
    plot(x, y, "r")
    plot(x_new, y_new, "k")
    show()
    print(y_new)
    #4--Linear
    arr = np.array([[1, 2], [3, 4]])
    print("Det:", lg.det(arr))
    print("Inv:", lg.inv(arr))
    b = np.array([6, 14])
    print("Sol:", lg.solve(arr, b))
    print("Eig:", lg.eig(arr))
    print("LU:", lg.lu(arr))
    print("QR:", lg.qr(arr))
    print("SVD:", lg.svd(arr))
    print("Schur:", lg.schur(arr))
Code example #49
0
def _random_orthonormal_cols(data_size: int, columns: int) -> Matrix:
    return np.ascontiguousarray(
        qr(np.random.randn(data_size, columns),
           mode="economic",
           overwrite_a=True,
           check_finite=False)[0])
Code example #50
0
def get_orthogonal_matrix(dim):
    H = np.random.randn(dim, dim)
    Q, R = qr(H)
    return Q
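Note that QR of a Gaussian matrix always returns an orthogonal Q, but its distribution is only uniform (Haar) after correcting for the sign convention of the factorization. A hedged variant with the usual fix of rescaling each column of Q by the sign of the matching diagonal entry of R (cf. Mezzadri, 2007):

import numpy as np
from scipy.linalg import qr

def get_haar_orthogonal_matrix(dim, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    H = rng.standard_normal((dim, dim))
    Q, R = qr(H)
    # flip each column of Q according to the sign of the matching diagonal of R
    return Q * np.sign(np.diag(R))

Q = get_haar_orthogonal_matrix(5)
assert np.allclose(Q.T @ Q, np.eye(5))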
Code example #51
0
File: confidence.py Project: erickmartinez/pydlcp
def confidence_interval(x, func, res, **kwargs):
    """
    This function estimates the confidence interval for the optimized parameters
    from the fit.
    
    Parameters
    ----------
    x: [double]
        The observed x points
    func: callback
        The function
    res: OptimizeResult
        The optimized result from least_squares minimization
    **kwargs
        confidence: float
            The confidence level (default 0.95)
    Returns
    -------
    ci: [double]
        The confidence interval
    """
    from scipy.optimize import optimize
    if not isinstance(res, optimize.OptimizeResult):
        raise ValueError(
            'Argument \'res\' should be an instance of \'scipy.optimize.OptimizeResult\''
        )

    import scipy.linalg as LA
    from scipy.stats.distributions import t

    confidence = kwargs.get('confidence', 0.95)

    # The residual
    resid = res.fun
    n = len(resid)
    p = len(res.x)
    dfe = n - p
    # Get MSE. The degrees of freedom when J is full rank is v = n-p and n-rank(J) otherwise
    mse = (LA.norm(resid))**2 / (dfe)

    # Needs to estimate the jacobian at the predictor point!!!
    # From MATLAB toolbox/stats/stats/nlpredci
    #    ypred = func(x,res.x)
    #    delta = np.zeros((len(ypred),p));
    #    fdiffstep       = np.amax(np.spacing(res.x)**(1/3));
    #    for i in range(p):
    #        change = np.zeros(p)
    #        if res.x[i] == 0:
    #            nb = np.sqrt(LA.norm(res.x))
    #            change[i] = fdiffstep * (nb + (nb == 0))
    #        else:
    #            change[i] = fdiffstep * res.x[i]
    #
    #        predplus    = func(x,res.x+change)
    #        delta[:,i]  = (predplus - ypred)/change[i]
    # Find R to get the variance
    _, R = LA.qr(res.jac)
    # Get the rank of jac
    Rinv = LA.pinv(R)

    v = np.sum(Rinv**2, axis=1) * mse
    alpha = 1.0 - confidence
    tval = t.ppf(1.0 - alpha / 2.0, dfe)
    delta = np.sqrt(v) * tval
    ci = np.zeros((p, 2), dtype=np.float64)

    for i, p, d in zip(range(n), res.x, delta):
        ci[i, :] = [p - d, p + d]

    return ci
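A hypothetical usage sketch of the routine above together with scipy.optimize.least_squares; the exponential model, the synthetic data and the parameter values are made up for illustration:

import numpy as np
from scipy.optimize import least_squares

def model(x, p):
    return p[0] * np.exp(-p[1] * x)

rng = np.random.default_rng(1)
xdata = np.linspace(0.0, 5.0, 50)
ydata = model(xdata, [2.0, 0.7]) + 0.05 * rng.standard_normal(xdata.size)

res = least_squares(lambda p: model(xdata, p) - ydata, x0=[1.0, 1.0])
ci = confidence_interval(xdata, model, res, confidence=0.95)
print(ci)    # one (lower, upper) row per fitted parameter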
Code example #52
0
    for ind, line in enumerate(lines):
        num = line.split(',')
        labels[ind] = int(num[0])
        data[ind] = [int(x) for x in num[1:]]

    return data, labels


# gradient Descent

train_data, train_labels = read_data("sample_train.csv")
train_data = train_data / 255
cov_mat = np.cov(train_data.T)
alpha = 0.5
H = np.random.randn(784, 784)
Q, R = qr(H)
p = Q
print(p)
gradient = np.zeros((1, 784))
for i in range(100):
    for j in range(2):
        for k in range(3, 784):
            gradient += 6000 * np.matmul(cov_mat, p[:, k])
        p[:, j] = p[:, j] - alpha * 2 * gradient
z = np.matmul(train_data, p[:, 0:2])
plt.plot(z, 'r+')
plt.show()
evalue, evct = np.linalg.eig(cov_mat)
s = (p[:, 0:2] - evct[:, 0:2])
plt.plot(s, 'g*')
plt.show()
Code example #53
0
def test_reproject_continuous(n=100, m=20, r=10):
    """Test pre.reproject_continuous()."""
    # Construct dummy operators.
    k = 1 + r + r * (r + 1) // 2
    I = np.eye(n)
    D = np.diag(1 - np.logspace(-1, -2, n))
    W = la.qr(np.random.normal(size=(n, n)))[0]
    A = W.T @ D @ W
    Ht = np.random.random((n, n, n))
    H = (Ht + Ht.T) / 20
    H = H.reshape((n, n**2))
    B = np.random.random((n, m))
    U = np.random.random((m, k))
    B1d = np.random.random(n)
    U1d = np.random.random(k)
    Vr = np.eye(n)[:, :r]
    X = np.random.random((n, k))

    # Try with bad initial condition shape.
    with pytest.raises(ValueError) as exc:
        roi.pre.reproject_continuous(lambda x: x, Vr, X[:-1, :])
    assert exc.value.args[0] == \
        f"X and Vr not aligned, first dimension {n-1} != {n}"

    # Linear case, no inputs.
    f = lambda x: A @ x
    X_, Xdot_ = roi.pre.reproject_continuous(f, Vr, X)
    assert X_.shape == (r, k)
    assert Xdot_.shape == (r, k)
    model = roi.InferredContinuousROM("A").fit(Vr, X_, Xdot_)
    assert np.allclose(model.A_, Vr.T @ A @ Vr)

    # Linear case, 1D inputs.
    f = lambda x, u: A @ x + B1d * u
    X_, Xdot_ = roi.pre.reproject_continuous(f, Vr, X, U1d)
    assert X_.shape == (r, k)
    assert Xdot_.shape == (r, k)
    model = roi.InferredContinuousROM("AB").fit(Vr, X_, Xdot_, U1d)
    assert np.allclose(model.A_, Vr.T @ A @ Vr)
    assert np.allclose(model.B_.flatten(), Vr.T @ B1d)

    # Linear case, 2D inputs.
    f = lambda x, u: A @ x + B @ u
    X_, Xdot_ = roi.pre.reproject_continuous(f, Vr, X, U)
    assert X_.shape == (r, k)
    assert Xdot_.shape == (r, k)
    model = roi.InferredContinuousROM("AB").fit(Vr, X_, Xdot_, U)
    assert np.allclose(model.A_, Vr.T @ A @ Vr)
    assert np.allclose(model.B_, Vr.T @ B)

    # Quadratic case, no inputs.
    f = lambda x: A @ x + H @ np.kron(x, x)
    X_, Xdot_ = roi.pre.reproject_continuous(f, Vr, X)
    assert X_.shape == (r, k)
    assert Xdot_.shape == (r, k)
    model = roi.InferredContinuousROM("AH").fit(Vr, X_, Xdot_)
    assert np.allclose(model.A_, Vr.T @ A @ Vr)
    H_ = Vr.T @ H @ np.kron(Vr, Vr)
    for _ in range(10):
        x_ = np.random.random(r)
        x2_ = np.kron(x_, x_)
        assert np.allclose(model.H_ @ x2_, H_ @ x2_)
Code example #54
0
File: confidence.py Project: erickmartinez/pydlcp
def predint(x, xd, yd, func, res, **kwargs):
    """
    This function estimates the prediction bands for the fit
    (see: https://www.mathworks.com/help/curvefit/confidence-and-prediction-bounds.html)
    Parameters 
    ----------
    x: [double]
        The requested x points for the bands
    xd: [double]
        The x datapoints
    yd: [double]
        The y datapoints
    func: obj
        The fitted function
    res: OptimizeResult
        The optimized result from least_squares minimization
    **kwargs
        confidence: float
            The confidence level (default 0.95)
        simultaneous: bool
            True if the bound type is simultaneous, false otherwise
        mode: [functional, observation]
            Default observation        
    """

    if len(yd) != len(xd):
        raise ValueError('The length of the observations should be the same '+\
                         'as the length of the predictions.')
    if len(yd) <= 1:
        raise ValueError('Too few datapoints')
    from scipy.optimize import optimize

    if not isinstance(res, optimize.OptimizeResult):
        raise ValueError(
            'Argument \'res\' should be an instance of \'scipy.optimize.OptimizeResult\''
        )

    import scipy.linalg as LA
    simultaneous = kwargs.get('simultaneous', True)
    mode = kwargs.get('mode', 'observation')
    confidence = kwargs.get('confidence', 0.95)

    p = len(res.x)

    # Needs to estimate the jacobian at the predictor point!!!
    # From MATLAB toolbox/stats/stats/nlpredci
    ypred = func(x, res.x)
    if callable(res.jac):
        delta = res.jac(x)
    else:
        delta = np.zeros((len(ypred), p))
        fdiffstep = np.spacing(np.abs(res.x))**(1 / 3)
        #    print('diff_step = {0}'.format(fdiffstep))
        #    print('popt = {0}'.format(res.x))
        for i in range(p):
            change = np.zeros(p)
            if res.x[i] == 0:
                nb = np.sqrt(LA.norm(res.x))
                change[i] = fdiffstep[i] * (nb + (nb == 0))
            else:
                change[i] = fdiffstep[i] * res.x[i]

            predplus = func(x, res.x + change)
            delta[:, i] = (predplus - ypred) / change[i]
    #    print('delta = {0}'.format(delta))

    # Find R to get the variance
    _, R = LA.qr(res.jac)
    # Get the rank of jac
    rankJ = res.jac.shape[1]
    Rinv = LA.pinv(R)
    pinvJTJ = np.dot(Rinv, Rinv.T)

    # The residual
    resid = res.fun
    n = len(resid)
    # Get MSE. The degrees of freedom when J is full rank is v = n-p and n-rank(J) otherwise
    mse = (LA.norm(resid))**2 / (n - rankJ)
    # Calculate Sigma if usingJ
    Sigma = mse * pinvJTJ

    # Compute varpred
    varpred = np.sum(np.dot(delta, Sigma) * delta, axis=1)
    #    print('varpred = {0}, len: '.format(varpred,len(varpred)))
    alpha = 1.0 - confidence
    if mode == 'observation':
        # Assume a constant variance model if errorModelInfo and weights are
        # not supplied.
        errorVar = mse * np.ones(delta.shape[0])
        #        print('errorVar = {0}, len: '.format(errorVar,len(errorVar)))
        varpred += errorVar
    # The significance
    if simultaneous:
        from scipy.stats.distributions import f
        sch = [rankJ + 1]
        crit = f.ppf(1.0 - alpha, sch, n - rankJ)
    else:
        from scipy.stats.distributions import t
        crit = t.ppf(1.0 - alpha / 2.0, n - rankJ)

    delta = np.sqrt(varpred) * crit

    lpb = ypred - delta
    upb = ypred + delta

    return ypred, lpb, upb
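As with the confidence-interval sketch after code example #51, a hypothetical call that also requests prediction bands at new x points (model, data and fit are again made up):

import numpy as np
from scipy.optimize import least_squares

def model(x, p):
    return p[0] * np.exp(-p[1] * x)

rng = np.random.default_rng(1)
xd = np.linspace(0.0, 5.0, 50)
yd = model(xd, [2.0, 0.7]) + 0.05 * rng.standard_normal(xd.size)
res = least_squares(lambda p: model(xd, p) - yd, x0=[1.0, 1.0])

x_new = np.linspace(0.0, 5.0, 200)
ypred, lpb, upb = predint(x_new, xd, yd, model, res,
                          confidence=0.95, mode='observation')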
Code example #55
0
File: signal.py Project: zhoujian1210/nilearn
def clean(signals,
          sessions=None,
          detrend=True,
          standardize=True,
          confounds=None,
          low_pass=None,
          high_pass=None,
          t_r=2.5,
          ensure_finite=False):
    """Improve SNR on masked fMRI signals.

    This function can do several things on the input signals, in
    the following order:

    - detrend
    - standardize
    - remove confounds
    - low- and high-pass filter

    Low-pass filtering improves specificity.

    High-pass filtering should be kept small, to keep some
    sensitivity.

    Filtering is only meaningful on evenly-sampled signals.

    Parameters
    ----------
    signals: numpy.ndarray
        Timeseries. Must have shape (instant number, features number).
        This array is not modified.

    sessions : numpy array, optional
        Add a session level to the cleaning process. Each session will be
        cleaned independently. Must be a 1D array of n_samples elements.

    confounds: numpy.ndarray, str or list of
        Confounds timeseries. Shape must be
        (instant number, confound number), or just (instant number,)
        The number of time instants in signals and confounds must be
        identical (i.e. signals.shape[0] == confounds.shape[0]).
        If a string is provided, it is assumed to be the name of a csv file
        containing signals as columns, with an optional one-line header.
        If a list is provided, all confounds are removed from the input
        signal, as if all were in the same array.

    t_r: float
        Repetition time, in second (sampling period).

    low_pass, high_pass: float
        Respectively low and high cutoff frequencies, in Hertz.

    detrend: bool
        If detrending should be applied on timeseries (before
        confound removal)

    standardize: bool
        If True, returned signals are set to unit variance.

    ensure_finite: bool
        If True, the non-finite values (NANs and infs) found in the data
        will be replaced by zeros.

    Returns
    -------
    cleaned_signals: numpy.ndarray
        Input signals, cleaned. Same shape as `signals`.

    Notes
    -----
    Confounds removal is based on a projection on the orthogonal
    of the signal space. See `Friston, K. J., A. P. Holmes,
    K. J. Worsley, J.-P. Poline, C. D. Frith, et R. S. J. Frackowiak.
    "Statistical Parametric Maps in Functional Imaging: A General
    Linear Approach". Human Brain Mapping 2, no 4 (1994): 189-210.
    <http://dx.doi.org/10.1002/hbm.460020402>`_

    See Also
    --------
        nilearn.image.clean_img
    """

    if isinstance(low_pass, bool):
        raise TypeError("low pass must be float or None but you provided "
                        "low_pass='******'".format(low_pass))
    if isinstance(high_pass, bool):
        raise TypeError("high pass must be float or None but you provided "
                        "high_pass='******'".format(high_pass))

    if not isinstance(confounds,
                      (list, tuple, _basestring, np.ndarray, type(None))):
        raise TypeError("confounds keyword has an unhandled type: %s" %
                        confounds.__class__)

    if not isinstance(ensure_finite, bool):
        raise ValueError(
            "'ensure_finite' must be boolean type True or False "
            "but you provided ensure_finite={0}".format(ensure_finite))

    if not isinstance(signals, np.ndarray):
        signals = as_ndarray(signals)

    if ensure_finite:
        signals[np.logical_not(np.isfinite(signals))] = 0

    # Read confounds
    if confounds is not None:
        if not isinstance(confounds, (list, tuple)):
            confounds = (confounds, )

        all_confounds = []
        for confound in confounds:
            if isinstance(confound, _basestring):
                filename = confound
                confound = csv_to_array(filename)
                if np.isnan(confound.flat[0]):
                    # There may be a header
                    if NP_VERSION >= [1, 4, 0]:
                        confound = csv_to_array(filename, skip_header=1)
                    else:
                        confound = csv_to_array(filename, skiprows=1)
                if confound.shape[0] != signals.shape[0]:
                    raise ValueError("Confound signal has an incorrect length")

            elif isinstance(confound, np.ndarray):
                if confound.ndim == 1:
                    confound = np.atleast_2d(confound).T
                elif confound.ndim != 2:
                    raise ValueError("confound array has an incorrect number "
                                     "of dimensions: %d" % confound.ndim)

                if confound.shape[0] != signals.shape[0]:
                    raise ValueError("Confound signal has an incorrect length")
            else:
                raise TypeError("confound has an unhandled type: %s" %
                                confound.__class__)
            all_confounds.append(confound)

        # Restrict the signal to the orthogonal of the confounds
        confounds = np.hstack(all_confounds)
        del all_confounds

    if sessions is not None:
        if not len(sessions) == len(signals):
            raise ValueError(
                ('The length of the session vector (%i) '
                 'does not match the length of the signals (%i)') %
                (len(sessions), len(signals)))
        for s in np.unique(sessions):
            session_confounds = None
            if confounds is not None:
                session_confounds = confounds[sessions == s]
            signals[sessions == s, :] = \
                clean(signals[sessions == s],
                      detrend=detrend, standardize=standardize,
                      confounds=session_confounds, low_pass=low_pass,
                      high_pass=high_pass, t_r=2.5)

    # detrend
    signals = _ensure_float(signals)
    signals = _standardize(signals, normalize=False, detrend=detrend)

    # Remove confounds
    if confounds is not None:
        confounds = _ensure_float(confounds)
        confounds = _standardize(confounds,
                                 normalize=standardize,
                                 detrend=detrend)
        if not standardize:
            # Improve numerical stability by controlling the range of
            # confounds. We don't rely on _standardize as it removes any
            # constant contribution to confounds.
            confound_max = np.max(np.abs(confounds), axis=0)
            confound_max[confound_max == 0] = 1
            confounds /= confound_max

        # Pivoting in qr decomposition was added in scipy 0.10
        Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
        Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float).eps * 100.]
        signals -= Q.dot(Q.T).dot(signals)

    if low_pass is not None or high_pass is not None:
        if t_r is None:
            raise ValueError("Repetition time (t_r) must be specified for "
                             "filtering")

        signals = butterworth(signals,
                              sampling_rate=1. / t_r,
                              low_pass=low_pass,
                              high_pass=high_pass)

    if standardize:
        signals = _standardize(signals, normalize=True, detrend=False)
        signals *= np.sqrt(signals.shape[0])  # for unit variance

    return signals
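A short, hedged usage sketch with synthetic data, relying only on the shapes and keyword arguments documented in the docstring above (all numbers are made up):

import numpy as np

rng = np.random.default_rng(42)
n_scans, n_voxels, n_confounds = 120, 10, 3
signals = rng.standard_normal((n_scans, n_voxels))
confounds = rng.standard_normal((n_scans, n_confounds))

cleaned = clean(signals, confounds=confounds, t_r=2.5,
                low_pass=0.1, high_pass=0.01)
print(cleaned.shape)    # same shape as the input signals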
Code example #56
0
def test_reproject_discrete(n=50, m=5, r=3):
    """Test pre.reproject_discrete()."""
    # Construct dummy operators.
    k = 1 + r + r * (r + 1) // 2
    I = np.eye(n)
    D = np.diag(1 - np.logspace(-1, -2, n))
    W = la.qr(np.random.normal(size=(n, n)))[0]
    A = W.T @ D @ W
    Ht = np.random.random((n, n, n))
    H = (Ht + Ht.T) / 20
    H = H.reshape((n, n**2))
    B = np.random.random((n, m))
    U = np.random.random((m, k))
    B1d = np.random.random(n)
    U1d = np.random.random(k)
    Vr = np.eye(n)[:, :r]
    x0 = np.zeros(n)
    x0[0] = 1

    # Try with bad initial condition shape.
    with pytest.raises(ValueError) as exc:
        roi.pre.reproject_discrete(lambda x: x, Vr, x0[:-1], k)
    assert exc.value.args[0] == "basis Vr and initial condition x0 not aligned"

    # Linear case, no inputs.
    f = lambda x: A @ x
    X_ = roi.pre.reproject_discrete(f, Vr, x0, k)
    assert X_.shape == (r, k)
    model = roi.InferredDiscreteROM("A").fit(Vr, X_)
    assert np.allclose(Vr @ X_, model.predict(X_[:, 0], k))
    assert np.allclose(model.A_, Vr.T @ A @ Vr)

    # Linear case, 1D inputs.
    f = lambda x, u: A @ x + B1d * u
    X_ = roi.pre.reproject_discrete(f, Vr, x0, k, U1d)
    assert X_.shape == (r, k)
    model = roi.InferredDiscreteROM("AB").fit(Vr, X_, U1d)
    assert np.allclose(X_, Vr.T @ model.predict(X_[:, 0], k, U1d))
    assert np.allclose(model.A_, Vr.T @ A @ Vr)
    assert np.allclose(model.B_.flatten(), Vr.T @ B1d)

    # Linear case, 2D inputs.
    f = lambda x, u: A @ x + B @ u
    X_ = roi.pre.reproject_discrete(f, Vr, x0, k, U)
    assert X_.shape == (r, k)
    model = roi.InferredDiscreteROM("AB").fit(Vr, X_, U)
    assert np.allclose(X_, Vr.T @ model.predict(X_[:, 0], k, U))
    assert np.allclose(model.A_, Vr.T @ A @ Vr)
    assert np.allclose(model.B_, Vr.T @ B)

    # Quadratic case, no inputs.
    f = lambda x: A @ x + H @ np.kron(x, x)
    X_ = roi.pre.reproject_discrete(f, Vr, x0, k)
    assert X_.shape == (r, k)
    model = roi.InferredDiscreteROM("AH").fit(Vr, X_)
    assert np.allclose(X_, Vr.T @ model.predict(X_[:, 0], k))
    assert np.allclose(model.A_, Vr.T @ A @ Vr, atol=1e-6, rtol=1e-6)
    H_ = Vr.T @ H @ np.kron(Vr, Vr)
    for _ in range(10):
        x_ = np.random.random(r)
        x2_ = np.kron(x_, x_)
        assert np.allclose(model.H_ @ x2_, H_ @ x2_)
Code example #57
0
File: stepwisefit.py Project: saorisakaue/MIGWAS
def stepcalc(allx, y, inmodel):
    """
    Perform fit and other calculations as part of stepwise regression.
    """

    N = y.size  # Number of independent tests (rows in allx).
    P = inmodel.size  # Number of independent variables in each test
    # (cols in allx).
    X = np.concatenate((np.ones((N, 1)), allx[:, inmodel]), 1)
    nin = inmodel.sum() + 1
    tol = max(N, P + 1) * np.finfo(allx.dtype).eps
    x = allx[:, ~inmodel]
    sumxsq = (x**2).sum(axis=0)

    # Compute b and its standard error.
    Q, R, perm = qr(X, mode="economic", pivoting=True)
    Rrank = (abs(np.diag(R)) > tol * abs(R.ravel()[0])).sum()
    if Rrank < nin:
        R = R[0:Rrank, 0:Rrank]
        Q = Q[:, 0:Rrank]
        perm = perm[0:Rrank]

    # Compute the LS coefficients, filling in zeros in elements corresponding
    # to rows of X that were thrown out.
    b = np.zeros((nin, 1))
    Qb = np.dot(Q.conj().T, y)
    Qb[abs(Qb) < tol * max(abs(Qb))] = 0
    b[perm] = linalg.solve(R, Qb)

    r = y - np.dot(X, b)
    dfe = X.shape[0] - Rrank
    df0 = Rrank - 1
    SStotal = linalg.norm(y - y.mean())
    SStotal = np.dot(SStotal, SStotal)
    SSresid = linalg.norm(r)
    SSresid = np.dot(SSresid, SSresid)
    perfectyfit = (dfe == 0) or (SSresid < tol * SStotal)
    if perfectyfit:
        SSresid = 0
        r[:] = 0
    rmse = np.sqrt(np.divide(SSresid, dfe))
    Rinv = linalg.solve(R, np.eye(max(R.shape))[0:R.shape[0], 0:R.shape[1]])
    se = np.zeros((nin, 1))
    se[perm] = rmse * np.expand_dims(np.sqrt((Rinv**2).sum(axis=1)), 1)

    # Compute separate added-variable coeffs and their standard errors.
    xr = x - np.dot(Q, np.dot(Q.conj().T, x))
    # remove effect of "in" predictors on "out" predictors
    yr = r
    # remove effect of "in" predictors on response

    xx = (xr**2).sum(axis=0)

    perfectxfit = (xx <= tol * sumxsq)
    if perfectxfit.any():  # set coef to 0 for columns dependent on the "in" cols
        xr[:, perfectxfit] = 0
        xx[perfectxfit] = 1
    b2 = np.divide(np.dot(yr.conj().T, xr), xx)
    r2 = np.tile(yr, (1, (~inmodel).sum())) - xr * np.tile(b2, (N, 1))
    df2 = max(0, dfe - 1)
    s2 = np.divide(np.sqrt(np.divide((r2**2).sum(axis=0), df2)), np.sqrt(xx))
    if len(s2.shape) == 1:
        s2 = s2.reshape((1, s2.shape[0]))

    # Combine in/out coefficients and standard errors.
    B = np.zeros((P, 1))
    B[inmodel] = b[1:]
    B[~inmodel] = b2.conj().T
    SE = np.zeros((P, 1))
    SE[inmodel] = se[1:]
    SE[~inmodel] = s2.conj().T

    #Get P-to-enter or P-to-remove for each term.
    PVAL = np.zeros((P, 1))
    tstat = np.zeros((P, 1))
    if any(inmodel):
        tval = np.divide(B[inmodel], SE[inmodel])
        ptemp = 2 * scipy.stats.t.cdf(-abs(tval), dfe)
        PVAL[inmodel] = ptemp
        tstat[inmodel] = tval
    if any(~inmodel):
        if dfe > 1:
            tval = np.divide(B[~inmodel], SE[~inmodel])
            ptemp = 2 * scipy.stats.t.cdf(-abs(tval), dfe - 1)
            flat_tval = tval.ravel()
            flat_ptemp = ptemp.ravel()
            for i in range(flat_tval.size):
                if np.isnan(flat_tval[i]):
                    flat_ptemp[i] = np.NaN
        else:
            tval = np.NaN
            ptemp = np.NaN
        PVAL[~inmodel] = ptemp
        tstat[~inmodel] = tval

    # Compute some summary statistics.
    MSexplained = np.divide(SStotal - SSresid, df0)
    fstat = np.divide(MSexplained, np.dot(rmse, rmse))
    pval = scipy.stats.f.cdf(1. / fstat, dfe, df0)

    # Return summary statistics as a single structure.
    stats = InfoDict()
    stats.source = "stepwisefit"
    stats.dfe = dfe
    stats.df0 = df0
    stats.SStotal = SStotal
    stats.SSresid = SSresid
    stats.fstat = fstat
    stats.pval = pval
    stats.rmse = rmse
    stats.xr = xr
    stats.yr = yr
    stats.B = B
    stats.SE = SE
    stats.TSTAT = tstat
    stats.PVAL = PVAL
    stats.intercept = b[0]

    return B, SE, PVAL, stats
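The rank test near the top of stepcalc relies on column-pivoted QR: with pivoting, the magnitudes on the diagonal of R are non-increasing, so counting the entries above a tolerance gives the numerical rank. A small standalone demonstration using the same tolerance convention (the design matrix is made up):

import numpy as np
from scipy.linalg import qr

rng = np.random.default_rng(0)
X = rng.standard_normal((20, 2))
X = np.column_stack((X, X @ [1.0, -2.0]))     # third column is dependent

Q, R, perm = qr(X, mode="economic", pivoting=True)
tol = max(X.shape) * np.finfo(X.dtype).eps
rank = (abs(np.diag(R)) > tol * abs(R.ravel()[0])).sum()
print(rank)                                   # 2, not 3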
Code example #58
0
File: scipyTest.py Project: HanKin2015/ACM
def main():
    #--Integral
    from scipy.integrate import quad,dblquad,nquad
    print(quad(lambda x:np.exp(-x),0,np.inf))
    print(dblquad(lambda t, x:np.exp(-x*t)/t**3,0,np.inf,lambda x:1, lambda x:np.inf))
    def f(x,y):
        return x*y
    def bound_y():
        return [0,0.5]
    def bound_x(y):
        return [0,1-2*y]
    print("NQUAD",nquad(f,[bound_x,bound_y]))

    #2--Optimizer
    from scipy.optimize import minimize
    def rosen(x):
        return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0+(1-x[:-1])**2.0)
    x0=np.array([1,3,0.7,0.8,1.9,1.2])
    res=minimize(rosen,x0,method="nelder-mead",options={"xtol":1e-8,"disp":True})
    print("ROSE MINI:",res)     #求出函数全局的最小值,可看属性res.x
    
    def func(x):
        return (2*x[0]*x[1]+2*x[0]-x[0]**2-2*x[1]**2)
    def func_deriv(x):
        dfdx0 =(-2*x[0]+2*x[1]+2)
        dfdx1 =(2*x[0]-4*x[1])
        return np.array([dfdx0,dfdx1])
#    cons = 
#    optimizer, Lagrange multipliers (constrained extrema), Jacobian matrix, root finding (unknowns)
    from scipy.optimize import root
    def fun(x):
        return x+2*np.cos(x)
    sol=root(fun,0.1)
    print("ROOT:",sol.x,sol.fun)
    
    def fun_simple(x):
        return x+3
    sol=root(fun_simple,0.5)
    print("ROOT:",sol.x,sol.fun)
    
    #3--Interpolation
    x=np.linspace(0,1,10)
    y=np.sin(2*np.pi*x)
#    You used interpld, i.e. INTERPLD.
#    You want interp1d, i.e. with the numeral 1, for one-dimensional.
    from scipy.interpolate import interp1d
    li=interp1d(x,y,kind='cubic')
    x_new=np.linspace(0,1,50)
    y_new=li(x_new)
    figure()
    plot(x,y,'r')   # red
    plot(x_new,y_new,'k')   # black
    print(y_new)
    
    #4--Linear
    from scipy import linalg as lg
    arr = np.array([[1,2],[3,4]])
    print("Det:",lg.det(arr))
    print("Inv:",lg.inv(arr))
    b=np.array([6,14])
    print("Sol:",lg.solve(arr,b))
    print("Eig:",lg.eig(arr))
    print("LU:",lg.lu(arr))
    print("QR:",lg.qr(arr))
    print("SVD:",lg.svd(arr))
    print("Schur:",lg.schur(arr))
Code example #59
0
def get_model_improving_point(settings, n, k, var_lower, var_upper, node_pos,
                              model_set, start_point_index, tr_radius,
                              integer_vars):
    """Compute a point to improve the model used in the trust region.

    Determine a point that improves the geometry of the set of points
    used to build the trust region model. This point may not have a
    good objective function value, but it ensures that the model is
    well behaved.

    Parameters
    ----------
    settings : :class:`rbfopt_settings.RbfoptSettings`.
        Global and algorithmic settings.

    n : int
        Dimension of the problem, i.e. the size of the space.

    k : int
        Number of interpolation nodes.

    var_lower : 1D numpy.ndarray[float]
        Vector of variable lower bounds.
    
    var_upper : 1D numpy.ndarray[float]
        Vector of variable upper bounds.

    node_pos : 2D numpy.ndarray[float]
        List of coordinates of the nodes.

    model_set : 1D numpy.ndarray[int]
        Indices of points in node_pos to be used to compute model.

    start_point_index : int
        Index in node_pos of the starting point for the descent.

    tr_radius : float
        Radius of the trust region.

    integer_vars : 1D numpy.ndarray[int]
        Indices of the integer variables.

    Returns
    -------
    (1D numpy.ndarray[float], bool, int)
        Next candidate point to improve the model, a boolean
        indicating success, and the index of the point to replace if
        successful.

    """
    assert (isinstance(var_lower, np.ndarray))
    assert (isinstance(var_upper, np.ndarray))
    assert (len(var_lower) == n)
    assert (len(var_upper) == n)
    assert (isinstance(node_pos, np.ndarray))
    assert (len(node_pos) == k)
    assert (isinstance(model_set, np.ndarray))
    assert (start_point_index < k)
    assert (tr_radius >= 0)
    assert (isinstance(settings, RbfoptSettings))
    # Remove the start point from the model set if necessary
    red_model_set = np.array([i for i in model_set if i != start_point_index])
    model_size = len(red_model_set)
    # Tolerance for linearly dependent rows
    # Determine the coefficients of the directions spanned by the model
    A = node_pos[red_model_set] - node_pos[start_point_index]
    Q, R, P = la.qr(A.T, mode='full', pivoting=True)
    rank = min(A.shape) - np.abs(np.diag(R))[::-1].searchsorted(
        settings.eps_linear_dependence)
    if (rank >= model_size):
        # Numerically, the rank is ok according to our tolerance.
        # Return indicating that we do not have to perform model
        # improvement.
        return (node_pos[start_point_index], False, start_point_index)
    success = False
    d = np.zeros(n)
    i = rank
    to_replace = P[i]
    while (i < model_size and not success):
        # Determine candidate direction
        d = Q[:, i].T * tr_radius
        d = np.clip(node_pos[start_point_index] + d, var_lower,
                    var_upper) - node_pos[start_point_index]
        if (len(integer_vars)):
            # Zero out small directions, and increase to one nonzero
            # integer directions
            d[np.abs(d) < settings.eps_zero] = 0
            d[integer_vars] = (np.sign(d[integer_vars]) * np.maximum(
                np.abs(d[integer_vars]), np.ones(len(integer_vars))))
            d[integer_vars] = np.around(d[integer_vars])
        # Check if rank increased
        B = np.vstack((A[P[:rank], :], d.T))
        Q2, R2, P2 = la.qr(B.T, mode='full', pivoting=True)
        new_rank = min(B.shape) - np.abs(np.diag(R2))[::-1].searchsorted(
            settings.eps_linear_dependence)
        if (new_rank > rank):
            to_replace = P[i]
            success = True
        i += 1
    return (node_pos[start_point_index] + d, success, to_replace)
Code example #60
0
File: trf.py Project: Ombarus/python_env
def trf_bounds(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev,
               x_scale, loss_function, tr_solver, tr_options, verbose):
    x = x0.copy()

    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1
    m, n = J.shape

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
    if jac_scale:
        scale, scale_inv = compute_jac_scale(J)
    else:
        scale, scale_inv = x_scale, 1 / x_scale

    v, dv = CL_scaling_vector(x, g, lb, ub)
    v[dv != 0] *= scale_inv[dv != 0]
    Delta = norm(x0 * scale_inv / v**0.5)
    if Delta == 0:
        Delta = 1.0

    g_norm = norm(g * v, ord=np.inf)

    f_augmented = np.zeros((m + n))
    if tr_solver == 'exact':
        J_augmented = np.empty((m + n, n))
    elif tr_solver == 'lsmr':
        reg_term = 0.0
        regularize = tr_options.pop('regularize', True)

    if max_nfev is None:
        max_nfev = x0.size * 100

    alpha = 0.0  # "Levenberg-Marquardt" parameter

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        v, dv = CL_scaling_vector(x, g, lb, ub)

        g_norm = norm(g * v, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        # Now compute variables in "hat" space. Here we also account for
        # scaling introduced by `x_scale` parameter. This part is a bit tricky,
        # you have to write down the formulas and see how the trust-region
        # problem is formulated when the two types of scaling are applied.
        # The idea is that first we apply `x_scale` and then apply Coleman-Li
        # approach in the new variables.

        # v is recomputed in the variables after applying `x_scale`, note that
        # components which were identically 1 are not affected.
        v[dv != 0] *= scale_inv[dv != 0]

        # Here we apply two types of scaling.
        d = v**0.5 * scale

        # C = diag(g * scale) Jv
        diag_h = g * dv * scale

        # After all this is done, we continue normally.

        # "hat" gradient.
        g_h = d * g

        f_augmented[:m] = f
        if tr_solver == 'exact':
            J_augmented[:m] = J * d
            J_h = J_augmented[:m]  # Memory view.
            J_augmented[m:] = np.diag(diag_h**0.5)
            U, s, V = svd(J_augmented, full_matrices=False)
            V = V.T
            uf = U.T.dot(f_augmented)
        elif tr_solver == 'lsmr':
            J_h = right_multiplied_operator(J, d)

            if regularize:
                a, b = build_quadratic_1d(J_h, g_h, -g_h, diag=diag_h)
                to_tr = Delta / norm(g_h)
                ag_value = minimize_quadratic_1d(a, b, 0, to_tr)[1]
                reg_term = -ag_value / Delta**2

            lsmr_op = regularized_lsq_operator(J_h, (diag_h + reg_term)**0.5)
            gn_h = lsmr(lsmr_op, f_augmented, **tr_options)[0]
            S = np.vstack((g_h, gn_h)).T
            S, _ = qr(S, mode='economic')
            JS = J_h.dot(S)  # LinearOperator does dot too.
            B_S = np.dot(JS.T, JS) + np.dot(S.T * diag_h, S)
            g_S = S.T.dot(g_h)

        # theta controls step back step ratio from the bounds.
        theta = max(0.995, 1 - g_norm)

        actual_reduction = -1
        while actual_reduction <= 0 and nfev < max_nfev:
            if tr_solver == 'exact':
                p_h, alpha, n_iter = solve_lsq_trust_region(
                    n, m, uf, s, V, Delta, initial_alpha=alpha)
            elif tr_solver == 'lsmr':
                p_S, _ = solve_trust_region_2d(B_S, g_S, Delta)
                p_h = S.dot(p_S)

            p = d * p_h  # Trust-region solution in the original space.
            step, step_h, predicted_reduction = select_step(
                x, J_h, diag_h, g_h, p, p_h, d, Delta, lb, ub, theta)

            x_new = make_strictly_feasible(x + step, lb, ub, rstep=0)
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step_h)

            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new
            # Correction term is specific to the algorithm,
            # vanishes in unbounded case.
            correction = 0.5 * np.dot(step_h * diag_h, step_h)

            Delta_new, ratio = update_tr_radius(Delta,
                                                actual_reduction - correction,
                                                predicted_reduction,
                                                step_h_norm,
                                                step_h_norm > 0.95 * Delta)
            alpha *= Delta / Delta_new
            Delta = Delta_new

            step_norm = norm(step)
            termination_status = check_termination(actual_reduction, cost,
                                                   step_norm, norm(x), ratio,
                                                   ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            x = x_new

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scale:
                scale, scale_inv = compute_jac_scale(J, scale_inv)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    active_mask = find_active_constraints(x, lb, ub, rtol=xtol)
    return OptimizeResult(x=x,
                          cost=cost,
                          fun=f_true,
                          jac=J,
                          grad=g,
                          optimality=g_norm,
                          active_mask=active_mask,
                          nfev=nfev,
                          njev=njev,
                          status=termination_status)
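To my understanding this is the bound-constrained Trust Region Reflective driver that scipy.optimize.least_squares dispatches to when method='trf' and finite bounds are given; a hedged sketch of the public call that ends up exercising it (model, data and bounds are made up):

import numpy as np
from scipy.optimize import least_squares

def residuals(p, t, y):
    return p[0] * np.exp(p[1] * t) - y

rng = np.random.default_rng(0)
t = np.linspace(0.0, 1.0, 30)
y = 2.0 * np.exp(-1.5 * t) + 0.01 * rng.standard_normal(t.size)

res = least_squares(residuals, x0=[1.0, -1.0], args=(t, y),
                    bounds=([0.0, -np.inf], [np.inf, 0.0]), method='trf')
print(res.x, res.optimality)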