Example #1
File: no6.py Project: hallliu/f2013
import copy
import scipy.linalg as sp  # the original file imports scipy.linalg as sp; generate_matrix is project-local

def format_eig_svd():
    def format_cplx(z):
        # abs() so that tiny *negative* imaginary parts are also printed as real
        if abs(z.imag) < 1e-300:
            return '{0:.4f}'.format(z.real)
        # the '+' flag keeps the sign, avoiding output like '1.0+-2.0i'
        return '{0:.4f}{1:+.4f}i'.format(z.real, z.imag)

    eig12 = sp.eigvals(generate_matrix(12))
    svd12 = sp.svdvals(generate_matrix(12))

    eig25 = sp.eigvals(generate_matrix(25))
    svd25 = sp.svdvals(generate_matrix(25))

    result12 = r'\begin{tabular}{cc}' + '\n'
    result12 += r'    Eigenvalues&Singular values\\' + '\n'
    result12 += '     \\hline\n'
    result25 = copy.copy(result12)
    for k in range(25):
        if k < 12:
            result12 += r'    ${0}$&${1:.4f}$\\'.format(format_cplx(eig12[k]), svd12[k]) + '\n'
        result25 += r'    ${0}$&${1:.4f}$\\'.format(format_cplx(eig25[k]), svd25[k]) + '\n'

    result12 += '\\end{tabular}\n'
    result25 += '\\end{tabular}\n'

    print(result12)

    print(result25)
Example #2
	def _EFA_fired(self):

		#number of singular values to track
		singvals = 3

		#Time
		rows = Data.TrA_Data.shape[0]
		forward_r = np.zeros((rows,singvals))
		backward_r = np.zeros((rows,singvals))

		stepl_r = rows-singvals
		#Forward

		#Must start at the number of tracked singular values so svdvals returns at least `singvals` values
		for i in range(singvals,rows):
			partsvd = linalg.svdvals(Data.TrA_Data[:i,:]).T
			forward_r[i,:] = partsvd[:singvals]

		#Backwards

		for i in range(0,stepl_r):
			j = (rows-singvals)-i
			partsvd = linalg.svdvals(Data.TrA_Data[j:,:]).T
			backward_r[j,:] = partsvd[:singvals]

		plt.figure()
		plt.semilogy(Data.time[singvals:],forward_r[singvals:,:],'b',Data.time[:(rows-singvals)],backward_r[:(rows-singvals),:],'r')
		plt.title("%s EFA time" %(self.title))
		plt.xlabel("Time (ps)")
		plt.ylabel("Log(EV)")
		plt.show()

		#Wavelength

		cols = Data.TrA_Data.shape[1]
		forward_c = np.zeros((cols,singvals))
		backward_c = np.zeros((cols,singvals))

		stepl_c = cols-singvals
		#Forward

		#Must start at the number of tracked singular values so svdvals returns at least `singvals` values
		for i in range(singvals,cols):
			partsvd = linalg.svdvals(Data.TrA_Data[:,:i])
			forward_c[i,:] = partsvd[:singvals]

		#Backwards

		for i in range(0,stepl_c):
			j = (cols-singvals)-i
			partsvd = linalg.svdvals(Data.TrA_Data[:,j:])
			backward_c[j,:] = partsvd[:singvals]

		plt.figure()
		plt.semilogy(Data.wavelength[singvals:],forward_c[singvals:,:],'b',Data.wavelength[:cols-singvals],backward_c[:cols-singvals,:],'r')
		plt.title("%s EFA wavelength" %(self.title))
		plt.xlabel("Wavelength (nm)")
		plt.ylabel("Log(EV)")
		plt.show()
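The method above is tied to the project's `Data` object and Traits UI. As a hedged, self-contained sketch, the forward pass of the same evolving-factor idea on a plain array (all names here are illustrative) looks like:

import numpy as np
from scipy import linalg

D = np.random.randn(40, 25)            # stand-in for Data.TrA_Data
singvals = 3
forward = np.zeros((D.shape[0], singvals))
# grow the row window and track the leading singular values
for i in range(singvals, D.shape[0]):
    forward[i, :] = linalg.svdvals(D[:i, :])[:singvals]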
Example #3
def test_trace_1():
    B = np.ones((3, 3))
    X = np.random.randn(100, 9)
    y = np.dot(X, B.ravel('F')) + .1 * np.random.randn(100)

    alpha = 10.
    B_, _ = mt.trace(X, y, alpha, 0., (3, 3), rtol=1e-10)

    # KKT conditions
    grad = - np.dot(X.T, y - np.dot(X, B_.ravel('F')))
    M = (grad / alpha).reshape(B.shape, order='F')
    assert np.all(linalg.svdvals(M) < 1. + 1e-3)
    testing.assert_allclose(np.dot(M.ravel('F'), B_.ravel('F')),
        - linalg.svdvals(B_).sum())
Example #4
def sample_moments( X, k ):
    """Get the sample moments from data"""
    N, d = X.shape

    # Partition X into two halves to independently estimate M2 and M3
    X1, X2 = X[:N//2], X[N//2:]  # integer division, so the slicing works under Python 3

    # Get the moments  
    M1 = X1.mean(0)
    M1_ = X2.mean(0)
    M2 = Pairs( X1, X1 ) 
    M3 = lambda theta: TriplesP( X2, X2, X2, theta )
    #M3 = Triples( X2, X2, X2 )

    # TODO: Ah, not computing sigma2! 
    # Estimate \sigma^2 = k-th eigenvalue of  M2 - mu mu^T
    sigma2 = svdvals( M2 - outer( M1, M1 ) )[k-1]
    assert( sc.isreal( sigma2 ) and sigma2 > 0 )
    # P is the best rank-k approximation to M2 - sigma^2 I
    P = approxk( M2 - sigma2 * eye( d ), k )

    B = matrix_tensorify( eye(d), M1_ )
    T = lambda theta: M3(theta) - sigma2 * ( M1_.dot(theta) * eye( d ) + outer( M1_, theta ) + outer( theta, M1_ ) )
    #T = M3 - sigma2 * ( B + B.swapaxes(2, 1) + B.swapaxes(2, 0) )

    return P, T    
Example #5
File: glm.py Project: itmat/pade
def rank(X, cond=1.0e-12):
    X = np.asarray(X)
    if len(X.shape) == 2:
        D = svdvals(X)
        return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
    else:
        return int(not np.alltrue(np.equal(X, 0.0)))
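A quick sanity check of this helper (assuming `numpy as np` and `from scipy.linalg import svdvals`, as the body implies) against NumPy's built-in:

import numpy as np
A = np.ones((4, 3))                # rank-1 matrix
print(rank(A))                     # -> 1
print(np.linalg.matrix_rank(A))    # -> 1, agrees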
Example #6
def sweep_fidelity(kets,direction):
    '''
    Sweep fidelity.
    '''
    bra=kets[0].tobra(labels=[kets[0].labels[0],kets[0].labels[1]+'\''])
    ket=kets[1]
    if direction=='->':
        [keti<<keti.l-1 for keti in [bra,ket]]
        step=1
        clink_axis=kets[0].llink_axis
        attach_S='A'
        edge_labels=[bra.AL[0].labels[clink_axis],ket.AL[0].labels[clink_axis]]
    else:
        step=-1
        clink_axis=kets[0].rlink_axis
        attach_S='B'
        [keti>>keti.nsite-1-keti.l for keti in [bra,ket]]
        edge_labels=[bra.BL[-1].labels[clink_axis],ket.BL[-1].labels[clink_axis]]
    Ri=tensor.Tensor(identity(1),labels=edge_labels)
    fs=[1]
    for i in xrange(ket.nsite):
        sitei=i if direction=='->' else ket.nsite-i-1
        Ri=(bra.get(sitei,attach_S=attach_S)*Ri*ket.get(sitei,attach_S=attach_S))
        S=svdvals(Ri)
        fs.append(sum(S))

        print i,sum(S)
    if direction=='<-':
        fs.reverse()
    return fs
Example #7
    def add_consts( self, key, A, k=-1, ntype=None ):
        """Print the error between two objects"""

        if ntype is None:
            self.add( "norm_%s" % key, norm( A ) )
        else:
            self.add( "norm_%s_%s" % (key, str(ntype)), norm( A, ntype ) )

        if ntype == 2:
            if k > 0:
                self.add( "s_k_%s" % key, svdvals(A)[k-1]  )
            else:
                self.add( "s_k_%s" % key, svdvals(A)[-1]  )
            self.add( "K_%s" % key, condition_number( A, k ) )
            if A.shape[0] == A.shape[1]:
                self.add( "D_%s" % key, eigen_sep( A, k ) )
def schmidt_vals(dw,aas,aai,eps,deltaw,f):
    """
    Args:
    dw: size of the grid spacing
    aas: relative slowness of the signal mode
    aai: relative slowness of the idler mode
    eps: strength of the nonlinearity
    deltaw:  specifies the size of the frequency grid going from
    -deltaw to deltaw for each frequency
    f: shape of the pump function
    """
    ddws=np.arange(-deltaw-dw/2,deltaw+dw/2,dw)
    deltaks=aas*ddws
    ddwi=np.arange(-deltaw-dw/2,deltaw+dw/2,dw)
    deltaki=aai*ddwi
    ds=np.diag(deltaks)
    di=np.diag(deltaki)


    def ff(x,y):
        return f(x+y)
    
    v=eps*(dw)*ff(ddwi[:,None],ddws[None,:])
    G=1j*np.concatenate((np.concatenate((ds,v),axis=1),np.concatenate((-v,-di),axis=1)),axis=0)
    z = 1
    dsi=np.concatenate((deltaks,-deltaki),axis=0)
    U0=linalg.expm(-1j*np.diag(dsi)*z/2)
    GG=np.dot(np.dot(U0,linalg.expm(G)),U0)
    n=len(ddws)
    C=GG[0:n,n:2*n]
    na=np.dot(np.conj(np.transpose(C)),C)*dw
    vv=np.arcsinh(np.sqrt(np.diag(np.diag(linalg.svdvals(na))/dw)))
    return vv
Example #9
def spectral_gap( x, k = None ):
    """Minimum difference in eigenvalues"""
    # Get the singular values
    s = svdvals( x )
    if k is not None:
        s = s[:k]

    return (sc.diff( s )).min() / s[0]
Example #10
def test_singular_values():
  from scipy.linalg import svdvals
  random.seed(13811)
  for n in 2,3:
    A = random.randn(7,n,n)
    D = fast_singular_values(A)
    for a,d in zip(A,D):
      assert allclose(svdvals(a),abs(d))
Example #11
def pca_eigvals(d):
    """
    Compute the eigenvalues of the covariance matrix of the data d.  The covariance
    matrix is computed as d*d^T.
    """
    # remove mean of each row
    d = d - np.mean(d, axis = 1)[:, np.newaxis]
    
    return 1.0 / (d.shape[1] - 1) * svdvals(d, True)**2
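A consistency check for this function (assuming the same imports): the scaled squared singular values of the centered data should match the eigenvalues of the row-covariance matrix:

import numpy as np
from scipy.linalg import eigvalsh

d = np.random.randn(5, 200)
ev = pca_eigvals(d)
C = np.cov(d)   # rows are variables; same 1/(n-1) scaling
assert np.allclose(np.sort(ev), np.sort(eigvalsh(C)))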
Example #12
def condition_number( x, k = None ):
    """Condition number for the k-rank approximation of x"""
    # Get the singular values
    s = svdvals( x )

    if k is not None:
        return s[0]/s[k-1]
    else:
        return s[0]/s[-1]
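For a diagonal matrix the behavior is easy to verify by hand (assuming `from scipy.linalg import svdvals` as the body implies):

import numpy as np
A = np.diag([10., 5., 1.])
print(condition_number(A))        # 10/1 = 10.0, same as np.linalg.cond(A)
print(condition_number(A, k=2))   # 10/5 = 2.0, conditioning of the rank-2 approximation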
Example #13
def rank(X, cond=1.0e-12):
    """
    Return the rank of a matrix X based on its generalized inverse,
    not the SVD.
    """
    X = np.asarray(X)
    if len(X.shape) == 2:
        D = svdvals(X)
        return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
    else:
        return int(not np.alltrue(np.equal(X, 0.)))
Example #14
def compute_depth_prior(G, gain_info, is_fixed_ori, exp=0.8, limit=10.0,
                        patch_areas=None, limit_depth_chs=False):
    """Compute weighting for depth prior
    """
    logger.info('Creating the depth weighting matrix...')

    # If possible, pick best depth-weighting channels
    if limit_depth_chs is True:
        G = _restrict_gain_matrix(G, gain_info)

    # Compute the gain matrix
    if is_fixed_ori:
        d = np.sum(G ** 2, axis=0)
    else:
        n_pos = G.shape[1] // 3
        d = np.zeros(n_pos)
        for k in xrange(n_pos):
            Gk = G[:, 3 * k:3 * (k + 1)]
            d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]

    # XXX Currently the fwd solns never have "patch_areas" defined
    if patch_areas is not None:
        d /= patch_areas ** 2
        logger.info('    Patch areas taken into account in the depth '
                    'weighting')

    w = 1.0 / d
    ws = np.sort(w)
    weight_limit = limit ** 2
    if limit_depth_chs is False:
        # match old mne-python behavior
        ind = np.argmin(ws)
        n_limit = ind
        limit = ws[ind] * weight_limit
    else:
        # match C code behavior
        limit = ws[-1]
        n_limit = len(d)
        if ws[-1] > weight_limit * ws[0]:
            ind = np.where(ws > weight_limit * ws[0])[0][0]
            limit = ws[ind]
            n_limit = ind

    logger.info('    limit = %d/%d = %f'
                % (n_limit + 1, len(d),
                np.sqrt(limit / ws[0])))
    scale = 1.0 / limit
    logger.info('    scale = %g exp = %g' % (scale, exp))
    wpp = np.minimum(w / limit, 1) ** exp

    depth_prior = wpp if is_fixed_ori else np.repeat(wpp, 3)

    return depth_prior
Example #15
def test_add_indep():
    x1 = np.array([0,0,0,0,0,1,1,1,2,2,2])
    x2 = np.array([0,0,0,0,0,1,1,1,1,1,1])
    x0 = np.ones(len(x2))
    x = np.column_stack([x0, x1[:,None]*np.arange(3), x2[:,None]*np.arange(2)])
    varnames = ['const'] + ['var1_%d' %i for i in np.arange(3)] \
                         + ['var2_%d' %i for i in np.arange(2)]
    xo, vo = add_indep(x, varnames)

    assert_equal(xo, np.column_stack((x0, x1, x2)))
    assert_equal((linalg.svdvals(x) > 1e-12).sum(), 3)
    assert_equal(vo, ['const', 'var1_1', 'var2_1'])
Example #16
    def error_norm(self, comp_cov, norm='frobenius', scaling=True,
                   squared=True):
        """Computes the Mean Squared Error between two covariance estimators.
        (In the sense of the Frobenius norm).

        Parameters
        ----------
        comp_cov : array-like, shape = [n_features, n_features]
            The covariance to compare with.

        norm : str
            The type of norm used to compute the error. Available error types:
            - 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A)))
            where A is the error ``(comp_cov - self.covariance_)``.

        scaling : bool
            If True (default), the squared error norm is divided by n_features.
            If False, the squared error norm is not rescaled.

        squared : bool
            Whether to compute the squared error norm or the error norm.
            If True (default), the squared error norm is returned.
            If False, the error norm is returned.

        Returns
        -------
        The Mean Squared Error (in the sense of the Frobenius norm) between
        `self` and `comp_cov` covariance estimators.

        """
        # compute the error
        error = comp_cov - self.covariance_
        # compute the error norm
        if norm == "frobenius":
            squared_norm = np.sum(error ** 2)
        elif norm == "spectral":
            squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
        else:
            raise NotImplementedError(
                "Only spectral and frobenius norms are implemented")
        # optionally scale the error norm
        if scaling:
            squared_norm = squared_norm / error.shape[0]
        # finally get either the squared norm or the norm
        if squared:
            result = squared_norm
        else:
            result = np.sqrt(squared_norm)

        return result
Example #17
def rank(X, cond=1.0e-12):
    """
    Return the rank of a matrix X based on its generalized inverse,
    not the SVD.
    """
    from warnings import warn
    warn("rank is deprecated and will be removed in 0.7."
         " Use np.linalg.matrix_rank instead.", FutureWarning)
    X = np.asarray(X)
    if len(X.shape) == 2:
        D = svdvals(X)
        return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
    else:
        return int(not np.alltrue(np.equal(X, 0.)))
Example #18
def rank(X, cond=1.0e-12):
    """
    Return the rank of a matrix X based on its generalized inverse,
    not the SVD.
    """
    X = np.asarray(X)
    if len(X.shape) == 2:
        import scipy.linalg as SL

        D = SL.svdvals(X)
        result = np.add.reduce(np.greater(D / D.max(), cond))
        return int(result.astype(np.int32))
    else:
        return int(not np.alltrue(np.equal(X, 0.0)))
Example #19
def compute_depth_prior(G, exp=0.8, limit=10.0):
    """Compute weighting for depth prior
    """
    n_pos = G.shape[1] // 3
    d = np.zeros(n_pos)
    for k in xrange(n_pos):
        Gk = G[:, 3 * k:3 * (k + 1)]
        d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]
    w = 1.0 / d
    wmax = np.min(w) * (limit ** 2)
    wp = np.minimum(w, wmax)
    wpp = (wp / wmax) ** exp
    depth_prior = np.ravel(wpp[:, None] * np.ones((1, 3)))
    return depth_prior
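The per-position weight `d[k]` is the top singular value of Gk^T Gk, i.e. the squared spectral norm of the 3-column block Gk. A small check of that identity:

import numpy as np
from scipy import linalg

Gk = np.random.randn(20, 3)
assert np.isclose(linalg.svdvals(Gk.T @ Gk)[0], linalg.svdvals(Gk)[0] ** 2)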
Example #20
def _fractional_matrix_power(A, p):
    """
    Compute the fractional power of a matrix.

    See the fractional_matrix_power docstring in matfuncs.py for more info.

    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    if p == int(p):
        return np.linalg.matrix_power(A, int(p))
    # Compute singular values.
    s = svdvals(A)
    # Inverse scaling and squaring cannot deal with a singular matrix,
    # because the process of repeatedly taking square roots
    # would not converge to the identity matrix.
    if s[-1]:
        # Compute the condition number relative to matrix inversion,
        # and use this to decide between floor(p) and ceil(p).
        k2 = s[0] / s[-1]
        p1 = p - np.floor(p)
        p2 = p - np.ceil(p)
        if p1 * k2 ** (1 - p1) <= -p2 * k2:
            a = int(np.floor(p))
            b = p1
        else:
            a = int(np.ceil(p))
            b = p2
        try:
            R = _remainder_matrix_power(A, b)
            Q = np.linalg.matrix_power(A, a)
            return Q.dot(R)
        except np.linalg.LinAlgError:
            pass
    # If p is negative then we are going to give up.
    # If p is non-negative then we can fall back to generic funm.
    if p < 0:
        X = np.empty_like(A)
        X.fill(np.nan)
        return X
    else:
        p1 = p - np.floor(p)
        a = int(np.floor(p))
        b = p1
        R, info = funm(A, lambda x: pow(x, b), disp=False)
        Q = np.linalg.matrix_power(A, a)
        return Q.dot(R)
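This is SciPy's internal helper; the public entry point is `scipy.linalg.fractional_matrix_power`. A small example on a matrix whose fractional power is known in closed form:

import numpy as np
from scipy.linalg import fractional_matrix_power

A = np.diag([4.0, 9.0])
print(fractional_matrix_power(A, 0.5))   # diag(2, 3): the matrix square root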
Example #21
    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y
            not used, present for API consistence purpose.

        Returns
        -------
        self : object

        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = linalg.pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)

        return self
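A minimal sketch of the full-rank test used at the top of `fit`, on a deliberately rank-deficient design matrix:

import numpy as np
from scipy import linalg

X = np.random.randn(30, 3)
X = np.column_stack([X, X[:, 0]])   # duplicate a column -> rank 3, not 4
print((linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum())   # 3 != n_features, the warning would fire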
Example #22
def clcl_cn_inv(mat):
    """
    Calculate the inverse condition number of a 2D matrix.
    
    Param
    --------
    mat : ndarray
        (2, 2)
    
    Ret
    ----
    cn_inv : float
    """
    sv = svdvals(mat)
    cn_inv = sv[1] / sv[0]

    return cn_inv
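Usage on a 2x2 matrix with known singular values (1.0 means perfectly conditioned, 0.0 singular); the import mirrors the one the function body assumes:

import numpy as np
from scipy.linalg import svdvals

J = np.array([[2.0, 0.0],
              [0.0, 0.5]])
print(clcl_cn_inv(J))   # 0.5 / 2.0 = 0.25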
Example #23
    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X: array-like, shape = [n_samples, n_features]
          Training data, where n_samples is the number of samples
          and n_features is the number of features.
        y: not used, present for API consistence purpose.

        Returns
        -------
        self: object
          Returns self.

        """
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support = fast_mcd(
            X, objective_function=self.objective_function,
            h=self.h, cov_computation_method=self._nonrobust_covariance)
        if self.h is None:
            self.h = int(np.ceil(0.5 * (n_samples + n_features + 1))) \
                / float(n_samples)
        if self.assume_centered:
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(
                X[raw_support], assume_centered=True)
        # get precision matrix in an optimized way
        precision = pinvh(raw_covariance)
        raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)

        return self
Example #24
def estimate_rank(data, tol='auto', return_singular=False, norm=True,
                  verbose=None):
    """Estimate the rank of data.

    This function will normalize the rows of the data (typically
    channels or vertices) such that non-zero singular values
    should be close to one.

    Parameters
    ----------
    data : array
        Data to estimate the rank of (should be 2-dimensional).
    tol : float | 'auto'
        Tolerance for singular values to consider non-zero in
        calculating the rank. The singular values are calculated
        in this method such that independent data are expected to
        have singular value around one. Can be 'auto' to use the
        same thresholding as ``scipy.linalg.orth``.
    return_singular : bool
        If True, also return the singular values that were used
        to determine the rank.
    norm : bool
        If True, data will be scaled by their estimated row-wise norm.
        Else data are assumed to be scaled. Defaults to True.

    Returns
    -------
    rank : int
        Estimated rank of the data.
    s : array
        If return_singular is True, the singular values that were
        thresholded to determine the rank are also returned.
    """
    if norm:
        data = data.copy()  # operate on a copy
        norms = _compute_row_norms(data)
        data /= norms[:, np.newaxis]
    s = linalg.svdvals(data)
    rank = _estimate_rank_from_s(s, tol)
    if return_singular is True:
        return rank, s
    else:
        return rank
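`_compute_row_norms` and `_estimate_rank_from_s` are project-internal helpers not shown here. A hedged sketch of the 'auto' threshold the docstring describes, mirroring scipy.linalg.orth:

import numpy as np
from scipy import linalg

data = np.random.randn(10, 40)
s = linalg.svdvals(data)
tol = s.max() * max(data.shape) * np.finfo(data.dtype).eps
print(int((s > tol).sum()))   # -> 10 for generic random data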
Example #25
def inv_resolvent_norm(A, z, method='svd'):
    r'''Compute the reciprocal norm of the resolvent

    :param A: the input matrix as a ``numpy.array``, sparse matrix or
      ``LinearOperator`` with ``A.shape==(m,n)``, where :math:`m\geq n`.
    :param z: a complex number
    :param method: (optional) one of

      * ``svd`` (default): computes the minimal singular value of :math:`A-zI`.
        This one should be used for dense matrices.
      * ``lanczos``: computes the minimal singular value with the Lanczos
        iteration on the matrix
        :math:`\begin{bmatrix}0&A\\A^*&0\end{bmatrix}`
    '''
    if method == 'svd':
        return numpy.min(svdvals(A - z*numpy.eye(*A.shape)))
    elif method == 'lanczos':
        m, n = A.shape
        if m > n:
            raise ValueError('m > n is not allowed')
        AH = A.T.conj()

        def matvec(x):
            r'''matrix-vector multiplication

            matrix-vector multiplication with matrix
            :math:`\begin{bmatrix}0&A\\A^*&0\end{bmatrix}`
            '''
            x1 = x[:m]
            x2 = x[m:]
            ret1 = AH.dot(x2) - numpy.conj(z)*x2
            ret2 = numpy.array(A.dot(x1), dtype=complex)  # numpy.complex was removed in newer NumPy
            ret2[:n] -= z*x1
            return numpy.c_[ret1, ret2]
        AH_A = LinearOperator(matvec=matvec, dtype=complex,
                              shape=(m+n, m+n))

        evals = eigsh(AH_A, k=2, tol=1e-6, which='SM', maxiter=m+n+1,
                      ncv=2*(m+n),
                      return_eigenvectors=False)

        return numpy.min(numpy.abs(evals))
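For a normal matrix, the reciprocal resolvent norm computed by the 'svd' branch is just the distance from z to the spectrum:

import numpy

A = numpy.diag([1.0, 2.0, 3.0])
print(inv_resolvent_norm(A, 2.5))   # min singular value of A - 2.5*I = 0.5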
Example #26
def sweep_fidelity2_fsingle(kets,spaceconfig,maxN=55):
    '''
    Sweep fidelity from center to edge, the single version taking fermionic sign into consideration.

    Parameters:
        :kets: len-2 list, the kets to sweep fidelity.
        :spaceconfig: <SuperSpaceConfig>,
        :maxN: int, the maximum retained singular value for usv mode, and the maximum retained states for direct mode.
    '''
    nsite=kets[0].nsite
    #prepare kets.
    bra=kets[0].tobra(labels=[kets[0].labels[0],kets[0].labels[1]+'\''])
    ket=kets[1]
    ket>>(nsite/2-ket.l,1e-8,Inf)
    bra>>(nsite/2-bra.l,1e-8,Inf)
    l=kets[0].forder.index(0)-nsite/2 #bulk size/2.

    rlink_axis=kets[0].rlink_axis
    edge_labels_l=[bra.AL[-1].labels[rlink_axis],ket.AL[-1].labels[rlink_axis]]
    llink_axis=kets[0].llink_axis
    bra.BL[0].labels[llink_axis]+='@'
    ket.BL[0].labels[llink_axis]+='@'
    edge_labels_r=[bra.BL[0].labels[llink_axis],ket.BL[0].labels[llink_axis]]
    Ci=tensor.Tensor(diag(bra.S),labels=[edge_labels_l[0],edge_labels_r[0]])*tensor.Tensor(diag(ket.S),labels=[edge_labels_l[1],edge_labels_r[1]])
    fs=[1]
    #get the bulk overlap matrix.
    for i in xrange(l):
        t0=time.time()
        site_l=nsite/2-i-1
        site_r=nsite/2+i
        Ci=bra.get(site_l,attach_S='B')*(ket.get(site_l,attach_S='B')*Ci)
        Ci=Ci*bra.get(site_r,attach_S='A')*ket.get(site_r,attach_S='A')
        Ci=Ci.chorder(array([0,2,1,3]))
        t1=time.time()
        print 'Update %s, Elapse->%s'%(i,t1-t0)
    S=svdvals(Ci.reshape([Ci.shape[0]*Ci.shape[1],-1]))
    f=sum(S)
    print 'Get Fidelity for l = %s: %s.'%(l,f)
    return f
Example #27
def spectral_norm_squared(X):
    """Computes square of the operator 2-norm (spectral norm) of X

    This corresponds to the Lipschitz constant of the gradient of the
    squared-loss function:

        w -> .5 * ||y - Xw||^2

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
      Design matrix.

    Returns
    -------
    lipschitz_constant : float
      The square of the spectral norm of X.

    """
    # On big matrices like those that we have in neuroimaging, svdvals
    # is faster than a power iteration (even when using arpack's)
    return linalg.svdvals(X)[0] ** 2
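A quick check against SciPy's matrix 2-norm, which returns the largest singular value:

import numpy as np
from scipy import linalg

X = np.random.randn(50, 10)
assert np.isclose(spectral_norm_squared(X), linalg.norm(X, 2) ** 2)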
Example #28
def canonicalAngles(A, B):
    ''' Computes the canonical angles between the subspaces defined by
    the column spaces of matrix A and B.
    @param A: A 2D array (matrix) with rows > cols.
    @param B: A 2D array (matrix) with rows > cols.
    @return: The 1D array of canonical angles (Theta) between the subspaces defined by A and B.
    '''
    (r,c) = A.shape
    assert( r > c)
    
    (r,c) = B.shape
    assert( r > c)
    
    #get orthonormal bases
    #NOTE: in scipy.linalg, using the thin svd to get the orthonormal bases is MUCH FASTER
    # than using either the LA.orth(A) function or "economy" mode of QR decomposition!
    (Qa,_,_) = LA.svd(A, full_matrices=False)
    (Qb,_,_) = LA.svd(B, full_matrices=False)
    X = sp.dot(Qa.T,Qb)
    S = LA.svdvals( X )  #singular vals of Qa'*Qb
    #S = cos(Theta)
    Theta = sp.arccos(S)
    return Theta
Example #29
def chordal_dist(M1, M2, already_orthogonal=False):
    '''
    The chordal distance is based on the canonical angles
    between subspaces. This function computes the chordal
    distance between two matrices.
    @param M1: A 2D array (matrix) with rows >= cols.
    @param M2: A 2D array (matrix) with rows >= cols.
    @param already_orthogonal: Specify True if M1 and M2
    are already orthogonal matrices, which will save on
    unnecessary computation. Otherwise, an SVD will be
    used to get an orthogonal representation of each matrix.
    '''
    (r,c) = M1.shape
    assert( r >= c)

    (r,c) = M2.shape
    assert( r >= c)
    
    if already_orthogonal:
        Q1 = M1
        Q2 = M2
    else:
        #get orthonormal bases
        #NOTE: in scipy.linalg, using the thin svd to get the orthonormal bases is MUCH FASTER
        # than using either the LA.orth(A) function or "economy" mode of QR decomposition!
        (Q1,_,_) = LA.svd(M1, full_matrices=False)
        (Q2,_,_) = LA.svd(M2, full_matrices=False)
        
    #canonical angles between subspaces
    X = sp.dot(Q1.T,Q2)
    S = LA.svdvals( X )
    #S = cos(Theta)
    Theta = sp.arccos(S)
    
    #chordal distance is ||sin(Theta)||_2
    return LA.norm( sp.sin(Theta)  )
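A hand-checkable example (assuming the module-level `sp`/`LA` imports this function relies on): two 2-D subspaces of R^4 sharing exactly one direction have canonical angles (0, pi/2), so sin(Theta) = (0, 1) and the chordal distance is 1:

import numpy as np

M1 = np.eye(4)[:, :2]     # span{e1, e2}
M2 = np.eye(4)[:, 1:3]    # span{e2, e3}
print(chordal_dist(M1, M2, already_orthogonal=True))   # -> 1.0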
Example #30
def do_svd(buckets):
    """ Given a set of frequency bins, computes the singluar values
    of the feature vector matrix.

    :returns: A vector of singular values.
    """
    l = 0
    for _, data in buckets.iteritems():
        if len(data['scale']) > l: l = len(data['scale'])

    M = []
    l_t_new = xrange(l)
    for name, data in buckets.iteritems():
        _, min_ = data['max_min']
        rms = data['scale']
        l_t = xrange(len(rms))
        tck = splrep(l_t, rms, s=0)
        rms_new = splev(l_t_new, tck, der=0)
        M.append((min_, rms_new))

    M = [rms for _, rms in reversed(sorted(M))]
    svd = svdvals(M)
    m, s = mean(svd), std(svd)
    return [((x - m) / s) for x in svd]