Example no. 1
def check_alm_condition(obs, ar_comps, mixing_coef):
    """
    Computes the condition numbers that show-up in the various iterative procedures
    :param obs: num_obs x obs_len x signal_dim numpy array
    :param ar_comps: num_comps x model_ord x signal_dim x signal_dim numpy array
    :param mixing_coef: num_obs x num_comps numpy array
    :return comp_cond, coef_cond: float, float
    """

    num_obs, _, signal_dim = obs.shape
    num_comps, model_ord, _, _ = ar_comps.shape

    _, _, XtX = package_observations(obs, model_ord)
    coef_gram = coef_gram_matrix(XtX, mixing_coef)
    dim = model_ord * signal_dim
    comp_cond_mat = np.zeros([dim * num_comps, dim * num_comps])
    # Note: do not reuse the name coef_gram for the loop variable; it shadows the dict.
    for (i, j), coef_gram_ij in coef_gram.items():
        comp_cond_mat[i * dim:(i + 1) * dim, j * dim:(j + 1) * dim] = coef_gram_ij
        if i != j:
            comp_cond_mat[j * dim:(j + 1) * dim, i * dim:(i + 1) * dim] = coef_gram_ij
    comp_singvals = sl.svdvals(comp_cond_mat)
    ar_comps = np.array(
        [stack_ar_coef(component_j) for component_j in ar_comps])
    comp_gram = component_gram_matrix(XtX, ar_comps)
    coef_singvals = [sl.svdvals(comp_gram_i) for comp_gram_i in comp_gram]

    comp_cond = np.max(comp_singvals) / np.min(comp_singvals)
    coef_cond = np.max(coef_singvals, axis=1) / np.min(coef_singvals, axis=1)
    return comp_cond, coef_cond
Example no. 2
def efa(data, ncomp=2, plot=False):

    data = data.copy().T
    nrows = len(data)

    ef = np.zeros((nrows, nrows))
    eb = np.zeros((nrows, nrows))
    for i in range(1, nrows + 1):
        ef[i - 1, :i] = np.square(svdvals(data[:i]))
        eb[i - 1, i - 1 :] = np.square(svdvals(data[i - 1 :])[::-1])

    C = np.stack([ef[:, :ncomp], eb[:, -ncomp:]]).min(axis=0)

    if plot:
        import matplotlib.pyplot as plt
        X = np.arange(nrows)
        fig, (ax1, ax2) = plt.subplots(1, 2)

        for row in np.log10(ef.T):
            ax1.plot(row, "k")
        for row in np.log10(eb.T):
            ax1.plot(row, "r")

        for row in C.T:
            ax2.plot(row)

        fig.tight_layout()

    return C
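
A minimal usage sketch for `efa` (not part of the original project; it assumes `numpy` and `scipy.linalg.svdvals` are imported as in the snippet, and that the input has at least as many columns as rows after the internal transpose):

import numpy as np
from scipy.linalg import svdvals

# Two synthetic concentration profiles (30 time points) times two spectra (50 channels).
t = np.linspace(0, 1, 30)
c1 = np.exp(-(t - 0.35) ** 2 / 0.01)
c2 = np.exp(-(t - 0.65) ** 2 / 0.01)
s1, s2 = np.random.rand(50), np.random.rand(50)
X = np.outer(s1, c1) + np.outer(s2, c2)   # shape (50, 30)

C = efa(X, ncomp=2)   # forward/backward eigenvalue windows
print(C.shape)        # (30, 2): one row per column of X after the internal transpose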
Example no. 3
    def expected_g(self, A, sigma, adjusted):
        W = self.decoder.W.weight.detach().numpy()
        mat = A - W
        if adjusted:
            assert A.shape[0] == A.shape[1], "Only valid when D=d"
            # Compare the spectra instead of the raw matrices.
            svd_a = svdvals(A)
            svd_w = svdvals(W)
            mat = np.diag(svd_a - svd_w)

        diff_hat_sq = np.trace(np.matmul(mat,mat.T))
        norm_term = np.mean(diff_hat_sq)/(sigma*sigma)

        s = np.exp(self.decoder.log_s.detach().numpy())
        ratio = s/sigma
        ratio_sq = ratio * ratio
        const_term = self.D * (ratio_sq - np.log(ratio_sq) -1)

        return 0.5*(norm_term + const_term)
Example no. 4
File: no6.py Project: hallliu/f2013
def format_eig_svd():
    def format_cplx(z):
        if abs(z.imag) < 1e-300:
            return '{0:.4f}'.format(z.real)
        return '{0:.4f}{1:+.4f}i'.format(z.real, z.imag)

    eig12 = sp.eigvals(generate_matrix(12))
    svd12 = sp.svdvals(generate_matrix(12))

    eig25 = sp.eigvals(generate_matrix(25))
    svd25 = sp.svdvals(generate_matrix(25))

    result12 = r'\begin{tabular}{cc}' + '\n'
    result12 += r'    Eigenvalues&Singular values\\' + '\n'
    result12 += '     \\hline\n'
    result25 = copy.copy(result12)
    for k in range(25):
        if k < 12:
            result12 += r'    ${0}$&${1:.4f}$\\'.format(format_cplx(eig12[k]), svd12[k]) + '\n'
        result25 += r'    ${0}$&${1:.4f}$\\'.format(format_cplx(eig25[k]), svd25[k]) + '\n'

    result12 += '\\end{tabular}\n'
    result25 += '\\end{tabular}\n'

    print(result12)

    print(result25)
Example no. 5
	def _EFA_fired(self):

		#number of singular values to track
		singvals = 3

		#Time
		rows = Data.TrA_Data.shape[0]
		forward_r = np.zeros((rows,singvals))
		backward_r = np.zeros((rows,singvals))

		stepl_r = rows-singvals
		#Forward

		#Must start at the number of tracked singular values so the first SVD already yields 'singvals' values
		for i in range(singvals,rows):
			partsvd = linalg.svdvals(Data.TrA_Data[:i,:]).T
			forward_r[i,:] = partsvd[:singvals]

		#Backwards

		for i in range(0,stepl_r):
			j = (rows-singvals)-i
			partsvd = linalg.svdvals(Data.TrA_Data[j:,:]).T
			backward_r[j,:] = partsvd[:singvals]

		plt.figure()
		plt.semilogy(Data.time[singvals:],forward_r[singvals:,:],'b',Data.time[:(rows-singvals)],backward_r[:(rows-singvals),:],'r')
		plt.title("%s EFA time" %(self.title))
		plt.xlabel("Time (ps)")
		plt.ylabel("Log(EV)")
		plt.show()

		#Wavelength

		cols = Data.TrA_Data.shape[1]
		forward_c = np.zeros((cols,singvals))
		backward_c = np.zeros((cols,singvals))

		stepl_c = cols-singvals
		#Forward

		#Must start at the number of tracked singular values so the first SVD already yields 'singvals' values
		for i in range(singvals,cols):
			partsvd = linalg.svdvals(Data.TrA_Data[:,:i])
			forward_c[i,:] = partsvd[:singvals]

		#Backwards

		for i in range(0,stepl_c):
			j = (cols-singvals)-i
			partsvd = linalg.svdvals(Data.TrA_Data[:,j:])
			backward_c[j,:] = partsvd[:singvals]

		plt.figure()
		plt.semilogy(Data.wavelength[singvals:],forward_c[singvals:,:],'b',Data.wavelength[:cols-singvals],backward_c[:cols-singvals,:],'r')
		plt.title("%s EFA wavelength" %(self.title))
		plt.xlabel("Wavelength (nm)")
		plt.ylabel("Log(EV)")
		plt.show()
Example no. 6
def _test__twonormest():
    print('Checking _twonormest... ', end='')
    flag = True
    A = np.random.randn(10, 10)
    A = 0.5 * (A + A.T)
    est, _ = _twonormest(A)
    if abs(est - la.svdvals(A)[0]) / abs(la.svdvals(A)[0]) > 1.0e-3:
        flag &= False
    print('PASS') if flag else print('FAIL')
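
The tested `_twonormest` is not shown in this snippet. For reference, a sketch of what such an estimator might do, via power iteration on A^T A (an assumption about its design, not the actual implementation):

import numpy as np
import numpy.linalg as la

def _twonormest_sketch(A, n_iter=100, seed=0):
    """Estimate the 2-norm (largest singular value) of A by power iteration."""
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(A.shape[1])
    for _ in range(n_iter):
        y = A.T @ (A @ x)      # one step of power iteration on A^T A
        x = y / la.norm(y)     # keep the iterate normalized
    return la.norm(A @ x), x   # ||A x|| approaches sigma_max

est, _ = _twonormest_sketch(np.random.randn(10, 10))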
Example no. 7
def compute_entropy_singular_vals(psi, L, N, i):
    """Compute the singular values of a state decomposition.

    We divide the state into two parts at a given position and then
    return the singular values of the decomposition. The space is
    partitioned between sites i-1 and i.

    Args:
        psi (1darray of floats): state vector.
        L (int): number of lattice sites.
        N (int): number of particles.
        i (int): position where the state is partitioned.

    Returns:
        svals (1darray of floats): singular values.

    """
    svals = None

    # States in the whole lattice with N particles.
    states = generate_states(L, N)

    # Get the maximum and minimum number of particles that fit in the
    # subspace 0 to i-1 (both inclusive).
    num_min = N - min(L-i, N)
    num_max = min(i, N)

    for n in range(num_min, num_max+1):
        # Generate all states in the interval (0, i-1) with n
        # particles.
        a_states = generate_states(i, n)
        num_a_states = a_states.size
        # Generate all states in the interval (i, L-1) with N-n
        # particles.
        b_states = generate_states(L-i, N-n)
        num_b_states = b_states.size
        A = np.zeros((num_a_states, num_b_states), dtype=np.float64)

        for ia, a in enumerate(a_states):
            for ib, b in enumerate(b_states):
                # Tensor multiply a and b to produce a state in (0, L).
                ab = np.left_shift(a, L-i) + b
                A[ia, ib] = psi[np.nonzero(states == ab)]

        if n == num_min:
            svals = svdvals(A)
        else:
            svals = np.concatenate((svals, svdvals(A)))

    return svals
Example no. 8
def test_trace_1():
    B = np.ones((3, 3))
    X = np.random.randn(100, 9)
    y = np.dot(X, B.ravel('F')) + .1 * np.random.randn(100)

    alpha = 10.
    B_, _ = mt.trace(X, y, alpha, 0., (3, 3), rtol=1e-10)

    # KKT conditions
    grad = - np.dot(X.T, y - np.dot(X, B_.ravel('F')))
    M = (grad / alpha).reshape(B.shape, order='F')
    assert np.all(linalg.svdvals(M) < 1. + 1e-3)
    testing.assert_allclose(np.dot(M.ravel('F'), B_.ravel('F')),
        - linalg.svdvals(B_).sum())
Example no. 9
def evil(L,
         r,
         z,
         proj_C=lambda v: np.maximum(v, 0.),
         gamma=None,
         max_iter=100,
         verbose=1,
         tol=1e-4):
    """Projects the point z onto the intersection of the hyperplane "Ex=r" and
    the convex set C.
    """
    if gamma is None:
        gamma = 2. / linalg.svdvals(L)[0]**2
    v = np.zeros(L.shape[0])
    x = np.zeros(L.shape[1])
    for k in range(max_iter):
        old_x = x.copy()
        x = proj_C(z - L.T.dot(v))
        v += gamma * (L.dot(x) - r)
        error = ((x - old_x)**2).sum()
        if verbose:
            print(
                "\tIteration: %03i/%03i: x^(k-1) = %s; x^(k) = %s; "
                "||x^(k) - x^(k-1)||^2 = %s" %
                (k + 1, max_iter, old_x, x, error))
        if error < tol:
            if verbose:
                print("\tConverged after %i iterations." % (k + 1))
            break
    return x
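
A usage sketch (not from the original project; feasibility is arranged by construction, since the dual-ascent iteration only makes sense when the hyperplane actually intersects C):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
L = rng.standard_normal((3, 10))
x_true = rng.random(10)              # a nonnegative point makes the problem feasible
r = L.dot(x_true)
z = rng.standard_normal(10)

x = evil(L, r, z, verbose=0, max_iter=5000, tol=1e-12)
print(np.abs(L.dot(x) - r).max(), x.min())   # residual near 0, x nonnegative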
Example no. 10
def devil(L, r, z, max_iter=100, verbose=0, tol=1e-4):
    q, n = L.shape
    l = linalg.svdvals(np.vstack((np.eye(n), L)))[0]
    sigma = tau = .99 / l
    assert sigma * tau * l * l < 1.
    x = np.zeros(n)
    xbar = x.copy()
    v = np.zeros(n)
    zeta = np.zeros(q)
    for k in range(max_iter):
        old_x = x.copy()
        old_v = v.copy()
        old_zeta = zeta.copy()

        v += sigma * (xbar - z)
        v /= (1. + sigma)
        zeta += sigma * (L.dot(xbar) - r)
        x -= tau * (v + L.T.dot(zeta))
        x = np.maximum(x, 0.)
        xbar = 2 * x - old_x

        error = .5 * (((x - old_x)**2).sum() / tau +
                      (((v - old_v)**2).sum() +
                       ((zeta - old_zeta)**2).sum()) / sigma)
        if verbose:
            print("\tIteration: %03i/%03i: error=%g" % (k + 1, max_iter, error))

        if error < tol:
            if verbose:
                print("\tConverged after %i iterations." % (k + 1))
            break

    return x
Example no. 11
def general_condat(L,
                   grad_F,
                   beta,
                   prox_G,
                   prox_Hstar,
                   norm_L=None,
                   max_iter=100,
                   verbose=0,
                   tol=1e-4):

    if norm_L is None:
        norm_L = linalg.svdvals(L)[0]
    sigma = .99 / norm_L
    tau = .9 / (sigma * norm_L * norm_L + .5 * beta)
    assert 1. / tau - sigma * norm_L * norm_L >= beta * .5
    print(norm_L, beta, sigma, tau)
    x = np.zeros(L.shape[1])
    y = np.zeros(L.shape[0])
    ybar = y.copy()
    delta = 2. - .5 * beta / (1. / tau - sigma * norm_L * norm_L)
    assert 1. <= delta <= 2.
    rho = .99 * delta
    for _ in range(max_iter):
        old_x = x.copy()
        old_y = y.copy()

        xbar = x - tau * (grad_F(x) + L.T.dot(y))
        xbar = prox_G(xbar, tau)
        ybar = y + sigma * L.dot(2 * xbar - x)
        ybar = prox_Hstar(ybar, sigma)
        x = rho * xbar + (1. - rho) * old_x
        y = rho * ybar + (1. - rho) * old_y
        if verbose:
            print(x, y)
    return x
Example no. 12
def sample_moments( X, k ):
    """Get the sample moments from data"""
    N, d = X.shape

    # Partition X into two halves to independently estimate M2 and M3
    X1, X2 = X[:N // 2], X[N // 2:]

    # Get the moments  
    M1 = X1.mean(0)
    M1_ = X2.mean(0)
    M2 = Pairs( X1, X1 ) 
    M3 = lambda theta: TriplesP( X2, X2, X2, theta )
    #M3 = Triples( X2, X2, X2 )

    # TODO: Ah, not computing sigma2! 
    # Estimate \sigma^2 = k-th eigenvalue of  M2 - mu mu^T
    sigma2 = svdvals( M2 - outer( M1, M1 ) )[k-1]
    assert( sc.isreal( sigma2 ) and sigma2 > 0 )
    # P (M_2) is the best rank-k approximation to M2 - sigma^2 I
    P = approxk( M2 - sigma2 * eye( d ), k )

    B = matrix_tensorify( eye(d), M1_ )
    T = lambda theta: M3(theta) - sigma2 * ( M1_.dot(theta) * eye( d ) + outer( M1_, theta ) + outer( theta, M1_ ) )
    #T = M3 - sigma2 * ( B + B.swapaxes(2, 1) + B.swapaxes(2, 0) )

    return P, T    
Example no. 13
File: glm.py Project: itmat/pade
def rank(X, cond=1.0e-12):
    X = np.asarray(X)
    if len(X.shape) == 2:
        D = svdvals(X)
        return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
    else:
        return int(not np.all(np.equal(X, 0.0)))
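
A quick check of this `rank` helper against `np.linalg.matrix_rank` (a sketch; `svdvals` is assumed to come from `scipy.linalg`):

import numpy as np
from scipy.linalg import svdvals

A = np.column_stack([np.ones(5), np.arange(5), 2.0 * np.arange(5)])  # third column is dependent
print(rank(A), np.linalg.matrix_rank(A))   # 2 2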
Example no. 14
def test_energy_capture(set_up_basis_data):
    """Test pre.energy_capture()."""
    X = set_up_basis_data
    svdvals = la.svdvals(X)

    # Single threshold.
    r = roi.pre.energy_capture(svdvals, .9, plot=False)
    assert isinstance(r, np.int64) and r >= 1

    # Multiple thresholds.
    rs = roi.pre.energy_capture(svdvals, [.9, .99, .999], plot=False)
    assert isinstance(rs, list)
    for r in rs:
        assert isinstance(r, np.int64) and r >= 1
    assert rs == sorted(rs)

    # Plotting.
    status = plt.isinteractive()
    plt.ion()
    rs = roi.pre.energy_capture(svdvals, .999, plot=True)
    assert len(plt.gcf().get_axes()) == 1
    rs = roi.pre.energy_capture(svdvals, [.9, .99, .999], plot=True)
    assert len(plt.gcf().get_axes()) == 1
    plt.interactive(status)
    plt.close("all")
Example no. 15
def sweep_fidelity(kets,direction):
    '''
    Sweep fidelity.
    '''
    bra=kets[0].tobra(labels=[kets[0].labels[0],kets[0].labels[1]+'\''])
    ket=kets[1]
    if direction=='->':
        [keti<<keti.l-1 for keti in [bra,ket]]
        step=1
        clink_axis=kets[0].llink_axis
        attach_S='A'
        edge_labels=[bra.AL[0].labels[clink_axis],ket.AL[0].labels[clink_axis]]
    else:
        step=-1
        clink_axis=kets[0].rlink_axis
        attach_S='B'
        [keti>>keti.nsite-1-keti.l for keti in [bra,ket]]
        edge_labels=[bra.BL[-1].labels[clink_axis],ket.BL[-1].labels[clink_axis]]
    Ri=tensor.Tensor(identity(1),labels=edge_labels)
    fs=[1]
    for i in range(ket.nsite):
        sitei=i if direction=='->' else ket.nsite-i-1
        Ri=(bra.get(sitei,attach_S=attach_S)*Ri*ket.get(sitei,attach_S=attach_S))
        S=svdvals(Ri)
        fs.append(sum(S))

        print(i, sum(S))
    if direction=='<-':
        fs.reverse()
    return fs
Example no. 16
 def get_strong_convexity(self):
     '''
     Return the strong-convexity constant; returns 0 if not strongly convex.
     '''
     sigma_min = svdvals(self.A)[-1]
     return sigma_min**2
Example no. 17
def emb_learn(S, method, emb_dim, sval_sqrt=True, error=True):
    nsize = len(S)

    if method == 'svd':
        svecs_l, svals, svecs_r = randomized_svd(S,
                                                 n_components=emb_dim,
                                                 n_iter=5,
                                                 random_state=1)
        if error:
            part_sum = svals.sum()
            all_sum = svdvals(S).sum()

        if sval_sqrt:
            emb = np.mat(svecs_l) * scipy.sparse.spdiags(
                np.sqrt(svals), [0], emb_dim, emb_dim, format='csr')
        else:
            emb = np.mat(svecs_l) * scipy.sparse.spdiags(
                svals, [0], emb_dim, emb_dim, format='csr')

        if error:
            error = 1 - part_sum / all_sum
        else:
            error = None

    return np.array(emb), error
Example no. 18
def schmidt_vals(dw,aas,aai,eps,deltaw,f):
    """
    Args:
    dw: size of the grid spacing
    aas=relative slowness of the signal mode
    aai=relative slowness of the idler mode
    lnl=inverse of the strength of the nonlinearity
    deltaw:  specifies the size of the frequency grid going from
    -deltaw to deltaw for each frequency
    f: shape of the pump function
    """
    ddws=np.arange(-deltaw-dw/2,deltaw+dw/2,dw)
    deltaks=aas*ddws
    ddwi=np.arange(-deltaw-dw/2,deltaw+dw/2,dw)
    deltaki=aai*ddwi
    ds=np.diag(deltaks)
    di=np.diag(deltaki)


    def ff(x,y):
        return f(x+y)
    
    v=eps*(dw)*ff(ddwi[:,None],ddws[None,:])
    G=1j*np.concatenate((np.concatenate((ds,v),axis=1),np.concatenate((-v,-di),axis=1)),axis=0)
    z=1
    dsi=np.concatenate((deltaks,-deltaki),axis=0)
    U0=linalg.expm(-1j*np.diag(dsi)*z/2)
    GG=np.dot(np.dot(U0,linalg.expm(G)),U0)
    n=len(ddws)
    C=GG[0:n,n:2*n]
    na=np.dot(np.conj(np.transpose(C)),C)*dw
    vv=np.arcsinh(np.sqrt(linalg.svdvals(na)/dw))
    return vv
Example no. 19
 def test_cplx_mats(self):
     """Test complex matrices.
     """
     if cvx.SUPER_SCS in cvx.installed_solvers():
         # Complex-valued matrix
         K = np.array(np.random.rand(2, 2) +
                      1j * np.random.rand(2, 2))  # example matrix
         n1 = la.svdvals(K).sum()  # trace norm of K
         # Dual Problem
         X = cvx.Variable((2, 2), complex=True)
         Y = cvx.Variable((2, 2), complex=True)
         Z = cvx.Variable((2, 2))
         # X, Y >= 0 so trace is real
         objective = cvx.Minimize(
             cvx.real(0.5 * cvx.trace(X) + 0.5 * cvx.trace(Y)))
         constraints = [
             cvx.bmat([[X, -K.conj().T], [-K, Y]]) >> 0,
             X >> 0,
             Y >> 0,
         ]
         problem = cvx.Problem(objective, constraints)
         sol_scs = problem.solve(solver='SUPER_SCS')
         self.assertEqual(constraints[0].dual_value.shape, (4, 4))
         self.assertEqual(constraints[1].dual_value.shape, (2, 2))
         self.assertEqual(constraints[2].dual_value.shape, (2, 2))
         self.assertAlmostEqual(sol_scs, n1)
     else:
         pass
Example no. 20
 def evidenceparts(self, sigma=None):
     s = sigma if sigma is not None else self.estimatenoise()
     r = self.res()
     N = len(self.params)
     j = self.grad()
     logdet = 2*np.log(svdvals(j)).sum()
     return np.array([-r.dot(r)/s**2, N*np.log(2*np.pi*s**2), -logdet])/2.
Example no. 21
    def add_consts( self, key, A, k=-1, ntype=None ):
        """Print the error between two objects"""

        if ntype is None:
            self.add( "norm_%s" % key, norm( A ) )
        else:
            self.add( "norm_%s_%s" % (key, str(ntype)), norm( A, ntype ) )

        if ntype == 2:
            if k > 0:
                self.add( "s_k_%s" % key, svdvals(A)[k-1]  )
            else:
                self.add( "s_k_%s" % key, svdvals(A)[-1]  )
            self.add( "K_%s" % key, condition_number( A, k ) )
            if A.shape[0] == A.shape[1]:
                self.add( "D_%s" % key, eigen_sep( A, k ) )
Example no. 22
def hello_hither_scipy(n=20):
    from scipy import linalg
    import numpy as np
    A = np.vander(np.arange(1, n+1))
    smallest_singular_value = np.min(linalg.svdvals(A))
    print(f'Hello, hither scipy: smallest singular value of Vandermonde matrix for x=[1..{n}] is {smallest_singular_value}')
    return smallest_singular_value
Example no. 23
def rank(X, cond=1.0e-12):
    X = np.asarray(X)
    if len(X.shape) == 2:
        D = svdvals(X)
        return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
    else:
        return int(not np.all(np.equal(X, 0.)))
Example no. 24
def compute_and_save_all_svdvals(training_data):
    """Compute and save the singular values corresponding to the *full* POD
    basis for the training data.

    Parameters
    ----------
    training_data : (NUM_ROMVARS*DOF,trainsize) ndarray
        Training snapshots to take the SVD of.

    Returns
    -------
    svdvals : (trainsize,) ndarray
        Singular values for the full POD basis.
    """
    # Compute the DENSE SVD of the training data to get the singular values.
    with utils.timed_block("Computing *dense* SVD for singular values"):
        svdvals = la.svdvals(training_data,
                             overwrite_a=True,
                             check_finite=False)

    # Save the POD basis.
    save_path = config.basis_path(training_data.shape[1])
    save_path = save_path.replace(config.BASIS_FILE, "svdvals.h5")
    with utils.timed_block("Saving singular values"):
        with h5py.File(save_path, 'w') as hf:
            hf.create_dataset("svdvals", data=svdvals)
    logging.info(f"Singular values saved to {save_path}.\n")

    return svdvals
Example no. 25
def lavarello_choice(A, scattered_field_o, scattered_field_r):
    """Compute the regularization parameter according to [1].

    This parameter strategy is based on the first singular value of the
    coefficient matrix.

    Parameters
    ----------
        A : :class:`numpy.ndarray`
            Coefficient matrix returned by `_compute_A()` routine.

        scattered_field_o : :class:`numpy.ndarray`
            Original scattered field matrix given for the problem.

        scattered_field_r : :class:`numpy.ndarray`
            Recovered scattered field matrix given for the problem.

    References
    ----------
    .. [1] Lavarello, Roberto, and Michael Oelze. "A study on the
           reconstruction of moderate contrast targets using the
           distorted Born iterative method." IEEE transactions on
           ultrasonics, ferroelectrics, and frequency control 55.1
           (2008): 112-124.
    """
    RRE = rst.compute_rre(scattered_field_o, scattered_field_r)
    s0 = svdvals(A)[0]

    if .5 < RRE:
        return s0**2 / 2
    elif .25 < RRE <= .5:
        return s0**2 / 20
    elif RRE <= .25:
        return s0**2 / 200
Example no. 26
    def test_complex_matrices(self):
        """Test complex matrices.
        """
        # Complex-valued matrix
        K = np.array(np.random.rand(2, 2) + 1j * np.random.rand(2, 2))  # example matrix
        n1 = la.svdvals(K).sum()  # trace norm of K

        # Dual Problem
        X = cp.Variable((2, 2), complex=True)
        Y = cp.Variable((2, 2), complex=True)
        # X, Y >= 0 so trace is real
        objective = cp.Minimize(
            cp.real(0.5 * cp.trace(X) + 0.5 * cp.trace(Y))
        )
        constraints = [
            cp.bmat([[X, -K.conj().T], [-K, Y]]) >> 0,
            X >> 0,
            Y >> 0,
        ]
        problem = cp.Problem(objective, constraints)

        sol_scs = problem.solve(solver='SCS')
        self.assertEqual(constraints[0].dual_value.shape, (4, 4))
        self.assertEqual(constraints[1].dual_value.shape, (2, 2))
        self.assertEqual(constraints[2].dual_value.shape, (2, 2))
        self.assertAlmostEqual(sol_scs, n1)
Example no. 27
def test_cumulative_energy(set_up_basis_data):
    """Test pre._basis.cumulative_energy()."""
    X = set_up_basis_data
    svdvals = la.svdvals(X)

    # Single threshold.
    r = roi.pre.cumulative_energy(svdvals, .9, plot=False)
    assert isinstance(r, np.int64) and r >= 1

    # Multiple thresholds.
    rs = roi.pre.cumulative_energy(svdvals, [.9, .99, .999], plot=False)
    assert isinstance(rs, list)
    for r in rs:
        assert isinstance(r, np.int64) and r >= 1
    assert rs == sorted(rs)

    # Plotting.
    status = plt.isinteractive()
    plt.ion()
    rs = roi.pre.cumulative_energy(svdvals, .999, plot=True)
    assert len(plt.gcf().get_axes()) == 1
    rs = roi.pre.cumulative_energy(svdvals, [.9, .99, .999], plot=True)
    assert len(plt.gcf().get_axes()) == 1
    plt.interactive(status)
    plt.close("all")

    # Specific test.
    svdvals = np.sqrt([.9, .09, .009, .0009, .00009, .000009, .0000009])
    rs = roi.pre.cumulative_energy(svdvals, [.9, .99, .999], plot=False)
    assert len(rs) == 3
    assert rs == [1, 2, 3]
Example no. 28
def plotMatrixSpectrum(model, A, mat_name):
    fig_path = os.path.join(model.fig_dir,
                            "singular_values_{:}.png".format(mat_name))
    try:
        s = svdvals(A)
    except Exception:
        s = -np.sort(-sparse_svds(
            A, return_singular_vectors=False, k=min(100, min(A.shape))))
    plt.plot(s, 'o')
    plt.ylabel(r'$\sigma$')
    plt.title('Singular values of {:}'.format(mat_name))
    plt.savefig(fig_path)
    plt.close()

    if A.shape[0] == A.shape[1]:  #is square
        fig_path = os.path.join(model.fig_dir,
                                "eigenvalues_{:}.png".format(mat_name))
        try:
            eig = eigvals(A)
        except Exception:
            eig = sparse_eigs(A,
                              return_eigenvectors=False,
                              k=min(1000, min(A.shape)))
        plt.plot(eig.real, eig.imag, 'o')
        plt.xlabel(r'Re($\lambda$)')
        plt.ylabel(r'Im($\lambda$)')
        plt.title('Eigenvalues of {:}'.format(mat_name))
        plt.savefig(fig_path)
        plt.close()
Example no. 29
def test_svdval_decay(set_up_basis_data):
    """Test pre._basis.svdval_decay()."""
    X = set_up_basis_data
    svdvals = la.svdvals(X)

    # Single cutoffs.
    r = roi.pre.svdval_decay(svdvals, 1e-14, plot=False)
    assert isinstance(r, int) and r >= 1

    # Multiple cutoffs.
    rs = roi.pre.svdval_decay(svdvals, [1e-10,1e-12], plot=False)
    assert isinstance(rs, list)
    for r in rs:
        assert isinstance(r, int) and r >= 1
    assert rs == sorted(rs)

    # Plotting.
    status = plt.isinteractive()
    plt.ion()
    rs = roi.pre.svdval_decay(svdvals, .0001, plot=True)
    assert len(plt.gcf().get_axes()) == 1
    rs = roi.pre.svdval_decay(svdvals, [1e-4, 1e-8, 1e-12], plot=True)
    assert len(plt.gcf().get_axes()) == 1
    plt.interactive(status)
    plt.close("all")

    # Specific test.
    svdvals = [.9, .09, .009, .0009, .00009, .000009, .0000009]
    rs = roi.pre.svdval_decay(svdvals, [.8, .1, .0004], plot=False)
    assert len(rs) == 3
    assert rs == [1, 1, 4]
Example no. 30
def shrink_ca(X, ncp=2):
    """computes the approximation of a given matrix X using `ncp` components
    """
    n, p = X.shape
    N = X.sum()
    N = 1 if N == 0 else N
    P = X / N
    Rc = P.sum(axis=0)[np.newaxis, :]
    Rr = P.sum(axis=1)[:, np.newaxis]
    Rc[Rc == 0] = 1
    Rr[Rr == 0] = 1
    S = (P - Rr @ Rc) / Rr**.5 / Rc**.5

    svals = svdvals(S)
    u, s, v = sps.linalg.svds(S, k=ncp, maxiter=500, tol=1E-9)

    zero_vals = np.isclose(0, s)  # find which singular values are null
    den = ((n - 1) * (p - 1) - (n - 1) * ncp - (p - 1) * ncp + ncp**2)
    sigma2 = (svals[ncp:]**2).sum() / (1 if den == 0 else den)

    lambda_shrunk = s.copy()
    lambda_shrunk[~zero_vals] = (
        s[~zero_vals]**2 - n * p / min(p, n - 1) * sigma2) / s[~zero_vals]

    recon = (u * lambda_shrunk) @ v
    recon = N * (((recon * Rr**.5) * Rc**.5) + Rr @ Rc)
    recon[recon < 0] = 0  # account for numerical errors and avoid negative values

    return recon
Example no. 31
def _assert_ica_attributes(ica):
    """Assert some attributes of ICA objects."""
    __tracebackhide__ = True
    # This tests properties, but also serves as documentation of
    # the shapes these arrays can obtain and how they obtain them

    # Pre-whitener
    n_ch = len(ica.ch_names)
    assert ica.pre_whitener_.shape == (
        n_ch, n_ch if ica.noise_cov is not None else 1)

    # PCA
    n_pca = ica.max_pca_components
    assert ica.pca_components_.shape == (n_pca, n_ch), 'PCA shape'
    assert_allclose(np.dot(ica.pca_components_, ica.pca_components_.T),
                    np.eye(n_pca), atol=1e-6, err_msg='PCA orthogonality')
    assert ica.pca_mean_.shape == (n_ch,)

    # Mixing/unmixing
    assert ica.unmixing_matrix_.shape == (ica.n_components_,) * 2, \
        'Unmixing shape'
    assert ica.mixing_matrix_.shape == (ica.n_components_,) * 2, \
        'Mixing shape'
    mix_unmix = np.dot(ica.mixing_matrix_, ica.unmixing_matrix_)
    s = linalg.svdvals(ica.unmixing_matrix_)
    nz = len(s) - (s > s[0] * 1e-12).sum()
    want = np.eye(ica.n_components_)
    want[:nz] = 0
    assert_allclose(mix_unmix, want, atol=1e-6, err_msg='Mixing as pinv')
Example no. 32
def spectral_norm_squared(X):
    """Computes square of the operator 2-norm (spectral norm) of X

    This corresponds to the Lipschitz constant of the gradient of the
    squared-loss function:

        w -> .5 * ||y - Xw||^2

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
      Design matrix.

    Returns
    -------
    lipschitz_constant : float
      The square of the spectral norm of X.

    """
    # On big matrices like those that we have in neuroimaging, svdvals
    # is faster than a power iteration (even when using arpack's)
    """tw: below is same as:
    sp.sparse.linalg.svds(Xtrz,k=1,return_singular_vectors=False)[0]**2
    """
    return linalg.svdvals(X)[0]**2
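
A one-line sanity check for this helper (a sketch, assuming `from scipy import linalg` as in the snippet):

import numpy as np
from scipy import linalg

X = np.random.randn(200, 50)
print(np.isclose(spectral_norm_squared(X), np.linalg.norm(X, 2) ** 2))   # True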
Example no. 33
def SSFLR(kets, direction):
    '''
    Sweep fidelity for left to right or right to left.
    '''
    bra = kets[0].tobra(labels=[kets[0].labels[0], kets[0].labels[1] + '\''])
    ket = kets[1]
    nsite = ket.nsite
    if direction == '->':
        [keti << keti.l - 1 for keti in [bra, ket]]
        step = 1
        clink_axis = kets[0].llink_axis
        attach_S = 'A'
        edge_labels = [
            bra.ML[0].labels[clink_axis], ket.ML[0].labels[clink_axis]
        ]
    else:
        step = -1
        clink_axis = kets[0].rlink_axis
        attach_S = 'B'
        [keti >> nsite - 1 - keti.l for keti in [bra, ket]]
        edge_labels = [
            bra.get(nsite - 1).labels[clink_axis],
            ket.get(nsite - 1).labels[clink_axis]
        ]
    Ri = tensor.Tensor(identity(1), labels=edge_labels)
    fs = [1]
    for i in range(nsite):
        sitei = i if direction == '->' else nsite - i - 1
        Ri = (bra.get(sitei, attach_S=attach_S) * Ri *
              ket.get(sitei, attach_S=attach_S))
        S = svdvals(Ri)
        fs.append(sum(S))
        print(i, sum(S))
    return fs
Example no. 34
    def test_cond(self, k=20, d=11, r=3):
        """Test lstsq._tikhonov._BaseSolver.cond()."""
        solver = roi.lstsq._tikhonov._BaseSolver()

        # Try before calling _process_fit_arguments().
        with pytest.raises(AttributeError) as ex:
            solver.cond()
        assert ex.value.args[0] == "lstsq solver not trained (call fit())"

        # Contrived test 1
        A = np.eye(d)
        B = np.zeros((d,r))
        solver._process_fit_arguments(A, B)
        assert np.isclose(solver.cond(), 1)

        # Contrived test 2
        A = np.diag(np.arange(1,d+1))
        B = np.zeros((d,r))
        solver._process_fit_arguments(A, B)
        assert np.isclose(solver.cond(), d)

        # Random test
        A = np.random.standard_normal((k,d))
        B = np.random.standard_normal((k,r))
        svals = la.svdvals(A)
        solver._process_fit_arguments(A, B)
        assert np.isclose(solver.cond(), svals[0] / svals[-1])
Example no. 35
def randomize(table, ntrials=100):
    """

    Parameters
    ----------
    table : :class:`pandas.DataFrame`
        Table of coupling strengths
    ntrials : int, optional
        Number of randomization trials

    Returns
    -------
        Array of singular values from the randomized tables, one row per trial
    """
    _, Ntime = table.shape
    Lrand = []

    avg_kb = table.mean(axis=1)
    std_kb = table.std(axis=1)

    rand = functools.partial(_rand, avg_kb, std_kb)
    for _ in range(ntrials):
        pool = mp.Pool(maxtasksperchild=2)
        values = pool.map_async(rand, range(Ntime))
        pool.close()
        pool.join()
        kb_rand = pd.DataFrame(dict(values.get()))
        Lrand.append(linalg.svdvals(kb_rand))
    return np.array(Lrand)
Example no. 36
def spectral_gap( x, k = None ):
    """Minimum difference in eigenvalues"""
    # Get the singular values
    s = svdvals( x )
    if k is not None:
        s = s[:k]

    return (sc.diff( s )).min() / s[0]
Example no. 37
def test_singular_values():
  from scipy.linalg import svdvals
  random.seed(13811)
  for n in 2,3:
    A = random.randn(7,n,n)
    D = fast_singular_values(A)
    for a,d in zip(A,D):
      assert allclose(svdvals(a),abs(d))
Example no. 38
def computepca(dataset):
    U, s, V = np.linalg.svd(dataset, full_matrices=True)
    eigvals, eigVecs = np.linalg.eig(np.cov(dataset))  # np.linalg.eig returns (values, vectors)
    S = sp.svdvals(dataset)
    print(S)
    lambda1 = S**2 / np.size(dataset, 1)
    cumvar = np.cumsum(lambda1) / np.sum(lambda1)
    return U, lambda1, cumvar
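
With the fix above, a consistency check relating the returned variances to the covariance spectrum (a sketch; `sp` is assumed to be `scipy.linalg`, and rows are centered first so the SVD matches the covariance eigendecomposition):

import numpy as np
import scipy.linalg as sp

rng = np.random.default_rng(1)
dataset = rng.standard_normal((5, 200))
dataset -= dataset.mean(axis=1, keepdims=True)   # center each row
U, lambda1, cumvar = computepca(dataset)
# Squared singular values / n equal the eigenvalues of the biased covariance d d^T / n.
cov = dataset @ dataset.T / dataset.shape[1]
print(np.allclose(np.sort(lambda1), np.linalg.eigvalsh(cov)))   # True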
Example no. 39
def matrix_cond(A):
    """Calculate the condition number of A with respect to the 2-norm."""
    s_vals = la.svdvals(A)
    # if the smallest singular value is 0, then A is singular so the condition number is inf.
    if s_vals[-1] == 0:
        return np.inf
    else:
        return s_vals[0]/s_vals[-1]
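
A quick check against NumPy's built-in (a sketch; `la` is assumed to be `scipy.linalg`):

import numpy as np
from scipy import linalg as la

A = np.vander(np.arange(1.0, 6.0))
print(np.isclose(matrix_cond(A), np.linalg.cond(A, 2)))   # True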
Example no. 40
def test_singular_values():
    from scipy.linalg import svdvals
    random.seed(13811)
    for n in 2, 3:
        A = random.randn(7, n, n)
        D = fast_singular_values(A)
        for a, d in zip(A, D):
            assert allclose(svdvals(a), abs(d))
Example no. 41
def pca_eigvals(d):
    """
    Compute the eigenvalues of the covariance matrix of the data d.  The covariance
    matrix is computed as d*d^T.
    """
    # remove mean of each row
    d = d - np.mean(d, axis = 1)[:, np.newaxis]
    
    return 1.0 / (d.shape[1] - 1) * svdvals(d, overwrite_a=True)**2
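
A consistency check (a sketch, not from the original project): since `np.cov` also removes the row means and divides by n-1, its eigenvalues should match the values returned here.

import numpy as np
from scipy.linalg import svdvals

rng = np.random.default_rng(0)
d = rng.standard_normal((4, 100))
evals = pca_eigvals(d)   # descending
print(np.allclose(np.sort(evals), np.linalg.eigvalsh(np.cov(d))))   # True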
Example no. 42
def condition_number( x, k = None ):
    """Condition number for the k-rank approximation of x"""
    # Get the eigenvalues
    s = svdvals( x )

    if k is not None:
        return s[0]/s[k-1]
    else:
        return s[0]/s[-1]
Example no. 43
def compute_depth_prior(G, gain_info, is_fixed_ori, exp=0.8, limit=10.0,
                        patch_areas=None, limit_depth_chs=False):
    """Compute weighting for depth prior
    """
    logger.info('Creating the depth weighting matrix...')

    # If possible, pick best depth-weighting channels
    if limit_depth_chs is True:
        G = _restrict_gain_matrix(G, gain_info)

    # Compute the gain matrix
    if is_fixed_ori:
        d = np.sum(G ** 2, axis=0)
    else:
        n_pos = G.shape[1] // 3
        d = np.zeros(n_pos)
        for k in range(n_pos):
            Gk = G[:, 3 * k:3 * (k + 1)]
            d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]

    # XXX Currently the fwd solns never have "patch_areas" defined
    if patch_areas is not None:
        d /= patch_areas ** 2
        logger.info('    Patch areas taken into account in the depth '
                    'weighting')

    w = 1.0 / d
    ws = np.sort(w)
    weight_limit = limit ** 2
    if limit_depth_chs is False:
        # match old mne-python behavior
        ind = np.argmin(ws)
        n_limit = ind
        limit = ws[ind] * weight_limit
        wpp = (np.minimum(w / limit, 1)) ** exp
    else:
        # match C code behavior
        limit = ws[-1]
        n_limit = len(d)
        if ws[-1] > weight_limit * ws[0]:
            ind = np.where(ws > weight_limit * ws[0])[0][0]
            limit = ws[ind]
            n_limit = ind

    logger.info('    limit = %d/%d = %f'
                % (n_limit + 1, len(d),
                np.sqrt(limit / ws[0])))
    scale = 1.0 / limit
    logger.info('    scale = %g exp = %g' % (scale, exp))
    wpp = np.minimum(w / limit, 1) ** exp

    depth_prior = wpp if is_fixed_ori else np.repeat(wpp, 3)

    return depth_prior
Example no. 44
def rank(X, cond=1.0e-12):
    """
    Return the rank of a matrix X based on its generalized inverse,
    not the SVD.
    """
    X = np.asarray(X)
    if len(X.shape) == 2:
        D = svdvals(X)
        return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
    else:
        return int(not np.all(np.equal(X, 0.)))
Example no. 45
def test_add_indep():
    x1 = np.array([0,0,0,0,0,1,1,1,2,2,2])
    x2 = np.array([0,0,0,0,0,1,1,1,1,1,1])
    x0 = np.ones(len(x2))
    x = np.column_stack([x0, x1[:,None]*np.arange(3), x2[:,None]*np.arange(2)])
    varnames = ['const'] + ['var1_%d' %i for i in np.arange(3)] \
                         + ['var2_%d' %i for i in np.arange(2)]
    xo, vo = add_indep(x, varnames)

    assert_equal(xo, np.column_stack((x0, x1, x2)))
    assert_equal((linalg.svdvals(x) > 1e-12).sum(), 3)
    assert_equal(vo, ['const', 'var1_1', 'var2_1'])
Example no. 46
    def error_norm(self, comp_cov, norm='frobenius', scaling=True,
                   squared=True):
        """Computes the Mean Squared Error between two covariance estimators.
        (In the sense of the Frobenius norm).

        Parameters
        ----------
        comp_cov : array-like, shape = [n_features, n_features]
            The covariance to compare with.

        norm : str
            The type of norm used to compute the error. Available error types:
            - 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A)))
            where A is the error ``(comp_cov - self.covariance_)``.

        scaling : bool
            If True (default), the squared error norm is divided by n_features.
            If False, the squared error norm is not rescaled.

        squared : bool
            Whether to compute the squared error norm or the error norm.
            If True (default), the squared error norm is returned.
            If False, the error norm is returned.

        Returns
        -------
        The Mean Squared Error (in the sense of the Frobenius norm) between
        `self` and `comp_cov` covariance estimators.

        """
        # compute the error
        error = comp_cov - self.covariance_
        # compute the error norm
        if norm == "frobenius":
            squared_norm = np.sum(error ** 2)
        elif norm == "spectral":
            squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
        else:
            raise NotImplementedError(
                "Only spectral and frobenius norms are implemented")
        # optionally scale the error norm
        if scaling:
            squared_norm = squared_norm / error.shape[0]
        # finally get either the squared norm or the norm
        if squared:
            result = squared_norm
        else:
            result = np.sqrt(squared_norm)

        return result
Example no. 47
def compute_depth_prior(G, exp=0.8, limit=10.0):
    """Compute weighting for depth prior
    """
    n_pos = G.shape[1] // 3
    d = np.zeros(n_pos)
    for k in range(n_pos):
        Gk = G[:, 3 * k:3 * (k + 1)]
        d[k] = linalg.svdvals(np.dot(Gk.T, Gk))[0]
    w = 1.0 / d
    wmax = np.min(w) * (limit ** 2)
    wp = np.minimum(w, wmax)
    wpp = (wp / wmax) ** exp
    depth_prior = np.ravel(wpp[:, None] * np.ones((1, 3)))
    return depth_prior
Example no. 48
def rank(X, cond=1.0e-12):
    """
    Return the rank of a matrix X based on its generalized inverse,
    not the SVD.
    """
    from warnings import warn
    warn("rank is deprecated and will be removed in 0.7."
         " Use np.linalg.matrix_rank instead.", FutureWarning)
    X = np.asarray(X)
    if len(X.shape) == 2:
        D = svdvals(X)
        return int(np.add.reduce(np.greater(D / D.max(), cond).astype(np.int32)))
    else:
        return int(not np.all(np.equal(X, 0.)))
Example no. 49
def rank(X, cond=1.0e-12):
    """
    Return the rank of a matrix X based on its generalized inverse,
    not the SVD.
    """
    X = np.asarray(X)
    if len(X.shape) == 2:
        import scipy.linalg as SL

        D = SL.svdvals(X)
        result = np.add.reduce(np.greater(D / D.max(), cond))
        return int(result.astype(np.int32))
    else:
        return int(not np.all(np.equal(X, 0.0)))
Example no. 50
    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y
            not used, present for API consistence purpose.

        Returns
        -------
        self : object

        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = linalg.pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)

        return self
Example no. 51
def _fractional_matrix_power(A, p):
    """
    Compute the fractional power of a matrix.

    See the fractional_matrix_power docstring in matfuncs.py for more info.

    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    if p == int(p):
        return np.linalg.matrix_power(A, int(p))
    # Compute singular values.
    s = svdvals(A)
    # Inverse scaling and squaring cannot deal with a singular matrix,
    # because the process of repeatedly taking square roots
    # would not converge to the identity matrix.
    if s[-1]:
        # Compute the condition number relative to matrix inversion,
        # and use this to decide between floor(p) and ceil(p).
        k2 = s[0] / s[-1]
        p1 = p - np.floor(p)
        p2 = p - np.ceil(p)
        if p1 * k2 ** (1 - p1) <= -p2 * k2:
            a = int(np.floor(p))
            b = p1
        else:
            a = int(np.ceil(p))
            b = p2
        try:
            R = _remainder_matrix_power(A, b)
            Q = np.linalg.matrix_power(A, a)
            return Q.dot(R)
        except np.linalg.LinAlgError:
            pass
    # If p is negative then we are going to give up.
    # If p is non-negative then we can fall back to generic funm.
    if p < 0:
        X = np.empty_like(A)
        X.fill(np.nan)
        return X
    else:
        p1 = p - np.floor(p)
        a = int(np.floor(p))
        b = p1
        R, info = funm(A, lambda x: pow(x, b), disp=False)
        Q = np.linalg.matrix_power(A, a)
        return Q.dot(R)
Example no. 52
def clcl_cn_inv(mat):
    """
    Calculate the inverse condition number of a 2D matrix.
    
    Param
    --------
    mat : ndarray
        (2, 2)
    
    Ret
    ----
    cn_inv : float
    """
    sv = svdvals(mat)
    cn_inv = sv[1] / sv[0]

    return cn_inv
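
Usage (a sketch; `svdvals` from `scipy.linalg`): the result is the reciprocal of the 2-norm condition number.

import numpy as np
from scipy.linalg import svdvals

M = np.array([[3.0, 1.0], [1.0, 2.0]])
print(np.isclose(clcl_cn_inv(M), 1.0 / np.linalg.cond(M)))   # True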
Example no. 53
    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X: array-like, shape = [n_samples, n_features]
          Training data, where n_samples is the number of samples
          and n_features is the number of features.
        y: not used, present for API consistence purpose.

        Returns
        -------
        self: object
          Returns self.

        """
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support = fast_mcd(
            X, objective_function=self.objective_function,
            h=self.h, cov_computation_method=self._nonrobust_covariance)
        if self.h is None:
            self.h = int(np.ceil(0.5 * (n_samples + n_features + 1))) \
                / float(n_samples)
        if self.assume_centered:
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(
                X[raw_support], assume_centered=True)
        # get precision matrix in an optimized way
        precision = pinvh(raw_covariance)
        raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)

        return self
Example no. 54
def estimate_rank(data, tol='auto', return_singular=False, norm=True,
                  verbose=None):
    """Estimate the rank of data.

    This function will normalize the rows of the data (typically
    channels or vertices) such that non-zero singular values
    should be close to one.

    Parameters
    ----------
    data : array
        Data to estimate the rank of (should be 2-dimensional).
    tol : float | 'auto'
        Tolerance for singular values to consider non-zero in
        calculating the rank. The singular values are calculated
        in this method such that independent data are expected to
        have singular value around one. Can be 'auto' to use the
        same thresholding as ``scipy.linalg.orth``.
    return_singular : bool
        If True, also return the singular values that were used
        to determine the rank.
    norm : bool
        If True, data will be scaled by their estimated row-wise norm.
        Else data are assumed to be scaled. Defaults to True.

    Returns
    -------
    rank : int
        Estimated rank of the data.
    s : array
        If return_singular is True, the singular values that were
        thresholded to determine the rank are also returned.
    """
    if norm:
        data = data.copy()  # operate on a copy
        norms = _compute_row_norms(data)
        data /= norms[:, np.newaxis]
    s = linalg.svdvals(data)
    rank = _estimate_rank_from_s(s, tol)
    if return_singular is True:
        return rank, s
    else:
        return rank
Example no. 55
def inv_resolvent_norm(A, z, method='svd'):
    r'''Compute the reciprocal norm of the resolvent

    :param A: the input matrix as a ``numpy.array``, sparse matrix or
      ``LinearOperator`` with ``A.shape==(m,n)``, where :math:`m\geq n`.
    :param z: a complex number
    :param method: (optional) one of

      * ``svd`` (default): computes the minimal singular value of :math:`A-zI`.
        This one should be used for dense matrices.
      * ``lanczos``: computes the minimal singular value with the Lanczos
        iteration on the matrix
        :math:`\begin{bmatrix}0&A\\A^*&0\end{bmatrix}`
    '''
    if method == 'svd':
        return numpy.min(svdvals(A - z*numpy.eye(*A.shape)))
    elif method == 'lanczos':
        m, n = A.shape
        if m > n:
            raise ValueError('m > n is not allowed')
        AH = A.T.conj()

        def matvec(x):
            r'''matrix-vector multiplication

            matrix-vector multiplication with matrix
            :math:`\begin{bmatrix}0&A\\A^*&0\end{bmatrix}`
            '''
            x1 = x[:m]
            x2 = x[m:]
            ret1 = AH.dot(x2) - numpy.conj(z)*x2
            ret2 = numpy.array(A.dot(x1), dtype=complex)
            ret2[:n] -= z*x1
            return numpy.c_[ret1, ret2]
        AH_A = LinearOperator(matvec=matvec, dtype=complex,
                              shape=(m+n, m+n))

        evals = eigsh(AH_A, k=2, tol=1e-6, which='SM', maxiter=m+n+1,
                      ncv=2*(m+n),
                      return_eigenvectors=False)

        return numpy.min(numpy.abs(evals))
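
Usage sketch for the default `svd` method (for a normal matrix the value is simply the distance from z to the nearest eigenvalue):

import numpy
from scipy.linalg import svdvals

A = numpy.diag([1.0, 2.0, 3.0])
print(inv_resolvent_norm(A, 2.5 + 0j))   # ~0.5: z is 0.5 away from both 2 and 3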
Example no. 56
def spectral_norm_squared(X):
    """Computes square of the operator 2-norm (spectral norm) of X

    This corresponds to the Lipschitz constant of the gradient of the
    squared-loss function:

        w -> .5 * ||y - Xw||^2

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
      Design matrix.

    Returns
    -------
    lipschitz_constant : float
      The square of the spectral norm of X.

    """
    # On big matrices like those that we have in neuroimaging, svdvals
    # is faster than a power iteration (even when using arpack's)
    return linalg.svdvals(X)[0] ** 2
Example no. 57
def sweep_fidelity2_fsingle(kets,spaceconfig,maxN=55):
    '''
    Sweep fidelity from center to edge, the single version taking fermionic sign into consideration.

    Parameters:
        :kets: len-2 list, the kets to sweep fidelity.
        :spaceconfig: <SuperSpaceConfig>,
        :maxN: int, the maximum retained singular value for usv mode, and the maximum retained states for direct mode.
    '''
    nsite=kets[0].nsite
    #prepair kets.
    bra=kets[0].tobra(labels=[kets[0].labels[0],kets[0].labels[1]+'\''])
    ket=kets[1]
    ket>>(nsite//2-ket.l,1e-8,Inf)
    bra>>(nsite//2-bra.l,1e-8,Inf)
    l=kets[0].forder.index(0)-nsite//2 #bulk size/2.

    rlink_axis=kets[0].rlink_axis
    edge_labels_l=[bra.AL[-1].labels[rlink_axis],ket.AL[-1].labels[rlink_axis]]
    llink_axis=kets[0].llink_axis
    bra.BL[0].labels[llink_axis]+='@'
    ket.BL[0].labels[llink_axis]+='@'
    edge_labels_r=[bra.BL[0].labels[llink_axis],ket.BL[0].labels[llink_axis]]
    Ci=tensor.Tensor(diag(bra.S),labels=[edge_labels_l[0],edge_labels_r[0]])*tensor.Tensor(diag(ket.S),labels=[edge_labels_l[1],edge_labels_r[1]])
    fs=[1]
    #get the bulk overlap matrix.
    for i in range(l):
        t0=time.time()
        site_l=nsite//2-i-1
        site_r=nsite//2+i
        Ci=bra.get(site_l,attach_S='B')*(ket.get(site_l,attach_S='B')*Ci)
        Ci=Ci*bra.get(site_r,attach_S='A')*ket.get(site_r,attach_S='A')
        Ci=Ci.chorder(array([0,2,1,3]))
        t1=time.time()
        print('Update %s, Elapse->%s'%(i,t1-t0))
    S=svdvals(Ci.reshape([Ci.shape[0]*Ci.shape[1],-1]))
    f=sum(S)
    print('Get Fidelity for l = %s: %s.'%(l,f))
    return f
Example no. 58
def canonicalAngles(A, B):
    ''' Computes the canonical angles between the subspaces defined by
    the column spaces of matrix A and B.
    @param A: A 2D array (matrix) with rows > cols.
    @param B: A 2D array (matrix) with rows > cols.
    @return: The 1D array of canonical angles (Theta) between the subspaces defined by A and B.
    '''
    (r,c) = A.shape
    assert( r > c)
    
    (r,c) = B.shape
    assert( r > c)
    
    #get orthonormal bases
    #NOTE: in scipy.linalg, using the thin svd to get the orthonormal bases is MUCH FASTER
    # than using either the LA.orth(A) function or "economy" mode of QR decomposition!
    (Qa,_,_) = LA.svd(A, full_matrices=False)
    (Qb,_,_) = LA.svd(B, full_matrices=False)
    X = sp.dot(Qa.T,Qb)
    S = LA.svdvals( X )  #singular vals of Qa'*Qb
    #S = cos(Theta)
    Theta = sp.arccos(S)
    return Theta
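
Usage sketch, assuming `LA` is `scipy.linalg` and `sp` re-exports NumPy functions as in older SciPy (`import numpy as sp` works just as well): two 2-D subspaces of R^4 that share exactly one direction.

import numpy as np

A = np.array([[1., 0.], [0., 1.], [0., 0.], [0., 0.]])   # span{e1, e2}
B = np.array([[1., 0.], [0., 0.], [0., 1.], [0., 0.]])   # span{e1, e3}
print(np.round(canonicalAngles(A, B), 6))   # [0. 1.570796]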
Example no. 59
def do_svd(buckets):
    """ Given a set of frequency bins, computes the singluar values
    of the feature vector matrix.

    :returns: A vector of singular values.
    """
    l = 0
    for _, data in buckets.items():
        if len(data['scale']) > l: l = len(data['scale'])

    M = []
    l_t_new = range(l)
    for name, data in buckets.items():
        _, min_ = data['max_min']
        rms = data['scale']
        l_t = range(len(rms))
        tck = splrep(l_t, rms, s=0)
        rms_new = splev(l_t_new, tck, der=0)
        M.append((min_, rms_new))

    M = [rms for _, rms in reversed(sorted(M))]
    svd = svdvals(M)
    m, s = mean(svd), std(svd)
    return [((x - m) / s) for x in svd]
Example no. 60
def chordal_dist(M1, M2, already_orthogonal=False):
    '''
    The chordal distance is based on the canonical angles
    between subspaces. This function computes the chordal
    distance between two matrices.
    @param M1: A 2D array (matrix) with rows >= cols.
    @param M2: A 2D array (matrix) with rows >= cols.
    @param already_orthogonal: Specify True if M1 and M2
    are already orthogonal matrices, which will save on
    unnecessary computation. Otherwise, an SVD will be
    used to get an orthogonal representation of each matrix.
    '''
    (r,c) = M1.shape
    assert( r >= c)

    (r,c) = M2.shape
    assert( r >= c)
    
    if already_orthogonal:
        Q1 = M1
        Q2 = M2
    else:
        #get orthonormal bases
        #NOTE: in scipy.linalg, using the thin svd to get the orthonormal bases is MUCH FASTER
        # than using either the LA.orth(A) function or "economy" mode of QR decomposition!
        (Q1,_,_) = LA.svd(M1, full_matrices=False)
        (Q2,_,_) = LA.svd(M2, full_matrices=False)
        
    #canonical angles between subspaces
    X = sp.dot(Q1.T,Q2)
    S = LA.svdvals( X )
    #S = cos(Theta)
    Theta = sp.arccos(S)
    
    #chordal distance is ||sin(Theta)||_2
    return LA.norm( sp.sin(Theta)  )
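
A matching usage sketch for `chordal_dist` under the same import assumptions; with one shared direction and one orthogonal pair, sin(Theta) is [0, 1], so the distance is 1:

import numpy as np

M1 = np.array([[1., 0.], [0., 1.], [0., 0.]])
M2 = np.array([[1., 0.], [0., 0.], [0., 1.]])
print(chordal_dist(M1, M2, already_orthogonal=True))   # 1.0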