Example #1
def test_lobpcg(matrices):
    A_dense, A_sparse, x = matrices
    X = x[:, None]

    w_dense, v_dense = splin.lobpcg(A_dense, X)
    w, v = splin.lobpcg(A_sparse, X)

    assert_allclose(w, w_dense)
    assert_allclose(v, v_dense)
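The matrices fixture is defined elsewhere in that test suite; a minimal sketch of what it plausibly provides (the sizes, seeding and SPD construction below are assumptions) so the test can run stand-alone:

import numpy as np
import pytest
import scipy.sparse as sparse
import scipy.sparse.linalg as splin
from numpy.testing import assert_allclose

@pytest.fixture
def matrices():
    # hypothetical fixture: one SPD matrix in dense and sparse form,
    # plus a starting vector for the block iteration
    rng = np.random.default_rng(0)
    n = 100
    Q = rng.standard_normal((n, n))
    A_dense = Q @ Q.T + n * np.eye(n)  # Gram matrix plus positive shift: SPD
    A_sparse = sparse.csr_matrix(A_dense)
    x = rng.standard_normal(n)
    return A_dense, A_sparse, x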
Example #2
def laplacian_eigen(image,
                    seeds=None,
                    n_components=3,
                    beta=1,
                    divide_by_std=False, offsets=((0, 1), (1, 0))):
    """
    input : edges image 1 x C x H x W
    output: instances probability shape: C x H x W
    """
    # Pytorch Tensors to numpy
    graph = make2d_lattice_graph(size=(image.shape[0],
                                       image.shape[1]), offsets=offsets)

    edges = image2edge_weights(image, graph, beta, divide_by_std=divide_by_std)

    A = graph2adjacency(graph, edges)
    L = adjacency2laplacian(A, mode=0)
    Lu, _ = lap2lapu_bt(L, seeds)

    ml = pyamg.ruge_stuben_solver(csr_matrix(Lu))
    M = ml.aspreconditioner(cycle='V')

    ex = np.random.rand(Lu.shape[0], n_components + 1).astype(np.float32)

    eigen_values, eigen_vectors = lobpcg(Lu, ex, largest=False, M=M, tol=1e-8)
    #eigen_values, eigen_vectors = lobpcg(Lu, ex, largest=False, tol=1e-8)
    eigen_values, eigen_vectors = eigen_values[1:], eigen_vectors[:, 1:]

    eigen_vectors = pu_fill(eigen_vectors, seeds)
    return eigen_values, eigen_vectors.reshape(image.shape[0],
                                               image.shape[1],
                                               -1)
Example #3
def locally_linear_embedding(X, n_neighbors, out_dim, tol=1e-6, max_iter=200):

    #W = neighbors.kneighbors_graph(
    #   X, n_neighbors=n_neighbors, mode='distance')
    W = barycenter_kneighbors_graph(X, n_neighbors)
    print(W)
    # M = (I-W)' (I-W)
    A = eye(*W.shape, format=W.format) - W
    A = (A.T).dot(A).tocsr()

    # initial approximation to the eigenvectors
    X = np.random.rand(W.shape[0], out_dim)
    ml = smoothed_aggregation_solver(A, symmetry='symmetric')
    prec = ml.aspreconditioner()

    # compute eigenvalues and eigenvectors with LOBPCG
    eigen_values, eigen_vectors = linalg.lobpcg(A,
                                                X,
                                                M=prec,
                                                largest=False,
                                                tol=tol,
                                                maxiter=max_iter)

    index = np.argsort(eigen_values)
    return eigen_vectors[:, index], np.sum(eigen_values)
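A possible smoke test for the function above, assuming its module-level imports (numpy as np, scipy's eye, scipy.sparse.linalg as linalg, pyamg's smoothed_aggregation_solver, and barycenter_kneighbors_graph) are in place:

import numpy as np

# hypothetical usage: embed a noisy circle from 3-D down to 2-D
rng = np.random.default_rng(0)
t = rng.uniform(0, 2 * np.pi, 400)
data = np.c_[np.cos(t), np.sin(t), 0.05 * rng.standard_normal(400)]
Y, err = locally_linear_embedding(data, n_neighbors=8, out_dim=2)
print(Y.shape, err)  # (400, 2) and the sum of the smallest eigenvalues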
Example #4
def bench_lobpcg_mikota():
    print()
    print('                 lobpcg benchmark using mikota pairs')
    print('==============================================================')
    print('      shape      | blocksize |    operation   |   time   ')
    print('                                              | (seconds)')
    print('--------------------------------------------------------------')
    fmt = ' %15s |   %3d     |     %6s     | %6.2f '

    m = 10
    for n in 128, 256, 512, 1024, 2048:
        shape = (n, n)
        A, B = _mikota_pair(n)
        desired_evs = np.square(np.arange(1, m + 1))

        tt = time.perf_counter()
        X = rand(n, m)
        X = orth(X)
        LorU, lower = cho_factor(A, lower=0, overwrite_a=0)
        M = LinearOperator(shape,
                           matvec=partial(_precond, LorU, lower),
                           matmat=partial(_precond, LorU, lower))
        eigs, vecs = lobpcg(A, X, B, M, tol=1e-4, maxiter=40)
        eigs = sorted(eigs)
        elapsed = time.perf_counter() - tt
        assert_allclose(eigs, desired_evs)
        print(fmt % (shape, m, 'lobpcg', elapsed))

        tt = time.perf_counter()
        w = eigh(A, B, eigvals_only=True, subset_by_index=[0, m - 1])
        elapsed = time.perf_counter() - tt
        assert_allclose(w, desired_evs)
        print(fmt % (shape, m, 'eigh', elapsed))
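The helpers _mikota_pair and _precond are not shown above. The sketches below follow SciPy's own lobpcg benchmark code; treat the exact formulas as assumptions:

import numpy as np
from scipy.linalg import cho_solve

def _mikota_pair(n):
    # Mikota pair: the generalized eigenvalues of (A, B) are the
    # squares of the integers 1, 2, ..., n
    x = np.arange(1, n + 1)
    B = np.diag(1.0 / x)
    y = np.arange(n - 1, 0, -1)
    z = np.arange(2 * n - 1, 0, -2)
    A = np.diag(z) - np.diag(y, -1) - np.diag(y, 1)
    return A.astype(float), B.astype(float)

def _precond(LorU, lower, x):
    # apply A^{-1} through the Cholesky factor computed by cho_factor
    return cho_solve((LorU, lower), x)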
Example #5
def bench_lobpcg_mikota():
    print()
    print('                 lobpcg benchmark using mikota pairs')
    print('==============================================================')
    print('      shape      | blocksize |    operation   |   time   ')
    print('                                              | (seconds)')
    print('--------------------------------------------------------------')
    fmt = ' %15s |   %3d     |     %6s     | %6.2f '

    m = 10
    for n in 128, 256, 512, 1024, 2048:
        shape = (n, n)
        A, B = _mikota_pair(n)
        desired_evs = np.square(np.arange(1, m+1))

        tt = time.perf_counter()
        X = rand(n, m)
        X = orth(X)
        LorU, lower = cho_factor(A, lower=0, overwrite_a=0)
        M = LinearOperator(shape,
                           matvec=partial(_precond, LorU, lower),
                           matmat=partial(_precond, LorU, lower))
        eigs, vecs = lobpcg(A, X, B, M, tol=1e-4, maxiter=40)
        eigs = sorted(eigs)
        elapsed = time.perf_counter() - tt
        assert_allclose(eigs, desired_evs)
        print(fmt % (shape, m, 'lobpcg', elapsed))

        tt = time.perf_counter()
        w = eigh(A, B, eigvals_only=True, subset_by_index=[0, m - 1])
        elapsed = time.perf_counter() - tt
        assert_allclose(w, desired_evs)
        print(fmt % (shape, m, 'eigh', elapsed))
Example #6
def bench_lobpcg_sakurai():
    print()
    print('                 lobpcg benchmark sakurai et al.')
    print('==============================================================')
    print('      shape      | blocksize |    operation   |   time   ')
    print('                                              | (seconds)')
    print('--------------------------------------------------------------')
    fmt = ' %15s |   %3d     |     %6s     | %6.2f '

    m = 3
    for n in 50, 400, 2400:

        shape = (n, n)
        A, B, all_eigenvalues = _sakurai(n)
        desired_evs = all_eigenvalues[:m]

        tt = time.perf_counter()
        X = rand(n, m)
        eigs, vecs, resnh = lobpcg(A, X, B, tol=1e-6, maxiter=500,
                                   retResidualNormsHistory=1)
        w_lobpcg = sorted(eigs)
        elapsed = time.perf_counter() - tt
        yield (assert_allclose, w_lobpcg, desired_evs, 1e-7, 1e-5)
        print(fmt % (shape, m, 'lobpcg', elapsed))

        tt = time.perf_counter()
        A_dense = A.toarray()
        B_dense = B.toarray()
        w_eigh = eigh(A_dense, B_dense, eigvals_only=True, subset_by_index=[0, m - 1])
        elapsed = time.perf_counter() - tt
        yield (assert_allclose, w_eigh, desired_evs, 1e-7, 1e-5)
        print(fmt % (shape, m, 'eigh', elapsed))
Example #7
    def eigsys(self, hamiltonian: Hamiltonian,
               k: int) -> Tuple[np.ndarray, np.ndarray]:
        h_mat = hamiltonian.hamiltonian_matrix()

        if self.method == 'eigsh':
            eig, vec = eigsh(
                h_mat, k=k, which='LM', sigma=0
            )  # use shift-invert to find smallest eigenvalues quickly
        elif self.method == 'lobpcg':
            # preconditioning matrix should approximate the inverse of the hamiltonian
            # we naively construct this by taking the inverse of diagonal elements
            # and setting all others to zero. This is called the Jacobi or diagonal preconditioner.
            A = diags([1 / h_mat.diagonal()], [0],
                      dtype=hamiltonian.space.dtype).tocsc()
            precond = lambda x: A @ x
            M = LinearOperator(h_mat.shape,
                               matvec=precond,
                               matmat=precond,
                               dtype=hamiltonian.space.dtype)

            # guess for eigenvectors is also computed from random numbers
            X_approx = np.random.rand(np.prod(hamiltonian.space.grid), k)

            sol = lobpcg(h_mat, X_approx, largest=False, M=M, tol=1e-15)
            eig, vec = sol[0], sol[1]
        else:
            raise NotImplementedError(
                f"{self.method} solver has not been implemented. Use one of {self.implemented_solvers}"
            )

        return eig, vec.T
Example #8
def find_fiedler(L, x, normalized, tol, seed):
    L = csc_matrix(L, dtype=float)
    n = L.shape[0]
    if normalized:
        D = spdiags(1.0 / sqrt(L.diagonal()), [0], n, n, format="csc")
        L = D * L * D
    if method == "lanczos" or n < 10:
        # Avoid LOBPCG when n < 10 due to
        # https://github.com/scipy/scipy/issues/3592
        # https://github.com/scipy/scipy/pull/3594
        sigma, X = eigsh(L,
                         2,
                         which="SM",
                         tol=tol,
                         return_eigenvectors=True)
        return sigma[1], X[:, 1]
    else:
        X = asarray(asmatrix(x).T)
        M = spdiags(1.0 / L.diagonal(), [0], n, n)
        Y = ones(n)
        if normalized:
            Y /= D.diagonal()
        sigma, X = lobpcg(L,
                          X,
                          M=M,
                          Y=asmatrix(Y).T,
                          tol=tol,
                          maxiter=n,
                          largest=False)
        return sigma[0], X[:, 0]
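A self-contained sketch of the constraint trick used above (graph choice and sizes are arbitrary): the Y argument keeps the lobpcg iterates B-orthogonal to the constant nullvector of the Laplacian, so the smallest returned pair is the Fiedler pair.

import numpy as np
import networkx as nx
from scipy.sparse.linalg import lobpcg

G = nx.path_graph(50)
L = nx.laplacian_matrix(G).astype(float)
n = L.shape[0]
rng = np.random.default_rng(0)
X = rng.standard_normal((n, 1))
Y = np.ones((n, 1))  # deflate the constant eigenvector
sigma, V = lobpcg(L, X, Y=Y, largest=False, tol=1e-8, maxiter=n)
print("algebraic connectivity ~", sigma[0])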
Example #9
def spectral_clustering(road_map=None, a=None, use_ncut=False, num_clusters=2):
    print("Building adjacency matrix")
    if a is None:
        a = build_adjacency_matrix(road_map)

    print("Computing laplacian")
    l = build_laplacian(a, normalize=use_ncut)

    print("Spectral embedding")
    #e_vals, e_vects = eigsh(l, k=num_clusters, which='SM', tol=0.01, sigma=2.01)
    X = np.random.rand(l.shape[0], num_clusters + 1)
    e_vals, e_vects = lobpcg(l, X, tol=1e-15, largest=False, maxiter=2000)

    embedded_data = e_vects[:, 1:]

    print(e_vals)

    print("Clustering")
    centroid, label, inertia = k_means(embedded_data, num_clusters)

    for i in range(len(label)):
        road_map.nodes[i].region_id = label[i]
Example #10
def eig_multi(A, B=None, n_components=2, tol=1E-12, random_state=None):
    """Solves the generalized Eigenvalue problem:
    A x = lambda B x using the multigrid method.
    Works well with very large matrices but there are some
    instabilities sometimes.
    """
    random_state = check_random_state(random_state)
    # convert matrix A and B to float
    A = A.astype(np.float64)

    if B is not None:
        B = B.astype(np.float64)

    # import the solver
    ml = smoothed_aggregation_solver(check_array(A, accept_sparse=['csr']))

    # preconditioner
    M = ml.aspreconditioner()

    n_nodes = A.shape[0]
    n_find = min(n_nodes, 5 + 2 * n_components)
    # initial guess for X
    X = random_state.rand(n_nodes, n_find)

    # solve using the lobpcg algorithm
    eigVals, eigVecs = lobpcg(A, X, M=M, B=B, tol=tol, largest=False)

    sort_order = np.argsort(eigVals)
    eigVals = eigVals[sort_order]
    eigVecs = eigVecs[:, sort_order]

    eigVals = eigVals[:n_components]
    eigVecs = eigVecs[:, :n_components]
    return eigVals, eigVecs
Example #11
def time_sakurai(self, n, solver):
    m = 3
    if solver == 'lobpcg':
        X = rand(n, m)
        eigs, vecs, resnh = lobpcg(self.A, X, self.B, tol=1e-6, maxiter=500,
                                   retResidualNormsHistory=1)
    else:
        eigh(self.A_dense, self.B_dense, eigvals_only=True, subset_by_index=[0, m - 1])
Example #12
def time_sakurai(self, n, solver):
    m = 3
    if solver == 'lobpcg':
        X = rand(n, m)
        eigs, vecs, resnh = lobpcg(self.A, X, self.B, tol=1e-6, maxiter=500,
                                   retResidualNormsHistory=1)
    else:
        w_eigh = eigh(self.A_dense, self.B_dense, eigvals_only=True, subset_by_index=[0, m - 1])
Example #13
def gGroundSystem(matH):
    '''
    This function is just a shorthand for getting the ground state `vecGS` and energy `rEg`
    of an input Hamiltonian `matH`.
    '''

    vecGuess = np.random.rand(matH.shape[0], 1)
    rEg, vecGS = slin.lobpcg(matH, vecGuess, maxiter=100, largest=False)
    return vecGS.T[0], rEg[0]
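A possible call, using a random sparse symmetric matrix as a stand-in Hamiltonian (blocksize-1 lobpcg with no preconditioner converges slowly, so expect a convergence warning for harder matrices):

import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as slin

rng = np.random.default_rng(0)
H = sparse.random(300, 300, density=0.02, random_state=0)
matH = (H + H.T).tocsr()  # symmetrize so lobpcg's assumptions hold
vecGS, rEg = gGroundSystem(matH)
print("ground-state energy:", rEg)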
Example #14
def solveEigenspace(x, m=2):
    k = numpy.random.uniform(.5, 1.0, (len(neighborhoods), m))

    for ix in range(20):
        t0 = time.time()
        res = lobpcg(x, k, largest=False, maxiter=50)

        k = res[1]

    return res
Example #15
def solveEigenspace(x, m=2):
    k = numpy.random.uniform(.5, 1.0, (len(neighborhoods), m))

    for ix in range(20):
        t0 = time.time()
        res = lobpcg(x, k, largest=False, maxiter=50)

        k = res[1]

    return res
Example #16
def test_lobpcg(read_guess=True, tol=1e-5):
    X = orth(randn(N, m))
    if read_guess:
        X = x0

    try:
        λ, v = lobpcg(A, X, largest=False, M=P, maxiter=100, tol=tol)
        assert np.max(np.abs(λref - λ)) < tol
    except np.linalg.LinAlgError as e:
        print("ERROR: ", str(e))
Example #17
def time_mikota(self, n, solver):
    m = 10
    if solver == 'lobpcg':
        X = rand(n, m)
        X = orth(X)
        LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
        M = LinearOperator(self.shape,
                           matvec=partial(_precond, LorU, lower),
                           matmat=partial(_precond, LorU, lower))
        eigs, vecs = lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
    else:
        w = eigh(self.A, self.B, eigvals_only=True, subset_by_index=[0, m - 1])
Example #18
def time_mikota(self, n, solver):
    m = 10
    if solver == 'lobpcg':
        X = rand(n, m)
        X = orth(X)
        LorU, lower = cho_factor(self.A, lower=0, overwrite_a=0)
        M = LinearOperator(self.shape,
                           matvec=partial(_precond, LorU, lower),
                           matmat=partial(_precond, LorU, lower))
        eigs, vecs = lobpcg(self.A, X, self.B, M, tol=1e-4, maxiter=40)
    else:
        eigh(self.A, self.B, eigvals_only=True, subset_by_index=[0, m - 1])
Example #19
def fiedler(adj_list, plot=False, fn="FiedlerPlots", n_fied=2):
    """calculate the first fiedler vector of a graph adjascancy list and optionally write associated plots to file.

    Takes:
    adj_list:
    An Nx2 nested list of ints of the form:
    [[node1,node2],
    ...]
    Representing the adjascancy list.

    plot=False: make plots or not.
    fn="FiedlerPlots": filename to prepend to the plot png file names
    n_fied=2: the number of fiedler vectors to calculate (values above 2 will not be output)

    Returns a Dictionary of the form:



    {"f1": the first fiedler vector,
    "f2": (if caclulated) the second fideler vector
    "d": the node degrees,
    "r1": the rank of each node in the first fiedler vector
    "r2": the rank of each node in the second fiedler vector}


    """

    A = graph_laplacian(adj_list)

    # construct preconditioner
    ml = smoothed_aggregation_solver(A, coarse_solver='pinv2', max_coarse=10)
    M = ml.aspreconditioner()

    # solve for lowest two modes: constant vector and Fiedler vector
    X = numpy.random.rand(A.shape[0], n_fied + 1)
    (eval, evec, res) = lobpcg(A, X, M=None, tol=1e-12, largest=False,
                               verbosityLevel=0, retResidualNormsHistory=True)

    if plot:
        doPlots(evec[:, 1], evec[:, 2], A.diagonal(), adj_list, fn)

    out = {
        "f1": list(evec[:, 1]),
        "d": list(A.diagonal()),
        "r1": [int(i) for i in list(numpy.argsort(numpy.argsort(evec[:, 1])))]
    }
    if n_fied > 1:
        out["f2"] = list(evec[:, 2])
        out["r2"] = [
            int(i) for i in list(numpy.argsort(numpy.argsort(evec[:, 2])))
        ]
    return out
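graph_laplacian is not shown; a plausible stand-in that builds the combinatorial Laplacian L = D - A from the Nx2 edge list (undirected, duplicate edges collapsed):

import numpy
import scipy.sparse

def graph_laplacian(adj_list):
    edges = numpy.asarray(adj_list)
    n = int(edges.max()) + 1
    data = numpy.ones(len(edges))
    A = scipy.sparse.coo_matrix((data, (edges[:, 0], edges[:, 1])), shape=(n, n))
    A = ((A + A.T) > 0).astype(float)  # symmetrize and drop duplicates
    L = scipy.sparse.diags(numpy.asarray(A.sum(axis=1)).ravel()) - A
    return L.tocsr()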
Example #20
def ess(operator, M, k=2):
    r"""
    An efficient sampling set selection method for bandlimited graph signals [1]_.

    Parameters
    ----------
    operator: SparseTensor
        The chosen variation operators, e.g., graph normalized Laplacian.
    M:  int
        The number of desired sampled nodes.
    k:  int
        The proxy order. Refer to the literature for details.

    Returns
    -------
    S:  list
        A list containing sampled nodes with the sampling order

    References
    ----------
    .. [1]  Aamir Anis, et al., “Efficient sampling set selection for bandlimited graph
            signals using graph spectral proxies,” IEEE TSP, 2016.


    """
    import scipy.sparse.linalg as splin

    # add GPU support after cp.setdiff1d is implemented
    # dt, dv, density, on_gpu = get_ddd(operator)
    # xp, xcipy, xsplin = get_array_module(on_gpu)

    L = to_scipy(operator)
    N = L.shape[-1]
    LtL = L.T**k * L**k
    V = np.arange(N)
    S = list()
    while len(S) < M:
        Sc = np.setdiff1d(V, S)
        length = len(Sc)
        if length == 1:
            S.append(Sc[0])
            break
        reduced = LtL[np.ix_(Sc, Sc)]

        sigma, psi = splin.lobpcg(reduced,
                                  X=np.random.rand(length, 1),
                                  largest=False)
        psi = psi.ravel()
        v = Sc[np.argmax(np.abs(psi)).item()]
        S.append(v)
    return S
Example #21
  def solve_on_coarse_level(self):

    if comm.rank == 0:
      if self.verbosity >= 2:
        print(pid + "  Solving on coarse level")

      timer = Timer("Coarse level solution")

      if self.problem.switch_matrices_on_coarse_level:
        A = self.B_coarse
        B = self.A_coarse
        largest = True
        which = 'LM'
      else:
        A = self.A_coarse
        B = self.B_coarse
        largest = False
        which = 'SM'

      # Set initial approximation
      self.v_coarse.fill(0.0)
      self.v_coarse[0] = 1.0

      if self.use_lobpcg_on_coarse_level:
        if self.precond_lobpcg_by_ml:
          if self.update_lobpcg_prec or self.M is None:
            if self.verbosity >= 3:
              print0(pid+"    Creating coarse level preconditioner")

            ml = smoothed_aggregation_solver(A)
            self.M = ml.aspreconditioner()

        w, v, h = lobpcg(A, self.v_coarse, B, self.M, tol=self.coarse_level_tol, maxiter=self.coarse_level_maxit,
                         largest=largest, verbosityLevel=self.lobpcg_verb, retResidualNormsHistory=True)
      else:
        if self.problem.sym:
          w, v = eigsh(A, 1, B, which=which, v0=self.v_coarse,
                       ncv=self.coarse_level_num_ritz_vec, maxiter=self.coarse_level_maxit, tol=self.coarse_level_tol)
        else:
          w, v = eigs(A, 1, B, which=which, v0=self.v_coarse,
                      ncv=self.coarse_level_num_ritz_vec, maxiter=self.coarse_level_maxit, tol=self.coarse_level_tol)

      self.lam = w[0]
      self.v_coarse = v[0]

      try:
        self.num_it_coarse += len(h)
      except NameError:
        pass  # There seems to be no way to obtain number of iterations for eigs/eigsh
Example #22
        def iter_cond_number_lobpcg(L, invPL):
            k = 20
            np.random.seed(0)
            X = np.random.rand(n, k)
            # compute the smallest eigenvalues:
            min_w_invPL_L, temp = SSLA.lobpcg(L * invPL * invPL * L,
                                              X,
                                              maxiter=20,
                                              tol=1e-3,
                                              largest=False,
                                              verbosityLevel=1)
            # compute the largest eigenvalues:
            max_w_invPL_L, temp = SSLA.lobpcg(opt.eyelo(n),
                                              X,
                                              B=L * invPL * invPL * L,
                                              maxiter=20,
                                              tol=1e-3,
                                              largest=False,
                                              verbosityLevel=1)
            max_w_invPL_L = 1 / max_w_invPL_L
            assert (max_w_invPL_L[0] >= min_w_invPL_L[::-1][0]
                    and min_w_invPL_L[0] <= min_w_invPL_L[::-1][0])

            return np.sqrt(max_w_invPL_L[0] / min_w_invPL_L[0])
Example #23
def fiedler(adj_list, plot=False, fn="FiedlerPlots", n_fied=2):
    """calculate the first fiedler vector of a graph adjacency list and optionally write associated plots to file.

    Takes:
    adj_list:
    An Nx2 nested list of ints of the form:
    [[node1,node2],
    ...]
    Representing the adjacency list.

    plot=False: make plots or not.
    fn="FiedlerPlots": filename to prepend to the plot png file names
    n_fied=2: the number of fiedler vectors to calculate (values above 2 will not be output)

    Returns a Dictionary of the form:

    {"f1": the first fiedler vector,
    "f2": (if calculated) the second fiedler vector,
    "d": the node degrees,
    "r1": the rank of each node in the first fiedler vector,
    "r2": the rank of each node in the second fiedler vector}
    """

    A = graph_laplacian(adj_list)

    # construct preconditioner
    ml = smoothed_aggregation_solver(A, coarse_solver='pinv2', max_coarse=10)
    M = ml.aspreconditioner()

    # solve for lowest two modes: constant vector and Fiedler vector
    X = numpy.random.rand(A.shape[0], n_fied + 1)
    (eval, evec, res) = lobpcg(A, X, M=None, tol=1e-12, largest=False,
                               verbosityLevel=0, retResidualNormsHistory=True)

    if plot:
        doPlots(evec[:, 1], evec[:, 2], A.diagonal(), adj_list, fn)

    out = {
        "f1": list(evec[:, 1]),
        "d": list(A.diagonal()),
        "r1": [int(i) for i in list(numpy.argsort(numpy.argsort(evec[:, 1])))]
    }
    if n_fied > 1:
        out["f2"] = list(evec[:, 2])
        out["r2"] = [
            int(i) for i in list(numpy.argsort(numpy.argsort(evec[:, 2])))
        ]
    return out
Example #24
File: lle.py  Project: whille/mylab
def locally_linear_embedding(X, n_neighbors, out_dim, tol=1e-6, max_iter=200):
    W = neighbors.kneighbors_graph(X, n_neighbors=n_neighbors, mode="barycenter")

    # M = (I-W)' (I-W)
    A = eye(*W.shape, format=W.format) - W
    A = (A.T).dot(A).tocsr()

    # initial approximation to the eigenvectors
    X = np.random.rand(W.shape[0], out_dim)
    ml = smoothed_aggregation_solver(A, symmetry="symmetric")
    prec = ml.aspreconditioner()

    # compute eigenvalues and eigenvectors with LOBPCG
    eigen_values, eigen_vectors = linalg.lobpcg(A, X, M=prec, largest=False, tol=tol, maxiter=max_iter)

    index = np.argsort(eigen_values)
    return eigen_vectors[:, index], np.sum(eigen_values)
Example #25
def locally_linear_embedding(W, out_dim, i_dim, j_dim, tol=1e-5, max_iter=2000):
    # M = (I-W)' (I-W)
    A = eye(*W.shape, format=W.format) - W
    A = (A.T).dot(A).tocsr()

    # initial approximation to the eigenvectors - use coords
    X = np.random.rand(W.shape[0], out_dim)
    X[:, 1] = np.arange(W.shape[0]) % j_dim
    X[:, 0] = (np.arange(W.shape[0], dtype=int) // j_dim) % i_dim
    ml = smoothed_aggregation_solver(A, symmetry='symmetric')
    prec = ml.aspreconditioner()

    # compute eigenvalues and eigenvectors with LOBPCG
    eigen_values, eigen_vectors = linalg.lobpcg(
        A, X, M=prec, largest=False, tol=tol, maxiter=max_iter)

    index = np.argsort(eigen_values)
    return eigen_vectors[:, index], np.sum(eigen_values)
Example #26
def spectral_partition(A):
    ml = smoothed_aggregation_solver(A, coarse_solver='pinv2', max_coarse=100,
                                     smooth=None, strength=None)
    print(ml)

    M = ml.aspreconditioner()

    X = np.random.rand(A.shape[0], 2)
    (evals, evecs, res) = lobpcg(A, X, M=M, tol=1e-12, largest=False,
                                 verbosityLevel=0, retResidualNormsHistory=True, maxiter=200)

    fiedler = evecs[:,1]
    vmed = np.median(fiedler)
    v = np.zeros((A.shape[0],))
    K = np.where(fiedler<=vmed)[0]
    v[K]=-1
    K = np.where(fiedler>vmed)[0]
    v[K]=1
    return v, res
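A possible driver for the function above, using pyamg's gallery Poisson matrix as the graph Laplacian (the grid size is arbitrary; note the coarse_solver='pinv2' option in the example assumes an older pyamg/scipy):

import numpy as np
from pyamg.gallery import poisson

A = poisson((40, 40), format='csr')  # 2-D grid Laplacian
v, res = spectral_partition(A)
print("partition sizes:", np.sum(v < 0), np.sum(v > 0))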
Example #27
def eigen_solve(A, mode):
    """solve for eigenpairs using a specified method"""
    if mode == 'arpack_eigsh':
        return arpack.eigsh(A, largest=True, tol=1e-4)
    elif mode == 'arpack_eigsh_mkl':
        return arpack.eigsh_mkl(A, largest=True, tol_dps=4)
    elif mode == 'torch_eigh':
        return torch.linalg.eigh(A)
    elif mode == 'torch_lobpcg':
        return torch.lobpcg(A, k=1, largest=True, tol=1e-4)
    elif mode == 'scipy_eigsh':
        # For some reason scipy's eigsh requires slightly smaller tolerance
        # (1e-5 vs 1e-4) to reach equivalent accuracy
        return splinalg.eigsh(A.numpy(), k=1, which="LA", tol=1e-5)
    elif mode == 'scipy_lobpcg':
        X = A.new_empty(A.size(0), 1).normal_()
        return splinalg.lobpcg(A.numpy(), X.numpy(), largest=True, tol=1e-4)
    else:
        raise ValueError
Example #28
def lobpcg_randEVD(A, k, sketching_type, s):
    """
    Approximate top k eigenvalue and corresponding right eigenvector computation 
    for At * A using LOBPCG over preconditioner attained through sketching.
    
    Parameters
    ----------
    A              : m x n matrix, expected tall-and-thin (m > n)
    k              : number of top eigenvalues
    sketching_type : sketching transformation type (row number reducer)
    s              : number of rows in the sketched matrix
    
    Returns
    --------
    lambdas : top k eigenvalues of At * A
    Vt      : top k right singular vectors At * A (as rows of Vt)
    
    Notes/TODO
    -----------
    - ordering eigenvalues in lobpcg
    - opt out the option of using a generic symmetric eigenvalue solver internally
    
    """
    m, n = A.shape
    assert m > n
    assert n >= k

    if s is None:
        s = 4 * n
    assert s < m

    sketch = sketching_type(m, s)
    B = sketch * A
    U, Sigma, Vt = linalg.svd(B, full_matrices=False)

    Q, R = linalg.qr(B)  # alternatively: connection of R to V, Sigma

    Aop = symmetrizer(A)
    X = Vt[:k, :].T
    Rop = upper_triangular_preconditioner_symmetrizer(R)
    lambdas, Vt = lobpcg(Aop, X, M=Rop, largest=True)
    return lambdas, Vt
Example #29
def lobpcg_randEVD(A, k, sketching_type, s):
    '''
    Approximate top k eigenvalue and corresponding right eigenvector computation 
    for At * A using LOBPCG over preconditioner attained through sketching.
    
    Parameters
    ----------
    A              : m x n matrix, expected tall-and-thin (m > n)
    k              : number of top eigenvalues
    sketching_type : sketching transformation type (row number reducer)
    s              : number of rows in the sketched matrix
    
    Returns
    --------
    lambdas : top k eigenvalues of At * A
    Vt      : top k right singular vectors At * A (as rows of Vt)
    
    Notes/TODO
    -----------
    - ordering eigenvalues in lobpcg
    - opt out the option of using a generic symmetric eigenvalue solver internally
    
    '''
    m, n = A.shape
    assert m > n
    assert n >= k

    if s is None:
        s = 4 * n
    assert s < m

    sketch = sketching_type(m, s)
    B = sketch * A
    U, Sigma, Vt = linalg.svd(B, full_matrices=False)

    Q, R = linalg.qr(B)  # alternatively: connection of R to V, Sigma

    Aop = symmetrizer(A)
    X = Vt[:k, :].T
    Rop = upper_triangular_preconditioner_symmetrizer(R)
    lambdas, Vt = lobpcg(Aop, X, M=Rop, largest=True)
    return lambdas, Vt
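Both variants above rely on helpers that are not shown. One plausible reading of symmetrizer is a matrix-free LinearOperator for the Gram matrix At * A; the R-based preconditioner helper would wrap triangular solves similarly:

from scipy.sparse.linalg import LinearOperator

def symmetrizer(A):
    # hypothetical helper: apply At * A without forming it explicitly
    m, n = A.shape
    return LinearOperator((n, n),
                          matvec=lambda x: A.T @ (A @ x),
                          matmat=lambda X: A.T @ (A @ X),
                          dtype=A.dtype)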
Example #30
def find_fiedler(L, x, normalized, tol):
    L = csc_matrix(L, dtype=float)
    n = L.shape[0]
    if normalized:
        D = spdiags(1.0 / sqrt(L.diagonal()), [0], n, n, format="csc")
        L = D * L * D
    if method == "lanczos" or n < 10:
        # Avoid LOBPCG when n < 10 due to
        # https://github.com/scipy/scipy/issues/3592
        # https://github.com/scipy/scipy/pull/3594
        sigma, X = eigsh(L, 2, which="SM", tol=tol, return_eigenvectors=True)
        return sigma[1], X[:, 1]
    else:
        X = asarray(asmatrix(x).T)
        M = spdiags(1.0 / L.diagonal(), [0], n, n)
        Y = ones(n)
        if normalized:
            Y /= D.diagonal()
        sigma, X = lobpcg(L, X, M=M, Y=asmatrix(Y).T, tol=tol, maxiter=n, largest=False)
        return sigma[0], X[:, 0]
Example #31
def StrongConvexity(loss, output, model, lambda_max=None):
    from scipy.sparse.linalg import eigsh
    from scipy.sparse.linalg import LinearOperator, lobpcg

    # SmoothnessAbsHessian returns the maximum eigenvalue of the absolute Hessian (i.e. the square root of the maximum of the squared Hessian)
    if lambda_max is None:
        lambda_max = SmoothnessAbsHessian(loss, output, model)

    ndim = len(torch.nn.utils.parameters_to_vector(model.parameters()))
    batch = output.size(0)

    def NumpyWrapper(vp):
        vp = vector_to_parameter_list(
            torch.tensor(np.squeeze(vp), device='cuda').float(),
            model.parameters())
        Hv = HessianSqrVectorProduct(loss, list(model.parameters()), vp)
        Hv = torch.cat([torch.flatten(v) for v in Hv]) / batch
        return Hv.cpu().numpy()

    A = LinearOperator((ndim, ndim), matvec=NumpyWrapper)
    # use shift-invert Lanczos ('LM' with sigma) to find the eigenvalue closest to lambda_max**2
    lambda_min, vs = eigsh(A,
                           1,
                           which='LM',
                           return_eigenvectors=1,
                           tol=1e-2,
                           sigma=lambda_max**2)
    l, v = lobpcg(A,
                  vs,
                  B=None,
                  M=None,
                  Y=None,
                  tol=None,
                  maxiter=50,
                  largest=False,
                  verbosityLevel=0,
                  retLambdaHistory=False,
                  retResidualNormsHistory=False)

    return np.sqrt(l)
Example #32
def spectral(A, eval=None, evec=None, plot=False, method='lobpcg'):

    # solve for lowest two modes: constant vector and Fiedler vector
    X = numpy.random.rand(A.shape[0], 2)

    if method == 'lobpcg':
        # specify lowest eigenvector and orthonormalize fiedler against it
        X[:, 0] = numpy.ones((A.shape[0], ))
        X = numpy.linalg.qr(X, mode='reduced')[0]

        # construct preconditioner
        ml = smoothed_aggregation_solver(A, coarse_solver='pinv2')
        M = ml.aspreconditioner()

        (eval,evec,res) = lobpcg(A, X, M=M, tol=1e-5, largest=False, \
              verbosityLevel=0, retResidualNormsHistory=True, maxiter=200)
    elif method == 'tracemin':
        res = []
        evec = tracemin_fiedler(A, residuals=res, tol=1e-5)
        evec[:, 1] = rqi(A, evec[:, 1], k=3)[1]
    else:
        raise ValueError('Unknown method')

    # use the median of fiedler, as the separator
    fiedler = evec[:, 1]
    vmed = numpy.median(fiedler)
    P1 = numpy.where(fiedler <= vmed)[0]
    P2 = numpy.where(fiedler > vmed)[0]

    if plot is True:
        from matplotlib.pyplot import semilogy, figure, show, title, xlabel, ylabel
        figure()
        semilogy(res)
        xlabel('Iteration')
        ylabel('Residual norm')
        title('Spectral convergence history')
        show()

    return P1, P2, fiedler
Example #33
def spectral(A, eval=None, evec=None, plot=False, method='lobpcg'):

    # solve for lowest two modes: constant vector and Fiedler vector
    X = numpy.random.rand(A.shape[0], 2)

    if method == 'lobpcg':
        # specify lowest eigenvector and orthonormalize fiedler against it
        X[:, 0] = numpy.ones((A.shape[0], ))
        X = numpy.linalg.qr(X, mode='reduced')[0]

        # construct preconditioner
        ml = smoothed_aggregation_solver(A, coarse_solver='pinv2')
        M = ml.aspreconditioner()

        (eval, evec, res) = lobpcg(A, X, M=M, tol=1e-5, largest=False,
                                   verbosityLevel=0, retResidualNormsHistory=True, maxiter=200)
    elif method == 'tracemin':
        res = []
        evec = tracemin_fiedler(A, residuals=res, tol=1e-5)
        evec[:, 1] = rqi(A, evec[:, 1], k=3)[1]
    else:
        raise ValueError('Unknown method')

    # use the median of fiedler, as the separator
    fiedler = evec[:, 1]
    vmed = numpy.median(fiedler)
    P1 = numpy.where(fiedler <= vmed)[0]
    P2 = numpy.where(fiedler > vmed)[0]

    if plot is True:
        from matplotlib.pyplot import semilogy, figure, show, title, xlabel, ylabel
        figure()
        semilogy(res)
        xlabel('Iteration')
        ylabel('Residual norm')
        title('Spectral convergence history')
        show()

    return P1, P2, fiedler
Example #34
def eig_multi(A, B=None, n_components=2, tol=1E-12, random_state=None):
    """Solves the generalized Eigenvalue problem:
    A x = lambda B x using the multigrid method.
    Works well with very large matrices but there are some
    instabilities sometimes.
    """
    random_state = check_random_state(random_state)
    # convert matrix A and B to float
    A = A.astype(np.float64)

    if B is not None:
        B = B.astype(np.float64)

    # import the solver
    ml = smoothed_aggregation_solver(check_array(A, accept_sparse = ['csr']))

    # preconditioner
    M = ml.aspreconditioner()

    n_nodes = A.shape[0]
    n_find = min(n_nodes, 5 + 2*n_components)
    # initial guess for X
    X = random_state.rand(n_nodes, n_find)

    # solve using the lobpcg algorithm
    eigVals, eigVecs = lobpcg(A, X, M=M, B=B,
                              tol=tol,
                              largest=False)

    sort_order = np.argsort(eigVals)
    eigVals = eigVals[sort_order]
    eigVecs = eigVecs[:, sort_order]

    eigVals = eigVals[:n_components]
    eigVecs = eigVecs[:, :n_components]
    return eigVals, eigVecs
Example #35
def main_fisher_diag(l, neigs=0):

    logging.info('main_fisher - new UbercalModel')
    model = UbercalModel(l)
    logging.info('building fisher matrix')
    model.pars['zps'].fix(0, 0.0)
    v, J = model(jac=True)
    #    J.data /= 0.01**2
    N, n = J.shape
    H = J.T * J

    logging.info('cholesky factorization')
    fact = cholmod.cholesky(H)

    logging.info('cholesky inverse')

    def mv(x):
        return fact.solve_A(x)

    OPinv = linalg.LinearOperator(H.shape, mv)

    e, v = None, None
    if neigs > 0:
        logging.info('extracting smallest eigenvals/eigenvects')
        #    X = np.random.random((n,3))
        X = np.random.random(neigs * 5288154)
        X = X.reshape((-1, neigs))
        tol = 1.E-5
        e, v = linalg.lobpcg(H,
                             X,
                             largest=False,
                             verbosityLevel=2,
                             maxiter=100,
                             M=OPinv)

    return H, e, v, model, fact
Example #36
def bench_lobpcg_sakurai():
    print()
    print('                 lobpcg benchmark sakurai et al.')
    print('==============================================================')
    print('      shape      | blocksize |    operation   |   time   ')
    print('                                              | (seconds)')
    print('--------------------------------------------------------------')
    fmt = ' %15s |   %3d     |     %6s     | %6.2f '

    m = 3
    for n in 50, 400, 2400:

        shape = (n, n)
        A, B, all_eigenvalues = _sakurai(n)
        desired_evs = all_eigenvalues[:m]

        tt = time.perf_counter()
        X = rand(n, m)
        eigs, vecs, resnh = lobpcg(A,
                                   X,
                                   B,
                                   tol=1e-6,
                                   maxiter=500,
                                   retResidualNormsHistory=1)
        w_lobpcg = sorted(eigs)
        elapsed = time.perf_counter() - tt
        yield (assert_allclose, w_lobpcg, desired_evs, 1e-7, 1e-5)
        print(fmt % (shape, m, 'lobpcg', elapsed))

        tt = time.perf_counter()
        A_dense = A.toarray()
        B_dense = B.toarray()
        w_eigh = eigh(A_dense, B_dense, eigvals_only=True, subset_by_index=[0, m - 1])
        elapsed = time.perf_counter() - tt
        yield (assert_allclose, w_eigh, desired_evs, 1e-7, 1e-5)
        print(fmt % (shape, m, 'eigh', elapsed))
Example #37
def eigen_decomposition(G, n_components=8, eigen_solver=None,
                        random_state=None, eigen_tol=0.0,
                        drop_first=True, largest=True):
    """
    G : 2d numpy/scipy array. Potentially sparse.
        The matrix to find the eigendecomposition of 
    n_components : integer, optional
        The number of eigenvectors to return 

    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        auto : algorithm will attempt to choose the best method for input data
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        lobpcg : Locally Optimal Block Preconditioned Conjugate Gradient Method.
            a preconditioned eigensolver for large symmetric positive definite 
            (SPD) generalized eigenproblems.
        amg : AMG requires pyamg to be installed. It can be faster on very large, 
            sparse problems, but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition when using arpack eigen_solver
    
    Returns
    -------
    lambdas, diffusion_map : eigenvalues, eigenvectors 
    """
    n_nodes = G.shape[0]
    if eigen_solver is None:
        eigen_solver = 'auto'
    elif eigen_solver not in eigen_solvers:
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         " Should be one of: '%s'"
                         % (eigen_solver, eigen_solvers))
    if eigen_solver == 'auto':
        if G.shape[0] > 200:
            eigen_solver = 'arpack'
        else:
            eigen_solver = 'dense'
    
    # Check eigen_solver method
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")
    # Check input values
    if not isinstance(largest, bool):
        raise ValueError("largest should be True if you want largest eigenvalues otherwise False")
    random_state = check_random_state(random_state)
    if drop_first:
        n_components = n_components + 1     
    # Check for symmetry
    is_symmetric = _is_symmetric(G)
    # Convert G to best type for eigendecomposition
    if sparse.issparse(G):
        if G.getformat() != 'csr':
            G = G.tocsr()
    G = G.astype(np.float64)
    
    if ((eigen_solver == 'lobpcg') and (n_nodes < 5 * n_components + 1)):
        warnings.warn("lobpcg has problems with small number of nodes. Using dense eigh")
        eigen_solver = 'dense'
        
    # Try Eigen Methods:
    if eigen_solver == 'arpack':
        if is_symmetric:
            if largest:
                which = 'LM'
            else:
                which = 'SM'
            lambdas, diffusion_map = eigsh(G, k=n_components, which=which,tol=eigen_tol)
        else:
            if largest:
                which = 'LR'
            else:
                which = 'SR'
            lambdas, diffusion_map = eigs(G, k=n_components, which=which,tol=eigen_tol)
        lambdas = np.real(lambdas)         
        diffusion_map = np.real(diffusion_map)
    elif eigen_solver == 'amg':
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        if not sparse.issparse(G):
            warnings.warn("AMG works better for sparse matrices")
        # Use AMG to get a preconditioner and speed up the eigenvalue problem.
        ml = smoothed_aggregation_solver(check_array(G, accept_sparse = ['csr']))
        M = ml.aspreconditioner()
        n_find = min(n_nodes, 5 + 2*n_components)
        X = random_state.rand(n_nodes, n_find)
        X[:, 0] = (G.diagonal()).ravel()
        lambdas, diffusion_map = lobpcg(G, X, M=M, largest=largest)   
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == "lobpcg":
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        n_find = min(n_nodes, 5 + 2*n_components)
        X = random_state.rand(n_nodes, n_find)
        lambdas, diffusion_map = lobpcg(G, X, largest=largest)
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == 'dense':
        if sparse.isspmatrix(G):
            G = G.todense()
        if is_symmetric:
            lambdas, diffusion_map = eigh(G)
        else:
            lambdas, diffusion_map = eig(G)
        if largest:# eigh always returns eigenvalues in ascending order
            lambdas = lambdas[::-1] # reverse order the e-values
            diffusion_map = diffusion_map[:, ::-1] # reverse order the vectors
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    return (lambdas, diffusion_map)
Example #38
def eigs_lobpcg(A,
                k,
                *,
                B=None,
                v0=None,
                which=None,
                return_vecs=True,
                sigma=None,
                isherm=True,
                P=None,
                sort=True,
                **lobpcg_opts):
    """Interface to scipy's lobpcg eigensolver, which can be good for
    generalized eigenproblems with matrix-free operators. Seems to a be a bit
    innacurate though (e.g. on the order of ~ 1e-6 for eigenvalues). Also only
    takes real, symmetric problems, targeting smallest eigenvalues (though
    scipy will soon have complex support, and its easy to add oneself).

    Note that the slepc eigensolver also has a lobpcg backend
    (``EPSType='lobpcg'``) which accepts complex input and is more accurate -
    though seems slower.

    Parameters
    ----------
    A : array_like, sparse_matrix, LinearOperator or callable
        The operator to solve for.
    k : int
        Number of eigenpairs to return
    B : array_like, sparse_matrix, LinearOperator or callable, optional
        If given, the RHS operator (which should be positive) defining a
        generalized eigen problem.
    v0 : array_like (d, k), optional
        The initial subspace to iterate with.
    which : {'SA', 'LA'}, optional
        Find the smallest or largest eigenvalues.
    return_vecs : bool, optional
        Whether to return the eigenvectors found.
    P : array_like, sparse_matrix, LinearOperator or callable, optional
        Perform the eigensolve in the subspace defined by this projector.
    sort : bool, optional
        Whether to ensure the eigenvalues are sorted in ascending value.
    lobpcg_opts
        Supplied to :func:`scipy.sparse.linalg.lobpcg`.

    Returns
    -------
    lk : array_like (k,)
        The eigenvalues.
    vk : array_like (d, k)
        The eigenvectors, if `return_vecs=True`.

    See Also
    --------
    eigs_scipy, eigs_numpy, eigs_slepc
    """
    if not isherm:
        raise ValueError("lobpcg can only solve symmetric problems.")

    if sigma is not None:
        raise ValueError("lobpcg can only solve extremal eigenvalues.")

    # remove invalid options for lobpcg
    lobpcg_opts.pop('ncv', None)
    lobpcg_opts.pop('EPSType', None)

    # convert some arguments and defaults
    lobpcg_opts.setdefault('maxiter', 30)
    if lobpcg_opts['maxiter'] is None:
        lobpcg_opts['maxiter'] = 30
    if which is None:
        which = 'SA'
    largest = {'SA': False, 'LA': True}[which]

    if isinstance(A, qu.Lazy):
        A = A()
    if isinstance(B, qu.Lazy):
        B = B()
    if isinstance(P, qu.Lazy):
        P = P()

    # project into subspace
    if P is not None:
        A = qu.dag(P) @ (A @ P)

    # avoid matrix like behaviour
    if isinstance(A, qu.qarray):
        A = A.A

    d = A.shape[0]

    # set up the initial subspace to iterate with
    if v0 is None:
        v0 = qu.randn((d, k), dtype=A.dtype)
    else:
        # check if initial space should be projected too
        if P is not None and v0.shape[0] != d:
            v0 = qu.dag(P) @ v0

        v0 = v0.reshape(d, -1)

        # if not enough initial states given, flesh out with random ones
        if v0.shape[1] != k:
            v0 = np.hstack((v0, qu.randn((d, k - v0.shape[1]), dtype=A.dtype)))

    lk, vk = spla.lobpcg(A=A, X=v0, B=B, largest=largest, **lobpcg_opts)

    if return_vecs:
        vk = qu.qarray(vk)
        return maybe_sort_and_project(lk, vk, P, sort)
    else:
        return np.sort(lk) if sort else lk
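A hedged usage sketch (the operator, sizes and tolerances are arbitrary choices, and quimb must be importable since the function uses it internally):

import numpy as np
import scipy.sparse as sp

# hypothetical usage: the two lowest eigenpairs of a diagonal test operator
n = 200
L = sp.diags(np.arange(1.0, n + 1))
lk, vk = eigs_lobpcg(L, k=2, which='SA', maxiter=200, tol=1e-8)
print(lk)  # ~ [1., 2.]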
Example #39
# this is the only reliable solver.

        if run_slp:

            k = top_k
            # initial approximation to the k eigenvectors
            if not pre:
                scipy.random.seed(0)
                X = scipy.random.uniform(-1, 1, size=(L.shape[0], k))
            else:
                X = pre.v_L
            largest = False
            w_L, v_L = SSLA.lobpcg(L,
                                   X,
                                   B=M,
                                   M=invPL,
                                   maxiter=10,
                                   tol=1e-8,
                                   largest=largest,
                                   verbosityLevel=1)
            if largest:
                w_L = w_L[::-1]
                v_L = v_L[:, ::-1]
            w_L = 1 / w_L
            plt.figure()
            plt.plot(range(k), w_L)
            if verify_dense:
                plt.plot(range(k), w_dL[range(k)])

            # show_mesh(V,F,v_L[:,0])

            output.w_L = w_L
Example #40
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True):
    """Project the sample on the first eigenvectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Note : Laplacian Eigenmaps is the actual algorithm implemented here.

    Read more in the :ref:`User Guide <spectral_embedding>`.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional, default 8
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int, RandomState instance or None, optional, default: None
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition.  If int, random_state is the seed
        used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`. Used when
        ``solver`` == 'amg'.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    norm_laplacian : bool, optional, default=True
        If True, then compute normalized Laplacian.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
    few eigenvectors will simply uncover the connected components of the graph.

    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    adjacency = check_symmetric(adjacency)

    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    laplacian, dd = sparse.csgraph.laplacian(adjacency, normed=norm_laplacian,
                                             return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
       (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            # We are computing the opposite of the laplacian inplace so as
            # to spare a memory allocation of a possibly very large array
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol, v0=v0)
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"
            # Revert the laplacian to its opposite to have lobpcg work
            laplacian *= -1

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError

    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
Example #41
def null_space(M,
               k,
               k_skip=1,
               eigen_solver='arpack',
               tol=1E-6,
               max_iter=100,
               random_state=None):
    """
    Find the null space of a matrix M.

    Parameters
    ----------
    M : array, matrix, sparse matrix, or LinearOperator
        Input covariance matrix: should be symmetric positive semi-definite

    k : number of eigenvalues/vectors to return

    k_skip : number of low eigenvalues to skip.

    eigen_solver : string ['arpack' | 'lobpcg' | 'dense']
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
        lobpcg : use locally optimized block-preconditioned conjugate gradient.
                    For this method, M may be a dense or sparse matrix.
                    A dense matrix M will be converted internally to a
                    csr sparse format.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : tolerance for 'arpack' or 'lobpcg' methods.
            not used if eigen_solver=='dense'

    max_iter : maximum number of iterations for 'arpack' or 'lobpcg' methods
            not used if eigen_solver=='dense'
    """
    random_state = check_random_state(random_state)

    if eigen_solver == 'arpack':
        eigen_values, eigen_vectors = eigsh(M,
                                            k + k_skip,
                                            sigma=0.0,
                                            tol=tol,
                                            maxiter=max_iter)
        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'lobpcg':
        # initial vectors for iteration
        X = random_state.rand(M.shape[0], k + k_skip)
        try:
            ml = pyamg.smoothed_aggregation_solver(M, symmetry='symmetric')
        except TypeError:
            ml = pyamg.smoothed_aggregation_solver(M, mat_flag='symmetric')
        prec = ml.aspreconditioner()

        # compute eigenvalues and eigenvectors with LOBPCG
        eigen_values, eigen_vectors = linalg.lobpcg(M,
                                                    X,
                                                    M=prec,
                                                    largest=False,
                                                    tol=tol,
                                                    maxiter=max_iter)

        index = np.argsort(eigen_values)
        return (eigen_vectors[:, index[k_skip:]],
                np.sum(eigen_values[index[k_skip:]]))
    elif eigen_solver == 'dense':
        M = np.asarray(M)
        eigen_values, eigen_vectors = eigh(M,
                                           eigvals=(k_skip, k + k_skip),
                                           overwrite_a=True)
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
Exemplo n.º 42
0
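The next fragment begins mid-function: it is the tail of a sakurai(n) generator whose head is missing. A plausible reconstruction of that head, based on the SciPy lobpcg benchmark this snippet appears to be taken from (it assumes scipy.sparse is imported as sparse and the NumPy names are star-imported, as elsewhere in the fragment); the lines below then complete the function:

def sakurai(n):
    # Sakurai-Tadano-Inadomi-Nagashima generalized eigenproblem pair
    # (reconstructed head; A is the identity, B a pentadiagonal mass matrix)
    A = sparse.eye(n, n)
    d0 = array(r_[5, 6*ones(n-2), 5])
    d1 = -4*ones(n)
    d2 = ones(n)
    B = sparse.spdiags([d2, d1, d0, d1, d2], [-2, -1, 0, 1, 2], n, n)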
    k = arange(1,n+1)
    w_ex = sort(1./(16.*pow(cos(0.5*k*pi/(n+1)),4)))  # exact eigenvalues

    return A,B, w_ex

m = 3  # Blocksize

#
# Large scale
#
n = 2500
A,B, w_ex = sakurai(n)  # Sakurai pair
X = rand(n,m)
data = []
tt = time.clock()
eigs,vecs, resnh = lobpcg(A, X, B, tol=1e-6, maxiter=500, retResidualNormsHistory=True)
data.append(time.clock()-tt)
print('Results by LOBPCG for n='+str(n))
print()
print(eigs)
print()
print('Exact eigenvalues')
print()
print(w_ex[:m])
print()
print('Elapsed time',data[0])
loglog(arange(1,n+1),w_ex,'b.')
xlabel(r'Number $i$')
ylabel(r'$\lambda_i$')
title('Eigenvalue distribution')
show()
Exemplo n.º 43
0
def fem_laplacian(points, faces, spectrum_size=10, normalization=None,
                  verbose=False):
    """
    Compute linear finite-element method Laplace-Beltrami spectrum
    after Martin Reuter's MATLAB code.

    Comparison of fem_laplacian() with Martin Reuter's Matlab eigenvalues:

    fem_laplacian() results for Twins-2-1 left hemisphere (6 values):
    [4.829758648026221e-18,
    0.0001284173002467199,
    0.0002715181572272745,
    0.0003205150847159417,
    0.0004701628070486448,
    0.0005768904023010318]

    Martin Reuter's shapeDNA-tria Matlab code:
    {-4.7207711983791511358e-18 ;
    0.00012841730024672144738 ;
    0.00027151815722727096853 ;
    0.00032051508471592313632 ;
    0.0004701628070486902353  ;
    0.00057689040230097490998 }

    fem_laplacian() results for Twins-2-1 left postcentral (1022):
    [6.3469513010430304e-18,
    0.0005178862383467463,
    0.0017434911095630772,
    0.003667561767487686,
    0.005429017880363784,
    0.006309346984678924]

    Martin Reuter's Matlab code:
    {-2.1954862991027e-18 ;
    0.0005178862383468 ;
    0.0017434911095628 ;
    0.0036675617674875 ;
    0.0054290178803611 ;
    0.006309346984678 }

    Julien Lefevre, regarding comparison with Spongy results:
    "I have done some comparisons between my Matlab codes and yours
    on python and everything sounds perfect:
    The evaluation has been done only for one mesh (about 10000 vertices).
    - L2 error between your A and B matrices and mine are about 1e-16.
    - I have also compared eigenvalues of the generalized problem;
    even if the error is slightly increasing, it remains on the order
    of machine precision.
    - computation time for 1000 eigenvalues was 67s with python
    versus 63s in matlab. And it is quite the same behavior for lower orders.
    - Since the eigenvalues are increasing with order,
    it is also interesting to look at the relative error...
    high frequencies are not so much perturbed."

    Parameters
    ----------
    points : list of lists of 3 floats
        x,y,z coordinates for each vertex of the structure
    faces : list of lists of 3 integers
        3 indices to vertices that form a triangle on the mesh
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006
    verbose : bool
        print statements?

    Returns
    -------
    spectrum : list
        first spectrum_size eigenvalues for Laplace-Beltrami spectrum

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.shapes.laplace_beltrami import fem_laplacian
    >>> # Define a cube:
    >>> points = [[0,0,0], [0,1,0], [1,1,0], [1,0,0],
    ...           [0,0,1], [0,1,1], [1,1,1], [1,0,1]]
    >>> faces = [[0,1,2], [2,3,0], [4,5,6], [6,7,4], [0,4,7], [7,3,0],
    ...          [0,4,5], [5,1,0], [1,5,6], [6,2,1], [3,7,6], [6,2,3]]
    >>> spectrum = fem_laplacian(points, faces, spectrum_size=3,
    ...                          normalization=None, verbose=False)
    >>> print(np.array_str(np.array(spectrum[1::]),
    ...                    precision=5, suppress_small=True))
    [ 4.58359  4.8    ]
    >>> spectrum = fem_laplacian(points, faces, spectrum_size=3,
    ...                          normalization="area", verbose=False)
    >>> print(np.array_str(np.array(spectrum[1::]),
    ...                    precision=5, suppress_small=True))
    [ 27.50155  28.8    ]
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> from mindboggle.mio.vtks import read_vtk
    >>> from mindboggle.mio.fetch_data import prep_tests
    >>> urls, fetch_data = prep_tests()
    >>> label_file = fetch_data(urls['left_freesurfer_labels'], '', '.vtk')
    >>> points, f1,f2, faces, labels, f3,f4,f5 = read_vtk(label_file)
    >>> spectrum = fem_laplacian(points, faces, spectrum_size=6,
    ...                          normalization=None, verbose=False)
    >>> print(np.array_str(np.array(spectrum[1::]),
    ...                    precision=5, suppress_small=True))
    [ 0.00013  0.00027  0.00032  0.00047  0.00058]
    >>> # Spectrum for Twins-2-1 left postcentral pial surface (22):
    >>> from mindboggle.guts.mesh import keep_faces, reindex_faces_points
    >>> I22 = [i for i,x in enumerate(labels) if x==1022] # postcentral
    >>> faces = keep_faces(faces, I22)
    >>> faces, points, o1 = reindex_faces_points(faces, points)
    >>> spectrum = fem_laplacian(points, faces, spectrum_size=6,
    ...                          normalization=None, verbose=False)
    >>> print(np.array_str(np.array(spectrum[1::]),
    ...                    precision=5, suppress_small=True))
    [ 0.00057  0.00189  0.00432  0.00691  0.00775]
    >>> # Area-normalized spectrum for a single label (postcentral):
    >>> spectrum = fem_laplacian(points, faces, spectrum_size=6,
    ...                          normalization="area", verbose=False)
    >>> print(np.array_str(np.array(spectrum[1::]),
    ...                    precision=5, suppress_small=True))
    [  2.69259   8.97865  20.44857  32.74477  36.739  ]

    """
    from scipy.sparse.linalg import eigsh, lobpcg
    import numpy as np

    from mindboggle.shapes.laplace_beltrami import computeAB

    # ----------------------------------------------------------------
    # Compute A and B matrices (from Reuter et al., 2009):
    # ----------------------------------------------------------------
    A, B = computeAB(points, faces)
    if A.shape[0] <= spectrum_size:
        if verbose:
            print("The 3D shape has too few vertices ({0} <= {1}). Skip.".
                  format(A.shape[0], spectrum_size))
        return None

    # ----------------------------------------------------------------
    # Use the eigsh eigensolver:
    # ----------------------------------------------------------------
    try:

        # eigs is for nonsymmetric matrices while
        # eigsh is for real-symmetric or complex-Hermitian matrices:
        eigenvalues, eigenvectors = eigsh(A, k=spectrum_size, M=B,
                                          sigma=-0.01)
        spectrum = eigenvalues.tolist()

    # ----------------------------------------------------------------
    # Use the lobpcg eigensolver:
    # ----------------------------------------------------------------
    except RuntimeError:     
           
        if verbose:
            print("eigsh() failed. Now try lobpcg.")
            print("Warning: lobpcg can produce different results from "
                  "Reuter (2006) shapeDNA-tria software.")
        # Initial eigenvector values:
        init_eigenvecs = np.random.random((A.shape[0], spectrum_size))

        # maxiter = 40 forces lobpcg to use 20 iterations.
        # Strangely, largest=False finds the largest eigenvalues
        # and largest=True gives the smallest eigenvalues:
        eigenvalues, eigenvectors = lobpcg(A, init_eigenvecs, B=B,
                                           largest=True, maxiter=40)
        # Extract the real parts:
        spectrum = [value.real for value in eigenvalues]

        # For some reason, the eigenvalues from lobpcg are not sorted:
        spectrum.sort()

    # ----------------------------------------------------------------
    # Normalize by area:
    # ----------------------------------------------------------------
    if normalization == "area":
        spectrum = area_normalize(points, faces, spectrum)
        if verbose:
            print("Compute area-normalized linear FEM Laplace-Beltrami "
                  "spectrum")
    else:
        if verbose:
            print("Compute linear FEM Laplace-Beltrami spectrum")

    return spectrum
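
For reference, the lobpcg fallback branch above can be exercised in isolation on the cube mesh from the docstring. A sketch, assuming computeAB is importable from mindboggle as in the function body:

import numpy as np
from scipy.sparse.linalg import lobpcg
from mindboggle.shapes.laplace_beltrami import computeAB

points = [[0,0,0], [0,1,0], [1,1,0], [1,0,0],
          [0,0,1], [0,1,1], [1,1,1], [1,0,1]]
faces = [[0,1,2], [2,3,0], [4,5,6], [6,7,4], [0,4,7], [7,3,0],
         [0,4,5], [5,1,0], [1,5,6], [6,2,1], [3,7,6], [6,2,3]]
A, B = computeAB(points, faces)

init = np.random.random((A.shape[0], 3))
# mirrors the except-branch call above; largest=True here yields the
# smallest eigenvalues, as the original comment notes
vals, vecs = lobpcg(A, init, B=B, largest=True, maxiter=40)
print(sorted(v.real for v in vals))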
Exemplo n.º 44
0
def fem_laplacian(points, faces, n_eigenvalues=6, normalization=None):
    """
    Compute linear finite-element method Laplace-Beltrami spectrum
    after Martin Reuter's MATLAB code.

    Note ::

        Compare fem_laplacian() with Martin Reuter's Matlab code output:

        fem_laplacian() results for Twins-2-1 left hemisphere:
        [4.829758648026223e-18,
         0.00012841730024671977,
         0.0002715181572272744,
         0.00032051508471594173,
         0.000470162807048644,
         0.0005768904023010327]

        Martin Reuter's Matlab code:
         Creator: ./shapeDNA-tria
         Refine: 0
         Degree: 1
         Dimension: 2
         Elements: 290134
         DoF: 145069
         NumEW: 6
         Area: 110016
         Volume: 534346
         BLength: 0
         EulerChar: 2
         Time(pre) : 2
         Time(calcAB) : 0
         Time(calcEW) : 7
         Time(total ) : 9
        Eigenvalues:
        {-4.7207711983791511358e-18 ;
         0.00012841730024672144738 ;
         0.00027151815722727096853 ;
         0.00032051508471592313632 ;
         0.0004701628070486902353  ;
         0.00057689040230097490998 }

        fem_laplacian() results for Twins-2-1 left hemisphere postcentral:
        [6.346951301043029e-18,
         0.0005178862383467465,
         0.0017434911095630787,
         0.0036675617674876916,
         0.005429017880363785,
         0.006309346984678927]

        Martin Reuter's Matlab code:
         -2.1954862991027e-18
         0.0005178862383468
         0.0017434911095628
         0.0036675617674875
         0.0054290178803611
         0.006309346984678

    Parameters
    ----------
    points : list of lists of 3 floats
        x,y,z coordinates for each vertex of the structure
    faces : list of lists of 3 integers
        3 indices to vertices that form a triangle on the mesh
    n_eigenvalues : integer
        number of eigenvalues to be computed (the length of the spectrum)
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006

    Returns
    -------
    spectrum : list
        first n_eigenvalues eigenvalues for Laplace-Beltrami spectrum

    Examples
    --------
    >>> from mindboggle.shapes.laplace_beltrami import fem_laplacian
    >>> # Define a cube:
    >>> points = [[0,0,0], [0,1,0], [1,1,0], [1,0,0],
    ...           [0,0,1], [0,1,1], [1,1,1], [1,0,1]]
    >>> faces = [[0,1,2], [2,3,0], [4,5,6], [6,7,4], [0,4,7], [7,3,0],
    ...          [0,4,5], [5,1,0], [1,5,6], [6,2,1], [3,7,6], [6,2,3]]
    >>> fem_laplacian(points, faces, n_eigenvalues=3, normalization=None)
        [7.401486830834377e-17, 4.58359213500127, 4.799999999999998]
    >>> fem_laplacian(points, faces, n_eigenvalues=3, normalization="area")
        [1.2335811384723967e-17, 0.76393202250021175, 0.79999999999999949]
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_faces_points
    >>> from mindboggle.shapes.laplace_beltrami import fem_laplacian
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels',
    ...                         'lh.labels.DKT25.manual.vtk')
    >>> faces, points, npoints = read_faces_points(vtk_file)
    >>> fem_laplacian(points, faces, n_eigenvalues=6, normalization=None)
        [4.829758648026223e-18,
         0.00012841730024671904,
         0.00027151815722727406,
         0.00032051508471594146,
         0.0004701628070486449,
         0.0005768904023010303]
    >>> # Spectrum for Twins-2-1 left hemisphere postcentral (label 22):
    >>> import os
    >>> from mindboggle.utils.io_vtk import read_faces_points
    >>> from mindboggle.shapes.laplace_beltrami import fem_laplacian
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> label_file = os.path.join(path, 'arno', 'labels', 'label22.vtk')
    >>> faces, points, npoints = read_faces_points(label_file)
    >>> print("{0}".format(fem_laplacian(points, faces, n_eigenvalues=6,
    ...                                  normalization=None)))
        [6.346951301043029e-18,
         0.0005178862383467465,
         0.0017434911095630787,
         0.0036675617674876916,
         0.005429017880363785,
         0.006309346984678927]
    >>> # Area-normalized spectrum for a single label (postcentral):
    >>> print("{0}".format(fem_laplacian(points, faces, n_eigenvalues=6,
    >>>                                  normalization="area")))
        [1.1410192787181146e-21,
         9.310268097367214e-08,
         3.1343504525679715e-07,
         6.593336681038091e-07,
         9.759983608165455e-07,
         1.1342589857996225e-06]
         
    >>> # testing LBO on previously failed folds 
    >>> import subprocess
    >>> cmd = ["find", "/media/USBDATA/data/Mindboggle_MRI/MB101/results/features/", "-name", "fold_*.vtk"]
    >>> process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    >>> out, err = process.communicate()
    >>> pblm_vtks = out.split()
    >>> import mindboggle.shapes.laplace_beltrami
    >>> for vtk_file in pblm_vtks:
    ...     mindboggle.shapes.laplace_beltrami.spectrum_from_file(vtk_file)

    """
    from scipy.sparse.linalg import eigsh, lobpcg
    import numpy as np

    from mindboggle.shapes.laplace_beltrami import computeAB

    #-----------------------------------------------------------------
    # Compute A and B matrices (from Reuter et al., 2009):
    #-----------------------------------------------------------------
    A, B = computeAB(points, faces)
    if A.shape[0] <= n_eigenvalues:
        print("The 3D shape has too few vertices ({0} <= {1}). Skip.".
              format(A.shape[0], n_eigenvalues))
        return None

    #-----------------------------------------------------------------
    # Use the eigsh eigensolver:
    #-----------------------------------------------------------------
    try:

        # eigs is for nonsymmetric matrices while
        # eigsh is for real-symmetric or complex-Hermitian matrices:
        eigenvalues, eigenvectors = eigsh(A, k=n_eigenvalues, M=B,
                                          sigma=0)
        spectrum = eigenvalues.tolist()

    #-----------------------------------------------------------------
    # Use the lobpcg eigensolver:
    #-----------------------------------------------------------------
    except RuntimeError:     
           
        print("eigsh() failed. Now try lobpcg.")
        print("Warning: lobpcg can produce different results from Reuter"
              "et al.'s (2006) shapeDNA-tria software.")
        # Initial eigenvector values:
        init_eigenvecs = np.random.random((A.shape[0], n_eigenvalues))

        # maxiter = 40 forces lobpcg to use 20 iterations.
        # Strangely, largest=False finds the largest eigenvalues
        # and largest=True gives the smallest eigenvalues:
        eigenvalues, eigenvectors = lobpcg(A, init_eigenvecs, B=B,
                                           largest=True, maxiter=40)
        # Extract the real parts:
        spectrum = [value.real for value in eigenvalues]

        # For some reason, the eigenvalues from lobpcg are not sorted:
        spectrum.sort()

    #-----------------------------------------------------------------
    # Normalize by area:
    #-----------------------------------------------------------------
    if normalization == "area":
        spectrum = area_normalize(points, faces, spectrum)
        print("Compute area-normalized linear FEM Laplace-Beltrami spectrum")
    else:
        print("Compute linear FEM Laplace-Beltrami spectrum")

    return spectrum
Exemplo n.º 45
0
def solveSparse(Hs, run_lobpcg=False, minimal=False, verbose=False,
                more=False, exact=False):
    '''Finds a subset of the eigenstates/eigenvalues for a sparse
    formatted Hamiltonian'''

    sols = {}
    N = int(round(np.log2(Hs.shape[0])))  # number of cells

    ## EIGSH

    if verbose:
        print('-'*40)
        print('EIGSH...\n')

    if minimal:    # only the lowest two states
        K_EIGSH = 2
    elif more:        # can only find ground state for 2x2 system using eigsh
        K_EIGSH = 1 if N == 1 else min(pow(2, N)-1, 11*N)
    else:
        K_EIGSH = 1 if N == 1 else 3*N

    K_EIGSH = min(K_EIGSH, Hs.shape[0]-1)
    t1 = clock()

    # run eigsh

    try:
        if exact:
            e_vals, e_vecs = eigh(Hs.todense())
        else:
            e_vals, e_vecs = eigsh(Hs, k=K_EIGSH, tol=TOL_EIGSH, which='SA')
    except:  # should not happen unless K_EIGSH assignment rewritten
        try:
            e_vals, e_vecs = eigsh(Hs, k=2, tol=TOL_EIGSH, which='SA')
        except:
            if verbose:
                print('Insufficient dim for sparse methods. Running eigh')
            e_vals, e_vecs = eigh(Hs.todense())

    if verbose:
        print('Time elapsed (seconds): %f\n' % (clock()-t1))

    sols['eigsh'] = {}
    sols['eigsh']['vals'] = e_vals
    sols['eigsh']['vecs'] = e_vecs

    ###################################################################
    ## RUN LOBPCG

    ### LOBPCG only works for positive definite matrices and hence is only
    ### reliable for the ground state (for which the n-th leading minor
    ### remains positive definite in the cholesky decomposition)

    if run_lobpcg:
        if verbose:
            print('-'*40)
            print('LOBPCG...\n')

        t2 = clock()

        # guess solution
        approx = np.ones([Hs.shape[0], 1], dtype=float)

        # offset Hamiltonian to assert positive definite (for ground state)
        Hs = Hs + sp.diags(np.ones([1, Hs.shape[0]], dtype=float)*H_OFFSET, [0])

        # run lobpcg, remove offset
        e_vals, e_vecs = lobpcg(Hs, approx, tol=TOL_LOBPCG)
        e_vals -= H_OFFSET

        if verbose:
            print('Time elapsed (seconds): %f\n' % (clock()-t2))

        sols['lobpcg'] = {}
        sols['lobpcg']['vals'] = e_vals
        sols['lobpcg']['vecs'] = e_vecs

    return sols
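
The offset trick in the lobpcg branch above generalizes: shift a symmetric indefinite matrix until it is positive definite, solve for the lowest eigenpair, then subtract the shift. A self-contained sketch (OFFSET is a stand-in for the module constant H_OFFSET; the matrix here is a toy diagonal example):

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import lobpcg

rng = np.random.RandomState(0)
d = rng.uniform(-5., 5., size=200)      # an indefinite (diagonal) spectrum
H = sp.diags(d)

OFFSET = 100.0                          # large enough that H + OFFSET*I is PD
H_shifted = H + sp.diags(np.full(200, OFFSET))

x0 = rng.rand(200, 1)                   # one block vector: ground state only
vals, vecs = lobpcg(H_shifted, x0, tol=1e-8, maxiter=500, largest=False)
print(vals - OFFSET, d.min())           # the two should roughly agree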
Exemplo n.º 46
0
if meshnum==1:
    V,E = mesh.regular_triangle_mesh(20,6)
if meshnum==2:
    from scipy.io import loadmat
    mesh = loadmat('crack_mesh.mat')
    V=mesh['V']
    E=mesh['E']

A = graph_laplacian(V,E)

# construct preconditioner
ml = smoothed_aggregation_solver(A, coarse_solver='pinv2',max_coarse=10)
M = ml.aspreconditioner()

# solve for lowest two modes: constant vector and Fiedler vector
X = scipy.rand(A.shape[0], 2) 
(evals, evec, res) = lobpcg(A, X, M=M, tol=1e-12, largest=False,
        verbosityLevel=0, retResidualNormsHistory=True)

fiedler = evec[:,1]

# use the median of the Fiedler vector as the separator
vmed = numpy.median(fiedler)
v = numpy.zeros((A.shape[0],))
K = numpy.where(fiedler<=vmed)[0]
v[K]=-1
K = numpy.where(fiedler>vmed)[0]
v[K]=1

# plot the mesh and partition
trimesh(V,E)
sub = pylab.gca()
sub.hold(True)
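
The same median-split partitioning can be reproduced without pyamg on a graph whose Fiedler vector is known to be monotone. A self-contained sketch on a path graph (the names here are illustrative, not from the example above):

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import lobpcg

n = 50
# graph Laplacian of a path: degree 1 at the ends, 2 inside, -1 off-diagonal
L = diags([-np.ones(n-1), np.r_[1., 2.*np.ones(n-2), 1.], -np.ones(n-1)],
          [-1, 0, 1]).tocsr()

X = np.random.rand(n, 2)
vals, vecs = lobpcg(L, X, tol=1e-8, maxiter=200, largest=False)
fiedler = vecs[:, 1]                    # second-smallest mode
part = np.where(fiedler <= np.median(fiedler), -1, 1)
print(part)                             # splits the path into two halves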
Exemplo n.º 47
0
def eigen_decomposition(G, n_components=8, eigen_solver='auto',
                        random_state=None,
                        drop_first=True, largest=True, solver_kwds=None):
    """
    Function to compute the eigendecomposition of a square matrix.

    Parameters
    ----------
    G : array_like or sparse matrix
        The square matrix for which to compute the eigen-decomposition.
    n_components : integer, optional
        The number of eigenvectors to return
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto' :
            attempt to choose the best method for input data (default)
        'dense' :
            use standard dense matrix operations for the eigenvalue decomposition.
            For this method, G must be an array or matrix type.
            This method should be avoided for large problems.
        'arpack' :
            use Arnoldi iteration in shift-invert mode. For this method,
            G may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems.  It is best to
            try several random seeds in order to check results.
        'lobpcg' :
            Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg' :
            Algebraic Multigrid solver (requires ``pyamg`` to be installed)
            It can be faster on very large, sparse problems, but may also lead
            to instabilities.
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver

    Returns
    -------
    lambdas, diffusion_map : eigenvalues, eigenvectors
    """
    n_nodes = G.shape[0]
    if drop_first:
        n_components = n_components + 1

    eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
                                                   size=n_nodes,
                                                   nvec=n_components)
    random_state = check_random_state(random_state)

    # Convert G to best type for eigendecomposition
    if sparse.issparse(G):
        if G.getformat() != 'csr':
            G = G.tocsr()
    G = G.astype(np.float64)

    # Check for symmetry
    is_symmetric = _is_symmetric(G)

    # Try Eigen Methods:
    if eigen_solver == 'arpack':
        # This matches the internal initial state used by ARPACK
        v0 = random_state.uniform(-1, 1, G.shape[0])
        if is_symmetric:
            if largest:
                which = 'LM'
            else:
                which = 'SM'
            lambdas, diffusion_map = eigsh(G, k=n_components, which=which,
                                           v0=v0,**(solver_kwds or {}))
        else:
            if largest:
                which = 'LR'
            else:
                which = 'SR'
            lambdas, diffusion_map = eigs(G, k=n_components, which=which,
                                          **(solver_kwds or {}))
        lambdas = np.real(lambdas)
        diffusion_map = np.real(diffusion_map)
    elif eigen_solver == 'amg':
        # separate amg & lobpcg keywords:
        if solver_kwds is not None:
            amg_kwds = {}
            lobpcg_kwds = solver_kwds.copy()
            for kwd in AMG_KWDS:
                if kwd in solver_kwds.keys():
                    amg_kwds[kwd] = solver_kwds[kwd]
                    del lobpcg_kwds[kwd]
        else:
            amg_kwds = None
            lobpcg_kwds = None
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        if not sparse.issparse(G):
            warnings.warn("AMG works better for sparse matrices")
        # Use AMG to get a preconditioner and speed up the eigenvalue problem.
        ml = smoothed_aggregation_solver(check_array(G, accept_sparse=['csr']), **(amg_kwds or {}))
        M = ml.aspreconditioner()
        n_find = min(n_nodes, 5 + 2*n_components)
        X = random_state.rand(n_nodes, n_find)
        X[:, 0] = (G.diagonal()).ravel()
        lambdas, diffusion_map = lobpcg(G, X, M=M, largest=largest,**(lobpcg_kwds or {}))
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == "lobpcg":
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        n_find = min(n_nodes, 5 + 2*n_components)
        X = random_state.rand(n_nodes, n_find)
        lambdas, diffusion_map = lobpcg(G, X, largest=largest,**(solver_kwds or {}))
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == 'dense':
        if sparse.isspmatrix(G):
            G = G.todense()
        if is_symmetric:
            lambdas, diffusion_map = eigh(G,**(solver_kwds or {}))
        else:
            lambdas, diffusion_map = eig(G,**(solver_kwds or {}))
        if largest:  # eigh always returns eigenvalues in ascending order
            lambdas = lambdas[::-1] # reverse order the e-values
            diffusion_map = diffusion_map[:, ::-1] # reverse order the vectors
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    return (lambdas, diffusion_map)
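
A hypothetical smoke test for eigen_decomposition() above, on a matrix with a known spectrum (it assumes the module's own helpers such as check_eigen_solver and _is_symmetric are importable alongside the function):

import numpy as np

G = np.diag(np.arange(1., 10.))        # symmetric, eigenvalues 1..9
lam, vec = eigen_decomposition(G, n_components=3, eigen_solver='dense',
                               largest=True, drop_first=False)
print(lam)                             # -> [ 9.  8.  7.]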
Exemplo n.º 48
0
def spectral_embedding(adjacency, n_components=8, mode=None,
                       random_state=None):
    """Project the sample on the first eigen vectors of the graph Laplacian

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Parameters
    -----------
    adjacency: array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components: integer, optional
        The dimension of the projection subspace.

    mode: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when mode == 'amg'. By default
        arpack is used.

    Returns
    --------
    embedding: array, shape: (n_samples, n_components)
        The reduced samples

    Notes
    ------
    The graph should contain only one connected component, otherwise the
    results make little sense.
    """

    from scipy import sparse
    from ..utils.arpack import eigsh
    from scipy.sparse.linalg import lobpcg
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if mode == "amg":
            raise ValueError("The mode was set to 'amg', but pyamg is "
                             "not available.")

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrix given is symmetric
    if mode is None:
        mode = 'arpack'
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=True, return_diag=True)
    if (mode == 'arpack'
        or not sparse.isspmatrix(laplacian)
        or n_nodes < 5 * n_components):
        # lobpcg used with mode='amg' has bugs for low number of nodes

        # We need to put the diagonal at zero
        if not sparse.isspmatrix(laplacian):
            laplacian.flat[::n_nodes + 1] = 0
        else:
            laplacian = laplacian.tocoo()
            diag_idx = (laplacian.row == laplacian.col)
            laplacian.data[diag_idx] = 0
            # If the matrix has a small number of diagonals (as in the
            # case of structured matrices coming from images), the
            # dia format might be best suited for matvec products:
            n_diags = np.unique(laplacian.row - laplacian.col).size
            if n_diags <= 7:
                # 3 or less outer diagonals on each side
                laplacian = laplacian.todia()
            else:
                # csr has the fastest matvec and is thus best suited to
                # arpack
                laplacian = laplacian.tocsr()

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                       sigma=1.0, which='LM')
        embedding = diffusion_map.T[::-1] * dd
    elif mode == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        laplacian = laplacian.astype(np.float64)  # lobpcg needs double precision floats
        ml = smoothed_aggregation_solver(laplacian.tocsr())
        X = random_state.rand(laplacian.shape[0], n_components)
        X[:, 0] = 1. / dd.ravel()
        M = ml.aspreconditioner()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError
    else:
        raise ValueError("Unknown value for mode: '%s'."
                         "Should be 'amg' or 'arpack'" % mode)
    return embedding
Exemplo n.º 49
0
    k = arange(1,n+1)
    w_ex = sort(1./(16.*pow(cos(0.5*k*pi/(n+1)),4))) # exact eigenvalues

    return A,B, w_ex

m = 3  # Blocksize

#
# Large scale
#
n = 2500
A,B, w_ex = sakurai(n)  # Sakurai pair
X = rand(n,m)
data = []
tt = time.clock()
eigs,vecs, resnh = lobpcg(A, X, B, tol=1e-6, maxiter=500, retResidualNormsHistory=True)
data.append(time.clock()-tt)
print('Results by LOBPCG for n='+str(n))
print()
print(eigs)
print()
print('Exact eigenvalues')
print()
print(w_ex[:m])
print()
print('Elapsed time',data[0])
loglog(arange(1,n+1),w_ex,'b.')
xlabel(r'Number $i$')
ylabel(r'$\lambda_i$')
title('Eigenvalue distribution')
show()
Exemplo n.º 50
0
def spectral_embedding(adjacency, n_components=8, mode=None,
                       random_state=None):
    """Project the sample on the first eigen vectors of the graph Laplacian

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Parameters
    -----------
    adjacency: array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components: integer, optional
        The dimension of the projection subspace.

    mode: {None, 'arpack' or 'amg'}
        The eigenvalue decomposition strategy to use. AMG (Algebraic
        MultiGrid) is much faster, but requires pyamg to be
        installed.

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when mode == 'amg'.

    Returns
    --------
    embedding: array, shape: (n_samples, n_components)
        The reduced samples

    Notes
    ------
    The graph should contain only one connected component, otherwise the
    results make little sense.
    """

    from scipy import sparse
    from ..utils.fixes import arpack_eigsh
    from scipy.sparse.linalg import lobpcg
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrix given is symmetric
    if not amg_loaded:
        warnings.warn('pyamg not available, using scipy.sparse')
    if mode is None:
        mode = ('amg' if amg_loaded else 'arpack')
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=True, return_diag=True)
    if (mode == 'arpack'
        or not sparse.isspmatrix(laplacian)
        or n_nodes < 5 * n_components):
        # lobpcg used with mode='amg' has bugs for low number of nodes

        # We need to put the diagonal at zero
        if not sparse.isspmatrix(laplacian):
            laplacian.flat[::n_nodes + 1] = 0
        else:
            laplacian = laplacian.tocoo()
            diag_idx = (laplacian.row == laplacian.col)
            laplacian.data[diag_idx] = 0
            # If the matrix has a small number of diagonals (as in the
            # case of structured matrices coming from images), the
            # dia format might be best suited for matvec products:
            n_diags = np.unique(laplacian.row - laplacian.col).size
            if n_diags <= 7:
                # 3 or less outer diagonals on each side
                laplacian = laplacian.todia()
            else:
                # csr has the fastest matvec and is thus best suited to
                # arpack
                laplacian = laplacian.tocsr()
        lambdas, diffusion_map = arpack_eigsh(-laplacian, k=n_components,
                                              which='LA')
        embedding = diffusion_map.T[::-1] * dd
    elif mode == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        laplacian = laplacian.astype(np.float64)  # lobpcg needs double precision floats
        ml = smoothed_aggregation_solver(laplacian.tocsr())
        X = random_state.rand(laplacian.shape[0], n_components)
        X[:, 0] = 1. / dd.ravel()
        M = ml.aspreconditioner()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError
    else:
        raise ValueError("Unknown value for mode: '%s'."
                         "Should be 'amg' or 'arpack'" % mode)
    return embedding
Exemplo n.º 51
0
def eigen_decomposition(G,
                        n_components=8,
                        eigen_solver='auto',
                        random_state=None,
                        drop_first=True,
                        largest=True,
                        solver_kwds=None):
    """
    Function to compute the eigendecomposition of a square matrix.

    Parameters
    ----------
    G : array_like or sparse matrix
        The square matrix for which to compute the eigen-decomposition.
    n_components : integer, optional
        The number of eigenvectors to return
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto' :
            attempt to choose the best method for input data (default)
        'dense' :
            use standard dense matrix operations for the eigenvalue decomposition.
            For this method, G must be an array or matrix type.
            This method should be avoided for large problems.
        'arpack' :
            use Arnoldi iteration in shift-invert mode. For this method,
            G may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems.  It is best to
            try several random seeds in order to check results.
        'lobpcg' :
            Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg' :
            Algebraic Multigrid solver (requires ``pyamg`` to be installed)
            It can be faster on very large, sparse problems, but may also lead
            to instabilities.
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver

    Returns
    -------
    lambdas, diffusion_map : eigenvalues, eigenvectors
    """
    n_nodes = G.shape[0]
    if drop_first:
        n_components = n_components + 1

    eigen_solver, solver_kwds = check_eigen_solver(eigen_solver,
                                                   solver_kwds,
                                                   size=n_nodes,
                                                   nvec=n_components)
    random_state = check_random_state(random_state)

    # Convert G to best type for eigendecomposition
    if sparse.issparse(G):
        if G.getformat() != 'csr':
            G = G.tocsr()
    G = G.astype(np.float64)
    # Check for symmetry
    is_symmetric = _is_symmetric(G)

    # Try Eigen Methods:
    if eigen_solver == 'arpack':
        # This matches the internal initial state used by ARPACK
        v0 = random_state.uniform(-1, 1, G.shape[0])
        if is_symmetric:
            if largest:
                which = 'LM'
            else:
                which = 'SM'
            lambdas, diffusion_map = eigsh(G,
                                           k=n_components,
                                           which=which,
                                           v0=v0,
                                           **(solver_kwds or {}))
        else:
            if largest:
                which = 'LR'
            else:
                which = 'SR'
            lambdas, diffusion_map = eigs(G,
                                          k=n_components,
                                          which=which,
                                          **(solver_kwds or {}))
        lambdas = np.real(lambdas)
        diffusion_map = np.real(diffusion_map)
    elif eigen_solver == 'amg':
        # separate amg & lobpcg keywords:
        if solver_kwds is not None:
            amg_kwds = {}
            lobpcg_kwds = solver_kwds.copy()
            for kwd in AMG_KWDS:
                if kwd in solver_kwds.keys():
                    amg_kwds[kwd] = solver_kwds[kwd]
                    del lobpcg_kwds[kwd]
        else:
            amg_kwds = None
            lobpcg_kwds = None
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        if not sparse.issparse(G):
            warnings.warn("AMG works better for sparse matrices")
        # Use AMG to get a preconditioner and speed up the eigenvalue problem.
        ml = smoothed_aggregation_solver(check_array(G, accept_sparse=['csr']),
                                         **(amg_kwds or {}))
        M = ml.aspreconditioner()
        n_find = min(n_nodes, 5 + 2 * n_components)
        X = random_state.rand(n_nodes, n_find)
        X[:, 0] = (G.diagonal()).ravel()
        lambdas, diffusion_map = lobpcg(G,
                                        X,
                                        M=M,
                                        largest=largest,
                                        **(lobpcg_kwds or {}))
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == "lobpcg":
        if not is_symmetric:
            raise ValueError("lobpcg requires symmetric matrices.")
        n_find = min(n_nodes, 5 + 2 * n_components)
        X = random_state.rand(n_nodes, n_find)
        lambdas, diffusion_map = lobpcg(G,
                                        X,
                                        largest=largest,
                                        **(solver_kwds or {}))
        sort_order = np.argsort(lambdas)
        if largest:
            lambdas = lambdas[sort_order[::-1]]
            diffusion_map = diffusion_map[:, sort_order[::-1]]
        else:
            lambdas = lambdas[sort_order]
            diffusion_map = diffusion_map[:, sort_order]
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == 'dense':
        if sparse.isspmatrix(G):
            G = G.todense()
        if is_symmetric:
            lambdas, diffusion_map = eigh(G, **(solver_kwds or {}))
        else:
            lambdas, diffusion_map = eig(G, **(solver_kwds or {}))
            sort_index = np.argsort(lambdas)
            lambdas = lambdas[sort_index]
            diffusion_map = diffusion_map[:, sort_index]
        if largest:  # eigh always returns eigenvalues in ascending order
            lambdas = lambdas[::-1]  # reverse order the e-values
            diffusion_map = diffusion_map[:, ::-1]  # reverse order the vectors
        lambdas = lambdas[:n_components]
        diffusion_map = diffusion_map[:, :n_components]
    elif eigen_solver == 'slepc':
        if not is_symmetric:
            raise ValueError("slepc requires symmetric matrices.")
        lambdas, diffusion_map = slepc.get_eigenpairs(G,
                                                      npairs=n_components,
                                                      largest=largest)
    return (lambdas, diffusion_map)
Exemplo n.º 52
0
N = 100
K = 9
A = poisson((N,N), format='csr')

# create the AMG hierarchy
ml = smoothed_aggregation_solver(A)

# initial approximation to the K eigenvectors
X = scipy.rand(A.shape[0], K) 

# preconditioner based on ml
M = ml.aspreconditioner()

# compute eigenvalues and eigenvectors with LOBPCG
W,V = lobpcg(A, X, M=M, tol=1e-8, largest=False)


#plot the eigenvectors
import pylab

pylab.figure(figsize=(9,9))

for i in range(K):
    pylab.subplot(3, 3, i+1)
    pylab.title('Eigenvector %d' % i)
    pylab.pcolor(V[:,i].reshape(N,N))
    pylab.axis('equal')
    pylab.axis('off')
pylab.show()    
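
Optional sanity check for this demo (a sketch, not part of the original example): the eigenvalues of the 5-point Poisson operator on an N x N grid are known in closed form, so the lobpcg output W can be compared against them.

import numpy as np
i, j = np.meshgrid(np.arange(1, N+1), np.arange(1, N+1))
exact = np.sort((4*np.sin(i*np.pi/(2*(N+1)))**2
                 + 4*np.sin(j*np.pi/(2*(N+1)))**2).ravel())[:K]
print(np.sort(W))
print(exact)   # the two should agree to roughly the solver tolerance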
Exemplo n.º 53
0
def spectral_embedding(adjacency,
                       n_components=8,
                       eigen_solver=None,
                       random_state=None,
                       eigen_tol=0.0,
                       norm_laplacian=True,
                       drop_first=True):
    """Project the sample on the first eigenvectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Note : Laplacian Eigenmaps is the actual algorithm implemented here.

    Read more in the :ref:`User Guide <spectral_embedding>`.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional, default 8
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int, RandomState instance or None, optional, default: None
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition.  If int, random_state is the seed
        used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`. Used when
        ``solver`` == 'amg'.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    norm_laplacian : bool, optional, default=True
        If True, then compute normalized Laplacian.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
    few eigenvectors will simply uncover the connected components of the graph.

    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      https://doi.org/10.1137%2FS1064827500366124
    """
    adjacency = check_symmetric(adjacency)

    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'" %
                         eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    laplacian, dd = csgraph_laplacian(adjacency,
                                      normed=norm_laplacian,
                                      return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
        (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            # We are computing the opposite of the laplacian inplace so as
            # to spare a memory allocation of a possibly very large array
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian,
                                           k=n_components,
                                           sigma=1.0,
                                           which='LM',
                                           tol=eigen_tol,
                                           v0=v0)
            embedding = diffusion_map.T[n_components::-1]
            if norm_laplacian:
                embedding = embedding / dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"
            # Revert the laplacian to its opposite to have lobpcg work
            laplacian *= -1

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian,
                                        X,
                                        M=M,
                                        tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T
        if norm_laplacian:
            embedding = embedding / dd
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components]
            if norm_laplacian:
                embedding = embedding / dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian,
                                            X,
                                            tol=1e-15,
                                            largest=False,
                                            maxiter=2000)
            embedding = diffusion_map.T[:n_components]
            if norm_laplacian:
                embedding = embedding / dd
            if embedding.shape[0] == 1:
                raise ValueError

    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
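
A minimal usage sketch for the function above (it assumes this is scikit-learn's implementation, importable as sklearn.manifold.spectral_embedding):

import numpy as np
from sklearn.manifold import spectral_embedding

rng = np.random.RandomState(0)
pts = rng.rand(30, 2)
# dense RBF affinity; any symmetric non-negative matrix is acceptable
d2 = ((pts[:, None, :] - pts[None, :, :])**2).sum(-1)
affinity = np.exp(-d2 / d2.mean())
emb = spectral_embedding(affinity, n_components=2, random_state=0)
print(emb.shape)                       # (30, 2)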
Exemplo n.º 54
0
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
              random_state=None):
    """
    Find the null space of a matrix M.

    Parameters
    ----------
    M : array, matrix, sparse matrix, or LinearOperator
        Input covariance matrix: should be symmetric positive semi-definite

    k : number of eigenvalues/vectors to return

    k_skip : number of low eigenvalues to skip.

    eigen_solver : string ['arpack' | 'lobpcg' | 'dense']
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
        lobpcg : use locally optimized block-preconditioned conjugate gradient.
                    For this method, M may be a dense or sparse matrix.
                    A dense matrix M will be converted internally to a
                    csr sparse format.
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.

    tol : float, optional
        Tolerance for 'arpack' or 'lobpcg' methods.
        Not used if eigen_solver == 'dense'.

    max_iter : int
        Maximum number of iterations for 'arpack' or 'lobpcg' methods.
        Not used if eigen_solver == 'dense'.
    """
    random_state = check_random_state(random_state)

    if eigen_solver == 'arpack':
        eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
                                            tol=tol, maxiter=max_iter)
        return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
    elif eigen_solver == 'lobpcg':
        # initial vectors for iteration, drawn from the provided random state
        X = random_state.rand(M.shape[0], k + k_skip)
        try:
            ml = pyamg.smoothed_aggregation_solver(M, symmetry='symmetric')
        except TypeError:
            ml = pyamg.smoothed_aggregation_solver(M, mat_flag='symmetric')
        prec = ml.aspreconditioner()

        # compute eigenvalues and eigenvectors with LOBPCG
        eigen_values, eigen_vectors = linalg.lobpcg(
            M, X, M=prec, largest=False, tol=tol, maxiter=max_iter)

        index = np.argsort(eigen_values)
        return (eigen_vectors[:, index[k_skip:]],
                np.sum(eigen_values[index[k_skip:]]))
    elif eigen_solver == 'dense':
        M = np.asarray(M)
        eigen_values, eigen_vectors = eigh(
            M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        index = np.argsort(np.abs(eigen_values))
        return eigen_vectors[:, index], np.sum(eigen_values)
    else:
        raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
Exemplo n.º 55
0
def spectral_embedding(adjacency, k=8, mode=None):
    """ Spectral embedding: project the sample on the k first
        eigen vectors of the graph laplacian. 

        Parameters
        -----------
        adjacency: array-like or sparse matrix, shape: (p, p)
            The adjacency matrix of the graph to embed.
        k: integer, optional
            The dimension of the projection subspace.
        mode: {None, 'arpack' or 'amg'}
            The eigenvalue decomposition strategy to use. AMG (Algebraic
            MultiGrid) is much faster, but requires pyamg to be
            installed.

        Returns
        --------
        embedding: array, shape: (k, p)
            The reduced samples, one row per eigenvector

        Notes
        ------
        The graph should contain only one connected component;
        otherwise the results make little sense.
    """

    from scipy import sparse
    from scipy.sparse.linalg import eigsh
    from scipy.sparse.linalg import lobpcg
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False 

    n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrix given is symmetric?
    if not amg_loaded:
        warnings.warn('pyamg not available, using scipy.sparse')
    if mode is None:
        mode = ('amg' if amg_loaded else 'arpack')
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=True, return_diag=True)
    if (mode == 'arpack'
            or not sparse.isspmatrix(laplacian)
            # 5 * k is the threshold under which lobpcg has bugs
            or n_nodes < 5 * k):
        # We need to put the diagonal at zero
        if not sparse.isspmatrix(laplacian):
            # flat indexing with stride n + 1 walks the main diagonal
            laplacian.flat[::n_nodes + 1] = 0
        else:
            laplacian = laplacian.tocoo()
            diag_idx = (laplacian.row == laplacian.col)
            laplacian.data[diag_idx] = 0
            # If the matrix has a small number of diagonals (as in the
            # case of structured matrices coming from images), the
            # dia format might be best suited for matvec products:
            n_diags = np.unique(laplacian.row - laplacian.col).size
            if n_diags <= 7:
                # 3 or less outer diagonals on each side
                laplacian = laplacian.todia()
            else:
                # csr has the fastest matvec and is thus best suited to
                # arpack
                laplacian = laplacian.tocsr()
        lambdas, diffusion_map = eigsh(-laplacian, k=k, which='LA')
        embedding = diffusion_map.T[::-1] * dd
    elif mode == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        laplacian = laplacian.astype(np.float64)  # lobpcg needs native floats
        ml = smoothed_aggregation_solver(laplacian.tocsr())
        X = np.random.rand(laplacian.shape[0], k)
        X[:, 0] = 1. / dd.ravel()
        M = ml.aspreconditioner()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12, 
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError
    else:
        raise ValueError("Unknown value for mode: '%s'." % mode)
    return embedding
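# A hedged usage sketch for this older spectral_embedding() variant,
# assuming it and its graph_laplacian helper are importable.  Two
# well-separated Gaussian blobs yield a heat-kernel affinity whose second
# eigenvector separates the blobs.
import numpy as np

rng = np.random.RandomState(42)
pts = np.vstack([rng.randn(10, 2), rng.randn(10, 2) + 8.0])
d2 = ((pts[:, None, :] - pts[None, :, :]) ** 2).sum(-1)
adjacency = np.exp(-d2 / 2.0)       # symmetric heat-kernel affinity

embedding = spectral_embedding(adjacency, k=2, mode='arpack')
print(embedding.shape)              # (2, 20): one row per eigenvector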
Exemplo n.º 56
0
def fem_laplacian(points, faces, spectrum_size=10, normalization=None):
    """
    Compute linear finite-element method Laplace-Beltrami spectrum
    after Martin Reuter's MATLAB code.

    Note ::

        Compare fem_laplacian() with Martin Reuter's Matlab eigenvalues:

        fem_laplacian() results for Twins-2-1 left hemisphere (6 values):
        [4.829758648026221e-18,
         0.0001284173002467199,
         0.0002715181572272745,
         0.0003205150847159417,
         0.0004701628070486448,
         0.0005768904023010318]

        Martin Reuter's shapeDNA-tria Matlab code:
        {-4.7207711983791511358e-18 ;
         0.00012841730024672144738 ;
         0.00027151815722727096853 ;
         0.00032051508471592313632 ;
         0.0004701628070486902353  ;
         0.00057689040230097490998 }

        fem_laplacian() results for Twins-2-1 left postcentral (1022):
        [6.3469513010430304e-18,
         0.0005178862383467463,
         0.0017434911095630772,
         0.003667561767487686,
         0.005429017880363784,
         0.006309346984678924]

        Martin Reuter's Matlab code:
        {-2.1954862991027e-18 ;
         0.0005178862383468 ;
         0.0017434911095628 ;
         0.0036675617674875 ;
         0.0054290178803611 ;
         0.006309346984678 }

    Julien Lefevre, regarding comparison with Spongy results:
    "I have done some comparisons between my Matlab codes and yours
    on python and everything sounds perfect:
    The evaluation has been done only for one mesh (about 10000 vertices).
    - L2 error between your A and B matrices and mine are about 1e-16.
    - I have also compared eigenvalues of the generalized problem;
      even if the error is slightly increasing, it remains on the order
      of machine precision.
    - Another good point: computation time for 1000 eigenvalues was 67s
      with python versus 63s in matlab.
      And it is quite the same behavior for lower orders.
    - Since the eigenvalues are increasing with order,
      it is also interesting to look at the relative error...
      high frequencies are not so much perturbed."

    Parameters
    ----------
    points : list of lists of 3 floats
        x,y,z coordinates for each vertex of the structure
    faces : list of lists of 3 integers
        3 indices to vertices that form a triangle on the mesh
    spectrum_size : integer
        number of eigenvalues to be computed (the length of the spectrum)
    normalization : string
        the method used to normalize eigenvalues ('area' or None)
        if "area", use area of the 2D structure as in Reuter et al. 2006

    Returns
    -------
    spectrum : list
        first spectrum_size eigenvalues for Laplace-Beltrami spectrum

    Examples
    --------
    >>> from mindboggle.shapes.laplace_beltrami import fem_laplacian
    >>> # Define a cube:
    >>> points = [[0,0,0], [0,1,0], [1,1,0], [1,0,0],
    >>>           [0,0,1], [0,1,1], [1,1,1], [1,0,1]]
    >>> faces = [[0,1,2], [2,3,0], [4,5,6], [6,7,4], [0,4,7], [7,3,0],
    >>>          [0,4,5], [5,1,0], [1,5,6], [6,2,1], [3,7,6], [6,2,3]]
    >>> fem_laplacian(points, faces, spectrum_size=3, normalization=None)
    [7.401486830834377e-17, 4.58359213500127, 4.799999999999998]
    >>> fem_laplacian(points, faces, spectrum_size=3, normalization="area")
    [1.2335811384723967e-17, 0.76393202250021175, 0.79999999999999949]
    >>> # Spectrum for entire left hemisphere of Twins-2-1:
    >>> import os
    >>> from mindboggle.mio.vtks import read_faces_points
    >>> from mindboggle.shapes.laplace_beltrami import fem_laplacian
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> vtk_file = os.path.join(path, 'arno', 'labels',
    >>>                         'lh.labels.DKT25.manual.vtk')
    >>> faces, points, npoints = read_faces_points(vtk_file)
    >>> fem_laplacian(points, faces, spectrum_size=6, normalization=None)
    [4.829758648026222e-18,
     0.0001284173002467197,
     0.000271518157227274,
     0.00032051508471594065,
     0.0004701628070486444,
     0.0005768904023010318]
    >>> # Spectrum for Twins-2-1 left postcentral pial surface (22):
    >>> import os
    >>> from mindboggle.mio.vtks import read_vtk
    >>> from mindboggle.guts.mesh import remove_faces, reindex_faces_points
    >>> from mindboggle.shapes.laplace_beltrami import fem_laplacian
    >>> path = os.environ['MINDBOGGLE_DATA']
    >>> label_file = os.path.join(path, 'arno', 'labels', 'lh.labels.DKT31.manual.vtk')
    >>> faces, u1, u2, points, u3, labels, u4, u5 = read_vtk(label_file)
    >>> I22 = [i for i, x in enumerate(labels) if x == 22]  # postcentral
    >>> faces = remove_faces(faces, I22)
    >>> faces, points, o1 = reindex_faces_points(faces, points)
    >>> #from mindboggle.mio.vtks import read_faces_points
    >>> #label_file = os.path.join(path, 'arno', 'labels', 'label22.vtk')
    >>> #faces, points, npoints = read_faces_points(label_file)
    >>> fem_laplacian(points, faces, spectrum_size=6, normalization=None)
    [6.3469513010430304e-18,
     0.0005178862383467462,
     0.0017434911095630795,
     0.0036675617674876856,
     0.005429017880363785,
     0.006309346984678933]
    >>> # Area-normalized spectrum for a single label (postcentral):
    >>> fem_laplacian(points, faces, spectrum_size=6, normalization="area")
    [1.1410192787181146e-21,
     9.3102680973672063e-08,
     3.1343504525679647e-07,
     6.5933366810380741e-07,
     9.7599836081654446e-07,
     1.1342589857996233e-06]

    """
    from scipy.sparse.linalg import eigsh, lobpcg
    import numpy as np

    from mindboggle.shapes.laplace_beltrami import computeAB

    #-----------------------------------------------------------------
    # Compute A and B matrices (from Reuter et al., 2009):
    #-----------------------------------------------------------------
    A, B = computeAB(points, faces)
    if A.shape[0] <= spectrum_size:
        print("The 3D shape has too few vertices ({0} <= {1}). Skip.".
              format(A.shape[0], spectrum_size))
        return None

    #-----------------------------------------------------------------
    # Use the eigsh eigensolver:
    #-----------------------------------------------------------------
    try:

        # eigs is for nonsymmetric matrices while
        # eigsh is for real-symmetric or complex-Hermitian matrices:
        eigenvalues, eigenvectors = eigsh(A, k=spectrum_size, M=B,
                                          sigma=0)
        spectrum = eigenvalues.tolist()

    #-----------------------------------------------------------------
    # Use the lobpcg eigensolver:
    #-----------------------------------------------------------------
    except RuntimeError:

        print("eigsh() failed. Now try lobpcg.")
        print("Warning: lobpcg can produce different results from "
              "Reuter (2006) shapeDNA-tria software.")
        # Initial eigenvector values:
        init_eigenvecs = np.random.random((A.shape[0], spectrum_size))

        # maxiter = 40 forces lobpcg to use 20 iterations.
        # Strangely, largest=False finds the largest eigenvalues
        # and largest=True gives the smallest eigenvalues:
        eigenvalues, eigenvectors = lobpcg(A, init_eigenvecs, B=B,
                                           largest=True, maxiter=40)
        # Extract the real parts:
        spectrum = [value.real for value in eigenvalues]

        # For some reason, the eigenvalues from lobpcg are not sorted:
        spectrum.sort()

    #-----------------------------------------------------------------
    # Normalize by area:
    #-----------------------------------------------------------------
    if normalization == "area":
        spectrum = area_normalize(points, faces, spectrum)
        print("Compute area-normalized linear FEM Laplace-Beltrami spectrum")
    else:
        print("Compute linear FEM Laplace-Beltrami spectrum")

    return spectrum
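# A hedged sketch of the generalized problem A x = lambda B x that the
# lobpcg fallback above solves, using toy stiffness/mass-like matrices
# rather than mesh-derived A and B; a preconditioner may be needed for
# tighter tolerances.  All names here are illustrative.
import numpy as np
from scipy.sparse import diags, identity
from scipy.sparse.linalg import lobpcg

n, k = 100, 6
A = diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csr')
B = identity(n, format='csr') * (1.0 / 6.0)    # diagonal, positive definite

init = np.random.RandomState(0).rand(n, k)
vals, vecs = lobpcg(A, init, B=B, largest=False, tol=1e-6, maxiter=500)
print(np.sort(vals))                            # smallest generalized eigenvalues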
Exemplo n.º 57
0
def MyDataset(dataset, Lev, s, n, FrameType='Haar', add_feature=False, QM7=False):
    if FrameType == 'Haar':
        D1 = lambda x: np.cos(x / 2)
        D2 = lambda x: np.sin(x / 2)
        DFilters = [D1, D2]
    elif FrameType == 'Linear':
        D1 = lambda x: np.square(np.cos(x / 2))
        D2 = lambda x: np.sin(x) / np.sqrt(2)
        D3 = lambda x: np.square(np.sin(x / 2))
        DFilters = [D1, D2, D3]
    elif FrameType == 'Quadratic':  # not accurate so far
        D1 = lambda x: np.cos(x / 2) ** 3
        D2 = lambda x: np.multiply((np.sqrt(3) * np.sin(x / 2)), np.cos(x / 2) ** 2)
        D3 = lambda x: np.multiply((np.sqrt(3) * np.sin(x / 2) ** 2), np.cos(x / 2))
        D4 = lambda x: np.sin(x / 2) ** 3
        DFilters = [D1, D2, D3, D4]
    else:
        raise Exception('Invalid FrameType')
    r = len(DFilters)

    dataset1 = list()
    label = list()
    for i in range(len(dataset)):
        if add_feature:
            raise Exception('this function has not been completed')  # will add this function as required
        else:
            if QM7:
                # num_features is assumed to be defined in the enclosing scope
                x_qm7 = torch.ones(dataset[i].num_nodes, num_features)
                data1 = Data(x=x_qm7, edge_index=dataset[i].edge_index, y=dataset[i].y)
                data1.y_origin = dataset[i].y_origin
            else:
                data1 = Data(x=dataset[i].x, edge_index=dataset[i].edge_index, y=dataset[i].y)
                
        if QM7:
            label.append(dataset[i].y_origin)
        # get graph Laplacian
        num_nodes = data1.x.shape[0]
        L = get_laplacian(dataset[i].edge_index, num_nodes=num_nodes, normalization='sym')
        L = sparse.coo_matrix((L[1].numpy(), (L[0][0, :].numpy(), L[0][1, :].numpy())), shape=(num_nodes, num_nodes))
        # calculate lambda max
        lobpcg_init = np.random.rand(num_nodes, 1)
        lambda_max, _ = lobpcg(L, lobpcg_init)
        lambda_max = lambda_max[0]
        J = np.log(lambda_max / np.pi) / np.log(s) + Lev - 1  # dilation level to start the decomposition
        # get matrix operators
        d = get_operator(L, DFilters, n, s, J, Lev)
        for m in range(1, r):
            for q in range(Lev):
                if (m == 1) and (q == 0):
                    d_aggre = d[m, q]
                else:
                    d_aggre = sparse.vstack((d_aggre, d[m, q]))
        d_aggre = sparse.vstack((d[0, Lev - 1], d_aggre))  # stack the low-pass block on top
        data1.d = [d_aggre]
        # get d_index: one block index per operator row-block, each repeated
        # num_nodes times, e.g. [0, ..., 0, 1, ..., 1, 2, ..., 2]
        a = [i for i in range((r - 1) * Lev + 1)]
        data1.d_index = [[a[i // num_nodes] for i in range(len(a) * num_nodes)]]
        # append data1 into dataset1
        dataset1.append(data1)
    if QM7:
        mean = torch.mean(torch.Tensor(label)).item()
        std = torch.sqrt(torch.var(torch.Tensor(label))).item()
        return dataset1, r, mean, std    
    else:
        return dataset1, r
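# A hedged, standalone sketch of the lambda_max step used in MyDataset():
# lobpcg with one random starting vector approximates the largest
# eigenvalue of a symmetric normalized graph Laplacian (largest=True is
# the default).  An even cycle graph is bipartite, so lambda_max is 2.
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import lobpcg

n = 20
i = np.arange(n)
A = sparse.csr_matrix((np.ones(2 * n),
                       (np.r_[i, i], np.r_[(i + 1) % n, (i - 1) % n])),
                      shape=(n, n))        # cycle graph adjacency
deg = np.asarray(A.sum(axis=1)).ravel()
D = sparse.diags(1.0 / np.sqrt(deg))
L = sparse.eye(n) - D @ A @ D              # symmetric normalized Laplacian

lambda_max, _ = lobpcg(L, np.random.rand(n, 1), maxiter=50)
print(lambda_max[0])                       # close to 2.0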
Exemplo n.º 58
0
def spectral_embedding(adjacency, n_components=8, mode=None,
                       random_state=None, eig_tol=0.0):
    """Project the sample on the first eigen vectors of the graph Laplacian

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Parameters
    ----------
    adjacency: array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components: integer, optional
        The dimension of the projection subspace.

    mode: {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when mode == 'amg'. By default
        arpack is used.

    eig_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack mode.

    Returns
    -------
    embedding: array, shape: (n_samples, n_components)
        The reduced samples

    Notes
    -----
    The graph should contain only one connected component; otherwise the
    results make little sense.

    References
    ----------
    [1] http://en.wikipedia.org/wiki/LOBPCG
    [2] LOBPCG: http://dx.doi.org/10.1137%2FS1064827500366124
    """

    from scipy import sparse
    from ..utils.arpack import eigsh
    from scipy.sparse.linalg import lobpcg
    from scipy.linalg import eigh as symeig  # dense fallback used below
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if mode == "amg":
            raise ValueError("The mode was set to 'amg', but pyamg is "
                             "not available.")

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrix given is symmetric?
    if mode is None:
        mode = 'arpack'
    elif mode not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for mode: '%s'. "
                         "Should be 'amg', 'arpack', or 'lobpcg'" % mode)
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=True, return_diag=True)
    if (mode == 'arpack'
        or mode != 'lobpcg' and
            (not sparse.isspmatrix(laplacian)
             or n_nodes < 5 * n_components)):
        # lobpcg used with mode='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 0)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eig_tol)
            embedding = diffusion_map.T[::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            mode = "lobpcg"

    if mode == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        laplacian = laplacian.astype(np.float64)  # lobpcg needs native floats
        ml = smoothed_aggregation_solver(laplacian.tocsr())
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError
    elif mode == "lobpcg":
        laplacian = laplacian.astype(np.float)  # lobpcg needs native floats
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lopbcg has problems with small
            # number of nodes
            # lobpcg will fallback to symeig, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.todense()
            lambdas, diffusion_map = symeig(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            # lobpcg needs native floats
            laplacian = laplacian.astype(np.float)
            laplacian = _set_diag(laplacian, 1)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError
    return embedding
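# A hedged demonstration of the shift-invert idea described in the long
# comment above, using plain scipy on a path-graph Laplacian (names here
# are illustrative).  S = I - L_norm has its spectrum in [-1, 1]; asking
# ARPACK for the eigenvalues of S nearest sigma recovers the smallest
# eigenvalues of L_norm without the slow convergence of which='LA'.
import numpy as np
from scipy import sparse
from scipy.sparse.linalg import eigsh

n = 200
A = sparse.diags([np.ones(n - 1), np.ones(n - 1)], [-1, 1], format='csr')
deg = np.asarray(A.sum(axis=1)).ravel()
D = sparse.diags(1.0 / np.sqrt(deg))
S = (D @ A @ D).tocsc()                    # equals I - L_norm

# sigma slightly above the top of the spectrum keeps the shifted matrix
# nonsingular (the RuntimeError guard above covers the singular case)
w_shift, _ = eigsh(S, k=4, sigma=1.01, which='LM')
L_dense = np.eye(n) - (D @ A @ D).toarray()
print(np.allclose(np.sort(1.0 - w_shift),
                  np.linalg.eigvalsh(L_dense)[:4], atol=1e-8))   # True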