Example #1
def signed_spectral_embedding(affinity,
                              random_state=None,
                              n_clusters=2,
                              eigen_tol=0.0):
    """
    affinity : weight matrix of the graph.
    random_state : seed passed through check_random_state for reproducibility.
    n_clusters : number of clusters.
    eigen_tol : tolerance for eigsh.
    """
    random_state = check_random_state(random_state)

    laplacian, dd = signed_laplacian(affinity)
    laplacian *= -1
    v0 = random_state.uniform(-1, 1, laplacian.shape[0])
    lambdas, diffusion_map = eigsh(laplacian,
                                   k=n_clusters,
                                   sigma=1.0,
                                   which='LM',
                                   tol=eigen_tol,
                                   v0=v0)
    embedding = diffusion_map.T[n_clusters::-1] * dd

    # flip the sign of the eigenvectors for reproducibility.
    embedding = _deterministic_vector_sign_flip(embedding)
    return embedding[:n_clusters].T
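A minimal, self-contained demonstration of the sign-flip convention every example on this page relies on: `_deterministic_vector_sign_flip` flips each row of a matrix so that its largest-magnitude entry is positive, which makes eigenvector output reproducible across runs and solvers (the behavior checked by the test functions further down). The array values here are made up for illustration.

import numpy as np
from sklearn.utils.extmath import _deterministic_vector_sign_flip

u = np.array([[1.0, -3.0, 2.0],   # largest-magnitude entry is -3 -> row gets flipped
              [0.5, 0.2, 4.0]])   # largest-magnitude entry is +4 -> row stays as-is
print(_deterministic_vector_sign_flip(u))
# first row becomes [-1., 3., -2.]; second row is unchanged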
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True):
    adjacency = check_symmetric(adjacency)

    # eigen_solver = 'arpack'
    # eigen_solver = 'amg'
    norm_laplacian = False  # hard-coded override: this variant always uses the unnormalized Laplacian
    random_state = check_random_state(random_state)
    n_nodes = adjacency.shape[0]
    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")
    laplacian, dd = csgraph_laplacian(adjacency, normed=norm_laplacian,
                                      return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
       (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        print("[INFILE] eigen_solver : ", eigen_solver, "norm_laplacian:", norm_laplacian)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        try:
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol, v0=v0)
            embedding = diffusion_map.T[n_components::-1]
            if norm_laplacian:
                embedding = embedding / dd
        except RuntimeError:
            eigen_solver = "lobpcg"
            laplacian *= -1
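            # NB: this excerpt omits the lobpcg fallback branch that follows in
            # the full implementation (see Example #14), so `embedding` would be
            # undefined below if arpack raised here.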

    embedding = _deterministic_vector_sign_flip(embedding)
    return embedding[:n_components].T
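The eigsh call above relies on ARPACK's shift-invert mode around sigma=1.0 (the long comment block in Example #14 below explains why). A minimal, self-contained sketch of the same call pattern; the toy affinity matrix and the diagonal fix-up (mirroring what _set_diag does for a dense normalized Laplacian) are assumptions for illustration.

import numpy as np
from scipy.sparse import csgraph
from scipy.sparse.linalg import eigsh

rng = np.random.RandomState(0)
A = rng.rand(20, 20)
A = 0.5 * (A + A.T)                          # symmetric toy affinity
L, dd = csgraph.laplacian(A, normed=True, return_diag=True)  # dd = sqrt of degrees
np.fill_diagonal(L, 1.0)                     # diagonal fix-up as in _set_diag
# eigenvalues of -L lie in [-2, 0]; shift-invert around 1.0 converges quickly
# to the ones closest to 0, i.e. the smallest eigenvalues of L
lambdas, vecs = eigsh(-L, k=3, sigma=1.0, which='LM')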
Example #3
def spectralcluster(A,
                    n_cluster,
                    n_neighbors=6,
                    random_state=None,
                    eigen_tol=0.0):
    #maps = spectral_embedding(affinity, n_components=n_components,eigen_solver=eigen_solver,random_state=random_state,eigen_tol=eigen_tol, drop_first=False)

    # dd is diag
    laplacian, dd = graph_laplacian(A, normed=True, return_diag=True)
    # set the diagonal of the laplacian matrix and convert it to a sparse
    # format well suited for eigenvalue decomposition
    laplacian = _set_diag(laplacian, 1)

    # diffusion_map is eigenvectors
    # LM largest eigenvalues
    laplacian *= -1
    eigenvalues, eigenvectors = eigsh(laplacian,
                                      k=n_cluster,
                                      sigma=1.0,
                                      which='LM',
                                      tol=eigen_tol)
    y = eigenvectors.T[n_cluster::-1] * dd
    y = _deterministic_vector_sign_flip(y)[:n_cluster].T

    random_state = check_random_state(random_state)
    centroids, labels, _ = k_means(y, n_cluster, random_state=random_state)

    return eigenvalues, y, centroids, labels
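A hedged usage sketch for the snippet above, assuming its helpers (graph_laplacian, _set_diag, k_means, eigsh, check_random_state, _deterministic_vector_sign_flip) are importable, as on the older sklearn versions this code targets; the blob data is made up for illustration. Note the four-value return: eigenvalues, embedding, k-means centroids, and labels.

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 5])  # two separated blobs
A = rbf_kernel(X)                                        # dense affinity matrix
eigenvalues, y, centroids, labels = spectralcluster(A, n_cluster=2)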
def test_vector_sign_flip():
    # Testing that sign flip is working & largest value has positive sign
    data = np.random.RandomState(36).randn(5, 5)
    max_abs_rows = np.argmax(np.abs(data), axis=1)
    data_flipped = _deterministic_vector_sign_flip(data)
    max_rows = np.argmax(data_flipped, axis=1)
    assert_array_equal(max_abs_rows, max_rows)
    signs = np.sign(data[range(data.shape[0]), max_abs_rows])
    assert_array_equal(data, data_flipped * signs[:, np.newaxis])
Example #5
def test_vector_sign_flip():
    # Testing that sign flip is working & largest value has positive sign
    data = np.random.RandomState(36).randn(5, 5)
    max_abs_rows = np.argmax(np.abs(data), axis=1)
    data_flipped = _deterministic_vector_sign_flip(data)
    max_rows = np.argmax(data_flipped, axis=1)
    assert_array_equal(max_abs_rows, max_rows)
    signs = np.sign(data[range(data.shape[0]), max_abs_rows])
    assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def get_laplacian_eig(adjacency, dims, normed=True, random_state=None):
    random_state = check_random_state(random_state)
    laplacian, dd = sparse.csgraph.laplacian(adjacency, normed=normed, return_diag=True)
    laplacian = _set_diag(laplacian, 1, True)
    laplacian *= -1
    v0 = random_state.uniform(-1, 1, laplacian.shape[0])
    lambdas, diffusion_map = eigsh(laplacian, k=dims, sigma=1.0, which='LM', tol=0.0, v0=v0)
    
    embedding = diffusion_map.T[dims::-1] * dd
    embedding = _deterministic_vector_sign_flip(embedding)
    
    return lambdas, embedding[:dims].T
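A usage sketch under the same assumptions (the sklearn helpers `_set_diag`, `check_random_state`, `_deterministic_vector_sign_flip` and scipy's `eigsh` are in scope); the sparse adjacency here is synthetic.

import numpy as np
from scipy import sparse

rng = np.random.RandomState(0)
A = sparse.random(50, 50, density=0.2, random_state=rng, format='csr')
A = A + A.T                                   # symmetrize
lambdas, embedding = get_laplacian_eig(A, dims=4, random_state=0)
print(embedding.shape)                        # (50, 4)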
    
Example #7
def dme(network, threshold=90, n_components=10, return_result=False, **kwargs):
    """
    Threshold, cosine similarity, and diffusion map embed `network`

    Parameters
    ----------
    network : (N, N) array_like
        Symmetric network on which to perform diffusion map embedding
    threshold : [0, 100] float, optional
        Threshold used to "sparsify" `network` prior to embedding. Default: 90
    n_components : int, optional
        Number of components to retain from embedding of `network`. Default: 10
    return_result : bool, optional
        Whether to return result dictionary including eigenvalues, original
        eigenvectors, etc. from embedding. Default: False
    kwargs : key-value pairs, optional
        Passed directly to :func:`mapalign.embed.compute_diffusion_map`

    Returns
    -------
    embedding : (N, C) numpy.ndarray
        Embedding of `N` samples in `C`-dimensional spaces
    res : dict
        Only if `return_result=True`
    """

    from mapalign import embed
    from sklearn import metrics
    from sklearn.utils.extmath import _deterministic_vector_sign_flip

    # threshold
    network = network.copy()
    threshold = np.percentile(network, threshold, axis=1, keepdims=True)
    network[network < threshold] = 0

    # cosine similarity
    network = metrics.pairwise.cosine_similarity(network)

    # embed (and ensure consistent output with regard to sign flipping)
    emb, res = embed.compute_diffusion_map(network,
                                           n_components=n_components,
                                           return_result=True,
                                           **kwargs)
    emb = _deterministic_vector_sign_flip(emb.T).T

    if return_result:
        return emb, res

    return emb
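A usage sketch, assuming `mapalign` is installed (it is imported inside the function); the random symmetric "network" is a stand-in for a real connectivity matrix.

import numpy as np

rng = np.random.RandomState(0)
net = rng.rand(100, 100)
net = (net + net.T) / 2              # symmetric toy network
emb = dme(net, threshold=90, n_components=5)
print(emb.shape)                     # expected: (100, 5)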
def test_spectral_embedding_unnormalized():
    # Test that spectral_embedding is also processing unnormalized laplacian correctly
    random_state = np.random.RandomState(36)
    data = random_state.randn(10, 30)
    sims = rbf_kernel(data)
    n_components = 8
    embedding_1 = spectral_embedding(sims, norm_laplacian=False, n_components=n_components, drop_first=False)

    # Verify using manual computation with dense eigh
    laplacian, dd = graph_laplacian(sims, normed=False, return_diag=True)
    _, diffusion_map = eigh(laplacian)
    embedding_2 = diffusion_map.T[:n_components] * dd
    embedding_2 = _deterministic_vector_sign_flip(embedding_2).T

    assert_array_almost_equal(embedding_1, embedding_2)
Example #9
def spectral_embedding_imitation(graph_laplacian_sketch,
                                 dd,
                                 n_components=8,
                                 random_state=None,
                                 norm_laplacian=True,
                                 drop_first=True):
    random_state = check_random_state(random_state)

    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    embedding = graph_laplacian_sketch.T[:n_components] * dd
    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
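A usage sketch: the "sketch" matrix below is a random stand-in for precomputed eigenvectors and `dd` a trivial degree scaling, both assumptions for illustration; the sklearn helpers used above must be in scope. With drop_first=True the function reads one extra column and drops the first.

import numpy as np

rng = np.random.RandomState(0)
graph_laplacian_sketch = rng.randn(30, 9)   # stand-in for precomputed eigenvectors
dd = np.ones(30)                            # trivial diagonal scaling
emb = spectral_embedding_imitation(graph_laplacian_sketch, dd,
                                   n_components=8, drop_first=True)
print(emb.shape)                            # (30, 8)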
def test_spectral_embedding_unnormalized():
    # Test that spectral_embedding is also processing unnormalized laplacian correctly
    random_state = np.random.RandomState(36)
    data = random_state.randn(10, 30)
    sims = rbf_kernel(data)
    n_components = 8
    embedding_1 = spectral_embedding(sims,
                                     norm_laplacian=False,
                                     n_components=n_components,
                                     drop_first=False)

    # Verify using manual computation with dense eigh
    laplacian, dd = graph_laplacian(sims, normed=False, return_diag=True)
    _, diffusion_map = eigh(laplacian)
    embedding_2 = diffusion_map.T[:n_components] * dd
    embedding_2 = _deterministic_vector_sign_flip(embedding_2).T

    assert_array_almost_equal(embedding_1, embedding_2)
Example #11
def spectralcluster(A, n_cluster, n_neighbors=6, random_state=None, eigen_tol=0.0):
    #maps = spectral_embedding(affinity, n_components=n_components,eigen_solver=eigen_solver,random_state=random_state,eigen_tol=eigen_tol, drop_first=False)

    # dd is diag
    laplacian, dd = graph_laplacian(A, normed=True, return_diag=True)
    # set the diagonal of the laplacian matrix and convert it to a sparse
    # format well suited for eigenvalue decomposition
    laplacian = _set_diag(laplacian, 1)
    
    # diffusion_map is eigenvectors
    # LM largest eigenvalues
    laplacian *= -1
    eigenvalues, eigenvectors = eigsh(laplacian, k=n_cluster,
                                      sigma=1.0, which='LM',
                                      tol=eigen_tol)
    y = eigenvectors.T[n_cluster::-1] * dd
    y = _deterministic_vector_sign_flip(y)[:n_cluster].T

    random_state = check_random_state(random_state)
    centroids, labels, _ = k_means(y, n_cluster, random_state=random_state)

    return eigenvalues, y, centroids, labels
Example #12
    def _embed(self, affinity, shift_invert=True):
        """
        Compute the eigenspace embedding of a given affinity matrix. 

        Arguments
        ---------
        affinity    :   sparse or dense matrix
                        affinity matrix to compute the spectral embedding of
        shift_invert:   bool
                        whether or not to use the shift-invert eigensolver trick,
                        which speeds up the search for eigenvectors of sparse matrices.
        """
        laplacian, orig_d = cg.laplacian(affinity,
                                         normed=True,
                                         return_diag=True)
        laplacian *= -1
        random_state = check_random_state(self.random_state)
        v0 = random_state.uniform(-1, 1, laplacian.shape[0])

        if not shift_invert:
            ev, spectrum = la.eigsh(laplacian,
                                    which='LA',
                                    k=self.n_clusters,
                                    v0=v0,
                                    tol=self.eigen_tol)
        else:
            ev, spectrum = la.eigsh(laplacian,
                                    which='LM',
                                    sigma=1,
                                    k=self.n_clusters,
                                    v0=v0,
                                    tol=self.eigen_tol)

        embedding = spectrum.T[self.n_clusters::-1]  #sklearn/issues/8129
        embedding = embedding / orig_d
        embedding = _deterministic_vector_sign_flip(embedding)
        return embedding
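On a toy problem the two search modes above return the same eigenvalues, since the eigenvalues of -L closest to 1.0 are exactly its algebraically largest ones. A self-contained check (toy data assumed):

import numpy as np
from scipy.sparse import csgraph
from scipy.sparse.linalg import eigsh

rng = np.random.RandomState(1)
A = rng.rand(30, 30)
A = 0.5 * (A + A.T)
L, _ = csgraph.laplacian(A, normed=True, return_diag=True)
ev_la, _ = eigsh(-L, k=3, which='LA')            # largest algebraic eigenvalues
ev_si, _ = eigsh(-L, k=3, which='LM', sigma=1)   # shift-invert near 1.0
print(np.allclose(np.sort(ev_la), np.sort(ev_si)))  # True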
Example #13
        # we increase the number of eigenvectors requested, as lobpcg
        # doesn't behave well in low dimension
        X = np.random.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        try:
            _, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                      largest=False, maxiter=2000)
        except Exception:
            continue
        embedding = diffusion_map.T[:n_components]
        if norm_laplacian:
            embedding = embedding / dd
        if embedding.shape[0] == 1:
            # raise ValueError
            continue

    embedding = _deterministic_vector_sign_flip(embedding)
    X = embedding[:n_components].T

    # STEP 5
    kmeans = KMeans(n_clusters=K, random_state=0).fit(X)
    # print(kmeans.labels_)
    # print(len(set(kmeans.labels_)))

    # FORM THE NEW ADJACENCY MATRIX AND FIND THE NUMBER OF EDITIONS
    # NewAdj = np.zeros([n, n])
    clusters = kmeans.labels_
    row = []
    col = []
    data = []
    while max(clusters) > -1:
        cluster = (np.array(clusters) == max(clusters)).nonzero()[0]
Example #14
print('ncols: ' + str(ncols))
print('master_stepsize: ' + str(master_stepsize))

nmi_sgd_set = []
num_repeat_exp = 10
for repeatExp in range(num_repeat_exp):
    print('iteration id: ' + str(repeatExp))
    X = nystromSP(train_data, 10, gamma_value, nclass)
    X_sto1, nnz_list, X_sto_list = StochasticRiemannianOpt(
        laplacian, X, ndim, master_stepsize, auto_corr, outer_iter, ncols,
        nsampleround)
    nmi_sgd = []
    for i in range(len(X_sto_list)):
        if i % 5 == 0:
            X_sto_tmp = X_sto_list[i].T * dd
            X_sto_tmp = _deterministic_vector_sign_flip(X_sto_tmp)
            cluster_id = KMeans(n_clusters=nclass,
                                n_init=50).fit(X_sto_tmp.T).labels_
            nmi = normalized_mutual_info_score(
                train_label, cluster_id)  ### measuring NMI score per iteration
            nmi_sgd.append(nmi)

    nmi_sgd_set.append(nmi_sgd)

nmi_sgd_set = np.array(nmi_sgd_set)
nrow = nmi_sgd_set.shape[0]
ncol = nmi_sgd_set.shape[1]
records_file = open('sgd_cost_nmi_2_50_60k_ada_warmstart.csv', 'w')
for i in range(ncol):
    tmpstr = ''
    for j in range(nrow):
def my_spectral_embedding(adjacency,
                          n_components=8,
                          eigen_solver=None,
                          random_state=None,
                          eigen_tol=0.0,
                          norm_laplacian=False,
                          drop_first=True):
    """Project the sample on the first eigenvectors of the graph Laplacian.
    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.
    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.
    Note : Laplacian Eigenmaps is the actual algorithm implemented here.
    Read more in the :ref:`User Guide <spectral_embedding>`.
    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.
    n_components : integer, optional, default 8
        The dimension of the projection subspace.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.
    random_state : int, RandomState instance or None, optional, default: None
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition.  If int, random_state is the seed
        used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`. Used when
        ``solver`` == 'amg'.
    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
    norm_laplacian : bool, optional, default=False
        If True, then compute normalized Laplacian.
    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.
    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.
    Notes
    -----
    Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
    few eigenvectors will simply uncover the connected components of the graph.
    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG
    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    import warnings

    import numpy as np
    from scipy import sparse
    from scipy.linalg import eigh
    from scipy.sparse.linalg import eigsh, lobpcg

    from sklearn.utils import check_random_state, check_array, check_symmetric
    from sklearn.utils.extmath import _deterministic_vector_sign_flip
    # private sklearn helpers used below; the module path varies by version
    # (sklearn.manifold.spectral_embedding_ before 0.22)
    from sklearn.manifold._spectral_embedding import (_graph_is_connected,
                                                      _set_diag)

    adjacency = check_symmetric(adjacency)
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")
    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'. "
                         "Should be 'amg', 'arpack', or 'lobpcg'" %
                         eigen_solver)
    random_state = check_random_state(random_state)
    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1
    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")
    laplacian, dd = sparse.csgraph.laplacian(adjacency,
                                             normed=norm_laplacian,
                                             return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
        (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            # We are computing the opposite of the laplacian inplace so as
            # to spare a memory allocation of a possibly very large array
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian,
                                           k=n_components,
                                           sigma=1.0,
                                           which='LM',
                                           tol=eigen_tol,
                                           v0=v0)
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"
            # Revert the laplacian to its opposite to have lobpcg work
            laplacian *= -1
    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian,
                                        X,
                                        M=M,
                                        tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian,
                                            X,
                                            tol=1e-15,
                                            largest=False,
                                            maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError
    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        vectors = embedding[1:n_components].T
    else:
        vectors = embedding[:n_components].T

    return (lambdas, vectors)
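A usage sketch mirroring the tests earlier on this page (an rbf_kernel affinity over random data); note this variant returns a (lambdas, vectors) tuple. It assumes the private-helper imports above resolve on your sklearn version.

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(36)
sims = rbf_kernel(rng.randn(40, 5))
lambdas, vectors = my_spectral_embedding(sims, n_components=4,
                                         drop_first=False)
print(vectors.shape)                     # (40, 4)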
Example #16
def spectral_embedding(adjacency,
                       n_components=8,
                       eigen_solver=None,
                       random_state=None,
                       eigen_tol=1e-15,
                       norm_laplacian=False,
                       drop_first=True,
                       norm_adjacency=False,
                       scale_embedding=False,
                       verb=0):
    """

    REMARK :
    This is an adaptation of the same function in scikit-learn
    [http://scikit-learn.org/stable/modules/generated/sklearn.manifold.SpectralEmbedding.html]
    but slightly modified to account for optional scalings of the embedding,
    the ability to normalize the Laplacian with the random_walk option, and the
    ability to normalize the adjacency matrix with the Lafon and Coifman normalization
    [https://doi.org/10.1016/j.acha.2006.04.006] (see check_similarity)


    Project the sample on the first eigenvectors of the graph Laplacian.
    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.
    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.
    Note : Laplacian Eigenmaps is the actual algorithm implemented here.
    Read more in the :ref:`User Guide <spectral_embedding>`.
    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.
    n_components : integer, optional, default 8
        The dimension of the projection subspace.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.
    random_state : int, RandomState instance or None, optional, default: None
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition.  If int, random_state is the seed
        used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`. Used when
        ``solver`` == 'amg'.
    eigen_tol : float, optional, default=1e-15
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
    norm_laplacian : bool or string, optional, default=False
        If True, then compute normalized Laplacian.
        If 'random_walk', compute the random_walk normalization
        [see e.g. https://arxiv.org/abs/0711.0189]
    norm_adjacency : bool or string, optional, default=False
        Whether to normalize the adjacency with the method from diffusion maps
    scale_embedding : bool or string, optional, default=False
        Whether to scale the embedding.
        If True or 'LE', default scaling from the Laplacian Eigenmaps method.
        If 'CTD', Commute Time Distance based scaling (1/sqrt(lambda_k)) is used.
        If 'heuristic', use 1/sqrt(k) for each dimension k=1..n_components.
    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.
    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.
    Notes
    -----
    Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
    few eigenvectors will simply uncover the connected components of the graph.
    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG
    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    adjacency = check_similarity(adjacency, normalize=norm_adjacency)

    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            warnings.warn("The eigen_solver was set to 'amg', but pyamg is "
                          "not available. Switching to 'arpack' instead")
            # raise ValueError("The eigen_solver was set to 'amg', but pyamg "
            #                  "is not available.")

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'. "
                         "Should be 'amg', 'arpack', or 'lobpcg'" %
                         eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1
    if n_components > n_nodes:
        print(" n_components ({}) > ({}) n_nodes. setting \
              n_components=n_nodes".format(n_components, n_nodes))
        n_components = n_nodes

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
        (not sparse.isspmatrix(adjacency) or n_nodes < 5 * n_components)):
        try:

            laplacian, dd = compute_laplacian(adjacency,
                                              normed=norm_laplacian,
                                              return_diag=True)
            # Compute embedding. We compute the largest eigenvalue and then use
            # the opposite of the laplacian since computing the largest
            # eigenvalues is more efficient.
            (evals_max, _) = eigsh(laplacian,
                                   n_components,
                                   which='LM',
                                   tol=eigen_tol)
            maxval = evals_max.max()
            laplacian *= -1
            if sparse.isspmatrix(laplacian):
                diag_idx = (laplacian.row == laplacian.col)
                laplacian.data[diag_idx] += maxval
            else:
                laplacian.flat[::n_nodes + 1] += maxval
            lambdas, diffusion_map = eigsh(laplacian,
                                           n_components,
                                           which='LM',
                                           tol=eigen_tol)
            lambdas -= maxval
            lambdas *= -1
            idx = np.array(lambdas).argsort()
            d = lambdas[idx]
            embedding = diffusion_map.T[idx]
            if scale_embedding:
                if scale_embedding == 'CTD':
                    embedding[1:] = (embedding[1:, :].T *
                                     np.sqrt(1. / d[1:])).T
                    # embedding = embedding.T
                elif scale_embedding == 'heuristic':
                    embedding = embedding.T * np.sqrt(
                        1. / np.arange(1, n_components + 1))
                    embedding = embedding.T
                else:
                    embedding *= dd

        except RuntimeError:
            warnings.warn("arpack did not converge. trying lobpcg instead."
                          " scale_embedding set to default.")
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        # norm_laplacian='random_walk' does not work for the following,
        # replace by True
        if norm_laplacian:
            if norm_laplacian == 'unnormalized':
                norm_laplacian = False
            else:
                norm_laplacian = True
        laplacian, dd = compute_laplacian(adjacency,
                                          normed=norm_laplacian,
                                          return_diag=True)
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian,
                                        X,
                                        M=M,
                                        tol=1.e-12,
                                        largest=False)
        if scale_embedding:
            embedding = diffusion_map.T * dd
        else:
            embedding = diffusion_map.T
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        # norm_laplacian='random_walk' does not work for the following,
        # replace by True
        if norm_laplacian:
            if norm_laplacian == 'unnormalized':
                norm_laplacian = False
            else:
                norm_laplacian = True
        laplacian, dd = compute_laplacian(adjacency,
                                          normed=norm_laplacian,
                                          return_diag=True)
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian,
                                            X,
                                            tol=1e-15,
                                            largest=False,
                                            maxiter=2000)
            if scale_embedding:
                embedding = diffusion_map.T[:n_components] * dd
            else:
                embedding = diffusion_map.T[:n_components]
            if embedding.shape[0] == 1:
                raise ValueError

    embedding = _deterministic_vector_sign_flip(embedding)

    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
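This variant avoids scipy's shift-invert and instead shifts the spectrum by hand: it finds lambda_max, adds it to the diagonal of -L, and asks for the largest eigenvalues of the shifted matrix, which correspond to the smallest eigenvalues of L. A self-contained illustration of that trick (toy data assumed):

import numpy as np
from scipy.sparse import csgraph
from scipy.sparse.linalg import eigsh

rng = np.random.RandomState(0)
A = rng.rand(30, 30)
A = 0.5 * (A + A.T)
L, _ = csgraph.laplacian(A, normed=True, return_diag=True)
(evals_max, _) = eigsh(L, 3, which='LM')
maxval = evals_max.max()
shifted = maxval * np.eye(L.shape[0]) - L     # eigenvalues become maxval - lambda
lam, _ = eigsh(shifted, 3, which='LM')
print(np.sort(maxval - lam))                  # smallest eigenvalues of L (~0 first)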
Example #17
def classical_MDS_embedding(adjacency,
                            n_components=8,
                            eigen_solver='arpack',
                            random_state=None,
                            eigen_tol=1e-15,
                            norm_laplacian=False,
                            drop_first=True,
                            norm_adjacency=False,
                            scale_embedding=False,
                            verb=0):

    adjacency = check_similarity(adjacency, normalize=norm_adjacency)

    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            warnings.warn("The eigen_solver was set to 'amg', but pyamg is "
                          "not available. Switching to 'arpack' instead")
            # raise ValueError("The eigen_solver was set to 'amg', but pyamg "
            #                  "is not available.")

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'. "
                         "Should be 'amg', 'arpack', or 'lobpcg'" %
                         eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1
    if n_components > n_nodes:
        print(" n_components ({}) > ({}) n_nodes. setting \
              n_components=n_nodes".format(n_components, n_nodes))
        n_components = n_nodes

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
        (not isspmatrix(adjacency) or n_nodes < 5 * n_components)):
        try:

            dist_mat, dd = get_dist_mat(adjacency, return_diag=True)

            (lambdas, diffusion_map) = eigsh(dist_mat,
                                             n_components,
                                             which='LA',
                                             tol=eigen_tol)
            idx = np.array(-lambdas).argsort()
            d = lambdas[idx]
            embedding = diffusion_map.T[idx]
            if scale_embedding:
                if scale_embedding == 'CTD':
                    embedding[1:] = (embedding[1:, :].T *
                                     np.sqrt(1. / d[1:])).T
                    # embedding = embedding.T
                elif scale_embedding == 'heuristic':
                    embedding = embedding.T * np.sqrt(
                        1. / np.arange(1, n_components + 1))
                    embedding = embedding.T
                else:
                    embedding *= dd

        except RuntimeError:
            warnings.warn("arpack did not converge. trying lobpcg instead."
                          " scale_embedding set to default.")
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"

    else:
        raise ValueError("So far, only eigen_solver='arpack' is implemented.")

    embedding = _deterministic_vector_sign_flip(embedding)

    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
Example #18
    def spectral_embedding(self,
                           adjacency,
                           n_components=8,
                           eigen_solver=None,
                           random_state=None,
                           eigen_tol=0.0,
                           drop_first=True):
        """
        see original at https://github.com/scikit-learn/scikit-learn/blob/14031f6/sklearn/manifold/spectral_embedding_.py#L133
        customization 1: return lambdas with the embedded matrix.
        customization 2: norm_laplacian is always True
        """
        norm_laplacian = True
        adjacency = check_symmetric(adjacency)

        try:
            from pyamg import smoothed_aggregation_solver
        except ImportError:
            if eigen_solver == "amg":
                raise ValueError(
                    "The eigen_solver was set to 'amg', but pyamg is "
                    "not available.")

        if eigen_solver is None:
            eigen_solver = 'arpack'
        elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
            raise ValueError("Unknown value for eigen_solver: '%s'. "
                             "Should be 'amg', 'arpack', or 'lobpcg'" %
                             eigen_solver)

        random_state = check_random_state(random_state)

        n_nodes = adjacency.shape[0]
        # Whether to drop the first eigenvector
        if drop_first:
            n_components = n_components + 1

        if not _graph_is_connected(adjacency):
            warnings.warn("Graph is not fully connected, spectral embedding"
                          " may not work as expected.")

        laplacian, dd = graph_laplacian(adjacency,
                                        normed=norm_laplacian,
                                        return_diag=True)
        if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
            (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
            # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
            # for details see the source code in scipy:
            # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
            # /lobpcg/lobpcg.py#L237
            # or matlab:
            # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
            laplacian = _set_diag(laplacian, 1, norm_laplacian)

            # Here we'll use shift-invert mode for fast eigenvalues
            # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
            #  for a short explanation of what this means)
            # Because the normalized Laplacian has eigenvalues between 0 and 2,
            # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
            # when finding eigenvalues of largest magnitude (keyword which='LM')
            # and when these eigenvalues are very large compared to the rest.
            # For very large, very sparse graphs, I - L can have many, many
            # eigenvalues very near 1.0.  This leads to slow convergence.  So
            # instead, we'll use ARPACK's shift-invert mode, asking for the
            # eigenvalues near 1.0.  This effectively spreads-out the spectrum
            # near 1.0 and leads to much faster convergence: potentially an
            # orders-of-magnitude speedup over simply using keyword which='LA'
            # in standard mode.
            try:
                # We are computing the opposite of the laplacian inplace so as
                # to spare a memory allocation of a possibly very large array
                laplacian *= -1
                lambdas, diffusion_map = eigsh(laplacian,
                                               k=n_components,
                                               sigma=1.0,
                                               which='LM',
                                               tol=eigen_tol)
                embedding = diffusion_map.T[n_components::-1] * dd

            except RuntimeError:
                # When submatrices are exactly singular, an LU decomposition
                # in arpack fails. We fallback to lobpcg
                eigen_solver = "lobpcg"
                # Revert the laplacian to its opposite to have lobpcg work
                laplacian *= -1

        if eigen_solver == 'amg':
            # Use AMG to get a preconditioner and speed up the eigenvalue
            # problem.
            if not sparse.issparse(laplacian):
                warnings.warn("AMG works better for sparse matrices")
            # lobpcg needs double precision floats
            laplacian = check_array(laplacian,
                                    dtype=np.float64,
                                    accept_sparse=True)
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
            M = ml.aspreconditioner()
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian,
                                            X,
                                            M=M,
                                            tol=1.e-12,
                                            largest=False)
            embedding = diffusion_map.T * dd
            if embedding.shape[0] == 1:
                raise ValueError

        elif eigen_solver == "lobpcg":
            # lobpcg needs double precision floats
            laplacian = check_array(laplacian,
                                    dtype=np.float64,
                                    accept_sparse=True)
            if n_nodes < 5 * n_components + 1:
                # see note above under arpack why lobpcg has problems with small
                # number of nodes
                # lobpcg will fallback to eigh, so we short circuit it
                if sparse.isspmatrix(laplacian):
                    laplacian = laplacian.toarray()
                lambdas, diffusion_map = eigh(laplacian)
                embedding = diffusion_map.T[:n_components] * dd
            else:
                laplacian = _set_diag(laplacian, 1, norm_laplacian)
                # We increase the number of eigenvectors requested, as lobpcg
                # doesn't behave well in low dimension
                X = random_state.rand(laplacian.shape[0], n_components + 1)
                X[:, 0] = dd.ravel()
                lambdas, diffusion_map = lobpcg(laplacian,
                                                X,
                                                tol=1e-15,
                                                largest=False,
                                                maxiter=2000)
                embedding = diffusion_map.T[:n_components] * dd
                if embedding.shape[0] == 1:
                    raise ValueError

        embedding = _deterministic_vector_sign_flip(embedding)
        if drop_first:
            return embedding[1:n_components].T, lambdas
        else:
            return embedding[:n_components].T, lambdas