def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True, mode=None):
    """Project the sample on the first eigen vectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally an
    affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must taken to always make the affinity matrix symmetric
    so that the eigen vector decomposition works as expected.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component. If there graph has many components, the first few eigenvectors
    will simply uncover the connected components of the graph.

    References
    ----------
    * http://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        # pyamg is only required for the 'amg' solver; any other solver
        # works without it.
        if eigen_solver == "amg" or mode == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")
    if mode is not None:
        # 'mode' is the deprecated spelling of 'eigen_solver'; it takes
        # precedence when explicitly given, for backward compatibility.
        warnings.warn("'mode' was renamed to eigen_solver "
                      "and will be removed in 0.15.",
                      DeprecationWarning)
        eigen_solver = mode

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector: request one extra component so
    # that n_components remain after the constant eigenvector is dropped.
    if drop_first:
        n_components = n_components + 1

    # Check that the given matrix is symmetric; if not, warn and symmetrize
    # it by averaging with its transpose.
    if ((not sparse.isspmatrix(adjacency)
         and not np.all((adjacency - adjacency.T) < 1e-10))
            or (sparse.isspmatrix(adjacency)
                and (adjacency - adjacency.T).nnz > 0)):
        warnings.warn("Graph adjacency matrix should be symmetric. "
                      "Converted to be symmetric by average with its "
                      "transpose.")
        adjacency = .5 * (adjacency + adjacency.T)

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    laplacian, dd = graph_laplacian(adjacency,
                                    normed=norm_laplacian, return_diag=True)
    if (eigen_solver == 'arpack'
        or eigen_solver != 'lobpcg' and
            (not sparse.isspmatrix(laplacian)
             or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        # for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1. ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0. This leads to slow convergence. So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0. This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol)
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg.
            eigen_solver = "lobpcg"

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats (np.float was an alias for
        # the builtin float, i.e. float64, and was removed from NumPy).
        laplacian = laplacian.astype(np.float64)
        laplacian = _set_diag(laplacian, 1)
        ml = smoothed_aggregation_solver(atleast2d_or_csr(laplacian))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError("lobpcg failed to produce a valid embedding")

    elif eigen_solver == "lobpcg":
        laplacian = laplacian.astype(np.float64)  # lobpcg needs native floats
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to symeig, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.todense()
            lambdas, diffusion_map = symeig(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError("lobpcg failed to produce a valid embedding")

    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
def spectral_embedding(adjacency, n_components=8, mode=None,
                       random_state=None, eig_tol=0.0):
    """Project the sample on the first eigen vectors of the graph Laplacian

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally an
    affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must taken to always make the affinity matrix symmetric
    so that the eigen vector decomposition works as expected.

    Parameters
    ----------
    adjacency: array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components: integer, optional
        The dimension of the projection subspace.

    mode: {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when mode == 'amg'. By default
        arpack is used.

    eig_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack mode.

    Returns
    -------
    embedding: array, shape: (n_samples, n_components)
        The reduced samples

    Notes
    -----
    The graph should contain only one connected component, elsewhere
    the results make little sense.

    References
    ----------
    [1] http://en.wikipedia.org/wiki/LOBPCG
    [2] LOBPCG: http://dx.doi.org/10.1137%2FS1064827500366124
    """
    from scipy import sparse
    from ..utils.arpack import eigsh
    from scipy.sparse.linalg import lobpcg
    from scipy.sparse.linalg.eigen.lobpcg.lobpcg import symeig
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        # pyamg is only needed for mode='amg'; other modes work without it.
        if mode == "amg":
            raise ValueError("The mode was set to 'amg', but pyamg is "
                             "not available.")

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrices given is symmetric
    if mode is None:
        mode = 'arpack'
    elif mode not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for mode: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'" % mode)
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=True, return_diag=True)
    if (mode == 'arpack'
        or mode != 'lobpcg' and
            (not sparse.isspmatrix(laplacian)
             or n_nodes < 5 * n_components)):
        # lobpcg used with mode='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 0)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        # for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1. ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0. This leads to slow convergence. So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0. This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eig_tol)
            embedding = diffusion_map.T[::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg.
            mode = "lobpcg"

    if mode == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        # lobpcg needs double precision floats (np.float was an alias for
        # the builtin float, i.e. float64, and was removed from NumPy).
        laplacian = laplacian.astype(np.float64)
        ml = smoothed_aggregation_solver(laplacian.tocsr())
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError("lobpcg failed to produce a valid embedding")
    elif mode == "lobpcg":
        laplacian = laplacian.astype(np.float64)  # lobpcg needs native floats
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to symeig, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.todense()
            lambdas, diffusion_map = symeig(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError("lobpcg failed to produce a valid embedding")
    return embedding
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True, mode=None):
    """Project the sample on the first eigen vectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally an
    affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must taken to always make the affinity matrix symmetric
    so that the eigen vector decomposition works as expected.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component. If there graph has many components, the first few eigenvectors
    will simply uncover the connected components of the graph.

    References
    ----------
    * http://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        # pyamg is only required for the 'amg' solver; any other solver
        # works without it.
        if eigen_solver == "amg" or mode == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")
    if mode is not None:
        # 'mode' is the deprecated spelling of 'eigen_solver'; it takes
        # precedence when explicitly given, for backward compatibility.
        warnings.warn("'mode' was renamed to eigen_solver "
                      "and will be removed in 0.15.",
                      DeprecationWarning)
        eigen_solver = mode

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector: request one extra component so
    # that n_components remain after the constant eigenvector is dropped.
    if drop_first:
        n_components = n_components + 1

    # Check that the given matrix is symmetric; if not, warn and symmetrize
    # it by averaging with its transpose.
    if ((not sparse.isspmatrix(adjacency)
         and not np.all((adjacency - adjacency.T) < 1e-10))
            or (sparse.isspmatrix(adjacency)
                and (adjacency - adjacency.T).nnz > 0)):
        warnings.warn("Graph adjacency matrix should be symmetric. "
                      "Converted to be symmetric by average with its "
                      "transpose.")
        adjacency = .5 * (adjacency + adjacency.T)

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    laplacian, dd = graph_laplacian(adjacency,
                                    normed=norm_laplacian, return_diag=True)
    if (eigen_solver == 'arpack'
        or eigen_solver != 'lobpcg' and
            (not sparse.isspmatrix(laplacian)
             or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        # for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1. ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0. This leads to slow convergence. So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0. This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol)
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg.
            eigen_solver = "lobpcg"

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats (np.float was an alias for
        # the builtin float, i.e. float64, and was removed from NumPy).
        laplacian = laplacian.astype(np.float64)
        laplacian = _set_diag(laplacian, 1)
        ml = smoothed_aggregation_solver(atleast2d_or_csr(laplacian))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError("lobpcg failed to produce a valid embedding")

    elif eigen_solver == "lobpcg":
        laplacian = laplacian.astype(np.float64)  # lobpcg needs native floats
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to symeig, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.todense()
            lambdas, diffusion_map = symeig(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError("lobpcg failed to produce a valid embedding")

    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
def spectral_embedding(adjacency, n_components=8, mode=None,
                       random_state=None):
    """Project the sample on the first eigen vectors of the graph Laplacian

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally an
    affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must taken to always make the affinity matrix symmetric
    so that the eigen vector decomposition works as expected.

    Parameters
    ----------
    adjacency: array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components: integer, optional
        The dimension of the projection subspace.

    mode: {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when mode == 'amg'. By default
        arpack is used.

    Returns
    -------
    embedding: array, shape: (n_samples, n_components)
        The reduced samples

    Notes
    -----
    The graph should contain only one connected component, elsewhere
    the results make little sense.
    """
    from scipy import sparse
    from ..utils.arpack import eigsh
    from scipy.sparse.linalg import lobpcg
    from scipy.sparse.linalg.eigen.lobpcg.lobpcg import symeig
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        # pyamg is only needed for mode='amg'; other modes work without it.
        if mode == "amg":
            raise ValueError("The mode was set to 'amg', but pyamg is "
                             "not available.")

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # XXX: Should we check that the matrices given is symmetric
    if mode is None:
        mode = 'arpack'
    elif mode not in ('arpack', 'lobpcg', 'amg'):
        # Mention every accepted value: 'lobpcg' is valid but was missing
        # from the original message.
        raise ValueError("Unknown value for mode: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'" % mode)
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=True, return_diag=True)
    # Guard with mode != 'lobpcg' so an explicit lobpcg request does not
    # first run arpack and then have its result recomputed below.
    if (mode == 'arpack'
        or mode != 'lobpcg' and
            (not sparse.isspmatrix(laplacian)
             or n_nodes < 5 * n_components)):
        # lobpcg used with mode='amg' has bugs for low number of nodes
        laplacian = _set_diag(laplacian, 0)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        # for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1. ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0. This leads to slow convergence. So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0. This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                           sigma=1.0, which='LM')
            embedding = diffusion_map.T[::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg.
            mode = "lobpcg"

    if mode == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        # lobpcg needs double precision floats (np.float was an alias for
        # the builtin float, i.e. float64, and was removed from NumPy).
        laplacian = laplacian.astype(np.float64)
        ml = smoothed_aggregation_solver(laplacian.tocsr())
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError("lobpcg failed to produce a valid embedding")
    elif mode == "lobpcg":
        laplacian = laplacian.astype(np.float64)  # lobpcg needs native floats
        if n_nodes < 5 * n_components + 1:
            # lobpcg will fallback to symeig, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.todense()
            lambdas, diffusion_map = symeig(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError("lobpcg failed to produce a valid embedding")
    return embedding