Example #1
import numpy as np
from numpy.testing import assert_array_almost_equal
# eigen_decomposition is assumed to be imported from the package's eigendecomposition module

def _test_all_solvers(solvers_to_test, S):
    # Check that all eigensolvers return the same spectrum of S,
    # both for the largest and for the smallest eigenvalues.
    for largest in [True, False]:
        Lambdas = {}
        for eigen_solver in solvers_to_test:
            lambdas, diffusion_map = eigen_decomposition(S, n_components=3,
                                                         eigen_solver=eigen_solver,
                                                         largest=largest, drop_first=False)
            Lambdas[eigen_solver] = np.sort(lambdas)
        # pairwise comparison:
        for i in range(len(solvers_to_test)):
            for j in range(i + 1, len(solvers_to_test)):
                print(largest)
                print(str(solvers_to_test[i]) + " + " + str(solvers_to_test[j]))
                assert_array_almost_equal(Lambdas[solvers_to_test[i]],
                                          Lambdas[solvers_to_test[j]])
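
A minimal way to run this check, assuming eigen_decomposition has already been imported from the package and that the solver names 'dense' and 'arpack' (taken from the docstrings below) are available, might look like this:

import numpy as np

rng = np.random.RandomState(0)
A = rng.rand(20, 20)
S = np.dot(A, A.T)                      # small symmetric positive semi-definite test matrix
_test_all_solvers(['dense', 'arpack'], S)
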
Example #2
def isomap(Geometry, n_components=8, eigen_solver=None,
           random_state=None, eigen_tol=1e-12, path_method='auto',
           distance_matrix=None, graph_distance_matrix=None,
           centered_matrix=None):
    """
    Parameters
    ----------        
    Geometry : a Geometry object from Mmani.geometry.geometry

    n_components : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        auto : algorithm will attempt to choose the best method for input data
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        lobpcg : Locally Optimal Block Preconditioned Conjugate Gradient Method.
            a preconditioned eigensolver for large symmetric positive definite 
            (SPD) generalized eigenproblems.
        amg : AMG requires pyamg to be installed. It can be faster on very large, 
            sparse problems, but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=1e-12
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
        
    path_method : string, method for computing graph shortest path. One of :
        'auto', 'D', 'FW', 'BF', 'J'. See scipy.sparse.csgraph.shortest_path 
        for more information. 
    
    distance_matrix : sparse Ndarray (n_obs, n_obs), optional. Pairwise distance matrix;
        sparse zeros are treated as 'infinite' distances. 
    
    graph_distance_matrix : Ndarray (n_obs, n_obs), optional. Pairwise graph distance 
        matrix. Output of graph_shortest_path.
    
    centered_matrix : Ndarray (n_obs, n_obs), optional. Centered version of 
        graph_distance_matrix

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    """

    random_state = check_random_state(random_state)    

    if not isinstance(Geometry, geom.Geometry):
        raise RuntimeError("Geometry object not Mmani.embedding.geometry ",
                            "Geometry class")
        
    # Step 1: use geometry to calculate the distance matrix 
    if (distance_matrix is None) and (centered_matrix is None):
        distance_matrix = Geometry.get_distance_matrix()
    
    # Step 2: use graph_shortest_path to construct D_G
    ## WARNING: D_G is an (NxN) DENSE matrix!! 
    if (graph_distance_matrix is None) and (centered_matrix is None):
        graph_distance_matrix = graph_shortest_path(distance_matrix,
                                                    method=path_method,
                                                    directed=False)
                                                            
    # Step 3: center graph distance matrix 
    if centered_matrix is None:
        centered_matrix = center_matrix(graph_distance_matrix)
    
        
    # Step 4: compute d largest eigenvectors/values of centered_matrix 
    lambdas, diffusion_map = eigen_decomposition(centered_matrix, n_components, eigen_solver,
                                                 random_state, eigen_tol, 
                                                 largest = True)    
    # Step 5: 
    # return Y = [sqrt(lambda_1)*V_1, ..., sqrt(lambda_d)*V_d]
    ind = np.argsort(lambdas)[::-1]  # sort eigenvalues from largest to smallest
    lambdas = lambdas[ind]
    diffusion_map = diffusion_map[:, ind]
    embedding = diffusion_map[:, 0:n_components] * np.sqrt(lambdas[0:n_components])
    return embedding
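
Steps 3-5 above amount to classical multidimensional scaling on the graph distances. The following self-contained numpy sketch illustrates that computation; the explicit double-centering and the dense eigensolver are stand-ins chosen for illustration, not the package's center_matrix or eigen_decomposition implementations:

import numpy as np

def _mds_embedding_sketch(graph_distance_matrix, n_components):
    # Double-center the squared graph distances: B = -0.5 * H * D**2 * H,
    # where H = I - (1/n) * ones((n, n)).
    n = graph_distance_matrix.shape[0]
    H = np.eye(n) - np.ones((n, n)) / n
    B = -0.5 * H.dot(graph_distance_matrix ** 2).dot(H)
    # Take the n_components largest eigenpairs and rescale by sqrt(lambda),
    # mirroring Step 5: Y = [sqrt(lambda_1)*V_1, ..., sqrt(lambda_d)*V_d].
    lambdas, vectors = np.linalg.eigh(B)
    ind = np.argsort(lambdas)[::-1][:n_components]
    return vectors[:, ind] * np.sqrt(np.maximum(lambdas[ind], 0))
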
Example #3
def spectral_embedding(
    Geometry, n_components=8, eigen_solver=None, random_state=None, eigen_tol=0.0, drop_first=True, diffusion_maps=False
):
    """Project the sample on the first eigen vectors of the graph Laplacian.
    
    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.
    
    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).
    
    However, care must be taken to always make the affinity matrix symmetric
    so that the eigen vector decomposition works as expected.
    
    Parameters
    ----------        
    Geometry : a Geometry object from Mmani.embedding.geometry

    n_components : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        auto : algorithm will attempt to choose the best method for input data
        dense  : use standard dense matrix operations for the eigenvalue
                    decomposition.  For this method, M must be an array
                    or matrix type.  This method should be avoided for
                    large problems.
        arpack : use arnoldi iteration in shift-invert mode.
                    For this method, M may be a dense matrix, sparse matrix,
                    or general linear operator.
                    Warning: ARPACK can be unstable for some problems.  It is
                    best to try several random seeds in order to check results.
        lobpcg : Locally Optimal Block Preconditioned Conjugate Gradient Method.
            a preconditioned eigensolver for large symmetric positive definite 
            (SPD) generalized eigenproblems.
        amg : AMG requires pyamg to be installed. It can be faster on very large, 
            sparse problems, but may also lead to instabilities.
    
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.
    
    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
    
    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.
        
    diffusion_maps : boolean, optional. Whether to return the diffusion map 
        version by re-scaling the embedding by the eigenvalues. 
        
    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.
    
    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component. If the graph has many components, the first few eigenvectors
    will simply uncover the connected components of the graph.
    
    References
    ----------
    * http://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    random_state = check_random_state(random_state)

    if not isinstance(Geometry, geom.Geometry):
        raise RuntimeError("Geometry object not Mmani.embedding.geometry Geometry class")
    affinity_matrix = Geometry.get_affinity_matrix()
    if not _graph_is_connected(affinity_matrix):
        warnings.warn("Graph is not fully connected, spectral embedding may not work as expected.")

    laplacian = Geometry.get_laplacian_matrix(return_lapsym=True, symmetrize=True)
    n_nodes = laplacian.shape[0]
    lapl_type = Geometry.laplacian_type

    re_normalize = False
    if eigen_solver in ["amg", "lobpcg"]:  # these methods require a symmetric positive definite matrix!
        if lapl_type not in ["symmetricnormalized", "unnormalized"]:
            re_normalize = True
            # If lobpcg (or amg with lobpcg) is chosen and
            # If the Laplacian is non-symmetric then we need to extract:
            # the w (weight) vector from geometry
            # and the symmetric Laplacian = S.
            # The actual Laplacian is L = W^{-1}S  (Where W is the diagonal matrix of w)
            # Which has the same spectrum as: L* = W^{-1/2}SW^{-1/2} which is symmetric
            # We calculate the eigen-decomposition of L*: [D, V]
            # then use W^{-1/2}V  to compute the eigenvectors of L
            # See (Handbook for Cluster Analysis Chapter 2 Proposition 1).
            # However, since we censor the affinity matrix A at a radius it is not guaranteed
            # to be positive definite. But since L = W^{-1}S has maximum eigenvalue 1 (stochastic matrix)
            # and L* has the same spectrum it also has largest e-value of 1.
            # therefore if we look at I - L* then this has smallest eigenvalue of 0 and so
            # must be positive semi-definite. It also has the same spectrum as L* but
            # lambda(I - L*) = 1 - lambda(L*).
            # Finally, since we want positive definite not semi-definite we use (1+epsilon)*I
            # instead of I to make the smallest eigenvalue epsilon.
            epsilon = 2
            w = np.array(Geometry.w)
            symmetrized_laplacian = Geometry.laplacian_symmetric.copy()
            if sparse.isspmatrix(symmetrized_laplacian):
                symmetrized_laplacian.data /= np.sqrt(w[symmetrized_laplacian.row])
                symmetrized_laplacian.data /= np.sqrt(w[symmetrized_laplacian.col])
                symmetrized_laplacian = (1 + epsilon) * sparse.identity(n_nodes) - symmetrized_laplacian
            else:
                symmetrized_laplacian /= np.sqrt(w)
                symmetrized_laplacian /= np.sqrt(w[:, np.newaxis])
                symmetrized_laplacian = (1 + epsilon) * np.identity(n_nodes) - symmetrized_laplacian
    if re_normalize:
        print("using symmetrized laplacian")
        lambdas, diffusion_map = eigen_decomposition(
            symmetrized_laplacian, n_components + 1, eigen_solver, random_state, eigen_tol, drop_first, largest=False
        )
        lambdas = -lambdas + epsilon
    else:
        lambdas, diffusion_map = eigen_decomposition(
            laplacian, n_components + 1, eigen_solver, random_state, eigen_tol, drop_first, largest=True
        )
    if re_normalize:
        diffusion_map /= np.sqrt(w[:, np.newaxis])  # put back on original Laplacian space
        diffusion_map /= np.linalg.norm(diffusion_map, axis=0)  # norm 1 vectors
    ind = np.argsort(lambdas)
    ind = ind[::-1]
    lambdas = lambdas[ind]
    lambdas[0] = 0
    diffusion_map = diffusion_map[:, ind]
    if diffusion_maps:
        diffusion_map = diffusion_map * np.sqrt(lambdas)
    if drop_first:
        embedding = diffusion_map[:, 1 : (n_components + 1)]
    else:
        embedding = diffusion_map[:, :n_components]
    return embedding
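
The re-normalization branch relies on the identity described in the comments above: L = W^{-1}S and L* = W^{-1/2}SW^{-1/2} share the same spectrum, and eigenvectors of L are recovered as W^{-1/2}V from eigenvectors V of L*. A small self-contained numpy check of that identity (random matrices only, no Geometry object involved):

import numpy as np

rng = np.random.RandomState(42)
S = rng.rand(6, 6)
S = (S + S.T) / 2.0                                   # symmetric S
w = rng.rand(6) + 0.5                                 # positive weights, W = diag(w)

L = S / w[:, np.newaxis]                              # L  = W^{-1} S
L_star = S / np.sqrt(w)[:, np.newaxis] / np.sqrt(w)   # L* = W^{-1/2} S W^{-1/2}

# L and L* have the same spectrum (up to ordering and numerical error).
print(np.allclose(np.sort(np.linalg.eigvals(L).real),
                  np.sort(np.linalg.eigvalsh(L_star))))

# Eigenvectors of L are W^{-1/2} times the eigenvectors of L*.
lam, V = np.linalg.eigh(L_star)
V_L = V / np.sqrt(w)[:, np.newaxis]
print(np.allclose(L.dot(V_L), V_L * lam))
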