def dm_eigenvector(data, k=0, mean_center=True,
                   metricpar={}, verbose=True, callback=None):
    r'''Return the :math:`k`-th eigenvector of the distance matrix.

    The matrix of pairwise distances is symmetric, so it has an
    orthonormal basis of eigenvectors. The parameter :math:`k` can be
    either an integer or an array of integers (for multi-dimensional
    filter functions). The index is zero-based, and eigenvalues are
    sorted by absolute value, so :math:`k=0` returns the eigenvector
    corresponding to the largest eigenvalue in magnitude.

    If `mean_center` is ``True``, the distance matrix is
    double-mean-centered before the eigenvalue decomposition.

    Reference: [R6]_, subsection “Principal metric SVD filters”.
    '''
    # k can be an integer or a list of integers
    # todo: check validity of k
    if data.ndim == 1:
        # dissimilarity matrix
        assert metricpar == {}, ('No optional parameter is allowed for a '
                                 'dissimilarity matrix.')
        D = data
        N = n_obs(D)
    else:
        # vector data
        D = pdist(data, **metricpar)
        N = len(data)
    DD = squareform(D)
    del D
    if mean_center:
        md = DD.mean(axis=1)
        DD -= md
        DD -= (md - md.mean())[:, np.newaxis]
    karray = np.atleast_1d(k)
    assert karray.ndim == 1
    maxk = 1 + karray.max()
    if callback:
        callback('Computing: distance matrix eigenvectors.')
    if hasattr(spla, 'eigsh'):
        w, v = spla.eigsh(DD, k=maxk, which='LM')
    else:  # for SciPy < 0.9.0
        w, v = spla.eigen_symmetric(DD, k=maxk, which='LM')
    sortedorder = np.argsort(np.abs(w))[::-1]
    if verbose:
        print('Eigenvalues:\n{}'.format(w[sortedorder]))
    ret = v[:, sortedorder[k]]
    # normalize
    return ret / np.sqrt((ret * ret).sum(axis=0))
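# Hedged stand-alone sketch (not part of the original module): the same
# computation as dm_eigenvector, written directly against NumPy/SciPy so the
# double mean-centering and the eigenvector extraction are visible in
# isolation. The random `points` array is invented for the example.
def _example_dm_eigenvector():
    import numpy as np
    from scipy.spatial.distance import pdist, squareform
    from scipy.sparse.linalg import eigsh

    rng = np.random.default_rng(0)
    points = rng.standard_normal((100, 3))

    # Square matrix of pairwise Euclidean distances.
    DD = squareform(pdist(points))

    # Double mean-centering, as with mean_center=True above.
    md = DD.mean(axis=1)
    DD -= md
    DD -= (md - md.mean())[:, np.newaxis]

    # Eigenpair with the largest eigenvalue in magnitude (k=0).
    w, v = eigsh(DD, k=1, which='LM')
    filter_values = v[:, 0] / np.sqrt((v[:, 0] ** 2).sum())
    print('Dominant eigenvalue: {0}'.format(w[0]))
    return filter_values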
def minimal_k_to_make_dataset_connected(data, lo=10, metricpar={},
                                        callback=None, verbose=True):
    r'''Find the smallest number of neighbors :math:`k` such that the
    :math:`k`-nearest-neighbor graph of the data set is connected.'''
    if data.ndim == 1:
        # dissimilarity matrix
        assert metricpar == {}, ('No optional parameter is allowed for a '
                                 'dissimilarity matrix.', metricpar)
        D = data
        N = n_obs(D)
        def nn(k):
            return nearest_neighbors_from_dm(D, k, callback)
    elif metricpar.get('metric', 'euclidean') == 'euclidean':
        # vector data, Euclidean metric
        data_cKDTree = cKDTree(data)
        N = len(data)
        def nn(k):
            return data_cKDTree.query(data, k)
    else:
        # vector data, non-Euclidean metric
        print('Inefficient: generating pairwise distances from vector data.')
        D = pdist(data, **metricpar)
        N = len(data)
        def nn(k):
            return nearest_neighbors_from_dm(D, k, callback)
    k = lo
    ncomp = 2
    while ncomp > 1:
        if verbose:
            print('Try up to {0} neighbors.'.format(k))
        d, j = nn(k)
        assert np.all(d[:, 0] == 0.)
        if verbose:
            print('Compute threshold for connectedness.')
        ncomp, kk = _conn_comp_loop(j)
        if k == N:
            assert ncomp == 1
        k = min(2 * k, N)
    return kk + 1
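# Hedged stand-alone sketch (not part of the original module): the doubling
# search above, rebuilt with cKDTree and scipy.sparse.csgraph instead of the
# module's _conn_comp_loop. The helper and the example data are invented for
# illustration; it re-checks each candidate k directly, which is simpler but
# slower than deriving the threshold from a single neighbor query.
def _example_minimal_connectivity_k():
    import numpy as np
    from scipy.spatial import cKDTree
    from scipy.sparse import coo_matrix
    from scipy.sparse.csgraph import connected_components

    def knn_graph_is_connected(points, k):
        # Symmetrized k-nearest-neighbor graph; each point counts as its
        # own first neighbor, matching the convention above.
        tree = cKDTree(points)
        _, j = tree.query(points, k)
        n = len(points)
        rows = np.repeat(np.arange(n), k)
        graph = coo_matrix((np.ones(n * k), (rows, j.ravel())),
                           shape=(n, n))
        ncomp_, _ = connected_components(graph, directed=False)
        return ncomp_ == 1

    rng = np.random.default_rng(0)
    points = rng.standard_normal((200, 2))

    k = 10  # same starting guess as the default lo=10
    while not knn_graph_is_connected(points, k) and k < len(points):
        k = min(2 * k, len(points))
    # Walk back down to the smallest k that still connects the graph.
    while k > 2 and knn_graph_is_connected(points, k - 1):
        k -= 1
    print('Minimal k for a connected neighborhood graph: {0}'.format(k))
    return k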
def zero_filter(data, **kwargs):
    r'''Return an array of the correct size filled with zeros.'''
    if data.ndim == 1:
        return np.zeros(n_obs(data))
    else:
        return np.zeros(len(data))
def graph_Laplacian(data, eps, n=1, k=1, weighted_edges=False, sigma_eps=1.,
                    normalized=True, metricpar={}, verbose=True,
                    callback=None):
    r'''Graph Laplacian of the neighborhood graph.

    * First, if *k* is 1, form the *eps*-neighborhood graph of the data
      set: vertices are the data points; two points are connected if
      their distance is at most *eps*.

    * Alternatively, if *k* is greater than 1, form the neighborhood
      graph from the :math:`k` nearest neighbors of each point. Each
      point counts as its own first nearest neighbor, so feasible values
      start with :math:`k=2`.

    * If *weighted_edges* is ``False``, each edge gets weight 1.
      Otherwise, each edge is weighted with

      .. math::

        \exp\left(-\frac{d^2}{2\sigma^2}\right),

      where :math:`\sigma=\mathtt{eps}\cdot\mathtt{sigma\_eps}` and
      :math:`d` is the distance between the two points.

    * Form the graph Laplacian. The graph Laplacian is a self-adjoint
      operator on the real vector space spanned by the vertices and can
      thus be described by a symmetric matrix :math:`L`:

      If *normalized* is false, :math:`L` is closely related to the
      adjacency matrix of the graph: it has entries :math:`-w(i,j)`
      whenever nodes :math:`i` and :math:`j` are connected by an edge of
      weight :math:`w(i,j)` and zero if there is no edge. The
      :math:`i`-th diagonal entry holds the degree :math:`\deg(i)` of the
      corresponding vertex, so that row and column sums are zero.

      If *normalized* is true, each row :math:`i` of :math:`L` is
      additionally scaled by :math:`1/\sqrt{\deg(i)}`, and so is each
      column. This destroys the zero row and column sums but preserves
      symmetry.

    * Return the :math:`n`-th eigenvector of the graph Laplacian. The
      index is zero-based: the 0-th eigenvector is constant on all
      vertices and corresponds to the eigenvalue 0. :math:`n=1` returns
      the Fiedler vector, the eigenvector for the second-smallest
      eigenvalue.

    The normalized variant seems to give consistently better results, so
    it is always chosen in the GUI. This experience is based on only a
    few examples, however, so do not hesitate to try the non-normalized
    version if there is a reason for it.

    Reference: [R9]_; see especially Section 6.3 for normalization.
    '''
    assert n >= 1, 'The rank of the eigenvector must be positive.'
    assert isinstance(k, int)
    assert k >= 1
    if data.ndim == 1:
        # dissimilarity matrix
        assert metricpar == {}, ('No optional parameter is allowed for a '
                                 'dissimilarity matrix.')
        D = data
        N = n_obs(D)
    else:
        # vector data
        D = pdist(data, **metricpar)
        N = len(data)
    if callback:
        callback('Computing: neighborhood graph.')
    rowstart, targets, weights = \
        neighborhood_graph(D, k, eps, diagonal=True,
                           verbose=verbose, callback=callback)
    c = ncomp(rowstart, targets)
    if c > 1:
        print('The neighborhood graph has {0} components. '
              'Return zero values.'.format(c))
        return zero_filter(data)
    weights = Laplacian(rowstart, targets, weights,
                        weighted_edges, eps, sigma_eps, normalized)
    L = scipy.sparse.csr_matrix((weights, targets, rowstart))
    del weights, targets, rowstart
    if callback:
        callback('Computing: eigenvectors.')
    assert n < N, ('The rank of the eigenvector must be smaller than the '
                   'number of data points.')
    if hasattr(spla, 'eigsh'):
        w, v = spla.eigsh(L, k=n + 1, which='SA')
    else:  # for SciPy < 0.9.0
        w, v = spla.eigen_symmetric(L, k=n + 1, which='SA')
    # Strange: computing more eigenvectors seems faster.
    #w, v = spla.eigsh(L, k=n+1, sigma=0., which='LM')
    if verbose:
        print('Eigenvalues: {0}.'.format(w))
    order = np.argsort(w)
    if w[order[0]] < 0 and w[order[1]] < abs(w[order[0]]):
        raise RuntimeError('Negative eigenvalue of the graph Laplacian '
                           'found: {0}'.format(w))
    return v[:, order[n]]
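# Hedged stand-alone sketch (not part of the original module): the
# unweighted, normalized case of graph_Laplacian, built densely with NumPy
# on a small invented data set. laplacian_filter is a made-up helper that
# bypasses the module's neighborhood_graph/Laplacian machinery, so it only
# suits small inputs.
def _example_laplacian_filter():
    import numpy as np
    from scipy.sparse import csr_matrix
    from scipy.sparse.csgraph import connected_components
    from scipy.sparse.linalg import eigsh
    from scipy.spatial.distance import pdist, squareform

    def laplacian_filter(points, eps, n=1, normalized=True):
        DD = squareform(pdist(points))
        W = ((DD <= eps) & (DD > 0)).astype(float)  # unweighted edges
        ncomp_, _ = connected_components(csr_matrix(W), directed=False)
        if ncomp_ > 1:
            # Mirror the behavior above: no meaningful eigenvector exists.
            return np.zeros(len(points))
        deg = W.sum(axis=1)
        L = np.diag(deg) - W                        # row/column sums zero
        if normalized:
            dinv = 1. / np.sqrt(deg)
            L = L * dinv * dinv[:, np.newaxis]      # scale rows and columns
        w, v = eigsh(L, k=n + 1, which='SA')
        order = np.argsort(w)
        return v[:, order[n]]

    rng = np.random.default_rng(0)
    points = rng.uniform(0., 1., (100, 2)) * [4., 1.]  # elongated strip
    fiedler = laplacian_filter(points, eps=0.5)
    print('Fiedler vector range: [{0:.3f}, {1:.3f}]'.format(
        fiedler.min(), fiedler.max()))
    return fiedler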