Code Example #1
File: test_validation.py Project: Afey/scikit-learn
def test_check_symmetric():
    arr_sym = np.array([[0, 1], [1, 2]])
    arr_bad = np.ones(2)
    arr_asym = np.array([[0, 2], [0, 2]])

    test_arrays = {'dense': arr_asym,
                   'dok': sp.dok_matrix(arr_asym),
                   'csr': sp.csr_matrix(arr_asym),
                   'csc': sp.csc_matrix(arr_asym),
                   'coo': sp.coo_matrix(arr_asym),
                   'lil': sp.lil_matrix(arr_asym),
                   'bsr': sp.bsr_matrix(arr_asym)}

    # check error for bad inputs
    assert_raises(ValueError, check_symmetric, arr_bad)

    # check that asymmetric arrays are properly symmetrized
    for arr_format, arr in test_arrays.items():
        # Check for warnings and errors
        assert_warns(UserWarning, check_symmetric, arr)
        assert_raises(ValueError, check_symmetric, arr, raise_exception=True)

        output = check_symmetric(arr, raise_warning=False)
        if sp.issparse(output):
            assert_equal(output.format, arr_format)
            assert_array_equal(output.toarray(), arr_sym)
        else:
            assert_array_equal(output, arr_sym)
Code Example #2
def test_check_symmetric():
    arr_sym = np.array([[0, 1], [1, 2]])
    arr_bad = np.ones(2)
    arr_asym = np.array([[0, 2], [0, 2]])

    test_arrays = {
        'dense': arr_asym,
        'dok': sp.dok_matrix(arr_asym),
        'csr': sp.csr_matrix(arr_asym),
        'csc': sp.csc_matrix(arr_asym),
        'coo': sp.coo_matrix(arr_asym),
        'lil': sp.lil_matrix(arr_asym),
        'bsr': sp.bsr_matrix(arr_asym)
    }

    # check error for bad inputs
    with pytest.raises(ValueError):
        check_symmetric(arr_bad)

    # check that asymmetric arrays are properly symmetrized
    for arr_format, arr in test_arrays.items():
        # Check for warnings and errors
        with pytest.warns(UserWarning):
            check_symmetric(arr)
        with pytest.raises(ValueError):
            check_symmetric(arr, raise_exception=True)

        output = check_symmetric(arr, raise_warning=False)
        if sp.issparse(output):
            assert output.format == arr_format
            assert_array_equal(output.toarray(), arr_sym)
        else:
            assert_array_equal(output, arr_sym)
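The two tests above exercise the three behaviours of check_symmetric: warn on an asymmetric input, raise when raise_exception=True, and silently return the symmetrized matrix (the average of the input and its transpose) when raise_warning=False. A minimal sketch of those behaviours, assuming a standard scikit-learn installation:

import numpy as np
from sklearn.utils.validation import check_symmetric

arr_asym = np.array([[0.0, 2.0],
                     [0.0, 2.0]])

# Silently symmetrize: the result is (A + A.T) / 2.
print(check_symmetric(arr_asym, raise_warning=False))
# [[0. 1.]
#  [1. 2.]]

# Treat asymmetry as an error instead of a warning.
try:
    check_symmetric(arr_asym, raise_exception=True)
except ValueError as exc:
    print(exc)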
Code Example #3
def spectral_embedding(adjacency, norm_laplacian=True):
    adjacency = check_symmetric(adjacency)

    n_nodes = adjacency.shape[0]

    laplacian, dd = csgraph_laplacian(adjacency,
                                      normed=norm_laplacian,
                                      return_diag=True)
    return np.linalg.pinv(laplacian.todense())
Code Example #4
def _spectral_clustering(self):
    affinity_matrix_ = check_symmetric(self.affinity_matrix_)
    random_state = check_random_state(self.random_state)

    laplacian = sparse.csgraph.laplacian(affinity_matrix_, normed=True)
    _, vec = sparse.linalg.eigsh(sparse.identity(laplacian.shape[0]) - laplacian,
                                 k=self.n_clusters, sigma=None, which='LA')
    embedding = normalize(vec)
    _, self.labels_, _ = cluster.k_means(embedding, self.n_clusters,
                                         random_state=random_state, n_init=self.n_init)
Code Example #5
def spectral_clustering(affinity, n_clusters, norm_laplacian=True, random_state=None, n_init=20):
    """Spectral clustering.
    This is a simplified version of spectral_clustering in sklearn. 
    """
    affinity = check_symmetric(affinity)
    random_state = check_random_state(random_state)
    
    laplacian = sparse.csgraph.laplacian(affinity, normed=norm_laplacian)
    _, vec = sparse.linalg.eigsh(sparse.identity(laplacian.shape[0]) - laplacian, k=n_clusters, sigma=None, which='LA')
    embedding = normalize(vec)
    _, labels, _ = cluster.k_means(embedding, n_clusters, random_state=random_state, n_init=n_init)
    return labels
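The snippet above omits its imports; the sketch below is a hedged guess at the dependencies it relies on, based on the names it uses, together with a tiny call on a toy affinity matrix (the cluster label values themselves are arbitrary):

import numpy as np
from scipy import sparse
import scipy.sparse.csgraph   # makes sparse.csgraph available
import scipy.sparse.linalg    # makes sparse.linalg available
from sklearn import cluster
from sklearn.preprocessing import normalize
from sklearn.utils import check_random_state, check_symmetric

# Two well-separated blocks in the affinity matrix -> two clusters.
affinity = np.array([[1.0, 0.9, 0.1, 0.0],
                     [0.9, 1.0, 0.0, 0.1],
                     [0.1, 0.0, 1.0, 0.9],
                     [0.0, 0.1, 0.9, 1.0]])
labels = spectral_clustering(affinity, n_clusters=2, random_state=0)
print(labels)   # e.g. [0 0 1 1] (or the labels swapped)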
Code Example #6
def mds_3d_view(dissimilarities, metric=True, n_components=2, init=None,
                max_iter=300, verbose=0, eps=1e-3, random_state=None):

    dissimilarities = check_symmetric(dissimilarities, raise_exception=True)
    n_samples = dissimilarities.shape[0]
    random_state = check_random_state(random_state)
    sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
    sim_flat_w = sim_flat[sim_flat != 0]
    if init is None:
        # Randomly choose initial configuration
        X = random_state.rand(n_samples * n_components)
        X = X.reshape((n_samples, n_components))
    else:
        # overrides the parameter p
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init

    old_stress = None
    ir = IsotonicRegression()
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis = euclidean_distances(X)
        disparities = dissimilarities

        # Compute stress
        stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2

        # Update X using the Guttman transform
        dis[dis == 0] = 1e-5
        ratio = disparities / dis
        B = - ratio
        B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
        X = 1. / n_samples * np.dot(B, X)

        dis = np.sqrt((X ** 2).sum(axis=1)).sum()
        if verbose >= 2:
            print('it: %d, stress %s' % (it, stress))
        if old_stress is not None:
            if (old_stress - stress / dis) < eps:
                if verbose:
                    print('breaking at iteration %d with stress %s' % (it,
                                                                       stress))
                break
        old_stress = stress / dis

    return X, stress, it + 1
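As with the previous example, the imports are omitted; a hedged, self-contained usage sketch (package paths are assumptions inferred from the names used in the function body) might look like:

import numpy as np
from sklearn.isotonic import IsotonicRegression
from sklearn.metrics import euclidean_distances
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_symmetric

# A small symmetric dissimilarity matrix for four points on a line.
D = np.array([[0.0, 1.0, 2.0, 3.0],
              [1.0, 0.0, 1.0, 2.0],
              [2.0, 1.0, 0.0, 1.0],
              [3.0, 2.0, 1.0, 0.0]])
X, stress, n_iter = mds_3d_view(D, n_components=2, random_state=0)
print(X.shape, n_iter)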
Code Example #7
def spectral_clustering(affinity_matrix_, n_clusters, k, seed=1, n_init=20):
    affinity_matrix_ = check_symmetric(affinity_matrix_)
    random_state = check_random_state(seed)

    laplacian = sparse.csgraph.laplacian(affinity_matrix_, normed=True)
    _, vec = sparse.linalg.eigsh(sparse.identity(laplacian.shape[0]) -
                                 laplacian,
                                 k=k,
                                 sigma=None,
                                 which='LA')
    embedding = normalize(vec)
    _, labels_, _ = cluster.k_means(embedding,
                                    n_clusters,
                                    random_state=seed,
                                    n_init=n_init)
    return labels_
Code Example #8
def similarity_matrix(G, sim=None, par=None, symmetric=True):

    if sim is None:
        raise ValueError('Specify similarity measure!')
    if par is None:
        raise ValueError('Specify parameter(s) of similarity measure!')

    n = G.number_of_nodes()
    pos = nx.get_node_attributes(G, 'pos')
    pos = np.reshape([pos[i] for i in range(n)], (n, len(pos[0])))

    if sim == 'euclidean' or sim == 'minkowski':
        A = squareform(pdist(pos, sim))

    elif sim == 'knn':
        A = skn.kneighbors_graph(pos,
                                 par,
                                 mode='connectivity',
                                 metric='minkowski',
                                 p=2,
                                 metric_params=None,
                                 n_jobs=-1)
        A = A.todense()

    elif sim == 'radius':
        A = skn.radius_neighbors_graph(pos,
                                       par,
                                       mode='connectivity',
                                       metric='minkowski',
                                       p=2,
                                       metric_params=None,
                                       n_jobs=-1)
        A = A.todense()

    elif sim == 'rbf':
        gamma_ = par
        A = rbf_kernel(pos, gamma=gamma_)

    if symmetric:
        A = check_symmetric(A)

    for i in range(n):
        for j in range(n):
            if np.abs(A[i, j]) > 0:
                G.add_edge(i, j, weight=A[i, j])

    return G
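A hedged usage sketch for similarity_matrix with the 'rbf' branch; the imports below are assumptions inferred from the names the function references, and the node positions are random, purely for illustration:

import networkx as nx
import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils.validation import check_symmetric

G = nx.Graph()
rng = np.random.RandomState(0)
for i, p in enumerate(rng.rand(5, 2)):
    G.add_node(i, pos=p)

G = similarity_matrix(G, sim='rbf', par=1.0)   # par is the RBF gamma here
print(list(G.edges(data='weight'))[:3])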
Code Example #9
def spectral_embedding(adjacency,
                       n_components=8,
                       eigen_solver=None,
                       random_state=None,
                       eigen_tol=0.0,
                       norm_laplacian=True,
                       drop_first=True):
    adjacency = check_symmetric(adjacency)

    eigen_solver = 'arpack'
    norm_laplacian = False
    random_state = check_random_state(random_state)
    n_nodes = adjacency.shape[0]
    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")
    laplacian, dd = csgraph_laplacian(adjacency,
                                      normed=norm_laplacian,
                                      return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
        (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # print("[INFILE] eigen_solver : ", eigen_solver, "norm_laplacian:", norm_laplacian)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        try:
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian,
                                           k=n_components,
                                           sigma=1.0,
                                           which='LM',
                                           tol=eigen_tol,
                                           v0=v0)
            embedding = diffusion_map.T[n_components::-1]
            if norm_laplacian:
                embedding = embedding / dd
        except RuntimeError:
            eigen_solver = "lobpcg"
            laplacian *= -1

    embedding = _deterministic_vector_sign_flip(embedding)
    return embedding[:n_components].T
Code Example #10
def spectral_hg_partitioning(hg,
                             n_clusters,
                             assign_labels='kmeans',
                             n_components=None,
                             random_state=None,
                             n_init=10):
    """
    :param hg: instance of HyperG
    :param n_clusters: int,
    :param assign_labels: str, {'kmeans', 'discretize'}, default: 'kmeans'
    :param n_components: int, number of eigen vectors to use for the spectral embedding
    :param random_state: int or None (default)
    :param n_init: int, number of times the k-means algorithm will be run
    with different centroid seeds.
    :return: numpy array, shape = (n_samples,), labels of each point
    """

    assert isinstance(hg, HyperG)
    assert n_clusters <= hg.num_nodes()

    random_state = check_random_state(random_state)

    if n_components is None:
        n_components = n_clusters

    L = hg.laplacian().toarray()
    L = check_symmetric(L)

    eigenval, eigenvec = eigh(L)
    embeddings = eigenvec[:, :n_components]

    if assign_labels == 'kmeans':
        _, labels, _ = k_means(embeddings,
                               n_clusters,
                               random_state=random_state,
                               n_init=n_init)
    else:
        labels = discretize(embeddings, random_state=random_state)

    return labels
Code Example #11
def sammon(dissimilarity_matrix, n_components,
           init, l_rate, decay, base_rate,
           max_iter, verbose, eps, sensitivity, random_state):
    dissimilarity_matrix = check_array(dissimilarity_matrix)
    dissimilarity_matrix = check_symmetric(
        dissimilarity_matrix, raise_exception=True)
    random_state = check_random_state(random_state)
    n_samples = dissimilarity_matrix.shape[0]

    if init is None:
        # Randomly choose initial configuration
        X = random_state.rand(n_samples * n_components)
        X = X.reshape((n_samples, n_components))
        X -= np.mean(X, axis=0)
        X *= np.mean(dissimilarity_matrix)
    else:
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init

    if hasattr(init, '__array__'):
        init = np.asarray(init).copy()

    pos, stress, n_iter_ = _sammon(
        dissimilarity_matrix,
        X,
        sensitivity=sensitivity,
        random_state=random_state,
        max_iter=max_iter,
        base_rate=base_rate,
        verbose=verbose,
        l_rate=l_rate,
        decay=decay,
        eps=eps)

    return pos, stress, n_iter_
Code Example #12
def detect_change_points(path, groundEvent, nodeImportance, alpha_pg, flag1,
                         flag2, s, kernel_fun):
    """Use the spectral clustering implemented in sklearn (Mode: fully connected )
    (only two parameters to tune : alpha, k)
    Parameters
    ----------
        path: the path of the sequence networks
        groundEvent:ground event information
        nodeImportance:
        alpha_pg: skip probability for pageRank
        flag1: for construct the network(True : original; False : supplement )
        flag2: for construct the probability distribution(True: pageRank
                or leaderRank; False: normalized Rank)
        s:the delay delta
        kernel_fun: used for adjacency/affinity matrix computation.

    Returns
    ----------

    """
    print "step 1"
    get_prvalue_sequence(path, alpha_pg, flag1, flag2)

    print "Step 2"
    numSnapshots = len(prScore)
    numNodes = len(prScore[0])
    print "there are totally %d snapshots,each snapshot has %d nodes" \
          % (numSnapshots, numNodes)

    temp_alpha = 0.0
    temp_k = 0
    temp_f = 0.0

    distance_array = distance.pdist(
        np.reshape(np.array(prScore), (numSnapshots, numNodes)), kernel_fun)
    for alpha in range(1, 101, 1):
        alpha = alpha / 100.0
        func = partial(tmp, alpha)
        condensed_similarity = list(map(func, distance_array))  # list() so squareform gets a sequence
        adjacentM = distance.squareform(condensed_similarity)

        adjacentM = check_symmetric(adjacentM)
        if not _graph_is_connected(adjacentM):
            print("not fully connected alpha: ", alpha)
            warnings.warn("Graph is not fully connected.")
            graph_weighted = nx.from_numpy_matrix(adjacentM)
            Lcc, p = largestConnectedComponent(adjacentM, graph_weighted)
            if len(Lcc) < 1.0 * numSnapshots:
                print("connected nodes: ", len(Lcc))
                continue
            else:
                print("connected nodes: ", len(Lcc))

        else:
            graph_weighted = nx.from_numpy_matrix(adjacentM)
            Lcc, p = largestConnectedComponent(adjacentM, graph_weighted)
            print("connected nodes: ", len(Lcc))

        best_k = -1
        best_score = -np.inf
        for index, k in enumerate((2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)):

            if k >= numSnapshots:
                break
            try:
                score = 0.0
                for i in range(10):
                    model = SpectralClustering(n_clusters=k,
                                               assign_labels='discretize',
                                               affinity='precomputed',
                                               random_state=1).fit(adjacentM)
                    assigments = model.labels_
                    smallkEigVector = spectral_embedding(np.array(adjacentM),
                                                         n_components=k,
                                                         eigen_solver=None,
                                                         random_state=None,
                                                         eigen_tol=0.0,
                                                         norm_laplacian=True,
                                                         drop_first=False)

                    score += metrics.silhouette_score(
                        np.reshape(np.array(smallkEigVector),
                                   (numSnapshots, k)), assigments)

                score = score / 10.0

                if score > best_score:
                    best_score = score
                    best_k = k
            except Exception as e:
                print("except, ", e)

        model = SpectralClustering(n_clusters=best_k,
                                   assign_labels='discretize',
                                   affinity='precomputed',
                                   random_state=1).fit(adjacentM)
        best_assigments = model.labels_

        try:
            p_c = predictResult(best_assigments)

            precision, recall, fvalue, fpr = Evaluation(p_c,
                                                        groundEvent,
                                                        s,
                                                        numSnapshots,
                                                        K=best_k,
                                                        alpha=alpha)
            if fvalue > temp_f:
                temp_f = fvalue
                temp_alpha = alpha
                temp_k = best_k
        except:
            print "no changes detected"
Code Example #13
def _smacof_single(dissimilarities,
                   metric=True,
                   n_components=2,
                   init=None,
                   max_iter=300,
                   verbose=0,
                   eps=1e-3,
                   random_state=None,
                   history=None):
    """Computes multidimensional scaling using SMACOF algorithm
    Parameters
    ----------
    dissimilarities : ndarray, shape (n_samples, n_samples)
        Pairwise dissimilarities between the points. Must be symmetric.
    metric : boolean, optional, default: True
        Compute metric or nonmetric SMACOF algorithm.
    n_components : int, optional, default: 2
        Number of dimensions in which to immerse the dissimilarities. If an
        ``init`` array is provided, this option is overridden and the shape of
        ``init`` is used to determine the dimensionality of the embedding
        space.
    init : ndarray, shape (n_samples, n_components), optional, default: None
        Starting configuration of the embedding to initialize the algorithm. By
        default, the algorithm is initialized with a randomly chosen array.
    max_iter : int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run.
    verbose : int, optional, default: 0
        Level of verbosity.
    eps : float, optional, default: 1e-3
        Relative tolerance with respect to stress at which to declare
        convergence.
    random_state : int, RandomState instance or None, optional, default: None
        The generator used to initialize the centers.  If int, random_state is
        the seed used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`.
    Returns
    -------
    X : ndarray, shape (n_samples, n_components)
        Coordinates of the points in a ``n_components``-space.
    stress : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points).
    n_iter : int
        The number of iterations corresponding to the best stress.
    """
    dissimilarities = check_symmetric(dissimilarities, raise_exception=True)

    n_samples = dissimilarities.shape[0]
    random_state = check_random_state(random_state)

    sim_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
    sim_flat_w = sim_flat[sim_flat != 0]
    if init is None:
        # Randomly choose initial configuration
        X = random_state.rand(n_samples * n_components)
        X = X.reshape((n_samples, n_components))
    else:
        # overrides the parameter p
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init

    old_stress = None
    ir = IsotonicRegression()
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis = euclidean_distances(X)

        if metric:
            disparities = dissimilarities
        else:
            dis_flat = dis.ravel()
            # dissimilarities with 0 are considered as missing values
            dis_flat_w = dis_flat[sim_flat != 0]

            # Compute the disparities using a monotonic regression
            disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
            disparities = dis_flat.copy()
            disparities[sim_flat != 0] = disparities_flat
            disparities = disparities.reshape((n_samples, n_samples))
            disparities *= np.sqrt(
                (n_samples * (n_samples - 1) / 2) / (disparities**2).sum())

        # Compute stress
        stress = ((dis.ravel() - disparities.ravel())**2).sum() / 2

        # Update X using the Guttman transform
        dis[dis == 0] = 1e-5
        ratio = disparities / dis
        B = -ratio
        B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
        X = 1. / n_samples * np.dot(B, X)

        dis = np.sqrt((X**2).sum(axis=1)).sum()

        if history is not None:
            history.epoch(it, 0, stress, X)
        if verbose >= 2:
            print('it: %d, stress %s' % (it, stress))
        if old_stress is not None:
            if (old_stress - stress / dis) < eps:
                if verbose:
                    print('breaking at iteration %d with stress %s' %
                          (it, stress))
                break
        old_stress = stress / dis

    return X, stress, it + 1
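A hedged usage sketch for _smacof_single above; the imports are assumptions based on the names the function uses, and the dissimilarity matrix is built from random points purely for illustration:

import numpy as np
from sklearn.isotonic import IsotonicRegression
from sklearn.metrics import euclidean_distances
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_symmetric

rng = np.random.RandomState(0)
D = euclidean_distances(rng.rand(6, 2))   # symmetric dissimilarity matrix

X, stress, n_iter = _smacof_single(D, n_components=2, random_state=0)
print(X.shape, n_iter)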
Code Example #14
def _smacof_with_anchors_single(config, similarities, metric=True, n_components=2, init=None,
				   max_iter=300, verbose=0, eps=1e-3, random_state=None, estimated_dist_weights=None):
	"""
	Computes multidimensional scaling using SMACOF algorithm
	Parameters
	----------
	config : Config object
		configuration object for anchor-tag deployment parameters
	similarities: symmetric ndarray, shape [n * n]
		similarities between the points
	metric: boolean, optional, default: True
		compute metric or nonmetric SMACOF algorithm
	n_components: int, optional, default: 2
		number of dimension in which to immerse the similarities
		overwritten if initial array is provided.
	init: {None or ndarray}, optional
		if None, randomly chooses the initial configuration
		if ndarray, initialize the SMACOF algorithm with this array
	max_iter: int, optional, default: 300
		Maximum number of iterations of the SMACOF algorithm for a single run
	verbose: int, optional, default: 0
		level of verbosity
	eps: float, optional, default: 1e-3
		relative tolerance w.r.t. stress to declare convergence
	random_state: integer or numpy.RandomState, optional
		The generator used to initialize the centers. If an integer is
		given, it fixes the seed. Defaults to the global numpy random
		number generator.
	Returns
	-------
	X: ndarray (n_samples, n_components), float
			   coordinates of the n_samples points in a n_components-space
	stress_: float
		The final value of the stress (sum of squared distance of the
		disparities and the distances for all constrained points)
	n_iter : int
		Number of iterations run
	last_positions: ndarray [X1,...,Xn]
		An array of computed Xs.
	"""
	NO_OF_TAGS, NO_OF_ANCHORS = config.no_of_tags, config.no_of_anchors
	similarities = check_symmetric(similarities, raise_exception=True)

	n_samples = similarities.shape[0]
	random_state = check_random_state(random_state)

	sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel()
	sim_flat_w = sim_flat[sim_flat != 0]

	if init is None:
		# Randomly choose initial configuration
		X = random_state.rand(n_samples * n_components)
		X = X.reshape((n_samples, n_components))
		# uncomment the following if weight matrix W is not hollow
		#X[:-2] = Xa
	else:
		# overrides the parameter p
		n_components = init.shape[1]
		if n_samples != init.shape[0]:
			raise ValueError("init matrix should be of shape (%d, %d)" %
							 (n_samples, n_components))
		X = init

	old_stress = None
	ir = IsotonicRegression()

	# setup weight matrix
	if getattr(config, 'weights', None) is not None:
		weights = config.weights
	else:
		weights = np.ones((n_samples, n_samples))
	if getattr(config, 'missingdata', None):
		weights[-NO_OF_TAGS:, -NO_OF_TAGS:] = 0
	if estimated_dist_weights is not None:
		weights[-NO_OF_TAGS:, -NO_OF_TAGS:] = estimated_dist_weights
	diag = np.arange(n_samples)
	weights[diag, diag] = 0

	last_n_configs = []
	Xa = config.anchors
	for it in range(max_iter):
		# Compute distance and monotonic regression
		dis = euclidean_distances(X)

		if metric:
			disparities = similarities
		else:
			dis_flat = dis.ravel()
			# similarities with 0 are considered as missing values
			dis_flat_w = dis_flat[sim_flat != 0]

			# Compute the disparities using a monotonic regression
			disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
			disparities = dis_flat.copy()
			disparities[sim_flat != 0] = disparities_flat
			disparities = disparities.reshape((n_samples, n_samples))
			disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
								   (disparities ** 2).sum())

		# Compute stress
		stress = (weights.ravel()*(dis.ravel() - disparities.ravel()) ** 2).sum() / 2
		#stress = ((dis[:-NO_OF_TAGS, -NO_OF_TAGS:].ravel() - disparities[:-NO_OF_TAGS, -NO_OF_TAGS:].ravel()) ** 2).sum()

		# Update X using the Guttman transform
		dis[dis == 0] = 1e5
		ratio = weights*disparities / dis
		B = - ratio
		B[diag, diag] = 0
		B[diag, diag] = -B.sum(axis=1)

		# Apply update to only tag configuration since anchor config is already known
		
		V = - weights
		V[diag, diag] += weights.sum(axis=1)
		# V_inv = np.linalg.pinv(V)
		V12 = V[-NO_OF_TAGS:, :-NO_OF_TAGS]
		B11 = B[-NO_OF_TAGS:, -NO_OF_TAGS:]
		Zu = X[-NO_OF_TAGS:]
		B12 = B[-NO_OF_TAGS:, :-NO_OF_TAGS]
		V11_inv = np.linalg.inv(V[-NO_OF_TAGS:, -NO_OF_TAGS:]) 
		Xu = V11_inv.dot(B11.dot(Zu) + (B12 - V12).dot(Xa)) 

		# merge known anchors config with new tags config 
		X = np.concatenate((Xa, Xu))
		last_n_configs.append(X)

		#X = (1/n_samples)*B.dot(X)

		#dis = np.sqrt((X ** 2).sum(axis=1)).sum()
		dis = (weights*dis**2).sum() / 2
		if verbose >= 2:
			print('it: %d, stress %s' % (it, stress))
		if old_stress is not None:
			if (old_stress - stress / dis) < eps:
				if verbose:
					print('breaking at iteration %d with stress %s' % (it,
																	   stress))
				break
		old_stress = stress / dis
	return X, stress, it + 1, np.array(last_n_configs)
Code Example #15
def _smacof_with_anchors_single(config,
                                similarities,
                                metric=True,
                                n_components=2,
                                init=None,
                                max_iter=300,
                                verbose=0,
                                eps=1e-3,
                                random_state=None,
                                estimated_dist_weights=None):
    """
	Computes multidimensional scaling using SMACOF algorithm
	Parameters
	----------
	config : Config object
		configuration object for anchor-tag deployment parameters
	similarities: symmetric ndarray, shape [n * n]
		similarities between the points
	metric: boolean, optional, default: True
		compute metric or nonmetric SMACOF algorithm
	n_components: int, optional, default: 2
		number of dimension in which to immerse the similarities
		overwritten if initial array is provided.
	init: {None or ndarray}, optional
		if None, randomly chooses the initial configuration
		if ndarray, initialize the SMACOF algorithm with this array
	max_iter: int, optional, default: 300
		Maximum number of iterations of the SMACOF algorithm for a single run
	verbose: int, optional, default: 0
		level of verbosity
	eps: float, optional, default: 1e-3
		relative tolerance w.r.t. stress to declare convergence
	random_state: integer or numpy.RandomState, optional
		The generator used to initialize the centers. If an integer is
		given, it fixes the seed. Defaults to the global numpy random
		number generator.
	Returns
	-------
	X: ndarray (n_samples, n_components), float
			   coordinates of the n_samples points in a n_components-space
	stress_: float
		The final value of the stress (sum of squared distance of the
		disparities and the distances for all constrained points)
	n_iter : int
		Number of iterations run
	last_positions: ndarray [X1,...,Xn]
		An array of computed Xs.
	"""
    NO_OF_TAGS, NO_OF_ANCHORS = config.no_of_tags, config.no_of_anchors
    similarities = check_symmetric(similarities, raise_exception=True)

    n_samples = similarities.shape[0]
    random_state = check_random_state(random_state)

    sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel()
    sim_flat_w = sim_flat[sim_flat != 0]

    if init is None:
        # Randomly choose initial configuration
        X = random_state.rand(n_samples * n_components)
        X = X.reshape((n_samples, n_components))
        # uncomment the following if weight matrix W is not hollow
        #X[:-2] = Xa
    else:
        # overrides the parameter p
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init

    old_stress = None
    ir = IsotonicRegression()

    # setup weight matrix
    if getattr(config, 'weights', None) is not None:
        weights = config.weights
    else:
        weights = np.ones((n_samples, n_samples))
    if getattr(config, 'missingdata', None):
        weights[-NO_OF_TAGS:, -NO_OF_TAGS:] = 0
    if estimated_dist_weights is not None:
        weights[-NO_OF_TAGS:, -NO_OF_TAGS:] = estimated_dist_weights
    diag = np.arange(n_samples)
    weights[diag, diag] = 0

    last_n_configs = []
    Xa = config.anchors
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis = euclidean_distances(X)

        if metric:
            disparities = similarities
        else:
            dis_flat = dis.ravel()
            # similarities with 0 are considered as missing values
            dis_flat_w = dis_flat[sim_flat != 0]

            # Compute the disparities using a monotonic regression
            disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
            disparities = dis_flat.copy()
            disparities[sim_flat != 0] = disparities_flat
            disparities = disparities.reshape((n_samples, n_samples))
            disparities *= np.sqrt(
                (n_samples * (n_samples - 1) / 2) / (disparities**2).sum())

        # Compute stress
        stress = (weights.ravel() *
                  (dis.ravel() - disparities.ravel())**2).sum() / 2
        #stress = ((dis[:-NO_OF_TAGS, -NO_OF_TAGS:].ravel() - disparities[:-NO_OF_TAGS, -NO_OF_TAGS:].ravel()) ** 2).sum()

        # Update X using the Guttman transform
        dis[dis == 0] = 1e5
        ratio = weights * disparities / dis
        B = -ratio
        B[diag, diag] = 0
        B[diag, diag] = -B.sum(axis=1)

        # Apply update to only tag configuration since anchor config is already known

        V = -weights
        V[diag, diag] += weights.sum(axis=1)
        # V_inv = np.linalg.pinv(V)
        V12 = V[-NO_OF_TAGS:, :-NO_OF_TAGS]
        B11 = B[-NO_OF_TAGS:, -NO_OF_TAGS:]
        Zu = X[-NO_OF_TAGS:]
        B12 = B[-NO_OF_TAGS:, :-NO_OF_TAGS]
        V11_inv = np.linalg.inv(V[-NO_OF_TAGS:, -NO_OF_TAGS:])
        Xu = V11_inv.dot(B11.dot(Zu) + (B12 - V12).dot(Xa))

        # merge known anchors config with new tags config
        X = np.concatenate((Xa, Xu))
        last_n_configs.append(X)

        #X = (1/n_samples)*B.dot(X)

        #dis = np.sqrt((X ** 2).sum(axis=1)).sum()
        dis = (weights * dis**2).sum() / 2
        if verbose >= 2:
            print('it: %d, stress %s' % (it, stress))
        if old_stress is not None:
            if (old_stress - stress / dis) < eps:
                if verbose:
                    print('breaking at iteration %d with stress %s' %
                          (it, stress))
                break
        old_stress = stress / dis
    return X, stress, it + 1, np.array(last_n_configs)
Code Example #16
def _smacof_single(dissimilarities1,
                   dissimilarities2,
                   p,
                   weights1=None,
                   weights2=None,
                   metric=True,
                   n_components=2,
                   init1=None,
                   init2=None,
                   max_iter=300,
                   verbose=0,
                   eps=1e-3,
                   random_state1=None,
                   random_state2=None):
    """
    Computes multidimensional scaling using SMACOF algorithm

    Parameters
    ----------
    dissimilarities : ndarray, shape (n_samples, n_samples)
        Pairwise dissimilarities between the points. Must be symmetric.

    metric : boolean, optional, default: True
        Compute metric or nonmetric SMACOF algorithm.

    n_components : int, optional, default: 2
        Number of dimensions in which to immerse the dissimilarities. If an
        ``init`` array is provided, this option is overridden and the shape of
        ``init`` is used to determine the dimensionality of the embedding
        space.

    init : ndarray, shape (n_samples, n_components), optional, default: None
        Starting configuration of the embedding to initialize the algorithm. By
        default, the algorithm is initialized with a randomly chosen array.

    max_iter : int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run.

    verbose : int, optional, default: 0
        Level of verbosity.

    eps : float, optional, default: 1e-3
        Relative tolerance with respect to stress at which to declare
        convergence.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Returns
    -------
    X : ndarray, shape (n_samples, n_components)
        Coordinates of the points in a ``n_components``-space.

    stress : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points).

    n_iter : int
        The number of iterations corresponding to the best stress.
    """
    dissimilarities1 = check_symmetric(dissimilarities1, raise_exception=True)
    dissimilarities2 = check_symmetric(dissimilarities2, raise_exception=True)

    if dissimilarities1.shape != dissimilarities2.shape:
        print("Error. Distance matrices have different shapes.")
        sys.exit("Error. Distance matrices have different shapes.")

    n_samples = dissimilarities1.shape[0]

    X1, sim_flat1, sim_flat_w1 = initialize(dissimilarities1, random_state1,
                                            init1, n_samples, n_components)
    X2, sim_flat2, sim_flat_w2 = initialize(dissimilarities2, random_state2,
                                            init2, n_samples, n_components)

    #Default: equal weights
    if weights1 is None:
        weights1 = np.ones((n_samples, n_samples))
    if weights2 is None:
        weights2 = np.ones(n_samples)

    # Disparity-specific weights (V in Borg)
    V1 = np.zeros((n_samples, n_samples))
    for i in range(n_samples):
        diagonal = 0
        for j in range(n_samples):
            V1[i, j] = -weights1[i, j]
            diagonal += weights1[i, j]
        V1[i, i] = diagonal

    # Locus-specific weights
    V2 = np.zeros((n_samples, n_samples))
    for i, weight in enumerate(weights2):
        V2[i, i] = weight * p * n_samples

    inv_V = moore_penrose(V1 + V2)

    old_stress = None
    ir = IsotonicRegression()
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis1 = euclidean_distances(X1)
        dis2 = euclidean_distances(X2)

        if metric:
            disparities1 = dissimilarities1
            disparities2 = dissimilarities2
        else:
            disparities1 = nonmetric_disparities1(dis1, sim_flat1, n_samples)
            disparities2 = nonmetric_disparities2(dis2, sim_flat2, n_samples)

        # Compute stress
        stress = ((dis1.ravel() - disparities1.ravel())**2).sum() + (
            (dis2.ravel() - disparities2.ravel())**2
        ).sum() + n_samples * p * ssd(
            X1, X2
        )  #multiply by n_samples to make ssd term comparable in magnitude to embedding error terms

        # Update X1 using the Guttman transform
        X1 = guttman(X1, X2, disparities1, inv_V, V2, dis1)

        # Update X2 using the Guttman transform
        X2 = guttman(X2, X1, disparities2, inv_V, V2, dis2)

        # Test stress
        dis1 = np.sqrt((X1**2).sum(axis=1)).sum()
        dis2 = np.sqrt((X2**2).sum(axis=1)).sum()
        dis = np.mean((dis1, dis2))
        if verbose >= 2:
            print('it: %d, stress %s' % (it, stress))
        if old_stress is not None:
            if np.abs(old_stress - stress / dis) < eps:
                if verbose:
                    print('breaking at iteration %d with stress %s' %
                          (it, stress))
                break
        old_stress = stress / dis

    return X1, X2, stress, it + 1
Code Example #17
def my_spectral_embedding(adjacency,
                          n_components=8,
                          eigen_solver=None,
                          random_state=None,
                          eigen_tol=0.0,
                          norm_laplacian=False,
                          drop_first=True):
    """Project the sample on the first eigenvectors of the graph Laplacian.
    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.
    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).
    However care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.
    Note : Laplacian Eigenmaps is the actual algorithm implemented here.
    Read more in the :ref:`User Guide <spectral_embedding>`.
    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.
    n_components : integer, optional, default 8
        The dimension of the projection subspace.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.
    random_state : int, RandomState instance or None, optional, default: None
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition.  If int, random_state is the seed
        used by the random number generator; If RandomState instance,
        random_state is the random number generator; If None, the random number
        generator is the RandomState instance used by `np.random`. Used when
        ``solver`` == 'amg'.
    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.
    norm_laplacian : bool, optional, default=True
        If True, then compute normalized Laplacian.
    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.
    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.
    Notes
    -----
    Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
    few eigenvectors will simply uncover the connected components of the graph.
    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG
    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    import warnings

    import numpy as np
    from scipy import sparse
    from scipy.linalg import eigh
    from scipy.sparse.linalg import eigsh, lobpcg

    from sklearn.base import BaseEstimator
    from sklearn.externals import six
    from sklearn.utils import check_random_state, check_array, check_symmetric
    from sklearn.utils.extmath import _deterministic_vector_sign_flip
    from sklearn.metrics.pairwise import rbf_kernel
    from sklearn.neighbors import kneighbors_graph
    # _graph_is_connected and _set_diag are used below but are not imported by
    # the block above; this path assumes an older scikit-learn (recent releases
    # moved these private helpers to sklearn.manifold._spectral_embedding).
    from sklearn.manifold.spectral_embedding_ import (_graph_is_connected,
                                                      _set_diag)

    adjacency = check_symmetric(adjacency)
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")
    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'" %
                         eigen_solver)
    random_state = check_random_state(random_state)
    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1
    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")
    laplacian, dd = sparse.csgraph.laplacian(adjacency,
                                             normed=norm_laplacian,
                                             return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
        (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            # We are computing the opposite of the laplacian inplace so as
            # to spare a memory allocation of a possibly very large array
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian,
                                           k=n_components,
                                           sigma=1.0,
                                           which='LM',
                                           tol=eigen_tol,
                                           v0=v0)
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"
            # Revert the laplacian to its opposite to have lobpcg work
            laplacian *= -1
    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian,
                                        X,
                                        M=M,
                                        tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian,
                                dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian,
                                            X,
                                            tol=1e-15,
                                            largest=False,
                                            maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError
    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        vectors = embedding[1:n_components].T
    else:
        vectors = embedding[:n_components].T

    return (lambdas, vectors)
Code Example #18
File: mdsData.py Project: trevorbalint/MDSkmeans
import os

import pandas as pd
from sklearn import manifold, utils  # inferred from the names used below

# Get the student/postcode/OA data from the flat file
def readstudentdata():
    cur_path = os.path.dirname(__file__)
    new_path = os.path.relpath('..\\studentsByOA.csv', cur_path)
    return pd.read_csv(new_path)


inData = pd.read_csv("distancematrix.csv", header=0, index_col="oa11")

# the real distance matrix is asymmetric - mds requires a symmetric one
# use this function to create a symmetric version
# it averages the matrix with its transpose
symmMatrix = utils.check_symmetric(inData.to_numpy())  # .as_matrix() was removed from pandas

mds = manifold.MDS(n_components=2,
                   max_iter=3000,
                   eps=1e-4,
                   random_state=None,
                   dissimilarity="precomputed",
                   n_jobs=1,
                   metric=True)
coords = mds.fit(symmMatrix).embedding_

oldData = readstudentdata()
oldData = oldData.set_index("oa11")
coordsDF = pd.DataFrame(data=coords, index=inData.index.values)
oldData.loc[:, "fakex"] = pd.Series(coordsDF.loc[:, 0],
                                    index=inData.index.values)
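As a hedged sanity check of the comments above (check_symmetric returns the average of the matrix and its transpose), one could verify:

import numpy as np

M = inData.to_numpy()
assert np.allclose(symmMatrix, (M + M.T) / 2)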
Code Example #19
def _smacof_single_p(similarities,
                     n_uq,
                     metric=True,
                     n_components=2,
                     init=None,
                     max_iter=300,
                     verbose=0,
                     eps=1e-3,
                     random_state=None):
    """
    Computes multidimensional scaling using SMACOF algorithm.

    Parameters
    ----------
    n_uq
    similarities: symmetric ndarray, shape [n * n]
        similarities between the points
    metric: boolean, optional, default: True
        compute metric or nonmetric SMACOF algorithm
    n_components: int, optional, default: 2
        number of dimension in which to immerse the similarities
        overwritten if initial array is provided.
    init: {None or ndarray}, optional
        if None, randomly chooses the initial configuration
        if ndarray, initialize the SMACOF algorithm with this array
    max_iter: int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run
    verbose: int, optional, default: 0
        level of verbosity
    eps: float, optional, default: 1e-3
        relative tolerance w.r.t. stress to declare convergence
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Returns
    -------
    X: ndarray (n_samples, n_components), float
               coordinates of the n_samples points in a n_components-space
    stress_: float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points)
    n_iter : int
        Number of iterations run.
    """
    similarities = check_symmetric(similarities, raise_exception=True)

    n_samples = similarities.shape[0]
    random_state = check_random_state(random_state)

    W = np.ones((n_samples, n_samples))
    W[:n_uq, :n_uq] = 0.0
    W[n_uq:, n_uq:] = 0.0

    V = -W
    V[np.arange(len(V)), np.arange(len(V))] = W.sum(axis=1)
    e = np.ones((n_samples, 1))

    Vp = np.linalg.inv(V + np.dot(e, e.T) / n_samples) - \
         np.dot(e, e.T) / n_samples

    sim_flat = similarities.ravel()
    sim_flat_w = sim_flat[sim_flat != 0]
    if init is None:
        # Randomly choose initial configuration
        X = random_state.rand(n_samples * n_components)
        X = X.reshape((n_samples, n_components))
    else:
        # overrides the parameter p
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init

    old_stress = None
    ir = IsotonicRegression()
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis = euclidean_distances(X)

        if metric:
            disparities = similarities
        else:
            dis_flat = dis.ravel()
            # similarities with 0 are considered as missing values
            dis_flat_w = dis_flat[sim_flat != 0]

            # Compute the disparities using a monotonic regression
            disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
            disparities = dis_flat.copy()
            disparities[sim_flat != 0] = disparities_flat
            disparities = disparities.reshape((n_samples, n_samples))
            disparities *= np.sqrt(
                (n_samples * (n_samples - 1) / 2) / (disparities**2).sum())
            disparities[similarities == 0] = 0

        # Compute stress
        _stress = (W.ravel() * ((dis.ravel() - disparities.ravel())**2)).sum()
        _stress /= 2

        # Update X using the Guttman transform
        dis[dis == 0] = 1e-5
        ratio = disparities / dis
        _B = -W * ratio
        _B[np.arange(len(_B)), np.arange(len(_B))] += (W * ratio).sum(axis=1)

        X = np.dot(Vp, np.dot(_B, X))

        dis = np.sqrt((X**2).sum(axis=1)).sum()

        if verbose >= 2:
            print('it: %d, stress %s' % (it, _stress))
        if old_stress is not None:
            if (old_stress - _stress / dis) < eps:
                if verbose:
                    print(f'breaking at iteration {it} with stress {_stress}')
                break
        old_stress = _stress / dis

    return X, _stress, it + 1
Code Example #20
File: mdsp.py Project: souvenir13/libact
def _smacof_single_p(similarities, n_uq, metric=True, n_components=2, init=None,
                   max_iter=300, verbose=0, eps=1e-3, random_state=None):
    """
    Computes multidimensional scaling using SMACOF algorithm

    Parameters
    ----------
    n_uq

    similarities: symmetric ndarray, shape [n * n]
        similarities between the points

    metric: boolean, optional, default: True
        compute metric or nonmetric SMACOF algorithm

    n_components: int, optional, default: 2
        number of dimension in which to immerse the similarities
        overwritten if initial array is provided.

    init: {None or ndarray}, optional
        if None, randomly chooses the initial configuration
        if ndarray, initialize the SMACOF algorithm with this array

    max_iter: int, optional, default: 300
        Maximum number of iterations of the SMACOF algorithm for a single run

    verbose: int, optional, default: 0
        level of verbosity

    eps: float, optional, default: 1e-3
        relative tolerance w.r.t. stress to declare convergence

    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Returns
    -------
    X: ndarray (n_samples, n_components), float
               coordinates of the n_samples points in a n_components-space

    stress_: float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points)

    n_iter : int
        Number of iterations run.

    """
    similarities = check_symmetric(similarities, raise_exception=True)

    n_samples = similarities.shape[0]
    random_state = check_random_state(random_state)

    W = np.ones((n_samples, n_samples))
    W[:n_uq, :n_uq] = 0.0
    W[n_uq:, n_uq:] = 0.0
    # W[np.arange(len(W)), np.arange(len(W))] = 0.0

    V = -W
    V[np.arange(len(V)), np.arange(len(V))] = W.sum(axis=1)
    e = np.ones((n_samples, 1))

    Vp = np.linalg.inv(V + np.dot(e, e.T)/n_samples) - np.dot(e, e.T)/n_samples
    # Vp = np.linalg.pinv(V)

    # sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel()
    sim_flat = similarities.ravel()
    sim_flat_w = sim_flat[sim_flat != 0]
    if init is None:
        # Randomly choose initial configuration
        X = random_state.rand(n_samples * n_components)
        X = X.reshape((n_samples, n_components))
    else:
        # overrides the parameter p
        n_components = init.shape[1]
        if n_samples != init.shape[0]:
            raise ValueError("init matrix should be of shape (%d, %d)" %
                             (n_samples, n_components))
        X = init

    old_stress = None
    ir = IsotonicRegression()
    for it in range(max_iter):
        # Compute distance and monotonic regression
        dis = euclidean_distances(X)

        if metric:
            disparities = similarities
        else:
            dis_flat = dis.ravel()
            # similarities with 0 are considered as missing values
            dis_flat_w = dis_flat[sim_flat != 0]

            # Compute the disparities using a monotonic regression
            disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w)
            disparities = dis_flat.copy()
            disparities[sim_flat != 0] = disparities_flat
            disparities = disparities.reshape((n_samples, n_samples))
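            # Rescale so that the squared disparities sum to
            # n_samples * (n_samples - 1) / 2 (the usual nonmetric-MDS
            # normalization); entries where the input similarity is zero are
            # then reset to 0.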
            disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) /
                                   (disparities ** 2).sum())
            disparities[similarities == 0] = 0

        # Compute the weighted stress
        _stress = (W.ravel() * ((dis.ravel() - disparities.ravel()) ** 2)).sum() / 2

        # Update X using the weighted Guttman transform
        dis[dis == 0] = 1e-5
        ratio = disparities / dis
        _B = -W * ratio
        _B[np.arange(len(_B)), np.arange(len(_B))] += (W * ratio).sum(axis=1)

        X = np.dot(Vp, np.dot(_B, X))

        dis = np.sqrt((X ** 2).sum(axis=1)).sum()
        
        if verbose >= 2:
            print('it: %d, stress %s' % (it, _stress))
        if old_stress is not None:
            if (old_stress - _stress / dis) < eps:
                if verbose:
                    print('breaking at iteration %d with stress %s' % (it,
                                                                       _stress))
                break
        old_stress = _stress / dis

    return X, _stress, it + 1
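
A minimal usage sketch (not part of the original mdsp.py): it assumes the module-level imports used inside _smacof_single_p (numpy as np, euclidean_distances, IsotonicRegression, check_symmetric, check_random_state) are available, builds a small symmetric distance matrix by hand, and picks n_uq = 2 purely for illustration.

import numpy as np

rng = np.random.RandomState(0)
points = rng.rand(6, 2)                              # 6 hypothetical points in the plane
diffs = points[:, None, :] - points[None, :, :]
similarities = np.sqrt((diffs ** 2).sum(axis=-1))    # symmetric pairwise distances

# Re-embed in 2-D; only distances between the first 2 points and the
# remaining 4 carry weight in the stress (n_uq=2).
X, stress, n_iter = _smacof_single_p(similarities, n_uq=2, metric=True,
                                     n_components=2, random_state=0)
print(X.shape, stress, n_iter)                       # (6, 2), final weighted stress, iterations run
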
Code example #21
    def spectral_embedding(self,
                           adjacency,
                           n_components=8,
                           eigen_solver=None,
                           random_state=None,
                           eigen_tol=0.0,
                           drop_first=True):
        """
        see original at https://github.com/scikit-learn/scikit-learn/blob/14031f6/sklearn/manifold/spectral_embedding_.py#L133
        custermize1: return lambdas with the embedded matrix.
        custermize2: norm_laplacian is always True
        """
        norm_laplacian = True
        adjacency = check_symmetric(adjacency)

        try:
            from pyamg import smoothed_aggregation_solver
        except ImportError:
            if eigen_solver == "amg":
                raise ValueError(
                    "The eigen_solver was set to 'amg', but pyamg is "
                    "not available.")

        if eigen_solver is None:
            eigen_solver = 'arpack'
        elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
            raise ValueError("Unknown value for eigen_solver: '%s'."
                             "Should be 'amg', 'arpack', or 'lobpcg'" %
                             eigen_solver)

        random_state = check_random_state(random_state)

        n_nodes = adjacency.shape[0]
        # Whether to drop the first eigenvector
        if drop_first:
            n_components = n_components + 1

        if not _graph_is_connected(adjacency):
            warnings.warn("Graph is not fully connected, spectral embedding"
                          " may not work as expected.")

        laplacian, dd = graph_laplacian(adjacency,
                                        normed=norm_laplacian,
                                        return_diag=True)
        if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
            (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
            # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
            # for details see the source code in scipy:
            # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
            # /lobpcg/lobpcg.py#L237
            # or matlab:
            # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
            laplacian = _set_diag(laplacian, 1, norm_laplacian)

            # Here we'll use shift-invert mode for fast eigenvalues
            # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
            #  for a short explanation of what this means)
            # Because the normalized Laplacian has eigenvalues between 0 and 2,
            # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
            # when finding eigenvalues of largest magnitude (keyword which='LM')
            # and when these eigenvalues are very large compared to the rest.
            # For very large, very sparse graphs, I - L can have many, many
            # eigenvalues very near 1.0.  This leads to slow convergence.  So
            # instead, we'll use ARPACK's shift-invert mode, asking for the
            # eigenvalues near 1.0.  This effectively spreads-out the spectrum
            # near 1.0 and leads to much faster convergence: potentially an
            # orders-of-magnitude speedup over simply using keyword which='LA'
            # in standard mode.
            try:
                # We are computing the opposite of the laplacian inplace so as
                # to spare a memory allocation of a possibly very large array
                laplacian *= -1
                lambdas, diffusion_map = eigsh(laplacian,
                                               k=n_components,
                                               sigma=1.0,
                                               which='LM',
                                               tol=eigen_tol)
                embedding = diffusion_map.T[n_components::-1] * dd

            except RuntimeError:
                # When submatrices are exactly singular, an LU decomposition
                # in arpack fails. We fallback to lobpcg
                eigen_solver = "lobpcg"
                # Revert the laplacian to its opposite to have lobpcg work
                laplacian *= -1

        if eigen_solver == 'amg':
            # Use AMG to get a preconditioner and speed up the eigenvalue
            # problem.
            if not sparse.issparse(laplacian):
                warnings.warn("AMG works better for sparse matrices")
            # lobpcg needs double precision floats
            laplacian = check_array(laplacian,
                                    dtype=np.float64,
                                    accept_sparse=True)
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
            M = ml.aspreconditioner()
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian,
                                            X,
                                            M=M,
                                            tol=1.e-12,
                                            largest=False)
            embedding = diffusion_map.T * dd
            if embedding.shape[0] == 1:
                raise ValueError

        elif eigen_solver == "lobpcg":
            # lobpcg needs double precision floats
            laplacian = check_array(laplacian,
                                    dtype=np.float64,
                                    accept_sparse=True)
            if n_nodes < 5 * n_components + 1:
                # see note above under arpack why lobpcg has problems with small
                # number of nodes
                # lobpcg will fallback to eigh, so we short circuit it
                if sparse.isspmatrix(laplacian):
                    laplacian = laplacian.toarray()
                lambdas, diffusion_map = eigh(laplacian)
                embedding = diffusion_map.T[:n_components] * dd
            else:
                laplacian = _set_diag(laplacian, 1, norm_laplacian)
                # We increase the number of eigenvectors requested, as lobpcg
                # doesn't behave well in low dimension
                X = random_state.rand(laplacian.shape[0], n_components + 1)
                X[:, 0] = dd.ravel()
                lambdas, diffusion_map = lobpcg(laplacian,
                                                X,
                                                tol=1e-15,
                                                largest=False,
                                                maxiter=2000)
                embedding = diffusion_map.T[:n_components] * dd
                if embedding.shape[0] == 1:
                    raise ValueError

        embedding = _deterministic_vector_sign_flip(embedding)
        if drop_first:
            return embedding[1:n_components].T, lambdas
        else:
            return embedding[:n_components].T, lambdas
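
A hedged usage sketch (not from the original class): model stands in for an instance of whichever class defines spectral_embedding, since that class is not shown here, and the helpers the method relies on (check_symmetric, check_random_state, _graph_is_connected, graph_laplacian, _set_diag, eigsh, lobpcg, eigh, check_array, _deterministic_vector_sign_flip) are assumed to be importable in its module. The RBF kernel below is just one common way to build a symmetric affinity matrix.

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

samples = np.random.RandomState(0).rand(30, 3)         # 30 samples, 3 features
adjacency = rbf_kernel(samples, gamma=1.0)              # dense, symmetric affinity matrix

# model is assumed to be an instance of the class that owns spectral_embedding.
embedding, lambdas = model.spectral_embedding(adjacency, n_components=4,
                                              eigen_solver='arpack', random_state=0)
print(embedding.shape)                                   # (30, 4): spectral coordinates per sample
print(lambdas)                                           # eigenvalues returned alongside the embedding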