Example #1
0
def test_spectral_embedding_unknown_affinity(seed=36):
    """An unrecognized affinity string must make fit() raise ValueError."""
    rng = np.random.RandomState(seed)
    embedder = SpectralEmbedding(n_components=1,
                                 affinity="<unknown>",
                                 random_state=rng)
    with pytest.raises(ValueError):
        embedder.fit(S)
Example #2
0
def test_spectral_embedding_unknown_eigensolver(seed=36):
    """An unrecognized eigen_solver string must make fit() raise ValueError."""
    rng = np.random.RandomState(seed)
    embedder = SpectralEmbedding(n_components=1,
                                 affinity="precomputed",
                                 random_state=rng,
                                 eigen_solver="<unknown>")
    with pytest.raises(ValueError):
        embedder.fit(S)
Example #3
0
def swiss_roll_test():
    """Visual benchmark: three embeddings of an S-curve dataset.

    Draws a 3-row figure, each panel annotated with wall-clock fit time:
      1. scikit-learn's Laplacian Eigenmaps (SpectralEmbedding),
      2. the third-party lpproj LocalityPreservingProjection,
      3. the local LocalityPreservingProjections implementation.
    """
    import matplotlib.pyplot as plt
    plt.style.use('ggplot')

    from time import time

    from sklearn import manifold, datasets
    from sklearn.manifold import SpectralEmbedding
    from lpproj import LocalityPreservingProjection

    n_points = 1000
    # BUG FIX: `datasets.samples_generator` is a private module removed in
    # scikit-learn 0.24; `make_s_curve` has always been exposed directly on
    # `sklearn.datasets`, so this call works on both old and new versions.
    X, color = datasets.make_s_curve(n_points, random_state=0)
    n_neighbors = 20
    n_components = 2

    # original LE algorithm (scikit-learn baseline)
    t0 = time()
    ml_model = SpectralEmbedding(n_neighbors=n_neighbors,
                                 n_components=n_components)
    Y = ml_model.fit_transform(X)
    t1 = time()

    # 2d projection
    fig, ax = plt.subplots(nrows=3, ncols=1, figsize=(5, 10))
    ax[0].scatter(Y[:, 0], Y[:, 1], c=color, label='scikit')
    ax[0].set_title('Sklearn-LE: {t:.2g}'.format(t=t1 - t0))

    # Jakes LPP Algorithm
    t0 = time()
    ml_model = LocalityPreservingProjection(n_components=n_components)
    ml_model.fit(X)
    Y = ml_model.transform(X)
    t1 = time()

    ax[1].scatter(Y[:, 0], Y[:, 1], c=color, label='Jakes Algorithm')
    ax[1].set_title('Jakes LPP: {t:.2g}'.format(t=t1 - t0))

    # my SSSE algorithm
    # NOTE(review): `LocalityPreservingProjections` (plural) is not defined
    # in this function; presumably it is imported at module level — verify.
    t0 = time()
    ml_model = LocalityPreservingProjections(weight='angle',
                                             n_components=n_components,
                                             n_neighbors=n_neighbors,
                                             sparse=True,
                                             eig_solver='dense')
    ml_model.fit(X)
    Y = ml_model.transform(X)
    t1 = time()

    ax[2].scatter(Y[:, 0], Y[:, 1], c=color, label='My LPP Algorithm')
    ax[2].set_title('My LPP: {t:.2g}'.format(t=t1 - t0))

    plt.show()
Example #4
0
def swiss_roll_test():
    """Visual benchmark: three embeddings of an S-curve dataset.

    Relies on module-level imports of `datasets`, `time`,
    `SpectralEmbedding`, `plt`, and the LPP implementations. Draws a
    3-row figure annotated with wall-clock fit times.
    """
    n_points = 1000
    # BUG FIX: `datasets.samples_generator` is a private module removed in
    # scikit-learn 0.24; `make_s_curve` is exposed directly on
    # `sklearn.datasets`, so this call works on both old and new versions.
    X, color = datasets.make_s_curve(n_points, random_state=0)
    n_neighbors = 20
    n_components = 2

    # original LE algorithm (scikit-learn baseline)
    t0 = time()
    ml_model = SpectralEmbedding(n_neighbors=n_neighbors,
                                 n_components=n_components)
    Y = ml_model.fit_transform(X)
    t1 = time()

    # 2d projection
    fig, ax = plt.subplots(nrows=3, ncols=1, figsize=(5, 10))
    ax[0].scatter(Y[:, 0], Y[:, 1], c=color, label='scikit')
    ax[0].set_title('Sklearn-LE: {t:.2g}'.format(t=t1 - t0))

    # Jakes LPP Algorithm
    t0 = time()
    ml_model = LocalityPreservingProjection(n_components=n_components)
    ml_model.fit(X)
    Y = ml_model.transform(X)
    t1 = time()

    ax[1].scatter(Y[:, 0], Y[:, 1], c=color, label='Jakes Algorithm')
    ax[1].set_title('Jakes LPP: {t:.2g}'.format(t=t1 - t0))

    # my SSSE algorithm
    # NOTE(review): `LocalityPreservingProjections` (plural) is assumed to
    # be defined at module level — verify.
    t0 = time()
    ml_model = LocalityPreservingProjections(weight='angle',
                                             n_components=n_components,
                                             n_neighbors=n_neighbors,
                                             sparse=True,
                                             eig_solver='dense')
    ml_model.fit(X)
    Y = ml_model.transform(X)
    t1 = time()

    ax[2].scatter(Y[:, 0], Y[:, 1], c=color, label='My LPP Algorithm')
    ax[2].set_title('My LPP: {t:.2g}'.format(t=t1 - t0))

    plt.show()
class LaplacianEigenmaps(AbstractReducer):
    """Laplacian Eigenmaps reducer that delegates to sklearn's SpectralEmbedding."""

    def __init__(self, d: int = 2, random_state: int = 0, **kwargs):
        super().__init__(d, random_state)
        # All spectral work is delegated to the wrapped estimator.
        self._main = SpectralEmbedding(
            n_components=d, random_state=random_state, **kwargs)

    def fit_transform(self, x: np.ndarray, **kwargs) -> np.ndarray:
        """Fit on `x` and return its low-dimensional embedding."""
        return self._main.fit_transform(x)

    def fit(self, x: np.ndarray, **kwargs):
        """Fit the wrapped estimator on `x` and return its result."""
        return self._main.fit(x)

    def transform(self, x: np.ndarray, **kwargs) -> np.ndarray:
        """Out-of-sample transform is not supported by this reducer."""
        raise NotImplementedError

    def set_random_state(self, random_state: int = 0):
        """Propagate `random_state` to both the wrapper and the wrapped estimator."""
        self.random_state = random_state
        self._main.random_state = random_state

    @property
    def is_deterministic(self) -> bool:
        # The eigensolver is randomized, so results may vary across runs.
        return False

    @property
    def is_stateful(self) -> bool:
        return True

    @staticmethod
    def get_parameter_ranges() -> dict:
        """Tunable hyperparameters as name -> (type, low, high)."""
        return {'n_neighbors': (int, 1, 20)}
Example #6
0
def swiss_roll_test():
    """Visual benchmark: scikit-learn LE vs the local LaplacianEigenmaps.

    Draws a 2-row figure of 2-D embeddings of an S-curve dataset, each
    panel annotated with its wall-clock fit time.
    """
    import matplotlib.pyplot as plt
    plt.style.use('ggplot')

    from time import time

    from sklearn import manifold, datasets
    from sklearn.manifold import SpectralEmbedding

    n_points = 1000
    # BUG FIX: `datasets.samples_generator` is a private module removed in
    # scikit-learn 0.24; `make_s_curve` is exposed directly on
    # `sklearn.datasets`, so this call works on both old and new versions.
    X, color = datasets.make_s_curve(n_points, random_state=0)
    n_neighbors = 10
    n_components = 2

    # original scikit-learn LE algorithm
    t0 = time()
    ml_model = SpectralEmbedding(affinity='nearest_neighbors',
                                 n_neighbors=n_neighbors,
                                 n_components=n_components)
    Y = ml_model.fit_transform(X)
    t1 = time()

    # 2d projection
    fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(5, 10))
    ax[0].scatter(Y[:, 0], Y[:, 1], c=color, label='scikit')
    ax[0].set_title('Sklearn LE: {t:.2g}'.format(t=t1 - t0))

    # my Laplacian Eigenmaps algorithm
    t0 = time()
    ml_model = LaplacianEigenmaps(n_components=n_components,
                                  n_neighbors=n_neighbors)
    # fit_transform refits internally, so the previous separate fit(X)
    # call did duplicate work and has been removed.
    Y = ml_model.fit_transform(X)
    t1 = time()

    ax[1].scatter(Y[:, 0], Y[:, 1], c=color, label='My LE Algorithm')
    ax[1].set_title('My LE: {t:.2g}'.format(t=t1 - t0))

    plt.show()
def plot_data(r1_nc, r2_nc, rx_nc, r1_dur, r2_dur, knn):
    """Simulate SWC data and return a 3-panel hvplot layout.

    Panels: the simulated sliding-window connectivity (SWC) matrix, the
    window-to-window correlation matrix, and a 2-D spectral embedding of
    the windows colored by run membership.

    Parameters
    ----------
    r1_nc, r2_nc, rx_nc : int
        Connection counts for run 1, run 2, and the shared set.
    r1_dur, r2_dur : int
        Number of windows in run 1 and run 2.
    knn : int
        Neighborhood size for the spectral embedding.
    """
    ncon = r1_nc + r2_nc + rx_nc
    # Create simulated data
    data = create_sym_data(r1_nc, r2_nc, rx_nc, r1_dur, r2_dur)
    # Carpet plot of connectivity
    swc_plot = data.hvplot.heatmap(cmap='jet',
                                   clim=(0, 1),
                                   xlabel='Time (Windows)',
                                   ylabel='Connections',
                                   title='Simulated SWC Matrix',
                                   shared_axes=False).opts(toolbar=None)
    # Correlation matrix for SCC; label rows and columns identically.
    cm = data.corr()
    win_labels = [
        'WIN' + str(i + 1).zfill(4) for i in np.arange(r1_dur + r2_dur)
    ]
    cm.columns = win_labels
    cm.index = win_labels
    cm_plot = cm.hvplot.heatmap(
        cmap='RdBu_r',
        clim=(-1, 1),
        shared_axes=False,
        xlabel='Time (Windows)',
        ylabel='Time (Windows)',
        title='Win-2-Win Correlation (Similarity across samples)')
    # Create Embedding (windows are columns of `data`, hence the transpose)
    se = SpectralEmbedding(n_components=2, n_neighbors=knn)
    se.fit(data.T)
    se_df = pd.DataFrame(se.embedding_)
    se_df.columns = ['x', 'y']
    se_df['class'] = 'Run 2'
    # BUG FIX: pandas `.loc` label slices are inclusive, so `0:r1_dur`
    # labeled one window too many as Run 1; run 1 spans rows 0..r1_dur-1.
    se_df.loc[0:r1_dur - 1, 'class'] = 'Run 1'
    se_plot = se_df.hvplot.scatter(x='x',
                                   y='y',
                                   color='class',
                                   title='Spectral Embedding',
                                   xlabel='SE Dim 1',
                                   ylabel='SE Dim 2')
    return (swc_plot + cm_plot + se_plot).cols(1)
Example #8
0
    def mds_dyn_mat(self, ax=None, type="T", learned=True):
        """Scatter a 2-D spectral embedding of a symmetrized dynamics matrix.

        Embeds either est_SR or est_T (or their ground-truth counterparts)
        depending on `type` and `learned`, then draws the environment's
        transition graph with nodes placed at the embedded coordinates.

        Parameters
        ----------
        ax : matplotlib axis, optional
            Target axis, forwarded to ``self.set_target_axis``.
        type : {"T", "SR", "Q"}
            Which dynamics matrix to embed.
        learned : bool
            If True, use the learned estimate (``est_*``); otherwise use
            the ground-truth matrix.

        Raises
        ------
        ValueError
            If `type` is not one of the recognized matrix names.
        """
        import networkx as nx
        from sklearn.manifold import MDS, SpectralEmbedding

        dr = SpectralEmbedding(
            n_components=2,
            affinity="precomputed",
            gamma=None,
            random_state=None,
            eigen_solver=None,
            n_neighbors=3,
            n_jobs=None,
        )

        self.set_target_axis(ax)
        # BUG FIX: the original compared strings with `is`, which tests
        # object identity rather than equality — it only worked through
        # CPython's literal interning and emits a SyntaxWarning on 3.8+.
        if type == "T":
            mat = self.est_T if learned else self.ENV.T
        elif type == "SR":
            mat = self.est_SR if learned else self.SR
        elif type == "Q":
            mat = self.est_Q if learned else self.GEN.Q
        else:
            raise ValueError("unrecognized learned dynamics matrix request")
        # Symmetrize: the "precomputed" affinity expects a symmetric matrix.
        S = (mat + mat.T) / 2.0

        pos = dr.fit(S).embedding_[:, :2]

        G = nx.from_numpy_array(self.ENV.T)
        pos_dict = dict([(i, posxy) for i, posxy in enumerate(pos)])
        nx.draw(
            G,
            pos=pos_dict,
            ax=self.ax,
            node_size=10,
            node_color="black",
            edge_color="grey",
            width=0.5,
        )
Example #9
0
    # Record the top-N cutoff value for each row and each column
    # (rowSorted/columnSorted are assumed sorted ascending, so index
    # n - topN holds the N-th largest entry — TODO confirm upstream).
    for i in range(0, n):
        rowTopN[i] = rowSorted[i, :][n - topN]
        columnTopN[i] = columnSorted[:, i][n - topN]

    # Keep only entries that reach the top-N threshold of their row OR
    # their column; everything else stays at `result`'s initial value.
    # NOTE(review): fragment — `n`, `m`, `sM`, `result` are defined above
    # this excerpt and are not visible here.
    for i in range(0, n):
        for j in range(0, m):
            if (sM[i][j] >= rowTopN[i] or sM[i][j] >= columnTopN[j]):
                result[i][j] = sM[i][j]

    return result


# Embed the thresholded inverse-distance similarity matrix in 2-D.
# NOTE(review): n_neighbors=0 looks suspicious, but it should be ignored
# when affinity="precomputed" — confirm for the sklearn version in use.
embedding = SpectralEmbedding(n_components=2,
                              affinity="precomputed",
                              n_neighbors=0)
coords = embedding.fit(thresholdMatrix(1 / (dM + 0.1), 20)).embedding_
print(coords.shape)

total_count = 0
similar_count1 = 0
similar_count2 = 0
sM = thresholdMatrix(1 / (dM + 0.1), 10)
for i in range(0, sM.shape[0]):
    r = sM[i, :]
    r = np.nonzero(r)
    r = r[0]
    l = r.size
    for a in range(0, l):
        for b in range(a + 1, l):
            total_count += 1
            if (sM[i, r[a]] >= sM[i, r[b]]
# Regularize the similarity matrix, then set its diagonal to one.
simMatAllTimes = setDiagToOne(forcePosDef(simMatAllTimes))

# tsneModel = TSNE(n_components=2, metric='precomputed',
#                 method='barnes_hut', perplexity=30.0)
# #method='barnes_hut'
# #method='exact'
# distMat = 1.0 / (simMatAllTimes + 1.01)

# NOTE(review): despite its name, `tsneModel` holds a SpectralEmbedding —
# the name is left over from the commented-out TSNE experiment above.
tsneModel = SpectralEmbedding(n_components=2, affinity='precomputed',
                              gamma=1.0, n_neighbors=6)

# Shift similarities so every precomputed affinity is strictly positive.
distMat = simMatAllTimes + 1.0


tsneModel = tsneModel.fit(distMat)
# Normalize the embedding so its largest absolute coordinate is 1.
sizeScale = np.abs(tsneModel.embedding_.ravel()).max()
tsneModel.embedding_ /= sizeScale

#############################
#
# Plotting
#

# set plot parameters

# Per-node strength within community 1 (first nNodes_comm1 columns),
# stacked across every adjacency matrix in the time series.
comm1_str = np.vstack([np.sum(a[:, :nNodes_comm1], axis=1, keepdims=True)
                       for a in adjMat_timeseries])

# Per-node strength within community 2 (last nNodes_comm2 columns).
comm2_str = np.vstack([np.sum(a[:, -nNodes_comm2:], axis=1, keepdims=True)
                       for a in adjMat_timeseries])
Example #11
0
 def spectral_emb(self, n):
     """Embed ``self.sample`` into `n` dimensions with SpectralEmbedding.

     Parameters
     ----------
     n : int
         Number of embedding components.

     Returns
     -------
     ndarray of shape (n_samples, n)
         The embedded coordinates.
     """
     semb = SpectralEmbedding(n_components=n)
     # BUG FIX: the original fit() followed by fit_transform() fitted the
     # model twice, and then discarded the result; fit_transform alone
     # suffices, and the embedding is now returned to the caller.
     semb_res = semb.fit_transform(self.sample)
     return semb_res
import numpy as np
# NOTE(review): `sklearn.utils.arpack` was removed from scikit-learn long
# ago; modern code imports eigsh from scipy.sparse.linalg — confirm the
# pinned sklearn version before running.
from sklearn.utils.arpack import eigsh

app = service.prodbox.CinemaService()

X = app.getWeightedSearchFeatures(15)

# Build a 10-nearest-neighbor graph and its normalized graph Laplacian.
graph = kneighbors_graph(X, 10)
lap = graph_laplacian(graph, True)

from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components = 30, algorithm="arpack")
# Regularize the Laplacian diagonal before factorizing -lap.
lap = spectral_embedding_._set_diag(lap, 1)
svd.fit(-lap)

# Recover eigenvalue estimates of -lap from the SVD basis (Rayleigh quotients).
eigenvalues = np.diag(svd.components_ * (-lap).todense() * svd.components_.T)

# Cross-check against ARPACK's shift-invert eigensolver.
eigenvalues2, _ = eigsh(-lap, k=30, which='LM', sigma=1)
print(eigenvalues)

print(eigenvalues2)

# Full spectral embedding pipeline on the raw features for comparison.
se = SpectralEmbedding(n_components = 30, eigen_solver='arpack', affinity="nearest_neighbors")
se.fit(X)

app.quit()

# TODO : check budget distribution, draw budget conditionnaly
out = connected_components(graph)

Example #13
0
def spectral_embed(affinity_mat, n_comps):
    """Return the spectral embedding of a precomputed affinity matrix.

    The result is transposed: shape (n_comps, n_samples).
    """
    model = SpectralEmbedding(n_components=n_comps, affinity='precomputed')
    coords = model.fit(affinity_mat).embedding_
    return coords.T
Example #14
0
# NOTE(review): `sklearn.utils.arpack` was removed from scikit-learn long
# ago; modern code imports eigsh from scipy.sparse.linalg — confirm the
# pinned sklearn version before running.
from sklearn.utils.arpack import eigsh

app = service.prodbox.CinemaService()

X = app.getWeightedSearchFeatures(15)

# Build a 10-nearest-neighbor graph and its normalized graph Laplacian.
graph = kneighbors_graph(X, 10)
lap = graph_laplacian(graph, True)

from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=30, algorithm="arpack")
# Regularize the Laplacian diagonal before factorizing -lap.
lap = spectral_embedding_._set_diag(lap, 1)
svd.fit(-lap)

# Recover eigenvalue estimates of -lap from the SVD basis (Rayleigh quotients).
eigenvalues = np.diag(svd.components_ * (-lap).todense() * svd.components_.T)

# Cross-check against ARPACK's shift-invert eigensolver.
eigenvalues2, _ = eigsh(-lap, k=30, which='LM', sigma=1)
print(eigenvalues)

print(eigenvalues2)

# Full spectral embedding pipeline on the raw features for comparison.
se = SpectralEmbedding(n_components=30,
                       eigen_solver='arpack',
                       affinity="nearest_neighbors")
se.fit(X)

app.quit()

# TODO : check budget distribution, draw budget conditionnaly
out = connected_components(graph)
Example #15
0
                                norm_local_diss=False,
                                norm_sim=False,
                                merge_if_ccs=True,
                                min_cc_len=min_cc_len,
                                do_eps_graph=True,
                                preprocess_only=True)
# Run the spectral ordering method on the DNA reads similarity matrix
t0 = time()
reord_method.fit(new_mat)
my_ebd = reord_method.embedding
tme = time() - t0

print("my embedding in {}s".format(tme))

# scikit-learn baseline on the same (densified) similarity matrix.
# BUG FIX: the timings below were previously computed as `time() - tme`
# and `time() - tskl`, subtracting a *duration* from a timestamp, which
# yields meaningless values; each method now gets its own start time.
t0 = time()
skl_method = SpectralEmbedding(n_components=dim, affinity='precomputed')
skl_method.fit(new_mat.toarray())
skl_ebd = skl_method.embedding_
tskl = time() - t0
print("sklearn embedding in {}s".format(tskl))

# Same embedding with the AMG eigensolver (accepts the sparse matrix).
t0 = time()
skl_amg = SpectralEmbedding(n_components=dim,
                            affinity='precomputed',
                            eigen_solver='amg')
skl_amg.fit(new_mat)
amg_ebd = skl_amg.embedding_
tamg = time() - t0
print("sklearn amg embedding in {}s".format(tamg))

# 3-D scatter of the custom embedding's first three components.
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(my_ebd[:, 0], my_ebd[:, 1], my_ebd[:, 2])