Example #1
    def test_precomputed(self):
        mapper = KeplerMapper()

        X = np.random.rand(100, 2)
        # Square-form pairwise distance matrix for the same point cloud
        X_pdist = distance.squareform(distance.pdist(X, metric="euclidean"))

        lens = mapper.fit_transform(X_pdist)

        graph = mapper.map(
            lens,
            X=X_pdist,
            cover=Cover(n_cubes=10, perc_overlap=0.8),
            clusterer=cluster.DBSCAN(metric="precomputed", min_samples=3),
            precomputed=True,
        )
        graph2 = mapper.map(
            lens,
            X=X,
            cover=Cover(n_cubes=10, perc_overlap=0.8),
            clusterer=cluster.DBSCAN(metric="euclidean", min_samples=3),
        )

        assert graph["links"] == graph2["links"]
        assert graph["nodes"] == graph2["nodes"]
        assert graph["simplices"] == graph2["simplices"]
Example #2
    def test_overlap_perc_cover(self):
        mapper = KeplerMapper()

        with pytest.deprecated_call():
            cover = Cover(overlap_perc=17)  # strange number

        assert cover.perc_overlap == 17
Example #3
    def test_nr_cubes_cover(self):
        mapper = KeplerMapper()

        with pytest.deprecated_call():
            cover = Cover(nr_cubes=17)  # strange number

        assert cover.n_cubes == 17
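
# For reference, the current (non-deprecated) keyword names for the two
# parameters exercised above are n_cubes and perc_overlap, as used throughout
# the other examples on this page; typical values are shown here instead of
# the deliberately odd 17 from the tests:
cover = Cover(n_cubes=10, perc_overlap=0.5)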
Example #4
def profile():
    num_sets = 100
    blob_size = 1000
    n_cubes = 10
    overlap = 0.2

    blob_list = []
    for i in range(num_sets):
        data, _ = datasets.make_blobs(blob_size)
        blob_list.append(data)

    mapper = KeplerMapper(verbose=0)

    pr = cProfile.Profile()
    pr.enable()

    for data in blob_list:
        lens = mapper.fit_transform(data)
        graph = mapper.map(lens, data, cover=Cover(n_cubes=n_cubes, perc_overlap=overlap))

    pr.disable()
    s = io.StringIO()
    sortby = "cumulative"
    ps = pstats.Stats(pr, stream=s).strip_dirs().sort_stats(sortby)
    ps.print_stats("kmapper")  # restrict the report to entries matching "kmapper"
    print(
        "Ran {} blobs of size {} with params (n_cubes:{}\toverlap:{})".format(
            num_sets, blob_size, n_cubes, overlap
        )
    )
    print(s.getvalue())
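
# A minimal way to run the profiler above as a standalone script; the imports
# are inferred from the function body and are not part of the original excerpt:
import cProfile
import io
import pstats

from sklearn import datasets
from kmapper import KeplerMapper, Cover

if __name__ == "__main__":
    profile()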
Example #5
    def test_remove_duplicates_argument(self, capsys):
        mapper = KeplerMapper(verbose=1)
        X = np.random.rand(100, 5)

        lens = mapper.project(X)
        graph = mapper.map(
            lens,
            X=X,
            cover=Cover(n_cubes=2, perc_overlap=1),
            clusterer=cluster.DBSCAN(metric="euclidean", min_samples=3),
            remove_duplicate_nodes=True,
        )

        captured = capsys.readouterr()
        # capsys captures (stdout, stderr); index 0 is the stdout stream
        assert "duplicate nodes" in captured[0]
Example #6
    def test_simplices(self):
        mapper = KeplerMapper()

        X = np.random.rand(100, 2)
        lens = mapper.fit_transform(X)
        graph = mapper.map(
            lens,
            X=X,
            cover=Cover(n_cubes=3, perc_overlap=0.75),
            clusterer=cluster.DBSCAN(metric="euclidean", min_samples=3),
        )
        assert max([len(s) for s in graph["simplices"]]) <= 2

        nodes = [n for n in graph["simplices"] if len(n) == 1]
        edges = [n for n in graph["simplices"] if len(n) == 2]
        assert len(nodes) == 3
        assert len(edges) == 3
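
# graph["nodes"] maps node ids to member indices and graph["links"] maps each
# node id to the node ids it connects to, so a quick conversion to networkx for
# further analysis could look like this sketch (not part of the original test;
# newer kmapper releases also expose an adapter for this, kmapper.adapter.to_nx):
import networkx as nx

def to_networkx(graph):
    """Build an undirected networkx graph from a KeplerMapper result dict."""
    g = nx.Graph()
    g.add_nodes_from(graph["nodes"])
    g.add_edges_from((u, v) for u, vs in graph["links"].items() for v in vs)
    return g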
Example #7
y = y.loc[mask_sessions, :]
target = target[mask_sessions]

# Generate a shape graph using KeplerMapper
mapper = KeplerMapper(verbose=1)

# Configure projection
pca = PCA(2, random_state=1)
umap = UMAP(n_components=2, init=pca.fit_transform(X))

# Construct lens and generate the shape graph
lens = mapper.fit_transform(umap.fit_transform(X, y=target), projection=[0, 1])
graph = mapper.map(
    lens,
    X=X,
    cover=Cover(20, 0.5),
    clusterer=optimize_dbscan(X, k=3, p=100.0),
)

# Convert to a DyNeuGraph
dG = DyNeuGraph(G=graph, y=y)

# Define some custom_layouts
dG.add_custom_layout(lens, name='lens')
dG.add_custom_layout(nx.spring_layout, name='nx.spring')
dG.add_custom_layout(nx.kamada_kawai_layout, name='nx.kamada_kawai')
dG.add_custom_layout(nx.spectral_layout, name='nx.spectral')
dG.add_custom_layout(nx.circular_layout, name='nx.circular')

# Configure some projections
pca = PCA(2, random_state=1)
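
# Illustration only (not the original continuation of this excerpt): a 2-D
# projection such as the PCA above can be registered as another custom layout
# through the same add_custom_layout API used for the lens and networkx layouts:
dG.add_custom_layout(pca.fit_transform(X), name='pca')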
Example #8
                     smoothing_fwhm=4.0,
                     low_pass=0.09,
                     high_pass=0.008,
                     t_r=2.5,
                     memory="nilearn_cache")
X = masker.fit_transform(dataset.func[0])

# Encode labels as integers
df = pd.read_csv(dataset.session_target[0], sep=" ")
target, labels = pd.factorize(df.labels.values)
y = pd.DataFrame({l: (target == i).astype(int) for i, l in enumerate(labels)})

# Generate shape graph using KeplerMapper
mapper = KeplerMapper(verbose=1)
lens = mapper.fit_transform(X, projection=TSNE(2, random_state=1))
graph = mapper.map(lens, X=X, cover=Cover(20, 0.5), clusterer=DBSCAN(eps=20.))

# Visualize the stages of Mapper
fig, axes = visualize_mapper_stages(dataset,
                                    y=target,
                                    lens=lens,
                                    graph=graph,
                                    cover=mapper.cover,
                                    node_size=20,
                                    edge_size=0.5,
                                    edge_color='gray',
                                    layout="kamada_kawai",
                                    figsize=(16, 4))
plt.savefig("mapper_stages.png", dpi=600, transparent=True)
plt.show()
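
# In addition to the static matplotlib figure above, the same graph can be
# written out as kmapper's interactive HTML view (a sketch; the output file
# name is illustrative):
mapper.visualize(graph, path_html="mapper_haxby_output.html")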