Example #1
def run(idx, reduction, alpha, mask, raw, n_components, init, func_filenames):
    output_dir = join(trace_folder, 'experiment_%i' % idx)
    try:
        os.makedirs(output_dir)
    except OSError:
        pass
    dict_fact = SpcaFmri(mask=mask,
                         smoothing_fwhm=3,
                         batch_size=40,
                         shelve=not raw,
                         n_components=n_components,
                         replacement=False,
                         dict_init=fetch_atlas_smith_2009().rsn70 if
                         init else None,
                         reduction=reduction,
                         alpha=alpha,
                         random_state=0,
                         n_epochs=2,
                         l1_ratio=0.5,
                         backend='c',
                         memory=expanduser("~/nilearn_cache"), memory_level=2,
                         verbose=5,
                         n_jobs=1,
                         trace_folder=output_dir
                         )

    print('[Example] Learning maps')
    t0 = time.time()
    dict_fact.fit(func_filenames, raw=raw)
    t1 = time.time() - t0
    print('[Example] Dumping results')
    # The decomposition estimator embeds its own masker
    masker = dict_fact.masker_
    components_img = masker.inverse_transform(dict_fact.components_)
    components_img.to_filename(join(output_dir, 'components_final.nii.gz'))
    print('[Example] Run in %.2f s' % t1)
    # Show the learned components using 4D plotting tools
    import matplotlib.pyplot as plt
    from nilearn.plotting import plot_prob_atlas, plot_stat_map, show
    from nilearn.image import index_img

    print('[Example] Displaying')
    fig, axes = plt.subplots(2, 1)
    plot_prob_atlas(components_img, view_type="filled_contours",
                    axes=axes[0])
    plot_stat_map(index_img(components_img, 0),
                  axes=axes[1],
                  colorbar=False,
                  threshold=0)
    plt.savefig(join(output_dir, 'components.pdf'))
    show()
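
This run() snippet assumes module-level definitions (trace_folder, os, time, join, expanduser, fetch_atlas_smith_2009, SpcaFmri) plus a list of functional images passed in as func_filenames. A minimal sketch of that surrounding setup, assuming nilearn's ADHD fetcher for the data and the modl package for SpcaFmri (both assumptions; the original script may use a different dataset and import path):

import os
import time
from os.path import expanduser, join

from nilearn.datasets import fetch_adhd, fetch_atlas_smith_2009
from modl.spca_fmri import SpcaFmri  # import path may differ across modl versions

# Hypothetical settings for this sketch; the original script defines its own.
trace_folder = expanduser('~/output/modl/example')
n_components = 70

adhd = fetch_adhd(n_subjects=10)   # small public dataset, for illustration only
func_filenames = adhd.func         # list of 4D functional images
mask = None                        # let the estimator compute its own mask

run(idx=0, reduction=3, alpha=1e-3, mask=mask, raw=False,
    n_components=n_components, init=True, func_filenames=func_filenames)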
Example #2
def run(idx, reduction, alpha, mask, raw, n_components, init, func_filenames):
    output_dir = join(trace_folder, 'experiment_%i' % idx)
    try:
        os.makedirs(output_dir)
    except OSError:
        pass
    dict_fact = SpcaFmri(
        mask=mask,
        smoothing_fwhm=3,
        batch_size=40,
        shelve=not raw,
        n_components=n_components,
        replacement=False,
        dict_init=fetch_atlas_smith_2009().rsn70 if init else None,
        reduction=reduction,
        alpha=alpha,
        random_state=0,
        n_epochs=2,
        l1_ratio=0.5,
        backend='c',
        memory=expanduser("~/nilearn_cache"),
        memory_level=2,
        verbose=5,
        n_jobs=1,
        trace_folder=output_dir)

    print('[Example] Learning maps')
    t0 = time.time()
    dict_fact.fit(func_filenames, raw=raw)
    t1 = time.time() - t0
    print('[Example] Dumping results')
    # The decomposition estimator embeds its own masker
    masker = dict_fact.masker_
    components_img = masker.inverse_transform(dict_fact.components_)
    components_img.to_filename(join(output_dir, 'components_final.nii.gz'))
    print('[Example] Run in %.2f s' % t1)
    # Show the learned components using 4D plotting tools
    import matplotlib.pyplot as plt
    from nilearn.plotting import plot_prob_atlas, plot_stat_map, show
    from nilearn.image import index_img

    print('[Example] Displaying')
    fig, axes = plt.subplots(2, 1)
    plot_prob_atlas(components_img, view_type="filled_contours", axes=axes[0])
    plot_stat_map(index_img(components_img, 0),
                  axes=axes[1],
                  colorbar=False,
                  threshold=0)
    plt.savefig(join(output_dir, 'components.pdf'))
    show()
Example #3
def test_component_sign():
    # Regression test
    # We should have a heuristic that flips the sign of components in
    # DictLearning to have more positive values than negative values, for
    # instance by making sure that the largest value is positive.

    data, mask_img, components, rng = _make_test_data(n_subjects=2, noisy=True)
    for mp in components:
        assert_less_equal(-mp.min(), mp.max())

    sparse_pca = SpcaFmri(n_components=4, random_state=rng, mask=mask_img, smoothing_fwhm=0.0)
    sparse_pca.fit(data)
    for mp in iter_img(sparse_pca.masker_.inverse_transform(sparse_pca.components_)):
        mp = mp.get_data()
        assert_less_equal(np.sum(mp[mp <= 0]), np.sum(mp[mp > 0]))
Example #4
def test_component_sign():
    # Regression test
    # We should have a heuristic that flips the sign of components in
    # DictLearning to have more positive values than negative values, for
    # instance by making sure that the largest value is positive.

    data, mask_img, components, rng = _make_test_data(n_subjects=2, noisy=True)
    for mp in components:
        assert_less_equal(-mp.min(), mp.max())

    sparse_pca = SpcaFmri(n_components=4,
                          random_state=rng,
                          mask=mask_img,
                          smoothing_fwhm=0.)
    sparse_pca.fit(data)
    for mp in iter_img(
            sparse_pca.masker_.inverse_transform(sparse_pca.components_)):
        mp = mp.get_data()
        assert_less_equal(np.sum(mp[mp <= 0]), np.sum(mp[mp > 0]))
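
Both sign tests above exercise a heuristic that flips component signs so that each map carries more positive than negative mass. A minimal numpy sketch of such a heuristic (our illustration, not SpcaFmri's internal implementation): flip every component whose largest-magnitude value is negative.

import numpy as np


def flip_component_signs(components):
    # components: (n_components, n_voxels) array; returns a sign-flipped copy.
    components = np.array(components, copy=True)
    # Position of the largest |value| in each component.
    extrema = np.abs(components).argmax(axis=1)
    signs = np.sign(components[np.arange(len(components)), extrema])
    signs[signs == 0] = 1  # leave all-zero components untouched
    return components * signs[:, np.newaxis]

After this flip, -comp.min() <= comp.max() holds for every component, which is exactly what the assertions check.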
Example #5
def test_sparse_pca(backend, var_red):
    data, mask_img, components, rng = _make_test_data(n_subjects=10)
    sparse_pca = SpcaFmri(n_components=4,
                          random_state=0,
                          mask=mask_img,
                          backend=backend,
                          var_red=var_red,
                          reduction=2 if var_red else 1,
                          smoothing_fwhm=0.,
                          n_epochs=3,
                          alpha=0.01)
    sparse_pca.fit(data)
    maps = sparse_pca.masker_.inverse_transform(
        sparse_pca.components_).get_data()
    maps = np.reshape(np.rollaxis(maps, 3, 0), (4, 400))

    S = np.sqrt(np.sum(components**2, axis=1))
    S[S == 0] = 1
    components /= S[:, np.newaxis]

    S = np.sqrt(np.sum(maps**2, axis=1))
    S[S == 0] = 1
    maps /= S[:, np.newaxis]

    G = np.abs(components.dot(maps.T))
    # Hard
    # if var_red:
    #     recovered_maps = min(np.sum(np.any(G > 0.5, axis=1)),
    #                          np.sum(np.any(G > 0.5, axis=0)))
    # else:
    #     recovered_maps = min(np.sum(np.any(G > 0.95, axis=1)),
    #                  np.sum(np.any(G > 0.95, axis=0)))
    if var_red:
        recovered_maps = np.sum(G > 0.7)
    else:
        recovered_maps = np.sum(G > 0.95)
    assert recovered_maps >= 4

    # Smoke test n_epochs > 1
    sparse_pca = SpcaFmri(n_components=4,
                          random_state=0,
                          mask=mask_img,
                          var_red=var_red,
                          smoothing_fwhm=0.,
                          n_epochs=2,
                          alpha=1)
    sparse_pca.fit(data)

    # Smoke test reduction_ratio < 1
    sparse_pca = SpcaFmri(n_components=4,
                          random_state=0,
                          reduction=2,
                          mask=mask_img,
                          var_red=var_red,
                          smoothing_fwhm=0.,
                          n_epochs=1,
                          alpha=1)
    sparse_pca.fit(data)
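
The recovery check above normalizes the ground-truth components and the estimated maps to unit l2 norm, then counts large entries of their absolute Gram matrix. The same logic as a standalone helper (a sketch; the function name is ours, not the test suite's):

import numpy as np


def count_recovered_maps(true_components, estimated_maps, threshold=0.95):
    # Count pairs whose absolute cosine similarity exceeds the threshold.
    def l2_normalize(X):
        norms = np.sqrt(np.sum(X ** 2, axis=1))
        norms[norms == 0] = 1  # avoid dividing all-zero rows by zero
        return X / norms[:, np.newaxis]

    G = np.abs(l2_normalize(true_components).dot(l2_normalize(estimated_maps).T))
    return int(np.sum(G > threshold))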
Example #6
def test_sparse_pca(backend):
    data, mask_img, components, rng = _make_test_data(n_subjects=10)
    sparse_pca = SpcaFmri(
        n_components=4,
        random_state=0,
        mask=mask_img,
        backend=backend,
        reduction=2,
        smoothing_fwhm=0.0,
        n_epochs=3,
        alpha=0.01,
    )
    sparse_pca.fit(data)
    maps = sparse_pca.masker_.inverse_transform(sparse_pca.components_).get_data()
    maps = np.reshape(np.rollaxis(maps, 3, 0), (4, 400))

    S = np.sqrt(np.sum(components ** 2, axis=1))
    S[S == 0] = 1
    components /= S[:, np.newaxis]

    S = np.sqrt(np.sum(maps ** 2, axis=1))
    S[S == 0] = 1
    maps /= S[:, np.newaxis]

    G = np.abs(components.dot(maps.T))
    recovered_maps = np.sum(G > 0.95)
    assert recovered_maps >= 4

    # Smoke test n_epochs > 1
    sparse_pca = SpcaFmri(n_components=4, random_state=0, mask=mask_img, smoothing_fwhm=0.0, n_epochs=2, alpha=1)
    sparse_pca.fit(data)

    # Smoke test reduction_ratio < 1
    sparse_pca = SpcaFmri(
        n_components=4, random_state=0, reduction=2, mask=mask_img, smoothing_fwhm=0.0, n_epochs=1, alpha=1
    )
    sparse_pca.fit(data)
Example #7
dict_fact = SpcaFmri(
    n_components=n_components,
    smoothing_fwhm=6.,
    memory="nilearn_cache",
    memory_level=2,
    reduction=3,
    verbose=4,
    alpha=0.001,
    random_state=0,
    n_epochs=1,
    n_jobs=1,
)

print('[Example] Learning maps')
t0 = time.time()
dict_fact.fit(func_filenames)
print('[Example] Dumping results')
# The decomposition estimator embeds its own masker
masker = dict_fact.masker_
components_img = masker.inverse_transform(dict_fact.components_)
components_img.to_filename('components.nii.gz')
elapsed = time.time() - t0  # avoid shadowing the time module
print('[Example] Run in %.2f s' % elapsed)
# Show the learned components using 4D plotting tools
from nilearn.plotting import plot_prob_atlas, show

print('[Example] Displaying')

plot_prob_atlas(components_img,
                view_type="filled_contours",
                title="Reduced sparse PCA")
show()
Example #8
dict_fact = SpcaFmri(
                     verbose=10,
                     alpha=0.001,
                     random_state=0,
                     learning_rate=.8,
                     replacement=True,
                     batch_size=50,
                     offset=0,
                     n_epochs=1,
                     backend='c',
                     # trace_folder=trace_folder,
                     n_jobs=n_jobs,
                     )

print('[Example] Learning maps')
t0 = time.time()
dict_fact.fit(func_filenames)
print('[Example] Dumping results')
# The decomposition estimator embeds its own masker
masker = dict_fact.masker_
components_img = masker.inverse_transform(dict_fact.components_)
components_img.to_filename(join(trace_folder, 'components.nii.gz'))
elapsed = time.time() - t0  # avoid shadowing the time module
print('[Example] Run in %.2f s' % elapsed)
# Show the learned components using 4D plotting tools
import matplotlib.pyplot as plt
from nilearn.plotting import plot_prob_atlas, plot_stat_map, show
from nilearn.image import index_img

print('[Example] Displaying')
fig, axes = plt.subplots(2, 1)
plot_prob_atlas(components_img, view_type="filled_contours",
                axes=axes[0])
Example #9
init = True

dict_fact = SpcaFmri(mask=mask,
                     smoothing_fwhm=3,
                     shelve=not raw,
                     n_components=n_components,
                     dict_init=fetch_atlas_smith_2009().rsn70 if init else None,
                     reduction=12,
                     alpha=0.001,
                     random_state=0,
                     n_epochs=1,
                     memory=expanduser("~/nilearn_cache"), memory_level=2,
                     verbose=4,
                     n_jobs=1,
                     )

print('[Example] Learning maps')
timings = np.zeros(20)
for n_jobs in range(1, 21):
    with num_threads(n_jobs):
        t0 = time.time()
        dict_fact.fit(func_filenames, raw=raw)
        timings[n_jobs - 1] = time.time() - t0

print(timings)
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt

plt.plot(np.arange(1, 21), timings)
plt.savefig('bench_hcp_blas.pdf')
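
The benchmark loop relies on a num_threads context manager that is not shown in the snippet. One way to provide it, assuming the threadpoolctl package is available (an assumption; the original may limit BLAS threads differently):

from contextlib import contextmanager

from threadpoolctl import threadpool_limits


@contextmanager
def num_threads(n):
    # Cap BLAS/OpenMP thread pools at n threads inside the with-block.
    with threadpool_limits(limits=n):
        yield

With this in place, the loop above measures dict_fact.fit time as a function of the number of BLAS threads.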