def run(idx, reduction, alpha, mask, raw, n_components, init, func_filenames):
    output_dir = join(trace_folder, 'experiment_%i' % idx)
    try:
        os.makedirs(output_dir)
    except OSError:
        pass
    dict_fact = SpcaFmri(mask=mask,
                         smoothing_fwhm=3,
                         batch_size=40,
                         shelve=not raw,
                         n_components=n_components,
                         replacement=False,
                         dict_init=fetch_atlas_smith_2009().rsn70 if init else None,
                         reduction=reduction,
                         alpha=alpha,
                         random_state=0,
                         n_epochs=2,
                         l1_ratio=0.5,
                         backend='c',
                         memory=expanduser("~/nilearn_cache"),
                         memory_level=2,
                         verbose=5,
                         n_jobs=1,
                         trace_folder=output_dir)

    print('[Example] Learning maps')
    t0 = time.time()
    dict_fact.fit(func_filenames, raw=raw)
    t1 = time.time() - t0
    print('[Example] Dumping results')
    # Decomposition estimators embed their own masker
    masker = dict_fact.masker_
    components_img = masker.inverse_transform(dict_fact.components_)
    components_img.to_filename(join(output_dir, 'components_final.nii.gz'))
    print('[Example] Run in %.2f s' % t1)

    # Show the components using 4D plotting tools
    import matplotlib.pyplot as plt
    from nilearn.image import index_img
    from nilearn.plotting import plot_prob_atlas, plot_stat_map, show

    print('[Example] Displaying')
    fig, axes = plt.subplots(2, 1)
    plot_prob_atlas(components_img, view_type="filled_contours",
                    axes=axes[0])
    plot_stat_map(index_img(components_img, 0),
                  axes=axes[1],
                  colorbar=False,
                  threshold=0)
    plt.savefig(join(output_dir, 'components.pdf'))
    show()
def test_component_sign():
    # Regression test
    # We should have a heuristic that flips the sign of components in
    # DictLearning to have more positive values than negative values, for
    # instance by making sure that the largest value is positive.
    data, mask_img, components, rng = _make_test_data(n_subjects=2,
                                                      noisy=True)
    for mp in components:
        assert_less_equal(-mp.min(), mp.max())

    sparse_pca = SpcaFmri(n_components=4, random_state=rng,
                          mask=mask_img,
                          smoothing_fwhm=0.)
    sparse_pca.fit(data)
    for mp in iter_img(sparse_pca.masker_.inverse_transform(
            sparse_pca.components_)):
        mp = mp.get_data()
        assert_less_equal(np.sum(mp[mp <= 0]), np.sum(mp[mp > 0]))
def test_sparse_pca(backend):
    data, mask_img, components, rng = _make_test_data(n_subjects=10)
    sparse_pca = SpcaFmri(n_components=4, random_state=0,
                          mask=mask_img,
                          backend=backend,
                          reduction=2,
                          smoothing_fwhm=0.,
                          n_epochs=3, alpha=0.01)
    sparse_pca.fit(data)
    maps = sparse_pca.masker_.inverse_transform(
        sparse_pca.components_).get_data()
    maps = np.reshape(np.rollaxis(maps, 3, 0), (4, 400))

    # Normalize ground-truth components and recovered maps to unit l2 norm
    # before comparing them
    S = np.sqrt(np.sum(components ** 2, axis=1))
    S[S == 0] = 1
    components /= S[:, np.newaxis]
    S = np.sqrt(np.sum(maps ** 2, axis=1))
    S[S == 0] = 1
    maps /= S[:, np.newaxis]

    G = np.abs(components.dot(maps.T))
    recovered_maps = np.sum(G > 0.95)
    assert recovered_maps >= 4

    # Smoke test n_epochs > 1
    sparse_pca = SpcaFmri(n_components=4, random_state=0,
                          mask=mask_img,
                          smoothing_fwhm=0.,
                          n_epochs=2, alpha=1)
    sparse_pca.fit(data)

    # Smoke test reduction_ratio < 1
    sparse_pca = SpcaFmri(n_components=4, random_state=0,
                          reduction=2,
                          mask=mask_img,
                          smoothing_fwhm=0.,
                          n_epochs=1, alpha=1)
    sparse_pca.fit(data)
func_filenames = adhd_dataset.func  # list of 4D nifti files for each subject

# Print basic information on the dataset
print('First functional nifti image (4D) is at: %s' %
      adhd_dataset.func[0])  # 4D data

# Apply our decomposition estimator with reduction
n_components = 20
dict_fact = SpcaFmri(n_components=n_components, smoothing_fwhm=6.,
                     memory="nilearn_cache", memory_level=2,
                     reduction=3,
                     verbose=4,
                     alpha=0.001,
                     random_state=0,
                     n_epochs=1,
                     n_jobs=1,
                     )

print('[Example] Learning maps')
t0 = time.time()
dict_fact.fit(func_filenames)
print('[Example] Dumping results')
# Decomposition estimators embed their own masker
masker = dict_fact.masker_
components_img = masker.inverse_transform(dict_fact.components_)
components_img.to_filename('components.nii.gz')
# Keep the elapsed time in its own variable so the `time` module is not shadowed
elapsed_time = time.time() - t0
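# Optional follow-up (not part of the original snippet): a minimal sketch of
# how the saved components could be visualized, mirroring the plotting done in
# the run() example above. Assumes matplotlib and nilearn are installed; the
# output file name is illustrative.
import matplotlib.pyplot as plt
from nilearn.image import index_img
from nilearn.plotting import plot_prob_atlas, plot_stat_map, show

fig, axes = plt.subplots(2, 1)
# Outline all components at once, then show the first one as a stat map
plot_prob_atlas(components_img, view_type="filled_contours", axes=axes[0])
plot_stat_map(index_img(components_img, 0), axes=axes[1],
              colorbar=False, threshold=0)
plt.savefig('components.pdf')
show()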
trace_folder = expanduser('~/output/modl/adhd')
try:
    os.makedirs(trace_folder)
except OSError:
    pass

dict_fact = SpcaFmri(n_components=n_components, smoothing_fwhm=6.,
                     memory=expanduser("~/nilearn_cache"),
                     memory_level=2,
                     reduction=2,
                     projection='partial',
                     var_red='weight_based',
                     verbose=10,
                     alpha=0.001,
                     random_state=0,
                     learning_rate=.8,
                     replacement=True,
                     batch_size=50,
                     offset=0,
                     n_epochs=1,
                     backend='c',
                     # trace_folder=trace_folder,
                     n_jobs=n_jobs,
                     )

print('[Example] Learning maps')
t0 = time.time()
dict_fact.fit(func_filenames)
print('[Example] Dumping results')
# Decomposition estimators embed their own masker
masker = dict_fact.masker_
func_filenames = func_filenames[:2]

# Apply our decomposition estimator with reduction
n_components = 70
n_jobs = 20
raw = True
init = True

dict_fact = SpcaFmri(mask=mask,
                     smoothing_fwhm=3,
                     shelve=not raw,
                     n_components=n_components,
                     dict_init=fetch_atlas_smith_2009().rsn70 if init else None,
                     reduction=12,
                     alpha=0.001,
                     random_state=0,
                     n_epochs=1,
                     memory=expanduser("~/nilearn_cache"),
                     memory_level=2,
                     verbose=4,
                     n_jobs=1,
                     )

print('[Example] Learning maps')
# Benchmark fit time as the number of threads increases
timings = np.zeros(20)
for n_jobs in range(1, 21):
    with num_threads(n_jobs):
        t0 = time.time()
        dict_fact.fit(func_filenames, raw=raw)
        timings[n_jobs - 1] = time.time() - t0
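# Optional follow-up (not part of the original snippet): a minimal sketch of
# how the collected timings could be inspected, e.g. plotted against the
# number of threads. Assumes matplotlib is available; the output path is
# illustrative.
import matplotlib.pyplot as plt

plt.plot(range(1, 21), timings, marker='o')
plt.xlabel('Number of threads')
plt.ylabel('Fit time (s)')
plt.savefig('timings.pdf')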
def test_sparse_pca(backend):
    data, mask_img, components, rng = _make_test_data(n_subjects=16)
    sparse_pca = SpcaFmri(n_components=4, random_state=0,
                          mask=mask_img,
                          backend=backend,
                          smoothing_fwhm=0.,
                          n_epochs=1, alpha=0.05)
    sparse_pca.fit(data)
    maps = sparse_pca.masker_.inverse_transform(
        sparse_pca.components_).get_data()
    maps = np.reshape(np.rollaxis(maps, 3, 0), (4, 400))

    S = np.sqrt(np.sum(components ** 2, axis=1))
    S[S == 0] = 1
    components /= S[:, np.newaxis]
    S = np.sqrt(np.sum(maps ** 2, axis=1))
    S[S == 0] = 1
    maps /= S[:, np.newaxis]

    G = np.abs(components.dot(maps.T))
    recovered_maps = min(np.sum(np.any(G > 0.95, axis=1)),
                         np.sum(np.any(G > 0.95, axis=0)))
    assert recovered_maps >= 4

    # Smoke test n_epochs > 1
    sparse_pca = SpcaFmri(n_components=4, random_state=0,
                          mask=mask_img,
                          smoothing_fwhm=0.,
                          n_epochs=2, alpha=1)
    sparse_pca.fit(data)

    # Smoke test reduction_ratio < 1
    sparse_pca = SpcaFmri(n_components=4, random_state=0,
                          reduction=2,
                          mask=mask_img,
                          smoothing_fwhm=0.,
                          n_epochs=1, alpha=1)
    sparse_pca.fit(data)
trace_folder = expanduser('~/output/modl/adhd')
try:
    os.makedirs(trace_folder)
except OSError:
    pass

dict_fact = SpcaFmri(n_components=n_components, smoothing_fwhm=6.,
                     memory=expanduser("~/nilearn_cache"),
                     memory_level=2,
                     reduction=2,
                     projection='partial',
                     verbose=10,
                     alpha=0.001,
                     random_state=0,
                     learning_rate=.8,
                     batch_size=50,
                     offset=0,
                     n_epochs=1,
                     backend='c',
                     n_jobs=n_jobs,
                     )

print('[Example] Learning maps')
t0 = time.time()
dict_fact.fit(func_filenames)
print('[Example] Dumping results')
# Decomposition estimators embed their own masker
masker = dict_fact.masker_