# Per-subject region time-series extraction for the ADHD dataset.
# NOTE(review): this chunk starts mid-script — `adhd_dataset`,
# `func_filenames`, `masker`, `mem`, `image`, `plotting`, `np`,
# `msdl_atlas_dataset` and `subject_time_series` are defined earlier
# in the file.
confound_filenames = adhd_dataset.confounds

for func_filename, confound_filename in zip(func_filenames,
                                            confound_filenames):
    print("Processing file %s" % func_filename)

    # Computing some confounds
    # High-variance confounds are computed from the image itself and
    # regressed out together with the dataset-provided confound file.
    hv_confounds = mem.cache(image.high_variance_confounds)(func_filename)

    region_ts = masker.transform(func_filename,
                                 confounds=[hv_confounds, confound_filename])
    subject_time_series.append(region_ts)

##############################################################################
# Computing group-sparse precision matrices
from nilearn.connectome import GroupSparseCovarianceCV
gsc = GroupSparseCovarianceCV(verbose=2)
gsc.fit(subject_time_series)

from sklearn import covariance
# NOTE(review): GraphLassoCV was renamed GraphicalLassoCV in scikit-learn
# 0.20 and removed in 0.22 — confirm the scikit-learn version this file
# is pinned to.
gl = covariance.GraphLassoCV(verbose=2)
# Graph lasso is a single-subject model: all subjects are concatenated
# into one time series before fitting.
gl.fit(np.concatenate(subject_time_series))

##############################################################################
# Displaying results
atlas_imgs = image.iter_img(msdl_atlas_dataset.maps)
atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in atlas_imgs]

# NOTE(review): statement truncated at the chunk boundary — the
# plot_connectome call's argument list continues past this view.
plotting.plot_connectome(gl.covariance_, atlas_region_coords,
                         edge_threshold='90%', title="Covariance",
print("Processing file %s" % func_filename) # Computing some confounds hv_confounds = mem.cache(image.high_variance_confounds)( func_filename) region_ts = masker.transform(func_filename, confounds=[hv_confounds, confound_filename]) subject_time_series.append(region_ts) ############################################################################## # Computing group-sparse precision matrices # ------------------------------------------ from nilearn.connectome import GroupSparseCovarianceCV gsc = GroupSparseCovarianceCV(verbose=2) gsc.fit(subject_time_series) from sklearn import covariance gl = covariance.GraphLassoCV(verbose=2) gl.fit(np.concatenate(subject_time_series)) ############################################################################## # Displaying results # ------------------- atlas_imgs = image.iter_img(msdl_atlas_dataset.maps) atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in atlas_imgs] labels = msdl_atlas_dataset.labels plotting.plot_connectome(gl.covariance_,
# NOTE(review): this chunk starts mid-statement — the lines below are the
# trailing keyword arguments of a call (presumably
# generate_group_sparse_gaussian_graphs) opened above this view; `plt`,
# `plot_matrix`, `precisions`, `subjects`, `n_subjects` and `n_displayed`
# are also defined earlier in the file.
    n_subjects=n_subjects, n_features=10,
    min_n_samples=30, max_n_samples=50,
    density=0.1)

fig = plt.figure(figsize=(10, 7))
plt.subplots_adjust(hspace=0.4)

# First column: ground-truth precision matrices, one row per subject.
for n in range(n_displayed):
    plt.subplot(n_displayed, 4, 4 * n + 1)
    plot_matrix(precisions[n])
    if n == 0:
        plt.title("ground truth")
    plt.ylabel("subject %d" % n)

# Run group-sparse covariance on all subjects
from nilearn.connectome import GroupSparseCovarianceCV
gsc = GroupSparseCovarianceCV(max_iter=50, verbose=1)
gsc.fit(subjects)

# Second column: the group-sparse estimates.
for n in range(n_displayed):
    plt.subplot(n_displayed, 4, 4 * n + 2)
    plot_matrix(gsc.precisions_[..., n])
    if n == 0:
        plt.title("group-sparse\n$\\alpha=%.2f$" % gsc.alpha_)

# Fit one graph lasso per subject
from sklearn.covariance import GraphLassoCV
# NOTE(review): GraphLassoCV was renamed GraphicalLassoCV in scikit-learn
# 0.20 and removed in 0.22 — confirm the scikit-learn version this file
# is pinned to.
gl = GraphLassoCV(verbose=1)

# NOTE(review): the loop body is truncated at the chunk boundary — it
# presumably continues with per-subject plotting past this view.
for n, subject in enumerate(subjects[:n_displayed]):
    gl.fit(subject)
# NOTE(review): this chunk starts mid-script — `plt`, `plotting`,
# `precisions`, `subjects` and `n_displayed` are defined earlier in the
# file.
# First column: ground-truth precision matrices, one row per subject,
# drawn with a color range symmetric around zero.
for n in range(n_displayed):
    ax = plt.subplot(n_displayed, 4, 4 * n + 1)
    max_precision = precisions[n].max()
    plotting.plot_matrix(precisions[n], vmin=-max_precision,
                         vmax=max_precision, axes=ax, colorbar=False)
    if n == 0:
        plt.title("ground truth")
    plt.ylabel("subject %d" % n)

# Run group-sparse covariance on all subjects
from nilearn.connectome import GroupSparseCovarianceCV
gsc = GroupSparseCovarianceCV(max_iter=50, verbose=1)
gsc.fit(subjects)

# Second column: the group-sparse estimates, same symmetric color scale.
for n in range(n_displayed):
    ax = plt.subplot(n_displayed, 4, 4 * n + 2)
    max_precision = gsc.precisions_[..., n].max()
    plotting.plot_matrix(gsc.precisions_[..., n], axes=ax,
                         vmin=-max_precision, vmax=max_precision,
                         colorbar=False)
    if n == 0:
        plt.title("group-sparse\n$\\alpha=%.2f$" % gsc.alpha_)

# Fit one graph lasso per subject
# NOTE(review): truncated at the chunk boundary — the `try:` body (and its
# matching except clause) continue past this view.
try:
def test_group_sparse_covariance():
    """Exercise group_sparse_covariance end to end.

    Checks: determinism across identical debug-mode runs, iteration count
    and non-increasing objective via a probe callback, rejection of invalid
    inputs, and agreement between the CV and plain estimators.
    """
    # Synthetic multi-subject signals with a known sparse precision
    # structure; a fixed RandomState keeps the test deterministic.
    signals, _, _ = generate_group_sparse_gaussian_graphs(
        density=0.1, n_subjects=5, n_features=10,
        min_n_samples=100, max_n_samples=151,
        random_state=np.random.RandomState(0))

    alpha = 0.1

    # Two identical debug-mode runs must hit the tolerance limit and yield
    # (almost) the same precision matrices.
    emp_covs, precisions_first = group_sparse_covariance(
        signals, alpha, max_iter=20, tol=1e-2, debug=True, verbose=0)
    emp_covs, precisions_second = group_sparse_covariance(
        signals, alpha, max_iter=20, tol=1e-2, debug=True, verbose=0)
    np.testing.assert_almost_equal(precisions_first, precisions_second,
                                   decimal=4)

    class Probe(object):
        """Callback recording the objective value after each iteration."""

        def __init__(self):
            self.objective = []

        def __call__(self, emp_covs, n_samples, alpha, max_iter, tol,
                     n, omega, omega_diff):
            # n < 0 marks the pre-iteration invocation; skip it.
            if n >= 0:
                _, objective = group_sparse_scores(omega, n_samples,
                                                   emp_covs, alpha)
                self.objective.append(objective)

    # Use a probe to test for number of iterations and decreasing objective.
    probe = Probe()
    emp_covs, precisions = group_sparse_covariance(
        signals, alpha, max_iter=4, tol=None, verbose=0,
        probe_function=probe)
    recorded = probe.objective
    # One recorded value per iteration: exactly max_iter of them.
    assert_equal(len(recorded), 4)
    # np.testing.assert_array_less is a strict comparison and zeros can
    # occur in np.diff(recorded), so check non-increase by hand.
    assert_true(np.all(np.diff(recorded) <= 0))
    assert_equal(precisions.shape, (10, 10, 5))

    # Invalid inputs must be rejected.
    assert_raises(ValueError, group_sparse_covariance, signals, "")
    assert_raises(ValueError, group_sparse_covariance, 1, alpha)
    assert_raises(ValueError, group_sparse_covariance,
                  [np.ones((2, 2)), np.ones((2, 3))], alpha)

    # The CV estimator and the plain estimator must agree when the plain
    # one is given the alpha selected by cross-validation.
    gsc_cv = GroupSparseCovarianceCV(alphas=4, tol=1e-1, max_iter=20,
                                     verbose=0, early_stopping=True)
    gsc_cv.fit(signals)
    gsc_plain = GroupSparseCovariance(alpha=gsc_cv.alpha_, tol=1e-1,
                                      max_iter=20, verbose=0)
    gsc_plain.fit(signals)
    np.testing.assert_almost_equal(gsc_cv.precisions_,
                                   gsc_plain.precisions_, decimal=4)