def test_explicit_fixed_effects():
    """Test that manually-computed fixed effects match the model's output.

    Fits single-session models, combines their effect/variance maps with
    ``compute_fixed_effects``, and checks the result against the fixed-effects
    maps produced by a multi-session ``FirstLevelModel``.

    Fix: use the module-level ``get_data(img)`` helper instead of the
    deprecated ``Nifti1Image.get_data()`` method (removed in nibabel >= 5),
    matching the other tests in this file.
    """
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = write_fake_fmri_data_and_design(
            shapes, rk)
        contrast = np.eye(rk)[1]
        # session 1
        multi_session_model = FirstLevelModel(mask_img=mask).fit(
            fmri_data[0], design_matrices=design_matrices[:1])
        dic1 = multi_session_model.compute_contrast(contrast,
                                                    output_type='all')
        # session 2
        multi_session_model.fit(fmri_data[1],
                                design_matrices=design_matrices[1:])
        dic2 = multi_session_model.compute_contrast(contrast,
                                                    output_type='all')
        # fixed effects model
        multi_session_model.fit(fmri_data, design_matrices=design_matrices)
        fixed_fx_dic = multi_session_model.compute_contrast(
            contrast, output_type='all')
        # manual version
        contrasts = [dic1['effect_size'], dic2['effect_size']]
        variance = [dic1['effect_variance'], dic2['effect_variance']]
        (
            fixed_fx_contrast,
            fixed_fx_variance,
            fixed_fx_stat,
        ) = compute_fixed_effects(contrasts, variance, mask)
        assert_almost_equal(get_data(fixed_fx_contrast),
                            get_data(fixed_fx_dic['effect_size']))
        assert_almost_equal(get_data(fixed_fx_variance),
                            get_data(fixed_fx_dic['effect_variance']))
        assert_almost_equal(get_data(fixed_fx_stat),
                            get_data(fixed_fx_dic['stat']))
        # test without mask variable
        (
            fixed_fx_contrast,
            fixed_fx_variance,
            fixed_fx_stat,
        ) = compute_fixed_effects(contrasts, variance)
        assert_almost_equal(get_data(fixed_fx_contrast),
                            get_data(fixed_fx_dic['effect_size']))
        assert_almost_equal(get_data(fixed_fx_variance),
                            get_data(fixed_fx_dic['effect_variance']))
        assert_almost_equal(get_data(fixed_fx_stat),
                            get_data(fixed_fx_dic['stat']))
        # ensure that using unbalanced effects size and variance images
        # raises an error
        with pytest.raises(ValueError):
            compute_fixed_effects(contrasts * 2, variance, mask)
        del mask, multi_session_model
def test_high_level_glm_null_contrasts():
    """Contrast computation must tolerate an all-zero session contrast.

    A two-session fit where the second session's contrast is zero should
    give the same statistic map as a single-session fit on session one.
    """
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)
    # One model over both sessions, one over the first session only.
    model_both = FirstLevelModel(mask_img=None)
    model_both = model_both.fit(fmri_data, design_matrices=design_matrices)
    model_first = FirstLevelModel(mask_img=None)
    model_first = model_first.fit(fmri_data[0],
                                  design_matrices=design_matrices[0])
    # Null contrast for the second session.
    stat_both = model_both.compute_contrast(
        [np.eye(rk)[:1], np.zeros((1, rk))], output_type='stat')
    stat_first = model_first.compute_contrast(np.eye(rk)[:1],
                                              output_type='stat')
    np.testing.assert_almost_equal(get_data(stat_both), get_data(stat_first))
def test_high_level_glm_one_session():
    """Smoke-test a single-session fit with and without an explicit mask."""
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)
    # Without a mask, the model computes one and stores it on the masker.
    model = FirstLevelModel(mask_img=None)
    model = model.fit(fmri_data[0], design_matrices=design_matrices[0])
    assert isinstance(model.masker_.mask_img_, Nifti1Image)
    # With an explicit mask, a contrast yields a Nifti image.
    model = FirstLevelModel(mask_img=mask)
    model = model.fit(fmri_data[0], design_matrices=design_matrices[0])
    z_map = model.compute_contrast(np.eye(rk)[:1])
    assert isinstance(z_map, Nifti1Image)
def test_high_level_glm_with_paths():
    """Fit a multi-session model from file paths rather than in-memory data."""
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 14)), 3
    with InTemporaryDirectory():
        mask_file, fmri_files, design_files = \
            write_fake_fmri_data_and_design(shapes, rk)
        model = FirstLevelModel(mask_img=None)
        model = model.fit(fmri_files, design_matrices=design_files)
        z_image = model.compute_contrast(np.eye(rk)[1])
        # The output must live in the same space as the mask file.
        assert_array_equal(z_image.affine, load(mask_file).affine)
        assert get_data(z_image).std() < 3.
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del z_image, fmri_files, model
def test_high_level_glm_different_design_matrices():
    """Test contrast estimation when sessions have different design matrices.

    Fix: use the module-level ``get_data(img)`` helper instead of the
    deprecated ``Nifti1Image.get_data()`` method (removed in nibabel >= 5),
    matching the other tests in this file.
    """
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
        shapes, rk)
    # add a column to the second design matrix
    design_matrices[1]['new'] = np.ones((19, 1))
    # Fit a glm with two sessions and design matrices
    multi_session_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data, design_matrices=design_matrices)
    z_joint = multi_session_model.compute_contrast(
        [np.eye(rk)[:1], np.eye(rk + 1)[:1]], output_type='effect_size')
    assert z_joint.shape == (7, 8, 7)
    # compare the estimated effects to separately-fitted models
    model1 = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = model1.compute_contrast(np.eye(rk)[:1], output_type='effect_size')
    model2 = FirstLevelModel(mask_img=mask).fit(
        fmri_data[1], design_matrices=design_matrices[1])
    z2 = model2.compute_contrast(np.eye(rk + 1)[:1],
                                 output_type='effect_size')
    # The joint effect is the average of the per-session effects.
    assert_almost_equal(get_data(z1) + get_data(z2), 2 * get_data(z_joint))
def test_high_level_glm_with_data():
    """Check contrast outputs of a multi-session model, with/without a mask."""
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = write_fake_fmri_data_and_design(
            shapes, rk)
        # No explicit mask: z-map support equals the computed mask size.
        model = FirstLevelModel(mask_img=None).fit(
            fmri_data, design_matrices=design_matrices)
        n_voxels = get_data(model.masker_.mask_img_).sum()
        z_image = model.compute_contrast(np.eye(rk)[1])
        assert np.sum(get_data(z_image) != 0) == n_voxels
        assert get_data(z_image).std() < 3.
        # with mask
        model = FirstLevelModel(mask_img=mask).fit(
            fmri_data, design_matrices=design_matrices)
        contrast = np.eye(rk)[:2]
        # Compute each output type individually.
        output_types = ('z_score', 'p_value', 'stat', 'effect_size',
                        'effect_variance')
        outputs = {
            output_type: model.compute_contrast(contrast,
                                                output_type=output_type)
            for output_type in output_types
        }
        mask_data = get_data(load(mask))
        # z-map is zero exactly outside the mask; variance positive inside.
        assert_array_equal(get_data(outputs['z_score']) == 0.,
                           mask_data == 0.)
        assert (get_data(outputs['effect_variance'])[mask_data > 0]
                > .001).all()
        # 'all' must reproduce every individually-computed map.
        all_images = model.compute_contrast(contrast, output_type='all')
        for output_type in output_types:
            assert_array_equal(get_data(all_images[output_type]),
                               get_data(outputs[output_type]))
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del (all_images, design_matrices, fmri_data, mask, model,
             n_voxels, outputs, rk, shapes, z_image)
def test_first_level_model_contrast_computation():
    """Exercise FirstLevelModel.compute_contrast over its argument surface.

    Covers: calling before fit (error), fixed-effects with multiple
    contrasts, t/F stat types, every output type, formula-style string
    contrasts, null contrasts, and invalid-parameter errors.
    """
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10), )
        mask, FUNCFILE, _ = write_fake_fmri_data_and_design(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask,
                                drift_model='polynomial', drift_order=3,
                                minimize_memory=False)
        c1, c2, cnull = np.eye(7)[0], np.eye(7)[1], np.zeros(7)
        # asking for contrast before model fit gives error
        with pytest.raises(ValueError):
            model.compute_contrast(c1)
        # fit model
        model = model.fit([func_img, func_img], [events, events])
        # smoke test for different contrasts in fixed effects
        model.compute_contrast([c1, c2])
        # smoke test for same contrast in fixed effects
        model.compute_contrast([c2, c2])
        # smoke test for contrast that will be repeated
        model.compute_contrast(c2)
        model.compute_contrast(c2, 'F')
        model.compute_contrast(c2, 't', 'z_score')
        model.compute_contrast(c2, 't', 'stat')
        model.compute_contrast(c2, 't', 'p_value')
        model.compute_contrast(c2, None, 'effect_size')
        model.compute_contrast(c2, None, 'effect_variance')
        # formula should work (passing variable name directly)
        model.compute_contrast('c0')
        model.compute_contrast('c1')
        model.compute_contrast('c2')
        # smoke test for one null contrast in group
        model.compute_contrast([c2, cnull])
        # only passing null contrasts should give back a value error
        with pytest.raises(ValueError):
            model.compute_contrast(cnull)
        with pytest.raises(ValueError):
            model.compute_contrast([cnull, cnull])
        # passing wrong parameters
        with pytest.raises(ValueError):
            model.compute_contrast([])
        with pytest.raises(ValueError):
            model.compute_contrast([c1, []])
        with pytest.raises(ValueError):
            model.compute_contrast(c1, '', '')
        with pytest.raises(ValueError):
            model.compute_contrast(c1, '', [])
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model
# NOTE(review): script fragment — relies on `first_level_model`,
# `design_matrix`, `fmri_img`, `events` and `t_r` defined earlier in the
# full example; do not run in isolation.
plot_contrast(first_level_model)
plt.show()

#########################################################################
# Not a huge effect, but rather positive overall. We could keep that one.
#
# By the way, a benefit of this approach is that we can test which voxels are
# well explained by the derivative term, hinting at misfit regions, a
# possibly valuable information. This is implemented by an F-test on
# the time derivative regressors.
# Select the derivative regressors (every second column, 1..19) for the F-test.
contrast_val = np.eye(design_matrix.shape[1])[1:21:2]
plot_contrast_matrix(contrast_val, design_matrix)
plt.show()

z_map = first_level_model.compute_contrast(contrast_val,
                                           output_type='z_score')
plotting.plot_stat_map(z_map, display_mode='z', threshold=3.0,
                       title='effect of time derivatives')
plt.show()

#########################################################################
# Well, there seems to be something here. Maybe we could adjust the
# timing, by increasing the slice_time_ref parameter from 0 to 0.5. Now the
# reference for model sampling is not the beginning of the volume
# acquisition, but the middle of it.
first_level_model = FirstLevelModel(t_r, hrf_model='spm + derivative',
                                    slice_time_ref=0.5)
first_level_model = first_level_model.fit(fmri_img, events=events)
active_minus_rest = conditions['active'] - conditions['rest'] ############################################################################### # Let's look at it: plot the coefficients of the contrast, indexed by # the names of the columns of the design matrix. from nilearn.reporting import plot_contrast_matrix plot_contrast_matrix(active_minus_rest, design_matrix=design_matrix) ############################################################################### # Below, we compute the estimated effect. It is in BOLD signal unit, # but has no statistical guarantees, because it does not take into # account the associated variance. eff_map = fmri_glm.compute_contrast(active_minus_rest, output_type='effect_size') ############################################################################### # In order to get statistical significance, we form a t-statistic, and # directly convert it into z-scale. The z-scale means that the values # are scaled to match a standard Gaussian distribution (mean=0, # variance=1), across voxels, if there were no effects in the data. z_map = fmri_glm.compute_contrast(active_minus_rest, output_type='z_score') ############################################################################### # Plot thresholded z scores map. # # We display it on top of the average # functional image of the series (could be the anatomical image of the
# Fit the GLM for the 2 sessions by speficying a FirstLevelModel and then fitting it. from nilearn.stats.first_level_model import FirstLevelModel print('Fitting a GLM') fmri_glm = FirstLevelModel() fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices) ######################################################################### # Now we can compute contrast-related statistical maps (in z-scale), and plot them. print('Computing contrasts') from nilearn import plotting # Iterate on contrasts for contrast_id, contrast_val in contrasts.items(): print("\tcontrast id: %s" % contrast_id) # compute the contrasts z_map = fmri_glm.compute_contrast(contrast_val, output_type='z_score') # plot the contrasts as soon as they're generated # the display is overlayed on the mean fMRI image # a threshold of 3.0 is used, more sophisticated choices are possible plotting.plot_stat_map(z_map, bg_img=mean_image, threshold=3.0, display_mode='z', cut_coords=3, black_bg=True, title=contrast_id) plotting.show() ######################################################################### # Based on the resulting maps we observe that the analysis results in # wide activity for the 'effects of interest' contrast, showing the
# NOTE(review): script fragment truncated at both ends — the first line
# closes a FirstLevelModel(...) call begun earlier, and the last call is
# continued past this chunk.
                           minimize_memory=True)

#########################################################################
# Compute fixed effects of the two runs and compute related images.
# For this, we first define the contrasts as we would do for a single
# session.
n_columns = design_matrices[0].shape[1]
# [-1, -1, 1, 1] weights the first four task regressors; the remaining
# (drift/confound) columns get zero weight.
contrast_val = np.hstack(([-1, -1, 1, 1], np.zeros(n_columns - 4)))

#########################################################################
# Statistics for the first session
from nilearn import plotting
cut_coords = [-129, -126, 49]
contrast_id = 'DSt_minus_SSt'

fmri_glm = fmri_glm.fit(fmri_img[0], design_matrices=design_matrices[0])
summary_statistics_session1 = fmri_glm.compute_contrast(
    contrast_val, output_type='all')
plotting.plot_stat_map(
    summary_statistics_session1['z_score'], bg_img=mean_img_,
    threshold=3.0, cut_coords=cut_coords,
    title='{0}, first session'.format(contrast_id))

#########################################################################
# Statistics for the second session
fmri_glm = fmri_glm.fit(fmri_img[1], design_matrices=design_matrices[1])
summary_statistics_session2 = fmri_glm.compute_contrast(
    contrast_val, output_type='all')
plotting.plot_stat_map(summary_statistics_session2['z_score'],
                       bg_img=mean_img_, threshold=3.0,
# NOTE(review): script fragment — `fmri_img`, `events`, `mask` and
# `mean_img` come from earlier in the full example.
from nilearn.stats.first_level_model import FirstLevelModel

# signal_scaling=False keeps effects in raw BOLD units;
# minimize_memory=False retains residuals/model terms for later inspection.
fmri_glm = FirstLevelModel(t_r=7,
                           drift_model='cosine',
                           signal_scaling=False,
                           mask_img=mask,
                           minimize_memory=False)

fmri_glm = fmri_glm.fit(fmri_img, events)

#########################################################################
# Calculate and plot contrast
# ---------------------------
from nilearn import plotting

z_map = fmri_glm.compute_contrast('active - rest')

plotting.plot_stat_map(z_map, bg_img=mean_img, threshold=3.1)

#########################################################################
# Extract the largest clusters
# ----------------------------
from nilearn.reporting import get_clusters_table
from nilearn import input_data

table = get_clusters_table(z_map, stat_threshold=3.1,
                           cluster_threshold=20).set_index('Cluster ID',
                                                           drop=True)
table.head()

# get the 6 largest clusters' max x, y, and z coordinates
# NOTE(review): script fragment truncated at the start — the first line
# closes a design-matrix construction call begun earlier in the full
# example; `t_r`, `slice_time_ref`, `adhd_dataset`, `pcc_coords` and
# `plotting` are also defined there.
                          add_reg_names=["pcc_seed"])
# Contrast selecting the seed regressor (first design-matrix column).
dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
contrasts = {'seed_based_glm': dmn_contrast}

#########################################################################
# Perform first level analysis
# ----------------------------
# Setup and fit GLM.
first_level_model = FirstLevelModel(t_r=t_r,
                                    slice_time_ref=slice_time_ref)
first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
                                          design_matrices=design_matrix)

#########################################################################
# Estimate the contrast.
print('Contrast seed_based_glm computed.')
z_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],
                                           output_type='z_score')

# Saving snapshots of the contrasts
filename = 'dmn_z_map.png'
display = plotting.plot_stat_map(z_map, threshold=3.0,
                                 title='Seed based GLM',
                                 cut_coords=pcc_coords)
display.add_markers(marker_coords=[pcc_coords], marker_color='g',
                    marker_size=300)
display.savefig(filename)
print("Save z-map in '{0}'.".format(filename))

###########################################################################
# Generating a report
# NOTE(review): script fragment truncated at both ends — the first line
# closes a model-construction call begun earlier, and the final
# make_glm_report(...) call continues past this chunk. `unique_sessions`,
# `func_filename`, `sessions`, `events`, `z_maps`, `condition_idx` and
# `session_idx` come from earlier in the full example.
                    memory='nilearn_cache')

##############################################################################
# Run the glm on data from each session
# -------------------------------------
for session in unique_sessions:
    # grab the fmri data for that particular session
    fmri_session = index_img(func_filename, sessions == session)

    # fit the glm
    glm.fit(fmri_session, events=events[session])

    # set up contrasts: one per condition
    conditions = events[session].trial_type.unique()
    for condition_ in conditions:
        z_maps.append(glm.compute_contrast(condition_))
        condition_idx.append(condition_)
        session_idx.append(session)

#########################################################################
# Generating a report
# -------------------
# Since we have already computed the FirstLevelModel
# and have the contrast, we can quickly create a summary report.
from nilearn.image import mean_img
from nilearn.reporting import make_glm_report

mean_img_ = mean_img(func_filename)

report = make_glm_report(
    glm,
    contrasts=conditions,