def test_high_level_glm_null_contrasts():
    """Check that an all-zero contrast for one session leaves results unchanged."""
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data(shapes, rk)
    # One model fit on both sessions, one fit on the first session only.
    model_all = FirstLevelGLM(mask=None).fit(fmri_data, design_matrices)
    model_one = FirstLevelGLM(mask=None).fit(
        fmri_data[0], design_matrices[0])
    # The second session receives a null contrast: it must contribute nothing.
    contrast = np.eye(rk)[:1]
    stat_all, = model_all.transform(
        [contrast, np.zeros((1, rk))], output_z=False, output_stat=True)
    stat_one, = model_one.transform(
        [contrast], output_z=False, output_stat=True)
    np.testing.assert_almost_equal(stat_all.get_data(), stat_one.get_data())
def test_high_level_glm_null_contrasts():
    """Contrast computation must tolerate an all-zero contrast vector."""
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data(shapes, rk)
    multi_session_model = FirstLevelGLM(mask=None).fit(
        fmri_data, design_matrices)
    single_session_model = FirstLevelGLM(mask=None).fit(
        fmri_data[0], design_matrices[0])
    base_contrast = np.eye(rk)[:1]
    null_contrast = np.zeros((1, rk))
    # Statistic from [contrast, zeros] over two sessions must match the
    # single-session statistic computed for the same contrast alone.
    z_multi, = multi_session_model.transform(
        [base_contrast, null_contrast], output_z=False, output_stat=True)
    z_single, = single_session_model.transform(
        [base_contrast], output_z=False, output_stat=True)
    np.testing.assert_almost_equal(z_multi.get_data(), z_single.get_data())
def test_high_level_glm_with_data():
    """Fit a two-session GLM on synthetic data and sanity-check the maps."""
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
    mask, fmri_data, design_matrices = write_fake_fmri_data(shapes, rk)

    # No explicit mask: one is computed, and z-values fill it exactly.
    glm = FirstLevelGLM(mask=None).fit(fmri_data, design_matrices)
    n_voxels = glm.masker_.mask_img_.get_data().sum()
    z_image, = glm.transform([np.eye(rk)[1]] * 2)
    assert_equal(np.sum(z_image.get_data() != 0), n_voxels)
    assert_true(z_image.get_data().std() < 3.)

    # with mask
    glm = FirstLevelGLM(mask=mask).fit(fmri_data, design_matrices)
    z_image, effect_image, variance_image = glm.transform(
        [np.eye(rk)[:2]] * 2, output_effects=True, output_variance=True)
    assert_array_equal(z_image.get_data() == 0., load(mask).get_data() == 0.)
    in_mask = load(mask).get_data() > 0
    assert_true((variance_image.get_data()[in_mask, 0] > .001).all())
def test_high_level_glm_with_data():
    """Check z/effect/variance outputs of a multi-session GLM fit."""
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
    mask, fmri_data, design_matrices = write_fake_fmri_data(shapes, rk)

    # Without an explicit mask one is computed; every in-mask voxel gets
    # a (reasonably scaled) z-value.
    model = FirstLevelGLM(mask=None).fit(fmri_data, design_matrices)
    n_voxels = model.masker_.mask_img_.get_data().sum()
    z_image, = model.transform([np.eye(rk)[1]] * 2)
    assert_equal(np.sum(z_image.get_data() != 0), n_voxels)
    assert_true(z_image.get_data().std() < 3.)

    # With an explicit mask, outputs are zero exactly where the mask is zero.
    model = FirstLevelGLM(mask=mask).fit(fmri_data, design_matrices)
    z_image, effect_image, variance_image = model.transform(
        [np.eye(rk)[:2]] * 2, output_effects=True, output_variance=True)
    mask_data = load(mask).get_data()
    assert_array_equal(z_image.get_data() == 0., mask_data == 0.)
    assert_true(
        (variance_image.get_data()[mask_data > 0, 0] > .001).all())
def test_high_level_glm_with_paths():
    """The GLM must accept file paths (not just images) as inputs."""
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 14)), 3
    with InTemporaryDirectory():
        mask_file, fmri_files, design_files = write_fake_fmri_data(shapes, rk)
        model = FirstLevelGLM(mask=None).fit(fmri_files, design_files)
        z_image, = model.transform([np.eye(rk)[1]] * 2)
        # The output inherits the geometry of the on-disk mask image.
        assert_array_equal(z_image.get_affine(), load(mask_file).get_affine())
        assert_true(z_image.get_data().std() < 3.)
        # Drop references to file-backed objects so the temporary
        # directory can be removed without WindowsError.
        del z_image, fmri_files, model
def test_high_level_glm_one_session():
    """A single-session fit works with and without an explicit mask."""
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = generate_fake_fmri_data(shapes, rk)

    # Without a mask, one is computed and stored on the masker.
    model = FirstLevelGLM(mask=None).fit(fmri_data[0], design_matrices[0])
    assert_true(isinstance(model.masker_.mask_img_, Nifti1Image))

    # With an explicit mask, transform still yields a Nifti image.
    model = FirstLevelGLM(mask=mask).fit(fmri_data[0], design_matrices[0])
    z1, = model.transform(np.eye(rk)[:1])
    assert_true(isinstance(z1, Nifti1Image))
contrasts = {'active-rest': contrasts['active'] - contrasts['rest']} # fit GLM print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit( [fmri_img], design_matrix) print("Computing contrasts ..") output_dir = 'results' if not os.path.exists(output_dir): os.mkdir(output_dir) for contrast_id, contrast_val in contrasts.items(): print("\tcontrast id: %s" % contrast_id) z_map, t_map, eff_map, var_map = fmri_glm.transform( contrasts[contrast_id], contrast_name=contrast_id, output_z=True, output_stat=True, output_effects=True, output_variance=True) # store stat maps to disk for dtype, out_map in zip(['z', 't', 'effects', 'variance'], [z_map, t_map, eff_map, var_map]): map_dir = os.path.join(output_dir, '%s_maps' % dtype) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id) nib.save(out_map, map_path) print("\t\t%s map: %s" % (dtype, map_path)) # plot one activation map display = plot_stat_map(z_map, bg_img=mean_img, threshold=3.0, display_mode='z', cut_coords=3, black_bg=True,
n_columns = design_matrices[0].shape[1]


def pad_vector(contrast_, n_columns):
    """Right-pad a contrast vector with zeros up to n_columns entries."""
    return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))


# Contrasts of interest for the FIAC paradigm.
contrasts = {'SStSSp_minus_DStDSp': pad_vector([1, 0, 0, -1], n_columns),
             'DStDSp_minus_SStSSp': pad_vector([-1, 0, 0, 1], n_columns),
             'DSt_minus_SSt': pad_vector([-1, -1, 1, 1], n_columns),
             'DSp_minus_SSp': pad_vector([-1, 1, -1, 1], n_columns),
             'DSt_minus_SSt_for_DSp': pad_vector([0, -1, 0, 1], n_columns),
             'DSp_minus_SSp_for_DSt': pad_vector([0, 0, -1, 1], n_columns),
             'Deactivation': pad_vector([-1, -1, -1, -1, 4], n_columns),
             'Effects_of_interest': np.eye(n_columns)[:5]}

print('Computing contrasts...')
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print(' Contrast % 2i out of %i: %s' % (
        index + 1, len(contrasts), contrast_id))
    z_image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
    # The same contrast is applied to both sessions.
    z_map, = fmri_glm.transform(
        [contrast_val] * 2, contrast_name=contrast_id, output_z=True)
    nib.save(z_map, z_image_path)

    # make a snapshot of the contrast activation
    if contrast_id == 'Effects_of_interest':
        display = plotting.plot_stat_map(
            z_map, bg_img=mean_img_, threshold=2.5, title=contrast_id)
        display.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))

# Fixed typo in the user-facing message: "witten" -> "written".
print('All the results were written in %s' % write_dir)
plotting.show()
# NOTE(review): the lines through `return contrast` belong to the body of
# make_fiac_contrasts, whose `def` line is outside this view — presumably
# they are indented inside that function; verify against the full file.
contrast['DSt_minus_SSt_for_DSp'] = _pad_vector([0, - 1, 0, 1], n_columns)
contrast['DSp_minus_SSp_for_DSt'] = _pad_vector([0, 0, - 1, 1], n_columns)
contrast['Deactivation'] = _pad_vector([- 1, - 1, - 1, - 1, 4], n_columns)
contrast['Effects_of_interest'] = np.eye(n_columns)[:5]
return contrast


# compute fixed effects of the two runs and compute related images
n_columns = np.load(design_files[0])['X'].shape[1]
contrasts = make_fiac_contrasts(n_columns)
print('Computing contrasts...')
mean_ = mean_img(data['func1'])
for index, (contrast_id, contrast_val) in enumerate(contrasts.items()):
    print(' Contrast % 2i out of %i: %s' % (
        index + 1, len(contrasts), contrast_id))
    z_image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id)
    # The same contrast is applied to both sessions.
    z_map, = multi_session_model.transform(
        [contrast_val] * 2, contrast_name=contrast_id, output_z=True)
    nib.save(z_map, z_image_path)
    # make a snapshot of the contrast activation
    if contrast_id == 'Effects_of_interest':
        # Symmetric color scale around zero (vmax/vmin computed but not
        # passed to plot_stat_map in this view — possibly leftover code).
        vmax = max(- z_map.get_data().min(), z_map.get_data().max())
        vmin = - vmax
        display = plot_stat_map(z_map, bg_img=mean_, threshold=2.5,
                                title=contrast_id)
        display.savefig(path.join(write_dir, '%s_z_map.png' % contrast_id))
# NOTE(review): "witten" should read "written" — typo in this user-facing
# message (left unchanged here; changing a runtime string is a code fix).
print('All the results were witten in %s' % write_dir)
plt.show()
# Wrap the raw 4D array into a Nifti image with identity affine.
niimgs = nb.Nifti1Image(fmri, affine=np.eye(4))

# Testing with a GLM
glm = FirstLevelGLM(mask=mask_img, t_r=t_r, standardize=True,
                    noise_model='ols')
glm.fit(niimgs, design)

# One canonical contrast per design column.
contrast_matrix = np.eye(design.shape[1])
contrasts = {name: contrast_matrix[idx]
             for idx, name in enumerate(design.columns)}

z_maps = {}
for condition_id in event_types:
    z_maps[condition_id] = glm.transform(
        contrasts[condition_id], contrast_name=condition_id,
        output_z=True, output_stat=False, output_effects=False,
        output_variance=False)

# Plot each ground-truth mask next to the corresponding estimated z map.
fig, axx = plt.subplots(nrows=len(event_types), ncols=2, figsize=(8, 8))
paired = zip(masks.items(), z_maps.items())
for i, ((cond_id, mask), (condition_id, z_map)) in enumerate(paired):
    img_z_map = z_map[0].get_data()
    niimg = nb.Nifti1Image(mask.astype('int'), affine=np.eye(4))
    cuts = find_cuts.find_cut_slices(niimg)
    axx[i, 0].set_title('ground truth: %s' % condition_id)
    axx[i, 0].imshow(mask[..., cuts[0]])
    axx[i, 1].set_title('z map: %s' % condition_id)
    axx[i, 1].imshow(img_z_map[..., cuts[0]])
# specify contrasts
contrast_matrix = np.eye(design_matrix.shape[1])
contrasts = {name: contrast_matrix[idx]
             for idx, name in enumerate(design_matrix.columns)}

# more interesting contrasts
faces, scrambled = contrasts['faces'], contrasts['scrambled']
contrasts = {
    'faces-scrambled': faces - scrambled,
    'scrambled-faces': -faces + scrambled,
    'effects_of_interest': np.vstack((faces, scrambled))
}

# fit GLM
print('Fitting a GLM')
fmri_glm = FirstLevelGLM(standardize=False).fit(fmri_img, design_matrices)

# compute contrast maps
print('Computing contrasts')
from nilearn import plotting
for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    # The same contrast is duplicated across the two sessions.
    z_map, = fmri_glm.transform(
        [contrast_val] * 2, contrast_name=contrast_id, output_z=True)
    plotting.plot_stat_map(
        z_map, bg_img=mean_image, threshold=3.0, display_mode='z',
        cut_coords=3, black_bg=True, title=contrast_id)
plotting.show()
# fit GLM
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit(
    [fmri_img], design_matrix)

print("Computing contrasts ..")
output_dir = 'results'
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    # Estimate z, t, effect-size and variance maps for this contrast.
    # Fixed: use the already-bound loop value `contrast_val` instead of
    # re-indexing `contrasts[contrast_id]` (same value, clearer intent).
    z_map, t_map, eff_map, var_map = fmri_glm.transform(
        contrast_val, contrast_name=contrast_id, output_z=True,
        output_stat=True, output_effects=True, output_variance=True)
    # store stat maps to disk
    for dtype, out_map in zip(['z', 't', 'effects', 'variance'],
                              [z_map, t_map, eff_map, var_map]):
        map_dir = os.path.join(output_dir, '%s_maps' % dtype)
        if not os.path.exists(map_dir):
            os.makedirs(map_dir)
        map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
        nib.save(out_map, map_path)
        print("\t\t%s map: %s" % (dtype, map_path))

# plot one activation map