def test_first_level_model_design_creation():
    """FirstLevelModel.fit must build the same design matrix as a direct
    call to make_first_level_design_matrix."""
    with InTemporaryDirectory():
        mask, FUNCFILE, _ = _write_fake_fmri_data(((7, 8, 9, 10),))
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # Fit the model on the basic paradigm with the glover hrf.
        t_r, slice_time_ref = 10.0, 0.
        events = basic_paradigm()
        model = FirstLevelModel(t_r, slice_time_ref, mask=mask,
                                drift_model='polynomial',
                                drift_order=3).fit(func_img, events)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # Recompute the design matrix by hand and compare.
        n_scans = func_img.get_data().shape[3]
        frame_times = np.linspace(slice_time_ref * t_r,
                                  (n_scans - 1 + slice_time_ref) * t_r,
                                  n_scans)
        design = make_first_level_design_matrix(
            frame_times, events, drift_model='polynomial', drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
        # Drop references to file-backed objects so the temporary
        # directory can be removed on Windows.
        del FUNCFILE, mask, model, func_img
def test_first_level_model_design_creation():
    """The design matrix computed by the model must equal the one computed
    directly with make_design_matrix."""
    with InTemporaryDirectory():
        mask, FUNCFILE, _ = write_fake_fmri_data(((7, 8, 9, 10),))
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic paradigm, glover hrf
        t_r, slice_time_ref = 1.0, 0.
        paradigm = basic_paradigm()
        model = FirstLevelModel(t_r, slice_time_ref, mask=mask,
                                drift_model='polynomial',
                                drift_order=3).fit(func_img, paradigm)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # recompute the design by hand and compare element-wise
        n_scans = func_img.get_data().shape[3]
        frame_times = np.linspace(slice_time_ref * t_r,
                                  (n_scans - 1 + slice_time_ref) * t_r,
                                  n_scans)
        design = make_design_matrix(frame_times, paradigm,
                                    drift_model='polynomial', drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
def test_first_level_model_design_creation():
    """Model-side and direct design-matrix computations must agree."""
    with InTemporaryDirectory():
        mask, FUNCFILE, _ = _write_fake_fmri_data(((7, 8, 9, 10), ))
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r, slice_time_ref = 10.0, 0.
        events = basic_paradigm()
        model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask,
                                drift_model='polynomial',
                                drift_order=3).fit(func_img, events)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # build the same design matrix directly and compare
        n_scans = get_data(func_img).shape[3]
        frame_times = np.linspace(
            slice_time_ref * t_r, (n_scans - 1 + slice_time_ref) * t_r,
            n_scans)
        design = make_first_level_design_matrix(
            frame_times, events, drift_model='polynomial', drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
        # Release file-backed objects so the temporary directory can be
        # deleted on Windows.
        del FUNCFILE, mask, model, func_img
def test_csv_io():
    """Round-trip a design matrix through CSV and check it is unchanged."""
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    paradigm = modulated_event_paradigm()
    DM = make_design_matrix(frame_times, paradigm, hrf_model="glover",
                            drift_model="polynomial", drift_order=3)
    path = "design_matrix.csv"
    with InTemporaryDirectory():
        DM.to_csv(path)
        # BUG FIX: DataFrame.from_csv was deprecated in pandas 0.21 and
        # removed in 1.0; read_csv(index_col=0) is the supported equivalent.
        DM2 = pd.read_csv(path, index_col=0)
    _, matrix, names = check_design_matrix(DM)
    _, matrix_, names_ = check_design_matrix(DM2)
    assert_almost_equal(matrix, matrix_)
    assert_equal(names, names_)
def test_design_matrix0c():
    """Manually supplied regressors are kept verbatim; bad specs raise."""
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    extra = np.random.randn(128, 4)
    _, X, names = check_design_matrix(
        make_design_matrix(frame_times, drift_model="polynomial",
                           drift_order=3, add_regs=extra))
    # the first added regressor must appear unchanged as the first column
    assert_almost_equal(X[:, 0], extra[:, 0])
    # a row-count mismatch must be rejected
    extra = np.random.randn(127, 4)
    assert_raises_regex(
        AssertionError,
        "Incorrect specification of additional regressors:.",
        make_design_matrix, frame_times, add_regs=extra)
    # a name-count mismatch must be rejected
    extra = np.random.randn(128, 4)
    assert_raises_regex(
        ValueError,
        "Incorrect number of additional regressor names.",
        make_design_matrix, frame_times, add_regs=extra, add_reg_names="")
def test_csv_io():
    """Write a design matrix to CSV, read it back, and check equality."""
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    events = modulated_event_paradigm()
    DM = make_first_level_design_matrix(frame_times, events,
                                        hrf_model='glover',
                                        drift_model='polynomial',
                                        drift_order=3)
    path = 'design_matrix.csv'
    with InTemporaryDirectory():
        DM.to_csv(path)
        # BUG FIX: DataFrame.from_csv was deprecated in pandas 0.21 and
        # removed in 1.0; read_csv(index_col=0) is the supported equivalent.
        DM2 = pd.read_csv(path, index_col=0)
    _, matrix, names = check_design_matrix(DM)
    _, matrix_, names_ = check_design_matrix(DM2)
    assert_almost_equal(matrix, matrix_)
    assert_equal(names, names_)
def design_matrix_light(frame_times, paradigm=None, hrf_model="glover",
                        drift_model="cosine", period_cut=128, drift_order=1,
                        fir_delays=None, add_regs=None, add_reg_names=None,
                        min_onset=-24, path=None):
    """Idem make_design_matrix, but only returns the computed matrix
    and associated names.
    """
    # BUG FIX: avoid the mutable default argument [0]; None keeps the
    # same effective default (a single FIR delay of 0).
    if fir_delays is None:
        fir_delays = [0]
    dmtx = make_design_matrix(frame_times, paradigm, hrf_model, drift_model,
                              period_cut, drift_order, fir_delays, add_regs,
                              add_reg_names, min_onset)
    _, matrix, names = check_design_matrix(dmtx)
    return matrix, names
def test_design_matrix0():
    """A drift-only design (no paradigm) has 4 columns; the first one is a
    linear trend spanning [-0.5, 0.5]."""
    frame_times = np.linspace(0, 127 * 1.0, 128)
    _, X, names = check_design_matrix(
        make_design_matrix(frame_times, drift_model="polynomial",
                           drift_order=3))
    # 3 polynomial drifts + constant
    assert_equal(len(names), 4)
    expected_first_column = np.linspace(-0.5, 0.5, 128)
    assert_almost_equal(X[:, 0], expected_first_column)
def test_design_matrix0():
    """Without an experimental paradigm, only drift terms are produced."""
    n_frames, tr = 128, 1.0
    frame_times = np.linspace(0, (n_frames - 1) * tr, n_frames)
    dmtx = make_first_level_design_matrix(
        frame_times, drift_model='polynomial', drift_order=3)
    _, X, names = check_design_matrix(dmtx)
    # three polynomial drift regressors plus the constant
    assert_equal(len(names), 4)
    assert_almost_equal(X[:, 0], np.linspace(- 0.5, .5, n_frames))
def test_design_matrix0d():
    """Four extra regressors + 3 drifts + constant = 8 columns."""
    frame_times = np.linspace(0, 127 * 1.0, 128)
    extra_regs = np.random.randn(128, 4)
    dmtx = make_first_level_design_matrix(
        frame_times, drift_model='polynomial', drift_order=3,
        add_regs=extra_regs)
    _, X, names = check_design_matrix(dmtx)
    assert_equal(len(names), 8)
    assert_equal(X.shape[1], 8)
def test_design_matrix0d():
    """Adding four manual regressors to a 4-column drift design yields 8."""
    frame_times = np.linspace(0, 127 * 1.0, 128)
    manual_regs = np.random.randn(128, 4)
    dmtx = make_design_matrix(frame_times, drift_model='polynomial',
                              drift_order=3, add_regs=manual_regs)
    _, X, names = check_design_matrix(dmtx)
    assert_equal(len(names), 8)
    assert_equal(X.shape[1], 8)
def design_matrix_light(frame_times, events=None, hrf_model='glover',
                        drift_model='cosine', period_cut=128, drift_order=1,
                        fir_delays=None, add_regs=None, add_reg_names=None,
                        min_onset=-24, path=None):
    """Idem make_first_level_design_matrix, but only returns the computed
    matrix and associated names.
    """
    # BUG FIX: avoid the mutable default argument [0]; None keeps the
    # same effective default (a single FIR delay of 0).
    if fir_delays is None:
        fir_delays = [0]
    dmtx = make_first_level_design_matrix(
        frame_times, events, hrf_model, drift_model, period_cut, drift_order,
        fir_delays, add_regs, add_reg_names, min_onset)
    _, matrix, names = check_design_matrix(dmtx)
    return matrix, names
def test_design_matrix0c():
    """A manually supplied regressor ends up verbatim in the matrix."""
    frame_times = np.linspace(0, 127 * 1.0, 128)
    regs = np.random.randn(128, 4)
    dmtx = make_design_matrix(frame_times, drift_model='polynomial',
                              drift_order=3, add_regs=regs)
    _, X, names = check_design_matrix(dmtx)
    # added regressors come first in the matrix
    assert_almost_equal(X[:, 0], regs[:, 0])
def test_spm_1():
    """The nistats design matrix should be close to SPM's — not identical,
    since the hrf shape differs — so only a small relative squared error
    is required."""
    frame_times = np.linspace(0, 99, 100)
    onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
    conditions = ["c0", "c0", "c0", "c1", "c1", "c1", "c2", "c2", "c2"]
    paradigm = pd.DataFrame({"name": conditions, "onset": onsets})
    _, matrix, _ = check_design_matrix(
        make_design_matrix(frame_times, paradigm, drift_model="blank"))
    spm_design_matrix = DESIGN_MATRIX["arr_0"]
    residual = ((spm_design_matrix - matrix) ** 2).sum()
    assert_true(residual / (spm_design_matrix ** 2).sum() < 0.1)
def test_spm_1():
    """Require a small relative squared error w.r.t. the SPM design matrix
    (exact equality is impossible: the hrf shapes differ)."""
    frame_times = np.linspace(0, 99, 100)
    paradigm = pd.DataFrame({
        'name': ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2'],
        'onset': [30, 50, 70, 10, 30, 80, 30, 40, 60]})
    X1 = make_design_matrix(frame_times, paradigm, drift_model='blank')
    _, matrix, _ = check_design_matrix(X1)
    spm_design_matrix = DESIGN_MATRIX['arr_0']
    rel_err = (((spm_design_matrix - matrix)**2).sum()
               / (spm_design_matrix**2).sum())
    assert_true(rel_err < .1)
def plot_design_matrix(design_matrix, rescale=True, ax=None, output_file=None):
    """Plot a design matrix provided as a DataFrame

    Parameters
    ----------
    design_matrix : pandas DataFrame,
        Describes a design matrix.

    rescale : bool, optional
        Rescale columns magnitude for visualization or not.

    ax : axis handle, optional
        Handle to axis onto which we will draw design matrix.

    output_file : string or None, optional,
        The name of an image file to export the plot to.
        Valid extensions are .png, .pdf, .svg. If output_file is not
        None, the plot is saved to a file, and the display is closed.

    Returns
    -------
    ax : axis handle
        The axis used for plotting, or None when the figure was saved to
        ``output_file`` (the figure is closed in that case).
    """
    # We import _set_mpl_backend because just the fact that we are
    # importing it sets the backend
    from nilearn.plotting import _set_mpl_backend
    # avoid unhappy pyflakes
    _set_mpl_backend

    # normalize the values per column for better visualization
    _, X, names = check_design_matrix(design_matrix)
    if rescale:
        # scale each column to unit Euclidean norm; the 1e-12 floor avoids
        # division by zero for all-zero columns
        X = X / np.maximum(1.e-12, np.sqrt(
            np.sum(X ** 2, 0)))  # pylint: disable=no-member
    if ax is None:
        plt.figure()
        ax = plt.subplot(1, 1, 1)

    ax.imshow(X, interpolation='nearest', aspect='auto')
    # BUG FIX: the original called ax.set_label('conditions'), which sets
    # the artist's legend label, not the x-axis label (note the paired
    # set_ylabel below).
    ax.set_xlabel('conditions')
    ax.set_ylabel('scan number')

    ax.set_xticks(range(len(names)))
    ax.set_xticklabels(names, rotation=60, ha='right')

    plt.tight_layout()
    if output_file is not None:
        plt.savefig(output_file)
        plt.close()
        ax = None
    return ax
def test_spm_1():
    """nistats vs SPM design matrices: close, though not identical, because
    the hrf shape is different."""
    frame_times = np.linspace(0, 99, 100)
    conditions = ['c0'] * 3 + ['c1'] * 3 + ['c2'] * 3
    onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
    paradigm = pd.DataFrame({'name': conditions, 'onset': onsets})
    design = make_design_matrix(frame_times, paradigm, drift_model='blank')
    _, matrix, _ = check_design_matrix(design)
    spm = DESIGN_MATRIX['arr_0']
    assert_true(((spm - matrix) ** 2).sum() / (spm ** 2).sum() < .1)
def _hcp_regions_selection(fmri_img, subject_id, n_voxels=10, n_jobs=1,
                           verbose=False):
    """GLM on HCP dataset.

    Fits a first-level GLM on the given functional image, computes z-maps
    for the 'rh-lh', 'lh-rh' and 'cue' contrasts, and keeps, for each of
    them, the ``n_voxels`` strongest voxels as a binary region mask.

    Returns
    -------
    d_m : the design matrix used for the fit
    z_maps : dict mapping 'rh', 'lh', 'cue' to z-score images
    region_mask_imgs : dict mapping the same keys to region-mask images
    """
    paradigm, t_frames = get_paradigm_hcp(subject_id)
    d_m = make_first_level_design_matrix(
        t_frames, paradigm, hrf_model='spm', drift_model='Cosine',
        period_cut=2 * 2 * EPOCH_DUR_HCP)
    glm = FirstLevelModel(t_r=TR_HCP, slice_time_ref=0.0, noise_model='ols',
                          min_onset=10.0, signal_scaling=False,
                          smoothing_fwhm=6.0, standardize=False,
                          memory_level=1, memory='./.cachedir',
                          minimize_memory=False, n_jobs=n_jobs)
    glm.fit(run_imgs=fmri_img, design_matrices=d_m)
    _, _, names = check_design_matrix(d_m)
    n_names = len(names)
    # one unit contrast per design-matrix column
    c_val = {n: c for n, c in zip(names, np.eye(n_names))}
    c_val['rh-lh'] = c_val['rh'] - c_val['lh']
    c_val['lh-rh'] = c_val['lh'] - c_val['rh']
    # BUG FIX: dict.iteritems() does not exist on Python 3; .items()
    # behaves the same on both Python 2 and 3.
    z_maps = {n: glm.compute_contrast(c, output_type='z_score')
              for n, c in c_val.items()}
    z_maps = {
        'rh': z_maps['rh-lh'],
        'lh': z_maps['lh-rh'],
        'cue': z_maps['cue']
    }
    region_mask_imgs = {}
    for name, _ in [('rh', 'rh-lh'), ('lh', 'lh-rh'), ('cue', 'cue')]:
        # keep the n_voxels strongest voxels of each z-map as a mask
        z_map_vector_mask = glm.masker_.transform(z_maps[name]).flatten()
        z_region_vector_mask = mask_n_max(z_map_vector_mask, n_voxels)
        z_region_vector_mask = z_region_vector_mask.astype(float)
        region_mask_imgs[name] = \
            glm.masker_.inverse_transform(z_region_vector_mask)
    return d_m, z_maps, region_mask_imgs
def test_spm_2():
    """Same closeness check against SPM as test_spm_1, but with 10 s
    event durations."""
    frame_times = np.linspace(0, 99, 100)
    conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onsets = [30, 50, 70, 10, 30, 80, 30, 40, 60]
    events = pd.DataFrame({'trial_type': conditions,
                           'onset': onsets,
                           'duration': 10 * np.ones(9)})
    X1 = make_first_level_design_matrix(frame_times, events, drift_model=None)
    spm_design_matrix = DESIGN_MATRIX['arr_1']
    _, matrix, _ = check_design_matrix(X1)
    diff = spm_design_matrix - matrix
    assert_true((diff ** 2).sum() / (spm_design_matrix ** 2).sum() < .1)
def test_design_matrix0c():
    """Manually supplied regressors are kept verbatim; malformed inputs
    must raise."""
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    good_regs = np.random.randn(128, 4)
    dmtx = make_first_level_design_matrix(
        frame_times, drift_model='polynomial', drift_order=3,
        add_regs=good_regs)
    _, X, names = check_design_matrix(dmtx)
    assert_almost_equal(X[:, 0], good_regs[:, 0])
    # a row-count mismatch must be rejected
    bad_regs = np.random.randn(127, 4)
    assert_raises_regex(
        AssertionError,
        "Incorrect specification of additional regressors:.",
        make_first_level_design_matrix, frame_times, add_regs=bad_regs)
    # a name-count mismatch must be rejected
    assert_raises_regex(
        ValueError,
        "Incorrect number of additional regressor names.",
        make_first_level_design_matrix, frame_times,
        add_regs=np.random.randn(128, 4), add_reg_names='')
def first_level(subject_dic, additional_regressors=None, compcorr=False,
                smooth=None, surface=False, mask_img=None):
    """ Run the first-level analysis (GLM fitting + statistical maps)
    in a given subject

    Parameters
    ----------
    subject_dic: dict,
        exhaustive description of an individual acquisition
    additional_regressors: dict or None,
        additional regressors provided as an already sampled
        design_matrix dictionary keys are session_ids
    compcorr: Bool, optional,
        whether confound estimation and removal should be done or not
    smooth: float or None, optional,
        how much the data should spatially smoothed during masking
    surface: bool, optional
        NOTE(review): inferred from the branches below — when True,
        sessions are read as gifti surface data and run_surface_glm is
        used; confirm against callers.
    mask_img: image or None, optional
        analysis mask; computed from the functional data when None
        (volume mode only).
    """
    start_time = time.ctime()
    # experimental paradigm meta-params
    motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    hrf_model = subject_dic['hrf_model']
    hfcut = subject_dic['hfcut']
    drift_model = subject_dic['drift_model']
    tr = subject_dic['TR']
    if not surface and (mask_img is None):
        mask_img = masking(subject_dic['func'], subject_dic['output_dir'])

    if additional_regressors is None:
        additional_regressors = dict([
            (session_id, None) for session_id in subject_dic['session_id']])

    # one GLM per session
    for session_id, fmri_path, onset, motion_path in zip(
            subject_dic['session_id'], subject_dic['func'],
            subject_dic['onset'], subject_dic['realignment_parameters']):
        paradigm_id = _session_id_to_task_id([session_id])[0]

        if surface:
            from nibabel.gifti import read
            # number of time points = number of data arrays in the gifti
            n_scans = np.array(
                [darrays.data for darrays in read(fmri_path).darrays]
            ).shape[0]
        else:
            n_scans = nib.load(fmri_path).shape[3]

        # motion parameters
        motion = np.loadtxt(motion_path)
        # define the time stamps for different images
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        if paradigm_id == 'audio':
            # special acquisition scheme for the audio task: only a subset
            # of positions within each 20 s cycle carries a scan, so the
            # frame times are rebuilt explicitly from that sampling mask
            mask = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1])
            n_cycles = 28
            cycle_duration = 20
            t_r = 2
            cycle = np.arange(0, cycle_duration, t_r)[mask > 0]
            frametimes = np.tile(cycle, n_cycles) +\
                np.repeat(np.arange(n_cycles) * cycle_duration, mask.sum())
            frametimes = frametimes[:-2]  # for some reason...

        if surface:
            compcorr = False  # XXX Fixme

        if compcorr:
            # prepend high-variance confounds to the motion regressors
            confounds = high_variance_confounds(fmri_path, mask_img=mask_img)
            confounds = np.hstack((confounds, motion))
            confound_names = ['conf_%d' % i for i in range(5)] + motion_names
        else:
            confounds = motion
            confound_names = motion_names

        if onset is None:
            warnings.warn('Onset file not provided. Trying to guess it')
            task = os.path.basename(fmri_path).split('task')[-1][4:]
            onset = os.path.join(
                os.path.split(os.path.dirname(fmri_path))[0], 'model001',
                'onsets', 'task' + task + '_run001', 'task%s.csv' % task)
        if not os.path.exists(onset):
            warnings.warn('non-existant onset file. proceeding without it')
            paradigm = None
        else:
            paradigm = make_paradigm(onset, paradigm_id)

        # handle manually supplied regressors
        add_reg_names = []
        if additional_regressors[session_id] is None:
            add_regs = confounds
        else:
            # sampled regressors come from a CSV file, one column each
            df = read_csv(additional_regressors[session_id])
            add_regs = []
            for regressor in df:
                add_reg_names.append(regressor)
                add_regs.append(df[regressor])
            add_regs = np.array(add_regs).T
            add_regs = np.hstack((add_regs, confounds))
        add_reg_names += confound_names

        # create the design matrix
        design_matrix = make_first_level_design_matrix(
            frametimes, paradigm, hrf_model=hrf_model,
            drift_model=drift_model, period_cut=hfcut,
            add_regs=add_regs, add_reg_names=add_reg_names)
        _, dmtx, names = check_design_matrix(design_matrix)

        # create the relevant contrasts
        contrasts = make_contrasts(paradigm_id, names)

        if surface:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_surf_%s' % session_id)
        else:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_stats_%s' % session_id)
        if not os.path.exists(subject_session_output_dir):
            os.makedirs(subject_session_output_dir)
        np.savez(os.path.join(subject_session_output_dir,
                              'design_matrix.npz'),
                 design_matrix=design_matrix)

        if surface:
            run_surface_glm(design_matrix, contrasts, fmri_path,
                            subject_session_output_dir)
        else:
            z_maps = run_glm(design_matrix, contrasts, fmri_path, mask_img,
                             subject_dic, subject_session_output_dir, tr=tr,
                             smoothing_fwhm=smooth)

            # do stats report
            anat_img = nib.load(subject_dic['anat'])
            stats_report_filename = os.path.join(
                subject_session_output_dir, 'report_stats.html')
            generate_subject_stats_report(
                stats_report_filename,
                contrasts,
                z_maps,
                mask_img,
                threshold=3.,
                cluster_th=15,
                anat=anat_img,
                anat_affine=anat_img.affine,
                design_matrices=[design_matrix],
                subject_id=subject_dic['subject_id'],
                start_time=start_time,
                title="GLM for subject %s" % session_id,
                # additional ``kwargs`` for more informative report
                TR=tr,
                n_scans=n_scans,
                hfcut=hfcut,
                frametimes=frametimes,
                drift_model=drift_model,
                hrf_model=hrf_model,
            )
            # NOTE(review): this check is always True here, since we are
            # in the non-surface branch already.
            if not surface:
                ProgressReport().finish_dir(subject_session_output_dir)
            print("Statistic report written to %s\r\n"
                  % stats_report_filename)
def do_subject_glm(subject_id):
    """Run the per-subject GLM: build one design matrix per run, fit an
    FMRILinearModel on the smoothed functional images, and write the mask
    and z/t/effects/variance maps to disk.

    Returns a dict with the subject id, mask path, z-map and effects-map
    paths, and the contrast definitions.
    """
    subject_output_dir = os.path.join(output_dir, subject_id)

    # make design matrices
    design_matrices = []
    func = []
    anat = os.path.join(subject_output_dir, "anatomy",
                        "whighres001_brain.nii")
    for run_path in sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/task*"))):
        run_id = os.path.basename(run_path)
        run_func = glob.glob(os.path.join(subject_output_dir, "BOLD",
                                          run_id, "wrbold*.nii"))
        assert len(run_func) == 1
        run_func = run_func[0]
        run_onset_paths = sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/%s/*" % run_id)))
        # BUG FIX: on Python 3, map() returns a lazy iterator that cannot
        # be indexed (onsets[c]) nor stacked reliably — materialize it.
        onsets = list(map(np.loadtxt, run_onset_paths))
        conditions = np.hstack(
            [[condition_keys["cond%03i" % (c + 1)]] * len(onsets[c])
             for c in range(len(run_onset_paths))])
        onsets = np.vstack((onsets))
        onsets *= tr
        run_func = nibabel.load(run_func)
        func.append(run_func)
        n_scans = run_func.shape[-1]
        onset, duration, modulation = onsets.T
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        paradigm = pd.DataFrame(dict(name=conditions, onset=onset,
                                     duration=duration,
                                     modulation=modulation))
        design_matrix = make_design_matrix(frametimes, paradigm,
                                           hrf_model=hrf_model,
                                           drift_model=drift_model,
                                           period_cut=hfcut)
        design_matrices.append(design_matrix)
    n_runs = len(func)

    # specify contrasts
    _, _, names = check_design_matrix(design_matrix)
    n_columns = len(names)
    contrast_matrix = np.eye(n_columns)
    contrasts = {}
    for c in range(len(condition_keys)):
        contrasts[names[2 * c]] = contrast_matrix[2 * c]
    # BUG FIX: np.mean cannot average a Python 3 dict view — wrap in list.
    contrasts["avg"] = np.mean(list(contrasts.values()), axis=0)

    # more interesting contrasts
    contrasts_ = {}
    for contrast, val in contrasts.items():
        if not contrast == "avg":
            contrasts_["%s_minus_avg" % contrast] = val - contrasts["avg"]
    contrasts = contrasts_

    # fit GLM
    from nilearn.image import smooth_img
    func = smooth_img(func, fwhm=8.)
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(func,
                               [check_design_matrix(design_matrix)[1]
                                for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_output_dir, "mask.nii")
    print("Saving mask image to %s ..." % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrast maps
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * n_runs, con_id=contrast_id, output_z=True,
            output_stat=True, output_effects=True, output_variance=True)
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # # generate stats report
    # stats_report_filename = os.path.join(subject_output_dir, "reports",
    #                                      "report_stats.html")
    # generate_subject_stats_report(
    #     stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat=anat,
    #     threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr,
    #     subject_id="sub001", n_scans=n_scans, hfcut=hfcut,
    #     paradigm=paradigm, frametimes=frametimes,
    #     drift_model=drift_model, hrf_model=hrf_model)
    # ProgressReport().finish_dir(subject_output_dir)

    return dict(subject_id=subject_id, mask=mask_path,
                effects_maps=effects_maps, z_maps=z_maps,
                contrasts=contrasts)
# Script fragment: build a design matrix, plot it, and fit a first-level
# GLM for a block-design (active vs rest) experiment.
# NOTE(review): `nscans`, `tr`, `paradigm`, `epoch_duration` and
# `subject_data` are defined earlier in this script, outside this view.
frametimes = np.linspace(0, (nscans - 1) * tr, nscans)
drift_model = 'Cosine'
hrf_model = 'Canonical With Derivative'
period_cut = 2 * 2 * epoch_duration
design_matrix = make_design_matrix(
    frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model,
    period_cut=period_cut)

# plot and save design matrix
ax = plot_design_matrix(design_matrix)
ax.set_position([.05, .25, .9, .65])
ax.set_title('Design matrix')

# specify contrasts
contrasts = {}
_, matrix, names = check_design_matrix(design_matrix)
contrast_matrix = np.eye(len(names))
for i in range(len(names)):
    contrasts[names[i]] = contrast_matrix[i]

# Use a more interesting contrast
contrasts = {'active-rest': contrasts['active'] - contrasts['rest']}

# fit GLM
print('\r\nFitting a GLM (this takes time) ..')
fmri_glm = FirstLevelGLM(noise_model='ar1', standardize=False).fit(
    [subject_data.func], matrix)

# compute bg unto which activation will be projected
# NOTE(review): this rebinds the name `mean_img` from the nilearn function
# to its result, so the function cannot be called again later in the script.
mean_img = mean_img(subject_data.func)
# Script fragment: build the faces/scrambled paradigm and design matrix,
# define contrasts, then begin fitting the GLM (the FMRILinearModel call is
# truncated at the end of this chunk).
# NOTE(review): `timing`, `faces_onsets`, `tr`, `n_scans`, `hrf_model`,
# `drift_model`, `period_cut` and `design_matrices` come from earlier,
# unseen parts of the script.
scrambled_onsets = timing['onsets'][1].ravel()
onsets = np.hstack((faces_onsets, scrambled_onsets))
onsets *= tr  # because onsets were reporting in 'scans' units
conditions = ['faces'] * len(faces_onsets) + ['scrambled'] * len(
    scrambled_onsets)
paradigm = DataFrame({'name': conditions, 'onset': onsets})

# build design matrix
frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
design_matrix = make_design_matrix(
    frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model,
    period_cut=period_cut)
design_matrices.append(design_matrix)

# specify contrasts
_, matrix, names = check_design_matrix(design_matrix)
contrasts = {}
n_columns = len(names)
contrast_matrix = np.eye(n_columns)
# NOTE(review): indices 0 and 2 are assumed to be the condition columns;
# confirm against the design-matrix column ordering.
for i in range(2):
    contrasts[names[2 * i]] = contrast_matrix[2 * i]

# more interesting contrasts
contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
contrasts['effects_of_interest'] = np.vstack((contrasts['faces'],
                                              contrasts['scrambled']))

# fit GLM
print('Fitting a GLM (this takes time)...')
fmri_glm = FMRILinearModel(
def first_level(subject):
    # Fit a session-wise first-level GLM for one subject (localizer
    # paradigm), write the z/stat/effect maps to disk and produce an HTML
    # report; returns the dict of z-map paths keyed by contrast id.
    subject_id = subject['subject_id']
    data_dir = subject['output_dir']
    subject_session_output_dir = os.path.join(data_dir, 'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)
    design_matrices = []

    # one design matrix per functional run
    for e, i in enumerate(subject['func']):
        # Parameters
        tr = subject['TR']
        drift_model = None
        hrf_model = 'spm'  # hemodynamic reponse function
        hfcut = 128.
        fwhm = [5, 5, 5]
        n_scans = nibabel.load(subject['func'][e]).shape[3]

        # Preparation of paradigm
        events_file = subject['onset'][e]
        paradigm = paradigm_contrasts.localizer_paradigm(events_file)

        # Motion parameter
        motion_path = subject['realignment_parameters'][e]
        motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
        motion = np.loadtxt(motion_path)

        # Build design matrix
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        # NOTE(review): high_pass is ignored here because drift_model is
        # None; if a drift model is ever enabled, nilearn expects high_pass
        # in Hz (e.g. 1. / 128), not a cutoff period in seconds — confirm.
        design_matrix = make_first_level_design_matrix(
            frametimes, paradigm, hrf_model=hrf_model,
            drift_model=drift_model, high_pass=hfcut, add_regs=motion,
            add_reg_names=motion_names)
        _, dmtx, names = check_design_matrix(design_matrix)
        design_matrices.append(design_matrix)
        #print(names)

    # Specify contrasts (built from the last run's design matrix)
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    # GLM Analysis
    print('Fitting a GLM (this takes time)...')
    #for mask_img; use the False or the mask of t1 mni template
    #the computed mask by default on fmri seems not always correct.
    # For a specific mask, try this:
    #mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    #mask = compute_epi_mask(fmri_f)
    #nibabel.save(mask , mask_path)
    #mask_images.append(compute_epi_mask(mask))
    fmri_glm = FirstLevelModel(mask_img=False, t_r=tr,
                               smoothing_fwhm=fwhm).fit(
        subject['func'], design_matrices=design_matrices)

    # compute contrasts
    z_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)

        # store stat maps to disk
        for map_type in ['z_score', 'stat', 'effect_size',
                         'effect_variance']:
            stat_map = fmri_glm.compute_contrast(
                contrast_val, output_type=map_type)
            map_dir = os.path.join(
                subject_session_output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            stat_map.to_filename(map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z_score':
                z_maps[contrast_id] = map_path

    anat_img = glob.glob(os.path.join(data_dir, 'anat/wsub*T1w.nii.gz'))[0]
    stats_report_filename = os.path.join(
        subject_session_output_dir, 'report_stats.html')
    report = make_glm_report(fmri_glm,
                             contrasts,
                             threshold=3.0,
                             bg_img=anat_img,
                             cluster_threshold=15,
                             title="GLM for subject %s" % subject_id,
                             )
    report.save_as_html(stats_report_filename)
    return z_maps
# Script fragment (variant): faces/scrambled paradigm, design matrix and
# contrast definitions, then extraction of the matrices for GLM fitting.
# NOTE(review): `timing`, `faces_onsets`, `tr`, `n_scans`, `hrf_model`,
# `drift_model`, `period_cut` and `design_matrices` come from earlier,
# unseen parts of the script.
scrambled_onsets = timing['onsets'][1].ravel()
onsets = np.hstack((faces_onsets, scrambled_onsets))
onsets *= tr  # because onsets were reporting in 'scans' units
conditions = (['faces'] * len(faces_onsets) +
              ['scrambled'] * len(scrambled_onsets))
paradigm = DataFrame({'name': conditions, 'onset': onsets})

# build design matrix
frame_times = np.arange(n_scans) * tr
design_matrix = make_design_matrix(
    frame_times, paradigm, hrf_model=hrf_model, drift_model=drift_model,
    period_cut=period_cut)
design_matrices.append(design_matrix)

# specify contrasts
_, matrix, names = check_design_matrix(design_matrix)
contrasts = {}
n_columns = len(names)
contrast_matrix = np.eye(n_columns)
# NOTE(review): indices 0 and 2 are assumed to be the condition columns;
# confirm against the design-matrix column ordering.
for i in range(2):
    contrasts[names[2 * i]] = contrast_matrix[2 * i]

# more interesting contrasts
contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
contrasts['effects_of_interest'] = np.vstack((contrasts['faces'],
                                              contrasts['scrambled']))

# fit GLM
print('Fitting a GLM')
# keep only the numeric matrix of each design for the fit
X = [check_design_matrix(design_)[1] for design_ in design_matrices]
def do_subject_glm(subject_id):
    # Per-subject GLM (Python 2 code — note the print statements): build
    # one design matrix per run, fit an FMRILinearModel on the smoothed
    # functional images, and write the mask and z/t/effects/variance maps.
    # Returns a dict with paths and contrast definitions.
    # NOTE(review): `output_dir`, `data_dir`, `condition_keys`, `tr`,
    # `hrf_model`, `drift_model` and `hfcut` are module-level names
    # defined outside this view.
    subject_output_dir = os.path.join(output_dir, subject_id)

    # make design matrices
    design_matrices = []
    func = []
    anat = os.path.join(subject_output_dir, "anatomy",
                        "whighres001_brain.nii")
    for run_path in sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/task*"))):
        run_id = os.path.basename(run_path)
        run_func = glob.glob(os.path.join(subject_output_dir, "BOLD",
                                          run_id, "wrbold*.nii"))
        assert len(run_func) == 1
        run_func = run_func[0]
        run_onset_paths = sorted(glob.glob(os.path.join(
            data_dir, subject_id, "model/model001/onsets/%s/*" % run_id)))
        # load one onsets array per file (Python 2: map returns a list,
        # so indexing onsets[c] below is valid)
        onsets = map(np.loadtxt, run_onset_paths)
        conditions = np.hstack(
            [[condition_keys["cond%03i" % (c + 1)]] * len(onsets[c])
             for c in range(len(run_onset_paths))])
        onsets = np.vstack((onsets))
        onsets *= tr
        run_func = nibabel.load(run_func)
        func.append(run_func)
        n_scans = run_func.shape[-1]
        onset, duration, modulation = onsets.T
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        paradigm = pd.DataFrame(dict(name=conditions, onset=onset,
                                     duration=duration,
                                     modulation=modulation))
        design_matrix = make_design_matrix(frametimes, paradigm,
                                           hrf_model=hrf_model,
                                           drift_model=drift_model,
                                           period_cut=hfcut)
        design_matrices.append(design_matrix)
    n_runs = len(func)

    # specify contrasts
    _, _, names = check_design_matrix(design_matrix)
    n_columns = len(names)
    contrast_matrix = np.eye(n_columns)
    contrasts = {}
    for c in range(len(condition_keys)):
        contrasts[names[2 * c]] = contrast_matrix[2 * c]
    # Python 2: dict.values() is a list, so np.mean works directly
    contrasts["avg"] = np.mean(contrasts.values(), axis=0)

    # more interesting contrasts
    contrasts_ = {}
    for contrast, val in contrasts.items():
        if not contrast == "avg":
            contrasts_["%s_minus_avg" % contrast] = val - contrasts["avg"]
    contrasts = contrasts_

    # fit GLM
    from nilearn.image import smooth_img
    func = smooth_img(func, fwhm=8.)
    print 'Fitting a GLM (this takes time)...'
    fmri_glm = FMRILinearModel(func,
                               [check_design_matrix(design_matrix)[1]
                                for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_output_dir, "mask.nii")
    print "Saving mask image to %s ..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrast maps
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * n_runs, con_id=contrast_id, output_z=True,
            output_stat=True, output_effects=True, output_variance=True)
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # # generate stats report
    # stats_report_filename = os.path.join(subject_output_dir, "reports",
    #                                      "report_stats.html")
    # generate_subject_stats_report(
    #     stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat=anat,
    #     threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr,
    #     subject_id="sub001", n_scans=n_scans, hfcut=hfcut,
    #     paradigm=paradigm, frametimes=frametimes,
    #     drift_model=drift_model, hrf_model=hrf_model)
    # ProgressReport().finish_dir(subject_output_dir)

    return dict(subject_id=subject_id, mask=mask_path,
                effects_maps=effects_maps, z_maps=z_maps,
                contrasts=contrasts)
# Script fragment: fetch the localizer first-level dataset, build a design
# matrix from its paradigm file, plot it, and fit a first-level GLM.
data = datasets.fetch_localizer_first_level()
paradigm_file = data.paradigm
epi_img = data.epi_img

########################################
# Design matrix
########################################
# NOTE(review): DataFrame.from_csv was deprecated in pandas 0.21 and
# removed in 1.0; pd.read_csv would be the modern replacement.
paradigm = DataFrame.from_csv(paradigm_file, sep=" ", header=None,
                              index_col=None)
paradigm.columns = ["session", "name", "onset"]
n_conditions = len(paradigm.name.unique())
# NOTE(review): `frame_times` and `write_dir` are defined earlier in this
# script, outside this view.
design_matrix = make_design_matrix(
    frame_times, paradigm, hrf_model="canonical with derivative",
    drift_model="cosine", period_cut=128)
_, matrix, column_names = check_design_matrix(design_matrix)

# Plot the design matrix
ax = plot_design_matrix(design_matrix)
ax.set_position([0.05, 0.25, 0.9, 0.65])
ax.set_title("Design matrix")
plt.savefig(path.join(write_dir, "design_matrix.png"))

########################################
# Perform a GLM analysis
########################################
fmri_glm = FirstLevelGLM().fit(epi_img, matrix)

#########################################
# Estimate contrasts