def tortoise(*args):
    """Fit a single "fixed effects" GLM over the LR and RL phase-encoding runs.

    NOTE(review): despite taking ``*args`` (which are only echoed), this
    function operates entirely on module-level state (``subject_data``,
    ``design_matrices``, ``contrasts``) — confirm callers set these up.

    Returns
    -------
    (effects_maps, z_maps, mask_path, map_dirs) on success, or ``None`` if
    any contrast computation fails.
    """
    print(args)
    print(
        'Fitting a "Fixed Effect" GLM for merging LR and RL '
        'phase-encoding directions for subject %s ...' % subject_data.subject_id)
    fmri_glm = FMRILinearModel(subject_data.func,
                               [design_matrix.matrix
                                for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')
    print("... done.\r\n")

    # save computed mask
    mask_path = os.path.join(subject_data.output_dir, "mask.nii")
    print("Saving mask image to %s ..." % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)
    print("... done.\r\n")

    z_maps = {}
    effects_maps = {}
    map_dirs = {}
    try:
        for contrast_id, contrast_val in contrasts.items():
            print("\tcontrast id: %s" % contrast_id)
            z_map, eff_map = fmri_glm.contrast(
                contrast_val, con_id=contrast_id, output_z=True,
                output_effects=True)

            # store stat maps to disk
            for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]):
                map_dir = os.path.join(
                    subject_data.output_dir, '%s_maps' % map_type)
                map_dirs[map_type] = map_dir
                if not os.path.exists(map_dir):
                    os.makedirs(map_dir)
                map_path = os.path.join(
                    map_dir, '%s_%s.nii' % (map_type, contrast_id))
                print("\t\tWriting %s ..." % map_path)
                nibabel.save(out_map, map_path)

                # collect zmaps for contrasts we're interested in
                if map_type == 'z':
                    z_maps[contrast_id] = map_path
                if map_type == 'effects':
                    effects_maps[contrast_id] = map_path
        return effects_maps, z_maps, mask_path, map_dirs
    # BUG FIX: was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception (best-effort behavior kept).
    except Exception:
        return None
def tortoise(*args):
    """Fit a single "fixed effects" GLM over the LR and RL phase-encoding runs.

    Modernized from Python-2-only syntax (``print`` statements,
    ``dict.iteritems``) for consistency with the sibling implementation, and
    the bare ``except:`` narrowed to ``except Exception``.

    NOTE(review): despite taking ``*args`` (which are only echoed), this
    function operates entirely on module-level state (``subject_data``,
    ``design_matrices``, ``contrasts``) — confirm callers set these up.

    Returns
    -------
    (effects_maps, z_maps, mask_path, map_dirs) on success, or ``None`` if
    any contrast computation fails.
    """
    print(args)
    print(
        'Fitting a "Fixed Effect" GLM for merging LR and RL '
        'phase-encoding directions for subject %s ...' % (
            subject_data.subject_id))
    fmri_glm = FMRILinearModel(subject_data.func,
                               [design_matrix.matrix
                                for design_matrix in design_matrices],
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')
    print("... done.\r\n")

    # save computed mask
    mask_path = os.path.join(subject_data.output_dir, "mask.nii")
    print("Saving mask image to %s ..." % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)
    print("... done.\r\n")

    z_maps = {}
    effects_maps = {}
    map_dirs = {}
    try:
        for contrast_id, contrast_val in contrasts.items():
            print("\tcontrast id: %s" % contrast_id)
            z_map, eff_map = fmri_glm.contrast(
                contrast_val, con_id=contrast_id, output_z=True,
                output_effects=True)

            # store stat maps to disk
            for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]):
                map_dir = os.path.join(
                    subject_data.output_dir, '%s_maps' % map_type)
                map_dirs[map_type] = map_dir
                if not os.path.exists(map_dir):
                    os.makedirs(map_dir)
                map_path = os.path.join(
                    map_dir, '%s_%s.nii' % (map_type, contrast_id))
                print("\t\tWriting %s ..." % map_path)
                nibabel.save(out_map, map_path)

                # collect zmaps for contrasts we're interested in
                if map_type == 'z':
                    z_maps[contrast_id] = map_path
                if map_type == 'effects':
                    effects_maps[contrast_id] = map_path
        return effects_maps, z_maps, mask_path, map_dirs
    except Exception:
        return None
def _first_level_glm(study_dir, subject_id, model_id,
                     hrf_model='canonical', drift_model='cosine',
                     glm_model='ar1', mask='compute', verbose=1):
    """Run a first-level GLM for one subject of a study and save stat maps.

    Loads BOLD images, motion regressors, task contrasts and events via the
    project helpers, builds per-session design matrices, fits a (possibly
    multi-session) GLM, and writes z/t/contrast/variance maps plus the mask
    under ``<subject_dir>/model/<model_id>/``.

    Parameters
    ----------
    study_dir : str, path of the study; its basename is used as study_id
    subject_id : str, subject directory name inside ``study_dir``
    model_id : str, model directory name for outputs
    hrf_model, drift_model, glm_model : str, GLM specification options
    mask : 'compute' or niimg, mask passed to FMRILinearModel
    verbose : int, >0 prints progress
    """
    study_id = os.path.split(study_dir)[1]
    subject_dir = os.path.join(study_dir, subject_id)
    if verbose > 0:
        # single-argument print(...) works identically on Python 2 and 3
        print('%s@%s: first level glm' % (subject_id, study_id))

    # gather inputs via project-level helpers
    tr = get_study_tr(study_dir)
    images, n_scans = get_subject_bold_images(subject_dir)
    motion = get_subject_motion_per_session(subject_dir)
    contrasts = get_task_contrasts(study_dir, subject_dir, model_id)
    events = get_subject_events(study_dir, subject_dir)
    design_matrices = make_design_matrices(events, n_scans, tr,
                                           hrf_model, drift_model, motion)

    # fit the GLM
    glm = FMRILinearModel(images, design_matrices, mask=mask)
    glm.fit(do_scaling=True, model=glm_model)

    for contrast_id in contrasts:
        # pad each session's contrast vector with zeros so it matches the
        # (wider) design matrix of that session
        con_val = []
        for session_con, session_dm in zip(contrasts[contrast_id],
                                           design_matrices):
            con = np.zeros(session_dm.shape[1])
            con[:len(session_con)] = session_con
            con_val.append(con)

        z_map, t_map, c_map, var_map = glm.contrast(
            con_val, con_id=contrast_id, output_z=True, output_stat=True,
            output_effects=True, output_variance=True,)

        model_dir = os.path.join(subject_dir, 'model', model_id)
        for dtype, img in zip(['z', 't', 'c', 'var'],
                              [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(model_dir, '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            path = os.path.join(
                map_dir, '%s.nii.gz' % normalize_name(contrast_id))
            nb.save(img, path)

    # NOTE(review): model_dir is defined inside the loop, so this raises
    # NameError when ``contrasts`` is empty — confirm that never happens.
    nb.save(glm.mask, os.path.join(model_dir, 'mask.nii.gz'))
def _apply_glm(out_dir, data, design_matrices, contrasts, mask='compute',
               model_id=None, resample=True):
    """Fit a (multi-session) GLM and write stat maps under ``out_dir``.

    Copies the input BOLD images into ``<out_dir>/fmri``, fits an AR(1) GLM,
    saves the mask, then writes z/t/contrast/variance maps for each contrast
    (optionally resampled via ``resample_niimg``).

    Parameters
    ----------
    out_dir : str, output directory (created as needed)
    data : list of niimgs or filenames, one per session
    design_matrices : design matrices passed to FMRILinearModel
    contrasts : dict mapping contrast_id -> contrast value(s)
    mask : 'compute' or niimg
    model_id : str or None, prefixed to output filenames when given
    resample : bool, resample saved images in place

    Returns
    -------
    dict mapping contrast_id -> {dtype: map_path}
    """
    # copy the raw BOLD images next to the results
    bold_dir = os.path.join(out_dir, 'fmri')
    if not os.path.exists(bold_dir):
        os.makedirs(bold_dir)
    for i, img in enumerate(data):
        # isinstance is the idiomatic (and subclass-safe) type check
        if isinstance(img, str):
            img = nb.load(img)
        nb.save(img, os.path.join(bold_dir, 'bold_session_%i.nii.gz' % i))

    # fit glm
    glm = FMRILinearModel(data, design_matrices, mask=mask)
    glm.fit(do_scaling=True, model='ar1')
    nb.save(glm.mask, os.path.join(out_dir, 'mask.nii.gz'))
    if resample:
        resample_niimg(os.path.join(out_dir, 'mask.nii.gz'))

    stat_maps = {}
    for contrast_id in contrasts:
        stat_maps[contrast_id] = {}
        z_map, t_map, c_map, var_map = glm.contrast(
            contrasts[contrast_id], con_id=contrast_id, output_z=True,
            output_stat=True, output_effects=True, output_variance=True,)

        for dtype, out_map in zip(['z', 't', 'c', 'variance'],
                                  [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(out_dir, '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            if model_id:
                map_path = os.path.join(
                    map_dir, '%s_%s.nii.gz' % (model_id, contrast_id))
            else:
                map_path = os.path.join(
                    map_dir, '%s.nii.gz' % contrast_id)
            nb.save(out_map, map_path)
            if resample:
                resample_niimg(map_path)
            stat_maps[contrast_id][dtype] = map_path
    return stat_maps
def _first_level(out_dir, data, design_matrices, contrasts,
                 glm_model='ar1', mask='compute', verbose=1):
    """Fit a first-level GLM and save z/t/contrast/variance maps to out_dir.

    Inputs are sanitized through the project ``check_*`` helpers; each
    contrast vector is zero-padded to the width of its session's design
    matrix before estimation.

    Parameters
    ----------
    out_dir : str, output directory for the maps and mask
    data : niimg(s), functional data
    design_matrices : one design matrix per session
    contrasts : dict mapping contrast_id -> per-session contrast vectors
    glm_model : str, noise model ('ar1' or 'ols')
    mask : 'compute' or niimg
    verbose : int, nonzero prints progress
    """
    if verbose:
        # single-argument print(...) works identically on Python 2 and 3
        print('%s:' % out_dir)

    data = check_niimgs(data)
    design_matrices = check_design_matrices(design_matrices)
    contrasts = check_contrasts(contrasts)

    glm = FMRILinearModel(data, design_matrices, mask=mask)
    glm.fit(do_scaling=True, model=glm_model)

    for i, contrast_id in enumerate(contrasts):
        if verbose:
            print(' %s/%s - %s ' % (i, len(contrasts), contrast_id))

        # zero-pad each session's contrast to the design-matrix width
        con_val = []
        for session_con, session_dm in zip(contrasts[contrast_id],
                                           design_matrices):
            con = np.zeros(session_dm.shape[1])
            con[:len(session_con)] = session_con
            con_val.append(con)

        z_map, t_map, c_map, var_map = glm.contrast(
            con_val, con_id=contrast_id, output_z=True, output_stat=True,
            output_effects=True, output_variance=True,)

        for dtype, img in zip(['z', 't', 'c', 'var'],
                              [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(out_dir, '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            path = os.path.join(
                map_dir, '%s.nii.gz' % remove_special(contrast_id))
            nb.save(img, path)

    nb.save(glm.mask, os.path.join(out_dir, 'mask.nii.gz'))
def preprocess_files(self, func_files, anat_files=None, verbose=1):
    """Run a per-file GLM and save one beta image per condition.

    For each BOLD file, reads the matching ``_events.tsv``, skips files
    whose per-condition beta images already exist, otherwise fits a
    single-run AR(1) GLM and saves one ``_beta-<cond>.nii.gz`` per unique
    condition next to the BOLD file.

    Parameters
    ----------
    func_files : list of BOLD filenames ending in ``_bold.nii.gz``
    anat_files : unused here  # NOTE(review): accepted but never read
    verbose : int, >= 0 prints progress

    Returns
    -------
    list of paths to the (existing or newly written) beta images
    """
    def get_beta_filepath(func_file, cond):
        # derive the per-condition output filename from the BOLD filename
        return func_file.replace('_bold.nii.gz', '_beta-%s.nii.gz' % cond)

    beta_files = []
    for fi, func_file in enumerate(func_files):
        # Don't re-do preprocessing.
        # NOTE(review): beta_mask is computed but never used
        beta_mask = func_file.replace('_bold.nii.gz', '_beta*.nii.gz')
        cond_file = func_file.replace('_bold.nii.gz', '_events.tsv')
        cond_data = pd.read_csv(cond_file, sep='\t')

        # Get condition info, to search if betas have been done.
        conditions = cond_data['trial_type'].tolist()
        all_conds = np.unique(conditions)
        all_beta_files = [get_beta_filepath(func_file, cond)
                          for cond in all_conds]
        # All betas are done.
        if np.all([os.path.exists(f) for f in all_beta_files]):
            beta_files += all_beta_files
            continue

        if verbose >= 0:
            print('Preprocessing file %d of %d' % (
                fi + 1, len(func_files)))

        # Need to do regression.
        # NOTE(review): tr is taken as the mean event *duration* — confirm
        # this is intended; also pandas' .as_matrix() is deprecated
        tr = cond_data['duration'].as_matrix().mean()
        onsets = cond_data['onset'].tolist()

        img = nibabel.load(func_file)
        n_scans = img.shape[3]
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)

        # Create the design matrix
        paradigm = EventRelatedParadigm(conditions, onsets)
        design_mat = dm.make_dmtx(frametimes, paradigm,
                                  drift_model='cosine', hfcut=n_scans,
                                  hrf_model='canonical')

        # Do the GLM
        mask_img = compute_epi_mask(img)
        fmri_glm = FMRILinearModel(img, design_mat.matrix, mask=mask_img)
        fmri_glm.fit(do_scaling=True, model='ar1')

        # Pull out the betas
        beta_hat = fmri_glm.glms[0].get_beta(
            )  # Least-squares estimates of the beta
        mask = fmri_glm.mask.get_data() > 0

        # output beta images
        # (np.float is a deprecated alias of builtin float in modern numpy)
        dim = design_mat.matrix.shape[1]
        beta_map = np.tile(mask.astype(np.float)[..., np.newaxis], dim)
        beta_map[mask] = beta_hat.T
        beta_image = nibabel.Nifti1Image(beta_map, fmri_glm.affine)
        beta_image.get_header()['descrip'] = (
            'Parameter estimates of the localizer dataset')

        # Save beta images
        # NOTE(review): assumes design-matrix column i corresponds to the
        # i-th condition in sorted(unique(conditions)) — confirm
        for ci, cond in enumerate(np.unique(conditions)):
            beta_cond_img = index_img(beta_image, ci)
            beta_filepath = get_beta_filepath(func_file, cond)
            nibabel.save(beta_cond_img, beta_filepath)
            beta_files.append(beta_filepath)

    return beta_files
def run_suject_level1_glm(subject_data_dir, subject_output_dir, task_id, readout_time=.01392, # seconds tr=.72, do_preproc=False, do_realign=False, do_normalize=False, fwhm=0., report=False, hrf_model="Canonical with Derivative", drift_model="Cosine", hfcut=100, regress_motion=True, slicer='y', cut_coords=6, threshold=3., cluster_th=15 ): """ Function to do preproc + analysis for a single HCP subject (task fMRI) """ # sanitize subject data_dir subject_id = int(os.path.basename(subject_data_dir)) subject_data_dir = os.path.abspath(subject_data_dir) _subject_data_dir = os.path.join(subject_data_dir, "MNINonLinear/Results/") add_regs_files = None if do_preproc: if not os.path.exists(subject_output_dir): os.makedirs(subject_output_dir) # glob fmri files fmri_files = [os.path.join( subject_data_dir, "unprocessed/3T/tfMRI_%s_%s/%s_3T_tfMRI_%s_%s.nii.gz" % ( task_id, direction, subject_id, task_id, direction)) for direction in ["LR", "RL"]] assert len(fmri_files) == 2 # glob anat file anat_file = os.path.join(subject_data_dir, "T1w/T1w_acpc_dc_restore_brain.nii.gz") # assert os.path.isfile(anat_file) if not os.path.isfile(anat_file): anat_file = None # distortion correction ? 
dc_output = _do_fmri_distortion_correction( fmri_files, subject_data_dir, subject_output_dir, subject_id, task_id, readout_time=readout_time, report=report ) if dc_output is None: return else: fmri_files, realignment_parameters = dc_output # preprocess the data preproc_subject_data = do_subject_preproc(SubjectData( func=fmri_files, anat=anat_file, output_dir=subject_output_dir), do_realign=True, do_normalize=do_normalize, fwhm=fwhm, report=report ) fmri_files = preproc_subject_data.func n_motion_regressions = 6 if do_realign and regress_motion: add_regs_files = realignment_parameters else: n_motion_regressions = 12 # glob fmri files fmri_files = [] for direction in ['LR', 'RL']: fmri_file = os.path.join( _subject_data_dir, "tfMRI_%s_%s/tfMRI_%s_%s.nii.gz" % ( task_id, direction, task_id, direction)) if not os.path.isfile(fmri_file): print "Can't find task fMRI file %s; skipping subject %s" % ( fmri_file, subject_id) return else: fmri_files.append(fmri_file) # glob movement confounds if regress_motion: add_regs_files = [os.path.join(_subject_data_dir, "tfMRI_%s_%s" % ( task_id, direction), "Movement_Regressors.txt") for direction in ["LR", "RL"]] # smooth images if np.sum(fwhm) > 0: print "Smoothing fMRI data (fwhm = %s)..." % fwhm fmri_files = _do_subject_smooth(SubjectData( func=fmri_files, output_dir=subject_output_dir), fwhm=fwhm, report=False ).func print "... 
done.\r\n" # sanitize subject_output_dir if not os.path.exists(subject_output_dir): os.makedirs(subject_output_dir) # chronometry stats_start_time = pretty_time() # merged lists paradigms = [] frametimes_list = [] design_matrices = [] # fmri_files = [] n_scans = [] for direction, direction_index in zip(['LR', 'RL'], xrange(2)): # glob the design file design_file = os.path.join(_subject_data_dir, "tfMRI_%s_%s" % ( task_id, direction), "tfMRI_%s_%s_hp200_s4_level1.fsf" % ( task_id, direction)) if not os.path.isfile(design_file): print "Can't find design file %s; skipping subject %s" % ( design_file, subject_id) return # read the experimental setup print "Reading experimental setup from %s ..." % design_file fsl_condition_ids, timing_files, fsl_contrast_ids, contrast_values = \ read_design_fsl_design_file(design_file) print "... done.\r\n" # fix timing filenames timing_files = _insert_directory_in_file_name( timing_files, "tfMRI_%s_%s" % (task_id, direction), 1) # make design matrix print "Constructing design matrix for direction %s ..." % direction _n_scans = nibabel.load(fmri_files[direction_index]).shape[-1] n_scans.append(_n_scans) add_regs_file = add_regs_files[ direction_index] if not add_regs_files is None else None, design_matrix, paradigm, frametimes = make_dmtx_from_timing_files( timing_files, fsl_condition_ids, n_scans=_n_scans, tr=tr, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs_file=add_regs_file, add_reg_names=[ 'Translation along x axis', 'Translation along yaxis', 'Translation along z axis', 'Rotation along x axis', 'Rotation along y axis', 'Rotation along z axis', 'Differential Translation along x axis', 'Differential Translation along yaxis', 'Differential Translation along z axis', 'Differential Rotation along x axis', 'Differential Rotation along y axis', 'Differential Rotation along z axis' ][:n_motion_regressions] if not add_regs_files is None else None, ) print "... done." 
paradigms.append(paradigm) frametimes_list.append(frametimes) design_matrices.append(design_matrix) # convert contrasts to dict contrasts = dict((contrast_id, # append zeros to end of contrast to match design np.hstack((contrast_value, np.zeros(len( design_matrix.names) - len(contrast_value))))) for contrast_id, contrast_value in zip( fsl_contrast_ids, contrast_values)) # more interesting contrasts if task_id == 'MOTOR': contrasts['RH-LH'] = contrasts['RH'] - contrasts['LH'] contrasts['LH-RH'] = -contrasts['RH-LH'] contrasts['RF-LF'] = contrasts['RF'] - contrasts['LF'] contrasts['LF-RF'] = -contrasts['RF-LF'] contrasts['H'] = contrasts['RH'] + contrasts['LH'] contrasts['F'] = contrasts['RF'] + contrasts['LF'] contrasts['H-F'] = contrasts['RH'] + contrasts['LH'] - ( contrasts['RF'] - contrasts['LF']) contrasts['F-H'] = -contrasts['H-F'] # importat maps z_maps = {} effects_maps = {} # replicate contrasts across sessions contrasts = dict((cid, [cval] * 2) for cid, cval in contrasts.iteritems()) # compute effects mask_path = os.path.join(subject_output_dir, "mask.nii.gz") skip = os.path.isfile(mask_path) if skip: for contrast_id, contrast_val in contrasts.iteritems(): for map_type in ['z', 'effects']: map_dir = os.path.join( subject_output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s.nii.gz' % contrast_id) if not os.path.exists(map_path): skip = 0 break # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path if skip: print "Skipping subject %s..." % ( subject_id) # fit GLM if not skip: print ( 'Fitting a "Fixed Effect" GLM for merging LR and RL phase-encoding ' 'directions for subject %s ...' % subject_id) fmri_glm = FMRILinearModel(fmri_files, [design_matrix.matrix for design_matrix in design_matrices], mask='compute' ) fmri_glm.fit(do_scaling=True, model='ar1') print "... 
done.\r\n" # save computed mask mask_path = os.path.join(subject_output_dir, "mask.nii.gz") print "Saving mask image to %s ..." % mask_path nibabel.save(fmri_glm.mask, mask_path) print "... done.\r\n" # compute effects for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map = fmri_glm.contrast( contrast_val, con_id=contrast_id, output_z=True, output_effects=True ) # store stat maps to disk for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]): map_dir = os.path.join( subject_output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s.nii.gz' % contrast_id) print "\t\tWriting %s ..." % map_path nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path # remove repeated contrasts contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) # do stats report if 0x0: anat_img = load_specific_vol(fmri_files[0], 0)[0] stats_report_filename = os.path.join(subject_output_dir, "reports", "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, nibabel.load(mask_path), anat=anat_img.get_data(), anat_affine=anat_img.get_affine(), threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, design_matrices=design_matrices, subject_id=subject_id, start_time=stats_start_time, title="GLM for subject %s" % subject_id, # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm={'LR': paradigms[0].__dict__, 'RL': paradigms[1].__dict__}, frametimes={'LR': frametimes_list[0], 'RL': frametimes_list[1]}, fwhm=fwhm ) ProgressReport().finish_dir(subject_output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename return contrasts, effects_maps, z_maps, 
mask_path
dmat_outfile = os.path.join(subject_data.output_dir, 'design_matrix.png') pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200) # specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i] # more interesting contrasts""" contrasts['active-rest'] = contrasts['active'] - contrasts['rest'] # fit GLM print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FMRILinearModel(nibabel.concat_images(subject_data.func[0]), design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print "Saving mask image %s" % mask_path nibabel.save(fmri_glm.mask, mask_path) # compute bg unto which activation will be projected anat_img = nibabel.load(subject_data.anat) anat = anat_img.get_data() anat_affine = anat_img.get_affine() print "Computing contrasts .." z_maps = {}
######################################### # Specify the contrasts ######################################### # simplest ones contrasts = {} n_columns = len(design_matrix.names) contrasts['audio'] = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) ######################################## # Perform a GLM analysis ######################################## print('Fitting a GLM (this takes time)...') fmri_glm = FMRILinearModel(data_file, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################### # Estimate the contrasts ######################################### print('Computing contrasts...') for index, (contrast_id, contrast_val) in enumerate(contrasts.items()): print(' Contrast % 2i out of %i: %s' % (index + 1, len(contrasts), contrast_id)) # save the z_image image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id) z_map, = fmri_glm.contrast(contrast_val, con_id=contrast_id, output_z=True) save(z_map, image_path)
from nibabel import save from nipy.modalities.fmri.glm import FMRILinearModel from nipy.utils import example_data from nipy.labs.viz import plot_map, cm # ----------------------------------------------------------- # --------- Get the data ----------------------------------- # ----------------------------------------------------------- fmri_files = [example_data.get_filename("fiac", "fiac0", run) for run in ["run1.nii.gz", "run2.nii.gz"]] design_files = [example_data.get_filename("fiac", "fiac0", run) for run in ["run1_design.npz", "run2_design.npz"]] mask_file = example_data.get_filename("fiac", "fiac0", "mask.nii.gz") # Load all the data multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) # GLM fitting multi_session_model.fit(do_scaling=True, model="ar1") def make_fiac_contrasts(p): """Specify some contrasts for the FIAC experiment Parameters ========== p: int, the number of columns of the design matrix (for all sessions) """ con = {} # the design matrices of both runs comprise 13 columns # the first 5 columns of the design matrices correspond to the following
# ----------------------------------------------------------- # --------- Get the data ----------------------------------- #----------------------------------------------------------- fmri_files = [ example_data.get_filename('fiac', 'fiac0', run) for run in ['run1.nii.gz', 'run2.nii.gz'] ] design_files = [ example_data.get_filename('fiac', 'fiac0', run) for run in ['run1_design.npz', 'run2_design.npz'] ] mask_file = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz') # Load all the data multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) # GLM fitting multi_session_model.fit(do_scaling=True, model='ar1') def make_fiac_contrasts(p): """Specify some contrasts for the FIAC experiment Parameters ========== p: int, the number of columns of the design matrix (for all sessions) """ con = {} # the design matrices of both runs comprise 13 columns
def _preprocess_and_analysis_subject(subject_data, slicer='z', cut_coords=6, threshold=3., cluster_th=15, **preproc_params): """ Preprocesses the subject and then fits (mass-univariate) GLM thereup. """ # sanitize run_ids: # Sub14/BOLD/Run_02/fMR09029-0004-00010-000010-01.nii is garbage, # for example run_ids = range(9) if subject_data['subject_id'] == "Sub14": run_ids = [0] + range(2, 9) subject_data['func'] = [subject_data['func'][0] ] + subject_data['func'][2:] subject_data['session_id'] = [subject_data['session_id'][0] ] + subject_data['session_id'][2:] # sanitize subject output dir if not 'output_dir' in subject_data: subject_data['output_dir'] = os.path.join(output_dir, subject_data['subject_id']) # preprocess the data subject_data = do_subject_preproc(subject_data, **preproc_params) # chronometry stats_start_time = pretty_time() # to-be merged lists, one item per run paradigms = [] frametimes_list = [] design_matrices = [] # one list_of_contrast_dicts = [] # one dict per run n_scans = [] for run_id in run_ids: _n_scans = len(subject_data.func[run_id]) n_scans.append(_n_scans) # make paradigm paradigm = make_paradigm(getattr(subject_data, 'timing')[run_id]) # make design matrix tr = 2. drift_model = 'Cosine' hrf_model = 'Canonical With Derivative' hfcut = 128. 
frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans) design_matrix = make_dmtx( frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs=np.loadtxt( getattr(subject_data, 'realignment_parameters')[run_id]), add_reg_names=[ 'Translation along x axis', 'Translation along yaxis', 'Translation along z axis', 'Rotation along x axis', 'Rotation along y axis', 'Rotation along z axis' ]) # import matplotlib.pyplot as plt # design_matrix.show() # plt.show() paradigms.append(paradigm) design_matrices.append(design_matrix) frametimes_list.append(frametimes) n_scans.append(_n_scans) # specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i] # more interesting contrasts""" contrasts['Famous-Unfamiliar'] = contrasts['Famous'] - contrasts[ 'Unfamiliar'] contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar'] contrasts[ 'Famous-Scrambled'] = contrasts['Famous'] - contrasts['Scrambled'] contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled'] contrasts['Unfamiliar-Scrambled'] = contrasts[ 'Unfamiliar'] - contrasts['Scrambled'] contrasts['Scrambled-Unfamiliar'] = -contrasts['Unfamiliar-Scrambled'] list_of_contrast_dicts.append(contrasts) # importat maps z_maps = {} effects_maps = {} # fit GLM print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FMRILinearModel( [nibabel.concat_images(sess_func) for sess_func in subject_data.func], [design_matrix.matrix for design_matrix in design_matrices], mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') print "... done.\r\n" # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print "Saving mask image to %s ..." % mask_path nibabel.save(fmri_glm.mask, mask_path) print "... 
done.\r\n" # replicate contrasts across runs contrasts = dict( (cid, [contrasts[cid] for contrasts in list_of_contrast_dicts]) for cid, cval in contrasts.iteritems()) # compute effects for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map = fmri_glm.contrast(contrast_val, con_id=contrast_id, output_z=True, output_stat=False, output_effects=True, output_variance=False) # store stat maps to disk for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]): map_dir = os.path.join(subject_data.output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id) print "\t\tWriting %s ..." % map_path nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path # remove repeated contrasts contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) # do stats report stats_report_filename = os.path.join( getattr(subject_data, 'reports_output_dir', subject_data.output_dir), "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, design_matrices=design_matrices, subject_id=subject_data.subject_id, start_time=stats_start_time, title="GLM for subject %s" % subject_data.subject_id, # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm=dict(("Run_%02i" % (run_id + 1), paradigms[run_id]) for run_id in run_ids), frametimes=dict(("Run_%02i" % (run_id + 1), frametimes_list[run_id]) for run_id in run_ids), # fwhm=fwhm ) ProgressReport().finish_dir(subject_data.output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename return contrasts, effects_maps, z_maps, 
mask_path
write_dir = path.join(getcwd(), 'results') if not path.exists(write_dir): mkdir(write_dir) # Compute a population-level mask as the intersection of individual masks grp_mask = Nifti1Image( intersect_masks(mask_images).astype(np.int8), load(mask_images[0]).get_affine()) # concatenate the individual images first_level_image = concat_images(betas) # set the model design_matrix = np.ones(len(betas))[:, np.newaxis] # only the intercept grp_model = FMRILinearModel(first_level_image, design_matrix, grp_mask) # GLM fitting using ordinary least_squares grp_model.fit(do_scaling=False, model='ols') # specify and estimate the contrast contrast_val = np.array(([[1]])) # the only possible contrast ! z_map, = grp_model.contrast(contrast_val, con_id='one_sample', output_z=True) # write the results save(z_map, path.join(write_dir, 'one_sample_z_map.nii')) # look at the result vmax = max(-z_map.get_data().min(), z_map.get_data().max()) vmin = -vmax plot_map(z_map.get_data(),
drift_model=drift_model, hfcut=hfcut) ax = design_matrix.show() ax.set_position([.05, .25, .9, .65]) ax.set_title('Design matrix') plt.savefig(path.join(write_dir, 'design_matrix.png')) dim = design_matrix.matrix.shape[1] ######################################## # Perform a GLM analysis ######################################## print('Fitting a GLM (this takes time)...') fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################## # Output beta and variance images ######################################## beta_hat = fmri_glm.glms[0].get_beta() # Least-squares estimates of the beta variance_hat = fmri_glm.glms[0].get_mse() # Estimates of the variance mask = fmri_glm.mask.get_data() > 0 # output beta images beta_map = np.tile(mask.astype(np.float)[..., np.newaxis], dim) beta_map[mask] = beta_hat.T beta_image = Nifti1Image(beta_map, fmri_glm.affine) beta_image.get_header()['descrip'] = ( 'Parameter estimates of the localizer dataset')
def execute_spm_auditory_glm(data, reg_motion=False):
    """Fit a block-design GLM on the SPM auditory dataset and report stats.

    Builds a 16-block rest/active paradigm (TR=7s, 96 scans), optionally
    adds 6 motion regressors, fits a single-session AR(1) GLM, writes
    z/t/effects/variance maps per contrast, and generates an HTML stats
    report for the 'active-rest' contrast.

    Parameters
    ----------
    data : dict with keys 'func', 'anat', 'output_dir',
        'reports_output_dir', 'subject_id', and optionally
        'realignment_parameters'
    reg_motion : bool, include motion regressors (only honored when
        realignment parameters are present in ``data``)
    """
    # motion regression only possible if realignment params were computed
    reg_motion = reg_motion and 'realignment_parameters' in data

    # experimental paradigm: 8 alternating rest/active epochs of 6 scans
    tr = 7.
    n_scans = 96
    _duration = 6
    epoch_duration = _duration * tr
    conditions = ['rest', 'active'] * 8
    duration = epoch_duration * np.ones(len(conditions))
    onset = np.linspace(0, (len(conditions) - 1) * epoch_duration,
                        len(conditions))
    paradigm = BlockParadigm(con_id=conditions, onset=onset,
                             duration=duration)
    hfcut = 2 * 2 * epoch_duration

    # construct design matrix
    frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    drift_model = 'Cosine'
    hrf_model = 'Canonical With Derivative'

    add_reg_names = None
    add_regs = None
    if reg_motion:
        add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
        add_regs = data['realignment_parameters'][0]
        # accept either a filename or an already-loaded array
        if isinstance(add_regs, basestring):
            add_regs = np.loadtxt(add_regs)

    design_matrix = make_dmtx(frametimes,
                              paradigm, hrf_model=hrf_model,
                              drift_model=drift_model, hfcut=hfcut,
                              add_reg_names=add_reg_names,
                              add_regs=add_regs)

    # plot and save design matrix
    ax = design_matrix.show()
    ax.set_position([.05, .25, .9, .65])
    ax.set_title('Design matrix')
    dmat_outfile = os.path.join(data['output_dir'], 'design_matrix.png')
    pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200)
    pl.close()

    # specify contrasts: one unit contrast per condition (columns come in
    # pairs because of the HRF derivative, hence the 2 * i stride)
    contrasts = {}
    n_columns = len(design_matrix.names)
    for i in xrange(paradigm.n_conditions):
        contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2
                                                                         * i]

    # more interesting contrasts
    contrasts['active-rest'] = contrasts['active'] - contrasts['rest']

    # fit GLM
    print('\r\nFitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(load_4D_img(data['func'][0]),
                               design_matrix.matrix,
                               mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(data['output_dir'], "mask.nii.gz")
    print "Saving mask image %s..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)

    # compute bg unto which activation will be projected
    anat_img = load_vol(data['anat'])
    anat = anat_img.get_data()
    if anat.ndim == 4:
        anat = anat[..., 0]
    anat_affine = anat_img.get_affine()

    print "Computing contrasts..."
    z_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, eff_map, var_map = fmri_glm.contrast(
            contrasts[contrast_id],
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True,
            )

        # store stat maps to disk
        for dtype, out_map in zip(['z', 't', 'effects', 'variance'],
                                  [z_map, t_map, eff_map, var_map]):
            map_dir = os.path.join(
                data['output_dir'], '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir, '%s.nii.gz' % contrast_id)
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if contrast_id == 'active-rest' and dtype == "z":
                z_maps[contrast_id] = map_path

            print "\t\t%s map: %s" % (dtype, map_path)

        print

    # do stats report (only for the contrasts collected in z_maps)
    stats_report_filename = os.path.join(data['reports_output_dir'],
                                         "report_stats.html")
    contrasts = dict((contrast_id, contrasts[contrast_id])
                     for contrast_id in z_maps.keys())
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        design_matrices=[design_matrix],
        subject_id=data['subject_id'],
        anat=anat,
        anat_affine=anat_affine,
        cluster_th=50,  # we're only interested in this 'large' clusters
        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )

    ProgressReport().finish_dir(data['output_dir'])

    print "\r\nStatistic report written to %s\r\n" % stats_report_filename
def execute_spm_multimodal_fmri_glm(data, reg_motion=False): reg_motion = reg_motion and 'realignment_parameters' in data # experimental paradigm meta-params stats_start_time = time.ctime() tr = 2. drift_model = 'Cosine' hrf_model = 'Canonical With Derivative' hfcut = 128. # make design matrices design_matrices = [] for x in xrange(2): n_scans = data['func'][x].shape[-1] timing = scipy.io.loadmat(data['trials_ses%i' % (x + 1)], squeeze_me=True, struct_as_record=False) faces_onsets = timing['onsets'][0].ravel() scrambled_onsets = timing['onsets'][1].ravel() onsets = np.hstack((faces_onsets, scrambled_onsets)) onsets *= tr # because onsets were reporting in 'scans' units conditions = ['faces'] * len(faces_onsets) + ['scrambled'] * len( scrambled_onsets) paradigm = EventRelatedParadigm(conditions, onsets) frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) add_reg_names = None add_regs = None if reg_motion: add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz'] add_regs = np.loadtxt(data['realignment_parameters'][x]) if isinstance(add_regs): add_regs = np.loadtxt(add_regs) design_matrix = make_dmtx( frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_reg_names=add_reg_names, add_regs=add_regs ) design_matrices.append(design_matrix) # specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i] # more interesting contrasts contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled'] contrasts['scrambled-faces'] = contrasts['scrambled'] - contrasts['faces'] contrasts['effects_of_interest'] = contrasts[ 'faces'] + contrasts['scrambled'] # we've thesame contrasts over sessions, so let's replicate contrasts = dict((contrast_id, [contrast_val] * 2) for contrast_id, contrast_val in contrasts.iteritems()) # fit GLM print('\r\nFitting a GLM (this takes time)...') fmri_glm = 
FMRILinearModel([load_4D_img(sess_func) for sess_func in data['func']], [dmat.matrix for dmat in design_matrices], mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') # save computed mask mask_path = os.path.join(data['output_dir'], "mask.nii.gz") print "Saving mask image %s" % mask_path nibabel.save(fmri_glm.mask, mask_path) # compute bg unto which activation will be projected anat_img = load_vol(data['anat']) anat = anat_img.get_data() if anat.ndim == 4: anat = anat[..., 0] anat_affine = anat_img.get_affine() print "Computing contrasts .." z_maps = {} for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, t_map, eff_map, var_map = fmri_glm.contrast( contrast_val, con_id=contrast_id, output_z=True, output_stat=True, output_effects=True, output_variance=True, ) # store stat maps to disk for dtype, out_map in zip(['z', 't', 'effects', 'variance'], [z_map, t_map, eff_map, var_map]): map_dir = os.path.join( data['output_dir'], '%s_maps' % dtype) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s.nii.gz' % contrast_id) nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if dtype == 'z': z_maps[contrast_id] = map_path print "\t\t%s map: %s" % (dtype, map_path) # do stats report data['stats_report_filename'] = os.path.join(data['reports_output_dir'], "report_stats.html") contrasts = dict((contrast_id, contrasts[contrast_id]) for contrast_id in z_maps.keys()) generate_subject_stats_report( data['stats_report_filename'], contrasts, z_maps, fmri_glm.mask, anat=anat, anat_affine=anat_affine, design_matrices=design_matrices, subject_id=data['subject_id'], cluster_th=15, # we're only interested in this 'large' clusters start_time=stats_start_time, # additional ``kwargs`` for more informative report paradigm=paradigm.__dict__, TR=tr, n_scans=n_scans, hfcut=hfcut, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model, ) 
ProgressReport().finish_dir(data['reports_output_dir']) print "\r\nStatistic report written to %s\r\n" % data[ 'stats_report_filename'] return data
def execute_glm( doc, out_dir, contrast_definitions=None, outputs=None, glm_model='ar1', ): """Function to execute GLM for one subject --and perhaps multiple sessions thereof """ stats_start_time = time.ctime() # study_dir = os.path.join(out_dir, doc['study']) if outputs is None: outputs = { 'maps': False, 'data': False, 'mask': True, 'model': True, } else: outputs['maps'] = False subject_id = doc['subject'] subject_output_dir = os.path.join(out_dir, subject_id) _export(doc, subject_output_dir, outputs=outputs) params = load_glm_params(doc) # instantiate GLM fmri_glm = FMRILinearModel(params['data'], params['design_matrices'], doc['mask']) # fit GLM fmri_glm.fit(do_scaling=True, model=glm_model) # save beta-maps to disk beta_map_dir = os.path.join(subject_output_dir, 'beta_maps') if not os.path.exists(beta_map_dir): os.makedirs(beta_map_dir) for j, glm in zip(xrange(len(fmri_glm.glms)), fmri_glm.glms): # XXX save array in some compressed format np.savetxt( os.path.join(beta_map_dir, "beta_map_%i.txt" % j), glm.get_beta(), # array has shape (n_conditions, n_voxels) ) # define contrasts if contrast_definitions is not None: params['contrasts'] = make_contrasts(params, contrast_definitions) contrasts = sorted(params['contrasts'][0].keys()) _contrasts = {} z_maps = {} # compute stats maps for index, contrast_id in enumerate(contrasts): print ' study[%s] subject[%s] contrast [%s]: %i/%i' % ( doc['study'], doc['subject'], contrast_id, index + 1, len(contrasts)) contrast = [c[contrast_id] for c in params['contrasts']] contrast_name = contrast_id.replace(' ', '_') z_map, t_map, c_map, var_map = fmri_glm.contrast( contrast, con_id=contrast_id, output_z=True, output_stat=True, output_effects=True, output_variance=True, ) for dtype, out_map in zip(['z', 't', 'c', 'variance'], [z_map, t_map, c_map, var_map]): map_dir = os.path.join(subject_output_dir, '%s_maps' % dtype) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join(map_dir, '%s.nii.gz' % 
contrast_name) nb.save(out_map, map_path) # collect z map if dtype == 'z': _contrasts[contrast_name] = contrast z_maps[contrast_name] = map_path # invoke a single API to handle plotting and html business for you subject_stats_report_filename = os.path.join(subject_output_dir, "report_stats.html") glm_reporter.generate_subject_stats_report( subject_stats_report_filename, _contrasts, z_maps, doc['mask'], design_matrices=list(params['design_matrices']), subject_id=doc['subject'], cluster_th=15, # 15 voxels start_time=stats_start_time, TR=doc['TR'], n_scans=doc['n_scans'], n_sessions=doc['n_sessions'], model=glm_model, ) print "Report for subject %s written to %s" % ( doc['subject'], subject_stats_report_filename)
def _preprocess_and_analysis_subject(subject_data, do_normalize=False, fwhm=0., slicer='z', cut_coords=6, threshold=3., cluster_th=15 ): """ Preprocesses the subject and then fits (mass-univariate) GLM thereupon. """ # sanitize run_ids: # Sub14/BOLD/Run_02/fMR09029-0004-00010-000010-01.nii is garbage, # for example run_ids = range(9) if subject_data['subject_id'] == "Sub14": run_ids = [0] + range(2, 9) subject_data['func'] = [subject_data['func'][0]] + subject_data[ 'func'][2:] subject_data['session_id'] = [subject_data['session_id'][0] ] + subject_data['session_id'][2:] # sanitize subject output dir if not 'output_dir' in subject_data: subject_data['output_dir'] = os.path.join( output_dir, subject_data['subject_id']) # preprocess the data subject_data = do_subject_preproc(SubjectData(**subject_data), do_realign=True, do_coreg=True, do_report=False, do_cv_tc=False ) assert not subject_data.anat is None # norm if do_normalize: subject_data = nipype_do_subject_preproc( subject_data, do_realign=False, do_coreg=False, do_segment=True, do_normalize=True, func_write_voxel_sizes=[3, 3, 3], anat_write_voxel_sizes=[2, 2, 2], fwhm=fwhm, hardlink_output=False, do_report=False ) # chronometry stats_start_time = pretty_time() # to-be merged lists, one item per run paradigms = [] frametimes_list = [] design_matrices = [] # one list_of_contrast_dicts = [] # one dict per run n_scans = [] for run_id in run_ids: _n_scans = len(subject_data.func[run_id]) n_scans.append(_n_scans) # make paradigm paradigm = make_paradigm(getattr(subject_data, 'timing')[run_id]) # make design matrix tr = 2. drift_model = 'Cosine' hrf_model = 'Canonical With Derivative' hfcut = 128. 
frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans) design_matrix = make_dmtx( frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs=np.loadtxt(getattr(subject_data, 'realignment_parameters')[run_id]), add_reg_names=[ 'Translation along x axis', 'Translation along yaxis', 'Translation along z axis', 'Rotation along x axis', 'Rotation along y axis', 'Rotation along z axis' ] ) # import matplotlib.pyplot as plt # design_matrix.show() # plt.show() paradigms.append(paradigm) design_matrices.append(design_matrix) frametimes_list.append(frametimes) n_scans.append(_n_scans) # specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye( n_columns)[2 * i] # more interesting contrasts""" contrasts['Famous-Unfamiliar'] = contrasts[ 'Famous'] - contrasts['Unfamiliar'] contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar'] contrasts['Famous-Scrambled'] = contrasts[ 'Famous'] - contrasts['Scrambled'] contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled'] contrasts['Unfamiliar-Scrambled'] = contrasts[ 'Unfamiliar'] - contrasts['Scrambled'] contrasts['Scrambled-Unfamiliar'] = -contrasts['Unfamiliar-Scrambled'] list_of_contrast_dicts.append(contrasts) # importat maps z_maps = {} effects_maps = {} # fit GLM print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FMRILinearModel([nibabel.concat_images(sess_func) for sess_func in subject_data.func], [design_matrix.matrix for design_matrix in design_matrices], mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') print "... done.\r\n" # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print "Saving mask image to %s ..." % mask_path nibabel.save(fmri_glm.mask, mask_path) print "... 
done.\r\n" # replicate contrasts across runs contrasts = dict((cid, [contrasts[cid] for contrasts in list_of_contrast_dicts]) for cid, cval in contrasts.iteritems()) # compute effects for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map = fmri_glm.contrast( contrast_val, con_id=contrast_id, output_z=True, output_stat=False, output_effects=True, output_variance=False ) # store stat maps to disk for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]): map_dir = os.path.join( subject_data.output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s.nii.gz' % contrast_id) print "\t\tWriting %s ..." % map_path nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path # remove repeated contrasts contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) # do stats report stats_report_filename = os.path.join(getattr(subject_data, 'reports_output_dir', subject_data.output_dir), "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, anat=nibabel.load(subject_data.anat).get_data(), anat_affine=nibabel.load(subject_data.anat).get_affine(), design_matrices=design_matrices, subject_id=subject_data.subject_id, start_time=stats_start_time, title="GLM for subject %s" % subject_data.subject_id, # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm=dict(("Run_%02i" % (run_id + 1), paradigms[run_id].__dict__) for run_id in run_ids), frametimes=dict(("Run_%02i" % (run_id + 1), frametimes_list[run_id]) for run_id in run_ids), # fwhm=fwhm ) 
ProgressReport().finish_dir(subject_data.output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename return contrasts, effects_maps, z_maps, mask_path
def do_subject_glm(subject_data):
    """FE analysis for a single subject.

    Parameters
    ----------
    subject_data: dict
        must contain keys 'subject_id', 'output_dir', 'func', 'anat',
        'onset', 'TR', 'time_units', 'drift_model', 'hrf_model', 'hfcut'.

    Returns
    -------
    subject_id, anat, effects_maps, z_maps, contrasts, mask image
    """
    subject_id = subject_data['subject_id']
    output_dir = subject_data["output_dir"]
    func_files = subject_data['func']
    anat = subject_data['anat']
    onset_files = subject_data['onset']
    tr = subject_data['TR']
    time_units = subject_data['time_units'].lower()
    assert time_units in ["seconds", "tr", "milliseconds"]
    drift_model = subject_data['drift_model']
    hrf_model = subject_data["hrf_model"]
    hfcut = subject_data["hfcut"]
    mem = Memory(os.path.join(output_dir, "cache"))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # disabled preprocessing path, kept from the original
    if 0:
        subject_data = mem.cache(do_subject_preproc)(dict(
            func=func_files, anat=anat, output_dir=output_dir))
        func_files = subject_data['func']
        anat = subject_data['anat']

    # reslice func images
    # NOTE(review): in the (whitespace-mangled) original it is ambiguous
    # whether this step sat inside the disabled ``if 0:`` block; it is
    # kept active here -- confirm against upstream history
    func_files = [mem.cache(reslice_vols)(
        sess_func,
        target_affine=nibabel.load(sess_func[0]).get_affine())
        for sess_func in func_files]

    ### GLM: loop on (session_bold, onse_file) pairs over the various sessions
    design_matrices = []
    for func_file, onset_file in zip(func_files, onset_files):
        # infer the number of scans for this session
        # FIX: the original never set ``n_scans`` when ``func_file`` was
        # a plain filename (str); a single 4D filename and a length-1
        # list of filenames are now handled identically
        if isinstance(func_file, str) or len(func_file) == 1:
            if not isinstance(func_file, str):
                func_file = func_file[0]
            bold = nibabel.load(func_file)
            assert len(bold.shape) == 4
            n_scans = bold.shape[-1]
            del bold
        else:
            n_scans = len(func_file)
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        conditions, onsets, durations, amplitudes = parse_onset_file(
            onset_file)
        # convert onsets/durations to seconds
        if time_units == "tr":
            onsets *= tr
            durations *= tr
        elif time_units in ["milliseconds"]:
            onsets *= 1e-3
            durations *= 1e-3
        paradigm = BlockParadigm(con_id=conditions, onset=onsets,
                                 duration=durations, amplitude=amplitudes)
        design_matrices.append(make_dmtx(
            frametimes, paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut))

    # specify contrasts: conditions occupy the even design-matrix columns
    n_columns = len(design_matrices[0].names)
    contrasts = {}
    for i in range(paradigm.n_conditions):
        contrasts['%s' % design_matrices[0].names[2 * i]] = np.eye(
            n_columns)[2 * i]

    # effects of interest F-test: pairwise differences between conditions
    diff_contrasts = []
    for i in range(paradigm.n_conditions - 1):
        a = contrasts[design_matrices[0].names[2 * i]]
        b = contrasts[design_matrices[0].names[2 * (i + 1)]]
        diff_contrasts.append(a - b)
    contrasts["diff"] = diff_contrasts

    # fit GLM
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(
        [nibabel.concat_images(sess_func, check_affines=False)
         for sess_func in func_files],
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(output_dir, "mask.nii.gz")
    print("Saving mask image %s" % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        # BUG FIX: the original had the t/F choice inverted -- a single
        # contrast vector (ndim == 1) is a t-contrast, while a 2D stack
        # of vectors (the "diff" F-test above) is an F-contrast
        if np.ndim(contrast_val) > 1:
            contrast_type = "F"
        else:
            contrast_type = "t"
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * len(func_files), con_id=contrast_id,
            contrast_type=contrast_type, output_z=True, output_stat=True,
            output_effects=True, output_variance=True)

        # store stat maps to disk
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    return subject_id, anat, effects_maps, z_maps, contrasts, fmri_glm.mask
for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i] ] = np.eye(n_columns)[2 * i] # more interesting contrasts contrasts['faces-scrambled'] = contrasts['faces' ] - contrasts['scrambled'] contrasts['scrambled-faces'] = -contrasts['faces-scrambled'] contrasts['effects_of_interest'] = contrasts['faces' ] + contrasts['scrambled'] # fit GLM print 'Fitting a GLM for %s (this takes time)...' % ( subject_data.session_id[x]) fmri_glm = FMRILinearModel(nibabel.concat_images(subject_data.func[x]), design_matrix.matrix, mask='compute' ) fmri_glm.fit(do_scaling=True, model='ar1') # save computed mask mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz") print "Saving mask image %s" % mask_path nibabel.save(fmri_glm.mask, mask_path) mask_images.append(mask_path) # compute contrasts z_maps = {} effects_maps = {} for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id
######################################### # simplest ones contrasts = {} n_columns = len(design_matrix.names) for i in range(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i] # Our contrast of interest reading_vs_visual = contrasts["phrasevideo"] - contrasts["damier_H"] ######################################## # Perform a GLM analysis on H1 ######################################## fmri_glm = FMRILinearModel(fmri_data, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') # Estimate the contrast z_map, = fmri_glm.contrast(reading_vs_visual, output_z=True) # Plot the contrast vmax = max(-z_map.get_data().min(), z_map.get_data().max()) plot_map(z_map.get_data(), z_map.get_affine(), cmap=cm.cold_hot, vmin=-vmax, vmax=vmax, slicer='z', black_bg=True, threshold=2.5, title='Reading vs visual') # Count all the clusters for |Z| > 2.5 Z = z_map.get_data() from scipy import ndimage
def do_glm_for_subject(subject_id, bold_base_folder, trial_base_folder, output_base_folder): subject_dir = path(bold_base_folder) / ("sub%03d" % subject_id) output_dir = (path(output_base_folder) / ("sub%03d" % subject_id) / "model001") print output_dir if not output_dir.exists(): output_dir.makedirs() anat_file = subject_dir / "highres001.nii" anat = nb.load(anat_file) run_ids = range(1, 10) task_bold_files = [subject_dir.glob("task001_run%03d/rbold*.nii" % rid)[0] for rid in run_ids] task_mvt_files = [subject_dir.glob("task001_run%03d/rp_bold*.txt" % rid)[0] for rid in run_ids] trial_files = [(path(trial_base_folder) / ("Sub%02d" % subject_id) / "BOLD" / "Trials" / ("run_%02d_spmdef.txt" % rid)) for rid in range(1, 10)] stats_start_time = pretty_time() paradigms = [] design_matrices = [] n_scans = [] all_frametimes = [] list_of_contrast_dicts = [] # one dict per run for bold_file, mvt_file, trial_file in zip(task_bold_files, task_mvt_files, trial_files): _n_scans = nb.load(bold_file).shape[-1] n_scans.append(_n_scans) paradigm = make_paradigm(trial_file) paradigms.append(paradigm) movements = np.loadtxt(mvt_file) tr = 2. drift_model = "Cosine" hrf_model = "Canonical With Derivative" hfcut = 128. 
frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans) design_matrix = make_dmtx( frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs=movements, add_reg_names=[ "Tx", "Ty", "Tz", "R1", "R2", "R3"]) design_matrices.append(design_matrix) all_frametimes.append(frametimes) # specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye( n_columns)[2 * i] # more interesting contrasts""" contrasts['Famous-Unfamiliar'] = contrasts[ 'Famous'] - contrasts['Unfamiliar'] contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar'] contrasts['Famous-Scrambled'] = contrasts[ 'Famous'] - contrasts['Scrambled'] contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled'] contrasts['Unfamiliar-Scrambled'] = contrasts[ 'Unfamiliar'] - contrasts['Scrambled'] contrasts['Scrambled-Unfamiliar'] = -contrasts['Unfamiliar-Scrambled'] list_of_contrast_dicts.append(contrasts) # importat maps z_maps = {} effects_maps = {} fmri_glm = FMRILinearModel(task_bold_files, [dm.matrix for dm in design_matrices], mask="compute") fmri_glm.fit(do_scaling=True, model="ar1") # replicate contrasts across runs contrasts = dict((cid, [contrasts[cid] for contrasts in list_of_contrast_dicts]) for cid, cval in contrasts.iteritems()) # compute effects for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map, var_map = fmri_glm.contrast( contrast_val, con_id=contrast_id, output_z=True, output_stat=False, output_effects=True, output_variance=True ) for map_type, out_map in zip(['z', 'effects', 'variance'], [z_map, eff_map, var_map]): map_dir = output_dir / ('%s_maps' % map_type) if not map_dir.exists(): map_dir.makedirs() map_path = map_dir / ('%s.nii.gz' % contrast_id) print "\t\tWriting %s ..." 
% map_path nb.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path if map_type == "variance": effects_maps[contrast_id] = map_path stats_report_dir = output_dir / "report" if not stats_report_dir.exists(): stats_report_dir.makedirs() stats_report_filename = stats_report_dir / "report_stats.html" # remove repeated contrasts contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) slicer = 'z' cut_coords = [-20, -10, 0, 10, 20, 30, 40, 50] threshold = 3. cluster_th = 15 generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat_affine=anat.get_affine(), anat=anat.get_data(), threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, design_matrices=design_matrices, subject_id="sub%03d" % subject_id, start_time=stats_start_time, title="GLM for subject %s" % ("sub%03d" % subject_id), # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm=dict(("Run_%02i" % (run_id), paradigms[run_id - 1]) for run_id in run_ids), frametimes=dict(("Run_%02i" % (run_id), all_frametimes[run_id - 1]) for run_id in run_ids), # fwhm=fwhm ) return fmri_glm
def do_subject_glm(subject_data):
    """FE analysis for a single subject.

    Duplicate of the earlier ``do_subject_glm``; kept in sync with it.

    Parameters
    ----------
    subject_data: dict
        must contain keys 'subject_id', 'output_dir', 'func', 'anat',
        'onset', 'TR', 'time_units', 'drift_model', 'hrf_model', 'hfcut'.

    Returns
    -------
    subject_id, anat, effects_maps, z_maps, contrasts, mask image
    """
    subject_id = subject_data['subject_id']
    output_dir = subject_data["output_dir"]
    func_files = subject_data['func']
    anat = subject_data['anat']
    onset_files = subject_data['onset']
    tr = subject_data['TR']
    time_units = subject_data['time_units'].lower()
    assert time_units in ["seconds", "tr", "milliseconds"]
    drift_model = subject_data['drift_model']
    hrf_model = subject_data["hrf_model"]
    hfcut = subject_data["hfcut"]
    mem = Memory(os.path.join(output_dir, "cache"))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # disabled preprocessing path, kept from the original
    if 0:
        subject_data = mem.cache(do_subject_preproc)(
            dict(func=func_files, anat=anat, output_dir=output_dir))
        func_files = subject_data['func']
        anat = subject_data['anat']

    # reslice func images
    # NOTE(review): in the (whitespace-mangled) original it is ambiguous
    # whether this step sat inside the disabled ``if 0:`` block; it is
    # kept active here -- confirm against upstream history
    func_files = [mem.cache(reslice_vols)(
        sess_func,
        target_affine=nibabel.load(sess_func[0]).get_affine())
        for sess_func in func_files]

    ### GLM: loop on (session_bold, onse_file) pairs over the various sessions
    design_matrices = []
    for func_file, onset_file in zip(func_files, onset_files):
        # infer the number of scans for this session
        # FIX: the original never set ``n_scans`` when ``func_file`` was
        # a plain filename (str); a single 4D filename and a length-1
        # list of filenames are now handled identically
        if isinstance(func_file, str) or len(func_file) == 1:
            if not isinstance(func_file, str):
                func_file = func_file[0]
            bold = nibabel.load(func_file)
            assert len(bold.shape) == 4
            n_scans = bold.shape[-1]
            del bold
        else:
            n_scans = len(func_file)
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        conditions, onsets, durations, amplitudes = parse_onset_file(
            onset_file)
        # convert onsets/durations to seconds
        if time_units == "tr":
            onsets *= tr
            durations *= tr
        elif time_units in ["milliseconds"]:
            onsets *= 1e-3
            durations *= 1e-3
        paradigm = BlockParadigm(con_id=conditions, onset=onsets,
                                 duration=durations, amplitude=amplitudes)
        design_matrices.append(make_dmtx(
            frametimes, paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut))

    # specify contrasts: conditions occupy the even design-matrix columns
    n_columns = len(design_matrices[0].names)
    contrasts = {}
    for i in range(paradigm.n_conditions):
        contrasts['%s' % design_matrices[0].names[2 * i]] = np.eye(
            n_columns)[2 * i]

    # effects of interest F-test: pairwise differences between conditions
    diff_contrasts = []
    for i in range(paradigm.n_conditions - 1):
        a = contrasts[design_matrices[0].names[2 * i]]
        b = contrasts[design_matrices[0].names[2 * (i + 1)]]
        diff_contrasts.append(a - b)
    contrasts["diff"] = diff_contrasts

    # fit GLM
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(
        [nibabel.concat_images(sess_func, check_affines=False)
         for sess_func in func_files],
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(output_dir, "mask.nii.gz")
    print("Saving mask image %s" % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        # BUG FIX: the original had the t/F choice inverted -- a single
        # contrast vector (ndim == 1) is a t-contrast, while a 2D stack
        # of vectors (the "diff" F-test above) is an F-contrast
        if np.ndim(contrast_val) > 1:
            contrast_type = "F"
        else:
            contrast_type = "t"
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * len(func_files), con_id=contrast_id,
            contrast_type=contrast_type, output_z=True, output_stat=True,
            output_effects=True, output_variance=True
            )

        # store stat maps to disk
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    return subject_id, anat, effects_maps, z_maps, contrasts, fmri_glm.mask
def execute_spm_auditory_glm(data, reg_motion=False): reg_motion = reg_motion and 'realignment_parameters' in data tr = 7. n_scans = 96 _duration = 6 epoch_duration = _duration * tr conditions = ['rest', 'active'] * 8 duration = epoch_duration * np.ones(len(conditions)) onset = np.linspace(0, (len(conditions) - 1) * epoch_duration, len(conditions)) paradigm = BlockParadigm(con_id=conditions, onset=onset, duration=duration) hfcut = 2 * 2 * epoch_duration # construct design matrix frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) drift_model = 'Cosine' hrf_model = 'Canonical With Derivative' add_reg_names = None add_regs = None if reg_motion: add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz'] add_regs = data['realignment_parameters'][0] if isinstance(add_regs, basestring): add_regs = np.loadtxt(add_regs) design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_reg_names=add_reg_names, add_regs=add_regs) # plot and save design matrix ax = design_matrix.show() ax.set_position([.05, .25, .9, .65]) ax.set_title('Design matrix') dmat_outfile = os.path.join(data['output_dir'], 'design_matrix.png') pl.savefig(dmat_outfile, bbox_inches="tight", dpi=200) # specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i] # more interesting contrasts""" contrasts['active-rest'] = contrasts['active'] - contrasts['rest'] # fit GLM print('\r\nFitting a GLM (this takes time)...') fmri_glm = FMRILinearModel(load_4D_img(data['func'][0]), design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') # save computed mask mask_path = os.path.join(data['output_dir'], "mask.nii.gz") print "Saving mask image %s..." 
% mask_path nibabel.save(fmri_glm.mask, mask_path) # compute bg unto which activation will be projected anat_img = load_vol(data['anat']) anat = anat_img.get_data() if anat.ndim == 4: anat = anat[..., 0] anat_affine = anat_img.get_affine() print "Computing contrasts..." z_maps = {} for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, t_map, eff_map, var_map = fmri_glm.contrast( contrasts[contrast_id], con_id=contrast_id, output_z=True, output_stat=True, output_effects=True, output_variance=True, ) # store stat maps to disk for dtype, out_map in zip(['z', 't', 'effects', 'variance'], [z_map, t_map, eff_map, var_map]): map_dir = os.path.join( data['output_dir'], '%s_maps' % dtype) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s.nii.gz' % contrast_id) nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if contrast_id == 'active-rest' and dtype == "z": z_maps[contrast_id] = map_path print "\t\t%s map: %s" % (dtype, map_path) print # do stats report stats_report_filename = os.path.join(data['reports_output_dir'], "report_stats.html") contrasts = dict((contrast_id, contrasts[contrast_id]) for contrast_id in z_maps.keys()) generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, design_matrices=[design_matrix], subject_id=data['subject_id'], anat=anat, anat_affine=anat_affine, cluster_th=50, # we're only interested in this 'large' clusters # additional ``kwargs`` for more informative report paradigm=paradigm.__dict__, TR=tr, n_scans=n_scans, hfcut=hfcut, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model, ) ProgressReport().finish_dir(data['output_dir']) print "\r\nStatistic report written to %s\r\n" % stats_report_filename
# simplest ones contrasts = {} n_columns = len(design_matrix.names) for i in range(paradigm.n_conditions): contrasts['%s' % design_matrix.names[i]] = np.eye(n_columns)[i] # and more complex/ interesting ones contrasts['left'] = contrasts['clicGaudio'] + contrasts['clicGvideo'] contrasts['right'] = contrasts['clicDaudio'] + contrasts['clicDvideo'] ######################################## # Perform a GLM analysis ######################################## print('Fitting a General Linear Model') fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################### # Estimate the contrasts ######################################### contrast_id = 'left_right_motor_min' z_map, effects_map = fmri_glm.contrast(np.vstack( (contrasts['left'], contrasts['right'])), contrast_type='tmin-conjunction', output_z=True, output_effects=True) z_image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id) save(z_map, z_image_path)
def first_level(subject_dic):
    """Single-subject GLM for the localizer protocol.

    Builds one design matrix per functional session, fits a
    multi-session GLM, writes the mask and the z/t/effects/variance
    maps for every contrast under ``<output_dir>/res_stats``, and
    generates an HTML stats report.

    Parameters
    ----------
    subject_dic: dict
        must provide 'func' (list of session fMRI images), 'anat',
        'output_dir', 'subject_id' and 'session_id'.

    Returns
    -------
    dict mapping contrast id to the path of the saved z-map.
    """
    # experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.4
    drift_model = 'blank'
    hrf_model = 'canonical'  # hemodynamic reponse function
    hfcut = 128.
    n_scans = 128  # NOTE(review): assumes 128 scans per session -- confirm

    # make design matrices (one per functional session; the localizer
    # paradigm/timing is identical across sessions)
    mask_images = []
    design_matrices = []
    fmri_files = subject_dic['func']
    for x in xrange(len(fmri_files)):
        paradigm = paradigm_contrasts.localizer_paradigm()

        # build design matrix
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        design_matrix = make_dmtx(
            frametimes,
            paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut,
            )
        design_matrices.append(design_matrix)

    # Specify contrasts (from the last design matrix; all sessions share
    # the same design here)
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    #create output directory
    subject_session_output_dir = os.path.join(subject_dic['output_dir'],
                                              'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    # Fit GLM: mask computed from the data, AR(1) noise model
    print 'Fitting a GLM (this takes time)...'
    fmri_glm = FMRILinearModel(
        fmri_files,
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    print "Saving mask image %s" % mask_path
    nibabel.save(fmri_glm.mask, mask_path)
    mask_images.append(mask_path)

    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        # NOTE(review): [contrast_val] * 1 only supplies one per-session
        # contrast; with several sessions this presumably needs one entry
        # per design matrix -- confirm against FMRILinearModel.contrast
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * 1,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # store stat maps to disk
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir,
                '%s%s.nii.gz' % (subject_dic['subject_id'], contrast_id))
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # do stats report
    anat_img = nibabel.load(subject_dic['anat'])
    stats_report_filename = os.path.join(subject_session_output_dir,
                                         "report_stats.html")
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        threshold=2.3,
        cluster_th=15,
        anat=anat_img.get_data(),
        anat_affine=anat_img.get_affine(),
        design_matrices=design_matrix,
        subject_id="sub001",
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_dic['session_id'],
        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )
    ProgressReport().finish_dir(subject_session_output_dir)
    print "Statistic report written to %s\r\n" % stats_report_filename

    return z_maps
"""collect preprocessed data""" fmri_files = results[0]['func'] anat_file = results[0]['anat'] """specify contrasts""" contrasts = {} n_columns = len(design_matrix.names) I = np.eye(len(design_matrix.names)) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = I[2 * i] """more interesting contrasts""" contrasts['EV1>EV2'] = contrasts['EV1'] - contrasts['EV2'] contrasts['EV2>EV1'] = contrasts['EV2'] - contrasts['EV1'] contrasts['effects_of_interest'] = contrasts['EV1'] + contrasts['EV2'] """fit GLM""" print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FMRILinearModel(fmri_files, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') """save computed mask""" mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print "Saving mask image %s" % mask_path nibabel.save(fmri_glm.mask, mask_path) # compute bg unto which activation will be projected mean_fmri_files = compute_mean_3D_image(fmri_files) anat_img = nibabel.load(anat_file) anat = anat_img.get_data() anat_affine = anat_img.get_affine() print "Computing contrasts .." z_maps = {} for contrast_id, contrast_val in contrasts.iteritems():
def do_subject_glm(subject_data):
    """Fixed-effects (multi-session) GLM analysis for a single subject.

    Reslices the per-session functional images, builds one design matrix
    per session from the corresponding onset file, fits a multi-session
    GLM, and writes z/t/effects/variance maps for every contrast.

    Parameters
    ----------
    subject_data: dict
        must provide 'subject_id', 'output_dir', 'func' (per-session
        functional images: a 4D filename or a list of 3D filenames per
        session), 'anat' and 'onset' (one onset file per session).

    Returns
    -------
    (subject_id, anat, effects_maps, z_maps, contrasts, mask) where
    effects_maps and z_maps map contrast id -> path of the saved map.
    """
    subject_id = subject_data['subject_id']
    output_dir = subject_data["output_dir"]
    func_files = subject_data['func']
    anat = subject_data['anat']
    onset_files = subject_data['onset']
    mem = Memory(os.path.join(output_dir, "cache"))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # reslice func images so all volumes of a session share the affine of
    # the session's first volume (assumes each session is a list of 3D
    # volume filenames -- TODO confirm for 4D-filename input)
    func_files = [mem.cache(reslice_vols)(
        sess_func,
        target_affine=nibabel.load(sess_func[0]).get_affine())
        for sess_func in func_files]

    ### GLM: loop on (session_bold, onset_file) pairs over the sessions
    design_matrices = []
    for func_file, onset_file in zip(func_files, onset_files):
        # infer the number of scans of this session.  BUGFIX: the
        # original left n_scans undefined when func_file was a single
        # 4D filename (str).
        if isinstance(func_file, str):
            bold = nibabel.load(func_file)
            assert len(bold.shape) == 4
            n_scans = bold.shape[-1]
            del bold
        elif len(func_file) == 1:
            func_file = func_file[0]
            bold = nibabel.load(func_file)
            assert len(bold.shape) == 4
            n_scans = bold.shape[-1]
            del bold
        else:
            n_scans = len(func_file)

        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        conditions, onsets, durations, amplitudes = parse_onset_file(
            onset_file)
        # onset-file timing is in scan units; convert to seconds
        onsets *= tr
        durations *= tr
        paradigm = BlockParadigm(con_id=conditions, onset=onsets,
                                 duration=durations, amplitude=amplitudes)
        design_matrices.append(make_dmtx(
            frametimes, paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut))

    # specify contrasts: one indicator contrast per condition (conditions
    # sit on the even design columns; odd columns are presumably their
    # derivatives -- TODO confirm against the hrf model)
    n_columns = len(design_matrices[0].names)
    contrasts = {}
    for i in range(paradigm.n_conditions):
        contrasts['%s' % design_matrices[0].names[2 * i]] = np.eye(
            n_columns)[2 * i]

    # more interesting contrasts
    contrasts['faces-scrambled'] = (contrasts['faces']
                                    - contrasts['scrambled'])
    contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
    contrasts['effects_of_interest'] = (contrasts['faces']
                                        + contrasts['scrambled'])

    # effects-of-interest F-test: differences of successive conditions
    diff_contrasts = []
    for i in range(paradigm.n_conditions - 1):
        a = contrasts[design_matrices[0].names[2 * i]]
        b = contrasts[design_matrices[0].names[2 * (i + 1)]]
        diff_contrasts.append(a - b)
    contrasts["diff"] = diff_contrasts

    # fit GLM
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(
        [nibabel.concat_images(sess_func, check_affines=False)
         for sess_func in func_files],
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(output_dir, "mask.nii.gz")
    print("Saving mask image %s" % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)

    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)

        # BUGFIX: a multi-row contrast (ndim > 1) is an F-contrast and a
        # single row is a t-contrast; the original mapping was inverted.
        if np.ndim(contrast_val) > 1:
            contrast_type = "F"
        else:
            contrast_type = "t"

        # replicate the contrast across sessions (one per design matrix,
        # instead of the original hard-coded 2)
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * len(design_matrices),
            con_id=contrast_id,
            contrast_type=contrast_type,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True
            )

        # store stat maps to disk
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)

            # collect z- and effects-maps for group-level analysis
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    return subject_id, anat, effects_maps, z_maps, contrasts, fmri_glm.mask
def first_level(subject_dic):
    """Single-subject GLM for the localizer protocol.

    Builds one design matrix per functional session, fits a
    multi-session GLM, writes the mask and the z/t/effects/variance
    maps for every contrast under ``<output_dir>/res_stats``, and
    generates an HTML stats report.

    Parameters
    ----------
    subject_dic: dict
        must provide 'func' (list of session fMRI images), 'anat',
        'output_dir', 'subject_id' and 'session_id'.

    Returns
    -------
    dict mapping contrast id to the path of the saved z-map.
    """
    # experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.4
    drift_model = 'blank'
    hrf_model = 'canonical'  # hemodynamic reponse function
    hfcut = 128.
    n_scans = 128  # NOTE(review): assumes 128 scans per session -- confirm

    # make design matrices (one per session; the localizer paradigm and
    # timing are identical across sessions)
    mask_images = []
    design_matrices = []
    fmri_files = subject_dic['func']
    for _ in range(len(fmri_files)):
        paradigm = paradigm_contrasts.localizer_paradigm()
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        design_matrix = make_dmtx(
            frametimes,
            paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut,
            )
        design_matrices.append(design_matrix)

    # Specify contrasts (all sessions share the same design)
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    # create output directory
    subject_session_output_dir = os.path.join(subject_dic['output_dir'],
                                              'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    # Fit GLM: mask computed from the data, AR(1) noise model
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(
        fmri_files,
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute'
        )
    fmri_glm.fit(do_scaling=True, model='ar1')

    # save computed mask
    mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    print("Saving mask image %s" % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)
    mask_images.append(mask_path)

    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        # replicate the contrast across sessions (the original hard-coded
        # a single entry, which only covers one session)
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
            [contrast_val] * len(design_matrices),
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True
            )

        # store stat maps to disk
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(
                map_dir,
                '%s%s.nii.gz' % (subject_dic['subject_id'], contrast_id))
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # do stats report
    anat_img = nibabel.load(subject_dic['anat'])
    stats_report_filename = os.path.join(subject_session_output_dir,
                                         "report_stats.html")
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.mask,
        threshold=2.3,
        cluster_th=15,
        # BUGFIX: pass the anatomical data array (as the sibling
        # first_level implementation in this file does), not the
        # nibabel image object
        anat=anat_img.get_data(),
        anat_affine=anat_img.get_affine(),
        design_matrices=design_matrix,
        # BUGFIX: report the actual subject, not a hard-coded "sub001"
        subject_id=subject_dic['subject_id'],
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_dic['session_id'],
        # additional ``kwargs`` for more informative report
        paradigm=paradigm.__dict__,
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )
    ProgressReport().finish_dir(subject_session_output_dir)
    print("Statistic report written to %s\r\n" % stats_report_filename)

    return z_maps
# ---- Minimal FMRILinearModel demo on simulated data (script fragment) ----
from nipy.labs.utils.simul_multisubject_fmri_dataset import \
    surrogate_4d_dataset
from nipy.modalities.fmri.glm import FMRILinearModel

shape = (10, 10, 10)  # simulate a cubic image
n_scans = 100  # equal to the number of frametimes

fmri_data = surrogate_4d_dataset(shape=shape, n_scans=n_scans)

# run the GLM (X is a design matrix built elsewhere in the script)
my_glm = FMRILinearModel(fmri_data, X.matrix)

# GLM fitting
my_glm.fit(do_scaling=True, model='ar1')

# t-contrast over the 7 design columns -- presumably "listen minus read";
# verify against the design-matrix column order
listen_vs_read = array([1, -1, 0, 0, 0, 0, 0])
z_map, = my_glm.contrast(listen_vs_read)
def group_one_sample_t_test(masks, effects_maps, contrasts, output_dir,
                            start_time=base_reporter.pretty_time(),
                            **kwargs):
    """
    Runs a one-sample t-test procedure for group analysis. Here, we are
    for each experimental condition, only interested refuting the null
    hypothesis H0: "The average effect accross the subjects is zero!"

    Parameters
    ----------
    masks: list of strings or nibabel image objects
        subject masks, one per subject

    effects_maps: list of dicts of lists
        effects maps from subject-level GLM; each entry is a dictionary;
        each entry (indexed by condition id) of this dictionary is the
        filename (or correspinding nibabel image object) for the effects
        maps for that condition (aka contrast),for that subject

    contrasts: dictionary of array_likes
        contrasts vectors, indexed by condition id

    kwargs: dict_like
        parameters can be regular `nipy.labs.viz.plot_map` parameters
        (e.g slicer="y") or any parameter we want be reported
        (e.g fwhm=[5, 5, 5])

    Returns
    -------
    dict mapping contrast id to the path of the group-level z-map.
    """
    # make output directory
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    assert len(masks) == len(effects_maps), (len(masks), len(effects_maps))

    # compute group mask (intersection of the subject masks)
    group_mask = nibabel.Nifti1Image(
        intersect_masks(masks).astype(np.int8),
        (nibabel.load(masks[0]) if isinstance(
            masks[0], basestring) else masks[0]).get_affine())

    # construct design matrix (only one covariate, namely the "mean effect")
    design_matrix = np.ones(len(effects_maps)
                            )[:, np.newaxis]  # only the intercept

    group_level_z_maps = {}
    group_level_t_maps = {}
    for contrast_id in contrasts:
        print("\tcontrast id: %s" % contrast_id)

        # effects maps will be the input to the second level GLM
        first_level_image = nibabel.concat_images(
            [x[contrast_id] for x in effects_maps])

        # fit 2nd level GLM for given contrast
        group_model = FMRILinearModel(first_level_image,
                                      design_matrix, group_mask)
        group_model.fit(do_scaling=False, model='ols')

        # specify and estimate the contrast
        contrast_val = np.array(([[1.]])
                                )  # the only possible contrast !
        z_map, t_map = group_model.contrast(
            contrast_val,
            con_id='one_sample %s' % contrast_id,
            output_z=True,
            output_stat=True)

        # save map
        for map_type, map_img in zip(["z", "t"], [z_map, t_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, 'group_level_%s.nii.gz' % (
                contrast_id))
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(map_img, map_path)
            if map_type == "z":
                group_level_z_maps[contrast_id] = map_path
            elif map_type == "t":
                # BUGFIX: the t-map used to be stored into the z-maps
                # dict, clobbering the z-map path and leaving
                # group_level_t_maps forever empty
                group_level_t_maps[contrast_id] = map_path

    # do stats report
    stats_report_filename = os.path.join(
        output_dir, "report_stats.html")
    generate_subject_stats_report(stats_report_filename, contrasts,
                                  group_level_z_maps, group_mask,
                                  start_time=start_time, **kwargs)

    print("\r\nStatistic report written to %s\r\n" % (
        stats_report_filename))

    return group_level_z_maps
# ---- Block paradigm + GLM (script fragment) ----
# one block onset every 12 scans, starting at scan 6
onset = np.arange(6, N_SCANS, 12) * TR
paradigm = BlockParadigm(con_id=conditions, onset=onset,
                         duration=duration)
frametimes = np.linspace(0, (N_SCANS - 1) * TR, N_SCANS)
design_mat = design_matrix.make_dmtx(frametimes, paradigm,
                                     hrf_model='Canonical',
                                     drift_model='Cosine', hfcut=168)
design_mat.show()

# general linear model
glm = FMRILinearModel(input_image, design_mat.matrix, mask='compute')
glm.fit()

# contrasts: t-contrast on the first design column only
c = np.hstack([1, np.zeros(len(design_mat.names)-1)])
z_map, t_map, eff_map, var_map = glm.contrast(
    c, contrast_type='t', output_z=True, output_stat=True,
    output_effects=True, output_variance=True)

# save maps
for subject_glm_results in group_glm_inputs] contrasts = group_glm_inputs[0][0] sujects_effects_maps = [subject_glm_results[1] for subject_glm_results in group_glm_inputs] group_level_z_maps = {} design_matrix = np.ones(len(sujects_effects_maps) )[:, np.newaxis] # only the intercept for contrast_id in contrasts: print "\tcontrast id: %s" % contrast_id # effects maps will be the input to the second level GLM first_level_image = nibabel.concat_images( [x[contrast_id] for x in sujects_effects_maps]) # fit 2nd level GLM for given contrast group_model = FMRILinearModel(first_level_image, design_matrix, group_mask) group_model.fit(do_scaling=False, model='ols') # specify and estimate the contrast contrast_val = np.array(([[1.]])) # the only possible contrast ! z_map, = group_model.contrast(contrast_val, con_id='one_sample %s' % contrast_id, output_z=True) # save map map_dir = os.path.join(output_dir, 'z_maps') if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join(map_dir, '2nd_level_%s.nii.gz' % ( contrast_id)) print "\t\tWriting %s ..." % map_path
def execute_glm(doc, out_dir, contrast_definitions=None,
                outputs=None, glm_model='ar1',
                ):
    """Function to execute GLM for one subject --and perhaps multiple
    sessions thereof

    Parameters
    ----------
    doc: dict-like
        subject document; the code reads 'subject', 'mask', 'study',
        'TR', 'n_scans' and 'n_sessions' from it
    out_dir: string
        root output directory; results go to ``<out_dir>/<subject>``
    contrast_definitions: optional
        if given, passed to ``make_contrasts`` to (re)define the
        contrasts instead of those loaded from the doc
    outputs: dict, optional
        export flags forwarded to ``_export``; 'maps' is always forced
        to False here
    glm_model: string
        noise model passed to ``FMRILinearModel.fit`` (default 'ar1')

    Returns
    -------
    None; writes beta maps, stat maps and an HTML report to disk.
    """
    stats_start_time = time.ctime()

    # study_dir = os.path.join(out_dir, doc['study'])

    if outputs is None:
        outputs = {'maps': False,
                   'data': False,
                   'mask': True,
                   'model': True,
                   }
    else:
        outputs['maps'] = False

    subject_id = doc['subject']
    subject_output_dir = os.path.join(
        out_dir, subject_id)

    _export(doc, subject_output_dir, outputs=outputs)

    params = load_glm_params(doc)

    # instantiate GLM
    fmri_glm = FMRILinearModel(params['data'],
                               params['design_matrices'],
                               doc['mask'])

    # fit GLM
    fmri_glm.fit(do_scaling=True, model=glm_model)

    # save beta-maps to disk (one text file per session GLM)
    beta_map_dir = os.path.join(subject_output_dir, 'beta_maps')
    if not os.path.exists(beta_map_dir):
        os.makedirs(beta_map_dir)
    for j, glm in zip(range(len(fmri_glm.glms)), fmri_glm.glms):
        # XXX save array in some compressed format
        np.savetxt(os.path.join(beta_map_dir, "beta_map_%i.txt" % j),
                   glm.get_beta(),  # array has shape (n_conditions, n_voxels)
                   )

    # define contrasts
    if contrast_definitions is not None:
        params['contrasts'] = make_contrasts(params, contrast_definitions)
    # sorted contrast ids, taken from the first session's contrast dict
    contrasts = sorted(params['contrasts'][0].keys())

    _contrasts = {}
    z_maps = {}

    # compute stats maps
    for index, contrast_id in enumerate(contrasts):
        print ' study[%s] subject[%s] contrast [%s]: %i/%i' % (
            doc['study'], doc['subject'], contrast_id,
            index + 1, len(contrasts)
            )

        # one contrast vector per session
        contrast = [c[contrast_id] for c in params['contrasts']]
        contrast_name = contrast_id.replace(' ', '_')

        z_map, t_map, c_map, var_map = fmri_glm.contrast(
            contrast,
            con_id=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True,)

        # write each output map under its own subdirectory
        for dtype, out_map in zip(['z', 't', 'c', 'variance'],
                                  [z_map, t_map, c_map, var_map]):
            map_dir = os.path.join(subject_output_dir, '%s_maps' % dtype)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' %
                                    contrast_name)
            nb.save(out_map, map_path)

            # collect z map
            if dtype == 'z':
                _contrasts[contrast_name] = contrast
                z_maps[contrast_name] = map_path

    # invoke a single API to handle plotting and html business for you
    subject_stats_report_filename = os.path.join(
        subject_output_dir, "report_stats.html")
    glm_reporter.generate_subject_stats_report(
        subject_stats_report_filename,
        _contrasts,
        z_maps,
        doc['mask'],
        design_matrices=list(params['design_matrices']),
        subject_id=doc['subject'],
        cluster_th=15,  # 15 voxels
        start_time=stats_start_time,
        TR=doc['TR'],
        n_scans=doc['n_scans'],
        n_sessions=doc['n_sessions'],
        model=glm_model,
        )

    print "Report for subject %s written to %s" % (
        doc['subject'],
        subject_stats_report_filename)
######################################### # simplest ones contrasts = {} n_columns = len(design_matrix.names) for i in range(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i] # Our contrast of interest reading_vs_visual = contrasts["phrasevideo"] - contrasts["damier_H"] ######################################## # Perform a GLM analysis on H1 ######################################## fmri_glm = FMRILinearModel(fmri_data, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') # Estimate the contrast z_map, = fmri_glm.contrast(reading_vs_visual, output_z=True) # Plot the contrast vmax = max(-z_map.get_data().min(), z_map.get_data().max()) plot_map(z_map.get_data(), z_map.get_affine(), cmap=cm.cold_hot, vmin=-vmax, vmax=vmax, slicer='z', black_bg=True, threshold=2.5,
# Instantiate the parser parser = argparse.ArgumentParser( description='Please provide a patient directory path') # Required patient argument parser.add_argument('path', help='Patient Path Directory') args = parser.parse_args() if args.path is None: parser.error("Directory Path should be added as argument") fmri_files = [f for f in listdir(args.path) if isfile(join(args.path, f))] mask_file = mask_generator.make_mask(args.path, "./Group_Mask") design_files = design_matrix_generator.make_design(args.path) multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) # GLM fitting multi_session_model.fit(do_scaling=True, model='ar1') # Compute the required contrast print('Computing test contrast image...') n_regressors = [np.load(f)['arr_0'].shape[1] for f in design_files] con = [np.hstack((cvect, np.zeros(nr - len(cvect)))) for nr in n_regressors] z_map, = multi_session_model.contrast(con) # Show Z-map image mean_map = multi_session_model.means[0] print(mean_map) plot_map(z_map.get_data(),
def group_one_sample_t_test(masks, effects_maps, contrasts, output_dir,
                            start_time=base_reporter.pretty_time(),
                            **kwargs):
    """
    Runs a one-sample t-test procedure for group analysis. Here, we are
    for each experimental condition, only interested refuting the null
    hypothesis H0: "The average effect accross the subjects is zero!"

    Parameters
    ----------
    masks: list of strings or nibabel image objects
        subject masks, one per subject

    effects_maps: list of dicts of lists
        effects maps from subject-level GLM; each entry is a dictionary;
        each entry (indexed by condition id) of this dictionary is the
        filename (or correspinding nibabel image object) for the effects
        maps for that condition (aka contrast),for that subject

    contrasts: dictionary of array_likes
        contrasts vectors, indexed by condition id

    kwargs: dict_like
        parameters can be regular `nipy.labs.viz.plot_map` parameters
        (e.g slicer="y") or any parameter we want be reported
        (e.g fwhm=[5, 5, 5])

    Returns
    -------
    dict mapping contrast id to the path of the group-level z-map.
    """
    # make output directory
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    assert len(masks) == len(effects_maps), (len(masks), len(effects_maps))

    # compute group mask (intersection of the subject masks)
    group_mask = nibabel.Nifti1Image(
        intersect_masks(masks).astype(np.int8),
        (nibabel.load(masks[0])
         if isinstance(masks[0], basestring)
         else masks[0]).get_affine())

    # construct design matrix (only one covariate, namely the "mean effect")
    design_matrix = np.ones(
        len(effects_maps))[:, np.newaxis]  # only the intercept

    group_level_z_maps = {}
    group_level_t_maps = {}
    for contrast_id in contrasts:
        print("\tcontrast id: %s" % contrast_id)

        # effects maps will be the input to the second level GLM
        first_level_image = nibabel.concat_images(
            [x[contrast_id] for x in effects_maps])

        # fit 2nd level GLM for given contrast
        group_model = FMRILinearModel(first_level_image,
                                      design_matrix, group_mask)
        group_model.fit(do_scaling=False, model='ols')

        # specify and estimate the contrast
        contrast_val = np.array(([[1.]]))  # the only possible contrast !
        z_map, t_map = group_model.contrast(
            contrast_val,
            con_id='one_sample %s' % contrast_id,
            output_z=True,
            output_stat=True)

        # save map
        for map_type, map_img in zip(["z", "t"], [z_map, t_map]):
            map_dir = os.path.join(output_dir, '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir,
                                    'group_level_%s.nii.gz' % (contrast_id))
            print("\t\tWriting %s ..." % map_path)
            nibabel.save(map_img, map_path)
            if map_type == "z":
                group_level_z_maps[contrast_id] = map_path
            elif map_type == "t":
                # BUGFIX: the t-map used to be stored into the z-maps
                # dict, clobbering the z-map path and leaving
                # group_level_t_maps forever empty
                group_level_t_maps[contrast_id] = map_path

    # do stats report
    stats_report_filename = os.path.join(output_dir, "report_stats.html")
    generate_subject_stats_report(stats_report_filename, contrasts,
                                  group_level_z_maps, group_mask,
                                  start_time=start_time, **kwargs)

    print("\r\nStatistic report written to %s\r\n" % (stats_report_filename))

    return group_level_z_maps
contrasts["left-right"] = contrasts["left"] - contrasts["right"] contrasts["right-left"] = contrasts["right"] - contrasts["left"] contrasts["audio-video"] = contrasts["audio"] - contrasts["video"] contrasts["video-audio"] = contrasts["video"] - contrasts["audio"] contrasts["computation-sentences"] = contrasts["computation"] - \ contrasts["sentences"] contrasts["reading-visual"] = contrasts["sentences"] * 2 - \ contrasts["damier_H"] - contrasts["damier_V"] contrasts['effects_of_interest'] = np.eye(25)[:20:2] ######################################## # Perform a GLM analysis ######################################## print 'Fitting a GLM (this takes time)...' fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################### # Estimate the contrasts ######################################### print 'Computing contrasts...' for index, (contrast_id, contrast_val) in enumerate(contrasts.iteritems()): print ' Contrast % 2i out of %i: %s' % ( index + 1, len(contrasts), contrast_id) # save the z_image image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id) z_map, = fmri_glm.contrast(contrast_val, con_id=contrast_id, output_z=True) save(z_map, image_path)
exit(1) try: cvect = [float(arg) for arg in args] except ValueError: print(USAGE) exit(1) # Input files fmri_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1.nii.gz', 'run2.nii.gz']] design_files = [example_data.get_filename('fiac', 'fiac0', run) for run in ['run1_design.npz', 'run2_design.npz']] mask_file = example_data.get_filename('fiac', 'fiac0', 'mask.nii.gz') # Load all the data multi_session_model = FMRILinearModel(fmri_files, design_files, mask_file) # GLM fitting multi_session_model.fit(do_scaling=True, model='ar1') # Compute the required contrast print('Computing test contrast image...') n_regressors = [np.load(f)['X'].shape[1] for f in design_files] con = [np.hstack((cvect, np.zeros(nr - len(cvect)))) for nr in n_regressors] z_map, = multi_session_model.contrast(con) # Show Z-map image mean_map = multi_session_model.means[0] plot_map(z_map.get_data(), z_map.get_affine(), anat=mean_map.get_data(),
# specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i] # more interesting contrasts contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled'] contrasts['scrambled-faces'] = -contrasts['faces-scrambled'] contrasts['effects_of_interest'] = contrasts['faces'] + contrasts['scrambled'] # fit GLM print 'Fitting a GLM (this takes time)...' fmri_glm = FMRILinearModel( [nibabel.concat_images(x) for x in subject_data.func], [design_matrix.matrix for design_matrix in design_matrices], mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print "Saving mask image %s" % mask_path nibabel.save(fmri_glm.mask, mask_path) mask_images.append(mask_path) # compute contrasts z_maps = {} effects_maps = {} for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, t_map, effects_map, var_map = fmri_glm.contrast(
# ---- Design matrix, GLM fit and beta images (script fragment) ----
design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model,
                          drift_model=drift_model, hfcut=hfcut)

# plot and save the design matrix
ax = design_matrix.show()
ax.set_position([.05, .25, .9, .65])
ax.set_title('Design matrix')
plt.savefig(path.join(write_dir, 'design_matrix.png'))
dim = design_matrix.matrix.shape[1]

########################################
# Perform a GLM analysis
########################################
print 'Fitting a GLM (this takes time)...'
fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

########################################
# Output beta and variance images
########################################
beta_hat = fmri_glm.glms[0].get_beta()  # Least-squares estimates of the beta
variance_hat = fmri_glm.glms[0].get_mse()  # Estimates of the variance
mask = fmri_glm.mask.get_data() > 0

# output beta images: scatter the per-voxel estimates back into a 4D
# volume over the mask (one 3D volume per regressor)
beta_map = np.tile(mask.astype(np.float)[..., np.newaxis], dim)
beta_map[mask] = beta_hat.T
beta_image = Nifti1Image(beta_map, fmri_glm.affine)
beta_image.get_header()['descrip'] = (
    'Parameter estimates of the localizer dataset')
# simplest ones contrasts = {} n_columns = len(design_matrix.names) for i in range(paradigm.n_conditions): contrasts['%s' % design_matrix.names[i]] = np.eye(n_columns)[i] # and more complex/ interesting ones contrasts['left'] = contrasts['clicGaudio'] + contrasts['clicGvideo'] contrasts['right'] = contrasts['clicDaudio'] + contrasts['clicDvideo'] ######################################## # Perform a GLM analysis ######################################## print('Fitting a General Linear Model') fmri_glm = FMRILinearModel(data_path, design_matrix.matrix, mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') ######################################### # Estimate the contrasts ######################################### contrast_id = 'left_right_motor_min' z_map, effects_map = fmri_glm.contrast( np.vstack((contrasts['left'], contrasts['right'])), contrast_type='tmin-conjunction', output_z=True, output_effects=True) z_image_path = path.join(write_dir, '%s_z_map.nii' % contrast_id) save(z_map, z_image_path) contrast_path = path.join(write_dir, '%s_con.nii' % contrast_id) save(effects_map, contrast_path)
def preprocess_files(self, func_files, anat_files=None, verbose=1):
    """Fit a per-run GLM and write one beta image per condition.

    For each BIDS-style ``*_bold.nii.gz`` file, reads the matching
    ``*_events.tsv``, builds an event-related design matrix, fits a GLM
    and saves one ``*_beta-<cond>.nii.gz`` per condition.  Runs whose
    beta files already exist are skipped.

    Parameters
    ----------
    func_files: list of str
        paths to ``*_bold.nii.gz`` functional runs
    anat_files: unused here (kept for interface compatibility)
    verbose: int
        print progress when >= 0

    Returns
    -------
    list of str: paths of all beta image files (existing or created).
    """
    import pandas as pd
    import nibabel
    import nipy.modalities.fmri.design_matrix as dm
    from nilearn.image import index_img
    from nilearn.masking import compute_epi_mask
    from nipy.modalities.fmri.glm import FMRILinearModel
    from nipy.modalities.fmri.experimental_paradigm import (
        EventRelatedParadigm)

    def get_beta_filepath(func_file, cond):
        # derive the beta filename for a condition from the bold filename
        return func_file.replace('_bold.nii.gz', '_beta-%s.nii.gz' % cond)

    beta_files = []
    for fi, func_file in enumerate(func_files):
        # Don't re-do preprocessing.
        # beta_mask = func_file.replace('_bold.nii.gz', '_beta*.nii.gz')
        cond_file = func_file.replace('_bold.nii.gz', '_events.tsv')
        cond_data = pd.read_csv(cond_file, sep='\t')

        # Get condition info, to search if betas have been done.
        conditions = cond_data['trial_type'].tolist()
        all_conds = np.unique(conditions)
        all_beta_files = [get_beta_filepath(func_file, cond)
                          for cond in all_conds]
        # All betas are done.
        if np.all([op.exists(f) for f in all_beta_files]):
            beta_files += all_beta_files
            continue

        if verbose >= 0:
            print('Preprocessing file %d of %d' % (
                fi + 1, len(func_files)))

        # Need to do regression.
        # NOTE(review): TR is inferred as the mean event *duration*,
        # which only holds if events are TR-locked -- confirm; also
        # pandas .as_matrix() is deprecated in favor of .values
        tr = cond_data['duration'].as_matrix().mean()
        onsets = cond_data['onset'].tolist()
        img = nibabel.load(func_file)
        n_scans = img.shape[3]
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)

        # Create the design matrix
        paradigm = EventRelatedParadigm(conditions, onsets)
        design_mat = dm.make_dmtx(frametimes, paradigm,
                                  drift_model='cosine', hfcut=n_scans,
                                  hrf_model='canonical')

        # Do the GLM
        mask_img = compute_epi_mask(img)
        fmri_glm = FMRILinearModel(img, design_mat.matrix, mask=mask_img)
        fmri_glm.fit(do_scaling=True, model='ar1')

        # Pull out the betas
        beta_hat = fmri_glm.glms[0].get_beta()  # Least-squares estimates
        mask = fmri_glm.mask.get_data() > 0

        # output beta images: scatter the estimates back into a 4D
        # volume over the mask, one 3D volume per design column
        dim = design_mat.matrix.shape[1]
        beta_map = np.tile(mask.astype(np.float)[..., np.newaxis], dim)
        beta_map[mask] = beta_hat.T
        beta_image = nibabel.Nifti1Image(beta_map, fmri_glm.affine)
        beta_image.get_header()['descrip'] = ("Parameter estimates of "
                                              "the localizer dataset")

        # Save beta images
        # NOTE(review): assumes np.unique(conditions) order matches the
        # leading design-matrix columns -- confirm against design_mat.names
        for ci, cond in enumerate(np.unique(conditions)):
            beta_cond_img = index_img(beta_image, ci)
            beta_filepath = get_beta_filepath(func_file, cond)
            nibabel.save(beta_cond_img, beta_filepath)
            beta_files.append(beta_filepath)

    return beta_files