# do stats report stats_report_filename = os.path.join(subject_session_output_dir, "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, threshold=2.3, cluster_th=15, anat=anat_img.get_data(), anat_affine=anat_img.get_affine(), design_matrices=design_matrix, subject_id="sub001", start_time=stats_start_time, title="GLM for subject %s, session %s" % (subject_data.subject_id, subject_data.session_id[x] ), # additional ``kwargs`` for more informative report paradigm=paradigm.__dict__, TR=tr, n_scans=n_scans, hfcut=hfcut, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model, ) ProgressReport().finish_dir(subject_session_output_dir) print "Statistic report written to %s\r\n" % stats_report_filename
map_dir = os.path.join(output_dir, 'z_maps') if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join(map_dir, '2nd_level_%s.nii.gz' % ( contrast_id)) print "\t\tWriting %s ..." % map_path nibabel.save(z_map, map_path) group_level_z_maps[contrast_id] = map_path # do stats report stats_report_filename = os.path.join(output_dir, "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, group_level_z_maps, group_mask, threshold=threshold, cluster_th=cluster_th, design_matrices=[design_matrix], subject_id="sub001", start_time=stats_start_time, title='Group GLM for br41nh4ck', slicer=slicer, cut_coords=cut_coords ) ProgressReport().finish_dir(output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename
def _preprocess_and_analysis_subject(subject_data, do_normalize=False, fwhm=0., slicer='z', cut_coords=6, threshold=3., cluster_th=15 ): """ Preprocesses the subject and then fits (mass-univariate) GLM thereupon. """ # sanitize run_ids: # Sub14/BOLD/Run_02/fMR09029-0004-00010-000010-01.nii is garbage, # for example run_ids = range(9) if subject_data['subject_id'] == "Sub14": run_ids = [0] + range(2, 9) subject_data['func'] = [subject_data['func'][0]] + subject_data[ 'func'][2:] subject_data['session_id'] = [subject_data['session_id'][0] ] + subject_data['session_id'][2:] # sanitize subject output dir if not 'output_dir' in subject_data: subject_data['output_dir'] = os.path.join( output_dir, subject_data['subject_id']) # preprocess the data subject_data = do_subject_preproc(SubjectData(**subject_data), do_realign=True, do_coreg=True, do_report=False, do_cv_tc=False ) assert not subject_data.anat is None # norm if do_normalize: subject_data = nipype_do_subject_preproc( subject_data, do_realign=False, do_coreg=False, do_segment=True, do_normalize=True, func_write_voxel_sizes=[3, 3, 3], anat_write_voxel_sizes=[2, 2, 2], fwhm=fwhm, hardlink_output=False, do_report=False ) # chronometry stats_start_time = pretty_time() # to-be merged lists, one item per run paradigms = [] frametimes_list = [] design_matrices = [] # one list_of_contrast_dicts = [] # one dict per run n_scans = [] for run_id in run_ids: _n_scans = len(subject_data.func[run_id]) n_scans.append(_n_scans) # make paradigm paradigm = make_paradigm(getattr(subject_data, 'timing')[run_id]) # make design matrix tr = 2. drift_model = 'Cosine' hrf_model = 'Canonical With Derivative' hfcut = 128. 
frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans) design_matrix = make_dmtx( frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs=np.loadtxt(getattr(subject_data, 'realignment_parameters')[run_id]), add_reg_names=[ 'Translation along x axis', 'Translation along yaxis', 'Translation along z axis', 'Rotation along x axis', 'Rotation along y axis', 'Rotation along z axis' ] ) # import matplotlib.pyplot as plt # design_matrix.show() # plt.show() paradigms.append(paradigm) design_matrices.append(design_matrix) frametimes_list.append(frametimes) n_scans.append(_n_scans) # specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye( n_columns)[2 * i] # more interesting contrasts""" contrasts['Famous-Unfamiliar'] = contrasts[ 'Famous'] - contrasts['Unfamiliar'] contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar'] contrasts['Famous-Scrambled'] = contrasts[ 'Famous'] - contrasts['Scrambled'] contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled'] contrasts['Unfamiliar-Scrambled'] = contrasts[ 'Unfamiliar'] - contrasts['Scrambled'] contrasts['Scrambled-Unfamiliar'] = -contrasts['Unfamiliar-Scrambled'] list_of_contrast_dicts.append(contrasts) # importat maps z_maps = {} effects_maps = {} # fit GLM print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FMRILinearModel([nibabel.concat_images(sess_func) for sess_func in subject_data.func], [design_matrix.matrix for design_matrix in design_matrices], mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') print "... done.\r\n" # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print "Saving mask image to %s ..." % mask_path nibabel.save(fmri_glm.mask, mask_path) print "... 
done.\r\n" # replicate contrasts across runs contrasts = dict((cid, [contrasts[cid] for contrasts in list_of_contrast_dicts]) for cid, cval in contrasts.iteritems()) # compute effects for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map = fmri_glm.contrast( contrast_val, con_id=contrast_id, output_z=True, output_stat=False, output_effects=True, output_variance=False ) # store stat maps to disk for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]): map_dir = os.path.join( subject_data.output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s.nii.gz' % contrast_id) print "\t\tWriting %s ..." % map_path nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path # remove repeated contrasts contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) # do stats report stats_report_filename = os.path.join(getattr(subject_data, 'reports_output_dir', subject_data.output_dir), "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, anat=nibabel.load(subject_data.anat).get_data(), anat_affine=nibabel.load(subject_data.anat).get_affine(), design_matrices=design_matrices, subject_id=subject_data.subject_id, start_time=stats_start_time, title="GLM for subject %s" % subject_data.subject_id, # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm=dict(("Run_%02i" % (run_id + 1), paradigms[run_id].__dict__) for run_id in run_ids), frametimes=dict(("Run_%02i" % (run_id + 1), frametimes_list[run_id]) for run_id in run_ids), # fwhm=fwhm ) 
ProgressReport().finish_dir(subject_data.output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename return contrasts, effects_maps, z_maps, mask_path
def first_level(subject_dic): # experimental paradigm meta-params stats_start_time = time.ctime() tr = 2.4 drift_model = 'blank' hrf_model = 'canonical' # hemodynamic reponse function hfcut = 128. n_scans = 128 # make design matrices mask_images = [] design_matrices = [] fmri_files = subject_dic['func'] for x in xrange(len(fmri_files)): paradigm = paradigm_contrasts.localizer_paradigm() # build design matrix frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans) design_matrix = make_dmtx( frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, ) design_matrices.append(design_matrix) # Specify contrasts contrasts = paradigm_contrasts.localizer_contrasts(design_matrix) #create output directory subject_session_output_dir = os.path.join(subject_dic['output_dir'], 'res_stats') if not os.path.exists(subject_session_output_dir): os.makedirs(subject_session_output_dir) # Fit GLM print 'Fitting a GLM (this takes time)...' fmri_glm = FMRILinearModel(fmri_files, [design_matrix.matrix for design_matrix in design_matrices], mask='compute' ) fmri_glm.fit(do_scaling=True, model='ar1') # save computed mask mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz") print "Saving mask image %s" % mask_path nibabel.save(fmri_glm.mask, mask_path) mask_images.append(mask_path) # compute contrasts z_maps = {} effects_maps = {} for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, t_map, effects_map, var_map = fmri_glm.contrast( [contrast_val] * 1, con_id=contrast_id, output_z=True, output_stat=True, output_effects=True, output_variance=True ) # store stat maps to disk for map_type, out_map in zip(['z', 't', 'effects', 'variance'], [z_map, t_map, effects_map, var_map]): map_dir = os.path.join( subject_session_output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s%s.nii.gz' %(subject_dic['subject_id'], contrast_id)) print "\t\tWriting 
%s ..." % map_path nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path # do stats report anat_img = nibabel.load(subject_dic['anat']) stats_report_filename = os.path.join(subject_session_output_dir, "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, threshold=2.3, cluster_th=15, anat=anat_img, anat_affine=anat_img.get_affine(), design_matrices=design_matrix, subject_id="sub001", start_time=stats_start_time, title="GLM for subject %s" % subject_dic['session_id'], # additional ``kwargs`` for more informative report paradigm=paradigm.__dict__, TR=tr, n_scans=n_scans, hfcut=hfcut, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model, ) ProgressReport().finish_dir(subject_session_output_dir) print "Statistic report written to %s\r\n" % stats_report_filename return z_maps
def first_level(subject_id):
    '''Launch the first-level analysis for one subject.

    The ROOTDIR environment variable must point to the directory where
    the data are stored, and has to be set up by the user.
    See: https://github.com/neurospin/pypreprocess/
    Type: python first_level.py

    Keyword arguments:
    subject_id -- Name of the subject (BIDS-style directory name)

    Returns a dict mapping each contrast id to the path of its saved
    z-map.
    '''
    # Configure paths (raw BIDS data in, processed stats out)
    data_dir = os.path.join(os.environ["ROOTDIR"], "dataset", "bids_dataset",
                            subject_id)
    output_dir = os.path.join(os.environ["ROOTDIR"], "processed_data",
                              subject_id)
    subject_session_output_dir = os.path.join(output_dir, 'res_stats')
    if not os.path.exists(subject_session_output_dir):
        os.makedirs(subject_session_output_dir)

    # Experimental paradigm meta-params
    stats_start_time = time.ctime()
    tr = 2.4
    drift_model = 'blank'
    hrf_model = 'spm'  # hemodynamic reponse function
    hfcut = 128.
    # NOTE(review): n_scans is hard-coded; assumed to match the length
    # of the BOLD run globbed below -- confirm against the data
    n_scans = 128

    # Preparation of paradigm: pick the (single expected) events file
    events_file = glob.glob(
        os.path.join(data_dir, 'func/*_task-standartloc_events.tsv'))[0]
    print events_file
    paradigm = paradigm_contrasts.localizer_paradigm(events_file)

    # Build design matrix
    frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    design_matrix = make_design_matrix(
        frametimes,
        paradigm, hrf_model=hrf_model,
        drift_model=drift_model, period_cut=hfcut,
        )

    # Specify contrasts
    contrasts = paradigm_contrasts.localizer_contrasts(design_matrix)

    # Fit GLM on the normalized+realigned ('wra' prefix) BOLD image
    fmri_file = glob.glob(
        os.path.join(output_dir, 'func/wra*_task-standartloc_bold.nii.gz'))[0]
    print 'Fitting a GLM (this takes time)...'
    fmri_glm = FirstLevelGLM(noise_model='ar1',
                             standardize=False).fit(fmri_file,
                                                    design_matrix)

    # Save computed mask
    mask_images = []
    mask_path = os.path.join(subject_session_output_dir, "mask.nii.gz")
    nibabel.save(fmri_glm.masker_.mask_img_, mask_path)
    mask_images.append(mask_path)

    # Compute contrasts: z, t, effects and variance maps per contrast
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, t_map, effects_map, var_map = fmri_glm.transform(
            [contrast_val] * 1,
            contrast_name=contrast_id,
            output_z=True,
            output_stat=True,
            output_effects=True,
            output_variance=True)

        # Store stat maps to disk, one sub-directory per map type
        for map_type, out_map in zip(['z', 't', 'effects', 'variance'],
                                     [z_map, t_map, effects_map, var_map]):
            map_dir = os.path.join(subject_session_output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir,
                                    '%s%s.nii.gz' % (subject_id,
                                                     contrast_id))
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # Do stats report against the normalized ('w' prefix) anatomy
    anat_file = glob.glob(os.path.join(output_dir, 'anat/w*_T1w.nii.gz'))[0]
    anat_img = nibabel.load(anat_file)
    stats_report_filename = os.path.join(subject_session_output_dir,
                                         "report_stats.html")
    generate_subject_stats_report(
        stats_report_filename,
        contrasts,
        z_maps,
        fmri_glm.masker_.mask_img_,
        threshold=2.3,
        cluster_th=15,
        anat=anat_img,
        anat_affine=anat_img.get_affine(),
        design_matrices=[design_matrix],
        paradigm=paradigm,
        subject_id=subject_id,
        start_time=stats_start_time,
        title="GLM for subject %s" % subject_id,

        # additional ``kwargs`` for more informative report
        TR=tr,
        n_scans=n_scans,
        hfcut=hfcut,
        frametimes=frametimes,
        drift_model=drift_model,
        hrf_model=hrf_model,
        )
    ProgressReport().finish_dir(subject_session_output_dir)
    print "Statistic report written to %s\r\n" % stats_report_filename

    return z_maps
# do stats report stats_report_filename = os.path.join(subject_data.reports_output_dir, "report_stats.html") contrasts = dict((contrast_id, contrasts[contrast_id]) for contrast_id in z_maps.keys()) generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, design_matrices=[design_matrix], subject_id=subject_data.subject_id, anat=anat, anat_affine=anat_affine, slicer='ortho', cluster_th=50, # we're only interested in this 'large' clusters start_time=stats_start_time, # additional ``kwargs`` for more informative report paradigm=paradigm.__dict__, TR=tr, nscans=nscans, hfcut=hfcut, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model, ) print "\r\nStatistic report written to %s\r\n" % stats_report_filename
def run_suject_level1_glm(subject_data_dir, subject_output_dir, task_id, readout_time=.01392, # seconds tr=.72, do_preproc=False, do_realign=False, do_normalize=False, fwhm=0., report=False, hrf_model="Canonical with Derivative", drift_model="Cosine", hfcut=100, regress_motion=True, slicer='y', cut_coords=6, threshold=3., cluster_th=15 ): """ Function to do preproc + analysis for a single HCP subject (task fMRI) """ # sanitize subject data_dir subject_id = int(os.path.basename(subject_data_dir)) subject_data_dir = os.path.abspath(subject_data_dir) _subject_data_dir = os.path.join(subject_data_dir, "MNINonLinear/Results/") add_regs_files = None if do_preproc: if not os.path.exists(subject_output_dir): os.makedirs(subject_output_dir) # glob fmri files fmri_files = [os.path.join( subject_data_dir, "unprocessed/3T/tfMRI_%s_%s/%s_3T_tfMRI_%s_%s.nii.gz" % ( task_id, direction, subject_id, task_id, direction)) for direction in ["LR", "RL"]] assert len(fmri_files) == 2 # glob anat file anat_file = os.path.join(subject_data_dir, "T1w/T1w_acpc_dc_restore_brain.nii.gz") # assert os.path.isfile(anat_file) if not os.path.isfile(anat_file): anat_file = None # distortion correction ? 
dc_output = _do_fmri_distortion_correction( fmri_files, subject_data_dir, subject_output_dir, subject_id, task_id, readout_time=readout_time, report=report ) if dc_output is None: return else: fmri_files, realignment_parameters = dc_output # preprocess the data preproc_subject_data = do_subject_preproc(SubjectData( func=fmri_files, anat=anat_file, output_dir=subject_output_dir), do_realign=True, do_normalize=do_normalize, fwhm=fwhm, report=report ) fmri_files = preproc_subject_data.func n_motion_regressions = 6 if do_realign and regress_motion: add_regs_files = realignment_parameters else: n_motion_regressions = 12 # glob fmri files fmri_files = [] for direction in ['LR', 'RL']: fmri_file = os.path.join( _subject_data_dir, "tfMRI_%s_%s/tfMRI_%s_%s.nii.gz" % ( task_id, direction, task_id, direction)) if not os.path.isfile(fmri_file): print "Can't find task fMRI file %s; skipping subject %s" % ( fmri_file, subject_id) return else: fmri_files.append(fmri_file) # glob movement confounds if regress_motion: add_regs_files = [os.path.join(_subject_data_dir, "tfMRI_%s_%s" % ( task_id, direction), "Movement_Regressors.txt") for direction in ["LR", "RL"]] # smooth images if np.sum(fwhm) > 0: print "Smoothing fMRI data (fwhm = %s)..." % fwhm fmri_files = _do_subject_smooth(SubjectData( func=fmri_files, output_dir=subject_output_dir), fwhm=fwhm, report=False ).func print "... 
done.\r\n" # sanitize subject_output_dir if not os.path.exists(subject_output_dir): os.makedirs(subject_output_dir) # chronometry stats_start_time = pretty_time() # merged lists paradigms = [] frametimes_list = [] design_matrices = [] # fmri_files = [] n_scans = [] for direction, direction_index in zip(['LR', 'RL'], xrange(2)): # glob the design file design_file = os.path.join(_subject_data_dir, "tfMRI_%s_%s" % ( task_id, direction), "tfMRI_%s_%s_hp200_s4_level1.fsf" % ( task_id, direction)) if not os.path.isfile(design_file): print "Can't find design file %s; skipping subject %s" % ( design_file, subject_id) return # read the experimental setup print "Reading experimental setup from %s ..." % design_file fsl_condition_ids, timing_files, fsl_contrast_ids, contrast_values = \ read_design_fsl_design_file(design_file) print "... done.\r\n" # fix timing filenames timing_files = _insert_directory_in_file_name( timing_files, "tfMRI_%s_%s" % (task_id, direction), 1) # make design matrix print "Constructing design matrix for direction %s ..." % direction _n_scans = nibabel.load(fmri_files[direction_index]).shape[-1] n_scans.append(_n_scans) add_regs_file = add_regs_files[ direction_index] if not add_regs_files is None else None, design_matrix, paradigm, frametimes = make_dmtx_from_timing_files( timing_files, fsl_condition_ids, n_scans=_n_scans, tr=tr, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs_file=add_regs_file, add_reg_names=[ 'Translation along x axis', 'Translation along yaxis', 'Translation along z axis', 'Rotation along x axis', 'Rotation along y axis', 'Rotation along z axis', 'Differential Translation along x axis', 'Differential Translation along yaxis', 'Differential Translation along z axis', 'Differential Rotation along x axis', 'Differential Rotation along y axis', 'Differential Rotation along z axis' ][:n_motion_regressions] if not add_regs_files is None else None, ) print "... done." 
paradigms.append(paradigm) frametimes_list.append(frametimes) design_matrices.append(design_matrix) # convert contrasts to dict contrasts = dict((contrast_id, # append zeros to end of contrast to match design np.hstack((contrast_value, np.zeros(len( design_matrix.names) - len(contrast_value))))) for contrast_id, contrast_value in zip( fsl_contrast_ids, contrast_values)) # more interesting contrasts if task_id == 'MOTOR': contrasts['RH-LH'] = contrasts['RH'] - contrasts['LH'] contrasts['LH-RH'] = -contrasts['RH-LH'] contrasts['RF-LF'] = contrasts['RF'] - contrasts['LF'] contrasts['LF-RF'] = -contrasts['RF-LF'] contrasts['H'] = contrasts['RH'] + contrasts['LH'] contrasts['F'] = contrasts['RF'] + contrasts['LF'] contrasts['H-F'] = contrasts['RH'] + contrasts['LH'] - ( contrasts['RF'] - contrasts['LF']) contrasts['F-H'] = -contrasts['H-F'] # importat maps z_maps = {} effects_maps = {} # replicate contrasts across sessions contrasts = dict((cid, [cval] * 2) for cid, cval in contrasts.iteritems()) # compute effects mask_path = os.path.join(subject_output_dir, "mask.nii.gz") skip = os.path.isfile(mask_path) if skip: for contrast_id, contrast_val in contrasts.iteritems(): for map_type in ['z', 'effects']: map_dir = os.path.join( subject_output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s.nii.gz' % contrast_id) if not os.path.exists(map_path): skip = 0 break # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path if skip: print "Skipping subject %s..." % ( subject_id) # fit GLM if not skip: print ( 'Fitting a "Fixed Effect" GLM for merging LR and RL phase-encoding ' 'directions for subject %s ...' % subject_id) fmri_glm = FMRILinearModel(fmri_files, [design_matrix.matrix for design_matrix in design_matrices], mask='compute' ) fmri_glm.fit(do_scaling=True, model='ar1') print "... 
done.\r\n" # save computed mask mask_path = os.path.join(subject_output_dir, "mask.nii.gz") print "Saving mask image to %s ..." % mask_path nibabel.save(fmri_glm.mask, mask_path) print "... done.\r\n" # compute effects for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map = fmri_glm.contrast( contrast_val, con_id=contrast_id, output_z=True, output_effects=True ) # store stat maps to disk for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]): map_dir = os.path.join( subject_output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s.nii.gz' % contrast_id) print "\t\tWriting %s ..." % map_path nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path # remove repeated contrasts contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) # do stats report if 0x0: anat_img = load_specific_vol(fmri_files[0], 0)[0] stats_report_filename = os.path.join(subject_output_dir, "reports", "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, nibabel.load(mask_path), anat=anat_img.get_data(), anat_affine=anat_img.get_affine(), threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, design_matrices=design_matrices, subject_id=subject_id, start_time=stats_start_time, title="GLM for subject %s" % subject_id, # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm={'LR': paradigms[0].__dict__, 'RL': paradigms[1].__dict__}, frametimes={'LR': frametimes_list[0], 'RL': frametimes_list[1]}, fwhm=fwhm ) ProgressReport().finish_dir(subject_output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename return contrasts, effects_maps, z_maps, 
mask_path
os.mkdir(map_dir) map_type_dir = os.path.join(map_dir, map_type) if not os.path.isdir(map_type_dir): os.mkdir(map_type_dir) map_path = os.path.join(map_type_dir, 'c' + "%03d" % (c_id) + '_' + contrast_id +'.nii.gz') if map_type == 'z_map': z_maps[contrast_id] = map_path nib.save(stat_map[map_type], map_path) # do stats report if not os.path.isdir(os.path.join(subject_path, 'reports')): os.mkdir(os.path.join(subject_path, 'reports')) stats_report_filename = os.path.join(subject_path, 'reports', "report_stats.html") glm_reporter.generate_subject_stats_report( stats_report_filename, contrasts, z_maps, glm.mask, design_matrices=[design_mat], subject_id='S'+subj_id, anat=anat, anat_affine=anat_affine, slicer='z', cut_coords=5, cluster_th=10 # we're only interested in this 'large' clusters )
if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path # generate stats report anat_img = nibabel.load(subject_data.anat) stats_report_filename = os.path.join(subject_data.output_dir, "reports", "report_stats.html") generate_subject_stats_report(stats_report_filename, contrasts, z_maps, fmri_glm.masker_.mask_img_, anat=anat_img, threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr, subject_id="sub001", start_time=stats_start_time, n_scans=n_scans, title="GLM for subject %s" % subject_data.subject_id, hfcut=hfcut, paradigm=paradigm, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model) ProgressReport().finish_dir(subject_data.output_dir) print("Statistic report written to %s\r\n" % stats_report_filename)
def first_level(subject_dic, additional_regressors=None, compcorr=False,
                smooth=None, surface=False, mask_img=None):
    """ Run the first-level analysis (GLM fitting + statistical maps)
    in a given subject

    Parameters
    ----------
    subject_dic: dict,
        exhaustive description of an individual acquisition
    additional_regressors: dict or None,
        additional regressors provided as an already sampled
        design_matrix dictionary keys are session_ids
    compcorr: Bool, optional,
        whether confound estimation and removal should be done or not
    smooth: float or None, optional,
        how much the data should spatially smoothed during masking
    surface: bool, optional,
        whether to run the GLM on the cortical surface (GIFTI inputs)
        instead of in the volume
    mask_img: brain-mask image or None, optional,
        computed from the functional data when None (volume case only)
    """
    start_time = time.ctime()
    # experimental paradigm meta-params
    motion_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    hrf_model = subject_dic['hrf_model']
    hfcut = subject_dic['hfcut']
    drift_model = subject_dic['drift_model']
    tr = subject_dic['TR']
    if not surface and (mask_img is None):
        mask_img = masking(subject_dic['func'], subject_dic['output_dir'])

    if additional_regressors is None:
        additional_regressors = dict([
            (session_id, None) for session_id in subject_dic['session_id']])

    # one GLM per session
    for session_id, fmri_path, onset, motion_path in zip(
            subject_dic['session_id'], subject_dic['func'],
            subject_dic['onset'],
            subject_dic['realignment_parameters']):
        paradigm_id = _session_id_to_task_id([session_id])[0]

        if surface:
            from nibabel.gifti import read
            # number of time points = number of data arrays in the GIFTI
            n_scans = np.array(
                [darrays.data for darrays in read(fmri_path).darrays]
                ).shape[0]
        else:
            n_scans = nib.load(fmri_path).shape[3]

        # motion parameters
        motion = np.loadtxt(motion_path)
        # define the time stamps for different images
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        if paradigm_id == 'audio':
            # NOTE(review): special sparse/clustered sampling scheme for
            # the 'audio' task: 8 of 10 volumes per 20 s cycle are
            # acquired (mask), over 28 cycles -- confirm against the
            # acquisition protocol
            mask = np.array([1, 0, 1, 1, 0, 1, 1, 0, 1, 1])
            n_cycles = 28
            cycle_duration = 20
            t_r = 2
            cycle = np.arange(0, cycle_duration, t_r)[mask > 0]
            frametimes = np.tile(cycle, n_cycles) + \
                np.repeat(np.arange(n_cycles) * cycle_duration, mask.sum())
            frametimes = frametimes[:-2]  # for some reason...

        if surface:
            compcorr = False  # XXX Fixme

        if compcorr:
            # prepend 5 high-variance (CompCor-style) confounds to the
            # motion regressors
            confounds = high_variance_confounds(fmri_path, mask_img=mask_img)
            confounds = np.hstack((confounds, motion))
            confound_names = ['conf_%d' % i for i in range(5)] + motion_names
        else:
            confounds = motion
            confound_names = motion_names

        if onset is None:
            # try to derive the onset-file path from the fMRI file name
            warnings.warn('Onset file not provided. Trying to guess it')
            task = os.path.basename(fmri_path).split('task')[-1][4:]
            onset = os.path.join(
                os.path.split(os.path.dirname(fmri_path))[0], 'model001',
                'onsets', 'task' + task + '_run001', 'task%s.csv' % task)

        if not os.path.exists(onset):
            warnings.warn('non-existant onset file. proceeding without it')
            paradigm = None
        else:
            paradigm = make_paradigm(onset, paradigm_id)

        # handle manually supplied regressors
        add_reg_names = []
        if additional_regressors[session_id] is None:
            add_regs = confounds
        else:
            # concatenate user-supplied regressors with the confounds
            df = read_csv(additional_regressors[session_id])
            add_regs = []
            for regressor in df:
                add_reg_names.append(regressor)
                add_regs.append(df[regressor])
            add_regs = np.array(add_regs).T
            add_regs = np.hstack((add_regs, confounds))

        add_reg_names += confound_names

        # create the design matrix
        design_matrix = make_first_level_design_matrix(
            frametimes, paradigm, hrf_model=hrf_model,
            drift_model=drift_model, period_cut=hfcut,
            add_regs=add_regs, add_reg_names=add_reg_names)
        _, dmtx, names = check_design_matrix(design_matrix)

        # create the relevant contrasts
        contrasts = make_contrasts(paradigm_id, names)

        if surface:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_surf_%s' % session_id)
        else:
            subject_session_output_dir = os.path.join(
                subject_dic['output_dir'], 'res_stats_%s' % session_id)

        if not os.path.exists(subject_session_output_dir):
            os.makedirs(subject_session_output_dir)
        np.savez(os.path.join(subject_session_output_dir,
                              'design_matrix.npz'),
                 design_matrix=design_matrix)

        if surface:
            run_surface_glm(
                design_matrix, contrasts, fmri_path,
                subject_session_output_dir)
        else:
            z_maps = run_glm(
                design_matrix, contrasts, fmri_path, mask_img, subject_dic,
                subject_session_output_dir, tr=tr, smoothing_fwhm=smooth)

            # do stats report
            anat_img = nib.load(subject_dic['anat'])
            stats_report_filename = os.path.join(
                subject_session_output_dir, 'report_stats.html')
            generate_subject_stats_report(
                stats_report_filename,
                contrasts,
                z_maps,
                mask_img,
                threshold=3.,
                cluster_th=15,
                anat=anat_img,
                anat_affine=anat_img.affine,
                design_matrices=[design_matrix],
                subject_id=subject_dic['subject_id'],
                start_time=start_time,
                title="GLM for subject %s" % session_id,
                # additional ``kwargs`` for more informative report
                TR=tr,
                n_scans=n_scans,
                hfcut=hfcut,
                frametimes=frametimes,
                drift_model=drift_model,
                hrf_model=hrf_model,
                )
        # NOTE(review): this guard is redundant with the else-branch
        # above (surface runs never produce a volume report)
        if not surface:
            ProgressReport().finish_dir(subject_session_output_dir)
            print("Statistic report written to %s\r\n"
                  % stats_report_filename)
"""do stats report""" stats_report_filename = os.path.join(subject_data.output_dir, "report_stats.html") contrasts = dict( (contrast_id, contrasts[contrast_id]) for contrast_id in z_maps.keys()) generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, design_matrices=[design_matrix], subject_id=subject_data.subject_id, anat=anat, anat_affine=anat_affine, cluster_th=50, # we're only interested in this 'large' clusters start_time=stats_start_time, # additional ``kwargs`` for more informative report paradigm=paradigm.__dict__, TR=TR, n_scans=n_scans, hfcut=hfcut, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model, slicer='z') # shutdown main report page ProgressReport().finish_dir(output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename
print # do stats report stats_report_filename = os.path.join(subject_data.reports_output_dir, "report_stats.html") contrasts = dict( (contrast_id, contrasts[contrast_id]) for contrast_id in z_maps.keys()) generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, design_matrices=[design_matrix], subject_id=subject_data.subject_id, anat=anat_img, display_mode='ortho', threshold=3., cluster_th=50, # 'large' clusters start_time=stats_start_time, paradigm=paradigm, TR=tr, nscans=nscans, hfcut=hfcut, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model, ) print "\r\nStatistic report written to %s\r\n" % stats_report_filename
def _preprocess_and_analysis_subject(subject_data, slicer='z', cut_coords=6, threshold=3., cluster_th=15, **preproc_params): """ Preprocesses the subject and then fits (mass-univariate) GLM thereup. """ # sanitize run_ids: # Sub14/BOLD/Run_02/fMR09029-0004-00010-000010-01.nii is garbage, # for example run_ids = range(9) if subject_data['subject_id'] == "Sub14": run_ids = [0] + range(2, 9) subject_data['func'] = [subject_data['func'][0] ] + subject_data['func'][2:] subject_data['session_id'] = [subject_data['session_id'][0] ] + subject_data['session_id'][2:] # sanitize subject output dir if not 'output_dir' in subject_data: subject_data['output_dir'] = os.path.join(output_dir, subject_data['subject_id']) # preprocess the data subject_data = do_subject_preproc(subject_data, **preproc_params) # chronometry stats_start_time = pretty_time() # to-be merged lists, one item per run paradigms = [] frametimes_list = [] design_matrices = [] # one list_of_contrast_dicts = [] # one dict per run n_scans = [] for run_id in run_ids: _n_scans = len(subject_data.func[run_id]) n_scans.append(_n_scans) # make paradigm paradigm = make_paradigm(getattr(subject_data, 'timing')[run_id]) # make design matrix tr = 2. drift_model = 'Cosine' hrf_model = 'Canonical With Derivative' hfcut = 128. 
frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans) design_matrix = make_dmtx( frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs=np.loadtxt( getattr(subject_data, 'realignment_parameters')[run_id]), add_reg_names=[ 'Translation along x axis', 'Translation along yaxis', 'Translation along z axis', 'Rotation along x axis', 'Rotation along y axis', 'Rotation along z axis' ]) # import matplotlib.pyplot as plt # design_matrix.show() # plt.show() paradigms.append(paradigm) design_matrices.append(design_matrix) frametimes_list.append(frametimes) n_scans.append(_n_scans) # specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i] # more interesting contrasts""" contrasts['Famous-Unfamiliar'] = contrasts['Famous'] - contrasts[ 'Unfamiliar'] contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar'] contrasts[ 'Famous-Scrambled'] = contrasts['Famous'] - contrasts['Scrambled'] contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled'] contrasts['Unfamiliar-Scrambled'] = contrasts[ 'Unfamiliar'] - contrasts['Scrambled'] contrasts['Scrambled-Unfamiliar'] = -contrasts['Unfamiliar-Scrambled'] list_of_contrast_dicts.append(contrasts) # importat maps z_maps = {} effects_maps = {} # fit GLM print('\r\nFitting a GLM (this takes time) ..') fmri_glm = FMRILinearModel( [nibabel.concat_images(sess_func) for sess_func in subject_data.func], [design_matrix.matrix for design_matrix in design_matrices], mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') print "... done.\r\n" # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz") print "Saving mask image to %s ..." % mask_path nibabel.save(fmri_glm.mask, mask_path) print "... 
done.\r\n" # replicate contrasts across runs contrasts = dict( (cid, [contrasts[cid] for contrasts in list_of_contrast_dicts]) for cid, cval in contrasts.iteritems()) # compute effects for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map = fmri_glm.contrast(contrast_val, con_id=contrast_id, output_z=True, output_stat=False, output_effects=True, output_variance=False) # store stat maps to disk for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]): map_dir = os.path.join(subject_data.output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id) print "\t\tWriting %s ..." % map_path nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path # remove repeated contrasts contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) # do stats report stats_report_filename = os.path.join( getattr(subject_data, 'reports_output_dir', subject_data.output_dir), "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, design_matrices=design_matrices, subject_id=subject_data.subject_id, start_time=stats_start_time, title="GLM for subject %s" % subject_data.subject_id, # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm=dict(("Run_%02i" % (run_id + 1), paradigms[run_id]) for run_id in run_ids), frametimes=dict(("Run_%02i" % (run_id + 1), frametimes_list[run_id]) for run_id in run_ids), # fwhm=fwhm ) ProgressReport().finish_dir(subject_data.output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename return contrasts, effects_maps, z_maps, 
mask_path
con_id='one_sample %s' % contrast_id, output_z=True) # save map map_dir = os.path.join(output_dir, 'z_maps') if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join(map_dir, '2nd_level_%s.nii.gz' % (contrast_id)) print "\t\tWriting %s ..." % map_path nibabel.save(z_map, map_path) group_level_z_maps[contrast_id] = map_path # do stats report stats_report_filename = os.path.join(output_dir, "report_stats.html") generate_subject_stats_report(stats_report_filename, contrasts, group_level_z_maps, group_mask, threshold=threshold, cluster_th=cluster_th, design_matrices=[design_matrix], subject_id="sub001", start_time=stats_start_time, title='Group GLM for br41nh4ck', slicer=slicer, cut_coords=cut_coords) ProgressReport().finish_dir(output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename
# general linear model glm = FMRILinearModel(input_image, design_mat.matrix, mask='compute') glm.fit() # contrasts c = np.hstack([1, np.zeros(len(design_mat.names)-1)]) z_map, t_map, eff_map, var_map = glm.contrast(c, contrast_type='t', output_z=True, output_stat=True, output_effects=True, output_variance=True) # save maps for result, filename in zip([z_map, t_map, eff_map, var_map], ['z_map', 't_map', 'eff_map', 'var_map']): nib.save(result, os.path.join(BASE_DIR, 'results', 'maps', filename)) # glm reporting contrasts = {'Listen': 1} zmaps = {'Listen': os.path.join(BASE_DIR, 'results', 'maps', 'z_map.nii')} glm_reporter.generate_subject_stats_report( os.path.join(BASE_DIR, 'results', 'reports', 'report.html'), contrasts, zmaps, glm.mask)
print # do stats report stats_report_filename = os.path.join(subject_data.reports_output_dir, "report_stats.html") contrasts = dict((contrast_id, contrasts[contrast_id]) for contrast_id in z_maps.keys()) generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, design_matrices=[design_matrix], subject_id=subject_data.subject_id, anat=anat_img, display_mode='ortho', threshold=3., cluster_th=50, # 'large' clusters start_time=stats_start_time, paradigm=paradigm.__dict__, TR=tr, nscans=nscans, hfcut=hfcut, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model, ) print "\r\nStatistic report written to %s\r\n" % stats_report_filename
def do_glm_for_subject(subject_id, bold_base_folder, trial_base_folder, output_base_folder): subject_dir = path(bold_base_folder) / ("sub%03d" % subject_id) output_dir = (path(output_base_folder) / ("sub%03d" % subject_id) / "model001") print output_dir if not output_dir.exists(): output_dir.makedirs() anat_file = subject_dir / "highres001.nii" anat = nb.load(anat_file) run_ids = range(1, 10) task_bold_files = [subject_dir.glob("task001_run%03d/rbold*.nii" % rid)[0] for rid in run_ids] task_mvt_files = [subject_dir.glob("task001_run%03d/rp_bold*.txt" % rid)[0] for rid in run_ids] trial_files = [(path(trial_base_folder) / ("Sub%02d" % subject_id) / "BOLD" / "Trials" / ("run_%02d_spmdef.txt" % rid)) for rid in range(1, 10)] stats_start_time = pretty_time() paradigms = [] design_matrices = [] n_scans = [] all_frametimes = [] list_of_contrast_dicts = [] # one dict per run for bold_file, mvt_file, trial_file in zip(task_bold_files, task_mvt_files, trial_files): _n_scans = nb.load(bold_file).shape[-1] n_scans.append(_n_scans) paradigm = make_paradigm(trial_file) paradigms.append(paradigm) movements = np.loadtxt(mvt_file) tr = 2. drift_model = "Cosine" hrf_model = "Canonical With Derivative" hfcut = 128. 
frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans) design_matrix = make_dmtx( frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs=movements, add_reg_names=[ "Tx", "Ty", "Tz", "R1", "R2", "R3"]) design_matrices.append(design_matrix) all_frametimes.append(frametimes) # specify contrasts contrasts = {} n_columns = len(design_matrix.names) for i in xrange(paradigm.n_conditions): contrasts['%s' % design_matrix.names[2 * i]] = np.eye( n_columns)[2 * i] # more interesting contrasts""" contrasts['Famous-Unfamiliar'] = contrasts[ 'Famous'] - contrasts['Unfamiliar'] contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar'] contrasts['Famous-Scrambled'] = contrasts[ 'Famous'] - contrasts['Scrambled'] contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled'] contrasts['Unfamiliar-Scrambled'] = contrasts[ 'Unfamiliar'] - contrasts['Scrambled'] contrasts['Scrambled-Unfamiliar'] = -contrasts['Unfamiliar-Scrambled'] list_of_contrast_dicts.append(contrasts) # importat maps z_maps = {} effects_maps = {} fmri_glm = FMRILinearModel(task_bold_files, [dm.matrix for dm in design_matrices], mask="compute") fmri_glm.fit(do_scaling=True, model="ar1") # replicate contrasts across runs contrasts = dict((cid, [contrasts[cid] for contrasts in list_of_contrast_dicts]) for cid, cval in contrasts.iteritems()) # compute effects for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map, var_map = fmri_glm.contrast( contrast_val, con_id=contrast_id, output_z=True, output_stat=False, output_effects=True, output_variance=True ) for map_type, out_map in zip(['z', 'effects', 'variance'], [z_map, eff_map, var_map]): map_dir = output_dir / ('%s_maps' % map_type) if not map_dir.exists(): map_dir.makedirs() map_path = map_dir / ('%s.nii.gz' % contrast_id) print "\t\tWriting %s ..." 
% map_path nb.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path if map_type == "variance": effects_maps[contrast_id] = map_path stats_report_dir = output_dir / "report" if not stats_report_dir.exists(): stats_report_dir.makedirs() stats_report_filename = stats_report_dir / "report_stats.html" # remove repeated contrasts contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) slicer = 'z' cut_coords = [-20, -10, 0, 10, 20, 30, 40, 50] threshold = 3. cluster_th = 15 generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.mask, anat_affine=anat.get_affine(), anat=anat.get_data(), threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, design_matrices=design_matrices, subject_id="sub%03d" % subject_id, start_time=stats_start_time, title="GLM for subject %s" % ("sub%03d" % subject_id), # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm=dict(("Run_%02i" % (run_id), paradigms[run_id - 1]) for run_id in run_ids), frametimes=dict(("Run_%02i" % (run_id), all_frametimes[run_id - 1]) for run_id in run_ids), # fwhm=fwhm ) return fmri_glm
def run_suject_level1_glm( subject_data, readout_time=.01392, # seconds tr=.72, dc=True, hrf_model="Canonical with Derivative", drift_model="Cosine", hfcut=100, regress_motion=True, slicer='ortho', cut_coords=None, threshold=3., cluster_th=15, normalize=True, fwhm=0., protocol="MOTOR", func_write_voxel_sizes=None, anat_write_voxel_sizes=None, **other_preproc_kwargs): """ Function to do preproc + analysis for a single HCP subject (task fMRI) """ add_regs_files = None n_motion_regressions = 6 subject_data.n_sessions = 2 subject_data.tmp_output_dir = os.path.join(subject_data.output_dir, "tmp") if not os.path.exists(subject_data.tmp_output_dir): os.makedirs(subject_data.tmp_output_dir) if not os.path.exists(subject_data.output_dir): os.makedirs(subject_data.output_dir) mem = Memory(os.path.join(subject_data.output_dir, "cache_dir"), verbose=100) # glob design files (.fsf) subject_data.design_files = [ os.path.join(subject_data.data_dir, ("MNINonLinear/Results/tfMRI_%s_%s/" "tfMRI_%s_%s_hp200_s4_level1.fsf") % (protocol, direction, protocol, direction)) for direction in ['LR', 'RL'] ] assert len(subject_data.design_files) == 2 for df in subject_data.design_files: assert os.path.isfile(df), df if 0x0: subject_data = _do_fmri_distortion_correction( subject_data, dc=dc, fwhm=fwhm, readout_time=readout_time, **other_preproc_kwargs) # chronometry stats_start_time = pretty_time() # merged lists paradigms = [] frametimes_list = [] design_matrices = [] # fmri_files = [] n_scans = [] # for direction, direction_index in zip(['LR', 'RL'], xrange(2)): for sess in xrange(subject_data.n_sessions): direction = ['LR', 'RL'][sess] # glob the design file # design_file = os.path.join(# _subject_data_dir, "tfMRI_%s_%s" % ( # protocol, direction), design_file = subject_data.design_files[sess] # "tfMRI_%s_%s_hp200_s4_level1.fsf" % ( # protocol, direction)) if not os.path.isfile(design_file): print "Can't find design file %s; skipping subject %s" % ( design_file, subject_data.subject_id) 
return # read the experimental setup print "Reading experimental setup from %s ..." % design_file fsl_condition_ids, timing_files, fsl_contrast_ids, contrast_values = \ read_fsl_design_file(design_file) print "... done.\r\n" # fix timing filenames timing_files = [ tf.replace("EVs", "tfMRI_%s_%s/EVs" % (protocol, direction)) for tf in timing_files ] # make design matrix print "Constructing design matrix for direction %s ..." % direction _n_scans = nibabel.load(subject_data.func[sess]).shape[-1] n_scans.append(_n_scans) add_regs_file = add_regs_files[ sess] if not add_regs_files is None else None design_matrix, paradigm, frametimes = make_dmtx_from_timing_files( timing_files, fsl_condition_ids, n_scans=_n_scans, tr=tr, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs_file=add_regs_file, add_reg_names=[ 'Translation along x axis', 'Translation along yaxis', 'Translation along z axis', 'Rotation along x axis', 'Rotation along y axis', 'Rotation along z axis', 'Differential Translation along x axis', 'Differential Translation along yaxis', 'Differential Translation along z axis', 'Differential Rotation along x axis', 'Differential Rotation along y axis', 'Differential Rotation along z axis' ][:n_motion_regressions] if not add_regs_files is None else None, ) print "... done." 
paradigms.append(paradigm) frametimes_list.append(frametimes) design_matrices.append(design_matrix) # convert contrasts to dict contrasts = dict(( contrast_id, # append zeros to end of contrast to match design np.hstack(( contrast_value, np.zeros(len(design_matrix.names) - len(contrast_value))))) for contrast_id, contrast_value in zip( fsl_contrast_ids, contrast_values)) # more interesting contrasts if protocol == 'MOTOR': contrasts['RH-LH'] = contrasts['RH'] - contrasts['LH'] contrasts['LH-RH'] = -contrasts['RH-LH'] contrasts['RF-LF'] = contrasts['RF'] - contrasts['LF'] contrasts['LF-RF'] = -contrasts['RF-LF'] contrasts['H'] = contrasts['RH'] + contrasts['LH'] contrasts['F'] = contrasts['RF'] + contrasts['LF'] contrasts['H-F'] = contrasts['RH'] + contrasts['LH'] - ( contrasts['RF'] - contrasts['LF']) contrasts['F-H'] = -contrasts['H-F'] contrasts = dict((k, v) for k, v in contrasts.iteritems() if "-" in k) # replicate contrasts across sessions contrasts = dict((cid, [cval] * 2) for cid, cval in contrasts.iteritems()) cache_dir = cache_dir = os.path.join(subject_data.output_dir, 'cache_dir') if not os.path.exists(cache_dir): os.makedirs(cache_dir) nipype_mem = NipypeMemory(base_dir=cache_dir) if 0x0: if np.sum(fwhm) > 0.: subject_data.func = nipype_mem.cache(spm.Smooth)( in_files=subject_data.func, fwhm=fwhm, ignore_exception=False, ).outputs.smoothed_files # fit GLM def tortoise(*args): print args print( 'Fitting a "Fixed Effect" GLM for merging LR and RL ' 'phase-encoding directions for subject %s ...' % (subject_data.subject_id)) fmri_glm = FMRILinearModel( subject_data.func, [design_matrix.matrix for design_matrix in design_matrices], mask='compute') fmri_glm.fit(do_scaling=True, model='ar1') print "... done.\r\n" # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii") print "Saving mask image to %s ..." % mask_path nibabel.save(fmri_glm.mask, mask_path) print "... 
done.\r\n" z_maps = {} effects_maps = {} map_dirs = {} for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map = fmri_glm.contrast(contrast_val, con_id=contrast_id, output_z=True, output_effects=True) # store stat maps to disk for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]): map_dir = os.path.join(subject_data.output_dir, '%s_maps' % map_type) map_dirs[map_type] = map_dir if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join(map_dir, '%s_%s.nii' % (map_type, contrast_id)) print "\t\tWriting %s ..." % map_path nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path return effects_maps, z_maps, mask_path, map_dirs # compute native-space maps and mask effects_maps, z_maps, mask_path, map_dirs = mem.cache(tortoise)( subject_data.func, subject_data.anat) # do stats report if 0x0: anat_img = nibabel.load(subject_data.anat) stats_report_filename = os.path.join(subject_data.output_dir, "reports", "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, nibabel.load(mask_path), anat=anat_img.get_data(), anat_affine=anat_img.get_affine(), threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, design_matrices=design_matrices, subject_id=subject_data.subject_id, start_time=stats_start_time, title="GLM for subject %s" % subject_data.subject_id, # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm={ 'LR': paradigms[0].__dict__, 'RL': paradigms[1].__dict__ }, frametimes={ 'LR': frametimes_list[0], 'RL': frametimes_list[1] }, fwhm=fwhm) ProgressReport().finish_dir(subject_data.output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename # remove repeated contrasts contrasts = 
dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) import json json.dump( dict((k, list(v)) for k, v in contrasts.iteritems()), open(os.path.join(subject_data.tmp_output_dir, "contrasts.json"), "w")) subject_data.contrasts = contrasts if normalize: assert hasattr(subject_data, "parameter_file") subject_data.native_effects_maps = effects_maps subject_data.native_z_maps = z_maps subject_data.native_mask_path = mask_path # warp effects maps and mask from native to standard space (MNI) apply_to_files = [ v for _, v in subject_data.native_effects_maps.iteritems() ] + [subject_data.native_mask_path] tmp = nipype_mem.cache(spm.Normalize)( parameter_file=getattr(subject_data, "parameter_file"), apply_to_files=apply_to_files, write_bounding_box=[[-78, -112, -50], [78, 76, 85]], write_voxel_sizes=func_write_voxel_sizes, write_wrap=[0, 0, 0], write_interp=1, jobtype='write', ignore_exception=False, ).outputs.normalized_files subject_data.mask = hard_link(tmp[-1], subject_data.output_dir) subject_data.effects_maps = dict( zip(effects_maps.keys(), hard_link(tmp[:-1], map_dirs["effects"]))) # warp anat image subject_data.anat = hard_link( nipype_mem.cache(spm.Normalize)( parameter_file=getattr(subject_data, "parameter_file"), apply_to_files=subject_data.anat, write_bounding_box=[[-78, -112, -50], [78, 76, 85]], write_voxel_sizes=anat_write_voxel_sizes, write_wrap=[0, 0, 0], write_interp=1, jobtype='write', ignore_exception=False, ).outputs.normalized_files, subject_data.anat_output_dir) else: subject_data.mask = mask_path subject_data.effects_maps = effects_maps subject_data.z_maps = z_maps return subject_data
output_stat=True, output_effects=True, output_variance=True) for map_type, out_map in zip(['z', 't', 'effects', 'variance'], [z_map, t_map, effects_map, var_map]): map_dir = os.path.join( subject_data.output_dir, '%s_maps' % map_type) if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s.nii.gz' % contrast_id) print("\t\tWriting %s ..." % map_path) nibabel.save(out_map, map_path) if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path # generate stats report anat_img = nibabel.load(subject_data.anat) stats_report_filename = os.path.join(subject_data.output_dir, "reports", "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, fmri_glm.masker_.mask_img_, anat=anat_img, threshold=2.3, cluster_th=15, design_matrices=design_matrices, TR=tr, subject_id="sub001", start_time=stats_start_time, n_scans=n_scans, title="GLM for subject %s" % subject_data.subject_id, hfcut=hfcut, paradigm=paradigm, frametimes=frametimes, drift_model=drift_model, hrf_model=hrf_model) ProgressReport().finish_dir(subject_data.output_dir) print("Statistic report written to %s\r\n" % stats_report_filename)
def run_suject_level1_glm(subject_data, readout_time=.01392, # seconds tr=.72, dc=True, hrf_model="Canonical with Derivative", drift_model="Cosine", hfcut=100, regress_motion=True, slicer='ortho', cut_coords=None, threshold=3., cluster_th=15, normalize=True, fwhm=0., protocol="MOTOR", func_write_voxel_sizes=None, anat_write_voxel_sizes=None, **other_preproc_kwargs ): """ Function to do preproc + analysis for a single HCP subject (task fMRI) """ add_regs_files = None n_motion_regressions = 6 subject_data.n_sessions = 2 subject_data.tmp_output_dir = os.path.join(subject_data.output_dir, "tmp") if not os.path.exists(subject_data.tmp_output_dir): os.makedirs(subject_data.tmp_output_dir) if not os.path.exists(subject_data.output_dir): os.makedirs(subject_data.output_dir) mem = Memory(os.path.join(subject_data.output_dir, "cache_dir"), verbose=100) # glob design files (.fsf) subject_data.design_files = [os.path.join( subject_data.data_dir, ("MNINonLinear/Results/tfMRI_%s_%s/" "tfMRI_%s_%s_hp200_s4_level1.fsf") % ( protocol, direction, protocol, direction)) for direction in ['LR', 'RL']] assert len(subject_data.design_files) == 2 for df in subject_data.design_files: assert os.path.isfile(df), df if 0x0: subject_data = _do_fmri_distortion_correction( subject_data, dc=dc, fwhm=fwhm, readout_time=readout_time, **other_preproc_kwargs) # chronometry stats_start_time = pretty_time() # merged lists paradigms = [] frametimes_list = [] design_matrices = [] # fmri_files = [] n_scans = [] # for direction, direction_index in zip(['LR', 'RL'], xrange(2)): for sess in xrange(subject_data.n_sessions): direction = ['LR', 'RL'][sess] # glob the design file # design_file = os.path.join(# _subject_data_dir, "tfMRI_%s_%s" % ( # protocol, direction), design_file = subject_data.design_files[sess] # "tfMRI_%s_%s_hp200_s4_level1.fsf" % ( # protocol, direction)) if not os.path.isfile(design_file): print "Can't find design file %s; skipping subject %s" % ( design_file, subject_data.subject_id) 
return # read the experimental setup print "Reading experimental setup from %s ..." % design_file fsl_condition_ids, timing_files, fsl_contrast_ids, contrast_values = \ read_fsl_design_file(design_file) print "... done.\r\n" # fix timing filenames timing_files = [tf.replace("EVs", "tfMRI_%s_%s/EVs" % ( protocol, direction)) for tf in timing_files] # make design matrix print "Constructing design matrix for direction %s ..." % direction _n_scans = nibabel.load(subject_data.func[sess]).shape[-1] n_scans.append(_n_scans) add_regs_file = add_regs_files[ sess] if not add_regs_files is None else None design_matrix, paradigm, frametimes = make_dmtx_from_timing_files( timing_files, fsl_condition_ids, n_scans=_n_scans, tr=tr, hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut, add_regs_file=add_regs_file, add_reg_names=[ 'Translation along x axis', 'Translation along yaxis', 'Translation along z axis', 'Rotation along x axis', 'Rotation along y axis', 'Rotation along z axis', 'Differential Translation along x axis', 'Differential Translation along yaxis', 'Differential Translation along z axis', 'Differential Rotation along x axis', 'Differential Rotation along y axis', 'Differential Rotation along z axis' ][:n_motion_regressions] if not add_regs_files is None else None, ) print "... done." 
paradigms.append(paradigm) frametimes_list.append(frametimes) design_matrices.append(design_matrix) # convert contrasts to dict contrasts = dict((contrast_id, # append zeros to end of contrast to match design np.hstack((contrast_value, np.zeros(len( design_matrix.names) - len(contrast_value))))) for contrast_id, contrast_value in zip( fsl_contrast_ids, contrast_values)) # more interesting contrasts if protocol == 'MOTOR': contrasts['RH-LH'] = contrasts['RH'] - contrasts['LH'] contrasts['LH-RH'] = -contrasts['RH-LH'] contrasts['RF-LF'] = contrasts['RF'] - contrasts['LF'] contrasts['LF-RF'] = -contrasts['RF-LF'] contrasts['H'] = contrasts['RH'] + contrasts['LH'] contrasts['F'] = contrasts['RF'] + contrasts['LF'] contrasts['H-F'] = contrasts['RH'] + contrasts['LH'] - ( contrasts['RF'] - contrasts['LF']) contrasts['F-H'] = -contrasts['H-F'] contrasts = dict((k, v) for k, v in contrasts.iteritems() if "-" in k) # replicate contrasts across sessions contrasts = dict((cid, [cval] * 2) for cid, cval in contrasts.iteritems()) cache_dir = cache_dir = os.path.join(subject_data.output_dir, 'cache_dir') if not os.path.exists(cache_dir): os.makedirs(cache_dir) nipype_mem = NipypeMemory(base_dir=cache_dir) if 0x0: if np.sum(fwhm) > 0.: subject_data.func = nipype_mem.cache(spm.Smooth)( in_files=subject_data.func, fwhm=fwhm, ignore_exception=False, ).outputs.smoothed_files # fit GLM def tortoise(*args): print args print ( 'Fitting a "Fixed Effect" GLM for merging LR and RL ' 'phase-encoding directions for subject %s ...' % ( subject_data.subject_id)) fmri_glm = FMRILinearModel(subject_data.func, [design_matrix.matrix for design_matrix in design_matrices], mask='compute' ) fmri_glm.fit(do_scaling=True, model='ar1') print "... done.\r\n" # save computed mask mask_path = os.path.join(subject_data.output_dir, "mask.nii") print "Saving mask image to %s ..." % mask_path nibabel.save(fmri_glm.mask, mask_path) print "... 
done.\r\n" z_maps = {} effects_maps = {} map_dirs = {} for contrast_id, contrast_val in contrasts.iteritems(): print "\tcontrast id: %s" % contrast_id z_map, eff_map = fmri_glm.contrast( contrast_val, con_id=contrast_id, output_z=True, output_effects=True ) # store stat maps to disk for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]): map_dir = os.path.join( subject_data.output_dir, '%s_maps' % map_type) map_dirs[map_type] = map_dir if not os.path.exists(map_dir): os.makedirs(map_dir) map_path = os.path.join( map_dir, '%s_%s.nii' % (map_type, contrast_id)) print "\t\tWriting %s ..." % map_path nibabel.save(out_map, map_path) # collect zmaps for contrasts we're interested in if map_type == 'z': z_maps[contrast_id] = map_path if map_type == 'effects': effects_maps[contrast_id] = map_path return effects_maps, z_maps, mask_path, map_dirs # compute native-space maps and mask effects_maps, z_maps, mask_path, map_dirs = mem.cache(tortoise)( subject_data.func, subject_data.anat) # do stats report if 0x0: anat_img = nibabel.load(subject_data.anat) stats_report_filename = os.path.join(subject_data.output_dir, "reports", "report_stats.html") generate_subject_stats_report( stats_report_filename, contrasts, z_maps, nibabel.load(mask_path), anat=anat_img.get_data(), anat_affine=anat_img.get_affine(), threshold=threshold, cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords, design_matrices=design_matrices, subject_id=subject_data.subject_id, start_time=stats_start_time, title="GLM for subject %s" % subject_data.subject_id, # additional ``kwargs`` for more informative report TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model, hrf_model=hrf_model, paradigm={'LR': paradigms[0].__dict__, 'RL': paradigms[1].__dict__}, frametimes={'LR': frametimes_list[0], 'RL': frametimes_list[1]}, fwhm=fwhm ) ProgressReport().finish_dir(subject_data.output_dir) print "\r\nStatistic report written to %s\r\n" % stats_report_filename # remove repeated contrasts contrasts = 
dict((cid, cval[0]) for cid, cval in contrasts.iteritems()) import json json.dump(dict((k, list(v)) for k, v in contrasts.iteritems()), open(os.path.join(subject_data.tmp_output_dir, "contrasts.json"), "w")) subject_data.contrasts = contrasts if normalize: assert hasattr(subject_data, "parameter_file") subject_data.native_effects_maps = effects_maps subject_data.native_z_maps = z_maps subject_data.native_mask_path = mask_path # warp effects maps and mask from native to standard space (MNI) apply_to_files = [ v for _, v in subject_data.native_effects_maps.iteritems() ] + [subject_data.native_mask_path] tmp = nipype_mem.cache(spm.Normalize)( parameter_file=getattr(subject_data, "parameter_file"), apply_to_files=apply_to_files, write_bounding_box=[[-78, -112, -50], [78, 76, 85]], write_voxel_sizes=func_write_voxel_sizes, write_wrap=[0, 0, 0], write_interp=1, jobtype='write', ignore_exception=False, ).outputs.normalized_files subject_data.mask = hard_link(tmp[-1], subject_data.output_dir) subject_data.effects_maps = dict(zip(effects_maps.keys(), hard_link( tmp[:-1], map_dirs["effects"]))) # warp anat image subject_data.anat = hard_link(nipype_mem.cache(spm.Normalize)( parameter_file=getattr(subject_data, "parameter_file"), apply_to_files=subject_data.anat, write_bounding_box=[[-78, -112, -50], [78, 76, 85]], write_voxel_sizes=anat_write_voxel_sizes, write_wrap=[0, 0, 0], write_interp=1, jobtype='write', ignore_exception=False, ).outputs.normalized_files, subject_data.anat_output_dir) else: subject_data.mask = mask_path subject_data.effects_maps = effects_maps subject_data.z_maps = z_maps return subject_data