else:
    dataset_dir = os.path.join(this_dir, "spm_multimodal_faces")

# fetch spm multimodal_faces data
subject_data = fetch_spm_multimodal_fmri()
dataset_dir = os.path.dirname(os.path.dirname(os.path.dirname(
    subject_data.anat)))

# preprocess the data
subject_id = "sub001"
subject_data = SubjectData(
    output_dir=os.path.join(dataset_dir, "pypreprocess_output", subject_id),
    subject_id=subject_id,
    func=[subject_data.func1, subject_data.func2],
    anat=subject_data.anat,
    trials_ses1=subject_data.trials_ses1,
    trials_ses2=subject_data.trials_ses2,
    session_ids=["Session1", "Session2"])
subject_data = do_subject_preproc(subject_data, realign=True,
                                  coregister=True, segment=True,
                                  normalize=True)

# experimental paradigm meta-params
stats_start_time = time.ctime()
tr = 2.
drift_model = 'Cosine'
hrf_model = 'spm + derivative'
hfcut = 128.

# make design matrices
first_level_effects_maps = []
mask_images = []
design_matrices = []
for x in xrange(2):
    if not os.path.exists(subject_data.output_dir):
        os.makedirs(subject_data.output_dir)
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

# fetch the data
sd = fetch_spm_multimodal_fmri_data(DATA_DIR)
subject_id = "sub001"
subject_data = SubjectData(subject_id=subject_id,
                           session_id=["Session1", "Session2"],
                           func=[sd.func1, sd.func2],
                           anat=sd.anat,
                           trials_ses1=sd.trials_ses1,
                           trials_ses2=sd.trials_ses2,
                           output_dir=os.path.join(OUTPUT_DIR, subject_id))

# preprocess the data
subject_data = do_subject_preproc(subject_data, do_normalize=False,
                                  fwhm=[8.])

# collect preprocessed data
anat_img = nibabel.load(subject_data.anat)

# experimental paradigm meta-params
stats_start_time = time.ctime()
tr = 2.
drift_model = 'Cosine'
hrf_model = 'Canonical With Derivative'
hfcut = 128.

# make design matrices
first_level_effects_maps = []
mask_images = []
for x in xrange(2):
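    # NOTE: the original loop body is elided at this point in both variants
    # above. What follows is a minimal sketch of what such a per-session
    # body typically computes, modeled on the design-matrix code in
    # _preprocess_and_analysis_subject further down this file; make_paradigm
    # and the trials_ses* attribute access are assumptions, not the elided
    # original.
    if not os.path.exists(subject_data.output_dir):
        os.makedirs(subject_data.output_dir)

    # number of scans in session x (func[x] is a 4D image after preproc)
    n_scans = nibabel.load(subject_data.func[x]).shape[-1]
    frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)

    # paradigm for this session, built from the trials_ses* timing file
    paradigm = make_paradigm(getattr(subject_data, "trials_ses%i" % (x + 1)))

    # session design matrix
    design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model,
                              drift_model=drift_model, hfcut=hfcut)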
def _do_fmri_distortion_correction(
        subject_data,
        protocol="MOTOR",
        # I'm unsure of the readout time, but it is constant across both PE
        # directions and so can be scaled to 1 (or any other nonzero float)
        readout_time=.01392,
        realign=True,
        coregister=True,
        coreg_func_to_anat=True,
        dc=True,
        segment=False,
        normalize=False,
        func_write_voxel_sizes=None,
        anat_write_voxel_sizes=None,
        report=False,
        **kwargs):
    """Undistort task fMRI data for a given HCP subject."""
    directions = ['LR', 'RL']
    subject_data.sanitize()
    if dc:
        acq_params = [[1, 0, 0, readout_time],
                      [-1, 0, 0, readout_time]]
        acq_params_file = os.path.join(subject_data.output_dir,
                                       "b0_acquisition_params.txt")
        np.savetxt(acq_params_file, acq_params, fmt='%f')

        fieldmap_files = [
            os.path.join(
                os.path.dirname(subject_data.func[sess]),
                "%s_3T_SpinEchoFieldMap_%s.nii.gz" % (
                    subject_data.subject_id, directions[sess]))
            for sess in xrange(subject_data.n_sessions)]
        sbref_files = [sess_func.replace(".nii", "_SBRef.nii")
                       for sess_func in subject_data.func]

        # prepare for smart caching
        mem = Memory(os.path.join(subject_data.output_dir, "cache_dir"))

        for x in [fieldmap_files, sbref_files, subject_data.func]:
            assert len(x) == 2
            for y in x:
                assert os.path.isfile(y), y

        # fslroi: peel the 0th volume of each fieldmap
        zeroth_fieldmap_files = []
        for fieldmap_file in fieldmap_files:
            if not os.path.isfile(fieldmap_file):
                print "Can't find fieldmap file %s; skipping subject %s" % (
                    fieldmap_file, subject_data.subject_id)
                return

            zeroth_fieldmap_file = os.path.join(
                subject_data.output_dir,
                "0th_%s" % os.path.basename(fieldmap_file))
            fslroi_cmd = "fsl5.0-fslroi %s %s 0 1" % (
                fieldmap_file, zeroth_fieldmap_file)
            print "\r\nExecuting '%s' ..." % fslroi_cmd
            print mem.cache(commands.getoutput)(fslroi_cmd)
            zeroth_fieldmap_files.append(zeroth_fieldmap_file)

        # merge the 0th volume of both fieldmaps
        merged_zeroth_fieldmap_file = os.path.join(
            subject_data.output_dir, "merged_with_other_direction_%s" % (
                os.path.basename(zeroth_fieldmap_files[0])))
        fslmerge_cmd = "fsl5.0-fslmerge -t %s %s %s" % (
            merged_zeroth_fieldmap_file, zeroth_fieldmap_files[0],
            zeroth_fieldmap_files[1])
        print "\r\nExecuting '%s' ..." % fslmerge_cmd
        print mem.cache(commands.getoutput)(fslmerge_cmd)

        # do topup (learn distortion model)
        topup_results_basename = os.path.join(subject_data.output_dir,
                                              "topup_results")
        topup_cmd = (
            "fsl5.0-topup --imain=%s --datain=%s --config=b02b0.cnf "
            "--out=%s" % (merged_zeroth_fieldmap_file, acq_params_file,
                          topup_results_basename))
        print "\r\nExecuting '%s' ..." % topup_cmd
        print mem.cache(commands.getoutput)(topup_cmd)

        # apply the learnt deformations to absorb the distortion
        dc_fmri_files = []
        for sess in xrange(2):
            # merge SBRef + task BOLD for current PE direction
            assert len(subject_data.func) == 2, subject_data
            fourD_plus_sbref = os.path.join(
                subject_data.output_dir,
                "sbref_plus_" + os.path.basename(subject_data.func[sess]))
            fslmerge_cmd = "fsl5.0-fslmerge -t %s %s %s" % (
                fourD_plus_sbref, sbref_files[sess],
                subject_data.func[sess])
            print "\r\nExecuting '%s' ..." % fslmerge_cmd
            print mem.cache(commands.getoutput)(fslmerge_cmd)

            # realign task BOLD to SBRef
            sess_output_dir = subject_data.session_output_dirs[sess]
            rfourD_plus_sbref = _do_subject_realign(SubjectData(
                func=[fourD_plus_sbref],
                output_dir=subject_data.output_dir,
                n_sessions=1,
                session_output_dirs=[sess_output_dir]),
                report=False).func[0]

            # apply topup to realigned images
            dc_rfourD_plus_sbref = os.path.join(
                subject_data.output_dir,
                "dc" + os.path.basename(rfourD_plus_sbref))
            applytopup_cmd = (
                "fsl5.0-applytopup --imain=%s --verbose --inindex=%i "
                "--topup=%s --out=%s --datain=%s --method=jac" % (
                    rfourD_plus_sbref, sess + 1, topup_results_basename,
                    dc_rfourD_plus_sbref, acq_params_file))
            print "\r\nExecuting '%s' ..." % applytopup_cmd
            print mem.cache(commands.getoutput)(applytopup_cmd)

            # recover undistorted task BOLD
            dc_rfmri_file = dc_rfourD_plus_sbref.replace("sbref_plus_", "")
            fslroi_cmd = "fsl5.0-fslroi %s %s 1 -1" % (
                dc_rfourD_plus_sbref, dc_rfmri_file)
            print "\r\nExecuting '%s' ..." % fslroi_cmd
            print mem.cache(commands.getoutput)(fslroi_cmd)

            # sanity tricks
            if dc_rfmri_file.endswith(".nii"):
                dc_rfmri_file = dc_rfmri_file + ".gz"
            dc_fmri_files.append(dc_rfmri_file)

        subject_data.func = dc_fmri_files

    if isinstance(subject_data.func, basestring):
        subject_data.func = [subject_data.func]

    # continue preprocessing
    subject_data = do_subject_preproc(
        subject_data, realign=realign, coregister=coregister,
        coreg_anat_to_func=not coreg_func_to_anat, segment=True,
        normalize=False, report=report)

    # ok for GLM now
    return subject_data
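# A hedged usage sketch for the function above (not from the original
# source): the subject id and paths are hypothetical, and it assumes FSL
# 5.0 on the PATH plus the HCP "unprocessed/3T" layout, where each task
# BOLD run sits next to its *_SpinEchoFieldMap_{LR,RL}.nii.gz and *_SBRef
# companions.
subject_data = SubjectData(
    subject_id="100307",  # hypothetical HCP subject
    func=["/data/HCP/100307/unprocessed/3T/tfMRI_MOTOR_LR/"
          "100307_3T_tfMRI_MOTOR_LR.nii.gz",
          "/data/HCP/100307/unprocessed/3T/tfMRI_MOTOR_RL/"
          "100307_3T_tfMRI_MOTOR_RL.nii.gz"],
    anat="/data/HCP/100307/T1w/T1w_acpc_dc_restore_brain.nii.gz",
    output_dir="/data/HCP_preproc/100307")
subject_data = _do_fmri_distortion_correction(
    subject_data, protocol="MOTOR", readout_time=.01392, report=False)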
def do_preproc(funcfile, anatfile, subject, decimate=False):
    """ Perform the preprocessing.

    Parameters
    ----------
    funcfile: str
        the functional volume.
    anatfile: str
        the anatomical volume.
    subject: str
        the subject identifier.
    decimate: bool, default False
        if set, reduce the size of the input functional volume (loses
        information).

    Returns
    -------
    subject_data: object
        a structure that contains the output results.
    """
    # Grab the data
    splitpath = anatfile.split(os.sep)
    outdir = os.path.join(BIDS_DATA_DIR, "derivatives",
                          "spmpreproc_{0}".format(splitpath[-3]), subject)
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    local_funcfile = os.path.join(outdir, os.path.basename(funcfile))
    if not os.path.isfile(local_funcfile):
        shutil.copy2(funcfile, local_funcfile)
    if decimate:
        im = nibabel.load(local_funcfile)
        dec_im = nibabel.Nifti1Image(im.get_data()[..., :3], im.affine)
        nibabel.save(dec_im, local_funcfile)
    local_anatfile = os.path.join(outdir, os.path.basename(anatfile))
    if not os.path.isfile(local_anatfile):
        shutil.copy2(anatfile, local_anatfile)
    os.chdir(outdir)
    subject_data = SubjectData(subject_id=subject, func=local_funcfile,
                               anat=local_anatfile, output_dir=outdir,
                               caching=True)

    # Start processing
    print('Starting pre-processing...')
    subject_data = do_subject_preproc(
        subject_data,
        deleteorient=False,
        slice_timing=False,
        ref_slice=0,
        TR=2.5,
        TA=None,
        realign=True,
        realign_reslice=True,
        register_to_mean=True,
        realign_software="spm",
        coregister=True,
        coregister_reslice=True,
        coreg_anat_to_func=False,
        coregister_software="spm",
        segment=False,
        normalize=True,
        fwhm=0,
        anat_fwhm=0,
        func_write_voxel_sizes=[3, 3, 3],
        anat_write_voxel_sizes=[1, 1, 1],
        hardlink_output=True,
        report=False,
        tsdiffana=True,
        parent_results_gallery=None,
        last_stage=True,
        preproc_undergone=None,
        prepreproc_undergone="",
        caching=True)
    return subject_data
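# Hedged usage sketch for do_preproc (not from the original source): the
# BIDS-style paths are hypothetical, and BIDS_DATA_DIR is assumed to be
# defined at module level, as the function requires.
subject_data = do_preproc(
    funcfile="/data/bids/study/sub-01/func/sub-01_task-rest_bold.nii.gz",
    anatfile="/data/bids/study/sub-01/anat/sub-01_T1w.nii.gz",
    subject="sub-01",
    decimate=True)  # keep only the first 3 volumes for a quick smoke test
print(subject_data.anat)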
def _preprocess_and_analysis_subject(subject_data,
                                     slicer='z',
                                     cut_coords=6,
                                     threshold=3.,
                                     cluster_th=15,
                                     **preproc_params):
    """Preprocess the subject, then fit a (mass-univariate) GLM thereupon."""
    # sanitize run_ids:
    # Sub14/BOLD/Run_02/fMR09029-0004-00010-000010-01.nii is garbage,
    # for example
    run_ids = range(9)
    if subject_data['subject_id'] == "Sub14":
        run_ids = [0] + range(2, 9)
        subject_data['func'] = [subject_data['func'][0]] + subject_data[
            'func'][2:]
        subject_data['session_id'] = [subject_data['session_id'][0]
                                      ] + subject_data['session_id'][2:]

    # sanitize subject output dir
    if 'output_dir' not in subject_data:
        subject_data['output_dir'] = os.path.join(
            output_dir, subject_data['subject_id'])

    # preprocess the data
    subject_data = do_subject_preproc(subject_data, **preproc_params)

    # chronometry
    stats_start_time = pretty_time()

    # to-be merged lists, one item per run
    paradigms = []
    frametimes_list = []
    design_matrices = []
    list_of_contrast_dicts = []  # one dict per run
    n_scans = []
    for run_id in run_ids:
        _n_scans = len(subject_data.func[run_id])
        n_scans.append(_n_scans)

        # make paradigm
        paradigm = make_paradigm(getattr(subject_data, 'timing')[run_id])

        # make design matrix
        tr = 2.
        drift_model = 'Cosine'
        hrf_model = 'Canonical With Derivative'
        hfcut = 128.
        frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans)
        design_matrix = make_dmtx(
            frametimes, paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut,
            add_regs=np.loadtxt(getattr(
                subject_data, 'realignment_parameters')[run_id]),
            add_reg_names=['Translation along x axis',
                           'Translation along y axis',
                           'Translation along z axis',
                           'Rotation along x axis',
                           'Rotation along y axis',
                           'Rotation along z axis'])
        paradigms.append(paradigm)
        design_matrices.append(design_matrix)
        frametimes_list.append(frametimes)

        # specify contrasts
        contrasts = {}
        n_columns = len(design_matrix.names)
        for i in xrange(paradigm.n_conditions):
            contrasts['%s' % design_matrix.names[2 * i]] = np.eye(
                n_columns)[2 * i]

        # more interesting contrasts
        contrasts['Famous-Unfamiliar'] = contrasts[
            'Famous'] - contrasts['Unfamiliar']
        contrasts['Unfamiliar-Famous'] = -contrasts['Famous-Unfamiliar']
        contrasts['Famous-Scrambled'] = contrasts[
            'Famous'] - contrasts['Scrambled']
        contrasts['Scrambled-Famous'] = -contrasts['Famous-Scrambled']
        contrasts['Unfamiliar-Scrambled'] = contrasts[
            'Unfamiliar'] - contrasts['Scrambled']
        contrasts['Scrambled-Unfamiliar'] = -contrasts[
            'Unfamiliar-Scrambled']
        list_of_contrast_dicts.append(contrasts)

    # important maps
    z_maps = {}
    effects_maps = {}

    # fit GLM
    print('\r\nFitting a GLM (this takes time) ..')
    fmri_glm = FMRILinearModel(
        [nibabel.concat_images(sess_func)
         for sess_func in subject_data.func],
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')
    print "... done.\r\n"

    # save computed mask
    mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
    print "Saving mask image to %s ..." % mask_path
    nibabel.save(fmri_glm.mask, mask_path)
    print "... done.\r\n"

    # replicate contrasts across runs
    contrasts = dict((cid, [contrasts[cid]
                            for contrasts in list_of_contrast_dicts])
                     for cid, cval in contrasts.iteritems())

    # compute effects
    for contrast_id, contrast_val in contrasts.iteritems():
        print "\tcontrast id: %s" % contrast_id
        z_map, eff_map = fmri_glm.contrast(
            contrast_val, con_id=contrast_id, output_z=True,
            output_stat=False, output_effects=True, output_variance=False)

        # store stat maps to disk
        for map_type, out_map in zip(['z', 'effects'], [z_map, eff_map]):
            map_dir = os.path.join(subject_data.output_dir,
                                   '%s_maps' % map_type)
            if not os.path.exists(map_dir):
                os.makedirs(map_dir)
            map_path = os.path.join(map_dir, '%s.nii.gz' % contrast_id)
            print "\t\tWriting %s ..." % map_path
            nibabel.save(out_map, map_path)

            # collect zmaps for contrasts we're interested in
            if map_type == 'z':
                z_maps[contrast_id] = map_path
            if map_type == 'effects':
                effects_maps[contrast_id] = map_path

    # remove repeated contrasts
    contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems())

    # do stats report
    stats_report_filename = os.path.join(
        getattr(subject_data, 'reports_output_dir', subject_data.output_dir),
        "report_stats.html")
    generate_subject_stats_report(
        stats_report_filename, contrasts, z_maps, fmri_glm.mask,
        threshold=threshold, cluster_th=cluster_th, slicer=slicer,
        cut_coords=cut_coords, design_matrices=design_matrices,
        subject_id=subject_data.subject_id, start_time=stats_start_time,
        title="GLM for subject %s" % subject_data.subject_id,
        # additional ``kwargs`` for more informative report
        TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model,
        hrf_model=hrf_model,
        paradigm=dict(("Run_%02i" % (run_id + 1), paradigms[run_id])
                      for run_id in run_ids),
        frametimes=dict(("Run_%02i" % (run_id + 1),
                         frametimes_list[run_id]) for run_id in run_ids))
    ProgressReport().finish_dir(subject_data.output_dir)
    print "\r\nStatistic report written to %s\r\n" % stats_report_filename

    return contrasts, effects_maps, z_maps, mask_path
do_subject_preproc(
    subject_data,  # subject data class
    deleteorient=False,
    # slice_timing=True,
    slice_order="ascending",
    interleaved=True,
    refslice=1,
    TR=2.4,
    TA=2.3,
    slice_timing_software="spm",
    realign=True,
    realign_reslice=False,
    register_to_mean=True,
    realign_software="spm",
    coregister=True,
    coregister_reslice=False,
    coreg_anat_to_func=True,
    coregister_software="spm",
    segment=False,
    normalize=True,
    dartel=False,
    fwhm=3.,
    anat_fwhm=0.,
    func_write_voxel_sizes=3.,
    anat_write_voxel_sizes=1.,
    hardlink_output=True,
    report=True,
    cv_tc=True,
    parent_results_gallery=None,
    last_stage=True,
    preproc_undergone=None,
    prepreproc_undergone="",
    generate_preproc_undergone=True,
    caching=True,
)
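# Hedged sketch of the input the fully-spelled-out call above expects (not
# from the original source; the study paths are hypothetical): a SubjectData
# object pointing at raw functional and anatomical images. Note that slice
# timing is left commented out in the call, while normalization writes 3 mm
# functional and 1 mm anatomical voxels.
subject_data = SubjectData(
    subject_id="sub001",
    func=["/data/study/sub001/func/run1.nii"],
    anat="/data/study/sub001/anat/t1.nii",
    output_dir="/data/study_preproc/sub001")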
def run_suject_level1_glm(subject_data_dir,
                          subject_output_dir,
                          task_id,
                          readout_time=.01392,  # seconds
                          tr=.72,
                          do_preproc=False,
                          do_realign=False,
                          do_normalize=False,
                          fwhm=0.,
                          report=False,
                          hrf_model="Canonical with Derivative",
                          drift_model="Cosine",
                          hfcut=100,
                          regress_motion=True,
                          slicer='y',
                          cut_coords=6,
                          threshold=3.,
                          cluster_th=15):
    """Do preproc + analysis for a single HCP subject (task fMRI)."""
    # sanitize subject data_dir
    subject_id = int(os.path.basename(subject_data_dir))
    subject_data_dir = os.path.abspath(subject_data_dir)
    _subject_data_dir = os.path.join(subject_data_dir,
                                     "MNINonLinear/Results/")
    add_regs_files = None

    if do_preproc:
        if not os.path.exists(subject_output_dir):
            os.makedirs(subject_output_dir)

        # glob fmri files
        fmri_files = [os.path.join(
            subject_data_dir,
            "unprocessed/3T/tfMRI_%s_%s/%s_3T_tfMRI_%s_%s.nii.gz" % (
                task_id, direction, subject_id, task_id, direction))
            for direction in ["LR", "RL"]]
        assert len(fmri_files) == 2

        # glob anat file
        anat_file = os.path.join(subject_data_dir,
                                 "T1w/T1w_acpc_dc_restore_brain.nii.gz")
        if not os.path.isfile(anat_file):
            anat_file = None

        # distortion correction ?
        dc_output = _do_fmri_distortion_correction(
            fmri_files, subject_data_dir, subject_output_dir, subject_id,
            task_id, readout_time=readout_time, report=report)
        if dc_output is None:
            return
        else:
            fmri_files, realignment_parameters = dc_output

        # preprocess the data
        preproc_subject_data = do_subject_preproc(
            SubjectData(func=fmri_files, anat=anat_file,
                        output_dir=subject_output_dir),
            do_realign=True, do_normalize=do_normalize, fwhm=fwhm,
            report=report)
        fmri_files = preproc_subject_data.func
        n_motion_regressions = 6
        if do_realign and regress_motion:
            add_regs_files = realignment_parameters
    else:
        n_motion_regressions = 12

        # glob fmri files
        fmri_files = []
        for direction in ['LR', 'RL']:
            fmri_file = os.path.join(
                _subject_data_dir, "tfMRI_%s_%s/tfMRI_%s_%s.nii.gz" % (
                    task_id, direction, task_id, direction))
            if not os.path.isfile(fmri_file):
                print "Can't find task fMRI file %s; skipping subject %s" % (
                    fmri_file, subject_id)
                return
            else:
                fmri_files.append(fmri_file)

        # glob movement confounds
        if regress_motion:
            add_regs_files = [os.path.join(
                _subject_data_dir, "tfMRI_%s_%s" % (task_id, direction),
                "Movement_Regressors.txt")
                for direction in ["LR", "RL"]]

        # smooth images
        if np.sum(fwhm) > 0:
            print "Smoothing fMRI data (fwhm = %s)..." % fwhm
            fmri_files = _do_subject_smooth(
                SubjectData(func=fmri_files,
                            output_dir=subject_output_dir),
                fwhm=fwhm, report=False).func
            print "... done.\r\n"

    # sanitize subject_output_dir
    if not os.path.exists(subject_output_dir):
        os.makedirs(subject_output_dir)

    # chronometry
    stats_start_time = pretty_time()

    # merged lists
    paradigms = []
    frametimes_list = []
    design_matrices = []
    n_scans = []
    for direction, direction_index in zip(['LR', 'RL'], xrange(2)):
        # glob the design file
        design_file = os.path.join(
            _subject_data_dir, "tfMRI_%s_%s" % (task_id, direction),
            "tfMRI_%s_%s_hp200_s4_level1.fsf" % (task_id, direction))
        if not os.path.isfile(design_file):
            print "Can't find design file %s; skipping subject %s" % (
                design_file, subject_id)
            return

        # read the experimental setup
        print "Reading experimental setup from %s ..." % design_file
        fsl_condition_ids, timing_files, fsl_contrast_ids, \
            contrast_values = read_design_fsl_design_file(design_file)
        print "... done.\r\n"

        # fix timing filenames
        timing_files = _insert_directory_in_file_name(
            timing_files, "tfMRI_%s_%s" % (task_id, direction), 1)

        # make design matrix
        print "Constructing design matrix for direction %s ..." % direction
        _n_scans = nibabel.load(fmri_files[direction_index]).shape[-1]
        n_scans.append(_n_scans)
        add_regs_file = add_regs_files[
            direction_index] if add_regs_files is not None else None
        design_matrix, paradigm, frametimes = make_dmtx_from_timing_files(
            timing_files, fsl_condition_ids, n_scans=_n_scans, tr=tr,
            hrf_model=hrf_model, drift_model=drift_model, hfcut=hfcut,
            add_regs_file=add_regs_file,
            add_reg_names=[
                'Translation along x axis',
                'Translation along y axis',
                'Translation along z axis',
                'Rotation along x axis',
                'Rotation along y axis',
                'Rotation along z axis',
                'Differential Translation along x axis',
                'Differential Translation along y axis',
                'Differential Translation along z axis',
                'Differential Rotation along x axis',
                'Differential Rotation along y axis',
                'Differential Rotation along z axis'
            ][:n_motion_regressions] if add_regs_files is not None
            else None)
        print "... done."
        paradigms.append(paradigm)
        frametimes_list.append(frametimes)
        design_matrices.append(design_matrix)

        # convert contrasts to dict
        contrasts = dict(
            (contrast_id,
             # append zeros to end of contrast to match design
             np.hstack((contrast_value,
                        np.zeros(len(design_matrix.names) -
                                 len(contrast_value)))))
            for contrast_id, contrast_value in zip(fsl_contrast_ids,
                                                   contrast_values))

        # more interesting contrasts
        if task_id == 'MOTOR':
            contrasts['RH-LH'] = contrasts['RH'] - contrasts['LH']
            contrasts['LH-RH'] = -contrasts['RH-LH']
            contrasts['RF-LF'] = contrasts['RF'] - contrasts['LF']
            contrasts['LF-RF'] = -contrasts['RF-LF']
            contrasts['H'] = contrasts['RH'] + contrasts['LH']
            contrasts['F'] = contrasts['RF'] + contrasts['LF']
            contrasts['H-F'] = contrasts['H'] - contrasts['F']
            contrasts['F-H'] = -contrasts['H-F']

    # important maps
    z_maps = {}
    effects_maps = {}

    # replicate contrasts across sessions
    contrasts = dict((cid, [cval] * 2)
                     for cid, cval in contrasts.iteritems())

    # compute effects
    mask_path = os.path.join(subject_output_dir, "mask.nii.gz")
    skip = os.path.isfile(mask_path)
    if skip:
        for contrast_id, contrast_val in contrasts.iteritems():
            for map_type in ['z', 'effects']:
                map_dir = os.path.join(subject_output_dir,
                                       '%s_maps' % map_type)
                if not os.path.exists(map_dir):
                    os.makedirs(map_dir)
                map_path = os.path.join(map_dir,
                                        '%s.nii.gz' % contrast_id)
                if not os.path.exists(map_path):
                    skip = 0
                    break

                # collect zmaps for contrasts we're interested in
                if map_type == 'z':
                    z_maps[contrast_id] = map_path
                if map_type == 'effects':
                    effects_maps[contrast_id] = map_path
    if skip:
        print "Skipping subject %s..." % subject_id

    # fit GLM
    if not skip:
        print ('Fitting a "Fixed Effect" GLM for merging LR and RL '
               'phase-encoding directions for subject %s ...' % subject_id)
        fmri_glm = FMRILinearModel(
            fmri_files,
            [design_matrix.matrix for design_matrix in design_matrices],
            mask='compute')
        fmri_glm.fit(do_scaling=True, model='ar1')
        print "... done.\r\n"

        # save computed mask
        mask_path = os.path.join(subject_output_dir, "mask.nii.gz")
        print "Saving mask image to %s ..." % mask_path
        nibabel.save(fmri_glm.mask, mask_path)
        print "... done.\r\n"

        # compute effects
        for contrast_id, contrast_val in contrasts.iteritems():
            print "\tcontrast id: %s" % contrast_id
            z_map, eff_map = fmri_glm.contrast(
                contrast_val, con_id=contrast_id, output_z=True,
                output_effects=True)

            # store stat maps to disk
            for map_type, out_map in zip(['z', 'effects'],
                                         [z_map, eff_map]):
                map_dir = os.path.join(subject_output_dir,
                                       '%s_maps' % map_type)
                if not os.path.exists(map_dir):
                    os.makedirs(map_dir)
                map_path = os.path.join(map_dir,
                                        '%s.nii.gz' % contrast_id)
                print "\t\tWriting %s ..." % map_path
                nibabel.save(out_map, map_path)

                # collect zmaps for contrasts we're interested in
                if map_type == 'z':
                    z_maps[contrast_id] = map_path
                if map_type == 'effects':
                    effects_maps[contrast_id] = map_path

    # remove repeated contrasts
    contrasts = dict((cid, cval[0]) for cid, cval in contrasts.iteritems())

    # do stats report (disabled)
    if False:
        anat_img = load_specific_vol(fmri_files[0], 0)[0]
        stats_report_filename = os.path.join(subject_output_dir, "reports",
                                             "report_stats.html")
        generate_subject_stats_report(
            stats_report_filename, contrasts, z_maps,
            nibabel.load(mask_path), anat=anat_img.get_data(),
            anat_affine=anat_img.get_affine(), threshold=threshold,
            cluster_th=cluster_th, slicer=slicer, cut_coords=cut_coords,
            design_matrices=design_matrices, subject_id=subject_id,
            start_time=stats_start_time,
            title="GLM for subject %s" % subject_id,
            # additional ``kwargs`` for more informative report
            TR=tr, n_scans=n_scans, hfcut=hfcut, drift_model=drift_model,
            hrf_model=hrf_model,
            paradigm={'LR': paradigms[0].__dict__,
                      'RL': paradigms[1].__dict__},
            frametimes={'LR': frametimes_list[0],
                        'RL': frametimes_list[1]},
            fwhm=fwhm)
        ProgressReport().finish_dir(subject_output_dir)
        print "\r\nStatistic report written to %s\r\n" % \
            stats_report_filename

    return contrasts, effects_maps, z_maps, mask_path