def _nyu_rest_factory(session=1):
    from pypreprocess.nipype_preproc_spm_utils import SubjectData

    nyu_data = fetch_nyu_rest(sessions=[session], n_subjects=7)
    session_func = [x for x in nyu_data.func if "session%i" % session in x]
    session_anat = [
        x for x in nyu_data.anat_skull if "session%i" % session in x]

    for subject_id in set([os.path.basename(os.path.dirname(
            os.path.dirname(x))) for x in session_func]):
        # instantiate subject_data object
        subject_data = SubjectData()
        subject_data.subject_id = subject_id
        subject_data.session_id = session

        # set func
        subject_data.func = [x for x in session_func if subject_id in x]
        assert len(subject_data.func) == 1
        subject_data.func = subject_data.func[0]

        # set anat
        subject_data.anat = [x for x in session_anat if subject_id in x]
        assert len(subject_data.anat) == 1
        subject_data.anat = subject_data.anat[0]

        # set subject output directory
        subject_data.output_dir = "/tmp/%s" % subject_id

        subject_data.sanitize(deleteorient=True, niigz2nii=False)

        yield (subject_data.subject_id, subject_data.func[0],
               subject_data.anat)
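# Minimal usage sketch for the factory above (illustrative, not part of the
# original script): the generator yields one (subject_id, func, anat) triple
# per subject, already sanitized.
for subject_id, func_file, anat_file in _nyu_rest_factory(session=1):
    print("subject %s: func=%s anat=%s" % (subject_id, func_file, anat_file))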
def subject_factory():
    """Producer for HAXBY subject data."""
    for subject_id in set([os.path.basename(
            os.path.dirname(x)) for x in haxby_data.func]):
        # instantiate subject_data object
        subject_data = SubjectData()
        subject_data.subject_id = subject_id
        subject_data.session_id = "haxby2001"

        # set func
        subject_data.func = [x for x in haxby_data.func if subject_id in x]
        assert len(subject_data.func) == 1
        subject_data.func = subject_data.func[0]

        # set anat (this must glob the anatomical images, not the
        # functional ones)
        subject_data.anat = [x for x in haxby_data.anat if subject_id in x]
        assert len(subject_data.anat) == 1
        subject_data.anat = subject_data.anat[0]

        # set subject output directory
        subject_data.output_dir = os.path.join(OUTPUT_DIR,
                                               subject_data.subject_id)

        yield subject_data
def _abide_factory(institute="KKI"):
    for scans in sorted(glob.glob(
            "/home/elvis/CODE/datasets/ABIDE/%s_*/%s_*/scans" % (
                institute, institute))):
        subject_data = SubjectData()
        subject_data.subject_id = os.path.basename(os.path.dirname(
            os.path.dirname(scans)))
        subject_data.func = os.path.join(
            scans, "rest/resources/NIfTI/files/rest.nii")
        subject_data.anat = os.path.join(
            scans, "anat/resources/NIfTI/files/mprage.nii")
        subject_data.output_dir = os.path.join(ABIDE_OUTPUT_DIR,
                                               subject_data.subject_id)
        yield subject_data
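# Minimal usage sketch (an assumption, mirroring the keyword arguments that
# appear in the do_subjects_preproc call later in this section): feed the
# generated subjects to pypreprocess's preprocessing pipeline.
results = do_subjects_preproc(list(_abide_factory(institute="KKI")),
                              output_dir=ABIDE_OUTPUT_DIR,
                              dataset_id='ABIDE')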
def subject_factory():
    for subject_id in subject_ids:
        if subject_id in exclusions:
            continue

        # construct subject data structure
        subject_data = SubjectData()
        subject_data.session_id = session_ids
        subject_data.subject_id = subject_id
        subject_data.func = []

        # glob for bold data
        has_bad_sessions = False
        for session_id in subject_data.session_id:
            bold_dir = os.path.join(
                data_dir, "%s/BOLD/%s" % (subject_id, session_id))

            # # extract .nii.gz to .nii
            # unzip_nii_gz(bold_dir)

            # glob bold data for this session
            func = glob.glob(os.path.join(bold_dir, "bold.nii.gz"))

            # check that this session is OK (has bold data, etc.)
            if not func:
                has_bad_sessions = True
                break

            subject_data.func.append(func[0])

        # exclude subject if necessary
        if has_bad_sessions:
            continue

        # glob for anatomical data
        # anat_dir = os.path.join(
        #     data_dir,
        #     "%s/anatomy" % subject_id)

        # # extract .nii.gz to .nii
        # unzip_nii_gz(anat_dir)

        # glob anatomical data proper
        subject_data.anat = glob.glob(
            os.path.join(
                data_dir,
                "%s/anatomy/highres001_brain.nii.gz" % subject_id))[0]

        # set subject output dir (all calculations for
        # this subject go here)
        subject_data.output_dir = os.path.join(output_dir, subject_id)

        yield subject_data
def subject_factory():
    for subject_id in subject_ids:
        subject_data = SubjectData()
        subject_data.subject_id = subject_id

        try:
            subject_data.func = glob.glob(
                os.path.join(
                    abide_data_dir,
                    "%s/%s/scans/rest*/resources/NIfTI/files/rest.nii" % (
                        subject_id, subject_id)))[0]
        except IndexError:
            ignored_because = "no rest data found"
            print("Ignoring subject %s (%s)" % (subject_id, ignored_because))
            ignored_subject_ids.append((subject_id, ignored_because))
            continue

        try:
            subject_data.anat = glob.glob(
                os.path.join(
                    abide_data_dir,
                    "%s/%s/scans/anat/resources/NIfTI/files/mprage.nii" % (
                        subject_id, subject_id)))[0]
        except IndexError:
            if do_dartel:
                # can't do DARTEL under such conditions
                continue
            # fall back on hires data when anat is missing
            try:
                subject_data.hires = glob.glob(
                    os.path.join(
                        abide_data_dir,
                        ("%s/%s/scans/hires/resources/NIfTI/"
                         "files/hires.nii") % (subject_id, subject_id)))[0]
            except IndexError:
                ignored_because = "no anat/hires data found"
                print("Ignoring subject %s (%s)" % (subject_id,
                                                    ignored_because))
                ignored_subject_ids.append((subject_id, ignored_because))
                continue

        subject_data.output_dir = os.path.join(institute_output_dir,
                                               subject_id)

        # yield data for this subject
        yield subject_data
def subject_factory():
    anats = glob.glob(
        os.path.join(data_dir, 'sub*', 'ses-*', 'anat',
                     'sub-*_ses-*_acq-highres_T1w.nii'))
    subject_sessions = [(anat.split('/')[-4], anat.split('/')[-3])
                        for anat in anats]
    # note: this line overrides the globbed list above and restricts the
    # run to a single subject/session
    subject_sessions = [('sub-01', 'ses-12')]

    for subject_session in subject_sessions:
        subject, session = subject_session
        subject_data = SubjectData(isdicom=False, scratch=scratch,
                                   session_output_dirs=[], n_sessions=0)
        subject_data.subject_id = subject
        subject_data.anat = os.path.join(
            data_dir, subject, session, 'anat',
            '%s_%s_acq-highres_T1w.nii' % (subject, session))
        subject_data.func = []
        subject_data.output_dir = os.path.join(data_dir, subject, session,
                                               'anat', 'dartel')

        # yield data for this subject
        yield subject_data
def _do_fmri_distortion_correction(subject_data,
                                   protocol="MOTOR",
                                   # I'm unsure of the readout time, but it is
                                   # constant across both PE directions and so
                                   # can be scaled to 1 (or any other nonzero
                                   # float)
                                   readout_time=.01392,
                                   realign=True,
                                   coregister=True,
                                   coreg_func_to_anat=True,
                                   dc=True,
                                   segment=False,
                                   normalize=False,
                                   func_write_voxel_sizes=None,
                                   anat_write_voxel_sizes=None,
                                   report=False,
                                   **kwargs):
    """Undistort task fMRI data for a given HCP subject."""
    directions = ['LR', 'RL']
    subject_data.sanitize()

    if dc:
        acq_params = [[1, 0, 0, readout_time], [-1, 0, 0, readout_time]]
        acq_params_file = os.path.join(subject_data.output_dir,
                                       "b0_acquisition_params.txt")
        np.savetxt(acq_params_file, acq_params, fmt='%f')

        fieldmap_files = [
            os.path.join(
                os.path.dirname(subject_data.func[sess]),
                "%s_3T_SpinEchoFieldMap_%s.nii.gz" % (
                    subject_data.subject_id, directions[sess]))
            for sess in range(subject_data.n_sessions)]
        sbref_files = [sess_func.replace(".nii", "_SBRef.nii")
                       for sess_func in subject_data.func]

        # prepare for smart caching
        mem = Memory(os.path.join(subject_data.output_dir, "cache_dir"))

        for x in [fieldmap_files, sbref_files, subject_data.func]:
            assert len(x) == 2
            for y in x:
                assert os.path.isfile(y), y

        # fslroi: peel the 0th volume of each fieldmap
        # (subprocess.getoutput replaces the Python 2 commands.getoutput)
        zeroth_fieldmap_files = []
        for fieldmap_file in fieldmap_files:
            if not os.path.isfile(fieldmap_file):
                print("Can't find fieldmap file %s; skipping subject %s" % (
                    fieldmap_file, subject_data.subject_id))
                return
            zeroth_fieldmap_file = os.path.join(
                subject_data.output_dir,
                "0th_%s" % os.path.basename(fieldmap_file))
            fslroi_cmd = "fsl5.0-fslroi %s %s 0 1" % (fieldmap_file,
                                                      zeroth_fieldmap_file)
            print("\r\nExecuting '%s' ..." % fslroi_cmd)
            print(mem.cache(subprocess.getoutput)(fslroi_cmd))
            zeroth_fieldmap_files.append(zeroth_fieldmap_file)

        # merge the 0th volume of both fieldmaps
        merged_zeroth_fieldmap_file = os.path.join(
            subject_data.output_dir, "merged_with_other_direction_%s" % (
                os.path.basename(zeroth_fieldmap_files[0])))
        fslmerge_cmd = "fsl5.0-fslmerge -t %s %s %s" % (
            merged_zeroth_fieldmap_file, zeroth_fieldmap_files[0],
            zeroth_fieldmap_files[1])
        print("\r\nExecuting '%s' ..." % fslmerge_cmd)
        print(mem.cache(subprocess.getoutput)(fslmerge_cmd))

        # do topup (learn distortion model)
        topup_results_basename = os.path.join(subject_data.output_dir,
                                              "topup_results")
        topup_cmd = (
            "fsl5.0-topup --imain=%s --datain=%s --config=b02b0.cnf "
            "--out=%s" % (merged_zeroth_fieldmap_file, acq_params_file,
                          topup_results_basename))
        print("\r\nExecuting '%s' ..." % topup_cmd)
        print(mem.cache(subprocess.getoutput)(topup_cmd))

        # apply the learnt deformations to absorb distortion
        dc_fmri_files = []
        for sess in range(2):
            # merge SBRef + task BOLD for current PE direction
            assert len(subject_data.func) == 2, subject_data
            fourD_plus_sbref = os.path.join(
                subject_data.output_dir,
                "sbref_plus_" + os.path.basename(subject_data.func[sess]))
            fslmerge_cmd = "fsl5.0-fslmerge -t %s %s %s" % (
                fourD_plus_sbref, sbref_files[sess], subject_data.func[sess])
            print("\r\nExecuting '%s' ..." % fslmerge_cmd)
            print(mem.cache(subprocess.getoutput)(fslmerge_cmd))

            # realign task BOLD to SBRef
            sess_output_dir = subject_data.session_output_dirs[sess]
            rfourD_plus_sbref = _do_subject_realign(SubjectData(
                func=[fourD_plus_sbref],
                output_dir=subject_data.output_dir, n_sessions=1,
                session_output_dirs=[sess_output_dir]),
                report=False).func[0]

            # apply topup to realigned images
            dc_rfourD_plus_sbref = os.path.join(
                subject_data.output_dir,
                "dc" + os.path.basename(rfourD_plus_sbref))
            applytopup_cmd = (
                "fsl5.0-applytopup --imain=%s --verbose --inindex=%i "
                "--topup=%s --out=%s --datain=%s --method=jac" % (
                    rfourD_plus_sbref, sess + 1, topup_results_basename,
                    dc_rfourD_plus_sbref, acq_params_file))
            print("\r\nExecuting '%s' ..." % applytopup_cmd)
            print(mem.cache(subprocess.getoutput)(applytopup_cmd))

            # recover undistorted task BOLD
            dc_rfmri_file = dc_rfourD_plus_sbref.replace("sbref_plus_", "")
            fslroi_cmd = "fsl5.0-fslroi %s %s 1 -1" % (dc_rfourD_plus_sbref,
                                                       dc_rfmri_file)
            print("\r\nExecuting '%s' ..." % fslroi_cmd)
            print(mem.cache(subprocess.getoutput)(fslroi_cmd))

            # sanity tricks
            if dc_rfmri_file.endswith(".nii"):
                dc_rfmri_file = dc_rfmri_file + ".gz"
            dc_fmri_files.append(dc_rfmri_file)

        subject_data.func = dc_fmri_files

    if isinstance(subject_data.func, str):
        subject_data.func = [subject_data.func]

    # continue preprocessing
    subject_data = do_subject_preproc(
        subject_data, realign=realign, coregister=coregister,
        coreg_anat_to_func=not coreg_func_to_anat, segment=True,
        normalize=False, report=report)

    # ok for GLM now
    return subject_data
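# Minimal usage sketch for the distortion-correction helper above (hedged:
# the subject id, file paths and directory layout below are illustrative
# stand-ins for an HCP "MOTOR" acquisition with LR/RL phase-encoding runs,
# matching SBRef images, and spin-echo fieldmaps alongside each run).
subject_data = SubjectData(
    subject_id="100307",
    func=["/data/100307/tfMRI_MOTOR_LR.nii.gz",
          "/data/100307/tfMRI_MOTOR_RL.nii.gz"],
    output_dir="/tmp/100307",
    n_sessions=2,
    session_output_dirs=["/tmp/100307/LR", "/tmp/100307/RL"])
subject_data = _do_fmri_distortion_correction(subject_data, dc=True,
                                              report=False)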
    output_dir=ABIDE_OUTPUT_DIR,
    dataset_id='ABIDE',
    # do_report=False,
    # do_dartel=True
)

if 0x0:  # always false: this smoke-test block is disabled
    for (with_anat, do_segment, do_normalize, fwhm,
         hard_link_output) in itertools.product(
            [False, True], [False, True], [False, True],
            [0, 8, [8, 8, 8]], [False, True]):
        # load spm auditory data
        sd = fetch_spm_auditory_data(os.path.join(
            os.environ['HOME'], 'CODE/datasets/spm_auditory'))
        subject_data1 = SubjectData(func=[sd.func],
                                    anat=sd.anat if with_anat else None)
        subject_data1.output_dir = "/tmp/kimbo/sub001/"

        # load spm multimodal fmri data
        sd = fetch_spm_multimodal_fmri_data(os.path.join(
            os.environ['HOME'], 'CODE/datasets/spm_multimodal_fmri'))
        subject_data2 = SubjectData(func=[sd.func1, sd.func2],
                                    anat=sd.anat if with_anat else None,
                                    session_id=['Session 1', "Session 2"])
        subject_data2.output_dir = "/tmp/kiki/sub001/"

        do_subjects_preproc([subject_data1, subject_data2],
                            do_dartel=True,
                            do_segment=do_segment,
                            do_normalize=do_normalize,
                            fwhm=fwhm,
# set dataset dir
if len(sys.argv) > 1:
    dataset_dir = sys.argv[1]
else:
    dataset_dir = os.path.join(this_dir, "spm_multimodal_faces")

# fetch spm multimodal_faces data
subject_data = fetch_spm_multimodal_fmri_data(dataset_dir)

# preprocess the data
subject_id = "sub001"
subject_data = SubjectData(output_dir=os.path.join(dataset_dir,
                                                   "pypreprocess",
                                                   subject_id),
                           subject_id=subject_id,
                           func=[subject_data.func1, subject_data.func2],
                           anat=subject_data.anat,
                           trials_ses1=subject_data.trials_ses1,
                           trials_ses2=subject_data.trials_ses2)
subject_data = do_subjects_preproc([subject_data], realign=True,
                                   coregister=True, segment=True,
                                   normalize=True)[0]

# experimental paradigm meta-params
stats_start_time = time.ctime()
tr = 2.
drift_model = 'Cosine'
hrf_model = 'Canonical With Derivative'
hfcut = 128.
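# Hedged sketch: with the meta-params above, a per-session design matrix can
# be built the same way as in the FSL-feeds snippet below (make_design_matrix
# with frame_times, hrf_model, drift_model, period_cut); n_scans and paradigm
# are assumed to come from the fetched session data.
frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
design_matrix = make_design_matrix(frame_times=frametimes,
                                   paradigm=paradigm,
                                   hrf_model=hrf_model,
                                   drift_model=drift_model,
                                   period_cut=hfcut)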
frametimes = np.linspace(0, (n_scans - 1) * TR, n_scans)
maximum_epoch_duration = max(EV1_epoch_duration, EV2_epoch_duration)
hfcut = 1.5 * maximum_epoch_duration  # why ?

"""construct design matrix"""
drift_model = 'Cosine'
hrf_model = 'Canonical With Derivative'
design_matrix = make_design_matrix(frame_times=frametimes,
                                   paradigm=paradigm,
                                   hrf_model=hrf_model,
                                   drift_model=drift_model,
                                   period_cut=hfcut)

"""fetch input data"""
_subject_data = fetch_fsl_feeds()
subject_data = SubjectData()
subject_data.subject_id = "sub001"
subject_data.func = _subject_data.func
subject_data.anat = _subject_data.anat

output_dir = os.path.join(_subject_data.data_dir, "pypreprocess_output")
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
subject_data.output_dir = os.path.join(output_dir, subject_data.subject_id)

"""preprocess the data"""
results = do_subjects_preproc(
    [subject_data],
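# Hedged illustration (not part of the original pipeline): once the design
# matrix X is built, fitting the GLM amounts to ordinary least squares on
# each voxel's time series, beta = pinv(X) Y. Y below is random data standing
# in for the (n_scans, n_voxels) matrix of preprocessed BOLD signals.
X = np.asarray(design_matrix)  # (n_scans, n_regressors)
Y = np.random.randn(X.shape[0], 1000)  # fake BOLD data, 1000 voxels
beta = np.linalg.pinv(X).dot(Y)  # (n_regressors, n_voxels) effect estimates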
# fetch HAXBY dataset
N_SUBJECTS = 2
haxby_data = fetch_haxby(n_subjects=N_SUBJECTS)

# set output dir
OUTPUT_DIR = os.path.join(os.path.dirname(haxby_data.mask), "haxby_runs")
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

# get subject data
subjects = []
for subject_id in set([os.path.basename(os.path.dirname(x))
                       for x in haxby_data.func]):
    # instantiate subject_data object
    subject_data = SubjectData()
    subject_data.subject_id = subject_id
    subject_data.session_id = "haxby2001"

    # set func
    subject_data.func = [x for x in haxby_data.func if subject_id in x]
    assert len(subject_data.func) == 1
    subject_data.func = subject_data.func[0]

    # set anat
    subject_data.anat = [x for x in haxby_data.anat if subject_id in x]
    assert len(subject_data.anat) == 1
    subject_data.anat = subject_data.anat[0]

    # set subject output directory and collect the subject (completed here
    # following the HAXBY factory earlier in this section; the append is an
    # assumed continuation of the truncated snippet)
    subject_data.output_dir = os.path.join(OUTPUT_DIR,
                                           subject_data.subject_id)
    subjects.append(subject_data)