def extract_slicetime(epi, bids_dir):
    """Write an EPI run's slice-timing vector to disk and return it with the TR.

    Parameters
    ----------
    epi : str
        Path to the functional image whose BIDS sidecar is queried.
    bids_dir : str
        Root of the BIDS dataset.

    Returns
    -------
    tuple of (str, str)
        AFNI-style ``'@<timing-file>'`` reference and the repetition time.
    """
    from bids.grabbids import BIDSLayout
    import os
    import csv

    # Output goes to the node folder: two directories up, because of the
    # iterfield working-directory layout.
    out_dir = os.path.dirname(os.path.dirname(os.getcwd()))

    layout = BIDSLayout(bids_dir)
    timing = layout.get_metadata(epi)['SliceTiming']
    repetition_time = layout.get_metadata(epi)['RepetitionTime']

    base = os.path.splitext(os.path.basename(epi))[0]
    out_path = os.path.join(out_dir, '{}.SLICETIME'.format(base))

    # One space-separated row of slice onsets.
    with open(out_path, 'w') as timing_file:
        csv.writer(timing_file, delimiter=' ').writerow(timing)

    return ('@{}'.format(out_path), str(repetition_time))
def _get_func_and_confounds(fmriprep_folder, sourcedata_folder):
    """Pair each fMRIPrep-preprocessed functional image with its confounds
    file and the raw image's sidecar metadata.

    Returns a list of (preproc_file, confounds_file, metadata_dict) tuples.
    """
    derived = BIDSLayout(fmriprep_folder)
    raw = BIDSLayout(sourcedata_folder)

    bolds = derived.get(extensions=['.nii', 'nii.gz'],
                        modality='func',
                        type='preproc')

    confounds = []
    metadata = []
    for bold in bolds:
        # Carry over whichever BIDS entities this file actually defines.
        entities = {key: getattr(bold, key)
                    for key in ('subject', 'run', 'task', 'session')
                    if hasattr(bold, key)}

        confounds.append(derived.get(type='confounds', **entities)[0])

        raw_matches = raw.get(modality='func', extensions='nii.gz', **entities)
        assert (len(raw_matches) == 1)
        metadata.append(raw.get_metadata(raw_matches[0].filename))

    return list(zip(bolds, confounds, metadata))
def test_get_metadata():
    """RepetitionTime of the target BOLD run in the 7t_trt fixture is 3.0 s."""
    data_dir = join(dirname(__file__), 'data', '7t_trt')
    target = ('sub-03/ses-2/func/'
              'sub-03_ses-2_task-rest_acq-fullbrain_run-2_bold.nii.gz')
    metadata = BIDSLayout(data_dir).get_metadata(join(data_dir, target))
    assert metadata['RepetitionTime'] == 3.0
def get_TR(in_file):
    """Return the RepetitionTime recorded for *in_file* in the ABIDE1 BIDS tree.

    The dataset root is hard-coded to the ABIDE1 raw-data location.
    """
    from bids.grabbids import BIDSLayout

    layout = BIDSLayout('/home1/varunk/data/ABIDE1/RawDataBIDs')
    return layout.get_metadata(path=in_file)['RepetitionTime']
def volumeCorrect(data_directory, subject_list, run=1, session=None, vols=None):
    '''
    Select subjects whose functional run has at least *vols* volumes.

    Loads the BIDS layout, reads the volume count from each subject's BOLD
    metadata (falling back to the image header for mixed-volume sites), and
    keeps subjects meeting the threshold.

    Parameters
    ----------
    data_directory : str
        Root of the BIDS dataset.
    subject_list : list of str
        Subject IDs to consider.
    run : int
        BOLD run number to look up (default 1).
    session : sequence or None
        When given, session labels to try in order; the first label with a
        matching BOLD file is used, falling back to the second.
    vols : int or None
        Minimum number of volumes required.  ``None`` disables the
        threshold and keeps every subject with a functional file.
        (Fix: the original compared ``int >= None``, which raises
        TypeError on Python 3 whenever the default was used.)

    Returns
    -------
    tuple of (list, dict)
        Subjects passing the threshold, and a mapping subject_id -> volume
        count for those subjects.
    '''
    from bids.grabbids import BIDSLayout

    print('Data Directory %s' % data_directory)
    print('Run %s Session %s' % (run, session))

    layout = BIDSLayout(data_directory)

    # None means "no threshold": any subject with a functional file passes.
    threshold = 0 if vols is None else vols

    subid_vol_dict = {}
    selected_subjects = []  # do not shadow the input subject_list
    for subject_id in subject_list:
        if session is None:
            func_file_path = [f.filename for f in layout.get(
                subject=subject_id, type='bold', run=run,
                extensions=['nii', 'nii.gz'])]
            if len(func_file_path) == 0:
                print('No Func file: %s' % subject_id)
                continue
        else:
            func_file_path = [f.filename for f in layout.get(
                subject=subject_id, type='bold', session=session[0],
                run=run, extensions=['nii', 'nii.gz'])]
            if len(func_file_path) == 0:
                # Fall back to the second session label.
                func_file_path = [f.filename for f in layout.get(
                    subject=subject_id, type='bold', session=session[1],
                    run=run, extensions=['nii', 'nii.gz'])]
                if len(func_file_path) == 0:
                    print('No Func file: %s' % subject_id)
                    continue

        metadata = layout.get_metadata(path=func_file_path[0])
        volumes = metadata['NumberofMeasurements']
        try:
            volumes = int(volumes)
        except ValueError:
            # Mixed Volumes site: read the count from the image itself.
            brain_img = nib.load(func_file_path[0])
            volumes = brain_img.shape[-1]

        if volumes >= threshold:
            subid_vol_dict[subject_id] = volumes
            selected_subjects.append(subject_id)

    return selected_subjects, subid_vol_dict
def _getMetadata(in_file, data_directory):
    """Look up slice-timing parameters for *in_file* from its BIDS sidecar.

    Returns (tr, index_dir, interleaved): the repetition time, whether the
    acquisition was descending, and whether it was interleaved.  Missing
    metadata keys fall back to TR=2.0 and interleaved ascending.
    """
    from bids.grabbids import BIDSLayout
    import json  # retained from the original import list; unused below

    # Defaults: interleaved ascending.
    interleaved = True
    index_dir = False

    layout = BIDSLayout(data_directory)
    metadata = layout.get_metadata(path=in_file)
    print(metadata)

    try:
        tr = metadata['RepetitionTime']
    except KeyError:
        print('Key RepetitionTime not found in task-rest_bold.json so using a default of 2.0 ')
        tr = 2

    try:
        slice_order = metadata['SliceAcquisitionOrder']
    except KeyError:
        print('Key SliceAcquisitionOrder not found in task-rest_bold.json so using a default of interleaved ascending ')
        return tr, index_dir, interleaved

    # Expected form like "Sequential Descending" / "Interleaved Ascending".
    order_words = slice_order.split(' ')
    if order_words[0] == 'Sequential':
        interleaved = False
    if order_words[1] == 'Descending':
        index_dir = True

    return tr, index_dir, interleaved
def _getMetadata(in_file):
    """Read slice-timing metadata for *in_file* from the ABIDE1 BIDS tree.

    Returns
    -------
    tuple
        ``(tr, index_dir, interleaved)``: repetition time, True for a
        descending slice order, and False for a sequential (non-interleaved)
        acquisition.  Missing keys fall back to TR=2.0 and interleaved
        ascending.
    """
    from bids.grabbids import BIDSLayout
    import logging

    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)

    # Fix: attach the file handler only once.  The original added a fresh
    # FileHandler on every call, so each record was written to progress.log
    # once per previous invocation (duplicated log lines).
    if not logger.handlers:
        handler = logging.FileHandler('progress.log')
        logger.addHandler(handler)

    # Defaults: interleaved ascending.
    interleaved = True
    index_dir = False

    data_directory = '/home1/varunk/data/ABIDE1/RawDataBIDs'
    layout = BIDSLayout(data_directory)
    metadata = layout.get_metadata(path=in_file)
    print(metadata)
    logger.info('Extracting Meta Data of file: %s', in_file)

    try:
        tr = metadata['RepetitionTime']
    except KeyError:
        print(
            'Key RepetitionTime not found in task-rest_bold.json so using a default of 2.0 '
        )
        tr = 2
        logger.error(
            'Key RepetitionTime not found in task-rest_bold.json for file %s so using a default of 2.0 ',
            in_file)

    try:
        slice_order = metadata['SliceAcquisitionOrder']
    except KeyError:
        print(
            'Key SliceAcquisitionOrder not found in task-rest_bold.json so using a default of interleaved ascending '
        )
        logger.error(
            'Key SliceAcquisitionOrder not found in task-rest_bold.json for file %s so using a default of interleaved ascending',
            in_file)
        return tr, index_dir, interleaved

    # Expected form like "Sequential Descending" / "Interleaved Ascending".
    if slice_order.split(' ')[0] == 'Sequential':
        interleaved = False
    if slice_order.split(' ')[1] == 'Descending':
        index_dir = True

    return tr, index_dir, interleaved
def get_preproc_data(dset, cfg, data_dir='../data'):
    """
    Get echo-sorted list of fMRIPrep-preprocessed files and echo times in ms.

    Parameters
    ----------
    dset : str
        Dataset name (subdirectory of *data_dir*).
    cfg : dict
        Dataset configuration; must contain ``'version'`` and may contain
        the BIDS filters ``'subject'``, ``'run'``, ``'task'``.
    data_dir : str
        Root data directory.

    Returns
    -------
    tuple of (list, list, str or None)
        Preprocessed file paths sorted by echo, echo times in ms, and the
        brain-mask path derived from the first echo.  ``mask_file`` is None
        when the layout exposes no echoes (fix: previously this raised
        NameError on return).

    Raises
    ------
    Exception
        If anything other than exactly one raw file matches an echo.
    """
    keys = ['subject', 'run', 'task']
    data_dir = op.abspath(data_dir)
    dset_dir = op.join(data_dir, dset, cfg['version'], 'uncompressed')
    layout = BIDSLayout(dset_dir)
    kwargs = {k: cfg[k] for k in keys if k in cfg.keys()}

    echoes = sorted(layout.get_echoes())
    in_files = []
    echo_times = []
    mask_file = None  # guard: stays None if no echoes are found
    for i, echo in enumerate(echoes):
        # Get echo time in ms
        orig_file = layout.get(modality='func', type='bold',
                               extensions='nii.gz', echo=echo, **kwargs)
        if len(orig_file) != 1:
            raise Exception('{0} files found for echo {1} of {2}: '
                            '{3}'.format(len(orig_file), echo, dset, cfg))
        orig_file = orig_file[0].filename
        metadata = layout.get_metadata(orig_file)
        echo_time = metadata['EchoTime'] * 1000
        echo_times.append(np.round(echo_time, 3))  # be wary, but seems okay

        # Derive the preprocessed path for this echo.  Missing files are
        # tolerated here; callers are expected to check existence.
        func_file = orig_file.replace(
            dset_dir, op.join(dset_dir, 'derivatives/fmriprep'))
        func_file = func_file.replace(
            'bold.nii.gz', 'bold_space-MNI152NLin2009cAsym_preproc.nii.gz')
        in_files.append(func_file)

        # Brain mask comes from the first echo only.
        if i == 0:
            mask_file = func_file.replace('_preproc.nii.gz',
                                          '_brainmask.nii.gz')
    return in_files, echo_times, mask_file
def get_scan_duration(output_dir, modality="func", task="rest"):
    """Collect ScanDurationSec for every subject/session and write a TSV
    summary (``scan_duration_<modality>_<task>.tsv``) into *output_dir*."""
    layout = BIDSLayout(output_dir)
    scan_duration = pd.DataFrame([])

    for sub_id in layout.get_subjects():
        sub_dir = os.path.join(output_dir, "sub-" + sub_id)
        for ses_id in layout.get_sessions(subject=sub_id):
            sub_ses_path = os.path.join(sub_dir, "ses-" + ses_id)
            matches = layout.get(subject=sub_id, session=ses_id,
                                 modality=modality, task=task,
                                 extensions='.nii.gz')
            if len(matches) > 1:
                raise Exception(
                    "something went wrong, more than one %s %s file detected: %s"
                    % (modality, task, matches))
            elif len(matches) == 1:
                duration = (layout.get_metadata(
                    matches[0].filename)["ScanDurationSec"])
                # "sesssion_id" spelling kept as-is: it is a column name in
                # the emitted TSV and downstream readers may rely on it.
                row = pd.DataFrame(OrderedDict([
                    ("subject_id", sub_id),
                    ("sesssion_id", ses_id),
                    ("scan_duration_s", [duration]),
                ]))
                scan_duration = scan_duration.append(row)

    out_str = modality
    if task:
        out_str += "_" + task
    output_file = os.path.join(output_dir, "scan_duration_%s.tsv" % out_str)
    print("Writing scan duration to %s" % output_file)
    to_tsv(scan_duration, output_file)
def run_rsHRF():
    """Command-line entry point: validate CLI arguments and run the rsHRF
    4-D analysis in one of three modes — single input file + atlas, BIDS
    directory + one atlas, or BIDS directory + per-subject brainmasks."""
    parser = get_parser()
    args = parser.parse_args()
    # Collect each argparse group into a dict keyed by group title so the
    # 'Parameters' group can be carried around as a plain dict.
    arg_groups = {}
    for group in parser._action_groups:
        group_dict = {
            a.dest: getattr(args, a.dest, None)
            for a in group._group_actions
        }
        arg_groups[group.title] = group_dict
    para = arg_groups['Parameters']
    # --- mutually-exclusive / required argument checks ---
    if args.input_file is not None and args.analysis_level:
        parser.error(
            'analysis_level cannot be used with --input_file, do not supply it'
        )
    if args.input_file is not None and args.participant_label:
        parser.error(
            'participant_labels are not to be used with --input_file, do not supply it'
        )
    if args.input_file is not None and args.brainmask:
        parser.error(
            '--brainmask cannot be used with --input_file, use --atlas instead'
        )
    if args.bids_dir is not None and not args.analysis_level:
        parser.error(
            'analysis_level needs to be supplied with bids_dir, choices=[participant]'
        )
    if args.input_file is not None and (not args.input_file.endswith(
            ('.nii', '.nii.gz'))):
        parser.error('--input_file should end with .nii or .nii.gz')
    if args.atlas is not None and (not args.atlas.endswith(
            ('.nii', '.nii.gz'))):
        parser.error('--atlas should end with .nii or .nii.gz')
    if args.input_file is not None and args.atlas is not None:
        # carry analysis with input_file and atlas
        # TR from the NIfTI header: the last zoom is the time step.
        TR = spm_dep.spm.spm_vol(args.input_file).header.get_zooms()[-1]
        if TR <= 0:
            if para['TR'] <= 0:
                parser.error('Please supply a valid TR using -TR argument')
        else:
            if para['TR'] == -1:
                # -1 is the "unset" sentinel: adopt the header TR.
                para['TR'] = TR
            elif para['TR'] <= 0:
                print('Invalid TR supplied, using implicit TR: {0}'.format(TR))
                para['TR'] = TR
        para['dt'] = para['TR'] / para['T']
        # Integer lag grid (in dt units) spanning the onset search window.
        para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                np.fix(para['max_onset_search'] / para['dt']) + 1,
                                dtype='int')
        fourD_rsHRF.demo_4d_rsHRF(args.input_file, args.atlas,
                                  args.output_dir, para, args.n_jobs,
                                  mode='input w/ atlas')
    if args.bids_dir is not None and args.atlas is not None:
        # carry analysis with bids_dir and 1 atlas
        layout = BIDSLayout(args.bids_dir)
        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()
        if not subjects_to_analyze:
            parser.error(
                'Could not find participants. Please make sure the BIDS data '
                'structure is present and correct. Datasets can be validated online '
                'using the BIDS Validator (http://incf.github.io/bids-validator/).'
            )
        all_inputs = layout.get(modality='func',
                                subject=subjects_to_analyze,
                                task='rest',
                                type='preproc',
                                extensions=['nii', 'nii.gz'])
        if not all_inputs != []:
            parser.error(
                'There are no files of type *preproc.nii / *preproc.nii.gz '
                'Please make sure to have at least one file of the above type '
                'in the BIDS specification')
        else:
            for file_count in range(len(all_inputs)):
                # Prefer the sidecar RepetitionTime; fall back to the header.
                try:
                    TR = layout.get_metadata(
                        all_inputs[file_count].filename)['RepetitionTime']
                except KeyError as e:
                    TR = spm_dep.spm.spm_vol(
                        all_inputs[file_count].filename).header.get_zooms()[-1]
                para['TR'] = TR
                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(
                    np.fix(para['min_onset_search'] / para['dt']),
                    np.fix(para['max_onset_search'] / para['dt']) + 1,
                    dtype='int')
                fourD_rsHRF.demo_4d_rsHRF(all_inputs[file_count], args.atlas,
                                          args.output_dir, para, args.n_jobs,
                                          mode='bids w/ atlas')
    if args.bids_dir is not None and args.brainmask:
        # carry analysis with bids_dir and brainmask
        layout = BIDSLayout(args.bids_dir)
        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()
        if not subjects_to_analyze:
            parser.error(
                'Could not find participants. Please make sure the BIDS data '
                'structure is present and correct. Datasets can be validated online '
                'using the BIDS Validator (http://incf.github.io/bids-validator/).'
            )
        all_inputs = layout.get(modality='func',
                                subject=subjects_to_analyze,
                                task='rest',
                                type='preproc',
                                extensions=['nii', 'nii.gz'])
        all_masks = layout.get(modality='func',
                               subject=subjects_to_analyze,
                               task='rest',
                               type='brainmask',
                               extensions=['nii', 'nii.gz'])
        if not all_inputs != []:
            parser.error(
                'There are no files of type *preproc.nii / *preproc.nii.gz '
                'Please make sure to have at least one file of the above type '
                'in the BIDS specification')
        if not all_masks != []:
            parser.error(
                'There are no files of type *brainmask.nii / *brainmask.nii.gz '
                'Please make sure to have at least one file of the above type '
                'in the BIDS specification')
        if len(all_inputs) != len(all_masks):
            parser.error(
                'The number of *preproc.nii / .nii.gz and the number of '
                '*brainmask.nii / .nii.gz are different. Please make sure that '
                'there is one mask for each input_file present')
        # Sort both lists so inputs and masks pair up positionally, then
        # verify the pairing by comparing filename prefixes.
        all_inputs.sort()
        all_masks.sort()
        all_prefix_match = False
        prefix_match_count = 0
        for i in range(len(all_inputs)):
            input_prefix = all_inputs[i].filename.split('/')[-1].split(
                '_preproc')[0]
            mask_prefix = all_masks[i].filename.split('/')[-1].split(
                '_brainmask')[0]
            if input_prefix == mask_prefix:
                prefix_match_count += 1
            else:
                all_prefix_match = False
                break
        if prefix_match_count == len(all_inputs):
            all_prefix_match = True
        if not all_prefix_match:
            parser.error(
                'The mask and input files should have the same prefix for correspondence. '
                'Please consider renaming your files')
        else:
            for file_count in range(len(all_inputs)):
                # Sidecar TR first, header TR as fallback (same as above).
                try:
                    TR = layout.get_metadata(
                        all_inputs[file_count].filename)['RepetitionTime']
                except KeyError as e:
                    TR = spm_dep.spm.spm_vol(
                        all_inputs[file_count].filename).header.get_zooms()[-1]
                para['TR'] = TR
                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(
                    np.fix(para['min_onset_search'] / para['dt']),
                    np.fix(para['max_onset_search'] / para['dt']) + 1,
                    dtype='int')
                fourD_rsHRF.demo_4d_rsHRF(all_inputs[file_count],
                                          all_masks[file_count],
                                          args.output_dir, para, args.n_jobs,
                                          mode='bids')
"fmapphase": "NONE", "echodiff": "NONE", "t1samplespacing": "NONE", "t2samplespacing": "NONE", "unwarpdir": "NONE", "avgrdcmethod": "NONE", "SEPhaseNeg": "NONE", "SEPhasePos": "NONE", "echospacing": "NONE", "seunwarpdir": "NONE", "NSUHgdcoeffs": "NONE" } if fieldmap_set: #t1_spacing = layout.get_metadata(t1ws[0])["EffectiveEchoSpacing"] #Dicom(0019,1018) heudiconv t1_spacing = layout.get_metadata( t1ws[0])["global"]["const"]["CsaImage.RealDwellTime"] / ( 1E9) #in nanoseconds #t2_spacing = layout.get_metadata(t2ws[0])["EffectiveEchoSpacing"] t2_spacing = layout.get_metadata( t2ws[0])["global"]["const"]["CsaImage.RealDwellTime"] / ( 1E9) #in nanoseconds #unwarpdir = layout.get_metadata(t1ws[0])["PhaseEncodingDirection"] #unwarpdir = unwarpdir.replace("i-", "-i").replace("j-", "-j").replace("k-", "-k").replace("i","x").replace("j", "y").replace("k", "z") #if len(unwarpdir) == 2: # unwarpdir = unwarpdir[0]+"-" unwarpdir = "z" fmap_args.update({ "t1samplespacing": "%.8f" % t1_spacing, "t2samplespacing": "%.8f" % t2_spacing,
# Field-map arguments for the pipeline: everything defaults to the sentinel
# "NONE" (field-map correction disabled) and is overwritten below when a
# field map is available for this subject.
fmap_args = {
    "fmapmag": "NONE",
    "fmapphase": "NONE",
    "echodiff": "NONE",
    "t1samplespacing": "NONE",
    "t2samplespacing": "NONE",
    "unwarpdir": "NONE",
    "avgrdcmethod": "NONE",
    "SEPhaseNeg": "NONE",
    "SEPhasePos": "NONE",
    "echospacing": "NONE",
    "seunwarpdir": "NONE"
}
if fieldmap_set:
    # Sample spacings come straight from the structural images' BIDS sidecars.
    t1_spacing = layout.get_metadata(t1ws[0])["EffectiveEchoSpacing"]
    t2_spacing = layout.get_metadata(t2ws[0])["EffectiveEchoSpacing"]
    # Map BIDS axis codes (i/j/k) onto x/y/z for the unwarp direction.
    unwarpdir = layout.get_metadata(t1ws[0])["PhaseEncodingDirection"]
    unwarpdir = unwarpdir.replace("i", "x").replace("j",
                                                    "y").replace("k", "z")
    if len(unwarpdir) == 2:
        # Two characters means a negative direction (e.g. "y-"); keep the
        # axis letter and append the minus sign.
        unwarpdir = unwarpdir[0] + "-"
    fmap_args.update({
        "t1samplespacing": "%.8f" % t1_spacing,
        "t2samplespacing": "%.8f" % t2_spacing,
        "unwarpdir": unwarpdir
    })
def run_rsHRF():
    """Command-line entry point for rsHRF: validates arguments and runs HRF
    estimation in one of several modes — GUI, plain-text time series,
    single input file (NIfTI/GIfTI, optionally with an atlas), or a BIDS
    dataset combined with an atlas or per-subject brainmasks."""
    parser = get_parser()
    args = parser.parse_args()
    # Collect each argparse group into a dict keyed by group title so the
    # 'Parameters' group can be carried around as a plain dict.
    arg_groups = {}
    for group in parser._action_groups:
        group_dict = {a.dest: getattr(args, a.dest, None)
                      for a in group._group_actions
                      }
        arg_groups[group.title] = group_dict
    para = arg_groups['Parameters']
    nargs = len(sys.argv)
    temporal_mask = []
    # --- argument validation ---
    if (not args.GUI) and (args.output_dir is None):
        parser.error('--output_dir is required when executing in command-line interface')
    if (not args.GUI) and (args.estimation is None):
        parser.error('--estimation rule is required when executing in command-line interface')
    if (args.GUI):
        if (nargs == 2):
            # --GUI must be the only argument on the command line.
            try:
                from .rsHRF_GUI import run
                run.run()
            except ModuleNotFoundError:
                parser.error('--GUI should not be used inside a Docker container')
        else:
            parser.error('--no other arguments should be supplied with --GUI')
    if (args.input_file is not None or args.ts is not None) and args.analysis_level:
        parser.error('analysis_level cannot be used with --input_file or --ts, do not supply it')
    if (args.input_file is not None or args.ts is not None) and args.participant_label:
        parser.error('participant_labels are not to be used with --input_file or --ts, do not supply it')
    if args.input_file is not None and args.brainmask:
        parser.error('--brainmask cannot be used with --input_file, use --atlas instead')
    if args.ts is not None and (args.brainmask or args.atlas):
        parser.error('--atlas or --brainmask cannot be used with --ts, do not supply it')
    if args.bids_dir is not None and not (args.brainmask or args.atlas):
        parser.error('--atlas or --brainmask needs to be supplied with --bids_dir')
    if args.bids_dir is not None and not args.analysis_level:
        parser.error('analysis_level needs to be supplied with bids_dir, choices=[participant]')
    if args.input_file is not None and (not args.input_file.endswith(('.nii', '.nii.gz', '.gii', '.gii.gz'))):
        parser.error('--input_file should end with .gii, .gii.gz, .nii or .nii.gz')
    if args.atlas is not None and (not args.atlas.endswith(('.nii',
                                                            '.nii.gz', '.gii', '.gii.gz'))):
        parser.error('--atlas should end with .gii, .gii.gz, .nii or .nii.gz')
    if args.ts is not None and (not args.ts.endswith(('.txt'))):
        parser.error('--ts file should end with .txt')
    if args.temporal_mask is not None and (not args.temporal_mask.endswith(('.dat'))):
        # NOTE(review): "ile" in the message below looks like a typo for
        # "file"; left unchanged here (message text is runtime behavior).
        parser.error('--temporal_mask ile should end with ".dat"')
    if args.temporal_mask is not None:
        # Read the mask as a flat stream of '0'/'1' characters; any other
        # character (whitespace, newlines) is ignored.
        # NOTE(review): the file handle is never closed.
        f = open(args.temporal_mask, 'r')
        for line in f:
            for each in line:
                if each in ['0', '1']:
                    temporal_mask.append(int(each))
    if args.estimation == 'sFIR' or args.estimation == 'FIR':
        # FIR estimators run on the native grid (no temporal upsampling).
        para['T'] = 1
    if args.ts is not None:
        # --- mode: plain-text time series ---
        # NOTE: file_type here is the full (root, ext) tuple from splitext,
        # unlike the single-extension string used in the branches below.
        file_type = op.splitext(args.ts)
        if para['TR'] <= 0:
            parser.error('Please supply a valid TR using -TR argument')
        else:
            TR = para['TR']
        para['dt'] = para['TR'] / para['T']
        # Integer lag grid (in dt units) over the onset search window.
        para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                np.fix(para['max_onset_search'] / para['dt']) + 1,
                                dtype='int')
        fourD_rsHRF.demo_rsHRF(args.ts, None, args.output_dir, para,
                               args.n_jobs, file_type, mode='time-series',
                               temporal_mask=temporal_mask,
                               wiener=args.wiener)
    if args.input_file is not None:
        # --- mode: single input file (optionally with an atlas) ---
        if args.atlas is not None:
            # Input and atlas must both be NIfTI or both be GIfTI.
            if (args.input_file.endswith(('.nii', '.nii.gz')) and args.atlas.endswith(('.gii', '.gii.gz'))) or (args.input_file.endswith(('.gii', '.gii.gz')) and args.atlas.endswith(('.nii', '.nii.gz'))):
                parser.error('--atlas and input_file should be of the same type [NIfTI or GIfTI]')
        # carry analysis with input_file and atlas
        # Normalize the extension: for ".nii.gz"/".gii.gz" keep both parts.
        file_type = op.splitext(args.input_file)
        if file_type[-1] == ".gz":
            file_type = op.splitext(file_type[-2])[-1] + file_type[-1]
        else:
            file_type = file_type[-1]
        if ".nii" in file_type:
            # NIfTI: TR is the last header zoom.
            TR = (spm_dep.spm.spm_vol(args.input_file).header.get_zooms())[-1]
        else:
            # GIfTI carries no TR in the header; it must come from the CLI.
            if para['TR'] == -1:
                parser.error('Please supply a valid TR using -TR argument')
            else:
                TR = para['TR']
        if TR <= 0:
            if para['TR'] <= 0:
                parser.error('Please supply a valid TR using -TR argument')
        else:
            if para['TR'] == -1:
                # -1 is the "unset" sentinel: adopt the implicit TR.
                para['TR'] = TR
            elif para['TR'] <= 0:
                print('Invalid TR supplied, using implicit TR: {0}'.format(TR))
                para['TR'] = TR
        para['dt'] = para['TR'] / para['T']
        para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                np.fix(para['max_onset_search'] / para['dt']) + 1,
                                dtype='int')
        fourD_rsHRF.demo_rsHRF(args.input_file, args.atlas, args.output_dir,
                               para, args.n_jobs, file_type, mode='input',
                               temporal_mask=temporal_mask,
                               wiener=args.wiener)
    if args.bids_dir is not None and args.atlas is not None:
        # --- mode: BIDS dataset with a single atlas ---
        layout = BIDSLayout(args.bids_dir)
        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()
        if not subjects_to_analyze:
            parser.error('Could not find participants. Please make sure the BIDS data '
                         'structure is present and correct. Datasets can be validated online '
                         'using the BIDS Validator (http://incf.github.io/bids-validator/).')
        if not args.atlas.endswith(('.nii', '.nii.gz')):
            parser.error('--atlas should end with .nii or .nii.gz')
        all_inputs = layout.get(modality='func', subject=subjects_to_analyze,
                                task='rest', type='preproc',
                                extensions=['nii', 'nii.gz'])
        if not all_inputs != []:
            parser.error('There are no files of type *preproc.nii / *preproc.nii.gz '
                         'Please make sure to have at least one file of the above type '
                         'in the BIDS specification')
        else:
            # num_errors counts failures: incremented before each attempt
            # and decremented again on success.
            num_errors = 0
            for file_count in range(len(all_inputs)):
                # Sidecar TR first, header TR as fallback.
                try:
                    TR = layout.get_metadata(all_inputs[file_count].filename)['RepetitionTime']
                except KeyError as e:
                    TR = spm_dep.spm.spm_vol(all_inputs[file_count].filename).header.get_zooms()[-1]
                para['TR'] = TR
                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                        np.fix(para['max_onset_search'] / para['dt']) + 1,
                                        dtype='int')
                num_errors += 1
                # NOTE(review): file_type here is whatever value an earlier
                # branch left behind; it is unbound if neither --ts nor
                # --input_file ran before this branch — confirm upstream.
                try:
                    fourD_rsHRF.demo_rsHRF(all_inputs[file_count], args.atlas,
                                           args.output_dir, para, args.n_jobs,
                                           file_type, mode='bids w/ atlas',
                                           temporal_mask=temporal_mask,
                                           wiener=args.wiener)
                    num_errors -= 1
                except ValueError as err:
                    print(err.args[0])
                except:
                    print("Unexpected error:", sys.exc_info()[0])
            success = len(all_inputs) - num_errors
            if success == 0:
                raise RuntimeError('Dimensions were inconsistent for all input-mask pairs; \n'
                                   'No inputs were processed!')
    if args.bids_dir is not None and args.brainmask:
        # --- mode: BIDS dataset with per-subject brainmasks ---
        layout = BIDSLayout(args.bids_dir)
        if args.participant_label:
            input_subjects = args.participant_label
            subjects_to_analyze = layout.get_subjects(subject=input_subjects)
        else:
            subjects_to_analyze = layout.get_subjects()
        if not subjects_to_analyze:
            parser.error('Could not find participants. Please make sure the BIDS data '
                         'structure is present and correct. Datasets can be validated online '
                         'using the BIDS Validator (http://incf.github.io/bids-validator/).')
        all_inputs = layout.get(modality='func', subject=subjects_to_analyze,
                                task='rest', type='preproc',
                                extensions=['nii', 'nii.gz'])
        all_masks = layout.get(modality='func', subject=subjects_to_analyze,
                               task='rest', type='brainmask',
                               extensions=['nii', 'nii.gz'])
        if not all_inputs != []:
            parser.error('There are no files of type *preproc.nii / *preproc.nii.gz '
                         'Please make sure to have at least one file of the above type '
                         'in the BIDS specification')
        if not all_masks != []:
            parser.error('There are no files of type *brainmask.nii / *brainmask.nii.gz '
                         'Please make sure to have at least one file of the above type '
                         'in the BIDS specification')
        if len(all_inputs) != len(all_masks):
            parser.error('The number of *preproc.nii / .nii.gz and the number of '
                         '*brainmask.nii / .nii.gz are different. Please make sure that '
                         'there is one mask for each input_file present')
        # Pair inputs and masks positionally after sorting, then verify the
        # pairing by comparing filename prefixes.
        all_inputs.sort()
        all_masks.sort()
        all_prefix_match = False
        prefix_match_count = 0
        for i in range(len(all_inputs)):
            input_prefix = all_inputs[i].filename.split('/')[-1].split('_preproc')[0]
            mask_prefix = all_masks[i].filename.split('/')[-1].split('_brainmask')[0]
            if input_prefix == mask_prefix:
                prefix_match_count += 1
            else:
                all_prefix_match = False
                break
        if prefix_match_count == len(all_inputs):
            all_prefix_match = True
        if not all_prefix_match:
            parser.error('The mask and input files should have the same prefix for correspondence. '
                         'Please consider renaming your files')
        else:
            num_errors = 0
            for file_count in range(len(all_inputs)):
                file_type = op.splitext(all_inputs[file_count].filename)[1]
                if file_type == ".nii" or file_type == ".nii.gz":
                    # NIfTI: sidecar TR, falling back to the header.
                    # NOTE(review): op.splitext yields ".gz" (never
                    # ".nii.gz") for gzipped files, so *.nii.gz inputs take
                    # the else-branch below — confirm intended.
                    try:
                        TR = layout.get_metadata(all_inputs[file_count].filename)['RepetitionTime']
                    except KeyError as e:
                        TR = spm_dep.spm.spm_vol(all_inputs[file_count].filename).header.get_zooms()[-1]
                    para['TR'] = TR
                else:
                    # GIfTI path: TimeStep metadata is in milliseconds,
                    # hence the * 0.001 conversion to seconds.
                    spm_dep.spm.spm_vol(all_inputs[file_count].filename)
                    TR = spm_dep.spm.spm_vol(all_inputs[file_count].filename).get_arrays_from_intent("NIFTI_INTENT_TIME_SERIES")[0].meta.get_metadata()["TimeStep"]
                    para['TR'] = float(TR) * 0.001
                para['dt'] = para['TR'] / para['T']
                para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),
                                        np.fix(para['max_onset_search'] / para['dt']) + 1,
                                        dtype='int')
                num_errors += 1
                try:
                    fourD_rsHRF.demo_rsHRF(all_inputs[file_count],
                                           all_masks[file_count],
                                           args.output_dir, para,
                                           args.n_jobs, mode='bids',
                                           temporal_mask=temporal_mask,
                                           wiener=args.wiener)
                    num_errors -= 1
                except ValueError as err:
                    print(err.args[0])
                except:
                    print("Unexpected error:", sys.exc_info()[0])
            success = len(all_inputs) - num_errors
            if success == 0:
                raise RuntimeError('Dimensions were inconsistent for all input-mask pairs; \n'
                                   'No inputs were processed!')
"t1samplespacing": "NONE", "t2samplespacing": "NONE", "unwarpdir": "NONE", "avgrdcmethod": "NONE", "SEPhaseNeg": "NONE", "SEPhasePos": "NONE", "echospacing": "NONE", "seunwarpdir": "NONE"} if fieldmap_set: if len(fieldmap_set)>1: fieldmap_trans=dict(zip(fieldmap_set[0],zip(*[d.values() for d in fieldmap_set]))) else: fieldmap_trans = {k:[v] for k,v in fieldmap_set[0].iteritems()} t1_spacing = layout.get_metadata(t1ws[0])["EffectiveEchoSpacing"] t2_spacing = layout.get_metadata(t2ws[0])["EffectiveEchoSpacing"] unwarpdir = layout.get_metadata(t1ws[0])["PhaseEncodingDirection"] unwarpdir = unwarpdir.replace("i","x").replace("j", "y").replace("k", "z") if len(unwarpdir) == 2: unwarpdir = unwarpdir[0]+"-" fmap_args.update({"t1samplespacing": "%.8f"%t1_spacing, "t2samplespacing": "%.8f"%t2_spacing, "unwarpdir": unwarpdir}) if set(fieldmap_trans["type"]) == set(["phasediff"]): merged_file = "%s/tmp/%s/magfile.nii.gz"%(args.output_dir, subject_label) run("mkdir -p %s/tmp/%s/ && fslmerge -t %s %s %s"%(args.output_dir, subject_label,
def bidsmri2project(directory): #Parse dataset_description.json file in BIDS directory if (os.path.isdir(os.path.join(directory))): with open(os.path.join(directory, 'dataset_description.json')) as data_file: dataset = json.load(data_file) else: print("Error: BIDS directory %s does not exist!" % os.path.join(directory)) exit("-1") #print(dataset_data) #create project / nidm-exp doc project = Project() #add various attributes if they exist in BIDS dataset for key in dataset: #print(key) #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object if key in BIDS_Constants.dataset_description: if type(dataset[key]) is list: project.add_attributes({ BIDS_Constants.dataset_description[key]: "".join(dataset[key]) }) else: project.add_attributes( {BIDS_Constants.dataset_description[key]: dataset[key]}) #add absolute location of BIDS directory on disk for later finding of files which are stored relatively in NIDM document project.add_attributes({Constants.PROV['Location']: directory}) #create empty dictinary for sessions where key is subject id and used later to link scans to same session as demographics session = {} participant = {} #Parse participants.tsv file in BIDS directory and create study and acquisition objects with open(os.path.join(directory, 'participants.tsv')) as csvfile: participants_data = csv.DictReader(csvfile, delimiter='\t') #print(participants_data.fieldnames) for row in participants_data: #create session object for subject to be used for participant metadata and image data #parse subject id from "sub-XXXX" string subjid = row['participant_id'].split("-") session[subjid[1]] = Session(project) #add acquisition object acq = AssessmentAcquisition(session=session[subjid[1]]) acq_entity = AssessmentObject(acquisition=acq) participant[subjid[1]] = {} participant[subjid[1]]['person'] = acq.add_person( attributes=({ Constants.NIDM_SUBJECTID: row['participant_id'] })) #add qualified association of participant with 
acquisition activity acq.add_qualified_association( person=participant[subjid[1]]['person'], role=Constants.NIDM_PARTICIPANT) for key, value in row.items(): #for variables in participants.tsv file who have term mappings in BIDS_Constants.py use those if key in BIDS_Constants.participants: #if this was the participant_id, we already handled it above creating agent / qualified association if not (BIDS_Constants.participants[key] == Constants.NIDM_SUBJECTID): acq_entity.add_attributes( {BIDS_Constants.participants[key]: value}) #else just put variables in bids namespace since we don't know what they mean else: #acq_entity.add_attributes({Constants.BIDS[quote(key)]:value}) acq_entity.add_attributes( {Constants.BIDS[key.replace(" ", "_")]: value}) #get BIDS layout bids_layout = BIDSLayout(directory) #create acquisition objects for each scan for each subject #loop through all subjects in dataset for subject_id in bids_layout.get_subjects(): print("Converting subject: %s" % subject_id) #skip .git directories...added to support datalad datasets if subject_id.startswith("."): continue #check if there's a session number. 
If so, store it in the session activity session_dirs = bids_layout.get(target='session', subject=subject_id, return_type='dir') #if session_dirs has entries then get any metadata about session and store in session activity #bids_layout.get(subject=subject_id,type='session',extensions='.tsv') #bids_layout.get(subject=subject_id,type='scans',extensions='.tsv') #bids_layout.get(extensions='.tsv',return_type='obj') for file_tpl in bids_layout.get(subject=subject_id, extensions=['.nii', '.nii.gz']): #create an acquisition activity acq = MRAcquisition(session[subject_id]) #add qualified association with person acq.add_qualified_association( person=participant[subject_id]['person'], role=Constants.NIDM_PARTICIPANT) #print(file_tpl.type) if file_tpl.modality == 'anat': #do something with anatomicals acq_obj = MRObject(acq) #add image contrast type if file_tpl.type in BIDS_Constants.scans: acq_obj.add_attributes({ Constants.NIDM_IMAGE_CONTRAST_TYPE: BIDS_Constants.scans[file_tpl.type] }) else: print( "WARNING: No matching image contrast type found in BIDS_Constants.py for %s" % file_tpl.type) #add image usage type if file_tpl.modality in BIDS_Constants.scans: acq_obj.add_attributes({ Constants.NIDM_IMAGE_USAGE_TYPE: BIDS_Constants.scans[file_tpl.modality] }) else: print( "WARNING: No matching image usage type found in BIDS_Constants.py for %s" % file_tpl.modality) #add file link #make relative link to acq_obj.add_attributes({ Constants.NIDM_FILENAME: getRelPathToBIDS(file_tpl.filename, directory) }) #get associated JSON file if exists json_data = bids_layout.get_metadata(file_tpl.filename) if json_data: for key in json_data: if key in BIDS_Constants.json_keys: if type(json_data[key]) is list: acq_obj.add_attributes({ BIDS_Constants.json_keys[key.replace( " ", "_")]: ''.join(str(e) for e in json_data[key]) }) else: acq_obj.add_attributes({ BIDS_Constants.json_keys[key.replace( " ", "_")]: json_data[key] }) elif file_tpl.modality == 'func': #do something with functionals 
acq_obj = MRObject(acq) #add image contrast type if file_tpl.type in BIDS_Constants.scans: acq_obj.add_attributes({ Constants.NIDM_IMAGE_CONTRAST_TYPE: BIDS_Constants.scans[file_tpl.type] }) else: print( "WARNING: No matching image contrast type found in BIDS_Constants.py for %s" % file_tpl.type) #add image usage type if file_tpl.modality in BIDS_Constants.scans: acq_obj.add_attributes({ Constants.NIDM_IMAGE_USAGE_TYPE: BIDS_Constants.scans[file_tpl.modality] }) else: print( "WARNING: No matching image usage type found in BIDS_Constants.py for %s" % file_tpl.modality) #add file link acq_obj.add_attributes({ Constants.NIDM_FILENAME: getRelPathToBIDS(file_tpl.filename, directory) }) if 'run' in file_tpl._fields: acq_obj.add_attributes( {BIDS_Constants.json_keys["run"]: file_tpl.run}) #get associated JSON file if exists json_data = bids_layout.get_metadata(file_tpl.filename) if json_data: for key in json_data: if key in BIDS_Constants.json_keys: if type(json_data[key]) is list: acq_obj.add_attributes({ BIDS_Constants.json_keys[key.replace( " ", "_")]: ''.join(str(e) for e in json_data[key]) }) else: acq_obj.add_attributes({ BIDS_Constants.json_keys[key.replace( " ", "_")]: json_data[key] }) #get associated events TSV file if 'run' in file_tpl._fields: events_file = bids_layout.get(subject=subject_id, extensions=['.tsv'], modality=file_tpl.modality, task=file_tpl.task, run=file_tpl.run) else: events_file = bids_layout.get(subject=subject_id, extensions=['.tsv'], modality=file_tpl.modality, task=file_tpl.task) #if there is an events file then this is task-based so create an acquisition object for the task file and link if events_file: #for now create acquisition object and link it to the associated scan events_obj = AcquisitionObject(acq) #add prov type, task name as prov:label, and link to filename of events file events_obj.add_attributes({ PROV_TYPE: Constants.NIDM_MRI_BOLD_EVENTS, BIDS_Constants.json_keys["TaskName"]: json_data["TaskName"], Constants.NIDM_FILENAME: 
getRelPathToBIDS(events_file[0].filename, directory) }) #link it to appropriate MR acquisition entity events_obj.wasAttributedTo(acq_obj) elif file_tpl.modality == 'dwi': #do stuff with with dwi scans... acq_obj = MRObject(acq) #add image contrast type if file_tpl.type in BIDS_Constants.scans: acq_obj.add_attributes({ Constants.NIDM_IMAGE_CONTRAST_TYPE: BIDS_Constants.scans[file_tpl.type] }) else: print( "WARNING: No matching image contrast type found in BIDS_Constants.py for %s" % file_tpl.type) #add image usage type if file_tpl.modality in BIDS_Constants.scans: acq_obj.add_attributes({ Constants.NIDM_IMAGE_USAGE_TYPE: BIDS_Constants.scans["dti"] }) else: print( "WARNING: No matching image usage type found in BIDS_Constants.py for %s" % file_tpl.modality) #add file link acq_obj.add_attributes({ Constants.NIDM_FILENAME: getRelPathToBIDS(file_tpl.filename, directory) }) if 'run' in file_tpl._fields: acq_obj.add_attributes( {BIDS_Constants.json_keys["run"]: file_tpl.run}) #get associated JSON file if exists json_data = bids_layout.get_metadata(file_tpl.filename) if json_data: for key in json_data: if key in BIDS_Constants.json_keys: if type(json_data[key]) is list: acq_obj.add_attributes({ BIDS_Constants.json_keys[key.replace( " ", "_")]: ''.join(str(e) for e in json_data[key]) }) else: acq_obj.add_attributes({ BIDS_Constants.json_keys[key.replace( " ", "_")]: json_data[key] }) #for bval and bvec files, what to do with those? #for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan? 
acq_obj_bval = AcquisitionObject(acq) acq_obj_bval.add_attributes( {PROV_TYPE: BIDS_Constants.scans["bval"]}) #add file link to bval files acq_obj_bval.add_attributes({ Constants.NIDM_FILENAME: getRelPathToBIDS(bids_layout.get_bval(file_tpl.filename), directory) }) acq_obj_bvec = AcquisitionObject(acq) acq_obj_bvec.add_attributes( {PROV_TYPE: BIDS_Constants.scans["bvec"]}) #add file link to bvec files acq_obj_bvec.add_attributes({ Constants.NIDM_FILENAME: getRelPathToBIDS(bids_layout.get_bvec(file_tpl.filename), directory) }) #link bval and bvec acquisition object entities together or is their association with DWI scan... #Added temporarily to support phenotype files #for each *.tsv / *.json file pair in the phenotypes directory for tsv_file in glob.glob(os.path.join(directory, "phenotype", "*.tsv")): #for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to #the associated JSON data dictionary file with open(tsv_file) as phenofile: pheno_data = csv.DictReader(phenofile, delimiter='\t') for row in pheno_data: subjid = row['participant_id'].split("-") if not subjid[1] == subject_id: continue else: #add acquisition object acq = AssessmentAcquisition(session=session[subjid[1]]) #add qualified association with person acq.add_qualified_association( person=participant[subject_id]['person'], role=Constants.NIDM_PARTICIPANT) acq_entity = AssessmentObject(acquisition=acq) for key, value in row.items(): #we're using participant_id in NIDM in agent so don't add to assessment as a triple. #BIDS phenotype files seem to have an index column with no column header variable name so skip those if ((not key == "participant_id") and (key != "")): #for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs.. 
acq_entity.add_attributes( {Constants.BIDS[key]: value}) #link TSV file acq_entity.add_attributes({ Constants.NIDM_FILENAME: getRelPathToBIDS(tsv_file, directory) }) #link associated JSON file if it exists data_dict = os.path.join( directory, "phenotype", os.path.splitext(os.path.basename(tsv_file))[0] + ".json") if os.path.isfile(data_dict): acq_entity.add_attributes({ Constants.BIDS["data_dictionary"]: getRelPathToBIDS(data_dict, directory) }) return project
def get_metadata(epi_file, bids_dir):
    """Gather field-map images and acquisition parameters for an EPI image.

    This function requires the "IntendedFor" field in the json sidecar of
    the field map to be defined (see section 8.3.5 of the BIDS spec). When
    it is missing, a heuristic tries to guess the field map from the data.

    Args:
        epi_file: path to the EPI image to be distortion corrected.
        bids_dir: root directory of the BIDS dataset.

    Returns:
        tuple: (magnitude, phasediff, TE, echospacing, ped) where magnitude
        and phasediff are image paths, TE is the echo-time difference in
        milliseconds, echospacing is the EPI EffectiveEchoSpacing, and ped
        is the FSL-style phase-encoding code (e.g. 'y-').

    Raises:
        AssertionError: if no field map can be found, or the one found is
            not a phasediff map.
        ValueError: if the image orientation or phase-encoding direction
            cannot be interpreted.
    """
    import re
    import os
    import subprocess
    from bids.grabbids import BIDSLayout

    # get bids layout
    layout = BIDSLayout(bids_dir)

    # get fieldmaps for epi
    fieldmap = layout.get_fieldmap(epi_file)

    # empty/None means the IntendedFor tag is not defined; try to guess the
    # field map from the data.
    # TODO: THIS IS POTENTIALLY UNSTABLE CODE. SOMEONE SHOULD COME UP WITH A
    # BETTER WAY TO DO THIS! I assume there is only 1 fieldmap per session.
    if not fieldmap:
        print(
            '\n****************************************************************************'
        )
        print('File: {}'.format(epi_file))
        print(
            'IntendedFor field undefined! I\'ll try to guess the fieldmap file...\n'
        )
        # parse the subject id out of the filename (sub-<id>_...)
        sub = os.path.split(epi_file)[1].split("_")[0].split("sub-")[1]
        type_ = '(phase1|phasediff|epi|fieldmap)'  # get all fieldmap types
        # get the potential fmaps
        files = layout.get(subject=sub,
                           type=type_,
                           extensions=['nii.gz', 'nii'])
        # if exactly 1 file there is probably only one session / one fieldmap
        if len(files) == 1:
            fieldmap = {
                'phasediff': files[0].filename,
                'type': files[0].type,
                'magnitude1':
                files[0].filename.replace('phasediff', 'magnitude1'),
                'magnitude2':
                files[0].filename.replace('phasediff', 'magnitude2')
            }
        else:  # assume more than one session
            # match the session number parsed from the EPI filename
            ses = os.path.split(epi_file)[1].split("_")[1].split("ses-")[1]
            for file in files:
                if file.session == ses:
                    fieldmap = {
                        'phasediff': file.filename,
                        'type': file.type,
                        'magnitude1':
                        file.filename.replace('phasediff', 'magnitude1'),
                        'magnitude2':
                        file.filename.replace('phasediff', 'magnitude2')
                    }
        # check if we were able to get the fieldmap
        assert bool(
            fieldmap
        ), 'We couldn\'t find a fieldmap. Specify the IntendedFor Field or disable field map correction.'
        print('I think {} is the fieldmap. You should verify this is correct.'.
              format(fieldmap['phasediff']))
        print(
            '****************************************************************************\n'
        )

    # we only know how to use phasediff map, anything else is not supported...
    assert fieldmap[
        'type'] == 'phasediff', 'Non-phasediff map unsupported for field map correction.'

    # get the phase diff image
    phasediff = fieldmap['phasediff']

    # get the list of magnitude images
    magnitude = [
        fieldmap[key] for key in fieldmap if re.match('magnitude', key)
    ]
    # choose 1st magnitude image TODO: add setting that lets user choose magnitude image
    magnitude = magnitude[0]

    # echo-time difference of the phasediff map, converted to ms
    echotime1 = layout.get_metadata(phasediff)['EchoTime1']
    echotime2 = layout.get_metadata(phasediff)['EchoTime2']
    TE = abs(echotime2 - echotime1) * 1000

    # get the echospacing for the epi image
    echospacing = layout.get_metadata(epi_file)['EffectiveEchoSpacing']

    # get the phase encoding direction
    ped = layout.get_metadata(epi_file)['PhaseEncodingDirection']

    # determine image orientation (3-letter code, e.g. 'RAI') via AFNI 3dinfo
    output = subprocess.run(['3dinfo', '-orient', phasediff],
                            stdout=subprocess.PIPE)
    orientation = output.stdout.decode('utf-8').rstrip()

    # choose orientation based on ped axis (i/j/k -> x/y/z of the volume)
    if ped[0] == 'i':
        if orientation[0] == 'R':
            orient_code = 'RL'
        elif orientation[0] == 'L':
            orient_code = 'LR'
        else:
            raise ValueError('Invalid Orientation!')
    elif ped[0] == 'j':
        if orientation[1] == 'A':
            orient_code = 'AP'
        elif orientation[1] == 'P':
            orient_code = 'PA'
        else:
            raise ValueError('Invalid Orientation!')
    elif ped[0] == 'k':
        if orientation[2] == 'I':
            orient_code = 'IS'
        elif orientation[2] == 'S':
            orient_code = 'SI'
        else:
            raise ValueError('Invalid Orientation!')
    else:
        raise ValueError('Invalid Phase Encoding Direction Parsed!')

    # reverse the orientation if ped was negative; BIDS allows a bare axis
    # (e.g. 'j') with no sign, so guard the length before indexing
    if len(ped) > 1 and ped[1] == '-':
        orient_code = orient_code[::-1]

    # Using the orient code to find the equivalent FSL ped
    ped = {
        'RL': 'x',
        'LR': 'x-',
        'AP': 'y',
        'PA': 'y-',
        'SI': 'z',
        'IS': 'z-'
    }[orient_code]

    # return the magnitude and phase image paths
    return (magnitude, phasediff, TE, echospacing, ped)
def _add_sidecar_attributes(acq_obj, json_data):
    """Map recognized JSON sidecar keys onto NIDM attributes of acq_obj.

    Only keys present in BIDS_Constants.json_keys are stored; list values
    are collapsed into a single concatenated string.
    """
    for key in json_data:
        if key in BIDS_Constants.json_keys:
            if type(json_data[key]) is list:
                acq_obj.add_attributes({
                    BIDS_Constants.json_keys[key.replace(" ", "_")]:
                    ''.join(str(e) for e in json_data[key])
                })
            else:
                acq_obj.add_attributes({
                    BIDS_Constants.json_keys[key.replace(" ", "_")]:
                    json_data[key]
                })


def _add_sha512(acq_obj, path):
    """Attach a SHA512 checksum of the file at `path` to acq_obj, if it exists."""
    if isfile(path):
        acq_obj.add_attributes({Constants.CRYPTO_SHA512: getsha512(path)})
    else:
        logging.info(
            "WARNING: file %s doesn't exist! No SHA512 sum stored in NIDM files..."
            % path)


def bidsmri2project(directory, args):
    """Convert a BIDS MRI dataset into a NIDM-Experiment project graph.

    Parses dataset_description.json, participants.tsv (with optional
    variable-to-term mapping), all imaging files (anat/func/dwi) plus their
    JSON sidecars, and phenotype TSV/JSON pairs, building a NIDM Project
    with sessions, acquisitions, and qualified participant associations.

    Args:
        directory: path to the root of the BIDS dataset.
        args: parsed command-line namespace; uses args.json_map, args.key,
            and args.owl for the variable-to-term mapping step.

    Returns:
        Project: the populated NIDM-Experiment project object.
    """
    #Parse dataset_description.json file in BIDS directory
    if (os.path.isdir(os.path.join(directory))):
        try:
            with open(os.path.join(
                    directory, 'dataset_description.json')) as data_file:
                dataset = json.load(data_file)
        except OSError:
            logging.critical(
                "Cannot find dataset_description.json file which is required in the BIDS spec"
            )
            exit("-1")
    else:
        logging.critical("Error: BIDS directory %s does not exist!" %
                         os.path.join(directory))
        exit("-1")

    #create project / nidm-exp doc
    project = Project()

    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})

    #add absolute location of BIDS directory on disk for later finding of files which are stored relatively in NIDM document
    project.add_attributes({Constants.PROV['Location']: directory})

    #get BIDS layout
    bids_layout = BIDSLayout(directory)

    #create empty dictionary for sessions where key is subject id; used later to link scans to same session as demographics
    session = {}
    participant = {}

    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    if os.path.isfile(os.path.join(directory, 'participants.tsv')):
        with open(os.path.join(directory, 'participants.tsv')) as csvfile:
            participants_data = csv.DictReader(csvfile, delimiter='\t')
            #logic to map variables to terms:
            #first collect variables that are not already mapped as BIDS constants;
            #for those we use the variable-term mapping functions to help the user do the mapping
            mapping_list = []
            column_to_terms = {}
            for field in participants_data.fieldnames:
                #column is not in BIDS_Constants
                if not (field in BIDS_Constants.participants):
                    #add column to list for column_to_terms mapping
                    mapping_list.append(field)

            #do variable-term mappings
            if ((args.json_map != False) or (args.key != None)):
                #if user didn't supply a json mapping file but we're doing some
                #variable-term mapping, create an empty one for column_to_terms to use
                if args.json_map == None:
                    #defaults to participants.json because here we're mapping the participants.tsv file variables to terms
                    # NOTE(review): os.path.isfile returns a bool, not a path --
                    # confirm map_variables_to_terms accepts that for json_file
                    args.json_map = os.path.isfile(
                        os.path.join(directory, 'participants.json'))
                #maps variables in CSV file to terms
                temp = DataFrame(columns=mapping_list)
                column_to_terms.update(
                    map_variables_to_terms(directory=directory,
                                           df=temp,
                                           apikey=args.key,
                                           output_file=os.path.join(
                                               directory,
                                               'participants.json'),
                                           json_file=args.json_map,
                                           owl_file=args.owl))

            for row in participants_data:
                #create session object for subject to be used for participant metadata and image data
                #parse subject id from "sub-XXXX" string; some datasets use a bare "XXXX"
                temp = row['participant_id'].split("-")
                if len(temp) > 1:
                    subjid = temp[1]
                else:
                    subjid = temp[0]
                logging.info(subjid)
                session[subjid] = Session(project)

                #add acquisition object
                acq = AssessmentAcquisition(session=session[subjid])
                acq_entity = AssessmentObject(acquisition=acq)
                participant[subjid] = {}
                participant[subjid]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: row['participant_id']
                    }))

                #add qualified association of participant with acquisition activity
                acq.add_qualified_association(
                    person=participant[subjid]['person'],
                    role=Constants.NIDM_PARTICIPANT)

                for key, value in row.items():
                    #for variables in participants.tsv who have term mappings in BIDS_Constants.py use those
                    if key in BIDS_Constants.participants:
                        #participant_id was already handled above creating agent / qualified association
                        if not (BIDS_Constants.participants[key] ==
                                Constants.NIDM_SUBJECTID):
                            acq_entity.add_attributes(
                                {BIDS_Constants.participants[key]: value})
                    #else use the variable->term mapping procedures (also used in CSV2NIDM.py)
                    else:
                        if key in column_to_terms:
                            acq_entity.add_attributes({
                                QualifiedName(
                                    provNamespace(
                                        Core.safe_string(None,
                                                         string=str(key)),
                                        column_to_terms[key]["url"]), ""):
                                value
                            })
                        else:
                            acq_entity.add_attributes({
                                Constants.BIDS[key.replace(" ", "_")]:
                                value
                            })

    #create acquisition objects for each scan for each subject
    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        logging.info("Converting subject: %s" % subject_id)
        #skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue

        #check if there's a session number; if so, store it in the session activity
        session_dirs = bids_layout.get(target='session',
                                       subject=subject_id,
                                       return_type='dir')
        #if session_dirs has entries then get any metadata about session and store in session activity
        #bids_layout.get(subject=subject_id,type='session',extensions='.tsv')
        #bids_layout.get(subject=subject_id,type='scans',extensions='.tsv')
        #bids_layout.get(extensions='.tsv',return_type='obj')

        #check whether session was created (i.e. was there a participants.tsv file?); if not, create here
        if not (subject_id in session):
            session[subject_id] = Session(project)

        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            acq = MRAcquisition(session[subject_id])

            #check whether participant (i.e. agent) for this subject already exists
            #(i.e. if participants.tsv file exists) else create one
            if not (subject_id in participant):
                participant[subject_id] = {}
                participant[subject_id]['person'] = acq.add_person(
                    attributes=({
                        Constants.NIDM_SUBJECTID: subject_id
                    }))

            #add qualified association with person
            acq.add_qualified_association(
                person=participant[subject_id]['person'],
                role=Constants.NIDM_PARTICIPANT)

            if file_tpl.modality == 'anat':
                #do something with anatomicals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.type in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.type]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.type)
                #add image usage type
                if file_tpl.modality in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.modality]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.modality)
                #add file link (relative to BIDS root)
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(file_tpl.filename, directory)
                })
                #add sha512 sum
                _add_sha512(acq_obj, join(directory, file_tpl.filename))
                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    _add_sidecar_attributes(acq_obj, json_data)

            elif file_tpl.modality == 'func':
                #do something with functionals
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.type in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.type]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.type)
                #add image usage type
                if file_tpl.modality in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans[file_tpl.modality]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.modality)
                #add file link
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(file_tpl.filename, directory)
                })
                #add sha512 sum
                _add_sha512(acq_obj, join(directory, file_tpl.filename))

                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})

                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    _add_sidecar_attributes(acq_obj, json_data)

                #get associated events TSV file
                if 'run' in file_tpl._fields:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.modality,
                        task=file_tpl.task,
                        run=file_tpl.run)
                else:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.modality,
                        task=file_tpl.task)
                #if there is an events file then this is task-based so create an acquisition object for the task file and link
                if events_file:
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file
                    # NOTE(review): json_data["TaskName"] raises KeyError if the
                    # sidecar lacks TaskName -- confirm this is required here
                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NIDM_FILENAME:
                        getRelPathToBIDS(events_file[0].filename, directory)
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)

            elif file_tpl.modality == 'dwi':
                #do stuff with with dwi scans...
                acq_obj = MRObject(acq)
                #add image contrast type
                if file_tpl.type in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_CONTRAST_TYPE:
                        BIDS_Constants.scans[file_tpl.type]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image contrast type found in BIDS_Constants.py for %s"
                        % file_tpl.type)
                #add image usage type (dwi scans are recorded as "dti" usage)
                if file_tpl.modality in BIDS_Constants.scans:
                    acq_obj.add_attributes({
                        Constants.NIDM_IMAGE_USAGE_TYPE:
                        BIDS_Constants.scans["dti"]
                    })
                else:
                    logging.info(
                        "WARNING: No matching image usage type found in BIDS_Constants.py for %s"
                        % file_tpl.modality)
                #add file link
                acq_obj.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(file_tpl.filename, directory)
                })
                #add sha512 sum
                _add_sha512(acq_obj, join(directory, file_tpl.filename))

                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})

                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    _add_sidecar_attributes(acq_obj, json_data)

                #for bval and bvec files, what to do with those?
                #for now, create new generic acquisition objects, link the files,
                #and associate with the one for the DWI scan
                acq_obj_bval = AcquisitionObject(acq)
                acq_obj_bval.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                #add file link to bval files
                acq_obj_bval.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(bids_layout.get_bval(file_tpl.filename),
                                     directory)
                })
                #add sha512 sum of the bval file itself (original code
                #mistakenly hashed the DWI NIfTI here)
                _add_sha512(acq_obj_bval,
                            bids_layout.get_bval(file_tpl.filename))

                acq_obj_bvec = AcquisitionObject(acq)
                acq_obj_bvec.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bvec"]})
                #add file link to bvec files
                acq_obj_bvec.add_attributes({
                    Constants.NIDM_FILENAME:
                    getRelPathToBIDS(bids_layout.get_bvec(file_tpl.filename),
                                     directory)
                })
                #add sha512 sum of the bvec file itself (same fix as bval)
                _add_sha512(acq_obj_bvec,
                            bids_layout.get_bvec(file_tpl.filename))

                #link bval and bvec acquisition object entities together or is their association with DWI scan...

        #Added temporarily to support phenotype files
        #for each *.tsv / *.json file pair in the phenotypes directory
        for tsv_file in glob.glob(os.path.join(directory, "phenotype",
                                               "*.tsv")):
            #open the TSV file, extract the row for this subject, store it in an
            #acquisition object and link to the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    #accept participant_id both as "sub-XXXX" and bare "XXXX",
                    #consistent with the participants.tsv handling above
                    temp = row['participant_id'].split("-")
                    if len(temp) > 1:
                        subjid = temp[1]
                    else:
                        subjid = temp[0]
                    if not subjid == subject_id:
                        continue
                    else:
                        #add acquisition object
                        acq = AssessmentAcquisition(session=session[subjid])
                        #add qualified association with person
                        acq.add_qualified_association(
                            person=participant[subject_id]['person'],
                            role=Constants.NIDM_PARTICIPANT)
                        acq_entity = AssessmentObject(acquisition=acq)

                        for key, value in row.items():
                            #we're using participant_id in NIDM in agent so don't add to assessment as a triple.
                            #BIDS phenotype files seem to have an index column with no column header variable name so skip those
                            if ((not key == "participant_id")
                                    and (key != "")):
                                #placeholder namespace for BIDS; variable names used as concept IDs
                                acq_entity.add_attributes(
                                    {Constants.BIDS[key]: value})

                        #link TSV file
                        acq_entity.add_attributes({
                            Constants.NIDM_FILENAME:
                            getRelPathToBIDS(tsv_file, directory)
                        })
                        #link associated JSON file if it exists
                        data_dict = os.path.join(
                            directory, "phenotype",
                            os.path.splitext(os.path.basename(tsv_file))[0] +
                            ".json")
                        if os.path.isfile(data_dict):
                            acq_entity.add_attributes({
                                Constants.BIDS["data_dictionary"]:
                                getRelPathToBIDS(data_dict, directory)
                            })

    return project
def test_get_metadata2():
    """EchoTime1 from the phasediff sidecar resolves through inheritance."""
    data_dir = join(dirname(__file__), 'data', '7t_trt')
    layout = BIDSLayout(data_dir)
    # BUGFIX: directory said ses-2 while the filename entities say ses-1;
    # per BIDS the session directory must match the filename's ses- entity.
    target = 'sub-03/ses-1/fmap/sub-03_ses-1_run-1_phasediff.nii.gz'
    result = layout.get_metadata(join(data_dir, target))
    assert result['EchoTime1'] == 0.006
extensions='.json') assert len(m1) <= 1 if len(m1) == 0: print('no fieldmap - skipping') else: m2 = layout.get(subject=sub, session=sess, type='magnitude2', extensions='.json') assert len(m2) == 1 pd = layout.get(subject=sub, session=sess, type='phasediff', extensions='.json') assert len(pd) == 1 pd_metadata = layout.get_metadata(pd[0].filename) if 'EchoTime' in pd_metadata: pd_metadata['EchoTime1'] = layout.get_metadata( m1[0].filename)['EchoTime'] pd_metadata['EchoTime2'] = layout.get_metadata( m2[0].filename)['EchoTime'] del pd_metadata['EchoTime'] json.dump(pd_metadata, open(pd[0].filename, 'w'), sort_keys=True, indent=4) boldruns = layout.get(subject=sub, session=sess, type='bold', extensions='.json')
] if len(func_file_path) == 0: func_file_path = [ f.filename for f in layout.get(subject=subject_id, type='bold', session=session[1], run=run, extensions=['nii', 'nii.gz']) ] if len(func_file_path) == 0: if subject_id not in bugs_abide2: print('No Func file: %s' % subject_id) continue # print(func_file_path) metadata = layout.get_metadata(path=func_file_path[0]) volumes = metadata['NumberofMeasurements'] try: volumes = int(volumes) except ValueError: # Mixed Volumes site brain_img = nib.load(func_file_path[0]) volumes = brain_img.shape[-1] if volumes >= vols: subid_vol_dict[subject_id] = volumes subject_list.append(subject_id) print('Subject: %s Volumes: %s' % (subject_id, volumes)) import pdb
def main(argv):
    """Convert a BIDS MRI dataset into a NIDM-Experiment turtle document.

    Command line: -d <BIDS directory> (required), -o <output turtle file>
    (default "nidm.ttl"). Builds a NIDM Project from
    dataset_description.json, participants.tsv, imaging files plus their
    JSON sidecars, and phenotype TSV/JSON pairs; then serializes the graph
    to turtle and saves a DOT rendering as "<outputfile>.png".
    """
    parser = ArgumentParser(
        description=
        'This program will convert a BIDS MRI dataset to a NIDM-Experiment \
RDF document. It will parse phenotype information and simply store variables/values \
and link to the associated json data dictionary file.')
    parser.add_argument('-d',
                        dest='directory',
                        required=True,
                        help="Path to BIDS dataset directory")
    parser.add_argument('-o',
                        dest='outputfile',
                        default="nidm.ttl",
                        help="NIDM output turtle file")
    args = parser.parse_args()
    directory = args.directory
    outputfile = args.outputfile
    #importlib.reload(sys)
    #sys.setdefaultencoding('utf8')
    #Parse dataset_description.json file in BIDS directory
    # NOTE(review): no existence check -- a missing file raises
    # FileNotFoundError here; the sibling bidsmri2project handles that case.
    with open(os.path.join(directory,
                           'dataset_description.json')) as data_file:
        dataset = json.load(data_file)
    #print(dataset_data)
    #create project / nidm-exp doc
    project = Project()
    #add various attributes if they exist in BIDS dataset
    for key in dataset:
        #print(key)
        #if key from dataset_description file is mapped to term in BIDS_Constants.py then add to NIDM object
        if key in BIDS_Constants.dataset_description:
            # list values are concatenated into one string attribute
            if type(dataset[key]) is list:
                project.add_attributes({
                    BIDS_Constants.dataset_description[key]:
                    "".join(dataset[key])
                })
            else:
                project.add_attributes(
                    {BIDS_Constants.dataset_description[key]: dataset[key]})
    #create empty dictinary for sessions where key is subject id and used later to link scans to same session as demographics
    session = {}
    #Parse participants.tsv file in BIDS directory and create study and acquisition objects
    with open(os.path.join(directory, 'participants.tsv')) as csvfile:
        participants_data = csv.DictReader(csvfile, delimiter='\t')
        #print(participants_data.fieldnames)
        for row in participants_data:
            #create session object for subject to be used for participant metadata and image data
            #parse subject id from "sub-XXXX" string
            # NOTE(review): assumes participant_id is always "sub-XXXX";
            # a bare "XXXX" id would raise IndexError on subjid[1].
            subjid = row['participant_id'].split("-")
            session[subjid[1]] = Session(project)
            #add acquisition object
            acq = Acquisition(session=session[subjid[1]])
            acq_entity = DemographicsAcquisitionObject(acquisition=acq)
            participant = acq.add_person(role=Constants.NIDM_PARTICIPANT,
                                         attributes=({
                                             Constants.NIDM_SUBJECTID:
                                             row['participant_id']
                                         }))
            for key, value in row.items():
                #for now only convert variables in participants.tsv file who have term mappings in BIDS_Constants.py
                if key in BIDS_Constants.participants:
                    acq_entity.add_attributes(
                        {BIDS_Constants.participants[key]: value})
    #get BIDS layout
    bids_layout = BIDSLayout(directory)
    #create acquisition objects for each scan for each subject
    #loop through all subjects in dataset
    for subject_id in bids_layout.get_subjects():
        #skip .git directories...added to support datalad datasets
        if subject_id.startswith("."):
            continue
        for file_tpl in bids_layout.get(subject=subject_id,
                                        extensions=['.nii', '.nii.gz']):
            #create an acquisition activity
            # NOTE(review): raises KeyError if subject_id was absent from
            # participants.tsv, since session is only populated there.
            acq = Acquisition(session[subject_id])
            #print(file_tpl.type)
            if file_tpl.modality == 'anat':
                #do something with anatomicals
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                #make relative link to
                acq_obj.add_attributes(
                    {Constants.NIDM_FILENAME: file_tpl.filename})
                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    # copy recognized sidecar keys; lists collapsed to one string
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })
            elif file_tpl.modality == 'func':
                #do something with functionals
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                acq_obj.add_attributes(
                    {Constants.NIDM_FILENAME: file_tpl.filename})
                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})
                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })
                #get associated events TSV file
                if 'run' in file_tpl._fields:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.modality,
                        task=file_tpl.task,
                        run=file_tpl.run)
                else:
                    events_file = bids_layout.get(
                        subject=subject_id,
                        extensions=['.tsv'],
                        modality=file_tpl.modality,
                        task=file_tpl.task)
                #if there is an events file then this is task-based so create an acquisition object for the task file and link
                if events_file:
                    #for now create acquisition object and link it to the associated scan
                    events_obj = AcquisitionObject(acq)
                    #add prov type, task name as prov:label, and link to filename of events file
                    # NOTE(review): json_data["TaskName"] raises KeyError if
                    # the sidecar has no TaskName -- confirm always present.
                    events_obj.add_attributes({
                        PROV_TYPE:
                        Constants.NIDM_MRI_BOLD_EVENTS,
                        BIDS_Constants.json_keys["TaskName"]:
                        json_data["TaskName"],
                        Constants.NFO["filename"]:
                        events_file[0].filename
                    })
                    #link it to appropriate MR acquisition entity
                    events_obj.wasAttributedTo(acq_obj)
            elif file_tpl.modality == 'dwi':
                #do stuff with with dwi scans...
                acq_obj = MRAcquisitionObject(acq)
                acq_obj.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans[file_tpl.modality]})
                #add file link
                acq_obj.add_attributes(
                    {Constants.NIDM_FILENAME: file_tpl.filename})
                if 'run' in file_tpl._fields:
                    acq_obj.add_attributes(
                        {BIDS_Constants.json_keys["run"]: file_tpl.run})
                #get associated JSON file if exists
                json_data = bids_layout.get_metadata(file_tpl.filename)
                if json_data:
                    for key in json_data:
                        if key in BIDS_Constants.json_keys:
                            if type(json_data[key]) is list:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    ''.join(str(e) for e in json_data[key])
                                })
                            else:
                                acq_obj.add_attributes({
                                    BIDS_Constants.json_keys[key]:
                                    json_data[key]
                                })
                #for bval and bvec files, what to do with those?
                #for now, create new generic acquisition objects, link the files, and associate with the one for the DWI scan?
                acq_obj_bval = AcquisitionObject(acq)
                acq_obj_bval.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bval"]})
                #add file link to bval files
                acq_obj_bval.add_attributes({
                    Constants.NIDM_FILENAME:
                    bids_layout.get_bval(file_tpl.filename)
                })
                acq_obj_bvec = AcquisitionObject(acq)
                acq_obj_bvec.add_attributes(
                    {PROV_TYPE: BIDS_Constants.scans["bvec"]})
                #add file link to bvec files
                acq_obj_bvec.add_attributes({
                    Constants.NIDM_FILENAME:
                    bids_layout.get_bvec(file_tpl.filename)
                })
                #link bval and bvec acquisition object entities together or is their association with enclosing activity enough?
        #Added temporarily to support phenotype files
        #for each *.tsv / *.json file pair in the phenotypes directory
        for tsv_file in glob.glob(os.path.join(directory, "phenotype",
                                               "*.tsv")):
            #for now, open the TSV file, extract the row for this subject, store it in an acquisition object and link to
            #the associated JSON data dictionary file
            with open(tsv_file) as phenofile:
                pheno_data = csv.DictReader(phenofile, delimiter='\t')
                for row in pheno_data:
                    # NOTE(review): assumes "sub-XXXX" form; bare ids would
                    # raise IndexError on subjid[1].
                    subjid = row['participant_id'].split("-")
                    if not subjid[1] == subject_id:
                        continue
                    else:
                        #add acquisition object
                        acq = Acquisition(session=session[subjid[1]])
                        acq_entity = AssessmentAcquisitionObject(
                            acquisition=acq)
                        participant = acq.add_person(
                            role=Constants.NIDM_PARTICIPANT,
                            attributes=({
                                Constants.NIDM_SUBJECTID:
                                row['participant_id']
                            }))
                        for key, value in row.items():
                            if not key == "participant_id":
                                #for now we're using a placeholder namespace for BIDS and simply the variable names as the concept IDs..
                                acq_entity.add_attributes(
                                    {Constants.BIDS[key]: value})
                        #link TSV file
                        acq_entity.add_attributes(
                            {Constants.NIDM_FILENAME: tsv_file})
                        #link associated JSON file if it exists
                        data_dict = os.path.join(
                            directory, "phenotype",
                            os.path.splitext(os.path.basename(tsv_file))[0] +
                            ".json")
                        if os.path.isfile(data_dict):
                            acq_entity.add_attributes(
                                {Constants.BIDS["data_dictionary"]: data_dict})
    #serialize graph
    #print(project.graph.get_provn())
    with open(outputfile, 'w') as f:
        f.write(project.serializeTurtle())
        #f.write(project.graph.get_provn())
    #save a DOT graph as PNG
    project.save_DotGraph(str(outputfile + ".png"), format="png")
def wfmaker(project_dir,
            raw_dir,
            subject_id,
            task_name='',
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False):
    """
    This function returns a "standard" workflow based on requested settings.
    Assumes data is in the following directory structure in BIDS format:

    *Work flow steps*:

    1) EPI Distortion Correction (FSL; optional)
    2) Trimming (nipy)
    3) Realignment/Motion Correction (FSL)
    4) Artifact Detection (rapidART/python)
    5) Brain Extraction + N4 Bias Correction (ANTs)
    6) Coregistration (rigid) (ANTs)
    7) Normalization to MNI (non-linear) (ANTs)
    8) Low-pass filtering (nilearn; optional)
    9) Smoothing (FSL; optional)
    10) Downsampling to INT16 precision to save space (nibabel)

    Args:
        project_dir (str): full path to the root of project folder, e.g.
            /my/data/myproject. All preprocessed data will be placed under
            this folder and the raw_dir folder will be searched for under
            this folder
        raw_dir (str): folder name for raw data, e.g. 'raw' which would be
            automatically converted to /my/data/myproject/raw
        subject_id (str/int): subject ID to process. Can be either a subject
            ID string e.g. 'sid-0001' or an integer to index the entire list
            of subjects in raw_dir, e.g. 0, which would process the first
            subject
        task_name (str; optional): which functional task runs to process;
            default is all runs
        apply_trim (int/bool; optional): number of volumes to trim from the
            beginning of each functional run; default is False (no trimming)
        apply_dist_corr (bool; optional): look for fmap files and perform
            distortion correction; default False
        apply_smooth (int/float/list; optional): smoothing to perform in FWHM
            mm; if a list is provided will create outputs for each smoothing
            kernel separately; default False
        apply_filter (float/list; optional): low-pass/high-freq filtering
            cut-offs in Hz; if a list is provided will create outputs for
            each filter cut-off separately. With high temporal resolution
            scans .25Hz is a decent value to capture respitory artifacts;
            default None/False
        mni_template (str; optional): which mm resolution template to use,
            e.g. '3mm'; default '2mm'
        apply_n4 (bool; optional): perform N4 Bias Field correction on the
            anatomical image; default true
        ants_threads (int; optional): number of threads ANTs should use for
            its processes; default 8
        readable_crash_files (bool; optional): should nipype crash files be
            saved as txt? This makes them easily readable, but sometimes
            interferes with nipype's ability to use cached results of
            successfully run nodes (i.e. picking up where it left off after
            bugs are fixed); default False

    Returns:
        nipype.pipeline.engine.Workflow: fully wired but not-yet-run workflow

    Examples:

        >>> from cosanlab_preproc.wfmaker import wfmaker
        >>> # Create workflow that performs no distortion correction, trims first 5 TRs, no filtering, 6mm smoothing, and normalizes to 2mm MNI space. Run it with 16 cores.
        >>>
        >>> workflow = wfmaker(
                project_dir = '/data/project',
                raw_dir = 'raw',
                apply_trim = 5)
        >>>
        >>> workflow.run('MultiProc',plugin_args = {'n_procs': 16})
        >>>
        >>> # Create workflow that performs distortion correction, trims first 25 TRs, no filtering and filtering .25hz, 6mm and 8mm smoothing, and normalizes to 3mm MNI space. Run it serially (will be super slow!).
        >>>
        >>> workflow = wfmaker(
                project_dir = '/data/project',
                raw_dir = 'raw',
                apply_trim = 25,
                apply_dist_corr = True,
                apply_filter = [0, .25],
                apply_smooth = [6.0, 8.0],
                mni_template = '3mm')
        >>>
        >>> workflow.run()

    """
    ##################
    ### PATH SETUP ###
    ##################
    if mni_template not in ['1mm', '2mm', '3mm']:
        raise ValueError("MNI template must be: 1mm, 2mm, or 3mm")
    data_dir = os.path.join(project_dir, raw_dir)
    output_dir = os.path.join(project_dir, 'preprocessed')
    output_final_dir = os.path.join(output_dir, 'final')
    output_interm_dir = os.path.join(output_dir, 'intermediate')
    log_dir = os.path.join(project_dir, 'logs', 'nipype')

    if not os.path.exists(output_final_dir):
        os.makedirs(output_final_dir)
    if not os.path.exists(output_interm_dir):
        os.makedirs(output_interm_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update nipype global config because workflow.config[] = ..., doesn't seem to work
    # Can't store nipype config/rc file in container anyway so set them globaly before importing and setting up workflow as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config({'logging': {
        'log_directory': log_dir,
        'log_to_file': True
    }})
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################
    layout = BIDSLayout(data_dir)
    # Dartmouth subjects are named with the sub- prefix, handle whether we receive an integer identifier for indexing or the full subject id with prefix
    if isinstance(subject_id, six.string_types):
        subId = subject_id[4:]
    elif isinstance(subject_id, int):
        subId = layout.get_subjects()[subject_id]
        subject_id = 'sub-' + subId
    else:
        raise TypeError("subject_id should be a string or integer")

    #Get anat file location
    anat = layout.get(subject=subId, type='T1w',
                      extensions='.nii.gz')[0].filename

    #Get functional file locations
    if task_name:
        funcs = [
            f.filename for f in layout.get(subject=subId,
                                           type='bold',
                                           task=task_name,
                                           extensions='.nii.gz')
        ]
    else:
        funcs = [
            f.filename for f in layout.get(
                subject=subId, type='bold', extensions='.nii.gz')
        ]
    # Fail early with a clear message instead of an IndexError below
    if not funcs:
        raise IOError(
            "No functional runs found for this subject/task combination...")

    #Turn functional file list into interable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    #Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################
    if apply_dist_corr:
        #Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        #Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []
        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])
            # Grab measurements (for some reason pyBIDS doesn't grab dcm_meta... fields from side-car json file and json.load, doesn't either; so instead just read the header using nibabel to determine number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])
            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        # BUG FIX: was `in1 = fmaps[1]`, which overwrote the first fieldmap
        # and left `in2` unset; the second fieldmap must go into `in2`
        merge_to_file_list.inputs.in2 = fmaps[1]

        #Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        #Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        #Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    #For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    #For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    # NOTE(review): two parameter tuples for a single 'Rigid' transform;
    # the second entry looks unused — confirm against nipype Registration
    coregistration.inputs.transform_parameters = [(0.1, ), (0.1, )]
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # Old settings [[3,2,1,0]]*3
    # shrink_factors
    # The coarseness with which to do registration
    # Old settings [[8,4,2,1]] * 3
    # >= 8 may result is some problems causing big chunks of cortex with little fine grain spatial structure to be moved to other parts of cortex
    # Other settings
    # transform_parameters:
    # how much regularization to do for fitting that transformation
    # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    # Use image intensity distribution to guide registration
    # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
    # convergence_threshold
    # threshold for optimizer
    # convergence_window_size
    # how many samples should optimizer average to compute threshold?
    # sampling_strategy
    # what strategy should ANTs use to initialize the transform. Regular here refers to approximately random sampling around the center of the image mass
    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[4, 3, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    # NOTE(review): first two stages list 2 sigma levels but 4 iteration
    # levels/shrink factors — confirm this is intentional
    normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2),
                            iterfield=['in2'],
                            name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(),
                            iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = 12
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = 12
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    #Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    #Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    #Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    #Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    datasink.inputs.base_directory = output_final_dir
    datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    workflow = Workflow(name=subId)
    workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup,
                           [('encoding_file', 'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################
    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################
    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])
        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    ##########################################
    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads == 8:
        print(
            f"ANTs will utilize the default of {ants_threads} threads for parallel processing."
        )
    else:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
def main(argv=None):
    """Insert/repair BIDS JSON sidecar metadata for each subject/session.

    Separates concatenated spin-echo fieldmaps, selects the best SEFM
    pos/neg pair, and writes scanner-specific DwellTime /
    EffectiveEchoSpacing values plus PhaseEncodingDirection/Axis fields
    into the sidecar JSONs.

    Args:
        argv (list; optional): full argument vector (argv[0] is the program
            name, as in sys.argv); defaults to sys.argv. BUG FIX: argv was
            previously accepted but ignored — parse_args() always read
            sys.argv, so programmatic callers could not pass arguments.
    """
    if argv is None:
        argv = sys.argv
    parser = generate_parser()
    args = parser.parse_args(argv[1:])

    # Load the bids layout
    layout = BIDSLayout(args.bids_dir)
    subsess = read_bids_layout(layout,
                               subject_list=args.subject_list,
                               collect_on_subject=args.collect)
    print(subsess)

    # Scanner-specific dwell/echo-spacing constants (seconds); matched by
    # substring against the sidecar's Manufacturer field
    dwell_times = {
        'Philips': 0.00062771,
        'GE': 0.000536,
        'Siemens': 0.00051001152626,
    }

    for subject, sessions in subsess:
        # fmap directory = base dir
        fmap = layout.get(subject=subject,
                          session=sessions,
                          modality='fmap',
                          extensions='.nii.gz')
        # Robustness guard: previously an unguarded fmap[0] raised IndexError
        # for subjects without fieldmaps
        if not fmap:
            print('No fieldmaps found for subject {}; skipping'.format(subject))
            continue
        base_temp_dir = os.path.dirname(fmap[0].filename)

        # Check if fieldmaps are concatenated
        print(fmap[0].filename)
        print("-both_" in fmap[0].filename)
        if "-both_" in fmap[0].filename:
            print("Running seperate_concatenate_fm")
            seperate_concatenated_fm(layout, subject, sessions)
            # recreate layout with the additional SEFMS
            layout = BIDSLayout(args.bids_dir)

        # Return a list of each SEFM pos/neg pair
        best_pos, best_neg = sefm_select(layout, subject, sessions,
                                         base_temp_dir, args.debug)

        # Additional edits to the anat json sidecar
        anat = layout.get(subject=subject,
                          session=sessions,
                          modality='anat',
                          extensions='.nii.gz')
        for TX in [x.filename for x in anat]:
            TX_json = TX.replace('.nii.gz', '.json')
            TX_metadata = layout.get_metadata(TX)
            #if 'T1' in TX_metadata['SeriesDescription']:
            for make, dwell in dwell_times.items():
                if make in TX_metadata['Manufacturer']:
                    insert_edit_json(TX_json, 'DwellTime', dwell)

        # add EffectiveEchoSpacing if it doesn't already exist
        fmap = layout.get(subject=subject,
                          session=sessions,
                          modality='fmap',
                          extensions='.nii.gz')
        for sefm in [x.filename for x in fmap]:
            sefm_json = sefm.replace('.nii.gz', '.json')
            sefm_metadata = layout.get_metadata(sefm)
            for make, dwell in dwell_times.items():
                if make in sefm_metadata['Manufacturer']:
                    insert_edit_json(sefm_json, 'EffectiveEchoSpacing', dwell)

        # PE direction vs axis: mirror whichever of the two fields exists
        # into the other so downstream tools can rely on either name
        func = layout.get(subject=subject,
                          session=sessions,
                          modality='func',
                          extensions='.nii.gz')
        for task in [x.filename for x in func]:
            task_json = task.replace('.nii.gz', '.json')
            task_metadata = layout.get_metadata(task)
            print('Inserting PE into func')
            if "PhaseEncodingAxis" in task_metadata:
                print('Adding PEDirection')
                print(task_json)
                print('PhaseEncodingDirection')
                print(task_metadata['PhaseEncodingAxis'])
                insert_edit_json(task_json, 'PhaseEncodingDirection',
                                 task_metadata['PhaseEncodingAxis'])
            elif "PhaseEncodingDirection" in task_metadata:
                insert_edit_json(task_json, 'PhaseEncodingAxis',
                                 task_metadata['PhaseEncodingDirection'])
] slice_times = [ 0.0023, 1.0023, 0.0499, 1.0499, 0.0975, 1.0975, 0.1451, 1.1451, 0.1927, 1.1928, 0.2404, 1.2404, 0.288, 1.288, 0.3356, 1.3356, 0.3832, 1.3832, 0.4308, 1.4309, 0.4785, 1.4785, 0.5261, 1.5261, 0.5737, 1.5737, 0.6213, 1.6213, 0.6689, 1.6689, 0.7166, 1.7166, 0.7642, 1.7642, 0.8118, 1.8118, 0.8594, 1.8594, 0.907, 1.907, 0.9547, 1.9546 ] subjects = layout.get_subjects() for subj in subjects: # Functional scans scans = layout.get(subject=subj, extensions='nii.gz', type='bold') for scan in scans: json_file = layout.get_nearest(scan.filename, extensions='json') metadata = layout.get_metadata(scan.filename) if 'dcmmeta_shape' in metadata.keys() or not metadata: metadata2 = { key: metadata[key] for key in keep_keys if key in metadata.keys() } for key in keep_keys: if key not in metadata.keys( ) and key in metadata['global']['const'].keys(): metadata2[key] = metadata['global']['const'][key] metadata2['SliceTiming'] = slice_times if 'EAT' in metadata['ProtocolName']: metadata2['TaskName'] = 'error awareness' else: metadata2['TaskName'] = 'resting state'
def write_physio_to_bids(physio_dir, bids_dir):
    """Match Siemens 7T physio recordings to BOLD runs and write BIDS files.

    Walks physio_dir for *.puls/*.resp recordings, matches each recording to
    the functional runs it temporally spans (using AcquisitionTime from the
    BIDS sidecars), and writes BIDS-style _physio.json sidecars plus
    gzipped TSV waveform files next to each matched run.

    Args:
        physio_dir (str): directory tree containing .puls/.resp recordings
        bids_dir (str): root of the BIDS dataset

    Raises:
        NameError: if a scan does not look like it came from the Siemens 7T
            (type kept for backward compatibility with existing callers)
    """
    # read the acqtimes out of the bids dataset; use these to match with the physio files
    layout = BIDSLayout(bids_dir)
    bold_files = layout.get(type='bold',
                            return_type='file',
                            extensions='.nii.gz')
    acqtimes = {}
    endtimes = {}
    for f in bold_files:
        acqtime = convert_time(
            layout.get_metadata(f)['time']['samples']['AcquisitionTime'][0])
        # NOTE(review): endtimes stores the same first AcquisitionTime sample
        # as acqtimes — an end time would presumably be the last sample;
        # preserved as-is, confirm intent
        if acqtime not in acqtimes:
            acqtimes[acqtime] = [f]
            endtimes[acqtime] = [
                convert_time(
                    layout.get_metadata(f)['time']['samples']
                    ['AcquisitionTime'][0])
            ]
        else:
            acqtimes[acqtime].append(f)
            endtimes[acqtime].append(
                convert_time(
                    layout.get_metadata(f)['time']['samples']
                    ['AcquisitionTime'][0]))
        # NOTE(review): `and` only rejects scans failing BOTH checks —
        # `or` may have been intended; preserved as-is
        if layout.get_metadata(
                f)['MagneticFieldStrength'] != 6.98 and layout.get_metadata(
                    f)['Manufacturer'] != "Siemens":
            print(
                "Scanners other than the Siemens 7T are not currently supported"
            )
            raise NameError(
                "Scanners other than the Siemens 7T are not currently supported"
            )

    # read in physio files and match based on acqtime time and date
    # to functional data
    matches = []
    for root, dirnames, filenames in os.walk(physio_dir):
        for filename in filenames:
            if '.puls' in filename:
                matches.append(os.path.join(root, filename))
    recordings = set([f[0:-4] for f in matches])
    for rec in recordings:
        try:
            ctime, csf, cardiac = read_physio_7T(rec, 'puls')
            rtime, rsf, respiratory = read_physio_7T(rec, 'resp')
        except Exception:
            print('Reading file failed: ' + rec)
            # BUG FIX: previously fell through and used an undefined (or
            # stale, from the prior iteration) ctime below
            continue
        # matched based on files that start before and end after a functional run
        acqmatches = [
            fname for acqtime, fname in acqtimes.items()
            if acqtime - ctime[0] > 0 and ctime[1] - endtimes[acqtime][0] > 0
        ]
        if len(acqmatches) > 0:
            for image in acqmatches[0]:
                # if matched find the difference between the MDHTime and the Acquisition time as start time
                # https://cfn.upenn.edu/aguirre/wiki/public:pulse-oximetry_during_fmri_scanning
                imtime = convert_time(
                    layout.get_metadata(image)['time']['samples']
                    ['AcquisitionTime'][0])
                filebase = create_filebase(image)
                print((ctime[0] - imtime) * csf / 1000)

                cardiac_json = {
                    'SamplingFrequency': csf,
                    'StartTime': (ctime[0] - imtime) / 1000.0,
                    'Columns': ["cardiac"]
                }
                with open(filebase + '_recording-cardiac_physio.json',
                          'w+') as fp:
                    json.dump(cardiac_json, fp)
                with gzip.open(filebase + '_recording-cardiac_physio.tsv.gz',
                               'wt+') as tsv:
                    writer = csv.writer(tsv, delimiter='\t')
                    for val in cardiac:
                        try:
                            writer.writerow([int(val)])
                        except (TypeError, ValueError):
                            continue

                resp_json = {
                    'SamplingFrequency': rsf,
                    'StartTime': (rtime[0] - imtime) / 1000.0,
                    'Columns': ["respiratory"]
                }
                with open(filebase + '_recording-respiratory_physio.json',
                          'w+') as fp:
                    json.dump(resp_json, fp)
                # BUG FIX: the gzip stream was previously written to a file
                # named '...tsv' (no .gz), inconsistent with the cardiac file
                # and the BIDS physio naming convention
                with gzip.open(
                        filebase + '_recording-respiratory_physio.tsv.gz',
                        'wt+') as tsv:
                    writer = csv.writer(tsv, delimiter='\t')
                    for val in respiratory:
                        try:
                            writer.writerow([int(val)])
                        except (TypeError, ValueError):
                            continue