import os
import re
import json
import shutil
import logging
import warnings
import dateutil.parser
import pandas as pd
import drmaa
import pydeface.utils as pdu
from pathlib import Path

# NB: These local imports are assumed to resolve within the surrounding BIDScoin
# package: bids is BIDScoin's helper module, physio its physiological-log module,
# and me the multiecho combination package
import bids
import physio
from multiecho import combination as me

LOGGER = logging.getLogger(__name__)


def deface(bidsdir: str, pattern: str, subjects: list, output: str, cluster: bool, nativespec: str, kwargs: dict):
    """
    :param bidsdir:     The bids-directory with the (multi-echo) subject data
    :param pattern:     Glob-like search pattern (relative to the subject/session folder) to select the images that need to be defaced, e.g. 'anat/*_T1w*'
    :param subjects:    List of sub-# identifiers to be processed (the sub- prefix can be left out). If not specified then all sub-folders in the bidsfolder will be processed
    :param output:      Determines where the defaced images are saved. It can be the name of a BIDS datatype folder, such as 'anat', or of the derivatives folder, i.e. 'derivatives'. If output is left empty then the original images are replaced by the defaced images
    :param cluster:     Flag to submit the deface jobs to the high-performance compute (HPC) cluster
    :param nativespec:  DRMAA native specifications for submitting deface jobs to the HPC cluster
    :param kwargs:      Additional arguments (in dict/json-style) that are passed to pydeface. See examples for usage
    :return:
    """

    # Input checking
    bidsdir = Path(bidsdir).resolve()

    # Start logging
    bids.setup_logging(bidsdir/'code'/'bidscoin'/'deface.log')
    LOGGER.info('')
    LOGGER.info('------------ START deface ------------')
    LOGGER.info(f">>> deface bidsfolder={bidsdir} pattern={pattern} subjects={subjects} output={output}"
                f" cluster={cluster} nativespec={nativespec} {kwargs}")

    # Get the list of subjects
    if not subjects:
        subjects = bids.lsdirs(bidsdir, 'sub-*')
        if not subjects:
            LOGGER.warning(f"No subjects found in: {bidsdir/'sub-*'}")
    else:
        subjects = ['sub-' + re.sub('^sub-', '', subject) for subject in subjects]      # Make sure there is a "sub-" prefix
        subjects = [bidsdir/subject for subject in subjects if (bidsdir/subject).is_dir()]

    # Prepare the HPC job submission
    with drmaa.Session() as pbatch:
        if cluster:
            jt                     = pbatch.createJobTemplate()
            jt.jobEnvironment      = os.environ
            jt.remoteCommand       = shutil.which('pydeface')
            jt.nativeSpecification = nativespec
            jt.joinFiles           = True

        # Loop over bids subject/session-directories
        for n, subject in enumerate(subjects, 1):

            sessions = bids.lsdirs(subject, 'ses-*')
            if not sessions:
                sessions = [subject]
            for session in sessions:

                LOGGER.info('--------------------------------------')
                LOGGER.info(f"Processing ({n}/{len(subjects)}): {session}")

                sub_id, ses_id = bids.get_subid_sesid(session/'dum.my')

                # Search for images that need to be defaced
                for match in sorted([match for match in session.glob(pattern) if '.nii' in match.suffixes]):

                    # Construct the output filename and relative path name (used in BIDS)
                    match_rel = str(match.relative_to(session))
                    if not output:
                        outputfile     = match
                        outputfile_rel = match_rel
                    elif output == 'derivatives':
                        outputfile     = bidsdir/'derivatives'/'deface'/sub_id/ses_id/match.parent.name/match.name
                        outputfile_rel = str(outputfile.relative_to(bidsdir))
                    else:
                        outputfile     = session/output/match.name
                        outputfile_rel = str(outputfile.relative_to(session))
                    outputfile.parent.mkdir(parents=True, exist_ok=True)

                    # Deface the image
                    LOGGER.info(f"Defacing: {match_rel} -> {outputfile_rel}")
                    if cluster:
                        jt.args    = [str(match), '--outfile', str(outputfile), '--force'] + [item for pair in [[f"--{key}", val] for key, val in kwargs.items()] for item in pair]
                        jt.jobName = f"pydeface_{sub_id}_{ses_id}"
                        jobid      = pbatch.runJob(jt)
                        LOGGER.info(f"Your deface job has been submitted with ID: {jobid}")
                    else:
                        pdu.deface_image(str(match), str(outputfile), force=True, forcecleanup=True, **kwargs)

                    # Overwrite or add a json sidecar-file
                    inputjson  = match.with_suffix('').with_suffix('.json')
                    outputjson = outputfile.with_suffix('').with_suffix('.json')
                    if inputjson.is_file() and inputjson != outputjson:
                        if outputjson.is_file():
                            LOGGER.info(f"Overwriting the json sidecar-file: {outputjson}")
                            outputjson.unlink()
                        else:
                            LOGGER.info(f"Adding a json sidecar-file: {outputjson}")
                        shutil.copyfile(inputjson, outputjson)

                    # Add a custom "Defaced" field to the json sidecar-file
                    with outputjson.open('r') as output_fid:
                        data = json.load(output_fid)
                    data['Defaced'] = True
                    with outputjson.open('w') as output_fid:
                        json.dump(data, output_fid, indent=4)

                    # Update the IntendedFor fields in the fieldmap sidecar-files
                    if output and output != 'derivatives' and (session/'fmap').is_dir():
                        for fmap in (session/'fmap').glob('*.json'):
                            with fmap.open('r') as fmap_fid:
                                fmap_data = json.load(fmap_fid)
                            intendedfor = fmap_data['IntendedFor']
                            if isinstance(intendedfor, str):
                                intendedfor = [intendedfor]
                            if match_rel in intendedfor:
                                LOGGER.info(f"Updating 'IntendedFor' to {outputfile_rel} in {fmap}")
                                fmap_data['IntendedFor'] = intendedfor + [outputfile_rel]
                                with fmap.open('w') as fmap_fid:
                                    json.dump(fmap_data, fmap_fid, indent=4)

                    # Update the scans.tsv file
                    if (bidsdir/'.bidsignore').is_file():
                        with (bidsdir/'.bidsignore').open('r') as fid:
                            bidsignore = fid.read().splitlines()
                    else:
                        bidsignore = [bids.unknowndatatype + '/']
                    bidsignore.append('derivatives/')
                    scans_tsv = session/f"{sub_id}{bids.add_prefix('_', ses_id)}_scans.tsv"
                    if output and output + '/' not in bidsignore and scans_tsv.is_file():
                        LOGGER.info(f"Adding {outputfile_rel} to {scans_tsv}")
                        scans_table                     = pd.read_csv(scans_tsv, sep='\t', index_col='filename')
                        scans_table.loc[outputfile_rel] = scans_table.loc[match_rel]
                        scans_table.sort_values(by=['acq_time', 'filename'], inplace=True)
                        scans_table.to_csv(scans_tsv, sep='\t', encoding='utf-8')

        if cluster:
            LOGGER.info('Waiting for the deface jobs to finish...')
            pbatch.synchronize(jobIds=[pbatch.JOB_IDS_SESSION_ALL], timeout=pbatch.TIMEOUT_WAIT_FOREVER, dispose=True)
            pbatch.deleteJobTemplate(jt)

    LOGGER.info('-------------- FINISHED! -------------')
    LOGGER.info('')
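
# Usage sketch (the paths and the pydeface 'cost' option below are illustrative, not
# part of this module): deface all T1w images while keeping the originals, writing
# the defaced copies to the derivatives folder and forwarding one pydeface option:
#
#   deface(bidsdir='/project/bids', pattern='anat/*_T1w*', subjects=[],
#          output='derivatives', cluster=False, nativespec='',
#          kwargs={'cost': 'mutualinfo'})
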
def coin_data2bids(dataformat: str, session: Path, bidsmap: dict, bidsfolder: Path, personals: dict, subprefix: str, sesprefix: str) -> None:
    """
    Converts the session source-files into BIDS-valid nifti-files in the corresponding bidsfolder and
    extracts personals (e.g. Age, Sex) from the source header

    :param dataformat:  The format of the raw input data that is to be coined (e.g. 'DICOM' or 'PAR', see bids.get_dataformat)
    :param session:     The full-path name of the subject/session source file/folder
    :param bidsmap:     The full mapping heuristics from the bidsmap YAML-file
    :param bidsfolder:  The full-path name of the BIDS root-folder
    :param personals:   The dictionary with the personal information
    :param subprefix:   The prefix common for all source subject-folders
    :param sesprefix:   The prefix common for all source session-folders
    :return:            Nothing
    """

    # Get valid BIDS subject/session identifiers from the (first) DICOM- or PAR/XML source file
    if dataformat == 'DICOM':
        sourcefile = Path()
        sources    = bids.lsdirs(session)
        for source in sources:
            sourcefile   = bids.get_dicomfile(source)
            manufacturer = bids.get_dicomfield('Manufacturer', sourcefile)
            if sourcefile.name:
                break

    elif dataformat == 'PAR':
        sources      = bids.get_parfiles(session)
        manufacturer = 'Philips Medical Systems'
        if sources:
            sourcefile = sources[0]

    else:
        LOGGER.error(f"Unsupported data format: {dataformat}\nPlease report this bug")
        return

    if not sources:
        LOGGER.info(f"No data found for: {session}")
        return

    subid, sesid = bids.get_subid_sesid(sourcefile, bidsmap[dataformat]['subject'], bidsmap[dataformat]['session'], subprefix, sesprefix)
    if subid == subprefix:
        LOGGER.error(f"No valid subject identifier found for: {session}")
        return

    # Create the BIDS session-folder and a scans.tsv file
    bidsses = bidsfolder/subid/sesid
    if bidsses.is_dir():
        LOGGER.warning(f"Existing BIDS output-directory found, which may result in duplicate data (with increased run-index). Make sure {bidsses} was cleaned-up from old data before (re)running the bidscoiner")
    bidsses.mkdir(parents=True, exist_ok=True)
    scans_tsv = bidsses/f"{subid}{bids.add_prefix('_', sesid)}_scans.tsv"
    if scans_tsv.is_file():
        scans_table = pd.read_csv(scans_tsv, sep='\t', index_col='filename')
    else:
        scans_table            = pd.DataFrame(columns=['acq_time'], dtype='str')
        scans_table.index.name = 'filename'

    # Process all the source files or run subfolders
    for source in sources:

        # Get a source-file
        if dataformat == 'DICOM':
            sourcefile = bids.get_dicomfile(source)
        elif dataformat == 'PAR':
            sourcefile = source
        if not sourcefile.name:
            continue

        # Get a matching run from the bidsmap
        run, datatype, index = bids.get_matching_run(sourcefile, bidsmap, dataformat)

        # Check if we should ignore this run
        if datatype == bids.ignoredatatype:
            LOGGER.info(f"Leaving out: {source}")
            continue

        # Check if we already know this run
        if index is None:
            LOGGER.error(f"Skipping unknown '{datatype}' run: {sourcefile}\n-> Re-run the bidsmapper and delete {bidsses} to solve this warning")
            continue

        LOGGER.info(f"Processing: {source}")

        # Create the BIDS session/datatype output folder
        if run['bids']['suffix'] in bids.get_derivatives(datatype):
            outfolder = bidsfolder/'derivatives'/manufacturer.replace(' ', '')/subid/sesid/datatype
        else:
            outfolder = bidsses/datatype
        outfolder.mkdir(parents=True, exist_ok=True)

        # Compose the BIDS filename using the matched run
        bidsname = bids.get_bidsname(subid, sesid, run)
        runindex = run['bids'].get('run', '')
        if runindex.startswith('<<') and runindex.endswith('>>'):
            bidsname = bids.increment_runindex(outfolder, bidsname)
        jsonfiles = [(outfolder/bidsname).with_suffix('.json')]     # List -> Collect the associated json-files (for updating them later) -- possibly > 1

        # Check if the file already exists (-> e.g. when a static runindex is used)
        if (outfolder/bidsname).with_suffix('.json').is_file():
            LOGGER.warning(f"{outfolder/bidsname}.* already exists and will be deleted -- check your results carefully!")
            for ext in ('.nii.gz', '.nii', '.json', '.bval', '.bvec', '.tsv.gz'):
                (outfolder/bidsname).with_suffix(ext).unlink(missing_ok=True)

        # Convert physiological log files (dcm2niix can't handle these)
        if run['bids']['suffix'] == 'physio':
            if bids.get_dicomfile(source, 2).name:
                LOGGER.warning(f"Found > 1 DICOM file in {source}, using: {sourcefile}")
            physiodata = physio.readphysio(sourcefile)
            physio.physio2tsv(physiodata, outfolder/bidsname)

        # Convert the source-files in the run folder to nifti's in the BIDS-folder
        else:
            command = '{path}dcm2niix {args} -f "{filename}" -o "{outfolder}" "{source}"'.format(
                path      = bidsmap['Options']['dcm2niix']['path'],
                args      = bidsmap['Options']['dcm2niix']['args'],
                filename  = bidsname,
                outfolder = outfolder,
                source    = source)
            if not bids.run_command(command):
                continue

            # Replace uncropped output image with the cropped one
            if '-x y' in bidsmap['Options']['dcm2niix']['args']:
                for dcm2niixfile in sorted(outfolder.glob(bidsname + '*_Crop_*')):          # e.g. *_Crop_1.nii.gz
                    ext         = ''.join(dcm2niixfile.suffixes)
                    newbidsfile = str(dcm2niixfile).rsplit(ext, 1)[0].rsplit('_Crop_', 1)[0] + ext
                    LOGGER.info(f"Found dcm2niix _Crop_ postfix, replacing original file\n{dcm2niixfile} ->\n{newbidsfile}")
                    dcm2niixfile.replace(newbidsfile)

            # Rename all files that got additional postfixes from dcm2niix. See: https://github.com/rordenlab/dcm2niix/blob/master/FILENAMING.md
            dcm2niixpostfixes = ('_c', '_i', '_Eq', '_real', '_imaginary', '_MoCo', '_t', '_Tilt', '_e', '_ph')
            dcm2niixfiles     = sorted(set([dcm2niixfile for dcm2niixpostfix in dcm2niixpostfixes for dcm2niixfile in outfolder.glob(f"{bidsname}*{dcm2niixpostfix}*")]))
            for dcm2niixfile in dcm2niixfiles:
                ext         = ''.join(dcm2niixfile.suffixes)
                postfixes   = str(dcm2niixfile).split(bidsname)[1].rsplit(ext)[0].split('_')[1:]
                newbidsname = dcm2niixfile.name     # Strip the additional postfixes and assign them to bids entities in the for-loop below
                for postfix in postfixes:           # dcm2niix postfixes _c%d, _e%d and _ph (and any combination of these in that order) are for multi-coil data, multi-echo data and phase data

                    # Patch the echo entity in the newbidsname with the dcm2niix echo info
                    # NB: We can't rely on the bids-entity info here because manufacturers can e.g. put multiple echos in one series / run-folder
                    if postfix[0] == 'e' and bids.get_bidsvalue(newbidsname, 'echo'):       # NB: Check if postfix[0]=='e' uniquely refers to the right dcm2niixpostfix
                        echonr = f"_{postfix}"                                              # E.g. echonr='_e1' or echonr='_pha'
                        for dcm2niixpostfix in dcm2niixpostfixes:
                            echonr = echonr.replace(dcm2niixpostfix, '')                    # Strip the dcm2niixpostfix to keep the echonr info. E.g. [echonr='_e1' or echonr='_pha'] -> [echonr='1' or echonr='a']
                        if echonr.isalpha():
                            echonr = ord(echonr) - 95                                       # dcm2niix adds an alphabetically ordered character if it outputs more than one image with the same name. Convert character to echo-number: '' -> 1, 'a'->2, etc
                        elif not echonr:
                            echonr = 1
                        newbidsname = bids.get_bidsvalue(newbidsname, 'echo', str(echonr))  # In contrast to other labels, run and echo labels MUST be integers. Those labels MAY include zero padding, but this is NOT RECOMMENDED to maintain their uniqueness

                    # Patch fieldmap images (NB: datatype=='fmap' is too broad, see the fmap.yaml file; a filename illustration follows after this function)
                    elif run['bids']['suffix'] in ('magnitude', 'magnitude1', 'magnitude2', 'phase1', 'phase2', 'phasediff', 'fieldmap'):
                        if len(dcm2niixfiles) not in (0, 2, 4, 6, 8):                       # Phase / echo data may be stored in the same data source / run folder
                            LOGGER.warning(f"Unknown fieldmap {outfolder/bidsname} for '{postfix}'")
                        newbidsname = newbidsname.replace('_fieldmap_ph',    '_fieldmap')
                        newbidsname = newbidsname.replace('_magnitude_e1',   '_magnitude')
                        newbidsname = newbidsname.replace('_magnitude_ph',   '_fieldmap')
                        newbidsname = newbidsname.replace('_magnitude1_e1',  '_magnitude1')
                        newbidsname = newbidsname.replace('_magnitude2_e1',  '_magnitude1')     # This can happen when the e2 image is stored in the same directory as the e1 image, but with the e2 listed first
                        newbidsname = newbidsname.replace('_magnitude1_e2',  '_magnitude2')
                        if len(dcm2niixfiles) == 8:
                            newbidsname = newbidsname.replace('_magnitude1_ph', '_phase1')      # Two magnitude + two phase images in one folder / datasource
                        else:
                            newbidsname = newbidsname.replace('_magnitude1_ph', '_phasediff')   # One or two magnitude + 1 phasediff image
                        newbidsname = newbidsname.replace('_magnitude1a',    '_magnitude2')
                        newbidsname = newbidsname.replace('_magnitude1_pha', '_phase2')
                        newbidsname = newbidsname.replace('_magnitude2_e2',  '_magnitude2')
                        newbidsname = newbidsname.replace('_magnitude2_ph',  '_phase2')
                        newbidsname = newbidsname.replace('_phase1_e1',      '_phase1')
                        newbidsname = newbidsname.replace('_phase2_e1',      '_phase1')         # This can happen when the e2 image is stored in the same directory as the e1 image, but with the e2 listed first
                        newbidsname = newbidsname.replace('_phase1_ph',      '_phase1')
                        newbidsname = newbidsname.replace('_phase1_e2',      '_phase2')
                        newbidsname = newbidsname.replace('_phase2_e2',      '_phase2')
                        newbidsname = newbidsname.replace('_phase2_ph',      '_phase2')

                    # Append the dcm2niix info to the acq-label, may need to be improved / elaborated for future BIDS standards, supporting multi-coil data
                    else:
                        newbidsname = bids.get_bidsvalue(newbidsname, 'dummy', postfix)

                    # Remove the added postfix from the new bidsname
                    newbidsname = newbidsname.replace(f"_{postfix}_", '_')      # If it is not last
                    newbidsname = newbidsname.replace(f"_{postfix}.", '.')      # If it is last

                # Save the file with a new name
                if runindex.startswith('<<') and runindex.endswith('>>'):
                    newbidsname = bids.increment_runindex(outfolder, newbidsname, '')   # Update the runindex now that the acq-label has changed
                newbidsfile = outfolder/newbidsname
                LOGGER.info(f"Found dcm2niix {postfixes} postfixes, renaming\n{dcm2niixfile} ->\n{newbidsfile}")
                if newbidsfile.is_file():
                    LOGGER.warning(f"Overwriting existing {newbidsfile} file -- check your results carefully!")
                dcm2niixfile.replace(newbidsfile)
                if ext == '.json':
                    oldjsonfile = (outfolder/bidsname).with_suffix('.json')
                    if oldjsonfile in jsonfiles and not oldjsonfile.is_file():
                        jsonfiles.remove((outfolder/bidsname).with_suffix('.json'))
                    jsonfiles.append(newbidsfile)

        # Loop over and adapt all the newly produced json files and write to the scans.tsv file (every nifti-file comes with a json-file)
        for jsonfile in sorted(set(jsonfiles)):

            # Check if dcm2niix behaved as expected
            if not jsonfile.is_file():
                LOGGER.error(f"Unexpected file conversion result: {jsonfile} not found")
                continue

            # Add a dummy b0 bval- and bvec-file for any file without a bval/bvec file (e.g. sbref, b0 scans)
            if datatype == 'dwi':
                bvecfile = jsonfile.with_suffix('.bvec')
                bvalfile = jsonfile.with_suffix('.bval')
                if not bvecfile.is_file():
                    LOGGER.info(f"Adding dummy bvec file: {bvecfile}")
                    with bvecfile.open('w') as bvec_fid:
                        bvec_fid.write('0\n0\n0\n')
                if not bvalfile.is_file():
                    LOGGER.info(f"Adding dummy bval file: {bvalfile}")
                    with bvalfile.open('w') as bval_fid:
                        bval_fid.write('0\n')

            # Add the TaskName to the func json-file
            elif datatype == 'func':
                with jsonfile.open('r') as json_fid:
                    data = json.load(json_fid)
                if 'TaskName' not in data:
                    LOGGER.info(f"Adding TaskName to: {jsonfile}")
                    data['TaskName'] = run['bids']['task']
                    with jsonfile.open('w') as json_fid:
                        json.dump(data, json_fid, indent=4)

            # Parse the acquisition time from the json file or else from the source header (NB: assuming the source file represents the first acquisition)
            niifile = list(jsonfile.parent.glob(jsonfile.stem + '.nii*'))   # Find the corresponding nifti file (there should be only one, let's not make assumptions about the .gz extension)
            if niifile and datatype not in bidsmap['Options']['bidscoin']['bidsignore'] and run['bids']['suffix'] not in bids.get_derivatives(datatype):
                with jsonfile.open('r') as json_fid:
                    data = json.load(json_fid)
                if 'AcquisitionTime' not in data or not data['AcquisitionTime']:
                    data['AcquisitionTime'] = bids.get_sourcefield('AcquisitionTime', sourcefile)   # DICOM
                if not data['AcquisitionTime']:
                    data['AcquisitionTime'] = bids.get_sourcefield('exam_date', sourcefile)         # PAR/XML
                try:
                    acq_time = dateutil.parser.parse(data['AcquisitionTime'])
                except Exception:
                    LOGGER.warning(f"Could not parse the acquisition time from: '{data['AcquisitionTime']}' in {sourcefile}")
                    acq_time = dateutil.parser.parse('00:00:00')
                scanpath = niifile[0].relative_to(bidsses)
                scans_table.loc[scanpath.as_posix(), 'acq_time'] = '1925-01-01T' + acq_time.strftime('%H:%M:%S')

    # Write the scans_table to disk
    LOGGER.info(f"Writing acquisition time data to: {scans_tsv}")
    scans_table.sort_values(by=['acq_time', 'filename'], inplace=True)
    scans_table.to_csv(scans_tsv, sep='\t', encoding='utf-8')

    # Add IntendedFor and TE1+TE2 meta-data to the fieldmap json-files. This has been postponed until all datatypes have been processed (i.e. so that all target images are indeed on disk)
    if bidsmap[dataformat]['fmap'] is not None:
        for fieldmap in bidsmap[dataformat]['fmap']:
            bidsname    = bids.get_bidsname(subid, sesid, fieldmap)
            niifiles    = []
            intendedfor = fieldmap['bids']['IntendedFor']

            # Search for the imaging files that match the IntendedFor search criteria
            if intendedfor:
                if intendedfor.startswith('<<') and intendedfor.endswith('>>'):
                    intendedfor = intendedfor[2:-2].split('><')
                elif not isinstance(intendedfor, list):
                    intendedfor = [intendedfor]
                for selector in intendedfor:
                    niifiles.extend([Path(niifile).relative_to(bidsfolder/subid) for niifile in sorted(bidsses.rglob(f"*{selector}*.nii*")) if selector])    # Search in all runs using a relative path to the subject folder
            else:
                intendedfor = []

            # Get the set of json-files (account for multiple runs in one data source and dcm2niix postfixes inserted into the acquisition label)
            jsonfiles = []
            acqlabel  = bids.get_bidsvalue(bidsname, 'acq')
            patterns  = (bidsname.replace('_run-1_', '_run-[0-9]*_').replace('_magnitude1', '_magnitude*').replace('_magnitude2', '_magnitude*').replace('_phase1', '_phase*').replace('_phase2', '_phase*'),
                         bidsname.replace('_run-1_', '_run-[0-9]*_').replace('_magnitude1', '_phase*').replace('_magnitude2', '_phase*'))
            for pattern in patterns:
                jsonfiles.extend((bidsses/'fmap').glob(pattern + '.json'))
                if acqlabel:
                    cepattern = bids.get_bidsvalue(pattern, 'acq', acqlabel + '[CE][0-9]*')
                    jsonfiles.extend(list((bidsses/'fmap').glob(cepattern + '.json')))

            # Save the meta-data in the jsonfiles
            for jsonfile in sorted(set(jsonfiles)):

                # Add the IntendedFor data
                with jsonfile.open('r') as json_fid:
                    data = json.load(json_fid)
                if 'IntendedFor' not in data:
                    if niifiles:
                        LOGGER.info(f"Adding IntendedFor to: {jsonfile}")
                    elif intendedfor:
                        LOGGER.warning(f"Empty 'IntendedFor' fieldmap value in {jsonfile}: the search for {intendedfor} gave no results")
                    else:
                        LOGGER.warning(f"Empty 'IntendedFor' fieldmap value in {jsonfile}: the IntendedFor value of the bidsmap entry was empty")
                    data['IntendedFor'] = [niifile.as_posix() for niifile in niifiles]      # The path needs to use forward slashes instead of backward slashes
                    with jsonfile.open('w') as json_fid:
                        json.dump(data, json_fid, indent=4)

                # Extract the echo times from magnitude1 and magnitude2 and add them to the phasediff json-file
                if jsonfile.name.endswith('phasediff.json'):
                    json_magnitude = [None, None]
                    TE             = [None, None]
                    for n in (0, 1):
                        json_magnitude[n] = jsonfile.parent/jsonfile.name.replace('_phasediff', f"_magnitude{n+1}")
                        if not json_magnitude[n].is_file():
                            LOGGER.error(f"Could not find expected magnitude{n+1} image associated with: {jsonfile}")
                        else:
                            with json_magnitude[n].open('r') as json_fid:
                                data = json.load(json_fid)
                            TE[n] = data['EchoTime']
                    if None in TE:
                        LOGGER.error(f"Cannot find and add valid EchoTime1={TE[0]} and EchoTime2={TE[1]} data to: {jsonfile}")
                    elif TE[0] > TE[1]:
                        LOGGER.error(f"Found invalid EchoTime1={TE[0]} > EchoTime2={TE[1]} for: {jsonfile}")
                    else:
                        with jsonfile.open('r') as json_fid:
                            data = json.load(json_fid)
                        data['EchoTime1'] = TE[0]
                        data['EchoTime2'] = TE[1]
                        LOGGER.info(f"Adding EchoTime1: {TE[0]} and EchoTime2: {TE[1]} to {jsonfile}")
                        with jsonfile.open('w') as json_fid:
                            json.dump(data, json_fid, indent=4)

    # Collect personal data from a source header (PAR/XML does not contain personal info)
    if dataformat == 'DICOM' and sourcefile.name:
        personals['participant_id'] = subid
        if sesid:
            if 'session_id' not in personals:
                personals['session_id'] = sesid
            else:
                return      # Only take data from the first session -> BIDS specification
        age = bids.get_dicomfield('PatientAge', sourcefile)     # A string of characters with one of the following formats: nnnD, nnnW, nnnM, nnnY
        if   age.endswith('D'): personals['age'] = str(int(float(age.rstrip('D'))/365.2524))
        elif age.endswith('W'): personals['age'] = str(int(float(age.rstrip('W'))/52.1775))
        elif age.endswith('M'): personals['age'] = str(int(float(age.rstrip('M'))/12))
        elif age.endswith('Y'): personals['age'] = str(int(float(age.rstrip('Y'))))
        elif age:               personals['age'] = age
        personals['sex']    = bids.get_dicomfield('PatientSex',    sourcefile)
        personals['size']   = bids.get_dicomfield('PatientSize',   sourcefile)
        personals['weight'] = bids.get_dicomfield('PatientWeight', sourcefile)
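
# Illustration (hypothetical filenames) of the fieldmap postfix patching in
# coin_data2bids above, for a classic GRE fieldmap where dcm2niix stores the two
# magnitude echos and the phasediff image in one run folder:
#
#   sub-01_magnitude1_e1.nii.gz -> sub-01_magnitude1.nii.gz
#   sub-01_magnitude1_e2.nii.gz -> sub-01_magnitude2.nii.gz
#   sub-01_magnitude1_ph.nii.gz -> sub-01_phasediff.nii.gz   (single phasediff image)
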
def bidscoiner(rawfolder: str, bidsfolder: str, subjects: list = (), force: bool = False, participants: bool = False, bidsmapfile: str = 'bidsmap.yaml', subprefix: str = 'sub-', sesprefix: str = 'ses-') -> None:
    """
    Main function that processes all the subjects and sessions in the sourcefolder and uses the
    bidsmap.yaml file in bidsfolder/code/bidscoin to cast the data into the BIDS folder.

    :param rawfolder:       The root folder-name of the sub/ses/data/file tree containing the source data files
    :param bidsfolder:      The name of the BIDS root folder
    :param subjects:        List of selected subjects / participants (i.e. sub-# names / folders) to be processed (the sub- prefix can be removed). Otherwise all subjects in the sourcefolder will be selected
    :param force:           If True, subjects will be processed, regardless of existing folders in the bidsfolder. Otherwise existing folders will be skipped
    :param participants:    If True, subjects in participants.tsv will not be processed (this could be used e.g. to protect these subjects from being reprocessed), also when force=True
    :param bidsmapfile:     The name of the bidsmap YAML-file. If the bidsmap pathname is relative (i.e. no "/" in the name) then it is assumed to be located in bidsfolder/code/bidscoin
    :param subprefix:       The prefix common for all source subject-folders
    :param sesprefix:       The prefix common for all source session-folders
    :return:                Nothing
    """

    # Input checking & defaults
    rawfolder   = Path(rawfolder).resolve()
    bidsfolder  = Path(bidsfolder).resolve()
    bidsmapfile = Path(bidsmapfile)

    # Start logging
    bids.setup_logging(bidsfolder/'code'/'bidscoin'/'bidscoiner.log')
    LOGGER.info('')
    LOGGER.info(f"-------------- START BIDScoiner {bids.version()}: BIDS {bids.bidsversion()} ------------")
    LOGGER.info(f">>> bidscoiner sourcefolder={rawfolder} bidsfolder={bidsfolder} subjects={subjects} force={force}"
                f" participants={participants} bidsmap={bidsmapfile} subprefix={subprefix} sesprefix={sesprefix}")

    # Create a code/bidscoin subfolder
    (bidsfolder/'code'/'bidscoin').mkdir(parents=True, exist_ok=True)

    # Create a dataset description file if it does not exist
    dataset_file = bidsfolder/'dataset_description.json'
    if not dataset_file.is_file():
        dataset_description = {"Name":               "REQUIRED. Name of the dataset",
                               "BIDSVersion":        str(bids.bidsversion()),
                               "DatasetType":        "raw",
                               "License":            "RECOMMENDED. The license for the dataset. The use of license name abbreviations is RECOMMENDED for specifying a license. The corresponding full license text MAY be specified in an additional LICENSE file",
                               "Authors":            ["OPTIONAL. List of individuals who contributed to the creation/curation of the dataset"],
                               "Acknowledgements":   "OPTIONAL. Text acknowledging contributions of individuals or institutions beyond those listed in Authors or Funding",
                               "HowToAcknowledge":   "OPTIONAL. Instructions how researchers using this dataset should acknowledge the original authors. This field can also be used to define a publication that should be cited in publications that use the dataset",
                               "Funding":            ["OPTIONAL. List of sources of funding (grant numbers)"],
                               "EthicsApprovals":    ["OPTIONAL. List of ethics committee approvals of the research protocols and/or protocol identifiers"],
                               "ReferencesAndLinks": ["OPTIONAL. List of references to publication that contain information on the dataset, or links", "https://github.com/Donders-Institute/bidscoin"],
                               "DatasetDOI":         "OPTIONAL. The Document Object Identifier of the dataset (not the corresponding paper)"}
        LOGGER.info(f"Creating dataset description file: {dataset_file}")
        with open(dataset_file, 'w') as fid:
            json.dump(dataset_description, fid, indent=4)

    # Create a README file if it does not exist
    readme_file = bidsfolder/'README'
    if not readme_file.is_file():
        LOGGER.info(f"Creating README file: {readme_file}")
        with open(readme_file, 'w') as fid:
            fid.write(f"A free form text ( README ) describing the dataset in more detail that SHOULD be provided\n\n"
                      f"The raw BIDS data was created using BIDScoin {bids.version()}\n"
                      f"All provenance information and settings can be found in ./code/bidscoin\n"
                      f"For more information see: https://github.com/Donders-Institute/bidscoin")

    # Get the bidsmap heuristics from the bidsmap YAML-file
    bidsmap, _ = bids.load_bidsmap(bidsmapfile, bidsfolder/'code'/'bidscoin')
    if not bidsmap:
        LOGGER.error(f"No bidsmap file found in {bidsfolder}. Please run the bidsmapper first and / or use the correct bidsfolder")
        return

    # Save options to the .bidsignore file
    bidsignore_items = [item.strip() for item in bidsmap['Options']['bidscoin']['bidsignore'].split(';')]
    LOGGER.info(f"Writing {bidsignore_items} entries to {bidsfolder/'.bidsignore'}")
    with (bidsfolder/'.bidsignore').open('w') as bidsignore:
        for item in bidsignore_items:
            bidsignore.write(item + '\n')

    # Get the table & dictionary of the subjects that have been processed
    participants_tsv  = bidsfolder/'participants.tsv'
    participants_json = participants_tsv.with_suffix('.json')
    if participants_tsv.is_file():
        participants_table = pd.read_csv(participants_tsv, sep='\t')
        participants_table.set_index(['participant_id'], verify_integrity=True, inplace=True)
    else:
        participants_table            = pd.DataFrame()
        participants_table.index.name = 'participant_id'
    if participants_json.is_file():
        with participants_json.open('r') as json_fid:
            participants_dict = json.load(json_fid)
    else:
        participants_dict = {'participant_id': {'Description': 'Unique participant identifier'}}

    # Get the list of subjects
    if not subjects:
        subjects = bids.lsdirs(rawfolder, subprefix + '*')
        if not subjects:
            LOGGER.warning(f"No subjects found in: {rawfolder/subprefix}*")
    else:
        subjects = [subprefix + re.sub(f"^{subprefix}", '', subject) for subject in subjects]   # Make sure there is a "sub-" prefix
        subjects = [rawfolder/subject for subject in subjects if (rawfolder/subject).is_dir()]

    # Loop over all subjects and sessions and convert them using the bidsmap entries
    for n, subject in enumerate(subjects, 1):

        LOGGER.info(f"------------------- Subject {n}/{len(subjects)} -------------------")
        if participants and subject.name in list(participants_table.index):
            LOGGER.info(f"Skipping subject: {subject} ({n}/{len(subjects)})")
            continue

        personals = dict()
        sessions  = bids.lsdirs(subject, sesprefix + '*')
        if not sessions:
            sessions = [subject]
        for session in sessions:

            # Unpack the data in a temporary folder if it is tarballed/zipped and/or contains a DICOMDIR file
            session, unpacked = bids.unpack(session, subprefix, sesprefix)

            # See what dataformat we have
            dataformat = bids.get_dataformat(session)
            if not dataformat:
                LOGGER.info(f"Skipping unknown session: {session}")
                continue

            # Check if we should skip the session-folder
            if not force:
                subid, sesid = bids.get_subid_sesid(session/'dum.my', subprefix=subprefix, sesprefix=sesprefix)
                bidssession  = bidsfolder/subid/sesid
                if not bidsmap[dataformat]['session']:
                    bidssession = bidssession.parent
                datatypes = []
                for datatype in bids.lsdirs(bidssession):                                           # See what datatypes we already have in the bids session-folder
                    if next(datatype.glob('*'), None) and bidsmap[dataformat].get(datatype.name):   # See if we are going to add data for this datatype (NB: a bare glob() generator is always truthy, hence the next() check)
                        datatypes.append(datatype.name)
                if datatypes:
                    LOGGER.info(f"Skipping processed session: {bidssession} already has {datatypes} data (use the -f option to overrule)")
                    continue

            LOGGER.info(f"Coining session: {session}")

            # Update / append the source data mapping
            if dataformat in ('DICOM', 'PAR'):
                coin_data2bids(dataformat, session, bidsmap, bidsfolder, personals, subprefix, sesprefix)

            # Update / append the P7 mapping
            if dataformat == 'P7':
                LOGGER.error(f"{dataformat} not (yet) supported, skipping session: {session}")
                continue

            # Update / append the nifti mapping
            if dataformat == 'Nifti':
                coin_nifti(session, bidsmap, bidsfolder, personals)

            # Update / append the file-system mapping
            if dataformat == 'FileSystem':
                coin_filesystem(session, bidsmap, bidsfolder, personals)

            # Update / append the plugin mapping
            if bidsmap['PlugIns']:
                coin_plugin(session, bidsmap, bidsfolder, personals)

            # Clean-up the temporary unpacked data
            if unpacked:
                shutil.rmtree(session)

        # Store the collected personals in the participant_table
        for key in personals:

            # participant_id is the index of the participants_table
            assert 'participant_id' in personals
            if key == 'participant_id':
                continue

            # TODO: Check that only values that are consistent over sessions go in the participants.tsv file, otherwise put them in a sessions.tsv file
            if key not in participants_dict:
                participants_dict[key] = dict(LongName    = 'Long (unabbreviated) name of the column',
                                              Description = 'Description of the column',
                                              Levels      = dict(Key='Value (This is for categorical variables: a dictionary of possible values (keys) and their descriptions (values))'),
                                              Units       = 'Measurement units. [<prefix symbol>]<unit symbol> format following the SI standard is RECOMMENDED',
                                              TermURL     = 'URL pointing to a formal definition of this type of data in an ontology available on the web')
            participants_table.loc[personals['participant_id'], key] = personals[key]

    # Write the collected data to the participant files
    LOGGER.info(f"Writing subject data to: {participants_tsv}")
    participants_table.replace('', 'n/a').to_csv(participants_tsv, sep='\t', encoding='utf-8', na_rep='n/a')

    LOGGER.info(f"Writing subject data dictionary to: {participants_json}")
    with participants_json.open('w') as json_fid:
        json.dump(participants_dict, json_fid, indent=4)

    LOGGER.info('-------------- FINISHED! ------------')
    LOGGER.info('')

    bids.reporterrors()
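
# Usage sketch (folder and subject names are illustrative):
#
#   bidscoiner(rawfolder='raw', bidsfolder='bids')              # Convert all (new) subjects
#   bidscoiner('raw', 'bids', subjects=['001'], force=True)     # Reprocess a single subject
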
def echocombine(bidsdir: str, pattern: str, subjects: list, output: str, algorithm: str, weights: list, force: bool = False):
    """
    :param bidsdir:     The bids-directory with the (multi-echo) subject data
    :param pattern:     Glob-like recursive search pattern (relative to the subject/session folder) to select the first echo of the images that need to be combined, e.g. '*task-*echo-1*'
    :param subjects:    List of sub-# identifiers to be processed (the sub- prefix can be left out). If not specified then all sub-folders in the bidsfolder will be processed
    :param output:      Determines where the output is saved. It can be the name of a BIDS datatype folder, such as 'func', or of the derivatives folder, i.e. 'derivatives'. If output = [the name of the input datatype folder] then the original echo images are replaced by one combined image. If output is left empty then the combined image is saved in the input datatype folder and the original echo images are moved to the {bids.unknowndatatype} folder
    :param algorithm:   Combination algorithm, either 'PAID', 'TE' or 'average'
    :param weights:     Weights for each echo
    :param force:       Boolean to overwrite existing ME target files
    :return:
    """

    # Input checking
    bidsdir = Path(bidsdir).resolve()

    # Start logging
    bids.setup_logging(bidsdir/'code'/'bidscoin'/'echocombine.log')
    LOGGER.info('')
    LOGGER.info('--------- START echocombine ---------')
    LOGGER.info(f">>> echocombine bidsfolder={bidsdir} pattern={pattern} subjects={subjects} output={output}"
                f" algorithm={algorithm} weights={weights}")

    if 'echo' not in pattern:
        LOGGER.warning(f"Missing 'echo-#' substring in glob-like search pattern, i.e. '{pattern}' does not seem to select the first echo")

    # Get the list of subjects
    if not subjects:
        subjects = bids.lsdirs(bidsdir, 'sub-*')
        if not subjects:
            LOGGER.warning(f"No subjects found in: {bidsdir/'sub-*'}")
    else:
        subjects = ['sub-' + re.sub('^sub-', '', subject) for subject in subjects]      # Make sure there is a "sub-" prefix
        subjects = [bidsdir/subject for subject in subjects if (bidsdir/subject).is_dir()]

    # Loop over bids subject/session-directories
    for n, subject in enumerate(subjects, 1):

        sessions = bids.lsdirs(subject, 'ses-*')
        if not sessions:
            sessions = [subject]
        for session in sessions:

            LOGGER.info('-------------------------------------')
            LOGGER.info(f"Combining echos for ({n}/{len(subjects)}): {session}")

            sub_id, ses_id = bids.get_subid_sesid(session/'dum.my')

            # Search for multi-echo matches
            for match in sorted([match for match in session.rglob(pattern) if '.nii' in match.suffixes]):

                # Check if it is normal/BIDS multi-echo data
                datatype  = match.parent.name
                echonr    = bids.get_bidsvalue(match, 'echo')
                mepattern = bids.get_bidsvalue(match, 'echo', '*')
                echos     = sorted(match.parent.glob(mepattern.name))
                newechos  = [echo.parents[1]/bids.unknowndatatype/echo.name for echo in echos]
                if not echonr:
                    LOGGER.warning(f"No 'echo' key-value pair found in the filename, skipping: {match}")
                    continue
                if len(echos) == 1:
                    LOGGER.warning(f"Only one echo image found, nothing to do for: {match}")
                    continue

                # Construct the combined-echo output filename and check if that file already exists
                cename = match.name.replace(f"_echo-{echonr}", '')
                if not output:
                    cefile = session/datatype/cename
                elif output == 'derivatives':
                    cefile = bidsdir/'derivatives'/'multiecho'/sub_id/ses_id/datatype/cename
                else:
                    cefile = session/output/cename
                cefile.parent.mkdir(parents=True, exist_ok=True)
                if cefile.is_file() and not force:
                    LOGGER.warning(f"Outputfile {cefile} already exists, skipping: {match}")
                    continue

                # Combine the multi-echo images
                me.me_combine(mepattern, cefile, algorithm, weights, saveweights=False, logger=LOGGER.name)

                # (Re)move the original multi-echo images
                if not output:
                    for echo, newecho in zip(echos, newechos):
                        LOGGER.info(f"Moving original echo image: {echo} -> {newecho}")
                        newecho.parent.mkdir(parents=True, exist_ok=True)
                        echo.replace(newecho)
                        echo.with_suffix('').with_suffix('.json').replace(newecho.with_suffix('').with_suffix('.json'))
                elif output == datatype:
                    for echo in echos:
                        LOGGER.info(f"Removing original echo image: {echo}")
                        echo.unlink()
                        echo.with_suffix('').with_suffix('.json').unlink()

                # Construct relative path names as they are used in BIDS
                echos_rel    = [str(echo.relative_to(session)) for echo in echos]
                newechos_rel = [str(echo.relative_to(session)) for echo in newechos]
                if output != 'derivatives':
                    cefile_rel = str(cefile.relative_to(session))

                # Update the IntendedFor fields in the fieldmap sidecar files (i.e. remove the old echos, add the echo-combined image and, optionally, the new echos)
                if output != 'derivatives' and (session/'fmap').is_dir():
                    for fmap in (session/'fmap').glob('*.json'):
                        with fmap.open('r') as fmap_fid:
                            fmap_data = json.load(fmap_fid)
                        if 'IntendedFor' in fmap_data:
                            intendedfor = fmap_data['IntendedFor']
                            if isinstance(intendedfor, str):
                                intendedfor = [intendedfor]
                            if echos_rel[0] in intendedfor:
                                LOGGER.info(f"Updating 'IntendedFor' to {cefile_rel} in {fmap}")
                                if not output:
                                    intendedfor = [file for file in intendedfor if file not in echos_rel] + [cefile_rel] + newechos_rel
                                elif output == datatype:
                                    intendedfor = [file for file in intendedfor if file not in echos_rel] + [cefile_rel]
                                else:
                                    intendedfor = intendedfor + [cefile_rel]
                                fmap_data['IntendedFor'] = intendedfor
                                with fmap.open('w') as fmap_fid:
                                    json.dump(fmap_data, fmap_fid, indent=4)

                # Update the scans.tsv file
                if (bidsdir/'.bidsignore').is_file():
                    with (bidsdir/'.bidsignore').open('r') as fid:
                        bidsignore = fid.read().splitlines()
                else:
                    bidsignore = [bids.unknowndatatype + '/']
                bidsignore.append('derivatives/')
                scans_tsv = session/f"{sub_id}{bids.add_prefix('_', ses_id)}_scans.tsv"
                if output + '/' not in bidsignore and scans_tsv.is_file():
                    LOGGER.info(f"Adding {cefile_rel} to {scans_tsv}")
                    scans_table                 = pd.read_csv(scans_tsv, sep='\t', index_col='filename')
                    scans_table.loc[cefile_rel] = scans_table.loc[echos_rel[0]]
                    for echo, newecho in zip(echos_rel, newechos_rel):
                        if not output:
                            LOGGER.info(f"Updating {echo} -> {newecho} in {scans_tsv}")
                            scans_table.loc[newecho] = scans_table.loc[echo]
                            scans_table.drop(echo, inplace=True)
                        elif output == datatype:
                            LOGGER.info(f"Removing {echo} from {scans_tsv}")
                            scans_table.drop(echo, inplace=True)
                    scans_table.sort_values(by=['acq_time', 'filename'], inplace=True)
                    scans_table.to_csv(scans_tsv, sep='\t', encoding='utf-8')

    LOGGER.info('-------------- FINISHED! -------------')
    LOGGER.info('')
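
# Usage sketch (the pattern and paths are illustrative): combine the echos of all
# multi-echo fMRI runs with the PAID algorithm and save the result in derivatives:
#
#   echocombine(bidsdir='/project/bids', pattern='*task-*echo-1*', subjects=[],
#               output='derivatives', algorithm='PAID', weights=[])
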
def coin_dicom(session: Path, bidsmap: dict, bidsfolder: Path, personals: dict, subprefix: str, sesprefix: str) -> None:
    """
    Converts the session dicom-files into BIDS-valid nifti-files in the corresponding bidsfolder and
    extracts personals (e.g. Age, Sex) from the dicom header

    :param session:     The full-path name of the subject/session source folder
    :param bidsmap:     The full mapping heuristics from the bidsmap YAML-file
    :param bidsfolder:  The full-path name of the BIDS root-folder
    :param personals:   The dictionary with the personal information
    :param subprefix:   The prefix common for all source subject-folders
    :param sesprefix:   The prefix common for all source session-folders
    :return:            Nothing
    """

    if not bids.lsdirs(session):
        LOGGER.warning(f"No run subfolder(s) found in: {session}")
        return

    TE = [None, None]

    # Get valid BIDS subject/session identifiers from the (first) dicom-header or from the session source folder
    subid, sesid = bids.get_subid_sesid(bids.get_dicomfile(bids.lsdirs(session)[0]),
                                        bidsmap['DICOM']['subject'],
                                        bidsmap['DICOM']['session'],
                                        subprefix, sesprefix)
    if subid == subprefix:
        LOGGER.error(f"No valid subject identifier found for: {session}")
        return

    # Create the BIDS session-folder and a scans.tsv file
    bidsses = bidsfolder/subid/sesid
    if bidsses.is_dir():
        LOGGER.warning(f"Existing BIDS output-directory found, which may result in duplicate data (with increased run-index). Make sure {bidsses} was cleaned-up from old data before (re)running the bidscoiner")
    bidsses.mkdir(parents=True, exist_ok=True)
    scans_tsv = bidsses/f"{subid}{bids.add_prefix('_', sesid)}_scans.tsv"
    if scans_tsv.is_file():
        scans_table = pd.read_csv(scans_tsv, sep='\t', index_col='filename')
    else:
        scans_table            = pd.DataFrame(columns=['acq_time'], dtype='str')
        scans_table.index.name = 'filename'

    # Process all the dicom run subfolders
    for runfolder in bids.lsdirs(session):

        # Get a dicom-file
        dicomfile = bids.get_dicomfile(runfolder)
        if not dicomfile.name:
            continue

        # Get a matching run from the bidsmap
        run, modality, index = bids.get_matching_run(dicomfile, bidsmap)

        # Check if we should ignore this run
        if modality == bids.ignoremodality:
            LOGGER.info(f"Leaving out: {runfolder}")
            continue

        # Check if we already know this run
        if index is None:
            LOGGER.warning(f"Skipping unknown '{modality}': {dicomfile}\n-> re-run the bidsmapper and delete {session} to solve this warning")
            continue

        LOGGER.info(f"Processing: {runfolder}")

        # Create the BIDS session/modality folder
        bidsmodality = bidsses/modality
        bidsmodality.mkdir(parents=True, exist_ok=True)

        # Compose the BIDS filename using the matched run
        bidsname = bids.get_bidsname(subid, sesid, modality, run)
        runindex = run['bids']['run']
        if runindex.startswith('<<') and runindex.endswith('>>'):
            bidsname = bids.increment_runindex(bidsmodality, bidsname)

        # Check if the file already exists (-> e.g. when a static runindex is used)
        if (bidsmodality/bidsname).with_suffix('.json').is_file():
            LOGGER.warning(f"{bidsmodality/bidsname}.* already exists -- check your results carefully!")

        # Convert the dicom-files in the run folder to nifti's in the BIDS-folder
        command = '{path}dcm2niix {args} -f "{filename}" -o "{outfolder}" "{infolder}"'.format(
            path      = bidsmap['Options']['dcm2niix']['path'],
            args      = bidsmap['Options']['dcm2niix']['args'],
            filename  = bidsname,
            outfolder = bidsmodality,
            infolder  = runfolder)
        if not bids.run_command(command):
            continue

        # Replace uncropped output image with the cropped one
        if '-x y' in bidsmap['Options']['dcm2niix']['args']:
            for filename in sorted(bidsmodality.glob(bidsname + '*_Crop_*')):       # e.g. *_Crop_1.nii.gz
                ext         = ''.join(filename.suffixes)
                newfilename = str(filename).rsplit(ext, 1)[0].rsplit('_Crop_', 1)[0] + ext
                LOGGER.info(f"Found dcm2niix _Crop_ suffix, replacing original file\n{filename} ->\n{newfilename}")
                filename.replace(newfilename)

        # Rename all files ending with _c%d, _e%d and _ph (and any combination of these): These are produced by dcm2niix for multi-coil data, multi-echo data and phase data, respectively (see the filename illustration after this function)
        jsonfiles = []                                                              # Collect the associated json-files (for updating them later) -- possibly > 1
        for dcm2niisuffix in ('_c', '_e', '_ph', '_i'):
            for filename in sorted(bidsmodality.glob(bidsname + dcm2niisuffix + '*')):
                ext             = ''.join(filename.suffixes)
                basepath, index = str(filename).rsplit(ext, 1)[0].rsplit(dcm2niisuffix, 1)  # basepath = the name without the added stuff (i.e. bidsmodality/bidsname), index = added dcm2niix index (e.g. _c1 -> index=1)
                basesuffix      = basepath.rsplit('_', 1)[1]                        # The BIDS suffix, e.g. basepath = *_magnitude1 -> basesuffix=magnitude1
                index           = index.split('_')[0].zfill(2)                      # Zero-pad as specified in the BIDS-standard (assuming two digits is sufficient); strip following suffices (fieldmaps produce *_e2_ph files)

                # This is a special hack: dcm2niix does not always add a _c/_e suffix for the first(?) coil/echo image -> add it when we encounter a **_e2/_c2 file
                if dcm2niisuffix in ('_c', '_e') and int(index) == 2 and basesuffix not in ['magnitude1', 'phase1']:    # For fieldmaps: *_magnitude1_e[index] -> *_magnitude[index] (This is handled below)
                    filename_ce = Path(basepath + ext)                              # The file without the _c1/_e1 suffix
                    if dcm2niisuffix == '_e' and bids.get_bidsvalue(basepath, 'echo'):
                        newbasepath_ce = Path(bids.get_bidsvalue(basepath, 'echo', '1'))
                    else:
                        newbasepath_ce = Path(bids.get_bidsvalue(basepath, 'dummy', dcm2niisuffix.upper() + '1'.zfill(len(index))))     # --> append to acq-label, may need to be elaborated for future BIDS standards, supporting multi-coil data
                    newfilename_ce = newbasepath_ce.with_suffix(ext)                # The file as it should have been
                    if filename_ce.is_file():
                        if filename_ce != newfilename_ce:
                            LOGGER.info(f"Found no dcm2niix {dcm2niisuffix} suffix for image instance 1, renaming\n{filename_ce} ->\n{newfilename_ce}")
                            filename_ce.replace(newfilename_ce)
                        if ext == '.json':
                            jsonfiles.append(newbasepath_ce.with_suffix('.json'))

                # Patch the basepath with the dcm2niix suffix info (we can't rely on the basepath info here because Siemens can e.g. put multiple echos in one series / run-folder)
                if dcm2niisuffix == '_e' and bids.get_bidsvalue(basepath, 'echo') and index:
                    basepath = bids.get_bidsvalue(basepath, 'echo', str(int(index)))    # In contrast to other labels, run and echo labels MUST be integers. Those labels MAY include zero padding, but this is NOT RECOMMENDED to maintain their uniqueness

                elif dcm2niisuffix == '_e' and basesuffix in ('magnitude1', 'magnitude2') and index:    # i.e. modality == 'fmap'
                    basepath = basepath[0:-1] + str(int(index))                     # basepath: *_magnitude1_e[index] -> *_magnitude[index]
                    # Collect the echo times that need to be added to the json-file (see below)
                    if filename.suffix == '.json':
                        with filename.open('r') as json_fid:
                            data = json.load(json_fid)
                        TE[int(index) - 1] = data['EchoTime']
                        LOGGER.info(f"Collected EchoTime{index} = {data['EchoTime']} from: {filename}")

                elif dcm2niisuffix == '_e' and basesuffix == 'phasediff' and index:     # i.e. modality == 'fmap'
                    pass

                elif dcm2niisuffix == '_e' and basesuffix in ['phase1', 'phase2'] and index:    # i.e. modality == 'fmap'
                    basepath = basepath[0:-1] + str(int(index))                     # basepath: *_phase1_e[index]_ph -> *_phase[index]

                else:
                    basepath = bids.get_bidsvalue(basepath, 'dummy', dcm2niisuffix.upper() + index)     # --> append to acq-label, may need to be elaborated for future BIDS standards, supporting multi-coil data

                # Save the file with a new name
                newbidsname = str(Path(basepath).name)
                if runindex.startswith('<<') and runindex.endswith('>>'):
                    newbidsname = bids.increment_runindex(bidsmodality, newbidsname, ext)   # Update the runindex now that the acq-label has changed
                newfilename = (bidsmodality/newbidsname).with_suffix(ext)
                LOGGER.info(f"Found dcm2niix {dcm2niisuffix} suffix, renaming\n{filename} ->\n{newfilename}")
                filename.replace(newfilename)
                if ext == '.json':
                    jsonfiles.append((bidsmodality/newbidsname).with_suffix('.json'))

        # Loop over and adapt all the newly produced json files and write to the scans.tsv file (every nifti-file comes with a json-file)
        if not jsonfiles:
            jsonfiles = [(bidsmodality/bidsname).with_suffix('.json')]
        for jsonfile in set(jsonfiles):

            # Check if dcm2niix behaved as expected
            if not jsonfile.is_file():
                LOGGER.error(f"Unexpected file conversion result: {jsonfile} not found")
                continue

            # Add a dummy b0 bval- and bvec-file for any file without a bval/bvec file (e.g. sbref, b0 scans)
            if modality == 'dwi':
                bvecfile = jsonfile.with_suffix('.bvec')
                bvalfile = jsonfile.with_suffix('.bval')
                if not bvecfile.is_file():
                    LOGGER.info(f"Adding dummy bvec file: {bvecfile}")
                    with bvecfile.open('w') as bvec_fid:
                        bvec_fid.write('0\n0\n0\n')
                if not bvalfile.is_file():
                    LOGGER.info(f"Adding dummy bval file: {bvalfile}")
                    with bvalfile.open('w') as bval_fid:
                        bval_fid.write('0\n')

            # Add the TaskName to the func json-file
            elif modality == 'func':
                with jsonfile.open('r') as json_fid:
                    data = json.load(json_fid)
                if 'TaskName' not in data:
                    LOGGER.info(f"Adding TaskName to: {jsonfile}")
                    data['TaskName'] = run['bids']['task']
                    with jsonfile.open('w') as json_fid:
                        json.dump(data, json_fid, indent=4)

            # Add the EchoTime(s) used to create the difference image to the fmap json-file. NB: This assumes the magnitude runs have already been parsed (i.e. their nifti's had an _e suffix) -- This is normally the case for Siemens (phase-runs being saved after the magnitude runs)
            elif modality == 'fmap':
                if run['bids']['suffix'] == 'phasediff':
                    LOGGER.info(f"Adding EchoTime1: {TE[0]} and EchoTime2: {TE[1]} to {jsonfile}")
                    if TE[0] is None or TE[1] is None:
                        LOGGER.warning(f"Missing Echo-Time data for: {jsonfile}")
                    elif TE[0] > TE[1]:
                        LOGGER.warning(f"EchoTime1 > EchoTime2 for: {jsonfile}")
                    with jsonfile.open('r') as json_fid:
                        data = json.load(json_fid)
                    data['EchoTime1'] = TE[0]
                    data['EchoTime2'] = TE[1]
                    with jsonfile.open('w') as json_fid:
                        json.dump(data, json_fid, indent=4)

            # Parse the acquisition time from the json file or else from the dicom header (NB: assuming the dicom file represents the first acquisition)
            with jsonfile.open('r') as json_fid:
                data = json.load(json_fid)
            if 'AcquisitionTime' not in data:
                data['AcquisitionTime'] = bids.get_dicomfield('AcquisitionTime', dicomfile)
            acq_time = dateutil.parser.parse(data['AcquisitionTime'])
            scanpath = list(jsonfile.parent.glob(jsonfile.stem + '.nii*'))[0].relative_to(bidsses)  # Find the corresponding nifti file (there should be only one, let's not make assumptions about the .gz extension)
            scans_table.loc[scanpath.as_posix(), 'acq_time'] = '1900-01-01T' + acq_time.strftime('%H:%M:%S')

    # Write the scans_table to disk
    LOGGER.info(f"Writing acquisition time data to: {scans_tsv}")
    scans_table.sort_values(by=['acq_time', 'filename'], inplace=True)
    scans_table.to_csv(scans_tsv, sep='\t', encoding='utf-8')

    # Search for the IntendedFor images and add them to the json-files. This has been postponed until all modalities have been processed (i.e. so that all target images are indeed on disk)
    if bidsmap['DICOM']['fmap'] is not None:
        for fieldmap in bidsmap['DICOM']['fmap']:
            bidsname    = bids.get_bidsname(subid, sesid, 'fmap', fieldmap)
            niifiles    = []
            intendedfor = fieldmap['bids']['IntendedFor']

            # Search for the imaging files that match the IntendedFor search criteria
            if intendedfor:
                if intendedfor.startswith('<<') and intendedfor.endswith('>>'):
                    intendedfor = intendedfor[2:-2].split('><')
                elif not isinstance(intendedfor, list):
                    intendedfor = [intendedfor]
                for selector in intendedfor:
                    niifiles.extend([Path(niifile).relative_to(bidsfolder/subid) for niifile in sorted(bidsses.rglob(f"*{selector}*.nii*")) if selector])    # Search in all runs using a relative path to the subject folder
            else:
                intendedfor = []

            # Save the IntendedFor data in the json-files (account for multiple runs and dcm2niix suffixes inserted into the acquisition label)
            acqlabel = bids.get_bidsvalue(bidsname, 'acq')
            for jsonfile in list((bidsses/'fmap').glob(bidsname.replace('_run-1_', '_run-[0-9]*_') + '.json')) + \
                            list((bidsses/'fmap').glob(bidsname.replace('_run-1_', '_run-[0-9]*_').replace(acqlabel, acqlabel + '[CE][0-9]*') + '.json')):

                if niifiles:
                    LOGGER.info(f"Adding IntendedFor to: {jsonfile}")
                elif intendedfor:
                    LOGGER.warning(f"Empty 'IntendedFor' fieldmap value in {jsonfile}: the search for {intendedfor} gave no results")
                else:
                    LOGGER.warning(f"Empty 'IntendedFor' fieldmap value in {jsonfile}: the IntendedFor value of the bidsmap entry was empty")
                with jsonfile.open('r') as json_fid:
                    data = json.load(json_fid)
                data['IntendedFor'] = [niifile.as_posix() for niifile in niifiles]      # The path needs to use forward slashes instead of backward slashes
                with jsonfile.open('w') as json_fid:
                    json.dump(data, json_fid, indent=4)

                # Catch magnitude2 and phase2 files produced by dcm2niix (i.e. magnitude1 & magnitude2 both in the same runfolder)
                if jsonfile.name.endswith('magnitude1.json') or jsonfile.name.endswith('phase1.json'):
                    jsonfile2 = jsonfile.with_name(jsonfile.name.rsplit('1.json', 1)[0] + '2.json')
                    if jsonfile2.is_file():
                        with jsonfile2.open('r') as json_fid:
                            data = json.load(json_fid)
                        if 'IntendedFor' not in data:
                            if niifiles:
                                LOGGER.info(f"Adding IntendedFor to: {jsonfile2}")
                            else:
                                LOGGER.warning(f"Empty 'IntendedFor' fieldmap value in {jsonfile2}: the search for {intendedfor} gave no results")
                            data['IntendedFor'] = [niifile.as_posix() for niifile in niifiles]      # The path needs to use forward slashes instead of backward slashes
                            with jsonfile2.open('w') as json_fid:
                                json.dump(data, json_fid, indent=4)

    # Collect personal data from the DICOM header: only from the first session (-> BIDS specification)
    if 'runfolder' in locals():
        dicomfile = bids.get_dicomfile(runfolder)
        personals['participant_id'] = subid
        if sesid:
            if 'session_id' not in personals:
                personals['session_id'] = sesid
            else:
                return
        age = bids.get_dicomfield('PatientAge', dicomfile)      # A string of characters with one of the following formats: nnnD, nnnW, nnnM, nnnY
        if   age.endswith('D'): personals['age'] = str(int(float(age.rstrip('D'))/365.2524))
        elif age.endswith('W'): personals['age'] = str(int(float(age.rstrip('W'))/52.1775))
        elif age.endswith('M'): personals['age'] = str(int(float(age.rstrip('M'))/12))
        elif age.endswith('Y'): personals['age'] = str(int(float(age.rstrip('Y'))))
        elif age:               personals['age'] = age
        personals['sex']    = bids.get_dicomfield('PatientSex',    dicomfile)
        personals['size']   = bids.get_dicomfield('PatientSize',   dicomfile)
        personals['weight'] = bids.get_dicomfield('PatientWeight', dicomfile)
def rawmapper(rawfolder, outfolder: Path=Path(), sessions: list=[], rename: bool=False, dicomfield: tuple=('PatientComments',), wildcard: str='*', subprefix: str='sub-', sesprefix: str='ses-', dryrun: bool=False) -> None:
    """
    :param rawfolder:   The root folder-name of the sub/ses/data/file tree containing the source data files
    :param outfolder:   The name of the folder where the mapping-file is saved (default = rawfolder)
    :param sessions:    Space separated list of selected sub-#/ses-# names / folders to be processed. Otherwise all sessions in the rawfolder will be selected
    :param rename:      Flag for renaming the sub-subid folders to sub-dicomfield
    :param dicomfield:  The names of the dicomfields that are mapped (/ renamed to sub-dcmval/ses-dcmval)
    :param wildcard:    The Unix style pathname pattern expansion that is used by glob to select the series from which the dicomfield is being mapped
    :param subprefix:   The prefix common for all source subject-folders
    :param sesprefix:   The prefix common for all source session-folders
    :param dryrun:      Flag for dry-running renaming the sub-subid folders
    :return:            Nothing
    """

    # Input checking
    rawfolder = Path(rawfolder)
    if not outfolder:
        outfolder = rawfolder
        print(f"Outfolder: {outfolder}")
    outfolder = Path(outfolder)

    # Create the mapper logfile with a header if it does not exist yet; subsequent runs append to it
    mapperfile = outfolder/f"rawmapper_{'_'.join(dicomfield)}.tsv"
    if not dryrun and not mapperfile.is_file():
        with mapperfile.open('w') as fid:
            if rename:
                fid.write('subid\tsesid\tnewsubid\tnewsesid\n')
            else:
                fid.write('subid\tsesid\tseriesname\t{}\n'.format('\t'.join(dicomfield)))

    # Map the sessions in the sourcefolder
    if not sessions:
        sessions = list(rawfolder.glob(f"{subprefix}*/{sesprefix}*"))
        if not sessions:
            sessions = rawfolder.glob(f"{subprefix}*")      # Try without session-subfolders
    else:
        sessions = [sessionitem for session in sessions for sessionitem in rawfolder.rglob(session)]

    # Loop over the selected sessions in the sourcefolder
    for session in sessions:

        # Get the subject and session identifiers from the raw folder
        subid, sesid = bids.get_subid_sesid(session)

        # Parse the new subject and session identifiers from the dicomfield
        series = bids.lsdirs(session, wildcard)
        if not series:
            series = Path()                     # Empty placeholder (NB: Path().name == '')
            dcmval = ''
        else:
            series = series[0]                  # TODO: loop over series?
            dcmval = ''
            for dcmfield in dicomfield:
                dcmval = dcmval + '/' + str(bids.get_dicomfield(dcmfield, bids.get_dicomfile(series)))
            dcmval = dcmval[1:]

        # Rename the session subfolder in the sourcefolder and print & save this info
        if rename:

            # Get the new subid and sesid
            if not dcmval or dcmval == 'None':
                warnings.warn(f"Skipping renaming because the dicom-field was empty for: {session}")
                continue
            else:
                if '/' in dcmval:               # Allow for different sub/ses delimiters that could be entered at the console (i.e. in PatientComments)
                    delim = '/'
                elif '\\' in dcmval:
                    delim = '\\'
                else:
                    delim = '\r\n'
                newsubsesid = [val for val in dcmval.split(delim) if val]   # Skip empty lines / entries
                newsubid    = subprefix + bids.cleanup_value(re.sub(f"^{subprefix}", '', newsubsesid[0]))
                if newsubid == subprefix or newsubid == subprefix + 'None':
                    newsubid = subid
                    warnings.warn(f"Could not rename {subid} because the dicom-field was empty for: {session}")
                if len(newsubsesid) == 1:
                    newsesid = sesid
                elif len(newsubsesid) == 2:
                    newsesid = sesprefix + bids.cleanup_value(re.sub(f"^{sesprefix}", '', newsubsesid[1]))
                else:
                    warnings.warn(f"Skipping renaming of {session} because the dicom-field '{dcmval}' could not be parsed into [subid, sesid]")
                    continue
                if newsesid == sesprefix or newsesid == sesprefix + 'None':
                    newsesid = sesid
                    warnings.warn(f"Could not rename {sesid} because the dicom-field was empty for: {session}")

            # Save the dicomfield / sub-ses mapping to disk and rename the session subfolder (but skip if it already exists)
            newsession = rawfolder/newsubid/newsesid
            print(f"{session} -> {newsession}")
            if newsession == session:
                continue
            if newsession.is_dir():
                warnings.warn(f"{newsession} already exists, skipping renaming of {session}")
            elif not dryrun:
                with mapperfile.open('a') as fid:
                    fid.write(f"{subid}\t{sesid}\t{newsubid}\t{newsesid}\n")
                session.rename(newsession)

        # Print & save the dicom values
        else:
            print('{}/{}/{}\t-> {}'.format(subid, sesid, series.name, '\t'.join(dcmval.split('/'))))
            if not dryrun:
                with mapperfile.open('a') as fid:
                    fid.write('{}\t{}\t{}\t{}\n'.format(subid, sesid, series.name, '\t'.join(dcmval.split('/'))))
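
# An illustrative sketch of the delimiter-parsing that rawmapper uses above (the function name and
# input are hypothetical; the real code additionally cleans the labels with bids.cleanup_value and
# falls back to the existing ids on empty values). A console-entered PatientComments value such as
# 'John/2' is split into new sub-/ses- identifiers:
def _parse_subses_sketch(dcmval: str, subprefix: str='sub-', sesprefix: str='ses-') -> tuple:
    delim       = '/' if '/' in dcmval else '\\' if '\\' in dcmval else '\r\n'
    newsubsesid = [val for val in dcmval.split(delim) if val]       # Skip empty entries
    newsubid    = subprefix + re.sub(f"^{subprefix}", '', newsubsesid[0])
    newsesid    = sesprefix + re.sub(f"^{sesprefix}", '', newsubsesid[1]) if len(newsubsesid) == 2 else ''
    return newsubid, newsesid

# Example: _parse_subses_sketch('John/2') returns ('sub-John', 'ses-2')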
def bidsparticipants(rawfolder: str, bidsfolder: str, keys: list, subprefix: str='sub-', sesprefix: str='ses-', dryrun: bool=False) -> None:
    """
    Main function that processes all the subjects and sessions in the sourcefolder to (re)generate the participants.tsv file in the BIDS folder.

    :param rawfolder:   The root folder-name of the sub/ses/data/file tree containing the source data files
    :param bidsfolder:  The name of the BIDS root folder
    :param keys:        The keys that are extracted from the source data when populating the participants.tsv file
    :param subprefix:   The prefix common for all source subject-folders
    :param sesprefix:   The prefix common for all source session-folders
    :param dryrun:      Boolean to just display the participants info
    :return:            Nothing
    """

    # Input checking & defaults
    rawfolder  = Path(rawfolder).resolve()
    bidsfolder = Path(bidsfolder).resolve()

    # Start logging
    if dryrun:
        bids.setup_logging()
    else:
        bids.setup_logging(bidsfolder/'code'/'bidscoin'/'bidsparticipants.log')
    LOGGER.info('')
    LOGGER.info(f"-------------- START bidsparticipants {bids.version()} ------------")
    LOGGER.info(f">>> bidsparticipants sourcefolder={rawfolder} bidsfolder={bidsfolder} subprefix={subprefix} sesprefix={sesprefix}")

    # Get the table & dictionary of the subjects that have been processed
    participants_tsv  = bidsfolder/'participants.tsv'
    participants_json = participants_tsv.with_suffix('.json')
    if participants_tsv.is_file():
        participants_table = pd.read_csv(participants_tsv, sep='\t')
        participants_table.set_index(['participant_id'], verify_integrity=True, inplace=True)
    else:
        participants_table = pd.DataFrame()
        participants_table.index.name = 'participant_id'
    if participants_json.is_file():
        with participants_json.open('r') as json_fid:
            participants_dict = json.load(json_fid)
    else:
        participants_dict = {'participant_id': {'Description': 'Unique participant identifier'}}

    # Get the list of subjects
    subjects = bids.lsdirs(bidsfolder, 'sub-*')
    if not subjects:
        LOGGER.warning(f"No subjects found in: {bidsfolder}")

    # Remove obsolete participants from the participants table
    for participant in participants_table.index:
        if participant not in [subject.name for subject in subjects]:      # The table index holds names, the subjects are Paths
            participants_table = participants_table.drop(participant)

    # Loop over all subjects in the bids-folder and add them to the participants table
    for n, subject in enumerate(subjects, 1):
        LOGGER.info(f"------------------- Subject {n}/{len(subjects)} -------------------")
        personals    = dict()
        subid, sesid = bids.get_subid_sesid(subject/'dum.my')
        subject      = rawfolder/subid.replace('sub-', subprefix)       # TODO: This assumes that the subject-ids in the rawfolder did not contain BIDS-invalid characters (such as '_')
        if not subject.is_dir():
            LOGGER.error(f"Could not find source-folder: {subject}")
            continue
        sessions = bids.lsdirs(subject, sesprefix + '*')
        if not sessions:
            sessions = [subject]
        for session in sessions:

            # Unpack the data in a temporary folder if it is tarballed/zipped and/or contains a DICOMDIR file
            session, unpacked = bids.unpack(session, subprefix, sesprefix, '*')

            LOGGER.info(f"Scanning session: {session}")

            # Update / append the source data mapping
            success = scanparticipant('DICOM', session, personals, subid, sesid)

            # Clean-up the temporary unpacked data
            if unpacked:
                shutil.rmtree(session)

            if success:
                break

        # Store the collected personals in the participants_table
        for key in personals:

            # participant_id is the index of the participants_table
            assert 'participant_id' in personals
            if key == 'participant_id' or key not in keys:
                continue

            # TODO: Check that only values that are consistent over sessions go in the participants.tsv file, otherwise put them in a sessions.tsv file
            if key not in participants_dict:
                participants_dict[key] = dict(LongName    = 'Long (unabbreviated) name of the column',
                                              Description = 'Description of the column',
                                              Levels      = dict(Key='Value (This is for categorical variables: a dictionary of possible values (keys) and their descriptions (values))'),
                                              Units       = 'Measurement units. [<prefix symbol>]<unit symbol> format following the SI standard is RECOMMENDED',
                                              TermURL     = 'URL pointing to a formal definition of this type of data in an ontology available on the web')
            participants_table.loc[personals['participant_id'], key] = personals[key]

    # Write the collected data to the participant files
    LOGGER.info(f"Writing subject data to: {participants_tsv}")
    if not dryrun:
        participants_table.replace('', 'n/a').to_csv(participants_tsv, sep='\t', encoding='utf-8', na_rep='n/a')

    LOGGER.info(f"Writing subject data dictionary to: {participants_json}")
    if not dryrun:
        with participants_json.open('w') as json_fid:
            json.dump(participants_dict, json_fid, indent=4)

    print(participants_table)

    LOGGER.info('-------------- FINISHED! ------------')
    LOGGER.info('')

    bids.reporterrors()
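
# Example invocation (a sketch with hypothetical paths and keys): (re)generate participants.tsv
# and participants.json for an existing BIDS tree, pulling 'age' and 'sex' from the source data:
#
#   bidsparticipants('/project/raw', '/project/bids', keys=['age', 'sex'], dryrun=True)
#
# With dryrun=True the resulting participants table is only printed and nothing is written to disk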