def sortsession(outfolder: str,
                session: BidsSession,
                recording: Modules.baseModule,
                dry_run: bool) -> None:
    """
    Sorts the files of one recording into the prepared dataset
    in outfolder, running the plugin entry points along the way.
    """
    recording.setBidsSession(session)

    if plugins.RunPlugin("SequenceEP", recording) < 0:
        logger.warning("Sequence {} discarded by {}"
                       .format(recording.recIdentity(False), "SequenceEP"))
        return

    logger.info("Processing: sub '{}', ses '{}' ({} files)"
                .format(recording.subId(),
                        recording.sesId(),
                        len(recording.files)))

    recording.index = -1
    while recording.loadNextFile():
        # if subject/session are not fixed at session level, reset them
        # so they can be (re)defined per file by the plugin or the recording
        if session.subject is None:
            recording.getBidsSession().unlock_subject()
            recording.getBidsSession().subject = None
        if session.session is None:
            recording.getBidsSession().unlock_session()
            recording.getBidsSession().session = None

        if plugins.RunPlugin("RecordingEP", recording) < 0:
            logger.warning("Recording {} discarded by {}"
                           .format(recording.recIdentity(), "RecordingEP"))
            continue

        if session.subject is None:
            recording.setSubId()
        if session.session is None:
            recording.setSesId()
        recording.getBidsSession().registerFields(True)

        serie = os.path.join(outfolder,
                             recording.getBidsSession().getPath(True),
                             recording.Module(),
                             recording.recIdentity(index=False))
        if not dry_run:
            os.makedirs(serie, exist_ok=True)
            outfile = recording.copyRawFile(serie)
            if recording.switches["exportHeader"]:
                recording.exportHeader(serie)
            plugins.RunPlugin("FileEP", outfile, recording)
        else:
            plugins.RunPlugin("FileEP", None, recording)

    plugins.RunPlugin("SequenceEndEP", outfolder, recording)
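
# For orientation: a file handled by sortsession() lands under
# destination/sub-<subId>/ses-<sesId>/<type>/<sequence>/file, the structure
# documented in prepare() below. A hypothetical example of the result:
#
#   prepared/
#     sub-001/
#       ses-01/
#         MRI/
#           001-t1_mprage/
#             t1_mprage.dcm
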
def createmap(destination,
              recording: Modules.baseModule,
              bidsmap,
              template,
              bidsmap_unk):
    """
    Matches the files of one recording against the current bidsmap,
    falling back to the template map, and registers unidentified runs
    in bidsmap_unk.

    Returns the bidsified name of the first non-ignored file
    of the recording, or None.
    """
    if plugins.RunPlugin("SequenceEP", recording) < 0:
        logger.warning("Sequence {} discarded by {}"
                       .format(recording.recIdentity(False), "SequenceEP"))
        return

    logger.info("Processing: sub '{}', ses '{}', {} ({} files)"
                .format(recording.subId(),
                        recording.sesId(),
                        recording.recIdentity(),
                        len(recording.files)))

    first_name = None
    recording.index = -1
    while recording.loadNextFile():
        if plugins.RunPlugin("RecordingEP", recording) < 0:
            logger.warning("Recording {} discarded by {}"
                           .format(recording.recIdentity(), "RecordingEP"))
            continue

        # checking in the current map
        modality, r_index, run = bidsmap.match_run(recording)
        if not modality:
            logger.warning("{}/{}: No run found in bidsmap. "
                           "Looking into template"
                           .format(recording.Module(),
                                   recording.recIdentity()))
            # checking in the template map
            modality, r_index, run = template.match_run(recording, fix=True)
            if not modality:
                logger.error("{}/{}: No compatible run found"
                             .format(recording.Module(),
                                     recording.recIdentity()))
                bidsmap_unk.add_run(run,
                                    recording.Module(),
                                    recording.Type())
                continue
            run.template = True
            modality, r_index, run = bidsmap.add_run(run,
                                                     recording.Module(),
                                                     recording.Type())

        if modality != "__ignore__":
            bidsified_name = "{}/{}".format(modality, recording.getBidsname())
            logger.debug("{}/{}: {}".format(recording.Module(),
                                            recording.recIdentity(),
                                            bidsified_name))
        else:
            bidsified_name = None

        if first_name is None:
            first_name = bidsified_name
        elif modality != "__ignore__":
            if first_name == bidsified_name:
                logger.error("{}/{}: Bidsified name same "
                             "as first file of recording: {}"
                             .format(recording.Module(),
                                     recording.recIdentity(),
                                     bidsified_name))

        if not run.checked:
            if not run.entity:
                run.genEntities(recording.bidsmodalities.get(run.model, []))
            recording.fillMissingJSON(run)
        elif "IntendedFor" in recording.metaAuxiliary:
            sub_path = os.path.join(destination, recording.subId())
            out_path = os.path.join(destination,
                                    recording.getBidsSession().getPath())
            bidsname = recording.getBidsname()
            bidsmodality = os.path.join(out_path, recording.Modality())
            if os.path.isfile(os.path.join(bidsmodality,
                                           bidsname + '.json')):
                # checking the IntendedFor validity
                intended = recording.metaAuxiliary["IntendedFor"]
                for i in intended:
                    dest = os.path.join(sub_path, i.value)
                    if not glob.glob(dest):
                        logger.error("{}/{}({}): IntendedFor value {} "
                                     "not found"
                                     .format(modality, r_index,
                                             run.example, i.value))

    plugins.RunPlugin("SequenceEndEP", None, recording)
    return first_name
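
# A minimal sketch of how createmap() is driven; it mirrors the call site
# in mapper() below (the recording class and paths are illustrative):
#
#   cls = Modules.selector.select(run, module)
#   recording = cls(rec_path=run)
#   recording.setBidsSession(scan)
#   first_name = createmap(destination, recording,
#                          bidsmap_new, template, bidsmap_unk)
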
def mapper(source: str, destination: str,
           plugin_file: str = "",
           plugin_opt: dict = {},
           sub_list: list = [],
           sub_skip_tsv: bool = False,
           sub_skip_dir: bool = False,
           ses_skip_dir: bool = False,
           process_all: bool = False,
           bidsmapfile: str = "bidsmap.yaml",
           map_template: str = "bidsmap_template.yaml",
           dry_run: bool = False
           ) -> None:
    """
    Generates bidsmap.yaml from the prepared dataset and the map template.

    Only subjects in source/participants.tsv are treated,
    this list can be narrowed using sub_list, sub_skip_tsv
    and sub_skip_dir options

    Parameters
    ----------
    source: str
        folder containing source dataset
    destination: str
        folder for prepared dataset
    plugin_file: str
        path to the plugin file to use
    plugin_opt: dict
        named options passed to plugin
    sub_list: list
        list of subjects to process. Subjects are checked after
        plugin and must start with 'sub-', as in destination folder
    sub_skip_tsv: bool
        if set to True, subjects found in destination/participants.tsv
        will be ignored
    sub_skip_dir: bool
        if set to True, subjects with already created directories
        will be ignored
    ses_skip_dir: bool
        if set to True, sessions with already created directories
        will be ignored
    process_all: bool
        if set to True, the scan continues over remaining subjects
        even after a recording produced errors or warnings
    bidsmapfile: str
        The name of the bidsmap file, will be searched for
        in the destination/code/bidsme directory, unless the path
        is absolute
    map_template: str
        The name of the template map. The file is searched for
        in the heuristics folder
    dry_run: bool
        if set to True, no disk writing operations
        will be performed
    """

    logger.info("------------ Generating bidsmap ------------")
    logger.info("Current directory: {}".format(os.getcwd()))
    logger.info("Source directory: {}".format(source))
    logger.info("Destination directory: {}".format(destination))

    # Input checking
    if not os.path.isdir(source):
        logger.critical("Source directory {} does not exist".format(source))
        raise NotADirectoryError(source)
    if not os.path.isdir(destination):
        logger.critical(
                "Destination directory {} does not exist".format(destination))
        raise NotADirectoryError(destination)

    bidscodefolder = os.path.join(destination, 'code', 'bidsme')
    os.makedirs(bidscodefolder, exist_ok=True)

    # Get the heuristics for filling the new bidsmap
    logger.info("loading template bidsmap {}".format(map_template))
    fname = paths.findFile(map_template,
                           paths.local,
                           paths.config,
                           paths.heuristics)
    if not fname:
        logger.warning("Unable to find template map {}".format(map_template))
    template = bidsmap.Bidsmap(fname)

    fname = paths.findFile(bidsmapfile,
                           bidscodefolder,
                           paths.local,
                           paths.config)
    if not fname:
        bidsmapfile = os.path.join(bidscodefolder, bidsmapfile)
    else:
        bidsmapfile = fname
    logger.info("loading working bidsmap {}".format(bidsmapfile))
    bidsmap_new = bidsmap.Bidsmap(bidsmapfile)

    logger.debug("Creating bidsmap for unknown modalities")
    # removing old unknown files
    bidsunknown = os.path.join(bidscodefolder, 'unknown.yaml')
    if os.path.isfile(bidsunknown):
        os.remove(bidsunknown)
    bidsmap_unk = bidsmap.Bidsmap(bidsunknown)

    ###############
    # Plugin setup
    ###############
    if plugin_file:
        plugins.ImportPlugins(plugin_file)
        plugins.InitPlugin(source=source,
                           destination=destination,
                           dry=True,
                           **plugin_opt)

    ###############################
    # Checking participants list
    ###############################
    source_sub_file = os.path.join(source, "participants.tsv")
    source_sub_table = BidsTable(source_sub_file,
                                 index="participant_id",
                                 duplicatedFile="__duplicated.tsv",
                                 checkDefinitions=True)
    source_sub_table.drop_duplicates()
    df_dupl = source_sub_table.check_duplicates()
    if df_dupl.any():
        logger.critical("Participant list contains one or several "
                        "duplicated entries: {}"
                        .format(source_sub_table.getIndexes(df_dupl, True)))
        raise Exception("Duplicated subjects")

    BidsSession.loadSubjectFields(source_sub_table.getDefinitionsPath())

    dest_sub_file = os.path.join(destination, "participants.tsv")
    dest_json_file = os.path.join(paths.templates, "participants.json")
    dest_sub_table = BidsTable(dest_sub_file,
                               index="participant_id",
                               definitionsFile=dest_json_file,
                               duplicatedFile="__duplicated.tsv",
                               checkDefinitions=False)

    ##############################
    # Subjects loop
    ##############################
    n_subjects = len(source_sub_table.df["participant_id"])
    for index, sub_row in source_sub_table.df.iterrows():
        skip_subject = False
        sub_no = index + 1
        sub_id = sub_row["participant_id"]
        sub_dir = os.path.join(source, sub_id)
        if not os.path.isdir(sub_dir):
            logger.error("{}: Not found in {}".format(sub_id, source))
            continue

        scan = BidsSession()
        scan.in_path = sub_dir
        scan.subject = sub_id

        #################################################
        # Cloning df_sub row values in scans sub_values
        #################################################
        for column in source_sub_table.df.columns:
            if pandas.isna(sub_row[column]):
                scan.sub_values[column] = None
            else:
                scan.sub_values[column] = sub_row[column]

        if plugins.RunPlugin("SubjectEP", scan) < 0:
            logger.warning("Subject {} discarded by {}"
                           .format(scan.subject, "SubjectEP"))
            continue
        scan.lock_subject()

        if not scan.isSubValid():
            logger.error("{}: Subject id '{}' is not valid"
                         .format(sub_id, scan.subject))
            continue

        if tools.skipEntity(scan.subject, sub_list,
                            dest_sub_table.getIndexes()
                            if sub_skip_tsv else None,
                            destination if sub_skip_dir else ""):
            logger.info("Skipping subject '{}'".format(scan.subject))
            continue

        ses_dirs = tools.lsdirs(sub_dir, 'ses-*')
        if not ses_dirs:
            logger.error("{}: No sessions found in: {}"
                         .format(scan.subject, sub_dir))
            continue

        for ses_dir in ses_dirs:
            scan.in_path = ses_dir
            logger.info("{} ({}/{}): Scanning folder {}"
                        .format(scan.subject, sub_no, n_subjects, ses_dir))
            scan.unlock_session()
            scan.session = os.path.basename(ses_dir)
            if plugins.RunPlugin("SessionEP", scan) < 0:
                logger.warning("Session {} discarded by {}"
                               .format(scan.session, "SessionEP"))
                continue
            scan.lock()

            if ses_skip_dir and tools.skipEntity(
                    scan.session, [], None,
                    os.path.join(destination, scan.subject)):
                logger.info("Skipping session '{}'".format(scan.session))
                continue

            bidsified_list = []
            for module in Modules.selector.types_list:
                mod_dir = os.path.join(ses_dir, module)
                if not os.path.isdir(mod_dir):
                    logger.debug("Module {} not found in {}"
                                 .format(module, ses_dir))
                    continue
                for run in tools.lsdirs(mod_dir):
                    cls = Modules.selector.select(run, module)
                    if cls is None:
                        logger.error("Failed to identify data in {}"
                                     .format(run))
                        continue
                    recording = cls(rec_path=run)
                    if not recording or len(recording.files) == 0:
                        logger.error("Unable to load data in folder {}"
                                     .format(run))
                        continue
                    recording.setBidsSession(scan)

                    err_count = info.counthandler.level2count.copy()
                    try:
                        first_name = createmap(destination, recording,
                                               bidsmap_new, template,
                                               bidsmap_unk)
                        if first_name in bidsified_list:
                            logger.error("Matches example of "
                                         "already processed run {}"
                                         .format(first_name))
                        elif first_name is not None:
                            bidsified_list.append(first_name)
                    except Exception as err:
                        logger.error("Error processing folder {} "
                                     "in file {}: {}"
                                     .format(run,
                                             recording.currentFile(True),
                                             err))
                    err_count = info.msg_count(err_count)
                    if err_count:
                        logger.info("Recording generated several "
                                    "errors/warnings")
                        skip_subject = not process_all
                        break
                if skip_subject:
                    break
            if skip_subject:
                break
        if skip_subject:
            break

    if not dry_run:
        # Save the bidsmap to the bidsmap YAML-file
        bidsmap_new.save(bidsmapfile, empty_attributes=False)

    # Sanity checks for map
    prov_duplicates, example_duplicates = bidsmap_new.checkSanity()

    dupl_counter = 0
    logger.info("Sanity check:")
    for dupl, count in prov_duplicates.items():
        if count > 1:
            logger.warning("{} matches {} runs".format(dupl, count))
            dupl_counter += 1
    if dupl_counter == 0:
        logger.info("Passed: No files matching several runs")
    else:
        logger.error("Failed: {} files matching several runs"
                     .format(dupl_counter))

    dupl_counter = 0
    for dupl, count in example_duplicates.items():
        if count > 1:
            logger.warning("{} created by {} runs".format(dupl, count))
            dupl_counter += 1
    if dupl_counter == 0:
        logger.info("Passed: No examples matching several runs")
    else:
        logger.error("Failed: {} examples matching several runs"
                     .format(dupl_counter))

    ntotal, ntemplate, nunchecked = bidsmap_new.countRuns()
    logger.info("Map contains {} runs".format(ntotal))
    if ntemplate != 0:
        logger.warning("Map contains {} template runs".format(ntemplate))
    if nunchecked != 0:
        logger.warning("Map contains {} unchecked runs".format(nunchecked))

    # Scanning unknown recordings and exporting them to the yaml file
    unkn_recordings = bidsmap_unk.countRuns()[0]
    if unkn_recordings > 0:
        logger.error("Was unable to identify {} recordings. "
                     "See {} for details".format(unkn_recordings,
                                                 bidsunknown))
        if not dry_run:
            bidsmap_unk.save(bidsunknown)
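
# A minimal sketch of a mapper() invocation, assuming a dataset already
# prepared by prepare() (folder names are hypothetical):
#
#   mapper(source="prepared", destination="bids",
#          process_all=True, dry_run=True)
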
def coin(destination: str,
         recording: Modules.baseModule,
         bidsmap: Bidsmap,
         dry_run: bool) -> None:
    """
    Converts the files of one recording into BIDS-valid files
    in the corresponding bids folder.

    Parameters
    ----------
    destination: str
        folder of the bidsified dataset
    recording: Modules.baseModule
        recording to bidsify
    bidsmap: Bidsmap
        the mapping heuristics from the bidsmap YAML-file
    dry_run: bool
        if set to True, no disk writing operations
        will be performed
    """
    if plugins.RunPlugin("SequenceEP", recording) < 0:
        logger.warning("Sequence {} discarded by {}"
                       .format(recording.recIdentity(False), "SequenceEP"))
        return

    logger.info("Processing: sub '{}', ses '{}', {} ({} files)"
                .format(recording.subId(),
                        recording.sesId(),
                        recording.recIdentity(),
                        len(recording.files)))

    recording.sub_BIDSvalues["participant_id"] = recording.subId()

    recording.index = -1
    while recording.loadNextFile():
        if plugins.RunPlugin("RecordingEP", recording) < 0:
            logger.warning("Recording {} discarded by {}"
                           .format(recording.recIdentity(), "RecordingEP"))
            continue
        recording.getBidsSession().registerFields(True)

        out_path = os.path.join(destination, recording.getBidsPrefix("/"))
        # checking in the current map
        modality, r_index, r_obj = bidsmap.match_run(recording)
        if not modality:
            e = "{}: No compatible run found"\
                .format(recording.recIdentity())
            logger.error(e)
            raise ValueError(e)
        if modality == Modules.ignoremodality:
            logger.info('{}: ignored modality'
                        .format(recording.recIdentity()))
            continue
        recording.setLabels(r_obj)
        recording.generateMeta()

        bidsname = recording.getBidsname()
        bidsmodality = os.path.join(out_path, recording.Modality())

        # Check if the file already exists at destination
        if os.path.isfile(os.path.join(bidsmodality,
                                       bidsname + '.json')):
            e = "{}/{}.json exists at destination"\
                .format(bidsmodality, bidsname)
            logger.error(e)
            raise FileExistsError(e)
        if not dry_run:
            outfile = recording.bidsify(destination)
            plugins.RunPlugin("FileEP", outfile, recording)

    if not dry_run:
        plugins.RunPlugin("SequenceEndEP", out_path, recording)
    else:
        plugins.RunPlugin("SequenceEndEP", None, recording)
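
# coin() is not meant to be called directly; bidsify() below invokes it once
# per recording, as in this condensed excerpt of its loop (error handling
# omitted, names mirror the call site):
#
#   recording = cls(rec_path=run)
#   recording.setBidsSession(scan)
#   coin(destination, recording, bidsmap, dry_run)
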
def bidsify(source: str, destination: str,
            plugin_file: str = "",
            plugin_opt: dict = {},
            sub_list: list = [],
            sub_skip_tsv: bool = False,
            sub_skip_dir: bool = False,
            ses_skip_dir: bool = False,
            part_template: str = "",
            bidsmapfile: str = "bidsmap.yaml",
            dry_run: bool = False
            ) -> None:
    """
    Bidsifies the prepared dataset in source and places the result
    in the destination folder.

    Only subjects in source/participants.tsv are treated,
    this list can be narrowed using sub_list, sub_skip_tsv
    and sub_skip_dir options

    Parameters
    ----------
    source: str
        folder containing the prepared dataset
    destination: str
        folder for the bidsified dataset
    plugin_file: str
        path to the plugin file to use
    plugin_opt: dict
        named options passed to plugin
    sub_list: list
        list of subjects to process. Subjects are checked after
        plugin and must start with 'sub-', as in destination folder
    sub_skip_tsv: bool
        if set to True, subjects found in destination/participants.tsv
        will be ignored
    sub_skip_dir: bool
        if set to True, subjects with already created directories
        will be ignored
    ses_skip_dir: bool
        if set to True, sessions with already created directories
        will be ignored
    part_template: str
        path to the template json file from which participants.tsv
        will be modeled. If unset, the default one,
        source/participants.json, is used.
        Setting this variable may break the workflow
    bidsmapfile: str
        The name of the bidsmap file, will be searched for
        in the destination/code/bidsme directory, unless the path
        is absolute
    dry_run: bool
        if set to True, no disk writing operations
        will be performed
    """

    logger.info("-------------- Bidsifying data -------------")
    logger.info("Source directory: {}".format(source))
    logger.info("Destination directory: {}".format(destination))

    # Input checking
    if not os.path.isdir(source):
        logger.critical("Source directory {} does not exist".format(source))
        raise NotADirectoryError(source)
    if not os.path.isdir(destination):
        logger.critical(
                "Destination directory {} does not exist".format(destination))
        raise NotADirectoryError(destination)

    # Input checking & defaults
    bidscodefolder = os.path.join(destination, 'code', 'bidsme')

    # Create a code/bidsme subfolder
    os.makedirs(bidscodefolder, exist_ok=True)

    # Check for dataset description file
    dataset_file = os.path.join(destination, 'dataset_description.json')
    if not os.path.isfile(dataset_file):
        logger.warning("Dataset description file 'dataset_description.json' "
                       "not found in '{}'".format(destination))

    # Check for README file
    readme_file = os.path.join(destination, 'README')
    if not os.path.isfile(readme_file):
        logger.warning("Dataset readme file 'README' "
                       "not found in '{}'".format(destination))

    # Get the bidsmap heuristics from the bidsmap YAML-file
    fname = paths.findFile(bidsmapfile,
                           bidscodefolder,
                           paths.local,
                           paths.config)
    if not fname:
        logger.critical('Bidsmap file {} not found.'.format(bidsmapfile))
        raise FileNotFoundError(bidsmapfile)
    else:
        bidsmapfile = fname
    logger.info("loading bidsmap {}".format(bidsmapfile))
    bidsmap = Bidsmap(bidsmapfile)

    ntotal, ntemplate, nunchecked = bidsmap.countRuns()
    logger.debug("Map contains {} runs".format(ntotal))
    if ntemplate != 0:
        logger.warning("Map contains {} template runs".format(ntemplate))
    if nunchecked != 0:
        logger.critical("Map contains {} unchecked runs".format(nunchecked))
        raise Exception("Unchecked runs present")

    ###############
    # Plugin setup
    ###############
    if plugin_file:
        plugins.ImportPlugins(plugin_file)
        plugins.InitPlugin(source=source,
                           destination=destination,
                           dry=dry_run,
                           **plugin_opt)

    ###############################
    # Checking participants list
    ###############################
    if not part_template:
        part_template = os.path.join(source, "participants.json")
    else:
        logger.warning("Loading exterior participant template {}"
                       .format(part_template))
    BidsSession.loadSubjectFields(part_template)

    source_sub_file = os.path.join(source, "participants.tsv")
    source_sub_table = BidsTable(source_sub_file,
                                 definitionsFile=part_template,
                                 index="participant_id",
                                 duplicatedFile="__duplicated.tsv",
                                 checkDefinitions=True)
    source_sub_table.drop_duplicates()
    df_dupl = source_sub_table.check_duplicates()
    if df_dupl.any():
        logger.critical("Participant list contains one or several "
                        "duplicated entries: {}"
                        .format(source_sub_table.getIndexes(df_dupl, True)))
        raise Exception("Duplicated subjects")

    dest_sub_file = os.path.join(destination, "participants.tsv")
    dest_sub_table = BidsTable(dest_sub_file,
                               index="participant_id",
                               definitionsFile=part_template,
                               duplicatedFile="__duplicated.tsv",
                               checkDefinitions=False)

    ##############################
    # Subjects loop
    ##############################
    n_subjects = len(source_sub_table.df["participant_id"])
    for index, sub_row in source_sub_table.df.iterrows():
        sub_no = index + 1
        sub_id = sub_row["participant_id"]
        sub_dir = os.path.join(source, sub_id)
        if not os.path.isdir(sub_dir):
            logger.error("{}: Not found in {}".format(sub_id, source))
            continue

        scan = BidsSession()
        scan.in_path = sub_dir
        scan.subject = sub_id

        #################################################
        # Cloning df_sub row values in scans sub_values
        #################################################
        for column in source_sub_table.df.columns:
            if pandas.isna(sub_row[column]):
                scan.sub_values[column] = None
            else:
                scan.sub_values[column] = sub_row[column]

        if plugins.RunPlugin("SubjectEP", scan) < 0:
            logger.warning("Subject {} discarded by {}"
                           .format(scan.subject, "SubjectEP"))
            continue

        # locking subjects here allows renaming in bidsification;
        # the files will be stored in the appropriate folders
        scan.lock_subject()

        if not scan.isSubValid():
            logger.error("{}: Subject id '{}' is not valid"
                         .format(sub_id, scan.subject))
            continue

        if tools.skipEntity(scan.subject, sub_list,
                            dest_sub_table.getIndexes()
                            if sub_skip_tsv else None,
                            destination if sub_skip_dir else ""):
            logger.info("Skipping subject '{}'".format(scan.subject))
            continue

        ses_dirs = tools.lsdirs(sub_dir, 'ses-*')
        if not ses_dirs:
            logger.error("{}: No sessions found in: {}"
                         .format(scan.subject, sub_dir))
            continue

        for ses_dir in ses_dirs:
            scan.in_path = ses_dir
            logger.info("{} ({}/{}): Scanning folder {}"
                        .format(scan.subject, sub_no, n_subjects, ses_dir))
            scan.unlock_session()
            scan.session = os.path.basename(ses_dir)
            if plugins.RunPlugin("SessionEP", scan) < 0:
                logger.warning("Session {} discarded by {}"
                               .format(scan.session, "SessionEP"))
                continue
            scan.lock()

            if ses_skip_dir and tools.skipEntity(
                    scan.session, [], None,
                    os.path.join(destination, scan.subject)):
                logger.info("Skipping session '{}'".format(scan.session))
                continue

            for module in Modules.selector.types_list:
                mod_dir = os.path.join(ses_dir, module)
                if not os.path.isdir(mod_dir):
                    logger.debug("Module {} not found in {}"
                                 .format(module, ses_dir))
                    continue
                for run in tools.lsdirs(mod_dir):
                    scan.in_path = run
                    cls = Modules.select(run, module)
                    if cls is None:
                        logger.error("Failed to identify data in {}"
                                     .format(run))
                        continue
                    recording = cls(rec_path=run)
                    if not recording or len(recording.files) == 0:
                        logger.error("Unable to load data in folder {}"
                                     .format(run))
                        continue
                    recording.setBidsSession(scan)
                    try:
                        coin(destination, recording, bidsmap, dry_run)
                    except Exception as err:
                        exceptions.ReportError(err)
                        logger.error("Error processing folder {} in file {}"
                                     .format(run,
                                             recording.currentFile(True)))
            plugins.RunPlugin("SessionEndEP", scan)

        scan.in_path = sub_dir
        plugins.RunPlugin("SubjectEndEP", scan)

    ##################################
    # Merging the participants table
    ##################################
    df_processed = BidsSession.exportAsDataFrame()
    try:
        dest_sub_table.append(df_processed)
    except Exception as e:
        logger.critical("Failed to merge participants table: {}".format(e))
        logger.info("Saving incompatible table to {}"
                    .format(dest_sub_table.getDuplicatesPath()))
        dest_sub_table.write_data(dest_sub_table.getDuplicatesPath(),
                                  df_processed)
    else:
        dest_sub_table.drop_duplicates()
        df_dupl = dest_sub_table.check_duplicates()
        if df_dupl.any():
            logger.critical("Participant list contains one or several "
                            "duplicated entries: {}"
                            .format(dest_sub_table.getIndexes(df_dupl,
                                                              True)))
        if not dry_run:
            dest_sub_table.save_table(selection=~df_dupl)
            if df_dupl.any():
                logger.info("Saving the list to be merged manually to {}"
                            .format(dest_sub_table.getDuplicatesPath()))
                dest_sub_table.save_table(selection=df_dupl,
                                          useDuplicates=True)

    plugins.RunPlugin("FinaliseEP")
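
# A minimal sketch of a plugin file that can be passed to prepare(), mapper()
# or bidsify() via plugin_file. It assumes plugins.RunPlugin forwards its
# extra arguments to the plugin function of the same name; the bodies below
# are hypothetical:
#
#   def SubjectEP(scan):
#       # e.g. zero-pad subject ids: "sub-1" -> "sub-001"
#       scan.subject = "sub-" + scan.subject[len("sub-"):].zfill(3)
#       return 0   # a negative return value discards this subject
#
#   def RecordingEP(recording):
#       return 0   # returning a value < 0 discards this recording
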
def prepare(source: str, destination: str,
            plugin_file: str = "",
            plugin_opt: dict = {},
            sub_list: list = [],
            sub_skip_tsv: bool = False,
            sub_skip_dir: bool = False,
            ses_skip_dir: bool = False,
            part_template: str = "",
            sub_prefix: str = "",
            ses_prefix: str = "",
            sub_no_dir: bool = False,
            ses_no_dir: bool = False,
            data_dirs: dict = {},
            dry_run: bool = False
            ) -> None:
    """
    Prepares data from the source folder and places it in
    the destination folder.

    The source folder is expected to have the structure
    source/[subId/][sesId/][data/]file.
    The absence of subId and sesId levels must be communicated
    via the sub_no_dir and ses_no_dir options. The list of data
    folders must be given in data_dirs.

    The prepared data will have the structure
    destination/sub-<subId>/ses-<sesId>/<type>/<sequence>/file

    A list of treated subjects will be created/updated
    in the destination/participants.tsv file

    Parameters
    ----------
    source: str
        folder containing source dataset
    destination: str
        folder for prepared dataset
    plugin_file: str
        path to the plugin file to use
    plugin_opt: dict
        named options passed to plugin
    sub_list: list
        list of subjects to process. Subjects are checked after
        plugin and must start with 'sub-', as in destination folder
    sub_skip_tsv: bool
        if set to True, subjects found in destination/participants.tsv
        will be ignored
    sub_skip_dir: bool
        if set to True, subjects with already created directories
        will be ignored.
        Can conflict with sub_no_dir
    ses_skip_dir: bool
        if set to True, sessions with already created directories
        will be ignored.
        Can conflict with ses_no_dir
    part_template: str
        path to the template json file from which participants.tsv
        will be modeled. Must be formatted as a usual BIDS
        sidecar json file for tsv files
    sub_prefix: str
        prefix for subject folders in the source dataset.
        If set, subject folders without the prefix will be ignored,
        and the prefix will be stripped from subject Ids:
        sub001 -> 001 if sub_prefix == 'sub'.
        Option has no effect if sub_no_dir == True
    ses_prefix: str
        prefix for session folders in the source dataset.
        If set, session folders without the prefix will be ignored,
        and the prefix will be stripped from session Ids:
        sesTest -> Test if ses_prefix == 'ses'.
        Option has no effect if ses_no_dir == True
    sub_no_dir: bool
        if set to True, the source dataset will not be expected
        to have subject folders
    ses_no_dir: bool
        if set to True, the source dataset will not be expected
        to have session folders
    data_dirs: dict
        dictionary with folders containing recording data as keys
        and data types as values. If a folder contains several
        types of data, the value must be set to an empty string
    dry_run: bool
        if set to True, no disk writing operations
        will be performed
    """
    logger.info("-------------- Preparing data --------------")
    logger.info("Source directory: {}".format(source))
    logger.info("Destination directory: {}".format(destination))

    # Input checking
    if not os.path.isdir(source):
        logger.critical("Source directory {} does not exist"
                        .format(source))
        raise NotADirectoryError(source)
    if not os.path.isdir(destination):
        logger.critical("Destination directory {} does not exist"
                        .format(destination))
        raise NotADirectoryError(destination)

    if sub_no_dir and sub_skip_dir:
        logger.warning("Both sub_no_dir and sub_skip_dir are set. "
                       "Subjects will not be skipped "
                       "unless subId defined in plugin")
    if ses_no_dir and ses_skip_dir:
        logger.warning("Both ses_no_dir and ses_skip_dir are set. "
                       "Sessions will not be skipped "
                       "unless sesId defined in plugin")

    ###############
    # Plugin setup
    ###############
    if plugin_file:
        plugins.ImportPlugins(plugin_file)
        plugins.InitPlugin(source=source,
                           destination=destination,
                           dry=dry_run,
                           **plugin_opt)

    ###############################
    # Checking participants list
    ###############################
    new_sub_json = os.path.join(destination, "participants.json")
    if not part_template:
        if os.path.isfile(new_sub_json):
            part_template = new_sub_json
        else:
            part_template = os.path.join(paths.templates,
                                         "participants.json")
    BidsSession.loadSubjectFields(part_template)

    sub_table = BidsTable(os.path.join(destination, "participants.tsv"),
                          definitionsFile=part_template,
                          index="participant_id",
                          duplicatedFile="__duplicated.tsv",
                          checkDefinitions=True)

    ################
    # Subject loop #
    ################
    sub_prefix_dir, sub_prefix = os.path.split(sub_prefix)
    ses_prefix_dir, ses_prefix = os.path.split(ses_prefix)

    if not sub_no_dir:
        sub_dirs = tools.lsdirs(os.path.join(source, sub_prefix_dir),
                                sub_prefix + '*')
    else:
        sub_dirs = [source]
    if not sub_dirs:
        logger.warning("No subject folders found")

    # re-assign rather than mutate, so the mutable default {} stays untouched
    if not data_dirs:
        data_dirs = {}

    for sub_dir in sub_dirs:
        scan = BidsSession()
        scan.in_path = sub_dir
        # get the name of the subject from the folder name
        if not sub_no_dir:
            scan.subject = os.path.basename(sub_dir)
            scan.subject = scan.subject[len(sub_prefix):]

        if plugins.RunPlugin("SubjectEP", scan) < 0:
            logger.warning("Subject {} discarded by {}"
                           .format(scan.subject, "SubjectEP"))
            continue
        scan.lock_subject()

        if scan.subject is not None:
            if tools.skipEntity(scan.subject, sub_list,
                                sub_table.getIndexes()
                                if sub_skip_tsv else None,
                                destination if sub_skip_dir else ""):
                logger.info("Skipping subject '{}'".format(scan.subject))
                continue

        if not ses_no_dir:
            ses_dirs = tools.lsdirs(os.path.join(sub_dir, ses_prefix_dir),
                                    ses_prefix + '*')
        else:
            ses_dirs = [sub_dir]
        if not ses_dirs:
            logger.warning("No session folders found")

        for ses_dir in ses_dirs:
            scan.in_path = ses_dir
            logger.info("Scanning folder {}".format(ses_dir))
            if not ses_no_dir:
                scan.unlock_session()
                scan.session = os.path.basename(ses_dir)
                scan.session = scan.session[len(ses_prefix):]
            else:
                scan.unlock_session()
                scan.session = ""
            if plugins.RunPlugin("SessionEP", scan) < 0:
                logger.warning("Session {} discarded by {}"
                               .format(scan.session, "SessionEP"))
                continue
            scan.lock()

            if scan.session is not None:
                skip = False
                if ses_skip_dir:
                    if os.path.isdir(os.path.join(destination,
                                                  scan.subject,
                                                  scan.session)):
                        logger.debug("{} dir exists".format(scan.session))
                        skip = True
                if skip:
                    logger.info("Skipping session '{}'"
                                .format(scan.session))
                    continue

            if not data_dirs:
                data_dirs[""] = ""
            for rec_dirs, rec_type in data_dirs.items():
                rec_dirs = tools.lsdirs(ses_dir, rec_dirs)
                for rec_dir in rec_dirs:
                    if not os.path.isdir(rec_dir):
                        logger.warning("Sub: '{}', Ses: '{}': "
                                       "'{}' does not exist "
                                       "or is not a folder"
                                       .format(scan.subject,
                                               scan.session,
                                               rec_dir))
                        continue
                    cls = Modules.select(rec_dir, rec_type)
                    if cls is None:
                        logger.warning("Unable to identify data in folder {}"
                                       .format(rec_dir))
                        continue
                    recording = cls(rec_path=rec_dir)
                    if not recording or len(recording.files) == 0:
                        logger.warning("Unable to load data in folder {}"
                                       .format(rec_dir))
                        continue
                    try:
                        sortsession(destination, scan, recording, dry_run)
                    except Exception as err:
                        exceptions.ReportError(err)
                        logger.error("Error processing folder {} in file {}"
                                     .format(rec_dir,
                                             recording.currentFile(True)))
            plugins.RunPlugin("SessionEndEP", scan)

        scan.in_path = sub_dir
        plugins.RunPlugin("SubjectEndEP", scan)

    ##################################
    # Merging the participants table
    ##################################
    df_processed = BidsSession.exportAsDataFrame()
    try:
        sub_table.append(df_processed)
    except Exception as e:
        logger.critical("Failed to merge participants table: {}".format(e))
        logger.info("Saving incompatible table to {}"
                    .format(sub_table.getDuplicatesPath()))
        sub_table.write_data(sub_table.getDuplicatesPath(), df_processed)
    else:
        sub_table.drop_duplicates()
        df_dupl = sub_table.check_duplicates()
        if df_dupl.any():
            logger.critical("Participant list contains one or several "
                            "duplicated entries: {}"
                            .format(sub_table.getIndexes(df_dupl, True)))
        if not dry_run:
            sub_table.save_table(selection=~df_dupl)
            if df_dupl.any():
                logger.info("Saving the list to be merged manually to {}"
                            .format(sub_table.getDuplicatesPath()))
                sub_table.save_table(selection=df_dupl,
                                     useDuplicates=True)

    plugins.RunPlugin("FinaliseEP")
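
# A minimal sketch of a prepare() invocation (folder names and the data type
# value are hypothetical; data_dirs maps a source data folder to its type):
#
#   prepare(source="raw", destination="prepared",
#           sub_prefix="sub", ses_prefix="ses",
#           data_dirs={"dicoms": "MRI"},
#           dry_run=True)
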